code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from project_utils import dict_to_file, get_word_count
if __name__ == "__main__":
    # Count the words in the sample input and persist the counts as CSV.
    source_path = 'sample.txt'
    result_path = 'count.csv'
    print("Reading file ", source_path)
    counts = get_word_count(source_path)
    print("Output from get_word_count is")
    print(counts)
    print("Writing to file named", result_path)
    dict_to_file(counts, result_path)
    print("Done processing!")
| [
"project_utils.dict_to_file",
"project_utils.get_word_count"
] | [((213, 241), 'project_utils.get_word_count', 'get_word_count', (['inp_filename'], {}), '(inp_filename)\n', (227, 241), False, 'from project_utils import dict_to_file, get_word_count\n'), ((362, 399), 'project_utils.dict_to_file', 'dict_to_file', (['word_dict', 'out_filename'], {}), '(word_dict, out_filename)\n', (374, 399), False, 'from project_utils import dict_to_file, get_word_count\n')] |
import os
import json
import yaml
import argparse
from pyxform import xls2json
from pyxform.builder import create_survey_element_from_dict
from pprint import pprint
from ..content_variations import build_content
from ..utils.form_to_yaml_string import form_to_yaml_string
YAML_FORMAT = 'yml'
JSON_FORMAT = 'json'
XLS_FORMAT = 'xls'
XML_FORMAT = 'xml'
EXT_FORMATS = {
'.yml': YAML_FORMAT,
'.yaml': YAML_FORMAT,
'.json': JSON_FORMAT,
'.xlsx': XLS_FORMAT,
'.xls': XLS_FORMAT,
'.xml': XML_FORMAT,
}
def _lookup_format(path):
try:
return EXT_FORMATS[os.path.splitext(path)[1]]
except KeyError:
valid_extensions = ', '.join(list(EXT_FORMATS.keys()))
raise ValueError(f'No valid format found for file [ {path} ]\n'
f'Valid extensions: [{valid_extensions}]')
def sans_headers_and_no_directional_quotes(pyxform_dict):
    """Strip '*_header' keys (in place) and normalize quoting via a JSON
    round-trip: curly quotes become plain quotes/apostrophes and the string
    literals "TRUE"/"FALSE" become JSON booleans.
    """
    for header_key in [k for k in pyxform_dict if k.endswith('_header')]:
        del pyxform_dict[header_key]
    serialized = json.dumps(pyxform_dict)
    # Each pair is (escaped-unicode-or-literal in the JSON text, replacement).
    replacements = (
        ('\\u201c', '\\"'),
        ('\\u201d', '\\"'),
        ('\\u2018', "'"),
        ('\\u2019', "'"),
        ('"TRUE"', 'true'),
        ('"FALSE"', 'false'),
    )
    for old, new in replacements:
        serialized = serialized.replace(old, new)
    return json.loads(serialized)
def open_xls(path_in):
    """Load an XLSForm workbook from *path_in* and tag it with the
    'xlsform' schema marker."""
    absolute_path = os.path.abspath(path_in)
    parsed = sans_headers_and_no_directional_quotes(
        xls2json.xls_to_dict(absolute_path)
    )
    parsed['schema'] = 'xlsform'
    return parsed
def open_yaml(path_in):
    """Parse the YAML form definition at *path_in* into Python objects."""
    with open(path_in) as stream:
        raw_text = stream.read()
    return yaml.safe_load(raw_text)
def form_to_xform(form_content, default_settings=None):
    """Export *form_content* to XLSForm structures and render pretty XForm XML.

    NOTE(review): `cc` below is not defined anywhere in this module — if
    `survey` has `_translations` this raises NameError. Presumably it should
    be the content object (`form_content`); confirm against callers.
    """
    export_kwargs = {}
    if default_settings:
        export_kwargs['default_settings'] = default_settings
    flat_json = form_content.export_to('xlsform', **export_kwargs)
    wbjson = xls2json.workbook_to_json(flat_json)
    survey = create_survey_element_from_dict(wbjson)
    if hasattr(survey, '_translations'):
        # tx_names is passed to the pyxform object to ensure the itext
        # translations show up in the correct order.
        # requires XLSForm/pyxform commit #68f0db99
        tx_names = []
        for tx in cc.txs.to_v1_strings():
            if tx is not None:
                tx_names.append(tx)
        for tx_name in tx_names:
            survey._translations[tx_name] = {}
    return survey._to_pretty_xml()
def print_form(form, validate=False, format=None, to_file=None):
    """Build the form content and emit it in the requested format.

    Output goes to *to_file* when given, otherwise to stdout.
    Raises ValueError for an unknown *format*.
    """
    loaded_form = build_content(form, validate=validate)

    def emit(text):
        # Route the rendered text to the requested destination.
        if to_file is None:
            print(text)
        else:
            with open(to_file, 'w') as out:
                out.write(text)

    if format == 'json':
        emit(json.dumps(loaded_form.export(), indent=2))
    elif format == 'yml':
        emit(form_to_yaml_string(loaded_form.export()))
    elif format == 'xml':
        default_settings = {'title': 'Form Title', 'identifier': 'generated'}
        emit(form_to_xform(loaded_form, default_settings=default_settings))
    else:
        raise ValueError(f'Unknown format: {format}')
def run(path_in, path_out=None, validate=False, format=None):
    """Convert the form definition at *path_in* and print/write the result.

    When *format* is omitted it is inferred from *path_out*, defaulting to
    'yml' when neither is given. Raises ValueError for missing input paths
    or unsupported input formats.
    """
    if format is None and path_out is None:
        # if no format or path is specified, then default output format is yml
        format = 'yml'
    elif format is None:
        format = _lookup_format(path_out)
    if not os.path.exists(path_in):
        raise ValueError('Path does not exist: ' + path_in)
    ext = _lookup_format(path_in)
    if ext == XLS_FORMAT:
        frm = open_xls(path_in)
    elif ext == YAML_FORMAT:
        frm = open_yaml(path_in)
    elif ext == JSON_FORMAT:
        # BUG FIX: `open_json` was referenced but never defined (NameError);
        # load the JSON document inline instead.
        with open(path_in, encoding='utf-8') as ff:
            frm = json.load(ff)
    else:
        # BUG FIX: other extensions (e.g. .xml input) previously fell
        # through with `frm` unbound, raising UnboundLocalError.
        raise ValueError(f'Unsupported input format: {ext}')
    print_form(frm, validate=validate, format=format, to_file=path_out)
def main():
    """Command-line entry point: parse arguments and run the converter."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'path_in',
        help="Path to the YML file with the form definition",
    )
    parser.add_argument(
        '-o', '--output', dest='path_out',
        # BUG FIX: help text was copy-pasted from --validate; describe the
        # option it actually documents.
        help='path of the file to write the output to',
    )
    parser.add_argument(
        '-f', '--format', dest='format',
        choices=['yml', 'json', 'xml'],
        help='output format',
    )
    parser.add_argument(
        '-v', '--validate', dest='validate',
        action='store_true',
        help='run the form through the schema validator',
    )
    run(**parser.parse_args().__dict__)


if __name__ == '__main__':
    main()
| [
"pyxform.xls2json.workbook_to_json",
"os.path.exists",
"argparse.ArgumentParser",
"json.dumps",
"os.path.splitext",
"pyxform.xls2json.xls_to_dict",
"os.path.abspath",
"pyxform.builder.create_survey_element_from_dict"
] | [((1461, 1485), 'os.path.abspath', 'os.path.abspath', (['path_in'], {}), '(path_in)\n', (1476, 1485), False, 'import os\n'), ((1958, 1994), 'pyxform.xls2json.workbook_to_json', 'xls2json.workbook_to_json', (['flat_json'], {}), '(flat_json)\n', (1983, 1994), False, 'from pyxform import xls2json\n'), ((2008, 2047), 'pyxform.builder.create_survey_element_from_dict', 'create_survey_element_from_dict', (['wbjson'], {}), '(wbjson)\n', (2039, 2047), False, 'from pyxform.builder import create_survey_element_from_dict\n'), ((3972, 3997), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3995, 3997), False, 'import argparse\n'), ((3573, 3596), 'os.path.exists', 'os.path.exists', (['path_in'], {}), '(path_in)\n', (3587, 3596), False, 'import os\n'), ((1548, 1576), 'pyxform.xls2json.xls_to_dict', 'xls2json.xls_to_dict', (['xlspth'], {}), '(xlspth)\n', (1568, 1576), False, 'from pyxform import xls2json\n'), ((588, 610), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (604, 610), False, 'import os\n'), ((1107, 1131), 'json.dumps', 'json.dumps', (['pyxform_dict'], {}), '(pyxform_dict)\n', (1117, 1131), False, 'import json\n')] |
import builtins
import datetime
import inspect
import threading
import time
import ws
# Shared module-level state for the logging helpers below.
# (A `global` statement at module scope is a no-op; kept as-is.)
global _c,_pq,_l_ws,_sc
_c={}  # cache(): file path -> file bytes
_pq=None  # print queue; None until the first print() call starts the worker
_l_ws={}  # live websocket listeners: address -> (thread, backlog_sent flag)
_sc=None  # lazily imported "server" module (see ws_logs_start)
_tl=threading.Lock()  # guards mutations of _pq across threads
def _print_q():
	"""Worker loop: drain the print queue, echo each line to stdout, fan it
	out to any websocket listeners and persist a capped log to "log.log".

	NOTE(review): assumes the project "storage" module reads/writes bytes
	(dt is used with bytes literals) — confirm against that module.
	"""
	global _pq,_l_ws
	lt=time.time()  # earliest time the next file flush is allowed
	fs=__import__("storage")
	fs.set_silent("log.log")
	dt=fs.read("log.log")
	lc=dt.count(b"\n")  # number of lines currently held in dt
	while (True):
		if (len(_pq)>0):
			_tl.acquire()
			# Pop the oldest (args, caller frame info) entry off the queue.
			a,sf,_pq=" ".join([str(e) for e in _pq[0][0]]),_pq[0][1],_pq[1:]
			_tl.release()
			s=datetime.datetime.now().strftime(f"[{sf.filename[:-3]}{('.'+sf.function if sf.function!='<module>' else '')}, %H:%M:%S] {a}")
			builtins.print(s)
			s=bytes(s,"utf-8")
			for k,v in list(_l_ws.items()):
				if (v[1]==False):
					# First message for this listener: send the backlog, tagged b"1".
					_l_ws[k]=(v[0],True)
					ws.send(b"1"+dt[:-1],thr=v[0])
				ws.send(b"0"+s,thr=v[0])  # tag b"0" marks one new log line
			dt+=s+b"\n"
			lc+=1
			if (lc>1024):
				# Over the cap: drop the oldest line from the buffer.
				dt=dt[dt.index(b"\n")+1:]
			if (time.time()>lt):
				lt=time.time()+30  # throttle file writes to at most one per 30s
				fs.write("log.log",dt)
def cache(fp):
    """Return the bytes of file *fp*, reading it from disk at most once."""
    global _c
    if fp not in _c:
        with open(fp, "rb") as handle:
            _c[fp] = handle.read()
    return _c[fp]
def print(*a):
	"""Queue *a* for the background logger (intentionally shadows the
	builtin print for modules importing this one).

	The first call lazily starts the _print_q worker thread; later calls
	append under the lock. The caller's frame info is captured so the log
	line can name the originating file/function.

	NOTE(review): the None-check and first assignment are not done under
	_tl, so two racing first calls could each start a worker — confirm
	whether callers guarantee a single-threaded first call.
	"""
	global _pq
	if (_pq is None):
		_pq=[(a,inspect.getouterframes(inspect.currentframe(),2)[1])]
		threading.Thread(target=_print_q).start()
	else:
		_tl.acquire()
		_pq+=[(a,inspect.getouterframes(inspect.currentframe(),2)[1])]
		_tl.release()
def ws_logs_start():
	"""Register the current websocket connection as a live log listener and
	spawn a daemon keep-alive pinger for it."""
	global _sc,_l_ws
	def _ws_keep_alive(a,t):
		# Ping every 20s until the listener is deregistered (ws_logs_end).
		while (a in _l_ws):
			ws.send(b"null",thr=t)
			time.sleep(20)
	if (_sc is None):
		_sc=__import__("server")  # imported on first use only
	a=_sc.address()
	# False flags that the log backlog has not yet been sent (see _print_q).
	_l_ws[a]=(threading.current_thread(),False)
	thr=threading.Thread(target=_ws_keep_alive,args=(a,_l_ws[a][0]))
	thr.daemon=True
	thr.start()
def ws_logs_end():
    """Deregister the current connection's log listener; its keep-alive
    thread then exits on its next membership check."""
    global _l_ws
    _l_ws.pop(_sc.address())
| [
"threading.current_thread",
"threading.Lock",
"inspect.currentframe",
"time.sleep",
"ws.send",
"datetime.datetime.now",
"time.time",
"threading.Thread",
"builtins.print"
] | [((150, 166), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (164, 166), False, 'import threading\n'), ((208, 219), 'time.time', 'time.time', ([], {}), '()\n', (217, 219), False, 'import time\n'), ((1542, 1604), 'threading.Thread', 'threading.Thread', ([], {'target': '_ws_keep_alive', 'args': '(a, _l_ws[a][0])'}), '(target=_ws_keep_alive, args=(a, _l_ws[a][0]))\n', (1558, 1604), False, 'import threading\n'), ((1503, 1529), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (1527, 1529), False, 'import threading\n'), ((585, 602), 'builtins.print', 'builtins.print', (['s'], {}), '(s)\n', (599, 602), False, 'import builtins\n'), ((850, 861), 'time.time', 'time.time', ([], {}), '()\n', (859, 861), False, 'import time\n'), ((1388, 1411), 'ws.send', 'ws.send', (["b'null'"], {'thr': 't'}), "(b'null', thr=t)\n", (1395, 1411), False, 'import ws\n'), ((1414, 1428), 'time.sleep', 'time.sleep', (['(20)'], {}), '(20)\n', (1424, 1428), False, 'import time\n'), ((748, 775), 'ws.send', 'ws.send', (["(b'0' + s)"], {'thr': 'v[0]'}), "(b'0' + s, thr=v[0])\n", (755, 775), False, 'import ws\n'), ((873, 884), 'time.time', 'time.time', ([], {}), '()\n', (882, 884), False, 'import time\n'), ((1149, 1182), 'threading.Thread', 'threading.Thread', ([], {'target': '_print_q'}), '(target=_print_q)\n', (1165, 1182), False, 'import threading\n'), ((456, 479), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (477, 479), False, 'import datetime\n'), ((713, 746), 'ws.send', 'ws.send', (["(b'1' + dt[:-1])"], {'thr': 'v[0]'}), "(b'1' + dt[:-1], thr=v[0])\n", (720, 746), False, 'import ws\n'), ((1116, 1138), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (1136, 1138), False, 'import inspect\n'), ((1248, 1270), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (1268, 1270), False, 'import inspect\n')] |
import utils
from utils import format
import os
import tempfile
import urllib.request
import shutil
import zipfile
# Local Slay the Spire installation and the working copy of the mod sources.
spire_dir = r"D:\Games\Slay the Spire Modded"
mod_dir = os.path.join("cache", "mod")
def build():
    """Assemble the EngiMod jar end to end: fetch FruityMod/ModTheSpire/
    BaseMod, generate card/power/action/java sources from the engi_mod
    templates, patch the FruityMod sources, render card art, build with
    Maven and finally launch ModTheSpire for a test run.

    NOTE(review): relies on network access, a local Maven install, the
    project `utils`/`engi_mod`/`spire` modules and the Windows paths
    configured at module level — not runnable in isolation.
    """
    # STEP: clone FruityMod
    if not os.path.exists(mod_dir):
        print("Downloading {}".format("FruityMod"))
        fruity_url = r"https://github.com/gskleres/FruityMod-StS/archive/v0.6.2b.zip"
        utils.mkdir("cache")
        download_file = tempfile.NamedTemporaryFile(suffix=".zip", dir="cache", delete=False).name
        with urllib.request.urlopen(fruity_url) as response, open(download_file, "wb") as out_file:
            shutil.copyfileobj(response, out_file)
        utils.unzip(download_file, mod_dir, shift=1, remove=True)
    # STEP: fetch libs
    mod_jar = os.path.join(spire_dir, "ModTheSpire.jar")
    if not os.path.exists(mod_jar):
        print("Downloading ModTheSpire")
        download_file = tempfile.NamedTemporaryFile(suffix=".zip", dir="..", delete=False).name
        urllib.request.urlretrieve("https://github.com/kiooeht/ModTheSpire/releases/download/v2.6.0/ModTheSpire.zip", download_file)
        with zipfile.ZipFile(download_file, "r") as archive, open(mod_jar, "wb") as file:
            jar_data = archive.read("ModTheSpire.jar")
            file.write(jar_data)
        os.remove(download_file)
    base_jar = os.path.join(spire_dir, "mods", "BaseMod.jar")
    if not os.path.exists(base_jar):
        print("Downloading BaseMod")
        urllib.request.urlretrieve("https://github.com/daviscook477/BaseMod/releases/download/v2.9.1/BaseMod.jar", base_jar)
    from spire import name_id
    import textwrap
    import io
    import json
    print("Generating data")
    image_dir = os.path.join("assets", "images")
    if os.path.exists(os.path.join("cache", "DEBUG")):
        image_dir = os.path.join("todo", "images")
    # STEP: generate cards
    from engi_mod import cards
    with open(os.path.join("templates", "card.java"), encoding="utf-8") as file:
        card_template = file.read()
    for card in cards:
        with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\cards".split("\\"), name_id(card["name"]) + ".java"), "w", encoding="utf-8") as file:
            file.write(format(card_template, card))
    # STEP: patch code
    templates_cache = os.path.join("cache", "templates")
    if not os.path.exists(templates_cache):
        utils.mkdir(templates_cache)
        shutil.copy(os.path.join(mod_dir, *r"src\main\java\fruitymod\FruityMod.java".split("\\")), os.path.join(templates_cache, "FruiyMod.java"))
        shutil.copy(os.path.join(mod_dir, *r"src\main\java\fruitymod\characters\TheSeeker.java".split("\\")), os.path.join(templates_cache, "TheSeeker.java"))
        shutil.copy(os.path.join(mod_dir, *r"src\main\resources\localization\FruityMod-CardStrings.json".split("\\")), os.path.join(templates_cache, "FruityMod-CardStrings.json"))
    image_code = io.StringIO()
    add_code = io.StringIO()
    unlock_code = io.StringIO()
    for card in cards:
        id = name_id(card["name"], upper=True).lower()
        image_file = os.path.join(image_dir, id + ".png")
        image_file = "cards/{}.png".format(id if os.path.exists(image_file) else "runic_binding")
        image_code.write(format(
            'public static final String {{ name_id(card["name"], upper=True) }} = "{{ image_file }}";'
        ) + "\n")
        if card["rarity"] != "special":
            add_code.write(format(
                'BaseMod.addCard(new {{ name_id(card["name"]) }}());'
            ) + "\n")
            unlock_code.write(format(
                'UnlockTracker.unlockCard("{{ card["name"] }}");'
            ) + "\n")
    with open(os.path.join(templates_cache, "FruiyMod.java"), encoding="utf-8") as file:
        fruity_lines = [line for line in file]
    for i, line in enumerate(fruity_lines):
        if "public static final String PHASE_COIL" in line:
            fruity_lines.insert(i + 1, "\n" + textwrap.indent(image_code.getvalue(), " " * 4))
            break
    for i, line in enumerate(fruity_lines):
        if "BaseMod.addCard(new Nexus())" in line:
            fruity_lines.insert(i + 1, "\n" + textwrap.indent(add_code.getvalue(), " " * 4 * 2))
            fruity_lines.insert(i + 2, "\n" + textwrap.indent(unlock_code.getvalue(), " " * 4 * 2))
            break
    with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\FruityMod.java".split("\\")), "w", encoding="utf-8") as file:
        file.write("".join(fruity_lines))
    with open(os.path.join(templates_cache, "TheSeeker.java"), encoding="utf-8") as file:
        seeker_lines = [line for line in file]
    # STEP: starting relic
    from engi_mod import relic
    for i, line in enumerate(seeker_lines):
        if "Arcanosphere" in line:
            del seeker_lines[i:i+2]
            seeker_lines.insert(i, "\n{}\n\n".format(textwrap.indent(textwrap.dedent(format("""
                retVal.add("{{ relic }}");
                UnlockTracker.markRelicAsSeen("{{ relic }}");
            """)).strip(), " " * 4 * 2)))
            break
    # STEP: starting deck
    from engi_mod import deck
    if not deck:
        deck = [card["name"] for card in cards if card["rarity"] != "special"]
    for i, line in enumerate(seeker_lines):
        if "Strike_P" in line:
            for j, line in enumerate(seeker_lines):
                if "AstralHaze" in line:
                    break
            del seeker_lines[i:j+1]
            seeker_lines.insert(i, "\n{}\n\n".format(textwrap.indent(
                "\n".join('retVal.add("{}");'.format(card) for card in deck)
            , " " * 4 * 2)))
            break
    with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\characters\TheSeeker.java".split("\\")), "w", encoding="utf-8") as file:
        file.write("".join(seeker_lines))
    card_strings = json.load(open(os.path.join(templates_cache, "FruityMod-CardStrings.json"), encoding="utf-8"))
    for card in cards:
        data = {
            "NAME": card["name"],
            "DESCRIPTION": card["desc"],
        }
        desc = card.get("upgrade_desc")
        if desc:
            data["UPGRADE_DESCRIPTION"] = desc
        card_strings[card["name"]] = data
    json.dump(card_strings,
        open(os.path.join(mod_dir, *r"src\main\resources\localization\FruityMod-CardStrings.json".split("\\")),
            "w", encoding="utf-8"), sort_keys=True, indent=4)
    # STEP: generate powers
    from engi_mod import powers
    with open(os.path.join("templates", "power.java"), encoding="utf-8") as file:
        power_template = file.read()
    for power in powers:
        with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\powers".split("\\"), power["id"] + ".java"), "w", encoding="utf-8") as file:
            file.write(format(power_template, power))
    # STEP: generate actions
    from engi_mod import actions
    with open(os.path.join("templates", "action.java"), encoding="utf-8") as file:
        action_template = file.read()
    for action in actions:
        with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\actions\unique".split("\\"), action["id"] + ".java"), "w", encoding="utf-8") as file:
            file.write(format(action_template, action))
    # STEP: generate java files
    from engi_mod import javas
    with open(os.path.join("templates", "java.java"), encoding="utf-8") as file:
        java_template = file.read()
    for java in javas:
        with open(os.path.join(mod_dir, *r"src\main\java".split("\\"), *java["package"], java["name"] + ".java"), "w", encoding="utf-8") as file:
            file.write(format(java_template, java))
    # STEP: card images
    print("Generating images")
    import numpy as np
    portrait_masks = {}
    for type in "attack skill power".split():
        image = utils.open_data(os.path.join("templates", "1024Portraits_{}_mask.png".format(type)))
        image = image / 255
        image = np.repeat(image[:,:,:1], 4, axis=-1)
        portrait_masks[type] = image
    for card in cards:
        id = name_id(card["name"], upper=True).lower()
        image_file = os.path.join(image_dir, id + ".png")
        target_p_file = os.path.join(mod_dir, *r"src\main\resources\img\cards".split("\\"), id + "_p" + ".png")
        target_file = os.path.join(mod_dir, *r"src\main\resources\img\cards".split("\\"), id + ".png")
        if os.path.exists(target_p_file):
            continue
        if os.path.exists(image_file):
            image = utils.open_data(image_file)
            from skimage.transform import resize
            target = 500, 380
            r = image.shape[0] / image.shape[1]
            if r >= target[0] / target[1]:
                size = np.ceil(target[1] * r).astype("int"), target[1]
                x = np.round((size[0] - target[0]) / 2).astype("int")
                image = resize(image, size, mode="edge")[x:x+target[0]]
            else:
                size = target[0], np.ceil(target[0] / r).astype("int")
                image = resize(image, size, mode="edge")[:,:target[1]]
            image *= portrait_masks[card["type"]]
            from PIL import Image
            img = Image.fromarray(np.round(image * 255).astype("uint8").transpose((1, 0, 2)))
            img.save(target_p_file)
            target = 250, 190
            image = resize(image, target, mode="edge")
            img = Image.fromarray(np.round(image * 255).astype("uint8").transpose((1, 0, 2)))
            img.save(target_file)
    # STEP: card borders
    utils.sync(os.path.join("assets", "512"), os.path.join(mod_dir, *r"src\main\resources\img\512".split("\\")))
    utils.sync(os.path.join("assets", "1024"), os.path.join(mod_dir, *r"src\main\resources\img\1024".split("\\")))
    # STEP: keywords
    from engi_mod import keywords
    keyword_code = io.StringIO()
    for name, keyword in keywords.items():
        words = ", ".join('"{}"'.format(word) for word in [name.lower()] + keyword["words"])
        keyword_code.write(format(
            'BaseMod.addKeyword(new String[] {"{{ name }}", {{ words }}}, "{{ keyword["desc"] }}");'
        ) + "\n")
    with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\FruityMod.java".split("\\")), encoding="utf-8") as file:
        fruity_lines = [line for line in file]
    for i, line in enumerate(fruity_lines):
        if '{"intangible", "Intangible"}, "All damage and HP loss you suffer is reduced to 1."' in line:
            fruity_lines.insert(i + 1, "\n" + textwrap.indent(keyword_code.getvalue(), " " * 4 * 2))
            break
    with open(os.path.join(mod_dir, *r"src\main\java\fruitymod\FruityMod.java".split("\\")), "w", encoding="utf-8") as file:
        file.write("".join(fruity_lines))
    # STEP: mod info
    old_info = os.path.join(mod_dir, *r"src\main\resources\ModTheSpire.config".split("\\"))
    if os.path.exists(old_info):
        os.remove(old_info)
    from engi_mod import info
    json.dump(info, open(os.path.join(mod_dir, *r"src\main\resources\ModTheSpire.json".split("\\")), "w", encoding="utf-8"), indent=4)
    # STEP: maven project
    pom_template = os.path.join(templates_cache, "pom.xml")
    if not os.path.exists(pom_template):
        shutil.copy(os.path.join(mod_dir, "pom.xml"), pom_template)
    with open(pom_template, encoding="utf-8") as file:
        pom = file.read()
    pom = pom.replace("${basedir}/../lib/ModTheSpire.jar", "/".join(spire_dir.split(os.path.sep) + ["ModTheSpire.jar"]))
    pom = pom.replace("${basedir}/../lib/BaseMod.jar", "/".join(spire_dir.split(os.path.sep) + ["mods", "BaseMod.jar"]))
    pom = pom.replace("${basedir}/../lib/desktop-1.0.jar", "/".join(spire_dir.split(os.path.sep) + ["desktop-1.0.jar"]))
    jar_file = os.path.join(spire_dir, "mods", "EngiMod.jar")
    pom = pom.replace("../_ModTheSpire/mods/FruityMod.jar", "/".join(jar_file.split(os.path.sep)))
    with open(os.path.join(mod_dir, "pom.xml"), "w", encoding="utf-8") as file:
        file.write(pom)
    # STEP: compile
    if os.path.exists(jar_file):
        os.remove(jar_file)
    with utils.cd(mod_dir):
        os.system("mvn package")
    if not os.path.exists(jar_file):
        print("Compilation failed")
        return
    # STEP: test
    with utils.cd(spire_dir):
        os.system("ModTheSpire.jar")
# Allow running this module directly as the build script.
if __name__ == "__main__":
    build()
| [
"zipfile.ZipFile",
"utils.cd",
"spire.name_id",
"os.remove",
"os.path.exists",
"numpy.repeat",
"utils.format",
"tempfile.NamedTemporaryFile",
"utils.open_data",
"io.StringIO",
"numpy.round",
"numpy.ceil",
"shutil.copyfileobj",
"skimage.transform.resize",
"engi_mod.keywords.items",
"os.... | [((172, 200), 'os.path.join', 'os.path.join', (['"""cache"""', '"""mod"""'], {}), "('cache', 'mod')\n", (184, 200), False, 'import os\n'), ((800, 842), 'os.path.join', 'os.path.join', (['spire_dir', '"""ModTheSpire.jar"""'], {}), "(spire_dir, 'ModTheSpire.jar')\n", (812, 842), False, 'import os\n'), ((1375, 1421), 'os.path.join', 'os.path.join', (['spire_dir', '"""mods"""', '"""BaseMod.jar"""'], {}), "(spire_dir, 'mods', 'BaseMod.jar')\n", (1387, 1421), False, 'import os\n'), ((1748, 1780), 'os.path.join', 'os.path.join', (['"""assets"""', '"""images"""'], {}), "('assets', 'images')\n", (1760, 1780), False, 'import os\n'), ((2337, 2371), 'os.path.join', 'os.path.join', (['"""cache"""', '"""templates"""'], {}), "('cache', 'templates')\n", (2349, 2371), False, 'import os\n'), ((2956, 2969), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2967, 2969), False, 'import io\n'), ((2985, 2998), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2996, 2998), False, 'import io\n'), ((3017, 3030), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (3028, 3030), False, 'import io\n'), ((9852, 9865), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (9863, 9865), False, 'import io\n'), ((9891, 9907), 'engi_mod.keywords.items', 'keywords.items', ([], {}), '()\n', (9905, 9907), False, 'from engi_mod import keywords\n'), ((10880, 10904), 'os.path.exists', 'os.path.exists', (['old_info'], {}), '(old_info)\n', (10894, 10904), False, 'import os\n'), ((11145, 11185), 'os.path.join', 'os.path.join', (['templates_cache', '"""pom.xml"""'], {}), "(templates_cache, 'pom.xml')\n", (11157, 11185), False, 'import os\n'), ((11754, 11800), 'os.path.join', 'os.path.join', (['spire_dir', '"""mods"""', '"""EngiMod.jar"""'], {}), "(spire_dir, 'mods', 'EngiMod.jar')\n", (11766, 11800), False, 'import os\n'), ((12032, 12056), 'os.path.exists', 'os.path.exists', (['jar_file'], {}), '(jar_file)\n', (12046, 12056), False, 'import os\n'), ((254, 277), 'os.path.exists', 'os.path.exists', (['mod_dir'], 
{}), '(mod_dir)\n', (268, 277), False, 'import os\n'), ((425, 445), 'utils.mkdir', 'utils.mkdir', (['"""cache"""'], {}), "('cache')\n", (436, 445), False, 'import utils\n'), ((704, 761), 'utils.unzip', 'utils.unzip', (['download_file', 'mod_dir'], {'shift': '(1)', 'remove': '(True)'}), '(download_file, mod_dir, shift=1, remove=True)\n', (715, 761), False, 'import utils\n'), ((854, 877), 'os.path.exists', 'os.path.exists', (['mod_jar'], {}), '(mod_jar)\n', (868, 877), False, 'import os\n'), ((1335, 1359), 'os.remove', 'os.remove', (['download_file'], {}), '(download_file)\n', (1344, 1359), False, 'import os\n'), ((1433, 1457), 'os.path.exists', 'os.path.exists', (['base_jar'], {}), '(base_jar)\n', (1447, 1457), False, 'import os\n'), ((1803, 1833), 'os.path.join', 'os.path.join', (['"""cache"""', '"""DEBUG"""'], {}), "('cache', 'DEBUG')\n", (1815, 1833), False, 'import os\n'), ((1856, 1886), 'os.path.join', 'os.path.join', (['"""todo"""', '"""images"""'], {}), "('todo', 'images')\n", (1868, 1886), False, 'import os\n'), ((2383, 2414), 'os.path.exists', 'os.path.exists', (['templates_cache'], {}), '(templates_cache)\n', (2397, 2414), False, 'import os\n'), ((2424, 2452), 'utils.mkdir', 'utils.mkdir', (['templates_cache'], {}), '(templates_cache)\n', (2435, 2452), False, 'import utils\n'), ((3130, 3166), 'os.path.join', 'os.path.join', (['image_dir', "(id + '.png')"], {}), "(image_dir, id + '.png')\n", (3142, 3166), False, 'import os\n'), ((7976, 8014), 'numpy.repeat', 'np.repeat', (['image[:, :, :1]', '(4)'], {'axis': '(-1)'}), '(image[:, :, :1], 4, axis=-1)\n', (7985, 8014), True, 'import numpy as np\n'), ((8149, 8185), 'os.path.join', 'os.path.join', (['image_dir', "(id + '.png')"], {}), "(image_dir, id + '.png')\n", (8161, 8185), False, 'import os\n'), ((8412, 8441), 'os.path.exists', 'os.path.exists', (['target_p_file'], {}), '(target_p_file)\n', (8426, 8441), False, 'import os\n'), ((8475, 8501), 'os.path.exists', 'os.path.exists', (['image_file'], {}), 
'(image_file)\n', (8489, 8501), False, 'import os\n'), ((9564, 9593), 'os.path.join', 'os.path.join', (['"""assets"""', '"""512"""'], {}), "('assets', '512')\n", (9576, 9593), False, 'import os\n'), ((9677, 9707), 'os.path.join', 'os.path.join', (['"""assets"""', '"""1024"""'], {}), "('assets', '1024')\n", (9689, 9707), False, 'import os\n'), ((10914, 10933), 'os.remove', 'os.remove', (['old_info'], {}), '(old_info)\n', (10923, 10933), False, 'import os\n'), ((11197, 11225), 'os.path.exists', 'os.path.exists', (['pom_template'], {}), '(pom_template)\n', (11211, 11225), False, 'import os\n'), ((12066, 12085), 'os.remove', 'os.remove', (['jar_file'], {}), '(jar_file)\n', (12075, 12085), False, 'import os\n'), ((12095, 12112), 'utils.cd', 'utils.cd', (['mod_dir'], {}), '(mod_dir)\n', (12103, 12112), False, 'import utils\n'), ((12122, 12146), 'os.system', 'os.system', (['"""mvn package"""'], {}), "('mvn package')\n", (12131, 12146), False, 'import os\n'), ((12158, 12182), 'os.path.exists', 'os.path.exists', (['jar_file'], {}), '(jar_file)\n', (12172, 12182), False, 'import os\n'), ((12262, 12281), 'utils.cd', 'utils.cd', (['spire_dir'], {}), '(spire_dir)\n', (12270, 12281), False, 'import utils\n'), ((12291, 12319), 'os.system', 'os.system', (['"""ModTheSpire.jar"""'], {}), "('ModTheSpire.jar')\n", (12300, 12319), False, 'import os\n'), ((470, 539), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".zip"""', 'dir': '"""cache"""', 'delete': '(False)'}), "(suffix='.zip', dir='cache', delete=False)\n", (497, 539), False, 'import tempfile\n'), ((657, 695), 'shutil.copyfileobj', 'shutil.copyfileobj', (['response', 'out_file'], {}), '(response, out_file)\n', (675, 695), False, 'import shutil\n'), ((944, 1010), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".zip"""', 'dir': '""".."""', 'delete': '(False)'}), "(suffix='.zip', dir='..', delete=False)\n", (971, 1010), False, 'import tempfile\n'), ((1162, 1197), 
'zipfile.ZipFile', 'zipfile.ZipFile', (['download_file', '"""r"""'], {}), "(download_file, 'r')\n", (1177, 1197), False, 'import zipfile\n'), ((1960, 1998), 'os.path.join', 'os.path.join', (['"""templates"""', '"""card.java"""'], {}), "('templates', 'card.java')\n", (1972, 1998), False, 'import os\n'), ((2552, 2598), 'os.path.join', 'os.path.join', (['templates_cache', '"""FruiyMod.java"""'], {}), "(templates_cache, 'FruiyMod.java')\n", (2564, 2598), False, 'import os\n'), ((2710, 2757), 'os.path.join', 'os.path.join', (['templates_cache', '"""TheSeeker.java"""'], {}), "(templates_cache, 'TheSeeker.java')\n", (2722, 2757), False, 'import os\n'), ((2878, 2937), 'os.path.join', 'os.path.join', (['templates_cache', '"""FruityMod-CardStrings.json"""'], {}), "(templates_cache, 'FruityMod-CardStrings.json')\n", (2890, 2937), False, 'import os\n'), ((3727, 3773), 'os.path.join', 'os.path.join', (['templates_cache', '"""FruiyMod.java"""'], {}), "(templates_cache, 'FruiyMod.java')\n", (3739, 3773), False, 'import os\n'), ((4558, 4605), 'os.path.join', 'os.path.join', (['templates_cache', '"""TheSeeker.java"""'], {}), "(templates_cache, 'TheSeeker.java')\n", (4570, 4605), False, 'import os\n'), ((5912, 5971), 'os.path.join', 'os.path.join', (['templates_cache', '"""FruityMod-CardStrings.json"""'], {}), "(templates_cache, 'FruityMod-CardStrings.json')\n", (5924, 5971), False, 'import os\n'), ((6532, 6571), 'os.path.join', 'os.path.join', (['"""templates"""', '"""power.java"""'], {}), "('templates', 'power.java')\n", (6544, 6571), False, 'import os\n'), ((6937, 6977), 'os.path.join', 'os.path.join', (['"""templates"""', '"""action.java"""'], {}), "('templates', 'action.java')\n", (6949, 6977), False, 'import os\n'), ((7358, 7396), 'os.path.join', 'os.path.join', (['"""templates"""', '"""java.java"""'], {}), "('templates', 'java.java')\n", (7370, 7396), False, 'import os\n'), ((8523, 8550), 'utils.open_data', 'utils.open_data', (['image_file'], {}), '(image_file)\n', (8538, 
8550), False, 'import utils\n'), ((9360, 9394), 'skimage.transform.resize', 'resize', (['image', 'target'], {'mode': '"""edge"""'}), "(image, target, mode='edge')\n", (9366, 9394), False, 'from skimage.transform import resize\n'), ((11247, 11279), 'os.path.join', 'os.path.join', (['mod_dir', '"""pom.xml"""'], {}), "(mod_dir, 'pom.xml')\n", (11259, 11279), False, 'import os\n'), ((11914, 11946), 'os.path.join', 'os.path.join', (['mod_dir', '"""pom.xml"""'], {}), "(mod_dir, 'pom.xml')\n", (11926, 11946), False, 'import os\n'), ((2262, 2289), 'utils.format', 'format', (['card_template', 'card'], {}), '(card_template, card)\n', (2268, 2289), False, 'from utils import format\n'), ((3067, 3100), 'spire.name_id', 'name_id', (["card['name']"], {'upper': '(True)'}), "(card['name'], upper=True)\n", (3074, 3100), False, 'from spire import name_id\n'), ((3216, 3242), 'os.path.exists', 'os.path.exists', (['image_file'], {}), '(image_file)\n', (3230, 3242), False, 'import os\n'), ((3290, 3398), 'utils.format', 'format', (['"""public static final String {{ name_id(card["name"], upper=True) }} = "{{ image_file }}";"""'], {}), '(\n \'public static final String {{ name_id(card["name"], upper=True) }} = "{{ image_file }}";\'\n )\n', (3296, 3398), False, 'from utils import format\n'), ((6829, 6858), 'utils.format', 'format', (['power_template', 'power'], {}), '(power_template, power)\n', (6835, 6858), False, 'from utils import format\n'), ((7247, 7278), 'utils.format', 'format', (['action_template', 'action'], {}), '(action_template, action)\n', (7253, 7278), False, 'from utils import format\n'), ((7653, 7680), 'utils.format', 'format', (['java_template', 'java'], {}), '(java_template, java)\n', (7659, 7680), False, 'from utils import format\n'), ((8086, 8119), 'spire.name_id', 'name_id', (["card['name']"], {'upper': '(True)'}), "(card['name'], upper=True)\n", (8093, 8119), False, 'from spire import name_id\n'), ((10029, 10135), 'utils.format', 'format', (['"""BaseMod.addKeyword(new 
String[] {"{{ name }}", {{ words }}}, "{{ keyword["desc"] }}");"""'], {}), '(\n \'BaseMod.addKeyword(new String[] {"{{ name }}", {{ words }}}, "{{ keyword["desc"] }}");\'\n )\n', (10035, 10135), False, 'from utils import format\n'), ((3486, 3547), 'utils.format', 'format', (['"""BaseMod.addCard(new {{ name_id(card["name"]) }}());"""'], {}), '(\'BaseMod.addCard(new {{ name_id(card["name"]) }}());\')\n', (3492, 3547), False, 'from utils import format\n'), ((3616, 3673), 'utils.format', 'format', (['"""UnlockTracker.unlockCard("{{ card["name"] }}");"""'], {}), '(\'UnlockTracker.unlockCard("{{ card["name"] }}");\')\n', (3622, 3673), False, 'from utils import format\n'), ((8887, 8919), 'skimage.transform.resize', 'resize', (['image', 'size'], {'mode': '"""edge"""'}), "(image, size, mode='edge')\n", (8893, 8919), False, 'from skimage.transform import resize\n'), ((9048, 9080), 'skimage.transform.resize', 'resize', (['image', 'size'], {'mode': '"""edge"""'}), "(image, size, mode='edge')\n", (9054, 9080), False, 'from skimage.transform import resize\n'), ((2173, 2194), 'spire.name_id', 'name_id', (["card['name']"], {}), "(card['name'])\n", (2180, 2194), False, 'from spire import name_id\n'), ((8813, 8848), 'numpy.round', 'np.round', (['((size[0] - target[0]) / 2)'], {}), '((size[0] - target[0]) / 2)\n', (8821, 8848), True, 'import numpy as np\n'), ((8745, 8767), 'numpy.ceil', 'np.ceil', (['(target[1] * r)'], {}), '(target[1] * r)\n', (8752, 8767), True, 'import numpy as np\n'), ((8987, 9009), 'numpy.ceil', 'np.ceil', (['(target[0] / r)'], {}), '(target[0] / r)\n', (8994, 9009), True, 'import numpy as np\n'), ((9213, 9234), 'numpy.round', 'np.round', (['(image * 255)'], {}), '(image * 255)\n', (9221, 9234), True, 'import numpy as np\n'), ((9429, 9450), 'numpy.round', 'np.round', (['(image * 255)'], {}), '(image * 255)\n', (9437, 9450), True, 'import numpy as np\n'), ((4939, 5081), 'utils.format', 'format', (['"""\n retVal.add("{{ relic }}");\n 
UnlockTracker.markRelicAsSeen("{{ relic }}");\n """'], {}), '(\n """\n retVal.add("{{ relic }}");\n UnlockTracker.markRelicAsSeen("{{ relic }}");\n """\n )\n', (4945, 5081), False, 'from utils import format\n')] |
import errno
import os
import subprocess
import sys
from distutils import log
from distutils.command.build_ext import build_ext
from distutils.errors import DistutilsError
def exec_process(cmdline, silent=True, catch_enoent=True, input=None, **kwargs):
    """Execute a subprocess and return its stdout as text.

    Args:
        cmdline: argv list of the command to run.
        silent: when False, echo the child's stdout/stderr to our own streams.
        catch_enoent: when True, translate a missing executable (ENOENT) into
            a DistutilsError naming the command.
        input: optional data passed to the child's stdin.
        **kwargs: forwarded to subprocess.Popen (cwd, env, shell, ...).

    Returns:
        The child's stdout, decoded to str on Python 3.

    Raises:
        DistutilsError: if the command is missing (and catch_enoent is set) or
            exits with a nonzero return code; stderr is included in the message.
    """
    try:
        sub = subprocess.Popen(args=cmdline, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               **kwargs)
        stdout, stderr = sub.communicate(input=input)
        if not isinstance(stdout, str):
            # Python 3 gives bytes: decode with a lossy fallback.
            # Python 2 gives str already, so this branch is skipped there.
            stdout = stdout.decode(sys.getdefaultencoding(), "replace")
            stderr = stderr.decode(sys.getdefaultencoding(), "replace")
        returncode = sub.returncode
        if not silent:
            sys.stdout.write(stdout)
            sys.stderr.write(stderr)
    except OSError as e:
        if e.errno == errno.ENOENT and catch_enoent:
            raise DistutilsError('"%s" is not present on this system' % cmdline[0])
        else:
            raise
    if returncode != 0:
        raise DistutilsError('Got return value %d while executing "%s", stderr output was:\n%s' % (returncode, " ".join(cmdline), stderr.rstrip("\n")))
    return stdout
def exec_make(cmdline, *args, **kwargs):
    """Run GNU make with *cmdline* arguments, preferring ``gmake`` on BSD.

    Tries each candidate make binary in order and returns the first
    successful invocation's output; raises DistutilsError if none exists.
    """
    assert isinstance(cmdline, list)
    on_bsd = "bsd" in sys.platform
    candidates = ["gmake", "make"] if on_bsd else ["make"]
    for tool in candidates:
        if on_bsd and tool == "make":
            log.warn("Running plain make on BSD-derived system. It will likely fail. Consider installing GNU make from the ports collection.")
        try:
            return exec_process([tool] + cmdline, *args, catch_enoent=False, **kwargs)
        except OSError as exc:
            # This candidate is absent; fall through to the next one.
            if exc.errno != errno.ENOENT:
                raise
    raise DistutilsError('"make" is not present on this system')
class cares_build_ext(build_ext):
    """build_ext subclass that compiles the bundled c-ares library first
    and links the extension against the resulting static archive."""

    # Location of the vendored c-ares source tree.
    cares_dir = os.path.join('deps', 'c-ares')

    # Extend the standard build_ext command-line options with our flag.
    user_options = build_ext.user_options
    user_options.extend([
        ("cares-clean-compile", None, "Clean c-ares tree before compilation"),
    ])
    boolean_options = build_ext.boolean_options
    boolean_options.extend(["cares-clean-compile"])

    def initialize_options(self):
        """Set the default for the extra --cares-clean-compile option."""
        build_ext.initialize_options(self)
        self.cares_clean_compile = 0

    def build_extensions(self):
        """Build c-ares, then configure compiler/link flags per platform and
        delegate to the stock build_ext implementation."""
        if self.compiler.compiler_type == 'mingw32':
            # Dirty hack to avoid linking with more than one C runtime when using MinGW
            self.compiler.dll_libraries = [lib for lib in self.compiler.dll_libraries if not lib.startswith('msvcr')]
        # Force a rebuild of the extension if the user asked for a clean c-ares build.
        self.force = self.cares_clean_compile
        if self.compiler.compiler_type == 'msvc':
            self.cares_lib = os.path.join(self.cares_dir, 'cares.lib')
        else:
            self.cares_lib = os.path.join(self.cares_dir, 'libcares.a')
        self.build_cares()
        # Set compiler options
        if self.compiler.compiler_type == 'mingw32':
            self.compiler.add_library_dir(self.cares_dir)
            self.compiler.add_library('cares')
        # Link the prebuilt static archive directly into the extension.
        self.extensions[0].extra_objects = [self.cares_lib]
        self.compiler.add_include_dir(os.path.join(self.cares_dir, 'src'))
        if sys.platform.startswith('linux'):
            self.compiler.add_library('rt')
        elif sys.platform == 'win32':
            if self.compiler.compiler_type == 'msvc':
                self.extensions[0].extra_link_args = ['/NODEFAULTLIB:libcmt']
            # Windows networking/system libraries c-ares depends on.
            self.compiler.add_library('advapi32')
            self.compiler.add_library('iphlpapi')
            self.compiler.add_library('psapi')
            self.compiler.add_library('ws2_32')
        build_ext.build_extensions(self)

    def build_cares(self):
        """Compile the vendored c-ares tree if its archive is missing,
        optionally cleaning it first."""
        #self.debug_mode = bool(self.debug) or hasattr(sys, 'gettotalrefcount')
        win32_msvc = self.compiler.compiler_type == 'msvc'
        def build():
            # Compile position-independent so the static lib can be linked
            # into a shared extension module; respect any user CFLAGS.
            cflags = '-fPIC'
            env = os.environ.copy()
            env['CFLAGS'] = ' '.join(x for x in (cflags, env.get('CFLAGS', None)) if x)
            log.info('Building c-ares...')
            if win32_msvc:
                exec_process('cmd.exe /C vcbuild.bat', cwd=self.cares_dir, env=env, shell=True)
            else:
                exec_make(['libcares.a'], cwd=self.cares_dir, env=env)
        def clean():
            if win32_msvc:
                exec_process('cmd.exe /C vcbuild.bat clean', cwd=self.cares_dir, shell=True)
            else:
                exec_make(['clean'], cwd=self.cares_dir)
        if self.cares_clean_compile:
            clean()
        if not os.path.exists(self.cares_lib):
            log.info('c-ares needs to be compiled.')
            build()
        else:
            log.info('No need to build c-ares.')
| [
"os.path.exists",
"sys.getdefaultencoding",
"subprocess.Popen",
"os.path.join",
"sys.platform.startswith",
"os.environ.copy",
"distutils.command.build_ext.build_ext.initialize_options",
"sys.stderr.write",
"distutils.errors.DistutilsError",
"distutils.log.info",
"distutils.command.build_ext.buil... | [((1996, 2050), 'distutils.errors.DistutilsError', 'DistutilsError', (['""""make" is not present on this system"""'], {}), '(\'"make" is not present on this system\')\n', (2010, 2050), False, 'from distutils.errors import DistutilsError\n'), ((2103, 2133), 'os.path.join', 'os.path.join', (['"""deps"""', '"""c-ares"""'], {}), "('deps', 'c-ares')\n", (2115, 2133), False, 'import os\n'), ((427, 543), 'subprocess.Popen', 'subprocess.Popen', ([], {'args': 'cmdline', 'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(args=cmdline, stdin=subprocess.PIPE, stdout=subprocess.\n PIPE, stderr=subprocess.PIPE, **kwargs)\n', (443, 543), False, 'import subprocess\n'), ((2432, 2466), 'distutils.command.build_ext.build_ext.initialize_options', 'build_ext.initialize_options', (['self'], {}), '(self)\n', (2460, 2466), False, 'from distutils.command.build_ext import build_ext\n'), ((3411, 3443), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (3434, 3443), False, 'import sys\n'), ((3866, 3898), 'distutils.command.build_ext.build_ext.build_extensions', 'build_ext.build_extensions', (['self'], {}), '(self)\n', (3892, 3898), False, 'from distutils.command.build_ext import build_ext\n'), ((962, 986), 'sys.stdout.write', 'sys.stdout.write', (['stdout'], {}), '(stdout)\n', (978, 986), False, 'import sys\n'), ((999, 1023), 'sys.stderr.write', 'sys.stderr.write', (['stderr'], {}), '(stderr)\n', (1015, 1023), False, 'import sys\n'), ((1664, 1804), 'distutils.log.warn', 'log.warn', (['"""Running plain make on BSD-derived system. It will likely fail. Consider installing GNU make from the ports collection."""'], {}), "(\n 'Running plain make on BSD-derived system. It will likely fail. 
Consider installing GNU make from the ports collection.'\n )\n", (1672, 1804), False, 'from distutils import log\n'), ((2921, 2962), 'os.path.join', 'os.path.join', (['self.cares_dir', '"""cares.lib"""'], {}), "(self.cares_dir, 'cares.lib')\n", (2933, 2962), False, 'import os\n'), ((3006, 3048), 'os.path.join', 'os.path.join', (['self.cares_dir', '"""libcares.a"""'], {}), "(self.cares_dir, 'libcares.a')\n", (3018, 3048), False, 'import os\n'), ((3363, 3398), 'os.path.join', 'os.path.join', (['self.cares_dir', '"""src"""'], {}), "(self.cares_dir, 'src')\n", (3375, 3398), False, 'import os\n'), ((4135, 4152), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (4150, 4152), False, 'import os\n'), ((4253, 4283), 'distutils.log.info', 'log.info', (['"""Building c-ares..."""'], {}), "('Building c-ares...')\n", (4261, 4283), False, 'from distutils import log\n'), ((4784, 4814), 'os.path.exists', 'os.path.exists', (['self.cares_lib'], {}), '(self.cares_lib)\n', (4798, 4814), False, 'import os\n'), ((4828, 4868), 'distutils.log.info', 'log.info', (['"""c-ares needs to be compiled."""'], {}), "('c-ares needs to be compiled.')\n", (4836, 4868), False, 'from distutils import log\n'), ((4915, 4951), 'distutils.log.info', 'log.info', (['"""No need to build c-ares."""'], {}), "('No need to build c-ares.')\n", (4923, 4951), False, 'from distutils import log\n'), ((781, 805), 'sys.getdefaultencoding', 'sys.getdefaultencoding', ([], {}), '()\n', (803, 805), False, 'import sys\n'), ((853, 877), 'sys.getdefaultencoding', 'sys.getdefaultencoding', ([], {}), '()\n', (875, 877), False, 'import sys\n'), ((1120, 1185), 'distutils.errors.DistutilsError', 'DistutilsError', (['(\'"%s" is not present on this system\' % cmdline[0])'], {}), '(\'"%s" is not present on this system\' % cmdline[0])\n', (1134, 1185), False, 'from distutils.errors import DistutilsError\n')] |
#!/usr/bin/env python3
import jwt
import requests
import base64
import json
import boto3
import time
import functools
import os
from mod_python import apache
# AWS region of this EC2 instance, read from the instance identity document
# exposed by the instance metadata service (IMDS).
region = json.loads(requests.get('http://169.254.169.254/latest/dynamic/instance-identity/document').text)['region']
# Names of the environment variables holding the SSM parameter name and the
# KMS key alias used when persisting the OIDC access token.
ssm_parameter_name_env_var = 'SYNAPSE_TOKEN_AWS_SSM_PARAMETER_NAME'
kms_alias_env_var = 'KMS_KEY_ALIAS'
def headerparserhandler(req):
    """mod_python access handler: verify the ALB-signed OIDC JWT and, when
    valid, persist the access token; otherwise deny the request."""
    raw_jwt = req.headers_in['x-amzn-oidc-data']  # proxy.conf ensures this header exists
    try:
        claims = jwt_payload(raw_jwt)
        authorized = (claims['userid'] == approved_user()
                      and claims['exp'] > time.time())
        if not authorized:
            # userid claim does not match the instance tag, or token expired.
            return apache.HTTP_UNAUTHORIZED
        store_to_ssm(req.headers_in['x-amzn-oidc-accesstoken'])
        return apache.OK
    except Exception:
        # Invalid/forged JWT payload (or any downstream failure) -> deny.
        return apache.HTTP_UNAUTHORIZED
def approved_user():
    """Return the userid portion of this instance's
    ``Protected/AccessApprovedCaller`` EC2 tag (``None`` if the tag is absent)."""
    this_instance_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text
    instance = boto3.resource('ec2', region).Instance(this_instance_id)
    #TODO handle exception on multiple tags in this list
    for tag in instance.tags:
        if tag["Key"] == 'Protected/AccessApprovedCaller':
            # Tag value looks like "<role-id>:<userid>"; keep the userid part.
            return tag["Value"].split(':')[1]
# taking advantage of lru cache to avoid re-putting the same access token to
# SSM Parameter Store.
# According to functools source code, arguments (i.e. the access token) are hashed,
# not stored as-is in memory
@functools.lru_cache(maxsize=1)
def store_to_ssm(access_token):
    """Store *access_token* as an encrypted SSM parameter.

    The parameter name and the KMS key alias are read from environment
    variables; the call is a no-op when the parameter name is not configured.
    Thanks to ``lru_cache(maxsize=1)`` an unchanged token is only written once.
    """
    parameter_name = os.environ.get(ssm_parameter_name_env_var)
    kms_key_alias = os.environ.get(kms_alias_env_var)
    if not (parameter_name):
        # just exit early if the parameter name to store in SSM is not found
        return
    ssm_client = boto3.client('ssm', region)
    kms_client = boto3.client('kms', region)
    # Resolve the alias to the concrete key id so put_parameter can encrypt with it.
    key_id = kms_client.describe_key(KeyId=kms_key_alias)['KeyMetadata']['KeyId']
    ssm_client.put_parameter(
        Name=parameter_name,
        Type='SecureString',
        Value=access_token,
        KeyId=key_id,
        Overwrite=True
    )
def jwt_payload(encoded_jwt):
    """Verify the ALB-signed JWT from the x-amzn-oidc-data header and return
    its claims.

    The header is a base64-encoded JWT signed by the ALB; validating the
    signature proves the payload is authentic.  See:
    http://docs.aws.amazon.com/elasticloadbalancing/latest/application/listener-authenticate-users.html
    """
    # Step 1: the first JWT section carries the signing key id ('kid').
    header_section = encoded_jwt.split('.')[0]
    header = json.loads(base64.b64decode(header_section).decode("utf-8"))
    # Step 2: fetch the ALB public key for that kid from the regional endpoint.
    pub_key = get_aws_elb_public_key(region, header['kid'])
    # Step 3: decode + verify the token (the ALB signs with ES256).
    return jwt.decode(encoded_jwt, pub_key, algorithms=['ES256'])
@functools.lru_cache()
def get_aws_elb_public_key(region, key_id):
    """Fetch (and memoize) the ALB public key *key_id* from the regional
    key-distribution endpoint.

    Returns the PEM-encoded key as text; cached per (region, key_id).
    """
    url = f'https://public-keys.auth.elb.{region}.amazonaws.com/{key_id}'
    # A timeout keeps the request handler from hanging indefinitely if the
    # endpoint is unreachable; successful results are cached by lru_cache.
    return requests.get(url, timeout=10).text
| [
"jwt.decode",
"json.loads",
"boto3.client",
"os.environ.get",
"base64.b64decode",
"requests.get",
"boto3.resource",
"functools.lru_cache",
"time.time"
] | [((1535, 1565), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (1554, 1565), False, 'import functools\n'), ((2916, 2937), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (2935, 2937), False, 'import functools\n'), ((1015, 1044), 'boto3.resource', 'boto3.resource', (['"""ec2"""', 'region'], {}), "('ec2', region)\n", (1029, 1044), False, 'import boto3\n'), ((1617, 1659), 'os.environ.get', 'os.environ.get', (['ssm_parameter_name_env_var'], {}), '(ssm_parameter_name_env_var)\n', (1631, 1659), False, 'import os\n'), ((1678, 1711), 'os.environ.get', 'os.environ.get', (['kms_alias_env_var'], {}), '(kms_alias_env_var)\n', (1692, 1711), False, 'import os\n'), ((1839, 1866), 'boto3.client', 'boto3.client', (['"""ssm"""', 'region'], {}), "('ssm', region)\n", (1851, 1866), False, 'import boto3\n'), ((1882, 1909), 'boto3.client', 'boto3.client', (['"""kms"""', 'region'], {}), "('kms', region)\n", (1894, 1909), False, 'import boto3\n'), ((2658, 2689), 'json.loads', 'json.loads', (['decoded_jwt_headers'], {}), '(decoded_jwt_headers)\n', (2668, 2689), False, 'import json\n'), ((2859, 2913), 'jwt.decode', 'jwt.decode', (['encoded_jwt', 'pub_key'], {'algorithms': "['ES256']"}), "(encoded_jwt, pub_key, algorithms=['ES256'])\n", (2869, 2913), False, 'import jwt\n'), ((933, 1000), 'requests.get', 'requests.get', (['"""http://169.254.169.254/latest/meta-data/instance-id"""'], {}), "('http://169.254.169.254/latest/meta-data/instance-id')\n", (945, 1000), False, 'import requests\n'), ((3063, 3080), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3075, 3080), False, 'import requests\n'), ((181, 266), 'requests.get', 'requests.get', (['"""http://169.254.169.254/latest/dynamic/instance-identity/document"""'], {}), "('http://169.254.169.254/latest/dynamic/instance-identity/document'\n )\n", (193, 266), False, 'import requests\n'), ((2595, 2624), 'base64.b64decode', 'base64.b64decode', (['jwt_headers'], {}), 
'(jwt_headers)\n', (2611, 2624), False, 'import base64\n'), ((608, 619), 'time.time', 'time.time', ([], {}), '()\n', (617, 619), False, 'import time\n')] |
from flask import Flask, request, jsonify
from flask_restful import Resource, Api
from TeiParser import Family
from dataManager import parentManager
import os
# path to "_data/N" folder
path_N = os.path.join("_data", "N")
# create the parent manager
pm_N = parentManager(path_N)
# create the Flask application
app = Flask(__name__)
api = Api(app)
# returns array of parent filenames
class MunchParents(Resource):
    """Endpoint listing every parent filename known to the manager."""

    def get(self):
        """Return the parent filenames wrapped in a JSON object."""
        payload = {"data": pm_N.parents}
        return payload
# returns content of the parents
class MunchFamily(Resource):
    """Endpoint returning the merged content of one parent TEI file."""

    def get(self, _file):
        """Return title, type, dates and concatenated text for *_file*,
        or a 404 payload when the filename is unknown."""
        if _file not in pm_N.parents:
            # If file is not found
            return {"notFound": _file}, 404
        family = Family(os.path.join(path_N, _file))
        dates = {"when": [], "from": [], "to": []}
        texts = []
        for child in family.children:
            # child.date maps when/from/to to optional values; keep truthy ones.
            for kind, value in child.date.items():
                if value:
                    dates[kind].append(value)
            texts.append(child.text)
        response = {
            "title": family.data.title,
            "type_text": family.data.type,
            "date": dates,
            # Join child texts with blank lines between them.
            "text": "\n\n".join(texts),
        }
        return response, 200
api.add_resource(MunchParents, "/N")
api.add_resource(MunchFamily, "/N/<_file>")
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0")
| [
"flask_restful.Api",
"dataManager.parentManager",
"os.path.join",
"flask.Flask"
] | [((196, 222), 'os.path.join', 'os.path.join', (['"""_data"""', '"""N"""'], {}), "('_data', 'N')\n", (208, 222), False, 'import os\n'), ((258, 279), 'dataManager.parentManager', 'parentManager', (['path_N'], {}), '(path_N)\n', (271, 279), False, 'from dataManager import parentManager\n'), ((318, 333), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (323, 333), False, 'from flask import Flask, request, jsonify\n'), ((340, 348), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (343, 348), False, 'from flask_restful import Resource, Api\n'), ((625, 652), 'os.path.join', 'os.path.join', (['path_N', '_file'], {}), '(path_N, _file)\n', (637, 652), False, 'import os\n')] |
# this resizes __1.jpt to x it's original size & it turns it grayscale
import cv
import numpy
import bSpline
if __name__ == "__main__": # this is not a module
scale = 10
# load image
#cv_img = cv.LoadImage("__1.jpg", cv.CV_LOAD_IMAGE_GRAYSCALE) # CV_LOAD_IMAGE_GRAYSCALE
cv_img = cv.LoadImage("__1.jpg", cv.CV_LOAD_IMAGE_UNCHANGED) # CV_LOAD_IMAGE_UNCHANGED
# width & height
cv_img_width = cv.GetSize(cv_img)[0]
cv_img_height = cv.GetSize(cv_img)[1]
img_tpl = numpy.zeros( ((cv_img_height * scale),(cv_img_width * scale),2) )
for h in range(0,(cv_img_height * scale),1) :
for w in range(0,(cv_img_width * scale),1) :
img_tpl[h][w][0] = (h + 0) / (cv_img_height * scale * 1.0) * cv_img_height
img_tpl[h][w][1] = (w + 0) / (cv_img_width * scale * 1.0) * cv_img_width
##bSpl = bSpline.BSpline() # v4.0
# single picture
##cv_img_out = bSpl.cubic(cv_img, img_tpl) # v4.0
#cv_img_out = bSpline.cubic(cv_img, img_tpl)
#cv.SaveImage("out__1.jpg", cv_img_out)
# multiple pictures
img_beta_f = bSpline.cubic_getBeta(cv_img, img_tpl)
cv_img_out = bSpline.cubic_setBeta(cv_img, img_tpl, img_beta_f)
cv.SaveImage("out__1.01.jpg", cv_img_out)
#cv_img_out = bSpl.cubic_setBeta(cv_img, img_tpl, img_beta_f)
#cv.SaveImage("out__1.02.jpg", cv_img_out)
#cv_img_out = bSpl.cubic_setBeta(cv_img, img_tpl, img_beta_f)
#cv.SaveImage("out__1.03.jpg", cv_img_out)
| [
"cv.GetSize",
"bSpline.cubic_setBeta",
"cv.SaveImage",
"numpy.zeros",
"bSpline.cubic_getBeta",
"cv.LoadImage"
] | [((292, 343), 'cv.LoadImage', 'cv.LoadImage', (['"""__1.jpg"""', 'cv.CV_LOAD_IMAGE_UNCHANGED'], {}), "('__1.jpg', cv.CV_LOAD_IMAGE_UNCHANGED)\n", (304, 343), False, 'import cv\n'), ((480, 541), 'numpy.zeros', 'numpy.zeros', (['(cv_img_height * scale, cv_img_width * scale, 2)'], {}), '((cv_img_height * scale, cv_img_width * scale, 2))\n', (491, 541), False, 'import numpy\n'), ((1028, 1066), 'bSpline.cubic_getBeta', 'bSpline.cubic_getBeta', (['cv_img', 'img_tpl'], {}), '(cv_img, img_tpl)\n', (1049, 1066), False, 'import bSpline\n'), ((1083, 1133), 'bSpline.cubic_setBeta', 'bSpline.cubic_setBeta', (['cv_img', 'img_tpl', 'img_beta_f'], {}), '(cv_img, img_tpl, img_beta_f)\n', (1104, 1133), False, 'import bSpline\n'), ((1135, 1176), 'cv.SaveImage', 'cv.SaveImage', (['"""out__1.01.jpg"""', 'cv_img_out'], {}), "('out__1.01.jpg', cv_img_out)\n", (1147, 1176), False, 'import cv\n'), ((406, 424), 'cv.GetSize', 'cv.GetSize', (['cv_img'], {}), '(cv_img)\n', (416, 424), False, 'import cv\n'), ((445, 463), 'cv.GetSize', 'cv.GetSize', (['cv_img'], {}), '(cv_img)\n', (455, 463), False, 'import cv\n')] |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
from scrapy import Item, Field
class RawResponseItem(Item):
    """Scrapy item carrying a raw crawl response and its crawl metadata."""
    appid = Field()          # application id that requested the crawl
    crawlid = Field()        # unique id of this crawl request
    url = Field()            # originally requested URL
    response_url = Field()   # final URL after redirects
    status_code = Field()    # HTTP status code
    status_msg = Field()     # HTTP status message
    headers = Field()        # response headers
    body = Field()           # raw response body
    links = Field()          # links extracted from the page
    attrs = Field()          # extra attributes attached by the spider
| [
"scrapy.Field"
] | [((147, 154), 'scrapy.Field', 'Field', ([], {}), '()\n', (152, 154), False, 'from scrapy import Item, Field\n'), ((169, 176), 'scrapy.Field', 'Field', ([], {}), '()\n', (174, 176), False, 'from scrapy import Item, Field\n'), ((187, 194), 'scrapy.Field', 'Field', ([], {}), '()\n', (192, 194), False, 'from scrapy import Item, Field\n'), ((214, 221), 'scrapy.Field', 'Field', ([], {}), '()\n', (219, 221), False, 'from scrapy import Item, Field\n'), ((240, 247), 'scrapy.Field', 'Field', ([], {}), '()\n', (245, 247), False, 'from scrapy import Item, Field\n'), ((265, 272), 'scrapy.Field', 'Field', ([], {}), '()\n', (270, 272), False, 'from scrapy import Item, Field\n'), ((287, 294), 'scrapy.Field', 'Field', ([], {}), '()\n', (292, 294), False, 'from scrapy import Item, Field\n'), ((306, 313), 'scrapy.Field', 'Field', ([], {}), '()\n', (311, 313), False, 'from scrapy import Item, Field\n'), ((326, 333), 'scrapy.Field', 'Field', ([], {}), '()\n', (331, 333), False, 'from scrapy import Item, Field\n'), ((346, 353), 'scrapy.Field', 'Field', ([], {}), '()\n', (351, 353), False, 'from scrapy import Item, Field\n')] |
""" Implement alike logic as is done on www.cdecl.org
Try for example:
$ cdelc.py 'char **a;'
"""
import argparse
import io
from ppci.api import get_current_arch
from ppci.lang.c import CLexer, CParser, COptions, CContext, CSemantics
from ppci.lang.c.nodes import types, declarations
from ppci.lang.c.preprocessor import prepare_for_parsing
# Command-line interface: one positional argument, the C snippet to explain.
parser = argparse.ArgumentParser(
    description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('source', type=str)
args = parser.parse_args()
# print('Source:', args.source)
# Parse into ast:
# Build the ppci C front-end pipeline (lexer -> preprocessor -> parser)
# for the current target architecture, then parse the snippet.
arch = get_current_arch()
coptions = COptions()
ccontext = CContext(coptions, arch.info)
semantics = CSemantics(ccontext)
cparser = CParser(coptions, semantics)
clexer = CLexer(COptions())
f = io.StringIO(args.source)
tokens = clexer.lex(f, '<snippet>')
tokens = prepare_for_parsing(tokens, cparser.keywords)
cparser.init_lexer(tokens)
semantics.begin()
# Only the first declaration in the snippet is explained.
decl = cparser.parse_declarations()[0]
# Explain:
def explain(x):
    """Render a C declaration AST node as an English phrase, cdecl.org style.

    Recurses through variable declarations, pointer and array types down to
    basic types; unrecognized nodes yield a visible placeholder instead of
    ``None``.
    """
    if isinstance(x, declarations.VariableDeclaration):
        return '{} is {}'.format(x.name, explain(x.typ))
    elif isinstance(x, types.PointerType):
        return 'a pointer to {}'.format(explain(x.element_type))
    elif isinstance(x, types.ArrayType):
        return 'an array of {}'.format(explain(x.element_type))
    elif isinstance(x, types.BasicType):
        return '{}'.format(x.type_id)
    else:
        # Previously this branch only printed and implicitly returned None,
        # making the caller format "... is None"; return a marker instead.
        print('???', x)
        return '<unknown construct {!r}>'.format(x)
# Print the English rendering of the parsed declaration.
print(explain(decl))
| [
"ppci.api.get_current_arch",
"argparse.ArgumentParser",
"ppci.lang.c.CSemantics",
"ppci.lang.c.CContext",
"ppci.lang.c.COptions",
"ppci.lang.c.preprocessor.prepare_for_parsing",
"ppci.lang.c.CParser",
"io.StringIO"
] | [((355, 458), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (378, 458), False, 'import argparse\n'), ((584, 602), 'ppci.api.get_current_arch', 'get_current_arch', ([], {}), '()\n', (600, 602), False, 'from ppci.api import get_current_arch\n'), ((614, 624), 'ppci.lang.c.COptions', 'COptions', ([], {}), '()\n', (622, 624), False, 'from ppci.lang.c import CLexer, CParser, COptions, CContext, CSemantics\n'), ((636, 665), 'ppci.lang.c.CContext', 'CContext', (['coptions', 'arch.info'], {}), '(coptions, arch.info)\n', (644, 665), False, 'from ppci.lang.c import CLexer, CParser, COptions, CContext, CSemantics\n'), ((678, 698), 'ppci.lang.c.CSemantics', 'CSemantics', (['ccontext'], {}), '(ccontext)\n', (688, 698), False, 'from ppci.lang.c import CLexer, CParser, COptions, CContext, CSemantics\n'), ((709, 737), 'ppci.lang.c.CParser', 'CParser', (['coptions', 'semantics'], {}), '(coptions, semantics)\n', (716, 737), False, 'from ppci.lang.c import CLexer, CParser, COptions, CContext, CSemantics\n'), ((770, 794), 'io.StringIO', 'io.StringIO', (['args.source'], {}), '(args.source)\n', (781, 794), False, 'import io\n'), ((840, 885), 'ppci.lang.c.preprocessor.prepare_for_parsing', 'prepare_for_parsing', (['tokens', 'cparser.keywords'], {}), '(tokens, cparser.keywords)\n', (859, 885), False, 'from ppci.lang.c.preprocessor import prepare_for_parsing\n'), ((754, 764), 'ppci.lang.c.COptions', 'COptions', ([], {}), '()\n', (762, 764), False, 'from ppci.lang.c import CLexer, CParser, COptions, CContext, CSemantics\n')] |
# -*- coding: utf-8 -*-
import logging
import threading
import storj.exception as sjexc
from PyQt4 import QtCore, QtGui
from .qt_interfaces.dashboard_ui import Ui_MainMenu
from .bucket_edition import BucketEditingUI
from .client_config import ClientConfigurationUI
from .engine import StorjEngine
from .file_download import SingleFileDownloadUI
from .file_mirror import FileMirrorsListUI
from .file_upload import SingleFileUploadUI
from .utilities.tools import Tools
from .sync_menu import SyncMenuUI
from .resources.constants import DISPLAY_FILE_CREATION_DATE_IN_MAIN,\
FILE_LIST_SORTING_MAIN_ENABLED, BUCKETS_LIST_SORTING_ENABLED, DATA_TABLE_EDIT_ENABLED
from .resources.custom_qt_interfaces import TableModel
class ExtendedQLabel(QtGui.QLabel):
    """QLabel variant that emits a ``clicked()`` signal on mouse release."""

    def __init__(self, parent):
        # Fixed: the constructor was misspelled ``__init`` and therefore was
        # never invoked as the initializer.
        QtGui.QLabel.__init__(self, parent)

    def mouseReleaseEvent(self, ev):
        """Emit the old-style ``clicked()`` signal when the mouse is released."""
        self.emit(QtCore.SIGNAL('clicked()'))
class MainUI(QtGui.QMainWindow):
"""Main UI section."""
__logger = logging.getLogger('%s.MainUI' % __name__)
def __init__(self, parent=None, bucketid=None):
QtGui.QWidget.__init__(self, parent)
self.file_manager_ui = Ui_MainMenu()
self.file_manager_ui.setupUi(self)
# self.change_loading_gif()
# Connect ComboBox change listener
QtCore.QObject.connect(self.file_manager_ui.bucket_select_combo_box,
QtCore.SIGNAL('currentIndexChanged(const QString&)'),
self.createNewFileListUpdateThread)
# Open mirrors list window
QtCore.QObject.connect(self.file_manager_ui.file_mirrors_bt,
QtCore.SIGNAL('clicked()'),
self.open_mirrors_list_window)
# Create bucket action
QtCore.QObject.connect(self.file_manager_ui.file_download_bt,
QtCore.SIGNAL('clicked()'),
self.open_single_file_download_window)
# Delete selected file
QtCore.QObject.connect(self.file_manager_ui.file_delete_bt,
QtCore.SIGNAL('clicked()'),
self.delete_selected_file)
self.connect(self, QtCore.SIGNAL('changeLoadingGif'),
self.change_loading_gif)
if not DATA_TABLE_EDIT_ENABLED:
self.file_manager_ui.files_list_tableview.setEditTriggers(
QtGui.QAbstractItemView.NoEditTriggers)
self.file_manager_ui.settings_bt.mousePressEvent = \
self.open_settings_window
self.file_manager_ui.refresh_bt.mousePressEvent = \
self.createNewFileListUpdateThread
# Delete selected file
QtCore.QObject.connect(self.file_manager_ui.new_file_upload_bt,
QtCore.SIGNAL('clicked()'),
self.open_single_file_upload_window)
# Open bucket edit window
QtCore.QObject.connect(self.file_manager_ui.edit_bucket_bt,
QtCore.SIGNAL('clicked()'),
lambda: self.open_bucket_editing_window(action='edit'))
# Open bucket edit window
QtCore.QObject.connect(self.file_manager_ui.create_bucket_bt,
QtCore.SIGNAL('clicked()'),
lambda: self.open_bucket_editing_window(action='add'))
self.storj_engine = StorjEngine() # init StorjEngine
user_email = self.storj_engine.account_manager.get_user_email()
self.file_manager_ui.account_label.setText(user_email)
self.createNewBucketResolveThread()
def open_sync_menu(self):
self.open_sync_menu_window = SyncMenuUI(self)
self.open_sync_menu_window.show()
def change_loading_gif(self, is_visible):
if is_visible:
movie = QtGui.QMovie(':/resources/loading.gif')
self.file_manager_ui.refresh_bt.setMovie(movie)
movie.start()
else:
self.file_manager_ui.refresh_bt.setPixmap(QtGui.QPixmap((':/resources/refresh.png')))
def open_bucket_editing_window(self, action):
if action == 'edit':
self.bucket_editing_window = BucketEditingUI(
self, action=action,
bucketid=str(self.current_selected_bucket_id),
dashboard_instance=self)
else:
self.bucket_editing_window = BucketEditingUI(
self, action=action, dashboard_instance=self)
self.bucket_editing_window.show()
def open_single_file_upload_window(self):
self.single_file_upload_window = SingleFileUploadUI(
self, dashboard_instance=self)
self.single_file_upload_window.show()
def open_settings_window(self, b):
self.open_settings_window = ClientConfigurationUI(self)
self.open_settings_window.show()
def delete_selected_file(self):
self.current_bucket_index = \
self.file_manager_ui.bucket_select_combo_box.currentIndex()
self.current_selected_bucket_id = \
self.bucket_id_list[self.current_bucket_index]
tablemodel = self.file_manager_ui.files_list_tableview.model()
rows = sorted(
set(index.row() for index in
self.file_manager_ui.files_list_tableview.selectedIndexes()))
selected = False
for row in rows:
selected = True
index = tablemodel.index(row, 2) # get file ID index
index_filename = tablemodel.index(row, 0) # get file name index
# We suppose data are strings
selected_file_id = str(tablemodel.data(index).toString())
selected_file_name = str(tablemodel.data(index_filename).toString())
msgBox = QtGui.QMessageBox(
QtGui.QMessageBox.Question,
'Question',
'Are you sure you want to delete this file? File name: %s' % str(selected_file_name).decode('utf-8'),
(QtGui.QMessageBox.Yes | QtGui.QMessageBox.No))
result = msgBox.exec_()
self.__logger.debug(result)
if result == QtGui.QMessageBox.Yes:
try:
self.storj_engine.storj_client.file_remove(
str(self.current_selected_bucket_id),
str(selected_file_id))
# Update files list
self.createNewFileListUpdateThread()
QtGui.QMessageBox.about(
self,
'Success',
'File "%s" has been deleted successfully' % selected_file_name)
except sjexc.StorjBridgeApiError as e:
self.__logger.error(e)
QtGui.QMessageBox.about(
self,
'Error',
'Bridge exception occured while trying to delete file: %s' % e)
except Exception as e:
self.__logger.error(e)
QtGui.QMessageBox.about(
self,
'Error',
'Unhandled exception occured while trying to delete file: %s' % e)
if not selected:
QtGui.QMessageBox.about(
self,
'Information',
'Please select file which you want to delete')
return True
def open_mirrors_list_window(self):
self.current_bucket_index = \
self.file_manager_ui.bucket_select_combo_box.currentIndex()
self.current_selected_bucket_id = \
self.bucket_id_list[self.current_bucket_index]
tablemodel = self.file_manager_ui.files_list_tableview.model()
rows = sorted(
set(index.row() for index in
self.file_manager_ui.files_list_tableview.selectedIndexes()))
i = 0
for row in rows:
self.__logger.info('Row %d is selected' % row)
index = tablemodel.index(row, 2) # get file ID
index_filename = tablemodel.index(row, 0) # get file ID
# We suppose data are strings
selected_file_id = str(tablemodel.data(index).toString())
selected_file_name = str(tablemodel.data(index_filename).toString())
self.file_mirrors_list_window = \
FileMirrorsListUI(self,
str(self.current_selected_bucket_id),
selected_file_id,
filename=selected_file_name)
self.file_mirrors_list_window.show()
i += 1
if i == 0:
QtGui.QMessageBox.about(self,
'Warning!',
'Please select file from file list!')
self.__logger.debug(1)
def createNewFileListUpdateThread(self, a=None):
download_thread = threading.Thread(target=self.update_files_list,
args=())
download_thread.start()
def update_files_list(self):
self.tools = Tools()
model = TableModel(1, 1)
file_list_header_labels = ['File name', 'File size', 'File ID']
if DISPLAY_FILE_CREATION_DATE_IN_MAIN:
file_list_header_labels.append('Creation date')
model.setHorizontalHeaderLabels(file_list_header_labels)
self.current_bucket_index = \
self.file_manager_ui.bucket_select_combo_box.currentIndex()
self.current_selected_bucket_id = \
self.bucket_id_list[self.current_bucket_index]
i = 0
try:
self.emit(QtCore.SIGNAL('changeLoadingGif'), True)
for self.file_details in self.storj_engine.storj_client.bucket_files(str(self.current_selected_bucket_id)):
item = QtGui.QStandardItem(
str(self.file_details['filename'].replace('[DECRYPTED]', '')).decode('utf8'))
model.setItem(i, 0, item) # row, column, item (StandardItem)
file_size_str = self.tools.human_size(int(self.file_details['size'])) # get human readable file size
item = QtGui.QStandardItem(str(file_size_str))
model.setItem(i, 1, item) # row, column, item (QQtGui.StandardItem)
item = QtGui.QStandardItem(str(self.file_details['id']))
model.setItem(i, 2, item) # row, column, item (QStandardItem)
if DISPLAY_FILE_CREATION_DATE_IN_MAIN:
item = QtGui.QStandardItem(str(self.file_details['created']).replace('Z', '').replace('T', ' '))
model.setItem(i, 3, item) # row, column, item (QStandardItem)
i = i + 1
self.__logger.debug(self.file_details['filename'].replace('[DECRYPTED]', '').decode('utf8'))
except sjexc.StorjBridgeApiError as e:
self.__logger.error(e)
self.file_manager_ui.files_list_tableview.clearFocus()
self.file_manager_ui.files_list_tableview.setModel(model)
self.file_manager_ui.files_list_tableview.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
if FILE_LIST_SORTING_MAIN_ENABLED:
self.file_manager_ui.files_list_tableview.setSortingEnabled(True)
self.file_manager_ui.files_list_tableview.horizontalHeader().sortIndicatorChanged.connect(self.handleSortIndicatorChanged)
self.file_manager_ui.files_list_tableview.sortByColumn(0, QtCore.Qt.AscendingOrder)
self.emit(QtCore.SIGNAL('changeLoadingGif'), False)
def handleSortIndicatorChanged(self, index, order):
if index != 0:
self.file_manager_ui.files_list_tableview.horizontalHeader().setSortIndicator(0, self.file_manager_ui.files_list_tableview.model().sortOrder())
def createNewBucketResolveThread(self):
download_thread = threading.Thread(
target=self.initialize_bucket_select_combobox,
args=())
download_thread.start()
def initialize_bucket_select_combobox(self):
self.file_manager_ui.bucket_select_combo_box.clear()
self.buckets_list = []
self.bucket_id_list = []
self.bucket_id_name_2D_list = []
self.storj_engine = StorjEngine() # init StorjEngine
i = 0
self.emit(QtCore.SIGNAL('changeLoadingGif'), True)
try:
for bucket in self.storj_engine.storj_client.bucket_list():
# Append buckets to list
self.bucket_id_name_2D_list.append(
[bucket.id, bucket.name.decode('utf8')])
i += 1
if BUCKETS_LIST_SORTING_ENABLED:
self.bucket_id_name_2D_list = \
sorted(self.bucket_id_name_2D_list,
key=lambda x: x[1],
reverse=False)
for arr_data in self.bucket_id_name_2D_list:
self.buckets_list.append(arr_data[1])
self.bucket_id_list.append(arr_data[0])
except sjexc.StorjBridgeApiError as e:
self.__logger.error(e)
QtGui.QMessageBox.about(
self,
'Unhandled bucket resolving exception',
'Exception: ' % e)
self.file_manager_ui.bucket_select_combo_box.addItems(
self.buckets_list)
self.emit(QtCore.SIGNAL('changeLoadingGif'), False)
    def open_single_file_download_window(self):
        """Open a SingleFileDownloadUI window for every file selected in the table.

        Resolves the currently selected bucket, then, for each selected row,
        reads the file ID from column 2 of the table model and spawns a
        download window.  Warns the user if nothing is selected.
        """
        self.current_bucket_index = \
            self.file_manager_ui.bucket_select_combo_box.currentIndex()
        self.current_selected_bucket_id = \
            self.bucket_id_list[self.current_bucket_index]
        tablemodel = self.file_manager_ui.files_list_tableview.model()
        # De-duplicate: selectedIndexes() yields one index per selected cell,
        # so a fully selected row would appear once per column.
        rows = sorted(set(index.row() for index in
                          self.file_manager_ui.files_list_tableview.selectedIndexes()))
        i = 0
        for row in rows:
            self.__logger.info('Row %d is selected' % row)
            index = tablemodel.index(row, 2)  # column 2 holds the file ID
            # We suppose data are strings (Qt returns a QVariant; toString()
            # converts it, str() makes it a plain Python string).
            selected_file_id = str(tablemodel.data(index).toString())
            # NOTE(review): each iteration overwrites self.file_mirrors_list_window,
            # so only the last window remains referenced on this object — the
            # earlier windows rely on Qt parenting to stay alive. Confirm intended.
            self.file_mirrors_list_window = SingleFileDownloadUI(
                self,
                self.current_selected_bucket_id,
                selected_file_id)
            self.file_mirrors_list_window.show()
            i += 1
        if i == 0:
            QtGui.QMessageBox.about(self,
                                    'Warning!',
                                    'Please select file from file list!')
        # NOTE(review): debug(1) looks like leftover trace output.
        self.__logger.debug(1)
| [
"logging.getLogger",
"PyQt4.QtGui.QMovie",
"PyQt4.QtGui.QLabel.__init__",
"PyQt4.QtGui.QPixmap",
"PyQt4.QtCore.SIGNAL",
"PyQt4.QtGui.QWidget.__init__",
"threading.Thread",
"PyQt4.QtGui.QMessageBox.about"
] | [((1007, 1048), 'logging.getLogger', 'logging.getLogger', (["('%s.MainUI' % __name__)"], {}), "('%s.MainUI' % __name__)\n", (1024, 1048), False, 'import logging\n'), ((809, 844), 'PyQt4.QtGui.QLabel.__init__', 'QtGui.QLabel.__init__', (['self', 'parent'], {}), '(self, parent)\n', (830, 844), False, 'from PyQt4 import QtCore, QtGui\n'), ((1110, 1146), 'PyQt4.QtGui.QWidget.__init__', 'QtGui.QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (1132, 1146), False, 'from PyQt4 import QtCore, QtGui\n'), ((9015, 9071), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.update_files_list', 'args': '()'}), '(target=self.update_files_list, args=())\n', (9031, 9071), False, 'import threading\n'), ((11987, 12059), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.initialize_bucket_select_combobox', 'args': '()'}), '(target=self.initialize_bucket_select_combobox, args=())\n', (12003, 12059), False, 'import threading\n'), ((901, 927), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""clicked()"""'], {}), "('clicked()')\n", (914, 927), False, 'from PyQt4 import QtCore, QtGui\n'), ((1423, 1475), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""currentIndexChanged(const QString&)"""'], {}), "('currentIndexChanged(const QString&)')\n", (1436, 1475), False, 'from PyQt4 import QtCore, QtGui\n'), ((1679, 1705), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""clicked()"""'], {}), "('clicked()')\n", (1692, 1705), False, 'from PyQt4 import QtCore, QtGui\n'), ((1902, 1928), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""clicked()"""'], {}), "('clicked()')\n", (1915, 1928), False, 'from PyQt4 import QtCore, QtGui\n'), ((2130, 2156), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""clicked()"""'], {}), "('clicked()')\n", (2143, 2156), False, 'from PyQt4 import QtCore, QtGui\n'), ((2244, 2277), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""changeLoadingGif"""'], {}), "('changeLoadingGif')\n", (2257, 2277), False, 'from PyQt4 import QtCore, QtGui\n'), ((2835, 2861), 
'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""clicked()"""'], {}), "('clicked()')\n", (2848, 2861), False, 'from PyQt4 import QtCore, QtGui\n'), ((3065, 3091), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""clicked()"""'], {}), "('clicked()')\n", (3078, 3091), False, 'from PyQt4 import QtCore, QtGui\n'), ((3316, 3342), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""clicked()"""'], {}), "('clicked()')\n", (3329, 3342), False, 'from PyQt4 import QtCore, QtGui\n'), ((3891, 3930), 'PyQt4.QtGui.QMovie', 'QtGui.QMovie', (['""":/resources/loading.gif"""'], {}), "(':/resources/loading.gif')\n", (3903, 3930), False, 'from PyQt4 import QtCore, QtGui\n'), ((7329, 7424), 'PyQt4.QtGui.QMessageBox.about', 'QtGui.QMessageBox.about', (['self', '"""Information"""', '"""Please select file which you want to delete"""'], {}), "(self, 'Information',\n 'Please select file which you want to delete')\n", (7352, 7424), False, 'from PyQt4 import QtCore, QtGui\n'), ((8752, 8831), 'PyQt4.QtGui.QMessageBox.about', 'QtGui.QMessageBox.about', (['self', '"""Warning!"""', '"""Please select file from file list!"""'], {}), "(self, 'Warning!', 'Please select file from file list!')\n", (8775, 8831), False, 'from PyQt4 import QtCore, QtGui\n'), ((11638, 11671), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""changeLoadingGif"""'], {}), "('changeLoadingGif')\n", (11651, 11671), False, 'from PyQt4 import QtCore, QtGui\n'), ((12427, 12460), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""changeLoadingGif"""'], {}), "('changeLoadingGif')\n", (12440, 12460), False, 'from PyQt4 import QtCore, QtGui\n'), ((13482, 13515), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""changeLoadingGif"""'], {}), "('changeLoadingGif')\n", (13495, 13515), False, 'from PyQt4 import QtCore, QtGui\n'), ((14538, 14617), 'PyQt4.QtGui.QMessageBox.about', 'QtGui.QMessageBox.about', (['self', '"""Warning!"""', '"""Please select file from file list!"""'], {}), "(self, 'Warning!', 'Please select file from file list!')\n", (14561, 14617), False, 
'from PyQt4 import QtCore, QtGui\n'), ((4085, 4125), 'PyQt4.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/resources/refresh.png"""'], {}), "(':/resources/refresh.png')\n", (4098, 4125), False, 'from PyQt4 import QtCore, QtGui\n'), ((9752, 9785), 'PyQt4.QtCore.SIGNAL', 'QtCore.SIGNAL', (['"""changeLoadingGif"""'], {}), "('changeLoadingGif')\n", (9765, 9785), False, 'from PyQt4 import QtCore, QtGui\n'), ((13231, 13324), 'PyQt4.QtGui.QMessageBox.about', 'QtGui.QMessageBox.about', (['self', '"""Unhandled bucket resolving exception"""', "('Exception: ' % e)"], {}), "(self, 'Unhandled bucket resolving exception', \n 'Exception: ' % e)\n", (13254, 13324), False, 'from PyQt4 import QtCore, QtGui\n'), ((6537, 6646), 'PyQt4.QtGui.QMessageBox.about', 'QtGui.QMessageBox.about', (['self', '"""Success"""', '(\'File "%s" has been deleted successfully\' % selected_file_name)'], {}), '(self, \'Success\', \n \'File "%s" has been deleted successfully\' % selected_file_name)\n', (6560, 6646), False, 'from PyQt4 import QtCore, QtGui\n'), ((6833, 6940), 'PyQt4.QtGui.QMessageBox.about', 'QtGui.QMessageBox.about', (['self', '"""Error"""', "('Bridge exception occured while trying to delete file: %s' % e)"], {}), "(self, 'Error', \n 'Bridge exception occured while trying to delete file: %s' % e)\n", (6856, 6940), False, 'from PyQt4 import QtCore, QtGui\n'), ((7112, 7222), 'PyQt4.QtGui.QMessageBox.about', 'QtGui.QMessageBox.about', (['self', '"""Error"""', "('Unhandled exception occured while trying to delete file: %s' % e)"], {}), "(self, 'Error', \n 'Unhandled exception occured while trying to delete file: %s' % e)\n", (7135, 7222), False, 'from PyQt4 import QtCore, QtGui\n')] |
from desaf109 import moeda
# Read a price and print its half, double, +10% and -13%, currency-formatted.
preco = float(input('Digite um preço: R$'))
print('A metade de {} é {}'.format(moeda.moeda(preco), moeda.metade(preco, True)))
print('O dobro de {} é {}'.format(moeda.moeda(preco), moeda.dobro(preco, True)))
print('Se adcionarmos 10% fica {}'.format(moeda.aumentar(preco, 10, True)))
print('Se tirarmos 13% fica {}'.format(moeda.diminuir(preco, 13, True)))
"desaf109.moeda.metade",
"desaf109.moeda.dobro",
"desaf109.moeda.moeda",
"desaf109.moeda.aumentar",
"desaf109.moeda.diminuir"
] | [((103, 117), 'desaf109.moeda.moeda', 'moeda.moeda', (['p'], {}), '(p)\n', (114, 117), False, 'from desaf109 import moeda\n'), ((119, 140), 'desaf109.moeda.metade', 'moeda.metade', (['p', '(True)'], {}), '(p, True)\n', (131, 140), False, 'from desaf109 import moeda\n'), ((177, 191), 'desaf109.moeda.moeda', 'moeda.moeda', (['p'], {}), '(p)\n', (188, 191), False, 'from desaf109 import moeda\n'), ((193, 213), 'desaf109.moeda.dobro', 'moeda.dobro', (['p', '(True)'], {}), '(p, True)\n', (204, 213), False, 'from desaf109 import moeda\n'), ((257, 284), 'desaf109.moeda.aumentar', 'moeda.aumentar', (['p', '(10)', '(True)'], {}), '(p, 10, True)\n', (271, 284), False, 'from desaf109 import moeda\n'), ((327, 354), 'desaf109.moeda.diminuir', 'moeda.diminuir', (['p', '(13)', '(True)'], {}), '(p, 13, True)\n', (341, 354), False, 'from desaf109 import moeda\n')] |
"""
Copyright (C) Cortic Technology Corp. - All Rights Reserved
Written by <NAME> <<EMAIL>>, 2021
"""
# need to advertise different processor type, eg CPU, GPU, TPU
import traceback
import logging
from curt.base_service import BaseService
class VisionProcessorService(BaseService):
    """Service that routes vision-processing requests to a worker.

    Depending on the trailing flag of the payload, the worker is either
    reconfigured or asked to run inference on the supplied data.
    """

    def __init__(self):
        super().__init__("VisionProcessor")

    def execute_function(self, worker, data):
        """Dispatch ``data`` to ``worker``.

        Args:
            worker: object exposing ``config_worker(params)`` and
                ``run_inference(data_list)``.
            data: sequence whose last element is a truthy flag; when true,
                ``data[0]`` holds configuration for the worker, otherwise
                it holds inference input (a list, or a dict with a
                ``"ready_data"`` key plus guid-keyed extras).

        Returns:
            The worker's result, or ``None`` when an exception occurred
            (the error is logged and deliberately swallowed so the
            service keeps running — matching the original behavior).
        """
        config_worker = data[-1]
        try:
            if config_worker:
                return worker.config_worker(data[0])
            if isinstance(data[0], list):
                return worker.run_inference(data[0])
            if isinstance(data[0], dict):
                # Flatten: the "ready_data" entries first, then every other
                # value keyed by guid, preserving the dict's iteration order.
                data_list = list(data[0]["ready_data"])
                for guid in data[0]:
                    if guid != "ready_data":
                        data_list.append(data[0][guid])
                return worker.run_inference(data_list)
        except Exception:
            # logging.exception records the active traceback automatically —
            # replaces the hand-rolled logging.error(traceback.format_exc()).
            logging.exception("VisionProcessor worker call failed")
| [
"traceback.format_exc"
] | [((1113, 1135), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1133, 1135), False, 'import traceback\n')] |
import lamp.modules
import torch
import numpy as np
from lamp.utils import get_activation_function
class FeedforwardNeuralNetwork(lamp.modules.BaseModule):
    """Fully connected network: Linear -> [Dropout] -> ReLU per hidden layer.

    The final Linear layer is followed only by the optional output
    activation ``outf`` (no ReLU).
    """

    def __init__(self, dim_in, dim_out, architecture, dropout, outf=None, dtype=None, device=None):
        """Build the layer stack.

        Args:
            dim_in: input feature dimension.
            dim_out: output dimension.
            architecture: list of hidden-layer widths (may be empty).
            dropout: dropout probability applied after every Linear layer,
                or None to disable dropout entirely.
            outf: name of the output activation passed to
                get_activation_function, or None for a linear output.
            dtype, device: forwarded to BaseModule._to.
        """
        super(FeedforwardNeuralNetwork, self).__init__()
        architecture = [dim_in] + architecture + [dim_out]
        self.layers = torch.nn.Sequential()
        for n in range(len(architecture) - 1):
            self.layers.add_module('fc{}'.format(n + 1), torch.nn.Linear(architecture[n], architecture[n + 1]))
            if dropout is not None:
                # Bug fix: the original hard-coded p=0.5 and silently
                # ignored the ``dropout`` argument.
                self.layers.add_module('dropout{}'.format(n + 1), torch.nn.Dropout(p=dropout))
            if n != len(architecture) - 2:
                self.layers.add_module('activ{}'.format(n + 1), torch.nn.ReLU())
            else:
                if outf is not None:
                    self.layers.add_module('out_fct', get_activation_function(outf))
        self._to(device=device, dtype=dtype)

    def forward(self, x):
        return self.layers(x)

    @classmethod
    def FromLinearDecay(cls, dim_in, dim_out, num_hidden_layers, outf=None, dropout=None, dtype=None, device=None):
        """Build a network whose layer widths decay linearly from dim_in to dim_out."""
        architecture = list(np.linspace(dim_in, dim_out, num_hidden_layers + 2).astype(int))
        architecture_hidden = architecture[1:-1]
        return cls(dim_in, dim_out, architecture_hidden, dropout, outf, dtype, device)
| [
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.Sequential",
"lamp.utils.get_activation_function",
"numpy.linspace",
"torch.nn.Linear"
] | [((404, 425), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (423, 425), False, 'import torch\n'), ((528, 581), 'torch.nn.Linear', 'torch.nn.Linear', (['architecture[n]', 'architecture[n + 1]'], {}), '(architecture[n], architecture[n + 1])\n', (543, 581), False, 'import torch\n'), ((682, 705), 'torch.nn.Dropout', 'torch.nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (698, 705), False, 'import torch\n'), ((813, 828), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (826, 828), False, 'import torch\n'), ((1240, 1291), 'numpy.linspace', 'np.linspace', (['dim_in', 'dim_out', '(num_hidden_layers + 2)'], {}), '(dim_in, dim_out, num_hidden_layers + 2)\n', (1251, 1291), True, 'import numpy as np\n'), ((939, 968), 'lamp.utils.get_activation_function', 'get_activation_function', (['outf'], {}), '(outf)\n', (962, 968), False, 'from lamp.utils import get_activation_function\n')] |
import numpy as np
import math
from scipy.sparse import csr_matrix, diags
from scipy import linalg
import time
try:
from numba import jit, njit
numbaOn = True
except ModuleNotFoundError:
numbaOn = False
if numbaOn:
@njit(["void(float64[:], f8, float64[:], float64[:], f8, f8)"])
def velocityImplNumba(u, t, f, expVec, dxInvHalf, mu0):
n = len(u)
uSq = np.square(u)
f[0] = dxInvHalf * (math.pow(mu0, 2) - uSq[0]) + expVec[0]
for i in range(1,n):
f[i] = dxInvHalf * ( uSq[i-1] - uSq[i] ) + expVec[i]
else:
def velocityImplNumba(u, t, f, expVec, dxInvHalf, mu0):
n = len(u)
uSq = np.square(u)
f[0] = dxInvHalf * (math.pow(mu0, 2) - uSq[0]) + expVec[0]
for i in range(1,n):
f[i] = dxInvHalf * ( uSq[i-1] - uSq[i] ) + expVec[i]
if numbaOn:
@njit(["void(float64[:], float64[:], float64[:], f8)"])
def fillDiag(u, diag, ldiag, dxInv):
n = len(u)
for i in range(n-1):
diag[i] = -dxInv*u[i]
ldiag[i] = dxInv*u[i]
diag[n-1] = -dxInv*u[n-1]
else:
def fillDiag(u, diag, ldiag, dxInv):
n = len(u)
for i in range(n-1):
diag[i] = -dxInv*u[i]
ldiag[i] = dxInv*u[i]
diag[n-1] = -dxInv*u[n-1]
class Burgers1d:
    """1D Burgers equation on [xL, xR] with a first-order finite-volume scheme."""

    def __init__(self, Ncell):
        # Model parameters: mu_[0] = left boundary state, mu_[1]/mu_[2] = forcing.
        self.mu_ = np.array([5., 0.02, 0.02])
        self.xL_ = 0.
        self.xR_ = 100.
        self.Ncell_ = Ncell
        self.dx_ = 0.
        self.dxInv_ = 0.
        self.dxInvHalf_ = 0.
        self.xGrid_ = np.zeros(self.Ncell_)
        self.U0_ = np.zeros(self.Ncell_)
        self.expVec_ = np.zeros(self.Ncell_)
        self.diag_ = np.zeros(self.Ncell_)
        self.ldiag_ = np.zeros(self.Ncell_ - 1)
        self.setup()

    def setup(self):
        """Compute mesh spacing, cell centers, initial condition and forcing."""
        self.dx_ = (self.xR_ - self.xL_) / float(self.Ncell_)
        self.dxInv_ = 1.0 / self.dx_
        self.dxInvHalf_ = 0.5 * self.dxInv_
        for cell in range(0, self.Ncell_):
            self.U0_[cell] = 1.
            self.xGrid_[cell] = self.dx_ * cell + self.dx_ * 0.5
        self.expVec_ = self.mu_[1] * np.exp(self.mu_[2] * self.xGrid_)

    def createVelocity(self):
        """Return a zeroed buffer suitable for velocity()."""
        return np.zeros(self.Ncell_)

    def velocity(self, u, t, f):
        """Evaluate dU/dt into f (in place)."""
        velocityImplNumba(u, t, f[:], self.expVec_,
                          self.dxInvHalf_, self.mu_[0])

    def createApplyJacobianResult(self, B):
        """Return a zeroed buffer with the same shape as B."""
        return np.zeros_like(B)

    def applyJacobian(self, u, B, t, result):
        """Compute result = J(u) @ B in place."""
        result[:] = self.jacobian(u, t).dot(B)

    def jacobian(self, u, t):
        """Bidiagonal Jacobian of the velocity w.r.t. u, as a CSR matrix."""
        fillDiag(u, self.diag_, self.ldiag_, self.dxInv_)
        return diags([self.ldiag_, self.diag_], [-1, 0], format='csr')
| [
"math.pow",
"numba.njit",
"numpy.square",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"scipy.sparse.diags",
"numpy.zeros_like"
] | [((227, 289), 'numba.njit', 'njit', (["['void(float64[:], f8, float64[:], float64[:], f8, f8)']"], {}), "(['void(float64[:], f8, float64[:], float64[:], f8, f8)'])\n", (231, 289), False, 'from numba import jit, njit\n'), ((798, 852), 'numba.njit', 'njit', (["['void(float64[:], float64[:], float64[:], f8)']"], {}), "(['void(float64[:], float64[:], float64[:], f8)'])\n", (802, 852), False, 'from numba import jit, njit\n'), ((373, 385), 'numpy.square', 'np.square', (['u'], {}), '(u)\n', (382, 385), True, 'import numpy as np\n'), ((622, 634), 'numpy.square', 'np.square', (['u'], {}), '(u)\n', (631, 634), True, 'import numpy as np\n'), ((1254, 1281), 'numpy.array', 'np.array', (['[5.0, 0.02, 0.02]'], {}), '([5.0, 0.02, 0.02])\n', (1262, 1281), True, 'import numpy as np\n'), ((1434, 1455), 'numpy.zeros', 'np.zeros', (['self.Ncell_'], {}), '(self.Ncell_)\n', (1442, 1455), True, 'import numpy as np\n'), ((1474, 1495), 'numpy.zeros', 'np.zeros', (['self.Ncell_'], {}), '(self.Ncell_)\n', (1482, 1495), True, 'import numpy as np\n'), ((1514, 1535), 'numpy.zeros', 'np.zeros', (['self.Ncell_'], {}), '(self.Ncell_)\n', (1522, 1535), True, 'import numpy as np\n'), ((1554, 1575), 'numpy.zeros', 'np.zeros', (['self.Ncell_'], {}), '(self.Ncell_)\n', (1562, 1575), True, 'import numpy as np\n'), ((1594, 1619), 'numpy.zeros', 'np.zeros', (['(self.Ncell_ - 1)'], {}), '(self.Ncell_ - 1)\n', (1602, 1619), True, 'import numpy as np\n'), ((2001, 2022), 'numpy.zeros', 'np.zeros', (['self.Ncell_'], {}), '(self.Ncell_)\n', (2009, 2022), True, 'import numpy as np\n'), ((2209, 2225), 'numpy.zeros_like', 'np.zeros_like', (['B'], {}), '(B)\n', (2222, 2225), True, 'import numpy as np\n'), ((2418, 2473), 'scipy.sparse.diags', 'diags', (['[self.ldiag_, self.diag_]', '[-1, 0]'], {'format': '"""csr"""'}), "([self.ldiag_, self.diag_], [-1, 0], format='csr')\n", (2423, 2473), False, 'from scipy.sparse import csr_matrix, diags\n'), ((1925, 1958), 'numpy.exp', 'np.exp', (['(self.mu_[2] * self.xGrid_)'], 
{}), '(self.mu_[2] * self.xGrid_)\n', (1931, 1958), True, 'import numpy as np\n'), ((410, 426), 'math.pow', 'math.pow', (['mu0', '(2)'], {}), '(mu0, 2)\n', (418, 426), False, 'import math\n'), ((659, 675), 'math.pow', 'math.pow', (['mu0', '(2)'], {}), '(mu0, 2)\n', (667, 675), False, 'import math\n')] |
import math
import pathlib
import sys
import torch
import torch.nn as nn
PROJECT_DIR = pathlib.Path(__file__).absolute().parent.parent.parent # main directory, the parent of src
if str(PROJECT_DIR) not in sys.path:
sys.path.append(str(PROJECT_DIR))
from src.model.ConvLayer import ConvLayer
from src.model.PrimaryCaps import PrimaryCaps
from src.model.DigitCaps import DigitCaps
from src.model.Decoder import Decoder
# --- Default hyper-parameters for the capsule network (MNIST-sized input) ---
INPUT_WIDTH = 28  # input images are 28x28
NUM_CONV_IN_CHANNELS = 1  # grayscale input
CONV_KERNEL = 9
CONV_STRIDE = 1
NUM_CONV_OUT_CHANNELS = 256
NUM_PRIMARY_CHANNELS = 32
PRIMARY_CAPS_DIM = 8  # dimensionality of each primary capsule
PRIMARY_KERNEL = 9
PRIMARY_STRIDE = 2
DIGIT_CAPS_DIM = 16  # dimensionality of each digit capsule
NUM_CLASSES = 10
REGULARIZATION_SCALE = 0.0005  # weight of the reconstruction loss term
ITER = 3  # dynamic-routing iterations
DEC1_DIM = 512  # decoder hidden layer 1 width
DEC2_DIM = 1024  # decoder hidden layer 2 width
CUDA_ENABLED = True
SMALL_DECODER = False
DEVICE = 'cuda:0'
CONV_SHARED_WEIGHTS = 0  # disabled
PRIMARY_SHARED_WEIGHTS = 0  # disabled
DIGIT_SHARED_WEIGHTS = 0  # disabled
CONV_SHARED_BIAS = CONV_SHARED_WEIGHTS  # to have coherency as default
SQUASH_APPROX = False
class Net(nn.Module):
    """Capsule network: ConvLayer -> PrimaryCaps -> DigitCaps, with a
    reconstruction Decoder driven by the digit-capsule output."""

    def __init__(self,
                 input_wh=INPUT_WIDTH,
                 num_conv_in_channels=NUM_CONV_IN_CHANNELS,
                 conv_kernel=CONV_KERNEL,
                 conv_stride=CONV_STRIDE,
                 num_conv_out_channels=NUM_CONV_OUT_CHANNELS,
                 num_primary_channels=NUM_PRIMARY_CHANNELS,
                 primary_caps_dim=PRIMARY_CAPS_DIM,
                 primary_kernel=PRIMARY_KERNEL,
                 primary_stride=PRIMARY_STRIDE,
                 digit_caps_dim=DIGIT_CAPS_DIM,
                 num_classes=NUM_CLASSES,
                 regularization_scale=REGULARIZATION_SCALE,
                 iter=ITER,
                 dec1_dim=DEC1_DIM,
                 dec2_dim=DEC2_DIM,
                 cuda_enabled=CUDA_ENABLED,
                 small_decoder=SMALL_DECODER,
                 device=DEVICE,
                 conv_shared_weights=CONV_SHARED_WEIGHTS,
                 primary_shared_weights=PRIMARY_SHARED_WEIGHTS,
                 digit_shared_weights=DIGIT_SHARED_WEIGHTS,
                 conv_shared_bias=CONV_SHARED_BIAS,
                 squash_approx=SQUASH_APPROX):
        super(Net, self).__init__()
        self.cuda_enabled = cuda_enabled
        # Fall back to CPU when CUDA is disabled, regardless of `device`.
        self.device = torch.device(device if cuda_enabled else 'cpu')
        self.regularization_scale = regularization_scale
        # Spatial width after each "valid" convolution: floor((w - k + s) / s).
        conv_wh = math.floor(
            (input_wh - conv_kernel + conv_stride) / conv_stride)
        primary_wh = math.floor(
            (conv_wh - primary_kernel + primary_stride) / primary_stride)
        self.conv = ConvLayer(in_channels=num_conv_in_channels,
                              out_channels=num_conv_out_channels,
                              kernel_size=conv_kernel,
                              stride=conv_stride,
                              cuda_enabled=cuda_enabled,
                              device=device,
                              shared_weights=conv_shared_weights,
                              shared_bias=conv_shared_bias)
        self.primary = PrimaryCaps(in_channels=num_conv_out_channels,
                                   out_channels=num_primary_channels,
                                   out_caps_dim=primary_caps_dim,
                                   kernel_size=primary_kernel,
                                   stride=primary_stride,
                                   cuda_enabled=cuda_enabled,
                                   device=device,
                                   shared_weights=primary_shared_weights,
                                   squash_approx=squash_approx)
        self.digit = DigitCaps(in_dim=num_primary_channels * primary_wh * primary_wh,
                              out_dim=num_classes,
                              in_caps_dim=primary_caps_dim,
                              out_caps_dim=digit_caps_dim,
                              iter=iter,
                              cuda_enabled=cuda_enabled,
                              device=device,
                              shared_weights=digit_shared_weights,
                              squash_approx=squash_approx)
        # A "small" decoder reconstructs from one capsule; the full decoder
        # sees the flattened output of every class capsule.
        if small_decoder:
            decoder_input = digit_caps_dim
        else:
            decoder_input = num_classes * digit_caps_dim
        self.decoder = Decoder(in_dim=decoder_input,
                               l1_dim=dec1_dim,
                               l2_dim=dec2_dim,
                               out_dim=input_wh * input_wh,
                               device=device,
                               small_decoder=small_decoder)

    def forward(self, x, labels, is_training=True):
        """Run the capsule stack; returns (digit capsule output, reconstruction)."""
        capsules = self.digit(self.primary(self.conv(x)))
        reconstruction = self.decoder(capsules, labels, is_training)
        return capsules, reconstruction
| [
"src.model.PrimaryCaps.PrimaryCaps",
"math.floor",
"pathlib.Path",
"src.model.Decoder.Decoder",
"src.model.DigitCaps.DigitCaps",
"src.model.ConvLayer.ConvLayer",
"torch.device"
] | [((2509, 2573), 'math.floor', 'math.floor', (['((input_wh - conv_kernel + conv_stride) / conv_stride)'], {}), '((input_wh - conv_kernel + conv_stride) / conv_stride)\n', (2519, 2573), False, 'import math\n'), ((2611, 2690), 'math.floor', 'math.floor', (['((conv_dimension - primary_kernel + primary_stride) / primary_stride)'], {}), '((conv_dimension - primary_kernel + primary_stride) / primary_stride)\n', (2621, 2690), False, 'import math\n'), ((2722, 2968), 'src.model.ConvLayer.ConvLayer', 'ConvLayer', ([], {'in_channels': 'num_conv_in_channels', 'out_channels': 'num_conv_out_channels', 'kernel_size': 'conv_kernel', 'stride': 'conv_stride', 'cuda_enabled': 'cuda_enabled', 'device': 'device', 'shared_weights': 'conv_shared_weights', 'shared_bias': 'conv_shared_bias'}), '(in_channels=num_conv_in_channels, out_channels=\n num_conv_out_channels, kernel_size=conv_kernel, stride=conv_stride,\n cuda_enabled=cuda_enabled, device=device, shared_weights=\n conv_shared_weights, shared_bias=conv_shared_bias)\n', (2731, 2968), False, 'from src.model.ConvLayer import ConvLayer\n'), ((3198, 3490), 'src.model.PrimaryCaps.PrimaryCaps', 'PrimaryCaps', ([], {'in_channels': 'num_conv_out_channels', 'out_channels': 'num_primary_channels', 'out_caps_dim': 'primary_caps_dim', 'kernel_size': 'primary_kernel', 'stride': 'primary_stride', 'cuda_enabled': 'cuda_enabled', 'device': 'device', 'shared_weights': 'primary_shared_weights', 'squash_approx': 'squash_approx'}), '(in_channels=num_conv_out_channels, out_channels=\n num_primary_channels, out_caps_dim=primary_caps_dim, kernel_size=\n primary_kernel, stride=primary_stride, cuda_enabled=cuda_enabled,\n device=device, shared_weights=primary_shared_weights, squash_approx=\n squash_approx)\n', (3209, 3490), False, 'from src.model.PrimaryCaps import PrimaryCaps\n'), ((3784, 4078), 'src.model.DigitCaps.DigitCaps', 'DigitCaps', ([], {'in_dim': '(num_primary_channels * primary_dimension * primary_dimension)', 'out_dim': 'num_classes', 
'in_caps_dim': 'primary_caps_dim', 'out_caps_dim': 'digit_caps_dim', 'iter': 'iter', 'cuda_enabled': 'cuda_enabled', 'device': 'device', 'shared_weights': 'digit_shared_weights', 'squash_approx': 'squash_approx'}), '(in_dim=num_primary_channels * primary_dimension *\n primary_dimension, out_dim=num_classes, in_caps_dim=primary_caps_dim,\n out_caps_dim=digit_caps_dim, iter=iter, cuda_enabled=cuda_enabled,\n device=device, shared_weights=digit_shared_weights, squash_approx=\n squash_approx)\n', (3793, 4078), False, 'from src.model.DigitCaps import DigitCaps\n'), ((4432, 4574), 'src.model.Decoder.Decoder', 'Decoder', ([], {'in_dim': 'decoder_in_dim', 'l1_dim': 'dec1_dim', 'l2_dim': 'dec2_dim', 'out_dim': '(input_wh * input_wh)', 'device': 'device', 'small_decoder': 'small_decoder'}), '(in_dim=decoder_in_dim, l1_dim=dec1_dim, l2_dim=dec2_dim, out_dim=\n input_wh * input_wh, device=device, small_decoder=small_decoder)\n', (4439, 4574), False, 'from src.model.Decoder import Decoder\n'), ((2338, 2358), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (2350, 2358), False, 'import torch\n'), ((2401, 2420), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2413, 2420), False, 'import torch\n'), ((96, 118), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (108, 118), False, 'import pathlib\n')] |
from flask import current_app, request, Response, make_response
from rdflib import ConjunctiveGraph
from werkzeug.exceptions import abort
from depot.middleware import FileServeApp
from .entity_blueprint import entity_blueprint
from whyis.data_extensions import DATA_EXTENSIONS
from whyis.data_formats import DATA_FORMATS
from whyis.decorator import conditional_login_required
import sadi.mimeparse
from whyis.html_mime_types import HTML_MIME_TYPES
@entity_blueprint.route('/about.<format>', methods=['GET'])
@entity_blueprint.route('/<path:name>', methods=['GET'])
@entity_blueprint.route('/<path:name>.<format>', methods=['GET'])
@entity_blueprint.route('/', methods=['GET'])
@entity_blueprint.route('/home', methods=['GET'])
@entity_blueprint.route('/about', methods=['GET'])
@conditional_login_required
def view(name=None, format=None, view=None):
    """Serve an entity: an attached file, an HTML view, or serialized RDF.

    Dispatch order:
      1. If the resource has a whyis:hasFileID (and no explicit ?view=...),
         stream the stored file from the nanopub or file depot.
      2. If the negotiated content type is HTML (or ?view= is given),
         render the configured HTML view.
      3. If it matches a known RDF serialization, render the 'describe'
         view as JSON-LD and re-serialize into the requested format.
      4. Otherwise fall back to the default HTML view.
    """
    current_app.db.store.nsBindings = {}
    entity, content_type = current_app.get_entity_uri(name, format)
    resource = current_app.get_resource(entity)

    # 'view' is the default view
    fileid = resource.value(current_app.NS.whyis.hasFileID)
    if fileid is not None and 'view' not in request.args:
        fileid = fileid.value
        f = None
        # Prefer the nanopub depot; fall back to the general file depot.
        if current_app.nanopub_depot is not None and current_app.nanopub_depot.exists(fileid):
            f = current_app.nanopub_depot.get(fileid)
        elif current_app.file_depot.exists(fileid):
            f = current_app.file_depot.get(fileid)
        if f is not None:
            # Default cache lifetime: one week.
            fsa = FileServeApp(f, current_app.config["file_archive"].get("cache_max_age",3600*24*7))
            return fsa

    if content_type is None:
        content_type = request.headers['Accept'] if 'Accept' in request.headers else 'text/turtle'
    # Negotiate the best matching RDF serialization for the Accept header.
    fmt = sadi.mimeparse.best_match([mt for mt in list(DATA_FORMATS.keys()) if mt is not None],content_type)
    if 'view' in request.args or fmt in HTML_MIME_TYPES:
        return current_app.render_view(resource)
    elif fmt in DATA_FORMATS:
        # Render the 'describe' view (JSON-LD), then convert to the
        # negotiated serialization.
        output_graph = ConjunctiveGraph()
        result, status, headers = current_app.render_view(resource, view='describe')
        output_graph.parse(data=result, format="json-ld")
        return output_graph.serialize(format=DATA_FORMATS[fmt]), 200, {'Content-Type':content_type}
    else:
        return current_app.render_view(resource)
| [
"flask.current_app.get_resource",
"flask.current_app.get_entity_uri",
"flask.current_app.file_depot.exists",
"flask.current_app.nanopub_depot.exists",
"whyis.data_formats.DATA_FORMATS.keys",
"flask.current_app.file_depot.get",
"flask.current_app.nanopub_depot.get",
"rdflib.ConjunctiveGraph",
"flask.... | [((923, 963), 'flask.current_app.get_entity_uri', 'current_app.get_entity_uri', (['name', 'format'], {}), '(name, format)\n', (949, 963), False, 'from flask import current_app, request, Response, make_response\n'), ((980, 1012), 'flask.current_app.get_resource', 'current_app.get_resource', (['entity'], {}), '(entity)\n', (1004, 1012), False, 'from flask import current_app, request, Response, make_response\n'), ((1943, 1976), 'flask.current_app.render_view', 'current_app.render_view', (['resource'], {}), '(resource)\n', (1966, 1976), False, 'from flask import current_app, request, Response, make_response\n'), ((1265, 1305), 'flask.current_app.nanopub_depot.exists', 'current_app.nanopub_depot.exists', (['fileid'], {}), '(fileid)\n', (1297, 1305), False, 'from flask import current_app, request, Response, make_response\n'), ((1323, 1360), 'flask.current_app.nanopub_depot.get', 'current_app.nanopub_depot.get', (['fileid'], {}), '(fileid)\n', (1352, 1360), False, 'from flask import current_app, request, Response, make_response\n'), ((1374, 1411), 'flask.current_app.file_depot.exists', 'current_app.file_depot.exists', (['fileid'], {}), '(fileid)\n', (1403, 1411), False, 'from flask import current_app, request, Response, make_response\n'), ((2030, 2048), 'rdflib.ConjunctiveGraph', 'ConjunctiveGraph', ([], {}), '()\n', (2046, 2048), False, 'from rdflib import ConjunctiveGraph\n'), ((2083, 2133), 'flask.current_app.render_view', 'current_app.render_view', (['resource'], {'view': '"""describe"""'}), "(resource, view='describe')\n", (2106, 2133), False, 'from flask import current_app, request, Response, make_response\n'), ((2410, 2443), 'flask.current_app.render_view', 'current_app.render_view', (['resource'], {}), '(resource)\n', (2433, 2443), False, 'from flask import current_app, request, Response, make_response\n'), ((1429, 1463), 'flask.current_app.file_depot.get', 'current_app.file_depot.get', (['fileid'], {}), '(fileid)\n', (1455, 1463), False, 'from flask 
import current_app, request, Response, make_response\n'), ((1817, 1836), 'whyis.data_formats.DATA_FORMATS.keys', 'DATA_FORMATS.keys', ([], {}), '()\n', (1834, 1836), False, 'from whyis.data_formats import DATA_FORMATS\n')] |
# Copyright 2016 The Cebes Authors. All Rights Reserved.
#
# Licensed under the Apache License, version 2.0 (the "License").
# You may not use this work except in compliance with the License,
# which is available at www.apache.org/licenses/LICENSE-2.0
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied, as more fully set forth in the License.
#
# See the NOTICE file distributed with this work for information regarding copyright ownership.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import six
from pycebes.core import pipeline_api as pl
from pycebes.core.dataframe import Dataframe
from pycebes.core.exceptions import ServerException
from pycebes.core.pipeline import Pipeline, Model
from tests import test_base
class TestPipeline(test_base.TestBase):
    def test_stage_general(self):
        """Stage names are auto-assigned and must be unique within a Pipeline."""
        df = self.cylinder_bands
        with Pipeline() as ppl:
            s = pl.drop(df, ['hardener', 'customer'])
            name = s.get_name()
            self.assertIsNotNone(name)
            # Reusing an existing stage name must be rejected.
            with self.assertRaises(ValueError):
                pl.drop(df, ['customer'], name=name)
        self.assertIsInstance(ppl.stages, dict)
        self.assertIsInstance(repr(ppl), six.string_types)
    def test_drop(self):
        """Running a named drop stage removes columns; closed pipelines are frozen."""
        df = self.cylinder_bands
        with Pipeline() as ppl:
            d = pl.drop(df, ['hardener', 'customer'], name='drop_stage')
        df2 = ppl.run(d.output_df)
        self.assertIsInstance(df2, Dataframe)
        # Exactly the two requested columns are gone.
        self.assertEqual(len(df2.columns) + 2, len(df.columns))
        self.assertTrue('hardener' not in df2.columns)
        self.assertTrue('customer' not in df2.columns)
        # magic methods: containment by stage object and by stage name
        self.assertTrue(d in ppl)
        self.assertTrue('drop_stage' in ppl)
        self.assertEqual(d, ppl['drop_stage'])
        # cannot add more stages into the pipeline once its context has exited
        with self.assertRaises(ValueError) as ex:
            with ppl:
                pl.drop(df, ['customer'])
        self.assertIn('Cannot add more stage into this Pipeline', '{}'.format(ex.exception))
    def test_placeholder(self):
        """A dataframe placeholder must be fed at run time via `feeds`."""
        with Pipeline() as ppl:
            data = pl.placeholder(pl.PlaceholderTypes.DATAFRAME)
            d = pl.drop(df=data, col_names=['hardener', 'customer'])
        # Running without feeding the placeholder fails on the server side.
        with self.assertRaises(ServerException) as ex:
            ppl.run(d.output_df)
        self.assertTrue('Input slot inputVal is undefined' in '{}'.format(ex.exception))
        df = self.cylinder_bands
        # Feeding the placeholder makes the same pipeline succeed.
        df2 = ppl.run(d.output_df, feeds={data: df})
        self.assertIsInstance(df2, Dataframe)
        self.assertEqual(len(df2.columns) + 2, len(df.columns))
        self.assertTrue('hardener' not in df2.columns)
        self.assertTrue('customer' not in df2.columns)
    def test_value_placeholder(self):
        """Value placeholders (here: the column list) can be fed alongside dataframes."""
        with Pipeline() as ppl:
            data = pl.placeholder(pl.PlaceholderTypes.DATAFRAME)
            cols = pl.placeholder(pl.PlaceholderTypes.VALUE, value_type='array')
            d = pl.drop(df=data, col_names=cols)
        # Without feeds, the server rejects the run.
        with self.assertRaises(ServerException) as ex:
            ppl.run(d.output_df)
        self.assertTrue('Input slot inputVal is undefined' in '{}'.format(ex.exception))
        df = self.cylinder_bands
        # Feed both placeholders: the dataframe and the column-name array.
        df2 = ppl.run(d.output_df, feeds={data: df, cols: ['hardener', 'customer']})
        self.assertIsInstance(df2, Dataframe)
        self.assertEqual(len(df2.columns) + 2, len(df.columns))
        self.assertTrue('hardener' not in df2.columns)
        self.assertTrue('customer' not in df2.columns)
def test_linear_regression_with_vector_assembler(self):
df = self.cylinder_bands
self.assertGreater(len(df), 10)
df = df.dropna(columns=['viscosity', 'proof_cut', 'caliper'])
self.assertGreater(len(df), 10)
with Pipeline() as ppl:
assembler = pl.vector_assembler(df, ['viscosity', 'proof_cut'], 'features')
s = pl.linear_regression(assembler.output_df, features_col='features',
label_col='caliper', prediction_col='caliper_predict', reg_param=0.001)
r = ppl.run([s.output_df, s.model, assembler.output_df])
self.assertEqual(len(r), 3)
df1 = r[0]
self.assertIsInstance(df1, Dataframe)
self.assertEqual(len(df1), len(df))
self.assertEqual(len(df1.columns), len(df.columns) + 2)
self.assertTrue('features' in df1.columns)
self.assertTrue('caliper_predict' in df1.columns)
m = r[1]
self.assertIsInstance(m, Model)
self.assertEqual(m.inputs['reg_param'], 0.001)
self.assertIsInstance(m.metadata, dict)
df2 = r[2]
self.assertIsInstance(df2, Dataframe)
self.assertEqual(len(df2), len(df))
self.assertEqual(len(df2.columns), len(df.columns) + 1)
self.assertTrue('features' in df2.columns)
def test_linear_regression_with_vector_assembler_with_placeholder(self):
# define the pipeline
with Pipeline() as ppl:
inp = pl.placeholder(pl.PlaceholderTypes.DATAFRAME)
assembler = pl.vector_assembler(inp, ['viscosity', 'proof_cut'], 'features')
lr = pl.linear_regression(assembler.output_df, features_col='features',
label_col='caliper', prediction_col='caliper_predict', reg_param=0.001)
# fail because placeholder is not filled
with self.assertRaises(ServerException) as ex:
ppl.run([lr.output_df, lr.model, assembler.output_df])
self.assertTrue('Input slot inputVal is undefined' in '{}'.format(ex.exception))
# run again with feeds into the placeholder
df = self.cylinder_bands.dropna(columns=['viscosity', 'proof_cut', 'caliper'])
self.assertGreater(len(df), 10)
r = ppl.run([lr.output_df, lr.model, assembler.output_df], feeds={inp: df})
self.assertEqual(len(r), 3)
df1 = r[0]
self.assertIsInstance(df1, Dataframe)
self.assertEqual(len(df1), len(df))
self.assertEqual(len(df1.columns), len(df.columns) + 2)
self.assertTrue('features' in df1.columns)
self.assertTrue('caliper_predict' in df1.columns)
pandas_df = df1.take(5)
self.assertEqual(len(pandas_df), 5)
m = r[1]
self.assertIsInstance(m, Model)
self.assertEqual(m.inputs['reg_param'], 0.001)
self.assertIsInstance(m.metadata, dict)
df2 = r[2]
self.assertIsInstance(df2, Dataframe)
self.assertEqual(len(df2), len(df))
self.assertEqual(len(df2.columns), len(df.columns) + 1)
self.assertTrue('features' in df2.columns)
# Run again with a different input dataframe, model ID shouldn't change
new_df = df.where(df.viscosity > 40)
r2 = ppl.run([lr.output_df, lr.model, assembler.output_df], feeds={inp: new_df})
self.assertEqual(r2[1].id, r[1].id)
def test_linear_regression_with_vector_assembler_with_placeholders(self):
# define the pipeline
with Pipeline() as ppl:
inp_df = pl.placeholder(pl.PlaceholderTypes.DATAFRAME)
inp_col = pl.placeholder(pl.PlaceholderTypes.VALUE)
assembler = pl.vector_assembler(inp_df, [''], inp_col)
s = pl.linear_regression(assembler.output_df, features_col='features',
label_col='caliper', prediction_col='caliper_predict', reg_param=0.001)
df = self.cylinder_bands.dropna(columns=['viscosity', 'proof_cut', 'caliper'])
self.assertGreater(len(df), 10)
r = ppl.run([s.output_df, s.model, assembler.output_df],
feeds={inp_df: df, inp_col: 'features', assembler.input_cols: ['viscosity', 'proof_cut']})
self.assertEqual(len(r), 3)
df1 = r[0]
self.assertIsInstance(df1, Dataframe)
self.assertEqual(len(df1), len(df))
self.assertEqual(len(df1.columns), len(df.columns) + 2)
self.assertTrue('features' in df1.columns)
self.assertTrue('caliper_predict' in df1.columns)
m = r[1]
self.assertIsInstance(m, Model)
self.assertEqual(m.inputs['reg_param'], 0.001)
self.assertIsInstance(m.metadata, dict)
df2 = r[2]
self.assertIsInstance(df2, Dataframe)
self.assertEqual(len(df2), len(df))
self.assertEqual(len(df2.columns), len(df.columns) + 1)
self.assertTrue('features' in df2.columns)
# assemble some other columns
df = self.cylinder_bands.dropna(columns=['viscosity', 'proof_cut', 'ink_temperature', 'caliper'])
self.assertGreater(len(df), 10)
r = ppl.run([s.output_df, s.model, assembler.output_df],
feeds={inp_df: df, inp_col: 'new_features',
assembler.input_cols: ['viscosity', 'proof_cut', 'ink_temperature'],
s.features_col: 'new_features'})
self.assertEqual(len(r), 3)
df1 = r[0]
self.assertIsInstance(df1, Dataframe)
self.assertEqual(len(df1), len(df))
self.assertEqual(len(df1.columns), len(df.columns) + 2)
self.assertTrue('new_features' in df1.columns)
self.assertTrue('caliper_predict' in df1.columns)
m = r[1]
self.assertIsInstance(m, Model)
self.assertEqual(m.inputs['reg_param'], 0.001)
self.assertIsInstance(m.metadata, dict)
df2 = r[2]
self.assertIsInstance(df2, Dataframe)
self.assertEqual(len(df2), len(df))
self.assertEqual(len(df2.columns), len(df.columns) + 1)
self.assertTrue('new_features' in df2.columns)
if __name__ == '__main__':
unittest.main()
| [
"pycebes.core.pipeline_api.placeholder",
"pycebes.core.pipeline_api.drop",
"pycebes.core.pipeline.Pipeline",
"pycebes.core.pipeline_api.linear_regression",
"pycebes.core.pipeline_api.vector_assembler",
"unittest.main"
] | [((9800, 9815), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9813, 9815), False, 'import unittest\n'), ((1009, 1019), 'pycebes.core.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (1017, 1019), False, 'from pycebes.core.pipeline import Pipeline, Model\n'), ((1044, 1081), 'pycebes.core.pipeline_api.drop', 'pl.drop', (['df', "['hardener', 'customer']"], {}), "(df, ['hardener', 'customer'])\n", (1051, 1081), True, 'from pycebes.core import pipeline_api as pl\n'), ((1435, 1445), 'pycebes.core.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (1443, 1445), False, 'from pycebes.core.pipeline import Pipeline, Model\n'), ((1470, 1526), 'pycebes.core.pipeline_api.drop', 'pl.drop', (['df', "['hardener', 'customer']"], {'name': '"""drop_stage"""'}), "(df, ['hardener', 'customer'], name='drop_stage')\n", (1477, 1526), True, 'from pycebes.core import pipeline_api as pl\n'), ((2239, 2249), 'pycebes.core.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (2247, 2249), False, 'from pycebes.core.pipeline import Pipeline, Model\n'), ((2277, 2322), 'pycebes.core.pipeline_api.placeholder', 'pl.placeholder', (['pl.PlaceholderTypes.DATAFRAME'], {}), '(pl.PlaceholderTypes.DATAFRAME)\n', (2291, 2322), True, 'from pycebes.core import pipeline_api as pl\n'), ((2339, 2391), 'pycebes.core.pipeline_api.drop', 'pl.drop', ([], {'df': 'data', 'col_names': "['hardener', 'customer']"}), "(df=data, col_names=['hardener', 'customer'])\n", (2346, 2391), True, 'from pycebes.core import pipeline_api as pl\n'), ((2929, 2939), 'pycebes.core.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (2937, 2939), False, 'from pycebes.core.pipeline import Pipeline, Model\n'), ((2967, 3012), 'pycebes.core.pipeline_api.placeholder', 'pl.placeholder', (['pl.PlaceholderTypes.DATAFRAME'], {}), '(pl.PlaceholderTypes.DATAFRAME)\n', (2981, 3012), True, 'from pycebes.core import pipeline_api as pl\n'), ((3032, 3093), 'pycebes.core.pipeline_api.placeholder', 'pl.placeholder', (['pl.PlaceholderTypes.VALUE'], 
{'value_type': '"""array"""'}), "(pl.PlaceholderTypes.VALUE, value_type='array')\n", (3046, 3093), True, 'from pycebes.core import pipeline_api as pl\n'), ((3110, 3142), 'pycebes.core.pipeline_api.drop', 'pl.drop', ([], {'df': 'data', 'col_names': 'cols'}), '(df=data, col_names=cols)\n', (3117, 3142), True, 'from pycebes.core import pipeline_api as pl\n'), ((3918, 3928), 'pycebes.core.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (3926, 3928), False, 'from pycebes.core.pipeline import Pipeline, Model\n'), ((3961, 4024), 'pycebes.core.pipeline_api.vector_assembler', 'pl.vector_assembler', (['df', "['viscosity', 'proof_cut']", '"""features"""'], {}), "(df, ['viscosity', 'proof_cut'], 'features')\n", (3980, 4024), True, 'from pycebes.core import pipeline_api as pl\n'), ((4041, 4183), 'pycebes.core.pipeline_api.linear_regression', 'pl.linear_regression', (['assembler.output_df'], {'features_col': '"""features"""', 'label_col': '"""caliper"""', 'prediction_col': '"""caliper_predict"""', 'reg_param': '(0.001)'}), "(assembler.output_df, features_col='features',\n label_col='caliper', prediction_col='caliper_predict', reg_param=0.001)\n", (4061, 4183), True, 'from pycebes.core import pipeline_api as pl\n'), ((5110, 5120), 'pycebes.core.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (5118, 5120), False, 'from pycebes.core.pipeline import Pipeline, Model\n'), ((5147, 5192), 'pycebes.core.pipeline_api.placeholder', 'pl.placeholder', (['pl.PlaceholderTypes.DATAFRAME'], {}), '(pl.PlaceholderTypes.DATAFRAME)\n', (5161, 5192), True, 'from pycebes.core import pipeline_api as pl\n'), ((5217, 5281), 'pycebes.core.pipeline_api.vector_assembler', 'pl.vector_assembler', (['inp', "['viscosity', 'proof_cut']", '"""features"""'], {}), "(inp, ['viscosity', 'proof_cut'], 'features')\n", (5236, 5281), True, 'from pycebes.core import pipeline_api as pl\n'), ((5299, 5441), 'pycebes.core.pipeline_api.linear_regression', 'pl.linear_regression', (['assembler.output_df'], {'features_col': 
'"""features"""', 'label_col': '"""caliper"""', 'prediction_col': '"""caliper_predict"""', 'reg_param': '(0.001)'}), "(assembler.output_df, features_col='features',\n label_col='caliper', prediction_col='caliper_predict', reg_param=0.001)\n", (5319, 5441), True, 'from pycebes.core import pipeline_api as pl\n'), ((7165, 7175), 'pycebes.core.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (7173, 7175), False, 'from pycebes.core.pipeline import Pipeline, Model\n'), ((7205, 7250), 'pycebes.core.pipeline_api.placeholder', 'pl.placeholder', (['pl.PlaceholderTypes.DATAFRAME'], {}), '(pl.PlaceholderTypes.DATAFRAME)\n', (7219, 7250), True, 'from pycebes.core import pipeline_api as pl\n'), ((7273, 7314), 'pycebes.core.pipeline_api.placeholder', 'pl.placeholder', (['pl.PlaceholderTypes.VALUE'], {}), '(pl.PlaceholderTypes.VALUE)\n', (7287, 7314), True, 'from pycebes.core import pipeline_api as pl\n'), ((7339, 7381), 'pycebes.core.pipeline_api.vector_assembler', 'pl.vector_assembler', (['inp_df', "['']", 'inp_col'], {}), "(inp_df, [''], inp_col)\n", (7358, 7381), True, 'from pycebes.core import pipeline_api as pl\n'), ((7398, 7540), 'pycebes.core.pipeline_api.linear_regression', 'pl.linear_regression', (['assembler.output_df'], {'features_col': '"""features"""', 'label_col': '"""caliper"""', 'prediction_col': '"""caliper_predict"""', 'reg_param': '(0.001)'}), "(assembler.output_df, features_col='features',\n label_col='caliper', prediction_col='caliper_predict', reg_param=0.001)\n", (7418, 7540), True, 'from pycebes.core import pipeline_api as pl\n'), ((1218, 1254), 'pycebes.core.pipeline_api.drop', 'pl.drop', (['df', "['customer']"], {'name': 'name'}), "(df, ['customer'], name=name)\n", (1225, 1254), True, 'from pycebes.core import pipeline_api as pl\n'), ((2074, 2099), 'pycebes.core.pipeline_api.drop', 'pl.drop', (['df', "['customer']"], {}), "(df, ['customer'])\n", (2081, 2099), True, 'from pycebes.core import pipeline_api as pl\n')] |
"""
Import wikidata nodes into KGTK file
"""
def parser():
return {
'help': 'Import wikidata nodes into KGTK file'
}
def add_arguments(parser):
"""
Parse arguments
Args:
parser (argparse.ArgumentParser)
"""
parser.add_argument("-i", action="store", type=str, dest="wikidat_file")
parser.add_argument("-o", action="store", type=str, dest="output_file")
parser.add_argument(
"-l",
action="store",
type=int,
dest="limit",
default=None)
parser.add_argument(
"-L",
action="store",
type=str,
dest="lang",
default="en")
parser.add_argument(
"-s",
action="store",
type=str,
dest="doc_id",
default="wikidata-20200203")
def run(wikidata_file, output_file, limit, lang, doc_id):
# import modules locally
import bz2
import json
import csv
site_filter = '{}wiki'.format(lang)
WD_META_ITEMS = [
"Q163875",
"Q191780",
"Q224414",
"Q4167836",
"Q4167410",
"Q4663903",
"Q11266439",
"Q13406463",
"Q15407973",
"Q18616576",
"Q19887878",
"Q22808320",
"Q23894233",
"Q33120876",
"Q42104522",
"Q47460393",
"Q64875536",
"Q66480449",
]
# filter: currently defined as OR: one hit suffices to be removed from
# further processing
exclude_list = WD_META_ITEMS
# punctuation
exclude_list.extend(["Q1383557", "Q10617810"])
# letters etc
exclude_list.extend(["Q188725", "Q19776628", "Q3841820",
"Q17907810", "Q9788", "Q9398093"])
neg_prop_filter = {
'P31': exclude_list, # instance of
'P279': exclude_list # subclass
}
title_to_id = dict()
id_to_descr = dict()
id_to_alias = dict()
to_print = False
# parse appropriate fields - depending on what we need in the KB
parse_properties = False
parse_descr = True
parse_sitelinks = True
parse_labels = True
parse_aliases = True
parse_claims = True
# create the header of the csv file
header = []
header.append('id')
if parse_labels:
header.append('label')
header.append('type')
if parse_descr:
header.append('descriptions')
if parse_aliases:
header.append('aliases')
header.append('document_id')
with open(output_file, 'w', newline='') as myfile:
wr = csv.writer(
myfile,
quoting=csv.QUOTE_NONE,
delimiter="\t",
escapechar="\n",
quotechar='')
wr.writerow(header)
rows = []
with bz2.open(wikidata_file, mode='rb') as file:
print('processing wikidata file now...')
for cnt, line in enumerate(file):
keep = False
if limit and cnt >= limit:
break
if cnt % 500000 == 0 and cnt > 0:
print('processed {} lines'.format(cnt))
clean_line = line.strip()
if clean_line.endswith(b","):
clean_line = clean_line[:-1]
if len(clean_line) > 1:
obj = json.loads(clean_line)
entry_type = obj["type"]
if entry_type == "item" or entry_type == "property":
keep = True
if keep:
row = []
qnode = obj["id"]
row.append(qnode)
if parse_labels:
labels = obj["labels"]
if labels:
lang_label = labels.get(lang, None)
if lang_label:
row.append(
'\'' + lang_label['value'] + '\'' + "@" + lang)
if to_print:
print(
"label (" + lang + "):", lang_label["value"])
else:
row.append("")
else:
row.append("")
row.append(entry_type)
if parse_descr:
descriptions = obj["descriptions"]
if descriptions:
lang_descr = descriptions.get(lang, None)
if lang_descr:
row.append(
'\'' + lang_descr['value'] + '\'' + "@" + lang)
if to_print:
print(
"description (" + lang + "):",
lang_descr["value"],
)
else:
row.append("")
else:
row.append("")
if parse_aliases:
aliases = obj["aliases"]
if aliases:
lang_aliases = aliases.get(lang, None)
if lang_aliases:
alias_list = []
for item in lang_aliases:
alias_list.append(
'\'' + item['value'] + '\'' + "@" + lang)
if to_print:
print(
"alias (" + lang + "):", item["value"])
row.append("|".join(alias_list))
else:
row.append('')
else:
row.append('')
row.append(doc_id)
rows.append(row)
if cnt % 50000 == 0 and cnt > 0:
with open(output_file, 'a', newline='') as myfile:
for row in rows:
wr = csv.writer(
myfile,
quoting=csv.QUOTE_NONE,
delimiter="\t",
escapechar="\n",
quotechar='')
wr.writerow(row)
rows = []
with open(output_file, 'a', newline='') as myfile:
for row in rows:
wr = csv.writer(
myfile,
quoting=csv.QUOTE_NONE,
delimiter="\t",
escapechar="\n",
quotechar='')
wr.writerow(row)
print('import complete')
| [
"json.loads",
"csv.writer",
"bz2.open"
] | [((2540, 2633), 'csv.writer', 'csv.writer', (['myfile'], {'quoting': 'csv.QUOTE_NONE', 'delimiter': '"""\t"""', 'escapechar': '"""\n"""', 'quotechar': '""""""'}), "(myfile, quoting=csv.QUOTE_NONE, delimiter='\\t', escapechar='\\n',\n quotechar='')\n", (2550, 2633), False, 'import csv\n'), ((2744, 2778), 'bz2.open', 'bz2.open', (['wikidata_file'], {'mode': '"""rb"""'}), "(wikidata_file, mode='rb')\n", (2752, 2778), False, 'import bz2\n'), ((6593, 6686), 'csv.writer', 'csv.writer', (['myfile'], {'quoting': 'csv.QUOTE_NONE', 'delimiter': '"""\t"""', 'escapechar': '"""\n"""', 'quotechar': '""""""'}), "(myfile, quoting=csv.QUOTE_NONE, delimiter='\\t', escapechar='\\n',\n quotechar='')\n", (6603, 6686), False, 'import csv\n'), ((3250, 3272), 'json.loads', 'json.loads', (['clean_line'], {}), '(clean_line)\n', (3260, 3272), False, 'import json\n'), ((6194, 6287), 'csv.writer', 'csv.writer', (['myfile'], {'quoting': 'csv.QUOTE_NONE', 'delimiter': '"""\t"""', 'escapechar': '"""\n"""', 'quotechar': '""""""'}), "(myfile, quoting=csv.QUOTE_NONE, delimiter='\\t', escapechar='\\n',\n quotechar='')\n", (6204, 6287), False, 'import csv\n')] |
import time
import argparse
from datetime import datetime
import logging
import numpy as np
import os
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel
from data import EqaDataLoader
from metrics import NavMetric
from models import MaskedNLLCriterion
from models import get_state, ensure_shared_grads
from data import load_vocab
from torch.autograd import Variable
from tqdm import tqdm
import time
torch.backends.cudnn.enabled = False
################################################################################################
#make models trained in pytorch 4 compatible with earlier pytorch versions
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
################################################################################################
def eval(rank, args, shared_model):
torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))
if args.model_type == 'cnn':
model_kwargs = {}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'cnn+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'lstm':
model_kwargs = {}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm-mult+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnMultModel(**model_kwargs)
elif args.model_type == 'pacman':
model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
model = NavPlannerControllerModel(**model_kwargs)
else:
exit()
eval_loader_kwargs = {
'questions_h5': getattr(args, args.eval_split + '_h5'),
'data_json': args.data_json,
'vocab': args.vocab_json,
'target_obj_conn_map_dir': args.target_obj_conn_map_dir,
'map_resolution': args.map_resolution,
'batch_size': 1,
'input_type': args.model_type,
'num_frames': 5,
'split': args.eval_split,
'max_threads_per_gpu': args.max_threads_per_gpu,
'gpu_id': args.gpus[rank % len(args.gpus)],
'to_cache': False,
'overfit': args.overfit,
'max_controller_actions': args.max_controller_actions,
}
eval_loader = EqaDataLoader(**eval_loader_kwargs)
print('eval_loader has %d samples' % len(eval_loader.dataset))
logging.info("EVAL: eval_loader has {} samples".format(len(eval_loader.dataset)))
args.output_log_path = os.path.join(args.log_dir,
'eval_' + str(rank) + '.json')
t, epoch, best_eval_acc = 0, 0, 0.0
max_epochs = args.max_epochs
if args.mode == 'eval':
max_epochs = 1
while epoch < int(max_epochs):
invalids = []
model.load_state_dict(shared_model.state_dict())
model.eval()
# that's a lot of numbers
metrics = NavMetric(
info={'split': args.eval_split,
'thread': rank},
metric_names=[
'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
'ep_len_30', 'ep_len_50'
],
log_json=args.output_log_path)
if 'cnn' in args.model_type:
done = False
while done == False:
for batch in tqdm(eval_loader):
model.load_state_dict(shared_model.state_dict())
model.cuda()
idx, questions, _, img_feats, actions_in, actions_out, action_length = batch
metrics_slug = {}
# evaluate at multiple initializations
for i in [10, 30, 50]:
t += 1
if action_length[0] + 1 - i - 5 < 0:
invalids.append(idx[0])
continue
ep_inds = [
x for x in range(action_length[0] + 1 - i - 5,
action_length[0] + 1 - i)
]
sub_img_feats = torch.index_select(
img_feats, 1, torch.LongTensor(ep_inds))
init_pos = eval_loader.dataset.episode_pos_queue[
ep_inds[-1]]
h3d = eval_loader.dataset.episode_house
h3d.env.reset(
x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
init_dist_to_target = h3d.get_dist_to_target(
h3d.env.cam.pos)
if init_dist_to_target < 0: # unreachable
invalids.append(idx[0])
continue
sub_img_feats_var = Variable(sub_img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
# sample actions till max steps or <stop>
# max no. of actions = 100
episode_length = 0
episode_done = True
dists_to_target, pos_queue, actions = [
init_dist_to_target
], [init_pos], []
for step in range(args.max_episode_length):
episode_length += 1
if '+q' in args.model_type:
scores = model(sub_img_feats_var,
questions_var)
else:
scores = model(sub_img_feats_var)
prob = F.softmax(scores, dim=1)
action = int(prob.max(1)[1].data.cpu().numpy()[0])
actions.append(action)
img, _, episode_done = h3d.step(action)
episode_done = episode_done or episode_length >= args.max_episode_length
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224, 224)
.cuda())).view(1, 1, 3200)
sub_img_feats_var = torch.cat(
[sub_img_feats_var, img_feat_var], dim=1)
sub_img_feats_var = sub_img_feats_var[:, -5:, :]
dists_to_target.append(
h3d.get_dist_to_target(h3d.env.cam.pos))
pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.y,
h3d.env.cam.pos.z, h3d.env.cam.yaw
])
if episode_done == True:
break
# compute stats
metrics_slug['d_0_' + str(i)] = dists_to_target[0]
metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
metrics_slug['d_D_' + str(
i)] = dists_to_target[0] - dists_to_target[-1]
metrics_slug['d_min_' + str(i)] = np.array(
dists_to_target).min()
metrics_slug['ep_len_' + str(i)] = episode_length
if action == 3:
metrics_slug['stop_' + str(i)] = 1
else:
metrics_slug['stop_' + str(i)] = 0
inside_room = []
for p in pos_queue:
inside_room.append(
h3d.is_inside_room(
p, eval_loader.dataset.target_room))
if inside_room[-1] == True:
metrics_slug['r_T_' + str(i)] = 1
else:
metrics_slug['r_T_' + str(i)] = 0
if any([x == True for x in inside_room]) == True:
metrics_slug['r_e_' + str(i)] = 1
else:
metrics_slug['r_e_' + str(i)] = 0
# collate and update metrics
metrics_list = []
for i in metrics.metric_names:
if i not in metrics_slug:
metrics_list.append(metrics.metrics[
metrics.metric_names.index(i)][0])
else:
metrics_list.append(metrics_slug[i])
# update metrics
metrics.update(metrics_list)
print(metrics.get_stat_string(mode=0))
print('invalids', len(invalids))
logging.info("EVAL: metrics: {}".format(metrics.get_stat_string(mode=0)))
logging.info("EVAL: invalids: {}".format(len(invalids)))
# del h3d
eval_loader.dataset._load_envs()
if len(eval_loader.dataset.pruned_env_set) == 0:
done = True
elif 'lstm' in args.model_type:
done = False
while done == False:
if args.overfit:
metrics = NavMetric(
info={'split': args.eval_split,
'thread': rank},
metric_names=[
'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
'ep_len_30', 'ep_len_50'
],
log_json=args.output_log_path)
for batch in tqdm(eval_loader):
model.load_state_dict(shared_model.state_dict())
model.cuda()
idx, questions, answer, _, actions_in, actions_out, action_lengths, _ = batch
question_var = Variable(questions.cuda())
metrics_slug = {}
# evaluate at multiple initializations
for i in [10, 30, 50]:
t += 1
if action_lengths[0] - 1 - i < 0:
invalids.append([idx[0], i])
continue
h3d = eval_loader.dataset.episode_house
# forward through lstm till spawn
if len(eval_loader.dataset.episode_pos_queue[:-i]
) > 0:
images = eval_loader.dataset.get_frames(
h3d,
eval_loader.dataset.episode_pos_queue[:-i],
preprocess=True)
raw_img_feats = eval_loader.dataset.cnn(
Variable(torch.FloatTensor(images).cuda()))
actions_in_pruned = actions_in[:, :
action_lengths[0] -
i]
actions_in_var = Variable(actions_in_pruned.cuda())
action_lengths_pruned = action_lengths.clone(
).fill_(action_lengths[0] - i)
img_feats_var = raw_img_feats.view(1, -1, 3200)
if '+q' in args.model_type:
scores, hidden = model(
img_feats_var, question_var,
actions_in_var,
action_lengths_pruned.cpu().numpy())
else:
scores, hidden = model(
img_feats_var, False, actions_in_var,
action_lengths_pruned.cpu().numpy())
try:
init_pos = eval_loader.dataset.episode_pos_queue[
-i]
except:
invalids.append([idx[0], i])
continue
action_in = torch.LongTensor(1, 1).fill_(
actions_in[0,
action_lengths[0] - i]).cuda()
else:
init_pos = eval_loader.dataset.episode_pos_queue[
-i]
hidden = model.nav_rnn.init_hidden(1)
action_in = torch.LongTensor(1, 1).fill_(0).cuda()
h3d.env.reset(
x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
init_dist_to_target = h3d.get_dist_to_target(
h3d.env.cam.pos)
if init_dist_to_target < 0: # unreachable
invalids.append([idx[0], i])
continue
img = h3d.env.render()
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224, 224).cuda())).view(
1, 1, 3200)
episode_length = 0
episode_done = True
dists_to_target, pos_queue, actions = [
init_dist_to_target
], [init_pos], []
actual_pos_queue = [(h3d.env.cam.pos.x, h3d.env.cam.pos.z, h3d.env.cam.yaw)]
for step in range(args.max_episode_length):
episode_length += 1
if '+q' in args.model_type:
scores, hidden = model(
img_feat_var,
question_var,
Variable(action_in),
False,
hidden=hidden,
step=True)
else:
scores, hidden = model(
img_feat_var,
False,
Variable(action_in),
False,
hidden=hidden,
step=True)
prob = F.softmax(scores, dim=1)
action = int(prob.max(1)[1].data.cpu().numpy()[0])
actions.append(action)
img, _, episode_done = h3d.step(action)
episode_done = episode_done or episode_length >= args.max_episode_length
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224, 224)
.cuda())).view(1, 1, 3200)
action_in = torch.LongTensor(
1, 1).fill_(action + 1).cuda()
dists_to_target.append(
h3d.get_dist_to_target(h3d.env.cam.pos))
pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.y,
h3d.env.cam.pos.z, h3d.env.cam.yaw
])
if episode_done == True:
break
actual_pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.z, h3d.env.cam.yaw])
# compute stats
metrics_slug['d_0_' + str(i)] = dists_to_target[0]
metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
metrics_slug['d_D_' + str(
i)] = dists_to_target[0] - dists_to_target[-1]
metrics_slug['d_min_' + str(i)] = np.array(
dists_to_target).min()
metrics_slug['ep_len_' + str(i)] = episode_length
if action == 3:
metrics_slug['stop_' + str(i)] = 1
else:
metrics_slug['stop_' + str(i)] = 0
inside_room = []
for p in pos_queue:
inside_room.append(
h3d.is_inside_room(
p, eval_loader.dataset.target_room))
if inside_room[-1] == True:
metrics_slug['r_T_' + str(i)] = 1
else:
metrics_slug['r_T_' + str(i)] = 0
if any([x == True for x in inside_room]) == True:
metrics_slug['r_e_' + str(i)] = 1
else:
metrics_slug['r_e_' + str(i)] = 0
# collate and update metrics
metrics_list = []
for i in metrics.metric_names:
if i not in metrics_slug:
metrics_list.append(metrics.metrics[
metrics.metric_names.index(i)][0])
else:
metrics_list.append(metrics_slug[i])
# update metrics
metrics.update(metrics_list)
print(metrics.get_stat_string(mode=0))
print('invalids', len(invalids))
logging.info("EVAL: init_steps: {} metrics: {}".format(i, metrics.get_stat_string(mode=0)))
logging.info("EVAL: init_steps: {} invalids: {}".format(i, len(invalids)))
# del h3d
eval_loader.dataset._load_envs()
print("eval_loader pruned_env_set len: {}".format(len(eval_loader.dataset.pruned_env_set)))
logging.info("eval_loader pruned_env_set len: {}".format(len(eval_loader.dataset.pruned_env_set)))
assert len(eval_loader.dataset.pruned_env_set) > 0
if len(eval_loader.dataset.pruned_env_set) == 0:
done = True
elif 'pacman' in args.model_type:
done = False
while done == False:
if args.overfit:
metrics = NavMetric(
info={'split': args.eval_split,
'thread': rank},
metric_names=[
'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
'ep_len_30', 'ep_len_50'
],
log_json=args.output_log_path)
for batch in tqdm(eval_loader):
model.load_state_dict(shared_model.state_dict())
model.cuda()
idx, question, answer, actions, action_length = batch
metrics_slug = {}
h3d = eval_loader.dataset.episode_house
# evaluate at multiple initializations
for i in [10, 30, 50]:
t += 1
if i > action_length[0]:
invalids.append([idx[0], i])
continue
question_var = Variable(question.cuda())
controller_step = False
planner_hidden = model.planner_nav_rnn.init_hidden(1)
# get hierarchical action history
(
planner_actions_in, planner_img_feats,
controller_step, controller_action_in,
controller_img_feats, init_pos,
controller_action_counter
) = eval_loader.dataset.get_hierarchical_features_till_spawn(
actions[0, :action_length[0] + 1].numpy(), i, args.max_controller_actions
)
planner_actions_in_var = Variable(
planner_actions_in.cuda())
planner_img_feats_var = Variable(
planner_img_feats.cuda())
# forward planner till spawn to update hidden state
for step in range(planner_actions_in.size(0)):
planner_scores, planner_hidden = model.planner_step(
question_var, planner_img_feats_var[step]
.unsqueeze(0).unsqueeze(0),
planner_actions_in_var[step].view(1, 1),
planner_hidden
)
h3d.env.reset(
x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
init_dist_to_target = h3d.get_dist_to_target(
h3d.env.cam.pos)
if init_dist_to_target < 0: # unreachable
invalids.append([idx[0], i])
continue
dists_to_target, pos_queue, pred_actions = [
init_dist_to_target
], [init_pos], []
planner_actions, controller_actions = [], []
episode_length = 0
if args.max_controller_actions > 1:
controller_action_counter = controller_action_counter % args.max_controller_actions
controller_action_counter = max(controller_action_counter - 1, 0)
else:
controller_action_counter = 0
first_step = True
first_step_is_controller = controller_step
planner_step = True
action = int(controller_action_in)
for step in range(args.max_episode_length):
if not first_step:
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224,
224).cuda())).view(
1, 1, 3200)
else:
img_feat_var = Variable(controller_img_feats.cuda()).view(1, 1, 3200)
if not first_step or first_step_is_controller:
# query controller to continue or not
controller_action_in = Variable(
torch.LongTensor(1, 1).fill_(action).cuda())
controller_scores = model.controller_step(
img_feat_var, controller_action_in,
planner_hidden[0])
prob = F.softmax(controller_scores, dim=1)
controller_action = int(
prob.max(1)[1].data.cpu().numpy()[0])
if controller_action == 1 and controller_action_counter < args.max_controller_actions - 1:
controller_action_counter += 1
planner_step = False
else:
controller_action_counter = 0
planner_step = True
controller_action = 0
controller_actions.append(controller_action)
first_step = False
if planner_step:
if not first_step:
action_in = torch.LongTensor(
1, 1).fill_(action + 1).cuda()
planner_scores, planner_hidden = model.planner_step(
question_var, img_feat_var,
Variable(action_in), planner_hidden)
prob = F.softmax(planner_scores, dim=1)
action = int(
prob.max(1)[1].data.cpu().numpy()[0])
planner_actions.append(action)
episode_done = action == 3 or episode_length >= args.max_episode_length
episode_length += 1
dists_to_target.append(
h3d.get_dist_to_target(h3d.env.cam.pos))
pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.y,
h3d.env.cam.pos.z, h3d.env.cam.yaw
])
if episode_done:
break
img, _, _ = h3d.step(action)
first_step = False
# compute stats
metrics_slug['d_0_' + str(i)] = dists_to_target[0]
metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
metrics_slug['d_D_' + str(
i)] = dists_to_target[0] - dists_to_target[-1]
metrics_slug['d_min_' + str(i)] = np.array(
dists_to_target).min()
metrics_slug['ep_len_' + str(i)] = episode_length
if action == 3:
metrics_slug['stop_' + str(i)] = 1
else:
metrics_slug['stop_' + str(i)] = 0
inside_room = []
for p in pos_queue:
inside_room.append(
h3d.is_inside_room(
p, eval_loader.dataset.target_room))
if inside_room[-1] == True:
metrics_slug['r_T_' + str(i)] = 1
else:
metrics_slug['r_T_' + str(i)] = 0
if any([x == True for x in inside_room]) == True:
metrics_slug['r_e_' + str(i)] = 1
else:
metrics_slug['r_e_' + str(i)] = 0
# collate and update metrics
metrics_list = []
for i in metrics.metric_names:
if i not in metrics_slug:
metrics_list.append(metrics.metrics[
metrics.metric_names.index(i)][0])
else:
metrics_list.append(metrics_slug[i])
# update metrics
metrics.update(metrics_list)
try:
print(metrics.get_stat_string(mode=0))
logging.info("EVAL: metrics: {}".format(metrics.get_stat_string(mode=0)))
except:
pass
print('epoch', epoch)
print('invalids', len(invalids))
logging.info("EVAL: epoch {}".format(epoch))
logging.info("EVAL: invalids {}".format(invalids))
# del h3d
eval_loader.dataset._load_envs()
if len(eval_loader.dataset.pruned_env_set) == 0:
done = True
epoch += 1
# checkpoint if best val loss
if metrics.metrics[8][0] > best_eval_acc: # d_D_50
best_eval_acc = metrics.metrics[8][0]
if epoch % args.eval_every == 0 and args.log == True:
metrics.dump_log()
model_state = get_state(model)
aad = dict(args.__dict__)
ad = {}
for i in aad:
if i[0] != '_':
ad[i] = aad[i]
checkpoint = {'args': ad, 'state': model_state, 'epoch': epoch}
checkpoint_path = '%s/epoch_%d_d_D_50_%.04f.pt' % (
args.checkpoint_dir, epoch, best_eval_acc)
print('Saving checkpoint to %s' % checkpoint_path)
logging.info("EVAL: Saving checkpoint to {}".format(checkpoint_path))
torch.save(checkpoint, checkpoint_path)
print('[best_eval_d_D_50:%.04f]' % best_eval_acc)
logging.info("EVAL: [best_eval_d_D_50:{:.04f}]".format(best_eval_acc))
eval_loader.dataset._load_envs(start_idx=0, in_order=True)
def train(rank, args, shared_model):
torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))
if args.model_type == 'cnn':
model_kwargs = {}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'cnn+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'lstm':
model_kwargs = {}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm-mult+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnMultModel(**model_kwargs)
elif args.model_type == 'lstm+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'pacman':
model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
model = NavPlannerControllerModel(**model_kwargs)
else:
exit()
lossFn = torch.nn.CrossEntropyLoss().cuda()
optim = torch.optim.Adamax(
filter(lambda p: p.requires_grad, shared_model.parameters()),
lr=args.learning_rate)
train_loader_kwargs = {
'questions_h5': args.train_h5,
'data_json': args.data_json,
'vocab': args.vocab_json,
'batch_size': args.batch_size,
'input_type': args.model_type,
'num_frames': 5,
'map_resolution': args.map_resolution,
'split': 'train',
'max_threads_per_gpu': args.max_threads_per_gpu,
'gpu_id': args.gpus[rank % len(args.gpus)],
'to_cache': args.cache,
'overfit': args.overfit,
'max_controller_actions': args.max_controller_actions,
'max_actions': args.max_actions
}
args.output_log_path = os.path.join(args.log_dir,
'train_' + str(rank) + '.json')
if 'pacman' in args.model_type:
metrics = NavMetric(
info={'split': 'train',
'thread': rank},
metric_names=['planner_loss', 'controller_loss'],
log_json=args.output_log_path)
else:
metrics = NavMetric(
info={'split': 'train',
'thread': rank},
metric_names=['loss'],
log_json=args.output_log_path)
train_loader = EqaDataLoader(**train_loader_kwargs)
print('train_loader has %d samples' % len(train_loader.dataset))
logging.info('TRAIN: train loader has {} samples'.format(len(train_loader.dataset)))
t, epoch = 0, 0
while epoch < int(args.max_epochs):
if 'cnn' in args.model_type:
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
while done == False:
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, img_feats, _, actions_out, _ = batch
img_feats_var = Variable(img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
actions_out_var = Variable(actions_out.cuda())
if '+q' in args.model_type:
scores = model(img_feats_var, questions_var)
else:
scores = model(img_feats_var)
loss = lossFn(scores, actions_out_var)
# zero grad
optim.zero_grad()
# update metrics
metrics.update([loss.data[0]])
# backprop and update
loss.backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
if args.log == True:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
if all_envs_loaded == False:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
if args.cache == False:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
elif 'lstm' in args.model_type:
lossFn = MaskedNLLCriterion().cuda()
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
total_times = []
while done == False:
start_time = time.time()
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, img_feats, actions_in, actions_out, action_lengths, masks = batch
img_feats_var = Variable(img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
actions_in_var = Variable(actions_in.cuda())
actions_out_var = Variable(actions_out.cuda())
action_lengths = action_lengths.cuda()
masks_var = Variable(masks.cuda())
action_lengths, perm_idx = action_lengths.sort(
0, descending=True)
img_feats_var = img_feats_var[perm_idx]
if '+q' in args.model_type:
questions_var = questions_var[perm_idx]
actions_in_var = actions_in_var[perm_idx]
actions_out_var = actions_out_var[perm_idx]
masks_var = masks_var[perm_idx]
if '+q' in args.model_type:
scores, hidden = model(img_feats_var, questions_var,
actions_in_var,
action_lengths.cpu().numpy())
else:
scores, hidden = model(img_feats_var, False,
actions_in_var,
action_lengths.cpu().numpy())
#block out masks
if args.curriculum:
curriculum_length = (epoch+1)*5
for i, action_length in enumerate(action_lengths):
if action_length - curriculum_length > 0:
masks_var[i, :action_length-curriculum_length] = 0
logprob = F.log_softmax(scores, dim=1)
loss = lossFn(
logprob, actions_out_var[:, :action_lengths.max()]
.contiguous().view(-1, 1),
masks_var[:, :action_lengths.max()].contiguous().view(
-1, 1))
# zero grad
optim.zero_grad()
# update metrics
metrics.update([loss.data[0]])
logging.info("TRAIN LSTM loss: {:.6f}".format(loss.data[0]))
# backprop and update
loss.backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
if args.log == True:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
if all_envs_loaded == False:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
if args.cache == False:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
elif 'pacman' in args.model_type:
planner_lossFn = MaskedNLLCriterion().cuda()
controller_lossFn = MaskedNLLCriterion().cuda()
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
while done == False:
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, planner_img_feats, planner_actions_in, \
planner_actions_out, planner_action_lengths, planner_masks, \
controller_img_feats, controller_actions_in, planner_hidden_idx, \
controller_outs, controller_action_lengths, controller_masks = batch
questions_var = Variable(questions.cuda())
planner_img_feats_var = Variable(planner_img_feats.cuda())
planner_actions_in_var = Variable(
planner_actions_in.cuda())
planner_actions_out_var = Variable(
planner_actions_out.cuda())
planner_action_lengths = planner_action_lengths.cuda()
planner_masks_var = Variable(planner_masks.cuda())
controller_img_feats_var = Variable(
controller_img_feats.cuda())
controller_actions_in_var = Variable(
controller_actions_in.cuda())
planner_hidden_idx_var = Variable(
planner_hidden_idx.cuda())
controller_outs_var = Variable(controller_outs.cuda())
controller_action_lengths = controller_action_lengths.cuda(
)
controller_masks_var = Variable(controller_masks.cuda())
planner_action_lengths, perm_idx = planner_action_lengths.sort(
0, descending=True)
questions_var = questions_var[perm_idx]
planner_img_feats_var = planner_img_feats_var[perm_idx]
planner_actions_in_var = planner_actions_in_var[perm_idx]
planner_actions_out_var = planner_actions_out_var[perm_idx]
planner_masks_var = planner_masks_var[perm_idx]
controller_img_feats_var = controller_img_feats_var[
perm_idx]
controller_actions_in_var = controller_actions_in_var[
perm_idx]
controller_outs_var = controller_outs_var[perm_idx]
planner_hidden_idx_var = planner_hidden_idx_var[perm_idx]
controller_action_lengths = controller_action_lengths[
perm_idx]
controller_masks_var = controller_masks_var[perm_idx]
planner_scores, controller_scores, planner_hidden = model(
questions_var, planner_img_feats_var,
planner_actions_in_var,
planner_action_lengths.cpu().numpy(),
planner_hidden_idx_var, controller_img_feats_var,
controller_actions_in_var, controller_action_lengths)
planner_logprob = F.log_softmax(planner_scores, dim=1)
controller_logprob = F.log_softmax(
controller_scores, dim=1)
planner_loss = planner_lossFn(
planner_logprob,
planner_actions_out_var[:, :planner_action_lengths.max(
)].contiguous().view(-1, 1),
planner_masks_var[:, :planner_action_lengths.max()]
.contiguous().view(-1, 1))
controller_loss = controller_lossFn(
controller_logprob,
controller_outs_var[:, :controller_action_lengths.max(
)].contiguous().view(-1, 1),
controller_masks_var[:, :controller_action_lengths.max(
)].contiguous().view(-1, 1))
# zero grad
optim.zero_grad()
# update metrics
metrics.update(
[planner_loss.data[0], controller_loss.data[0]])
logging.info("TRAINING PACMAN planner-loss: {:.6f} controller-loss: {:.6f}".format(
planner_loss.data[0], controller_loss.data[0]))
# backprop and update
if args.max_controller_actions == 1:
(planner_loss).backward()
else:
(planner_loss + controller_loss).backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
if args.log == True:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
if all_envs_loaded == False:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
if args.cache == False:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
epoch += 1
if epoch % args.save_every == 0:
model_state = get_state(model)
optimizer_state = optim.state_dict()
aad = dict(args.__dict__)
ad = {}
for i in aad:
if i[0] != '_':
ad[i] = aad[i]
checkpoint = {'args': ad,
'state': model_state,
'epoch': epoch,
'optimizer': optimizer_state}
checkpoint_path = '%s/epoch_%d_thread_%d.pt' % (
args.checkpoint_dir, epoch, rank)
print('Saving checkpoint to %s' % checkpoint_path)
logging.info("TRAIN: Saving checkpoint to {}".format(checkpoint_path))
torch.save(checkpoint, checkpoint_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# data params
parser.add_argument('-train_h5', default='data/train.h5')
parser.add_argument('-val_h5', default='data/val.h5')
parser.add_argument('-test_h5', default='data/test.h5')
parser.add_argument('-data_json', default='data/data.json')
parser.add_argument('-vocab_json', default='data/vocab.json')
parser.add_argument(
'-target_obj_conn_map_dir',
default='data/target-obj-conn-maps/500')
parser.add_argument('-map_resolution', default=500, type=int)
parser.add_argument(
'-mode',
default='train+eval',
type=str,
choices=['train', 'eval', 'train+eval'])
parser.add_argument('-eval_split', default='val', type=str)
# model details
parser.add_argument(
'-model_type',
default='cnn',
choices=['cnn', 'cnn+q', 'lstm', 'lstm+q', 'lstm-mult+q', 'pacman'])
parser.add_argument('-max_episode_length', default=100, type=int)
parser.add_argument('-curriculum', default=0, type=int)
# optim params
parser.add_argument('-batch_size', default=20, type=int)
parser.add_argument('-learning_rate', default=1e-3, type=float)
parser.add_argument('-max_epochs', default=1000, type=int)
parser.add_argument('-overfit', default=False, action='store_true')
# bookkeeping
parser.add_argument('-print_every', default=5, type=int)
parser.add_argument('-eval_every', default=1, type=int)
parser.add_argument('-save_every', default=1000, type=int) #optional if you would like to save specific epochs as opposed to relying on the eval thread
parser.add_argument('-identifier', default='cnn')
parser.add_argument('-num_processes', default=1, type=int)
parser.add_argument('-max_threads_per_gpu', default=10, type=int)
# checkpointing
parser.add_argument('-checkpoint_path', default=False)
parser.add_argument('-checkpoint_dir', default='checkpoints/nav/')
parser.add_argument('-log_dir', default='logs/nav/')
parser.add_argument('-log', default=False, action='store_true')
parser.add_argument('-cache', default=False, action='store_true')
parser.add_argument('-max_controller_actions', type=int, default=5)
parser.add_argument('-max_actions', type=int)
args = parser.parse_args()
args.time_id = time.strftime("%m_%d_%H:%M")
#MAX_CONTROLLER_ACTIONS = args.max_controller_actions
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
if args.curriculum:
assert 'lstm' in args.model_type #TODO: Finish implementing curriculum for other model types
logging.basicConfig(filename=os.path.join(args.log_dir, "run_{}.log".format(
str(datetime.now()).replace(' ', '_'))),
level=logging.INFO,
format='%(asctime)-15s %(message)s')
try:
args.gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
args.gpus = [int(x) for x in args.gpus]
except KeyError:
print("CPU not supported")
logging.info("CPU not supported")
exit()
if args.checkpoint_path != False:
print('Loading checkpoint from %s' % args.checkpoint_path)
logging.info("Loading checkpoint from {}".format(args.checkpoint_path))
args_to_keep = ['model_type']
checkpoint = torch.load(args.checkpoint_path, map_location={
'cuda:0': 'cpu'
})
for i in args.__dict__:
if i not in args_to_keep:
checkpoint['args'][i] = args.__dict__[i]
args = type('new_dict', (object, ), checkpoint['args'])
args.checkpoint_dir = os.path.join(args.checkpoint_dir,
args.time_id + '_' + args.identifier)
args.log_dir = os.path.join(args.log_dir,
args.time_id + '_' + args.identifier)
# if set to overfit; set eval_split to train
if args.overfit == True:
args.eval_split = 'train'
print(args.__dict__)
logging.info(args.__dict__)
if not os.path.exists(args.checkpoint_dir):
os.makedirs(args.checkpoint_dir)
os.makedirs(args.log_dir)
if args.model_type == 'cnn':
model_kwargs = {}
shared_model = NavCnnModel(**model_kwargs)
elif args.model_type == 'cnn+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
shared_model = NavCnnModel(**model_kwargs)
elif args.model_type == 'lstm':
model_kwargs = {}
shared_model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
shared_model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'pacman':
model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
shared_model = NavPlannerControllerModel(**model_kwargs)
else:
exit()
shared_model.share_memory()
if args.checkpoint_path != False:
print('Loading params from checkpoint: %s' % args.checkpoint_path)
logging.info("Loading params from checkpoint: {}".format(args.checkpoint_path))
shared_model.load_state_dict(checkpoint['state'])
if args.mode == 'eval':
eval(0, args, shared_model)
elif args.mode == 'train':
if args.num_processes > 1:
processes = []
for rank in range(0, args.num_processes):
# for rank in range(0, args.num_processes):
p = mp.Process(target=train, args=(rank, args, shared_model))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
train(0, args, shared_model)
else:
processes = []
# Start the eval thread
p = mp.Process(target=eval, args=(0, args, shared_model))
p.start()
processes.append(p)
# Start the training thread(s)
for rank in range(1, args.num_processes + 1):
# for rank in range(0, args.num_processes):
p = mp.Process(target=train, args=(rank, args, shared_model))
p.start()
processes.append(p)
for p in processes:
p.join()
| [
"metrics.NavMetric",
"models.NavCnnModel",
"torch.nn.CrossEntropyLoss",
"torch.LongTensor",
"models.get_state",
"numpy.array",
"models.MaskedNLLCriterion",
"logging.info",
"torch.nn.functional.softmax",
"os.path.exists",
"argparse.ArgumentParser",
"models.NavPlannerControllerModel",
"data.Eq... | [((3106, 3141), 'data.EqaDataLoader', 'EqaDataLoader', ([], {}), '(**eval_loader_kwargs)\n', (3119, 3141), False, 'from data import EqaDataLoader\n'), ((34031, 34067), 'data.EqaDataLoader', 'EqaDataLoader', ([], {}), '(**train_loader_kwargs)\n', (34044, 34067), False, 'from data import EqaDataLoader\n'), ((47924, 47949), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (47947, 47949), False, 'import argparse\n'), ((50244, 50272), 'time.strftime', 'time.strftime', (['"""%m_%d_%H:%M"""'], {}), "('%m_%d_%H:%M')\n", (50257, 50272), False, 'import time\n'), ((51601, 51672), 'os.path.join', 'os.path.join', (['args.checkpoint_dir', "(args.time_id + '_' + args.identifier)"], {}), "(args.checkpoint_dir, args.time_id + '_' + args.identifier)\n", (51613, 51672), False, 'import os\n'), ((51731, 51795), 'os.path.join', 'os.path.join', (['args.log_dir', "(args.time_id + '_' + args.identifier)"], {}), "(args.log_dir, args.time_id + '_' + args.identifier)\n", (51743, 51795), False, 'import os\n'), ((51972, 51999), 'logging.info', 'logging.info', (['args.__dict__'], {}), '(args.__dict__)\n', (51984, 51999), False, 'import logging\n'), ((1463, 1490), 'models.NavCnnModel', 'NavCnnModel', ([], {}), '(**model_kwargs)\n', (1474, 1490), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((3737, 4121), 'metrics.NavMetric', 'NavMetric', ([], {'info': "{'split': args.eval_split, 'thread': rank}", 'metric_names': "['d_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50', 'd_D_10',\n 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30', 'd_min_50', 'r_T_10',\n 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30', 'r_e_50', 'stop_10', 'stop_30',\n 'stop_50', 'ep_len_10', 'ep_len_30', 'ep_len_50']", 'log_json': 'args.output_log_path'}), "(info={'split': args.eval_split, 'thread': rank}, metric_names=[\n 'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50', 'd_D_10',\n 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30', 
'd_min_50', 'r_T_10',\n 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30', 'r_e_50', 'stop_10', 'stop_30',\n 'stop_50', 'ep_len_10', 'ep_len_30', 'ep_len_50'], log_json=args.\n output_log_path)\n", (3746, 4121), False, 'from metrics import NavMetric\n'), ((31682, 31709), 'models.NavCnnModel', 'NavCnnModel', ([], {}), '(**model_kwargs)\n', (31693, 31709), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((33634, 33770), 'metrics.NavMetric', 'NavMetric', ([], {'info': "{'split': 'train', 'thread': rank}", 'metric_names': "['planner_loss', 'controller_loss']", 'log_json': 'args.output_log_path'}), "(info={'split': 'train', 'thread': rank}, metric_names=[\n 'planner_loss', 'controller_loss'], log_json=args.output_log_path)\n", (33643, 33770), False, 'from metrics import NavMetric\n'), ((33851, 33959), 'metrics.NavMetric', 'NavMetric', ([], {'info': "{'split': 'train', 'thread': rank}", 'metric_names': "['loss']", 'log_json': 'args.output_log_path'}), "(info={'split': 'train', 'thread': rank}, metric_names=['loss'],\n log_json=args.output_log_path)\n", (33860, 33959), False, 'from metrics import NavMetric\n'), ((50344, 50371), 'os.path.isdir', 'os.path.isdir', (['args.log_dir'], {}), '(args.log_dir)\n', (50357, 50371), False, 'import os\n'), ((50381, 50406), 'os.makedirs', 'os.makedirs', (['args.log_dir'], {}), '(args.log_dir)\n', (50392, 50406), False, 'import os\n'), ((51294, 51358), 'torch.load', 'torch.load', (['args.checkpoint_path'], {'map_location': "{'cuda:0': 'cpu'}"}), "(args.checkpoint_path, map_location={'cuda:0': 'cpu'})\n", (51304, 51358), False, 'import torch\n'), ((52012, 52047), 'os.path.exists', 'os.path.exists', (['args.checkpoint_dir'], {}), '(args.checkpoint_dir)\n', (52026, 52047), False, 'import os\n'), ((52057, 52089), 'os.makedirs', 'os.makedirs', (['args.checkpoint_dir'], {}), '(args.checkpoint_dir)\n', (52068, 52089), False, 'import os\n'), ((52098, 52123), 'os.makedirs', 'os.makedirs', 
(['args.log_dir'], {}), '(args.log_dir)\n', (52109, 52123), False, 'import os\n'), ((52208, 52235), 'models.NavCnnModel', 'NavCnnModel', ([], {}), '(**model_kwargs)\n', (52219, 52235), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((933, 1000), 'torch._utils._rebuild_tensor', 'torch._utils._rebuild_tensor', (['storage', 'storage_offset', 'size', 'stride'], {}), '(storage, storage_offset, size, stride)\n', (961, 1000), False, 'import torch\n'), ((1675, 1702), 'models.NavCnnModel', 'NavCnnModel', ([], {}), '(**model_kwargs)\n', (1686, 1702), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((31894, 31921), 'models.NavCnnModel', 'NavCnnModel', ([], {}), '(**model_kwargs)\n', (31905, 31921), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((32684, 32711), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (32709, 32711), False, 'import torch\n'), ((47169, 47185), 'models.get_state', 'get_state', (['model'], {}), '(model)\n', (47178, 47185), False, 'from models import get_state, ensure_shared_grads\n'), ((47842, 47881), 'torch.save', 'torch.save', (['checkpoint', 'checkpoint_path'], {}), '(checkpoint, checkpoint_path)\n', (47852, 47881), False, 'import torch\n'), ((50997, 51030), 'logging.info', 'logging.info', (['"""CPU not supported"""'], {}), "('CPU not supported')\n", (51009, 51030), False, 'import logging\n'), ((52427, 52454), 'models.NavCnnModel', 'NavCnnModel', ([], {}), '(**model_kwargs)\n', (52438, 52454), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((53900, 53953), 'torch.multiprocessing.Process', 'mp.Process', ([], {'target': 'eval', 'args': '(0, args, shared_model)'}), '(target=eval, args=(0, args, shared_model))\n', (53910, 53953), True, 'import torch.multiprocessing as mp\n'), ((1621, 1648), 
'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (1631, 1648), False, 'from data import load_vocab\n'), ((1783, 1813), 'models.NavCnnRnnModel', 'NavCnnRnnModel', ([], {}), '(**model_kwargs)\n', (1797, 1813), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((4377, 4394), 'tqdm.tqdm', 'tqdm', (['eval_loader'], {}), '(eval_loader)\n', (4381, 4394), False, 'from tqdm import tqdm\n'), ((30672, 30688), 'models.get_state', 'get_state', (['model'], {}), '(model)\n', (30681, 30688), False, 'from models import get_state, ensure_shared_grads\n'), ((31243, 31282), 'torch.save', 'torch.save', (['checkpoint', 'checkpoint_path'], {}), '(checkpoint, checkpoint_path)\n', (31253, 31282), False, 'import torch\n'), ((31840, 31867), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (31850, 31867), False, 'from data import load_vocab\n'), ((32002, 32032), 'models.NavCnnRnnModel', 'NavCnnRnnModel', ([], {}), '(**model_kwargs)\n', (32016, 32032), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((52366, 52393), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (52376, 52393), False, 'from data import load_vocab\n'), ((52542, 52572), 'models.NavCnnRnnModel', 'NavCnnRnnModel', ([], {}), '(**model_kwargs)\n', (52556, 52572), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((54166, 54223), 'torch.multiprocessing.Process', 'mp.Process', ([], {'target': 'train', 'args': '(rank, args, shared_model)'}), '(target=train, args=(rank, args, shared_model))\n', (54176, 54223), True, 'import torch.multiprocessing as mp\n'), ((1999, 2029), 'models.NavCnnRnnModel', 'NavCnnRnnModel', ([], {}), '(**model_kwargs)\n', (2013, 2029), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), 
((11294, 11311), 'tqdm.tqdm', 'tqdm', (['eval_loader'], {}), '(eval_loader)\n', (11298, 11311), False, 'from tqdm import tqdm\n'), ((32223, 32257), 'models.NavCnnRnnMultModel', 'NavCnnRnnMultModel', ([], {}), '(**model_kwargs)\n', (32241, 32257), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((36992, 37003), 'time.time', 'time.time', ([], {}), '()\n', (37001, 37003), False, 'import time\n'), ((52765, 52795), 'models.NavCnnRnnModel', 'NavCnnRnnModel', ([], {}), '(**model_kwargs)\n', (52779, 52795), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((53587, 53644), 'torch.multiprocessing.Process', 'mp.Process', ([], {'target': 'train', 'args': '(rank, args, shared_model)'}), '(target=train, args=(rank, args, shared_model))\n', (53597, 53644), True, 'import torch.multiprocessing as mp\n'), ((1945, 1972), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (1955, 1972), False, 'from data import load_vocab\n'), ((2220, 2254), 'models.NavCnnRnnMultModel', 'NavCnnRnnMultModel', ([], {}), '(**model_kwargs)\n', (2238, 2254), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((10632, 11016), 'metrics.NavMetric', 'NavMetric', ([], {'info': "{'split': args.eval_split, 'thread': rank}", 'metric_names': "['d_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50', 'd_D_10',\n 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30', 'd_min_50', 'r_T_10',\n 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30', 'r_e_50', 'stop_10', 'stop_30',\n 'stop_50', 'ep_len_10', 'ep_len_30', 'ep_len_50']", 'log_json': 'args.output_log_path'}), "(info={'split': args.eval_split, 'thread': rank}, metric_names=[\n 'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50', 'd_D_10',\n 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30', 'd_min_50', 'r_T_10',\n 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30', 'r_e_50', 'stop_10', 
'stop_30',\n 'stop_50', 'ep_len_10', 'ep_len_30', 'ep_len_50'], log_json=args.\n output_log_path)\n", (10641, 11016), False, 'from metrics import NavMetric\n'), ((21204, 21221), 'tqdm.tqdm', 'tqdm', (['eval_loader'], {}), '(eval_loader)\n', (21208, 21221), False, 'from tqdm import tqdm\n'), ((32169, 32196), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (32179, 32196), False, 'from data import load_vocab\n'), ((32443, 32473), 'models.NavCnnRnnModel', 'NavCnnRnnModel', ([], {}), '(**model_kwargs)\n', (32457, 32473), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((36767, 36787), 'models.MaskedNLLCriterion', 'MaskedNLLCriterion', ([], {}), '()\n', (36785, 36787), False, 'from models import MaskedNLLCriterion\n'), ((39102, 39130), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (39115, 39130), True, 'import torch.nn.functional as F\n'), ((52704, 52731), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (52714, 52731), False, 'from data import load_vocab\n'), ((52930, 52971), 'models.NavPlannerControllerModel', 'NavPlannerControllerModel', ([], {}), '(**model_kwargs)\n', (52955, 52971), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((2166, 2193), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (2176, 2193), False, 'from data import load_vocab\n'), ((2382, 2423), 'models.NavPlannerControllerModel', 'NavPlannerControllerModel', ([], {}), '(**model_kwargs)\n', (2407, 2423), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((5233, 5258), 'torch.LongTensor', 'torch.LongTensor', (['ep_inds'], {}), '(ep_inds)\n', (5249, 5258), False, 'import torch\n'), ((6827, 6851), 'torch.nn.functional.softmax', 'F.softmax', (['scores'], {'dim': '(1)'}), 
'(scores, dim=1)\n', (6836, 6851), True, 'import torch.nn.functional as F\n'), ((7530, 7581), 'torch.cat', 'torch.cat', (['[sub_img_feats_var, img_feat_var]'], {'dim': '(1)'}), '([sub_img_feats_var, img_feat_var], dim=1)\n', (7539, 7581), False, 'import torch\n'), ((20542, 20926), 'metrics.NavMetric', 'NavMetric', ([], {'info': "{'split': args.eval_split, 'thread': rank}", 'metric_names': "['d_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50', 'd_D_10',\n 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30', 'd_min_50', 'r_T_10',\n 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30', 'r_e_50', 'stop_10', 'stop_30',\n 'stop_50', 'ep_len_10', 'ep_len_30', 'ep_len_50']", 'log_json': 'args.output_log_path'}), "(info={'split': args.eval_split, 'thread': rank}, metric_names=[\n 'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50', 'd_D_10',\n 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30', 'd_min_50', 'r_T_10',\n 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30', 'r_e_50', 'stop_10', 'stop_30',\n 'stop_50', 'ep_len_10', 'ep_len_30', 'ep_len_50'], log_json=args.\n output_log_path)\n", (20551, 20926), False, 'from metrics import NavMetric\n'), ((32389, 32416), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (32399, 32416), False, 'from data import load_vocab\n'), ((32601, 32642), 'models.NavPlannerControllerModel', 'NavPlannerControllerModel', ([], {}), '(**model_kwargs)\n', (32626, 32642), False, 'from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel\n'), ((41010, 41030), 'models.MaskedNLLCriterion', 'MaskedNLLCriterion', ([], {}), '()\n', (41028, 41030), False, 'from models import MaskedNLLCriterion\n'), ((41070, 41090), 'models.MaskedNLLCriterion', 'MaskedNLLCriterion', ([], {}), '()\n', (41088, 41090), False, 'from models import MaskedNLLCriterion\n'), ((44370, 44406), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['planner_scores'], {'dim': '(1)'}), '(planner_scores, dim=1)\n', (44383, 44406), True, 'import 
torch.nn.functional as F\n'), ((44448, 44487), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['controller_scores'], {'dim': '(1)'}), '(controller_scores, dim=1)\n', (44461, 44487), True, 'import torch.nn.functional as F\n'), ((52878, 52905), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (52888, 52905), False, 'from data import load_vocab\n'), ((2337, 2364), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (2347, 2364), False, 'from data import load_vocab\n'), ((8501, 8526), 'numpy.array', 'np.array', (['dists_to_target'], {}), '(dists_to_target)\n', (8509, 8526), True, 'import numpy as np\n'), ((16354, 16378), 'torch.nn.functional.softmax', 'F.softmax', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (16363, 16378), True, 'import torch.nn.functional as F\n'), ((32556, 32583), 'data.load_vocab', 'load_vocab', (['args.vocab_json'], {}), '(args.vocab_json)\n', (32566, 32583), False, 'from data import load_vocab\n'), ((50667, 50681), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (50679, 50681), False, 'from datetime import datetime\n'), ((18083, 18108), 'numpy.array', 'np.array', (['dists_to_target'], {}), '(dists_to_target)\n', (18091, 18108), True, 'import numpy as np\n'), ((15775, 15794), 'torch.autograd.Variable', 'Variable', (['action_in'], {}), '(action_in)\n', (15783, 15794), False, 'from torch.autograd import Variable\n'), ((16156, 16175), 'torch.autograd.Variable', 'Variable', (['action_in'], {}), '(action_in)\n', (16164, 16175), False, 'from torch.autograd import Variable\n'), ((25691, 25726), 'torch.nn.functional.softmax', 'F.softmax', (['controller_scores'], {'dim': '(1)'}), '(controller_scores, dim=1)\n', (25700, 25726), True, 'import torch.nn.functional as F\n'), ((26961, 26993), 'torch.nn.functional.softmax', 'F.softmax', (['planner_scores'], {'dim': '(1)'}), '(planner_scores, dim=1)\n', (26970, 26993), True, 'import torch.nn.functional as F\n'), ((28232, 
28257), 'numpy.array', 'np.array', (['dists_to_target'], {}), '(dists_to_target)\n', (28240, 28257), True, 'import numpy as np\n'), ((12480, 12505), 'torch.FloatTensor', 'torch.FloatTensor', (['images'], {}), '(images)\n', (12497, 12505), False, 'import torch\n'), ((13884, 13906), 'torch.LongTensor', 'torch.LongTensor', (['(1)', '(1)'], {}), '(1, 1)\n', (13900, 13906), False, 'import torch\n'), ((14284, 14306), 'torch.LongTensor', 'torch.LongTensor', (['(1)', '(1)'], {}), '(1, 1)\n', (14300, 14306), False, 'import torch\n'), ((17050, 17072), 'torch.LongTensor', 'torch.LongTensor', (['(1)', '(1)'], {}), '(1, 1)\n', (17066, 17072), False, 'import torch\n'), ((26884, 26903), 'torch.autograd.Variable', 'Variable', (['action_in'], {}), '(action_in)\n', (26892, 26903), False, 'from torch.autograd import Variable\n'), ((25404, 25426), 'torch.LongTensor', 'torch.LongTensor', (['(1)', '(1)'], {}), '(1, 1)\n', (25420, 25426), False, 'import torch\n'), ((26598, 26620), 'torch.LongTensor', 'torch.LongTensor', (['(1)', '(1)'], {}), '(1, 1)\n', (26614, 26620), False, 'import torch\n')] |
from BinaryModel import *
from numpy.random import rand
class MajorityModel(BinaryModel):
    """Majority-vote aggregation of binary worker labels.

    Each image's parameter is the fraction of positive votes it received;
    an image is labelled positive when that fraction exceeds 0.5.  The
    optional ``addNoise`` switch jitters the fraction by up to half a vote
    so that exact ties are broken randomly.
    """

    def __init__(self, filename=None):
        # addNoise: jitter the vote fraction by +/- 0.5/n before
        # thresholding in get_labels() so ties break randomly.
        self.mdlPrm = {
            'addNoise' : False,
        }
        self.wkrIds = {}
        self.imgIds = {}
        if filename:
            self.load_data(filename)
        else:
            self._setup_prior()  # provided by BinaryModel

    def __del__(self):
        pass

    def load_data(self, filename, skipyaml=False):
        """
        Load votes from a text file.

        The first line is "<numImgs> <numWkrs> <numLbls>"; every
        following line is "<imageId> <workerId> <label>", where a label
        of 1 counts as a positive vote.  After loading, ``imgPrm[i]``
        holds [fraction of +ve votes, total n votes] for image i.
        """
        # BUG FIX: the file handle was previously left open; use a
        # context manager so it is always closed.
        with open(filename) as filein:
            info = filein.readline().rstrip().split(' ')
            self.numLbls = int(info[2])
            self.numWkrs = int(info[1])
            self.numImgs = int(info[0])
            # imgPrm[i] accumulates [positive votes, total votes] and is
            # renormalized to [fraction positive, total votes] below.
            self.imgPrm = [[0, 0] for _ in range(self.numImgs)]
            self.wkrLbls = dict((id, []) for id in range(self.numWkrs))
            self.imgLbls = dict((id, []) for id in range(self.numImgs))
            self.labels = []
            for line in filein:
                cols = [int(c) for c in line.rstrip().split(' ')]
                iId, wId = cols[0], cols[1]
                lij = int(cols[2] == 1)
                self.wkrLbls[wId].append([iId, lij])
                self.imgLbls[iId].append([wId, lij])
                self.labels.append((iId, wId, lij))
                self.imgPrm[iId][0] += lij
                self.imgPrm[iId][1] += 1
        # Renormalize: vote counts -> fraction of positive votes.
        for i in range(len(self.imgPrm)):
            self.imgPrm[i][0] = float(self.imgPrm[i][0])/self.imgPrm[i][1]

    def get_num_wkrs(self):
        """Number of distinct workers in the data set."""
        return self.numWkrs

    def get_num_imgs(self):
        """Number of distinct images in the data set."""
        return self.numImgs

    def get_num_lbls(self):
        """Total number of labels in the data set."""
        return self.numLbls

    def set_model_param(self, raw=(), prm=None):
        """
        Sets model parameters.

        Arguments:
        - `raw`: raw parameter vector (unused by this model; default
          changed from a mutable [] to an immutable () -- backward
          compatible since the value is never read)
        - `prm`: hash of model parameter values to be changed
        """
        if prm is not None:
            # BUG FIX: dict.iteritems() was removed in Python 3;
            # items() behaves identically here.
            for (k, v) in prm.items():
                self.mdlPrm[k] = v

    def set_worker_param(self, raw):
        pass

    def set_image_param(self, raw):
        self.imgPrm = [r for r in raw]

    def get_model_param(self):
        return {}

    def get_worker_param_raw(self):
        return {}

    def get_image_param_raw(self):
        return [p for p in self.imgPrm]

    def get_worker_param(self, id=None):
        return {}

    def get_image_param(self, id=None):
        return [p for p in self.imgPrm]

    def get_labels(self):
        """Return the majority-vote label (0/1) for every image.

        With ``addNoise`` enabled the vote fraction is perturbed by up
        to +/- half a vote before thresholding.
        """
        if self.mdlPrm['addNoise']:
            return [int((self.imgPrm[i][0]+(rand()-.5)/self.imgPrm[i][1])>.5)\
                    for i in range(len(self.imgPrm))]
        else:
            return [int(self.imgPrm[i][0]>.5) for i \
                    in range(len(self.imgPrm))]

    # TODO: load and save parameters
    def optimize_worker_param(self):
        pass

    def optimize_image_param(self):
        pass

    def objective(self, prm=None):
        pass

    def image_objective(self, prm=None):
        pass

    def image_objective_range(self, imgId, prm):
        pass

    def worker_objective_range(self, wkrId, prm):
        pass

    def gradient(self, prm=None):
        return []

    def worker_gradient(self, prm=None):
        return []

    def image_gradient(self, prm=None):
        pass

    def get_num_wkr_lbls(self):
        return [len(self.wkrLbls[id]) for id in range(self.numWkrs)]

    def get_num_img_lbls(self):
        return [len(self.imgLbls[id]) for id in range(self.numImgs)]
| [
"numpy.random.rand"
] | [((2670, 2676), 'numpy.random.rand', 'rand', ([], {}), '()\n', (2674, 2676), False, 'from numpy.random import rand\n')] |
from django.db import models
from dynamic_scraper.models import Scraper, SchedulerRuntime
from scrapy.contrib.djangoitem import DjangoItem
class EventWebsite(models.Model):
    """A website whose events are collected by an attached scraper."""

    name = models.CharField(max_length=200)
    scraper = models.ForeignKey(
        Scraper, blank=True, null=True, on_delete=models.SET_NULL)
    url = models.URLField()
    scraper_runtime = models.ForeignKey(
        SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)

    def __unicode__(self):
        return "%s (%s)" % (self.name, self.id)
class Event(models.Model):
    """A single event scraped from an EventWebsite."""

    title = models.CharField(max_length=200)
    event_website = models.ForeignKey(EventWebsite)
    description = models.TextField(blank=True)
    url = models.URLField()
    checker_runtime = models.ForeignKey(
        SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)

    def __unicode__(self):
        return "%s (%s)" % (self.title, self.id)
class EventItem(DjangoItem):
    # Scrapy item that persists scraped data through the Event model above.
    django_model = Event
"django.db.models.URLField",
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((186, 218), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (202, 218), False, 'from django.db import models\n'), ((233, 309), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Scraper'], {'blank': '(True)', 'null': '(True)', 'on_delete': 'models.SET_NULL'}), '(Scraper, blank=True, null=True, on_delete=models.SET_NULL)\n', (250, 309), False, 'from django.db import models\n'), ((320, 337), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (335, 337), False, 'from django.db import models\n'), ((360, 450), 'django.db.models.ForeignKey', 'models.ForeignKey', (['SchedulerRuntime'], {'blank': '(True)', 'null': '(True)', 'on_delete': 'models.SET_NULL'}), '(SchedulerRuntime, blank=True, null=True, on_delete=models\n .SET_NULL)\n', (377, 450), False, 'from django.db import models\n'), ((572, 604), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (588, 604), False, 'from django.db import models\n'), ((625, 656), 'django.db.models.ForeignKey', 'models.ForeignKey', (['EventWebsite'], {}), '(EventWebsite)\n', (642, 656), False, 'from django.db import models\n'), ((676, 704), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (692, 704), False, 'from django.db import models\n'), ((715, 732), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (730, 732), False, 'from django.db import models\n'), ((755, 845), 'django.db.models.ForeignKey', 'models.ForeignKey', (['SchedulerRuntime'], {'blank': '(True)', 'null': '(True)', 'on_delete': 'models.SET_NULL'}), '(SchedulerRuntime, blank=True, null=True, on_delete=models\n .SET_NULL)\n', (772, 845), False, 'from django.db import models\n')] |
"""
PatientFinders are used to find OpenMRS patients that correspond to
CommCare cases if none of the patient identifiers listed in
OpenmrsCaseConfig.match_on_ids have successfully matched a patient.
See `README.md`__ for more context.
"""
import logging
from collections import namedtuple
from functools import partial
from operator import eq
from pprint import pformat
from dimagi.ext.couchdbkit import (
DecimalProperty,
DictProperty,
DocumentSchema,
ListProperty,
StringProperty,
)
from corehq.motech.openmrs.const import OPENMRS_DATA_TYPE_BOOLEAN
from corehq.motech.openmrs.finders_utils import (
le_days_diff,
le_levenshtein_percent,
)
from corehq.motech.value_source import (
as_value_source,
deserialize,
recurse_subclasses,
)
# Comparison strategies available to PropertyWeight.match_type.
MATCH_TYPE_EXACT = 'exact'
MATCH_TYPE_LEVENSHTEIN = 'levenshtein'  # Useful for words translated across alphabets
MATCH_TYPE_DAYS_DIFF = 'days_diff'  # Useful for estimated dates of birth
# Maps each match type to the callable that decides whether two values match.
MATCH_FUNCTIONS = {
    MATCH_TYPE_EXACT: eq,
    MATCH_TYPE_LEVENSHTEIN: le_levenshtein_percent,
    MATCH_TYPE_DAYS_DIFF: le_days_diff,
}
MATCH_TYPES = tuple(MATCH_FUNCTIONS)
MATCH_TYPE_DEFAULT = MATCH_TYPE_EXACT
logger = logging.getLogger(__name__)
# ValueSource dict used as the default for PatientFinder.create_missing.
constant_false = {
    "value": 'False',
    # We are fetching from a case property or a form question value, and
    # we want `get_value()` to return False (bool). `get_value()`
    # serialises case properties and form question values as external
    # data types. OPENMRS_DATA_TYPE_BOOLEAN is useful because it is a
    # bool, not a string, so `constant_false.get_value()` will return
    # False (not 'False')
    "external_data_type": OPENMRS_DATA_TYPE_BOOLEAN,
}
class PatientFinder(DocumentSchema):
    """
    The ``PatientFinder`` base class was developed as a way to
    handle situations where patient cases are created in CommCare
    instead of being imported from OpenMRS.

    When patients are imported from OpenMRS, they will come with at
    least one identifier that MOTECH can use to match the case in
    CommCare with the corresponding patient in OpenMRS. But if the case
    is registered in CommCare then we may not have an ID, or the ID
    could be wrong. We need to search for a corresponding OpenMRS
    patient.

    Different projects may focus on different kinds of case properties,
    so it was felt that a base class would allow some flexibility.

    The ``PatientFinder.wrap()`` method allows you to wrap documents of
    subclasses.

    The ``PatientFinder.find_patients()`` method must be implemented by
    subclasses. It returns a list of zero, one, or many patients. If it
    returns one patient, the OpenmrsRepeater.find_or_create_patient()
    will accept that patient as a true match.

    .. NOTE:: The consequences of a false positive (a Type II error) are
        severe: A real patient will have their valid values
        overwritten by those of someone else. So ``PatientFinder``
        subclasses should be written and configured to skew
        towards false negatives (Type I errors). In other words,
        it is much better not to choose a patient than to choose
        the wrong patient.
    """

    # Whether to create a new patient if no patients are found.  Stored as
    # a ValueSource dict (see `constant_false`); legacy plain-bool values
    # are migrated in wrap() below.
    create_missing = DictProperty(default=constant_false)

    @classmethod
    def wrap(cls, data):
        # Migrate legacy documents where create_missing was stored as a
        # plain bool into the ValueSource dict form used today.
        if 'create_missing' in data and isinstance(data['create_missing'], bool):
            data['create_missing'] = {
                'external_data_type': OPENMRS_DATA_TYPE_BOOLEAN,
                'value': str(data['create_missing'])
            }
        if cls is PatientFinder:
            # Polymorphic wrap: dispatch to the concrete subclass whose
            # _doc_type matches the stored doc_type (None if unknown).
            subclass = {
                sub._doc_type: sub for sub in recurse_subclasses(cls)
            }.get(data['doc_type'])
            return subclass.wrap(data) if subclass else None
        else:
            return super(PatientFinder, cls).wrap(data)

    def find_patients(self, requests, case, case_config):
        """
        Given a case, search OpenMRS for possible matches. Return the
        best results. Subclasses must define "best". If just one result
        is returned, it will be chosen.
        """
        raise NotImplementedError
# A candidate OpenMRS patient together with its weighted match score.
PatientScore = namedtuple('PatientScore', ['patient', 'score'])
class PropertyWeight(DocumentSchema):
    """One (case property, weight) matching rule for
    WeightedPropertyPatientFinder.

    ``match_type`` selects the comparison from MATCH_FUNCTIONS and
    ``match_params`` supplies its extra arguments (e.g. a maximum
    Levenshtein ratio or a maximum difference in days).
    """
    case_property = StringProperty()
    weight = DecimalProperty()
    match_type = StringProperty(required=False, choices=MATCH_TYPES, default=MATCH_TYPE_DEFAULT)
    match_params = ListProperty(required=False)
class WeightedPropertyPatientFinder(PatientFinder):
    """
    The ``WeightedPropertyPatientFinder`` class finds OpenMRS patients
    that match CommCare cases by assigning weights to case properties,
    and adding the weights of matching patient properties to calculate a
    confidence score.
    """
    # Identifiers that are searchable in OpenMRS. e.g.
    #     [ 'bahmni_id', 'household_id', 'last_name']
    searchable_properties = ListProperty()

    # The weight assigned to a matching property.
    # [
    #     {"case_property": "bahmni_id", "weight": 0.9},
    #     {"case_property": "household_id", "weight": 0.9},
    #     {
    #         "case_property": "dob",
    #         "weight": 0.75,
    #         "match_type": "days_diff",
    #         // days_diff matches based on days difference from given date
    #         "match_params": [364]
    #     },
    #     {
    #         "case_property": "first_name",
    #         "weight": 0.025,
    #         "match_type": "levenshtein",
    #         // levenshtein function takes edit_distance / len
    #         "match_params": [0.2]
    #         // i.e. 20% is one edit for every 5 characters
    #         // e.g. "Riyaz" matches "Riaz" but not "Riazz"
    #     },
    #     {"case_property": "last_name", "weight": 0.025},
    #     {"case_property": "municipality", "weight": 0.2},
    # ]
    property_weights = ListProperty(PropertyWeight)

    # The threshold that the sum of weights must pass for a CommCare case to
    # be considered a match to an OpenMRS patient
    threshold = DecimalProperty(default=1.0)

    # If more than one patient passes `threshold`, the margin by which the
    # weight of the best match must exceed the weight of the second-best match
    # to be considered correct.
    confidence_margin = DecimalProperty(default=0.667)  # Default: Matches two thirds better than second-best

    def __init__(self, *args, **kwargs):
        super(WeightedPropertyPatientFinder, self).__init__(*args, **kwargs)
        # Populated from case_config in find_patients(); maps a case
        # property name to (jsonpath into the patient, ValueSource dict).
        self._property_map = {}

    def get_score(self, patient, case):
        """
        Return the sum of weighted properties to give an OpenMRS
        patient a score of how well they match a CommCare case.
        """
        def weights():
            for property_weight in self.property_weights:
                prop = property_weight['case_property']
                jsonpath, value_source_dict = self._property_map[prop]
                weight = property_weight['weight']
                matches = jsonpath.find(patient)
                for match in matches:
                    patient_value = match.value
                    case_value = case.get_case_property(prop)
                    match_type = property_weight['match_type']
                    match_params = property_weight['match_params']
                    # Bind the configured extra parameters ahead of the two
                    # values being compared.
                    match_function = partial(MATCH_FUNCTIONS[match_type], *match_params)
                    is_equivalent = match_function(deserialize(value_source_dict, patient_value), case_value)
                    yield weight if is_equivalent else 0
        return sum(weights())

    def find_patients(self, requests, case, case_config):
        """
        Matches cases to patients. Returns a list of patients, each
        with a confidence score >= self.threshold
        """
        from corehq.motech.openmrs.openmrs_config import get_property_map
        from corehq.motech.openmrs.repeater_helpers import search_patients
        self._property_map = get_property_map(case_config)
        candidates = {}  # key on OpenMRS UUID to filter duplicates
        for prop in self.searchable_properties:
            value = case.get_case_property(prop)
            if value:
                response_json = search_patients(requests, value)
                for patient in response_json['results']:
                    score = self.get_score(patient, case)
                    if score >= self.threshold:
                        candidates[patient['uuid']] = PatientScore(patient, score)
        if not candidates:
            logger.info(
                'Unable to match case "%s" (%s): No candidate patients found.',
                case.name, case.get_id,
            )
            return []
        if len(candidates) == 1:
            patient = list(candidates.values())[0].patient
            logger.info(
                'Matched case "%s" (%s) to ONLY patient candidate: \n%s',
                case.name, case.get_id, pformat(patient, indent=2),
            )
            return [patient]
        patients_scores = sorted(candidates.values(), key=lambda candidate: candidate.score, reverse=True)
        # NOTE(review): assumes the second-best score is non-zero; with the
        # default threshold of 1.0 every candidate score is >= 1 -- confirm
        # before configuring threshold = 0.
        if patients_scores[0].score / patients_scores[1].score > 1 + self.confidence_margin:
            # There is more than a `confidence_margin` difference (default
            # margin is 0.667, i.e. two thirds) in score between the
            # best-ranked patient and the second-best-ranked patient. Let's
            # go with Patient One.
            patient = patients_scores[0].patient
            logger.info(
                'Matched case "%s" (%s) to BEST patient candidate: \n%s',
                case.name, case.get_id, pformat(patients_scores, indent=2),
            )
            return [patient]
        # We can't be sure. Just send them all.
        logger.info(
            'Unable to match case "%s" (%s) to patient candidates: \n%s',
            case.name, case.get_id, pformat(patients_scores, indent=2),
        )
        return [ps.patient for ps in patients_scores]
| [
"logging.getLogger",
"collections.namedtuple",
"corehq.motech.openmrs.repeater_helpers.search_patients",
"dimagi.ext.couchdbkit.ListProperty",
"pprint.pformat",
"dimagi.ext.couchdbkit.DecimalProperty",
"functools.partial",
"dimagi.ext.couchdbkit.StringProperty",
"dimagi.ext.couchdbkit.DictProperty",... | [((1192, 1219), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1209, 1219), False, 'import logging\n'), ((4250, 4298), 'collections.namedtuple', 'namedtuple', (['"""PatientScore"""', "['patient', 'score']"], {}), "('PatientScore', ['patient', 'score'])\n", (4260, 4298), False, 'from collections import namedtuple\n'), ((3305, 3341), 'dimagi.ext.couchdbkit.DictProperty', 'DictProperty', ([], {'default': 'constant_false'}), '(default=constant_false)\n', (3317, 3341), False, 'from dimagi.ext.couchdbkit import DecimalProperty, DictProperty, DocumentSchema, ListProperty, StringProperty\n'), ((4359, 4375), 'dimagi.ext.couchdbkit.StringProperty', 'StringProperty', ([], {}), '()\n', (4373, 4375), False, 'from dimagi.ext.couchdbkit import DecimalProperty, DictProperty, DocumentSchema, ListProperty, StringProperty\n'), ((4389, 4406), 'dimagi.ext.couchdbkit.DecimalProperty', 'DecimalProperty', ([], {}), '()\n', (4404, 4406), False, 'from dimagi.ext.couchdbkit import DecimalProperty, DictProperty, DocumentSchema, ListProperty, StringProperty\n'), ((4424, 4503), 'dimagi.ext.couchdbkit.StringProperty', 'StringProperty', ([], {'required': '(False)', 'choices': 'MATCH_TYPES', 'default': 'MATCH_TYPE_DEFAULT'}), '(required=False, choices=MATCH_TYPES, default=MATCH_TYPE_DEFAULT)\n', (4438, 4503), False, 'from dimagi.ext.couchdbkit import DecimalProperty, DictProperty, DocumentSchema, ListProperty, StringProperty\n'), ((4523, 4551), 'dimagi.ext.couchdbkit.ListProperty', 'ListProperty', ([], {'required': '(False)'}), '(required=False)\n', (4535, 4551), False, 'from dimagi.ext.couchdbkit import DecimalProperty, DictProperty, DocumentSchema, ListProperty, StringProperty\n'), ((4997, 5011), 'dimagi.ext.couchdbkit.ListProperty', 'ListProperty', ([], {}), '()\n', (5009, 5011), False, 'from dimagi.ext.couchdbkit import DecimalProperty, DictProperty, DocumentSchema, ListProperty, StringProperty\n'), ((5950, 5978), 
'dimagi.ext.couchdbkit.ListProperty', 'ListProperty', (['PropertyWeight'], {}), '(PropertyWeight)\n', (5962, 5978), False, 'from dimagi.ext.couchdbkit import DecimalProperty, DictProperty, DocumentSchema, ListProperty, StringProperty\n'), ((6123, 6151), 'dimagi.ext.couchdbkit.DecimalProperty', 'DecimalProperty', ([], {'default': '(1.0)'}), '(default=1.0)\n', (6138, 6151), False, 'from dimagi.ext.couchdbkit import DecimalProperty, DictProperty, DocumentSchema, ListProperty, StringProperty\n'), ((6363, 6393), 'dimagi.ext.couchdbkit.DecimalProperty', 'DecimalProperty', ([], {'default': '(0.667)'}), '(default=0.667)\n', (6378, 6393), False, 'from dimagi.ext.couchdbkit import DecimalProperty, DictProperty, DocumentSchema, ListProperty, StringProperty\n'), ((8048, 8077), 'corehq.motech.openmrs.openmrs_config.get_property_map', 'get_property_map', (['case_config'], {}), '(case_config)\n', (8064, 8077), False, 'from corehq.motech.openmrs.openmrs_config import get_property_map\n'), ((9963, 9997), 'pprint.pformat', 'pformat', (['patients_scores'], {'indent': '(2)'}), '(patients_scores, indent=2)\n', (9970, 9997), False, 'from pprint import pformat\n'), ((8298, 8330), 'corehq.motech.openmrs.repeater_helpers.search_patients', 'search_patients', (['requests', 'value'], {}), '(requests, value)\n', (8313, 8330), False, 'from corehq.motech.openmrs.repeater_helpers import search_patients\n'), ((9016, 9042), 'pprint.pformat', 'pformat', (['patient'], {'indent': '(2)'}), '(patient, indent=2)\n', (9023, 9042), False, 'from pprint import pformat\n'), ((9705, 9739), 'pprint.pformat', 'pformat', (['patients_scores'], {'indent': '(2)'}), '(patients_scores, indent=2)\n', (9712, 9739), False, 'from pprint import pformat\n'), ((7418, 7469), 'functools.partial', 'partial', (['MATCH_FUNCTIONS[match_type]', '*match_params'], {}), '(MATCH_FUNCTIONS[match_type], *match_params)\n', (7425, 7469), False, 'from functools import partial\n'), ((7521, 7566), 'corehq.motech.value_source.deserialize', 
'deserialize', (['value_source_dict', 'patient_value'], {}), '(value_source_dict, patient_value)\n', (7532, 7566), False, 'from corehq.motech.value_source import as_value_source, deserialize, recurse_subclasses\n'), ((3743, 3766), 'corehq.motech.value_source.recurse_subclasses', 'recurse_subclasses', (['cls'], {}), '(cls)\n', (3761, 3766), False, 'from corehq.motech.value_source import as_value_source, deserialize, recurse_subclasses\n')] |
from main import status, tps, server_commands, scoreboard
def setup(bot):
    """Register every server-status cog on *bot* (override flag enabled)."""
    cog_classes = (
        status.Status,
        tps.Tps,
        server_commands.ServerCommands,
        scoreboard.ScoreBoard,
    )
    for cog_cls in cog_classes:
        bot.add_cog(cog_cls(bot), True)
| [
"main.status.Status",
"main.scoreboard.ScoreBoard",
"main.server_commands.ServerCommands",
"main.tps.Tps"
] | [((92, 110), 'main.status.Status', 'status.Status', (['bot'], {}), '(bot)\n', (105, 110), False, 'from main import status, tps, server_commands, scoreboard\n'), ((134, 146), 'main.tps.Tps', 'tps.Tps', (['bot'], {}), '(bot)\n', (141, 146), False, 'from main import status, tps, server_commands, scoreboard\n'), ((170, 205), 'main.server_commands.ServerCommands', 'server_commands.ServerCommands', (['bot'], {}), '(bot)\n', (200, 205), False, 'from main import status, tps, server_commands, scoreboard\n'), ((229, 255), 'main.scoreboard.ScoreBoard', 'scoreboard.ScoreBoard', (['bot'], {}), '(bot)\n', (250, 255), False, 'from main import status, tps, server_commands, scoreboard\n')] |
from mongoengine import connect
from config import Config
from db.models.subscriptions import Subscriptions
class Db:
    """Thin wrapper that opens the mongoengine connection on construction."""

    Subscriptions = None

    def __init__(self, createClient=True):
        cfg = Config()
        self.db = {}
        self.Subscriptions = Subscriptions
        self.createClient = createClient
        self.initConnection(cfg)

    def initConnection(self, config):
        """Connect to MongoDB using the 'database' section of the config."""
        db_cfg = config.data['database']
        connect(
            db=db_cfg['dbName'],
            host=db_cfg['host'],
            port=db_cfg['port'],
            username=db_cfg['username'],
            password=db_cfg['password'],
            authentication_source=db_cfg['dbName'],
            connect=self.createClient)
| [
"config.Config",
"mongoengine.connect"
] | [((204, 212), 'config.Config', 'Config', ([], {}), '()\n', (210, 212), False, 'from config import Config\n'), ((401, 717), 'mongoengine.connect', 'connect', ([], {'db': "config.data['database']['dbName']", 'host': "config.data['database']['host']", 'port': "config.data['database']['port']", 'username': "config.data['database']['username']", 'password': "config.data['database']['password']", 'authentication_source': "config.data['database']['dbName']", 'connect': 'self.createClient'}), "(db=config.data['database']['dbName'], host=config.data['database'][\n 'host'], port=config.data['database']['port'], username=config.data[\n 'database']['username'], password=config.data['database']['password'],\n authentication_source=config.data['database']['dbName'], connect=self.\n createClient)\n", (408, 717), False, 'from mongoengine import connect\n')] |
import os
import re
import gzip
import argparse
import pandas as pd
import numpy as np
from collections import defaultdict
def get_args():
    """
    Parse command line arguments
    """
    parser = argparse.ArgumentParser(
        description="Method to create track for escape mutations")
    parser.add_argument("-xlsx", help="file containing all the data")
    parser.add_argument("-pid", help="pep to number",
                        default="prot_names_pids_8.txt")
    parser.add_argument("-gb_tools", help="path to gb_tools", default="./")
    return parser.parse_args()
def read_pid(args):
    """Load the peptide lookup table from args.pid.

    Each line has four whitespace-separated columns:
        <peptide> <pid> <nucid> <aaid>
    Returns a tuple of three dicts keyed by peptide: (pid, aaid, nucid).
    """
    pid = {}
    aaid = {}
    nucid = {}
    # Use a context manager so the file is closed even on error
    # (the original left the handle open if a line was malformed).
    with open(args.pid, 'r') as inputfilehandler:
        for line in inputfilehandler:
            fields = line.strip().split()
            if len(fields) < 4:
                continue  # skip blank/malformed lines instead of crashing
            peptide = fields[0]
            pid[peptide] = fields[1]
            nucid[peptide] = fields[2]
            aaid[peptide] = fields[3]
    return (pid, aaid, nucid)
def get_start_pos(peptide, pid, aaid, nucid):
    """Return the nucleotide start position for *peptide*, or -1 if its
    8-residue prefix is not in the lookup table."""
    prefix = peptide[:8]
    return nucid[prefix] if prefix in pid else -1
def main(args):
    """Build the escape-mutations bedDetail/bigBed track.

    Reads the epitope spreadsheet (args.xlsx), maps each wildtype peptide
    to its genomic start via the peptide lookup (args.pid), writes one
    bedDetail row per unique (wildtype, mutant) peptide pair, and finally
    converts the output to bigBed with the UCSC gb_tools utilities.
    """
    (pid, aaid, nucid) = read_pid(args)
    cd8_epitopes = pd.read_excel(args.xlsx,
                                 skiprows=0,
                                 header=0,
                                 index_col=None)
    print(cd8_epitopes.columns)
    outfiletag = 'escape_mutations'
    beddetailfilename = outfiletag + '.beddetail'
    bedfilename = outfiletag + '.bed'
    bbfilename = outfiletag + '.bb'
    chrom = "NC_045512v2"
    score = 1000
    strand = '+'
    wt_mt = defaultdict(list)  # wildtype peptide -> mutant peptides already written
    mutations = []             # unique gene_codonChange_aaChange identifiers
    with open(beddetailfilename, 'w') as beddetailfilehandler:
        for i in range(len(cd8_epitopes['Position of Mutation'])):
            pom = cd8_epitopes['Position of Mutation'][i]
            gene = cd8_epitopes['Gene'][i]
            pil = cd8_epitopes['Probable Infection Location'][i]
            aa_change = cd8_epitopes['AA Change'][i]
            c_change = cd8_epitopes['Codon Change'][i]
            mutation_id = gene + '_' + c_change + '_' + aa_change
            if mutation_id not in mutations:
                mutations.append(mutation_id)
            # A row carries one wildtype peptide, or two separated by ';'
            # (the matching mutants are in 'Mutant Sequence 1'/'2').
            wt_field = cd8_epitopes['Wildtype Sequence'][i]
            if ';' in wt_field:
                pairs = list(zip(wt_field.split(';'),
                                 [cd8_epitopes['Mutant Sequence 1'][i],
                                  cd8_epitopes['Mutant Sequence 2'][i]]))
            else:
                pairs = [(wt_field, cd8_epitopes['Mutant Sequence 1'][i])]
            for (wt_pep, mt_pep) in pairs:
                chromStart = get_start_pos(wt_pep, pid, aaid, nucid)
                if chromStart == -1:
                    continue  # peptide prefix not in the lookup table
                if mt_pep in wt_mt[wt_pep]:
                    continue  # this (wildtype, mutant) pair was already written
                # BUG FIX: the original two-peptide branch recorded pairs
                # under stale variables (`wt_mt[wt_pep].append(mt_pep)`),
                # which could raise NameError on the first row and let
                # duplicate rows through; record the pair that was written.
                wt_mt[wt_pep].append(mt_pep)
                chromEnd = str(len(wt_pep) * 3 + int(chromStart))
                # bedDetail: thickStart/thickEnd mirror chromStart/chromEnd.
                fields = [chrom, str(chromStart), str(chromEnd), wt_pep,
                          str(score), strand, str(chromStart), str(chromEnd),
                          str(pom), str(gene), str(pil), aa_change, c_change,
                          mt_pep]
                beddetailfilehandler.write('\t'.join(fields) + "\n")
    print(len(mutations))
    # use gbtools to convert from beddetail to bed and bigbed
    os.system(f"bedSort {beddetailfilename} {bedfilename}")
    os.system(f"bedToBigBed {bedfilename} wuhCor1.sizes {bbfilename} -tab -type=bed9+ -as=escape_mutants.as")
# Script entry point: parse CLI arguments and build the track files.
if __name__ == "__main__":
    main(get_args())
| [
"os.system",
"collections.defaultdict",
"argparse.ArgumentParser",
"pandas.read_excel"
] | [((205, 292), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Method to create track for escape mutations"""'}), "(description=\n 'Method to create track for escape mutations')\n", (228, 292), False, 'import argparse\n'), ((1206, 1268), 'pandas.read_excel', 'pd.read_excel', (['args.xlsx'], {'skiprows': '(0)', 'header': '(0)', 'index_col': 'None'}), '(args.xlsx, skiprows=0, header=0, index_col=None)\n', (1219, 1268), True, 'import pandas as pd\n'), ((1934, 1951), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1945, 1951), False, 'from collections import defaultdict\n'), ((6272, 6327), 'os.system', 'os.system', (['f"""bedSort {beddetailfilename} {bedfilename}"""'], {}), "(f'bedSort {beddetailfilename} {bedfilename}')\n", (6281, 6327), False, 'import os\n'), ((6332, 6447), 'os.system', 'os.system', (['f"""bedToBigBed {bedfilename} wuhCor1.sizes {bbfilename} -tab -type=bed9+ -as=escape_mutants.as"""'], {}), "(\n f'bedToBigBed {bedfilename} wuhCor1.sizes {bbfilename} -tab -type=bed9+ -as=escape_mutants.as'\n )\n", (6341, 6447), False, 'import os\n')] |
#!/usr/bin/env python
from app import app
# Serve the Flask app on all interfaces with a single synchronous worker.
# NOTE(review): debug=True enables the interactive Werkzeug debugger; combined
# with host='0.0.0.0' this must not reach production -- confirm dev-only use.
app.run(host = '0.0.0.0', port = 8089, debug = True, threaded = False, processes = 1)
| [
"app.app.run"
] | [((44, 119), 'app.app.run', 'app.run', ([], {'host': '"""0.0.0.0"""', 'port': '(8089)', 'debug': '(True)', 'threaded': '(False)', 'processes': '(1)'}), "(host='0.0.0.0', port=8089, debug=True, threaded=False, processes=1)\n", (51, 119), False, 'from app import app\n')] |
import unittest
from zeppos_microsoft_sql_server.ms_sql_server import MsSqlServer
import pandas as pd
import pyodbc
import os
class TestTheProjectMethods(unittest.TestCase):
    """Integration tests for MsSqlServer.

    These require a local SQLEXPRESS instance reachable through the ODBC
    driver named in CONN_STR.
    """

    # Single source of truth for the connection string; it was previously
    # duplicated in every test method.  The doubled backslash also fixes the
    # invalid "\s" escape sequence (the runtime value is unchanged).
    CONN_STR = ("DRIVER={ODBC Driver 13 for SQL Server}; "
                "SERVER=localhost\\sqlexpress; "
                "DATABASE=master; Trusted_Connection=yes;")

    def test_constructor_methods(self):
        self.assertEqual("<class 'zeppos_microsoft_sql_server.ms_sql_server.MsSqlServer'>",
                         str(type(MsSqlServer(""))))

    def test_execute_sql_method(self):
        ms_sql = MsSqlServer(self.CONN_STR)
        self.assertEqual(True, ms_sql.execute_sql("drop table if exists #tmp"))

    def test_drop_table_method(self):
        ms_sql = MsSqlServer(self.CONN_STR)
        self.assertEqual(True, ms_sql.drop_table("dbo", "table_does_not_exist"))

    def test_create_table_method(self):
        ms_sql = MsSqlServer(self.CONN_STR)
        df = pd.DataFrame({'column_1': [3600],
                           'column_2': ['12'],
                           'column_3': [23]
                           }, columns=['column_1', 'column_2', 'column_3'])
        df['column_1'] = df['column_1'].astype(object)
        df['column_2'] = df['column_2'].astype(str)
        df['column_3'] = df['column_3'].astype(int)
        ms_sql.drop_table("dbo", "table_does_not_exist")
        self.assertEqual(True, ms_sql.create_table("dbo", "table_does_not_exist", df))

    def test_does_table_exists(self):
        ms_sql = MsSqlServer(self.CONN_STR)
        self.assertEqual(False, ms_sql.does_table_exists('dbo', 'test123456123'))

    def test_save_dataframe_by_record_method(self):
        ms_sql = MsSqlServer(self.CONN_STR)
        ms_sql.drop_table("dbo", "test_table")
        ms_sql.execute_sql("create table dbo.test_table (column_1 int)")
        # test
        df_actual = pd.DataFrame({'column_1': [3600]}, columns=['column_1'])
        self.assertEqual(True, ms_sql.save_dataframe_by_record(df_actual, "dbo", "test_table"))
        self.assertEqual(1, pd.read_sql("SELECT TOP 1 column_1 FROM dbo.test_table",
                                        pyodbc.connect(self.CONN_STR)).shape[0])

    def test_save_dataframe_in_bulk_method(self):
        ms_sql = MsSqlServer(self.CONN_STR)
        # test
        df_actual = pd.DataFrame({'column_1': [3600]}, columns=['column_1'])
        self.assertEqual(True, ms_sql.save_dataframe_in_bulk(df_actual, "dbo", "test_table"))
        self.assertEqual(1, pd.read_sql("SELECT TOP 1 column_1 FROM dbo.test_table",
                                        pyodbc.connect(self.CONN_STR)).shape[0])

    def test_1_read_data_into_dataframe_method(self):
        ms_sql = MsSqlServer(self.CONN_STR)
        self.assertEqual(1,
                         ms_sql.read_data_into_dataframe("SELECT TOP 1 COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS").shape[0])

    def test_2_read_data_into_dataframe_method(self):
        ms_sql = MsSqlServer(self.CONN_STR)
        self.assertEqual(1, ms_sql.read_data_into_dataframe("""
            SET NOCOUNT ON; -- This has to be here.
            DROP TABLE IF EXISTS #tmp
            SELECT DISTINCT TABLE_SCHEMA, TABLE_NAME into #tmp
            FROM INFORMATION_SCHEMA.COLUMNS
            SELECT count(1) as RECORD_COUNT from #tmp
        """).shape[0])

    def test_extract_to_csv_method(self):
        ms_sql = MsSqlServer(self.CONN_STR)
        csv_file = ms_sql.extract_to_csv("select table_schema, table_name from information_schema.tables",
                                         r"c:\temp", "test.csv")
        self.assertEqual(True, os.path.exists(csv_file.full_file_name))
        df = pd.read_csv(csv_file.full_file_name, sep="|")
        self.assertGreater(df.shape[0], 0)
        os.remove(csv_file.full_file_name)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| [
"os.path.exists",
"pyodbc.connect",
"zeppos_microsoft_sql_server.ms_sql_server.MsSqlServer",
"pandas.DataFrame",
"pandas.read_csv",
"unittest.main",
"os.remove"
] | [((4822, 4837), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4835, 4837), False, 'import unittest\n'), ((392, 527), 'zeppos_microsoft_sql_server.ms_sql_server.MsSqlServer', 'MsSqlServer', (['"""DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;"""'], {}), "(\n 'DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;'\n )\n", (403, 527), False, 'from zeppos_microsoft_sql_server.ms_sql_server import MsSqlServer\n'), ((666, 801), 'zeppos_microsoft_sql_server.ms_sql_server.MsSqlServer', 'MsSqlServer', (['"""DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;"""'], {}), "(\n 'DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;'\n )\n", (677, 801), False, 'from zeppos_microsoft_sql_server.ms_sql_server import MsSqlServer\n'), ((943, 1078), 'zeppos_microsoft_sql_server.ms_sql_server.MsSqlServer', 'MsSqlServer', (['"""DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;"""'], {}), "(\n 'DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;'\n )\n", (954, 1078), False, 'from zeppos_microsoft_sql_server.ms_sql_server import MsSqlServer\n'), ((1094, 1216), 'pandas.DataFrame', 'pd.DataFrame', (["{'column_1': [3600], 'column_2': ['12'], 'column_3': [23]}"], {'columns': "['column_1', 'column_2', 'column_3']"}), "({'column_1': [3600], 'column_2': ['12'], 'column_3': [23]},\n columns=['column_1', 'column_2', 'column_3'])\n", (1106, 1216), True, 'import pandas as pd\n'), ((1654, 1789), 'zeppos_microsoft_sql_server.ms_sql_server.MsSqlServer', 'MsSqlServer', (['"""DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;"""'], {}), "(\n 'DRIVER={ODBC Driver 13 for 
SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;'\n )\n", (1665, 1789), False, 'from zeppos_microsoft_sql_server.ms_sql_server import MsSqlServer\n'), ((1944, 2079), 'zeppos_microsoft_sql_server.ms_sql_server.MsSqlServer', 'MsSqlServer', (['"""DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;"""'], {}), "(\n 'DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;'\n )\n", (1955, 2079), False, 'from zeppos_microsoft_sql_server.ms_sql_server import MsSqlServer\n'), ((2238, 2294), 'pandas.DataFrame', 'pd.DataFrame', (["{'column_1': [3600]}"], {'columns': "['column_1']"}), "({'column_1': [3600]}, columns=['column_1'])\n", (2250, 2294), True, 'import pandas as pd\n'), ((2696, 2831), 'zeppos_microsoft_sql_server.ms_sql_server.MsSqlServer', 'MsSqlServer', (['"""DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;"""'], {}), "(\n 'DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;'\n )\n", (2707, 2831), False, 'from zeppos_microsoft_sql_server.ms_sql_server import MsSqlServer\n'), ((2870, 2926), 'pandas.DataFrame', 'pd.DataFrame', (["{'column_1': [3600]}"], {'columns': "['column_1']"}), "({'column_1': [3600]}, columns=['column_1'])\n", (2882, 2926), True, 'import pandas as pd\n'), ((3343, 3478), 'zeppos_microsoft_sql_server.ms_sql_server.MsSqlServer', 'MsSqlServer', (['"""DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;"""'], {}), "(\n 'DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;'\n )\n", (3354, 3478), False, 'from zeppos_microsoft_sql_server.ms_sql_server import MsSqlServer\n'), ((3695, 3830), 'zeppos_microsoft_sql_server.ms_sql_server.MsSqlServer', 'MsSqlServer', 
(['"""DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;"""'], {}), "(\n 'DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;'\n )\n", (3706, 3830), False, 'from zeppos_microsoft_sql_server.ms_sql_server import MsSqlServer\n'), ((4303, 4438), 'zeppos_microsoft_sql_server.ms_sql_server.MsSqlServer', 'MsSqlServer', (['"""DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;"""'], {}), "(\n 'DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;'\n )\n", (4314, 4438), False, 'from zeppos_microsoft_sql_server.ms_sql_server import MsSqlServer\n'), ((4657, 4702), 'pandas.read_csv', 'pd.read_csv', (['csv_file.full_file_name'], {'sep': '"""|"""'}), "(csv_file.full_file_name, sep='|')\n", (4668, 4702), True, 'import pandas as pd\n'), ((4754, 4788), 'os.remove', 'os.remove', (['csv_file.full_file_name'], {}), '(csv_file.full_file_name)\n', (4763, 4788), False, 'import os\n'), ((4603, 4642), 'os.path.exists', 'os.path.exists', (['csv_file.full_file_name'], {}), '(csv_file.full_file_name)\n', (4617, 4642), False, 'import os\n'), ((316, 331), 'zeppos_microsoft_sql_server.ms_sql_server.MsSqlServer', 'MsSqlServer', (['""""""'], {}), "('')\n", (327, 331), False, 'from zeppos_microsoft_sql_server.ms_sql_server import MsSqlServer\n'), ((2476, 2614), 'pyodbc.connect', 'pyodbc.connect', (['"""DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;"""'], {}), "(\n 'DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;'\n )\n", (2490, 2614), False, 'import pyodbc\n'), ((3106, 3244), 'pyodbc.connect', 'pyodbc.connect', (['"""DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; 
Trusted_Connection=yes;"""'], {}), "(\n 'DRIVER={ODBC Driver 13 for SQL Server}; SERVER=localhost\\\\sqlexpress; DATABASE=master; Trusted_Connection=yes;'\n )\n", (3120, 3244), False, 'import pyodbc\n')] |
import unittest
import io
import sys
from main import Gedcom
class TestProject(unittest.TestCase):
    """Unit tests for GEDCOM validation (US22: all individual/family IDs unique)."""

    def test_us22_unique_id(self):
        """Parsing the sample file reports ERROR US22 only for duplicate IDs."""
        # Redirect stdout so the errors Gedcom prints can be inspected.
        capturedOutput = io.StringIO()
        sys.stdout = capturedOutput
        try:
            FILENAME = "My-Family-27-Jan-2019-275.ged"
            gedcom = Gedcom(FILENAME)
            # Non-unique ID cases
            self.assertIn('ERROR US22 for ID @F8@', capturedOutput.getvalue())
            self.assertIn('ERROR US22 for ID @I15@', capturedOutput.getvalue())
            # Unique ID cases
            self.assertNotIn('ERROR US22 for ID @F3@', capturedOutput.getvalue())
            self.assertNotIn('ERROR US22 for ID @I2@', capturedOutput.getvalue())
        finally:
            # Restore stdout even when an assertion fails; otherwise later
            # tests' output would silently go into the dead StringIO buffer.
            sys.stdout = sys.__stdout__
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
"unittest.main",
"io.StringIO",
"main.Gedcom"
] | [((812, 827), 'unittest.main', 'unittest.main', ([], {}), '()\n', (825, 827), False, 'import unittest\n'), ((200, 213), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (211, 213), False, 'import io\n'), ((332, 348), 'main.Gedcom', 'Gedcom', (['FILENAME'], {}), '(FILENAME)\n', (338, 348), False, 'from main import Gedcom\n')] |
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.config import ConfigValidationError
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.modules.xlinebase import XLineBase
from txircd.utils import durationToSeconds, ircLower, now
from zope.interface import implements
from fnmatch import fnmatchcase
class GLine(ModuleData, XLineBase):
    """Network-wide ident@host bans (g:lines), built on the shared x:line base."""
    implements(IPlugin, IModuleData)

    name = "GLine"
    core = True
    lineType = "G"

    def actions(self):
        """Hook g:line checks into registration, ident/host changes, stats and burst."""
        return [
            ("register", 10, self.checkLines),
            ("changeident", 10, self.checkIdentChange),
            ("changehost", 10, self.checkHostChange),
            ("commandpermission-GLINE", 10, self.restrictToOper),
            ("statsruntype-glines", 10, self.generateInfo),
            ("burst", 10, self.burstLines),
        ]

    def userCommands(self):
        return [("GLINE", 1, UserGLine(self))]

    def serverCommands(self):
        return [
            ("ADDLINE", 1, ServerAddGLine(self)),
            ("DELLINE", 1, ServerDelGLine(self)),
        ]

    def load(self):
        self.initializeLineStorage()

    def verifyConfig(self, config):
        """Reject a non-string client_ban_msg."""
        if "client_ban_msg" in config and not isinstance(config["client_ban_msg"], basestring):
            raise ConfigValidationError("client_ban_msg", "value must be a string")

    def checkUserMatch(self, user, mask, data):
        """Return True when ident@(display host|real host|ip) matches the ban mask."""
        banMask = self.normalizeMask(mask)
        for hostPart in (user.host(), user.realHost, user.ip):
            userMask = ircLower("{}@{}".format(user.ident, hostPart))
            if fnmatchcase(userMask, banMask):
                return True
        return False

    def killUser(self, user, reason):
        """Log, notify, and disconnect a g:lined user."""
        self.ircd.log.info("Matched user {user.uuid} ({user.ident}@{user.host()}) against a g:line: {reason}", user=user, reason=reason)
        user.sendMessage(irc.ERR_YOUREBANNEDCREEP, self.ircd.config.get("client_ban_msg", "You're banned! Email <EMAIL> for assistance."))
        user.disconnect("G:Lined: {}".format(reason))

    def checkLines(self, user):
        """Kill a matching user; return False to abort their registration."""
        reason = self.matchUser(user)
        if reason is None:
            return True
        self.killUser(user, reason)
        return False

    def checkIdentChange(self, user, oldIdent, fromServer):
        self.checkLines(user)

    def checkHostChange(self, user, hostType, oldHost, fromServer):
        # Only re-check users local to this server.
        if user.uuid[:3] == self.ircd.serverID:
            self.checkLines(user)

    def restrictToOper(self, user, data):
        """Deny GLINE to users without the command-gline oper permission."""
        if self.ircd.runActionUntilValue("userhasoperpermission", user, "command-gline", users=[user]):
            return None
        user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - You do not have the correct operator privileges")
        return False
class UserGLine(Command):
    """Oper-facing GLINE command: add or remove a g:line."""
    implements(ICommand)

    def __init__(self, module):
        self.module = module

    def parseParams(self, user, params, prefix, tags):
        """Parse GLINE arguments into a mask plus optional duration and reason."""
        paramCount = len(params)
        # One param removes a line; three or more add one. Two is ambiguous.
        if paramCount < 1 or paramCount == 2:
            user.sendSingleError("GLineParams", irc.ERR_NEEDMOREPARAMS, "GLINE", "Not enough parameters")
            return None
        mask = params[0]
        if mask in self.module.ircd.userNicks:
            # A nick was given; ban that user's ident@real-host instead.
            targetUser = self.module.ircd.users[self.module.ircd.userNicks[mask]]
            mask = "{}@{}".format(targetUser.ident, targetUser.realHost)
        elif "@" not in mask:
            mask = "*@{}".format(mask)
        if paramCount == 1:
            return {"mask": mask}
        return {
            "mask": mask,
            "duration": durationToSeconds(params[1]),
            "reason": " ".join(params[2:]),
        }

    def execute(self, user, data):
        """Apply the parsed request: removal when no reason was parsed, else addition."""
        banmask = data["mask"]
        if "reason" not in data:
            if self.module.delLine(banmask):
                user.sendMessage("NOTICE", "*** G:Line for {} has been removed.".format(banmask))
            else:
                user.sendMessage("NOTICE", "*** G:Line for {} doesn't exist.".format(banmask))
            return True
        if not self.module.addLine(banmask, now(), data["duration"], user.hostmask(), data["reason"]):
            user.sendMessage("NOTICE", "*** G:Line for {} is already set.".format(banmask))
            return True
        # Collect matches first so we don't mutate the user dict while iterating.
        matched = []
        for checkUser in self.module.ircd.users.itervalues():
            reason = self.module.matchUser(checkUser)
            if reason is not None:
                matched.append((checkUser, reason))
        for checkUser, reason in matched:
            self.module.killUser(checkUser, reason)
        if data["duration"] > 0:
            user.sendMessage("NOTICE", "*** Timed g:line for {} has been set, to expire in {} seconds.".format(banmask, data["duration"]))
        else:
            user.sendMessage("NOTICE", "*** Permanent g:line for {} has been set.".format(banmask))
        return True
class ServerAddGLine(Command):
    """Server-to-server ADDLINE handler for g:lines."""
    implements(ICommand)

    def __init__(self, module):
        self.module = module

    def parseParams(self, server, params, prefix, tags):
        # Parsing is shared with every x:line type.
        return self.module.handleServerAddParams(server, params, prefix, tags)

    def execute(self, server, data):
        """Store the remote g:line and disconnect any users it now matches."""
        if not self.module.executeServerAddCommand(server, data):
            return None
        matched = []
        for user in self.module.ircd.users.itervalues():
            reason = self.module.matchUser(user)
            if reason is not None:
                matched.append((user, reason))
        for user, reason in matched:
            self.module.killUser(user, reason)
        return True
class ServerDelGLine(Command):
    """Server-to-server DELLINE handler for g:lines."""
    implements(ICommand)

    def __init__(self, module):
        self.module = module

    def parseParams(self, server, params, prefix, tags):
        # Parsing is shared with every x:line type.
        parsed = self.module.handleServerDelParams(server, params, prefix, tags)
        return parsed

    def execute(self, server, data):
        # Removal has no side effects on connected users, unlike ADDLINE.
        handled = self.module.executeServerDelCommand(server, data)
        return handled
glineModule = GLine() | [
"fnmatch.fnmatchcase",
"txircd.utils.now",
"zope.interface.implements",
"txircd.config.ConfigValidationError",
"txircd.utils.durationToSeconds"
] | [((415, 447), 'zope.interface.implements', 'implements', (['IPlugin', 'IModuleData'], {}), '(IPlugin, IModuleData)\n', (425, 447), False, 'from zope.interface import implements\n'), ((2766, 2786), 'zope.interface.implements', 'implements', (['ICommand'], {}), '(ICommand)\n', (2776, 2786), False, 'from zope.interface import implements\n'), ((4579, 4599), 'zope.interface.implements', 'implements', (['ICommand'], {}), '(ICommand)\n', (4589, 4599), False, 'from zope.interface import implements\n'), ((5168, 5188), 'zope.interface.implements', 'implements', (['ICommand'], {}), '(ICommand)\n', (5178, 5188), False, 'from zope.interface import implements\n'), ((1438, 1468), 'fnmatch.fnmatchcase', 'fnmatchcase', (['userMask', 'banMask'], {}), '(userMask, banMask)\n', (1449, 1468), False, 'from fnmatch import fnmatchcase\n'), ((1555, 1585), 'fnmatch.fnmatchcase', 'fnmatchcase', (['userMask', 'banMask'], {}), '(userMask, banMask)\n', (1566, 1585), False, 'from fnmatch import fnmatchcase\n'), ((1666, 1696), 'fnmatch.fnmatchcase', 'fnmatchcase', (['userMask', 'banMask'], {}), '(userMask, banMask)\n', (1677, 1696), False, 'from fnmatch import fnmatchcase\n'), ((1220, 1285), 'txircd.config.ConfigValidationError', 'ConfigValidationError', (['"""client_ban_msg"""', '"""value must be a string"""'], {}), "('client_ban_msg', 'value must be a string')\n", (1241, 1285), False, 'from txircd.config import ConfigValidationError\n'), ((3437, 3465), 'txircd.utils.durationToSeconds', 'durationToSeconds', (['params[1]'], {}), '(params[1])\n', (3454, 3465), False, 'from txircd.utils import durationToSeconds, ircLower, now\n'), ((3626, 3631), 'txircd.utils.now', 'now', ([], {}), '()\n', (3629, 3631), False, 'from txircd.utils import durationToSeconds, ircLower, now\n')] |
import matplotlib.pyplot as plt
import numpy
import errandpy
"""
logファイルのFitting Parameter: a,b,c,dを返します
normalized_paramの時正規化したパラメーターを返します
"""
def real_a(a, delta, min):
    """Map the normalized fit parameter ``a`` back to real units."""
    scaled = (a + 1) * delta
    return scaled + min
def real_b(b, delta):
    """Map the normalized fit parameter ``b`` back to real units."""
    return delta * b
def get_z0FromLogFile(path, isLegacy=False):
    """Read z0 (14th space-separated token) from the relevant line of a log file.

    Legacy logs keep it on the 4th line from the end, newer ones on the 3rd.
    """
    with open(path, 'r') as log:
        lines = log.readlines()
    offset = 4 if isLegacy else 3
    tokens = lines[len(lines) - offset].split(" ")
    return int(tokens[13])
def legacy_get_logFileParamater(path, normalized_param=True, normMode=1) -> []:
    """Parse fit parameters [a, b, c, d] from a legacy-format log file.

    The values live on the second-to-last line (10 tokens expected); on a
    malformed line [0, 0, 0, 0] is returned and a warning is printed.  With
    ``normalized_param=False`` a and b are rescaled to real units using the
    min/delta recorded on the first two lines.
    """
    with open(path, 'r') as log:
        lines = log.readlines()
    tokens = lines[len(lines) - 2].split(" ")
    print(tokens)
    if len(tokens) != 10:
        result = [0, 0, 0, 0]
        print(" Warning: Log File Error!!! " + path)
    else:
        result = [float(tokens[i]) for i in (3, 5, 7, 9)]
    if normalized_param is False:
        offset = float(lines[0].split(" ")[1][normMode:-2])
        scale = float(lines[1].split(" ")[1][normMode:-2])
        result[0] = real_a(result[0], scale, offset)
        result[1] = real_b(result[1], scale)
    return result
def get_logFileParamater(path, normalized_param=True, normMode=1) -> []:
    """Parse fit parameters [a, b, c, d] from the last line of a log file.

    The last line must contain 12 or 14 tokens; otherwise [0, 0, 0, 0] is
    returned and a warning is printed.  With ``normalized_param=False`` a and
    b are rescaled to real units using the min/delta on the first two lines.
    """
    with open(path, 'r') as log:
        lines = log.readlines()
    tokens = lines[len(lines) - 1].split(" ")
    if len(tokens) in (12, 14):
        result = [float(tokens[i]) for i in (3, 5, 7, 9)]
    else:
        result = [0, 0, 0, 0]
        print(" Warning: Log File Error!!! " + path)
    if normalized_param is False:
        offset = float(lines[0].split(" ")[1][normMode:-2])
        scale = float(lines[1].split(" ")[1][normMode:-2])
        result[0] = real_a(result[0], scale, offset)
        result[1] = real_b(result[1], scale)
    print(result)
    return result
def _f_long(x, a, b, c, d):
    """Long-range background model: a - b/(1 + c*x)**d in legacy mode,
    a - b/(c + x)**d otherwise (selected by errandpy.useLegacyModel)."""
    if errandpy.useLegacyModel:
        return a - b / (1 + c * x) ** d
    return a - b / (c + x) ** d
def clamp(minValue, maxValue, value):
    """Constrain value to the interval [minValue, maxValue]."""
    upperBounded = min(value, maxValue)
    return max(upperBounded, minValue)
def clamp01(value):
    """Constrain value to the unit interval [0, 1]."""
    return max(min(value, 1), 0)
def mean_r(x, y, a, b, c, d):
    """Coefficient of determination (R^2) of the fitted model against y.

    Computes 1 - SS_res / SS_tot, where the residuals are taken against
    _f_long(x, a, b, c, d).
    """
    # Evaluate the model once instead of twice (the original called
    # _f_long with identical arguments for both dot-product operands).
    residuals = y - _f_long(x, a, b, c, d)
    ss_res = numpy.dot(residuals, residuals)
    ymean = numpy.mean(y)
    ss_tot = numpy.dot(y - ymean, y - ymean)
    return 1 - ss_res / ss_tot
def normalized(array, max=1, bias=0):
    """Rescale array to [bias, bias + max]; return (scaled, min, range)."""
    lo = array.min(keepdims=True)
    hi = array.max(keepdims=True)
    span = hi - lo
    scaled = (array - lo) / span * max + bias
    return scaled, lo, span
def draw_plt(x, y, a, b, c, d, bound, name, ze=None):
    """Plot the raw data, fitted long-range model, and residual force curve.

    Only points from index ``bound`` onward are used for the R^2 shown in
    the title; ``ze`` (optional index) adds a blue marker line.
    Draws on the current pyplot figure; call order matters for layering.
    """
    y_b = y[bound:]
    plt.clf()
    plt.scatter(x, y, color='red', label='Original data', alpha=0.5)
    _x = x[bound:]
    # Title reports goodness of fit over the fitted region only.
    plt.title(name + " (Mean R: " + str(mean_r(_x, y_b, a, b, c, d)) + ")")
    plt.axhline(0, color='green', linestyle='dashdot')
    # Mark where the fitted region begins.
    plt.axvline(x[bound], color='green', linestyle='dashdot')
    if ze is not None:
        plt.axvline(x[ze], color='blue', linestyle='dashdot')
    plt.plot(x, _f_long(x, a, b, c, d), color='blue', label='Fitted line')
    plt.plot(x, y - _f_long(x, a, b, c, d), color='black', label='force curve')
"numpy.mean",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.axhline",
"numpy.dot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axvline"
] | [((2581, 2594), 'numpy.mean', 'numpy.mean', (['y'], {}), '(y)\n', (2591, 2594), False, 'import numpy\n'), ((2609, 2640), 'numpy.dot', 'numpy.dot', (['(y - ymean)', '(y - ymean)'], {}), '(y - ymean, y - ymean)\n', (2618, 2640), False, 'import numpy\n'), ((3009, 3018), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3016, 3018), True, 'import matplotlib.pyplot as plt\n'), ((3024, 3088), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'color': '"""red"""', 'label': '"""Original data"""', 'alpha': '(0.5)'}), "(x, y, color='red', label='Original data', alpha=0.5)\n", (3035, 3088), True, 'import matplotlib.pyplot as plt\n'), ((3195, 3245), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'color': '"""green"""', 'linestyle': '"""dashdot"""'}), "(0, color='green', linestyle='dashdot')\n", (3206, 3245), True, 'import matplotlib.pyplot as plt\n'), ((3251, 3308), 'matplotlib.pyplot.axvline', 'plt.axvline', (['x[bound]'], {'color': '"""green"""', 'linestyle': '"""dashdot"""'}), "(x[bound], color='green', linestyle='dashdot')\n", (3262, 3308), True, 'import matplotlib.pyplot as plt\n'), ((3342, 3395), 'matplotlib.pyplot.axvline', 'plt.axvline', (['x[ze]'], {'color': '"""blue"""', 'linestyle': '"""dashdot"""'}), "(x[ze], color='blue', linestyle='dashdot')\n", (3353, 3395), True, 'import matplotlib.pyplot as plt\n')] |
from brownie import Lottery, accounts, config, network
from web3 import Web3
def printStuff():
    """Deploy the Lottery contract and print its fee and price-feed readings."""
    account = accounts[0]
    # Resolve the active network's config section once instead of calling
    # network.show_active() and indexing config twice.
    network_config = config["networks"][network.show_active()]
    lottery = Lottery.deploy(
        network_config["eth_usd_price_feed"],
        network_config["gbp_usd_price_feed"],
        {"from": account},
    )
    entrance_fee = lottery.getEntranceFee()
    print(f"Entrance fee: {entrance_fee}")
    gbpUsd = lottery.getGbpUsdPrice()
    print(f"GBP USD fee: {gbpUsd}")
    ethUsd = lottery.getEthUsdPrice()
    print(f"ETH USD fee: {ethUsd}")
def main():
    """Brownie script entry point."""
    printStuff()
| [
"brownie.network.show_active"
] | [((180, 201), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (199, 201), False, 'from brownie import Lottery, accounts, config, network\n'), ((253, 274), 'brownie.network.show_active', 'network.show_active', ([], {}), '()\n', (272, 274), False, 'from brownie import Lottery, accounts, config, network\n')] |
from netmiko import ConnectHandler
import os
# Jinja-free config template; {rtr} is substituted with each router's hostname.
template = """logging host 192.168.20.5 transport tcp port 514
logging trap 6
interface loopback 30
description "{rtr} loopback interface\""""

username = 'test'
password = "<PASSWORD>"

# step 1
# fetch the hostname of the router for the template
for n in range(1, 5):
    ip = "192.168.20.{0}".format(n)
    # device = ConnectHandler(device_type='cisco_ios', ip=ip, username='test', password='<PASSWORD>')
    # output = device.send_command("show run | in hostname")
    output = 'hostname new01601'
    # "hostname <name>" -> take the second token as the device name.
    hostname = output.split(" ")[1]
    generatedconfig = template.replace("{rtr}", hostname)
    # step 2
    # create different config files for each router ready to be pushed on routers.
    # "with" guarantees the handle is closed even if the write fails.
    with open(hostname + "_syslog_config.txt", "w") as configfile:
        configfile.write(generatedconfig)
# step 3 (validation): echo every generated *_syslog_config.txt file so the
# rendered configuration can be eyeballed before pushing it to the routers.
print("Showing contents for generated config files....")
for file in os.listdir('./'):
    if file.endswith(".txt"):
        print(file)
        if "syslog_config" in file:
            # Files are named "<hostname>_syslog_config.txt".
            hostname = file.split("_")[0]
            # "with" closes the handle even if read() raises (original
            # leaked the handle on error by calling close() manually).
            with open(file) as fileconfig:
                print("\nShowing contents of " + hostname)
                print(fileconfig.read())
| [
"os.listdir"
] | [((1005, 1021), 'os.listdir', 'os.listdir', (['"""./"""'], {}), "('./')\n", (1015, 1021), False, 'import os\n')] |
from datetime import date
import pytest
import netsgiro
import netsgiro.records
def test_transmission_start():
    """TransmissionStart parses transmitter, number, and recipient fields."""
    rec = netsgiro.records.TransmissionStart.from_string(
        'NY00001055555555100008100008080000000000'
        '0000000000000000000000000000000000000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.NONE
    assert rec.RECORD_TYPE == netsgiro.RecordType.TRANSMISSION_START
    assert rec.data_transmitter == '55555555'
    assert rec.transmission_number == '1000081'
    assert rec.data_recipient == '00008080'
assert record.data_recipient == '00008080'
def test_transmission_start_fails_when_invalid_format():
    """A line that does not match the record layout raises ValueError."""
    bad_line = 'XX' + ('0' * 78)
    expected = '{!r} did not match TransmissionStart record format'.format(bad_line)
    with pytest.raises(ValueError, match=expected):
        netsgiro.records.TransmissionStart.from_string(bad_line)
def test_transmission_end():
    """TransmissionEnd exposes totals and the Nets date."""
    rec = netsgiro.records.TransmissionEnd.from_string(
        'NY00008900000006000000220000000000000060'
        '0170604000000000000000000000000000000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.NONE
    assert rec.RECORD_TYPE == netsgiro.RecordType.TRANSMISSION_END
    assert rec.num_transactions == 6
    assert rec.num_records == 22
    assert rec.total_amount == 600
    assert rec.nets_date == date(2004, 6, 17)
def test_assignment_start_for_avtalegiro_payment_requests():
    """AvtaleGiro payment-request AssignmentStart keeps agreement and account."""
    rec = netsgiro.records.AssignmentStart.from_string(
        'NY21002000000000040000868888888888800000'
        '0000000000000000000000000000000000000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_START
    assert rec.assignment_type == netsgiro.AssignmentType.TRANSACTIONS
    assert rec.agreement_id == '000000000'
    assert rec.assignment_number == '4000086'
    assert rec.assignment_account == '88888888888'
def test_assignment_start_for_avtalegiro_agreements():
    """AvtaleGiro agreements AssignmentStart has no agreement id."""
    rec = netsgiro.records.AssignmentStart.from_string(
        'NY21242000000000040000868888888888800000'
        '0000000000000000000000000000000000000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_START
    assert rec.assignment_type == netsgiro.AssignmentType.AVTALEGIRO_AGREEMENTS
    assert rec.agreement_id is None
    assert rec.assignment_number == '4000086'
    assert rec.assignment_account == '88888888888'
def test_assignment_start_for_avtalegiro_cancellation():
    """AvtaleGiro cancellations AssignmentStart has no agreement id."""
    rec = netsgiro.records.AssignmentStart.from_string(
        'NY21362000000000040000868888888888800000'
        '0000000000000000000000000000000000000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_START
    assert rec.assignment_type == netsgiro.AssignmentType.AVTALEGIRO_CANCELLATIONS
    assert rec.agreement_id is None
    assert rec.assignment_number == '4000086'
    assert rec.assignment_account == '88888888888'
def test_assignment_start_for_ocr_giro_transactions():
    """OCR Giro AssignmentStart keeps agreement id and assignment account."""
    rec = netsgiro.records.AssignmentStart.from_string(
        'NY09002000100856600000029999104276400000'
        '0000000000000000000000000000000000000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.OCR_GIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_START
    assert rec.assignment_type == netsgiro.AssignmentType.TRANSACTIONS
    assert rec.agreement_id == '001008566'
    assert rec.assignment_number == '0000002'
    assert rec.assignment_account == '99991042764'
def test_assignment_end_for_avtalegiro_payment_requests():
    """Payment-request AssignmentEnd carries totals and both Nets dates."""
    rec = netsgiro.records.AssignmentEnd.from_string(
        'NY21008800000006000000200000000000000060'
        '0170604170604000000000000000000000000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_END
    assert rec.assignment_type == netsgiro.AssignmentType.TRANSACTIONS
    assert rec.num_transactions == 6
    assert rec.num_records == 20
    assert rec.total_amount == 600
    assert rec.nets_date_earliest == date(2004, 6, 17)
    assert rec.nets_date_latest == date(2004, 6, 17)
def test_assignment_end_for_avtalegiro_agreements():
    """Agreements AssignmentEnd has counts but no amount or dates."""
    rec = netsgiro.records.AssignmentEnd.from_string(
        'NY21248800000006000000200000000000000000'
        '0000000000000000000000000000000000000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_END
    assert rec.assignment_type == netsgiro.AssignmentType.AVTALEGIRO_AGREEMENTS
    assert rec.num_transactions == 6
    assert rec.num_records == 20
    assert rec.total_amount is None
    assert rec.nets_date_earliest is None
    assert rec.nets_date_latest is None
def test_assignment_end_for_avtalegiro_cancellations():
    """Cancellations AssignmentEnd carries totals and both Nets dates."""
    rec = netsgiro.records.AssignmentEnd.from_string(
        'NY21368800000006000000200000000000000060'
        '0170604170604000000000000000000000000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_END
    assert rec.assignment_type == netsgiro.AssignmentType.AVTALEGIRO_CANCELLATIONS
    assert rec.num_transactions == 6
    assert rec.num_records == 20
    assert rec.total_amount == 600
    assert rec.nets_date_latest == date(2004, 6, 17)
    assert rec.nets_date_earliest == date(2004, 6, 17)
def test_assignment_end_for_ocr_giro_transactions():
    """OCR Giro AssignmentEnd parses counts, amount, and all three dates."""
    rec = netsgiro.records.AssignmentEnd.from_string(
        'NY09008800000020000000420000000000514490'
        '0200192200192200192000000000000000000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.OCR_GIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.ASSIGNMENT_END
    assert rec.assignment_type == netsgiro.AssignmentType.TRANSACTIONS
    assert rec.num_transactions == 20
    assert rec.num_records == 42
    assert rec.total_amount == 5144900
    assert rec.nets_date == date(1992, 1, 20)
    assert rec.nets_date_earliest == date(1992, 1, 20)
    assert rec.nets_date_latest == date(1992, 1, 20)
def test_transaction_amount_item_1_for_avtalegiro_payment_request():
    """Payment-request amount item 1 parses date, amount, and KID."""
    rec = netsgiro.records.TransactionAmountItem1.from_string(
        'NY2121300000001170604           00000000'
        '000000100          008000011688373000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_1
    assert rec.transaction_type == (
        netsgiro.TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
    )
    assert rec.transaction_number == 1
    assert rec.nets_date == date(2004, 6, 17)
    assert rec.amount == 100
    assert rec.kid == '008000011688373'
def test_transaction_amount_item_1_for_avtalegiro_cancellation():
    """Cancellation amount item 1 parses date, amount, and KID."""
    rec = netsgiro.records.TransactionAmountItem1.from_string(
        'NY2193300000001170604           00000000'
        '000000100          008000011688373000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_1
    assert rec.transaction_type == (
        netsgiro.TransactionType.AVTALEGIRO_CANCELLATION
    )
    assert rec.transaction_number == 1
    assert rec.nets_date == date(2004, 6, 17)
    assert rec.amount == 100
    assert rec.kid == '008000011688373'
def test_transaction_amount_item_1_for_ocr_giro_transactions():
    """OCR Giro amount item 1 parses settlement fields, amount, and KID."""
    rec = netsgiro.records.TransactionAmountItem1.from_string(
        'NY09103000000012001921320101464000000000'
        '000102000             0000531000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.OCR_GIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_1
    assert rec.transaction_type == (
        netsgiro.TransactionType.FROM_GIRO_DEBITED_ACCOUNT
    )
    assert rec.transaction_number == 1
    assert rec.nets_date == date(1992, 1, 20)
    assert rec.centre_id == '13'
    assert rec.day_code == 20
    assert rec.partial_settlement_number == 1
    assert rec.partial_settlement_serial_number == '01464'
    assert rec.sign == '0'
    assert rec.amount == 102000
    assert rec.kid == '0000531'
def test_transaction_amount_item_2_for_avtalegiro_payment_request():
    """Payment-request amount item 2 carries the payer name, no reference."""
    rec = netsgiro.records.TransactionAmountItem2.from_string(
        'NY2121310000001NAVN                     '
        '                                   00000'
    )
    assert rec.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_2
    assert rec.transaction_type == (
        netsgiro.TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
    )
    assert rec.transaction_number == 1
    assert rec.payer_name == 'NAVN'
    assert rec.reference is None
def test_transaction_amount_item_2_for_ocr_giro_transactions():
    """OCR Giro amount item 2 parses form number, reference, and debit account."""
    rec = netsgiro.records.TransactionAmountItem2.from_string(
        'NY09103100000019636827194099038562000000'
        '0160192999905123410000000000000000000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.OCR_GIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_2
    assert rec.transaction_type == (
        netsgiro.TransactionType.FROM_GIRO_DEBITED_ACCOUNT
    )
    assert rec.transaction_number == 1
    assert rec.form_number == '9636827194'
    assert rec.payer_name is None
    assert rec.reference == '099038562'
    assert rec.bank_date == date(1992, 1, 16)
    assert rec.debit_account == '99990512341'
def test_transaction_amount_item_2_for_ocr_giro_with_data_in_filler_field():
    """OCR Giro amount item 2 preserves unexpected data in the filler field."""
    rec = netsgiro.records.TransactionAmountItem2.from_string(
        'NY09103100000029797596016097596016188320'
        '6160192999910055240000000000000000000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.OCR_GIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_2
    assert rec.transaction_type == (
        netsgiro.TransactionType.FROM_GIRO_DEBITED_ACCOUNT
    )
    assert rec.transaction_number == 2
    assert rec.form_number == '9797596016'
    assert rec.payer_name is None
    assert rec.reference == '097596016'
    assert rec.bank_date == date(1992, 1, 16)
    assert rec.debit_account == '99991005524'
    assert rec._filler == '1883206'
def test_transaction_amount_item_3_for_ocr_giro_transactions():
    """OCR Giro amount item 3 carries free text for purchases."""
    rec = netsgiro.records.TransactionAmountItem3.from_string(
        'NY0921320000001Foo bar baz              '
        '            0000000000000000000000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.OCR_GIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AMOUNT_ITEM_3
    assert rec.transaction_type == (
        netsgiro.TransactionType.PURCHASE_WITH_TEXT
    )
    assert rec.transaction_number == 1
    assert rec.text == 'Foo bar baz'
def test_transaction_specification_for_avtalegiro_payment_request():
    """A specification record exposes line/column numbers and the text."""
    rec = netsgiro.records.TransactionSpecification.from_string(
        'NY212149000000140011 Gjelder Faktura: 16'
        '8837 Dato: 19/03/0400000000000000000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_SPECIFICATION
    assert rec.transaction_type == (
        netsgiro.TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
    )
    assert rec.transaction_number == 1
    assert rec.line_number == 1
    assert rec.column_number == 1
    assert rec.text == ' Gjelder Faktura: 168837 Dato: 19/03/04'
def make_specification_records(num_lines, num_columns=2):
    """Build num_lines x num_columns TransactionSpecification fixtures."""
    records = []
    for line in range(1, num_lines + 1):
        for column in range(1, num_columns + 1):
            records.append(
                netsgiro.records.TransactionSpecification(
                    service_code=netsgiro.ServiceCode.AVTALEGIRO,
                    transaction_type=(
                        netsgiro.TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
                    ),
                    transaction_number=1,
                    line_number=line,
                    column_number=column,
                    text='Line {}, column {}'.format(line, column),
                )
            )
    return records
def test_transaction_specification_to_text_with_max_number_of_records():
    """84 specification records (42 lines x 2 columns) render to 42 text lines."""
    specs = make_specification_records(42)
    text = netsgiro.records.TransactionSpecification.to_text(specs)
    assert len(text.splitlines()) == 42
    assert 'Line 1, column 1' in text
    assert 'Line 42, column 2' in text
def test_transaction_specification_to_text_with_too_many_records():
    """More than 84 specification records is rejected with ValueError."""
    specs = make_specification_records(43)
    with pytest.raises(
        ValueError, match='Max 84 specification records allowed, got 86'
    ):
        netsgiro.records.TransactionSpecification.to_text(specs)
def test_avtalegiro_active_agreement():
    """Registration type 0 parses as an active agreement with notification."""
    rec = netsgiro.records.AvtaleGiroAgreement.from_string(
        'NY21947000000010          00800001168837'
        '3J00000000000000000000000000000000000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AGREEMENTS
    assert rec.transaction_type == (
        netsgiro.TransactionType.AVTALEGIRO_AGREEMENT
    )
    assert rec.transaction_number == 1
    assert rec.registration_type == (
        netsgiro.AvtaleGiroRegistrationType.ACTIVE_AGREEMENT
    )
    assert rec.kid == '008000011688373'
    assert rec.notify is True
def test_avtalegiro_new_or_updated_agreement():
    """Registration type 1 parses as a new/updated agreement without notification."""
    rec = netsgiro.records.AvtaleGiroAgreement.from_string(
        'NY21947000000011          00800001168837'
        '3N00000000000000000000000000000000000000'
    )
    assert rec.service_code == netsgiro.ServiceCode.AVTALEGIRO
    assert rec.RECORD_TYPE == netsgiro.RecordType.TRANSACTION_AGREEMENTS
    assert rec.transaction_type == (
        netsgiro.TransactionType.AVTALEGIRO_AGREEMENT
    )
    assert rec.transaction_number == 1
    assert rec.registration_type == (
        netsgiro.AvtaleGiroRegistrationType.NEW_OR_UPDATED_AGREEMENT
    )
    assert rec.kid == '008000011688373'
    assert rec.notify is False
| [
"netsgiro.records.TransmissionEnd.from_string",
"netsgiro.records.TransmissionStart.from_string",
"netsgiro.records.TransactionAmountItem3.from_string",
"netsgiro.records.AvtaleGiroAgreement.from_string",
"netsgiro.records.TransactionAmountItem2.from_string",
"netsgiro.records.TransactionAmountItem1.from_... | [((128, 268), 'netsgiro.records.TransmissionStart.from_string', 'netsgiro.records.TransmissionStart.from_string', (['"""NY000010555555551000081000080800000000000000000000000000000000000000000000000000"""'], {}), "(\n 'NY000010555555551000081000080800000000000000000000000000000000000000000000000000'\n )\n", (174, 268), False, 'import netsgiro\n'), ((891, 1029), 'netsgiro.records.TransmissionEnd.from_string', 'netsgiro.records.TransmissionEnd.from_string', (['"""NY000089000000060000002200000000000000600170604000000000000000000000000000000000"""'], {}), "(\n 'NY000089000000060000002200000000000000600170604000000000000000000000000000000000'\n )\n", (935, 1029), False, 'import netsgiro\n'), ((1416, 1554), 'netsgiro.records.AssignmentStart.from_string', 'netsgiro.records.AssignmentStart.from_string', (['"""NY210020000000000400008688888888888000000000000000000000000000000000000000000000"""'], {}), "(\n 'NY210020000000000400008688888888888000000000000000000000000000000000000000000000'\n )\n", (1460, 1554), False, 'import netsgiro\n'), ((2002, 2140), 'netsgiro.records.AssignmentStart.from_string', 'netsgiro.records.AssignmentStart.from_string', (['"""NY212420000000000400008688888888888000000000000000000000000000000000000000000000"""'], {}), "(\n 'NY212420000000000400008688888888888000000000000000000000000000000000000000000000'\n )\n", (2046, 2140), False, 'import netsgiro\n'), ((2608, 2746), 'netsgiro.records.AssignmentStart.from_string', 'netsgiro.records.AssignmentStart.from_string', (['"""NY213620000000000400008688888888888000000000000000000000000000000000000000000000"""'], {}), "(\n 'NY213620000000000400008688888888888000000000000000000000000000000000000000000000'\n )\n", (2652, 2746), False, 'import netsgiro\n'), ((3215, 3353), 'netsgiro.records.AssignmentStart.from_string', 'netsgiro.records.AssignmentStart.from_string', (['"""NY090020001008566000000299991042764000000000000000000000000000000000000000000000"""'], {}), 
"(\n 'NY090020001008566000000299991042764000000000000000000000000000000000000000000000'\n )\n", (3259, 3353), False, 'import netsgiro\n'), ((3803, 3939), 'netsgiro.records.AssignmentEnd.from_string', 'netsgiro.records.AssignmentEnd.from_string', (['"""NY210088000000060000002000000000000000600170604170604000000000000000000000000000"""'], {}), "(\n 'NY210088000000060000002000000000000000600170604170604000000000000000000000000000'\n )\n", (3845, 3939), False, 'import netsgiro\n'), ((4462, 4598), 'netsgiro.records.AssignmentEnd.from_string', 'netsgiro.records.AssignmentEnd.from_string', (['"""NY212488000000060000002000000000000000000000000000000000000000000000000000000000"""'], {}), "(\n 'NY212488000000060000002000000000000000000000000000000000000000000000000000000000'\n )\n", (4504, 4598), False, 'import netsgiro\n'), ((5124, 5260), 'netsgiro.records.AssignmentEnd.from_string', 'netsgiro.records.AssignmentEnd.from_string', (['"""NY213688000000060000002000000000000000600170604170604000000000000000000000000000"""'], {}), "(\n 'NY213688000000060000002000000000000000600170604170604000000000000000000000000000'\n )\n", (5166, 5260), False, 'import netsgiro\n'), ((5811, 5947), 'netsgiro.records.AssignmentEnd.from_string', 'netsgiro.records.AssignmentEnd.from_string', (['"""NY090088000000200000004200000000005144900200192200192200192000000000000000000000"""'], {}), "(\n 'NY090088000000200000004200000000005144900200192200192200192000000000000000000000'\n )\n", (5853, 5947), False, 'import netsgiro\n'), ((6538, 6683), 'netsgiro.records.TransactionAmountItem1.from_string', 'netsgiro.records.TransactionAmountItem1.from_string', (['"""NY2121300000001170604 00000000000000100 008000011688373000000"""'], {}), "(\n 'NY2121300000001170604 00000000000000100 008000011688373000000'\n )\n", (6589, 6683), False, 'import netsgiro\n'), ((7207, 7352), 'netsgiro.records.TransactionAmountItem1.from_string', 'netsgiro.records.TransactionAmountItem1.from_string', (['"""NY2193300000001170604 
00000000000000100 008000011688373000000"""'], {}), "(\n 'NY2193300000001170604 00000000000000100 008000011688373000000'\n )\n", (7258, 7352), False, 'import netsgiro\n'), ((7864, 8009), 'netsgiro.records.TransactionAmountItem1.from_string', 'netsgiro.records.TransactionAmountItem1.from_string', (['"""NY09103000000012001921320101464000000000000102000 0000531000000"""'], {}), "(\n 'NY09103000000012001921320101464000000000000102000 0000531000000'\n )\n", (7915, 8009), False, 'import netsgiro\n'), ((8733, 8878), 'netsgiro.records.TransactionAmountItem2.from_string', 'netsgiro.records.TransactionAmountItem2.from_string', (['"""NY2121310000001NAVN 00000"""'], {}), "(\n 'NY2121310000001NAVN 00000'\n )\n", (8784, 8878), False, 'import netsgiro\n'), ((9351, 9496), 'netsgiro.records.TransactionAmountItem2.from_string', 'netsgiro.records.TransactionAmountItem2.from_string', (['"""NY091031000000196368271940990385620000000160192999905123410000000000000000000000"""'], {}), "(\n 'NY091031000000196368271940990385620000000160192999905123410000000000000000000000'\n )\n", (9402, 9496), False, 'import netsgiro\n'), ((10121, 10266), 'netsgiro.records.TransactionAmountItem2.from_string', 'netsgiro.records.TransactionAmountItem2.from_string', (['"""NY091031000000297975960160975960161883206160192999910055240000000000000000000000"""'], {}), "(\n 'NY091031000000297975960160975960161883206160192999910055240000000000000000000000'\n )\n", (10172, 10266), False, 'import netsgiro\n'), ((10917, 11062), 'netsgiro.records.TransactionAmountItem3.from_string', 'netsgiro.records.TransactionAmountItem3.from_string', (['"""NY0921320000001Foo bar baz 0000000000000000000000000"""'], {}), "(\n 'NY0921320000001Foo bar baz 0000000000000000000000000'\n )\n", (10968, 11062), False, 'import netsgiro\n'), ((11488, 11635), 'netsgiro.records.TransactionSpecification.from_string', 'netsgiro.records.TransactionSpecification.from_string', (['"""NY212149000000140011 Gjelder Faktura: 168837 Dato: 
19/03/0400000000000000000000"""'], {}), "(\n 'NY212149000000140011 Gjelder Faktura: 168837 Dato: 19/03/0400000000000000000000'\n )\n", (11541, 11635), False, 'import netsgiro\n'), ((12798, 12856), 'netsgiro.records.TransactionSpecification.to_text', 'netsgiro.records.TransactionSpecification.to_text', (['records'], {}), '(records)\n', (12847, 12856), False, 'import netsgiro\n'), ((13323, 13465), 'netsgiro.records.AvtaleGiroAgreement.from_string', 'netsgiro.records.AvtaleGiroAgreement.from_string', (['"""NY21947000000010 008000011688373J00000000000000000000000000000000000000"""'], {}), "(\n 'NY21947000000010 008000011688373J00000000000000000000000000000000000000'\n )\n", (13371, 13465), False, 'import netsgiro\n'), ((14015, 14157), 'netsgiro.records.AvtaleGiroAgreement.from_string', 'netsgiro.records.AvtaleGiroAgreement.from_string', (['"""NY21947000000011 008000011688373N00000000000000000000000000000000000000"""'], {}), "(\n 'NY21947000000011 008000011688373N00000000000000000000000000000000000000'\n )\n", (14063, 14157), False, 'import netsgiro\n'), ((794, 846), 'netsgiro.records.TransmissionStart.from_string', 'netsgiro.records.TransmissionStart.from_string', (['line'], {}), '(line)\n', (840, 846), False, 'import netsgiro\n'), ((1322, 1339), 'datetime.date', 'date', (['(2004)', '(6)', '(17)'], {}), '(2004, 6, 17)\n', (1326, 1339), False, 'from datetime import date\n'), ((4320, 4337), 'datetime.date', 'date', (['(2004)', '(6)', '(17)'], {}), '(2004, 6, 17)\n', (4324, 4337), False, 'from datetime import date\n'), ((4376, 4393), 'datetime.date', 'date', (['(2004)', '(6)', '(17)'], {}), '(2004, 6, 17)\n', (4380, 4393), False, 'from datetime import date\n'), ((5667, 5684), 'datetime.date', 'date', (['(2004)', '(6)', '(17)'], {}), '(2004, 6, 17)\n', (5671, 5684), False, 'from datetime import date\n'), ((5725, 5742), 'datetime.date', 'date', (['(2004)', '(6)', '(17)'], {}), '(2004, 6, 17)\n', (5729, 5742), False, 'from datetime import date\n'), ((6322, 6339), 
'datetime.date', 'date', (['(1992)', '(1)', '(20)'], {}), '(1992, 1, 20)\n', (6326, 6339), False, 'from datetime import date\n'), ((6380, 6397), 'datetime.date', 'date', (['(1992)', '(1)', '(20)'], {}), '(1992, 1, 20)\n', (6384, 6397), False, 'from datetime import date\n'), ((6436, 6453), 'datetime.date', 'date', (['(1992)', '(1)', '(20)'], {}), '(1992, 1, 20)\n', (6440, 6453), False, 'from datetime import date\n'), ((7033, 7050), 'datetime.date', 'date', (['(2004)', '(6)', '(17)'], {}), '(2004, 6, 17)\n', (7037, 7050), False, 'from datetime import date\n'), ((7692, 7709), 'datetime.date', 'date', (['(2004)', '(6)', '(17)'], {}), '(2004, 6, 17)\n', (7696, 7709), False, 'from datetime import date\n'), ((8349, 8366), 'datetime.date', 'date', (['(1992)', '(1)', '(20)'], {}), '(1992, 1, 20)\n', (8353, 8366), False, 'from datetime import date\n'), ((9962, 9979), 'datetime.date', 'date', (['(1992)', '(1)', '(16)'], {}), '(1992, 1, 16)\n', (9966, 9979), False, 'from datetime import date\n'), ((10732, 10749), 'datetime.date', 'date', (['(1992)', '(1)', '(16)'], {}), '(1992, 1, 16)\n', (10736, 10749), False, 'from datetime import date\n'), ((13106, 13185), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Max 84 specification records allowed, got 86"""'}), "(ValueError, match='Max 84 specification records allowed, got 86')\n", (13119, 13185), False, 'import pytest\n'), ((13209, 13267), 'netsgiro.records.TransactionSpecification.to_text', 'netsgiro.records.TransactionSpecification.to_text', (['records'], {}), '(records)\n', (13258, 13267), False, 'import netsgiro\n')] |
from django.contrib import admin
from .models import TimePeriod
class TimePeriodAdminBase(object):
    """Shared admin configuration mixin for time-period models."""

    list_display = ('name', 'period_start', 'period_end',)


# admin.register(...) with no explicit site argument registers the model
# on the default admin.site, so this is equivalent to calling
# admin.site.register(TimePeriod, TimePeriodAdmin).
@admin.register(TimePeriod)
class TimePeriodAdmin(TimePeriodAdminBase, admin.ModelAdmin):
    """Default admin for the TimePeriod model."""
| [
"django.contrib.admin.site.register"
] | [((236, 284), 'django.contrib.admin.site.register', 'admin.site.register', (['TimePeriod', 'TimePeriodAdmin'], {}), '(TimePeriod, TimePeriodAdmin)\n', (255, 284), False, 'from django.contrib import admin\n')] |
import qiskit
import qtm.progress_bar
import qtm.constant
import qtm.qfim
import qtm.noise
import qtm.optimizer
import qtm.fubini_study
import numpy as np
import types, typing
def measure(qc: qiskit.QuantumCircuit, qubits, cbits=None):
    """Measure the given qubits and estimate the frequency of the all-zero bitstring.

    Measurement gates are appended to ``qc`` in place, the circuit is executed
    on the configured backend, and (when ``qtm.constant.noise_prob > 0``) the
    raw counts are corrected with a measurement-error-mitigation filter.

    Args:
        - qc (qiskit.QuantumCircuit): circuit to measure (modified in place)
        - qubits (list): indices of the measured qubits
        - cbits (list, optional): classical bits to store results; defaults
          to the same indices as ``qubits``.

    Returns:
        - float: frequency of the "00...0" classical outcome
    """
    n = len(qubits)
    # Fix: the original used a mutable default argument (cbits=[]);
    # use None as the sentinel and fall back to a copy of `qubits`.
    if not cbits:
        cbits = qubits.copy()
    for i in range(0, n):
        qc.measure(qubits[i], cbits[i])
    if qtm.constant.noise_prob > 0:
        noise_model = qtm.noise.generate_noise_model(
            n, qtm.constant.noise_prob)
        results = qiskit.execute(qc, backend=qtm.constant.backend,
                                 noise_model=noise_model,
                                 shots=qtm.constant.num_shots).result()
        # Raw counts
        counts = results.get_counts()
        # Mitigating noise based on https://qiskit.org/textbook/ch-quantum-hardware/measurement-error-mitigation.html
        meas_filter = qtm.noise.generate_measurement_filter(
            n, noise_model=noise_model)
        # Mitigated counts
        counts = meas_filter.apply(counts.copy())
    else:
        counts = qiskit.execute(
            qc, backend=qtm.constant.backend,
            shots=qtm.constant.num_shots).result().get_counts()
    # Missing key means the all-zero outcome was never observed.
    return counts.get("0" * len(qubits), 0) / qtm.constant.num_shots
def x_measurement(qc: qiskit.QuantumCircuit, qubits, cbits=None):
    """Measure the given qubits in the X (Hadamard) basis.

    A Hadamard gate is applied before each measurement so that the Z-basis
    measurement effectively measures in the X basis.

    Args:
        - qc (qiskit.QuantumCircuit): circuit to measure (modified in place)
        - qubits (list): indices of the measured qubits
        - cbits (list, optional): classical bits; defaults to ``qubits``.

    Returns:
        - qiskit.QuantumCircuit: the circuit with the added gates
    """
    # Fix: avoid the mutable-default-argument anti-pattern (was cbits=[]).
    if not cbits:
        cbits = qubits.copy()
    for i in range(0, len(qubits)):
        qc.h(qubits[i])
        qc.measure(qubits[i], cbits[i])
    return qc
def y_measurement(qc: qiskit.QuantumCircuit, qubits, cbits=None):
    """Measure the given qubits in the Y basis.

    S-dagger followed by Hadamard rotates the Y basis onto the computational
    basis before each measurement.

    Args:
        - qc (qiskit.QuantumCircuit): circuit to measure (modified in place)
        - qubits (list): indices of the measured qubits
        - cbits (list, optional): classical bits; defaults to ``qubits``.

    Returns:
        - qiskit.QuantumCircuit: the circuit with the added gates
    """
    # Fix: avoid the mutable-default-argument anti-pattern (was cbits=[]).
    if not cbits:
        cbits = qubits.copy()
    for i in range(0, len(qubits)):
        qc.sdg(qubits[i])
        qc.h(qubits[i])
        qc.measure(qubits[i], cbits[i])
    return qc
def z_measurement(qc: qiskit.QuantumCircuit, qubits, cbits=None):
    """Measure the given qubits in the Z (computational) basis.

    Args:
        - qc (qiskit.QuantumCircuit): circuit to measure (modified in place)
        - qubits (list): indices of the measured qubits
        - cbits (list, optional): classical bits; defaults to ``qubits``.

    Returns:
        - qiskit.QuantumCircuit: the circuit with the added measure gates
    """
    # Fix: avoid the mutable-default-argument anti-pattern (was cbits=[]).
    if not cbits:
        cbits = qubits.copy()
    for i in range(0, len(qubits)):
        qc.measure(qubits[i], cbits[i])
    return qc
def get_u_hat(thetas: np.ndarray, create_circuit_func: types.FunctionType, num_qubits: int,
              **kwargs):
    """Build the inverse of the reconstructed circuit and return its state.

    Args:
        - thetas (np.ndarray): circuit parameters
        - create_circuit_func (types.FunctionType): ansatz constructor
        - num_qubits (int): number of qubits

    Returns:
        - qiskit.quantum_info.Statevector: state produced by the inverted circuit
    """
    circuit = qiskit.QuantumCircuit(num_qubits, num_qubits)
    # The two branches differ only in whether extra keyword arguments
    # are forwarded to the ansatz constructor.
    if kwargs:
        inverted = create_circuit_func(circuit, thetas, **kwargs).inverse()
    else:
        inverted = create_circuit_func(circuit, thetas).inverse()
    return qiskit.quantum_info.Statevector.from_instruction(inverted)
def get_cry_index(create_circuit_func: types.FunctionType, thetas: np.ndarray, num_qubits, **kwargs):
    """Return a list where i_th = 1 mean thetas[i] is parameter of CRY gate

    Builds the ansatz once, splits it into layers, and flags every
    parameterized gate: 1 for a controlled-RY ('cry') gate, 0 otherwise.

    Args:
        - create_circuit_func (types.FunctionType): The creating circuit function
        - thetas (np.ndarray): Parameters
        - num_qubits (int): number of qubits
        - **kwargs: additional parameters forwarded to create_circuit_func()

    Returns:
        - list: The index list has length equal with number of parameters
    """
    qc = qiskit.QuantumCircuit(num_qubits)
    qc = create_circuit_func(qc, thetas, **kwargs)
    # layer[1] is assumed to hold (gate_name, ...) tuples for that layer
    # -- TODO confirm against qtm.fubini_study.split_into_layers.
    layers = qtm.fubini_study.split_into_layers(qc)
    index_list = []
    for layer in layers:
        for gate in layer[1]:
            if gate[0] == 'cry':
                index_list.append(1)
            else:
                index_list.append(0)
            # Stop as soon as one flag per parameter has been collected;
            # any remaining gates are ignored.
            if len(index_list) == len(thetas):
                return index_list
    return index_list
def grad_loss(qc: qiskit.QuantumCircuit, create_circuit_func: types.FunctionType,
              thetas: np.ndarray, **kwargs):
    """Return the gradient of the loss function
    L = 1 - |<psi~|psi>|^2 = 1 - P_0
    => nabla_L = - nabla_P_0 = - r (P_0(+s) - P_0(-s))

    Plain rotation parameters use the two-term parameter-shift rule;
    controlled-RY parameters (as flagged by get_cry_index) use the
    four-term rule.

    Args:
        - qc (QuantumCircuit): The quantum circuit want to calculate the gradient
        - create_circuit_func (Function): The creating circuit function
        - thetas (np.ndarray): Parameters
        - **kwargs: additional parameters for different create_circuit_func()

    Returns:
        - np.ndarray: the gradient vector
    """
    # index_list[i] == 1 iff thetas[i] parameterizes a CRY gate.
    index_list = get_cry_index(create_circuit_func, thetas,
                               num_qubits=qc.num_qubits, **kwargs)
    grad_loss = np.zeros(len(thetas))
    for i in range(0, len(thetas)):
        if index_list[i] == 0:
            # In equation (13): two-term parameter-shift rule.
            thetas1, thetas2 = thetas.copy(), thetas.copy()
            thetas1[i] += qtm.constant.two_term_psr['s']
            thetas2[i] -= qtm.constant.two_term_psr['s']
            qc1 = create_circuit_func(qc.copy(), thetas1, **kwargs)
            qc2 = create_circuit_func(qc.copy(), thetas2, **kwargs)
            # measure() returns P_0; the loss gradient is its negative.
            grad_loss[i] = -qtm.constant.two_term_psr['r'] * (
                qtm.base.measure(qc1, list(range(qc1.num_qubits))) -
                qtm.base.measure(qc2, list(range(qc2.num_qubits))))
        if index_list[i] == 1:
            # In equation (14): four-term parameter-shift rule for
            # controlled rotations (two shift angles alpha and beta).
            thetas1, thetas2 = thetas.copy(), thetas.copy()
            thetas3, thetas4 = thetas.copy(), thetas.copy()
            thetas1[i] += qtm.constant.four_term_psr['alpha']
            thetas2[i] -= qtm.constant.four_term_psr['alpha']
            thetas3[i] += qtm.constant.four_term_psr['beta']
            thetas4[i] -= qtm.constant.four_term_psr['beta']
            qc1 = create_circuit_func(qc.copy(), thetas1, **kwargs)
            qc2 = create_circuit_func(qc.copy(), thetas2, **kwargs)
            qc3 = create_circuit_func(qc.copy(), thetas3, **kwargs)
            qc4 = create_circuit_func(qc.copy(), thetas4, **kwargs)
            grad_loss[i] = - (qtm.constant.four_term_psr['d_plus'] * (
                qtm.base.measure(qc1, list(range(qc1.num_qubits))) -
                qtm.base.measure(qc2, list(range(qc2.num_qubits)))) - qtm.constant.four_term_psr['d_minus'] * (
                qtm.base.measure(qc3, list(range(qc3.num_qubits))) -
                qtm.base.measure(qc4, list(range(qc4.num_qubits)))))
    return grad_loss
def grad_psi(qc: qiskit.QuantumCircuit, create_circuit_func: types.FunctionType,
             thetas: np.ndarray, r: float, s: float, **kwargs):
    """Return the derivative of |psi> via the parameter-shift rule.

    For each parameter, shifts it by ``s``, rebuilds the circuit, and scales
    the resulting statevector (as a column vector) by ``r``.

    Args:
        - qc (qiskit.QuantumCircuit): circuit
        - create_circuit_func (types.FunctionType): ansatz constructor
        - thetas (np.ndarray): parameters
        - r (float): parameter-shift scale factor
        - s (float): parameter-shift angle

    Returns:
        - np.ndarray: stacked column vectors, one per parameter
    """
    columns = []
    for idx in range(len(thetas)):
        shifted = thetas.copy()
        shifted[idx] += s
        shifted_qc = create_circuit_func(qc.copy(), shifted, **kwargs)
        state = qiskit.quantum_info.Statevector.from_instruction(shifted_qc).data
        columns.append(r * np.expand_dims(state, 1))
    return np.array(columns)
def fit_state_tomography(u: qiskit.QuantumCircuit,
                         create_vdagger_func: types.FunctionType,
                         thetas: np.ndarray,
                         num_steps: int,
                         loss_func: types.FunctionType,
                         optimizer: types.FunctionType,
                         verbose: int = 0,
                         is_return_all_thetas: bool = False,
                         **kwargs):
    """Return the new thetas that fit with the circuit from create_vdagger_func function

    Iteratively optimizes ``thetas`` so that the circuit produced by
    ``create_vdagger_func`` (appended to the fixed circuit ``u``) minimizes
    ``loss_func``.

    Args:
        - u (QuantumCircuit): fitting circuit
        - create_vdagger_func (types.FunctionType): added circuit function
        - thetas (np.ndarray): parameters
        - num_steps (Int): number of iterations
        - loss_func (types.FunctionType): loss function
        - create_vdagger_func receives **kwargs: additional parameters
        - optimizer (types.FunctionType): otimizer function
        - verbose (Int): the seeing level of the fitting process (0: nothing, 1: progress bar, 2: one line per step)
        - is_return_all_thetas (bool): if True, return the parameters of
          every step instead of only the final ones

    Returns:
        - thetas (np.ndarray): the optimized parameters
        - loss_values (np.ndarray): the list of loss_value
    """
    # Per-step history of parameters and losses.
    thetass = []
    loss_values = []
    if verbose == 1:
        bar = qtm.progress_bar.ProgressBar(max_value=num_steps, disable=False)
    for i in range(0, num_steps):
        # Gradient of the loss via the parameter-shift rules.
        grad_loss = qtm.base.grad_loss(u, create_vdagger_func, thetas, **kwargs)
        # Dispatch on the optimizer's function name.
        optimizer_name = optimizer.__name__
        if optimizer_name == 'sgd':
            thetas = qtm.optimizer.sgd(thetas, grad_loss)
        elif optimizer_name == 'adam':
            # Adam's moment estimates (m, v) are created once on the first
            # iteration and mutated in place across steps.
            if i == 0:
                m, v = list(np.zeros(thetas.shape[0])), list(
                    np.zeros(thetas.shape[0]))
            thetas = qtm.optimizer.adam(thetas, m, v, i, grad_loss)
        elif optimizer_name in ['qng_fubini_study', 'qng_qfim', 'qng_adam']:
            # Quantum-natural-gradient variants additionally need |psi>
            # and its parameter-shift derivative.
            # NOTE(review): r is taken from two_term_psr['s'] here, not
            # from a dedicated 'r' entry -- confirm intended.
            grad_psi1 = grad_psi(u,
                                 create_vdagger_func,
                                 thetas,
                                 r=qtm.constant.two_term_psr['s'],
                                 s=np.pi,
                                 **kwargs)
            u_copy = create_vdagger_func(u.copy(), thetas, **kwargs)
            psi = qiskit.quantum_info.Statevector.from_instruction(u_copy).data
            # Reshape the statevector into a column vector.
            psi = np.expand_dims(psi, 1)
            if optimizer_name == 'qng_fubini_study':
                G = qtm.fubini_study.qng(
                    u.copy(), thetas, create_vdagger_func, **kwargs)
                thetas = qtm.optimizer.qng_fubini_study(thetas, G, grad_loss)
            if optimizer_name == 'qng_qfim':
                thetas = qtm.optimizer.qng_qfim(
                    thetas, psi, grad_psi1, grad_loss)
            if optimizer_name == 'qng_adam':
                if i == 0:
                    m, v = list(np.zeros(thetas.shape[0])), list(
                        np.zeros(thetas.shape[0]))
                thetas = qtm.optimizer.qng_adam(
                    thetas, m, v, i, psi, grad_psi1, grad_loss)
        else:
            # Unknown name: treat `optimizer` as a plain update function.
            thetas = optimizer(thetas, grad_loss)
        # Evaluate the loss with the updated parameters.
        u_copy = create_vdagger_func(u.copy(), thetas, **kwargs)
        loss = loss_func(
            qtm.base.measure(u_copy, list(range(u_copy.num_qubits))))
        loss_values.append(loss)
        thetass.append(thetas.copy())
        if verbose == 1:
            bar.update(1)
        if verbose == 2 and i % 10 == 0:
            print("Step " + str(i) + ": " + str(loss))
    if verbose == 1:
        bar.close()
    if is_return_all_thetas:
        return thetass, loss_values
    else:
        return thetas, loss_values
def fit_state_preparation(create_u_func: types.FunctionType,
                          vdagger: qiskit.QuantumCircuit,
                          thetas: np.ndarray,
                          num_steps: int,
                          loss_func: types.FunctionType,
                          optimizer: types.FunctionType,
                          verbose: int = 0,
                          is_return_all_thetas: bool = False,
                          **kwargs):
    """Return the new thetas that fit with the circuit from create_u_func function

    Iteratively optimizes ``thetas`` so that the ansatz produced by
    ``create_u_func`` followed by the fixed circuit ``vdagger`` minimizes
    ``loss_func``.

    Args:
        - create_u_func (types.FunctionType): added circuit function
        - vdagger (QuantumCircuit): fitting circuit
        - thetas (np.ndarray): parameters
        - num_steps (Int): number of iterations
        - loss_func (types.FunctionType): loss function
        - optimizer (types.FunctionType): otimizer function
        - verbose (Int): the seeing level of the fitting process (0: nothing, 1: progress bar, 2: one line per step)
        - is_return_all_thetas (bool): if True, return the parameters of
          every step instead of only the final ones
        - **kwargs: additional parameters for create_circuit_func()

    Returns:
        - thetas (np.ndarray): the optimized parameters
        - loss_values (np.ndarray): the list of loss_value
    """
    if verbose == 1:
        bar = qtm.progress_bar.ProgressBar(max_value=num_steps, disable=False)
    # Per-step history of parameters and losses.
    thetass = []
    loss_values = []

    # Build U(thetas) on a fresh circuit and append the fixed vdagger.
    # NOTE(review): QuantumCircuit.combine is deprecated in recent Qiskit;
    # `compose` is the modern equivalent -- confirm the pinned qiskit version.
    def create_circuit_func(vdagger: qiskit.QuantumCircuit, thetas: np.ndarray, **kwargs):
        return create_u_func(qiskit.QuantumCircuit(vdagger.num_qubits, vdagger.num_qubits), thetas, **kwargs).combine(vdagger)

    for i in range(0, num_steps):
        # Gradient of the loss via the parameter-shift rules.
        grad_loss = qtm.base.grad_loss(vdagger, create_circuit_func, thetas, **kwargs)
        # Dispatch on the optimizer's function name.
        optimizer_name = optimizer.__name__
        if optimizer_name == 'sgd':
            thetas = qtm.optimizer.sgd(thetas, grad_loss)
        elif optimizer_name == 'adam':
            # Adam's moment estimates (m, v1) are created once on the
            # first iteration and mutated in place across steps.
            if i == 0:
                m, v1 = list(np.zeros(thetas.shape[0])), list(
                    np.zeros(thetas.shape[0]))
            thetas = qtm.optimizer.adam(thetas, m, v1, i, grad_loss)
        elif optimizer_name in ['qng_fubini_study', 'qng_qfim', 'qng_adam']:
            # Quantum-natural-gradient variants additionally need |psi>
            # and its parameter-shift derivative.
            grad_psi1 = grad_psi(vdagger,
                                 create_circuit_func,
                                 thetas,
                                 r=1 / 2,
                                 s=np.pi,
                                 **kwargs)
            v_copy = create_circuit_func(vdagger.copy(), thetas, **kwargs)
            psi = qiskit.quantum_info.Statevector.from_instruction(
                v_copy).data
            # Reshape the statevector into a column vector.
            psi = np.expand_dims(psi, 1)
            if optimizer_name == 'qng_fubini_study':
                G = qtm.fubini_study.qng(
                    vdagger.copy(), thetas, create_circuit_func, **kwargs)
                thetas = qtm.optimizer.qng_fubini_study(thetas, G, grad_loss)
            if optimizer_name == 'qng_qfim':
                thetas = qtm.optimizer.qng_qfim(
                    thetas, psi, grad_psi1, grad_loss)
            if optimizer_name == 'qng_adam':
                if i == 0:
                    m, v1 = list(np.zeros(thetas.shape[0])), list(
                        np.zeros(thetas.shape[0]))
                thetas = qtm.optimizer.qng_adam(
                    thetas, m, v1, i, psi, grad_psi1, grad_loss)
        else:
            # Unknown name: treat `optimizer` as a plain update function.
            thetas = optimizer(thetas, grad_loss)
        # Evaluate the loss with the updated parameters.
        v_copy = create_circuit_func(vdagger.copy(), thetas, **kwargs)
        loss = loss_func(
            qtm.base.measure(v_copy, list(range(v_copy.num_qubits))))
        loss_values.append(loss)
        thetass.append(thetas.copy())
        if verbose == 1:
            bar.update(1)
        if verbose == 2 and i % 10 == 0:
            print("Step " + str(i) + ": " + str(loss))
    if verbose == 1:
        bar.close()
    if is_return_all_thetas:
        return thetass, loss_values
    else:
        return thetas, loss_values
def fit(u: typing.Union[qiskit.QuantumCircuit, types.FunctionType], v: typing.Union[qiskit.QuantumCircuit, types.FunctionType],
        thetas: np.ndarray,
        num_steps: int,
        loss_func: types.FunctionType,
        optimizer: types.FunctionType,
        verbose: int = 0,
        is_return_all_thetas: bool = False,
        **kwargs):
    """Dispatch to the appropriate fitting routine.

    If ``u`` is callable it is treated as the state-preparation ansatz and
    ``v`` as the fixed circuit; otherwise ``u`` is the fixed circuit and
    ``v`` the ansatz (state tomography). All remaining arguments are
    forwarded unchanged.
    """
    shared = dict(thetas=thetas,
                  num_steps=num_steps,
                  loss_func=loss_func,
                  optimizer=optimizer,
                  verbose=verbose,
                  is_return_all_thetas=is_return_all_thetas)
    if callable(u):
        return fit_state_preparation(create_u_func=u, vdagger=v,
                                     **shared, **kwargs)
    return fit_state_tomography(u=u, create_vdagger_func=v,
                                **shared, **kwargs)
| [
"qiskit.execute",
"numpy.array",
"numpy.zeros",
"qiskit.quantum_info.Statevector.from_instruction",
"numpy.expand_dims",
"qiskit.QuantumCircuit"
] | [((3447, 3492), 'qiskit.QuantumCircuit', 'qiskit.QuantumCircuit', (['num_qubits', 'num_qubits'], {}), '(num_qubits, num_qubits)\n', (3468, 3492), False, 'import qiskit\n'), ((3653, 3705), 'qiskit.quantum_info.Statevector.from_instruction', 'qiskit.quantum_info.Statevector.from_instruction', (['qc'], {}), '(qc)\n', (3701, 3705), False, 'import qiskit\n'), ((4116, 4149), 'qiskit.QuantumCircuit', 'qiskit.QuantumCircuit', (['num_qubits'], {}), '(num_qubits)\n', (4137, 4149), False, 'import qiskit\n'), ((7952, 7974), 'numpy.array', 'np.array', (['gradient_psi'], {}), '(gradient_psi)\n', (7960, 7974), True, 'import numpy as np\n'), ((7867, 7892), 'numpy.expand_dims', 'np.expand_dims', (['psi_qc', '(1)'], {}), '(psi_qc, 1)\n', (7881, 7892), True, 'import numpy as np\n'), ((7787, 7844), 'qiskit.quantum_info.Statevector.from_instruction', 'qiskit.quantum_info.Statevector.from_instruction', (['qc_copy'], {}), '(qc_copy)\n', (7835, 7844), False, 'import qiskit\n'), ((761, 868), 'qiskit.execute', 'qiskit.execute', (['qc'], {'backend': 'qtm.constant.backend', 'noise_model': 'noise_model', 'shots': 'qtm.constant.num_shots'}), '(qc, backend=qtm.constant.backend, noise_model=noise_model,\n shots=qtm.constant.num_shots)\n', (775, 868), False, 'import qiskit\n'), ((10365, 10387), 'numpy.expand_dims', 'np.expand_dims', (['psi', '(1)'], {}), '(psi, 1)\n', (10379, 10387), True, 'import numpy as np\n'), ((13144, 13205), 'qiskit.QuantumCircuit', 'qiskit.QuantumCircuit', (['vdagger.num_qubits', 'vdagger.num_qubits'], {}), '(vdagger.num_qubits, vdagger.num_qubits)\n', (13165, 13205), False, 'import qiskit\n'), ((14276, 14298), 'numpy.expand_dims', 'np.expand_dims', (['psi', '(1)'], {}), '(psi, 1)\n', (14290, 14298), True, 'import numpy as np\n'), ((1322, 1400), 'qiskit.execute', 'qiskit.execute', (['qc'], {'backend': 'qtm.constant.backend', 'shots': 'qtm.constant.num_shots'}), '(qc, backend=qtm.constant.backend, shots=qtm.constant.num_shots)\n', (1336, 1400), False, 'import qiskit\n'), 
((10285, 10341), 'qiskit.quantum_info.Statevector.from_instruction', 'qiskit.quantum_info.Statevector.from_instruction', (['u_copy'], {}), '(u_copy)\n', (10333, 10341), False, 'import qiskit\n'), ((14179, 14235), 'qiskit.quantum_info.Statevector.from_instruction', 'qiskit.quantum_info.Statevector.from_instruction', (['v_copy'], {}), '(v_copy)\n', (14227, 14235), False, 'import qiskit\n'), ((9688, 9713), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (9696, 9713), True, 'import numpy as np\n'), ((9742, 9767), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (9750, 9767), True, 'import numpy as np\n'), ((13594, 13619), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (13602, 13619), True, 'import numpy as np\n'), ((13648, 13673), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (13656, 13673), True, 'import numpy as np\n'), ((10883, 10908), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (10891, 10908), True, 'import numpy as np\n'), ((10941, 10966), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (10949, 10966), True, 'import numpy as np\n'), ((14803, 14828), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (14811, 14828), True, 'import numpy as np\n'), ((14861, 14886), 'numpy.zeros', 'np.zeros', (['thetas.shape[0]'], {}), '(thetas.shape[0])\n', (14869, 14886), True, 'import numpy as np\n')] |
from datetime import datetime, date
from marqeta.response_models.result import Result
from marqeta.response_models.kyc_question import KycQuestion
from marqeta.response_models import datetime_object
import json
import re
class KycResponse(object):
    """Read-only wrapper over a KYC JSON response payload."""

    def __init__(self, json_response):
        self.json_response = json_response

    def __str__(self):
        return json.dumps(self.json_response, default=self.json_serial)

    @staticmethod
    def json_serial(o):
        # datetime/date values are rendered via their str() form.
        if isinstance(o, (datetime, date)):
            return o.__str__()

    @property
    def created_time(self):
        if 'created_time' in self.json_response:
            return datetime_object('created_time', self.json_response)

    @property
    def last_modified_time(self):
        if 'last_modified_time' in self.json_response:
            return datetime_object('last_modified_time', self.json_response)

    @property
    def token(self):
        return self.json_response.get('token')

    @property
    def user_token(self):
        return self.json_response.get('user_token')

    @property
    def business_token(self):
        return self.json_response.get('business_token')

    @property
    def result(self):
        if 'result' in self.json_response:
            return Result(self.json_response['result'])

    @property
    def manual_override(self):
        return self.json_response.get('manual_override')

    @property
    def notes(self):
        return self.json_response.get('notes')

    @property
    def questions(self):
        if 'questions' in self.json_response:
            return [KycQuestion(question) for question in self.json_response['questions']]

    @property
    def reference_id(self):
        return self.json_response.get('reference_id')

    def __repr__(self):
        return '<Marqeta.response_models.kyc_response.KycResponse>' + self.__str__()
| [
"marqeta.response_models.datetime_object",
"json.dumps",
"marqeta.response_models.result.Result",
"marqeta.response_models.kyc_question.KycQuestion"
] | [((371, 427), 'json.dumps', 'json.dumps', (['self.json_response'], {'default': 'self.json_serial'}), '(self.json_response, default=self.json_serial)\n', (381, 427), False, 'import json\n'), ((672, 723), 'marqeta.response_models.datetime_object', 'datetime_object', (['"""created_time"""', 'self.json_response'], {}), "('created_time', self.json_response)\n", (687, 723), False, 'from marqeta.response_models import datetime_object\n'), ((848, 905), 'marqeta.response_models.datetime_object', 'datetime_object', (['"""last_modified_time"""', 'self.json_response'], {}), "('last_modified_time', self.json_response)\n", (863, 905), False, 'from marqeta.response_models import datetime_object\n'), ((1304, 1340), 'marqeta.response_models.result.Result', 'Result', (["self.json_response['result']"], {}), "(self.json_response['result'])\n", (1310, 1340), False, 'from marqeta.response_models.result import Result\n'), ((1646, 1662), 'marqeta.response_models.kyc_question.KycQuestion', 'KycQuestion', (['val'], {}), '(val)\n', (1657, 1662), False, 'from marqeta.response_models.kyc_question import KycQuestion\n')] |
#!/usr/bin/env python3
"""
Easy to use Websocket Server.
Source: https://github.com/rharder/handy
June 2018 - Updated for aiohttp v3.3
August 2018 - Updated for Python 3.7, made WebServer support multiple routes on one port
"""
import asyncio
import logging
import weakref
from functools import partial
from typing import Dict, Set, List
import aiohttp # pip install aiohttp
from aiohttp import web
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "Public Domain"
class WebServer:
"""Hosts a web/websocket server on a given port and responds to multiple routes
(relative urls) at that address.
Source: https://github.com/rharder/handy
Author: <NAME>
License: Public Domain
"""
def __init__(self, host: str = None, port: int = None, ssl_context=None):
"""
Create a new WebServer that will listen on the given port.
:param port: The port on which to listen
"""
super().__init__()
self.log = logging.getLogger(__name__ + '.' + self.__class__.__name__)
# Passed parameters
self.host: str = host
self.port: int = port
self.ssl_context = ssl_context
# Internal use
self.app: web.Application = None
self.site: web.TCPSite = None
self.runner: web.AppRunner = None
self.route_handlers: Dict[str, WebHandler] = {}
self._running: bool = False
self._shutting_down: bool = False
self._starting_up: bool = False
def __str__(self):
routes = ", ".join(self.route_handlers.keys())
return "{}({}:({})".format(self.__class__.__name__, self.port, routes)
@property
def running(self):
return self._running
@property
def starting_up(self):
return self._starting_up
@property
def shutting_down(self):
return self._shutting_down
async def start(self):
"""
Starts the websocket server and begins listening. This function returns
with the server continuing to listen (non-blocking).
:return: None
"""
if self.starting_up or self.running:
raise Exception("Cannot start server when it is already running.")
self._starting_up = True
self.app = web.Application()
self.app['requests'] = [] # type: List[web.BaseRequest]
self.app.on_shutdown.append(self._on_shutdown)
# Connect routes
for route in self.route_handlers.keys():
self.app.router.add_get(route, partial(self.incoming_http_handler, route))
self.runner = web.AppRunner(self.app)
await self.runner.setup()
self.site = web.TCPSite(self.runner, port=self.port, host=self.host, ssl_context=self.ssl_context)
await self.site.start()
self._running = True
self._starting_up = False
async def shutdown(self):
if not self.running:
raise Exception("Cannot close server that is not running.")
if self.shutting_down:
pass
else:
self._shutting_down = True
await self.runner.cleanup()
async def _on_shutdown(self, app: web.Application):
self.close_current_connections()
self._running = False
self._shutting_down = False
def close_current_connections(self):
for x in self.app["requests"]:
if x is not None and x.transport is not None:
x.transport.close()
def add_route(self, route: str, handler):
if self.running:
raise RuntimeError("Cannot add a route after server is already running.")
self.route_handlers[route] = handler
async def incoming_http_handler(self, route: str, request: web.BaseRequest):
self.app['requests'].append(request)
try:
resp = await self.route_handlers[route].on_incoming_http(route, request)
finally:
self.app['requests'].remove(request)
return resp
class WebHandler:
async def on_incoming_http(self, route: str, request: web.BaseRequest):
return web.Response(body=str(self.__class__.__name__))
class WebsocketHandler(WebHandler):
def __init__(self, *kargs, **kwargs):
super().__init__(*kargs, **kwargs)
self.websockets: Set[web.WebSocketResponse] = weakref.WeakSet()
async def broadcast_json(self, msg):
""" Converts msg to json and broadcasts the json data to all connected clients. """
await self._broadcast(msg, web.WebSocketResponse.send_json)
async def broadcast_text(self, msg: str):
""" Broadcasts a string to all connected clients. """
await self._broadcast(msg, web.WebSocketResponse.send_str)
async def broadcast_bytes(self, msg: bytes):
""" Broadcasts bytes to all connected clients. """
await self._broadcast(msg, web.WebSocketResponse.send_bytes)
async def _broadcast(self, msg, func: callable):
for ws in set(self.websockets): # type: web.WebSocketResponse
await func(ws, msg)
async def close_websockets(self):
"""Closes all active websockets for this handler."""
ws_closers = [ws.close() for ws in set(self.websockets) if not ws.closed]
ws_closers and await asyncio.gather(*ws_closers)
async def on_incoming_http(self, route: str, request: web.BaseRequest):
"""Handles the incoming http(s) request and converts it to a WebSocketResponse.
This method is not meant to be overridden when subclassed.
"""
ws = web.WebSocketResponse()
self.websockets.add(ws)
try:
await ws.prepare(request)
await self.on_websocket(route, ws)
finally:
self.websockets.discard(ws)
return ws
async def on_websocket(self, route: str, ws: web.WebSocketResponse):
"""
Override this function if you want to handle new incoming websocket clients.
The default behavior is to listen indefinitely for incoming messages from clients
and call on_message() with each one.
If you override on_websocket and have your own loop to receive and process messages,
you may also need an await asyncio.sleep(0) line to avoid an infinite loop with the
websocket close message.
Example:
while not ws.closed:
ws_msg = await ws.receive()
await asyncio.sleep(0)
...
"""
try:
while not ws.closed:
ws_msg = await ws.receive() # type: aiohttp.WSMessage
await self.on_message(route=route, ws=ws, ws_msg_from_client=ws_msg)
# If you override on_websocket and have your own loop
# to receive and process messages, you may also need
# this await asyncio.sleep(0) line to avoid an infinite
# loop with the websocket close message.
await asyncio.sleep(0) # Need to yield control back to event loop
except RuntimeError as e: # Socket closing throws RuntimeError
print("RuntimeError - did socket close?", e, flush=True)
pass
finally:
await self.on_close(route, ws)
async def on_message(self, route: str, ws: web.WebSocketResponse, ws_msg_from_client: aiohttp.WSMessage):
""" Override this function to handle incoming messages from websocket clients. """
pass
async def on_close(self, route: str, ws: web.WebSocketResponse):
""" Override this function to handle a websocket having closed. """
pass
| [
"logging.getLogger",
"asyncio.sleep",
"weakref.WeakSet",
"aiohttp.web.Application",
"aiohttp.web.AppRunner",
"aiohttp.web.TCPSite",
"functools.partial",
"asyncio.gather",
"aiohttp.web.WebSocketResponse"
] | [((986, 1045), 'logging.getLogger', 'logging.getLogger', (["(__name__ + '.' + self.__class__.__name__)"], {}), "(__name__ + '.' + self.__class__.__name__)\n", (1003, 1045), False, 'import logging\n'), ((2268, 2285), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (2283, 2285), False, 'from aiohttp import web\n'), ((2591, 2614), 'aiohttp.web.AppRunner', 'web.AppRunner', (['self.app'], {}), '(self.app)\n', (2604, 2614), False, 'from aiohttp import web\n'), ((2669, 2760), 'aiohttp.web.TCPSite', 'web.TCPSite', (['self.runner'], {'port': 'self.port', 'host': 'self.host', 'ssl_context': 'self.ssl_context'}), '(self.runner, port=self.port, host=self.host, ssl_context=self.\n ssl_context)\n', (2680, 2760), False, 'from aiohttp import web\n'), ((4317, 4334), 'weakref.WeakSet', 'weakref.WeakSet', ([], {}), '()\n', (4332, 4334), False, 'import weakref\n'), ((5545, 5568), 'aiohttp.web.WebSocketResponse', 'web.WebSocketResponse', ([], {}), '()\n', (5566, 5568), False, 'from aiohttp import web\n'), ((2524, 2566), 'functools.partial', 'partial', (['self.incoming_http_handler', 'route'], {}), '(self.incoming_http_handler, route)\n', (2531, 2566), False, 'from functools import partial\n'), ((5259, 5286), 'asyncio.gather', 'asyncio.gather', (['*ws_closers'], {}), '(*ws_closers)\n', (5273, 5286), False, 'import asyncio\n'), ((6963, 6979), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (6976, 6979), False, 'import asyncio\n')] |
#!/usr/bin/env python
#coding=utf-8
import os
import sys
import json
import time
import urllib2
import platform
import ConfigParser
UPDATE_CONFIG = 0
def checkplay(remotedir,workdir):
global UPDATE_CONFIG
if not remotedir.endswith('/'):
remotedir = remotedir + '/'
orgcode = UPDATE_CONFIG.get('global', 'orgcode')
projectdir = remotedir + orgcode
syncservice = UPDATE_CONFIG.get('BusSync', 'servicename')
saleservice = UPDATE_CONFIG.get('BusSale', 'servicename')
port = UPDATE_CONFIG.get('BusSale', 'port')
nginx = UPDATE_CONFIG.get('BusSale', 'nginx')
ngconf = UPDATE_CONFIG.get('BusSale', 'ngconf')
delay = UPDATE_CONFIG.get('BusSale', 'delay')
delay = float(delay)
remotedir = remotedir + 'play-1.2.3/'
localdir = os.path.join(workdir,'play-1.2.3')
play = os.path.join(localdir,'play')
rverfile = remotedir + 'version'
rmd5file = remotedir + 'filemd5'
lverfile = os.path.join(localdir,'version')
lmd5file = os.path.join(localdir,'filemd5')
fileloop = os.path.join(localdir,'FileLoop')
while True:
if not os.path.isdir(localdir):
os.makedirs(localdir)
ls = '0'
rs = '1';
try:
rs = urllib2.urlopen(rverfile).read()
except:
printf ('Can\'t find remote version file:%s,wait for next time!' % rverfile)
time.sleep(60)
continue
try:
ls = open(lverfile).read()
except:
printf ('Can\'t find local version file:%s' % lverfile)
printf ('开始生成本地MD5文件')
try:
os.chdir(localdir)
os.system('python %s' % fileloop)
time.sleep(10)
except:
printf('生成本地MD5文件失败,再说...')
if rs.strip() == ls.strip() :
printf (time.strftime('%Y-%m-%d %H:%M:%S')+':play暂无更新,当前版本号为:%s' % (ls))
else:
printf ('发现新版本,新版本号为:%s' % rs)
printf ('开始解析差异文件:%s' % rmd5file)
remotestr = ''
try:
remotestr = urllib2.urlopen(rmd5file).read()
except:
printf ('无法找到远程md5文件,请检查服务端目录或通过浏览器查看文件是否存在:%s' % rmd5file)
printf ('等待60秒后,重新尝试更新!')
time.sleep(60)
continue
remotedict = json.loads(remotestr)
remotekeys = set(remotedict.keys())
localstr = ''
localdict = {'':''}
try:
localstr = open(lmd5file).read()
except:
printf ('Can\'t find local md5 file:%s' % lmd5file)
try:
localdict = json.loads(localstr)
except:
printf ('Can\'t load md5 file as json:%s' % lmd5file)
localkeys = set(localdict.keys())
printf ('同步删除中..')
localdiff = localkeys-remotekeys
for local in localdiff:
lpath = localdict[local].replace('/',os.path.sep)
filepath = os.path.join(localdir,lpath)
removefile(filepath)
continue
printf ('同步更新中..')
remotediff = remotekeys-localkeys
for remote in remotediff:
rpath = remotedict[remote]
remotepath = remotedir + rpath
filepath = os.path.join(localdir,rpath.replace('/',os.path.sep))
addfile(remotepath,filepath)
continue
if(platform.system() == 'Linux'):
os.system('chmod 744 %s' % play)
printf (time.strftime('%Y-%m-%d %H:%M:%S')+':play由%s版本更新至%s版本成功!' % (ls,rs))
checksync(projectdir,workdir,'BusSync',play,syncservice)
checksale(projectdir,workdir,'BusSale',play,port,nginx,ngconf,delay,saleservice)
time.sleep(60)
def removefile(filepath):
parentdir = os.path.dirname(filepath)
try:
os.remove(filepath)
printf ('del:%s' % filepath)
except:
printf ('already del : %s' % filepath)
try:
filelist = os.listdir(parentdir)
if len(filelist) == 0 :
try:
os.rmdir(parentdir)
printf ('deldir:%s' % parentdir)
except:
printf ('already deldir : %s' % parentdir)
except:
printf ('%s not exist' % parentdir)
def addfile(remotepath,filepath):
dirname = os.path.dirname(filepath)
if not os.path.exists(dirname):
os.makedirs(dirname)
printf ('mkdir:%s' % dirname)
try:
downloadFile(remotepath,filepath)
printf ('add:%s' % filepath)
except:
printf ('failed:%s' % remotepath)
if __name__ == '__main__':
if(platform.system() == 'Linux'):
cmd = "ps aux|grep %s|awk '{print $2}'" % __file__
pid = os.getpid()
for s in os.popen(cmd).readlines():
if pid != int(s):
os.popen('kill %d' % int(s))
config = ConfigParser.ConfigParser()
with open('update.conf') as conf:
config.readfp(conf)
UPDATE_CONFIG = config
remotedir = config.get('global','remotedir')
workdir = config.get('global','workdir')
pymdir = os.path.join(workdir,'pym')
sys.path.append(pymdir)
from pyutil import printf,downloadFile
from checksync import checksync
from checksale import checksale
checkplay(remotedir,workdir) | [
"time.sleep",
"ConfigParser.ConfigParser",
"sys.path.append",
"os.remove",
"os.path.exists",
"os.listdir",
"urllib2.urlopen",
"platform.system",
"os.path.isdir",
"os.popen",
"os.getpid",
"json.loads",
"pyutil.downloadFile",
"checksale.checksale",
"os.path.dirname",
"checksync.checksync... | [((814, 849), 'os.path.join', 'os.path.join', (['workdir', '"""play-1.2.3"""'], {}), "(workdir, 'play-1.2.3')\n", (826, 849), False, 'import os\n'), ((865, 895), 'os.path.join', 'os.path.join', (['localdir', '"""play"""'], {}), "(localdir, 'play')\n", (877, 895), False, 'import os\n'), ((989, 1022), 'os.path.join', 'os.path.join', (['localdir', '"""version"""'], {}), "(localdir, 'version')\n", (1001, 1022), False, 'import os\n'), ((1037, 1070), 'os.path.join', 'os.path.join', (['localdir', '"""filemd5"""'], {}), "(localdir, 'filemd5')\n", (1049, 1070), False, 'import os\n'), ((1090, 1124), 'os.path.join', 'os.path.join', (['localdir', '"""FileLoop"""'], {}), "(localdir, 'FileLoop')\n", (1102, 1124), False, 'import os\n'), ((3933, 3958), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (3948, 3958), False, 'import os\n'), ((4460, 4485), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (4475, 4485), False, 'import os\n'), ((5024, 5051), 'ConfigParser.ConfigParser', 'ConfigParser.ConfigParser', ([], {}), '()\n', (5049, 5051), False, 'import ConfigParser\n'), ((5267, 5295), 'os.path.join', 'os.path.join', (['workdir', '"""pym"""'], {}), "(workdir, 'pym')\n", (5279, 5295), False, 'import os\n'), ((5299, 5322), 'sys.path.append', 'sys.path.append', (['pymdir'], {}), '(pymdir)\n', (5314, 5322), False, 'import sys\n'), ((3721, 3781), 'checksync.checksync', 'checksync', (['projectdir', 'workdir', '"""BusSync"""', 'play', 'syncservice'], {}), "(projectdir, workdir, 'BusSync', play, syncservice)\n", (3730, 3781), False, 'from checksync import checksync\n'), ((3786, 3878), 'checksale.checksale', 'checksale', (['projectdir', 'workdir', '"""BusSale"""', 'play', 'port', 'nginx', 'ngconf', 'delay', 'saleservice'], {}), "(projectdir, workdir, 'BusSale', play, port, nginx, ngconf, delay,\n saleservice)\n", (3795, 3878), False, 'from checksale import checksale\n'), ((3875, 3889), 'time.sleep', 'time.sleep', 
(['(60)'], {}), '(60)\n', (3885, 3889), False, 'import time\n'), ((3976, 3995), 'os.remove', 'os.remove', (['filepath'], {}), '(filepath)\n', (3985, 3995), False, 'import os\n'), ((4004, 4031), 'pyutil.printf', 'printf', (["('del:%s' % filepath)"], {}), "('del:%s' % filepath)\n", (4010, 4031), False, 'from pyutil import printf, downloadFile\n'), ((4120, 4141), 'os.listdir', 'os.listdir', (['parentdir'], {}), '(parentdir)\n', (4130, 4141), False, 'import os\n'), ((4497, 4520), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (4511, 4520), False, 'import os\n'), ((4530, 4550), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (4541, 4550), False, 'import os\n'), ((4559, 4587), 'pyutil.printf', 'printf', (["('mkdir:%s' % dirname)"], {}), "('mkdir:%s' % dirname)\n", (4565, 4587), False, 'from pyutil import printf, downloadFile\n'), ((4606, 4640), 'pyutil.downloadFile', 'downloadFile', (['remotepath', 'filepath'], {}), '(remotepath, filepath)\n', (4618, 4640), False, 'from pyutil import printf, downloadFile\n'), ((4648, 4675), 'pyutil.printf', 'printf', (["('add:%s' % filepath)"], {}), "('add:%s' % filepath)\n", (4654, 4675), False, 'from pyutil import printf, downloadFile\n'), ((4771, 4788), 'platform.system', 'platform.system', ([], {}), '()\n', (4786, 4788), False, 'import platform\n'), ((4875, 4886), 'os.getpid', 'os.getpid', ([], {}), '()\n', (4884, 4886), False, 'import os\n'), ((1160, 1183), 'os.path.isdir', 'os.path.isdir', (['localdir'], {}), '(localdir)\n', (1173, 1183), False, 'import os\n'), ((1197, 1218), 'os.makedirs', 'os.makedirs', (['localdir'], {}), '(localdir)\n', (1208, 1218), False, 'import os\n'), ((1989, 2018), 'pyutil.printf', 'printf', (["('发现新版本,新版本号为:%s' % rs)"], {}), "('发现新版本,新版本号为:%s' % rs)\n", (1995, 2018), False, 'from pyutil import printf, downloadFile\n'), ((2032, 2064), 'pyutil.printf', 'printf', (["('开始解析差异文件:%s' % rmd5file)"], {}), "('开始解析差异文件:%s' % rmd5file)\n", (2038, 2064), False, 'from pyutil 
import printf, downloadFile\n'), ((2390, 2411), 'json.loads', 'json.loads', (['remotestr'], {}), '(remotestr)\n', (2400, 2411), False, 'import json\n'), ((2888, 2905), 'pyutil.printf', 'printf', (['"""同步删除中.."""'], {}), "('同步删除中..')\n", (2894, 2905), False, 'from pyutil import printf, downloadFile\n'), ((3185, 3202), 'pyutil.printf', 'printf', (['"""同步更新中.."""'], {}), "('同步更新中..')\n", (3191, 3202), False, 'from pyutil import printf, downloadFile\n'), ((4053, 4090), 'pyutil.printf', 'printf', (["('already del : %s' % filepath)"], {}), "('already del : %s' % filepath)\n", (4059, 4090), False, 'from pyutil import printf, downloadFile\n'), ((4375, 4409), 'pyutil.printf', 'printf', (["('%s not exist' % parentdir)"], {}), "('%s not exist' % parentdir)\n", (4381, 4409), False, 'from pyutil import printf, downloadFile\n'), ((4697, 4729), 'pyutil.printf', 'printf', (["('failed:%s' % remotepath)"], {}), "('failed:%s' % remotepath)\n", (4703, 4729), False, 'from pyutil import printf, downloadFile\n'), ((1345, 1419), 'pyutil.printf', 'printf', (['("Can\'t find remote version file:%s,wait for next time!" % rverfile)'], {}), '("Can\'t find remote version file:%s,wait for next time!" 
% rverfile)\n', (1351, 1419), False, 'from pyutil import printf, downloadFile\n'), ((1434, 1448), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (1444, 1448), False, 'import time\n'), ((1550, 1603), 'pyutil.printf', 'printf', (['("Can\'t find local version file:%s" % lverfile)'], {}), '("Can\'t find local version file:%s" % lverfile)\n', (1556, 1603), False, 'from pyutil import printf, downloadFile\n'), ((1618, 1639), 'pyutil.printf', 'printf', (['"""开始生成本地MD5文件"""'], {}), "('开始生成本地MD5文件')\n", (1624, 1639), False, 'from pyutil import printf, downloadFile\n'), ((2718, 2738), 'json.loads', 'json.loads', (['localstr'], {}), '(localstr)\n', (2728, 2738), False, 'import json\n'), ((3081, 3110), 'os.path.join', 'os.path.join', (['localdir', 'lpath'], {}), '(localdir, lpath)\n', (3093, 3110), False, 'import os\n'), ((3544, 3561), 'platform.system', 'platform.system', ([], {}), '()\n', (3559, 3561), False, 'import platform\n'), ((3591, 3623), 'os.system', 'os.system', (["('chmod 744 %s' % play)"], {}), "('chmod 744 %s' % play)\n", (3600, 3623), False, 'import os\n'), ((4207, 4226), 'os.rmdir', 'os.rmdir', (['parentdir'], {}), '(parentdir)\n', (4215, 4226), False, 'import os\n'), ((4243, 4274), 'pyutil.printf', 'printf', (["('deldir:%s' % parentdir)"], {}), "('deldir:%s' % parentdir)\n", (4249, 4274), False, 'from pyutil import printf, downloadFile\n'), ((4904, 4917), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (4912, 4917), False, 'import os\n'), ((1284, 1309), 'urllib2.urlopen', 'urllib2.urlopen', (['rverfile'], {}), '(rverfile)\n', (1299, 1309), False, 'import urllib2\n'), ((1674, 1692), 'os.chdir', 'os.chdir', (['localdir'], {}), '(localdir)\n', (1682, 1692), False, 'import os\n'), ((1709, 1742), 'os.system', 'os.system', (["('python %s' % fileloop)"], {}), "('python %s' % fileloop)\n", (1718, 1742), False, 'import os\n'), ((1759, 1773), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1769, 1773), False, 'import time\n'), ((1898, 1932), 
'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (1911, 1932), False, 'import time\n'), ((2207, 2265), 'pyutil.printf', 'printf', (["('无法找到远程md5文件,请检查服务端目录或通过浏览器查看文件是否存在:%s' % rmd5file)"], {}), "('无法找到远程md5文件,请检查服务端目录或通过浏览器查看文件是否存在:%s' % rmd5file)\n", (2213, 2265), False, 'from pyutil import printf, downloadFile\n'), ((2283, 2307), 'pyutil.printf', 'printf', (['"""等待60秒后,重新尝试更新!"""'], {}), "('等待60秒后,重新尝试更新!')\n", (2289, 2307), False, 'from pyutil import printf, downloadFile\n'), ((2325, 2339), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (2335, 2339), False, 'import time\n'), ((2621, 2670), 'pyutil.printf', 'printf', (['("Can\'t find local md5 file:%s" % lmd5file)'], {}), '("Can\'t find local md5 file:%s" % lmd5file)\n', (2627, 2670), False, 'from pyutil import printf, downloadFile\n'), ((2775, 2826), 'pyutil.printf', 'printf', (['("Can\'t load md5 file as json:%s" % lmd5file)'], {}), '("Can\'t load md5 file as json:%s" % lmd5file)\n', (2781, 2826), False, 'from pyutil import printf, downloadFile\n'), ((3644, 3678), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (3657, 3678), False, 'import time\n'), ((4312, 4353), 'pyutil.printf', 'printf', (["('already deldir : %s' % parentdir)"], {}), "('already deldir : %s' % parentdir)\n", (4318, 4353), False, 'from pyutil import printf, downloadFile\n'), ((1810, 1837), 'pyutil.printf', 'printf', (['"""生成本地MD5文件失败,再说..."""'], {}), "('生成本地MD5文件失败,再说...')\n", (1816, 1837), False, 'from pyutil import printf, downloadFile\n'), ((2138, 2163), 'urllib2.urlopen', 'urllib2.urlopen', (['rmd5file'], {}), '(rmd5file)\n', (2153, 2163), False, 'import urllib2\n')] |
import pytest
import os
import LabWeaver_analysis as lw_ana
DB_DIR = os.path.abspath("tests/assets/db")
@pytest.fixture
def existing_records_db():
db_path = os.path.join(DB_DIR, "records_existing.db")
return lw_ana.RecordsDB(db_path)
def test_fetch(existing_records_db):
fetched_record = existing_records_db.filter_records(
("experiment = 'X2021-03-17'",
"meas_id = '0001'"))[0]
assert fetched_record == {"experiment": "X2021-03-17",
"meas_id": "0001",
"cooldown": "CDX1",
"meas_type": "Qubit Rabi amplitude"}
def test_fetch_multiple(existing_records_db):
fetched_records = existing_records_db.filter_records(
("cooldown = 'CDX1'",))
assert fetched_records == [{"experiment": "X2021-03-17",
"meas_id": "0001",
"cooldown": "CDX1",
"meas_type": "Qubit Rabi amplitude"},
{"experiment": "X2021-03-17",
"meas_id": "0002",
"cooldown": "CDX1",
"meas_type": "Qubit Ramsey"}]
@pytest.fixture
def new_records_db(scope="function"):
db_path = os.path.join(DB_DIR, "records_temp.db")
## Return as yield to allow for teardown/destructor
yield lw_ana.RecordsDB(db_path)
## Teardown - delete temp file
os.remove(db_path)
@pytest.fixture
def new_single_record_db(new_records_db, scope="function"):
## Add a new record to the database
new_record = {"experiment": "X2021-03-17",
"meas_id": "0001",
"cooldown": "CDX1",
"meas_type": "Qubit Rabi amplitude"}
new_records_db.add_record(new_record)
yield new_records_db
def test_create_assign_fetch(new_single_record_db):
## Fetch the record by uids and compare
fetched_record = new_single_record_db.filter_records(
("experiment = 'X2021-03-17'",
"meas_id = '0001'"))[0]
assert fetched_record == {"experiment": "X2021-03-17",
"meas_id": "0001",
"cooldown": "CDX1",
"meas_type": "Qubit Rabi amplitude"}
def test_create_assign_delete(new_single_record_db):
## Delete record
new_single_record_db.delete_record(("X2021-03-17", "0001"))
## Ensure no records are left
assert new_single_record_db.head() == []
| [
"os.path.abspath",
"LabWeaver_analysis.RecordsDB",
"os.path.join",
"os.remove"
] | [((72, 106), 'os.path.abspath', 'os.path.abspath', (['"""tests/assets/db"""'], {}), "('tests/assets/db')\n", (87, 106), False, 'import os\n'), ((163, 206), 'os.path.join', 'os.path.join', (['DB_DIR', '"""records_existing.db"""'], {}), "(DB_DIR, 'records_existing.db')\n", (175, 206), False, 'import os\n'), ((215, 240), 'LabWeaver_analysis.RecordsDB', 'lw_ana.RecordsDB', (['db_path'], {}), '(db_path)\n', (231, 240), True, 'import LabWeaver_analysis as lw_ana\n'), ((1090, 1129), 'os.path.join', 'os.path.join', (['DB_DIR', '"""records_temp.db"""'], {}), "(DB_DIR, 'records_temp.db')\n", (1102, 1129), False, 'import os\n'), ((1249, 1267), 'os.remove', 'os.remove', (['db_path'], {}), '(db_path)\n', (1258, 1267), False, 'import os\n'), ((1190, 1215), 'LabWeaver_analysis.RecordsDB', 'lw_ana.RecordsDB', (['db_path'], {}), '(db_path)\n', (1206, 1215), True, 'import LabWeaver_analysis as lw_ana\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import random
import sys
from collections import Counter
import json
from argparse import ArgumentParser
from rand_utils import rand_partition
def build_tree(num_leaves = 10, rootdate = 1000):
"""
Starting from a three-node tree, split a randomly chosen branch to insert a new child
TODO: replace this with a coalescent method
"""
def _get_target_node_by_total_time(node, r):
interval1 = (node["date"] - node["left"]["date"]) * node["left"]["stability"]
if interval1 > r:
return node, True, r
r -= interval1
if node["left"]["left"] is not None:
node2, is_left, r2 = _get_target_node_by_total_time(node["left"], r)
if node2 is not None:
return node2, is_left, r2
r = r2
interval2 = (node["date"] - node["right"]["date"]) * node["right"]["stability"]
if interval2 > r:
return node, False, r
if node["right"]["left"] is not None:
return _get_target_node_by_total_time(node["right"], r - interval2)
return None, False, r - interval2
# endef
gshape, gscale = 2.0, 0.5
tree = {
"date": rootdate,
"left": {
"date": 0,
"left": None,
"right": None,
"name": "L0",
"stability": np.random.gamma(gshape, gscale),
},
"right": {
"date": 0,
"left": None,
"right": None,
"name": "L1",
"stability": np.random.gamma(gshape, gscale),
},
"name": "I0",
"stability": 1.0,
}
cur_leafnum = 2
cur_inodenum = 1
# totaltime = rootdate * 2
totaltime = rootdate * (tree["left"]["stability"] + tree["right"]["stability"])
while cur_leafnum < num_leaves:
r = np.random.uniform(0, totaltime)
parent, is_left, r2 = _get_target_node_by_total_time(tree, r)
cnode = {
"date": 0,
"left": None,
"right": None,
"name": "L{}".format(cur_leafnum),
"stability": np.random.gamma(gshape, gscale),
}
inode = {
"left": None,
"right": None,
"name": "I{}".format(cur_inodenum),
}
if is_left:
inode["date"] = parent["date"] - r2 / parent["left"]["stability"]
assert(inode["date"] > 0)
inode["stability"] = parent["left"]["stability"]
inode["right"] = cnode
inode["left"] = parent["left"]
parent["left"] = inode
else:
inode["date"] = parent["date"] - r2 / parent["right"]["stability"]
inode["stability"] = parent["right"]["stability"]
inode["left"] = cnode
inode["right"] = parent["right"]
parent["right"] = inode
# totaltime += inode["date"]
totaltime += inode["date"] * cnode["stability"]
cur_leafnum += 1
cur_inodenum += 1
return tree
def set_locations_by_random_walk(tree, variance=1.0):
"""
Perform simple random walks to assign coordinates
"""
def _set_locations_main(parent, node, variance):
interval = parent["date"] - node["date"]
_var = variance * interval
loc = np.random.multivariate_normal([parent["x"], parent["y"]], [[_var, 0.0], [0.0, _var]])
node["x"] = loc[0]
node["y"] = loc[1]
if node["left"] is not None:
assert(node["right"] is not None)
_set_locations_main(node, node["left"], variance)
_set_locations_main(node, node["right"], variance)
# endef
tree["x"] = tree["y"] = 0.0
_set_locations_main(tree, tree["left"], variance=variance)
_set_locations_main(tree, tree["right"], variance=variance)
def gen_traits(tree, _lambda=1.0, fnum=100):
"""
At each node,
- randomly choose the number of birth events
- for each birth event, randomly decide which feature is to be updated
"""
def _gen_traits_main(parent, node, flist, vcount, _lambda):
interval = parent["date"] - node["date"]
node["catvect"] = np.copy(parent["catvect"])
# # replace features num times
# num = np.random.poisson(_lambda * interval)
# # the same feature can be updated multiple times along a branch
# target_features = np.unique(np.random.randint(0, len(flist), size=num))
target_features = {}
t = 0.0
while True:
r = np.random.exponential(scale=1.0 / _lambda)
t += r
if t >= interval:
break
# the rich gets richer
weights = list(map(lambda x: x["size"] + 1.0, flist))
fid = rand_partition(weights)
if fid in target_features:
# the same feature can be updated multiple times along a branch
# just update the time
fval = node["catvect"][fid]
fnode["annotation"]["vid2date"][fval] = parent["date"] + t
else:
fnode = flist[fid]
fnode["size"] += 1
fnode["annotation"]["vid2date"][vcount] = parent["date"] + t
node["catvect"][fid] = vcount
vcount += 1
target_features[fid] = t
# ensure that at least one event happens
if len(target_features) <= 0:
t = np.random.uniform(0.0, interval)
fid = np.random.randint(0, len(flist))
fnode = flist[fid]
fnode["size"] += 1
fnode["annotation"]["vid2date"][vcount] = parent["date"] + t
node["catvect"][fid] = vcount
vcount += 1
if node["left"] is not None:
assert(node["right"] is not None)
vcount = _gen_traits_main(node, node["left"], flist, vcount, _lambda)
vcount = _gen_traits_main(node, node["right"], flist, vcount, _lambda)
return vcount
# endef
flist = []
for i in range(fnum):
flist.append({
"fid": i,
"size": 1,
"type": "cat",
"annotation": {
"vid2date": {
i: 0,
}
},
})
tree["catvect"] = np.arange(fnum)
vcount = fnum
vcount = _gen_traits_main(tree, tree["left"], flist, vcount, _lambda)
vcount = _gen_traits_main(tree, tree["right"], flist, vcount, _lambda)
return flist, vcount
def update_tree_by_borrowings(tree, flist, nu=0.05):
def _update_nodeval(node, fid, oldv, newv):
if node["catvect"][fid] != oldv:
return 0
node["catvect"][fid] = newv
change = 1
if node["left"] is not None:
change += _update_nodeval(node["left"], fid, oldv, newv)
change += _update_nodeval(node["right"], fid, oldv, newv)
return change
nodes = get_all_nodes(tree)
nodes_by_date = sorted(nodes, key=lambda x: x["date"], reverse=True)
for i in range(1, len(nodes_by_date)):
node = nodes_by_date[i]
# # # # #
# if node["date"] == 0.0:
# break
# collect branches
contemporary_nodes = []
for pnode in nodes_by_date[:i]:
if pnode["left"] is None:
break
if pnode["left"] is not node and pnode["left"]["date"] <= node["date"]:
contemporary_nodes.append((pnode, pnode["left"]))
if pnode["right"] is not node and pnode["right"]["date"] <= node["date"]:
contemporary_nodes.append((pnode, pnode["right"]))
assert(len(contemporary_nodes) > 0)
weights = []
for pnode, cnode in contemporary_nodes:
# TODO: weighted avg of the locations of pnode and cnode?
dist = np.sqrt((node["x"] - cnode["x"]) ** 2 + (node["y"] - cnode["y"]) ** 2)
weight = np.exp(20.0 * (max(dist / 3, 1.0) ** -0.5))
weights.append(weight)
weights = np.array(weights)
# print(weights / weights.sum())
for fid, is_borrowing in enumerate(np.random.rand(len(flist)) < nu):
if not is_borrowing:
continue
cid = rand_partition(weights)
pnode, cnode = contemporary_nodes[cid]
# too similar, no chance to be documented separately
if node["date"] == 0.0:
overlap = (cnode["catvect"] == pnode["catvect"]).sum() / float(len(pnode["catvect"]))
if overlap > 0.95:
sys.stderr.write("overlap {} ... skip\n".format(overlap))
continue
v = cnode["catvect"][fid]
if cnode["catvect"][fid] == pnode["catvect"][fid]:
newval = v
else:
date = flist[fid]["annotation"]["vid2date"][v]
if date > node["date"]:
newval = v
else:
newval = pnode["catvect"][fid]
# update only if the borrowed one is different from the original
if node["catvect"][fid] != v:
oldv = node["catvect"][fid]
change = _update_nodeval(node, fid, oldv, v)
sys.stderr.write("{} nodes updated\t{} -> {}\n".format(change, oldv, v))
def merge_leaves(tree, thres=0.98):
stack = [tree]
while len(stack) > 0:
node = stack.pop(0)
if node["left"] is not None:
if node["left"]["left"] is None and node["right"]["left"] is None:
assert(node["left"]["date"] == 0.0 and node["right"]["date"] == 0.0)
overlap = (node["left"]["catvect"] == node["right"]["catvect"]).sum() / float(len(node["left"]["catvect"]))
if overlap >= thres:
sys.stderr.write("overlap {} ... remove!\n".format(overlap))
node["name"] = node["left"]["name"]
node["date"] = 0.0
node["left"] = None
node["right"] = None
# restart
# TODO: efficiency
stack = [tree]
else:
sys.stderr.write("test passed {}\n".format(overlap))
else:
stack.append(node["left"])
stack.append(node["right"])
def update_vids(tree, flist, keep_singletons=False):
nodes = get_all_nodes(tree)
fidcounts = [Counter() for i in range(len(flist))]
for node in nodes:
for fid, v in enumerate(node["catvect"]):
fidcounts[fid][v] += 1
do_keep = np.ones(len(flist), dtype=np.bool_)
if not keep_singletons:
for fid in range(len(flist)):
if len(fidcounts[fid]) <= 1:
do_keep[fid] = 0
num_removed = len(flist) - do_keep.sum()
sys.stderr.write("remove {} singleton features\n".format(num_removed))
for node in nodes:
node["catvect"] = node["catvect"][do_keep]
flist2, fidcounts2 = [], []
vcount = 0
for is_kept, fnode, fidcount in zip(do_keep, flist, fidcounts):
if is_kept:
fnode["fid"] = len(flist2)
flist2.append(fnode)
fidcounts2.append(fidcount)
flist = flist2
fidcounts = fidcounts2
vcount = 0
for fid, (fnode, fidcount) in enumerate(zip(flist, fidcounts)):
fnode["size"] = len(fidcount)
vcount += fnode["size"]
labels = sorted(fidcount.keys(), key=int)
fnode["annotation"]["label2vid"] = {}
fnode["annotation"]["vid2label"] = []
for vid, _label in enumerate(labels):
fnode["annotation"]["label2vid"][_label] = vid
fnode["annotation"]["vid2label"].append(_label)
for node in nodes:
node["catvect"][fid] = fnode["annotation"]["label2vid"][node["catvect"][fid]]
return flist, vcount
def get_all_nodes(tree):
stack = [tree]
nodes = []
while len(stack) > 0:
node = stack.pop(0)
nodes.append(node)
if node["left"] is not None:
stack.append(node["left"])
stack.append(node["right"])
return nodes
def get_leaves(node, leaves):
if node["left"] is not None:
get_leaves(node["left"], leaves)
get_leaves(node["right"], leaves)
else:
leaves.append(node)
return leaves
def to_nexus(tree, flist, vcount, dump_tree=False):
    """Serialize the tree's leaves as a NEXUS TAXA/CHARACTERS file.

    Each leaf becomes one taxon row of a binary character matrix of width
    `vcount` built from its "catvect". When `dump_tree` is true, a Trees
    block with the topology (branch lengths = date differences) is appended.
    Returns the NEXUS text as a single string.
    """
    leaves = get_leaves(tree, [])
    chunks = ["#NEXUS\r\nBEGIN TAXA;\r\nDIMENSIONS NTAX={};\r\nEND;\r\n".format(len(leaves))]
    chunks.append("\r\nBEGIN CHARACTERS;\r\nDIMENSIONS NCHAR={};\r\nFORMAT\r\n\tDATATYPE=STANDARD\r\n\tSYMBOLS=\"01\"\r\n\tMISSING=?\r\n\tGAP=-\r\n\tINTERLEAVE=NO\r\n;\r\nMATRIX\n\n".format(vcount))
    for leaf in leaves:
        # Taxon names must not contain spaces or parentheses.
        taxon = leaf["name"].replace(" ", "_").replace("(", "").replace(")", "")
        bits = np.zeros(vcount, dtype=np.int32)
        for _fid, vid in enumerate(leaf["catvect"]):
            bits[vid] = 1
        chunks.append("{}\t{}\r".format(taxon, "".join(map(str, bits.tolist()))))
    chunks.append(";\r\nEND;\r\n")
    rv = "".join(chunks)
    if dump_tree:
        def _render(parent, node):
            # Newick-style rendering; branch length is the parent/child date gap.
            if node["left"] is not None:
                left = _render(node, node["left"])
                right = _render(node, node["right"])
                out = "({},{})".format(left, right)
            else:
                out = node["name"].replace(" ", "_").replace("(", "").replace(")", "")
            if parent is not None:
                out += ":{}".format(parent["date"] - node["date"])
            return out
        rv += "\r\nBEGIN Trees;\r\nTree tree1 = "
        rv += _render(None, tree)
        rv += ";\r\nEND;\r\n"
    return rv
def main():
    """Command-line driver: simulate a language phylogeny with trait evolution.

    Builds a random time-tree, assigns leaf locations by a random walk,
    generates binary traits by a pure birth process, optionally applies
    borrowing between geographically close lineages, merges near-identical
    leaves, and writes the tree / languages / feature list / NEXUS matrix
    to the files given on the command line.
    """
    parser = ArgumentParser()
    parser.add_argument("-s", "--seed", metavar="INT", type=int, default=None,
                        help="random seed")
    parser.add_argument('--rootdate', type=float, default=1000.0)
    parser.add_argument('--num_leaves', type=int, default=10)
    parser.add_argument('--variance', type=float, default=5.0,
                        help="Brownian process parameter")
    parser.add_argument('--fnum', type=int, default=100,
                        help="# of features")
    # "--lambda" clashes with the Python keyword, hence dest="_lambda".
    parser.add_argument('--lambda', dest="_lambda", type=float, default=0.02,
                        help="parameter of a pure birth process")
    parser.add_argument('--nu', type=float, default=0.05,
                        help="borrowing parameter")
    parser.add_argument('--keep_singletons', action="store_true", default=False)
    parser.add_argument('--merge_thres', type=float, default=0.90,
                        help="merge near-identical leaves")
    parser.add_argument('--tree', type=str, default=None)
    parser.add_argument('--langs', type=str, default=None)
    parser.add_argument('--flist', type=str, default=None)
    parser.add_argument('--nexus', type=str, default=None)
    args = parser.parse_args()
    sys.stderr.write("args\t{}\n".format(args))
    if args.num_leaves <= 2:
        sys.stderr.write("# of leaves must be larger than 2\n")
        sys.exit(1)
    if args.seed is not None:
        np.random.seed(args.seed)
        # random.seed(args.seed)
    # build a time-tree
    tree = build_tree(args.num_leaves, args.rootdate)
    # assign an xy coordinate to each node
    set_locations_by_random_walk(tree, variance=args.variance)
    # generate features
    flist, vcount = gen_traits(tree, _lambda=args._lambda, fnum=args.fnum)
    sys.stderr.write("{}\n".format(tree))
    sys.stderr.write("{}\n".format(vcount))
    # sys.stderr.write("{}\n".format(flist))
    if args.nu > 0.0:
        update_tree_by_borrowings(tree, flist, nu=args.nu)
    # merge near-identical leaves
    # too similar, no chance to be documented separately
    merge_leaves(tree, thres=args.merge_thres)
    # Renumber feature values after the merge (optionally drop singletons).
    flist, vcount = update_vids(tree, flist, keep_singletons=args.keep_singletons)
    sys.stderr.write("{}\n".format(vcount))
    # numpy arrays are not JSON-serializable; convert before dumping.
    for node in get_all_nodes(tree):
        node["catvect"] = node["catvect"].tolist()
    if args.tree is not None:
        with open(args.tree, 'w') as f:
            f.write("{}\n".format(json.dumps(tree)))
    if args.langs is not None:
        with open(args.langs, 'w') as f:
            langs = get_leaves(tree, [])
            for lang in langs:
                f.write("{}\n".format(json.dumps(lang)))
    if args.flist is not None:
        with open(args.flist, 'w') as f:
            f.write("{}\n".format(json.dumps(flist, indent=4, sort_keys=True)))
    if args.nexus is not None:
        with open(args.nexus, 'w') as f:
            f.write(to_nexus(tree, flist, vcount, dump_tree=True))
# Script entry point: run the simulation driver when executed directly.
if __name__ == "__main__":
    main()
| [
"numpy.copy",
"rand_utils.rand_partition",
"sys.exit",
"numpy.sqrt",
"argparse.ArgumentParser",
"numpy.random.multivariate_normal",
"json.dumps",
"numpy.random.exponential",
"collections.Counter",
"numpy.array",
"numpy.zeros",
"sys.stderr.write",
"numpy.random.gamma",
"numpy.random.seed",
... | [((6320, 6335), 'numpy.arange', 'np.arange', (['fnum'], {}), '(fnum)\n', (6329, 6335), True, 'import numpy as np\n'), ((13855, 13871), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (13869, 13871), False, 'from argparse import ArgumentParser\n'), ((1867, 1898), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'totaltime'], {}), '(0, totaltime)\n', (1884, 1898), True, 'import numpy as np\n'), ((3324, 3414), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (["[parent['x'], parent['y']]", '[[_var, 0.0], [0.0, _var]]'], {}), "([parent['x'], parent['y']], [[_var, 0.0], [\n 0.0, _var]])\n", (3353, 3414), True, 'import numpy as np\n'), ((4187, 4213), 'numpy.copy', 'np.copy', (["parent['catvect']"], {}), "(parent['catvect'])\n", (4194, 4213), True, 'import numpy as np\n'), ((8054, 8071), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (8062, 8071), True, 'import numpy as np\n'), ((10497, 10506), 'collections.Counter', 'Counter', ([], {}), '()\n', (10504, 10506), False, 'from collections import Counter\n'), ((12978, 13010), 'numpy.zeros', 'np.zeros', (['vcount'], {'dtype': 'np.int32'}), '(vcount, dtype=np.int32)\n', (12986, 13010), True, 'import numpy as np\n'), ((15162, 15217), 'sys.stderr.write', 'sys.stderr.write', (['"""# of leaves must be larger than 2\n"""'], {}), "('# of leaves must be larger than 2\\n')\n", (15178, 15217), False, 'import sys\n'), ((15226, 15237), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (15234, 15237), False, 'import sys\n'), ((15276, 15301), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (15290, 15301), True, 'import numpy as np\n'), ((1374, 1405), 'numpy.random.gamma', 'np.random.gamma', (['gshape', 'gscale'], {}), '(gshape, gscale)\n', (1389, 1405), True, 'import numpy as np\n'), ((1564, 1595), 'numpy.random.gamma', 'np.random.gamma', (['gshape', 'gscale'], {}), '(gshape, gscale)\n', (1579, 1595), True, 'import numpy as np\n'), ((2135, 2166), 
'numpy.random.gamma', 'np.random.gamma', (['gshape', 'gscale'], {}), '(gshape, gscale)\n', (2150, 2166), True, 'import numpy as np\n'), ((4545, 4587), 'numpy.random.exponential', 'np.random.exponential', ([], {'scale': '(1.0 / _lambda)'}), '(scale=1.0 / _lambda)\n', (4566, 4587), True, 'import numpy as np\n'), ((4783, 4806), 'rand_utils.rand_partition', 'rand_partition', (['weights'], {}), '(weights)\n', (4797, 4806), False, 'from rand_utils import rand_partition\n'), ((5467, 5499), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'interval'], {}), '(0.0, interval)\n', (5484, 5499), True, 'import numpy as np\n'), ((7865, 7935), 'numpy.sqrt', 'np.sqrt', (["((node['x'] - cnode['x']) ** 2 + (node['y'] - cnode['y']) ** 2)"], {}), "((node['x'] - cnode['x']) ** 2 + (node['y'] - cnode['y']) ** 2)\n", (7872, 7935), True, 'import numpy as np\n'), ((8266, 8289), 'rand_utils.rand_partition', 'rand_partition', (['weights'], {}), '(weights)\n', (8280, 8289), False, 'from rand_utils import rand_partition\n'), ((16296, 16312), 'json.dumps', 'json.dumps', (['tree'], {}), '(tree)\n', (16306, 16312), False, 'import json\n'), ((16622, 16665), 'json.dumps', 'json.dumps', (['flist'], {'indent': '(4)', 'sort_keys': '(True)'}), '(flist, indent=4, sort_keys=True)\n', (16632, 16665), False, 'import json\n'), ((16497, 16513), 'json.dumps', 'json.dumps', (['lang'], {}), '(lang)\n', (16507, 16513), False, 'import json\n')] |
import pandas as pd
import numpy as np
import os
import sys
def load_data(assets, start_date, end_date):
    """Load open/close/high/low/adjusted-close frames for the given assets.

    Returns a 5-tuple of DataFrames in that fixed order, each restricted to
    the requested assets and strictly between start_date and end_date.
    """
    frames = [
        load_data_from_file('etf_data_%s.csv' % part, assets, start_date, end_date)
        for part in ('open', 'close', 'high', 'low', 'adj_close')
    ]
    return tuple(frames)
def load_data_from_file(file, assets, start_date, end_date):
    """Load the selected asset columns from an ETF CSV file.

    The file is looked up in the working directory and then in etf_data/
    directories one, two and three levels up. Rows are filtered strictly
    between start_date and end_date (lexicographic compare on the 'Date'
    column), restricted to `assets`, and any row containing a NaN or a
    non-numeric cell is dropped.
    """
    # Fix: the original cumulatively prefixed the already-prefixed path, so
    # the third fallback pointed four levels up instead of three.
    for candidate in (file, '../etf_data/' + file,
                      '../../etf_data/' + file, '../../../etf_data/' + file):
        if os.path.isfile(candidate):
            break
    file = candidate  # falls back to the deepest path; read_csv then raises
    print('Loading file ', file)
    df = pd.read_csv(file)
    df = df.loc[df.Date > start_date]
    df = df.loc[df.Date < end_date]
    df = df[assets]
    # Collect row labels to drop: NaN cells, or cells np.isnan cannot handle
    # (e.g. strings). A set replaces the original O(n*k) list-membership scan.
    bad_rows = set()
    for key in df.keys():
        for i in df[key].index:
            val = df[key][i]
            try:
                if np.isnan(val):
                    bad_rows.add(i)
            except TypeError:
                # Non-numeric cell: drop that row as well (original behavior).
                bad_rows.add(i)
    df.drop(list(bad_rows), inplace=True)
    return df
def load_data_from_file2(file, assets, start_date, end_date):
    """Variant of load_data_from_file for CSVs whose date column is 'date'.

    Searches the working directory and etf_data/ directories up to three
    levels up, filters rows strictly between start_date and end_date,
    restricts to `assets`, and drops rows with NaN or non-numeric cells.
    """
    # Fix: the original cumulatively prefixed the already-prefixed path, so
    # the third fallback pointed four levels up instead of three.
    for candidate in (file, '../etf_data/' + file,
                      '../../etf_data/' + file, '../../../etf_data/' + file):
        if os.path.isfile(candidate):
            break
    file = candidate  # falls back to the deepest path; read_csv then raises
    print('Loading file ', file)
    df = pd.read_csv(file)
    df = df.loc[df.date > start_date]
    df = df.loc[df.date < end_date]
    df = df[assets]
    # Collect row labels to drop: NaN cells, or cells np.isnan cannot handle.
    bad_rows = set()
    for key in df.keys():
        for i in df[key].index:
            val = df[key][i]
            try:
                if np.isnan(val):
                    bad_rows.add(i)
            except TypeError:
                # Non-numeric cell: drop that row as well (original behavior).
                bad_rows.add(i)
    df.drop(list(bad_rows), inplace=True)
    return df
def load_all_data_from_file(file, start_date, end_date):
    """Load a full ETF CSV (all columns), filtered on the 'Date' column.

    The file is searched for in the working directory, then in etf_data/
    one, two and three levels up; rows are kept strictly between
    start_date and end_date.
    """
    for candidate in (file, '../etf_data/' + file,
                      '../../etf_data/' + file, '../../../etf_data/' + file):
        if os.path.isfile(candidate):
            break
    file = candidate  # deepest path if nothing matched; read_csv then raises
    print('Loading file ', file)
    df = pd.read_csv(file)
    df = df.loc[df.Date > start_date]
    return df.loc[df.Date < end_date]
def load_all_data_from_file2(file, start_date, end_date):
    """Load a full ETF CSV (all columns), filtered on the 'date' column.

    Same directory search as load_all_data_from_file, but for files whose
    date column is lowercase 'date'.
    """
    for candidate in (file, '../etf_data/' + file,
                      '../../etf_data/' + file, '../../../etf_data/' + file):
        if os.path.isfile(candidate):
            break
    file = candidate  # deepest path if nothing matched; read_csv then raises
    print('Loading file ', file)
    df = pd.read_csv(file)
    df = df.loc[df.date > start_date]
    return df.loc[df.date < end_date]
def load_all_data(start_date, end_date):
    """Load open/close/high/low/adjusted-close frames for every asset.

    Returns a 5-tuple of DataFrames in that fixed order, each restricted
    to rows strictly between start_date and end_date.
    """
    frames = [
        load_all_data_from_file('etf_data_%s.csv' % part, start_date, end_date)
        for part in ('open', 'close', 'high', 'low', 'adj_close')
    ]
    return tuple(frames)
| [
"os.path.isfile",
"numpy.isnan",
"pandas.read_csv"
] | [((927, 944), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (938, 944), True, 'import pandas as pd\n'), ((1757, 1774), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (1768, 1774), True, 'import pandas as pd\n'), ((2559, 2576), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (2570, 2576), True, 'import pandas as pd\n'), ((3366, 3383), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (3377, 3383), True, 'import pandas as pd\n'), ((676, 696), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (690, 696), False, 'import os\n'), ((746, 766), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (760, 766), False, 'import os\n'), ((819, 839), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (833, 839), False, 'import os\n'), ((1506, 1526), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (1520, 1526), False, 'import os\n'), ((1576, 1596), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (1590, 1596), False, 'import os\n'), ((1649, 1669), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (1663, 1669), False, 'import os\n'), ((2334, 2354), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2348, 2354), False, 'import os\n'), ((2404, 2424), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2418, 2424), False, 'import os\n'), ((2465, 2485), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (2479, 2485), False, 'import os\n'), ((3141, 3161), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3155, 3161), False, 'import os\n'), ((3211, 3231), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3225, 3231), False, 'import os\n'), ((3272, 3292), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (3286, 3292), False, 'import os\n'), ((1181, 1194), 'numpy.isnan', 'np.isnan', (['val'], {}), '(val)\n', (1189, 1194), True, 'import numpy as np\n'), ((2011, 
2024), 'numpy.isnan', 'np.isnan', (['val'], {}), '(val)\n', (2019, 2024), True, 'import numpy as np\n')] |
import csv
from pprint import pprint
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command: read a CSV of weights given as the first positional
    argument, force each row's 'Twitter' weight to 1, and pretty-print the rows."""

    def handle(self, *args, **options):
        weights = []
        with open(args[0]) as csvfile:
            for record in csv.DictReader(csvfile):
                record['Twitter'] = 1
                weights.append(record)
            pprint(weights)
| [
"csv.DictReader",
"pprint.pprint"
] | [((219, 242), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (233, 242), False, 'import csv\n'), ((382, 397), 'pprint.pprint', 'pprint', (['weights'], {}), '(weights)\n', (388, 397), False, 'from pprint import pprint\n')] |
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import os
from datetime import datetime, date, timedelta
from sklearn.linear_model import LinearRegression
import scipy
import math
import sys
import locator
# Paths are resolved relative to this script's own location.
file_path = os.path.dirname(os.path.realpath(__file__))
proj_path = os.path.abspath(os.path.join(file_path,".."))
# Input directory holding the raw data.gouv.fr CSV files.
datagouv_path = os.path.join(proj_path,"datagouv")
# Output directories for the generated site assets (PNG curves + CSV summaries).
gen_path = os.path.join(proj_path,"../../gatsby/trends/generated")
datagen_path = os.path.join(gen_path,"data")
def downloadIfNeeded(fileName):
    """Download `fileName` from data.gouv.fr unless a local copy less than
    one day old already exists.

    The download URL is chosen from the file name ("department" ->
    emergency-room dataset, "hospitalieres" -> hospital dataset).

    Raises:
        ValueError: if the file name matches no known dataset. (The original
        code left `command` unbound in that case, crashing with NameError.)
    """
    need_download = True
    if os.path.exists(fileName):
        today = date.today()
        last_modified_ts = os.path.getmtime(fileName)
        mtime = date.fromtimestamp(last_modified_ts)
        # Local copy modified within the last day: keep it.
        if (today - mtime).days <= 1:
            need_download = False
    if not need_download:
        print("%s est à jour" % fileName)
        return
    print("%s Needs a download" % fileName)
    if "department" in fileName:
        url = "https://www.data.gouv.fr/fr/datasets/r/eceb9fb4-3ebc-4da3-828d-f5939712600a"
    elif "hospitalieres" in fileName:
        url = "https://www.data.gouv.fr/fr/datasets/r/6fadff46-9efd-4c53-942a-54aca783c30c"
    else:
        raise ValueError("No known download URL for %s" % fileName)
    os.system("/usr/bin/wget %s -O %s" % (url, fileName))
# Emergency-room (urgences) dataset: refresh if stale, then load.
urgence_data = os.path.join(datagouv_path,"department_latest.csv")
downloadIfNeeded(urgence_data)
urgence_df = pd.read_csv(urgence_data, sep=";", dtype= {'dep':'object'})
# Hospital (données hospitalières) dataset: refresh if stale, then load.
hosp_data = os.path.join(datagouv_path,"donnees_hospitalieres_latest.csv")
downloadIfNeeded(hosp_data)
hosp_df = pd.read_csv(hosp_data, sep=';')
# Data timestamp (wget preserves the website's mtime on the downloaded file)
last_modified_ts = os.path.getmtime(urgence_data)
data_date = datetime.fromtimestamp(last_modified_ts)
# Keep only the "all age classes" rows (age class code 0)
urgence_df = urgence_df[urgence_df["sursaud_cl_age_corona"] == 0].copy()
# Load the département reference table, indexed by département code
depts = pd.read_csv(os.path.join(datagouv_path,"departement2020.csv"))
depts.set_index(depts.dep, inplace=True)
depts.drop("dep",axis=1, inplace=True)
# Load the région reference table, indexed by région code
regs = pd.read_csv(os.path.join(datagouv_path,"region2020.csv"))
#regs["reg"] = regs["reg"].apply(lambda x: str(x) if len(str(x)) > 1 else '0' + str(x))
regs.set_index(regs.reg, inplace=True)
regs.drop("reg", axis=1, inplace=True)
# Add département name, région code and région name to the urgences data
urgence_df["dep_name"] = urgence_df["dep"].apply(lambda x: depts.loc[str(x)].libelle if pd.notnull(x) else None)
urgence_df["reg"] = urgence_df["dep"].apply(lambda x: depts.loc[x].reg if pd.notnull(x) else None)
urgence_df["reg_name"] = urgence_df["reg"].apply(lambda x: regs.loc[x].libelle if pd.notnull(x) else None)
# Add département name, région code and région name to the hospital data
# (zero-pad single-character département codes first)
hosp_df["dep"] = hosp_df["dep"].apply(lambda x: x if len(x) > 1 else '0'+x)
# Drop the overseas entry coded 978
hosp_df=hosp_df[hosp_df.dep != "978"]
hosp_df["dep_name"] = hosp_df["dep"].apply(lambda x: depts.loc[str(x)].libelle if pd.notnull(x) else None)
hosp_df["reg"] = hosp_df["dep"].apply(lambda x: depts.loc[x].reg if pd.notnull(x) else None)
hosp_df["reg_name"] = hosp_df["reg"].apply(lambda x: regs.loc[x].libelle if pd.notnull(x) else None)
# Dates are displayed as dd/mm/yy and used as the frame index.
def convertDate(isodate):
    """Turn an ISO date string 'YYYY-MM-DD' into display form 'DD/MM/YY'."""
    year, month, day = isodate.split('-')
    return "{}/{}/{}".format(day, month, year[2:])
def addDays(df, duration):
    """Return `df` reindexed with `duration` extra calendar days appended.

    The index is expected to hold 'DD/MM/YY' strings; new rows are filled
    with NaN by reindex. Years are interpreted as 20YY.
    """
    day_s, month_s, year_s = df.index[-1].split("/")
    last_known = date(2000 + int(year_s), int(month_s), int(day_s))
    # strftime("%d/%m/%y") reproduces the zero-padded display format.
    extension = [
        (last_known + timedelta(days=offset)).strftime("%d/%m/%y")
        for offset in range(1, duration + 1)
    ]
    return df.reindex(index=df.index.append(pd.Index(extension)))
def estimateSigma(reg, X, Y):
    """Residual standard error of a fitted linear regression (n - 2 dof).

    Used for the prediction confidence interval; see
    http://pageperso.lif.univ-mrs.fr/~alexis.nasr/Ens/IAAAM2/SlidesModStat_C1_print.pdf
    """
    squared_residuals = (Y - reg.predict(X)) ** 2
    return math.sqrt(squared_residuals.sum() / (len(squared_residuals) - 2))
def plot_non_zero(ax, logScale, df, col, label):
    """Plot df[col] on `ax`; on a log scale, mask zero values as NaN first.

    Side effect: in log mode a helper column "nnz_<col>" is added to `df`.
    """
    if logScale:
        masked = "nnz_%s" % col
        df[masked] = df[col]
        # Zeros are undefined on a log axis; NaN leaves gaps instead.
        df.loc[df[col] == 0, masked] = np.nan
        ax.plot(df[masked], label=label)
    else:
        ax.plot(df[col], label=label)
def make_hosp_bars(has_reg, df_source, hosp_col, reg_index, source_label, ax):
    """Draw the daily-hospitalisation bar chart on `ax`.

    When a trend was computed (`has_reg`), the day after the end of the
    regression window is drawn differently (dashed outline) because its
    figures are still incomplete; it was not used for the trend.
    Side effect: adds "valid_hosp"/"uncertain_hosp" columns to df_source.
    """
    if has_reg:
        # Draw the latest day differently: its data has not fully come in
        # yet, and it is not taken into account when computing the trend.
        df_source["valid_hosp"] = np.nan
        df_source["uncertain_hosp"] = np.nan
        # All days up to and including the end of the regression window are "valid".
        df_source.loc[df_source.index[:df_source.index.get_loc(reg_index[-1])+1], "valid_hosp"] = df_source[hosp_col]
        last_day = df_source.index[df_source.index.get_loc(reg_index[-1]) + 1]
        df_source.loc[last_day,"uncertain_hosp"] = df_source.loc[last_day,hosp_col]
        ax.bar(df_source.index,
               df_source["valid_hosp"],
               label = "Nouvelles hospitalisations quotidiennes - données %s"%source_label,
               alpha=0.3,
               color="blue")
        # Dashed black outline marks the still-incomplete last day.
        ax.bar(df_source.index,
               df_source["uncertain_hosp"],
               alpha=0.2,
               edgecolor="black",
               linestyle="--",
               color="blue")
    else:
        # The last day is not easy to identify here, so it is not shown
        # separately; no trend was computed anyway.
        ax.bar(df_source.index,
               df_source[hosp_col],
               label = "Nouvelles hospitalisations quotidiennes - données %s"%source_label,
               alpha=0.3,
               color="blue")
def make_curve(urgence, urg_index, hosp, hosp_index, src_urgence, roll_urg, roll_hosp, file_radical, df_row, label, logScale):
    """Render and save one hospitalisation curve (PNG) for a geographic area.

    Plots daily bars, the smoothed rolling mean, and — when a trend was
    computed — the log-linear prediction with its confidence band. Uses
    the emergency-room data when `src_urgence` is true, otherwise the
    hospital data. Saves "<file_radical>_log.png" or "_lin.png" into
    `datagen_path` and records the file name in df_row.
    """
    # Plot
    fig = plt.figure(figsize=(10,6))
    ax = plt.axes()
    has_reg = df_row["reg_start"] is not None
    # Add a scale on the right as well, for easier reading on phones.
    ax.yaxis.set_ticks_position('both')
    ax.tick_params(labeltop=False, labelright=True)
    if src_urgence:
        make_hosp_bars(has_reg, urgence, "nbre_hospit_corona", urg_index, "urgences", ax)
        ax.plot(urgence[roll_urg], label="Nouvelles hospitalisations quotidiennes lissées - données urgences", color="orange")
        if has_reg:
            ax.plot(urgence["pred_hosp"], "--", label="Tendance hospitalisations quotidiennes -- données urgences", color="orange")
            ax.fill_between(urgence.index, urgence["pred_max"], urgence["pred_min"],color="orange",alpha=0.3, label="Intervalle de confiance")
            # Darker shading over the forecast zone (after the regression window).
            reg_end = urg_index[-1]
            pred_index = urgence.index[urgence.index.get_loc(reg_end) + 1 :]
            ax.fill_between(pred_index, urgence.loc[pred_index, "pred_max"], urgence.loc[pred_index, "pred_min"],color="orange",alpha=0.2)
        # Other data source (not used for the trend), shown for comparison.
        ax.plot(hosp[roll_hosp], label="Nouvelles hospitalisations quotidiennes lissées - données hôpitaux", color="red")
    else:
        make_hosp_bars(has_reg, hosp, "incid_hosp", hosp_index, "hôpitaux", ax)
        ax.plot(hosp[roll_hosp], label="Nouvelles hospitalisations quotidiennes lissées - données hôpitaux", color="orange")
        if has_reg:
            ax.plot(hosp["pred_hosp"], "--", label="Tendance hospitalisations quotidiennes - données hôpitaux", color="orange")
            ax.fill_between(hosp.index, hosp["pred_max"], hosp["pred_min"],color="orange",alpha=0.3, label="Intervalle de confiance")
            # Darker shading over the forecast zone (after the regression window).
            reg_end = hosp_index[-1]
            pred_index = hosp.index[hosp.index.get_loc(reg_end) + 1 :]
            ax.fill_between(pred_index, hosp.loc[pred_index, "pred_max"], hosp.loc[pred_index,"pred_min"],color="orange",alpha=0.2)
    #ax.xaxis.set_major_locator(plt.MaxNLocator(10))
    ax.xaxis.set_major_locator(locator.FirstOfMonthLocator())
    #ax.xaxis.set_minor_locator(plt.MultipleLocator(1))
    ax.legend()
    if src_urgence:
        # To use this limit for the hospital data too, the index would have
        # to be extended back to February 24th.
        ax.set_xlim(left = "24/02/20", right=urgence.index[-1])
    if logScale:
        plt.yscale("log")
        # Same scale for log curves
        # Limit high enough to let room for the legend
        ax.set_ylim(0.1,50000)
    else:
        if has_reg:
            # Guard against overly divergent predictions blowing up the y axis.
            df_source = urgence if src_urgence else hosp
            hosp_col = "nbre_hospit_corona" if src_urgence else "incid_hosp"
            if df_source.loc[df_source.index[-1], "pred_max"] > df_source[hosp_col].max()*4:
                ax.set_ylim(0, df_source[hosp_col].max()*4)
    ax.set_title("Hospitalisations COVID-19 quotidiennes en %s - échelle %s"%(label,"logarithmique" if logScale else "linéaire"))
    file_name = file_radical + ("_log" if logScale else "_lin") + ".png"
    plt.savefig(os.path.join(datagen_path,file_name))
    df_row["log_curve" if logScale else "lin_curve"] = file_name
    plt.close()
def aggregate(df_source, date_col):
    """Sum every column per day, then re-index on 'DD/MM/YY' date strings.

    Grouping happens on the ISO date column first, so rows are ordered
    before the display-format conversion.
    """
    summed = df_source.groupby([date_col]).agg('sum')
    summed["date"] = [convertDate(iso) for iso in summed.index]
    return summed.set_index(["date"])
def make_rolling(df_source, col):
    """Add a 7-day centered geometric rolling mean of `col` to df_source.

    Zeros are temporarily replaced by 0.1 so the geometric mean stays
    defined; windows made only of that sentinel come out at ~0.1 and are
    reset to 0. Returns the name of the new rolling column.
    Side effect: also adds a helper column "nnz_<col>".
    """
    roll_col = "rolling_%s" % col
    nnz_col = "nnz_%s" % col
    df_source[nnz_col] = df_source[col]
    df_source.loc[df_source[nnz_col] == 0, nnz_col] = 0.1
    df_source[roll_col] = (
        df_source[nnz_col]
        .rolling(7, center=True)
        .aggregate(lambda window: window.prod() ** (1.0 / 7))
    )
    # An all-sentinel window yields exactly 0.1: map it back to a true zero.
    df_source.loc[df_source[roll_col] <= 0.101, roll_col] = 0
    return roll_col
def extract_recent(source, history, use_latest):
    """Return the last `history` rows of `source`.

    When `use_latest` is false, the very last row (typically still
    incomplete) is excluded and the window shifts back by one.
    """
    return source.iloc[-history:] if use_latest else source.iloc[-history - 1:-1]
def make_trend(df_source, hosp_col, roll_col, recent_hist):
    """Fit a log-linear trend to the recent daily hospitalisation counts.

    Fits log(counts) ~ day over the last `recent_hist` days (excluding the
    incomplete last day), extends the frame by 15 days, and fills in the
    prediction plus a confidence band ("pred_hosp", "pred_max", "pred_min").

    Returns (regression_index, time_to_double_in_days, df_source); the
    first two are None when there is not enough non-zero data.
    Note: df_source is mutated and may be replaced (addDays reindexes), so
    callers must use the returned frame.
    """
    recent = extract_recent(df_source, recent_hist, False)
    nullVals = len(recent[recent[hosp_col] == 0])
    if nullVals == 0:
        reg_col = hosp_col
    else:
        # Replace zero values by 0.1 (or by 0 where the rolling mean is 0),
        # so the logarithm below stays defined.
        reg_col = "%s_patch"%hosp_col
        df_source[reg_col] = df_source[hosp_col]
        df_source.loc[df_source[reg_col] == 0, reg_col] = 0.1
        df_source.loc[df_source[roll_col] == 0, reg_col] = 0
        # With more than 2 zero values, also double the estimation window.
        if nullVals > 2:
            recent_hist *= 2
        else:
            recent_hist = int(recent_hist*1.5)
    # Add a day-number column used as the regression's X variable.
    df_source["num_jour"] = np.arange(len(df_source))
    for_regression = extract_recent(df_source, recent_hist,False)
    # Not enough data: do not generate a trend.
    if len(for_regression[for_regression[reg_col] > 0]) < recent_hist*0.5:
        return None, None, df_source
    # Remove zero or undefined values before taking logs.
    for_regression = for_regression[for_regression[reg_col] > 0]
    reg = LinearRegression()
    X_train = for_regression.drop(columns = [c for c in for_regression.columns if c != "num_jour"])
    Y_train = np.log(for_regression[reg_col])
    reg.fit(X_train,Y_train)
    # The regression slope gives the doubling time of the exponential.
    slope = reg.coef_[0]
    timeToDouble = math.log(2)/slope
    # Append two weeks of rows and refresh the day-number column.
    df_source = addDays(df_source, 15)
    df_source["num_jour"] = np.arange(len(df_source))
    # Add the prediction to the data.
    df_source["pred_hosp"]=np.nan
    # Prediction range: from the start of the regression window to the end.
    predIndex = df_source[(df_source["num_jour"] >= X_train.iloc[0]["num_jour"])].index
    X = df_source.loc[predIndex].drop(columns = [c for c in df_source.columns if c != "num_jour"])
    df_source.loc[predIndex,"pred_hosp"]=np.exp(reg.predict(X))
    # Confidence interval of the prediction.
    sigma = estimateSigma(reg,X_train,Y_train)
    X_train_mean = X_train["num_jour"].mean()
    # Confidence half-width in log space (alpha = 10% -- 1 - alpha/2 = 0.95).
    df_source["conf_log_mean"] = np.nan
    # Interval range: starts with the data used for the linear regression.
    df_source.loc[predIndex,"conf_log_mean"] = np.sqrt(1./len(X_train) + \
        (df_source["num_jour"]-X_train_mean)**2 / ((X_train["num_jour"]-X_train_mean)**2).sum()) * \
        sigma*scipy.stats.t.ppf(0.95,len(X_train)-2)
    df_source["pred_max"] = df_source["pred_hosp"]*np.exp(df_source["conf_log_mean"])
    df_source["pred_min"] = df_source["pred_hosp"]/np.exp(df_source["conf_log_mean"])
    return for_regression.index, timeToDouble, df_source
def make_trend_metadata(df_row, reg_index, df_source, timeToDouble, hosp_rate_row_col):
    """Fill the trend metadata fields of a summary row, in place.

    Records the regression window bounds ("reg_start"/"reg_end"), the
    corresponding contamination window shifted 11 days earlier
    ("cont_start"/"cont_end"), the doubling time, and a 0/1 confidence
    flag that is 0 when no trend exists or when the confidence band at
    the horizon is wider than a factor of 2 in both directions.
    """
    if reg_index is not None:
        df_row["reg_start"] = reg_index[0]
        df_row["reg_end"] = reg_index[-1]
        # Contamination precedes hospitalisation by ~11 days in this model.
        end_loc = df_source.index.get_loc(reg_index[-1]) - 11
        start_loc = df_source.index.get_loc(reg_index[0]) - 11
        df_row["cont_end"] = df_source.index[end_loc]
        df_row["cont_start"] = df_source.index[start_loc]
    else:
        df_row["reg_start"] = None
        df_row["reg_end"] = None
        df_row["cont_end"] = None
        df_row["cont_start"] = None
    df_row["timeToDouble"] = timeToDouble
    if df_row["reg_start"] is None:
        # No trend computed at all: zero confidence.
        df_row["trend_confidence"] = 0
    else:
        band_too_wide = (
            df_source["pred_max"][-1] > df_row[hosp_rate_row_col]*2
            and df_source["pred_min"][-1] < df_row[hosp_rate_row_col]/2.
        )
        df_row["trend_confidence"] = 0 if band_too_wide else 1
def make_data(urgence, hosp, file_radical, df_row, label):
    """Build curves and summary metadata for one geographic area.

    Aggregates both data sources per day, picks the emergency-room data
    when it has recent activity (otherwise the hospital data), computes
    smoothed rates and the trend, fills `df_row` in place, and saves the
    log- and linear-scale PNG curves named from `file_radical`.
    """
    urgence = aggregate(urgence, "date_de_passage")
    hosp = aggregate(hosp, "jour")
    recent_hist = 15
    recent = urgence.loc[urgence.index[-recent_hist:]]
    recent = extract_recent(urgence, recent_hist, False)
    # Use the emergency-room data if at least one case was reported in the
    # "recent" window.
    src_urgence = len(recent[recent["nbre_hospit_corona"] > 0]) >= 1
    roll_urg = make_rolling(urgence, "nbre_hospit_corona")
    roll_hosp = make_rolling(hosp, "incid_hosp")
    # The last day of the smoothed mean provides the displayed daily
    # hospitalisation rate.
    if src_urgence:
        df_row["hosp_rate_urgence"] = urgence[urgence[roll_urg] > 0 ][roll_urg][-1]
        df_row["hosp_rate_all"] = hosp[hosp[roll_hosp] > 0 ][roll_hosp][-1]
        df_row["rate_date"] = urgence[urgence[roll_urg] > 0 ].index[-1]
    else:
        df_row["hosp_rate_all"] = hosp[hosp[roll_hosp] > 0 ][roll_hosp][-1]
        df_row["rate_date"] = hosp[hosp[roll_hosp] > 0 ].index[-1]
    # make_trend modifies the dataframe (it extends the index) so we need to update the df variables
    if src_urgence:
        urg_index, urg_timeToDouble, urgence = make_trend(urgence, "nbre_hospit_corona", roll_urg, recent_hist)
    else:
        # Python interpreter complains if the value is not assigned
        urg_index = None
    # Compute the trend on the hospital data in every case, even though it
    # is not used (for now) when the emergency-room data is usable.
    hosp_index, hosp_timeToDouble, hosp = make_trend(hosp, "incid_hosp", roll_hosp, recent_hist)
    if src_urgence:
        make_trend_metadata(df_row, urg_index, urgence,urg_timeToDouble, "hosp_rate_urgence")
    else:
        make_trend_metadata(df_row, hosp_index,hosp, hosp_timeToDouble, "hosp_rate_all")
    make_curve(urgence, urg_index, hosp, hosp_index, src_urgence, roll_urg, roll_hosp, file_radical, df_row, label, True)
    make_curve(urgence, urg_index, hosp, hosp_index, src_urgence, roll_urg, roll_hosp, file_radical, df_row, label, False)
# Summary fields shared by the national, regional and departmental tables.
common_fields = ["log_curve", "lin_curve","timeToDouble", "reg_start", "reg_end", "cont_start", "cont_end", "rate_date", "hosp_rate_urgence", "hosp_rate_all", "trend_confidence"]
# National summary.
fr_summary = pd.DataFrame(index=["France"],columns=["data_date"] + common_fields)
fr_summary.loc["France","data_date"] = data_date.strftime("%d/%m/%Y %H:%M")
make_data(urgence_df, hosp_df, "france", fr_summary.loc["France"], "France")
fr_summary.to_csv(os.path.join(datagen_path, "france.csv"), index_label='id')
# Région codes > 10 are metropolitan France; < 10 are overseas (DROM).
metropole = [r for r in regs.index if r > 10]
drom = [r for r in regs.index if r < 10]
reg_summary = pd.DataFrame(index = metropole+drom, columns=["reg_name"] + common_fields)
dep_summary = pd.DataFrame(index = depts.index, columns=["dep_name", "reg"] + common_fields)
# One summary row + curve files per région, then per département within it.
for reg in metropole + drom:
    reg_name = regs.loc[reg]["libelle"]
    file_radical = code = "r_" + str(reg)
    print(reg, reg_name)
    reg_summary.loc[reg]["reg_name"] = reg_name
    make_data(urgence_df[urgence_df["reg"] == reg], hosp_df[hosp_df["reg"] == reg], file_radical, reg_summary.loc[reg], reg_name)
    reg_depts = depts[depts["reg"]==reg]
    for dept in reg_depts.index:
        dep_name = reg_depts.loc[dept,"libelle"]
        dep_summary.loc[dept,"reg"] = reg
        dep_summary.loc[dept,"dep_name"] = dep_name
        file_radical = code = "d_" + str(dept)
        print("\t%s %s"%(dept, dep_name))
        make_data(urgence_df[urgence_df["dep"] == dept], hosp_df[hosp_df["dep"] == dept], file_radical, dep_summary.loc[dept], dep_name)
reg_summary.to_csv(os.path.join(datagen_path, "regions.csv"), index_label="reg")
dep_summary.to_csv(os.path.join(datagen_path, "departements.csv"), index_label="dep")
| [
"pandas.read_csv",
"numpy.log",
"math.log",
"pandas.Index",
"datetime.date.fromtimestamp",
"locator.FirstOfMonthLocator",
"datetime.timedelta",
"pandas.notnull",
"os.path.exists",
"matplotlib.pyplot.close",
"numpy.exp",
"datetime.date",
"pandas.DataFrame",
"matplotlib.pyplot.yscale",
"ma... | [((366, 401), 'os.path.join', 'os.path.join', (['proj_path', '"""datagouv"""'], {}), "(proj_path, 'datagouv')\n", (378, 401), False, 'import os\n'), ((412, 468), 'os.path.join', 'os.path.join', (['proj_path', '"""../../gatsby/trends/generated"""'], {}), "(proj_path, '../../gatsby/trends/generated')\n", (424, 468), False, 'import os\n'), ((483, 513), 'os.path.join', 'os.path.join', (['gen_path', '"""data"""'], {}), "(gen_path, 'data')\n", (495, 513), False, 'import os\n'), ((1322, 1374), 'os.path.join', 'os.path.join', (['datagouv_path', '"""department_latest.csv"""'], {}), "(datagouv_path, 'department_latest.csv')\n", (1334, 1374), False, 'import os\n'), ((1422, 1481), 'pandas.read_csv', 'pd.read_csv', (['urgence_data'], {'sep': '""";"""', 'dtype': "{'dep': 'object'}"}), "(urgence_data, sep=';', dtype={'dep': 'object'})\n", (1433, 1481), True, 'import pandas as pd\n'), ((1495, 1558), 'os.path.join', 'os.path.join', (['datagouv_path', '"""donnees_hospitalieres_latest.csv"""'], {}), "(datagouv_path, 'donnees_hospitalieres_latest.csv')\n", (1507, 1558), False, 'import os\n'), ((1600, 1631), 'pandas.read_csv', 'pd.read_csv', (['hosp_data'], {'sep': '""";"""'}), "(hosp_data, sep=';')\n", (1611, 1631), True, 'import pandas as pd\n'), ((1706, 1736), 'os.path.getmtime', 'os.path.getmtime', (['urgence_data'], {}), '(urgence_data)\n', (1722, 1736), False, 'import os\n'), ((1749, 1789), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['last_modified_ts'], {}), '(last_modified_ts)\n', (1771, 1789), False, 'from datetime import datetime, date, timedelta\n'), ((17105, 17174), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': "['France']", 'columns': "(['data_date'] + common_fields)"}), "(index=['France'], columns=['data_date'] + common_fields)\n", (17117, 17174), True, 'import pandas as pd\n'), ((17510, 17584), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': '(metropole + drom)', 'columns': "(['reg_name'] + common_fields)"}), "(index=metropole + drom, 
columns=['reg_name'] + common_fields)\n", (17522, 17584), True, 'import pandas as pd\n'), ((17599, 17675), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'depts.index', 'columns': "(['dep_name', 'reg'] + common_fields)"}), "(index=depts.index, columns=['dep_name', 'reg'] + common_fields)\n", (17611, 17675), True, 'import pandas as pd\n'), ((264, 290), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (280, 290), False, 'import os\n'), ((320, 349), 'os.path.join', 'os.path.join', (['file_path', '""".."""'], {}), "(file_path, '..')\n", (332, 349), False, 'import os\n'), ((578, 602), 'os.path.exists', 'os.path.exists', (['fileName'], {}), '(fileName)\n', (592, 602), False, 'import os\n'), ((1967, 2017), 'os.path.join', 'os.path.join', (['datagouv_path', '"""departement2020.csv"""'], {}), "(datagouv_path, 'departement2020.csv')\n", (1979, 2017), False, 'import os\n'), ((2148, 2193), 'os.path.join', 'os.path.join', (['datagouv_path', '"""region2020.csv"""'], {}), "(datagouv_path, 'region2020.csv')\n", (2160, 2193), False, 'import os\n'), ((6272, 6299), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (6282, 6299), True, 'from matplotlib import pyplot as plt\n'), ((6308, 6318), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (6316, 6318), True, 'from matplotlib import pyplot as plt\n'), ((9690, 9701), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9699, 9701), True, 'from matplotlib import pyplot as plt\n'), ((11870, 11888), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (11886, 11888), False, 'from sklearn.linear_model import LinearRegression\n'), ((12003, 12034), 'numpy.log', 'np.log', (['for_regression[reg_col]'], {}), '(for_regression[reg_col])\n', (12009, 12034), True, 'import numpy as np\n'), ((17345, 17385), 'os.path.join', 'os.path.join', (['datagen_path', '"""france.csv"""'], {}), "(datagen_path, 'france.csv')\n", (17357, 17385), False, 
'import os\n'), ((18457, 18498), 'os.path.join', 'os.path.join', (['datagen_path', '"""regions.csv"""'], {}), "(datagen_path, 'regions.csv')\n", (18469, 18498), False, 'import os\n'), ((18538, 18584), 'os.path.join', 'os.path.join', (['datagen_path', '"""departements.csv"""'], {}), "(datagen_path, 'departements.csv')\n", (18550, 18584), False, 'import os\n'), ((620, 632), 'datetime.date.today', 'date.today', ([], {}), '()\n', (630, 632), False, 'from datetime import datetime, date, timedelta\n'), ((660, 686), 'os.path.getmtime', 'os.path.getmtime', (['fileName'], {}), '(fileName)\n', (676, 686), False, 'import os\n'), ((703, 739), 'datetime.date.fromtimestamp', 'date.fromtimestamp', (['last_modified_ts'], {}), '(last_modified_ts)\n', (721, 739), False, 'from datetime import datetime, date, timedelta\n'), ((1236, 1254), 'os.system', 'os.system', (['command'], {}), '(command)\n', (1245, 1254), False, 'import os\n'), ((3656, 3672), 'datetime.date', 'date', (['yy', 'mm', 'dd'], {}), '(yy, mm, dd)\n', (3660, 3672), False, 'from datetime import datetime, date, timedelta\n'), ((3672, 3689), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3681, 3689), False, 'from datetime import datetime, date, timedelta\n'), ((3701, 3717), 'datetime.date', 'date', (['yy', 'mm', 'dd'], {}), '(yy, mm, dd)\n', (3705, 3717), False, 'from datetime import datetime, date, timedelta\n'), ((3717, 3741), 'datetime.timedelta', 'timedelta', ([], {'days': 'duration'}), '(days=duration)\n', (3726, 3741), False, 'from datetime import datetime, date, timedelta\n'), ((4102, 4119), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4111, 4119), False, 'from datetime import datetime, date, timedelta\n'), ((8484, 8513), 'locator.FirstOfMonthLocator', 'locator.FirstOfMonthLocator', ([], {}), '()\n', (8511, 8513), False, 'import locator\n'), ((8815, 8832), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (8825, 8832), True, 'from 
matplotlib import pyplot as plt\n'), ((9583, 9620), 'os.path.join', 'os.path.join', (['datagen_path', 'file_name'], {}), '(datagen_path, file_name)\n', (9595, 9620), False, 'import os\n'), ((12150, 12161), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (12158, 12161), False, 'import math\n'), ((13497, 13531), 'numpy.exp', 'np.exp', (["df_source['conf_log_mean']"], {}), "(df_source['conf_log_mean'])\n", (13503, 13531), True, 'import numpy as np\n'), ((13583, 13617), 'numpy.exp', 'np.exp', (["df_source['conf_log_mean']"], {}), "(df_source['conf_log_mean'])\n", (13589, 13617), True, 'import numpy as np\n'), ((2535, 2548), 'pandas.notnull', 'pd.notnull', (['x'], {}), '(x)\n', (2545, 2548), True, 'import pandas as pd\n'), ((2634, 2647), 'pandas.notnull', 'pd.notnull', (['x'], {}), '(x)\n', (2644, 2647), True, 'import pandas as pd\n'), ((2741, 2754), 'pandas.notnull', 'pd.notnull', (['x'], {}), '(x)\n', (2751, 2754), True, 'import pandas as pd\n'), ((3073, 3086), 'pandas.notnull', 'pd.notnull', (['x'], {}), '(x)\n', (3083, 3086), True, 'import pandas as pd\n'), ((3166, 3179), 'pandas.notnull', 'pd.notnull', (['x'], {}), '(x)\n', (3176, 3179), True, 'import pandas as pd\n'), ((3267, 3280), 'pandas.notnull', 'pd.notnull', (['x'], {}), '(x)\n', (3277, 3280), True, 'import pandas as pd\n'), ((4169, 4193), 'pandas.Index', 'pd.Index', (['indexExtension'], {}), '(indexExtension)\n', (4177, 4193), True, 'import pandas as pd\n')] |
import unittest
import numpy as np
from dolo.numeric.ncpsolve import ncpsolve, smooth
def josephy(x):
# Computes the function value F(x) of the NCP-example by Josephy.
n=len(x)
Fx=np.zeros(n)
Fx[0]=3*x[0]**2+2*x[0]*x[1]+2*x[1]**2+x[2]+3*x[3]-6
Fx[1]=2*x[0]**2+x[0]+x[1]**2+3*x[2]+2*x[3]-2
Fx[2]=3*x[0]**2+x[0]*x[1]+2*x[1]**2+2*x[2]+3*x[3]-1
Fx[3]=x[0]**2+3*x[1]**2+2*x[2]+3*x[3]-3;
return Fx
def Djosephy(x):
# Local Variables: x, DFx, n
# Function calls: Djosephy, zeros, length
#%
#% Computes the Jacobian DF(x) of the NCP-example by Josephy
#%
n = len(x)
DFx = np.zeros( (n, n) )
DFx[0,0] = 6.*x[0]+2.*x[1]
DFx[0,1] = 2.*x[0]+4.*x[1]
DFx[0,2] = 1.
DFx[0,3] = 3.
DFx[1,0] = 4.*x[0]+1.
DFx[1,1] = 2.*x[1]
DFx[1,2] = 3.
DFx[1,3] = 2.
DFx[2,0] = 6.*x[0]+x[1]
DFx[2,1] = x[0]+4.*x[1]
DFx[2,2] = 2.
DFx[2,3] = 3.
DFx[3,0] = 2.*x[0]
DFx[3,1] = 6.*x[1]
DFx[3,2] = 2.
DFx[3,3] = 3.
return DFx
class SerialSolve(unittest.TestCase):
def test_simple_solve(self):
x0 = np.array([0.5,0.5,0.5,0.5])
lb = np.array([0.0,0.6,0.0,0.0])
ub = np.array([1.0,1.0,1.0,0.4])
fval = np.array([ 0.5, 0.5, 0.1,0.5 ])
jac = np.array([
[1.0,0.2,0.1,0.0],
[1.0,0.2,0.1,0.0],
[0.0,1.0,0.2,0.0],
[0.1,1.0,0.2,0.1]
])
N = 10
d = len(fval)
from dolo.numeric.solver import solver
sol_fsolve = solver(josephy, x0, method='fsolve')
sol_lmmcp = solver(josephy, x0, method='lmmcp')
from numpy.testing import assert_almost_equal
assert_almost_equal(sol_fsolve, sol_lmmcp)
def test_serial_problems(self):
from numpy import inf
import numpy
fun = lambda x: [-josephy(x), -Djosephy(x)]
x0=np.array( [1.25, 0.01, 0.01, 0.50] )
lb=np.array( [0.00, 0.00, 0.00, 0.00] )
ub=np.array( [inf, inf, inf, inf] )
resp = ncpsolve(fun, lb, ub, x0, tol=1e-15)
sol = np.array( [ 1.22474487e+00, 0.00000000e+00, 3.60543164e-17, 5.00000000e-01])
from numpy.testing import assert_almost_equal, assert_equal
assert_almost_equal(sol, resp)
N = 10
d = len(x0)
serial_sol_check = np.zeros((d,N))
for n in range(N):
serial_sol_check[:,n] = resp[0]
s_x0 = np.column_stack([x0]*N)
s_lb = np.column_stack([lb]*N)
s_ub = np.column_stack([ub]*N)
def serial_fun(xvec, deriv=None):
resp = np.zeros( (d,N) )
if deriv=='serial':
dresp = np.zeros( (d,d,N) )
elif deriv=='full':
dresp = np.zeros( (d,N,d,N) )
for n in range(N):
[v, dv] = fun(xvec[:,n])
resp[:,n] = v
if deriv=='serial':
dresp[:,:,n] = dv
elif deriv=='full':
dresp[:,n,:,n] = dv
# if deriv=='full':
# dresp = dresp.swapaxes(0,2).swapaxes(1,3)
if deriv is None:
return resp
else:
return [resp, dresp]
serial_fun_val = lambda x: serial_fun(x)
serial_fun_serial_jac = lambda x: serial_fun(x,deriv='serial')[1]
serial_fun_full_jac = lambda x: serial_fun(x,deriv='full')[1]
from dolo.numeric.solver import solver
print("Serial Bounded solution : ncpsolve")
serial_sol_with_bounds_without_jac = solver( serial_fun_val, s_x0, lb=s_lb, ub=s_ub, method='ncpsolve', serial_problem=True)
print("Serial Bounded solution (with jacobian) : ncpsolve")
serial_sol_with_bounds_with_jac = solver( serial_fun_val, s_x0, s_lb, s_ub, jac=serial_fun_serial_jac, method='ncpsolve', serial_problem=True)
print("Bounded solution : ncpsolve")
sol_with_bounds_without_jac = solver( serial_fun_val, s_x0, s_lb, s_ub, method='ncpsolve', serial_problem=False)
print("Bounded solution (with jacobian) : ncpsolve")
sol_with_bounds_with_jac = solver( serial_fun_val, s_x0, s_lb, s_ub, jac=serial_fun_full_jac, method='ncpsolve', serial_problem=False)
print("Serial Unbounded solution : ncpsolve")
serial_sol_without_bounds_without_jac = solver( serial_fun_val, s_x0, method='newton', serial_problem=True)
print("Unbounded solution : fsolve")
sol_without_bounds_without_jac = solver( serial_fun_val, s_x0, method='fsolve', serial_problem=False)
print("Unbounded solution (with jacobian) : fsolve")
sol_without_bounds = solver( serial_fun_val, s_x0, jac=serial_fun_full_jac, method='fsolve', serial_problem=False)
print("Unbounded solution : lmmcp")
sol_without_bounds = solver( serial_fun_val, s_x0, jac=serial_fun_full_jac, method='lmmcp', serial_problem=False)
# TODO : check that results are equal to the benchmark
if __name__ == '__main__':
unittest.main() | [
"numpy.column_stack",
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_almost_equal",
"dolo.numeric.ncpsolve.ncpsolve",
"unittest.main",
"dolo.numeric.solver.solver"
] | [((198, 209), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (206, 209), True, 'import numpy as np\n'), ((633, 649), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (641, 649), True, 'import numpy as np\n'), ((5046, 5061), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5059, 5061), False, 'import unittest\n'), ((1111, 1141), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5, 0.5])\n', (1119, 1141), True, 'import numpy as np\n'), ((1154, 1184), 'numpy.array', 'np.array', (['[0.0, 0.6, 0.0, 0.0]'], {}), '([0.0, 0.6, 0.0, 0.0])\n', (1162, 1184), True, 'import numpy as np\n'), ((1195, 1225), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 0.4]'], {}), '([1.0, 1.0, 1.0, 0.4])\n', (1203, 1225), True, 'import numpy as np\n'), ((1239, 1269), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.1, 0.5]'], {}), '([0.5, 0.5, 0.1, 0.5])\n', (1247, 1269), True, 'import numpy as np\n'), ((1286, 1388), 'numpy.array', 'np.array', (['[[1.0, 0.2, 0.1, 0.0], [1.0, 0.2, 0.1, 0.0], [0.0, 1.0, 0.2, 0.0], [0.1, \n 1.0, 0.2, 0.1]]'], {}), '([[1.0, 0.2, 0.1, 0.0], [1.0, 0.2, 0.1, 0.0], [0.0, 1.0, 0.2, 0.0],\n [0.1, 1.0, 0.2, 0.1]])\n', (1294, 1388), True, 'import numpy as np\n'), ((1539, 1575), 'dolo.numeric.solver.solver', 'solver', (['josephy', 'x0'], {'method': '"""fsolve"""'}), "(josephy, x0, method='fsolve')\n", (1545, 1575), False, 'from dolo.numeric.solver import solver\n'), ((1597, 1632), 'dolo.numeric.solver.solver', 'solver', (['josephy', 'x0'], {'method': '"""lmmcp"""'}), "(josephy, x0, method='lmmcp')\n", (1603, 1632), False, 'from dolo.numeric.solver import solver\n'), ((1697, 1739), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sol_fsolve', 'sol_lmmcp'], {}), '(sol_fsolve, sol_lmmcp)\n', (1716, 1739), False, 'from numpy.testing import assert_almost_equal, assert_equal\n'), ((1895, 1928), 'numpy.array', 'np.array', (['[1.25, 0.01, 0.01, 0.5]'], {}), '([1.25, 0.01, 0.01, 0.5])\n', (1903, 1928), True, 'import numpy as np\n'), 
((1943, 1973), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (1951, 1973), True, 'import numpy as np\n'), ((1991, 2021), 'numpy.array', 'np.array', (['[inf, inf, inf, inf]'], {}), '([inf, inf, inf, inf])\n', (1999, 2021), True, 'import numpy as np\n'), ((2040, 2076), 'dolo.numeric.ncpsolve.ncpsolve', 'ncpsolve', (['fun', 'lb', 'ub', 'x0'], {'tol': '(1e-15)'}), '(fun, lb, ub, x0, tol=1e-15)\n', (2048, 2076), False, 'from dolo.numeric.ncpsolve import ncpsolve, smooth\n'), ((2093, 2141), 'numpy.array', 'np.array', (['[1.22474487, 0.0, 3.60543164e-17, 0.5]'], {}), '([1.22474487, 0.0, 3.60543164e-17, 0.5])\n', (2101, 2141), True, 'import numpy as np\n'), ((2248, 2278), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['sol', 'resp'], {}), '(sol, resp)\n', (2267, 2278), False, 'from numpy.testing import assert_almost_equal, assert_equal\n'), ((2346, 2362), 'numpy.zeros', 'np.zeros', (['(d, N)'], {}), '((d, N))\n', (2354, 2362), True, 'import numpy as np\n'), ((2449, 2474), 'numpy.column_stack', 'np.column_stack', (['([x0] * N)'], {}), '([x0] * N)\n', (2464, 2474), True, 'import numpy as np\n'), ((2488, 2513), 'numpy.column_stack', 'np.column_stack', (['([lb] * N)'], {}), '([lb] * N)\n', (2503, 2513), True, 'import numpy as np\n'), ((2527, 2552), 'numpy.column_stack', 'np.column_stack', (['([ub] * N)'], {}), '([ub] * N)\n', (2542, 2552), True, 'import numpy as np\n'), ((3583, 3673), 'dolo.numeric.solver.solver', 'solver', (['serial_fun_val', 's_x0'], {'lb': 's_lb', 'ub': 's_ub', 'method': '"""ncpsolve"""', 'serial_problem': '(True)'}), "(serial_fun_val, s_x0, lb=s_lb, ub=s_ub, method='ncpsolve',\n serial_problem=True)\n", (3589, 3673), False, 'from dolo.numeric.solver import solver\n'), ((3782, 3894), 'dolo.numeric.solver.solver', 'solver', (['serial_fun_val', 's_x0', 's_lb', 's_ub'], {'jac': 'serial_fun_serial_jac', 'method': '"""ncpsolve"""', 'serial_problem': '(True)'}), "(serial_fun_val, s_x0, s_lb, s_ub, 
jac=serial_fun_serial_jac, method=\n 'ncpsolve', serial_problem=True)\n", (3788, 3894), False, 'from dolo.numeric.solver import solver\n'), ((3976, 4062), 'dolo.numeric.solver.solver', 'solver', (['serial_fun_val', 's_x0', 's_lb', 's_ub'], {'method': '"""ncpsolve"""', 'serial_problem': '(False)'}), "(serial_fun_val, s_x0, s_lb, s_ub, method='ncpsolve', serial_problem=\n False)\n", (3982, 4062), False, 'from dolo.numeric.solver import solver\n'), ((4156, 4267), 'dolo.numeric.solver.solver', 'solver', (['serial_fun_val', 's_x0', 's_lb', 's_ub'], {'jac': 'serial_fun_full_jac', 'method': '"""ncpsolve"""', 'serial_problem': '(False)'}), "(serial_fun_val, s_x0, s_lb, s_ub, jac=serial_fun_full_jac, method=\n 'ncpsolve', serial_problem=False)\n", (4162, 4267), False, 'from dolo.numeric.solver import solver\n'), ((4369, 4435), 'dolo.numeric.solver.solver', 'solver', (['serial_fun_val', 's_x0'], {'method': '"""newton"""', 'serial_problem': '(True)'}), "(serial_fun_val, s_x0, method='newton', serial_problem=True)\n", (4375, 4435), False, 'from dolo.numeric.solver import solver\n'), ((4524, 4591), 'dolo.numeric.solver.solver', 'solver', (['serial_fun_val', 's_x0'], {'method': '"""fsolve"""', 'serial_problem': '(False)'}), "(serial_fun_val, s_x0, method='fsolve', serial_problem=False)\n", (4530, 4591), False, 'from dolo.numeric.solver import solver\n'), ((4686, 4782), 'dolo.numeric.solver.solver', 'solver', (['serial_fun_val', 's_x0'], {'jac': 'serial_fun_full_jac', 'method': '"""fsolve"""', 'serial_problem': '(False)'}), "(serial_fun_val, s_x0, jac=serial_fun_full_jac, method='fsolve',\n serial_problem=False)\n", (4692, 4782), False, 'from dolo.numeric.solver import solver\n'), ((4855, 4950), 'dolo.numeric.solver.solver', 'solver', (['serial_fun_val', 's_x0'], {'jac': 'serial_fun_full_jac', 'method': '"""lmmcp"""', 'serial_problem': '(False)'}), "(serial_fun_val, s_x0, jac=serial_fun_full_jac, method='lmmcp',\n serial_problem=False)\n", (4861, 4950), False, 'from 
dolo.numeric.solver import solver\n'), ((2614, 2630), 'numpy.zeros', 'np.zeros', (['(d, N)'], {}), '((d, N))\n', (2622, 2630), True, 'import numpy as np\n'), ((2688, 2707), 'numpy.zeros', 'np.zeros', (['(d, d, N)'], {}), '((d, d, N))\n', (2696, 2707), True, 'import numpy as np\n'), ((2764, 2786), 'numpy.zeros', 'np.zeros', (['(d, N, d, N)'], {}), '((d, N, d, N))\n', (2772, 2786), True, 'import numpy as np\n')] |
from unittest.mock import MagicMock
import google.protobuf.text_format as text_format
import numpy as np
from banditpylib.bandits import CvarReward
from banditpylib.data_pb2 import Actions, Context
from .ts import ThompsonSampling
class TestThompsonSampling:
"""Test thompson sampling policy"""
def test_simple_run(self):
revenues = np.array([0, 0.7, 0.8, 0.9, 1.0])
horizon = 100
reward = CvarReward(0.7)
learner = ThompsonSampling(revenues=revenues,
horizon=horizon,
reward=reward)
# Test warm start
learner.reset()
assert learner.actions(Context()).SerializeToString() == text_format.Parse(
"""
arm_pulls {
arm {
set {
id: 1
}
}
times: 1
}
""", Actions()).SerializeToString()
learner.reset()
# pylint: disable=protected-access
learner._ThompsonSampling__within_warm_start = MagicMock(
return_value=False)
mock_preference_params = np.array([1, 1, 1, 1, 1])
learner._ThompsonSampling__correlated_sampling = MagicMock(
return_value=mock_preference_params)
assert learner.actions(Context()).SerializeToString() == text_format.Parse(
"""
arm_pulls {
arm {
set {
id: 1
id: 2
id: 3
id: 4
}
}
times: 1
}
""", Actions()).SerializeToString()
| [
"banditpylib.bandits.CvarReward",
"unittest.mock.MagicMock",
"numpy.array",
"banditpylib.data_pb2.Context",
"banditpylib.data_pb2.Actions"
] | [((346, 379), 'numpy.array', 'np.array', (['[0, 0.7, 0.8, 0.9, 1.0]'], {}), '([0, 0.7, 0.8, 0.9, 1.0])\n', (354, 379), True, 'import numpy as np\n'), ((411, 426), 'banditpylib.bandits.CvarReward', 'CvarReward', (['(0.7)'], {}), '(0.7)\n', (421, 426), False, 'from banditpylib.bandits import CvarReward\n'), ((972, 1001), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(False)'}), '(return_value=False)\n', (981, 1001), False, 'from unittest.mock import MagicMock\n'), ((1040, 1065), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1])\n', (1048, 1065), True, 'import numpy as np\n'), ((1119, 1165), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'mock_preference_params'}), '(return_value=mock_preference_params)\n', (1128, 1165), False, 'from unittest.mock import MagicMock\n'), ((641, 650), 'banditpylib.data_pb2.Context', 'Context', ([], {}), '()\n', (648, 650), False, 'from banditpylib.data_pb2 import Actions, Context\n'), ((830, 839), 'banditpylib.data_pb2.Actions', 'Actions', ([], {}), '()\n', (837, 839), False, 'from banditpylib.data_pb2 import Actions, Context\n'), ((1202, 1211), 'banditpylib.data_pb2.Context', 'Context', ([], {}), '()\n', (1209, 1211), False, 'from banditpylib.data_pb2 import Actions, Context\n'), ((1445, 1454), 'banditpylib.data_pb2.Actions', 'Actions', ([], {}), '()\n', (1452, 1454), False, 'from banditpylib.data_pb2 import Actions, Context\n')] |
from maru import pymorphy
from maru.lemmatizer.abstract import ILemmatizer
from maru.tag import Tag
from maru.types import Word
class PymorphyLemmatizer(ILemmatizer):
def lemmatize(self, word: Word, tag: Tag) -> Word:
best_parse = max(
pymorphy.analyze(word),
key=lambda parse: (
tag.pos is pymorphy.get_part_of_speech(parse),
tag.case is pymorphy.get_case(parse),
tag.gender is pymorphy.get_gender(parse),
),
)
return best_parse.normal_form
| [
"maru.pymorphy.get_gender",
"maru.pymorphy.analyze",
"maru.pymorphy.get_part_of_speech",
"maru.pymorphy.get_case"
] | [((262, 284), 'maru.pymorphy.analyze', 'pymorphy.analyze', (['word'], {}), '(word)\n', (278, 284), False, 'from maru import pymorphy\n'), ((345, 379), 'maru.pymorphy.get_part_of_speech', 'pymorphy.get_part_of_speech', (['parse'], {}), '(parse)\n', (372, 379), False, 'from maru import pymorphy\n'), ((409, 433), 'maru.pymorphy.get_case', 'pymorphy.get_case', (['parse'], {}), '(parse)\n', (426, 433), False, 'from maru import pymorphy\n'), ((465, 491), 'maru.pymorphy.get_gender', 'pymorphy.get_gender', (['parse'], {}), '(parse)\n', (484, 491), False, 'from maru import pymorphy\n')] |
import os
import xcffib
from xcffib.testing import XvfbTest
from xcffib.xproto import Atom, ConfigWindow, EventMask, GetPropertyType
conn = xcffib.connect(os.environ['DISPLAY'])
xproto = xcffib.xproto.xprotoExtension(conn)
def arrange(layout, windowids):
for lay, winid in zip(layout, windowids):
xproto.ConfigureWindow(winid, ConfigWindow.X | ConfigWindow.Y | ConfigWindow.Width | ConfigWindow.Height, lay)
conn.flush()
def move(winid, x, y, sync=True):
xproto.ConfigureWindow(winid, ConfigWindow.X | ConfigWindow.Y, [x, y])
if sync:
conn.flush()
| [
"xcffib.xproto.xprotoExtension",
"xcffib.connect"
] | [((142, 179), 'xcffib.connect', 'xcffib.connect', (["os.environ['DISPLAY']"], {}), "(os.environ['DISPLAY'])\n", (156, 179), False, 'import xcffib\n'), ((189, 224), 'xcffib.xproto.xprotoExtension', 'xcffib.xproto.xprotoExtension', (['conn'], {}), '(conn)\n', (218, 224), False, 'import xcffib\n')] |
__author__ = 'krishnasagar'
from django import forms
# Refer for forms.MultipleChoiceField always, its helpful -
# http://www.programcreek.com/python/example/58199/django.forms.MultipleChoiceField
class TrackForm(forms.Form):
def __init__(self, *args, **kwargs):
if 'choices' in kwargs:
choices = kwargs.pop('choices')
else:
choices = None
super(TrackForm, self).__init__(*args, **kwargs)
self.fields['tname'] = forms.CharField(label='Track Name', max_length=100,
widget=forms.TextInput(
attrs={'title': 'Track name upto 100 characters'}))
self.fields['rating'] = forms.DecimalField(label='Rating', max_value=10.0,
min_value=0.0, decimal_places=1, max_digits=2,
required=False) # .widget_attrs({'title':'Rating from 0 to 10'})
if choices is not None:
self.fields['gname'] = forms.MultipleChoiceField(label="Genre Names", widget=forms.SelectMultiple(
attrs={'title': 'Multiple Select using RCtrl+Mouse Left Key'}), required=False, choices=choices)
else:
self.fields['gname'] = forms.MultipleChoiceField(label="Genre Names", widget=forms.SelectMultiple(
attrs={'title': 'Multiple Select using RCtrl+Mouse Left Key'}), required=False)
'''def clean(self):
cleaned_data = super(TrackForm, self).clean()
if cleaned_data.get('tname') == None:
self.add_error('tname','Music Track Name required!!!!')
else:
return cleaned_data'''
class GenreForm(forms.Form):
gname = forms.CharField(label='Genre Name', max_length=100,
widget=forms.TextInput(
attrs={'title': 'Genre name upto 100 characters', 'placeholder': 'Required'})) | [
"django.forms.TextInput",
"django.forms.SelectMultiple",
"django.forms.DecimalField"
] | [((734, 851), 'django.forms.DecimalField', 'forms.DecimalField', ([], {'label': '"""Rating"""', 'max_value': '(10.0)', 'min_value': '(0.0)', 'decimal_places': '(1)', 'max_digits': '(2)', 'required': '(False)'}), "(label='Rating', max_value=10.0, min_value=0.0,\n decimal_places=1, max_digits=2, required=False)\n", (752, 851), False, 'from django import forms\n'), ((1848, 1945), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'title': 'Genre name upto 100 characters', 'placeholder': 'Required'}"}), "(attrs={'title': 'Genre name upto 100 characters',\n 'placeholder': 'Required'})\n", (1863, 1945), False, 'from django import forms\n'), ((582, 648), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'title': 'Track name upto 100 characters'}"}), "(attrs={'title': 'Track name upto 100 characters'})\n", (597, 648), False, 'from django import forms\n'), ((1121, 1208), 'django.forms.SelectMultiple', 'forms.SelectMultiple', ([], {'attrs': "{'title': 'Multiple Select using RCtrl+Mouse Left Key'}"}), "(attrs={'title':\n 'Multiple Select using RCtrl+Mouse Left Key'})\n", (1141, 1208), False, 'from django import forms\n'), ((1359, 1446), 'django.forms.SelectMultiple', 'forms.SelectMultiple', ([], {'attrs': "{'title': 'Multiple Select using RCtrl+Mouse Left Key'}"}), "(attrs={'title':\n 'Multiple Select using RCtrl+Mouse Left Key'})\n", (1379, 1446), False, 'from django import forms\n')] |
import numpy
from xoppy_dabax_util import bragg_calc2
from run_diff_pat import run_diff_pat
from srxraylib.plot.gol import plot
if __name__ == "__main__":
descriptor = 'YB66'
SCANFROM = 0 # in microradiants
SCANTO = 100 # in microradiants
MILLER_INDEX_H = 4
MILLER_INDEX_K = 0
MILLER_INDEX_L = 0
TEMPER = 1.0
ENERGY = 8040.0
SCANPOINTS = 200
print("Using crystal descriptor: ",descriptor)
bragg_dictionary = bragg_calc2(descriptor=descriptor,
hh=MILLER_INDEX_H,kk=MILLER_INDEX_K,ll=MILLER_INDEX_L,
temper=TEMPER,
emin=ENERGY-100.0,emax=ENERGY+100.0,
estep=(SCANTO-SCANFROM)/SCANPOINTS,fileout="xcrystal.bra")
run_diff_pat(
MOSAIC = 0,
GEOMETRY = 0,
SCAN = 2,
UNIT = 1,
SCANFROM = SCANFROM,
SCANTO = SCANTO,
SCANPOINTS = SCANPOINTS,
ENERGY = ENERGY,
ASYMMETRY_ANGLE = 0.0,
THICKNESS = 0.7,
MOSAIC_FWHM = 0.1,
RSAG = 125.0,
RMER = 1290.0,
ANISOTROPY = 0,
POISSON = 0.22,
CUT = "2 -1 -1 ; 1 1 1 ; 0 0 0",
FILECOMPLIANCE = "mycompliance.dat")
a = numpy.loadtxt("diff_pat.dat",skiprows=5)
#
# plot
#
plot(a[:, 0], a[:, -1])
| [
"xoppy_dabax_util.bragg_calc2",
"numpy.loadtxt",
"run_diff_pat.run_diff_pat",
"srxraylib.plot.gol.plot"
] | [((457, 679), 'xoppy_dabax_util.bragg_calc2', 'bragg_calc2', ([], {'descriptor': 'descriptor', 'hh': 'MILLER_INDEX_H', 'kk': 'MILLER_INDEX_K', 'll': 'MILLER_INDEX_L', 'temper': 'TEMPER', 'emin': '(ENERGY - 100.0)', 'emax': '(ENERGY + 100.0)', 'estep': '((SCANTO - SCANFROM) / SCANPOINTS)', 'fileout': '"""xcrystal.bra"""'}), "(descriptor=descriptor, hh=MILLER_INDEX_H, kk=MILLER_INDEX_K, ll\n =MILLER_INDEX_L, temper=TEMPER, emin=ENERGY - 100.0, emax=ENERGY + \n 100.0, estep=(SCANTO - SCANFROM) / SCANPOINTS, fileout='xcrystal.bra')\n", (468, 679), False, 'from xoppy_dabax_util import bragg_calc2\n'), ((840, 1151), 'run_diff_pat.run_diff_pat', 'run_diff_pat', ([], {'MOSAIC': '(0)', 'GEOMETRY': '(0)', 'SCAN': '(2)', 'UNIT': '(1)', 'SCANFROM': 'SCANFROM', 'SCANTO': 'SCANTO', 'SCANPOINTS': 'SCANPOINTS', 'ENERGY': 'ENERGY', 'ASYMMETRY_ANGLE': '(0.0)', 'THICKNESS': '(0.7)', 'MOSAIC_FWHM': '(0.1)', 'RSAG': '(125.0)', 'RMER': '(1290.0)', 'ANISOTROPY': '(0)', 'POISSON': '(0.22)', 'CUT': '"""2 -1 -1 ; 1 1 1 ; 0 0 0"""', 'FILECOMPLIANCE': '"""mycompliance.dat"""'}), "(MOSAIC=0, GEOMETRY=0, SCAN=2, UNIT=1, SCANFROM=SCANFROM,\n SCANTO=SCANTO, SCANPOINTS=SCANPOINTS, ENERGY=ENERGY, ASYMMETRY_ANGLE=\n 0.0, THICKNESS=0.7, MOSAIC_FWHM=0.1, RSAG=125.0, RMER=1290.0,\n ANISOTROPY=0, POISSON=0.22, CUT='2 -1 -1 ; 1 1 1 ; 0 0 0',\n FILECOMPLIANCE='mycompliance.dat')\n", (852, 1151), False, 'from run_diff_pat import run_diff_pat\n'), ((1315, 1356), 'numpy.loadtxt', 'numpy.loadtxt', (['"""diff_pat.dat"""'], {'skiprows': '(5)'}), "('diff_pat.dat', skiprows=5)\n", (1328, 1356), False, 'import numpy\n'), ((1384, 1407), 'srxraylib.plot.gol.plot', 'plot', (['a[:, 0]', 'a[:, -1]'], {}), '(a[:, 0], a[:, -1])\n', (1388, 1407), False, 'from srxraylib.plot.gol import plot\n')] |
import os
import datetime
import math
import traceback
from typing import List
import requests
from loguru import logger
from lxml import etree
from siphon.catalog import TDSCatalog
from dask.utils import memory_repr
import numpy as np
from dateutil import parser
from ooi_harvester.settings import harvest_settings
def estimate_size_and_time(raw):
m = ""
if "requestUUID" in raw:
est_size = raw["sizeCalculation"] / 1024 ** 2
size_txt = "MB"
if (est_size / 1024) >= 1.0:
est_size = est_size / 1024
size_txt = "GB"
est_time = raw["timeCalculation"]
time_txt = "Seconds"
if (est_time / 60) >= 1.0 and (est_time / 60) < 60.0:
est_time = math.floor(est_time / 60)
time_txt = "Minutes"
if est_time == 1:
time_txt = "Minute"
elif (est_time / 60) >= 60.0:
est_time = math.floor(est_time / 60 ** 2)
time_txt = "Hours"
if est_time == 1:
time_txt = "Hour"
m = f"""
Estimated File size: {est_size:.4} {size_txt}
Estimated Time: {est_time} {time_txt}
"""
elif "message" in raw:
m = f"""
No estimate calculated.
{raw['message']['status']}
"""
return m
def parse_uframe_response(resp):
if "allURLs" in resp:
return {
"request_id": resp["requestUUID"],
"thredds_catalog": resp["allURLs"][0],
"download_catalog": resp["allURLs"][1],
"status_url": resp["allURLs"][1] + "/status.txt",
"data_size": resp["sizeCalculation"],
"estimated_time": resp["timeCalculation"],
"units": {
"data_size": "bytes",
"estimated_time": "seconds",
"request_dt": "UTC",
},
"request_dt": datetime.datetime.utcnow().isoformat(),
}
logger.warning(resp)
return None
def param_change(name):
"""
Method to accomodate for param change.
https://oceanobservatories.org/renaming-data-stream-parameters/
"""
if name == 'pressure_depth':
return 'pressure'
else:
return name
def parse_param_dict(param_dict):
unit = None
if "unit" in param_dict:
if isinstance(param_dict["unit"], dict):
if "value" in param_dict["unit"]:
unit = param_dict["unit"]["value"]
product_type = None
if "data_product_type" in param_dict:
if isinstance(param_dict["data_product_type"], dict):
if "value" in param_dict["data_product_type"]:
product_type = param_dict["data_product_type"]["value"]
return {
"pid": param_dict["id"],
"reference_designator": param_change(param_dict["name"]),
"parameter_name": param_dict["display_name"],
"netcdf_name": param_dict["netcdf_name"],
"standard_name": param_dict["standard_name"],
"description": param_dict["description"],
"unit": unit,
"data_level": param_dict['data_level'],
"data_product_type": product_type,
"data_product_identifier": param_dict["data_product_identifier"],
"last_updated": datetime.datetime.utcnow().isoformat(),
}
def parse_global_range_dataframe(global_ranges):
"""Cleans up the global ranges dataframe"""
global_df = global_ranges[global_ranges.columns[:-3]]
global_df.columns = [
"reference_designator",
"parameter_id_r",
"parameter_id_t",
"global_range_min",
"global_range_max",
"data_level",
"units",
]
return global_df
def get_bytes(value, unit):
bytes_map = {
'bytes': 1,
'Kbytes': 1024 ** 1,
'Mbytes': 1024 ** 2,
'Gbytes': 1024 ** 3,
}
return value * bytes_map[unit]
def parse_dataset_element(d, namespace):
dataset_dict = {}
for i in d.getiterator():
clean_tag = i.tag.replace('{' + namespace + '}', '')
if clean_tag == 'dataset':
dataset_dict = dict(**i.attrib)
if clean_tag == 'dataSize':
dataset_dict = dict(
data_size=float(i.text), **i.attrib, **dataset_dict
)
dataset_dict = dict(
size_bytes=get_bytes(
dataset_dict['data_size'], dataset_dict['units']
),
**dataset_dict,
)
if clean_tag == 'date':
dataset_dict = dict(date_modified=i.text, **dataset_dict)
return dataset_dict
def parse_response_thredds(response):
stream_name = response['stream']['table_name']
catalog = TDSCatalog(
response['result']['thredds_catalog'].replace('.html', '.xml')
)
catalog_dict = {
'stream_name': stream_name,
'catalog_url': catalog.catalog_url,
'base_tds_url': catalog.base_tds_url,
'async_url': response['result']['download_catalog'],
}
req = requests.get(catalog.catalog_url)
catalog_root = etree.fromstring(req.content)
namespaces = {}
for k, v in catalog_root.nsmap.items():
if k is None:
namespaces['cat'] = v
else:
namespaces[k] = v
dataset_elements = catalog_root.xpath(
'/cat:catalog/cat:dataset/cat:dataset', namespaces=namespaces
)
datasets = [
parse_dataset_element(i, namespaces['cat']) for i in dataset_elements
]
catalog_dict['datasets'] = datasets
return catalog_dict
def filter_and_parse_datasets(cat):
import re
stream_cat = cat.copy()
name = stream_cat['stream_name']
provenance_files = []
filtered_datasets = []
for d in stream_cat['datasets']:
m = re.search(
r'(deployment(\d{4})_(%s)_(\d{4}\d{2}\d{2}T\d+.\d+)-(\d{4}\d{2}\d{2}T\d+.\d+).nc)' # noqa
% (name),
str(d['name']),
)
prov = re.search(
r'(deployment(\d{4})_(%s)_aggregate_provenance.json)' % (name),
str(d['name']),
)
if m:
_, dep_num, _, start, end = m.groups()
dataset = dict(
deployment=int(dep_num), start_ts=start, end_ts=end, **d
)
filtered_datasets.append(dataset)
elif prov:
_, dep_num, _ = prov.groups()
provenance = dict(deployment=int(dep_num), **d)
provenance_files.append(provenance)
total_bytes = np.sum([d['size_bytes'] for d in filtered_datasets])
stream_cat['datasets'] = filtered_datasets
stream_cat['provenance'] = provenance_files
stream_cat['total_data_size'] = memory_repr(total_bytes)
stream_cat['total_data_bytes'] = total_bytes
return stream_cat
def filter_datasets_by_time(
datasets: List[dict], start_dt: np.datetime64, end_dt: np.datetime64
) -> List[dict]:
"""
Filters datasets collection based on the given start and end datetime.
Each dataset dictionary in the collection MUST have
`start_ts` and `end_ts`key in it.
Parameters
----------
datasets : list
The datasets collection to be filtered.
start_dt : np.datetime64
The start datetime desired.
end_dt : np.datetime64
The end datetime desired.
Returns
-------
list
The filtered datasets collection
"""
filtered_datasets = []
for d in datasets:
start_d = np.datetime64(parser.parse(d['start_ts']))
end_d = np.datetime64(parser.parse(d['end_ts']))
if start_d >= start_dt.astype(
start_d.dtype
) and end_d <= end_dt.astype(start_d.dtype):
filtered_datasets.append(d)
return filtered_datasets
def setup_etl(stream, source='ooinet', target_bucket='s3://ooi-data'):
name = stream['stream_name']
harvest_location = os.path.expanduser('~/.ooi-harvester')
# Setup Local temp folder for netcdf
temp_fold = os.path.join(harvest_location, name)
if not os.path.exists(os.path.dirname(temp_fold)):
os.mkdir(os.path.dirname(temp_fold))
if not os.path.exists(temp_fold):
os.mkdir(temp_fold)
# Setup S3 Bucket
temp_s3_fold = f"s3://temp-ooi-data/{name}.zarr"
final_s3_fold = f"{target_bucket}/{name}"
if source == 'ooinet':
retrieved_dt = stream['result']['request_dt']
else:
retrieved_dt = stream['retrieved_dt']
del stream['retrieved_dt']
return dict(
temp_fold=temp_fold,
temp_bucket=temp_s3_fold,
final_bucket=final_s3_fold,
retrieved_dt=retrieved_dt,
**stream,
)
def seconds_to_date(num):
start_dt = datetime.datetime(1900, 1, 1)
return start_dt + datetime.timedelta(seconds=num)
def get_storage_options(path):
if path.startswith("s3://"):
return harvest_settings.storage_options.aws.dict()
def get_items(keys, orig_dict):
    """Return a new dict with only the entries of ``orig_dict`` whose keys are in ``keys``.

    Parameters
    ----------
    keys : iterable
        Keys to keep (membership-tested against each key of ``orig_dict``).
    orig_dict : dict
        Source mapping; it is not modified.

    Returns
    -------
    dict
        A new dictionary containing only the selected key/value pairs.
    """
    # Dict comprehension replaces the manual accumulate-in-a-loop pattern.
    return {k: v for k, v in orig_dict.items() if k in keys}
def rename_item(old_key, new_key, orig_dict):
    """Return a shallow copy of ``orig_dict`` with ``old_key`` renamed to ``new_key``.

    If ``old_key`` is absent, the copy is returned unchanged. The value is
    written under ``new_key`` first and ``old_key`` deleted afterwards.
    """
    result = orig_dict.copy()
    if old_key in result:
        result[new_key] = result[old_key]
        del result[old_key]
    return result
def parse_exception(exception):
    """Summarize an exception as a dict with its type name, message and formatted traceback."""
    tb_text = "".join(
        traceback.format_exception(
            type(exception), exception, exception.__traceback__
        )
    )
    return {
        'type': type(exception).__name__,
        'value': str(exception),
        'traceback': tb_text,
    }
| [
"datetime.datetime",
"os.path.exists",
"dask.utils.memory_repr",
"dateutil.parser.parse",
"math.floor",
"datetime.datetime.utcnow",
"loguru.logger.warning",
"os.path.join",
"requests.get",
"numpy.sum",
"os.path.dirname",
"os.mkdir",
"lxml.etree.fromstring",
"ooi_harvester.settings.harvest_... | [((1942, 1962), 'loguru.logger.warning', 'logger.warning', (['resp'], {}), '(resp)\n', (1956, 1962), False, 'from loguru import logger\n'), ((5007, 5040), 'requests.get', 'requests.get', (['catalog.catalog_url'], {}), '(catalog.catalog_url)\n', (5019, 5040), False, 'import requests\n'), ((5060, 5089), 'lxml.etree.fromstring', 'etree.fromstring', (['req.content'], {}), '(req.content)\n', (5076, 5089), False, 'from lxml import etree\n'), ((6488, 6540), 'numpy.sum', 'np.sum', (["[d['size_bytes'] for d in filtered_datasets]"], {}), "([d['size_bytes'] for d in filtered_datasets])\n", (6494, 6540), True, 'import numpy as np\n'), ((6672, 6696), 'dask.utils.memory_repr', 'memory_repr', (['total_bytes'], {}), '(total_bytes)\n', (6683, 6696), False, 'from dask.utils import memory_repr\n'), ((7861, 7899), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.ooi-harvester"""'], {}), "('~/.ooi-harvester')\n", (7879, 7899), False, 'import os\n'), ((7958, 7994), 'os.path.join', 'os.path.join', (['harvest_location', 'name'], {}), '(harvest_location, name)\n', (7970, 7994), False, 'import os\n'), ((8675, 8704), 'datetime.datetime', 'datetime.datetime', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (8692, 8704), False, 'import datetime\n'), ((8107, 8132), 'os.path.exists', 'os.path.exists', (['temp_fold'], {}), '(temp_fold)\n', (8121, 8132), False, 'import os\n'), ((8142, 8161), 'os.mkdir', 'os.mkdir', (['temp_fold'], {}), '(temp_fold)\n', (8150, 8161), False, 'import os\n'), ((8727, 8758), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'num'}), '(seconds=num)\n', (8745, 8758), False, 'import datetime\n'), ((8840, 8883), 'ooi_harvester.settings.harvest_settings.storage_options.aws.dict', 'harvest_settings.storage_options.aws.dict', ([], {}), '()\n', (8881, 8883), False, 'from ooi_harvester.settings import harvest_settings\n'), ((732, 757), 'math.floor', 'math.floor', (['(est_time / 60)'], {}), '(est_time / 60)\n', (742, 757), 
False, 'import math\n'), ((7458, 7485), 'dateutil.parser.parse', 'parser.parse', (["d['start_ts']"], {}), "(d['start_ts'])\n", (7470, 7485), False, 'from dateutil import parser\n'), ((7517, 7542), 'dateutil.parser.parse', 'parser.parse', (["d['end_ts']"], {}), "(d['end_ts'])\n", (7529, 7542), False, 'from dateutil import parser\n'), ((8021, 8047), 'os.path.dirname', 'os.path.dirname', (['temp_fold'], {}), '(temp_fold)\n', (8036, 8047), False, 'import os\n'), ((8067, 8093), 'os.path.dirname', 'os.path.dirname', (['temp_fold'], {}), '(temp_fold)\n', (8082, 8093), False, 'import os\n'), ((918, 948), 'math.floor', 'math.floor', (['(est_time / 60 ** 2)'], {}), '(est_time / 60 ** 2)\n', (928, 948), False, 'import math\n'), ((3239, 3265), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3263, 3265), False, 'import datetime\n'), ((1888, 1914), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1912, 1914), False, 'import datetime\n')] |
"""
propertylist
"""
from __future__ import absolute_import, division, print_function
from collections import namedtuple
import logging
from PySide.QtCore import Qt
from mceditlib import nbt
from PySide import QtGui, QtCore
from mcedit2.util.load_ui import registerCustomWidget
log = logging.getLogger(__name__)
class PropertyListItemDelegate(QtGui.QStyledItemDelegate):
    """Item delegate that builds type-appropriate editor widgets for
    PropertyListModel rows: spin boxes for int/float, a check box for bool,
    a combo box for choice lists, and a plain-text edit for strings.

    NOTE(review): uses the Python 2 ``unicode`` builtin — this module
    predates Python 3.
    """

    def __init__(self, *args, **kwargs):
        super(PropertyListItemDelegate, self).__init__(*args, **kwargs)

    def createEditor(self, parent, option, index):
        """Create an editor widget matching the valueType of the row at ``index``."""
        model = index.model()
        # Each model row is a PropertyListEntry: (tagName, displayName, valueType, min, max).
        tagName, displayName, valueType, min, max = model.properties[index.row()]
        if valueType is int:
            valueWidget = QtGui.QSpinBox()
            valueWidget.setMinimum(min)
            valueWidget.setMaximum(max)
        elif valueType is float:
            valueWidget = QtGui.QDoubleSpinBox()
            valueWidget.setMinimum(min)
            valueWidget.setMaximum(max)
        elif valueType is bool:
            valueWidget = QtGui.QCheckBox()
        elif isinstance(valueType, list):  # Choice list of (value, displayName) pairs
            valueWidget = QtGui.QComboBox()
            for value, name in valueType:
                valueWidget.addItem(name, value)
        elif valueType is unicode:
            valueWidget = QtGui.QPlainTextEdit()
        else:
            raise TypeError("Can't create attribute widgets for %s yet" % valueType)

        valueWidget.setParent(parent)
        return valueWidget

    def setEditorData(self, editor, index):
        """Load the current NBT tag value from the model into ``editor``."""
        model = index.model()
        rootTag = model.rootTag
        tagName, displayName, valueType, min, max = model.properties[index.row()]
        if valueType is int:
            editor.setValue(rootTag[tagName].value)
        elif valueType is float:
            editor.setValue(rootTag[tagName].value)
        elif valueType is bool:
            editor.setChecked(rootTag[tagName].value)
        elif isinstance(valueType, list):  # Choice list
            currentValue = rootTag[tagName].value
            try:
                # Match the stored value against the choice list's values.
                currentIndex = [v for v, n in valueType].index(currentValue)
                editor.setCurrentIndex(currentIndex)
            except ValueError:
                # Value is not one of the known choices; surface it anyway.
                editor.addItem("Unknown value %s" % currentValue, currentValue)
        elif valueType is unicode:
            editor.setPlainText(rootTag[tagName].value)
        else:
            raise TypeError("Unknown valueType in setEditorData (check this in addNBTProperty, dummy)")

    def setModelData(self, editor, model, index):
        """Read the edited value out of ``editor`` and push it into the model."""
        tagName, displayName, valueType, min, max = model.properties[index.row()]
        rootTag = model.rootTag
        if valueType is int:
            value = int(editor.value())
        elif valueType is float:
            value = float(editor.value())
        elif valueType is bool:
            value = editor.isChecked()
        elif isinstance(valueType, list):  # Choice list: map combo index back to value
            value = valueType[editor.currentIndex()][0]
        elif valueType is unicode:
            value = editor.plainText()
        else:
            raise TypeError("Unknown valueType in setModelData (check this in addNBTProperty, dummy)")
        model.setData(index, value)
class PropertyListEntry(namedtuple('PropertyListEntry', 'tagName displayName valueType min max')):
    """Immutable record describing one editable NBT property row."""
class PropertyListModel(QtCore.QAbstractItemModel):
    """Two-column (name, value) Qt item model exposing selected NBT tags of
    ``rootTag`` for editing.

    NOTE(review): written for Python 2 (``unicode`` builtin) and PySide.
    """

    # Emitted as (tagName, newValue) whenever an edit changes a tag's value.
    propertyChanged = QtCore.Signal(unicode, object)

    def __init__(self, rootTag):
        super(PropertyListModel, self).__init__()
        self.rootTag = rootTag  # NBT compound tag backing the model
        self.properties = []  # list of PropertyListEntry rows

    def addNBTProperty(self, tagName, valueType=None, min=None, max=None, displayName=None):
        """Register ``tagName`` as an editable row.

        ``valueType`` defaults to int; ``min``/``max`` default to the numeric
        range of the underlying NBT tag type. Tags missing from ``rootTag``
        are silently ignored.
        """
        if displayName is None:
            displayName = tagName
        if valueType is None:
            valueType = int
        if tagName not in self.rootTag:
            return

        tag = self.rootTag[tagName]
        # Derive the editable range from the tag's storage width.
        if tag.tagID == nbt.ID_BYTE:
            tagMin = -(1 << 7)
            tagMax = (1 << 7) - 1
        elif tag.tagID == nbt.ID_SHORT:
            tagMin = -(1 << 15)
            tagMax = (1 << 15) - 1
        elif tag.tagID == nbt.ID_INT:
            tagMin = -(1 << 31)
            tagMax = (1 << 31) - 1
        else:  # tag.tagID == nbt.ID_LONG, ID_FLOAT, ID_DOUBLE
            # tagMin = -(1 << 63)  # xxxx 64-bit spinbox
            # tagMax = (1 << 63) - 1
            tagMin = -(1 << 31)
            tagMax = (1 << 31) - 1

        if min is None:
            min = tagMin
        if max is None:
            max = tagMax

        self.properties.append(PropertyListEntry(tagName, displayName, valueType, min, max))

    def columnCount(self, index):
        """Two columns: property name and property value."""
        return 2

    def data(self, index, role=Qt.DisplayRole):
        """Return the display name (column 0) or current tag value (column 1)."""
        if not index.isValid():
            return None
        entry = self.properties[index.row()]
        if role in (Qt.DisplayRole, Qt.EditRole):
            if index.column() == 0:
                return entry.displayName
            else:
                value = self.rootTag[entry.tagName].value
                if isinstance(entry.valueType, (list, tuple)):
                    # Choice list: show the human-readable name for the value.
                    try:
                        return entry.valueType[value][1]
                    except IndexError:
                        return "Unknown value %s" % value
                else:
                    return value

        # if role == Qt.CheckStateRole:
        #     if entry.valueType is not bool:
        #         return -1
        #     value = self.rootTag[entry.tagName].value
        #     return bool(value)

    def flags(self, index):
        """Only the value column (1) is editable."""
        if not index.isValid():
            return 0
        flags = Qt.ItemIsEnabled | Qt.ItemIsSelectable
        if index.column() == 1:
            flags |= Qt.ItemIsEditable
            entry = self.properties[index.row()]
            #if entry.valueType is bool:
            #    flags |= Qt.ItemIsUserCheckable

        return flags

    def headerData(self, section, orientation, role=Qt.DisplayRole):
        """Column headers for the two columns."""
        if orientation == Qt.Horizontal and role == Qt.DisplayRole:
            return ("Name", "Value")[section]
        return None

    # NOTE: the QModelIndex() defaults below are evaluated once at definition
    # time; that is the conventional invalid-index sentinel in Qt code.
    def index(self, row, column, parent=QtCore.QModelIndex()):
        """Flat model: children only exist under the invalid root index."""
        if parent.isValid():
            return QtCore.QModelIndex()
        return self.createIndex(row, column, None)

    def parent(self, index):
        """Flat model: every item's parent is the invalid root index."""
        return QtCore.QModelIndex()

    def rowCount(self, parent=QtCore.QModelIndex()):
        """One row per registered property; no nested rows."""
        if parent.isValid():
            return 0
        return len(self.properties)

    def setData(self, index, value, role=Qt.EditRole):
        """Write ``value`` into the backing NBT tag and notify listeners."""
        row = index.row()
        entry = self.properties[row]

        if self.rootTag[entry.tagName].value != value:
            self.rootTag[entry.tagName].value = value
            self.propertyChanged.emit(entry.tagName, value)
            self.dataChanged.emit(index, index)
@registerCustomWidget
class PropertyListWidget(QtGui.QTreeView):
    """Tree view that edits PropertyListModel rows in place via
    PropertyListItemDelegate."""

    def __init__(self, *args, **kwargs):
        super(PropertyListWidget, self).__init__(*args, **kwargs)
        # Begin editing as soon as the current item changes, in addition
        # to the view's default triggers.
        self.setEditTriggers(self.CurrentChanged | self.editTriggers())
        self.setItemDelegate(PropertyListItemDelegate())
| [
"logging.getLogger",
"PySide.QtCore.QModelIndex",
"PySide.QtGui.QSpinBox",
"collections.namedtuple",
"PySide.QtGui.QCheckBox",
"PySide.QtGui.QComboBox",
"PySide.QtGui.QDoubleSpinBox",
"PySide.QtGui.QPlainTextEdit",
"PySide.QtCore.Signal"
] | [((289, 316), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (306, 316), False, 'import logging\n'), ((3256, 3328), 'collections.namedtuple', 'namedtuple', (['"""PropertyListEntry"""', '"""tagName displayName valueType min max"""'], {}), "('PropertyListEntry', 'tagName displayName valueType min max')\n", (3266, 3328), False, 'from collections import namedtuple\n'), ((3415, 3445), 'PySide.QtCore.Signal', 'QtCore.Signal', (['unicode', 'object'], {}), '(unicode, object)\n', (3428, 3445), False, 'from PySide import QtGui, QtCore\n'), ((6159, 6179), 'PySide.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (6177, 6179), False, 'from PySide import QtGui, QtCore\n'), ((6348, 6368), 'PySide.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (6366, 6368), False, 'from PySide import QtGui, QtCore\n'), ((6400, 6420), 'PySide.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (6418, 6420), False, 'from PySide import QtGui, QtCore\n'), ((711, 727), 'PySide.QtGui.QSpinBox', 'QtGui.QSpinBox', ([], {}), '()\n', (725, 727), False, 'from PySide import QtGui, QtCore\n'), ((6230, 6250), 'PySide.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (6248, 6250), False, 'from PySide import QtGui, QtCore\n'), ((868, 890), 'PySide.QtGui.QDoubleSpinBox', 'QtGui.QDoubleSpinBox', ([], {}), '()\n', (888, 890), False, 'from PySide import QtGui, QtCore\n'), ((1030, 1047), 'PySide.QtGui.QCheckBox', 'QtGui.QCheckBox', ([], {}), '()\n', (1045, 1047), False, 'from PySide import QtGui, QtCore\n'), ((1132, 1149), 'PySide.QtGui.QComboBox', 'QtGui.QComboBox', ([], {}), '()\n', (1147, 1149), False, 'from PySide import QtGui, QtCore\n'), ((1303, 1325), 'PySide.QtGui.QPlainTextEdit', 'QtGui.QPlainTextEdit', ([], {}), '()\n', (1323, 1325), False, 'from PySide import QtGui, QtCore\n')] |
import pygame
class UI:
    """Heads-up display drawing the player's health bar and coin counter."""

    def __init__(self, surface):
        # Target surface for all HUD drawing.
        self.display_surface = surface

        # Health bar graphics and geometry.
        self.health_border = pygame.image.load(
            'graphics/ui/Border_0.png').convert_alpha()
        self.health_bar = pygame.image.load(
            'graphics/ui/Health_0.png').convert_alpha()
        self.health_bar_topleft = (50, 41)
        self.bar_max_width = 152
        self.bar_height = 3

        # Coin icon and counter font.
        self.coin = pygame.image.load('graphics/ui/coin.png').convert_alpha()
        self.coin_rect = self.coin.get_rect(topleft=(50, 61))
        self.font = pygame.font.Font(None, 30)

    def show_health(self, current, full):
        """Draw the health bar filled proportionally to current / full."""
        self.display_surface.blit(self.health_bar, (25, 10))
        self.display_surface.blit(self.health_border, (25, 10))
        fill_width = self.bar_max_width * (current / full)
        fill_rect = pygame.Rect(
            self.health_bar_topleft, (fill_width, self.bar_height))
        pygame.draw.rect(self.display_surface, '#dc4949', fill_rect)

    def show_coins(self, amount):
        """Draw the coin icon with the current coin count beside it."""
        self.display_surface.blit(self.coin, self.coin_rect)
        amount_surf = self.font.render(str(amount), False, 'black')
        amount_rect = amount_surf.get_rect(
            midleft=(self.coin_rect.right + 4, self.coin_rect.centery))
        self.display_surface.blit(amount_surf, amount_rect)
| [
"pygame.image.load",
"pygame.font.Font",
"pygame.draw.rect",
"pygame.Rect"
] | [((638, 664), 'pygame.font.Font', 'pygame.font.Font', (['None', '(30)'], {}), '(None, 30)\n', (654, 664), False, 'import pygame\n'), ((984, 1058), 'pygame.Rect', 'pygame.Rect', (['self.health_bar_topleft', '(current_bar_width, self.bar_height)'], {}), '(self.health_bar_topleft, (current_bar_width, self.bar_height))\n', (995, 1058), False, 'import pygame\n'), ((1084, 1150), 'pygame.draw.rect', 'pygame.draw.rect', (['self.display_surface', '"""#dc4949"""', 'health_bar_rect'], {}), "(self.display_surface, '#dc4949', health_bar_rect)\n", (1100, 1150), False, 'import pygame\n'), ((172, 217), 'pygame.image.load', 'pygame.image.load', (['"""graphics/ui/Border_0.png"""'], {}), "('graphics/ui/Border_0.png')\n", (189, 217), False, 'import pygame\n'), ((275, 320), 'pygame.image.load', 'pygame.image.load', (['"""graphics/ui/Health_0.png"""'], {}), "('graphics/ui/Health_0.png')\n", (292, 320), False, 'import pygame\n'), ((496, 537), 'pygame.image.load', 'pygame.image.load', (['"""graphics/ui/coin.png"""'], {}), "('graphics/ui/coin.png')\n", (513, 537), False, 'import pygame\n')] |
# Generated by Django 2.0.3 on 2018-04-18 19:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``initial_sell_price`` to the ``singlecardpurchase`` model.

    Existing rows are backfilled with 0; ``preserve_default=False`` means
    the default is only used for this backfill and is not kept on the field.
    """

    dependencies = [
        ('Buy', '0002_standardset'),
    ]

    operations = [
        migrations.AddField(
            model_name='singlecardpurchase',
            name='initial_sell_price',
            # Monetary amount: up to 9999.99 (6 digits, 2 decimal places).
            field=models.DecimalField(decimal_places=2, default=0, max_digits=6),
            preserve_default=False,
        ),
    ]
| [
"django.db.models.DecimalField"
] | [((347, 409), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'default': '(0)', 'max_digits': '(6)'}), '(decimal_places=2, default=0, max_digits=6)\n', (366, 409), False, 'from django.db import migrations, models\n')] |
__version__ = '0.6.2'
__version_info__ = tuple(map(int, __version__.split('.')))
from django.utils.translation import ugettext_lazy as _
def __activate_social_auth_monkeypatch():
    """Patch python-social-auth backend classes with required-field metadata.

    Adds ``REQUIRED_FIELD_NAME`` / ``REQUIRED_FIELD_VERBOSE_NAME`` class
    attributes: absent by default on ``BaseAuth``, and set for the OpenID
    and LiveJournal backends.
    """
    from social_core.backends.base import BaseAuth
    from social_core.backends.livejournal import LiveJournalOpenId
    from social_core.backends.open_id import OPENID_ID_FIELD, OpenIdAuth

    BaseAuth.REQUIRED_FIELD_NAME = None
    BaseAuth.REQUIRED_FIELD_VERBOSE_NAME = None

    OpenIdAuth.REQUIRED_FIELD_NAME = OPENID_ID_FIELD
    OpenIdAuth.REQUIRED_FIELD_VERBOSE_NAME = _('OpenID identity')

    LiveJournalOpenId.REQUIRED_FIELD_NAME = 'openid_lj_user'
    LiveJournalOpenId.REQUIRED_FIELD_VERBOSE_NAME = _('LiveJournal username')
__activate_social_auth_monkeypatch()
| [
"django.utils.translation.ugettext_lazy"
] | [((563, 583), 'django.utils.translation.ugettext_lazy', '_', (['"""OpenID identity"""'], {}), "('OpenID identity')\n", (564, 583), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((698, 723), 'django.utils.translation.ugettext_lazy', '_', (['"""LiveJournal username"""'], {}), "('LiveJournal username')\n", (699, 723), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
import subprocess
__all__ = ['view_env', 'create_env', 'remove_env']
def view_env():
    """Return a mapping of conda environment names to their paths.

    Runs ``conda info -e`` and parses its tabular output, skipping the
    two header lines.

    Returns
    -------
    dict
        ``{environment_name: environment_path}``
    """
    # Plain string: the previous f-string had no placeholders (flake8 F541).
    cmd = "conda info -e"
    out = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]
    lines = out.decode('utf-8').strip().split('\n')[2:]
    rows = [line.split(' ') for line in lines]
    # First token is the env name, last token is its absolute path.
    return {row[0]: row[-1] for row in rows}
def create_env(name, version):
    """Create virtual environment.

    Args:
        name: virtual environment.
        version: python version.
    Return:
        log info.
    """
    # Bring conda itself up to date before creating the environment.
    update_cmd = 'conda update -n base -c defaults conda'
    subprocess.Popen(update_cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]
    if name in view_env():
        return 'Virtual environment already exists.'
    create_cmd = f"conda create -n {name} python={version} -y"
    subprocess.Popen(create_cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]
    # Confirm the environment now exists.
    if name in view_env():
        return 'Virtual environment successfully created.'
    return 'Virtual environment failed created.'
def remove_env(name):
    """Remove virtual environment.

    Args:
        name: virtual environment.
    Return:
        log info.
    """
    if name not in view_env():
        return 'Virtual environment not exists.'
    subprocess.Popen(
        f'conda remove -n {name} --all', stdout=subprocess.PIPE, shell=True
    ).communicate()[0]
    # Confirm the environment is gone.
    if name not in view_env():
        return 'Virtual environment successfully removed.'
    return 'Virtual environment failed removed.'
"subprocess.Popen"
] | [((162, 219), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'shell': '(True)'}), '(cmd, stdout=subprocess.PIPE, shell=True)\n', (178, 219), False, 'import subprocess\n'), ((599, 656), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'shell': '(True)'}), '(cmd, stdout=subprocess.PIPE, shell=True)\n', (615, 656), False, 'import subprocess\n'), ((824, 881), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'shell': '(True)'}), '(cmd, stdout=subprocess.PIPE, shell=True)\n', (840, 881), False, 'import subprocess\n'), ((1326, 1383), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'shell': '(True)'}), '(cmd, stdout=subprocess.PIPE, shell=True)\n', (1342, 1383), False, 'import subprocess\n')] |
from test.mock.mock_inversion import MockMapper
from test.mock.mock_galaxy import MockHyperGalaxy
class MockTracer(object):
    """Lightweight tracer stub exposing precomputed 1D images and capability flags."""

    def __init__(self, unblurred_image_1d, blurring_image_1d, has_light_profile, has_pixelization, has_hyper_galaxy,
                 has_grid_mappers=False):
        # Store the precomputed images and feature flags verbatim.
        self.unblurred_image_1d = unblurred_image_1d
        self.blurring_image_1d = blurring_image_1d
        self.has_light_profile = has_light_profile
        self.has_pixelization = has_pixelization
        self.has_hyper_galaxy = has_hyper_galaxy
        self.has_grid_mappers = has_grid_mappers

    @property
    def all_planes(self):
        """The mock models no planes."""
        return []

    @property
    def image_plane_image_1d(self):
        """The precomputed unblurred image."""
        return self.unblurred_image_1d

    @property
    def image_plane_images_1d(self):
        """Single-plane list wrapping the unblurred image."""
        return [self.unblurred_image_1d]

    @property
    def image_plane_blurring_image_1d(self):
        """The precomputed blurring image."""
        return self.blurring_image_1d

    @property
    def image_plane_blurring_images_1d(self):
        """Single-plane list wrapping the blurring image."""
        return [self.blurring_image_1d]

    @property
    def mappers_of_planes(self):
        return [MockMapper()]

    @property
    def regularization_of_planes(self):
        return [MockMapper()]

    @property
    def hyper_galaxies(self):
        return [MockHyperGalaxy(), MockHyperGalaxy()]
"test.mock.mock_galaxy.MockHyperGalaxy",
"test.mock.mock_inversion.MockMapper"
] | [((1093, 1105), 'test.mock.mock_inversion.MockMapper', 'MockMapper', ([], {}), '()\n', (1103, 1105), False, 'from test.mock.mock_inversion import MockMapper\n'), ((1178, 1190), 'test.mock.mock_inversion.MockMapper', 'MockMapper', ([], {}), '()\n', (1188, 1190), False, 'from test.mock.mock_inversion import MockMapper\n'), ((1253, 1270), 'test.mock.mock_galaxy.MockHyperGalaxy', 'MockHyperGalaxy', ([], {}), '()\n', (1268, 1270), False, 'from test.mock.mock_galaxy import MockHyperGalaxy\n'), ((1272, 1289), 'test.mock.mock_galaxy.MockHyperGalaxy', 'MockHyperGalaxy', ([], {}), '()\n', (1287, 1289), False, 'from test.mock.mock_galaxy import MockHyperGalaxy\n')] |
"""Blog models."""
from typing import Union
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.utils import timezone
from django.utils.text import Truncator, slugify
from markdownx.models import MarkdownxField
from .dbfunctions import Unnest
from .signals import post_published
from .utils import markdown_unformatted
class PostManager(models.Manager):
    """Custom object manager for blog posts."""

    def published(self) -> models.QuerySet:
        """Return only posts that carry a publication timestamp."""
        qs = self.get_queryset()
        return qs.filter(published__isnull=False)
class Post(models.Model):
    """Represents a blog post."""

    objects = PostManager()

    # Maximum length of the auto-generated URL slug.
    SLUG_MAX_LENGTH = 80

    title = models.CharField(max_length=300)
    slug = models.SlugField(max_length=SLUG_MAX_LENGTH, unique=True)
    description = models.TextField(
        default="", blank=True, help_text="Used for social cards and RSS."
    )
    content = MarkdownxField(blank=True, default="")
    image_url = models.URLField(blank=True, null=True)
    image_caption = models.TextField(null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    published = models.DateTimeField(blank=True, null=True)

    class Meta:  # noqa
        ordering = ("-published",)
        # NOTE: Django uses B-Tree indexes, enough for small datasets.
        indexes = [
            # `created` is used for ordering, which can be sped up by an index.
            models.Index(fields=["created"]),
            # `published` is filtered on a lot (to retrieve drafts)
            # and does not change very often.
            models.Index(fields=["published"]),
        ]

    def save(self, *args, **kwargs):
        """Set slug when creating a post."""
        if not self.pk and not self.slug:
            self.slug = slugify(self.title)[: self.SLUG_MAX_LENGTH]
        return super().save(*args, **kwargs)

    def __str__(self) -> str:
        """Represent by its title."""
        return str(self.title)

    def publish(self, request=None):
        """Publish a blog post by setting its published date.

        Saves the post and emits the ``post_published`` signal.
        """
        self.published = timezone.now()
        self.save()
        post_published.send(sender=Post, instance=self, request=request)

    @property
    def is_draft(self) -> bool:
        """Return whether the post is a draft."""
        return self.published is None

    @property
    def preview(self) -> str:
        """Return an unformatted preview of the post contents."""
        return Truncator(markdown_unformatted(self.content)).chars(200)

    def _find_published(self, order_by, **kwargs):
        """Filter and get the first published item in the queryset, or None."""
        if not self.published:
            return None
        qs = Post.objects.published().order_by(order_by).filter(**kwargs)
        # Explicit conditional instead of the legacy `a and b or c` idiom.
        return qs[0] if qs else None

    @property
    def previous(self) -> Union["Post", None]:
        """Return the previous published post.

        If the post is not published or there is no previous published post,
        returns None.
        """
        return self._find_published("-published", published__lt=self.published)

    @property
    def next(self) -> Union["Post", None]:
        """Return the next published post.

        If the post is not published or there is no next published post,
        returns None.
        """
        return self._find_published("published", published__gt=self.published)

    def get_absolute_url(self) -> str:
        """Return the absolute URL path of a blog post."""
        return f"/{self.slug}/"

    @classmethod
    def list_absolute_url(cls) -> str:
        """Return the absolute URL path for the list of posts."""
        return "/"
class TagManager(models.Manager):
    """Custom manager for tag objects."""

    def with_post_counts(self, published_only: bool = False):
        """Annotate each tag with a ``post_count`` attribute.

        When ``published_only`` is set, only published posts are counted.
        """
        published_filter = (
            models.Q(posts__published__isnull=False) if published_only else None
        )
        return self.get_queryset().annotate(
            post_count=models.Count("posts", filter=published_filter)
        )
class Tag(models.Model):
    """Represents a group of posts related to similar content."""

    objects = TagManager()

    # Short display name of the tag (max 20 characters).
    name = models.CharField(max_length=20)
    # Posts carrying this tag; reverse accessor is `post.tags`.
    posts = models.ManyToManyField(to=Post, related_name="tags")

    def __str__(self) -> str:
        """Represent the tag by its name."""
        return str(self.name)
| [
"django.db.models.Index",
"django.utils.text.slugify",
"django.db.models.TextField",
"django.db.models.Count",
"django.db.models.ManyToManyField",
"django.db.models.DateTimeField",
"django.utils.timezone.now",
"django.db.models.SlugField",
"django.db.models.CharField",
"django.db.models.URLField",... | [((741, 773), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (757, 773), False, 'from django.db import models\n'), ((785, 842), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': 'SLUG_MAX_LENGTH', 'unique': '(True)'}), '(max_length=SLUG_MAX_LENGTH, unique=True)\n', (801, 842), False, 'from django.db import models\n'), ((861, 950), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""', 'blank': '(True)', 'help_text': '"""Used for social cards and RSS."""'}), "(default='', blank=True, help_text=\n 'Used for social cards and RSS.')\n", (877, 950), False, 'from django.db import models\n'), ((974, 1012), 'markdownx.models.MarkdownxField', 'MarkdownxField', ([], {'blank': '(True)', 'default': '""""""'}), "(blank=True, default='')\n", (988, 1012), False, 'from markdownx.models import MarkdownxField\n'), ((1029, 1067), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1044, 1067), False, 'from django.db import models\n'), ((1088, 1127), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1104, 1127), False, 'from django.db import models\n'), ((1142, 1181), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1162, 1181), False, 'from django.db import models\n'), ((1197, 1232), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1217, 1232), False, 'from django.db import models\n'), ((1249, 1292), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1269, 1292), False, 'from django.db import models\n'), ((4420, 4451), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), 
'(max_length=20)\n', (4436, 4451), False, 'from django.db import models\n'), ((4464, 4516), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': 'Post', 'related_name': '"""tags"""'}), "(to=Post, related_name='tags')\n", (4486, 4516), False, 'from django.db import models\n'), ((2210, 2224), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2222, 2224), False, 'from django.utils import timezone\n'), ((4168, 4214), 'django.db.models.Count', 'models.Count', (['"""posts"""'], {'filter': 'published_filter'}), "('posts', filter=published_filter)\n", (4180, 4214), False, 'from django.db import models\n'), ((1536, 1568), 'django.db.models.Index', 'models.Index', ([], {'fields': "['created']"}), "(fields=['created'])\n", (1548, 1568), False, 'from django.db import models\n'), ((1696, 1730), 'django.db.models.Index', 'models.Index', ([], {'fields': "['published']"}), "(fields=['published'])\n", (1708, 1730), False, 'from django.db import models\n'), ((4051, 4091), 'django.db.models.Q', 'models.Q', ([], {'posts__published__isnull': '(False)'}), '(posts__published__isnull=False)\n', (4059, 4091), False, 'from django.db import models\n'), ((1893, 1912), 'django.utils.text.slugify', 'slugify', (['self.title'], {}), '(self.title)\n', (1900, 1912), False, 'from django.utils.text import Truncator, slugify\n')] |
import asyncio
import math
import networkx as nx
import ccxt.async_support as ccxt
import datetime
import logging
from .logging_utils import FormatForLogAdapter
__all__ = [
'FeesNotAvailable',
'create_exchange_graph',
'load_exchange_graph',
]
adapter = FormatForLogAdapter(logging.getLogger('peregrinearb.utils.single_exchange'))
class FeesNotAvailable(Exception):
    """Error type for unavailable exchange fee information.

    NOTE(review): not raised within this module's visible code; presumably
    raised by callers elsewhere in the package.
    """
    pass
def create_exchange_graph(exchange: ccxt.Exchange):
    """Build an undirected graph of *exchange*'s markets.

    Nodes are currencies; each edge is one market, stored under the
    ``market_name`` edge attribute. ``exchange.load_markets()`` must have
    been called already — a ccxt error is raised otherwise.
    """
    graph = nx.Graph()
    for symbol in exchange.symbols:
        parts = symbol.split('/')
        # Skip markets ccxt reports in a non-standard format
        # (e.g. FX_BTC_JPY on BitFlyer).
        if len(parts) != 2:
            continue
        base_currency, quote_currency = parts
        graph.add_edge(base_currency, quote_currency, market_name=symbol)
    return graph
async def load_exchange_graph(exchange, name=True, fees=True, suppress=None, depth=False, tickers=None) -> nx.DiGraph:
    """
    Returns a networkx DiGraph populated with the current ask and bid prices for each market in graph (represented by
    edges). If depth, also adds an attribute 'depth' to each edge which represents the current volume of orders
    available at the price represented by the 'weight' attribute of each edge.
    """
    if suppress is None:
        suppress = ['markets']

    # When `name` is truthy, `exchange` is a ccxt exchange id string;
    # instantiate the corresponding async exchange class.
    if name:
        exchange = getattr(ccxt, exchange)()

    if tickers is None:
        adapter.info('Fetching tickers')
        tickers = await exchange.fetch_tickers()
        adapter.info('Fetched tickers')

    market_count = len(tickers)
    adapter.info('Loading exchange graph', marketCount=market_count)

    adapter.debug('Initializing empty graph with exchange_name and timestamp attributes')
    graph = nx.DiGraph()

    # todo: get exchange's server time?
    graph.graph['exchange_name'] = exchange.id
    graph.graph['datetime'] = datetime.datetime.now(tz=datetime.timezone.utc)
    adapter.debug('Initialized empty graph with exchange_name and timestamp attributes')

    async def add_edges():
        # One concurrent task per market; each adds a weighted edge
        # (and optionally depth) to `graph`.
        tasks = [_add_weighted_edge_to_graph(exchange, market_name, graph, log=True, fees=fees,
                                             suppress=suppress, ticker=ticker, depth=depth, )
                 for market_name, ticker in tickers.items()]
        await asyncio.wait(tasks)

    if fees:
        # load_markets() can be rate limited or temporarily unavailable;
        # retry up to 20 times, re-raising on the final attempt.
        for i in range(20):
            try:
                adapter.info('Loading fees', iteration=i)
                # must load markets to get fees
                await exchange.load_markets()
            except (ccxt.DDoSProtection, ccxt.RequestTimeout) as e:
                if i == 19:
                    adapter.warning('Rate limited on final iteration, raising error', iteration=i)
                    raise e
                adapter.warning('Rate limited when loading markets', iteration=i)
                await asyncio.sleep(0.1)
            except ccxt.ExchangeNotAvailable as e:
                if i == 19:
                    adapter.warning('Cannot load markets due to ExchangeNotAvailable error, '
                                    'graph will not be loaded.', iteration=i)
                    raise e
                adapter.warning('Received ExchangeNotAvailable error when loading markets', iteration=i)
            else:
                break

        adapter.info('Loaded fees', iteration=i, marketCount=market_count)

        currency_count = len(exchange.currencies)
        adapter.info('Adding data to graph', marketCount=market_count, currencyCount=currency_count)
        await add_edges()
        adapter.info('Added data to graph', marketCount=market_count, currencyCount=currency_count)
    else:
        adapter.info('Adding data to graph', marketCount=market_count)
        await add_edges()
        adapter.info('Added data to graph', marketCount=market_count)

    adapter.debug('Closing connection')
    await exchange.close()
    adapter.debug('Closed connection')

    adapter.info('Loaded exchange graph')
    return graph
async def _add_weighted_edge_to_graph(exchange: ccxt.Exchange, market_name: str, graph: nx.DiGraph, log=True,
fees=False, suppress=None, ticker=None, depth=False, ):
"""
todo: add global variable to bid_volume/ ask_volume to see if all tickers (for a given exchange) have value == None
Returns a Networkx DiGraph populated with the current ask and bid prices for each market in graph (represented by
edges).
:param exchange: A ccxt Exchange object
:param market_name: A string representing a cryptocurrency market formatted like so:
'{base_currency}/{quote_currency}'
:param graph: A Networkx DiGraph upon
:param log: If the edge weights given to the graph should be the negative logarithm of the ask and bid prices. This
is necessary to calculate arbitrage opportunities.
:param fees: If fees should be taken into account for prices.
:param suppress: A list or set which tells which types of warnings to not throw. Accepted elements are 'markets'.
:param ticker: A dictionary representing a market as returned by ccxt's Exchange's fetch_ticker method
:param depth: If True, also adds an attribute 'depth' to each edge which represents the current volume of orders
available at the price represented by the 'weight' attribute of each edge.
"""
adapter.debug('Adding edge to graph', market=market_name)
if ticker is None:
try:
adapter.info('Fetching ticker', market=market_name)
ticker = await exchange.fetch_ticker(market_name)
adapter.info('Fetched ticker', market=market_name)
# any error is solely because of fetch_ticker
except:
if 'markets' not in suppress:
adapter.warning('Market is unavailable at this time. It will not be included in the graph.',
market=market_name)
return
if fees:
if 'taker' in exchange.markets[market_name]:
# we always take the taker side because arbitrage depends on filling orders
# sell_fee_dict = exchange.calculate_fee(market_name, 'limit', 'sell', 0, 0, 'taker')
# buy_fee_dict = exchange.calculate_fee(market_name, 'limit', 'buy', 0, 0, 'taker')
fee = exchange.markets[market_name]['taker']
else:
if 'fees' not in suppress:
adapter.warning("The fees for {} have not yet been implemented into ccxt's uniform API."
.format(exchange))
raise FeesNotAvailable('Fees are not available for {} on {}'.format(market_name, exchange.id))
else:
fee = 0.002
else:
fee = 0
fee_scalar = 1 - fee
try:
bid_rate = ticker['bid']
ask_rate = ticker['ask']
if depth:
bid_volume = ticker['bidVolume']
ask_volume = ticker['askVolume']
if bid_volume is None:
adapter.warning('Market is unavailable because its bid volume was given as None. '
'It will not be included in the graph.', market=market_name)
return
if ask_volume is None:
adapter.warning('Market is unavailable because its ask volume was given as None. '
'It will not be included in the graph.', market=market_name)
return
# ask and bid == None if this market is non existent.
except TypeError:
adapter.warning('Market is unavailable at this time. It will not be included in the graph.',
market=market_name)
return
# Exchanges give asks and bids as either 0 or None when they do not exist.
# todo: should we account for exchanges upon which an ask exists but a bid does not (and vice versa)? Would this
# cause bugs?
if ask_rate == 0 or bid_rate == 0 or ask_rate is None or bid_rate is None:
adapter.warning('Market is unavailable at this time. It will not be included in the graph.',
market=market_name)
return
try:
base_currency, quote_currency = market_name.split('/')
# if ccxt returns a market in incorrect format (e.g FX_BTC_JPY on BitFlyer)
except ValueError:
if 'markets' not in suppress:
adapter.warning('Market is unavailable at this time due to incorrect formatting. '
'It will not be included in the graph.', market=market_name)
return
if log:
if depth:
graph.add_edge(base_currency, quote_currency, weight=-math.log(fee_scalar * bid_rate),
depth=-math.log(bid_volume), market_name=market_name, trade_type='SELL',
fee=fee, volume=bid_volume, no_fee_rate=bid_rate)
graph.add_edge(quote_currency, base_currency, weight=-math.log(fee_scalar * 1 / ask_rate),
depth=-math.log(ask_volume * ask_rate), market_name=market_name, trade_type='BUY',
fee=fee, volume=ask_volume, no_fee_rate=ask_rate)
else:
graph.add_edge(base_currency, quote_currency, weight=-math.log(fee_scalar * bid_rate),
market_name=market_name, trade_type='SELL', fee=fee, no_fee_rate=bid_rate)
graph.add_edge(quote_currency, base_currency, weight=-math.log(fee_scalar * 1 / ask_rate),
market_name=market_name, trade_type='BUY', fee=fee, no_fee_rate=ask_rate)
else:
if depth:
graph.add_edge(base_currency, quote_currency, weight=fee_scalar * bid_rate, depth=bid_volume,
market_name=market_name, trade_type='SELL', fee=fee, volume=bid_volume, no_fee_rate=bid_rate)
graph.add_edge(quote_currency, base_currency, weight=fee_scalar * 1 / ask_rate, depth=ask_volume,
market_name=market_name, trade_type='BUY', fee=fee, volume=ask_volume, no_fee_rate=ask_rate)
else:
graph.add_edge(base_currency, quote_currency, weight=fee_scalar * bid_rate,
market_name=market_name, trade_type='SELL', fee=fee, no_fee_rate=bid_rate)
graph.add_edge(quote_currency, base_currency, weight=fee_scalar * 1 / ask_rate,
market_name=market_name, trade_type='BUY', fee=fee, no_fee_rate=ask_rate)
adapter.debug('Added edge to graph', market=market_name)
| [
"logging.getLogger",
"networkx.DiGraph",
"asyncio.wait",
"networkx.Graph",
"math.log",
"datetime.datetime.now",
"asyncio.sleep"
] | [((287, 342), 'logging.getLogger', 'logging.getLogger', (['"""peregrinearb.utils.single_exchange"""'], {}), "('peregrinearb.utils.single_exchange')\n", (304, 342), False, 'import logging\n'), ((644, 654), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (652, 654), True, 'import networkx as nx\n'), ((1927, 1939), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (1937, 1939), True, 'import networkx as nx\n'), ((2058, 2105), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'datetime.timezone.utc'}), '(tz=datetime.timezone.utc)\n', (2079, 2105), False, 'import datetime\n'), ((2488, 2507), 'asyncio.wait', 'asyncio.wait', (['tasks'], {}), '(tasks)\n', (2500, 2507), False, 'import asyncio\n'), ((3046, 3064), 'asyncio.sleep', 'asyncio.sleep', (['(0.1)'], {}), '(0.1)\n', (3059, 3064), False, 'import asyncio\n'), ((8825, 8856), 'math.log', 'math.log', (['(fee_scalar * bid_rate)'], {}), '(fee_scalar * bid_rate)\n', (8833, 8856), False, 'import math\n'), ((8892, 8912), 'math.log', 'math.log', (['bid_volume'], {}), '(bid_volume)\n', (8900, 8912), False, 'import math\n'), ((9101, 9136), 'math.log', 'math.log', (['(fee_scalar * 1 / ask_rate)'], {}), '(fee_scalar * 1 / ask_rate)\n', (9109, 9136), False, 'import math\n'), ((9172, 9203), 'math.log', 'math.log', (['(ask_volume * ask_rate)'], {}), '(ask_volume * ask_rate)\n', (9180, 9203), False, 'import math\n'), ((9405, 9436), 'math.log', 'math.log', (['(fee_scalar * bid_rate)'], {}), '(fee_scalar * bid_rate)\n', (9413, 9436), False, 'import math\n'), ((9606, 9641), 'math.log', 'math.log', (['(fee_scalar * 1 / ask_rate)'], {}), '(fee_scalar * 1 / ask_rate)\n', (9614, 9641), False, 'import math\n')] |
from pathlib import Path
import fasttext
if __name__ == "__main__":
targets = [
"red", "black", "orange", "white", "gray/black", # colors
"sony", "apple", "canon", "nikon", "dell", # brands
"32", "an", "the", "4", "to", # numbers and stopwords
"inch", "cm", "oz", "gb", "mb", # measurements
"camera", "gps", "mp3", "iphone", "playstation" # products
]
base = Path("/Users/fredriko/PycharmProjects/search_with_machine_learning_course/workspace/titles")
training_data = base / "titles.txt"
model_file = base / "model_file"
kwargs = {
"input": str(training_data),
"epoch": 100,
"ws": 6,
"minn": 0,
"maxn": 0,
"dim": 150,
"model": "skipgram"
}
for min_count in [25]:
print(f"Training with min_count: {min_count}")
kwargs["minCount"] = min_count
model = fasttext.train_unsupervised(**kwargs)
model.save_model(str(model_file))
for target in targets:
print(f"Target: {target}")
nns = model.get_nearest_neighbors(target, 10)
for nn in nns:
print(f"{nn[1]} -- {round(nn[0], 3)}")
print("\n")
| [
"fasttext.train_unsupervised",
"pathlib.Path"
] | [((418, 520), 'pathlib.Path', 'Path', (['"""/Users/fredriko/PycharmProjects/search_with_machine_learning_course/workspace/titles"""'], {}), "(\n '/Users/fredriko/PycharmProjects/search_with_machine_learning_course/workspace/titles'\n )\n", (422, 520), False, 'from pathlib import Path\n'), ((909, 946), 'fasttext.train_unsupervised', 'fasttext.train_unsupervised', ([], {}), '(**kwargs)\n', (936, 946), False, 'import fasttext\n')] |
import json
import pytest
@pytest.fixture
def test_bib():
with open(".\\test_files\\bib.json") as file:
return json.load(file)
@pytest.fixture
def test_mixed_bib():
with open(".\\test_files\\mixed_bib.json") as file:
return json.load(file)
| [
"json.load"
] | [((126, 141), 'json.load', 'json.load', (['file'], {}), '(file)\n', (135, 141), False, 'import json\n'), ((253, 268), 'json.load', 'json.load', (['file'], {}), '(file)\n', (262, 268), False, 'import json\n')] |
import numpy as np
import pandas as pd
import torch
from physics.protein_os import Protein
import options
from utils import write_pdb, write_pdb_sample, transform_profile, load_protein
from physics.anneal import AnnealCoords, AnnealFrag
# from physics.move import SampleICNext
from physics.grad_minimizer import *
from physics.dynamics import *
import os
import mdtraj as md
from utils import test_setup
import h5py
#################################################
parser = options.get_fold_parser()
args = options.parse_args_and_arch(parser)
device, model, energy_fn, ProteinBase = test_setup(args)
# position_weights = torch.zeros((1, args.seq_len + 1), device=device)
# position_weights[:, 0:5] = 1
# energy_fn.energy_fn.position_weights = position_weights
#################################################
data_path = 'data/fold/cullpdb_val_deep'
protein_sample = pd.read_csv(f'{data_path}/sample.csv')
pdb_selected = protein_sample['pdb'].values
np.random.shuffle(pdb_selected)
fold_engine = args.fold_engine
mode = args.mode
# sample_ic = SampleICNext(mode)
exp_id = args.load_exp[-5:]
save_dir = args.save_dir
# if not os.path.exists(f'data/fold/{exp_id}'):
# os.mkdir(f'data/fold/{exp_id}')
if not os.path.exists(f'data/fold/{save_dir}'):
os.mkdir(f'data/fold/{save_dir}')
for pdb_id in pdb_selected:
seq, coords_native, profile = load_protein(data_path, pdb_id, mode, device, args)
protein_native = Protein(seq, coords_native, profile)
energy_native = protein_native.get_energy(energy_fn).item()
print('energy_native:', energy_native)
rg2, collision = protein_native.get_rad_gyration(coords_native)
print('native radius of gyration square:', rg2.item())
# residue_energy = protein_native.get_residue_energy(energy_fn)
# print(residue_energy)
# write_pdb(seq, coords_native, pdb_id, 'native', exp_id)
protein = Protein(seq, coords_native.clone(), profile.clone())
if args.random_init:
# random_coords_int = sample_ic.random_coords_int(len(seq)-3).to(device)
# protein.update_coords_internal(random_coords_int)
# extend_coords_int = torch.tensor([[5.367, 1.6, 0.0]], device=device).repeat((len(seq)-3, 1))
extend_coords_int = torch.tensor([[5.367, 0.1, 0.0]], device=device).repeat((len(seq)-3, 1))
protein.update_coords_internal(extend_coords_int)
protein.update_cartesian_from_internal()
coords_init = protein.coords
energy_init = protein.get_energy(energy_fn).item()
print('energy_init:', energy_init)
# write_pdb(seq, coords_init, pdb_id, f'init_{mode}', exp_id)
if fold_engine == 'anneal':
# simulated annealing
torch.set_grad_enabled(False)
if args.anneal_type == 'int_one':
annealer = AnnealCoords(energy_fn, protein, mode=mode, ic_move_std=args.ic_move_std,
T_max=args.T_max, T_min=args.T_min, L=args.L)
elif args.anneal_type == 'frag':
frag_file = h5py.File(f'data/fragment/{pdb_id}/{pdb_id}_int.h5', 'r')
query_pos = torch.tensor(frag_file['query_pos'][()], device=device)
frag_int = torch.tensor(frag_file['coords_int'][()], device=device)
annealer = AnnealFrag(energy_fn, protein, frag=(query_pos, frag_int), use_rg=args.use_rg,
T_max=args.T_max, T_min=args.T_min, L=args.L)
else:
raise ValueError('anneal_type should be int_one / frag.')
annealer.run()
coords_best = annealer.x_best
energy_best = annealer.energy_best
sample = annealer.sample
sample_energy = annealer.sample_energy
elif fold_engine == 'grad':
if args.x_type == 'cart':
minimizer = GradMinimizerCartesian(energy_fn, protein, lr=args.lr, num_steps=args.L)
elif args.x_type == 'internal':
minimizer = GradMinimizerInternal(energy_fn, protein, lr=args.lr, num_steps=args.L, momentum=0.0)
elif args.x_type == 'int_fast':
minimizer = GradMinimizerIntFast(energy_fn, protein, lr=args.lr, num_steps=args.L)
elif args.x_type == 'mixed':
minimizer = GradMinimizerMixed(energy_fn, protein, lr=args.lr, num_steps=args.L)
elif args.x_type == 'mix_fast':
minimizer = GradMinimizerMixFast(energy_fn, protein, lr=args.lr, num_steps=args.L)
else:
raise ValueError('x_type should be cart / internal / mixed / int_fast / mix_fast.')
minimizer.run()
coords_best = minimizer.x_best
energy_best = minimizer.energy_best
sample = minimizer.sample
sample_energy = minimizer.sample_energy
elif fold_engine == 'dynamics':
if args.x_type == 'cart':
minimizer = Dynamics(energy_fn, protein, num_steps=args.L, lr=args.lr, t_noise=args.T_max)
elif args.x_type == 'internal':
minimizer = DynamicsInternal(energy_fn, protein, num_steps=args.L, lr=args.lr, t_noise=args.T_max)
elif args.x_type == 'int_fast':
minimizer = DynamicsIntFast(energy_fn, protein, num_steps=args.L, lr=args.lr, t_noise=args.T_max)
elif args.x_type == 'mixed':
minimizer = DynamicsMixed(energy_fn, protein, num_steps=args.L, lr=args.lr, t_noise=args.T_max)
elif args.x_type == 'mix_fast':
minimizer = DynamicsMixFast(energy_fn, protein, num_steps=args.L, lr=args.lr, t_noise=args.T_max)
else:
raise ValueError('x_type should be cart / internal / mixed / int_fast / mix_fast.')
minimizer.run()
coords_best = minimizer.x_best
energy_best = minimizer.energy_best
sample = minimizer.sample
sample_energy = minimizer.sample_energy
else:
raise ValueError('fold_engine should be anneal / grad / dynamics')
# protein.update_coords(coords_best)
# residue_energy = protein.get_residue_energy(energy_fn)
# print(residue_energy)
# write_pdb(seq, coords_best, pdb_id, f'best_{mode}', exp_id)
# save sampled structures
sample = [coords_native.cpu(), coords_best.cpu(), coords_init.cpu()] + sample
sample_energy = [energy_native, energy_best, energy_init] + sample_energy
# write_pdb_sample(seq, sample, pdb_id, 'sample', exp_id)
# pd.DataFrame({'sample_energy': sample_energy}).to_csv(f'data/fold/{exp_id}/{pdb_id}_energy.csv', index=False)
write_pdb_sample(seq, sample, pdb_id, 'sample', save_dir)
# compute RMSD,
sample_xyz = torch.stack(sample, 0).cpu().detach().numpy()
print(sample_xyz.shape)
t = md.Trajectory(xyz=sample_xyz, topology=None)
t = t.superpose(t, frame=0)
write_pdb_sample(seq, t.xyz, pdb_id, 'sample2', save_dir)
sample_rmsd = md.rmsd(t, t, frame=0) # computation will change sample_xyz;
print(f'best RMSD: {sample_rmsd[1]}')
df = pd.DataFrame({'sample_energy': sample_energy,
'sample_rmsd': sample_rmsd})
df.to_csv(f'data/fold/{save_dir}/{pdb_id}_energy.csv', index=False)
| [
"pandas.read_csv",
"mdtraj.Trajectory",
"os.path.exists",
"physics.anneal.AnnealCoords",
"os.mkdir",
"pandas.DataFrame",
"mdtraj.rmsd",
"utils.write_pdb_sample",
"physics.protein_os.Protein",
"utils.load_protein",
"options.parse_args_and_arch",
"h5py.File",
"utils.test_setup",
"torch.stack... | [((477, 502), 'options.get_fold_parser', 'options.get_fold_parser', ([], {}), '()\n', (500, 502), False, 'import options\n'), ((510, 545), 'options.parse_args_and_arch', 'options.parse_args_and_arch', (['parser'], {}), '(parser)\n', (537, 545), False, 'import options\n'), ((587, 603), 'utils.test_setup', 'test_setup', (['args'], {}), '(args)\n', (597, 603), False, 'from utils import test_setup\n'), ((875, 913), 'pandas.read_csv', 'pd.read_csv', (['f"""{data_path}/sample.csv"""'], {}), "(f'{data_path}/sample.csv')\n", (886, 913), True, 'import pandas as pd\n'), ((958, 989), 'numpy.random.shuffle', 'np.random.shuffle', (['pdb_selected'], {}), '(pdb_selected)\n', (975, 989), True, 'import numpy as np\n'), ((1218, 1257), 'os.path.exists', 'os.path.exists', (['f"""data/fold/{save_dir}"""'], {}), "(f'data/fold/{save_dir}')\n", (1232, 1257), False, 'import os\n'), ((1263, 1296), 'os.mkdir', 'os.mkdir', (['f"""data/fold/{save_dir}"""'], {}), "(f'data/fold/{save_dir}')\n", (1271, 1296), False, 'import os\n'), ((1362, 1413), 'utils.load_protein', 'load_protein', (['data_path', 'pdb_id', 'mode', 'device', 'args'], {}), '(data_path, pdb_id, mode, device, args)\n', (1374, 1413), False, 'from utils import write_pdb, write_pdb_sample, transform_profile, load_protein\n'), ((1436, 1472), 'physics.protein_os.Protein', 'Protein', (['seq', 'coords_native', 'profile'], {}), '(seq, coords_native, profile)\n', (1443, 1472), False, 'from physics.protein_os import Protein\n'), ((6396, 6453), 'utils.write_pdb_sample', 'write_pdb_sample', (['seq', 'sample', 'pdb_id', '"""sample"""', 'save_dir'], {}), "(seq, sample, pdb_id, 'sample', save_dir)\n", (6412, 6453), False, 'from utils import write_pdb, write_pdb_sample, transform_profile, load_protein\n'), ((6574, 6618), 'mdtraj.Trajectory', 'md.Trajectory', ([], {'xyz': 'sample_xyz', 'topology': 'None'}), '(xyz=sample_xyz, topology=None)\n', (6587, 6618), True, 'import mdtraj as md\n'), ((6655, 6712), 'utils.write_pdb_sample', 
'write_pdb_sample', (['seq', 't.xyz', 'pdb_id', '"""sample2"""', 'save_dir'], {}), "(seq, t.xyz, pdb_id, 'sample2', save_dir)\n", (6671, 6712), False, 'from utils import write_pdb, write_pdb_sample, transform_profile, load_protein\n'), ((6731, 6753), 'mdtraj.rmsd', 'md.rmsd', (['t', 't'], {'frame': '(0)'}), '(t, t, frame=0)\n', (6738, 6753), True, 'import mdtraj as md\n'), ((6845, 6919), 'pandas.DataFrame', 'pd.DataFrame', (["{'sample_energy': sample_energy, 'sample_rmsd': sample_rmsd}"], {}), "({'sample_energy': sample_energy, 'sample_rmsd': sample_rmsd})\n", (6857, 6919), True, 'import pandas as pd\n'), ((2675, 2704), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (2697, 2704), False, 'import torch\n'), ((2770, 2893), 'physics.anneal.AnnealCoords', 'AnnealCoords', (['energy_fn', 'protein'], {'mode': 'mode', 'ic_move_std': 'args.ic_move_std', 'T_max': 'args.T_max', 'T_min': 'args.T_min', 'L': 'args.L'}), '(energy_fn, protein, mode=mode, ic_move_std=args.ic_move_std,\n T_max=args.T_max, T_min=args.T_min, L=args.L)\n', (2782, 2893), False, 'from physics.anneal import AnnealCoords, AnnealFrag\n'), ((2230, 2278), 'torch.tensor', 'torch.tensor', (['[[5.367, 0.1, 0.0]]'], {'device': 'device'}), '([[5.367, 0.1, 0.0]], device=device)\n', (2242, 2278), False, 'import torch\n'), ((2991, 3048), 'h5py.File', 'h5py.File', (['f"""data/fragment/{pdb_id}/{pdb_id}_int.h5"""', '"""r"""'], {}), "(f'data/fragment/{pdb_id}/{pdb_id}_int.h5', 'r')\n", (3000, 3048), False, 'import h5py\n'), ((3073, 3128), 'torch.tensor', 'torch.tensor', (["frag_file['query_pos'][()]"], {'device': 'device'}), "(frag_file['query_pos'][()], device=device)\n", (3085, 3128), False, 'import torch\n'), ((3152, 3208), 'torch.tensor', 'torch.tensor', (["frag_file['coords_int'][()]"], {'device': 'device'}), "(frag_file['coords_int'][()], device=device)\n", (3164, 3208), False, 'import torch\n'), ((3232, 3361), 'physics.anneal.AnnealFrag', 'AnnealFrag', (['energy_fn', 
'protein'], {'frag': '(query_pos, frag_int)', 'use_rg': 'args.use_rg', 'T_max': 'args.T_max', 'T_min': 'args.T_min', 'L': 'args.L'}), '(energy_fn, protein, frag=(query_pos, frag_int), use_rg=args.\n use_rg, T_max=args.T_max, T_min=args.T_min, L=args.L)\n', (3242, 3361), False, 'from physics.anneal import AnnealCoords, AnnealFrag\n'), ((6492, 6514), 'torch.stack', 'torch.stack', (['sample', '(0)'], {}), '(sample, 0)\n', (6503, 6514), False, 'import torch\n')] |
import RPi.GPIO as GPIO
import time
# for GPIO numbering, choose BCM
#GPIO.setmode(GPIO.BCM)
# or, for pin numbering, choose BOARD
GPIO.setmode(GPIO.BOARD)
# battery1 = 2
# battery2 = 4
AEnable = 13 #27
AIN1 = 22 #25
AIN2 = 18 #24
# BIN1 = 23 # 16
# BIN2 = 18 # 12
GPIO.setup(AEnable, GPIO.OUT)
GPIO.setup(AIN1, GPIO.OUT)
GPIO.setup(AIN2, GPIO.OUT)
# GPIO.setup(BIN1, GPIO.OUT)
# GPIO.setup(BIN2, GPIO.OUT)
# GPIO.setup(battery1, GPIO.OUT)
# GPIO.setup(battery2, GPIO.OUT)
# GPIO.output(battery1, GPIO.HIGH)
# GPIO.output(battery2, GPIO.HIGH)
for i in range(5):
GPIO.output(AIN1, GPIO.HIGH)
GPIO.output(AIN2, GPIO.LOW)
GPIO.output(AEnable, GPIO.HIGH)
# GPIO.output(BIN1, GPIO.HIGH)
# GPIO.output(BIN2, GPIO.LOW)
time.sleep(2)
GPIO.output(AEnable, GPIO.LOW)
# GPIO.output(AIN1,GPIO.LOW)
# GPIO.output(AIN2, GPIO.HIGH)
# GPIO.output(BIN1,GPIO.LOW)
# GPIO.output(BIN2, GPIO.HIGH)
time.sleep(2)
print("attempt12")
GPIO.cleanup()
| [
"RPi.GPIO.cleanup",
"RPi.GPIO.setup",
"RPi.GPIO.output",
"time.sleep",
"RPi.GPIO.setmode"
] | [((143, 167), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BOARD'], {}), '(GPIO.BOARD)\n', (155, 167), True, 'import RPi.GPIO as GPIO\n'), ((282, 311), 'RPi.GPIO.setup', 'GPIO.setup', (['AEnable', 'GPIO.OUT'], {}), '(AEnable, GPIO.OUT)\n', (292, 311), True, 'import RPi.GPIO as GPIO\n'), ((312, 338), 'RPi.GPIO.setup', 'GPIO.setup', (['AIN1', 'GPIO.OUT'], {}), '(AIN1, GPIO.OUT)\n', (322, 338), True, 'import RPi.GPIO as GPIO\n'), ((339, 365), 'RPi.GPIO.setup', 'GPIO.setup', (['AIN2', 'GPIO.OUT'], {}), '(AIN2, GPIO.OUT)\n', (349, 365), True, 'import RPi.GPIO as GPIO\n'), ((987, 1001), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (999, 1001), True, 'import RPi.GPIO as GPIO\n'), ((586, 614), 'RPi.GPIO.output', 'GPIO.output', (['AIN1', 'GPIO.HIGH'], {}), '(AIN1, GPIO.HIGH)\n', (597, 614), True, 'import RPi.GPIO as GPIO\n'), ((619, 646), 'RPi.GPIO.output', 'GPIO.output', (['AIN2', 'GPIO.LOW'], {}), '(AIN2, GPIO.LOW)\n', (630, 646), True, 'import RPi.GPIO as GPIO\n'), ((651, 682), 'RPi.GPIO.output', 'GPIO.output', (['AEnable', 'GPIO.HIGH'], {}), '(AEnable, GPIO.HIGH)\n', (662, 682), True, 'import RPi.GPIO as GPIO\n'), ((758, 771), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (768, 771), False, 'import time\n'), ((776, 806), 'RPi.GPIO.output', 'GPIO.output', (['AEnable', 'GPIO.LOW'], {}), '(AEnable, GPIO.LOW)\n', (787, 806), True, 'import RPi.GPIO as GPIO\n'), ((949, 962), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (959, 962), False, 'import time\n')] |
# Copyright 2017, <NAME>, All rights reserved.
from bottle import HTTPResponse
from common import Status, overrides
from ..web_app import IHandler, WebApp
from ..serialize import SerializeStatusJson
class StatusHandler(IHandler):
def __init__(self, status: Status):
self.__status = status
@overrides(IHandler)
def add_routes(self, web_app: WebApp):
web_app.add_handler("/server/status", self.__handle_get_status)
def __handle_get_status(self):
out_json = SerializeStatusJson.status(self.__status)
return HTTPResponse(body=out_json)
| [
"common.overrides",
"bottle.HTTPResponse"
] | [((311, 330), 'common.overrides', 'overrides', (['IHandler'], {}), '(IHandler)\n', (320, 330), False, 'from common import Status, overrides\n'), ((558, 585), 'bottle.HTTPResponse', 'HTTPResponse', ([], {'body': 'out_json'}), '(body=out_json)\n', (570, 585), False, 'from bottle import HTTPResponse\n')] |
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Electricity Transformer Temperature (ETT) dataset."""
from dataclasses import dataclass
import pandas as pd
import datasets
_CITATION = """\
@inproceedings{haoyietal-informer-2021,
author = {<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME>},
title = {Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting},
booktitle = {The Thirty-Fifth {AAAI} Conference on Artificial Intelligence, {AAAI} 2021, Virtual Conference},
volume = {35},
number = {12},
pages = {11106--11115},
publisher = {{AAAI} Press},
year = {2021},
}
"""
_DESCRIPTION = """\
The data of Electricity Transformers from two separated counties
in China collected for two years at hourly and 15-min frequencies.
Each data point consists of the target value "oil temperature" and
6 power load features. The train/val/test is 12/4/4 months.
"""
_HOMEPAGE = "https://github.com/zhouhaoyi/ETDataset"
_LICENSE = "The Creative Commons Attribution 4.0 International License. https://creativecommons.org/licenses/by/4.0/"
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
"h1": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTh1.csv",
"h2": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTh2.csv",
"m1": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTm1.csv",
"m2": "https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTm2.csv",
}
@dataclass
class ETTBuilderConfig(datasets.BuilderConfig):
"""ETT builder config."""
prediction_length: int = 24
multivariate: bool = False
class ETT(datasets.GeneratorBasedBuilder):
"""Electricity Transformer Temperature (ETT) dataset"""
VERSION = datasets.Version("1.0.0")
# You will be able to load one or the other configurations in the following list with
# data = datasets.load_dataset('ett', 'h1')
# data = datasets.load_dataset('ett', 'm2')
BUILDER_CONFIGS = [
ETTBuilderConfig(
name="h1",
version=VERSION,
description="Time series from first county at hourly frequency.",
),
ETTBuilderConfig(
name="h2",
version=VERSION,
description="Time series from second county at hourly frequency.",
),
ETTBuilderConfig(
name="m1",
version=VERSION,
description="Time series from first county at 15-min frequency.",
),
ETTBuilderConfig(
name="m2",
version=VERSION,
description="Time series from second county at 15-min frequency.",
),
]
DEFAULT_CONFIG_NAME = "h1" # It's not mandatory to have a default configuration. Just use one if it make sense.
def _info(self):
if self.config.multivariate:
features = datasets.Features(
{
"start": datasets.Value("timestamp[s]"),
"target": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
"feat_static_cat": datasets.Sequence(datasets.Value("uint64")),
"item_id": datasets.Value("string"),
}
)
else:
features = datasets.Features(
{
"start": datasets.Value("timestamp[s]"),
"target": datasets.Sequence(datasets.Value("float32")),
"feat_static_cat": datasets.Sequence(datasets.Value("uint64")),
"feat_dynamic_real": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
"item_id": datasets.Value("string"),
}
)
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=features, # Here we define them above because they are different between the two configurations
# If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
# specify them. They'll be used if as_supervised=True in builder.as_dataset.
# supervised_keys=("sentence", "label"),
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
urls = _URLS[self.config.name]
filepath = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": filepath,
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": filepath,
"split": "test",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": filepath,
"split": "dev",
},
),
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, filepath, split):
data = pd.read_csv(filepath, parse_dates=True, index_col=0)
start_date = data.index.min()
if self.config.name in ["m1", "m2"]:
factor = 4 # 15-min frequency
else:
factor = 1 # hourly frequency
train_end_date_index = 12 * 30 * 24 * factor # 1 year
if split == "dev":
end_date_index = 12 * 30 * 24 + 4 * 30 * 24 * factor # 1 year + 4 months
else:
end_date_index = 12 * 30 * 24 + 8 * 30 * 24 * factor # 1 year + 8 months
if self.config.multivariate:
if split in ["test", "dev"]:
# rolling windows of prediction_length for dev and test
for i, index in enumerate(
range(
train_end_date_index,
end_date_index,
self.config.prediction_length,
)
):
yield i, {
"start": start_date,
"target": data[: index + self.config.prediction_length].values.astype("float32").T,
"feat_static_cat": [0],
"item_id": "0",
}
else:
yield 0, {
"start": start_date,
"target": data[:train_end_date_index].values.astype("float32").T,
"feat_static_cat": [0],
"item_id": "0",
}
else:
if split in ["test", "dev"]:
# rolling windows of prediction_length for dev and test
for i, index in enumerate(
range(
train_end_date_index,
end_date_index,
self.config.prediction_length,
)
):
target = data["OT"][: index + self.config.prediction_length].values.astype("float32")
feat_dynamic_real = data[["HUFL", "HULL", "MUFL", "MULL", "LUFL", "LULL"]][
: index + self.config.prediction_length
].values.T.astype("float32")
yield i, {
"start": start_date,
"target": target,
"feat_dynamic_real": feat_dynamic_real,
"feat_static_cat": [0],
"item_id": "OT",
}
else:
target = data["OT"][:train_end_date_index].values.astype("float32")
feat_dynamic_real = data[["HUFL", "HULL", "MUFL", "MULL", "LUFL", "LULL"]][
:train_end_date_index
].values.T.astype("float32")
yield 0, {
"start": start_date,
"target": target,
"feat_dynamic_real": feat_dynamic_real,
"feat_static_cat": [0],
"item_id": "OT",
}
| [
"datasets.SplitGenerator",
"pandas.read_csv",
"datasets.DatasetInfo",
"datasets.Version",
"datasets.Value"
] | [((2653, 2678), 'datasets.Version', 'datasets.Version', (['"""1.0.0"""'], {}), "('1.0.0')\n", (2669, 2678), False, 'import datasets\n'), ((4638, 4766), 'datasets.DatasetInfo', 'datasets.DatasetInfo', ([], {'description': '_DESCRIPTION', 'features': 'features', 'homepage': '_HOMEPAGE', 'license': '_LICENSE', 'citation': '_CITATION'}), '(description=_DESCRIPTION, features=features, homepage=\n _HOMEPAGE, license=_LICENSE, citation=_CITATION)\n', (4658, 4766), False, 'import datasets\n'), ((6679, 6731), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'parse_dates': '(True)', 'index_col': '(0)'}), '(filepath, parse_dates=True, index_col=0)\n', (6690, 6731), True, 'import pandas as pd\n'), ((5653, 5760), 'datasets.SplitGenerator', 'datasets.SplitGenerator', ([], {'name': 'datasets.Split.TRAIN', 'gen_kwargs': "{'filepath': filepath, 'split': 'train'}"}), "(name=datasets.Split.TRAIN, gen_kwargs={'filepath':\n filepath, 'split': 'train'})\n", (5676, 5760), False, 'import datasets\n'), ((5944, 6049), 'datasets.SplitGenerator', 'datasets.SplitGenerator', ([], {'name': 'datasets.Split.TEST', 'gen_kwargs': "{'filepath': filepath, 'split': 'test'}"}), "(name=datasets.Split.TEST, gen_kwargs={'filepath':\n filepath, 'split': 'test'})\n", (5967, 6049), False, 'import datasets\n'), ((6233, 6344), 'datasets.SplitGenerator', 'datasets.SplitGenerator', ([], {'name': 'datasets.Split.VALIDATION', 'gen_kwargs': "{'filepath': filepath, 'split': 'dev'}"}), "(name=datasets.Split.VALIDATION, gen_kwargs={\n 'filepath': filepath, 'split': 'dev'})\n", (6256, 6344), False, 'import datasets\n'), ((3832, 3862), 'datasets.Value', 'datasets.Value', (['"""timestamp[s]"""'], {}), "('timestamp[s]')\n", (3846, 3862), False, 'import datasets\n'), ((4074, 4098), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (4088, 4098), False, 'import datasets\n'), ((4235, 4265), 'datasets.Value', 'datasets.Value', (['"""timestamp[s]"""'], {}), "('timestamp[s]')\n", (4249, 4265), 
False, 'import datasets\n'), ((4564, 4588), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (4578, 4588), False, 'import datasets\n'), ((4016, 4040), 'datasets.Value', 'datasets.Value', (['"""uint64"""'], {}), "('uint64')\n", (4030, 4040), False, 'import datasets\n'), ((4315, 4340), 'datasets.Value', 'datasets.Value', (['"""float32"""'], {}), "('float32')\n", (4329, 4340), False, 'import datasets\n'), ((4400, 4424), 'datasets.Value', 'datasets.Value', (['"""uint64"""'], {}), "('uint64')\n", (4414, 4424), False, 'import datasets\n'), ((3930, 3955), 'datasets.Value', 'datasets.Value', (['"""float32"""'], {}), "('float32')\n", (3944, 3955), False, 'import datasets\n'), ((4504, 4529), 'datasets.Value', 'datasets.Value', (['"""float32"""'], {}), "('float32')\n", (4518, 4529), False, 'import datasets\n')] |
'''
09 - Multiple grouped summaries
Earlier in this chapter, you saw that the .agg() method is useful to
compute multiple statistics on multiple variables. It also works with
grouped data. NumPy, which is imported as np, has many different summary
statistics functions, including: np.min, np.max, np.mean, and np.median.
Instructions:
- Import numpy with the alias np.
- Get the min, max, mean, and median of weekly_sales for each store type
using .groupby() and .agg(). Store this as sales_stats. Make sure to use
numpy functions!
- Get the min, max, mean, and median of unemployment and fuel_price_usd_per_l
for each store type. Store this as unemp_fuel_stats.
-----------------------------------------------------------------------------------------------------------------
sales.head()
store type department date weekly_sales is_holiday temperature_c fuel_price_usd_per_l unemployment
0 1 A 1 2010-02-05 24924.50 False 5.728 0.679 8.106
1 1 A 1 2010-03-05 21827.90 False 8.056 0.693 8.106
2 1 A 1 2010-04-02 57258.43 False 16.817 0.718 7.808
3 1 A 1 2010-05-07 17413.94 False 22.528 0.749 7.808
4 1 A 1 2010-06-04 17558.09 False 27.050 0.715 7.808
-----------------------------------------------------------------------------------------------------------------
'''
# Import pandas and numpy
import numpy as np
import pandas as pd
sales = pd.read_csv('content/wallmart_sales.csv')
# Import numpy with the alias np
import numpy as np
# For each store type, aggregate weekly_sales: get min, max, mean, and median
sales_stats = sales.groupby('type')['weekly_sales'].agg([np.min, np.max, np.mean, np.median])
# Print sales_stats
print(sales_stats)
# For each store type, aggregate unemployment and fuel_price_usd_per_l: get min, max, mean, and median
unemp_fuel_stats = sales.groupby('type')[['fuel_price_usd_per_l', 'unemployment']].agg([np.min, np.max, np.mean, np.median])
# Print unemp_fuel_stats
print(unemp_fuel_stats)
| [
"pandas.read_csv"
] | [((1687, 1728), 'pandas.read_csv', 'pd.read_csv', (['"""content/wallmart_sales.csv"""'], {}), "('content/wallmart_sales.csv')\n", (1698, 1728), True, 'import pandas as pd\n')] |
"""
This code is modified from harveyslash's work (https://github.com/harveyslash/Deep-Image-Analogy-PyTorch)
"""
import mxnet as mx
from mxnet.gluon.model_zoo import vision as models
from mxnet.gluon import nn
from mxnet import nd
from mxnet import autograd
from time import time
from mxnet import optimizer
import sys
class VGG19:
def __init__(self):
vgg19_model = models.vgg19(pretrained=False)
vgg19_model.load_params("model/vgg19.params", ctx=mx.cpu(0)) # pre-trained net is in cpu
self.use_cuda = True
self.mean = nd.array([0.485, 0.456, 0.406])
self.std = nd.array([0.229, 0.224, 0.225])
self.ctx = mx.gpu(0)
self.model = self.get_model(vgg19_model)
self.smooth = 0.5
def get_model(self, pretrained_net):
# We need to redefine a new network
# because pre-trained structures cannot be read directly as "arrays."
net = nn.Sequential()
for i in range(40):
net.add(pretrained_net.features[i])
net.collect_params().reset_ctx(ctx=self.ctx)
return net
def preprocess(self, img):
img = (nd.array(img).astype('float32') / 255.0 - self.mean) / self.std
return img.transpose((2, 0, 1)).expand_dims(axis=0)
def forward_subnet(self, x, start_layer, end_layer):
for i, layer in enumerate(list(self.model)):
if start_layer <= i <= end_layer:
x = layer(x)
return x
def get_features(self, img_tensor, layers):
img_tensor = self.preprocess(img_tensor)
img_tensor = nd.array(img_tensor).copyto(self.ctx)
features = []
sizes = []
x = img_tensor
features.append(img_tensor)
sizes.append(img_tensor.shape)
for i in range(len(self.model)):
x = self.model[i](x)
if i in layers:
features.append(x)
sizes.append(x.shape)
features.reverse()
sizes.reverse()
return features, sizes
def get_deconvoluted_feat(self, feat, curr_layer, init=None, lr=10, iters=3000):
# Deconvolution process: deconvolute the feature on one layer (e.g. L4) to the second last layer (e.g. L2)
# and forward it to the last layer (e.g. L3).
blob_layers = [29, 20, 11, 6, 1, -1]
end_layer = blob_layers[curr_layer]
mid_layer = blob_layers[curr_layer + 1]
start_layer = blob_layers[curr_layer + 2] + 1
# print("start:", start_layer, " mid:", mid_layer, " end", end_layer)
# make sure the data is in GPU
noise = init.copyto(self.ctx)
target = feat.copyto(self.ctx)
# get_sub_net
net = nn.Sequential()
for layer_num, layer in enumerate(list(self.model)):
if start_layer <= layer_num <= end_layer: # python simplified
net.add(layer)
net.collect_params().reset_ctx(ctx=self.ctx)
def tv_loss(x):
return (x[:, :, 1:, :] - x[:, :, :-1, :]).abs().sum() + (x[:, :, :, 1:] - x[:, :, :, :-1]).abs().sum()
def go(x):
output = net(x)
if curr_layer == 0:
loss = (output - target).square().sum() + self.smooth * tv_loss(x)
else:
loss = (output - target).square().sum()
return loss
def train(x, lr, iters):
tic = time()
t = 1
v = x.zeros_like()
sqr = x.zeros_like()
optim = optimizer.Adam(learning_rate=lr)
for idx in range(iters):
with autograd.record():
loss = go(x)
loss.backward()
optim.update(t, x, x.grad, [sqr, v])
nd.waitall() # TODO:it is a time cost operation
t = t + 1
sys.stdout.write('\r training..........%s%%' % (100 * idx // iters + 1))
sys.stdout.flush()
print(" all_train_time:", time() - tic)
return x
# begin training,just like style transfer
noise.attach_grad()
noise = train(noise, lr, iters)
out = self.forward_subnet(noise, start_layer, mid_layer)
return out
| [
"mxnet.autograd.record",
"mxnet.optimizer.Adam",
"mxnet.nd.waitall",
"mxnet.cpu",
"mxnet.gluon.model_zoo.vision.vgg19",
"mxnet.gpu",
"mxnet.nd.array",
"sys.stdout.flush",
"mxnet.gluon.nn.Sequential",
"time.time",
"sys.stdout.write"
] | [((382, 412), 'mxnet.gluon.model_zoo.vision.vgg19', 'models.vgg19', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (394, 412), True, 'from mxnet.gluon.model_zoo import vision as models\n'), ((560, 591), 'mxnet.nd.array', 'nd.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (568, 591), False, 'from mxnet import nd\n'), ((611, 642), 'mxnet.nd.array', 'nd.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (619, 642), False, 'from mxnet import nd\n'), ((662, 671), 'mxnet.gpu', 'mx.gpu', (['(0)'], {}), '(0)\n', (668, 671), True, 'import mxnet as mx\n'), ((925, 940), 'mxnet.gluon.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (938, 940), False, 'from mxnet.gluon import nn\n'), ((2692, 2707), 'mxnet.gluon.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (2705, 2707), False, 'from mxnet.gluon import nn\n'), ((3381, 3387), 'time.time', 'time', ([], {}), '()\n', (3385, 3387), False, 'from time import time\n'), ((3490, 3522), 'mxnet.optimizer.Adam', 'optimizer.Adam', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (3504, 3522), False, 'from mxnet import optimizer\n'), ((471, 480), 'mxnet.cpu', 'mx.cpu', (['(0)'], {}), '(0)\n', (477, 480), True, 'import mxnet as mx\n'), ((1582, 1602), 'mxnet.nd.array', 'nd.array', (['img_tensor'], {}), '(img_tensor)\n', (1590, 1602), False, 'from mxnet import nd\n'), ((3734, 3746), 'mxnet.nd.waitall', 'nd.waitall', ([], {}), '()\n', (3744, 3746), False, 'from mxnet import nd\n'), ((3825, 3897), 'sys.stdout.write', 'sys.stdout.write', (["('\\r training..........%s%%' % (100 * idx // iters + 1))"], {}), "('\\r training..........%s%%' % (100 * idx // iters + 1))\n", (3841, 3897), False, 'import sys\n'), ((3914, 3932), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3930, 3932), False, 'import sys\n'), ((3581, 3598), 'mxnet.autograd.record', 'autograd.record', ([], {}), '()\n', (3596, 3598), False, 'from mxnet import autograd\n'), ((3976, 3982), 'time.time', 'time', ([], 
{}), '()\n', (3980, 3982), False, 'from time import time\n'), ((1136, 1149), 'mxnet.nd.array', 'nd.array', (['img'], {}), '(img)\n', (1144, 1149), False, 'from mxnet import nd\n')] |
import json
from .fragment_doc import fragment_srt, fragment_syosetu, has_unbalanced_quotes, extract_kana_kanji
EXTRACT_KANA_KANJI_CASES = [
['asdf.!ä', ''],
['あいうえお', 'あいうえお'],
['asdこfdれ', 'これ'],
['「ああ、畜生」foo', 'ああ畜生'],
]
for [text, target] in EXTRACT_KANA_KANJI_CASES:
if extract_kana_kanji(text) != target:
print('FAIL EXTRACT KANA+KANJI')
print(text)
QUOTE_BALANCE_CASES = [
['あいうえお', False],
['あい「うえお', True],
['「あいうえお', True],
['「あいうえお」', False],
['あい「うえ」お', False],
['「あい」う「えお」', False],
['「あいう「えお」」', False],
['「あい「うえ」お', True],
['あい「うえ」お」', True],
]
for [text, target] in QUOTE_BALANCE_CASES:
if has_unbalanced_quotes(text) != target:
print('FAIL QUOTE BALANCE')
print(text)
FRAG_CASES = [
['S',
'''
1
00:02:17,440 --> 00:02:20,375
Senator, we're making
our final approach into Coruscant.
2
00:02:20,476 --> 00:02:22,501
Very good, Lieutenant.
''',
[
{'text': "Senator, we're making our final approach into Coruscant.", 'loc': 't:137.440-140.375'},
{'text': 'Very good, Lieutenant.', 'loc': 't:140.476-142.501'},
]
],
# no anchor novel
['N', '<div><p>食べる</p></div>', [{'text': "食べる"}]],
# anchor novel
['N', '<div><p id="L123">食べる</p></div>', [{'text': '食べる', 'loc': 'a:L123'}]],
# no splitting
['N', '<div><p>それでは、行ってまいります</p></div>',
[
{'text': 'それでは、行ってまいります'},
]
],
# simple splitting
['N', '<div><p>そのせいだろうか。あの日に見た空の青を、よく覚えている。</p></div>',
[
{'text': 'そのせいだろうか。'},
{'text': 'あの日に見た空の青を、よく覚えている。'},
]
],
# strip leading dashes
['N', '<div><p>――ああ、そうだったのですか。</p></div>',
[
{'text': 'ああ、そうだったのですか。'},
]
],
# strip leading ellipses
['N', '<div><p>……そうか?</p></div>',
[
{'text': 'そうか?'},
]
],
# strip matching quotes
['N', '<div><p>「ああ、畜生」</p></div>',
[
{'text': 'ああ、畜生'},
]
],
# strip just leading open quote
['N', '<div><p>「あっ、大丈夫です!</p></div>',
[
{'text': 'あっ、大丈夫です!'},
]
],
# strip just trailing close quote
['N', '<div><p>王宮に神父がいるかっ」</p></div>',
[
{'text': '王宮に神父がいるかっ'},
]
],
# combo
['N', '<div><p>「……うん」</p></div>',
[
{'text': 'うん'},
]
],
# don't strip trailing ellipses
['N', '<div><p>「……血……血が……………」</p></div>',
[
{'text': '血……血が……………'},
]
],
# ignore fragments that start with close quote
['N', '<div><p>」と見開いた。</p></div>', []],
# handle other quotes
['N', '<div><p>『モルツ、少し休憩する』</p></div>',
[
{'text': 'モルツ、少し休憩する'},
]
],
# remove leading speaker label
['N', '<div><p>【ポルペオ】「なんだ、その目は?</p></div>',
[
{'text': 'なんだ、その目は?'},
]
],
# remove drama-style speaker label
['N', '<div><p>(平次)おい 大変だ。</p></div>',
[
{'text': 'おい 大変だ。'},
]
],
# TODO: can we get rid of the leading dash?
# ['N', '<div><p id="L75">─ 〝城内〟に命ず。騎士団による警備を撤去せよ。</p></div>',
# [
# {'text': '〝城内〟に命ず。', 'loc': 'a:L75'},
# {'text': '騎士団による警備を撤去せよ。', 'loc': 'a:L75'}
# ]
# ],
]
for [kind, text, expected_result] in FRAG_CASES:
if kind == 'S':
result = fragment_srt(text, None)
elif kind == 'N':
result = fragment_syosetu(text, None)
else:
assert False
# this is hacky, but should be OK
if json.dumps(result, sort_keys=True) != json.dumps(expected_result, sort_keys=True):
print('FAIL')
print('TEXT-----------------')
print(text)
print('TARGET RESULT--------')
print(repr(expected_result))
print('ACTUAL RESULT--------')
print(repr(result))
print()
| [
"json.dumps"
] | [((3641, 3675), 'json.dumps', 'json.dumps', (['result'], {'sort_keys': '(True)'}), '(result, sort_keys=True)\n', (3651, 3675), False, 'import json\n'), ((3679, 3722), 'json.dumps', 'json.dumps', (['expected_result'], {'sort_keys': '(True)'}), '(expected_result, sort_keys=True)\n', (3689, 3722), False, 'import json\n')] |
import os
import math
import time
import datetime
import logging
from . import podio_auth
log = logging.getLogger(__file__)
def try_environment_token():
"""
Try to get the token from the environment variables TETRAPOD_CLIENT_ID and TETRAPOD_ACCESS_TOKEN.
:return:
"""
try:
client_id = os.environ['TETRAPOD_CLIENT_ID']
access_token = os.environ['TETRAPOD_ACCESS_TOKEN']
except KeyError as e:
log.info('Environment variables TETRAPOD_CLIENT_ID and TETRAPOD_ACCESS_TOKEN not set.')
return None
log.info('Loading OAuth2 token from environment.')
return {
"access_token": str(access_token),
"client_id": str(client_id),
"token_type": "bearer"
}
def create_podio_session(credentials_file=None, credentials=None, check=True, robust=False):
token = try_environment_token()
if token is None:
log.info('Loading OAuth2 token from credentials file.')
if credentials_file is None:
token = podio_auth.load_token()
else:
token = podio_auth.load_token(credentials_file)
podio = podio_auth.make_client(token['client_id'], token, check=check, enable_robustness=robust)
return podio
def create_app_auth_session(client_id:str, client_secret:str, app_id:int, app_token:str):
return podio_auth.make_app_auth_client(client_id, client_secret, app_id, app_token) | [
"logging.getLogger"
] | [((98, 125), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (115, 125), False, 'import logging\n')] |
import sys
import csv
import json
import shutil
from collections import OrderedDict
HW = OrderedDict()
with open('ec2_instance_types.csv', 'r') as f:
reader = csv.reader(f)
for i,row in enumerate(reader):
if i == 0:
header = row
else:
entry = {}
entry['type'] = i+1
entry['name'] = row[0]
entry['cpu'] = int(row[1])
entry['memory'] = float(row[2].replace(',', ''))
storage_str = row[3]
storage_type = None
if 'EBS' in storage_str:
storage_type = 'EBS'
elif 'NVMe' in storage_str:
storage_type = 'NVMe'
elif 'SSD' in storage_str:
storage_type = 'SSD'
elif entry['name'].startswith('r4'):
storage_type = 'EBS'
elif entry['name'].startswith('d2'):
storage_type = 'HDD'
elif entry['name'] == 'f1.16xlarge':
storage_type = 'SSD'
else:
raise Exception('Unknown storage type for {}'.format(entry['name']))
storage_list = None
if storage_type == 'EBS':
entry['storage'] = '40,40'
elif entry['name'] == 'f1.2xlarge':
entry['storage'] = storage_str.split(' ')[0]
else:
parts = storage_str.split(' ')
num_devices = 4 if int(parts[0]) > 4 else int(parts[0])
size = parts[2].replace(',', '')
entry['storage'] = ','.join([size for _ in range(num_devices)])
entry['storage_type'] = storage_type
entry['additional_specs'] = json.dumps(OrderedDict(zip(header[4:], row[4:])), encoding='utf-8')
HW[entry['name']] = entry
# For types.HardwareTypes
hw_consts = [('GENERIC', 1, 'generic')]
for k,v in HW.iteritems():
hw_consts.append(('EC2_{}'.format(k.replace('.', '').upper()), v['type'], k))
hw_str = ' '.join(['{} = {};'.format(k, v) for k,v,_ in hw_consts])
type_names = ', '.join(['{}: \'{}\''.format(k,n) for k,_,n in hw_consts])
#hw_str = 'GENERIC = 1; '
#hw_str += ' '.join(['EC2_{} = {};'.format(k.replace('.', '').upper(), v['type']) for k,v in HW.iteritems()])
#type_names = {v['type']: k for k,v in HW.iteritems()}
#type_names['GENERIC'] = 'GENERIC'
with open('hardware_types.txt', 'w') as f:
f.write(hw_str + '\n')
f.write('TYPE_NAMES = {' + type_names + '}')
entries = []
for k,v in HW.iteritems():
entries.append({
"model": "website.Hardware",
'fields': v
})
with open('hardware.json', 'w') as f:
json.dump(entries, f, encoding='utf-8', indent=4)
shutil.copy('hardware.json', '../../preload/hardware.json')
#maxx = ''
#maxlen = 0
#for k,v in HW.iteritems():
# if len(v['storage']) > maxlen:
# print k,len(v['storage']), v['storage']
# maxlen = len(v['storage'])
| [
"collections.OrderedDict",
"json.dump",
"csv.reader",
"shutil.copy"
] | [((91, 104), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (102, 104), False, 'from collections import OrderedDict\n'), ((2688, 2747), 'shutil.copy', 'shutil.copy', (['"""hardware.json"""', '"""../../preload/hardware.json"""'], {}), "('hardware.json', '../../preload/hardware.json')\n", (2699, 2747), False, 'import shutil\n'), ((165, 178), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (175, 178), False, 'import csv\n'), ((2637, 2686), 'json.dump', 'json.dump', (['entries', 'f'], {'encoding': '"""utf-8"""', 'indent': '(4)'}), "(entries, f, encoding='utf-8', indent=4)\n", (2646, 2686), False, 'import json\n')] |
import panda3d.core as p3d
import pytest #pylint:disable=wrong-import-order
import simplepbr
#pylint:disable=redefined-outer-name
@pytest.fixture(scope='session')
def showbase():
from direct.showbase.ShowBase import ShowBase
p3d.load_prc_file_data(
'',
'window-type offscreen\n'
'framebuffer-hardware false\n'
)
return ShowBase()
def test_setup(showbase):
simplepbr.init(
render_node=showbase.render,
window=showbase.win,
camera_node=showbase.cam,
)
| [
"pytest.fixture",
"panda3d.core.load_prc_file_data",
"direct.showbase.ShowBase.ShowBase",
"simplepbr.init"
] | [((135, 166), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (149, 166), False, 'import pytest\n'), ((237, 324), 'panda3d.core.load_prc_file_data', 'p3d.load_prc_file_data', (['""""""', '"""window-type offscreen\nframebuffer-hardware false\n"""'], {}), '(\'\',\n """window-type offscreen\nframebuffer-hardware false\n""")\n', (259, 324), True, 'import panda3d.core as p3d\n'), ((363, 373), 'direct.showbase.ShowBase.ShowBase', 'ShowBase', ([], {}), '()\n', (371, 373), False, 'from direct.showbase.ShowBase import ShowBase\n'), ((406, 500), 'simplepbr.init', 'simplepbr.init', ([], {'render_node': 'showbase.render', 'window': 'showbase.win', 'camera_node': 'showbase.cam'}), '(render_node=showbase.render, window=showbase.win,\n camera_node=showbase.cam)\n', (420, 500), False, 'import simplepbr\n')] |
import numpy as np
from .common import *
from . import rotation
def to_homogeneous(x):
x = np.asarray(x)
o = np.ones_like(x[..., :1])
return np.concatenate([x, o], axis=-1)
def from_homogeneous(x):
return x[..., :-1] / x[..., -1:]
def compose(r, t, rtype, out=None):
if out is None:
shape = tuple(np.shape(t)[:-1]) + (4, 4)
out = np.zeros(shape, dtype=t.dtype)
rtype.to_matrix(r, out=out[..., :3, :3])
out[..., :3, 3:] = t.reshape(out[...,:3,3:].shape)
return out
def translation_from_matrix(T):
return T[..., :3, 3]
def rotation_from_matrix(T):
return T[..., :3, :3]
def rotation_2d(x, R=None, c=None, s=None):
if R is None:
shape = tuple(np.shape(x)[:-1]) + (2, 2)
R = np.zeros(shape, dtype=x.dtype)
if c is None:
c = np.cos(x)
if s is None:
s = np.sin(x)
R[..., 0, 0] = c
R[..., 0, 1] = -s
R[..., 1, 0] = s
R[..., 1, 1] = c
return R
def Rz(x, T=None, c=None, s=None):
if T is None:
shape = tuple(np.shape(x)[:-1]) + (4, 4)
T = np.zeros(shape, dtype=np.float32)
if c is None:
c = np.cos(x)
if s is None:
s = np.sin(x)
T[..., 0, 0] = c
T[..., 0, 1] = -s
T[..., 1, 0] = s
T[..., 1, 1] = c
T[..., 2, 2] = 1
return T
def invert(T, out=None):
R = T[..., :3, :3]
t = T[..., :3, 3:]
if out is None:
out = np.zeros_like(T)
out[..., :3, :3] = R.swapaxes(-1, -2)
out[..., :3, 3:] = -np.einsum('...ba,...bc->...ac', R, t)
out[..., 3, 3] = 1
return out
def Rti(R, t):
Ri = R.swapaxes(-1, -2)
if np.ndim(t) < np.ndim(Ri):
# case (...,D)
ti = -np.einsum('...ab,...b->...a', Ri, t)
else:
# case (...,D,1)
ti = -np.einsum('...ab,...bc->...ac', Ri, t)
return Ri, ti
def lerp(a, b, w):
return (a * (1.0-w)) + (b*w)
def flerp(a, b, w, f, fi):
return fi(lerp(f(a), f(b), w))
def rlerp(ra, rb, w):
Ra = np.eye(4, dtype=np.float32)
Rb = np.eye(4, dtype=np.float32)
Ra[:3, :3] = ra
Rb[:3, :3] = rb
qa = tx.quaternion_from_matrix(Ra)
qb = tx.quaternion_from_matrix(Rb)
q = tx.quaternion_slerp(q0, q1, w)
R = tx.quaternion_matrix(q)[:3, :3]
return R
def rx3(R, x):
rx = np.einsum('...ab,...b->...a', R[..., :3, :3], x)
return rx
def tx3(T, x):
rx = np.einsum('...ab,...b->...a', T[..., :3, :3], x)
return rx + T[..., :3, 3:].swapaxes(-2, -1)
def rtx3(r, t, x):
return x.dot(r.swapaxes(-2, -1)) + t
def tx4(T, x):
return np.einsum('...ab,...b->...a', T, x)
| [
"numpy.ones_like",
"numpy.eye",
"numpy.asarray",
"numpy.ndim",
"numpy.zeros",
"numpy.einsum",
"numpy.cos",
"numpy.concatenate",
"numpy.sin",
"numpy.shape",
"numpy.zeros_like"
] | [((96, 109), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (106, 109), True, 'import numpy as np\n'), ((118, 142), 'numpy.ones_like', 'np.ones_like', (['x[..., :1]'], {}), '(x[..., :1])\n', (130, 142), True, 'import numpy as np\n'), ((154, 185), 'numpy.concatenate', 'np.concatenate', (['[x, o]'], {'axis': '(-1)'}), '([x, o], axis=-1)\n', (168, 185), True, 'import numpy as np\n'), ((1987, 2014), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'np.float32'}), '(4, dtype=np.float32)\n', (1993, 2014), True, 'import numpy as np\n'), ((2024, 2051), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'np.float32'}), '(4, dtype=np.float32)\n', (2030, 2051), True, 'import numpy as np\n'), ((2288, 2336), 'numpy.einsum', 'np.einsum', (['"""...ab,...b->...a"""', 'R[..., :3, :3]', 'x'], {}), "('...ab,...b->...a', R[..., :3, :3], x)\n", (2297, 2336), True, 'import numpy as np\n'), ((2376, 2424), 'numpy.einsum', 'np.einsum', (['"""...ab,...b->...a"""', 'T[..., :3, :3]', 'x'], {}), "('...ab,...b->...a', T[..., :3, :3], x)\n", (2385, 2424), True, 'import numpy as np\n'), ((2563, 2598), 'numpy.einsum', 'np.einsum', (['"""...ab,...b->...a"""', 'T', 'x'], {}), "('...ab,...b->...a', T, x)\n", (2572, 2598), True, 'import numpy as np\n'), ((369, 399), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 't.dtype'}), '(shape, dtype=t.dtype)\n', (377, 399), True, 'import numpy as np\n'), ((753, 783), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'x.dtype'}), '(shape, dtype=x.dtype)\n', (761, 783), True, 'import numpy as np\n'), ((814, 823), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (820, 823), True, 'import numpy as np\n'), ((854, 863), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (860, 863), True, 'import numpy as np\n'), ((1077, 1110), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (1085, 1110), True, 'import numpy as np\n'), ((1141, 1150), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (1147, 1150), True, 'import numpy as np\n'), ((1181, 
1190), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1187, 1190), True, 'import numpy as np\n'), ((1419, 1435), 'numpy.zeros_like', 'np.zeros_like', (['T'], {}), '(T)\n', (1432, 1435), True, 'import numpy as np\n'), ((1502, 1539), 'numpy.einsum', 'np.einsum', (['"""...ba,...bc->...ac"""', 'R', 't'], {}), "('...ba,...bc->...ac', R, t)\n", (1511, 1539), True, 'import numpy as np\n'), ((1630, 1640), 'numpy.ndim', 'np.ndim', (['t'], {}), '(t)\n', (1637, 1640), True, 'import numpy as np\n'), ((1643, 1654), 'numpy.ndim', 'np.ndim', (['Ri'], {}), '(Ri)\n', (1650, 1654), True, 'import numpy as np\n'), ((1693, 1729), 'numpy.einsum', 'np.einsum', (['"""...ab,...b->...a"""', 'Ri', 't'], {}), "('...ab,...b->...a', Ri, t)\n", (1702, 1729), True, 'import numpy as np\n'), ((1779, 1817), 'numpy.einsum', 'np.einsum', (['"""...ab,...bc->...ac"""', 'Ri', 't'], {}), "('...ab,...bc->...ac', Ri, t)\n", (1788, 1817), True, 'import numpy as np\n'), ((328, 339), 'numpy.shape', 'np.shape', (['t'], {}), '(t)\n', (336, 339), True, 'import numpy as np\n'), ((714, 725), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (722, 725), True, 'import numpy as np\n'), ((1038, 1049), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (1046, 1049), True, 'import numpy as np\n')] |
# coding: utf-8
# In[1]:
get_ipython().run_cell_magic('javascript', '', '<!-- Ignore this block -->\nIPython.OutputArea.prototype._should_scroll = function(lines) {\n return false;\n}')
# # Data preprocessing
# 1. convert any non-numeric values to numeric values.
# 2. If required drop out the rows with missing values or NA. In next lectures we will handle sparse data, which will allow us to use records with missing values.
# 3. Split the data into a train(80%) and test(20%) .
# In[2]:
# Render inline matplotlib figures at retina (2x) resolution.
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")

# NOTE(review): `from __future__ import division` must be the very first
# statement of a module; as a plain .py script this file would raise a
# SyntaxError because the magic call above precedes it. Inside a notebook
# each cell compiles separately, so it works there.
from __future__ import division
import pandas as pd
import numpy as np
from math import sqrt, isnan
import matplotlib.pyplot as plt
import warnings
# Silence library warnings so the notebook output stays readable.
warnings.filterwarnings('ignore')

"""Set global rcParams for pyplotlib"""
# Default figure size; presumably matplotlib's rcParam validator parses the
# "width,height" string into floats — TODO confirm for the pinned version.
plt.rcParams["figure.figsize"] = "18,25"
# ### TextEncoder
#
# Here the data is mix of numbers and text. Text value cannot be directly used and should be converted to numeric data.<br>
# For this I have created a function text encoder which accepts a pandas series. Text encoder returns a lookUp dictionary for recreating the numeric value for text value.
# In[3]:
def textEncoder(*textVectors):
    """Build a text -> integer lookup table from one or more pandas Series.

    Unique values are numbered 1, 2, 3, ... in first-seen order across all
    supplied series, so the same dictionary can be reused to re-encode the
    original text columns consistently.
    """
    lookUpDictionary = {}
    for series in textVectors:
        for label in series.unique():
            if label not in lookUpDictionary:
                # The next code is always current size + 1 (codes start at 1).
                lookUpDictionary[label] = len(lookUpDictionary) + 1
    return lookUpDictionary
# ### SplitDataSet Procedure
# This method splits the dataset into trainset and testset based upon the trainSetSize value. For splitting the dataset, I am using pandas.sample to split the data. This gives me trainset. For testset I am calculating complement of the trainset. This I am doing by droping the index present in training set.
# In[4]:
"""Splits the provided pandas dataframe into training and test dataset"""
def splitDataSet(inputDataframe, trainSetSize):
trainSet = inputDataframe.sample(frac=trainSetSize)
testSet = inputDataframe.drop(trainSet.index)
return trainSet,testSet
# ### generatePearsonCoefficient Procedure
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/f76ccfa7c2ed7f5b085115086107bbe25d329cec">
# For sample:-
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/bd1ccc2979b0fd1c1aec96e386f686ae874f9ec0">
# For selecting some features and for dropping others I am using Pearson's Coefficient. The value of Pearson's coefficient lies between [-1, 1] and tells how two features are related<br>
# <table>
# <tr><td>Strength of Association</td><td>Positive</td><td>Negative</td></tr><tr><td>Small</td><td>.1 to .3 </td><td>-0.1 to -0.3 </td></tr><tr><td>Medium</td><td>.3 to .5 </td><td>-0.3 to -0.5 </td></tr><tr><td>Large</td><td>.5 to 1.0 </td><td>-0.5 to -1.0 </td></tr></table>
#
# In[5]:
"""Generate pearson's coefficient"""
def generatePearsonCoefficient(A, B):
A = A - A.mean()
B = B - B.mean()
return ((A * B).sum())/(sqrt((A * A).sum()) * sqrt((B * B).sum()))
# ### predictLinearRegression Procedure
# This method performs predicts the value for Y given X and model parameters. This method will add bias to X.<br>
# The prediction is given by BX<sup>T</sup>
# In[6]:
"""Method to make prediction for yTest"""
def predictionLinearRegression(X, modelParameters):
X = np.insert(X, 0, 1, axis=1)
yPrediction = np.dot(modelParameters, X.T)
return yPrediction
# ### RMSE procedure
# Will calculate root mean squared error for given Ytrue values and YPrediction.
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/fc187c3557d633423444d4c80a4a50cd6ecc3dd4">
#
# In[7]:
"""Model accuracy estimator RMSE"""
def RMSE(yTrue, yPrediction):
n = yTrue.shape[0]
return sqrt((1.0) * np.sum(np.square((yTrue - yPrediction))))/n
# ### armijoStepLengthController proedure
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/ed6d74a5c23f9034a072125eeb316eee5faeed43">
# In[8]:
"""Uses armijo principle to detect next value of alpha.
Alpha values are rewritten. Passed to function just to maintain uniformity
"""
def armijoStepLengthController(fx, alpha, x, y, beta, gradient, delta, maxIterations = 1000):
alpha = 1.0
gradientSquare = np.dot(gradient, gradient)
for i in range(0, maxIterations):
alpha = alpha/2
residual_alpha_gradient = y - np.dot((beta - (alpha * gradient)), x .T)
fx_alpha_gradient = np.dot(residual_alpha_gradient.T, residual_alpha_gradient)
"""Convergence condition for armijo principle"""
if fx_alpha_gradient < fx - (alpha * delta * gradientSquare):
break;
return alpha
# ### boldDriverStepLengthController procedure
# An extension to armijo steplength controller. Retain alpha values.
# In[9]:
def boldDriverStepLengthController(fx, alpha, x, y, beta, gradient, maxIterations = 1000,
                                  alphaMinus = 0.5, alphaPlus = 1.1):
    """Bold-driver step control: grow the last alpha once, then shrink it
    until the objective strictly improves.

    Unlike the Armijo controller, the previous alpha is retained between
    calls: it is first multiplied by alphaPlus, then repeatedly by
    alphaMinus until f(beta - a*g) drops below fx (or the budget runs out).
    """
    alpha = alpha * alphaPlus
    for _ in range(maxIterations):
        alpha = alpha * alphaMinus
        trialResidual = y - np.dot(beta - alpha * gradient, x.T)
        trialObjective = np.dot(trialResidual.T, trialResidual)
        # Accept the first alpha that strictly decreases the objective.
        if fx - trialObjective > 0:
            break
    return alpha
# ### linearRegressionGradientDescent procedure
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/26a319f33db70a80f8c5373f4348a198a202056c">
# Calculate slope at the given point(gradient) and travel in the negative direction with provided step length.<br/>
# In[10]:
"""If no step length controller is provided then values of alpha will be taken as step length.
Else the step length controller will be used. Additional parameters to the controller are
provided by stepLengthControllerParameters"""
def linearRegressionGradientDescent(x, y, xTest, yTest, alpha, beta,
                                    maxIterations=1000, epsilon=1.1e-20,
                                    stepLengthController = None, stepLengthControllerParameters = None):
    """Fit linear-regression weights by batch gradient descent.

    If ``stepLengthController`` is None, the fixed ``alpha`` is used as the step
    length; otherwise the controller is called every iteration (receiving
    ``stepLengthControllerParameters`` as extra keyword arguments) and its
    return value replaces ``alpha``.

    Returns (beta, iteration indices, |loss delta| history, RMSE history,
    final residual sum of squares, final RMSE).
    """
    # Prepend the intercept column and force float arithmetic.
    x = np.insert(x, 0, 1, axis=1) * 1.0
    y = y * 1.0
    if stepLengthController is not None:
        print("Warning using stepLengthController alpha values will be rewritten")
    iterationTrace = []
    lossDeltaTrace = []
    rmseTrace = []
    residual = np.dot(beta, x.T) - y
    f_x = np.dot(residual.T, residual)
    rmse = RMSE(yTest, predictionLinearRegression(xTest, beta))
    # Seed the plotting traces with the starting point (iteration 0 holds the
    # raw loss rather than a delta, matching the original behaviour).
    rmseTrace.append(rmse)
    lossDeltaTrace.append(f_x)
    iterationTrace.append(0)
    for step in range(1, maxIterations):
        gradient = 2 * np.dot(x.T, residual)
        # Delegate the step length to the controller when one is supplied.
        if stepLengthController is not None:
            alpha = stepLengthController(fx = f_x, alpha = alpha, x = x, y = y,
                                         beta = beta, gradient = gradient, **stepLengthControllerParameters)
        beta = beta - alpha * gradient
        residual = np.dot(beta, x.T) - y
        f_x_new = np.dot(residual.T, residual)
        rmse = RMSE(yTest, predictionLinearRegression(xTest, beta))
        rmseTrace.append(rmse)
        lossDeltaTrace.append(abs(f_x_new - f_x))
        iterationTrace.append(step)
        # Converged when the loss stops moving; note the pre-update loss f_x
        # is what gets returned in this branch (original behaviour preserved).
        if abs(f_x - f_x_new) < epsilon:
            print("Converged in " + str(step) + " iterations")
            return beta, iterationTrace, lossDeltaTrace, rmseTrace, f_x, rmse
        f_x = f_x_new
    print("Warning algorithm failed to converge in " + str(maxIterations) + " interations")
    return beta, iterationTrace, lossDeltaTrace, rmseTrace, f_x, rmse
# # Gradient descent for airlines fare data
# ### Load the airlines dataset
# In[11]:
""" File path change accordingly"""
directoryPath = "data"
airFareData = pd.read_csv(directoryPath+"/airq402.dat", sep='\s+',header = None)
airFareData.head(10)
"""Adding header"""
airFareData.columns = ["city1", "city2", "avgFare", "distance", "avgWeeklyPassengers",
"marketLeadingAirline", "marketShareLA", "averageFare", "lowPriceAirline",
"marketShareLPA", "price"]
airFareData.head()
# ### Using textEncoder to convert text data to numeric data
# In[12]:
"""Using lambda functions to replace text values based upon lockup dictionary"""
cityLookupDictionary = textEncoder(airFareData.city1, airFareData.city2)
airFareData['city1'] = airFareData.city1.apply(lambda cityName:
cityLookupDictionary[cityName])
airFareData['city2'] = airFareData.city2.apply(lambda cityName:
cityLookupDictionary[cityName])
airLineLookupDictionary = textEncoder(airFareData.lowPriceAirline, airFareData.marketLeadingAirline)
airFareData['lowPriceAirline'] = airFareData.lowPriceAirline.apply(lambda cityName:
airLineLookupDictionary[cityName])
airFareData['marketLeadingAirline'] = airFareData.marketLeadingAirline.apply(lambda cityName:
airLineLookupDictionary[cityName])
# ### Check and remove missing data
# In[13]:
airFareData.dropna(inplace = True)
airFareData.head()
# ### Check for corelation between different X and Y
# In[14]:
for column in airFareData:
if column != "price":
print("The corelation between " + column +" vs price is " +
str(generatePearsonCoefficient(airFareData[column], airFareData['price'])))
# ### Visualizing the data
# In[15]:
# Scatter-plot every feature against price (5x2 grid) to eyeball the
# relationships behind the Pearson coefficients printed above.
plt.close()
figure, ((ax1, ax2), (ax3, ax4), (ax5, ax6), (ax7, ax8), (ax9, ax10)) = plt.subplots(5,2,sharey='none')
ax1.plot(airFareData.city1, airFareData.price, "ro")
ax1.grid()
ax1.set_title("city1 vs price")
ax1.set_xlabel("city1")
ax1.set_ylabel("price")
ax2.plot(airFareData.city2, airFareData.price, "ro")
ax2.grid()
ax2.set_title("city2 vs price")
ax2.set_xlabel("city2")
ax2.set_ylabel("price")
ax3.plot(airFareData.avgFare, airFareData.price, "ro")
ax3.grid()
ax3.set_title("avgFare vs price")
ax3.set_xlabel("avgFare")
ax3.set_ylabel("price")
ax4.plot(airFareData.distance, airFareData.price, "ro")
ax4.grid()
ax4.set_title("distance vs price")
ax4.set_xlabel("distance")
ax4.set_ylabel("price")
ax5.plot(airFareData.avgWeeklyPassengers, airFareData.price, "ro")
ax5.grid()
ax5.set_title("avgWeeklyPassengers vs price")
ax5.set_xlabel("avgWeeklyPassengers")
ax5.set_ylabel("price")
ax6.plot(airFareData.marketLeadingAirline, airFareData.price, "ro")
ax6.grid()
ax6.set_title("marketLeadingAirline vs price")
ax6.set_xlabel("marketLeadingAirline")
ax6.set_ylabel("price")
ax7.plot(airFareData.marketShareLA, airFareData.price, "ro")
ax7.grid()
ax7.set_title("marketShareLA vs price")
ax7.set_xlabel("marketShareLA")
ax7.set_ylabel("price")
ax8.plot(airFareData.averageFare, airFareData.price, "ro")
ax8.grid()
ax8.set_title("averageFare vs price")
ax8.set_xlabel("averageFare")
ax8.set_ylabel("price")
ax9.plot(airFareData.lowPriceAirline, airFareData.price, "ro")
ax9.grid()
ax9.set_title("lowPriceAirline vs price")
ax9.set_xlabel("lowPriceAirline")
ax9.set_ylabel("price")
ax10.plot(airFareData.marketShareLPA, airFareData.price, "ro")
ax10.grid()
ax10.set_title("marketShareLPA vs price")
ax10.set_xlabel("marketShareLPA")
ax10.set_ylabel("price")
plt.show()
# By looking at pearson's coefficient we can drop city1, city2, marketLeadingAirline, lowPriceAirline as they do not have any corelation with price.
# ### Selecting the required features and splitting the dataset using splitDataSetProcedure
# In[16]:
# Keep only the features that showed correlation with price (city/airline
# columns dropped, per the analysis above), plus the target itself.
airFareData = airFareData[['avgFare', 'distance', 'avgWeeklyPassengers', 'marketShareLA',
                           'averageFare', 'marketShareLPA', 'price']]
airFareData.head()
# In[17]:
# 80/20 train/test split.
trainSet, testSet = splitDataSet(airFareData, 0.8)
print(trainSet.shape)
print(testSet.shape)
# In[18]:
trainSet.head()
# ### Running gradient descent with alpha parameter grid serach
# In[19]:
"""Setting beta constant as future comparasion will be easy"""
np.random.seed(8)
inputBeta = np.random.random_sample(7)
alpha_parameterGrid = [0.1, 1.7e-9, 1.17e-11]
X_train = trainSet.as_matrix(columns = ['avgFare', 'distance', 'avgWeeklyPassengers', 'marketShareLA',
'averageFare', 'marketShareLPA'])
X_test = testSet.as_matrix(columns = ['avgFare', 'distance', 'avgWeeklyPassengers', 'marketShareLA',
'averageFare', 'marketShareLPA'])
Y_train = trainSet['price']
Y_test = testSet['price']
figure, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3,2,sharey='none')
axis = ((ax1, ax2), (ax3, ax4), (ax5, ax6))
index = 0
bestModelParameters = None
bestModelX = None
bestModelY = None
leastRMSE = None
leastRSS = None
for alpha in alpha_parameterGrid:
"""No step length controller provided so normal gradient descent will be executed"""
modelParameters, X, Ydiff, Yrmse, rss, rmse = linearRegressionGradientDescent(X_train, Y_train, X_test, Y_test,
alpha, inputBeta,
maxIterations = 1000)
"""Selecting the best model with least RMSE"""
if not(isnan(rmse)):
if leastRMSE is None or leastRMSE > rmse:
leastRMSE = rmse
bestModelParameters = modelParameters
leastRSS = rss
bestModelX = X
bestModelY = Yrmse
print("RMSE "+ str(rmse))
axis[index][0].plot(X, Ydiff)
axis[index][0].grid()
axis[index][0].set_title("Iteration vs abs(fx+1 - fx), alpha = " + str(alpha))
axis[index][0].set_xlabel("Iterations")
axis[index][0].set_ylabel("abs(fx+1 - fx)")
axis[index][1].plot(X, Yrmse)
axis[index][1].grid()
axis[index][1].set_title("Iteration vs RMSE, alpha = " + str(alpha))
axis[index][1].set_xlabel("Iterations")
axis[index][1].set_ylabel("RMSE")
index = index + 1
plt.show()
plt.close()
# ### Graph description
# <ul><li><b>Alpha = 0.1</b>
# <br>Here the alpha value is very big. Because of this instead of converging we are diverging away. Both abs(fx+1 - fx) and RMSE appear to be diverging.
# </li><li><b>Alpha = 1.7e-9</b><br>Here also the alpha value is too big. The observed effect is still the same</li><li><b>Alpha = 1.17e-11</b><br>Now alpha value is small enough for algorithm to converge. RMSE is also converging</li>
# ### Best model
# In[20]:
print("Best rmse for alpha grid is "+ str(leastRMSE))
print("Best rss for alpha grid is "+ str(leastRSS))
# ### Some sample predictions
# In[21]:
yPrediction = predictionLinearRegression(X_test,bestModelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Armijo Step Length Controller
# In[22]:
plt.close()
figure, ((ax1, ax2)) = plt.subplots(1, 2, sharey='none')
"""Set steplengthController to armijoStepLengthController and
stepLengthControllerParameters as any additional model parameters"""
modelParameters, xArmijo, ydiffArmijo, yRMSEArmijo, rss, rmse = linearRegressionGradientDescent(X_train, Y_train,
X_test, Y_test,
None, inputBeta, maxIterations = 1000,
stepLengthController = armijoStepLengthController,
stepLengthControllerParameters = {"delta":0.2})
figure.set_figheight(8)
print("RMSE "+ str(rmse))
ax1.plot(xArmijo, ydiffArmijo)
ax1.grid()
ax1.set_title("Iteration vs abs(fx+1 - fx)")
ax1.set_xlabel("Iterations")
ax1.set_ylabel("(fx+1 - fx)")
ax2.plot(xArmijo, yRMSEArmijo)
ax2.grid()
ax2.set_title("Iteration vs RMSE")
ax2.set_xlabel("Iterations")
ax2.set_ylabel("RMSE")
plt.show()
# ### Some sample predictions
# In[23]:
yPrediction = predictionLinearRegression(X_test,modelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Bold Driver Step Length Controller
# In[24]:
figure, ((ax1, ax2)) = plt.subplots(1, 2, sharey='none')
"""Set steplengthController to boldDriverStepLengthController and
stepLengthControllerParameters as any additional model parameters"""
# The bold-driver controller keeps and adapts alpha between iterations,
# so a real starting value (1.0) is passed here.
modelParameters, xBold, yDiffBold, yRMSEBold, rss, rmse = linearRegressionGradientDescent(X_train, Y_train,
                                                        X_test, Y_test,
                                                        1.0, inputBeta, maxIterations = 1000,
                                                        stepLengthController = boldDriverStepLengthController,
                                                        stepLengthControllerParameters =
                                                        {"alphaMinus" : 0.9, "alphaPlus" : 1.5})
figure.set_figheight(8)
print("RMSE "+ str(rmse))
ax1.plot(xBold, yDiffBold)
ax1.grid()
ax1.set_title("Iteration vs abs(fx+1 - fx)")
ax1.set_xlabel("Iterations")
ax1.set_ylabel("(fx+1 - fx)")
ax2.plot(xBold, yRMSEBold)
ax2.grid()
ax2.set_title("Iteration vs RMSE")
ax2.set_xlabel("Iterations")
ax2.set_ylabel("RMSE")
plt.show()
# ### Sample predictions
# In[25]:
yPrediction = predictionLinearRegression(X_test,modelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Comparasion
# In[26]:
# Overlay the RMSE curves of all three variants on one figure.
plt.close()
plt.figure(figsize=(9,7))
plt.plot(bestModelX, bestModelY, label = "Gradient Descent")
plt.plot(xArmijo, yRMSEArmijo, label = "Gradient Descent with Armijo step length controller")
plt.plot(xBold, yRMSEBold, label = "Gradient Descent with Bold driver length controller")
plt.grid()
plt.xlabel("Iteration")
plt.ylabel("RMSE")
plt.title("Comparasion of constant steplength and variable steplength with controller")
plt.legend()
plt.show()
# # Gradient descent for wine data
# ## Load data
# I am combining both red wine and white wine data in a single dataframe
# In[27]:
"""Load redwine data and add a new feature type
type = 0 => RedWine
type = 1 => WhiteWine """
tmpFrame = pd.read_csv(directoryPath+"/winequality-red.csv", sep=";")
tmpFrame['type'] = 0
wineData = tmpFrame
tmpFrame = pd.read_csv(directoryPath+"/winequality-white.csv", sep=";")
tmpFrame['type'] = 1
wineData = pd.concat([wineData, tmpFrame])
wineData.head()
# ## All data is numeric. Checking for NA data
# In[28]:
wineData.dropna(inplace = True)
wineData.head()
# ### Check for corelation between different X and Y
# #### For red wine
# In[29]:
redWine = wineData.loc[wineData['type'] == 0]
for column in redWine:
if column != "quality":
print("The corelation between " + column +" vs quality is " +
str(generatePearsonCoefficient(redWine[column], redWine['quality'])))
# #### For white wine
# In[30]:
whiteWine = wineData.loc[wineData['type'] == 1]
for column in whiteWine:
if column != "quality":
print("The corelation between " + column +" vs quality is " +
str(generatePearsonCoefficient(whiteWine[column], whiteWine['quality'])))
# #### Combined
# In[31]:
for column in wineData:
if column != "quality":
print("The corelation between " + column +" vs quality is " +
str(generatePearsonCoefficient(wineData[column], wineData['quality'])))
# ### Visualizing the data
# In[32]:
# Scatter-plot every wine feature against quality (6x2 grid).
figure, ((ax1, ax2), (ax3, ax4), (ax5, ax6), (ax7, ax8), (ax9, ax10), (ax11, ax12)) = plt.subplots(6,2,
                                                                                        sharey='none')
figure.tight_layout()
figure.set_figheight(40)
ax1.plot(wineData['fixed acidity'], wineData.quality, "ro")
ax1.grid()
ax1.set_title("fixed acidity vs quality")
ax1.set_xlabel("fixed acidity")
ax1.set_ylabel("quality")
ax2.plot(wineData['volatile acidity'], wineData.quality, "ro")
ax2.grid()
ax2.set_title("volatile acidity vs quality")
ax2.set_xlabel("volatile acidity")
ax2.set_ylabel("quality")
ax3.plot(wineData['citric acid'], wineData.quality, "ro")
ax3.grid()
ax3.set_title("citric acid vs quality")
ax3.set_xlabel("citric acid")
ax3.set_ylabel("quality")
ax4.plot(wineData['residual sugar'], wineData.quality, "ro")
ax4.grid()
ax4.set_title("residual sugar vs quality")
ax4.set_xlabel("residual sugar")
ax4.set_ylabel("quality")
ax5.plot(wineData['chlorides'], wineData.quality, "ro")
ax5.grid()
ax5.set_title("chlorides vs quality")
ax5.set_xlabel("chlorides")
ax5.set_ylabel("quality")
ax6.plot(wineData['free sulfur dioxide'], wineData.quality, "ro")
ax6.grid()
ax6.set_title("free sulfur dioxide vs quality")
ax6.set_xlabel("free sulfur dioxide")
ax6.set_ylabel("quality")
ax7.plot(wineData['total sulfur dioxide'], wineData.quality, "ro")
ax7.grid()
ax7.set_title("total sulfur dioxide vs quality")
ax7.set_xlabel("total sulfur dioxide")
ax7.set_ylabel("quality")
ax8.plot(wineData['density'], wineData.quality, "ro")
ax8.grid()
ax8.set_title("density vs quality")
ax8.set_xlabel("density")
ax8.set_ylabel("quality")
ax9.plot(wineData['pH'], wineData.quality, "ro")
ax9.grid()
ax9.set_title("pH vs quality")
ax9.set_xlabel("pH")
ax9.set_ylabel("quality")
ax10.plot(wineData['sulphates'], wineData.quality, "ro")
ax10.grid()
ax10.set_title("sulphates vs quality")
ax10.set_xlabel("sulphates")
ax10.set_ylabel("quality")
ax11.plot(wineData['alcohol'], wineData.quality, "ro")
ax11.grid()
ax11.set_title("alcohol vs quality")
ax11.set_xlabel("alcohol")
ax11.set_ylabel("quality")
ax12.plot(wineData['type'], wineData.quality, "ro")
ax12.grid()
ax12.set_title("type vs quality")
ax12.set_xlabel("type")
ax12.set_ylabel("quality")
plt.show()
# Selected features are volatile acidity, chlorides, density, alcohol and type
# ### Split data into trainSet and testSet
# In[33]:
# 80/20 train/test split of the combined wine frame.
trainSet, testSet = splitDataSet(wineData, 0.8)
print(trainSet.shape)
print(testSet.shape)
# ### Gradient descent no step length controller
# In[34]:
np.random.seed(8)
inputBeta = np.random.random_sample(6)
alpha_parameterGrid = [0.1, 0.007, 1.34e-7]
# Fix: DataFrame.as_matrix() was removed in pandas 1.0.  Selecting the
# columns and taking .values yields the same ndarray on every pandas version.
wineFeatureColumns = ['volatile acidity', 'chlorides', 'density', 'alcohol', 'type']
X_train = trainSet[wineFeatureColumns].values
X_test = testSet[wineFeatureColumns].values
Y_train = trainSet['quality']
Y_test = testSet['quality']
figure, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3,2,sharey='none')
axis = ((ax1, ax2), (ax3, ax4), (ax5, ax6))
index = 0
bestModelParameters = None
bestModelX = None
bestModelY = None
leastRMSE = None
leastRSS = None
# Grid-search the fixed step length; one row of plots per alpha value.
for alpha in alpha_parameterGrid:
    modelParameters, X, Ydiff, Yrmse, rss, rmse = linearRegressionGradientDescent(X_train, Y_train, X_test, Y_test,
                                                                          alpha, inputBeta,
                                                                          maxIterations = 1000)
    # Divergent runs yield NaN RMSE and are excluded from model selection.
    if not(isnan(rmse)):
        if leastRMSE is None or leastRMSE > rmse:
            leastRMSE = rmse
            bestModelParameters = modelParameters
            leastRSS = rss
            bestModelX = X
            bestModelY = Yrmse
    print("RMSE "+ str(rmse))
    axis[index][0].plot(X, Ydiff)
    axis[index][0].grid()
    axis[index][0].set_title("Iteration vs abs(fx+1 - fx), alpha = " + str(alpha))
    axis[index][0].set_xlabel("Iterations")
    axis[index][0].set_ylabel("abs(fx+1 - fx)")
    axis[index][1].plot(X, Yrmse)
    axis[index][1].grid()
    axis[index][1].set_title("Iteration vs RMSE, alpha = " + str(alpha))
    # Fix: these two labels targeted axis[index][0], clobbering the left
    # subplot's labels instead of labelling the RMSE subplot (compare the
    # airfare version of this loop, which uses axis[index][1] here).
    axis[index][1].set_xlabel("Iterations")
    axis[index][1].set_ylabel("RMSE")
    index = index + 1
plt.show()
plt.close()
# ### Graph description
# <ul><li><b>Alpha = 0.1</b>
# <br>Here the alpha value is very big. Because of this instead of converging we are diverging away. Both abs(fx+1 - fx) and RMSE appear to be diverging.
# </li><li><b>Alpha = 0.007</b><br>Here also the alpha value is too big. The observed effect is still the same</li><li><b>Alpha = 1.34e-7</b>Now alpha value is small enough for algorithm to converge. RMSE is also converging</li>
# ### Best model
# In[35]:
print("Best rmse for alpha grid is "+ str(leastRMSE))
print("Best rss for alpha grid is "+ str(leastRSS))
# ### Sample Predictions
# In[36]:
yPrediction = predictionLinearRegression(X_test,bestModelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Armijo Step Length Controller
# In[37]:
figure, ((ax1, ax2)) = plt.subplots(1, 2, sharey='none')
modelParameters, xArmijo, ydiffArmijo, yRMSEArmijo, rss, rmse = linearRegressionGradientDescent(X_train, Y_train,
X_test, Y_test,
alpha, inputBeta,
maxIterations = 1000,
stepLengthController = armijoStepLengthController,
stepLengthControllerParameters = {"delta" : 0.2})
figure.set_figheight(8)
print("RMSE "+ str(rmse))
ax1.plot(xArmijo, ydiffArmijo)
ax1.grid()
ax1.set_title("Iteration vs abs(fx+1 - fx)")
ax1.set_xlabel("Iterations")
ax1.set_ylabel("(fx+1 - fx)")
ax2.plot(xArmijo, yRMSEArmijo)
ax2.grid()
ax2.set_title("Iteration vs RMSE")
ax2.set_xlabel("Iterations")
ax2.set_ylabel("RMSE")
plt.show()
# ### Sample predictions
# In[38]:
yPrediction = predictionLinearRegression(X_test,modelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Bold Driver Step Length Controller
# In[39]:
figure, ((ax1, ax2)) = plt.subplots(1, 2, sharey='none')
# Bold-driver run on the wine data; alpha starts at 1.0 and is adapted by the
# controller between iterations.
modelParameters, xBold, yDiffBold, yRMSEBold, rss, rmse = linearRegressionGradientDescent(X_train, Y_train,
                                                            X_test, Y_test,
                                                            1.0, inputBeta, maxIterations = 1000,
                                                            stepLengthController = boldDriverStepLengthController,
                                                            stepLengthControllerParameters =
                                                            {"alphaMinus" : 0.9, "alphaPlus" : 1.5})
figure.set_figheight(8)
print("RMSE "+ str(rmse))
# Fix: these plots used the stale X/Ydiff/Yrmse left over from the grid
# search, not the traces returned by the bold-driver run just above (the
# comparison plot below correctly uses xBold/yRMSEBold).
ax1.plot(xBold, yDiffBold)
ax1.grid()
ax1.set_title("Iteration vs abs(fx+1 - fx)")
ax1.set_xlabel("Iterations")
ax1.set_ylabel("(fx+1 - fx)")
ax2.plot(xBold, yRMSEBold)
ax2.grid()
ax2.set_title("Iteration vs RMSE")
ax2.set_xlabel("Iterations")
ax2.set_ylabel("RMSE")
plt.show()
# ### Sample predictions
# In[40]:
yPrediction = predictionLinearRegression(X_test,modelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Comparasion
# In[41]:
# Overlay the RMSE curves of all three wine-data variants on one figure.
plt.close()
plt.figure(figsize=(9,7))
plt.plot(bestModelX, bestModelY, label = "Gradient Descent")
plt.plot(xArmijo, yRMSEArmijo, label = "Gradient Descent with Armijo step length controller")
plt.plot(xBold, yRMSEBold, label = "Gradient Descent with Bold driver length controller")
plt.grid()
plt.xlabel("Iteration")
plt.ylabel("RMSE")
plt.title("Comparasion of constant steplength and variable steplength with controller")
plt.legend()
plt.show()
| [
"matplotlib.pyplot.grid",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.dot",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.random.random_sample",
"numpy.square",
"matplotlib.pyplot.title",
"warnings.f... | [((734, 767), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (757, 767), False, 'import warnings\n'), ((8116, 8184), 'pandas.read_csv', 'pd.read_csv', (["(directoryPath + '/airq402.dat')"], {'sep': '"""\\\\s+"""', 'header': 'None'}), "(directoryPath + '/airq402.dat', sep='\\\\s+', header=None)\n", (8127, 8184), True, 'import pandas as pd\n'), ((9922, 9933), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9931, 9933), True, 'import matplotlib.pyplot as plt\n'), ((10007, 10040), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(2)'], {'sharey': '"""none"""'}), "(5, 2, sharey='none')\n", (10019, 10040), True, 'import matplotlib.pyplot as plt\n'), ((11696, 11706), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11704, 11706), True, 'import matplotlib.pyplot as plt\n'), ((12426, 12443), 'numpy.random.seed', 'np.random.seed', (['(8)'], {}), '(8)\n', (12440, 12443), True, 'import numpy as np\n'), ((12456, 12482), 'numpy.random.random_sample', 'np.random.random_sample', (['(7)'], {}), '(7)\n', (12479, 12482), True, 'import numpy as np\n'), ((12987, 13020), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2)'], {'sharey': '"""none"""'}), "(3, 2, sharey='none')\n", (12999, 13020), True, 'import matplotlib.pyplot as plt\n'), ((14429, 14439), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14437, 14439), True, 'import matplotlib.pyplot as plt\n'), ((14440, 14451), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14449, 14451), True, 'import matplotlib.pyplot as plt\n'), ((15152, 15211), 'pandas.DataFrame', 'pd.DataFrame', (["{'Actual': Y_test, 'Prediction': yPrediction}"], {}), "({'Actual': Y_test, 'Prediction': yPrediction})\n", (15164, 15211), True, 'import pandas as pd\n'), ((15273, 15284), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15282, 15284), True, 'import matplotlib.pyplot as plt\n'), ((15308, 15341), 'matplotlib.pyplot.subplots', 
'plt.subplots', (['(1)', '(2)'], {'sharey': '"""none"""'}), "(1, 2, sharey='none')\n", (15320, 15341), True, 'import matplotlib.pyplot as plt\n'), ((16385, 16395), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16393, 16395), True, 'import matplotlib.pyplot as plt\n'), ((16511, 16570), 'pandas.DataFrame', 'pd.DataFrame', (["{'Actual': Y_test, 'Prediction': yPrediction}"], {}), "({'Actual': Y_test, 'Prediction': yPrediction})\n", (16523, 16570), True, 'import pandas as pd\n'), ((16660, 16693), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharey': '"""none"""'}), "(1, 2, sharey='none')\n", (16672, 16693), True, 'import matplotlib.pyplot as plt\n'), ((17724, 17734), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17732, 17734), True, 'import matplotlib.pyplot as plt\n'), ((17845, 17904), 'pandas.DataFrame', 'pd.DataFrame', (["{'Actual': Y_test, 'Prediction': yPrediction}"], {}), "({'Actual': Y_test, 'Prediction': yPrediction})\n", (17857, 17904), True, 'import pandas as pd\n'), ((17948, 17959), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17957, 17959), True, 'import matplotlib.pyplot as plt\n'), ((17960, 17986), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 7)'}), '(figsize=(9, 7))\n', (17970, 17986), True, 'import matplotlib.pyplot as plt\n'), ((17986, 18044), 'matplotlib.pyplot.plot', 'plt.plot', (['bestModelX', 'bestModelY'], {'label': '"""Gradient Descent"""'}), "(bestModelX, bestModelY, label='Gradient Descent')\n", (17994, 18044), True, 'import matplotlib.pyplot as plt\n'), ((18047, 18143), 'matplotlib.pyplot.plot', 'plt.plot', (['xArmijo', 'yRMSEArmijo'], {'label': '"""Gradient Descent with Armijo step length controller"""'}), "(xArmijo, yRMSEArmijo, label=\n 'Gradient Descent with Armijo step length controller')\n", (18055, 18143), True, 'import matplotlib.pyplot as plt\n'), ((18141, 18233), 'matplotlib.pyplot.plot', 'plt.plot', (['xBold', 'yRMSEBold'], {'label': '"""Gradient Descent with 
Bold driver length controller"""'}), "(xBold, yRMSEBold, label=\n 'Gradient Descent with Bold driver length controller')\n", (18149, 18233), True, 'import matplotlib.pyplot as plt\n'), ((18232, 18242), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (18240, 18242), True, 'import matplotlib.pyplot as plt\n'), ((18243, 18266), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (18253, 18266), True, 'import matplotlib.pyplot as plt\n'), ((18267, 18285), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMSE"""'], {}), "('RMSE')\n", (18277, 18285), True, 'import matplotlib.pyplot as plt\n'), ((18286, 18383), 'matplotlib.pyplot.title', 'plt.title', (['"""Comparasion of constant steplength and variable steplength with controller"""'], {}), "(\n 'Comparasion of constant steplength and variable steplength with controller'\n )\n", (18295, 18383), True, 'import matplotlib.pyplot as plt\n'), ((18374, 18386), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18384, 18386), True, 'import matplotlib.pyplot as plt\n'), ((18387, 18397), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18395, 18397), True, 'import matplotlib.pyplot as plt\n'), ((18643, 18703), 'pandas.read_csv', 'pd.read_csv', (["(directoryPath + '/winequality-red.csv')"], {'sep': '""";"""'}), "(directoryPath + '/winequality-red.csv', sep=';')\n", (18654, 18703), True, 'import pandas as pd\n'), ((18756, 18818), 'pandas.read_csv', 'pd.read_csv', (["(directoryPath + '/winequality-white.csv')"], {'sep': '""";"""'}), "(directoryPath + '/winequality-white.csv', sep=';')\n", (18767, 18818), True, 'import pandas as pd\n'), ((18849, 18880), 'pandas.concat', 'pd.concat', (['[wineData, tmpFrame]'], {}), '([wineData, tmpFrame])\n', (18858, 18880), True, 'import pandas as pd\n'), ((20013, 20046), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(6)', '(2)'], {'sharey': '"""none"""'}), "(6, 2, sharey='none')\n", (20025, 20046), True, 'import matplotlib.pyplot as 
plt\n'), ((22198, 22208), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22206, 22208), True, 'import matplotlib.pyplot as plt\n'), ((22503, 22520), 'numpy.random.seed', 'np.random.seed', (['(8)'], {}), '(8)\n', (22517, 22520), True, 'import numpy as np\n'), ((22533, 22559), 'numpy.random.random_sample', 'np.random.random_sample', (['(6)'], {}), '(6)\n', (22556, 22559), True, 'import numpy as np\n'), ((22916, 22949), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2)'], {'sharey': '"""none"""'}), "(3, 2, sharey='none')\n", (22928, 22949), True, 'import matplotlib.pyplot as plt\n'), ((24218, 24228), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24226, 24228), True, 'import matplotlib.pyplot as plt\n'), ((24229, 24240), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (24238, 24240), True, 'import matplotlib.pyplot as plt\n'), ((24930, 24989), 'pandas.DataFrame', 'pd.DataFrame', (["{'Actual': Y_test, 'Prediction': yPrediction}"], {}), "({'Actual': Y_test, 'Prediction': yPrediction})\n", (24942, 24989), True, 'import pandas as pd\n'), ((25074, 25107), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharey': '"""none"""'}), "(1, 2, sharey='none')\n", (25086, 25107), True, 'import matplotlib.pyplot as plt\n'), ((26035, 26045), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26043, 26045), True, 'import matplotlib.pyplot as plt\n'), ((26156, 26215), 'pandas.DataFrame', 'pd.DataFrame', (["{'Actual': Y_test, 'Prediction': yPrediction}"], {}), "({'Actual': Y_test, 'Prediction': yPrediction})\n", (26168, 26215), True, 'import pandas as pd\n'), ((26305, 26338), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharey': '"""none"""'}), "(1, 2, sharey='none')\n", (26317, 26338), True, 'import matplotlib.pyplot as plt\n'), ((27220, 27230), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27228, 27230), True, 'import matplotlib.pyplot as plt\n'), ((27341, 27400), 'pandas.DataFrame', 
'pd.DataFrame', (["{'Actual': Y_test, 'Prediction': yPrediction}"], {}), "({'Actual': Y_test, 'Prediction': yPrediction})\n", (27353, 27400), True, 'import pandas as pd\n'), ((27444, 27455), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (27453, 27455), True, 'import matplotlib.pyplot as plt\n'), ((27456, 27482), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 7)'}), '(figsize=(9, 7))\n', (27466, 27482), True, 'import matplotlib.pyplot as plt\n'), ((27482, 27540), 'matplotlib.pyplot.plot', 'plt.plot', (['bestModelX', 'bestModelY'], {'label': '"""Gradient Descent"""'}), "(bestModelX, bestModelY, label='Gradient Descent')\n", (27490, 27540), True, 'import matplotlib.pyplot as plt\n'), ((27543, 27639), 'matplotlib.pyplot.plot', 'plt.plot', (['xArmijo', 'yRMSEArmijo'], {'label': '"""Gradient Descent with Armijo step length controller"""'}), "(xArmijo, yRMSEArmijo, label=\n 'Gradient Descent with Armijo step length controller')\n", (27551, 27639), True, 'import matplotlib.pyplot as plt\n'), ((27637, 27729), 'matplotlib.pyplot.plot', 'plt.plot', (['xBold', 'yRMSEBold'], {'label': '"""Gradient Descent with Bold driver length controller"""'}), "(xBold, yRMSEBold, label=\n 'Gradient Descent with Bold driver length controller')\n", (27645, 27729), True, 'import matplotlib.pyplot as plt\n'), ((27728, 27738), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (27736, 27738), True, 'import matplotlib.pyplot as plt\n'), ((27739, 27762), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (27749, 27762), True, 'import matplotlib.pyplot as plt\n'), ((27763, 27781), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMSE"""'], {}), "('RMSE')\n", (27773, 27781), True, 'import matplotlib.pyplot as plt\n'), ((27782, 27879), 'matplotlib.pyplot.title', 'plt.title', (['"""Comparasion of constant steplength and variable steplength with controller"""'], {}), "(\n 'Comparasion of constant steplength and variable steplength 
with controller'\n )\n", (27791, 27879), True, 'import matplotlib.pyplot as plt\n'), ((27870, 27882), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (27880, 27882), True, 'import matplotlib.pyplot as plt\n'), ((27883, 27893), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27891, 27893), True, 'import matplotlib.pyplot as plt\n'), ((3404, 3430), 'numpy.insert', 'np.insert', (['X', '(0)', '(1)'], {'axis': '(1)'}), '(X, 0, 1, axis=1)\n', (3413, 3430), True, 'import numpy as np\n'), ((3449, 3477), 'numpy.dot', 'np.dot', (['modelParameters', 'X.T'], {}), '(modelParameters, X.T)\n', (3455, 3477), True, 'import numpy as np\n'), ((4330, 4356), 'numpy.dot', 'np.dot', (['gradient', 'gradient'], {}), '(gradient, gradient)\n', (4336, 4356), True, 'import numpy as np\n'), ((6279, 6305), 'numpy.insert', 'np.insert', (['x', '(0)', '(1)'], {'axis': '(1)'}), '(x, 0, 1, axis=1)\n', (6288, 6305), True, 'import numpy as np\n'), ((6551, 6568), 'numpy.dot', 'np.dot', (['beta', 'x.T'], {}), '(beta, x.T)\n', (6557, 6568), True, 'import numpy as np\n'), ((6611, 6639), 'numpy.dot', 'np.dot', (['residual.T', 'residual'], {}), '(residual.T, residual)\n', (6617, 6639), True, 'import numpy as np\n'), ((4546, 4604), 'numpy.dot', 'np.dot', (['residual_alpha_gradient.T', 'residual_alpha_gradient'], {}), '(residual_alpha_gradient.T, residual_alpha_gradient)\n', (4552, 4604), True, 'import numpy as np\n'), ((5299, 5357), 'numpy.dot', 'np.dot', (['residual_alpha_gradient.T', 'residual_alpha_gradient'], {}), '(residual_alpha_gradient.T, residual_alpha_gradient)\n', (5305, 5357), True, 'import numpy as np\n'), ((7270, 7287), 'numpy.dot', 'np.dot', (['beta', 'x.T'], {}), '(beta, x.T)\n', (7276, 7287), True, 'import numpy as np\n'), ((7351, 7379), 'numpy.dot', 'np.dot', (['residual.T', 'residual'], {}), '(residual.T, residual)\n', (7357, 7379), True, 'import numpy as np\n'), ((13666, 13677), 'math.isnan', 'isnan', (['rmse'], {}), '(rmse)\n', (13671, 13677), False, 'from math import 
sqrt, isnan\n'), ((23455, 23466), 'math.isnan', 'isnan', (['rmse'], {}), '(rmse)\n', (23460, 23466), False, 'from math import sqrt, isnan\n'), ((4476, 4512), 'numpy.dot', 'np.dot', (['(beta - alpha * gradient)', 'x.T'], {}), '(beta - alpha * gradient, x.T)\n', (4482, 4512), True, 'import numpy as np\n'), ((5229, 5265), 'numpy.dot', 'np.dot', (['(beta - alpha * gradient)', 'x.T'], {}), '(beta - alpha * gradient, x.T)\n', (5235, 5265), True, 'import numpy as np\n'), ((6885, 6906), 'numpy.dot', 'np.dot', (['x.T', 'residual'], {}), '(x.T, residual)\n', (6891, 6906), True, 'import numpy as np\n'), ((3853, 3883), 'numpy.square', 'np.square', (['(yTrue - yPrediction)'], {}), '(yTrue - yPrediction)\n', (3862, 3883), True, 'import numpy as np\n')] |
from __future__ import print_function
from measurement.base import MeasureBase, BidimensionalMeasure
from measurement.utils import get_all_measures
# Emit reStructuredText documentation for every registered measure class.
for measure_cls in get_all_measures():
    title = measure_cls.__name__
    # Section heading: class name underlined with dashes.
    print(title)
    print('-' * len(title))
    print()
    if issubclass(measure_cls, MeasureBase):
        # Simple measures list their unit names and aliases, sorted and
        # wrapped in double backticks for RST inline-literal markup.
        unit_items = sorted('``%s``' % unit for unit in measure_cls.get_units())
        alias_items = sorted(
            '``%s``' % alias for alias in measure_cls.get_aliases())
        print('* *Acceptable as Arguments or Attributes*: %s'
              % ', '.join(unit_items))
        print('* *Acceptable as Arguments*: %s' % ', '.join(alias_items))
    elif issubclass(measure_cls, BidimensionalMeasure):
        # Bi-dimensional measures get a fixed explanatory note plus their
        # primary/reference dimension names.
        print('\n'.join((
            ".. note::",
            "    This is a bi-dimensional measurement; bi-dimensional",
            "    measures are created by finding an appropriate unit in the",
            "    measure's primary measurement class, and an appropriate",
            "    in the measure's reference class, and using them as a",
            "    double-underscore-separated keyword argument (or, if",
            "    converting to another unit, as an attribute).",
            "",
            "    For example, to create an object representing 24 miles-per",
            "    hour::",
            "",
            "        >>> from measurement.measure import Speed",
            "        >>> my_speed = Speed(mile__hour=24)",
            "        >>> print my_speed",
            "        24.0 mi/hr",
            "        >>> print my_speed.km__hr",
            "        38.624256",
            "",
            "* *Primary Measurement*: %s"
            % measure_cls.PRIMARY_DIMENSION.__name__,
            "* *Reference Measurement*: %s"
            % measure_cls.REFERENCE_DIMENSION.__name__,
        )))
    # Blank line between measure sections.
    print()
| [
"measurement.utils.get_all_measures"
] | [((165, 183), 'measurement.utils.get_all_measures', 'get_all_measures', ([], {}), '()\n', (181, 183), False, 'from measurement.utils import get_all_measures\n')] |
#coding=utf-8
import tensorflow as tf
import tfop
contrib_image = tf.contrib.image
def blend(image1, image2, factor):
  """Interpolates (or extrapolates) between `image1` and `image2`.

  A `factor` of 0.0 yields image1 and 1.0 yields image2; values in
  between linearly mix the two images, while values above 1.0
  "extrapolate" the difference and clip the result back into [0, 255].

  Args:
    image1: An image Tensor of type uint8.
    image2: An image Tensor of type uint8.
    factor: A floating point value above 0.0.

  Returns:
    A blended image Tensor of type uint8.
  """
  input_dtype = image1.dtype
  # Degenerate factors short-circuit without touching the pixel data.
  if factor == 0.0:
    return tf.convert_to_tensor(image1)
  if factor == 1.0:
    return tf.convert_to_tensor(image2)
  im1 = tf.to_float(image1)
  im2 = tf.to_float(image2)
  # Linear blend in float space: im1 + factor * (im2 - im1).
  mixed = im1 + factor * (im2 - im1)
  if 0.0 < factor < 1.0:
    # Pure interpolation always stays within [0, 255].
    return tf.cast(mixed, tf.uint8)
  # Extrapolation can leave the valid range, so clip before casting.
  return tf.cast(tf.clip_by_value(mixed, 0.0, 255.0), input_dtype)
def color(image, factor):
  """Equivalent of PIL Color: blends `image` with its grayscale version."""
  gray = tf.image.rgb_to_grayscale(image)
  degenerate = tf.image.grayscale_to_rgb(gray)
  return blend(degenerate, image, factor)
def unwrap(image, replace):
  """Unwraps an image produced by wrap.
  Where there is a 0 in the last channel for every spatial position,
  the rest of the three channels in that spatial dimension are grayed
  (set to 128). Operations like translate and shear on a wrapped
  Tensor will leave 0s in empty locations. Some transformations look
  at the intensity of values to do preprocessing, and we want these
  empty pixels to assume the 'average' value, rather than pure black.
  Args:
    image: A 3D Image Tensor with 4 channels.
    replace: A one or three value 1D tensor to fill empty pixels.
  Returns:
    image: A 3D image Tensor with 3 channels.
  """
  image_shape = tf.shape(image)
  # Flatten the spatial dimensions.
  flattened_image = tf.reshape(image, [-1, image_shape[2]])
  # Find all pixels where the last channel is zero.
  alpha_channel = flattened_image[:, -1]
  # Append a 1 so `replace` also covers the alpha channel of filled pixels.
  replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
  # Where they are zero, fill them in with 'replace' (broadcast across
  # channels via ones_like * replace).
  flattened_image = tf.where(
      tf.equal(alpha_channel, 0),
      tf.ones_like(flattened_image, dtype=image.dtype) * replace,
      flattened_image)
  image = tf.reshape(flattened_image, image_shape)
  # Drop the helper alpha channel, keeping only the first 3 channels.
  image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
  return image
def wrap(image):
  """Appends an all-ones fourth channel to `image` (see `unwrap`)."""
  image_shape = tf.shape(image)
  ones_channel = tf.ones([image_shape[0], image_shape[1], 1], image.dtype)
  return tf.concat([image, ones_channel], 2)
def translate_x(image, pixels, replace):
  """Equivalent of PIL Translate in X dimension.

  `None` images pass through unchanged; empty regions exposed by the
  shift are filled with `replace`.
  """
  if image is None:
    return None
  shifted = contrib_image.translate(wrap(image), [-pixels, 0])
  return unwrap(shifted, replace)
def translate_y(image, pixels, replace):
  """Equivalent of PIL Translate in Y dimension.

  `None` images pass through unchanged; empty regions exposed by the
  shift are filled with `replace`.
  """
  if image is None:
    return None
  shifted = contrib_image.translate(wrap(image), [0, -pixels])
  return unwrap(shifted, replace)
def _clip_bbox(min_y, min_x, max_y, max_x):
  """Clamps normalized bbox coordinates into the [0, 1] interval.

  Args:
    min_y: Normalized bbox coordinate of type float between 0 and 1.
    min_x: Normalized bbox coordinate of type float between 0 and 1.
    max_y: Normalized bbox coordinate of type float between 0 and 1.
    max_x: Normalized bbox coordinate of type float between 0 and 1.

  Returns:
    Clipped coordinate values between 0 and 1.
  """
  min_y, min_x, max_y, max_x = (
      tf.clip_by_value(coord, 0.0, 1.0)
      for coord in (min_y, min_x, max_y, max_x))
  return min_y, min_x, max_y, max_x
def _check_bbox_area(min_y, min_x, max_y, max_x, delta=0.05):
  """Adjusts bbox coordinates to make sure the area is > 0.
  Args:
    min_y: Normalized bbox coordinate of type float between 0 and 1.
    min_x: Normalized bbox coordinate of type float between 0 and 1.
    max_y: Normalized bbox coordinate of type float between 0 and 1.
    max_x: Normalized bbox coordinate of type float between 0 and 1.
    delta: Float, this is used to create a gap of size 2 * delta between
      bbox min/max coordinates that are the same on the boundary.
      This prevents the bbox from having an area of zero.
  Returns:
    Tuple of new bbox coordinates between 0 and 1 that will now have a
    guaranteed area > 0.
  """
  height = max_y - min_y
  width = max_x - min_x
  def _adjust_bbox_boundaries(min_coord, max_coord):
    # Make sure max is never 0 and min is never 1.
    max_coord = tf.maximum(max_coord, 0.0 + delta)
    min_coord = tf.minimum(min_coord, 1.0 - delta)
    return min_coord, max_coord
  # height/width are tensors, so the degenerate-box fixup has to happen
  # inside the graph via tf.cond rather than a Python `if`.
  min_y, max_y = tf.cond(tf.equal(height, 0.0),
                          lambda: _adjust_bbox_boundaries(min_y, max_y),
                          lambda: (min_y, max_y))
  min_x, max_x = tf.cond(tf.equal(width, 0.0),
                          lambda: _adjust_bbox_boundaries(min_x, max_x),
                          lambda: (min_x, max_x))
  return min_y, min_x, max_y, max_x
def _shift_bbox(bbox, image_height, image_width, pixels, shift_horizontal):
  """Shifts the bbox coordinates by pixels.
  Args:
    bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
      of type float that represents the normalized coordinates between 0 and 1.
    image_height: Int, height of the image.
    image_width: Int, width of the image.
    pixels: An int. How many pixels to shift the bbox.
    shift_horizontal: Boolean. If true then shift in X dimension else shift in
      Y dimension.
  Returns:
    A tensor of the same shape as bbox, but now with the shifted coordinates.
  """
  pixels = tf.to_int32(pixels)
  # Convert bbox to integer pixel locations.
  min_y = tf.to_int32(tf.to_float(image_height) * bbox[0])
  min_x = tf.to_int32(tf.to_float(image_width) * bbox[1])
  max_y = tf.to_int32(tf.to_float(image_height) * bbox[2])
  max_x = tf.to_int32(tf.to_float(image_width) * bbox[3])
  # Shift by `pixels` along one axis, clamping to the image frame
  # (mirrors the direction used by translate_x / translate_y).
  if shift_horizontal:
    min_x = tf.maximum(0, min_x - pixels)
    max_x = tf.minimum(image_width, max_x - pixels)
  else:
    min_y = tf.maximum(0, min_y - pixels)
    max_y = tf.minimum(image_height, max_y - pixels)
  # Convert bbox back to floats.
  min_y = tf.to_float(min_y) / tf.to_float(image_height)
  min_x = tf.to_float(min_x) / tf.to_float(image_width)
  max_y = tf.to_float(max_y) / tf.to_float(image_height)
  max_x = tf.to_float(max_x) / tf.to_float(image_width)
  # Clip the bboxes to be sure the fall between [0, 1].
  min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
  min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
  return tf.stack([min_y, min_x, max_y, max_x])
def translate_bbox(image, mask,bboxes, pixels, replace, shift_horizontal):
  """Equivalent of PIL Translate in X/Y dimension that shifts image and bbox.
  Args:
    image: 3D uint8 Tensor.
    mask: Optional mask Tensor translated alongside the image; exposed
      regions are filled with 0 (may be None, which passes through).
    bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
      has 4 elements (min_y, min_x, max_y, max_x) of type float with values
      between [0, 1].
    pixels: An int. How many pixels to shift the image and bboxes
    replace: A one or three value 1D tensor to fill empty pixels.
    shift_horizontal: Boolean. If true then shift in X dimension else shift in
      Y dimension.
  Returns:
    A tuple containing a 3D uint8 Tensor that will be the result of translating
    image by pixels, the translated mask, and bboxes, where now
    the coordinates will be shifted to reflect the shifted image.
  """
  if shift_horizontal:
    image = translate_x(image, pixels, replace)
    mask = translate_x(mask, pixels, 0)
  else:
    image = translate_y(image, pixels, replace)
    mask = translate_y(mask, pixels, 0)
  # Convert bbox coordinates to pixel values.
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  # pylint:disable=g-long-lambda
  wrapped_shift_bbox = lambda bbox: _shift_bbox(
      bbox, image_height, image_width, pixels, shift_horizontal)
  # pylint:enable=g-long-lambda
  bboxes = tf.map_fn(wrapped_shift_bbox, bboxes)
  return image, mask,bboxes
def equalize(image):
  """Implements Equalize function from PIL using TF ops.

  Histogram-equalizes each of the three channels independently and
  restacks them; input is assumed to be a 3-channel uint8 image.
  """
  dtype = image.dtype
  def scale_channel(im, c):
    """Scale the data in the channel to implement equalize."""
    im = tf.cast(im[:, :, c], tf.int32)
    # Compute the histogram of the image channel.
    histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
    # For the purposes of computing the step, filter out the nonzeros.
    nonzero = tf.where(tf.not_equal(histo, 0))
    nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
    step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
    def build_lut(histo, step):
      # Compute the cumulative sum, shifting by step // 2
      # and then normalization by step.
      lut = (tf.cumsum(histo) + (step // 2)) // step
      # Shift lut, prepending with 0.
      lut = tf.concat([[0], lut[:-1]], 0)
      # Clip the counts to be in range. This is done
      # in the C code for image.point.
      return tf.clip_by_value(lut, 0, 255)
    # If step is zero (flat channel), return the original values.
    # Otherwise, build a lookup table from the histogram and step,
    # then remap each pixel through it.
    result = tf.cond(tf.equal(step, 0),
                     lambda: im,
                     lambda: tf.gather(build_lut(histo, step), im))
    return tf.cast(result, dtype)
  # Assumes RGB for now. Scales each channel independently
  # and then stacks the result.
  s1 = scale_channel(image, 0)
  s2 = scale_channel(image, 1)
  s3 = scale_channel(image, 2)
  image = tf.stack([s1, s2, s3], 2)
  return image
def cutout(image, pad_size, replace=0):
  """Apply cutout (https://arxiv.org/abs/1708.04552) to image.
  This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
  a random location within `img`. The pixel values filled in will be of the
  value `replace`. The located where the mask will be applied is randomly
  chosen uniformly over the whole image.
  Args:
    image: An image Tensor of type uint8.
    pad_size: Specifies how big the zero mask that will be generated is that
      is applied to the image. The mask will be of size
      (2*pad_size x 2*pad_size).
    replace: What pixel value to fill in the image in the area that has
      the cutout mask applied to it.
  Returns:
    An image Tensor that is of type uint8.
  """
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  # Sample the center location in the image where the zero mask will be applied.
  cutout_center_height = tf.random_uniform(
      shape=[], minval=0, maxval=image_height,
      dtype=tf.int32)
  cutout_center_width = tf.random_uniform(
      shape=[], minval=0, maxval=image_width,
      dtype=tf.int32)
  # Distances from the cutout square to each image border; maximum with 0
  # clips the square when the sampled center is near an edge.
  lower_pad = tf.maximum(0, cutout_center_height - pad_size)
  upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
  left_pad = tf.maximum(0, cutout_center_width - pad_size)
  right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
  cutout_shape = [image_height - (lower_pad + upper_pad),
                  image_width - (left_pad + right_pad)]
  padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
  # Build a full-size mask that is 0 inside the cutout square and 1
  # everywhere else by padding a block of zeros with ones.
  mask = tf.pad(
      tf.zeros(cutout_shape, dtype=image.dtype),
      padding_dims, constant_values=1)
  mask = tf.expand_dims(mask, -1)
  mask = tf.tile(mask, [1, 1, 3])
  image = tf.where(
      tf.equal(mask, 0),
      tf.ones_like(image, dtype=image.dtype) * replace,
      image)
  return image
def shear_x(image, level, replace):
  """Equivalent of PIL Shearing in X dimension.

  When `replace` is None, the image is sheared directly and exposed
  pixels keep whatever the transform produces; otherwise the image is
  wrapped/unwrapped so exposed pixels are filled with `replace`.
  """
  # Shear parallel to x axis is a projective transform with matrix
  # [1 level; 0 1], given here in flattened 8-element form.
  transform = [1., level, 0., 0., 1., 0., 0., 0.]
  if replace is None:
    return contrib_image.transform(image, transform)
  sheared = contrib_image.transform(wrap(image), transform)
  return unwrap(sheared, replace)
def shear_y(image, level, replace):
  """Equivalent of PIL Shearing in Y dimension.

  When `replace` is None, the image is sheared directly and exposed
  pixels keep whatever the transform produces; otherwise the image is
  wrapped/unwrapped so exposed pixels are filled with `replace`.
  """
  # Shear parallel to y axis is a projective transform with matrix
  # [1 0; level 1], given here in flattened 8-element form.
  transform = [1., 0., 0., level, 1., 0., 0., 0.]
  if replace is None:
    return contrib_image.transform(image, transform)
  sheared = contrib_image.transform(wrap(image), transform)
  return unwrap(sheared, replace)
def _shear_bbox(bbox, image_height, image_width, level, shear_horizontal):
  """Shifts the bbox according to how the image was sheared.
  Args:
    bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x), absolute coordinate
    image_height: Int, height of the image.
    image_width: Int, width of the image.
    level: Float. How much to shear the image.
    shear_horizontal: If true then shear in X dimension else shear in
      the Y dimension.
  Returns:
    A tensor of the same shape as bbox, but now with the shifted coordinates.
  """
  image_height, image_width = (
      tf.to_float(image_height), tf.to_float(image_width))
  # Gather the four corner points of the box as (y, x) pairs.
  min_y = bbox[0]
  min_x = bbox[1]
  max_y = bbox[2]
  max_x = bbox[3]
  coordinates = tf.stack(
      [[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
  # NOTE(review): no-op self-assignment, apparently left over from an
  # earlier edit (upstream normalized coordinates to pixels here).
  coordinates = coordinates
  # Shear the coordinates according to the translation matrix.
  if shear_horizontal:
    translation_matrix = tf.stack(
        [[1, 0], [-level, 1]])
  else:
    translation_matrix = tf.stack(
        [[1, -level], [0, 1]])
  translation_matrix = tf.cast(translation_matrix, tf.float32)
  new_coords = tf.cast(
      tf.matmul(translation_matrix, tf.transpose(coordinates)), tf.int32)
  # Find min/max values and convert them back to floats.
  min_y = tf.to_float(tf.reduce_min(new_coords[0, :]))
  min_x = tf.to_float(tf.reduce_min(new_coords[1, :]))
  max_y = tf.to_float(tf.reduce_max(new_coords[0, :]))
  max_x = tf.to_float(tf.reduce_max(new_coords[1, :]))
  # Clip the coordinates so they stay inside the image's pixel range
  # ([0, height-1] / [0, width-1]); absolute coordinates, not [0, 1].
  min_y = tf.clip_by_value(min_y,clip_value_min=0,clip_value_max=image_height-1)
  max_y = tf.clip_by_value(max_y,clip_value_min=0,clip_value_max=image_height-1)
  min_x = tf.clip_by_value(min_x,clip_value_min=0,clip_value_max=image_width-1)
  max_x = tf.clip_by_value(max_x,clip_value_min=0,clip_value_max=image_width-1)
  return tf.stack([min_y, min_x, max_y, max_x])
def shear_with_bboxes(image, bboxes, mask,level, replace, shear_horizontal):
  """Applies Shear Transformation to the image and shifts the bboxes.
  Args:
    image: 3D uint8 Tensor.
    bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
      has 4 elements (min_y, min_x, max_y, max_x) of type float with values
      between [0, 1].
    mask: Optional instance masks; when given, the bboxes are rebuilt
      from the sheared masks instead of being sheared analytically.
    level: Float. How much to shear the image. This value will be between
      -0.3 to 0.3.
    replace: A one or three value 1D tensor to fill empty pixels.
    shear_horizontal: Boolean. If true then shear in X dimension else shear in
      the Y dimension.
  Returns:
    A tuple containing a 3D uint8 Tensor that will be the result of shearing
    image by level. The second element of the tuple is bboxes, where now
    the coordinates will be shifted to reflect the sheared image.
  """
  if shear_horizontal:
    image = shear_x(image, level, replace)
  else:
    image = shear_y(image, level, replace)
  if mask is not None:
    # Mask appears to be laid out (N, H, W); transpose to (H, W, N) so
    # the shear ops treat instances as channels -- TODO confirm layout.
    mask = tf.transpose(mask,[1,2,0])
    if shear_horizontal:
        mask = shear_x(mask,level,None)
    else:
        mask = shear_y(mask,level,None)
    mask = tf.transpose(mask,[2,0,1])
    # Recompute tight boxes from the sheared masks (project-local op).
    bboxes = tfop.get_bboxes_from_mask(mask,stride=4)
  else:
    # Convert bbox coordinates to pixel values.
    image_height = tf.shape(image)[0]
    image_width = tf.shape(image)[1]
    # pylint:disable=g-long-lambda
    wrapped_shear_bbox = lambda bbox: _shear_bbox(
        bbox, image_height, image_width, level, shear_horizontal)
    # pylint:enable=g-long-lambda
    bboxes = tf.map_fn(wrapped_shear_bbox, bboxes)
  return image, bboxes, mask
| [
"tensorflow.tile",
"tensorflow.equal",
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"tensorflow.histogram_fixed_width",
"tensorflow.ones_like",
"tensorflow.cast",
"tensorflow.slice",
"tensorflow.reduce_min",
"tfop.get_bboxes_from_mask",
"tensorflow.not_equal",
"tensor... | [((883, 902), 'tensorflow.to_float', 'tf.to_float', (['image1'], {}), '(image1)\n', (894, 902), True, 'import tensorflow as tf\n'), ((914, 933), 'tensorflow.to_float', 'tf.to_float', (['image2'], {}), '(image2)\n', (925, 933), True, 'import tensorflow as tf\n'), ((2188, 2203), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (2196, 2203), True, 'import tensorflow as tf\n'), ((2260, 2299), 'tensorflow.reshape', 'tf.reshape', (['image', '[-1, image_shape[2]]'], {}), '(image, [-1, image_shape[2]])\n', (2270, 2299), True, 'import tensorflow as tf\n'), ((2677, 2717), 'tensorflow.reshape', 'tf.reshape', (['flattened_image', 'image_shape'], {}), '(flattened_image, image_shape)\n', (2687, 2717), True, 'import tensorflow as tf\n'), ((2728, 2791), 'tensorflow.slice', 'tf.slice', (['image', '[0, 0, 0]', '[image_shape[0], image_shape[1], 3]'], {}), '(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])\n', (2736, 2791), True, 'import tensorflow as tf\n'), ((2896, 2911), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (2904, 2911), True, 'import tensorflow as tf\n'), ((2933, 2978), 'tensorflow.ones', 'tf.ones', (['[shape[0], shape[1], 1]', 'image.dtype'], {}), '([shape[0], shape[1], 1], image.dtype)\n', (2940, 2978), True, 'import tensorflow as tf\n'), ((2992, 3031), 'tensorflow.concat', 'tf.concat', (['[image, extended_channel]', '(2)'], {}), '([image, extended_channel], 2)\n', (3001, 3031), True, 'import tensorflow as tf\n'), ((3954, 3987), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['min_y', '(0.0)', '(1.0)'], {}), '(min_y, 0.0, 1.0)\n', (3970, 3987), True, 'import tensorflow as tf\n'), ((3998, 4031), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['min_x', '(0.0)', '(1.0)'], {}), '(min_x, 0.0, 1.0)\n', (4014, 4031), True, 'import tensorflow as tf\n'), ((4042, 4075), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['max_y', '(0.0)', '(1.0)'], {}), '(max_y, 0.0, 1.0)\n', (4058, 4075), True, 'import tensorflow as tf\n'), 
((4086, 4119), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['max_x', '(0.0)', '(1.0)'], {}), '(max_x, 0.0, 1.0)\n', (4102, 4119), True, 'import tensorflow as tf\n'), ((6161, 6180), 'tensorflow.to_int32', 'tf.to_int32', (['pixels'], {}), '(pixels)\n', (6172, 6180), True, 'import tensorflow as tf\n'), ((7153, 7191), 'tensorflow.stack', 'tf.stack', (['[min_y, min_x, max_y, max_x]'], {}), '([min_y, min_x, max_y, max_x])\n', (7161, 7191), True, 'import tensorflow as tf\n'), ((8540, 8577), 'tensorflow.map_fn', 'tf.map_fn', (['wrapped_shift_bbox', 'bboxes'], {}), '(wrapped_shift_bbox, bboxes)\n', (8549, 8577), True, 'import tensorflow as tf\n'), ((10113, 10138), 'tensorflow.stack', 'tf.stack', (['[s1, s2, s3]', '(2)'], {}), '([s1, s2, s3], 2)\n', (10121, 10138), True, 'import tensorflow as tf\n'), ((11085, 11159), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '[]', 'minval': '(0)', 'maxval': 'image_height', 'dtype': 'tf.int32'}), '(shape=[], minval=0, maxval=image_height, dtype=tf.int32)\n', (11102, 11159), True, 'import tensorflow as tf\n'), ((11198, 11271), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '[]', 'minval': '(0)', 'maxval': 'image_width', 'dtype': 'tf.int32'}), '(shape=[], minval=0, maxval=image_width, dtype=tf.int32)\n', (11215, 11271), True, 'import tensorflow as tf\n'), ((11300, 11346), 'tensorflow.maximum', 'tf.maximum', (['(0)', '(cutout_center_height - pad_size)'], {}), '(0, cutout_center_height - pad_size)\n', (11310, 11346), True, 'import tensorflow as tf\n'), ((11361, 11422), 'tensorflow.maximum', 'tf.maximum', (['(0)', '(image_height - cutout_center_height - pad_size)'], {}), '(0, image_height - cutout_center_height - pad_size)\n', (11371, 11422), True, 'import tensorflow as tf\n'), ((11436, 11481), 'tensorflow.maximum', 'tf.maximum', (['(0)', '(cutout_center_width - pad_size)'], {}), '(0, cutout_center_width - pad_size)\n', (11446, 11481), True, 'import tensorflow as tf\n'), ((11496, 11555), 
'tensorflow.maximum', 'tf.maximum', (['(0)', '(image_width - cutout_center_width - pad_size)'], {}), '(0, image_width - cutout_center_width - pad_size)\n', (11506, 11555), True, 'import tensorflow as tf\n'), ((11850, 11874), 'tensorflow.expand_dims', 'tf.expand_dims', (['mask', '(-1)'], {}), '(mask, -1)\n', (11864, 11874), True, 'import tensorflow as tf\n'), ((11884, 11908), 'tensorflow.tile', 'tf.tile', (['mask', '[1, 1, 3]'], {}), '(mask, [1, 1, 3])\n', (11891, 11908), True, 'import tensorflow as tf\n'), ((13744, 13818), 'tensorflow.stack', 'tf.stack', (['[[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]]'], {}), '([[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])\n', (13752, 13818), True, 'import tensorflow as tf\n'), ((14104, 14143), 'tensorflow.cast', 'tf.cast', (['translation_matrix', 'tf.float32'], {}), '(translation_matrix, tf.float32)\n', (14111, 14143), True, 'import tensorflow as tf\n'), ((14587, 14661), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['min_y'], {'clip_value_min': '(0)', 'clip_value_max': '(image_height - 1)'}), '(min_y, clip_value_min=0, clip_value_max=image_height - 1)\n', (14603, 14661), True, 'import tensorflow as tf\n'), ((14668, 14742), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['max_y'], {'clip_value_min': '(0)', 'clip_value_max': '(image_height - 1)'}), '(max_y, clip_value_min=0, clip_value_max=image_height - 1)\n', (14684, 14742), True, 'import tensorflow as tf\n'), ((14749, 14822), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['min_x'], {'clip_value_min': '(0)', 'clip_value_max': '(image_width - 1)'}), '(min_x, clip_value_min=0, clip_value_max=image_width - 1)\n', (14765, 14822), True, 'import tensorflow as tf\n'), ((14829, 14902), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['max_x'], {'clip_value_min': '(0)', 'clip_value_max': '(image_width - 1)'}), '(max_x, clip_value_min=0, clip_value_max=image_width - 1)\n', (14845, 14902), True, 'import tensorflow as tf\n'), ((14908, 14946), 
'tensorflow.stack', 'tf.stack', (['[min_y, min_x, max_y, max_x]'], {}), '([min_y, min_x, max_y, max_x])\n', (14916, 14946), True, 'import tensorflow as tf\n'), ((782, 810), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image1'], {}), '(image1)\n', (802, 810), True, 'import tensorflow as tf\n'), ((842, 870), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['image2'], {}), '(image2)\n', (862, 870), True, 'import tensorflow as tf\n'), ((1033, 1052), 'tensorflow.to_float', 'tf.to_float', (['image1'], {}), '(image1)\n', (1044, 1052), True, 'import tensorflow as tf\n'), ((1185, 1208), 'tensorflow.cast', 'tf.cast', (['temp', 'tf.uint8'], {}), '(temp, tf.uint8)\n', (1192, 1208), True, 'import tensorflow as tf\n'), ((1283, 1317), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['temp', '(0.0)', '(255.0)'], {}), '(temp, 0.0, 255.0)\n', (1299, 1317), True, 'import tensorflow as tf\n'), ((1427, 1459), 'tensorflow.image.rgb_to_grayscale', 'tf.image.rgb_to_grayscale', (['image'], {}), '(image)\n', (1452, 1459), True, 'import tensorflow as tf\n'), ((2549, 2575), 'tensorflow.equal', 'tf.equal', (['alpha_channel', '(0)'], {}), '(alpha_channel, 0)\n', (2557, 2575), True, 'import tensorflow as tf\n'), ((5045, 5079), 'tensorflow.maximum', 'tf.maximum', (['max_coord', '(0.0 + delta)'], {}), '(max_coord, 0.0 + delta)\n', (5055, 5079), True, 'import tensorflow as tf\n'), ((5096, 5130), 'tensorflow.minimum', 'tf.minimum', (['min_coord', '(1.0 - delta)'], {}), '(min_coord, 1.0 - delta)\n', (5106, 5130), True, 'import tensorflow as tf\n'), ((5188, 5209), 'tensorflow.equal', 'tf.equal', (['height', '(0.0)'], {}), '(height, 0.0)\n', (5196, 5209), True, 'import tensorflow as tf\n'), ((5357, 5377), 'tensorflow.equal', 'tf.equal', (['width', '(0.0)'], {}), '(width, 0.0)\n', (5365, 5377), True, 'import tensorflow as tf\n'), ((6496, 6525), 'tensorflow.maximum', 'tf.maximum', (['(0)', '(min_x - pixels)'], {}), '(0, min_x - pixels)\n', (6506, 6525), True, 'import tensorflow as 
tf\n'), ((6538, 6577), 'tensorflow.minimum', 'tf.minimum', (['image_width', '(max_x - pixels)'], {}), '(image_width, max_x - pixels)\n', (6548, 6577), True, 'import tensorflow as tf\n'), ((6598, 6627), 'tensorflow.maximum', 'tf.maximum', (['(0)', '(min_y - pixels)'], {}), '(0, min_y - pixels)\n', (6608, 6627), True, 'import tensorflow as tf\n'), ((6640, 6680), 'tensorflow.minimum', 'tf.minimum', (['image_height', '(max_y - pixels)'], {}), '(image_height, max_y - pixels)\n', (6650, 6680), True, 'import tensorflow as tf\n'), ((6725, 6743), 'tensorflow.to_float', 'tf.to_float', (['min_y'], {}), '(min_y)\n', (6736, 6743), True, 'import tensorflow as tf\n'), ((6746, 6771), 'tensorflow.to_float', 'tf.to_float', (['image_height'], {}), '(image_height)\n', (6757, 6771), True, 'import tensorflow as tf\n'), ((6782, 6800), 'tensorflow.to_float', 'tf.to_float', (['min_x'], {}), '(min_x)\n', (6793, 6800), True, 'import tensorflow as tf\n'), ((6803, 6827), 'tensorflow.to_float', 'tf.to_float', (['image_width'], {}), '(image_width)\n', (6814, 6827), True, 'import tensorflow as tf\n'), ((6838, 6856), 'tensorflow.to_float', 'tf.to_float', (['max_y'], {}), '(max_y)\n', (6849, 6856), True, 'import tensorflow as tf\n'), ((6859, 6884), 'tensorflow.to_float', 'tf.to_float', (['image_height'], {}), '(image_height)\n', (6870, 6884), True, 'import tensorflow as tf\n'), ((6895, 6913), 'tensorflow.to_float', 'tf.to_float', (['max_x'], {}), '(max_x)\n', (6906, 6913), True, 'import tensorflow as tf\n'), ((6916, 6940), 'tensorflow.to_float', 'tf.to_float', (['image_width'], {}), '(image_width)\n', (6927, 6940), True, 'import tensorflow as tf\n'), ((8296, 8311), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (8304, 8311), True, 'import tensorflow as tf\n'), ((8331, 8346), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (8339, 8346), True, 'import tensorflow as tf\n'), ((8810, 8840), 'tensorflow.cast', 'tf.cast', (['im[:, :, c]', 'tf.int32'], {}), '(im[:, :, c], 
tf.int32)\n', (8817, 8840), True, 'import tensorflow as tf\n'), ((8903, 8952), 'tensorflow.histogram_fixed_width', 'tf.histogram_fixed_width', (['im', '[0, 255]'], {'nbins': '(256)'}), '(im, [0, 255], nbins=256)\n', (8927, 8952), True, 'import tensorflow as tf\n'), ((9894, 9916), 'tensorflow.cast', 'tf.cast', (['result', 'dtype'], {}), '(result, dtype)\n', (9901, 9916), True, 'import tensorflow as tf\n'), ((10924, 10939), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (10932, 10939), True, 'import tensorflow as tf\n'), ((10959, 10974), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (10967, 10974), True, 'import tensorflow as tf\n'), ((11759, 11800), 'tensorflow.zeros', 'tf.zeros', (['cutout_shape'], {'dtype': 'image.dtype'}), '(cutout_shape, dtype=image.dtype)\n', (11767, 11800), True, 'import tensorflow as tf\n'), ((11935, 11952), 'tensorflow.equal', 'tf.equal', (['mask', '(0)'], {}), '(mask, 0)\n', (11943, 11952), True, 'import tensorflow as tf\n'), ((13560, 13585), 'tensorflow.to_float', 'tf.to_float', (['image_height'], {}), '(image_height)\n', (13571, 13585), True, 'import tensorflow as tf\n'), ((13587, 13611), 'tensorflow.to_float', 'tf.to_float', (['image_width'], {}), '(image_width)\n', (13598, 13611), True, 'import tensorflow as tf\n'), ((13966, 13997), 'tensorflow.stack', 'tf.stack', (['[[1, 0], [-level, 1]]'], {}), '([[1, 0], [-level, 1]])\n', (13974, 13997), True, 'import tensorflow as tf\n'), ((14040, 14071), 'tensorflow.stack', 'tf.stack', (['[[1, -level], [0, 1]]'], {}), '([[1, -level], [0, 1]])\n', (14048, 14071), True, 'import tensorflow as tf\n'), ((14322, 14353), 'tensorflow.reduce_min', 'tf.reduce_min', (['new_coords[0, :]'], {}), '(new_coords[0, :])\n', (14335, 14353), True, 'import tensorflow as tf\n'), ((14377, 14408), 'tensorflow.reduce_min', 'tf.reduce_min', (['new_coords[1, :]'], {}), '(new_coords[1, :])\n', (14390, 14408), True, 'import tensorflow as tf\n'), ((14432, 14463), 'tensorflow.reduce_max', 
'tf.reduce_max', (['new_coords[0, :]'], {}), '(new_coords[0, :])\n', (14445, 14463), True, 'import tensorflow as tf\n'), ((14487, 14518), 'tensorflow.reduce_max', 'tf.reduce_max', (['new_coords[1, :]'], {}), '(new_coords[1, :])\n', (14500, 14518), True, 'import tensorflow as tf\n'), ((15952, 15981), 'tensorflow.transpose', 'tf.transpose', (['mask', '[1, 2, 0]'], {}), '(mask, [1, 2, 0])\n', (15964, 15981), True, 'import tensorflow as tf\n'), ((16101, 16130), 'tensorflow.transpose', 'tf.transpose', (['mask', '[2, 0, 1]'], {}), '(mask, [2, 0, 1])\n', (16113, 16130), True, 'import tensorflow as tf\n'), ((16141, 16182), 'tfop.get_bboxes_from_mask', 'tfop.get_bboxes_from_mask', (['mask'], {'stride': '(4)'}), '(mask, stride=4)\n', (16166, 16182), False, 'import tfop\n'), ((16512, 16549), 'tensorflow.map_fn', 'tf.map_fn', (['wrapped_shear_bbox', 'bboxes'], {}), '(wrapped_shear_bbox, bboxes)\n', (16521, 16549), True, 'import tensorflow as tf\n'), ((2427, 2452), 'tensorflow.ones', 'tf.ones', (['[1]', 'image.dtype'], {}), '([1], image.dtype)\n', (2434, 2452), True, 'import tensorflow as tf\n'), ((2583, 2631), 'tensorflow.ones_like', 'tf.ones_like', (['flattened_image'], {'dtype': 'image.dtype'}), '(flattened_image, dtype=image.dtype)\n', (2595, 2631), True, 'import tensorflow as tf\n'), ((6248, 6273), 'tensorflow.to_float', 'tf.to_float', (['image_height'], {}), '(image_height)\n', (6259, 6273), True, 'import tensorflow as tf\n'), ((6307, 6331), 'tensorflow.to_float', 'tf.to_float', (['image_width'], {}), '(image_width)\n', (6318, 6331), True, 'import tensorflow as tf\n'), ((6365, 6390), 'tensorflow.to_float', 'tf.to_float', (['image_height'], {}), '(image_height)\n', (6376, 6390), True, 'import tensorflow as tf\n'), ((6424, 6448), 'tensorflow.to_float', 'tf.to_float', (['image_width'], {}), '(image_width)\n', (6435, 6448), True, 'import tensorflow as tf\n'), ((9048, 9070), 'tensorflow.not_equal', 'tf.not_equal', (['histo', '(0)'], {}), '(histo, 0)\n', (9060, 9070), True, 
'import tensorflow as tf\n'), ((9103, 9128), 'tensorflow.gather', 'tf.gather', (['histo', 'nonzero'], {}), '(histo, nonzero)\n', (9112, 9128), True, 'import tensorflow as tf\n'), ((9439, 9468), 'tensorflow.concat', 'tf.concat', (['[[0], lut[:-1]]', '(0)'], {}), '([[0], lut[:-1]], 0)\n', (9448, 9468), True, 'import tensorflow as tf\n'), ((9575, 9604), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['lut', '(0)', '(255)'], {}), '(lut, 0, 255)\n', (9591, 9604), True, 'import tensorflow as tf\n'), ((9762, 9779), 'tensorflow.equal', 'tf.equal', (['step', '(0)'], {}), '(step, 0)\n', (9770, 9779), True, 'import tensorflow as tf\n'), ((11960, 11998), 'tensorflow.ones_like', 'tf.ones_like', (['image'], {'dtype': 'image.dtype'}), '(image, dtype=image.dtype)\n', (11972, 11998), True, 'import tensorflow as tf\n'), ((14204, 14229), 'tensorflow.transpose', 'tf.transpose', (['coordinates'], {}), '(coordinates)\n', (14216, 14229), True, 'import tensorflow as tf\n'), ((16257, 16272), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (16265, 16272), True, 'import tensorflow as tf\n'), ((16294, 16309), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (16302, 16309), True, 'import tensorflow as tf\n'), ((9148, 9176), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['nonzero_histo'], {}), '(nonzero_histo)\n', (9161, 9176), True, 'import tensorflow as tf\n'), ((9349, 9365), 'tensorflow.cumsum', 'tf.cumsum', (['histo'], {}), '(histo)\n', (9358, 9365), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, SELCO and contributors
# For license information, please see license.txt
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class CashPaymentEntry(Document):
    # Frappe DocType controller: each submitted Cash Payment Entry is mirrored
    # into a Journal Entry carrying the same header fields and account rows.
    def before_insert(self):
        # Use the per-branch naming series configured on the Branch master.
        self.naming_series = frappe.db.get_value("Branch",self.selco_branch,"selco_cash_payment_naming_series")
    def on_submit(self):
        # Build the mirroring Journal Entry and copy the header fields over.
        je = frappe.new_doc('Journal Entry')
        je.selco_branch = self.selco_branch
        je.voucher_type = self.voucher_type
        je.selco_use_different_cost_center = self.use_different_cost_center
        je.posting_date = self.posting_date
        je.cheque_no = self.cheque_no
        je.cheque_date = self.cheque_date
        je.user_remark = self.user_remark
        # NOTE(review): name is forced to "J" + <this doc's name>; confirm this
        # cooperates with the Journal Entry's own naming series.
        je.name = "J" + self.name
        # NOTE(review): looks like a leftover debug message shown to the user.
        frappe.msgprint("je.name is" + str(je.name))
        je.company = self.company
        # Copy every child row of the 'accounts' table verbatim.
        for d in self.get('accounts'):
            je.append("accounts",{
                "account":d.account,
                "party_type":d.party_type,
                "party":d.party,
                "reference_type":d.reference_type,
                "reference_name":d.reference_name,
                "is_advance":d.is_advance,
                "cost_center":d.cost_center,
                "account_currency":d.account_currency,
                "debit_in_account_currency":d.debit_in_account_currency,
                "credit_in_account_currency":d.credit_in_account_currency
            })
        je.save()
        je.submit()
| [
"frappe.db.get_value",
"frappe.new_doc"
] | [((454, 542), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Branch"""', 'self.selco_branch', '"""selco_cash_payment_naming_series"""'], {}), "('Branch', self.selco_branch,\n 'selco_cash_payment_naming_series')\n", (473, 542), False, 'import frappe\n'), ((576, 607), 'frappe.new_doc', 'frappe.new_doc', (['"""Journal Entry"""'], {}), "('Journal Entry')\n", (590, 607), False, 'import frappe\n')] |
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Feature(models.Model):
    # A nameable product feature; `info_link` points at a demo page for it.
    name = models.CharField('Feature Name', max_length=50, blank=True, unique=True)
    description = models.CharField('Feature Description', max_length=150, blank=True)
    info_link = models.CharField('Feature Demo Link', max_length=100, blank=True)
    class Meta:
        verbose_name = _('feature')
        verbose_name_plural = _('features')
        ordering = ['id']
ordering = ['id']
class Version(models.Model):
    # A release version identified by a unique tag string.
    tag = models.CharField('Tag', max_length=50, unique=True)
    class Meta:
        verbose_name = _('tag')
        verbose_name_plural = _('tags')
class Release(models.Model):
    # A shipped release: one Version plus the set of Features it contains.
    version = models.ForeignKey(Version, on_delete=models.CASCADE)
    features = models.ManyToManyField(Feature, blank=True)
    class Meta:
        verbose_name = _('release')
        verbose_name_plural = _('releases')
class FeedbackResult(models.Model):
    # One user's feedback about a single feature of a service.
    user_email = models.EmailField('Email', blank=False, null=False)
    # Nullable with SET_NULL so feedback survives deletion of the service.
    service = models.ForeignKey('organizations.service', null=True, on_delete=models.SET_NULL)
    feature = models.ForeignKey(Feature, on_delete=models.CASCADE)
    feedback = models.CharField('Feature Feedback', max_length=512, blank=True, null=True)
    # NOTE(review): NullBooleanField is deprecated in newer Django;
    # BooleanField(null=True) is the modern equivalent — confirm Django version.
    liked = models.NullBooleanField('Feature Liked')
    skipped = models.NullBooleanField('Feature Skipped')
    class Meta:
        verbose_name = _('feedback-result')
        verbose_name_plural = _('feedback-results')
class FeedbackActivity(models.Model):
    # Tracks whether a user engaged with (or declined) the feedback prompt
    # for a given release/service pairing.
    user_email = models.EmailField('Email', blank=False)
    declined = models.NullBooleanField('Declined', null=True, blank=True)
    release = models.ForeignKey(Release, null=True, blank=True, on_delete=models.CASCADE)
    service = models.ForeignKey('organizations.service', null=True, blank=True, on_delete=models.CASCADE)
    has_given_feedback = models.NullBooleanField('Given Feedback', blank=True)
    hours_used_release = models.FloatField(null=True, blank=True)  # usage time on this release
    class Meta:
        verbose_name = _('feedback-activity')
        verbose_name_plural = _('feedback-activities')
class UserSession(models.Model):
    # One tracked usage session, bounded by start/end timestamps.
    user_email = models.EmailField('Email', blank=False)
    session_start = models.DateTimeField(null=True)
    session_end = models.DateTimeField(null=True)
    tag = models.CharField(null=True, max_length=30)  # free-form session label
    class Meta:
        verbose_name = _('user-session')
        verbose_name_plural = _('user-sessions')
| [
"django.db.models.EmailField",
"django.db.models.FloatField",
"django.utils.translation.ugettext_lazy",
"django.db.models.ForeignKey",
"django.db.models.NullBooleanField",
"django.db.models.ManyToManyField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((127, 199), 'django.db.models.CharField', 'models.CharField', (['"""Feature Name"""'], {'max_length': '(50)', 'blank': '(True)', 'unique': '(True)'}), "('Feature Name', max_length=50, blank=True, unique=True)\n", (143, 199), False, 'from django.db import models\n'), ((218, 285), 'django.db.models.CharField', 'models.CharField', (['"""Feature Description"""'], {'max_length': '(150)', 'blank': '(True)'}), "('Feature Description', max_length=150, blank=True)\n", (234, 285), False, 'from django.db import models\n'), ((302, 367), 'django.db.models.CharField', 'models.CharField', (['"""Feature Demo Link"""'], {'max_length': '(100)', 'blank': '(True)'}), "('Feature Demo Link', max_length=100, blank=True)\n", (318, 367), False, 'from django.db import models\n'), ((532, 583), 'django.db.models.CharField', 'models.CharField', (['"""Tag"""'], {'max_length': '(50)', 'unique': '(True)'}), "('Tag', max_length=50, unique=True)\n", (548, 583), False, 'from django.db import models\n'), ((718, 770), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Version'], {'on_delete': 'models.CASCADE'}), '(Version, on_delete=models.CASCADE)\n', (735, 770), False, 'from django.db import models\n'), ((786, 829), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Feature'], {'blank': '(True)'}), '(Feature, blank=True)\n', (808, 829), False, 'from django.db import models\n'), ((982, 1033), 'django.db.models.EmailField', 'models.EmailField', (['"""Email"""'], {'blank': '(False)', 'null': '(False)'}), "('Email', blank=False, null=False)\n", (999, 1033), False, 'from django.db import models\n'), ((1048, 1133), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""organizations.service"""'], {'null': '(True)', 'on_delete': 'models.SET_NULL'}), "('organizations.service', null=True, on_delete=models.SET_NULL\n )\n", (1065, 1133), False, 'from django.db import models\n'), ((1143, 1195), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Feature'], {'on_delete': 
'models.CASCADE'}), '(Feature, on_delete=models.CASCADE)\n', (1160, 1195), False, 'from django.db import models\n'), ((1211, 1286), 'django.db.models.CharField', 'models.CharField', (['"""Feature Feedback"""'], {'max_length': '(512)', 'blank': '(True)', 'null': '(True)'}), "('Feature Feedback', max_length=512, blank=True, null=True)\n", (1227, 1286), False, 'from django.db import models\n'), ((1299, 1339), 'django.db.models.NullBooleanField', 'models.NullBooleanField', (['"""Feature Liked"""'], {}), "('Feature Liked')\n", (1322, 1339), False, 'from django.db import models\n'), ((1354, 1396), 'django.db.models.NullBooleanField', 'models.NullBooleanField', (['"""Feature Skipped"""'], {}), "('Feature Skipped')\n", (1377, 1396), False, 'from django.db import models\n'), ((1567, 1606), 'django.db.models.EmailField', 'models.EmailField', (['"""Email"""'], {'blank': '(False)'}), "('Email', blank=False)\n", (1584, 1606), False, 'from django.db import models\n'), ((1622, 1680), 'django.db.models.NullBooleanField', 'models.NullBooleanField', (['"""Declined"""'], {'null': '(True)', 'blank': '(True)'}), "('Declined', null=True, blank=True)\n", (1645, 1680), False, 'from django.db import models\n'), ((1695, 1770), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Release'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.CASCADE'}), '(Release, null=True, blank=True, on_delete=models.CASCADE)\n', (1712, 1770), False, 'from django.db import models\n'), ((1785, 1881), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""organizations.service"""'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.CASCADE'}), "('organizations.service', null=True, blank=True, on_delete\n =models.CASCADE)\n", (1802, 1881), False, 'from django.db import models\n'), ((1902, 1955), 'django.db.models.NullBooleanField', 'models.NullBooleanField', (['"""Given Feedback"""'], {'blank': '(True)'}), "('Given Feedback', blank=True)\n", (1925, 1955), False, 'from django.db import 
models\n'), ((1981, 2021), 'django.db.models.FloatField', 'models.FloatField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (1998, 2021), False, 'from django.db import models\n'), ((2192, 2231), 'django.db.models.EmailField', 'models.EmailField', (['"""Email"""'], {'blank': '(False)'}), "('Email', blank=False)\n", (2209, 2231), False, 'from django.db import models\n'), ((2252, 2283), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (2272, 2283), False, 'from django.db import models\n'), ((2302, 2333), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (2322, 2333), False, 'from django.db import models\n'), ((2344, 2386), 'django.db.models.CharField', 'models.CharField', ([], {'null': '(True)', 'max_length': '(30)'}), '(null=True, max_length=30)\n', (2360, 2386), False, 'from django.db import models\n'), ((408, 420), 'django.utils.translation.ugettext_lazy', '_', (['"""feature"""'], {}), "('feature')\n", (409, 420), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((451, 464), 'django.utils.translation.ugettext_lazy', '_', (['"""features"""'], {}), "('features')\n", (452, 464), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((624, 632), 'django.utils.translation.ugettext_lazy', '_', (['"""tag"""'], {}), "('tag')\n", (625, 632), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((663, 672), 'django.utils.translation.ugettext_lazy', '_', (['"""tags"""'], {}), "('tags')\n", (664, 672), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((870, 882), 'django.utils.translation.ugettext_lazy', '_', (['"""release"""'], {}), "('release')\n", (871, 882), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((913, 926), 'django.utils.translation.ugettext_lazy', '_', (['"""releases"""'], {}), "('releases')\n", (914, 926), True, 'from django.utils.translation 
import ugettext_lazy as _\n'), ((1437, 1457), 'django.utils.translation.ugettext_lazy', '_', (['"""feedback-result"""'], {}), "('feedback-result')\n", (1438, 1457), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1488, 1509), 'django.utils.translation.ugettext_lazy', '_', (['"""feedback-results"""'], {}), "('feedback-results')\n", (1489, 1509), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2062, 2084), 'django.utils.translation.ugettext_lazy', '_', (['"""feedback-activity"""'], {}), "('feedback-activity')\n", (2063, 2084), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2115, 2139), 'django.utils.translation.ugettext_lazy', '_', (['"""feedback-activities"""'], {}), "('feedback-activities')\n", (2116, 2139), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2427, 2444), 'django.utils.translation.ugettext_lazy', '_', (['"""user-session"""'], {}), "('user-session')\n", (2428, 2444), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2475, 2493), 'django.utils.translation.ugettext_lazy', '_', (['"""user-sessions"""'], {}), "('user-sessions')\n", (2476, 2493), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
'''
Script to loop through all baseballrefernce.com pages and store the HTML in data frames
'''
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
import requests
import time
import os
from selenium.webdriver.common.by import By
import pickle
import re
import time
def get_rookie_player_pages_html(rookie_player_pages, driver, stop=None):
    """Fetch page HTML for every rookie-player row that does not have it yet.

    Mutates ``rookie_player_pages`` in place, checkpointing to CSV every 100
    pages and once more at the end.  ``stop`` caps the number of pages fetched.
    Returns the (same, updated) DataFrame.
    """
    pending = rookie_player_pages[rookie_player_pages.html.isnull()]
    for idx, url in enumerate(pending.link.values):
        t0 = time.time()
        driver.get(url)  # could add try/except writing to csv on error, so we can restart from the last write
        t1 = time.time()
        print((t1 - t0), url)
        rookie_player_pages.loc[rookie_player_pages.link == url, 'html'] = driver.page_source
        if idx != 0 and idx % 100 == 0:
            # periodic checkpoint so a crash loses at most 100 pages
            print('Rows completed', rookie_player_pages[~rookie_player_pages.html.isnull()].shape[0])
            rookie_player_pages.to_csv('data/rookie_player_pages.csv')
        if idx == stop:
            break
    rookie_player_pages.to_csv('data/rookie_player_pages.csv')
    return rookie_player_pages
def build_rookie_pages(start, end, driver):
    """Scrape baseball-reference rookie index pages for years start..end (inclusive),
    collecting each rookie player's name and profile link.  Previously scraped
    years are loaded from CSV and skipped; results are written back to CSV."""
    rookie_pages = pd.DataFrame(columns=['year','link','html'])
    rookie_player_pages = pd.DataFrame(columns=['year','name','link','html'])
    # Attempt to resume from earlier runs saved to csv.
    try:
        rookie_pages = pd.read_csv('data/rookie_pages.csv', index_col=0)
    except FileNotFoundError:
        pass
    print(rookie_pages.shape)
    try:
        rookie_player_pages = pd.read_csv('data/rookie_player_pages.csv', index_col=0)
    except FileNotFoundError:
        pass
    print(rookie_player_pages.shape)
    for i in range(start, end+1):
        links_list = []
        names_list = []
        # Skip years already scraped in a previous run.
        if not (rookie_pages.year == i).any():
            url = 'https://www.baseball-reference.com/leagues/MLB/'+str(i)+'-rookies.shtml'
            # NOTE(review): these assignments shadow the `start`/`end` year
            # parameters; harmless here because range() was already evaluated.
            start = time.time()
            driver.get(url)
            end = time.time()
            print(end-start, i)
            rookie_pages.loc[i] = [i, url, driver.page_source]
            # scrape the rookie batters (includes pitchers if PA)
            batting = driver.find_element_by_id('misc_batting') ## HTML tables
            links = batting.find_elements_by_xpath('.//tbody/tr/td/a') ## player pages
            # Keep only anchors whose href is a player page.
            links_list = [a.get_attribute('href') for a in links if re.search(r'players/.', a.get_attribute('href'))]
            names_list = [a.text for a in links if re.search(r'players/.', a.get_attribute('href'))]
        if len(links_list) != 0: # add new data
            year_l = [i] * len(links_list)
            new_df = pd.DataFrame({'year': year_l, 'name': names_list, 'link': links_list})
            rookie_player_pages = rookie_player_pages.append(new_df, sort=True)
    rookie_pages.to_csv('data/rookie_pages.csv')
    rookie_player_pages.to_csv('data/rookie_player_pages.csv')
    return rookie_pages, rookie_player_pages
chromedriver = "chromedriver" # path to the chromedriver executable
os.environ["webdriver.chrome.driver"] = chromedriver
driver = webdriver.Chrome(chromedriver)
# Build/refresh the rookie index pages, retrying forever on Selenium timeouts.
while True:
    try:
        rookie_pages, rookie_player_pages = build_rookie_pages(1985, 2017, driver)
    except TimeoutException:
        pass
    else:
        break
# Fetch individual player pages; up to 3 attempts (tries 0..2) on timeouts.
tries = 0
while tries <= 2:
    try:
        rookie_player_pages = pd.read_csv('data/rookie_player_pages.csv',index_col=0)
        print('Try:', tries)
        print(rookie_player_pages.shape)
        rookie_player_pages = get_rookie_player_pages_html(rookie_player_pages, driver, stop=6000)
    except TimeoutException:
        tries += 1
        pass  # NOTE(review): redundant `pass` after the increment
    else:
        break
driver.close() | [
"pandas.DataFrame",
"selenium.webdriver.Chrome",
"time.time",
"pandas.read_csv"
] | [((3378, 3408), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (['chromedriver'], {}), '(chromedriver)\n', (3394, 3408), False, 'from selenium import webdriver\n'), ((1348, 1394), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['year', 'link', 'html']"}), "(columns=['year', 'link', 'html'])\n", (1360, 1394), True, 'import pandas as pd\n'), ((1419, 1473), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['year', 'name', 'link', 'html']"}), "(columns=['year', 'name', 'link', 'html'])\n", (1431, 1473), True, 'import pandas as pd\n'), ((629, 640), 'time.time', 'time.time', ([], {}), '()\n', (638, 640), False, 'import time\n'), ((764, 775), 'time.time', 'time.time', ([], {}), '()\n', (773, 775), False, 'import time\n'), ((1538, 1587), 'pandas.read_csv', 'pd.read_csv', (['"""data/rookie_pages.csv"""'], {'index_col': '(0)'}), "('data/rookie_pages.csv', index_col=0)\n", (1549, 1587), True, 'import pandas as pd\n'), ((1700, 1756), 'pandas.read_csv', 'pd.read_csv', (['"""data/rookie_player_pages.csv"""'], {'index_col': '(0)'}), "('data/rookie_player_pages.csv', index_col=0)\n", (1711, 1756), True, 'import pandas as pd\n'), ((3648, 3704), 'pandas.read_csv', 'pd.read_csv', (['"""data/rookie_player_pages.csv"""'], {'index_col': '(0)'}), "('data/rookie_player_pages.csv', index_col=0)\n", (3659, 3704), True, 'import pandas as pd\n'), ((2140, 2151), 'time.time', 'time.time', ([], {}), '()\n', (2149, 2151), False, 'import time\n'), ((2198, 2209), 'time.time', 'time.time', ([], {}), '()\n', (2207, 2209), False, 'import time\n'), ((2921, 2991), 'pandas.DataFrame', 'pd.DataFrame', (["{'year': year_l, 'name': names_list, 'link': links_list}"], {}), "({'year': year_l, 'name': names_list, 'link': links_list})\n", (2933, 2991), True, 'import pandas as pd\n')] |
import sys
from amr import JAMR_CorpusReader
if __name__ == '__main__':
    corpus_path = sys.argv[1]

    reader = JAMR_CorpusReader()
    reader.load_amrs(corpus_path)

    # One line per aligned node: "<sentence index>\t<node>\t<tok,tok,...>",
    # with a blank line between sentences.
    for sent_idx, amr in enumerate(reader.amrs):
        for node in amr.alignments:
            print('\t'.join([str(sent_idx), node, ','.join(str(tok) for tok in amr.alignments[node])]))
        print()
| [
"amr.JAMR_CorpusReader"
] | [((125, 144), 'amr.JAMR_CorpusReader', 'JAMR_CorpusReader', ([], {}), '()\n', (142, 144), False, 'from amr import JAMR_CorpusReader\n')] |
import sys
from graphqlclient import GraphQLClient
from random import randint
import codecs, json
client = GraphQLClient("https://net.ton.dev/graphql")
mutation_template = '''
mutation {
    postRequests(requests:[{id:"%(request_id)s",body:"%(base64_boc)s",expireAt:2e12}])
}
'''

def send_boc(client, boc):
    """Post a serialized bag-of-cells to the GraphQL endpoint.

    :param client: GraphQL client exposing ``execute(query)``.
    :param boc: raw BOC bytes to submit.
    """
    import base64
    data = {
        # randint is inclusive on both ends: cap at 2**32 - 1 so the id
        # always fits in 32 bits (the old upper bound 2**32 could overflow).
        'request_id': str(randint(0, 2 ** 32 - 1)),
        # b64encode yields the same text as the old codecs encode/decode
        # round-trip, without needing to strip inserted newlines.
        'base64_boc': base64.b64encode(boc).decode('ascii'),
    }
    r = json.loads(client.execute(mutation_template % data))
    print(r)
# Read the BOC file named on the command line and submit it.
# NOTE(review): mode "rb+" also requests write access; plain "rb" would suffice.
with open(sys.argv[1], "rb+") as f:
    send_boc(client, f.read())
| [
"codecs.encode",
"random.randint",
"graphqlclient.GraphQLClient"
] | [((108, 152), 'graphqlclient.GraphQLClient', 'GraphQLClient', (['"""https://net.ton.dev/graphql"""'], {}), "('https://net.ton.dev/graphql')\n", (121, 152), False, 'from graphqlclient import GraphQLClient\n'), ((334, 353), 'random.randint', 'randint', (['(0)', '(2 ** 32)'], {}), '(0, 2 ** 32)\n', (341, 353), False, 'from random import randint\n'), ((381, 409), 'codecs.encode', 'codecs.encode', (['boc', '"""base64"""'], {}), "(boc, 'base64')\n", (394, 409), False, 'import codecs, json\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import pytest
from demorphy.tokenizer import tokenize
class TestTokenizer:
    """Behavioural tests for demorphy.tokenizer.tokenize on German input."""
    def test_split_simple(self):
        # Plain whitespace-separated words.
        assert tokenize(u"Ich bin krank") == [u"Ich", u"bin", u"krank"]
    def test_split_hypen(self):
        # Hyphenated compounds ("U-Bahn") stay one token.
        # NOTE(review): the expected list ends with u'treffen.' although the
        # input has no trailing period — confirm fixture vs tokenizer behaviour.
        assert tokenize(u"Wir können uns auf der U-Bahn treffen") == [u'Wir', u'können', u'uns', u'auf', u'der', u'U-Bahn', u'treffen.']
    def test_split_email(self):
        # E-mail addresses survive as single tokens; trailing space is dropped.
        assert tokenize(u"Bitte schreiben Sie an <EMAIL> ") == [u"Bitte", u"schreiben", u"Sie", u"an", u"<EMAIL>"]
    def test_split_url(self):
        # Bare domain names are kept intact.
        assert tokenize(u"www.akbank.com.tr ich du Sie bahn.de") == [u'www.akbank.com.tr', u'ich', u'du', u'Sie', u'bahn.de']
    def test_split_punct(self):
        # Commas and semicolons are stripped, not emitted as tokens.
        assert tokenize(u"Ich bin krank, sie auch; ich auch") == [u'Ich', u'bin', u'krank', u'sie', u'auch', u'ich', u'auch']
    def test_split_abbrev(self):
        # Known abbreviations keep their period ("ggf.").
        assert tokenize(u"ggf. kommen wir auf Ihr Angebot zurück") == [u'ggf.', u'kommen', u'wir', u'auf', u'Ihr', u'Angebot', u'zurück']
| [
"demorphy.tokenizer.tokenize"
] | [((206, 232), 'demorphy.tokenizer.tokenize', 'tokenize', (['u"""Ich bin krank"""'], {}), "(u'Ich bin krank')\n", (214, 232), False, 'from demorphy.tokenizer import tokenize\n'), ((311, 361), 'demorphy.tokenizer.tokenize', 'tokenize', (['u"""Wir können uns auf der U-Bahn treffen"""'], {}), "(u'Wir können uns auf der U-Bahn treffen')\n", (319, 361), False, 'from demorphy.tokenizer import tokenize\n'), ((481, 525), 'demorphy.tokenizer.tokenize', 'tokenize', (['u"""Bitte schreiben Sie an <EMAIL> """'], {}), "(u'Bitte schreiben Sie an <EMAIL> ')\n", (489, 525), False, 'from demorphy.tokenizer import tokenize\n'), ((627, 676), 'demorphy.tokenizer.tokenize', 'tokenize', (['u"""www.akbank.com.tr ich du Sie bahn.de"""'], {}), "(u'www.akbank.com.tr ich du Sie bahn.de')\n", (635, 676), False, 'from demorphy.tokenizer import tokenize\n'), ((786, 832), 'demorphy.tokenizer.tokenize', 'tokenize', (['u"""Ich bin krank, sie auch; ich auch"""'], {}), "(u'Ich bin krank, sie auch; ich auch')\n", (794, 832), False, 'from demorphy.tokenizer import tokenize\n'), ((946, 997), 'demorphy.tokenizer.tokenize', 'tokenize', (['u"""ggf. kommen wir auf Ihr Angebot zurück"""'], {}), "(u'ggf. kommen wir auf Ihr Angebot zurück')\n", (954, 997), False, 'from demorphy.tokenizer import tokenize\n')] |
import os
from django.apps import AppConfig
class OsfOauth2AdapterConfig(AppConfig):
    """Django app config for the OSF OAuth2 adapter."""
    name = 'osf_oauth2_adapter'
    # Default to the staging OSF so local development works without running OSF.
    # Normalize to exactly one trailing slash regardless of the env value.
    osf_api_url = '{}/'.format(os.environ.get('OSF_API_URL', 'https://staging-api.osf.io').rstrip('/'))
    osf_accounts_url = '{}/'.format(os.environ.get('OSF_ACCOUNTS_URL', 'https://staging-accounts.osf.io').rstrip('/'))
    default_scopes = ['osf.users.email_read', 'osf.users.profile_read', ]
    humans_group_name = 'OSF_USERS'
| [
"os.environ.get"
] | [((208, 267), 'os.environ.get', 'os.environ.get', (['"""OSF_API_URL"""', '"""https://staging-api.osf.io"""'], {}), "('OSF_API_URL', 'https://staging-api.osf.io')\n", (222, 267), False, 'import os\n'), ((309, 378), 'os.environ.get', 'os.environ.get', (['"""OSF_ACCOUNTS_URL"""', '"""https://staging-accounts.osf.io"""'], {}), "('OSF_ACCOUNTS_URL', 'https://staging-accounts.osf.io')\n", (323, 378), False, 'import os\n')] |
from typing import Tuple, Type
import numpy as np
from pyjackson.core import ArgList, Field
from pyjackson.generics import Serializer
from ebonite.core.analyzer.base import CanIsAMustHookMixin, TypeHookMixin
from ebonite.core.analyzer.dataset import DatasetHook
from ebonite.core.objects.dataset_type import DatasetType
from ebonite.runtime.interface.typing import ListTypeWithSpec, SizedTypedListType
def _python_type_from_np_string_repr(string_repr: str) -> type:
    """Resolve a numpy dtype name (e.g. ``'float64'``) to the matching built-in
    Python type."""
    return _python_type_from_np_type(_np_type_from_string(string_repr))
def _python_type_from_np_type(np_type: Type):
value = np_type()
if np_type.__module__ == 'numpy':
value = value.item()
return type(value)
def _np_type_from_string(string_repr):
try:
return getattr(np, string_repr)
except AttributeError:
raise ValueError('Unknown numpy type {}'.format(string_repr))
class NumpyNumberDatasetType(DatasetType):
    """
    :class:`.DatasetType` implementation for `numpy.number` objects which
    converts them to built-in Python numbers and vice versa.

    :param dtype: `numpy.number` data type as string
    """
    type = 'numpy_number'

    def __init__(self, dtype: str):
        self.dtype = dtype

    def get_spec(self) -> ArgList:
        # Single anonymous, non-optional field of the scalar's Python type.
        return [Field(None, self.actual_type, False)]

    def deserialize(self, obj: dict) -> object:
        # NOTE(review): annotation says dict, but obj is passed straight to a
        # numpy scalar constructor — presumably a plain number; confirm.
        return self.actual_type(obj)

    def serialize(self, instance: np.number) -> object:
        # .item() converts the numpy scalar to the equivalent built-in value.
        return instance.item()

    @property
    def actual_type(self):
        # The numpy scalar type named by self.dtype (e.g. np.float64).
        return _np_type_from_string(self.dtype)
class NumpyNumberHook(CanIsAMustHookMixin, DatasetHook):
    """Dataset hook mapping any ``numpy.number`` scalar to
    :class:`NumpyNumberDatasetType`."""

    def must_process(self, obj) -> bool:
        """A numpy scalar is always handled by this hook."""
        return isinstance(obj, np.number)

    def process(self, obj: np.number) -> DatasetType:
        """Build the dataset type from the scalar's dtype name."""
        dtype_name = obj.dtype.name
        return NumpyNumberDatasetType(dtype_name)
class NumpyNdarrayHook(TypeHookMixin, DatasetHook):
    """Dataset hook mapping ``np.ndarray`` values to
    :class:`NumpyNdarrayDatasetType`."""
    valid_types = [np.ndarray]

    def process(self, obj) -> DatasetType:
        """Capture the array's shape and dtype name in the dataset type."""
        shape, dtype_name = obj.shape, obj.dtype.name
        return NumpyNdarrayDatasetType(shape, dtype_name)
class NumpyDTypeSerializer(Serializer):
    """PyJackson serializer that stores numpy dtypes in JSON by name."""

    def deserialize(self, obj: str):
        """Look the dtype back up on the ``numpy`` module by its stored name."""
        return getattr(np, obj)

    def serialize(self, instance) -> str:
        """Persist the dtype as its string representation."""
        return str(instance)
class NumpyNdarrayDatasetType(DatasetType, ListTypeWithSpec):
    """
    :class:`.DatasetType` implementation for `np.ndarray` objects
    which converts them to built-in Python lists and vice versa.

    :param shape: shape of `numpy.ndarray` objects in dataset
    :param dtype: data type of `numpy.ndarray` objects in dataset
    """
    real_type = np.ndarray
    type = 'numpy_ndarray'

    def __init__(self, shape: Tuple[int, ...], dtype: str):
        # TODO assert shape and dtypes len
        self.shape = shape
        self.dtype = dtype

    @property
    def size(self):
        # 1-D arrays count as a single item; otherwise the leading dimension.
        if len(self.shape) == 1:
            return 1
        else:
            return self.shape[0]  # TODO more dimensions

    def list_size(self):
        # Length of the outermost list in the serialized representation.
        return self.shape[0]

    def _get_subtype(self, shape):
        # Recursively build nested SizedTypedListType specs, one level per
        # dimension, bottoming out at the scalar's built-in Python type.
        if len(shape) == 0:
            return _python_type_from_np_string_repr(self.dtype)
        elif len(shape) == 1:
            subtype = _python_type_from_np_string_repr(self.dtype)
        else:
            subtype = self._get_subtype(shape[1:])
        return SizedTypedListType(shape[0], subtype)

    def get_spec(self) -> ArgList:
        # Spec covers one row, i.e. the shape minus the leading dimension.
        return [Field(None, self._get_subtype(self.shape[1:]), False)]

    def deserialize(self, obj):
        return np.array(obj)

    def serialize(self, instance: np.ndarray):
        # if self.shape == 1:
        #     return [instance.tolist()] # TODO better shapes
        return instance.tolist()
| [
"numpy.array",
"pyjackson.core.Field",
"ebonite.runtime.interface.typing.SizedTypedListType"
] | [((3677, 3714), 'ebonite.runtime.interface.typing.SizedTypedListType', 'SizedTypedListType', (['shape[0]', 'subtype'], {}), '(shape[0], subtype)\n', (3695, 3714), False, 'from ebonite.runtime.interface.typing import ListTypeWithSpec, SizedTypedListType\n'), ((3870, 3883), 'numpy.array', 'np.array', (['obj'], {}), '(obj)\n', (3878, 3883), True, 'import numpy as np\n'), ((1304, 1340), 'pyjackson.core.Field', 'Field', (['None', 'self.actual_type', '(False)'], {}), '(None, self.actual_type, False)\n', (1309, 1340), False, 'from pyjackson.core import ArgList, Field\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class MAE(nn.Module):
    """Mean absolute error over valid (target > 0) pixels, averaged per sample.
    NOTE: the validity mask is moved to the GPU — requires CUDA."""

    def __init__(self):
        super(MAE, self).__init__()

    def forward(self, outputs, target, *args):
        valid = (target > 0).float().cuda()
        abs_err = torch.abs(valid * target - valid * outputs)
        per_sample = torch.sum(abs_err.view(abs_err.size(0), 1, -1), -1, keepdim=True)
        valid_cnt = torch.sum(valid.view(valid.size(0), 1, -1), -1, keepdim=True)
        return torch.mean(per_sample / valid_cnt)
class RMSE(nn.Module):
    """Root mean squared error over valid (target > 0) pixels, averaged per
    sample.  NOTE: the validity mask is moved to the GPU — requires CUDA."""

    def __init__(self):
        super(RMSE, self).__init__()

    def forward(self, outputs, target, *args):
        valid = (target > 0).float().cuda()
        sq_err = (valid * target - valid * outputs) ** 2
        per_sample = torch.sum(sq_err.view(sq_err.size(0), 1, -1), -1, keepdim=True)
        valid_cnt = torch.sum(valid.view(valid.size(0), 1, -1), -1, keepdim=True)
        return torch.mean(torch.sqrt(per_sample / valid_cnt))
class Deltas(nn.Module):
    # Threshold-accuracy metrics: fraction of valid pixels whose
    # prediction/target ratio falls below 1.01**i for i = 1, 2, 3.
    def __init__(self):
        super(Deltas, self).__init__()

    def forward(self, outputs, target, *args):
        # Valid-pixel mask; NOTE(review): .cuda() hard-codes GPU execution.
        val_pixels = (target > 0).float().cuda()
        # Symmetric max ratio; +1e-3 guards only the first denominator —
        # invalid pixels can still divide by zero in the second term.
        rel = torch.max((target * val_pixels) / (outputs * val_pixels + 1e-3),
                        (outputs * val_pixels) / (target * val_pixels))
        cnt = torch.sum(val_pixels.view(val_pixels.size(0), 1, -1), -1, keepdim=True)

        def del_i(i):
            # Share of pixels with ratio < 1.01**i, normalized by valid count.
            r = (rel < 1.01 ** i).float()
            delta = torch.sum(r.view(r.size(0), 1, -1), -1, keepdim=True) / cnt
            return torch.mean(delta)

        return del_i(1), del_i(2), del_i(3)
class Huber(nn.Module):
    """Huber (smooth L1) loss: quadratic below ``delta``, linear above it.

    :param outputs: predictions, same shape as ``target``.
    :param target: ground-truth values.
    :param delta: transition point between quadratic and linear regimes.
    :returns: scalar mean loss.
    """

    def __init__(self):
        super(Huber, self).__init__()

    def forward(self, outputs, target, delta=5):
        # reduction='none' keeps elementwise losses; it replaces the
        # deprecated reduce=False argument with identical behaviour.
        l1_loss = F.l1_loss(outputs, target, reduction='none')
        mse_loss = F.mse_loss(outputs, target, reduction='none')
        mask = (l1_loss < delta).float()
        # Quadratic branch where |err| < delta, linear branch elsewhere.
        loss = (0.5 * mse_loss) * mask + delta * (l1_loss - 0.5 * delta) * (1 - mask)
        return torch.mean(loss)
class EPE_metric(nn.Module):
    """End-point error: mean absolute difference over pixels where target > 0."""

    def __init__(self):
        super(EPE_metric, self).__init__()

    def forward(self, outputs, target):
        valid = target > 0
        diff = torch.abs(target[valid] - outputs[valid])
        return torch.mean(diff)
class D1_metric(nn.Module):
    """D1 outlier rate: fraction of valid (target > 0) pixels whose absolute
    error exceeds 3."""

    def __init__(self):
        super(D1_metric, self).__init__()

    def forward(self, outputs, target):
        valid = target > 0
        err = torch.abs(outputs[valid] - target[valid])
        outliers = (err > 3).float()
        return torch.mean(outliers)
class Thres_metric(nn.Module):
    """Fraction of pixels (valid in both outputs and target, i.e. both > 0)
    whose absolute error exceeds a fixed threshold of 3."""

    def __init__(self):
        super(Thres_metric, self).__init__()

    def forward(self, outputs, target):
        thres = 3
        assert isinstance(thres, (int, float))
        # Pixel is counted only when both tensors are positive there.
        valid = (target > 0) & (outputs > 0)
        err = torch.abs(target[valid] - outputs[valid])
        return torch.mean((err > thres).float())
class Deltas_Paint(nn.Module):
    # Threshold-accuracy metrics with both tight (1.05, 1.10) and standard
    # (1.25**i) ratio thresholds; also returns the valid-pixel count tensor.
    def __init__(self):
        super(Deltas_Paint, self).__init__()

    def forward(self, outputs, target, *args):
        # Valid-pixel mask; NOTE(review): .cuda() hard-codes GPU execution.
        val_pixels = (target > 0).float().cuda()
        # Symmetric max ratio; +1e-3 guards only the first denominator.
        rel = torch.max((target * val_pixels) / (outputs * val_pixels + 1e-3),
                        (outputs * val_pixels) / (target * val_pixels))
        cnt = torch.sum(val_pixels.view(val_pixels.size(0), 1, -1), -1, keepdim=True)

        def del_i(i):
            # Share of pixels with ratio < 1.25**i (standard depth metric).
            r = (rel < 1.25 ** i).float()
            delta = torch.sum(r.view(r.size(0), 1, -1), -1, keepdim=True) / cnt
            return torch.mean(delta)

        def del_j(i):
            # Share of pixels with ratio below an absolute threshold i.
            r = (rel < i).float()
            delta = torch.sum(r.view(r.size(0), 1, -1), -1, keepdim=True) / cnt
            return torch.mean(delta)

        return del_j(1.05), del_j(1.10), del_i(1), del_i(2), del_i(3), cnt
class SSIM_Metric(nn.Module):
    """Mean SSIM between two images using 3x3 average-pooling windows.
    Returns (mean SSIM, tensor holding the element count of ``x``)."""

    def __init__(self):
        super(SSIM_Metric, self).__init__()

    def forward(self, x, y):
        c1, c2 = 0.01 ** 2, 0.03 ** 2

        mu_x = F.avg_pool2d(x, 3, 1, 1)
        mu_y = F.avg_pool2d(y, 3, 1, 1)

        var_x = F.avg_pool2d(x ** 2, 3, 1, 1) - mu_x ** 2
        var_y = F.avg_pool2d(y ** 2, 3, 1, 1) - mu_y ** 2
        cov_xy = F.avg_pool2d(x * y, 3, 1, 1) - mu_x * mu_y

        numerator = (2 * mu_x * mu_y + c1) * (2 * cov_xy + c2)
        denominator = (mu_x ** 2 + mu_y ** 2 + c1) * (var_x + var_y + c2)
        ssim_map = numerator / denominator
        # return torch.clamp((1 - SSIM) / 2, 0, 1)
        return ssim_map.mean(), torch.tensor([torch.numel(x)])
class MAE_Paint(nn.Module):
    """Absolute error over valid pixels, normalized by the *total* element
    count (not the valid count); also returns that total as a tensor.
    NOTE: the validity mask is moved to the GPU — requires CUDA."""

    def __init__(self):
        super(MAE_Paint, self).__init__()

    def forward(self, outputs, target, *args):
        valid = (target > 0).float().cuda()
        abs_err = torch.abs(valid * target - valid * outputs)
        per_sample = torch.sum(abs_err.view(abs_err.size(0), 1, -1), -1, keepdim=True)
        valid_cnt = torch.sum(valid.view(valid.size(0), 1, -1), -1, keepdim=True)
        total = torch.numel(outputs)
        return torch.mean(per_sample / total), torch.tensor([total])
class RMSE_Paint(nn.Module):
    # Per-sample squared error over valid pixels divided by the total element
    # count (no sqrt, no mean over samples); also returns the total count.
    def __init__(self):
        super(RMSE_Paint, self).__init__()

    def forward(self, outputs, target, *args):
        # Valid-pixel mask; NOTE(review): .cuda() hard-codes GPU execution.
        val_pixels = (target > 0).float().cuda()
        err = (target * val_pixels - outputs * val_pixels) ** 2
        loss = torch.sum(err.view(err.size(0), 1, -1), -1, keepdim=True)
        # cnt (valid-pixel count) is computed but unused: the normalization
        # below uses torch.numel(outputs) — NOTE(review): confirm intentional.
        cnt = torch.sum(val_pixels.view(val_pixels.size(0), 1, -1), -1, keepdim=True)
        return (loss / torch.numel(outputs)), torch.tensor([torch.numel(outputs)]) | [
"torch.abs",
"torch.nn.functional.l1_loss",
"torch.nn.functional.mse_loss",
"torch.mean",
"torch.max",
"torch.sqrt",
"torch.nn.functional.avg_pool2d",
"torch.numel"
] | [((280, 333), 'torch.abs', 'torch.abs', (['(target * val_pixels - outputs * val_pixels)'], {}), '(target * val_pixels - outputs * val_pixels)\n', (289, 333), False, 'import torch\n'), ((508, 530), 'torch.mean', 'torch.mean', (['(loss / cnt)'], {}), '(loss / cnt)\n', (518, 530), False, 'import torch\n'), ((1186, 1299), 'torch.max', 'torch.max', (['(target * val_pixels / (outputs * val_pixels + 0.001))', '(outputs * val_pixels / (target * val_pixels))'], {}), '(target * val_pixels / (outputs * val_pixels + 0.001), outputs *\n val_pixels / (target * val_pixels))\n', (1195, 1299), False, 'import torch\n'), ((1792, 1832), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['outputs', 'target'], {'reduce': '(False)'}), '(outputs, target, reduce=False)\n', (1801, 1832), True, 'import torch.nn.functional as F\n'), ((1852, 1893), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['outputs', 'target'], {'reduce': '(False)'}), '(outputs, target, reduce=False)\n', (1862, 1893), True, 'import torch.nn.functional as F\n'), ((2039, 2055), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (2049, 2055), False, 'import torch\n'), ((2290, 2317), 'torch.abs', 'torch.abs', (['(target - outputs)'], {}), '(target - outputs)\n', (2299, 2317), False, 'import torch\n'), ((2333, 2348), 'torch.mean', 'torch.mean', (['err'], {}), '(err)\n', (2343, 2348), False, 'import torch\n'), ((2599, 2626), 'torch.abs', 'torch.abs', (['(outputs - target)'], {}), '(outputs - target)\n', (2608, 2626), False, 'import torch\n'), ((3158, 3185), 'torch.abs', 'torch.abs', (['(target - outputs)'], {}), '(target - outputs)\n', (3167, 3185), False, 'import torch\n'), ((3473, 3586), 'torch.max', 'torch.max', (['(target * val_pixels / (outputs * val_pixels + 0.001))', '(outputs * val_pixels / (target * val_pixels))'], {}), '(target * val_pixels / (outputs * val_pixels + 0.001), outputs *\n val_pixels / (target * val_pixels))\n', (3482, 3586), False, 'import torch\n'), ((4320, 4344), 'torch.nn.functional.avg_pool2d', 
'F.avg_pool2d', (['x', '(3)', '(1)', '(1)'], {}), '(x, 3, 1, 1)\n', (4332, 4344), True, 'import torch.nn.functional as F\n'), ((4360, 4384), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['y', '(3)', '(1)', '(1)'], {}), '(y, 3, 1, 1)\n', (4372, 4384), True, 'import torch.nn.functional as F\n'), ((5053, 5106), 'torch.abs', 'torch.abs', (['(target * val_pixels - outputs * val_pixels)'], {}), '(target * val_pixels - outputs * val_pixels)\n', (5062, 5106), False, 'import torch\n'), ((962, 984), 'torch.sqrt', 'torch.sqrt', (['(loss / cnt)'], {}), '(loss / cnt)\n', (972, 984), False, 'import torch\n'), ((1574, 1591), 'torch.mean', 'torch.mean', (['delta'], {}), '(delta)\n', (1584, 1591), False, 'import torch\n'), ((3861, 3878), 'torch.mean', 'torch.mean', (['delta'], {}), '(delta)\n', (3871, 3878), False, 'import torch\n'), ((4035, 4052), 'torch.mean', 'torch.mean', (['delta'], {}), '(delta)\n', (4045, 4052), False, 'import torch\n'), ((4404, 4433), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['(x ** 2)', '(3)', '(1)', '(1)'], {}), '(x ** 2, 3, 1, 1)\n', (4416, 4433), True, 'import torch.nn.functional as F\n'), ((4464, 4493), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['(y ** 2)', '(3)', '(1)', '(1)'], {}), '(y ** 2, 3, 1, 1)\n', (4476, 4493), True, 'import torch.nn.functional as F\n'), ((4525, 4553), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['(x * y)', '(3)', '(1)', '(1)'], {}), '(x * y, 3, 1, 1)\n', (4537, 4553), True, 'import torch.nn.functional as F\n'), ((5799, 5819), 'torch.numel', 'torch.numel', (['outputs'], {}), '(outputs)\n', (5810, 5819), False, 'import torch\n'), ((4830, 4844), 'torch.numel', 'torch.numel', (['x'], {}), '(x)\n', (4841, 4844), False, 'import torch\n'), ((5299, 5319), 'torch.numel', 'torch.numel', (['outputs'], {}), '(outputs)\n', (5310, 5319), False, 'import torch\n'), ((5336, 5356), 'torch.numel', 'torch.numel', (['outputs'], {}), '(outputs)\n', (5347, 5356), False, 'import torch\n'), ((5836, 5856), 
'torch.numel', 'torch.numel', (['outputs'], {}), '(outputs)\n', (5847, 5856), False, 'import torch\n')] |
import dash
import dash_core_components as dcc
import dash_html_components as html
from data_gather import plot_line_graph
from run_save_model import predict_line_graph, train_save_load
from generating_data import generate_sensors_data
# generating sensors data
generate_sensors_data()
train_save_load()
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div([
html.Div([
html.Div([
dcc.Graph(id='g1', figure=plot_line_graph(warehouse_name='wh1', warehouse_zone='zone1', title="Gerogia warehouse zone 1 present health"))
], className="six columns"),
html.Div([
dcc.Graph(id='g2', figure=predict_line_graph(warehouse_name='wh1', warehouse_zone='zone1', title="Gerogia warehouse zone 1 future health"))
], className="six columns"),
], className="row"),
html.Div([
html.Div([
dcc.Graph(id='g3', figure=plot_line_graph(warehouse_name='wh1', warehouse_zone='zone2', title="Georgia warehouse zone 2 present health"))
], className="six columns"),
html.Div([
dcc.Graph(id='g4', figure=predict_line_graph(warehouse_name='wh1', warehouse_zone='zone2', title="Georgia warehouse zone 2 future health"))
], className="six columns"),
], className="row")
])
if __name__ == '__main__':
app.run_server(debug=False) | [
"run_save_model.train_save_load",
"generating_data.generate_sensors_data",
"run_save_model.predict_line_graph",
"data_gather.plot_line_graph",
"dash.Dash"
] | [((263, 286), 'generating_data.generate_sensors_data', 'generate_sensors_data', ([], {}), '()\n', (284, 286), False, 'from generating_data import generate_sensors_data\n'), ((287, 304), 'run_save_model.train_save_load', 'train_save_load', ([], {}), '()\n', (302, 304), False, 'from run_save_model import predict_line_graph, train_save_load\n'), ((382, 444), 'dash.Dash', 'dash.Dash', (['__name__'], {'external_stylesheets': 'external_stylesheets'}), '(__name__, external_stylesheets=external_stylesheets)\n', (391, 444), False, 'import dash\n'), ((543, 658), 'data_gather.plot_line_graph', 'plot_line_graph', ([], {'warehouse_name': '"""wh1"""', 'warehouse_zone': '"""zone1"""', 'title': '"""Gerogia warehouse zone 1 present health"""'}), "(warehouse_name='wh1', warehouse_zone='zone1', title=\n 'Gerogia warehouse zone 1 present health')\n", (558, 658), False, 'from data_gather import plot_line_graph\n'), ((750, 867), 'run_save_model.predict_line_graph', 'predict_line_graph', ([], {'warehouse_name': '"""wh1"""', 'warehouse_zone': '"""zone1"""', 'title': '"""Gerogia warehouse zone 1 future health"""'}), "(warehouse_name='wh1', warehouse_zone='zone1', title=\n 'Gerogia warehouse zone 1 future health')\n", (768, 867), False, 'from run_save_model import predict_line_graph, train_save_load\n'), ((999, 1114), 'data_gather.plot_line_graph', 'plot_line_graph', ([], {'warehouse_name': '"""wh1"""', 'warehouse_zone': '"""zone2"""', 'title': '"""Georgia warehouse zone 2 present health"""'}), "(warehouse_name='wh1', warehouse_zone='zone2', title=\n 'Georgia warehouse zone 2 present health')\n", (1014, 1114), False, 'from data_gather import plot_line_graph\n'), ((1206, 1323), 'run_save_model.predict_line_graph', 'predict_line_graph', ([], {'warehouse_name': '"""wh1"""', 'warehouse_zone': '"""zone2"""', 'title': '"""Georgia warehouse zone 2 future health"""'}), "(warehouse_name='wh1', warehouse_zone='zone2', title=\n 'Georgia warehouse zone 2 future health')\n", (1224, 1323), False, 
'from run_save_model import predict_line_graph, train_save_load\n')] |
import socket
import json
class udp_emit:
def __init__(self, host, port):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.connect((host, port))
def emit(self, datadict):
try:
self.sock.sendall(json.dumps(datadict).encode())
except:
pass
#uncomment if you want to be notified
#print("cannot send data over udp socket, the destination is either not listening yet or is refusing to connect. Check to see if it's running yet.")
| [
"json.dumps",
"socket.socket"
] | [((99, 147), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (112, 147), False, 'import socket\n'), ((283, 303), 'json.dumps', 'json.dumps', (['datadict'], {}), '(datadict)\n', (293, 303), False, 'import json\n')] |
import sys;
from queue import Queue
from multiprocessing.managers import BaseManager
import etl;
import json
import extends;
import time;
authkey= "etlpy".encode('utf-8')
timeout=1;
rpc_port=8888
class ETLJob:
def __init__(self,project,jobname,config,id):
self.project= project;
self.jobname=jobname;
self.config=config;
self.id= id;
class JobResult:
def __init__(self,name,count,id):
self.name=name;
self.count=count;
self.id=id;
class Master:
def __init__(self,project,jobname):
# 派发出去的作业队列
self.dispatched_job_queue = Queue()
# 完成的作业队列
self.finished_job_queue = Queue()
self.project= project;
self.jobname=jobname;
self.maxprocess= 10;
def get_dispatched_job_queue(self):
return self.dispatched_job_queue
def get_finished_job_queue(self):
return self.finished_job_queue
def start(self,skip=0):
# 把派发作业队列和完成作业队列注册到网络上
BaseManager.register('get_dispatched_job_queue', callable=self.get_dispatched_job_queue)
BaseManager.register('get_finished_job_queue', callable=self.get_finished_job_queue)
# 监听端口和启动服务
manager = BaseManager(address=('0.0.0.0', rpc_port), authkey=authkey)
manager.start()
# 使用上面注册的方法获取队列
dispatched_jobs = manager.get_dispatched_job_queue()
finished_jobs = manager.get_finished_job_queue()
job_id = 0
module= self.project.modules[self.jobname];
proj=json.loads(json.dumps(etl.convert_dict(self.project,self.project.__defaultdict__), ensure_ascii=False))
while True:
for task in etl.parallel_map(module):
job_id = job_id + 1
if job_id<skip:
continue
job = ETLJob(proj, self.jobname, task, job_id);
print('Dispatch job: %s' % job.id)
dispatched_jobs.put(job)
while not dispatched_jobs.empty():
job = finished_jobs.get(60)
print('Finished Job: %s, Count: %s' % (job.id, job.count))
key=input('press any key to repeat,c to cancel')
if key=='c':
manager.shutdown()
break
#manager.shutdown()
class Slave:
def __init__(self):
# 派发出去的作业队列
self.dispatched_job_queue = Queue()
# 完成的作业队列
self.finished_job_queue = Queue()
def start(self,execute= True,serverip='127.0.0.1',port=8888):
# 把派发作业队列和完成作业队列注册到网络上
BaseManager.register('get_dispatched_job_queue')
BaseManager.register('get_finished_job_queue')
server = serverip;
print('Connect to server %s...' % server)
manager = BaseManager(address=(server, port), authkey=authkey)
manager.connect()
# 使用上面注册的方法获取队列
dispatched_jobs = manager.get_dispatched_job_queue()
finished_jobs = manager.get_finished_job_queue()
# 运行作业并返回结果,这里只是模拟作业运行,所以返回的是接收到的作业
while True:
if dispatched_jobs.empty():
time.sleep(1)
print('queue is empty,wait 1 sec...')
continue;
job = dispatched_jobs.get(timeout=timeout)
print('Run job: %s ' % job.id)
project=job.project;
project= etl.LoadProject_dict(project);
module= project.modules[job.jobname];
count=0
try:
generator= etl.parallel_reduce(module,[ job.config],execute)
for r in generator:
count+=1;
except Exception as e:
print(e)
print('finish job,id %s, count %s'%(job.id,count))
resultjob= JobResult(job.jobname,count,job.id)
finished_jobs.put(resultjob)
if __name__ == '__main__':
ip='127.0.0.1'
port=8888;
argv=sys.argv;
if len(argv)>1:
ip=argv[1];
if len(argv)>2:
port=int(argv[2]);
slave= Slave();
slave.start(True,ip,port);
| [
"etl.parallel_reduce",
"multiprocessing.managers.BaseManager.register",
"etl.convert_dict",
"time.sleep",
"etl.LoadProject_dict",
"queue.Queue",
"etl.parallel_map",
"multiprocessing.managers.BaseManager"
] | [((610, 617), 'queue.Queue', 'Queue', ([], {}), '()\n', (615, 617), False, 'from queue import Queue\n'), ((670, 677), 'queue.Queue', 'Queue', ([], {}), '()\n', (675, 677), False, 'from queue import Queue\n'), ((996, 1089), 'multiprocessing.managers.BaseManager.register', 'BaseManager.register', (['"""get_dispatched_job_queue"""'], {'callable': 'self.get_dispatched_job_queue'}), "('get_dispatched_job_queue', callable=self.\n get_dispatched_job_queue)\n", (1016, 1089), False, 'from multiprocessing.managers import BaseManager\n'), ((1093, 1182), 'multiprocessing.managers.BaseManager.register', 'BaseManager.register', (['"""get_finished_job_queue"""'], {'callable': 'self.get_finished_job_queue'}), "('get_finished_job_queue', callable=self.\n get_finished_job_queue)\n", (1113, 1182), False, 'from multiprocessing.managers import BaseManager\n'), ((1217, 1276), 'multiprocessing.managers.BaseManager', 'BaseManager', ([], {'address': "('0.0.0.0', rpc_port)", 'authkey': 'authkey'}), "(address=('0.0.0.0', rpc_port), authkey=authkey)\n", (1228, 1276), False, 'from multiprocessing.managers import BaseManager\n'), ((2396, 2403), 'queue.Queue', 'Queue', ([], {}), '()\n', (2401, 2403), False, 'from queue import Queue\n'), ((2456, 2463), 'queue.Queue', 'Queue', ([], {}), '()\n', (2461, 2463), False, 'from queue import Queue\n'), ((2569, 2617), 'multiprocessing.managers.BaseManager.register', 'BaseManager.register', (['"""get_dispatched_job_queue"""'], {}), "('get_dispatched_job_queue')\n", (2589, 2617), False, 'from multiprocessing.managers import BaseManager\n'), ((2626, 2672), 'multiprocessing.managers.BaseManager.register', 'BaseManager.register', (['"""get_finished_job_queue"""'], {}), "('get_finished_job_queue')\n", (2646, 2672), False, 'from multiprocessing.managers import BaseManager\n'), ((2769, 2821), 'multiprocessing.managers.BaseManager', 'BaseManager', ([], {'address': '(server, port)', 'authkey': 'authkey'}), '(address=(server, port), authkey=authkey)\n', (2780, 
2821), False, 'from multiprocessing.managers import BaseManager\n'), ((1678, 1702), 'etl.parallel_map', 'etl.parallel_map', (['module'], {}), '(module)\n', (1694, 1702), False, 'import etl\n'), ((3358, 3387), 'etl.LoadProject_dict', 'etl.LoadProject_dict', (['project'], {}), '(project)\n', (3378, 3387), False, 'import etl\n'), ((1552, 1612), 'etl.convert_dict', 'etl.convert_dict', (['self.project', 'self.project.__defaultdict__'], {}), '(self.project, self.project.__defaultdict__)\n', (1568, 1612), False, 'import etl\n'), ((3111, 3124), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3121, 3124), False, 'import time\n'), ((3503, 3553), 'etl.parallel_reduce', 'etl.parallel_reduce', (['module', '[job.config]', 'execute'], {}), '(module, [job.config], execute)\n', (3522, 3553), False, 'import etl\n')] |
import json
import argparse
import pprint
import csv
parser = argparse.ArgumentParser(description="parse squad qa into scv")
parser.add_argument("--input", type=str)
parser.add_argument("--output", type=str)
args = parser.parse_args()
input_file = args.input
output_file = args.output
with open(input_file, 'r') as f:
data = json.load(f)
pprint.pprint(data['data'][0]['paragraphs'][0]['qas'])
print(len(data['data']))
i = 0
output_data = []
for d in data['data']:
for paragraph in d['paragraphs']:
for qa in paragraph['qas']:
if qa['answers']:
print(qa['question'])
print(qa['answers'][0]['text'])
output_data.append([str(i), qa['question'], qa['answers'][0]['text']])
i += 1
with open(output_file, 'w', encoding='UTF8', newline='') as f:
writer = csv.writer(f)
# write the header
writer.writerow(['id', 'question', 'answer'])
# write multiple rows
writer.writerows(output_data)
| [
"json.load",
"csv.writer",
"pprint.pprint",
"argparse.ArgumentParser"
] | [((63, 125), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""parse squad qa into scv"""'}), "(description='parse squad qa into scv')\n", (86, 125), False, 'import argparse\n'), ((345, 399), 'pprint.pprint', 'pprint.pprint', (["data['data'][0]['paragraphs'][0]['qas']"], {}), "(data['data'][0]['paragraphs'][0]['qas'])\n", (358, 399), False, 'import pprint\n'), ((331, 343), 'json.load', 'json.load', (['f'], {}), '(f)\n', (340, 343), False, 'import json\n'), ((849, 862), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (859, 862), False, 'import csv\n')] |
from __future__ import unicode_literals
import logging
import re
from django.contrib import messages
from django.db import ProgrammingError
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseRedirect
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.utils import six
from .schema import (
TemplateSchemaActivation, Forbidden,
get_schema_model,
activate_schema, deactivate_schema,
)
from .signals import session_requesting_schema_change, session_schema_changed
logger = logging.getLogger('boardinghouse.middleware')
def change_schema(request, schema):
"""
Change the schema for the current request's session.
Note this does not actually _activate_ the schema, it only stores
the schema name in the current request's session.
"""
session = request.session
user = request.user
# Allow clearing out the current schema.
if not schema:
session.pop('schema', None)
return
# Anonymous users may not select a schema.
# Should this be selectable?
if user.is_anonymous():
session.pop('schema', None)
raise Forbidden()
# We actually want the schema name, so we can see if we
# don't actually need to change the schema at all (if the
# session is already set, then we assume that it's all good)
if isinstance(schema, six.string_types):
schema_name = schema
else:
schema_name = schema.schema
# Don't allow anyone, even superusers, to select the template schema.
if schema_name == '__template__':
raise TemplateSchemaActivation()
# If the schema is already set to this name for this session, then
# we can just exit early, saving some db access.
if schema_name == session.get('schema', None):
return
Schema = get_schema_model()
if user.is_superuser or user.is_staff:
# Just a sanity check: that the schema actually
# exists at all, when the superuser attempts to set
# the schema.
if schema_name == schema:
try:
schema = Schema.objects.get(schema=schema_name)
except Schema.DoesNotExist:
raise Forbidden()
else:
# If we were passed in a schema object, rather than a string,
# then we can check to see if that schema is active before
# having to hit the database.
if isinstance(schema, Schema):
# I'm not sure that it's logically possible to get this
# line to return True - we only pass in data from user.visible_schemata,
# which excludes inactives.
if not schema.is_active:
raise Forbidden()
# Ensure that this user has access to this schema,
# and that this schema is active. We can do this using the
# cache, which prevents hitting the database.
visible_schemata = [schema.schema for schema in user.visible_schemata]
if schema_name not in visible_schemata:
raise Forbidden()
# Allow 3rd-party applications to listen for an attempt to change
# the schema for a user/session, and prevent it from occurring by
# raising an exception. We will just pass that exception up the
# call stack.
session_requesting_schema_change.send(
sender=request,
schema=schema_name,
user=request.user,
session=request.session,
)
# Actually set the schema on the session.
session['schema'] = schema_name
# Allow 3rd-party applications to listen for a change, and act upon
# it accordingly.
session_schema_changed.send(
sender=request,
schema=schema_name,
user=request.user,
session=request.session,
)
class SchemaMiddleware:
"""
Middleware to set the postgres schema for the current request's session.
The schema that will be used is stored in the session. A lookup will
occur (but this could easily be cached) on each request.
There are three ways to change the schema as part of a request.
1. Request a page with a querystring containg a ``__schema`` value::
https://example.com/page/?__schema=<schema-name>
The schema will be changed (or cleared, if this user cannot view
that schema), and the page will be re-loaded (if it was a GET). This
method of changing schema allows you to have a link that changes the
current schema and then loads the data with the new schema active.
It is used within the admin for having a link to data from an
arbitrary schema in the ``LogEntry`` history.
This type of schema change request should not be done with a POST
request.
2. Add a request header::
X-Change-Schema: <schema-name>
This will not cause a redirect to the same page without query string. It
is the only way to do a schema change within a POST request, but could
be used for any request type.
3. Use a specific request::
https://example.com/__change_schema__/<schema-name>/
This is designed to be used from AJAX requests, or as part of
an API call, as it returns a status code (and a short message)
about the schema change request. If you were storing local data,
and did one of these, you are probably going to have to invalidate
much of that.
You could also come up with other methods.
"""
def process_request(self, request):
FORBIDDEN = HttpResponseForbidden(_('You may not select that schema'))
# Ways of changing the schema.
# 1. URL /__change_schema__/<name>/
# This will return a whole page.
# We don't need to activate, that happens on the next request.
if request.path.startswith('/__change_schema__/'):
schema = request.path.split('/')[2]
try:
change_schema(request, schema)
except Forbidden:
return FORBIDDEN
if 'schema' in request.session:
response = _('Schema changed to %s') % request.session['schema']
else:
response = _('Schema deselected')
return HttpResponse(response)
# 2. GET querystring ...?__schema=<name>
# This will change the query, and then redirect to the page
# without the schema name included.
elif request.GET.get('__schema', None) is not None:
schema = request.GET['__schema']
try:
change_schema(request, schema)
except Forbidden:
return FORBIDDEN
data = request.GET.copy()
data.pop('__schema')
if request.method == "GET":
# redirect so we strip the schema out of the querystring.
if data:
return redirect(request.path + '?' + data.urlencode())
return redirect(request.path)
# method == 'POST' or other
request.GET = data
# 3. Header "X-Change-Schema: <name>"
elif 'HTTP_X_CHANGE_SCHEMA' in request.META:
schema = request.META['HTTP_X_CHANGE_SCHEMA']
try:
change_schema(request, schema)
except Forbidden:
return FORBIDDEN
elif 'schema' not in request.session and len(request.user.visible_schemata) == 1:
# Can we not require a db hit each request here?
change_schema(request, request.user.visible_schemata[0])
if 'schema' in request.session:
activate_schema(request.session['schema'])
else:
deactivate_schema()
def process_exception(self, request, exception):
"""
In the case a request returned a DatabaseError, and there was no
schema set on ``request.session``, then look and see if the error
that was provided by the database may indicate that we should have
been looking inside a schema.
In the case we had a :class:`TemplateSchemaActivation` exception,
then we want to remove that key from the session.
"""
if isinstance(exception, ProgrammingError) and not request.session.get('schema'):
if re.search('relation ".*" does not exist', exception.args[0]):
# I'm not sure if this should be done or not, but it does
# fail without the if statement from django 1.8+
# if not transaction.get_autocommit():
# transaction.rollback()
# Should we return an error, or redirect? When should we
# do one or the other? For an API, we would want an error
# but for a regular user, a redirect may be better.
# Can we see if there is already a pending message for this
# request that has the same content as us?
messages.error(request,
_("You must select a schema to access that resource"),
fail_silently=True
)
return HttpResponseRedirect('..')
# I'm not sure we ever really hit this one, but it's worth keeping
# here just in case we've missed something.
if isinstance(exception, TemplateSchemaActivation):
request.session.pop('schema', None)
return HttpResponseForbidden(_('You may not select that schema'))
| [
"logging.getLogger",
"django.http.HttpResponseRedirect",
"django.utils.translation.ugettext_lazy",
"django.http.HttpResponse",
"django.shortcuts.redirect",
"re.search"
] | [((563, 608), 'logging.getLogger', 'logging.getLogger', (['"""boardinghouse.middleware"""'], {}), "('boardinghouse.middleware')\n", (580, 608), False, 'import logging\n'), ((5534, 5569), 'django.utils.translation.ugettext_lazy', '_', (['"""You may not select that schema"""'], {}), "('You may not select that schema')\n", (5535, 5569), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6214, 6236), 'django.http.HttpResponse', 'HttpResponse', (['response'], {}), '(response)\n', (6226, 6236), False, 'from django.http import HttpResponse, HttpResponseForbidden, HttpResponseRedirect\n'), ((8260, 8320), 're.search', 're.search', (['"""relation ".*" does not exist"""', 'exception.args[0]'], {}), '(\'relation ".*" does not exist\', exception.args[0])\n', (8269, 8320), False, 'import re\n'), ((6171, 6193), 'django.utils.translation.ugettext_lazy', '_', (['"""Schema deselected"""'], {}), "('Schema deselected')\n", (6172, 6193), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9108, 9134), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['""".."""'], {}), "('..')\n", (9128, 9134), False, 'from django.http import HttpResponse, HttpResponseForbidden, HttpResponseRedirect\n'), ((9411, 9446), 'django.utils.translation.ugettext_lazy', '_', (['"""You may not select that schema"""'], {}), "('You may not select that schema')\n", (9412, 9446), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6072, 6097), 'django.utils.translation.ugettext_lazy', '_', (['"""Schema changed to %s"""'], {}), "('Schema changed to %s')\n", (6073, 6097), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6941, 6963), 'django.shortcuts.redirect', 'redirect', (['request.path'], {}), '(request.path)\n', (6949, 6963), False, 'from django.shortcuts import redirect\n'), ((8973, 9026), 'django.utils.translation.ugettext_lazy', '_', (['"""You must select a schema to access that resource"""'], {}), "('You must select a 
schema to access that resource')\n", (8974, 9026), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
import torch.nn as nn
import numpy as np
from collections import OrderedDict
from torchmeta.modules import (MetaModule, MetaConv2d, MetaBatchNorm2d,
MetaSequential, MetaLinear)
import torch
def conv_block(in_channels, out_channels, **kwargs):
return MetaSequential(OrderedDict([
('conv', MetaConv2d(in_channels, out_channels, **kwargs)),
# ('norm', nn.BatchNorm2d(out_channels, momentum=1.,
# track_running_stats=False)),
('relu', nn.ReLU()),
# ('pool', nn.MaxPool2d(2))
]))
class MetaConvModel(MetaModule):
"""4-layer Convolutional Neural Network architecture from [1].
Parameters
----------
in_channels : int
Number of channels for the input images.
out_features : int
Number of classes (output of the model).
hidden_size : int (default: 64)
Number of channels in the intermediate representations.
feature_size : int (default: 64)
Number of features returned by the convolutional head.
References
----------
.. [1] <NAME>., <NAME>., and <NAME>. (2017). Model-Agnostic Meta-Learning
for Fast Adaptation of Deep Networks. International Conference on
Machine Learning (ICML) (https://arxiv.org/abs/1703.03400)
"""
def __init__(self, in_channels, out_features, hidden_size=64, feature_size=64):
super(MetaConvModel, self).__init__()
self.in_channels = in_channels
self.out_features = out_features
self.hidden_size = hidden_size
self.feature_size = feature_size
self.features = MetaSequential(OrderedDict([
('layer1', conv_block(in_channels, hidden_size, kernel_size=3,
stride=1, padding=1, bias=True)),
('layer2', conv_block(hidden_size, hidden_size, kernel_size=3,
stride=1, padding=1, bias=True)),
('layer3', conv_block(hidden_size, hidden_size, kernel_size=3,
stride=1, padding=1, bias=True)),
('layer4', conv_block(hidden_size, hidden_size, kernel_size=3,
stride=1, padding=1, bias=True))
]))
self.classifier = MetaLinear(feature_size, out_features, bias=True)
def forward(self, inputs, params=None):
features = self.features(inputs, params=self.get_subdict(params, 'features'))
features = features.view((features.size(0), -1))
logits = self.classifier(features, params=self.get_subdict(params, 'classifier'))
return logits
class MetaToyConvModel(MetaModule):
def __init__(self, out_features, in_channels=1, hidden_size=64, feature_size=64):
super(MetaToyConvModel, self).__init__()
self.in_channels = in_channels
self.out_features = out_features
self.hidden_size = hidden_size
self.feature_size = feature_size
self.features = MetaSequential(OrderedDict([
('layer1', conv_block(in_channels, hidden_size, kernel_size=3,
stride=1, padding=1, bias=True)),
('layer2', conv_block(hidden_size, hidden_size, kernel_size=3,
stride=1, padding=1, bias=True)),
# ('layer3', conv_block(hidden_size, hidden_size, kernel_size=3,
# stride=1, padding=1, bias=True)),
# ('layer4', conv_block(hidden_size, hidden_size, kernel_size=3,
# stride=1, padding=1, bias=True))
]))
self.classifier = MetaLinear(feature_size, out_features, bias=True)
def forward(self, inputs, params=None):
inputs = torch.reshape(inputs, (-1, 1, 84, 84))
features = self.features(inputs, params=self.get_subdict(params, 'features'))
features = features.view((features.size(0), -1))
logits = self.classifier(features, params=self.get_subdict(params, 'classifier'))
return logits
def embedding(self, inputs, params=None):
if type(inputs) == np.ndarray:
inputs = torch.from_numpy(inputs)
inputs = torch.reshape(inputs, (-1, 1, 100, 100))
features = self.features(inputs, params=self.get_subdict(params, 'features'))
features = features.view((features.size(0), -1))
return features
class MetaMNISTConvModel(MetaModule):
def __init__(self, out_features, in_width=28, in_channels=1, hidden_size=32, mid_feats=512, feature_size=25088):
super(MetaMNISTConvModel, self).__init__()
self.in_width = in_width
self.in_channels = in_channels
self.out_features = out_features
self.hidden_size = hidden_size
self.feature_size = in_width * in_width * hidden_size
self.mid_feats = mid_feats
self.features = MetaSequential(OrderedDict([
('layer1', conv_block(in_channels, hidden_size, kernel_size=3,
stride=1, padding=1, bias=True)),
('layer2', conv_block(hidden_size, hidden_size, kernel_size=3,
stride=1, padding=1, bias=True)),
]))
self.classifier_first = MetaLinear(self.feature_size, mid_feats, bias=True)
self.classifier = MetaLinear(mid_feats, out_features, bias=True)
def forward(self, inputs, params=None):
inputs = torch.reshape(inputs, (-1, self.in_channels, self.in_width, self.in_width) )
features = self.features(inputs, params=self.get_subdict(params, 'features'))
features = features.reshape((features.size(0), -1))
mid_logits = self.classifier_first(features, params=self.get_subdict(params, 'classifier_first'))
logits = self.classifier(mid_logits, params=self.get_subdict(params, 'classifier'))
return logits
def embedding(self, inputs, params=None):
inputs = torch.reshape(inputs, (-1, self.in_channels, self.in_width, self.in_width) )
features = self.features(inputs, params=self.get_subdict(params, 'features'))
features = features.view((features.size(0), -1))
mid_logits = self.classifier_first(features, params=self.get_subdict(params, 'classifier_first'))
return mid_logits
class MNISTConvModel(nn.Module):
def __init__(self, num_classes):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, num_classes)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x)
class Flatten(nn.Module):
def forward(self, input):
# print(input.shape)
return input.reshape(input.size(0), -1)
class UnFlatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), 20, 7, 7)
class VAE(nn.Module):
def __init__(self, in_width=28, z_dim=20, img_channels=1, h_dim=980):
super(VAE, self).__init__()
# in = img_channels x in_width x in_width
## encoder
self.in_width = in_width
self.img_channels = img_channels
def conv_output_dim(input_size, kernel_size, stride=1, padding=0, **kwargs):
from math import floor
return floor((input_size + 2 * padding - (kernel_size - 1) - 1) / stride + 1)
def conv_transpose_output_dim(input_size, kernel_size, stride=1, padding=0, dilation=1, **kwargs):
return (input_size - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + 1
# (H −1)×stride[0]−2×padding[0]+dilation[0]×(kernel_size[0]−1)+output_padding[0]+1
## encoder
input_size = in_width
conv1_filters = 10
conv1_kwargs = dict(out_channels=10, kernel_size=3, stride=1, padding=1)
a1_size = conv_output_dim(input_size, **conv1_kwargs)
conv2_filters = 10
conv2_kwargs = dict(out_channels=10, kernel_size=4, stride=2, padding=1)
a2_size = conv_output_dim(a1_size, **conv2_kwargs)
conv3_filters = 20
conv3_kwargs = dict(out_channels=20, kernel_size=5, stride=2, padding=2)
a3_size = conv_output_dim(a2_size, **conv2_kwargs)
h_dim = a3_size ** 2 * conv3_filters
print(a3_size)
print(h_dim)
## decoder
deconv1_filters = 10
deconv1_kwargs = dict(kernel_size=5, stride=2, padding=2)
d1_size = conv_transpose_output_dim(a3_size, **deconv1_kwargs)
deconv2_filters = 10
deconv2_kwargs = dict(kernel_size=5, stride=2, padding=1)
d2_size = conv_transpose_output_dim(d1_size, **deconv2_kwargs)
deconv3_filters = 20
deconv3_kwargs = dict(kernel_size=6, stride=1, padding=2)
d3_size = conv_transpose_output_dim(d2_size, **deconv3_kwargs)
print(d1_size, d2_size, d3_size)
self.conv1 = nn.Sequential(
nn.Conv2d(img_channels, **conv1_kwargs),
nn.ReLU()
)
self.conv2 = nn.Sequential(
nn.Conv2d(conv1_filters, **conv2_kwargs),
nn.ReLU()
)
self.conv3 = nn.Sequential(
nn.Conv2d(conv2_filters, **conv3_kwargs),
nn.ReLU()
)
self.to_dense = Flatten()
## map to latent z
self.fc11 = nn.Linear(h_dim, z_dim)
self.fc12 = nn.Linear(h_dim, z_dim)
## decoder
self.fc2 = nn.Linear(z_dim, h_dim)
self.reshape = UnFlatten()
self.deconv1 = nn.Sequential(
nn.ConvTranspose2d(20, 10, kernel_size=5, stride=2, padding=2),
nn.ReLU(),
)
self.deconv2 = nn.Sequential(
nn.ConvTranspose2d(10, 10, kernel_size=5, stride=2, padding=1),
nn.ReLU(),
)
self.deconv3 = nn.Sequential(
nn.ConvTranspose2d(10, img_channels, kernel_size=6, stride=1, padding=2),
nn.Sigmoid(),
)
def encode(self, x):
a1 = self.conv1(x)
a2 = self.conv2(a1)
a3 = self.conv3(a2)
h = self.to_dense(a3)
return self.fc11(h), self.fc12(h)
def reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
def decode(self, z):
h = self.reshape(self.fc2(z))
a1 = self.deconv1(h)
a2 = self.deconv2(a1)
a3 = self.deconv3(a2)
return a3
def forward(self, x):
x = torch.reshape(x, (-1, self.img_channels, self.in_width, self.in_width))
mu, logvar = self.encode(x)
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
class MetaMLPModel(MetaModule):
    """Meta-learnable multi-layer perceptron (MAML-style, see
    https://arxiv.org/abs/1703.03400).

    Parameters
    ----------
    in_features : int
        Dimensionality of the input.
    out_features : int
        Number of output classes.
    hidden_sizes : list of int
        Widths of the hidden layers; one hidden layer per entry.
    """
    def __init__(self, in_features, out_features, hidden_sizes):
        super(MetaMLPModel, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.hidden_sizes = hidden_sizes
        # Chain the layer widths: input -> hidden[0] -> ... -> hidden[-1].
        layer_sizes = [in_features] + hidden_sizes
        blocks = OrderedDict()
        for idx, width in enumerate(layer_sizes[:-1]):
            blocks['layer{0}'.format(idx + 1)] = MetaSequential(OrderedDict([
                ('linear', MetaLinear(width, layer_sizes[idx + 1], bias=True)),
                ('relu', nn.ReLU()),
            ]))
        self.features = MetaSequential(blocks)
        self.classifier = MetaLinear(hidden_sizes[-1], out_features, bias=True)
    def forward(self, inputs, params=None):
        """Apply the hidden stack then the classifier, threading the
        per-module meta-parameters through ``params``."""
        hidden = self.features(inputs, params=self.get_subdict(params, 'features'))
        return self.classifier(hidden, params=self.get_subdict(params, 'classifier'))
def ModelConvOmniglot(out_features, hidden_size=64):
    """Build the Omniglot convolutional model (single input channel)."""
    model = MetaConvModel(1, out_features, hidden_size=hidden_size,
                          feature_size=hidden_size)
    return model
def ModelConvMiniImagenet(out_features, hidden_size=64):
    """Build the MiniImagenet convolutional model (3 RGB input channels,
    5x5 spatial feature map after the conv stack)."""
    model = MetaConvModel(3, out_features, hidden_size=hidden_size,
                          feature_size=5 * 5 * hidden_size)
    return model
def ModelMLPSinusoid(hidden_sizes=None):
    """Build the 1-D sinusoid-regression MLP.

    Parameters
    ----------
    hidden_sizes : list of int, optional
        Hidden-layer widths; defaults to [40, 40] when omitted.
    """
    # Avoid the mutable-default-argument pitfall: build the default per call
    # so callers can never share (and accidentally mutate) one list object.
    if hidden_sizes is None:
        hidden_sizes = [40, 40]
    return MetaMLPModel(1, 1, hidden_sizes)
def ModelMLPToy2D(hidden_sizes=None):
    """Build the 2-D toy-task MLP.

    Parameters
    ----------
    hidden_sizes : list of int, optional
        Hidden-layer widths; defaults to [1024, 1024] when omitted.
    """
    # Avoid the mutable-default-argument pitfall: build the default per call.
    if hidden_sizes is None:
        hidden_sizes = [1024, 1024]
    return MetaMLPModel(2, 2, hidden_sizes)
if __name__ == '__main__':
    # Smoke check when run as a script: just instantiate the 2-D toy MLP.
    model = ModelMLPToy2D()
| [
"torch.nn.ConvTranspose2d",
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"math.floor",
"torch.nn.Dropout2d",
"torch.exp",
"torch.nn.Conv2d",
"torch.from_numpy",
"torchmeta.modules.MetaConv2d",
"torch.randn_like",
"torch.nn.Linear",
"torchmeta.modules.MetaLinear",
"torch.reshape"
] | [((2255, 2304), 'torchmeta.modules.MetaLinear', 'MetaLinear', (['feature_size', 'out_features'], {'bias': '(True)'}), '(feature_size, out_features, bias=True)\n', (2265, 2304), False, 'from torchmeta.modules import MetaModule, MetaConv2d, MetaBatchNorm2d, MetaSequential, MetaLinear\n'), ((3610, 3659), 'torchmeta.modules.MetaLinear', 'MetaLinear', (['feature_size', 'out_features'], {'bias': '(True)'}), '(feature_size, out_features, bias=True)\n', (3620, 3659), False, 'from torchmeta.modules import MetaModule, MetaConv2d, MetaBatchNorm2d, MetaSequential, MetaLinear\n'), ((3722, 3760), 'torch.reshape', 'torch.reshape', (['inputs', '(-1, 1, 84, 84)'], {}), '(inputs, (-1, 1, 84, 84))\n', (3735, 3760), False, 'import torch\n'), ((4165, 4205), 'torch.reshape', 'torch.reshape', (['inputs', '(-1, 1, 100, 100)'], {}), '(inputs, (-1, 1, 100, 100))\n', (4178, 4205), False, 'import torch\n'), ((5215, 5266), 'torchmeta.modules.MetaLinear', 'MetaLinear', (['self.feature_size', 'mid_feats'], {'bias': '(True)'}), '(self.feature_size, mid_feats, bias=True)\n', (5225, 5266), False, 'from torchmeta.modules import MetaModule, MetaConv2d, MetaBatchNorm2d, MetaSequential, MetaLinear\n'), ((5293, 5339), 'torchmeta.modules.MetaLinear', 'MetaLinear', (['mid_feats', 'out_features'], {'bias': '(True)'}), '(mid_feats, out_features, bias=True)\n', (5303, 5339), False, 'from torchmeta.modules import MetaModule, MetaConv2d, MetaBatchNorm2d, MetaSequential, MetaLinear\n'), ((5402, 5477), 'torch.reshape', 'torch.reshape', (['inputs', '(-1, self.in_channels, self.in_width, self.in_width)'], {}), '(inputs, (-1, self.in_channels, self.in_width, self.in_width))\n', (5415, 5477), False, 'import torch\n'), ((5913, 5988), 'torch.reshape', 'torch.reshape', (['inputs', '(-1, self.in_channels, self.in_width, self.in_width)'], {}), '(inputs, (-1, self.in_channels, self.in_width, self.in_width))\n', (5926, 5988), False, 'import torch\n'), ((6393, 6424), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(10)'], 
{'kernel_size': '(5)'}), '(1, 10, kernel_size=5)\n', (6402, 6424), True, 'import torch.nn as nn\n'), ((6446, 6478), 'torch.nn.Conv2d', 'nn.Conv2d', (['(10)', '(20)'], {'kernel_size': '(5)'}), '(10, 20, kernel_size=5)\n', (6455, 6478), True, 'import torch.nn as nn\n'), ((6505, 6519), 'torch.nn.Dropout2d', 'nn.Dropout2d', ([], {}), '()\n', (6517, 6519), True, 'import torch.nn as nn\n'), ((6539, 6557), 'torch.nn.Linear', 'nn.Linear', (['(320)', '(50)'], {}), '(320, 50)\n', (6548, 6557), True, 'import torch.nn as nn\n'), ((6577, 6603), 'torch.nn.Linear', 'nn.Linear', (['(50)', 'num_classes'], {}), '(50, num_classes)\n', (6586, 6603), True, 'import torch.nn as nn\n'), ((9599, 9622), 'torch.nn.Linear', 'nn.Linear', (['h_dim', 'z_dim'], {}), '(h_dim, z_dim)\n', (9608, 9622), True, 'import torch.nn as nn\n'), ((9643, 9666), 'torch.nn.Linear', 'nn.Linear', (['h_dim', 'z_dim'], {}), '(h_dim, z_dim)\n', (9652, 9666), True, 'import torch.nn as nn\n'), ((9706, 9729), 'torch.nn.Linear', 'nn.Linear', (['z_dim', 'h_dim'], {}), '(z_dim, h_dim)\n', (9715, 9729), True, 'import torch.nn as nn\n'), ((10457, 10480), 'torch.exp', 'torch.exp', (['(0.5 * logvar)'], {}), '(0.5 * logvar)\n', (10466, 10480), False, 'import torch\n'), ((10495, 10516), 'torch.randn_like', 'torch.randn_like', (['std'], {}), '(std)\n', (10511, 10516), False, 'import torch\n'), ((10757, 10828), 'torch.reshape', 'torch.reshape', (['x', '(-1, self.img_channels, self.in_width, self.in_width)'], {}), '(x, (-1, self.img_channels, self.in_width, self.in_width))\n', (10770, 10828), False, 'import torch\n'), ((12241, 12294), 'torchmeta.modules.MetaLinear', 'MetaLinear', (['hidden_sizes[-1]', 'out_features'], {'bias': '(True)'}), '(hidden_sizes[-1], out_features, bias=True)\n', (12251, 12294), False, 'from torchmeta.modules import MetaModule, MetaConv2d, MetaBatchNorm2d, MetaSequential, MetaLinear\n'), ((4123, 4147), 'torch.from_numpy', 'torch.from_numpy', (['inputs'], {}), '(inputs)\n', (4139, 4147), False, 'import 
torch\n'), ((7579, 7649), 'math.floor', 'floor', (['((input_size + 2 * padding - (kernel_size - 1) - 1) / stride + 1)'], {}), '((input_size + 2 * padding - (kernel_size - 1) - 1) / stride + 1)\n', (7584, 7649), False, 'from math import floor\n'), ((9201, 9240), 'torch.nn.Conv2d', 'nn.Conv2d', (['img_channels'], {}), '(img_channels, **conv1_kwargs)\n', (9210, 9240), True, 'import torch.nn as nn\n'), ((9254, 9263), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9261, 9263), True, 'import torch.nn as nn\n'), ((9322, 9362), 'torch.nn.Conv2d', 'nn.Conv2d', (['conv1_filters'], {}), '(conv1_filters, **conv2_kwargs)\n', (9331, 9362), True, 'import torch.nn as nn\n'), ((9376, 9385), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9383, 9385), True, 'import torch.nn as nn\n'), ((9444, 9484), 'torch.nn.Conv2d', 'nn.Conv2d', (['conv2_filters'], {}), '(conv2_filters, **conv3_kwargs)\n', (9453, 9484), True, 'import torch.nn as nn\n'), ((9498, 9507), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9505, 9507), True, 'import torch.nn as nn\n'), ((9815, 9877), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(20)', '(10)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)'}), '(20, 10, kernel_size=5, stride=2, padding=2)\n', (9833, 9877), True, 'import torch.nn as nn\n'), ((9891, 9900), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9898, 9900), True, 'import torch.nn as nn\n'), ((9962, 10024), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(10)', '(10)'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(1)'}), '(10, 10, kernel_size=5, stride=2, padding=1)\n', (9980, 10024), True, 'import torch.nn as nn\n'), ((10038, 10047), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (10045, 10047), True, 'import torch.nn as nn\n'), ((10109, 10181), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(10)', 'img_channels'], {'kernel_size': '(6)', 'stride': '(1)', 'padding': '(2)'}), '(10, img_channels, kernel_size=6, stride=1, padding=2)\n', (10127, 10181), True, 'import 
torch.nn as nn\n'), ((10195, 10207), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (10205, 10207), True, 'import torch.nn as nn\n'), ((332, 379), 'torchmeta.modules.MetaConv2d', 'MetaConv2d', (['in_channels', 'out_channels'], {}), '(in_channels, out_channels, **kwargs)\n', (342, 379), False, 'from torchmeta.modules import MetaModule, MetaConv2d, MetaBatchNorm2d, MetaSequential, MetaLinear\n'), ((503, 512), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (510, 512), True, 'import torch.nn as nn\n'), ((12050, 12104), 'torchmeta.modules.MetaLinear', 'MetaLinear', (['hidden_size', 'layer_sizes[i + 1]'], {'bias': '(True)'}), '(hidden_size, layer_sizes[i + 1], bias=True)\n', (12060, 12104), False, 'from torchmeta.modules import MetaModule, MetaConv2d, MetaBatchNorm2d, MetaSequential, MetaLinear\n'), ((12132, 12141), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (12139, 12141), True, 'import torch.nn as nn\n')] |
from setuptools import setup
# Distribution metadata for the `datoms` package, gathered in one mapping so
# it can be inspected or reused before being handed to setuptools.
PACKAGE_META = {
    'name': 'datoms',
    'version': '0.1.0',
    'description': 'A simplistic, Datomic inspired, SQLite backed, REST influenced, schemaless auditable facts storage.',
    'py_modules': ['datoms'],
    'license': 'unlicense',
    'author': '<NAME>',
    'author_email': '<EMAIL>',
    'url': 'https://github.com/3kwa/datoms',
    'install_requires': ['sql'],
}

setup(**PACKAGE_META)
| [
"setuptools.setup"
] | [((31, 350), 'setuptools.setup', 'setup', ([], {'name': '"""datoms"""', 'version': '"""0.1.0"""', 'description': '"""A simplistic, Datomic inspired, SQLite backed, REST influenced, schemaless auditable facts storage."""', 'py_modules': "['datoms']", 'license': '"""unlicense"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/3kwa/datoms"""', 'install_requires': "['sql']"}), "(name='datoms', version='0.1.0', description=\n 'A simplistic, Datomic inspired, SQLite backed, REST influenced, schemaless auditable facts storage.'\n , py_modules=['datoms'], license='unlicense', author='<NAME>',\n author_email='<EMAIL>', url='https://github.com/3kwa/datoms',\n install_requires=['sql'])\n", (36, 350), False, 'from setuptools import setup\n')] |
# Copyright 2021 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
import argparse
import numpy as np
from python.input.MNIST_input_pipeline import MNIST
from python.input.cifar10_input_pipeline import Cifar10
from python.input.cifar100_input_pipeline import Cifar100
from python.input.smallNORB_input_pipeline import smallNORB
from python.models.BranchingMerging import SmallImageBranchingMerging
import tensorflow as tf
def go(data_dir, log_dir, output_file, input_pipeline, merge_strategy,
       use_hvcs=True, hvc_type=1, hvc_dims=None, total_convolutions=None,
       branches_after=None):
    """Extract the learned branch-weight vector from every checkpoint found
    under ``log_dir`` and write them all to ``output_file`` as CSV rows.

    The model-shape arguments (merge_strategy, use_hvcs, hvc_type, hvc_dims,
    total_convolutions, branches_after) must match the training run so the
    rebuilt model's variables line up with the saved checkpoints.
    """
    # Collect one checkpoint prefix per directory: TF checkpoints span several
    # files (.index / .data-*), so strip extensions and deduplicate.
    # NOTE(review): if a directory ever holds more than one checkpoint prefix,
    # set ordering makes file[0] arbitrary — confirm one prefix per directory.
    files = []
    for dirname, _, filenames in os.walk(log_dir):
        file = list(set([os.path.join(dirname,
            os.path.splitext(fn)[0]) for fn in filenames]))
        if len(file) > 0:
            files.append(file[0])
    # Map the numeric pipeline code to a dataset loader (default: MNIST).
    if input_pipeline == 3:
        in_pipe = Cifar10(data_dir, False, 0)
    elif input_pipeline == 4:
        in_pipe = Cifar100(data_dir, False, 0)
    elif input_pipeline == 5:
        in_pipe = smallNORB(data_dir, False, 48, 32)
    else:
        in_pipe = MNIST(data_dir, False, 1)
    branch_weights = []
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        print("Building model...")
        # Build once, then restore each checkpoint into the same variables.
        model = SmallImageBranchingMerging(in_pipe.get_class_count(),
            in_pipe.get_image_size(), in_pipe.get_image_channels(),
            merge_strategy, use_hvcs, hvc_type, hvc_dims,
            total_convolutions, branches_after, False)
        for weights_file in files:
            print("Restoring weights file: {}".format(weights_file))
            ckpt = tf.train.Checkpoint(
                vars=model.get_all_savable_variables())
            # expect_partial(): checkpoints may contain optimizer slots etc.
            # that this inference-only model does not define.
            ckpt.restore(weights_file).expect_partial()
            branch_weights.append(model.branch_weights.variable.numpy())
    print("Saving final branch weights...")
    # (False Positive)
    # noinspection PyTypeChecker
    np.savetxt(output_file, np.array(branch_weights), delimiter=',', fmt='%0f')
    print("Finished.")
################################################################################
if __name__ == "__main__":
p = argparse.ArgumentParser()
p.add_argument("--data_dir", default=r"../../../../Datasets/smallNORB_data")
p.add_argument("--log_dir", default=r"../../logs/20210609135430")
p.add_argument("--output_file",
default=r"../../logs/20210609135430/final_branch_weights.txt")
p.add_argument("--input_pipeline", default=5, type=int)
p.add_argument("--merge_strategy", default=2, type=float)
p.add_argument("--use_hvcs", default=True, type=bool)
p.add_argument("--hvc_type", default=2, type=int)
p.add_argument("--hvc_dims", default=[96, 144, 192], type=int)
p.add_argument("--total_convolutions", default=11, type=int)
p.add_argument("--branches_after", default=[4, 7, 10])
a = p.parse_args()
go(data_dir=a.data_dir, log_dir=a.log_dir, output_file=a.output_file,
input_pipeline=a.input_pipeline, merge_strategy=a.merge_strategy,
use_hvcs=a.use_hvcs, hvc_type=a.hvc_type, hvc_dims=a.hvc_dims,
total_convolutions=a.total_convolutions, branches_after=a.branches_after)
| [
"argparse.ArgumentParser",
"python.input.cifar100_input_pipeline.Cifar100",
"os.path.splitext",
"numpy.array",
"python.input.cifar10_input_pipeline.Cifar10",
"python.input.smallNORB_input_pipeline.smallNORB",
"tensorflow.distribute.MirroredStrategy",
"python.input.MNIST_input_pipeline.MNIST",
"os.wa... | [((1302, 1318), 'os.walk', 'os.walk', (['log_dir'], {}), '(log_dir)\n', (1309, 1318), False, 'import os\n'), ((1817, 1849), 'tensorflow.distribute.MirroredStrategy', 'tf.distribute.MirroredStrategy', ([], {}), '()\n', (1847, 1849), True, 'import tensorflow as tf\n'), ((2845, 2870), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2868, 2870), False, 'import argparse\n'), ((1534, 1561), 'python.input.cifar10_input_pipeline.Cifar10', 'Cifar10', (['data_dir', '(False)', '(0)'], {}), '(data_dir, False, 0)\n', (1541, 1561), False, 'from python.input.cifar10_input_pipeline import Cifar10\n'), ((2652, 2676), 'numpy.array', 'np.array', (['branch_weights'], {}), '(branch_weights)\n', (2660, 2676), True, 'import numpy as np\n'), ((1610, 1638), 'python.input.cifar100_input_pipeline.Cifar100', 'Cifar100', (['data_dir', '(False)', '(0)'], {}), '(data_dir, False, 0)\n', (1618, 1638), False, 'from python.input.cifar100_input_pipeline import Cifar100\n'), ((1687, 1721), 'python.input.smallNORB_input_pipeline.smallNORB', 'smallNORB', (['data_dir', '(False)', '(48)', '(32)'], {}), '(data_dir, False, 48, 32)\n', (1696, 1721), False, 'from python.input.smallNORB_input_pipeline import smallNORB\n'), ((1750, 1775), 'python.input.MNIST_input_pipeline.MNIST', 'MNIST', (['data_dir', '(False)', '(1)'], {}), '(data_dir, False, 1)\n', (1755, 1775), False, 'from python.input.MNIST_input_pipeline import MNIST\n'), ((1379, 1399), 'os.path.splitext', 'os.path.splitext', (['fn'], {}), '(fn)\n', (1395, 1399), False, 'import os\n')] |
from __future__ import division
from collections import defaultdict
import itertools
import sys
import os
import sqlite3
import click
from kSpider2.click_context import cli
import glob
class kClusters:
    """Cluster sequences from kSpider pairwise shared-kmer counts.

    Edges that pass the containment cutoff are split into two tiers: strong
    edges (>= shared_kmers_threshold shared k-mers) drive the primary
    union-find clustering; weak edges are applied afterwards in
    post_clustering() to attach or create additional clusters.
    """
    # NOTE(review): these are *class-level* mutable attributes, so every
    # kClusters instance in the same process shares the same lists/dicts.
    # Safe only because the CLI builds a single instance per run.
    source = []
    target = []
    source2 = []
    target2 = []
    seq_to_kmers = dict()
    names_map = dict()
    components = defaultdict(set)
    def __init__(self, logger_obj, index_prefix, cut_off_threshold):
        # Derive the three input file paths from the index prefix, then
        # preload the per-sequence k-mer counts and the id -> name map.
        self.Logger = logger_obj
        self.names_file = index_prefix + ".namesMap"
        self.cut_off_threshold = cut_off_threshold
        self.seqToKmers_file = index_prefix + "_kSpider_seqToKmersNo.tsv"
        self.pairwise_file = index_prefix + "_kSpider_pairwise.tsv"
        self.uncovered_seqs = set()
        # Minimum absolute shared k-mers for a "strong" edge (tsv_build_graph).
        self.shared_kmers_threshold = 200
        self.seq_to_clusterid = dict()
        self.max_cluster_id = 0
        self.Logger.INFO("Loading TSV pairwise file")
        self.load_seq_to_kmers(self.seqToKmers_file)
        self.tsv_get_namesmap()
    def load_seq_to_kmers(self, tsv):
        """Fill seq_to_kmers: sequence id -> total k-mer count (skip header)."""
        with open(tsv) as KMER_COUNT:
            next(KMER_COUNT)
            for line in KMER_COUNT:
                # Columns are <row_no>\t<seq_ID>\t<no_of_kmers>; drop the first.
                seq_ID, no_of_kmers = tuple(line.strip().split('\t')[1:])
                self.seq_to_kmers[int(seq_ID)] = int(no_of_kmers)
    def ids_to_names(self, cluster):
        """Translate a collection of sequence ids to their original names."""
        new_cluster = []
        for _id in cluster:
            new_cluster.append(self.names_map[int(_id)])
        return new_cluster
    def tsv_get_namesmap(self):
        """Fill names_map: sequence id -> sequence name (skip header)."""
        with open(self.names_file, 'r') as namesMap:
            next(namesMap)  # skip the header
            for row in namesMap:
                row = row.strip().split()
                self.names_map[int(row[0])] = row[1]
    def tsv_build_graph(self):
        """Read the pairwise TSV and route edges into strong/weak tiers.

        An edge survives only if containment = shared_kmers / min(|A|, |B|)
        reaches cut_off_threshold; it then goes to the weak tier
        (source2/target2) when shared_kmers < shared_kmers_threshold,
        otherwise to the strong tier (source/target).
        """
        with open(self.pairwise_file, 'r') as pairwise_tsv:
            next(pairwise_tsv)  # skip header
            for row in pairwise_tsv:
                row = row.strip().split()
                seq1 = int(row[1])
                seq2 = int(row[2])
                shared_kmers = int(row[3])
                containment = 0.0
                # Containment is measured against the smaller of the pair.
                min_seq = float(
                    min(self.seq_to_kmers[seq1], self.seq_to_kmers[seq2]))
                containment = shared_kmers / min_seq
                if containment < self.cut_off_threshold:
                    continue
                if shared_kmers < self.shared_kmers_threshold:
                    self.source2.append(seq1)
                    self.target2.append(seq2)
                elif shared_kmers >= self.shared_kmers_threshold:
                    self.source.append(seq1)
                    self.target.append(seq2)
        # # For covering clusters with single sequence
        # Sequences touching no strong edge become singleton candidates;
        # post_clustering() decides their final cluster.
        uncovered_seqs_1 = set(self.names_map.keys()) - \
            set(self.source).union(set(self.target))
        for seq in uncovered_seqs_1:
            self.uncovered_seqs.add(seq)
        # OR:
        # for i in range(1, len(self.names_map) + 1, 1):
        #     self.source.append(i)
        #     self.target.append(i)
    def clustering(self):
        """Union-find over the strong edges; renumber components densely from
        1 and record each member's cluster id in seq_to_clusterid."""
        registers = defaultdict(lambda: None)
        def find(x):
            # Path-compressing find: follow parents and cache the root.
            l = registers[x]
            if l is not None:
                l = find(l)
                registers[x] = l
                return l
            return x
        def union(x, y):
            lx, ly = find(x), find(y)
            if lx != ly:
                registers[lx] = ly
        # pop() drains source/target as we go (they are class-level lists);
        # popping both from the end keeps each edge's endpoints paired.
        for i in range(len(self.source)):
            union(self.source.pop(), self.target.pop())
        for x in registers:
            self.components[find(x)].add(x)
        temp_components = self.components.copy()
        self.components.clear()
        # Renumber components with dense ids starting at 1.
        for cluster_id, (k, v) in enumerate(temp_components.items(), 1):
            self.components[cluster_id] = set(v)
            for seq in v:
                self.seq_to_clusterid[seq] = cluster_id
        temp_components.clear()
        self.post_clustering()
    def post_clustering(self):
        """Apply the weak edges: attach their unassigned sequences to existing
        strong clusters when possible, otherwise open new clusters; finally
        emit singleton clusters for still-uncovered sequences."""
        registers2 = defaultdict(lambda: None)
        local_components = defaultdict(set)
        covered_seqs = set()
        def find(x):
            l = registers2[x]
            if l is not None:
                l = find(l)
                registers2[x] = l
                return l
            return x
        def union(x, y):
            lx, ly = find(x), find(y)
            if lx != ly:
                registers2[lx] = ly
        for i in range(len(self.source2)):
            union(self.source2.pop(), self.target2.pop())
        for x in registers2:
            local_components[find(x)].add(x)
        self.components = dict(self.components)
        covered_clusters = set()
        for cluster2_id, (k, v) in enumerate(local_components.items(), 1):
            for seq in v:
                covered_seqs.add(seq)
            for seq in v:
                if seq in self.seq_to_clusterid:
                    # The weak component touches a strong cluster: copy all of
                    # its not-yet-assigned members into that cluster.
                    # NOTE(review): a weak component touching several strong
                    # clusters adds its unassigned members to each of them
                    # (the clusters are not merged) — confirm this duplication
                    # is intended.
                    cluster_id = self.seq_to_clusterid[seq]
                    to_be_added = set()
                    for i in v:
                        if i not in self.seq_to_clusterid:
                            to_be_added.add(i)
                    self.components[cluster_id] = self.components[cluster_id].union(
                        to_be_added)
                    covered_clusters.add(k)
                    continue
        self.uncovered_seqs = self.uncovered_seqs - covered_seqs
        uncovered_clusters = set(local_components.keys()) - covered_clusters
        max_id = len(self.components)
        # Weak components with no strong anchor become brand-new clusters.
        for i, unc in enumerate(uncovered_clusters, 1):
            max_id += 1
            self.components[max_id] = local_components[unc]
        # Anything never seen in any edge becomes a singleton cluster.
        for seq in self.uncovered_seqs:
            max_id += 1
            self.components[max_id] = {seq}
    def export_kCluster(self):
        """Write <prefix>.clusters.tsv mapping cluster id -> '|'-joined names."""
        kCluster_file_name = f"kSpider_{self.cut_off_threshold:.2f}%_"
        kCluster_file_name += os.path.basename(
            self.pairwise_file).split(".")[0]
        kCluster_file_name += ".clusters.tsv"
        # NOTE(review): the file handle shadows the class name `kClusters`
        # inside this method; harmless but confusing.
        with open(kCluster_file_name, 'w') as kClusters:
            kClusters.write("kClust_id\tseqs_ids\n")
            for cluster_id, (k, v) in enumerate(self.components.items(), 1):
                kClusters.write(
                    f"{cluster_id}\t{'|'.join(self.ids_to_names(v))}\n")
        self.Logger.INFO(f"Total Number Of Clusters: {cluster_id}")
"""
TODO:
New help messages
1. containment cutoff (sim_cutoff): cluster sequences with (containment > cutoff) where containment = shared kmers % to the total kmers in the smallest node.
2. connectivity cutoff (con_cutoff): cluster sequences with (connectivity > cutoff) where connectivity = shared kmers % to the total kmers in the largest node.
3. min count cutoff (min_count): the min kmers count of a node to connect two clusters, otherwise the node will be reported twice in both clusters.
"""
@cli.command(name="cluster", help_priority=5)
@click.option('-c', '--cutoff', required=False, type=click.FloatRange(0, 1, clamp=False), default=0.0, show_default=True, help="cluster sequences with (containment > cutoff)")
@click.option('-i', '--index-prefix', "index_prefix", required=True, type=click.STRING, help="kProcessor index file prefix")
@click.pass_context
def main(ctx, index_prefix, cutoff):
"""Sequence clustering."""
kCl = kClusters(logger_obj=ctx.obj,
index_prefix=index_prefix, cut_off_threshold=cutoff)
ctx.obj.INFO("Building the main graph...")
kCl.tsv_build_graph()
ctx.obj.INFO("Clustering...")
kCl.clustering()
ctx.obj.INFO("Exporting ...")
kCl.export_kCluster()
| [
"click.option",
"collections.defaultdict",
"os.path.basename",
"kSpider2.click_context.cli.command",
"click.FloatRange"
] | [((6889, 6933), 'kSpider2.click_context.cli.command', 'cli.command', ([], {'name': '"""cluster"""', 'help_priority': '(5)'}), "(name='cluster', help_priority=5)\n", (6900, 6933), False, 'from kSpider2.click_context import cli\n'), ((7111, 7239), 'click.option', 'click.option', (['"""-i"""', '"""--index-prefix"""', '"""index_prefix"""'], {'required': '(True)', 'type': 'click.STRING', 'help': '"""kProcessor index file prefix"""'}), "('-i', '--index-prefix', 'index_prefix', required=True, type=\n click.STRING, help='kProcessor index file prefix')\n", (7123, 7239), False, 'import click\n'), ((337, 353), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (348, 353), False, 'from collections import defaultdict\n'), ((3109, 3135), 'collections.defaultdict', 'defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (3120, 3135), False, 'from collections import defaultdict\n'), ((4023, 4049), 'collections.defaultdict', 'defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (4034, 4049), False, 'from collections import defaultdict\n'), ((4076, 4092), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (4087, 4092), False, 'from collections import defaultdict\n'), ((6987, 7022), 'click.FloatRange', 'click.FloatRange', (['(0)', '(1)'], {'clamp': '(False)'}), '(0, 1, clamp=False)\n', (7003, 7022), False, 'import click\n'), ((5912, 5948), 'os.path.basename', 'os.path.basename', (['self.pairwise_file'], {}), '(self.pairwise_file)\n', (5928, 5948), False, 'import os\n')] |
import pandas as pd
def parse_bed(log_file, out):
    """Convert a 6-column BED of primers (read from the snakemake input) into
    the chrom/left/right table expected downstream, writing to ``out``.

    '+' primers populate the left columns, '-' primers the right ones; rows
    with any other strand value are reported to ``log_file``.
    """
    print("chrom\tleft_start\tleft_end\tright_start\tright_end", file=out)
    reader = pd.read_csv(
        snakemake.input[0],
        sep="\t",
        header=None,
        chunksize=chunksize,
        usecols=[0, 1, 2, 5],
    )
    for chunk in reader:
        for row_id, row in chunk.iterrows():
            strand = row[5]
            if strand == "+":
                # +1 converts the 0-based BED start to 1-based.
                print(f"{row[0]}\t{row[1] + 1}\t{row[2]}\t-1\t-1", file=out)
            elif strand == "-":
                print(f"{row[0]}\t-1\t-1\t{row[1] + 1}\t{row[2]}", file=out)
            else:
                print(f"Invalid strand in row {row_id}", file=log_file)
def parse_bedpe(log_file, out):
    """Convert a BEDPE primer file (read from the snakemake input) into the
    downstream chrom/left/right table, writing valid rows to ``out``.

    A row is valid when both mates map to the same chromosome (column 0 ==
    column 3); invalid rows are echoed to ``log_file``.
    """
    for data_primers in pd.read_csv(
        snakemake.input[0],
        sep="\t",
        header=None,
        chunksize=chunksize,
        usecols=[0, 1, 2, 3, 4, 5],
    ):
        valid_primers = data_primers[0] == data_primers[3]
        valid_data = data_primers[valid_primers].copy()
        # Positional columns 1 and 4 hold the 0-based start coordinates of
        # the two mates; +1 converts both to 1-based.
        valid_data.iloc[:, [1, 4]] += 1
        # Column 3 duplicates the chromosome of column 0 for valid rows.
        valid_data.drop(columns=[3], inplace=True)
        valid_data.dropna(how="all", inplace=True)
        # NOTE(review): the header is written once per chunk, so inputs larger
        # than `chunksize` rows repeat the header mid-file — confirm intended.
        valid_data.to_csv(
            out,
            sep="\t",
            index=False,
            header=["chrom", "left_start", "left_end", "right_start", "right_end"],
        )
        # Invalid (cross-chromosome) rows go to the log verbatim.
        print(
            data_primers[~valid_primers].to_csv(sep="\t", index=False, header=False),
            file=log_file,
        )
# One million rows per pandas chunk keeps memory bounded on large inputs.
chunksize = 10 ** 6
with open(snakemake.output[0], "w") as out:
    with open(snakemake.log[0], "w") as log_file:
        # Dispatch on the input extension: paired-end (bedpe) vs plain bed.
        if snakemake.input[0].endswith("bedpe"):
            parse_bedpe(log_file, out)
        else:
            parse_bed(log_file, out)
| [
"pandas.read_csv"
] | [((151, 252), 'pandas.read_csv', 'pd.read_csv', (['snakemake.input[0]'], {'sep': '"""\t"""', 'header': 'None', 'chunksize': 'chunksize', 'usecols': '[0, 1, 2, 5]'}), "(snakemake.input[0], sep='\\t', header=None, chunksize=chunksize,\n usecols=[0, 1, 2, 5])\n", (162, 252), True, 'import pandas as pd\n'), ((1054, 1161), 'pandas.read_csv', 'pd.read_csv', (['snakemake.input[0]'], {'sep': '"""\t"""', 'header': 'None', 'chunksize': 'chunksize', 'usecols': '[0, 1, 2, 3, 4, 5]'}), "(snakemake.input[0], sep='\\t', header=None, chunksize=chunksize,\n usecols=[0, 1, 2, 3, 4, 5])\n", (1065, 1161), True, 'import pandas as pd\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
class MapColorControl:
    """Map a data array onto hex colour strings taken from a matplotlib
    colormap, after normalising with a linear/log/power-law scale."""

    def __init__(self, colour_scheme, map_normalization, data):
        # 256 RGB rows sampled from the requested colormap (alpha dropped).
        self.colors = plt.get_cmap(colour_scheme)(range(256))[:, :3]
        self.data = data
        # Log/power norms need strictly positive input: shift above zero.
        if self.data.min() <= 0:
            self.data = self.data + abs(self.data.min()) + 1
        low, high = self.data.min(), self.data.max()
        if map_normalization == "Linear":
            self.normNorm = colors.Normalize(vmin=low, vmax=high)
        elif map_normalization == "Logarithmic":
            self.normNorm = colors.LogNorm(vmin=low, vmax=high)
        elif map_normalization == "Power-law":
            self.normNorm = colors.PowerNorm(gamma=2, vmin=low, vmax=high)

    def get_map_data(self):
        """Return the data as a list of hex colour strings."""
        indices = np.round(self.normNorm(self.data) * 255)
        return self.map(indices)

    def map(self, infos):
        """Translate normalised 0-255 indices into hex colour strings."""
        return [colors.rgb2hex(self.colors[int(index)]) for index in infos]
class MapControl:
    """Rescale a data array into [min_value, max_value] after a linear/log/
    power-law normalisation."""

    def __init__(self, max_value, min_value, map_normalization, data):
        self.data = data
        # Log/power norms need strictly positive input: shift above zero.
        if self.data.min() <= 0:
            self.data = self.data + abs(self.data.min()) + 1
        low, high = self.data.min(), self.data.max()
        if map_normalization == "Linear":
            self.normNorm = colors.Normalize(vmin=low, vmax=high)
        elif map_normalization == "Logarithmic":
            self.normNorm = colors.LogNorm(vmin=low, vmax=high)
        elif map_normalization == "Power-law":
            self.normNorm = colors.PowerNorm(gamma=2, vmin=low, vmax=high)
        self.maxValue = max_value
        self.minValue = min_value

    def get_map_data(self, is_round):
        """Return the rescaled values as a list: 5-decimal floats when
        ``is_round`` is truthy, otherwise rounded to whole numbers."""
        span = self.maxValue - self.minValue
        scaled = self.normNorm(self.data) * span + self.minValue
        datum = np.round(scaled, 5) if is_round else np.round(scaled)
        return list(datum)
"matplotlib.pyplot.get_cmap"
] | [((198, 225), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['colour_scheme'], {}), '(colour_scheme)\n', (210, 225), True, 'import matplotlib.pyplot as plt\n')] |
from django.conf.urls import patterns, url
from lattice.views import (lattices)
from lattice.views import (saveLatticeInfo, saveLattice)
from lattice.views import (saveModel)
from lattice.views import (lattice_home, lattice_content_home, lattice_content_search, lattice_content_list, lattice_content_model_list, lattice_content_details, lattice_content_model_details)
from lattice.views import (lattice_modal, saveLatticeHelper, saveLatticeTypeHelper, saveLatticeStatusHelper, saveModelHelper, saveModelStatusHelper)
# NOTE(review): django.conf.urls.patterns() is deprecated (removed in Django
# 1.10); on upgrade, replace with a plain list of url()/path() entries.
urlpatterns = patterns(
    '',
    # return raw data not thru html ui
    url(r'^lattice/$',
        lattices,
        name='lattices'),
    url(r'^lattice/savelatticeinfo/$',
        saveLatticeInfo,
        name='saveLatticeInfo'),
    url(r'^lattice/savelattice$',
        saveLattice,
        name='saveLattice'),
    url(r'^lattice/savemodel$',
        saveModel,
        name='saveModel'),
    # HTML UI pages
    url(r'^lattice/web/$',
        lattice_home,
        name='lattice_home'),
    url(r'^lattice/web/index.html$',
        lattice_home,
        name='lattice_home'),
    url(r'^lattice/web/content.html$',
        lattice_content_home,
        name='lattice_content_home'),
    url(r'^lattice/web/search.html$',
        lattice_content_search,
        name='lattice_content_search'),
    url(r'^lattice/web/list.html$',
        lattice_content_list,
        name='lattice_content_list'),
    url(r'^lattice/web/model_list.html$',
        lattice_content_model_list,
        name='lattice_content_model_list'),
    url(r'^lattice/web/details.html$',
        lattice_content_details,
        name='lattice_content_details'),
    url(r'^lattice/web/model_details.html$',
        lattice_content_model_details,
        name='lattice_content_model_details'),
    url(r'^lattice/web/modal/',
        lattice_modal,
        name='lattice_modal'),
    # save/upload helper endpoints
    # NOTE(review): 'saveLatticTypeeHelper' looks like a typo for
    # 'saveLatticeTypeHelper'; it is a runtime route name used by reverse(),
    # so renaming needs a coordinated change in templates/callers.
    url(r'^lattice/savelatticetype$',
        saveLatticeTypeHelper,
        name='saveLatticTypeeHelper'),
    url(r'^lattice/upload$',
        saveLatticeHelper,
        name='saveLatticeHelper'),
    url(r'^lattice/savestatus$',
        saveLatticeStatusHelper,
        name='saveLatticeStatusHelper'),
    url(r'^model/upload$',
        saveModelHelper,
        name='saveModelHelper'),
    url(r'^model/savestatus$',
        saveModelStatusHelper,
        name='saveModelStatusHelper'),
)
| [
"django.conf.urls.url"
] | [((594, 638), 'django.conf.urls.url', 'url', (['"""^lattice/$"""', 'lattices'], {'name': '"""lattices"""'}), "('^lattice/$', lattices, name='lattices')\n", (597, 638), False, 'from django.conf.urls import patterns, url\n'), ((662, 736), 'django.conf.urls.url', 'url', (['"""^lattice/savelatticeinfo/$"""', 'saveLatticeInfo'], {'name': '"""saveLatticeInfo"""'}), "('^lattice/savelatticeinfo/$', saveLatticeInfo, name='saveLatticeInfo')\n", (665, 736), False, 'from django.conf.urls import patterns, url\n'), ((760, 821), 'django.conf.urls.url', 'url', (['"""^lattice/savelattice$"""', 'saveLattice'], {'name': '"""saveLattice"""'}), "('^lattice/savelattice$', saveLattice, name='saveLattice')\n", (763, 821), False, 'from django.conf.urls import patterns, url\n'), ((845, 900), 'django.conf.urls.url', 'url', (['"""^lattice/savemodel$"""', 'saveModel'], {'name': '"""saveModel"""'}), "('^lattice/savemodel$', saveModel, name='saveModel')\n", (848, 900), False, 'from django.conf.urls import patterns, url\n'), ((924, 980), 'django.conf.urls.url', 'url', (['"""^lattice/web/$"""', 'lattice_home'], {'name': '"""lattice_home"""'}), "('^lattice/web/$', lattice_home, name='lattice_home')\n", (927, 980), False, 'from django.conf.urls import patterns, url\n'), ((1003, 1069), 'django.conf.urls.url', 'url', (['"""^lattice/web/index.html$"""', 'lattice_home'], {'name': '"""lattice_home"""'}), "('^lattice/web/index.html$', lattice_home, name='lattice_home')\n", (1006, 1069), False, 'from django.conf.urls import patterns, url\n'), ((1092, 1181), 'django.conf.urls.url', 'url', (['"""^lattice/web/content.html$"""', 'lattice_content_home'], {'name': '"""lattice_content_home"""'}), "('^lattice/web/content.html$', lattice_content_home, name=\n 'lattice_content_home')\n", (1095, 1181), False, 'from django.conf.urls import patterns, url\n'), ((1199, 1291), 'django.conf.urls.url', 'url', (['"""^lattice/web/search.html$"""', 'lattice_content_search'], {'name': '"""lattice_content_search"""'}), 
"('^lattice/web/search.html$', lattice_content_search, name=\n 'lattice_content_search')\n", (1202, 1291), False, 'from django.conf.urls import patterns, url\n'), ((1309, 1395), 'django.conf.urls.url', 'url', (['"""^lattice/web/list.html$"""', 'lattice_content_list'], {'name': '"""lattice_content_list"""'}), "('^lattice/web/list.html$', lattice_content_list, name=\n 'lattice_content_list')\n", (1312, 1395), False, 'from django.conf.urls import patterns, url\n'), ((1413, 1517), 'django.conf.urls.url', 'url', (['"""^lattice/web/model_list.html$"""', 'lattice_content_model_list'], {'name': '"""lattice_content_model_list"""'}), "('^lattice/web/model_list.html$', lattice_content_model_list, name=\n 'lattice_content_model_list')\n", (1416, 1517), False, 'from django.conf.urls import patterns, url\n'), ((1535, 1630), 'django.conf.urls.url', 'url', (['"""^lattice/web/details.html$"""', 'lattice_content_details'], {'name': '"""lattice_content_details"""'}), "('^lattice/web/details.html$', lattice_content_details, name=\n 'lattice_content_details')\n", (1538, 1630), False, 'from django.conf.urls import patterns, url\n'), ((1648, 1761), 'django.conf.urls.url', 'url', (['"""^lattice/web/model_details.html$"""', 'lattice_content_model_details'], {'name': '"""lattice_content_model_details"""'}), "('^lattice/web/model_details.html$', lattice_content_model_details, name\n ='lattice_content_model_details')\n", (1651, 1761), False, 'from django.conf.urls import patterns, url\n'), ((1780, 1843), 'django.conf.urls.url', 'url', (['"""^lattice/web/modal/"""', 'lattice_modal'], {'name': '"""lattice_modal"""'}), "('^lattice/web/modal/', lattice_modal, name='lattice_modal')\n", (1783, 1843), False, 'from django.conf.urls import patterns, url\n'), ((1867, 1957), 'django.conf.urls.url', 'url', (['"""^lattice/savelatticetype$"""', 'saveLatticeTypeHelper'], {'name': '"""saveLatticTypeeHelper"""'}), "('^lattice/savelatticetype$', saveLatticeTypeHelper, name=\n 'saveLatticTypeeHelper')\n", 
(1870, 1957), False, 'from django.conf.urls import patterns, url\n'), ((1976, 2044), 'django.conf.urls.url', 'url', (['"""^lattice/upload$"""', 'saveLatticeHelper'], {'name': '"""saveLatticeHelper"""'}), "('^lattice/upload$', saveLatticeHelper, name='saveLatticeHelper')\n", (1979, 2044), False, 'from django.conf.urls import patterns, url\n'), ((2068, 2157), 'django.conf.urls.url', 'url', (['"""^lattice/savestatus$"""', 'saveLatticeStatusHelper'], {'name': '"""saveLatticeStatusHelper"""'}), "('^lattice/savestatus$', saveLatticeStatusHelper, name=\n 'saveLatticeStatusHelper')\n", (2071, 2157), False, 'from django.conf.urls import patterns, url\n'), ((2176, 2238), 'django.conf.urls.url', 'url', (['"""^model/upload$"""', 'saveModelHelper'], {'name': '"""saveModelHelper"""'}), "('^model/upload$', saveModelHelper, name='saveModelHelper')\n", (2179, 2238), False, 'from django.conf.urls import patterns, url\n'), ((2262, 2340), 'django.conf.urls.url', 'url', (['"""^model/savestatus$"""', 'saveModelStatusHelper'], {'name': '"""saveModelStatusHelper"""'}), "('^model/savestatus$', saveModelStatusHelper, name='saveModelStatusHelper')\n", (2265, 2340), False, 'from django.conf.urls import patterns, url\n')] |
import unittest
import torch
from parameterized import parameterized
from torecsys.losses import *
# Prefer the first GPU when CUDA is available; otherwise fall back to CPU.
if torch.cuda.is_available():
    device = 'cuda:0'
else:
    device = 'cpu'
class AdaptiveHingeLossTestCase(unittest.TestCase):
    """Shape smoke test for AdaptiveHingeLoss over several batch/negative sizes."""

    @parameterized.expand([
        (4, 32,),
        (16, 16,),
        (32, 4,),
    ])
    def test_forward(self, batch_size: int, num_neg: int):
        criterion = AdaptiveHingeLoss().to(device)
        positive = torch.rand(batch_size, 1)
        negative = torch.rand(batch_size, num_neg)
        # NOTE(review): randint(0, 1, ...) yields only zeros, so this mask is
        # always all-False; randint(0, 2, ...) was probably intended — confirm.
        mask = torch.randint(0, 1, (batch_size,)) == 1
        loss = criterion(positive, negative, mask)
        self.assertEqual(loss.size(), torch.Size([]))
        print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class BayesianPersonalizedRankingLossTestCase(unittest.TestCase):
    """Shape smoke test for BayesianPersonalizedRankingLoss (sum reduction)."""

    @parameterized.expand([
        (4, 32,),
        (16, 16,),
        (32, 4,),
    ])
    def test_forward(self, batch_size: int, num_neg: int):
        criterion = BayesianPersonalizedRankingLoss(reduction='sum').to(device)
        positive = torch.rand(batch_size, 1)
        negative = torch.rand(batch_size, num_neg)
        # NOTE(review): randint(0, 1, ...) yields only zeros, so this mask is
        # always all-False; randint(0, 2, ...) was probably intended — confirm.
        mask = torch.randint(0, 1, (batch_size,)) == 1
        loss = criterion(positive, negative, mask)
        self.assertEqual(loss.size(), torch.Size([]))
        print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class HingeLossTestCase(unittest.TestCase):
    """Shape smoke test for HingeLoss over several batch/negative sizes."""

    @parameterized.expand([
        (4, 32,),
        (16, 16,),
        (32, 4,),
    ])
    def test_forward(self, batch_size: int, num_neg: int):
        criterion = HingeLoss().to(device)
        positive = torch.rand(batch_size, 1)
        negative = torch.rand(batch_size, num_neg)
        # NOTE(review): randint(0, 1, ...) yields only zeros, so this mask is
        # always all-False; randint(0, 2, ...) was probably intended — confirm.
        mask = torch.randint(0, 1, (batch_size,)) == 1
        loss = criterion(positive, negative, mask)
        self.assertEqual(loss.size(), torch.Size([]))
        print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class ListnetLossTestCase(unittest.TestCase):
    """Shape smoke test for ListnetLoss over several batch/list-length sizes."""

    @parameterized.expand([
        (4, 32,),
        (16, 16,),
        (32, 4,),
    ])
    def test_forward(self, batch_size: int, length: int):
        criterion = ListnetLoss().to(device)
        predictions = torch.rand(batch_size, length)
        targets = torch.rand(batch_size, length)
        # NOTE(review): randint(0, 1, ...) yields only zeros, so this mask is
        # always all-False; randint(0, 2, ...) was probably intended — confirm.
        mask = torch.randint(0, 1, (batch_size,)) == 1
        loss = criterion(predictions, targets, mask)
        self.assertEqual(loss.size(), torch.Size([]))
        print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class PointwiseLogisticLossTestCase(unittest.TestCase):
    """Shape smoke test for PointwiseLogisticLoss over several sizes."""

    @parameterized.expand([
        (4, 32,),
        (16, 16,),
        (32, 4,),
    ])
    def test_forward(self, batch_size: int, num_neg: int):
        criterion = PointwiseLogisticLoss().to(device)
        positive = torch.rand(batch_size, 1)
        negative = torch.rand(batch_size, num_neg)
        # NOTE(review): randint(0, 1, ...) yields only zeros, so this mask is
        # always all-False; randint(0, 2, ...) was probably intended — confirm.
        mask = torch.randint(0, 1, (batch_size,)) == 1
        loss = criterion(positive, negative, mask)
        self.assertEqual(loss.size(), torch.Size([]))
        print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class SkipGramLossTestCase(unittest.TestCase):
    """Shape smoke test for SkipGramLoss over several embedding configurations."""

    @parameterized.expand([
        (4, 32, 32,),
        (16, 64, 16,),
        (32, 128, 4,),
    ])
    def test_forward(self, batch_size: int, embed_size: int, num_neg: int):
        criterion = SkipGramLoss().to(device)
        content = torch.rand(batch_size, 1, embed_size)
        positive = torch.rand(batch_size, 1, embed_size)
        negative = torch.rand(batch_size, num_neg, embed_size)
        loss = criterion(content, positive, negative)
        self.assertEqual(loss.size(), torch.Size([]))
        print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class TripletLossTestCase(unittest.TestCase):
    """Shape smoke test for TripletLoss (margin 1.0, sum reduction)."""

    @parameterized.expand([
        (4, 32, 32,),
        (16, 64, 16,),
        (32, 128, 4,),
    ])
    def test_forward(self, batch_size: int, embed_size: int, num_neg: int):
        criterion = TripletLoss(margin=1.0, reduction='sum').to(device)
        positive = torch.rand(batch_size, 1)
        negative = torch.rand(batch_size, num_neg)
        # NOTE(review): randint(0, 1, ...) yields only zeros, so this mask is
        # always all-False; randint(0, 2, ...) was probably intended — confirm.
        mask = torch.randint(0, 1, (batch_size,)) == 1
        loss = criterion(positive, negative, mask)
        self.assertEqual(loss.size(), torch.Size([]))
        print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
if __name__ == '__main__':
    # Run every TestCase in this module through the unittest runner.
    unittest.main()
| [
"parameterized.parameterized.expand",
"torch.randint",
"torch.cuda.is_available",
"unittest.main",
"torch.Size",
"torch.rand"
] | [((123, 148), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (146, 148), False, 'import torch\n'), ((219, 269), 'parameterized.parameterized.expand', 'parameterized.expand', (['[(4, 32), (16, 16), (32, 4)]'], {}), '([(4, 32), (16, 16), (32, 4)])\n', (239, 269), False, 'from parameterized import parameterized\n'), ((855, 905), 'parameterized.parameterized.expand', 'parameterized.expand', (['[(4, 32), (16, 16), (32, 4)]'], {}), '([(4, 32), (16, 16), (32, 4)])\n', (875, 905), False, 'from parameterized import parameterized\n'), ((1498, 1548), 'parameterized.parameterized.expand', 'parameterized.expand', (['[(4, 32), (16, 16), (32, 4)]'], {}), '([(4, 32), (16, 16), (32, 4)])\n', (1518, 1548), False, 'from parameterized import parameterized\n'), ((2106, 2156), 'parameterized.parameterized.expand', 'parameterized.expand', (['[(4, 32), (16, 16), (32, 4)]'], {}), '([(4, 32), (16, 16), (32, 4)])\n', (2126, 2156), False, 'from parameterized import parameterized\n'), ((2723, 2773), 'parameterized.parameterized.expand', 'parameterized.expand', (['[(4, 32), (16, 16), (32, 4)]'], {}), '([(4, 32), (16, 16), (32, 4)])\n', (2743, 2773), False, 'from parameterized import parameterized\n'), ((3344, 3407), 'parameterized.parameterized.expand', 'parameterized.expand', (['[(4, 32, 32), (16, 64, 16), (32, 128, 4)]'], {}), '([(4, 32, 32), (16, 64, 16), (32, 128, 4)])\n', (3364, 3407), False, 'from parameterized import parameterized\n'), ((4001, 4064), 'parameterized.parameterized.expand', 'parameterized.expand', (['[(4, 32, 32), (16, 64, 16), (32, 128, 4)]'], {}), '([(4, 32, 32), (16, 64, 16), (32, 128, 4)])\n', (4021, 4064), False, 'from parameterized import parameterized\n'), ((4648, 4663), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4661, 4663), False, 'import unittest\n'), ((463, 488), 'torch.rand', 'torch.rand', (['batch_size', '(1)'], {}), '(batch_size, 1)\n', (473, 488), False, 'import torch\n'), ((507, 538), 'torch.rand', 'torch.rand', 
(['batch_size', 'num_neg'], {}), '(batch_size, num_neg)\n', (517, 538), False, 'import torch\n'), ((554, 588), 'torch.randint', 'torch.randint', (['(0)', '(1)', '(batch_size,)'], {}), '(0, 1, (batch_size,))\n', (567, 588), False, 'import torch\n'), ((1128, 1153), 'torch.rand', 'torch.rand', (['batch_size', '(1)'], {}), '(batch_size, 1)\n', (1138, 1153), False, 'import torch\n'), ((1172, 1203), 'torch.rand', 'torch.rand', (['batch_size', 'num_neg'], {}), '(batch_size, num_neg)\n', (1182, 1203), False, 'import torch\n'), ((1219, 1253), 'torch.randint', 'torch.randint', (['(0)', '(1)', '(batch_size,)'], {}), '(0, 1, (batch_size,))\n', (1232, 1253), False, 'import torch\n'), ((1734, 1759), 'torch.rand', 'torch.rand', (['batch_size', '(1)'], {}), '(batch_size, 1)\n', (1744, 1759), False, 'import torch\n'), ((1778, 1809), 'torch.rand', 'torch.rand', (['batch_size', 'num_neg'], {}), '(batch_size, num_neg)\n', (1788, 1809), False, 'import torch\n'), ((1825, 1859), 'torch.randint', 'torch.randint', (['(0)', '(1)', '(batch_size,)'], {}), '(0, 1, (batch_size,))\n', (1838, 1859), False, 'import torch\n'), ((2341, 2371), 'torch.rand', 'torch.rand', (['batch_size', 'length'], {}), '(batch_size, length)\n', (2351, 2371), False, 'import torch\n'), ((2389, 2419), 'torch.rand', 'torch.rand', (['batch_size', 'length'], {}), '(batch_size, length)\n', (2399, 2419), False, 'import torch\n'), ((2435, 2469), 'torch.randint', 'torch.randint', (['(0)', '(1)', '(batch_size,)'], {}), '(0, 1, (batch_size,))\n', (2448, 2469), False, 'import torch\n'), ((2971, 2996), 'torch.rand', 'torch.rand', (['batch_size', '(1)'], {}), '(batch_size, 1)\n', (2981, 2996), False, 'import torch\n'), ((3015, 3046), 'torch.rand', 'torch.rand', (['batch_size', 'num_neg'], {}), '(batch_size, num_neg)\n', (3025, 3046), False, 'import torch\n'), ((3062, 3096), 'torch.randint', 'torch.randint', (['(0)', '(1)', '(batch_size,)'], {}), '(0, 1, (batch_size,))\n', (3075, 3096), False, 'import torch\n'), ((3617, 3654), 
'torch.rand', 'torch.rand', (['batch_size', '(1)', 'embed_size'], {}), '(batch_size, 1, embed_size)\n', (3627, 3654), False, 'import torch\n'), ((3673, 3710), 'torch.rand', 'torch.rand', (['batch_size', '(1)', 'embed_size'], {}), '(batch_size, 1, embed_size)\n', (3683, 3710), False, 'import torch\n'), ((3729, 3772), 'torch.rand', 'torch.rand', (['batch_size', 'num_neg', 'embed_size'], {}), '(batch_size, num_neg, embed_size)\n', (3739, 3772), False, 'import torch\n'), ((4296, 4321), 'torch.rand', 'torch.rand', (['batch_size', '(1)'], {}), '(batch_size, 1)\n', (4306, 4321), False, 'import torch\n'), ((4340, 4371), 'torch.rand', 'torch.rand', (['batch_size', 'num_neg'], {}), '(batch_size, num_neg)\n', (4350, 4371), False, 'import torch\n'), ((4387, 4421), 'torch.randint', 'torch.randint', (['(0)', '(1)', '(batch_size,)'], {}), '(0, 1, (batch_size,))\n', (4400, 4421), False, 'import torch\n'), ((702, 716), 'torch.Size', 'torch.Size', (['[]'], {}), '([])\n', (712, 716), False, 'import torch\n'), ((1367, 1381), 'torch.Size', 'torch.Size', (['[]'], {}), '([])\n', (1377, 1381), False, 'import torch\n'), ((1973, 1987), 'torch.Size', 'torch.Size', (['[]'], {}), '([])\n', (1983, 1987), False, 'import torch\n'), ((2580, 2594), 'torch.Size', 'torch.Size', (['[]'], {}), '([])\n', (2590, 2594), False, 'import torch\n'), ((3210, 3224), 'torch.Size', 'torch.Size', (['[]'], {}), '([])\n', (3220, 3224), False, 'import torch\n'), ((3868, 3882), 'torch.Size', 'torch.Size', (['[]'], {}), '([])\n', (3878, 3882), False, 'import torch\n'), ((4535, 4549), 'torch.Size', 'torch.Size', (['[]'], {}), '([])\n', (4545, 4549), False, 'import torch\n')] |
import torch, math, copy
import scipy.sparse as sp
import numpy as np
from torch.nn.modules.module import Module
import torch.nn as nn
from torch.nn.parameter import Parameter
def normalize(adj, device='cpu'):
    """Symmetrically normalize an adjacency matrix: D^-1/2 (A + I) D^-1/2.

    Accepts a torch.Tensor, a scipy CSR matrix, or a numpy array and returns
    a dense float tensor on ``device``.
    """
    # Coerce every supported input kind to a float tensor on the target device.
    if isinstance(adj, torch.Tensor):
        dense = adj.to(device)
    elif isinstance(adj, sp.csr_matrix):
        dense = torch.from_numpy(adj.toarray()).float().to(device)
    elif isinstance(adj, np.ndarray):
        dense = torch.from_numpy(adj).float().to(device)
    else:
        dense = adj.to(device)
    # Adding self-loops keeps every row sum strictly positive before the
    # -1/2 power below.
    dense = dense + torch.eye(dense.shape[0]).to(device)
    degrees = dense.sum(1)
    inv_sqrt_degree = torch.diag(torch.pow(degrees, -0.5).flatten())
    # D^-1/2 @ (A + I) @ D^-1/2
    return torch.mm(torch.spmm(inv_sqrt_degree, dense), inv_sqrt_degree)
def coo_to_csp(sp_coo):
    """Convert a scipy.sparse.coo_matrix to a torch sparse COO float tensor.

    :param sp_coo: scipy COO matrix; may be rectangular (the previous
        implementation assumed a square matrix by reusing ``shape[0]`` for
        both dimensions) and may hold float64 data (now cast to float32,
        where the old ``torch.sparse.FloatTensor`` constructor would fail).
    :return: torch sparse COO tensor with dtype float32 and the matrix shape
    """
    # Stack row/col into the (2, nnz) index layout torch expects.
    indices = torch.as_tensor(np.stack([sp_coo.row, sp_coo.col]), dtype=torch.int64)
    values = torch.as_tensor(sp_coo.data, dtype=torch.float32)
    return torch.sparse_coo_tensor(indices, values, torch.Size(sp_coo.shape))
#def sp_diag(sp_tensor):
# sp_tensor = sp_tensor.to_dense()
# sp_array = sp_tensor.to('cpu').numpy()
# sp_diags = sp.diags(sp_array).tocoo()
# return coo_to_csp(sp_diags)
def sp_normalize(adj_def, device='cpu'):
    """Symmetrically normalize an adjacency matrix in scipy: D^-1/2 (A + I) D^-1/2.

    :param adj_def: adjacency matrix (anything scipy can coerce to COO)
    :param device: kept for interface compatibility; the result is built on CPU
    :return: tuple of (normalized adjacency as a torch sparse float tensor,
        row-sum degree vector *after* self-loops were added)
    """
    adj_ = sp.coo_matrix(adj_def)
    # Self-loops guarantee strictly positive degrees before the -1/2 power.
    adj_ = adj_ + sp.coo_matrix(sp.eye(adj_def.shape[0]), dtype=np.float32)
    rowsum = np.array(adj_.sum(axis=1)).reshape(-1)
    # One diagonal D^-1/2 matrix is enough; the old code built the same
    # matrix twice (via copy.copy) under a misleading 'sqrt' name.
    inv_sqrt_degree = sp.diags(np.float_power(rowsum, -0.5).astype(np.float32))
    adj_normalized = inv_sqrt_degree @ (adj_ @ inv_sqrt_degree)
    return coo_to_csp(adj_normalized.tocoo()), rowsum
class PairNorm(nn.Module):
    """Pair normalization layer, typically applied after each graph convolution.

    Supported modes:
        'None'   -- identity, no normalization
        'PN'     -- original version: centre columns, rescale by mean row norm
        'PN-SI'  -- scale-individually: centre columns, rescale each row
        'PN-SCS' -- scale-and-center-simultaneously ('SCS' is not in the paper,
                    but works well in practice, especially for GCN and GAT)
    """

    def __init__(self, mode='PN', scale=1):
        assert mode in ['None', 'PN', 'PN-SI', 'PN-SCS']
        super(PairNorm, self).__init__()
        self.mode = mode
        self.scale = scale
        # `scale` can be tuned per data set; a pool that worked well in
        # practice is [0.1, 1, 10, 50, 100].

    def forward(self, x):
        if self.mode == 'None':
            return x

        col_mean = x.mean(dim=0)
        if self.mode == 'PN':
            centred = x - col_mean
            mean_row_norm = (1e-6 + centred.pow(2).sum(dim=1).mean()).sqrt()
            return self.scale * centred / mean_row_norm
        if self.mode == 'PN-SI':
            centred = x - col_mean
            row_norms = (1e-6 + centred.pow(2).sum(dim=1, keepdim=True)).sqrt()
            return self.scale * centred / row_norms
        if self.mode == 'PN-SCS':
            row_norms = (1e-6 + x.pow(2).sum(dim=1, keepdim=True)).sqrt()
            return self.scale * x / row_norms - col_mean
        return x
class GraphConvolution(Module):
    """Simple GCN layer, similar to https://arxiv.org/abs/1609.02907.

    Computes ``act(PairNorm(adj @ (input @ W) + b))``.
    """

    def __init__(self, in_features, out_features, bias=True, mode='None', act=lambda x: x):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        self.pn = PairNorm(mode=mode)
        self.act = act
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            # Registering None keeps `self.bias` addressable either way.
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init in [-1/sqrt(fan_out), 1/sqrt(fan_out)].
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, adj):
        projected = torch.mm(input, self.weight)
        aggregated = torch.mm(adj, projected)
        if self.bias is not None:
            aggregated = aggregated + self.bias
        return self.act(self.pn(aggregated))

    def __repr__(self):
        return f'{self.__class__.__name__} ({self.in_features} -> {self.out_features})'
| [
"scipy.sparse.eye",
"torch.eye",
"torch.FloatTensor",
"torch.pow",
"torch.from_numpy",
"torch.mm",
"torch.tensor",
"numpy.stack",
"scipy.sparse.coo_matrix",
"torch.spmm",
"scipy.sparse.diags",
"copy.copy",
"torch.Size",
"numpy.float_power"
] | [((1654, 1676), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj_def'], {}), '(adj_def)\n', (1667, 1676), True, 'import scipy.sparse as sp\n'), ((1899, 1918), 'scipy.sparse.diags', 'sp.diags', (['norm_unit'], {}), '(norm_unit)\n', (1907, 1918), True, 'import scipy.sparse as sp\n'), ((1942, 1972), 'copy.copy', 'copy.copy', (['degree_mat_inv_sqrt'], {}), '(degree_mat_inv_sqrt)\n', (1951, 1972), False, 'import torch, math, copy\n'), ((791, 828), 'torch.spmm', 'torch.spmm', (['degree_mat_inv_sqrt', 'adj_'], {}), '(degree_mat_inv_sqrt, adj_)\n', (801, 828), False, 'import torch, math, copy\n'), ((1164, 1189), 'torch.tensor', 'torch.tensor', (['sp_coo.data'], {}), '(sp_coo.data)\n', (1176, 1189), False, 'import torch, math, copy\n'), ((1233, 1255), 'torch.Size', 'torch.Size', (['[num, num]'], {}), '([num, num])\n', (1243, 1255), False, 'import torch, math, copy\n'), ((5022, 5050), 'torch.mm', 'torch.mm', (['input', 'self.weight'], {}), '(input, self.weight)\n', (5030, 5050), False, 'import torch, math, copy\n'), ((5069, 5091), 'torch.mm', 'torch.mm', (['adj', 'support'], {}), '(adj, support)\n', (5077, 5091), False, 'import torch, math, copy\n'), ((1099, 1119), 'numpy.stack', 'np.stack', (['[row, col]'], {}), '([row, col])\n', (1107, 1119), True, 'import numpy as np\n'), ((1710, 1734), 'scipy.sparse.eye', 'sp.eye', (['adj_def.shape[0]'], {}), '(adj_def.shape[0])\n', (1716, 1734), True, 'import scipy.sparse as sp\n'), ((1824, 1852), 'numpy.float_power', 'np.float_power', (['rowsum', '(-0.5)'], {}), '(rowsum, -0.5)\n', (1838, 1852), True, 'import numpy as np\n'), ((4451, 4495), 'torch.FloatTensor', 'torch.FloatTensor', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (4468, 4495), False, 'import torch, math, copy\n'), ((556, 580), 'torch.eye', 'torch.eye', (['adj_.shape[0]'], {}), '(adj_.shape[0])\n', (565, 580), False, 'import torch, math, copy\n'), ((656, 679), 'torch.pow', 'torch.pow', (['rowsum', '(-0.5)'], {}), '(rowsum, -0.5)\n', (665, 
679), False, 'import torch, math, copy\n'), ((725, 748), 'torch.pow', 'torch.pow', (['rowsum', '(-0.5)'], {}), '(rowsum, -0.5)\n', (734, 748), False, 'import torch, math, copy\n'), ((4613, 4644), 'torch.FloatTensor', 'torch.FloatTensor', (['out_features'], {}), '(out_features)\n', (4630, 4644), False, 'import torch, math, copy\n'), ((454, 475), 'torch.from_numpy', 'torch.from_numpy', (['adj'], {}), '(adj)\n', (470, 475), False, 'import torch, math, copy\n')] |
import os
from sqlite3 import dbapi2 as sqlite3
class GarageDb:
    """SQLite-backed event history store.

    The schema is (re)applied from ``resource_path/schema.sql`` on every
    construction; the database file lives at ``instance_path/history.db``.
    """

    # Shared SELECT for both history readers; timestamps are converted from
    # the sqlite default (UTC) to local time.
    _HISTORY_SQL = ("select datetime(timestamp, 'localtime') as timestamp, "
                    "event, description from entries order by timestamp desc")

    def __init__(self, instance_path, resource_path):
        self.db_file = os.path.join(instance_path, 'history.db')
        self.init_file = os.path.join(resource_path, 'schema.sql')

        # Run init script to ensure database structure. try/finally closes
        # the connection even if the script fails (the old code leaked it).
        conn = self.get_connection()
        try:
            with open(self.init_file, mode='r') as f:
                conn.cursor().executescript(f.read())
            conn.commit()
        finally:
            conn.close()

    def get_connection(self):
        """Return a new connection whose rows support access by column name."""
        rv = sqlite3.connect(self.db_file)
        rv.row_factory = sqlite3.Row
        return rv

    def record_event(self, user_agent: str, login: str, event: str, description: str):
        """Insert one event row; the timestamp defaults per the schema."""
        conn = self.get_connection()
        try:
            conn.execute('insert into entries (UserAgent, Login, Event, Description) values (?, ?, ?, ?)',
                         [user_agent, login, event, description])
            conn.commit()
        finally:
            conn.close()

    def read_history(self):
        """Return up to the 500 most recent events, newest first."""
        conn = self.get_connection()
        try:
            return conn.execute(self._HISTORY_SQL).fetchmany(500)
        finally:
            conn.close()

    def read_full_history(self):
        """Return every recorded event, newest first."""
        conn = self.get_connection()
        try:
            return conn.execute(self._HISTORY_SQL).fetchall()
        finally:
            conn.close()
| [
"sqlite3.dbapi2.connect",
"os.path.join"
] | [((142, 183), 'os.path.join', 'os.path.join', (['instance_path', '"""history.db"""'], {}), "(instance_path, 'history.db')\n", (154, 183), False, 'import os\n'), ((209, 250), 'os.path.join', 'os.path.join', (['resource_path', '"""schema.sql"""'], {}), "(resource_path, 'schema.sql')\n", (221, 250), False, 'import os\n'), ((531, 560), 'sqlite3.dbapi2.connect', 'sqlite3.connect', (['self.db_file'], {}), '(self.db_file)\n', (546, 560), True, 'from sqlite3 import dbapi2 as sqlite3\n')] |
from flask_restful import Resource
from flask import request
class Shutdown(Resource):
    """REST endpoint that stops the embedded Werkzeug development server."""

    def get(self):
        # Werkzeug exposes its shutdown hook through the WSGI environ; the
        # key is absent when running under any other WSGI server.
        stop = request.environ.get('werkzeug.server.shutdown')
        if stop is None:
            raise RuntimeError('Not running with the Werkzeug Server')
        stop()
        return 'Server shutting down'
| [
"flask.request.environ.get"
] | [((127, 174), 'flask.request.environ.get', 'request.environ.get', (['"""werkzeug.server.shutdown"""'], {}), "('werkzeug.server.shutdown')\n", (146, 174), False, 'from flask import request\n')] |