code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import requests
HUB_LOCATION = 'http://hub.wattbike.com/ranking/getSessionRows?sessionId='
class HubClient:
    """Minimal client for the Wattbike Hub session-ranking endpoint."""

    def __init__(self, location=None):
        # Fall back to the public hub endpoint when no custom location is given.
        self.location = location if location else HUB_LOCATION

    def _validate_session_id(self, session_id):
        # TODO: add regex validation of session_id
        # Bug fix: build the URL from the configured ``self.location`` rather
        # than the module constant, so a custom location passed to __init__
        # is actually honoured.
        return self.location + session_id

    def get_session(self, session_id):
        """Fetch the session rows for ``session_id`` and return the decoded JSON.

        Raises:
            requests.HTTPError: if the hub responds with a non-200 status.
        """
        session_url = self._validate_session_id(session_id)
        resp = requests.get(session_url)
        if resp.status_code != 200:
            raise requests.HTTPError('Response status code != 200')
        return resp.json()
| [
"requests.HTTPError",
"requests.get"
] | [((471, 496), 'requests.get', 'requests.get', (['session_url'], {}), '(session_url)\n', (483, 496), False, 'import requests\n'), ((551, 600), 'requests.HTTPError', 'requests.HTTPError', (['"""Response status code != 200"""'], {}), "('Response status code != 200')\n", (569, 600), False, 'import requests\n')] |
#!/usr/bin/env python
"""
Copyright (c) 2018 <NAME> GmbH
All rights reserved.
This source code is licensed under the BSD-3-Clause license found in the
LICENSE file in the root directory of this source tree.
@author: <NAME>
"""
from geometry_msgs.msg import TransformStamped
from roadmap_planner_tools.planner_input_manager import PlannerInputManager
from roadmap_planning_common_msgs.msg import OrderedVisitingConstraint, StringList, ConstraintType
from roadmap_planning_common_msgs.srv import AddObjectRequest
from rospy_message_converter import json_message_converter
def get_pose_dict():
    # type: () -> dict[str, PoseStamped]
    """Parse the embedded AddObjectRequest JSON lines into {object_name: pose}."""
    poses = {}
    s = '{"object_type": "gluepoint", "object_name": "loc_8", "pose": {"header": {"stamp": {"secs": 1527494081, "nsecs": 245750904}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.0673123623248813, "x": -0.11819957737525943, "z": -0.000529293203694906}, "orientation": {"y": -0.09199954920780186, "x": -0.02204239273911617, "z": -0.9826223619036331, "w": -0.15969818331722338}}}}' + '\n'
    s += '{"object_type": "gluepoint", "object_name": "loc_6", "pose": {"header": {"stamp": {"secs": 1527494075, "nsecs": 379102230}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.11369306929267338, "x": -0.1261289291850433, "z": 0.0007951176021754491}, "orientation": {"y": 0.07187094200286825, "x": -0.061023926496261725, "z": 0.9873831660085665, "w": 0.12722079850990872}}}}' + '\n'
    s += '{"object_type": "gluepoint", "object_name": "loc_7", "pose": {"header": {"stamp": {"secs": 1527494078, "nsecs": 595736504}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.1149274695384531, "x": 0.06208364635543662, "z": -0.005476238253788906}, "orientation": {"y": 0.1316436407714954, "x": 0.019739166149056388, "z": 0.9750055991865761, "w": 0.17788872566576838}}}}' + '\n'
    s += '{"object_type": "gluepoint", "object_name": "loc_4", "pose": {"header": {"stamp": {"secs": 1527494065, "nsecs": 979056120}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.06856865760540547, "x": 0.4478018813158141, "z": -0.000679487573898074}, "orientation": {"y": -0.050516132689598016, "x": 0.014163494691613031, "z": -0.878984408924756, "w": -0.47395561461323}}}}' + '\n'
    s += '{"object_type": "gluepoint", "object_name": "loc_5", "pose": {"header": {"stamp": {"secs": 1527494071, "nsecs": 795750141}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.06760627153697468, "x": 0.06349269911330815, "z": -0.0007470379806025116}, "orientation": {"y": -0.010168248374561623, "x": 0.04411559477008324, "z": -0.9325496611657705, "w": -0.3581920580954878}}}}' + '\n'
    s += '{"object_type": "gluepoint", "object_name": "loc_2", "pose": {"header": {"stamp": {"secs": 1527494059, "nsecs": 112413883}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.06838950251450462, "x": 0.6409328063798745, "z": 0.00015782094835932174}, "orientation": {"y": 0.05237392498219545, "x": 0.02846261965189043, "z": 0.8403338301717435, "w": 0.53878187157086}}}}' + '\n'
    s += '{"object_type": "gluepoint", "object_name": "loc_3", "pose": {"header": {"stamp": {"secs": 1527494063, "nsecs": 79089880}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.11665509017783073, "x": 0.6388356663857032, "z": 0.001613388200793883}, "orientation": {"y": -0.10153267056716704, "x": -0.0370089029955912, "z": -0.9240133314356973, "w": -0.3667708020489993}}}}' + '\n'
    s += '{"object_type": "gluepoint", "object_name": "loc_1", "pose": {"header": {"stamp": {"secs": 1527494055, "nsecs": 529085398}, "frame_id": "board", "seq": 0}, "pose": {"position": {"y": 0.11632455207600452, "x": 0.4453907194544092, "z": 0.0016855318552673815}, "orientation": {"y": -0.11786933993294174, "x": -0.08813291134398896, "z": -0.9653377546033448, "w": -0.21555145135020032}}}}' + '\n'
    # Bug fix: ``s`` is a plain str, which has no ``readlines()``; split the
    # embedded JSON documents with ``splitlines()`` instead.
    for json_str in s.splitlines():
        # Skip blank separator lines (the original checked for "\n").
        if not json_str.strip():
            continue
        req = json_message_converter.convert_json_to_ros_message(
            'roadmap_planning_common_msgs/AddObjectRequest', json_str)  # type: AddObjectRequest
        poses[req.object_name] = req.pose
    return poses
if __name__ == "__main__":
    pi_manager = PlannerInputManager()

    # Static transform placing the 'board' frame inside the 'world' frame.
    trans = TransformStamped()
    trans.child_frame_id = 'board'
    trans.header.frame_id = 'world'
    board_quat = [-0.6646584989424609, 0.7469166744613165, 0.009387090228191897, -0.016013860629187193]
    board_trans = [0.6, 0.3, 0.02]
    trans.transform.translation.x = board_trans[0]
    trans.transform.translation.y = board_trans[1]
    trans.transform.translation.z = board_trans[2]
    trans.transform.rotation.x = board_quat[0]
    trans.transform.rotation.y = board_quat[1]
    trans.transform.rotation.z = board_quat[2]
    trans.transform.rotation.w = board_quat[3]
    pi_manager.add_frame(transform=trans)

    # Register every glue point location parsed from the embedded JSON.
    PosesDict = get_pose_dict()
    pi_manager.add_loc(PosesDict.values(), PosesDict.keys(), len(PosesDict) * ["gluepoint"])

    myOVC = OrderedVisitingConstraint()
    myOVC.name = 'ovc_1'
    loc_names_1 = StringList()
    loc_names_1.values.append('loc_1')
    myOVC.location_names.append(loc_names_1)
    pi_manager.add_ovc([myOVC])

    myOVC_2 = OrderedVisitingConstraint()
    myOVC_2.name = 'ovc_2'
    loc_names_1 = StringList()
    loc_names_1.values.append('loc_2')
    myOVC_2.location_names.append(loc_names_1)
    # Bug fix: this previously re-added ``myOVC``, so 'ovc_2' never reached
    # the planner and the ordering constraint below referenced a missing OVC.
    pi_manager.add_ovc([myOVC_2])

    # ct = Constraints[0]
    pi_manager.add_ovc_ct(constraint_type=ConstraintType.StartsAfterEnd, first_ovcs=['ovc_1'], second_ovcs=['ovc_2'])
    pi_manager.write_planner_input_file('test')
"geometry_msgs.msg.TransformStamped",
"roadmap_planning_common_msgs.msg.StringList",
"rospy_message_converter.json_message_converter.convert_json_to_ros_message",
"roadmap_planning_common_msgs.msg.OrderedVisitingConstraint",
"roadmap_planner_tools.planner_input_manager.PlannerInputManager"
] | [((4221, 4242), 'roadmap_planner_tools.planner_input_manager.PlannerInputManager', 'PlannerInputManager', ([], {}), '()\n', (4240, 4242), False, 'from roadmap_planner_tools.planner_input_manager import PlannerInputManager\n'), ((4256, 4274), 'geometry_msgs.msg.TransformStamped', 'TransformStamped', ([], {}), '()\n', (4272, 4274), False, 'from geometry_msgs.msg import TransformStamped\n'), ((5008, 5035), 'roadmap_planning_common_msgs.msg.OrderedVisitingConstraint', 'OrderedVisitingConstraint', ([], {}), '()\n', (5033, 5035), False, 'from roadmap_planning_common_msgs.msg import OrderedVisitingConstraint, StringList, ConstraintType\n'), ((5079, 5091), 'roadmap_planning_common_msgs.msg.StringList', 'StringList', ([], {}), '()\n', (5089, 5091), False, 'from roadmap_planning_common_msgs.msg import OrderedVisitingConstraint, StringList, ConstraintType\n'), ((5223, 5250), 'roadmap_planning_common_msgs.msg.OrderedVisitingConstraint', 'OrderedVisitingConstraint', ([], {}), '()\n', (5248, 5250), False, 'from roadmap_planning_common_msgs.msg import OrderedVisitingConstraint, StringList, ConstraintType\n'), ((5296, 5308), 'roadmap_planning_common_msgs.msg.StringList', 'StringList', ([], {}), '()\n', (5306, 5308), False, 'from roadmap_planning_common_msgs.msg import OrderedVisitingConstraint, StringList, ConstraintType\n'), ((3976, 4086), 'rospy_message_converter.json_message_converter.convert_json_to_ros_message', 'json_message_converter.convert_json_to_ros_message', (['"""roadmap_planning_common_msgs/AddObjectRequest"""', 'json'], {}), "(\n 'roadmap_planning_common_msgs/AddObjectRequest', json)\n", (4026, 4086), False, 'from rospy_message_converter import json_message_converter\n')] |
import re
import time
def find_key(keyString):
    """Return the PyKeyboard key attribute whose name best matches keyString."""
    keyboard = PyKeyboard()
    target = keyString + "_key"
    best_attr = None
    best_score = 0
    # Only attributes following the "<name>_key" convention are candidates.
    candidates = [name for name in dir(keyboard) if name.endswith("_key")]
    for name in candidates:
        score = similar(target, name)
        if score > best_score:
            best_score = score
            best_attr = getattr(keyboard, name)
    return best_attr
def keypress(key):
    """Press (without releasing) every key encoded in ``key``."""
    keyboard = PyKeyboard()
    resolved = []
    for token in regex_keystring(key):
        # Multi-character tokens are named keys (e.g. "ctrl") that must be
        # resolved; single characters are pressed literally.
        resolved.append(find_key(token) if len(token) > 1 else token)
    # pressing time
    for target in resolved:
        keyboard.press_key(target)
def keyrelease(key):
    """Release every key encoded in ``key`` (counterpart of keypress)."""
    keyboard = PyKeyboard()
    resolved = []
    for token in regex_keystring(key):
        # Named keys (len > 1) are looked up; single characters pass through.
        resolved.append(find_key(token) if len(token) > 1 else token)
    for target in resolved:
        keyboard.release_key(target)
def _press_key(key_int):
    # Tap a single key code, then pause briefly so rapid repeats register.
    k = PyKeyboard()
    k.tap_key(key_int)
    time.sleep(0.3)
def regex_keystring(string):
    """Split ``string`` into leading <name> tokens plus one literal remainder.

    Examples:
        "<ctrl>a"  -> ["ctrl", "a"]
        "a<b>"     -> ["a<b>"]   (no leading tag, so the whole rest is one token)
    """
    pattern = re.compile(r"<([A-Za-z]*)>")
    tokens = []
    remaining = string
    while remaining:
        match = pattern.match(remaining)
        if match is None:
            # No leading <...> tag: everything left becomes a single token.
            tokens.append(remaining)
            break
        tokens.append(match.group(1))
        remaining = remaining[len(match.group(0)):]
    return tokens
def regex_generatekeyevent(string):
    """Rewrite every <name> tag as ;name; and split the result on ';'."""
    matches = list(re.finditer(r"<(.*?)>", string))
    result = string
    for match in matches:
        # Replace every occurrence of the tagged token with its bare name,
        # delimited by semicolons so split() separates it from literal text.
        result = result.replace(match.group(0), ";" + match.group(1) + ";")
    return result.split(";")
def generatekeyevent(key):
    """Type out ``key``, resolving <name> tags to special keys when possible."""
    keyboard = PyKeyboard()
    tokens = regex_generatekeyevent(key)
    if len(tokens) == 1:
        # Plain text with no tags: tap each character in turn.
        for char in tokens[0]:
            keyboard.tap_key(char)
            time.sleep(0.3)
        return
    for token in tokens:
        if token == "":
            continue
        special = find_key(token)
        if special is None:
            # Unknown key name: fall back to typing it character by character.
            for char in token:
                keyboard.tap_key(char)
                time.sleep(0.3)
        else:
            keyboard.tap_key(special)
            time.sleep(0.3)
| [
"re.match",
"re.finditer"
] | [((1784, 1810), 're.finditer', 're.finditer', (['regex', 'string'], {}), '(regex, string)\n', (1795, 1810), False, 'import re\n'), ((1329, 1360), 're.match', 're.match', (['regex', 'working_string'], {}), '(regex, working_string)\n', (1337, 1360), False, 'import re\n')] |
# NumPy exercise 27: operator precedence and broadcasting quirks.
# *******************
import numpy as np
Z=np.arange((10),dtype=int)
print(Z**Z)  # element-wise power; note 0**0 evaluates to 1
print(Z)
print(2<<Z>>2)  # parsed as (2 << Z) >> 2, both shifts broadcast over Z
print()
print(Z <- Z)  # parsed as Z < (-Z), not an assignment arrow; all False here
print()
print(1j*Z)  # promotes the integer array to complex
print()
print(Z/1/1)  # true division (twice) yields a float array
print()
#print(Z<Z>Z)  # would raise: chained comparison on arrays is ambiguous
"numpy.arange"
] | [((66, 90), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'int'}), '(10, dtype=int)\n', (75, 90), True, 'import numpy as np\n')] |
import copy
from .coco import CocoDataset
def build_dataset(cfg, mode):
    """Build a dataset from a config dict.

    Args:
        cfg (dict): dataset config containing a 'name' key ('coco' is the
            only supported value) plus CocoDataset keyword arguments.
        mode: dataset mode (e.g. train/val), forwarded to the dataset.

    Returns:
        CocoDataset: the constructed dataset.

    Raises:
        ValueError: if cfg['name'] is missing or unsupported.
    """
    dataset_cfg = copy.deepcopy(cfg)  # don't mutate the caller's config
    name = dataset_cfg.pop('name', None)
    if name != 'coco':
        # Previously an unknown name fell through and crashed inside
        # CocoDataset with a stray 'name' kwarg; fail loudly instead.
        raise ValueError('Unsupported dataset name: {!r}'.format(name))
    return CocoDataset(mode=mode, **dataset_cfg)
| [
"copy.deepcopy"
] | [((92, 110), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (105, 110), False, 'import copy\n')] |
#!/bin/python
"""
straintables' main pipeline script;
"""
import os
import argparse
import shutil
import straintables
import subprocess
from Bio.Align.Applications import ClustalOmegaCommandline
from straintables.logo import logo
from straintables.Executable import primerFinder, detectMutations,\
compareHeatmap, matrixAnalysis
from straintables.Database import directoryManager
Description = """
Main straintables pipeline.
Will initialize an analysis directory based on the provided genomes,
annotation and wanted regions."
If primers were not declared at the wanted regions file,
it will try to create primers based on the region sequence,
by using the region boundaries defined at the annotation file.
The resulting directory can be inspected manually
or used to build dissimilarity matrices through the command 'stview'.
"""
class Options():
    """Attribute-style access wrapper over a plain options dict."""

    def __init__(self, options):
        # Copy every entry of the mapping onto the instance namespace.
        for name, value in options.items():
            setattr(self, name, value)
def find_primers(options):
    # Thin wrapper around the primer-finder executable's entry point;
    # returns its result (truthiness is checked by the caller in Execute).
    return primerFinder.Execute(options)
def run_alignment(filePrefix,
                  clustalPath=straintables.Definitions.ClustalCommand):
    """Align <filePrefix>.fasta with Clustal Omega, writing <filePrefix>.aln."""
    command = ClustalOmegaCommandline(
        clustalPath,
        infile=filePrefix + ".fasta",
        outfile=filePrefix + ".aln",
        force=True,
        outfmt="clustal")
    # Running the commandline object executes clustal; stderr is discarded.
    stdout, _ = command()
    print(stdout)
def draw_tree(filePrefix):
    """Render the phylogenetic tree <filePrefix>.ph to <filePrefix>.pdf."""
    infile = filePrefix + ".ph"
    # Bug fix: the extension separator was missing ("<prefix>pdf"), which
    # produced a malformed output filename.
    outfile = filePrefix + ".pdf"
    treeOptions = Options({
        "InputFile": infile,
        "OutputFile": outfile
    })
    straintables.DrawGraphics.drawTree.Execute(treeOptions)
def run_meshclust(filePrefix):
    """Cluster the sequences in <filePrefix>.fasta with MeShClust (.clst out)."""
    command = [
        "meshclust",
        filePrefix + ".fasta",
        "--output",
        filePrefix + ".clst",
        "--id", "0.999",
        "--align",
    ]
    subprocess.run(command)
def detect_mutations(filePrefix):
    """Run mutation detection on the alignment <filePrefix>.aln."""
    mutationOptions = Options({
        "InputFile": filePrefix + ".aln",
        "PlotSubtitle": "",
    })
    detectMutations.Execute(mutationOptions)
def matrix_analysis(WorkingDirectory):
    """Run the dissimilarity-matrix analysis on the run directory.

    Returns the analysis result (truthy on success).
    """
    opts = Options({
        "WorkingDirectory": WorkingDirectory,
        "updateOnly": False,
    })
    return matrixAnalysis.Execute(opts)
def parse_arguments():
    """Assemble the pipeline's CLI parser and return the parsed options."""
    parser = argparse.ArgumentParser(description=Description)
    parser.add_argument(
        "--noamplicon", dest="DoAmplicon", action="store_false", default=True)
    parser.add_argument(
        "--noalign", dest="DoAlignment", action="store_false", default=True)
    parser.add_argument("--alnmode", dest="AlignmentMode", default="clustal")
    parser.add_argument(
        "--clustalpath", dest="ClustalPath",
        default=straintables.Definitions.ClustalCommand)
    # Merge in the primer finder's own arguments before parsing.
    parser = primerFinder.parse_arguments(parser)
    return parser.parse_args()
def TestMeshclust():
    """Report and return whether the 'meshclust' binary is on PATH."""
    available = shutil.which("meshclust") is not None
    if available:
        print("MeshClust enabled!")
    else:
        print("MeshClust not found! Disabled...")
    return available
def process_individual_region(options, locusName, MeshClustEnabled):
    """Align, scan for mutations and (optionally) cluster a single locus."""
    prefix = os.path.join(
        options.WorkingDirectory,
        straintables.Definitions.FastaRegionPrefix + locusName)
    print("Running alignment for %s..." % locusName)
    run_alignment(prefix, clustalPath=options.ClustalPath)
    # draw_tree(prefix)
    detect_mutations(prefix)
    if MeshClustEnabled:
        run_meshclust(prefix)
def Execute(options):
    """Run the straintables pipeline end to end for the parsed CLI options.

    Resolves the working directory, checks external tools (clustal and,
    optionally, meshclust), finds primers, aligns each matched locus and
    finally builds the dissimilarity matrices. Calls exit(1) on fatal
    configuration problems.
    """
    if not options.PrimerFile:
        print("Fatal: No primer file specified!")
        exit(1)
    # -- SELECT WORKING DIRECTORY;
    # Default: analysisResults/<primer file base name, extension stripped>.
    if not options.WorkingDirectory:
        AnalysisCode = os.path.splitext(options.PrimerFile)[0]
        AnalysisCode = os.path.basename(AnalysisCode)
        WorkingDirectoryBase = "analysisResults"
        options.WorkingDirectory = os.path.join(WorkingDirectoryBase,
                                                AnalysisCode)
    # -- TEST CLUSTAL SETUP;
    if not shutil.which(options.ClustalPath):
        print("%s not found! Aborting..." % options.ClustalPath)
        exit(1)
    # MeshClust is optional; per-locus clustering is skipped when absent.
    MeshClustEnabled = TestMeshclust()
    directoryManager.createDirectoryPath(options.WorkingDirectory)
    # SHOW BEAUTIFUL ASCII ART;
    print(logo)
    # -- RUN PIPELINE;
    if options.DoAmplicon:
        result = find_primers(options)
        if not result:
            print("Failure to find primers.")
            exit(1)
    AllowedAlignModes = ["clustal"]
    if options.AlignmentMode not in AllowedAlignModes:
        print("Unknown alignment mode %s." % (options.AlignmentMode))
        exit(1)
    # Loci successfully matched by the primer finder drive the per-region work.
    MatchedRegions = straintables.OutputFile.MatchedRegions(
        options.WorkingDirectory)
    MatchedRegions.read()
    SuccessfulLoci = MatchedRegions.content["LocusName"]
    if options.DoAlignment:
        for locusName in SuccessfulLoci:
            process_individual_region(options, locusName, MeshClustEnabled)
    if matrix_analysis(options.WorkingDirectory):
        print("Analysis successful.")
def main():
    """Console entry point: parse CLI arguments and run the pipeline."""
    Execute(parse_arguments())


if __name__ == "__main__":
    main()
| [
"argparse.ArgumentParser",
"straintables.Executable.primerFinder.parse_arguments",
"Bio.Align.Applications.ClustalOmegaCommandline",
"subprocess.run",
"shutil.which",
"os.path.join",
"straintables.OutputFile.MatchedRegions",
"straintables.DrawGraphics.drawTree.Execute",
"os.path.splitext",
"strain... | [((979, 1008), 'straintables.Executable.primerFinder.Execute', 'primerFinder.Execute', (['options'], {}), '(options)\n', (999, 1008), False, 'from straintables.Executable import primerFinder, detectMutations, compareHeatmap, matrixAnalysis\n'), ((1197, 1300), 'Bio.Align.Applications.ClustalOmegaCommandline', 'ClustalOmegaCommandline', (['clustalPath'], {'infile': 'infile', 'outfile': 'outfile', 'force': '(True)', 'outfmt': '"""clustal"""'}), "(clustalPath, infile=infile, outfile=outfile, force=\n True, outfmt='clustal')\n", (1220, 1300), False, 'from Bio.Align.Applications import ClustalOmegaCommandline\n'), ((1692, 1747), 'straintables.DrawGraphics.drawTree.Execute', 'straintables.DrawGraphics.drawTree.Execute', (['treeOptions'], {}), '(treeOptions)\n', (1734, 1747), False, 'import straintables\n'), ((1785, 1903), 'subprocess.run', 'subprocess.run', (["['meshclust', filePrefix + '.fasta', '--output', filePrefix + '.clst',\n '--id', '0.999', '--align']"], {}), "(['meshclust', filePrefix + '.fasta', '--output', filePrefix +\n '.clst', '--id', '0.999', '--align'])\n", (1799, 1903), False, 'import subprocess\n'), ((2125, 2166), 'straintables.Executable.detectMutations.Execute', 'detectMutations.Execute', (['mutationsOptions'], {}), '(mutationsOptions)\n', (2148, 2166), False, 'from straintables.Executable import primerFinder, detectMutations, compareHeatmap, matrixAnalysis\n'), ((2333, 2372), 'straintables.Executable.matrixAnalysis.Execute', 'matrixAnalysis.Execute', (['analysisOptions'], {}), '(analysisOptions)\n', (2355, 2372), False, 'from straintables.Executable import primerFinder, detectMutations, compareHeatmap, matrixAnalysis\n'), ((2411, 2459), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'Description'}), '(description=Description)\n', (2434, 2459), False, 'import argparse\n'), ((2950, 2986), 'straintables.Executable.primerFinder.parse_arguments', 'primerFinder.parse_arguments', (['parser'], {}), '(parser)\n', (2978, 
2986), False, 'from straintables.Executable import primerFinder, detectMutations, compareHeatmap, matrixAnalysis\n'), ((3080, 3105), 'shutil.which', 'shutil.which', (['"""meshclust"""'], {}), "('meshclust')\n", (3092, 3105), False, 'import shutil\n'), ((3332, 3431), 'os.path.join', 'os.path.join', (['options.WorkingDirectory', '(straintables.Definitions.FastaRegionPrefix + locusName)'], {}), '(options.WorkingDirectory, straintables.Definitions.\n FastaRegionPrefix + locusName)\n', (3344, 3431), False, 'import os\n'), ((4396, 4458), 'straintables.Database.directoryManager.createDirectoryPath', 'directoryManager.createDirectoryPath', (['options.WorkingDirectory'], {}), '(options.WorkingDirectory)\n', (4432, 4458), False, 'from straintables.Database import directoryManager\n'), ((4887, 4951), 'straintables.OutputFile.MatchedRegions', 'straintables.OutputFile.MatchedRegions', (['options.WorkingDirectory'], {}), '(options.WorkingDirectory)\n', (4925, 4951), False, 'import straintables\n'), ((3982, 4012), 'os.path.basename', 'os.path.basename', (['AnalysisCode'], {}), '(AnalysisCode)\n', (3998, 4012), False, 'import os\n'), ((4098, 4146), 'os.path.join', 'os.path.join', (['WorkingDirectoryBase', 'AnalysisCode'], {}), '(WorkingDirectoryBase, AnalysisCode)\n', (4110, 4146), False, 'import os\n'), ((4236, 4269), 'shutil.which', 'shutil.which', (['options.ClustalPath'], {}), '(options.ClustalPath)\n', (4248, 4269), False, 'import shutil\n'), ((3919, 3955), 'os.path.splitext', 'os.path.splitext', (['options.PrimerFile'], {}), '(options.PrimerFile)\n', (3935, 3955), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# @Time : 2019-08-02 18:31
# @Author : <NAME>
# @Email : <EMAIL>
import os
import cv2
import glob
import shutil
from multiprocessing import Pool
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from tqdm import tqdm
import numpy as np
import subprocess
def auto_unzip_fun(x, f):
    """Apply callable ``f`` to the unpacked argument tuple ``x``."""
    args = tuple(x)
    return f(*args)
def make_video(output_mp4_path, img_path_list, save_frames_dir=None, fps=24):
    """Encode the images in ``img_path_list`` into an H.264 mp4.

    A temporary XVID .avi is written first, then transcoded to mp4 with
    ffmpeg. If ``save_frames_dir`` is given, each frame is also copied
    there as a sequentially numbered .jpg.
    """
    # Frame size comes from the first image; assumes all frames share it
    # — TODO confirm upstream guarantees uniform image sizes.
    first_img = cv2.imread(img_path_list[0])
    h, w = first_img.shape[:2]
    pool_size = 40
    tmp_avi_video_path = '%s.avi' % output_mp4_path
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    videoWriter = cv2.VideoWriter(tmp_avi_video_path, fourcc, fps, (w, h))
    args_list = [(img_path,) for img_path in img_path_list]
    # Decode frames in parallel; the writer itself is fed sequentially so
    # frame order is preserved (imap keeps input order).
    with Pool(pool_size) as p:
        for img in tqdm(p.imap(partial(auto_unzip_fun, f=cv2.imread), args_list), total=len(args_list)):
            videoWriter.write(img)
    videoWriter.release()
    if save_frames_dir:
        for i, img_path in enumerate(img_path_list):
            shutil.copy(img_path, '%s/%.8d.jpg' % (save_frames_dir, i))
    # Transcode to h264 mp4 and drop the temporary avi.
    os.system("ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1" % (tmp_avi_video_path, output_mp4_path))
    os.system("rm %s" % tmp_avi_video_path)
def fuse_image(img_path_list, row_num, col_num):
    """Load row_num * col_num images and tile them into one grid image."""
    assert len(img_path_list) == row_num * col_num
    images = [cv2.imread(p) for p in img_path_list]
    rows = []
    for r in range(row_num):
        start = r * col_num
        # One grid row: col_num images joined horizontally.
        rows.append(np.concatenate(images[start:start + col_num], axis=1))
    return np.concatenate(rows, axis=0)
def fuse_video(video_frames_path_list, output_mp4_path, row_num, col_num, fps=24):
    """Tile several frame sequences into one row_num x col_num grid video.

    Each element of ``video_frames_path_list`` is the frame-path list of one
    input video; frame i of every video is fused into grid frame i. Writes a
    temporary XVID .avi, then transcodes it to H.264 mp4 with ffmpeg.
    """
    assert len(video_frames_path_list) == row_num * col_num
    # All videos are assumed to have the same frame count and frame size as
    # the first one — TODO confirm callers guarantee this.
    frame_num = len(video_frames_path_list[0])
    first_img = cv2.imread(video_frames_path_list[0][0])
    h, w = first_img.shape[:2]
    fused_h, fused_w = h * row_num, w * col_num
    args_list = []
    for frame_idx in range(frame_num):
        # Collect the frame_idx-th frame from every input video.
        fused_frame_path_list = [video_frames[frame_idx] for video_frames in video_frames_path_list]
        args_list.append((fused_frame_path_list, row_num, col_num))
    pool_size = 40
    tmp_avi_video_path = '%s.avi' % output_mp4_path
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # for args in args_list:
    #     fuse_image(*args)
    # exit()
    videoWriter = cv2.VideoWriter(tmp_avi_video_path, fourcc, fps, (fused_w, fused_h))
    # Build grid frames in parallel; imap preserves frame order for the writer.
    with Pool(pool_size) as p:
        for img in tqdm(p.imap(partial(auto_unzip_fun, f=fuse_image), args_list), total=len(args_list)):
            videoWriter.write(img)
    videoWriter.release()
    os.system("ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1" % (tmp_avi_video_path, output_mp4_path))
    os.system("rm %s" % (tmp_avi_video_path))
def merge(src_img, ref_img_path, out_img_path, pad):
    """Horizontally concatenate [src | pad | ref | pad | out] at src's height.

    Args:
        src_img: already-loaded source image; its height defines the tile size.
        ref_img_path: path of the reference image to load.
        out_img_path: path of the output image to load.
        pad: padding strip (same height as src_img) inserted between images.

    Returns:
        np.ndarray: the concatenated strip.
    """
    image_size = src_img.shape[0]
    ref_img = cv2.imread(ref_img_path)
    out_img = cv2.imread(out_img_path)
    # Bug fix: the resize guard used `and`, so non-square images with only
    # ONE matching dimension skipped the resize and crashed np.concatenate
    # on the height mismatch. Resize whenever either dimension differs.
    if ref_img.shape[0] != image_size or ref_img.shape[1] != image_size:
        ref_img = cv2.resize(ref_img, (image_size, image_size))
    if out_img.shape[0] != image_size or out_img.shape[1] != image_size:
        out_img = cv2.resize(out_img, (image_size, image_size))
    return np.concatenate([src_img, pad, ref_img, pad, out_img], axis=1)
def load_image(image_path, image_size=512):
    """Read ``image_path`` with OpenCV and resize it to a square.

    Returns:
        np.ndarray: (image_size, image_size, 3), BGR, uint8 in [0, 255].
    """
    raw = cv2.imread(image_path)
    return cv2.resize(raw, (image_size, image_size))
def fuse_one_image(img_paths, image_size):
    # Single source image: just load/resize it to fill the whole tile.
    return load_image(img_paths[0], image_size)
def fuse_two_images(img_paths, image_size):
    """
    Stack the first two images vertically, each at half resolution.

    Args:
        img_paths (list of str): image paths; only the first two are used.
        image_size (int):

    Returns:
        fuse_img (np.ndarray): (image_size, image_size // 2, 3), BGR channel
            space, in the range of [0, 255], np.uint8 — two
            (image_size//2, image_size//2) tiles stacked along axis 0.
            (The previous docstring stated the transposed shape.)
    """
    img_size = image_size // 2
    img_1 = load_image(img_paths[0], img_size)
    img_2 = load_image(img_paths[1], img_size)
    fuse_img = np.concatenate([img_1, img_2], axis=0)
    return fuse_img
def fuse_four_images(img_paths, image_size):
    """Tile four images into an (image_size, image_size, 3) BGR uint8 grid.

    Two vertical pairs are built with fuse_two_images and then placed
    side by side.
    """
    left = fuse_two_images(img_paths[0:2], image_size)
    right = fuse_two_images(img_paths[2:4], image_size)
    return np.concatenate([left, right], axis=1)
def fuse_eight_images(img_paths, image_size):
    """
    Tile eight images into a single grid.

    Args:
        img_paths (list of str): eight image paths.
        image_size (int):

    Returns:
        fuse_img (np.ndarray): (image_size // 2, image_size, 3), BGR channel
        space, in the range of [0, 255], np.uint8.
    """
    # Bug fix: this previously called fuse_two_images on each 4-path half,
    # which silently used only images 0,1 and 4,5 and ignored the other four.
    # Build each half as a 2x2 grid and join the halves side by side (axis=1)
    # so the result matches the documented (image_size // 2, image_size) shape.
    fuse_img_1 = fuse_four_images(img_paths[0:4], image_size // 2)
    fuse_img_2 = fuse_four_images(img_paths[4:8], image_size // 2)
    fuse_img = np.concatenate([fuse_img_1, fuse_img_2], axis=1)
    return fuse_img
def fuse_source(all_src_img_paths, image_size=512):
    """
    Fuse 1, 2, 4 or 8 source images into one tile.

    Args:
        all_src_img_paths (list of str): the list of source image paths;
            currently only 1, 2, 4 or 8 sources are supported.
        image_size (int): the nominal output resolution.

    Returns:
        fuse_img (np.ndarray): BGR uint8 fused image. For 1 and 4 sources it
        is (image_size, image_size, 3); for 2 and 8 sources one dimension is
        image_size // 2 (see the individual fuse_* helpers).

    Raises:
        AssertionError: if the number of source images is unsupported.
    """
    ns = len(all_src_img_paths)
    # TODO, currently it only supports, 1, 2, 4, 8 number of source images.
    # Bug fix: the "{}" placeholder in the assert message was never
    # interpolated; format in the offending count.
    assert ns in [1, 2, 4, 8], \
        "{} must be in [1, 2, 4, 8], currently it only supports, " \
        "1, 2, 4, 8 number of source images.".format(ns)
    if ns == 1:
        fuse_img = fuse_one_image(all_src_img_paths, image_size)
    elif ns == 2:
        fuse_img = fuse_two_images(all_src_img_paths, image_size)
    elif ns == 4:
        fuse_img = fuse_four_images(all_src_img_paths, image_size)
    else:
        fuse_img = fuse_eight_images(all_src_img_paths, image_size)
    return fuse_img
def fuse_source_reference_output(output_mp4_path, src_img_paths, ref_img_paths, out_img_paths,
                                 image_size=512, pad=10, fps=25):
    """Render a comparison video: [fused sources | reference_i | output_i].

    For every frame i, the fused source tile, the i-th reference image and
    the i-th output image are concatenated horizontally (with ``pad``-pixel
    black separators) and written as one video frame. A temporary XVID .avi
    is transcoded to H.264 mp4 via ffmpeg.
    """
    total = len(ref_img_paths)
    assert total == len(out_img_paths), "{} != {}".format(total, len(out_img_paths))
    # The source tile is built once and reused in every frame.
    fused_src_img = fuse_source(src_img_paths, image_size)
    pad_region = np.zeros((image_size, pad, 3), dtype=np.uint8)
    pool_size = min(15, os.cpu_count())
    tmp_avi_video_path = '%s.avi' % output_mp4_path
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # Total width: source tile + two (image + separator) columns.
    W = fused_src_img.shape[1] + (image_size + pad) * 2
    videoWriter = cv2.VideoWriter(tmp_avi_video_path, fourcc, fps, (W, image_size))
    # Frames are merged in parallel; Executor.map preserves input order.
    with ProcessPoolExecutor(pool_size) as pool:
        for img in tqdm(pool.map(merge, [fused_src_img] * total,
                                  ref_img_paths, out_img_paths, [pad_region] * total)):
            videoWriter.write(img)
    videoWriter.release()
    os.system("ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1" % (tmp_avi_video_path, output_mp4_path))
    os.system("rm %s" % tmp_avi_video_path)
| [
"cv2.VideoWriter",
"numpy.zeros",
"functools.partial",
"multiprocessing.Pool",
"numpy.concatenate",
"cv2.VideoWriter_fourcc",
"os.cpu_count",
"os.system",
"cv2.resize",
"concurrent.futures.ProcessPoolExecutor",
"cv2.imread",
"shutil.copy"
] | [((574, 602), 'cv2.imread', 'cv2.imread', (['img_path_list[0]'], {}), '(img_path_list[0])\n', (584, 602), False, 'import cv2\n'), ((719, 750), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (741, 750), False, 'import cv2\n'), ((770, 826), 'cv2.VideoWriter', 'cv2.VideoWriter', (['tmp_avi_video_path', 'fourcc', 'fps', '(w, h)'], {}), '(tmp_avi_video_path, fourcc, fps, (w, h))\n', (785, 826), False, 'import cv2\n'), ((1239, 1345), 'os.system', 'os.system', (["('ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1' % (tmp_avi_video_path,\n output_mp4_path))"], {}), "('ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1' % (\n tmp_avi_video_path, output_mp4_path))\n", (1248, 1345), False, 'import os\n'), ((1345, 1384), 'os.system', 'os.system', (["('rm %s' % tmp_avi_video_path)"], {}), "('rm %s' % tmp_avi_video_path)\n", (1354, 1384), False, 'import os\n'), ((1765, 1797), 'numpy.concatenate', 'np.concatenate', (['row_imgs'], {'axis': '(0)'}), '(row_imgs, axis=0)\n', (1779, 1797), True, 'import numpy as np\n'), ((2028, 2068), 'cv2.imread', 'cv2.imread', (['video_frames_path_list[0][0]'], {}), '(video_frames_path_list[0][0])\n', (2038, 2068), False, 'import cv2\n'), ((2461, 2492), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (2483, 2492), False, 'import cv2\n'), ((2583, 2651), 'cv2.VideoWriter', 'cv2.VideoWriter', (['tmp_avi_video_path', 'fourcc', 'fps', '(fused_w, fused_h)'], {}), '(tmp_avi_video_path, fourcc, fps, (fused_w, fused_h))\n', (2598, 2651), False, 'import cv2\n'), ((2854, 2960), 'os.system', 'os.system', (["('ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1' % (tmp_avi_video_path,\n output_mp4_path))"], {}), "('ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1' % (\n tmp_avi_video_path, output_mp4_path))\n", (2863, 2960), False, 'import os\n'), ((2960, 2999), 'os.system', 'os.system', (["('rm %s' % tmp_avi_video_path)"], {}), "('rm %s' % tmp_avi_video_path)\n", (2969, 2999), False, 
'import os\n'), ((3120, 3144), 'cv2.imread', 'cv2.imread', (['ref_img_path'], {}), '(ref_img_path)\n', (3130, 3144), False, 'import cv2\n'), ((3159, 3183), 'cv2.imread', 'cv2.imread', (['out_img_path'], {}), '(out_img_path)\n', (3169, 3183), False, 'import cv2\n'), ((3536, 3597), 'numpy.concatenate', 'np.concatenate', (['[src_img, pad, ref_img, pad, out_img]'], {'axis': '(1)'}), '([src_img, pad, ref_img, pad, out_img], axis=1)\n', (3550, 3597), True, 'import numpy as np\n'), ((3884, 3906), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (3894, 3906), False, 'import cv2\n'), ((3919, 3962), 'cv2.resize', 'cv2.resize', (['image', '(image_size, image_size)'], {}), '(image, (image_size, image_size))\n', (3929, 3962), False, 'import cv2\n'), ((4483, 4521), 'numpy.concatenate', 'np.concatenate', (['[img_1, img_2]'], {'axis': '(0)'}), '([img_1, img_2], axis=0)\n', (4497, 4521), True, 'import numpy as np\n'), ((4944, 4992), 'numpy.concatenate', 'np.concatenate', (['[fuse_img_1, fuse_img_2]'], {'axis': '(1)'}), '([fuse_img_1, fuse_img_2], axis=1)\n', (4958, 4992), True, 'import numpy as np\n'), ((5430, 5478), 'numpy.concatenate', 'np.concatenate', (['[fuse_img_1, fuse_img_2]'], {'axis': '(0)'}), '([fuse_img_1, fuse_img_2], axis=0)\n', (5444, 5478), True, 'import numpy as np\n'), ((7087, 7133), 'numpy.zeros', 'np.zeros', (['(image_size, pad, 3)'], {'dtype': 'np.uint8'}), '((image_size, pad, 3), dtype=np.uint8)\n', (7095, 7133), True, 'import numpy as np\n'), ((7240, 7271), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (7262, 7271), False, 'import cv2\n'), ((7347, 7412), 'cv2.VideoWriter', 'cv2.VideoWriter', (['tmp_avi_video_path', 'fourcc', 'fps', '(W, image_size)'], {}), '(tmp_avi_video_path, fourcc, fps, (W, image_size))\n', (7362, 7412), False, 'import cv2\n'), ((7682, 7788), 'os.system', 'os.system', (["('ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1' % (tmp_avi_video_path,\n output_mp4_path))"], {}), 
"('ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1' % (\n tmp_avi_video_path, output_mp4_path))\n", (7691, 7788), False, 'import os\n'), ((7788, 7827), 'os.system', 'os.system', (["('rm %s' % tmp_avi_video_path)"], {}), "('rm %s' % tmp_avi_video_path)\n", (7797, 7827), False, 'import os\n'), ((896, 911), 'multiprocessing.Pool', 'Pool', (['pool_size'], {}), '(pool_size)\n', (900, 911), False, 'from multiprocessing import Pool\n'), ((1504, 1524), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1514, 1524), False, 'import cv2\n'), ((1682, 1714), 'numpy.concatenate', 'np.concatenate', (['col_imgs'], {'axis': '(1)'}), '(col_imgs, axis=1)\n', (1696, 1714), True, 'import numpy as np\n'), ((2661, 2676), 'multiprocessing.Pool', 'Pool', (['pool_size'], {}), '(pool_size)\n', (2665, 2676), False, 'from multiprocessing import Pool\n'), ((3277, 3322), 'cv2.resize', 'cv2.resize', (['ref_img', '(image_size, image_size)'], {}), '(ref_img, (image_size, image_size))\n', (3287, 3322), False, 'import cv2\n'), ((3416, 3461), 'cv2.resize', 'cv2.resize', (['out_img', '(image_size, image_size)'], {}), '(out_img, (image_size, image_size))\n', (3426, 3461), False, 'import cv2\n'), ((7159, 7173), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (7171, 7173), False, 'import os\n'), ((7423, 7453), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', (['pool_size'], {}), '(pool_size)\n', (7442, 7453), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((1174, 1233), 'shutil.copy', 'shutil.copy', (['img_path', "('%s/%.8d.jpg' % (save_frames_dir, i))"], {}), "(img_path, '%s/%.8d.jpg' % (save_frames_dir, i))\n", (1185, 1233), False, 'import shutil\n'), ((949, 986), 'functools.partial', 'partial', (['auto_unzip_fun'], {'f': 'cv2.imread'}), '(auto_unzip_fun, f=cv2.imread)\n', (956, 986), False, 'from functools import partial\n'), ((2714, 2751), 'functools.partial', 'partial', (['auto_unzip_fun'], {'f': 'fuse_image'}), '(auto_unzip_fun, f=fuse_image)\n', 
(2721, 2751), False, 'from functools import partial\n')] |
"""
Podcats is a podcast feed generator and a server.
It generates RSS feeds for podcast episodes from local audio files and,
optionally, exposes the feed and as well as the episode file via
a built-in web server so that they can be imported into iTunes
or another podcast client.
"""
import os
import re
import time
import argparse
import mimetypes
from email.utils import formatdate
from os import path
from xml.sax.saxutils import escape, quoteattr
try:
from urllib.request import pathname2url
except ImportError:
# For python 2
# noinspection PyUnresolvedReferences
from urllib import pathname2url
import mutagen
import humanize
from mutagen.id3 import ID3
from flask import Flask, Response
# noinspection PyPackageRequirements
from jinja2 import Environment, FileSystemLoader
from collections import defaultdict
__version__ = '0.6.3'
__licence__ = 'BSD'
__author__ = '<NAME>'
__url__ = 'https://github.com/jakubroztocil/podcats'

# URL paths under which the HTML interface and the media files are served.
WEB_PATH = '/web'
STATIC_PATH = '/static'
# Jinja2 templates ship in a 'templates' directory alongside this module.
TEMPLATES_ROOT = os.path.join(os.path.dirname(__file__), 'templates')
# Image extensions recognised as potential episode cover art.
BOOK_COVER_EXTENSIONS = ('.jpg', '.jpeg', '.png')
jinja2_env = Environment(loader=FileSystemLoader(TEMPLATES_ROOT))
class Episode(object):
    """A single podcast episode backed by a local audio file.

    Reads tag metadata via mutagen at construction time and renders
    itself either as an RSS <item> (XML) or as an HTML list entry.
    """

    def __init__(self, filename, relative_dir, root_url, title_format='(unknown){title}'):
        self.filename = filename
        self.relative_dir = relative_dir
        self.root_url = root_url
        self.length = os.path.getsize(filename)
        self.tags = mutagen.File(self.filename, easy=True)
        self.title_format = title_format
        # BUG FIX: removed a leftover debug ``print(self.tags)`` that
        # polluted stdout for every discovered episode.
        try:
            self.id3 = ID3(self.filename)
        except Exception:
            # Not every audio container carries ID3 tags (e.g. ogg, m4b);
            # fall back gracefully instead of failing.
            self.id3 = None

    def __lt__(self, other):
        return self.date < other.date

    def __gt__(self, other):
        return self.date > other.date

    def __cmp__(self, other):
        # Python 2 fallback; __lt__/__gt__ cover Python 3 sorting.
        a, b = self.date, other.date
        return (a > b) - (a < b)  # Python3 cmp() equivalent

    def as_xml(self):
        """Return this episode rendered as an RSS <item> XML fragment."""
        template = jinja2_env.get_template('episode.xml')
        return template.render(
            title=escape(self.title),
            url=quoteattr(self.url),
            guid=escape(self.url),
            mimetype=self.mimetype,
            length=self.length,
            date=formatdate(self.date),
            image_url=self.image,
            description=self.description,
        )

    def as_html(self):
        """Return this episode rendered as an HTML fragment."""
        filename = os.path.basename(self.filename)
        directory = os.path.split(os.path.dirname(self.filename))[-1]
        template = jinja2_env.get_template('episode.html')
        return template.render(
            title=escape(self.title),
            url=self.url,
            filename=filename,
            directory=directory,
            mimetype=self.mimetype,
            length=humanize.naturalsize(self.length),
            date=formatdate(self.date),
            image_url=self.image,
            description=self.description,
        )

    def get_tag(self, name):
        """Return the first value of tag ``name``, or None if missing."""
        try:
            return self.tags[name][0]
        except (KeyError, IndexError):
            pass

    def _to_url(self, filepath):
        """Map a file path to its URL below the static file root."""
        fn = os.path.basename(filepath)
        path = STATIC_PATH + '/' + self.relative_dir + '/' + fn
        path = re.sub(r'//', '/', path)
        url = self.root_url + pathname2url(path)
        return url

    @property
    def title(self):
        """Return the episode title formatted via ``title_format``.

        Unknown placeholders resolve to '<unset>'; on any formatting
        error the bare filename is used instead.
        """
        filename = os.path.splitext(os.path.basename(self.filename))[0]
        try:
            args = defaultdict(lambda: '<unset>', {'filename': filename})
            for key, value in self.tags.items():
                args[key.lower()] = value[0]  # use first entry only
            text = self.title_format.format_map(args)
        except Exception:
            print('Failed setting title for file {}. Using filename as title.'.format(filename))
            text = filename
        return text

    @property
    def url(self):
        """Return the episode file URL."""
        return self._to_url(self.filename)

    @property
    def date(self):
        """Return the episode date as a unix timestamp.

        Tries progressively less precise formats of the 'date' tag and
        falls back to the file's modification time.
        """
        dt = self.get_tag('date')
        if dt:
            formats = [
                '%Y-%m-%d:%H:%M:%S',
                '%Y-%m-%d:%H:%M',
                '%Y-%m-%d:%H',
                '%Y-%m-%d',
                '%Y-%m',
                '%Y',
            ]
            for fmt in formats:
                try:
                    dt = time.mktime(time.strptime(dt, fmt))
                    break
                except ValueError:
                    pass
            else:
                dt = None
        if not dt:
            dt = os.path.getmtime(self.filename)
        return dt

    @property
    def mimetype(self):
        """Return the file's mimetype name."""
        if self.filename.endswith('m4b'):
            # mimetypes does not know audiobook containers.
            return 'audio/x-m4b'
        else:
            return mimetypes.guess_type(self.filename)[0]

    @property
    def image(self):
        """Return the URL of a cover image next to the episode, or None."""
        directory = os.path.split(self.filename)[0]
        # os.listdir() order is platform-dependent; sort so the chosen
        # cover image is deterministic across runs.
        covers = sorted(
            fn for fn in os.listdir(directory)
            if os.path.splitext(fn)[1].lower() in BOOK_COVER_EXTENSIONS
        )
        if covers:
            # _to_url() only uses the basename, so a bare filename works.
            return self._to_url(covers[0])
        return None

    @property
    def description(self):
        """Return the episode description, or '' when unavailable."""
        try:
            return self.tags['description'][0]
        except Exception:
            return ''
class Channel(object):
    """Podcast channel built from a directory tree of media files."""

    def __init__(self, root_dir, root_url, host, port, title, link,
                 debug=False, video=False, title_format='(unknown){title}'):
        self.root_dir = root_dir if root_dir else os.getcwd()
        self.root_url = root_url
        self.host = host
        self.port = int(port)
        self.link = link if link else self.root_url
        if title:
            self.title = title
        else:
            # Fall back to the name of the channel's root directory.
            trimmed = self.root_dir.rstrip('/')
            self.title = os.path.basename(os.path.abspath(trimmed))
        self.description = 'Feed generated by <a href="%s">Podcats</a>.' % __url__
        self.debug = debug
        self.video = video
        self.title_format = title_format

    def __iter__(self):
        """Yield an Episode for every media file below the channel root."""
        prefix_len = len(self.root_dir)
        for dirpath, _dirnames, filenames in os.walk(self.root_dir):
            rel_dir = dirpath[prefix_len:]
            for name in filenames:
                full_path = os.path.join(dirpath, name)
                guessed = mimetypes.guess_type(full_path)[0]
                is_audio = bool(guessed and 'audio' in guessed)
                is_video = bool(guessed and 'video' in guessed)
                if is_audio or full_path.endswith('m4b') \
                        or (is_video and self.video is True):
                    yield Episode(full_path, rel_dir, self.root_url,
                                  title_format=self.title_format)

    def as_xml(self):
        """Render the whole channel as an RSS document string."""
        body = u''.join(ep.as_xml() for ep in sorted(self))
        rendered = jinja2_env.get_template('feed.xml').render(
            title=escape(self.title),
            description=escape(self.description),
            link=escape(self.link),
            items=body,
        )
        return rendered.strip()

    def as_html(self):
        """Render the whole channel as an HTML page string."""
        body = u''.join(ep.as_html() for ep in sorted(self))
        rendered = jinja2_env.get_template('feed.html').render(
            title=escape(self.title),
            description=self.description,
            link=escape(self.link),
            items=body,
        )
        return rendered.strip()
def serve(channel):
    """Expose the channel (RSS feed, episode files, web UI) over HTTP."""
    app = Flask(
        __name__,
        static_folder=channel.root_dir,
        static_url_path=STATIC_PATH,
    )

    def feed():
        # Root URL serves the RSS feed itself.
        return Response(
            channel.as_xml(),
            content_type='application/xml; charset=utf-8')

    app.route('/')(feed)
    app.add_url_rule(
        WEB_PATH,
        view_func=channel.as_html,
        methods=['GET'],
    )
    app.run(host=channel.host, port=channel.port,
            debug=channel.debug, threaded=True)
def main():
    """Command-line entry point: generate the feed or start the server."""
    args = parser.parse_args()
    url = 'http://{0}:{1}'.format(args.host, args.port)
    channel = Channel(
        root_dir=path.abspath(args.directory),
        root_url=url,
        host=args.host,
        port=args.port,
        title=args.title,
        link=args.link,
        debug=args.debug,
        video=args.video,
        title_format=args.title_format,
    )
    action = args.action
    if action == 'generate':
        print(channel.as_xml())
    elif action == 'generate_html':
        print(channel.as_html())
    else:
        print('Welcome to the Podcats web server!')
        print('\nYour podcast feed is available at:\n')
        print('\t' + channel.root_url + '\n')
        print('The web interface is available at\n')
        print('\t{url}{web_path}\n'.format(url=url, web_path=WEB_PATH))
        serve(channel)
# ---------------------------------------------------------------------------
# Command-line interface definition (consumed by main()).
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(
    description='Podcats: podcast feed generator and server <%s>.' % __url__
)
parser.add_argument(
    '--host',
    default='localhost',
    help='listen hostname or IP address'
)
parser.add_argument(
    '--port',
    default='5000',
    help='listen tcp port number'
)
parser.add_argument(
    'action',
    metavar='COMMAND',
    choices=['generate', 'generate_html', 'serve'],
    help='`generate` the RSS feed to the terminal, or'
         '`serve` the generated RSS as well as audio files'
         ' via the built-in web server'
)
parser.add_argument(
    'directory',
    metavar='DIRECTORY',
    help='path to a directory with episode audio files',
)
parser.add_argument(
    '--debug',
    action="store_true",
    help='Serve with debug mode on'
)
parser.add_argument('--title', help='optional feed title')
parser.add_argument('--link', help='optional feed link')
parser.add_argument(
    '--video',
    action="store_true",
    help='include video files as well'
)
parser.add_argument(
    '--title-format',
    dest='title_format',
    default='(unknown){title}',
    help='title format string and arguments'
)

if __name__ == '__main__':
    main()
| [
"xml.sax.saxutils.quoteattr",
"flask.Flask",
"mimetypes.guess_type",
"os.walk",
"mutagen.File",
"os.listdir",
"argparse.ArgumentParser",
"humanize.naturalsize",
"os.path.split",
"os.path.getsize",
"time.strptime",
"os.path.splitext",
"urllib.pathname2url",
"os.path.dirname",
"os.path.get... | [((9147, 9249), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': "('Podcats: podcast feed generator and server <%s>.' % __url__)"}), "(description=\n 'Podcats: podcast feed generator and server <%s>.' % __url__)\n", (9170, 9249), False, 'import argparse\n'), ((1029, 1054), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1044, 1054), False, 'import os\n'), ((7828, 7904), 'flask.Flask', 'Flask', (['__name__'], {'static_folder': 'channel.root_dir', 'static_url_path': 'STATIC_PATH'}), '(__name__, static_folder=channel.root_dir, static_url_path=STATIC_PATH)\n', (7833, 7904), False, 'from flask import Flask, Response\n'), ((1152, 1184), 'jinja2.FileSystemLoader', 'FileSystemLoader', (['TEMPLATES_ROOT'], {}), '(TEMPLATES_ROOT)\n', (1168, 1184), False, 'from jinja2 import Environment, FileSystemLoader\n'), ((1459, 1484), 'os.path.getsize', 'os.path.getsize', (['filename'], {}), '(filename)\n', (1474, 1484), False, 'import os\n'), ((1505, 1543), 'mutagen.File', 'mutagen.File', (['self.filename'], {'easy': '(True)'}), '(self.filename, easy=True)\n', (1517, 1543), False, 'import mutagen\n'), ((2522, 2553), 'os.path.basename', 'os.path.basename', (['self.filename'], {}), '(self.filename)\n', (2538, 2553), False, 'import os\n'), ((3287, 3313), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (3303, 3313), False, 'import os\n'), ((3393, 3416), 're.sub', 're.sub', (['"""//"""', '"""/"""', 'path'], {}), "('//', '/', path)\n", (3399, 3416), False, 'import re\n'), ((5255, 5276), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (5265, 5276), False, 'import os\n'), ((6455, 6477), 'os.walk', 'os.walk', (['self.root_dir'], {}), '(self.root_dir)\n', (6462, 6477), False, 'import os\n'), ((1646, 1664), 'mutagen.id3.ID3', 'ID3', (['self.filename'], {}), '(self.filename)\n', (1649, 1664), False, 'from mutagen.id3 import ID3\n'), ((3448, 3466), 'urllib.pathname2url', 
'pathname2url', (['path'], {}), '(path)\n', (3460, 3466), False, 'from urllib import pathname2url\n'), ((3661, 3716), 'collections.defaultdict', 'defaultdict', (["(lambda : '<unset>')", "{'filename': filename}"], {}), "(lambda : '<unset>', {'filename': filename})\n", (3672, 3716), False, 'from collections import defaultdict\n'), ((4801, 4832), 'os.path.getmtime', 'os.path.getmtime', (['self.filename'], {}), '(self.filename)\n', (4817, 4832), False, 'import os\n'), ((5179, 5207), 'os.path.split', 'os.path.split', (['self.filename'], {}), '(self.filename)\n', (5192, 5207), False, 'import os\n'), ((5976, 5987), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5985, 5987), False, 'import os\n'), ((8437, 8465), 'os.path.abspath', 'path.abspath', (['args.directory'], {}), '(args.directory)\n', (8449, 8465), False, 'from os import path\n'), ((2154, 2172), 'xml.sax.saxutils.escape', 'escape', (['self.title'], {}), '(self.title)\n', (2160, 2172), False, 'from xml.sax.saxutils import escape, quoteattr\n'), ((2190, 2209), 'xml.sax.saxutils.quoteattr', 'quoteattr', (['self.url'], {}), '(self.url)\n', (2199, 2209), False, 'from xml.sax.saxutils import escape, quoteattr\n'), ((2228, 2244), 'xml.sax.saxutils.escape', 'escape', (['self.url'], {}), '(self.url)\n', (2234, 2244), False, 'from xml.sax.saxutils import escape, quoteattr\n'), ((2331, 2352), 'email.utils.formatdate', 'formatdate', (['self.date'], {}), '(self.date)\n', (2341, 2352), False, 'from email.utils import formatdate\n'), ((2588, 2618), 'os.path.dirname', 'os.path.dirname', (['self.filename'], {}), '(self.filename)\n', (2603, 2618), False, 'import os\n'), ((2734, 2752), 'xml.sax.saxutils.escape', 'escape', (['self.title'], {}), '(self.title)\n', (2740, 2752), False, 'from xml.sax.saxutils import escape, quoteattr\n'), ((2899, 2932), 'humanize.naturalsize', 'humanize.naturalsize', (['self.length'], {}), '(self.length)\n', (2919, 2932), False, 'import humanize\n'), ((2951, 2972), 'email.utils.formatdate', 'formatdate', 
(['self.date'], {}), '(self.date)\n', (2961, 2972), False, 'from email.utils import formatdate\n'), ((3593, 3624), 'os.path.basename', 'os.path.basename', (['self.filename'], {}), '(self.filename)\n', (3609, 3624), False, 'import os\n'), ((5039, 5074), 'mimetypes.guess_type', 'mimetypes.guess_type', (['self.filename'], {}), '(self.filename)\n', (5059, 5074), False, 'import mimetypes\n'), ((5296, 5316), 'os.path.splitext', 'os.path.splitext', (['fn'], {}), '(fn)\n', (5312, 5316), False, 'import os\n'), ((6588, 6610), 'os.path.join', 'os.path.join', (['root', 'fn'], {}), '(root, fn)\n', (6600, 6610), False, 'import os\n'), ((6638, 6668), 'mimetypes.guess_type', 'mimetypes.guess_type', (['filepath'], {}), '(filepath)\n', (6658, 6668), False, 'import mimetypes\n'), ((4610, 4632), 'time.strptime', 'time.strptime', (['dt', 'fmt'], {}), '(dt, fmt)\n', (4623, 4632), False, 'import time\n'), ((7163, 7181), 'xml.sax.saxutils.escape', 'escape', (['self.title'], {}), '(self.title)\n', (7169, 7181), False, 'from xml.sax.saxutils import escape, quoteattr\n'), ((7207, 7231), 'xml.sax.saxutils.escape', 'escape', (['self.description'], {}), '(self.description)\n', (7213, 7231), False, 'from xml.sax.saxutils import escape, quoteattr\n'), ((7250, 7267), 'xml.sax.saxutils.escape', 'escape', (['self.link'], {}), '(self.link)\n', (7256, 7267), False, 'from xml.sax.saxutils import escape, quoteattr\n'), ((7547, 7565), 'xml.sax.saxutils.escape', 'escape', (['self.title'], {}), '(self.title)\n', (7553, 7565), False, 'from xml.sax.saxutils import escape, quoteattr\n'), ((7626, 7643), 'xml.sax.saxutils.escape', 'escape', (['self.link'], {}), '(self.link)\n', (7632, 7643), False, 'from xml.sax.saxutils import escape, quoteattr\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.graphics.tsaplots as sgt
from statsmodels.tsa.arima_model import ARMA
from scipy.stats.distributions import chi2
import statsmodels.tsa.stattools as sts
# ------------------------
# load data
# ----------
# Read the raw index data and work on a copy so the original stays intact.
raw_csv_data = pd.read_csv("../data/Index2018.csv")
df_comp = raw_csv_data.copy()
# -- make the index a datetime object at business-day frequency
df_comp.date = pd.to_datetime(df_comp.date, dayfirst=True)
df_comp.set_index("date", inplace=True)
df_comp = df_comp.asfreq('b')
# -- fill missing business days by carrying the last observation forward
df_comp = df_comp.fillna(method='ffill')
# -- keep the FTSE series and derive percentage returns from it
df_comp['market_value'] = df_comp.ftse
# BUG FIX: the original assigned to `df`, which is only defined further
# below by the train/test split -- compute returns on df_comp instead and
# drop the first row (its return is NaN by construction).
df_comp['returns'] = df_comp.market_value.pct_change(1).mul(100)
df_comp = df_comp.iloc[1:]
# -- delete redundant data
del df_comp['spx']
del df_comp['dax']
del df_comp['ftse']
del df_comp['nikkei']

# split dataset (on straight data = prices)
# ----------
# 80/20 chronological split -- never shuffle time-series data.
size = int(len(df_comp) * 0.8)
df = df_comp.iloc[:size]
df_test = df_comp.iloc[size:]
# review ACF and PACF (in reality is more functional to run auto_arima vs
# checking ACF/PACF manually, but this is for sake of example)
# ----------
sgt.plot_acf(df.returns, lags=40, zero=False)
plt.title("ACF FTSE Returns", size=24)
sgt.plot_pacf(df.returns, lags=40, zero=False, method='ols')
plt.title("PACF FTSE Returns", size=24)
plt.show()
# => we know data is non-stationary from a previous exercise

# select AR model (by looking to PACF here) and iterating through more
# models ... until the LLR test stops improving
# ----------
model_ret_ar_1 = ARMA(df.returns, order=(1, 0)).fit()
# BUG FIX: the summary was printed from an undefined name `model_ar`;
# print the model that was actually fitted.
print(model_ret_ar_1.summary())
print('----------')
model_ar_4 = ARMA(df.returns, order=(4, 0)).fit()
print(model_ar_4.summary())
print('----------')
model_ar_6 = ARMA(df.returns, order=(6, 0)).fit()
print(model_ar_6.summary())
print('----------')
# => by comparing the LLR stat and AIC/BIC from models' summary we can see
# what is the best order ... (we would find out AR(6,0))
# => remember that auto_arima is much easier...
# compare LLR results across models
# ----------
def LLR_test(mod_1, mod_2, DF=1):
    """Likelihood-ratio test between two nested, *unfitted* models.

    Fits both models, computes the LR statistic 2 * (llf_2 - llf_1) and
    returns the chi-squared p-value with ``DF`` degrees of freedom,
    rounded to 3 decimals.
    """
    llf_restricted = mod_1.fit().llf
    llf_full = mod_2.fit().llf
    statistic = 2 * (llf_full - llf_restricted)
    return chi2.sf(statistic, DF).round(3)
# BUG FIX: the original referenced undefined names (`model_ar_1`,
# `model_ar_7`).  LLR_test fits the models itself, so pass *unfitted*
# ARMA specifications; AR(4) vs AR(6) differ by 2 parameters, hence DF=2.
print('LLR test 1: ', LLR_test(ARMA(df.returns, order=(1, 0)),
                               ARMA(df.returns, order=(4, 0)), DF=3))
print('LLR test 2: ', LLR_test(ARMA(df.returns, order=(4, 0)),
                               ARMA(df.returns, order=(6, 0)), DF=2))

# now let's normalise values and run the AR model again to see what happens ...
# ----------
benchmark = df.market_value.iloc[0]
df['norm'] = df.market_value.div(benchmark).mul(100)
bench_ret = df.returns.iloc[0]
df['norm_ret'] = df.returns.div(bench_ret).mul(100)
# we jump straight to order 6 here without repeating previous steps (as we see on class notes)
model_norm_ret_ar_6 = ARMA(df.norm_ret, order=(6, 0)).fit()
print(model_norm_ret_ar_6.summary())

# analyzing residuals
# ----------
# BUG FIX: store the model *residuals*, not the fitted-results object.
df['res_ret'] = model_norm_ret_ar_6.resid
df.res_ret.mean()  # NOTE: computed but discarded, as in the original notebook-style script
df.res_ret.var()
# -- let's see if there is any significant error that the model has missed (via ACF)
sgt.plot_acf(df.res_ret, zero=False, lags=40)
plt.title("ACF Of Residuals for Returns", size=24)
# -- plotting all residuals
df.res_ret.plot(figsize=(20, 5))
plt.title("Residuals of Returns", size=24)
# plt.show()
| [
"matplotlib.pyplot.title",
"pandas.read_csv",
"statsmodels.graphics.tsaplots.plot_pacf",
"statsmodels.tsa.arima_model.ARMA",
"scipy.stats.distributions.chi2.sf",
"statsmodels.graphics.tsaplots.plot_acf",
"pandas.to_datetime",
"matplotlib.pyplot.show"
] | [((314, 350), 'pandas.read_csv', 'pd.read_csv', (['"""../data/Index2018.csv"""'], {}), "('../data/Index2018.csv')\n", (325, 350), True, 'import pandas as pd\n'), ((433, 476), 'pandas.to_datetime', 'pd.to_datetime', (['df_comp.date'], {'dayfirst': '(True)'}), '(df_comp.date, dayfirst=True)\n', (447, 476), True, 'import pandas as pd\n'), ((1178, 1223), 'statsmodels.graphics.tsaplots.plot_acf', 'sgt.plot_acf', (['df.returns'], {'lags': '(40)', 'zero': '(False)'}), '(df.returns, lags=40, zero=False)\n', (1190, 1223), True, 'import statsmodels.graphics.tsaplots as sgt\n'), ((1226, 1264), 'matplotlib.pyplot.title', 'plt.title', (['"""ACF FTSE Returns"""'], {'size': '(24)'}), "('ACF FTSE Returns', size=24)\n", (1235, 1264), True, 'import matplotlib.pyplot as plt\n'), ((1266, 1326), 'statsmodels.graphics.tsaplots.plot_pacf', 'sgt.plot_pacf', (['df.returns'], {'lags': '(40)', 'zero': '(False)', 'method': '"""ols"""'}), "(df.returns, lags=40, zero=False, method='ols')\n", (1279, 1326), True, 'import statsmodels.graphics.tsaplots as sgt\n'), ((1335, 1374), 'matplotlib.pyplot.title', 'plt.title', (['"""PACF FTSE Returns"""'], {'size': '(24)'}), "('PACF FTSE Returns', size=24)\n", (1344, 1374), True, 'import matplotlib.pyplot as plt\n'), ((1375, 1385), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1383, 1385), True, 'import matplotlib.pyplot as plt\n'), ((3037, 3082), 'statsmodels.graphics.tsaplots.plot_acf', 'sgt.plot_acf', (['df.res_ret'], {'zero': '(False)', 'lags': '(40)'}), '(df.res_ret, zero=False, lags=40)\n', (3049, 3082), True, 'import statsmodels.graphics.tsaplots as sgt\n'), ((3087, 3137), 'matplotlib.pyplot.title', 'plt.title', (['"""ACF Of Residuals for Returns"""'], {'size': '(24)'}), "('ACF Of Residuals for Returns', size=24)\n", (3096, 3137), True, 'import matplotlib.pyplot as plt\n'), ((3197, 3239), 'matplotlib.pyplot.title', 'plt.title', (['"""Residuals of Returns"""'], {'size': '(24)'}), "('Residuals of Returns', size=24)\n", (3206, 3239), 
True, 'import matplotlib.pyplot as plt\n'), ((1590, 1620), 'statsmodels.tsa.arima_model.ARMA', 'ARMA', (['df.returns'], {'order': '(1, 0)'}), '(df.returns, order=(1, 0))\n', (1594, 1620), False, 'from statsmodels.tsa.arima_model import ARMA\n'), ((1687, 1717), 'statsmodels.tsa.arima_model.ARMA', 'ARMA', (['df.returns'], {'order': '(4, 0)'}), '(df.returns, order=(4, 0))\n', (1691, 1717), False, 'from statsmodels.tsa.arima_model import ARMA\n'), ((1784, 1814), 'statsmodels.tsa.arima_model.ARMA', 'ARMA', (['df.returns'], {'order': '(6, 0)'}), '(df.returns, order=(6, 0))\n', (1788, 1814), False, 'from statsmodels.tsa.arima_model import ARMA\n'), ((2760, 2791), 'statsmodels.tsa.arima_model.ARMA', 'ARMA', (['df.norm_ret'], {'order': '(6, 0)'}), '(df.norm_ret, order=(6, 0))\n', (2764, 2791), False, 'from statsmodels.tsa.arima_model import ARMA\n'), ((2211, 2226), 'scipy.stats.distributions.chi2.sf', 'chi2.sf', (['LR', 'DF'], {}), '(LR, DF)\n', (2218, 2226), False, 'from scipy.stats.distributions import chi2\n')] |
##############################################################################
#
# Copyright (c) 2001 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Customizable page templates that come from the filesystem.
"""
import re
import six
from six import get_unbound_function
from AccessControl.class_init import InitializeClass
from AccessControl.SecurityInfo import ClassSecurityInfo
from AccessControl.SecurityManagement import getSecurityManager
from App.special_dtml import DTMLFile
from OFS import bbb
from Products.PageTemplates.PageTemplate import PageTemplate
from Products.PageTemplates.utils import charsetFromMetaEquiv
from Products.PageTemplates.utils import encodingFromXMLPreamble
from Products.PageTemplates.ZopePageTemplate import Src
from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
from Products.PageTemplates.ZopePageTemplate import preferred_encodings
from Shared.DC.Scripts.Script import Script
from .DirectoryView import registerFileExtension
from .DirectoryView import registerMetaType
from .FSObject import FSObject
from .permissions import FTPAccess
from .permissions import View
from .permissions import ViewManagementScreens
from .utils import _checkConditionalGET
from .utils import _dtmldir
from .utils import _setCacheHeaders
# Matches an XML declaration at the start of a document and captures the
# declared encoding, e.g. <?xml version="1.0" encoding="utf-8"?>.
xml_detect_re = re.compile(
    br'^\s*<\?xml\s+(?:[^>]*?encoding=["\']([^"\'>]+))?')
# Extracts the charset parameter from a Content-Type style string.
charset_re = re.compile(r'charset.*?=.*?(?P<charset>[\w\-]*)',
                        re.I | re.M | re.S)
# Unique sentinel object, distinct from any user-supplied value.
_marker = object()
class FSPageTemplate(FSObject, Script, PageTemplate):
    """Wrapper for Page Template.

    A page template whose source lives on the filesystem: the file is
    re-read automatically when it changes (see ``_readFile``) and the
    object can be customized through the ZMI, which creates an editable
    ZODB copy (see ``_createZODBClone``).
    """

    meta_type = 'Filesystem Page Template'
    _owner = None  # Unowned

    manage_options = (
        {'label': 'Customize', 'action': 'manage_main'},)

    security = ClassSecurityInfo()
    security.declareObjectProtected(View)

    security.declareProtected(ViewManagementScreens,  # NOQA: flake8: D001
                              'manage_main')
    manage_main = DTMLFile('custpt', _dtmldir)

    # Declare security for unprotected PageTemplate methods.
    security.declarePrivate('pt_edit', 'write')  # NOQA: flake8: D001

    def __init__(self, id, filepath, fullname=None, properties=None):
        """Initialize from a filesystem path and set up default bindings."""
        FSObject.__init__(self, id, filepath, fullname, properties)
        self.ZBindings_edit(self._default_bindings)

    def _createZODBClone(self):
        """Create a ZODB (editable) equivalent of this object."""
        obj = ZopePageTemplate(self.getId(), self._text, self.content_type)
        # Copy the raw template source; do not expand macros in the clone.
        obj.expand = 0
        obj.write(self.read())
        return obj

    # def ZCacheable_isCachingEnabled(self):
    #     return 0

    def _readFile(self, reparse):
        """Read the data from the filesystem.

        When ``reparse`` is true, (re)read the file, normalize line
        endings on Python 3, detect the content type (an XML preamble
        overrides the text/html default) and decode raw bytes using, in
        order: the declared charset, then each of
        ``preferred_encodings``.
        """
        if reparse:
            if six.PY2:
                # not 'rb', as this is a text file!
                file = open(self._filepath, 'rU')
            else:
                file = open(self._filepath, 'br')
            try:
                data = file.read()
                if not six.PY2:
                    # Normalize Windows/Mac line endings to '\n'
                    # (py2's 'rU' mode did this implicitly).
                    data = data.replace(b'\r\n', b'\n').replace(b'\r', b'\n')
            finally:
                file.close()

            # If we already have a content_type set it must come from a
            # .metadata file and we should always honor that. The content
            # type is initialized as text/html by default, so we only
            # attempt further detection if the default is encountered.
            # One previous misbehavior remains: It is not possible to
            # force a text/html type if parsing detects it as XML.
            encoding = None
            preferred = preferred_encodings[:]
            if getattr(self, 'content_type', 'text/html') == 'text/html':
                xml_info = xml_detect_re.match(data)
                if xml_info:
                    # Smells like xml
                    # set "content_type" from the XML declaration
                    encoding = xml_info.group(1) or 'utf-8'
                    self.content_type = 'text/xml; charset=%s' % encoding

            if not isinstance(data, six.text_type):
                # Raw bytes: work out which charset to decode with.
                if encoding is None:
                    charset = getattr(self, 'charset', None)
                    if charset is None:
                        if self.content_type.startswith('text/html'):
                            mo = charset_re.search(self.content_type)
                            if mo:
                                charset = mo.group(1).lower()
                            if charset is None:
                                # Fall back to a <meta http-equiv> charset.
                                charset = charsetFromMetaEquiv(data)
                        elif self.content_type.startswith('text/xml'):
                            charset = encodingFromXMLPreamble(data)
                        else:
                            raise ValueError('Unsupported content_type: %s' %
                                             self.content_type)
                    if charset is not None:
                        preferred.insert(0, charset)
                else:
                    preferred.insert(0, encoding)
                for enc in preferred:
                    try:
                        data = six.text_type(data, enc)
                        if isinstance(data, six.text_type):
                            break
                    except UnicodeDecodeError:
                        continue
                else:
                    # No candidate encoding worked; decode with the
                    # platform default as a last resort.
                    data = six.text_type(data)

            self.write(data)

    @security.private
    def read(self):
        # Tie in on an opportunity to auto-update
        self._updateFromFS()
        return FSPageTemplate.inheritedAttribute('read')(self)

    # The following is mainly taken from ZopePageTemplate.py

    expand = 0

    output_encoding = 'utf-8'

    __defaults__ = None
    __code__ = ZopePageTemplate.__code__
    _default_bindings = ZopePageTemplate._default_bindings

    security.declareProtected(View, '__call__')  # NOQA: flake8: D001

    def pt_macros(self):
        # Tie in on an opportunity to auto-reload
        self._updateFromFS()
        return FSPageTemplate.inheritedAttribute('pt_macros')(self)

    # NOTE(review): ``extra_context`` is a mutable default argument; it
    # looks read-only here, but confirm _checkConditionalGET and
    # _setCacheHeaders never mutate it.
    def pt_render(self, source=0, extra_context={}):
        self._updateFromFS()  # Make sure the template has been loaded.

        if not source:
            # If we have a conditional get, set status 304 and return
            # no content
            if _checkConditionalGET(self, extra_context):
                return ''

        result = FSPageTemplate.inheritedAttribute('pt_render')(
            self, source, extra_context)
        if not source:
            _setCacheHeaders(self, extra_context)
        return result

    @security.protected(ViewManagementScreens)
    def pt_source_file(self):
        """ Return a file name to be compiled into the TAL code.
        """
        return 'file:%s' % self._filepath

    security.declarePrivate('_ZPT_exec')  # NOQA: flake8: D001
    _ZPT_exec = get_unbound_function(ZopePageTemplate._exec)

    @security.private
    def _exec(self, bound_names, args, kw):
        """Call a FSPageTemplate"""
        try:
            response = self.REQUEST.RESPONSE
        except AttributeError:
            response = None
        # Read file first to get a correct content_type default value.
        self._updateFromFS()

        if 'args' not in kw:
            kw['args'] = args
        bound_names['options'] = kw

        try:
            response = self.REQUEST.RESPONSE
            if 'content-type' not in response.headers:
                response.setHeader('content-type', self.content_type)
        except AttributeError:
            pass

        security = getSecurityManager()
        bound_names['user'] = security.getUser()

        # Retrieve the value from the cache.
        keyset = None
        if self.ZCacheable_isCachingEnabled():
            # Prepare a cache key.
            keyset = {
                # Why oh why?
                # All this code is cut and paste
                # here to make sure that we
                # dont call _getContext and hence can't cache
                # Annoying huh?
                'here': self.aq_parent.getPhysicalPath(),
                'bound_names': bound_names}
            result = self.ZCacheable_get(keywords=keyset)
            if result is not None:
                # Got a cached value.
                return result

        # Execute the template in a new security context.
        security.addContext(self)
        try:
            result = self.pt_render(extra_context=bound_names)
            if keyset is not None:
                # Store the result in the cache.
                self.ZCacheable_set(result, keywords=keyset)
            return result
        finally:
            security.removeContext(self)

        # NOTE(review): this statement is unreachable -- the try block
        # above always returns or raises.
        return result

    # Copy over more methods
    if bbb.HAS_ZSERVER:
        security.declareProtected(FTPAccess,  # NOQA: flake8: D001
                                  'manage_FTPget')
        manage_FTPget = get_unbound_function(ZopePageTemplate.manage_FTPget)

    security.declareProtected(View, 'get_size')  # NOQA: flake8: D001
    get_size = get_unbound_function(ZopePageTemplate.get_size)
    getSize = get_size

    security.declareProtected(ViewManagementScreens,  # NOQA: flake8: D001
                              'PrincipiaSearchSource')
    PrincipiaSearchSource = get_unbound_function(
        ZopePageTemplate.PrincipiaSearchSource)

    security.declareProtected(ViewManagementScreens,  # NOQA: flake8: D001
                              'document_src')
    document_src = get_unbound_function(ZopePageTemplate.document_src)

    pt_getContext = get_unbound_function(ZopePageTemplate.pt_getContext)

    source_dot_xml = Src()
# Expose the template source under ZMI-friendly aliases; the attribute
# names contain a dot, so plain assignment syntax is not possible.
setattr(FSPageTemplate, 'source.xml', FSPageTemplate.source_dot_xml)
setattr(FSPageTemplate, 'source.html', FSPageTemplate.source_dot_xml)

InitializeClass(FSPageTemplate)

# Register the handler for the file extensions served from skin directories.
registerFileExtension('pt', FSPageTemplate)
registerFileExtension('zpt', FSPageTemplate)
registerFileExtension('html', FSPageTemplate)
registerFileExtension('htm', FSPageTemplate)
registerMetaType('Page Template', FSPageTemplate)
| [
"Products.PageTemplates.ZopePageTemplate.Src",
"re.compile",
"six.text_type",
"Products.PageTemplates.utils.encodingFromXMLPreamble",
"AccessControl.class_init.InitializeClass",
"App.special_dtml.DTMLFile",
"AccessControl.SecurityManagement.getSecurityManager",
"Products.PageTemplates.utils.charsetFro... | [((1761, 1831), 're.compile', 're.compile', (['b\'^\\\\s*<\\\\?xml\\\\s+(?:[^>]*?encoding=["\\\\\\\']([^"\\\\\\\'>]+))?\''], {}), '(b\'^\\\\s*<\\\\?xml\\\\s+(?:[^>]*?encoding=["\\\\\\\']([^"\\\\\\\'>]+))?\')\n', (1771, 1831), False, 'import re\n'), ((1844, 1914), 're.compile', 're.compile', (['"""charset.*?=.*?(?P<charset>[\\\\w\\\\-]*)"""', '(re.I | re.M | re.S)'], {}), "('charset.*?=.*?(?P<charset>[\\\\w\\\\-]*)', re.I | re.M | re.S)\n", (1854, 1914), False, 'import re\n'), ((10414, 10445), 'AccessControl.class_init.InitializeClass', 'InitializeClass', (['FSPageTemplate'], {}), '(FSPageTemplate)\n', (10429, 10445), False, 'from AccessControl.class_init import InitializeClass\n'), ((2227, 2246), 'AccessControl.SecurityInfo.ClassSecurityInfo', 'ClassSecurityInfo', ([], {}), '()\n', (2244, 2246), False, 'from AccessControl.SecurityInfo import ClassSecurityInfo\n'), ((2428, 2456), 'App.special_dtml.DTMLFile', 'DTMLFile', (['"""custpt"""', '_dtmldir'], {}), "('custpt', _dtmldir)\n", (2436, 2456), False, 'from App.special_dtml import DTMLFile\n'), ((7420, 7464), 'six.get_unbound_function', 'get_unbound_function', (['ZopePageTemplate._exec'], {}), '(ZopePageTemplate._exec)\n', (7440, 7464), False, 'from six import get_unbound_function\n'), ((9654, 9701), 'six.get_unbound_function', 'get_unbound_function', (['ZopePageTemplate.get_size'], {}), '(ZopePageTemplate.get_size)\n', (9674, 9701), False, 'from six import get_unbound_function\n'), ((9884, 9944), 'six.get_unbound_function', 'get_unbound_function', (['ZopePageTemplate.PrincipiaSearchSource'], {}), '(ZopePageTemplate.PrincipiaSearchSource)\n', (9904, 9944), False, 'from six import get_unbound_function\n'), ((10119, 10170), 'six.get_unbound_function', 'get_unbound_function', (['ZopePageTemplate.document_src'], {}), '(ZopePageTemplate.document_src)\n', (10139, 10170), False, 'from six import get_unbound_function\n'), ((10192, 10244), 'six.get_unbound_function', 
'get_unbound_function', (['ZopePageTemplate.pt_getContext'], {}), '(ZopePageTemplate.pt_getContext)\n', (10212, 10244), False, 'from six import get_unbound_function\n'), ((10267, 10272), 'Products.PageTemplates.ZopePageTemplate.Src', 'Src', ([], {}), '()\n', (10270, 10272), False, 'from Products.PageTemplates.ZopePageTemplate import Src\n'), ((8133, 8153), 'AccessControl.SecurityManagement.getSecurityManager', 'getSecurityManager', ([], {}), '()\n', (8151, 8153), False, 'from AccessControl.SecurityManagement import getSecurityManager\n'), ((9515, 9567), 'six.get_unbound_function', 'get_unbound_function', (['ZopePageTemplate.manage_FTPget'], {}), '(ZopePageTemplate.manage_FTPget)\n', (9535, 9567), False, 'from six import get_unbound_function\n'), ((5879, 5898), 'six.text_type', 'six.text_type', (['data'], {}), '(data)\n', (5892, 5898), False, 'import six\n'), ((5631, 5655), 'six.text_type', 'six.text_type', (['data', 'enc'], {}), '(data, enc)\n', (5644, 5655), False, 'import six\n'), ((5025, 5051), 'Products.PageTemplates.utils.charsetFromMetaEquiv', 'charsetFromMetaEquiv', (['data'], {}), '(data)\n', (5045, 5051), False, 'from Products.PageTemplates.utils import charsetFromMetaEquiv\n'), ((5162, 5191), 'Products.PageTemplates.utils.encodingFromXMLPreamble', 'encodingFromXMLPreamble', (['data'], {}), '(data)\n', (5185, 5191), False, 'from Products.PageTemplates.utils import encodingFromXMLPreamble\n')] |
#!/usr/bin/env python
# Standard imports
import pandas as pd
import numpy as np
# Pytorch
import torch
from torch import nn
# Using sklearn's LASSO implementation
from sklearn.linear_model import Lasso
# Local Files
from models.model_interface import CryptoModel
class LASSO(CryptoModel):
    """Wrapper around scikit-learn's :class:`~sklearn.linear_model.Lasso`.

    Implements the ``CryptoModel`` interface so LASSO can be swapped in
    for the neural forecasters elsewhere in the project.
    """
    def __init__(self, alpha=0.1, warm_start=True, verbose_training=False):
        """Create the LASSO model.
        :alpha: L1 regularisation strength passed straight to sklearn.
        :warm_start: reuse the previous fit's coefficients when re-training.
        :verbose_training: print predictions / coefficient diagnostics.
        """
        # Arguments
        self.alpha = alpha
        self.verbose_training = verbose_training
        self.model = Lasso(alpha=alpha, fit_intercept=True, warm_start=warm_start)
        # set the default plotting color
        self.set_plotting_color()
    def predict(self, sample):
        """Predict the next out of sample timestep
        :sample: array of shape [n_samples, 1, n_features] of timesteps to
            use as input for the predictor(s).
        :returns: numpy array of predictions, one row per input sample
        """
        n_samp, _, n_features = sample.shape
        yhat = self.model.predict(sample.reshape((n_samp, n_features)))
        if self.verbose_training:
            print(f'prediction: {yhat}')
        return yhat
    def train(self, training_set):
        """Train, or re-train, the LASSO regressor.
        :training_set: iterable of (data, target) tensor pairs; each is
            converted via .numpy() and stacked into a design matrix.
        """
        X, Y = [], []
        for data, target in training_set:
            X.append(data.numpy())
            Y.append(target.numpy())
        X = np.vstack(X)
        Y = np.vstack(Y)
        n_samples, _, n_features = X.shape
        X = X.reshape((n_samples, n_features))
        Y = Y.reshape((n_samples, n_features))
        self.model.fit(X, Y)
        #TODO: print out that coefficients (or at least num of) that the L1 normalization leaves
        coef = self.model.coef_
        # all_zeros[i] is True when [0, -0][i] occurs among the fitted
        # coefficients, i.e. whether L1 zeroed anything out at all.
        # NOTE(review): 0 and -0 compare equal, so the two entries are
        # redundant; np.isin(0, coef) would be equivalent.
        all_zeros = np.isin([0,-0], coef)
        if self.verbose_training:
            print(f'All zeros? {all_zeros}')
            print(f'Coefs? {coef.shape}')
            # Dump the full coefficient array when no coefficient was
            # driven to zero (the L1 penalty had no sparsifying effect).
            if np.isin(False, all_zeros):
                print(self.model.coef_)
                print(type(self.model.coef_))
                print(self.model.coef_.shape)
    def get_fullname(self):
        """Get the full-grammar name for this model
        :returns: English phrase as string
        """
        return f"LASSO_alpha-{self.alpha}"
    def get_filename(self):
        """Get the abbreviated (file)name for this model
        :returns: Abbreviated string with underscores
        """
        return f"LASSO_alpha-{self.alpha}"
    def needs_retraining(self):
        """Does this model need regular retraining while forecasting?
        :returns: bool
        """
        return True
    def set_plotting_color(self, color="#FCB97D"):
        """Set color used for plotting
        :color: Hex value string
        """
        self.color = color
    def get_plotting_color(self):
        """return color for graphing distinction
        :returns: str of color
        """
        return self.color
| [
"numpy.vstack",
"sklearn.linear_model.Lasso",
"numpy.isin"
] | [((658, 719), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': 'alpha', 'fit_intercept': '(True)', 'warm_start': 'warm_start'}), '(alpha=alpha, fit_intercept=True, warm_start=warm_start)\n', (663, 719), False, 'from sklearn.linear_model import Lasso\n'), ((1555, 1567), 'numpy.vstack', 'np.vstack', (['X'], {}), '(X)\n', (1564, 1567), True, 'import numpy as np\n'), ((1580, 1592), 'numpy.vstack', 'np.vstack', (['Y'], {}), '(Y)\n', (1589, 1592), True, 'import numpy as np\n'), ((1909, 1931), 'numpy.isin', 'np.isin', (['[0, -0]', 'coef'], {}), '([0, -0], coef)\n', (1916, 1931), True, 'import numpy as np\n'), ((2067, 2092), 'numpy.isin', 'np.isin', (['(False)', 'all_zeros'], {}), '(False, all_zeros)\n', (2074, 2092), True, 'import numpy as np\n')] |
from django import forms
from apps.app_gestor_usuarios.models import db_usuarios, db_manage_contacts
class signupForm(forms.ModelForm):
    """Registration form generated straight from the ``db_usuarios`` model."""

    class Meta:
        model = db_usuarios
        fields = ['name', 'last', 'email', 'password']
        labels = {
            'name': 'Name',
            'last': 'Last',
            'email': 'Email',
            'password': 'Password',
        }
        # Bootstrap styling for every input; the password field is masked.
        widgets = {
            'name': forms.TextInput(attrs={'class': 'form-control'}),
            'last': forms.TextInput(attrs={'class': 'form-control'}),
            'email': forms.EmailInput(attrs={'class': 'form-control'}),
            'password': forms.PasswordInput(attrs={'class': 'form-control'}),
        }
class loginForm(forms.ModelForm):
    """Login form exposing only the email/password pair of ``db_usuarios``."""

    class Meta:
        model = db_usuarios
        fields = ['email', 'password']
        labels = {
            'email': 'Email',
            'password': 'Password',
        }
        # Bootstrap styling; password input is masked.
        widgets = {
            'email': forms.EmailInput(attrs={'class': 'form-control'}),
            'password': forms.PasswordInput(attrs={'class': 'form-control'}),
        }
class addForm(forms.ModelForm):
    """Create/edit form for a contact (``db_manage_contacts``).

    Several widget ``id`` attributes (route, street_number, locality,
    administrative_area_level_1, postal_code, country) match Google
    Places Autocomplete address-component names — presumably a
    front-end script fills them in; confirm against the template JS.
    """
    class Meta:
        model = db_manage_contacts
        fields = [
            'id',
            'associated_user',
            'name',
            'last',
            'email',
            'phone_local',
            'phone_mov',
            'street',
            'street_number',
            'population',
            'state',
            'postalcode',
            'country',
            'url_web',
        ]
        labels = {
            'id':'id',
            'associated_user':'Associated User',
            'name':'Name',
            'last':'Last',
            'email':'Email',
            'phone_local':'Local Phone',
            'phone_mov':'Cel. Phone',
            'street':'Street',
            'street_number':'Street Number',
            'population':'Population',
            'state':'Comunity',
            'postalcode':'Postal code',
            'country':'Country',
            'url_web':'Web or Blog',
        }
        widgets = {
            'id':forms.TextInput(attrs={'class':'form-control'}),
            # The owning user is shown but cannot be edited.
            'associated_user':forms.EmailInput(attrs={'class':'form-control','readonly':'True'}),
            'name': forms.TextInput(attrs={'class':'form-control'}),
            'last': forms.TextInput(attrs={'class':'form-control'}),
            'email': forms.EmailInput(attrs={'class':'form-control'}),
            'phone_local': forms.TextInput(attrs={'class':'form-control'}),
            'phone_mov': forms.TextInput(attrs={'class': 'form-control'}),
            'street': forms.TextInput(attrs={'class': 'form-control', 'id':'route'}),
            'street_number': forms.TextInput(attrs={'class': 'form-control', 'id': 'street_number'}),
            'population': forms.TextInput(attrs={'class': 'form-control', 'id':'locality'}),
            'state': forms.TextInput(attrs={'class': 'form-control', 'id': 'administrative_area_level_1'}),
            'postalcode': forms.TextInput(attrs={'class': 'form-control', 'id':'postal_code'}),
            'country': forms.TextInput(attrs={'class': 'form-control', 'id':'country'}),
            'url_web': forms.URLInput(attrs={'class': 'form-control'}),
        }
"django.forms.URLInput",
"django.forms.PasswordInput",
"django.forms.EmailInput",
"django.forms.TextInput"
] | [((486, 534), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (501, 534), False, 'from django import forms\n'), ((555, 603), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (570, 603), False, 'from django import forms\n'), ((625, 674), 'django.forms.EmailInput', 'forms.EmailInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (641, 674), False, 'from django import forms\n'), ((699, 751), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (718, 751), False, 'from django import forms\n'), ((1052, 1101), 'django.forms.EmailInput', 'forms.EmailInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (1068, 1101), False, 'from django import forms\n'), ((1126, 1178), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (1145, 1178), False, 'from django import forms\n'), ((2193, 2241), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2208, 2241), False, 'from django import forms\n'), ((2272, 2341), 'django.forms.EmailInput', 'forms.EmailInput', ([], {'attrs': "{'class': 'form-control', 'readonly': 'True'}"}), "(attrs={'class': 'form-control', 'readonly': 'True'})\n", (2288, 2341), False, 'from django import forms\n'), ((2360, 2408), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2375, 2408), False, 'from django import forms\n'), ((2429, 2477), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2444, 2477), False, 'from 
django import forms\n'), ((2499, 2548), 'django.forms.EmailInput', 'forms.EmailInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2515, 2548), False, 'from django import forms\n'), ((2576, 2624), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2591, 2624), False, 'from django import forms\n'), ((2650, 2698), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2665, 2698), False, 'from django import forms\n'), ((2722, 2785), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'id': 'route'}"}), "(attrs={'class': 'form-control', 'id': 'route'})\n", (2737, 2785), False, 'from django import forms\n'), ((2815, 2886), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'id': 'street_number'}"}), "(attrs={'class': 'form-control', 'id': 'street_number'})\n", (2830, 2886), False, 'from django import forms\n'), ((2914, 2980), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'id': 'locality'}"}), "(attrs={'class': 'form-control', 'id': 'locality'})\n", (2929, 2980), False, 'from django import forms\n'), ((3002, 3091), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'id': 'administrative_area_level_1'}"}), "(attrs={'class': 'form-control', 'id':\n 'administrative_area_level_1'})\n", (3017, 3091), False, 'from django import forms\n'), ((3115, 3184), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'id': 'postal_code'}"}), "(attrs={'class': 'form-control', 'id': 'postal_code'})\n", (3130, 3184), False, 'from django import forms\n'), ((3208, 3273), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'id': 'country'}"}), "(attrs={'class': 'form-control', 'id': 
'country'})\n", (3223, 3273), False, 'from django import forms\n'), ((3297, 3344), 'django.forms.URLInput', 'forms.URLInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (3311, 3344), False, 'from django import forms\n')] |
from .item import Item
from .entry import Entry
from copy import copy
class Record(object):
    """An (entry, item) pair.

    Records capture the latest entry for a field value and serialise
    as the item's primitive overlaid with the entry's primitive.
    """

    def __init__(self, entry=None, item=None):
        self.entry = entry
        self.item = item

    @property
    def primitive(self):
        """Record as a Python primitive (entry fields win on key clashes)."""
        merged = copy(self.item.primitive)
        merged.update(self.entry.primitive)
        return merged

    @primitive.setter
    def primitive(self, primitive):
        """Split a merged primitive back into an Entry and an Item."""
        self.entry = Entry()
        self.entry.primitive = primitive
        # Whatever is not an entry field belongs to the item.
        remainder = copy(primitive)
        for field in self.entry.fields:
            del remainder[field]
        self.item = Item()
        self.item.primitive = remainder
| [
"copy.copy"
] | [((499, 524), 'copy.copy', 'copy', (['self.item.primitive'], {}), '(self.item.primitive)\n', (503, 524), False, 'from copy import copy\n'), ((791, 806), 'copy.copy', 'copy', (['primitive'], {}), '(primitive)\n', (795, 806), False, 'from copy import copy\n')] |
"""
pyexcel_matplotlib
~~~~~~~~~~~~~~~~~~~
chart drawing plugin for pyexcel
:copyright: (c) 2016-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for further details
"""
from pyexcel.plugins import PyexcelPluginChain
# Register the matplotlib-backed chart renderer for SVG and PNG output.
chain = PyexcelPluginChain(__name__)
chain.add_a_renderer(
    relative_plugin_class_path='plot.MatPlotter',
    file_types=['svg', 'png']
)
| [
"pyexcel.plugins.PyexcelPluginChain"
] | [((258, 286), 'pyexcel.plugins.PyexcelPluginChain', 'PyexcelPluginChain', (['__name__'], {}), '(__name__)\n', (276, 286), False, 'from pyexcel.plugins import PyexcelPluginChain\n')] |
"""
Django settings for petstore project.
Generated by 'django-admin startproject' using Django 2.2.10.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from django.contrib.messages import constants as messages
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# NOTE(review): SECRET_KEY, DEBUG, ALLOWED_HOSTS and DATABASES are not
# defined in this module — presumably supplied by an environment-specific
# settings file or imported elsewhere; confirm before deploying.
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django.contrib.humanize',
    # third party
    'crispy_forms',
    'allauth',
    'allauth.account',
    'storages',
    # local
    'accounts.apps.AccountsConfig',
    'pages.apps.PagesConfig',
    'products.apps.ProductsConfig',
    'basket.apps.BasketConfig',
    'checkout.apps.CheckoutConfig',
    'orders.apps.OrdersConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # 'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # user middleware
    'basket.middleware.BasketMiddleware',
]
ROOT_URLCONF = 'petstore.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'petstore.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# User defined settings
AUTH_USER_MODEL = 'accounts.CustomUser'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# AWS Setup for media store on Heroku
# When USE_S3 is enabled, both static and media files are served from the
# bucket; otherwise they are served from local directories.
USE_S3 = os.getenv('USE_S3') == 'TRUE'
if USE_S3:
    # aws settings
    AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
    AWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME')
    AWS_DEFAULT_ACL = None
    AWS_S3_CUSTOM_DOMAIN = f'{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com'
    AWS_S3_OBJECT_PARAMETERS = {'CacheControl': 'max-age=86400'}
    # s3 static settings
    STATIC_LOCATION = 'static'
    STATIC_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/'
    STATICFILES_STORAGE = 'petstore.storage_backends.StaticStorage'
    # s3 public media settings
    PUBLIC_MEDIA_LOCATION = 'media'
    MEDIA_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{PUBLIC_MEDIA_LOCATION}/'
    DEFAULT_FILE_STORAGE = 'petstore.storage_backends.PublicMediaStorage'
else:
    STATIC_URL = '/staticfiles/'
    STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
    MEDIA_URL = '/mediafiles/'
    MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Bootstrap class mappings for django messages
MESSAGE_TAGS = {
    messages.DEBUG: 'alert-info',
    messages.INFO: 'alert-info',
    messages.SUCCESS: 'alert-success',
    messages.WARNING: 'alert-warning',
    messages.ERROR: 'alert-danger',
}
# django-allauth config
SITE_ID = 1
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
)
LOGIN_REDIRECT_URL = 'home'
ACCOUNT_LOGOUT_REDIRECT = 'home'
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_PRESERVE_USERNAME_CASING = False
# custom forms to override allauth defaults
ACCOUNT_FORMS = {
    'signup': 'accounts.forms.CustomSignupForm',
    'login': 'accounts.forms.CustomLoginForm',
}
# Email-only authentication: no username field on the user model.
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_USER_MODEL_USERNAME_FIELD = None
# stripe keys
STRIPE_TEST_PUBLISHABLE_KEY = os.environ.get('STRIPE_TEST_PUBLISHABLE_KEY')
STRIPE_TEST_SECRET_KEY = os.environ.get('STRIPE_TEST_SECRET_KEY')
# whitenoise setting
# STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| [
"os.path.join",
"os.environ.get",
"os.path.abspath",
"os.getenv"
] | [((5533, 5578), 'os.environ.get', 'os.environ.get', (['"""STRIPE_TEST_PUBLISHABLE_KEY"""'], {}), "('STRIPE_TEST_PUBLISHABLE_KEY')\n", (5547, 5578), False, 'import os\n'), ((5604, 5644), 'os.environ.get', 'os.environ.get', (['"""STRIPE_TEST_SECRET_KEY"""'], {}), "('STRIPE_TEST_SECRET_KEY')\n", (5618, 5644), False, 'import os\n'), ((3499, 3518), 'os.getenv', 'os.getenv', (['"""USE_S3"""'], {}), "('USE_S3')\n", (3508, 3518), False, 'import os\n'), ((3583, 3613), 'os.getenv', 'os.getenv', (['"""AWS_ACCESS_KEY_ID"""'], {}), "('AWS_ACCESS_KEY_ID')\n", (3592, 3613), False, 'import os\n'), ((3642, 3676), 'os.getenv', 'os.getenv', (['"""AWS_SECRET_ACCESS_KEY"""'], {}), "('AWS_SECRET_ACCESS_KEY')\n", (3651, 3676), False, 'import os\n'), ((3707, 3743), 'os.getenv', 'os.getenv', (['"""AWS_STORAGE_BUCKET_NAME"""'], {}), "('AWS_STORAGE_BUCKET_NAME')\n", (3716, 3743), False, 'import os\n'), ((4376, 4413), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""staticfiles"""'], {}), "(BASE_DIR, 'staticfiles')\n", (4388, 4413), False, 'import os\n'), ((4462, 4498), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""mediafiles"""'], {}), "(BASE_DIR, 'mediafiles')\n", (4474, 4498), False, 'import os\n'), ((4520, 4552), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""static"""'], {}), "(BASE_DIR, 'static')\n", (4532, 4552), False, 'import os\n'), ((495, 520), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (510, 520), False, 'import os\n'), ((1933, 1968), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""templates"""'], {}), "(BASE_DIR, 'templates')\n", (1945, 1968), False, 'import os\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# From /opt/recon-ng/recon/mixins/threads.py
from Queue import Queue, Empty
import threading
import time
import logging
# Configure root logging once at import time; every "[+]"-prefixed line
# in the output comes through this handler.
logging.basicConfig(level=logging.INFO, format="[+] %(message)s")
logger = logging.getLogger("mutilthreads")
class ThreadingMixin(object):
    """Mixin that runs ``module_thread`` over a work list on a thread pool.

    Subclasses override :meth:`module_thread`; ``threads(items, *extra)``
    queues ``items`` and calls ``module_thread(item, *extra)`` from
    ``threadNum`` daemon worker threads.
    """

    def __init__(self):
        self.stopped = threading.Event()  # signals workers to exit
        self.queue = Queue()
        self.threadNum = 10  # size of the worker pool

    def _thread_wrapper(self, *args):
        """Worker loop: drain the queue until the stop event is set."""
        while not self.stopped.is_set():
            try:
                item = self.queue.get_nowait()
            except Empty:
                continue
            try:
                self.module_thread(item, *args)
            except Exception:
                # Log the full traceback instead of silently dropping it
                # (the old bare ``except`` hid every failure behind a
                # fixed "thread exception" message).
                logging.getLogger("mutilthreads").exception('thread exception')
            finally:
                self.queue.task_done()

    def threads(self, *args):
        """Process ``args[0]`` on the pool; extra args go to module_thread.

        :args[0]: iterable of work items
        :args[1:]: extra positional arguments forwarded to module_thread
        """
        # Allow the mixin to be reused: a previous run leaves the stop
        # event set, which would make workers exit immediately.
        self.stopped.clear()
        for item in args[0]:
            self.queue.put(item)
        workers = [
            threading.Thread(target=self._thread_wrapper, args=args[1:])
            for _ in range(self.threadNum)
        ]
        for worker in workers:
            worker.daemon = True
            worker.start()
        try:
            # Poll so Ctrl-C can interrupt the wait, then block until
            # every queued task has been marked done.
            while not self.queue.empty():
                time.sleep(0.7)
            self.queue.join()
        except KeyboardInterrupt:
            pass
        finally:
            # Signal workers *before* joining them: the original order
            # (join first, set the event last) dead-locked because the
            # workers never observed the stop flag.
            self.stopped.set()
            for worker in workers:
                worker.join()

    def module_thread(self, item, *args):
        """Per-item work; subclasses override. Default just logs the item."""
        logging.getLogger("mutilthreads").info(item)
if __name__ == '__main__':
    # define a new ThreadingMixin's subclass
    class demo(ThreadingMixin):
        def __init__(self):
            super(demo, self).__init__()
        def module_thread(self, item, callback):
            # Each worker formats its item through the shared callback.
            logger.info(callback(item))
    def callback(word):
        return "abc - %s" % word
    words = []
    # Expects a wordlists.txt file next to the script, one item per line.
    with open('wordlists.txt') as f:
        words = [i.strip() for i in f]
    d = demo()
    d.threads(words, callback)
| [
"logging.basicConfig",
"logging.getLogger",
"time.sleep",
"threading.Event",
"threading.Thread",
"Queue.Queue"
] | [((170, 235), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""[+] %(message)s"""'}), "(level=logging.INFO, format='[+] %(message)s')\n", (189, 235), False, 'import logging\n'), ((245, 278), 'logging.getLogger', 'logging.getLogger', (['"""mutilthreads"""'], {}), "('mutilthreads')\n", (262, 278), False, 'import logging\n'), ((358, 375), 'threading.Event', 'threading.Event', ([], {}), '()\n', (373, 375), False, 'import threading\n'), ((397, 404), 'Queue.Queue', 'Queue', ([], {}), '()\n', (402, 404), False, 'from Queue import Queue, Empty\n'), ((931, 991), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._thread_wrapper', 'args': 'args[1:]'}), '(target=self._thread_wrapper, args=args[1:])\n', (947, 991), False, 'import threading\n'), ((1200, 1215), 'time.sleep', 'time.sleep', (['(0.7)'], {}), '(0.7)\n', (1210, 1215), False, 'import time\n')] |
from BusquedasSem import *
import seaborn as sns
def main():
    """Compute the pairwise PCA-score matrix for every abstract in
    client0-sort.csv and display it as a heatmap.
    """
    df = pd.read_csv('./client0-sort.csv')
    df_abstract = df['Abstract']
    l = df_abstract.size
    abstracts = df_abstract.values
    PCA_score = np.zeros((l, l))
    abstracts_aux = preprocessing_abstracts_PCA(abstracts)
    for i in range(l):
        #PCA_score[i][:] = PCAscore2(thoughtobeat(words=abstracts_aux[i], abstracts=abstracts_aux))
        # Score abstract i against every abstract, then copy the result
        # row element by element (a slice assignment would also work).
        aux = PCAscore2(
            thoughtobeat(words=abstracts_aux[i], abstracts=abstracts_aux))
        for j in range(l):
            PCA_score[i][j] = aux[j]
    print(PCA_score)
    PCA_score = pd.DataFrame(PCA_score)
    sns.set()
    sns.heatmap(PCA_score)
    # NOTE(review): sns.plt was removed in modern seaborn releases —
    # presumably this targets an old version; newer code should use
    # matplotlib.pyplot.show() directly. Confirm the pinned version.
    sns.plt.show()
def preprocessing_abstracts_PCA(abstracts):
    """Run the PCA text-cleaning pipeline over every abstract.

    Lower-cases, strips punctuation, English stop-words, cardinal
    numbers (CD) and determiners (DT), then stems/lemmatises.
    """
    processed = []
    for raw in abstracts:
        cleaned = deleteStop(text=deletePunt(text=minimizar(raw)), leng='english')
        cleaned = deleteWord('CD', cleaned)
        cleaned = deleteWord('DT', cleaned)
        processed.append(stemmingLemmatizer(cleaned))
    return processed
def simpleScore(abstract_i, abstract_j, gamma):
    """Similarity score between two tokenised abstracts.

    For every token pair (i, j) the relative frequency of i in
    ``abstract_j`` plus the relative frequency of j in ``abstract_i`` is
    recorded; the score is 1 plus the sum over those pair frequencies f of
    log(gamma + (f / f_max)^(3/4) / f_total^(3/4)).

    :param abstract_i: list of tokens of the first abstract
    :param abstract_j: list of tokens of the second abstract
    :param gamma: smoothing constant added inside the logarithm
    :return: the score, or ``-inf`` when the abstracts share no tokens
        (or either abstract is empty — the original crashed on that case)
    """
    # Guard empty input: the relative-frequency divisions below would
    # otherwise raise ZeroDivisionError.
    if not abstract_i or not abstract_j:
        return -math.inf
    l_i = len(abstract_i)
    l_j = len(abstract_j)
    freq = []
    freq_acum = 0
    for i in abstract_i:
        for j in abstract_j:
            freq_i = abstract_j.count(i) / l_j
            freq_j = abstract_i.count(j) / l_i
            freq.append(freq_i + freq_j)
            freq_acum += freq_i + freq_j
    # No shared vocabulary at all: every pair frequency is zero.
    # (Hoisted out of the loop — the original re-tested this constant
    # on every iteration.)
    if freq_acum == 0:
        return -math.inf
    maximo = max(freq)
    score = 1
    for n in freq:
        score += math.log(gamma + (n / maximo) ** (3 / 4) / freq_acum ** (3 / 4))
    return score
def Score_abstract_preprocessing(abstract):
    """Clean one abstract for scoring and return its token list.

    Lower-cases, strips punctuation, English stop-words and cardinal
    numbers (CD), stems/lemmatises, then splits on whitespace.
    """
    cleaned = minimizar(abstract)
    cleaned = deletePunt(text=cleaned)
    cleaned = deleteStop(text=cleaned, leng='english')
    cleaned = deleteWord('CD', cleaned)
    return stemmingLemmatizer(cleaned).split()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"seaborn.plt.show",
"seaborn.set",
"seaborn.heatmap"
] | [((651, 660), 'seaborn.set', 'sns.set', ([], {}), '()\n', (658, 660), True, 'import seaborn as sns\n'), ((665, 687), 'seaborn.heatmap', 'sns.heatmap', (['PCA_score'], {}), '(PCA_score)\n', (676, 687), True, 'import seaborn as sns\n'), ((692, 706), 'seaborn.plt.show', 'sns.plt.show', ([], {}), '()\n', (704, 706), True, 'import seaborn as sns\n')] |
from pathlib import Path
from typing import Any, Generator, Optional, Tuple
from cv2 import CAP_PROP_BUFFERSIZE, VideoCapture
from numpy import ndarray
from zoloto.marker_type import MarkerType
from .base import BaseCamera
from .mixins import IterableCameraMixin, VideoCaptureMixin, ViewableCameraMixin
from .utils import (
get_video_capture_resolution,
set_video_capture_resolution,
validate_calibrated_video_capture_resolution,
)
def find_camera_ids() -> Generator[int, None, None]:
    """Yield the ids of cameras that can currently be opened.

    Probes ids 0-7, mirroring what ``VideoCapture(-1)`` would scan.
    """
    for candidate in range(8):
        probe = VideoCapture(candidate)
        is_open = probe.isOpened()
        probe.release()
        if is_open:
            yield candidate
class Camera(VideoCaptureMixin, IterableCameraMixin, BaseCamera, ViewableCameraMixin):
    """Marker camera backed by a persistent OpenCV ``VideoCapture`` handle."""

    def __init__(
        self,
        camera_id: int,
        *,
        marker_size: Optional[int] = None,
        marker_type: MarkerType,
        calibration_file: Optional[Path] = None,
        resolution: Optional[Tuple[int, int]] = None,
    ) -> None:
        super().__init__(
            marker_size=marker_size,
            marker_type=marker_type,
            calibration_file=calibration_file,
        )
        self.camera_id = camera_id
        # The capture handle stays open for the lifetime of this object.
        self.video_capture = self.get_video_capture(self.camera_id)
        if resolution is not None:
            self._set_resolution(resolution)
        if self.calibration_params is not None:
            # An explicitly requested resolution overrides the calibrated one.
            validate_calibrated_video_capture_resolution(
                self.video_capture,
                self.calibration_params,
                override=resolution is not None,
            )

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}: {self.camera_id}>"

    def get_video_capture(self, camera_id: int) -> VideoCapture:
        """Open the device with a 1-frame buffer to minimise frame lag."""
        cap = VideoCapture(camera_id)
        cap.set(CAP_PROP_BUFFERSIZE, 1)
        return cap

    def _set_resolution(self, resolution: Tuple[int, int]) -> None:
        set_video_capture_resolution(self.video_capture, resolution)

    def get_resolution(self) -> Tuple[int, int]:
        return get_video_capture_resolution(self.video_capture)

    def capture_frame(self) -> ndarray:
        # Hack: Double capture frames to fill buffer.
        self.video_capture.read()
        return super().capture_frame()

    def close(self) -> None:
        """Release the underlying capture device."""
        super().close()
        self.video_capture.release()

    @classmethod
    def discover(cls, **kwargs: Any) -> Generator["Camera", None, None]:
        """Yield a Camera for every device id that can be opened."""
        for camera_id in find_camera_ids():
            yield cls(camera_id, **kwargs)
class SnapshotCamera(VideoCaptureMixin, BaseCamera):
    """
    A modified version of Camera optimised for single use.
    - Doesn't keep the camera open between captures
    """

    def __init__(
        self,
        camera_id: int,
        *,
        marker_size: Optional[int] = None,
        marker_type: MarkerType,
        calibration_file: Optional[Path] = None,
        resolution: Optional[Tuple[int, int]] = None,
    ) -> None:
        super().__init__(
            marker_size=marker_size,
            marker_type=marker_type,
            calibration_file=calibration_file,
        )
        self.camera_id = camera_id
        # May be None until the first capture discovers the device resolution.
        self._resolution = resolution

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}: {self.camera_id}>"

    def get_video_capture(self, camera_id: int) -> VideoCapture:
        """Open the device, applying or recording its resolution."""
        video_capture = VideoCapture(camera_id)
        if self._resolution is not None:
            set_video_capture_resolution(video_capture, self._resolution)
        else:
            self._resolution = get_video_capture_resolution(video_capture)
        if self.calibration_params is not None:
            validate_calibrated_video_capture_resolution(
                video_capture, self.calibration_params, override=False
            )
        return video_capture

    def get_resolution(self) -> Tuple[int, int]:
        if self._resolution is None:
            raise ValueError(
                "Cannot find resolution of camera until at least 1 frame has been captured."
            )
        return self._resolution

    def capture_frame(self) -> ndarray:
        # Open the device just for this one frame, then release it.
        self.video_capture = self.get_video_capture(self.camera_id)
        frame = super().capture_frame()
        self.video_capture.release()
        return frame

    @classmethod
    def discover(cls, **kwargs: Any) -> Generator["SnapshotCamera", None, None]:
        """Yield a SnapshotCamera for every device id that can be opened."""
        for camera_id in find_camera_ids():
            yield cls(camera_id, **kwargs)
| [
"cv2.VideoCapture"
] | [((654, 677), 'cv2.VideoCapture', 'VideoCapture', (['camera_id'], {}), '(camera_id)\n', (666, 677), False, 'from cv2 import CAP_PROP_BUFFERSIZE, VideoCapture\n'), ((1901, 1924), 'cv2.VideoCapture', 'VideoCapture', (['camera_id'], {}), '(camera_id)\n', (1913, 1924), False, 'from cv2 import CAP_PROP_BUFFERSIZE, VideoCapture\n'), ((3534, 3557), 'cv2.VideoCapture', 'VideoCapture', (['camera_id'], {}), '(camera_id)\n', (3546, 3557), False, 'from cv2 import CAP_PROP_BUFFERSIZE, VideoCapture\n')] |
from prefixdate import parse_parts
from opensanctions import helpers as h
from opensanctions.util import remove_namespace
def parse_address(context, el):
    """Turn an ``<address>``-style XML element into an address entity."""
    raw_country = el.get("countryDescription")
    # The source encodes a missing country as the literal string UNKNOWN.
    country = None if raw_country == "UNKNOWN" else raw_country
    # context.log.info("Addrr", el=el)
    return h.make_address(
        context,
        street=el.get("street"),
        po_box=el.get("poBox"),
        city=el.get("city"),
        place=el.get("place"),
        postal_code=el.get("zipCode"),
        region=el.get("region"),
        country=country,
        country_code=el.get("countryIso2Code"),
    )
def parse_entry(context, entry):
    """Emit an entity and its sanction for one <sanctionEntity> element.

    Derives the entity schema from the subject type, attaches the
    publishing regulation as a Sanction, and emits Passport /
    Identification entities, addresses, birth data and citizenships
    found below the entry.
    """
    subject_type = entry.find("./subjectType")
    schema = context.lookup_value("subject_type", subject_type.get("code"))
    if schema is None:
        context.log.warning("Unknown subject type", type=subject_type)
        return
    entity = context.make(schema)
    entity.id = context.make_slug(entry.get("euReferenceNumber"))
    entity.add("notes", entry.findtext("./remark"))
    entity.add("topics", "sanction")

    sanction = h.make_sanction(context, entity)
    regulation = entry.find("./regulation")
    source_url = regulation.findtext("./publicationUrl", "")
    sanction.set("sourceUrl", source_url)
    sanction.add("program", regulation.get("programme"))
    sanction.add("reason", regulation.get("numberTitle"))
    sanction.add("startDate", regulation.get("entryIntoForceDate"))
    sanction.add("listingDate", regulation.get("publicationDate"))

    for name in entry.findall("./nameAlias"):
        # BUG FIX: the "strong" flag lives on the nameAlias element; the
        # old code read entry.get("strong"), which is always None, so
        # weak aliases were never detected.
        if name.get("strong") == "false":
            entity.add("weakAlias", name.get("wholeName"))
        else:
            entity.add("name", name.get("wholeName"))
        entity.add("title", name.get("title"), quiet=True)
        entity.add("firstName", name.get("firstName"), quiet=True)
        entity.add("middleName", name.get("middleName"), quiet=True)
        entity.add("lastName", name.get("lastName"), quiet=True)
        entity.add("position", name.get("function"), quiet=True)
        gender = h.clean_gender(name.get("gender"))
        entity.add("gender", gender, quiet=True)

    for node in entry.findall("./identification"):
        # Renamed from `type`/`schema` to avoid shadowing the builtin and
        # the entity schema above.
        id_type = node.get("identificationTypeCode")
        id_schema = "Passport" if id_type == "passport" else "Identification"
        passport = context.make(id_schema)
        passport.id = context.make_id("ID", entity.id, node.get("logicalId"))
        passport.add("holder", entity)
        passport.add("authority", node.get("issuedBy"))
        passport.add("type", node.get("identificationTypeDescription"))
        passport.add("number", node.get("number"))
        passport.add("number", node.get("latinNumber"))
        # The original added issueDate twice; one add suffices.
        # TODO: the duplicate may have been meant as an end/expiry date.
        passport.add("startDate", node.get("issueDate"))
        passport.add("country", node.get("countryIso2Code"))
        passport.add("country", node.get("countryDescription"))
        for remark in node.findall("./remark"):
            passport.add("summary", remark.text)
        context.emit(passport)

    for node in entry.findall("./address"):
        address = parse_address(context, node)
        h.apply_address(context, entity, address)
        for child in node.getchildren():
            # BUG FIX: `child.tag in ("regulationSummary")` tested
            # substring membership in a *string* (no trailing comma, so
            # no tuple); use equality instead.
            if child.tag == "regulationSummary":
                continue
            elif child.tag == "remark":
                entity.add("notes", child.text)
            elif child.tag == "contactInfo":
                prop = context.lookup_value("contact_info", child.get("key"))
                if prop is None:
                    context.log.warning("Unknown contact info", node=child)
                else:
                    entity.add(prop, child.get("value"))
            else:
                context.log.warning("Unknown address component", node=child)

    for birth in entry.findall("./birthdate"):
        partial_birth = parse_parts(
            birth.get("year"), birth.get("month"), birth.get("day")
        )
        entity.add("birthDate", birth.get("birthdate"))
        entity.add("birthDate", partial_birth)
        address = parse_address(context, birth)
        if address is not None:
            entity.add("birthPlace", address.get("full"))
            entity.add("country", address.get("country"))

    for node in entry.findall("./citizenship"):
        entity.add("nationality", node.get("countryIso2Code"), quiet=True)
        entity.add("nationality", node.get("countryDescription"), quiet=True)

    context.emit(entity, target=True, unique=True)
    context.emit(sanction)
def crawl(context):
    """Download the sanctions XML export and emit one entity per entry."""
    source_path = context.fetch_resource("source.xml", context.dataset.data.url)
    context.export_resource(source_path, "text/xml", title=context.SOURCE_TITLE)
    tree = context.parse_resource_xml(source_path)
    tree = remove_namespace(tree)
    for entry in tree.findall(".//sanctionEntity"):
        parse_entry(context, entry)
| [
"opensanctions.helpers.apply_address",
"opensanctions.helpers.make_sanction",
"opensanctions.util.remove_namespace"
] | [((1084, 1116), 'opensanctions.helpers.make_sanction', 'h.make_sanction', (['context', 'entity'], {}), '(context, entity)\n', (1099, 1116), True, 'from opensanctions import helpers as h\n'), ((4808, 4829), 'opensanctions.util.remove_namespace', 'remove_namespace', (['doc'], {}), '(doc)\n', (4824, 4829), False, 'from opensanctions.util import remove_namespace\n'), ((3190, 3231), 'opensanctions.helpers.apply_address', 'h.apply_address', (['context', 'entity', 'address'], {}), '(context, entity, address)\n', (3205, 3231), True, 'from opensanctions import helpers as h\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: <NAME>
@license: Apache Licence
@file: prepare_training_data.py
@time: 2019/12/19
@contact: <EMAIL>
将conll的v4_gold_conll文件格式转成模型训练所需的jsonlines数据格式
"""
import argparse
import json
import logging
import os
import re
import sys
from collections import defaultdict
from typing import List, Tuple
import tensorflow as tf
import data_preprocess.conll as conll
from bert.tokenization import FullTokenizer
# Command-line interface: data locations, language and the BERT window size.
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--data_dir', type=str, help='The data directory to save *.v4_gold_conll and *.jsonlines')
parser.add_argument('--language', type=str, default='english', help='The language to process.')
parser.add_argument('--vocab_file', type=str, help='The original vocab file for bert tokenization')
parser.add_argument('--sliding_window_size', type=int, default=384, help='Sliding window size for BERT processing')
args = parser.parse_args()
# Log to both a file and stdout.
handlers = [logging.FileHandler(filename='prepare_training_data.log'), logging.StreamHandler(sys.stdout)]
logging.basicConfig(level=logging.INFO, handlers=handlers,
                    format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s')
logger = logging.getLogger()
# Reserved BERT vocab tokens used to bracket inserted speaker names.
SPEAKER_START = '[unused19]'
SPEAKER_END = '[unused73]'
def read_conll_file(conll_file_path: str) -> List[Tuple]:
    """Read a *_gold_conll file and split it into documents.

    :param conll_file_path: path to a CoNLL-2012 style annotation file
    :return: list of (doc_key, lines) tuples, one per document
    """
    documents = []
    with open(conll_file_path) as source:
        for raw_line in source:
            header = re.match(conll.BEGIN_DOCUMENT_REGEX, raw_line)
            if header:
                key = conll.get_doc_key(header.group(1), header.group(2))
                documents.append((key, []))
            elif raw_line.startswith("#end document"):
                continue
            else:
                # Body line: attach it to the document currently being read.
                documents[-1][1].append(raw_line.strip())
    return documents
def normalize_word(word, language):
    """Normalize a CoNLL token.

    For Arabic, everything from the first '#' onwards is dropped (note: a
    word without '#' loses its last character, since find returns -1 — this
    mirrors the original behavior). The escaped tokens '/.' and '/?' are
    unescaped to '.' and '?'.
    """
    if language == "arabic":
        word = word[: word.find("#")]
    return word[1:] if word in ("/.", "/?") else word
def parse_document(document: Tuple[str, List], language: str) -> dict:
    """
    Extract sentences, speaker turns and coreference clusters from one
    CoNLL document.

    :param document: (doc_key, annotation lines) as produced by read_conll_file
    :param language: english, chinese or arabic
    :return: dict with keys doc_key / sentences / speakers / clusters
    """
    doc_key, lines = document
    sentences = [[]]
    speakers = []          # (word_idx, speaker) recorded only when the speaker changes
    coreferences = []      # last CoNLL column, one entry per word
    word_idx = -1
    previous_speaker = ''
    for line in lines:
        fields = line.split()
        if fields:
            assert len(fields) >= 12
            word_idx += 1
            sentences[-1].append(normalize_word(fields[3], language))
            speaker = fields[9]
            if speaker != previous_speaker:
                speakers.append((word_idx, speaker))
                previous_speaker = speaker
            coreferences.append(fields[-1])
        else:
            # Blank line terminates the current sentence.
            sentences.append([])
    return {
        'doc_key': doc_key,
        'sentences': sentences[:-1],
        'speakers': speakers,
        'clusters': coreference_annotations_to_clusters(coreferences),
    }
def coreference_annotations_to_clusters(annotations: List[str]) -> List[List[Tuple]]:
    """
    Convert per-token CoNLL coreference columns into mention clusters.

    :param annotations: one coreference string per token ('-' means none;
        '(3' opens span 3, '3)' closes it, '(3)' is a one-token span,
        multiple annotations are joined with '|')
    :return: list of clusters, each a list of (start, end) token spans
    """
    clusters = defaultdict(list)
    open_spans = defaultdict(list)   # cluster id -> stack of open start indices
    for token_idx, annotation in enumerate(annotations):
        if annotation == '-':
            continue
        for part in annotation.split('|'):
            cluster_id = int(part.strip('()'))
            opens = part.startswith('(')
            closes = part.endswith(')')
            if opens and closes:
                clusters[cluster_id].append((token_idx, token_idx))
            elif opens:
                open_spans[cluster_id].append(token_idx)
            elif closes:
                clusters[cluster_id].append((open_spans[cluster_id].pop(), token_idx))
            else:
                raise NotImplementedError
    # Every opened span must have been closed.
    assert not any(open_spans.values())
    return list(clusters.values())
def checkout_clusters(doc_info):
    """Print each cluster's mentions rendered as text, for a manual sanity check."""
    flat_words = [word for sentence in doc_info['sentences'] for word in sentence]
    rendered = []
    for cluster in doc_info['clusters']:
        rendered.append([' '.join(flat_words[start:end + 1]) for start, end in cluster])
    print(rendered)
def tokenize_document(doc_info: dict, tokenizer: FullTokenizer) -> dict:
    """
    Split every word into wordpiece sub-tokens and remap all annotations
    (sentence ids, speakers, cluster spans) to sub-token indices.

    :param doc_info: output of parse_document
    :param tokenizer: wordpiece tokenizer providing .tokenize
    :return: dict with sub-token level document information
    """
    sub_tokens: List[str] = []      # all sub tokens of the document
    sentence_map: List[int] = []    # sub token -> sentence id
    subtoken_map: List[int] = []    # sub token -> original word id
    word_idx = -1
    for sentence_id, sentence in enumerate(doc_info['sentences']):
        for token in sentence:
            word_idx += 1
            pieces = tokenizer.tokenize(token)
            sub_tokens += pieces
            sentence_map += [sentence_id] * len(pieces)
            subtoken_map += [word_idx] * len(pieces)

    def first_sub(word):
        # first sub-token produced by this word
        return subtoken_map.index(word)

    def last_sub(word):
        # last sub-token produced by this word
        return len(subtoken_map) - 1 - subtoken_map[::-1].index(word)

    speakers = {first_sub(word_index): tokenizer.tokenize(speaker)
                for word_index, speaker in doc_info['speakers']}
    clusters = [[(first_sub(start), last_sub(end)) for start, end in cluster]
                for cluster in doc_info['clusters']]
    return {'sub_tokens': sub_tokens, 'sentence_map': sentence_map,
            'subtoken_map': subtoken_map, 'speakers': speakers,
            'clusters': clusters, 'doc_key': doc_info['doc_key']}
def expand_with_speakers(tokenized_document: dict) -> Tuple[List[str], List[int]]:
    """
    Insert speaker-name sub-tokens, bracketed by the SPEAKER_START/SPEAKER_END
    marker tokens, in front of the position where each speaker turn begins.

    :param tokenized_document: output of tokenize_document
    :return: (expanded token list, parallel mask list mapping each expanded
        position back to its sub-token index; inserted speaker tokens get -1)
    """
    tokens_out = []
    masks_out = []
    speaker_at = tokenized_document['speakers']
    for idx, token in enumerate(tokenized_document['sub_tokens']):
        if idx in speaker_at:
            insertion = [SPEAKER_START, *speaker_at[idx], SPEAKER_END]
            tokens_out += insertion
            masks_out += [-1] * len(insertion)
        tokens_out.append(token)
        masks_out.append(idx)
    return tokens_out, masks_out
def construct_sliding_windows(sequence_length: int, sliding_window_size: int):
    """
    Build half-overlapping windows over a token sequence for BERT processing.

    Each window is (start, end, mask) where the mask marks the positions this
    window is responsible for; every sequence position is owned by exactly one
    window.

    :param sequence_length: e.g. 9
    :param sliding_window_size: e.g. 4
    :return: [(0, 4, [1, 1, 1, 0]), (2, 6, [0, 1, 1, 0]), (4, 8, [0, 1, 1, 0]), (6, 9, [0, 1, 1])]
    """
    windows = []
    stride = int(sliding_window_size / 2)
    quarter = int(sliding_window_size / 4)
    start = 0
    end = 0
    while end < sequence_length:
        end = min(start + sliding_window_size, sequence_length)
        # First window owns its leading quarter, last window its trailing part.
        head = [1 if start == 0 else 0] * quarter
        middle = [1] * stride
        tail = [1 if end == sequence_length else 0] * (sliding_window_size - stride - quarter)
        mask = (head + middle + tail)[: end - start]
        windows.append((start, end, mask))
        start += stride
    assert sum(sum(mask) for _, _, mask in windows) == sequence_length
    return windows
def flatten_clusters(clusters: List[List[Tuple[int, int]]]) -> Tuple[List[int], List[int], List[int]]:
    """
    Flatten cluster spans into three parallel lists.

    :param clusters: list of clusters, each a list of (start, end) spans
    :return: (span_starts, span_ends, cluster_ids); cluster ids are 1-based
    """
    span_starts = []
    span_ends = []
    cluster_ids = []
    for cluster_number, cluster in enumerate(clusters, start=1):
        for begin, finish in cluster:
            span_starts.append(begin)
            span_ends.append(finish)
            cluster_ids.append(cluster_number)
    return span_starts, span_ends, cluster_ids
def convert_to_sliding_window(tokenized_document: dict, sliding_window_size: int):
    """
    Expand speaker tokens, then slice the token stream into fixed-size BERT
    windows framed by [CLS]/[SEP] and padded with [PAD].

    Mask codes: >= 0 original sub-token index, -1 inserted speaker token,
    -2 overlap position owned by another window, -3 [CLS]/[SEP], -4 [PAD].

    :param tokenized_document: output of tokenize_document
    :param sliding_window_size: total window length including [CLS]/[SEP]
    :return: (token_windows, mask_windows)
    """
    tokens, masks = expand_with_speakers(tokenized_document)
    windows = construct_sliding_windows(len(tokens), sliding_window_size - 2)
    token_windows = []
    mask_windows = []
    for start, end, ownership in windows:
        window_tokens = tokens[start:end]
        window_masks = [m if owned else -2
                        for owned, m in zip(ownership, masks[start:end])]
        padding = sliding_window_size - 2 - len(window_tokens)
        framed_tokens = ['[CLS]'] + window_tokens + ['[SEP]'] + ['[PAD]'] * padding
        framed_masks = [-3] + window_masks + [-3] + [-4] * padding
        assert len(framed_tokens) == sliding_window_size
        assert len(framed_masks) == sliding_window_size
        token_windows.append(framed_tokens)
        mask_windows.append(framed_masks)
    # Every sub-token must be owned by exactly one window.
    assert len(tokenized_document['sentence_map']) == sum(m >= 0 for row in mask_windows for m in row)
    return token_windows, mask_windows
def prepare_training_data(data_dir: str, language: str, vocab_file: str, sliding_window_size: int):
    """Convert every CoNLL split in data_dir into a TFRecord file.

    For each of train/dev/test, reads `<split>.<language>.v4_gold_conll`,
    writes `<split>.<language>.tfrecord` plus a `.map` JSON file mapping the
    numeric doc index back to the original doc key.

    :param data_dir: directory holding the *.v4_gold_conll files
    :param language: english, chinese or arabic
    :param vocab_file: BERT vocab file for tokenization
    :param sliding_window_size: total BERT window length (incl. [CLS]/[SEP])
    """
    tokenizer = FullTokenizer(vocab_file=vocab_file, do_lower_case=False)
    for dataset in ['train', 'dev', 'test']:
        conll_file_path = os.path.join(data_dir, F"{dataset}.{language}.v4_gold_conll")
        writer = tf.python_io.TFRecordWriter(os.path.join(data_dir, F"{dataset}.{language}.tfrecord"))
        doc_map = {}
        try:
            documents = read_conll_file(conll_file_path)
            for doc_idx, document in enumerate(documents):
                doc_info = parse_document(document, language)
                checkout_clusters(doc_info)
                tokenized_document = tokenize_document(doc_info, tokenizer)
                doc_map[doc_idx] = tokenized_document['doc_key']
                token_windows, mask_windows = convert_to_sliding_window(tokenized_document, sliding_window_size)
                input_id_windows = [tokenizer.convert_tokens_to_ids(tokens) for tokens in token_windows]
                span_starts, span_ends, cluster_ids = flatten_clusters(tokenized_document['clusters'])
                instance = (doc_idx, tokenized_document['sentence_map'], tokenized_document['subtoken_map'],
                            input_id_windows, mask_windows, span_starts, span_ends, cluster_ids)
                write_instance_to_example_file(writer, instance)
        finally:
            # Bug fix: the writer was never closed, which can leave the last
            # records unflushed on disk.
            writer.close()
        with open(os.path.join(data_dir, F"{dataset}.{language}.map"), 'w') as fo:
            json.dump(doc_map, fo, indent=2)
def write_instance_to_example_file(writer: tf.python_io.TFRecordWriter, instance: tuple):
    """Serialize one document instance as a tf.train.Example and write it out."""
    (doc_idx, sentence_map, subtoken_map, input_id_windows,
     mask_windows, span_starts, span_ends, cluster_ids) = instance
    features = {
        'doc_idx': create_int_feature([doc_idx]),
        'sentence_map': create_int_feature(sentence_map),
        'subtoken_map': create_int_feature(subtoken_map),
        # windows are flattened row-major into a single int64 list
        'flattened_input_ids': create_int_feature([i for row in input_id_windows for i in row]),
        'flattened_input_mask': create_int_feature([m for row in mask_windows for m in row]),
        'span_starts': create_int_feature(span_starts),
        'span_ends': create_int_feature(span_ends),
        'cluster_ids': create_int_feature(cluster_ids),
    }
    example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(example.SerializeToString())
def create_int_feature(values):
    """Wrap an iterable of ints as a tf.train.Feature holding an int64 list."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
if __name__ == '__main__':
    # Entry point: convert all CoNLL splits in --data_dir into TFRecord files.
    prepare_training_data(args.data_dir, args.language, args.vocab_file, args.sliding_window_size)
| [
"logging.basicConfig",
"logging.getLogger",
"logging.StreamHandler",
"argparse.ArgumentParser",
"os.path.join",
"re.match",
"bert.tokenization.FullTokenizer",
"tensorflow.train.Features",
"logging.FileHandler",
"collections.defaultdict",
"json.dump"
] | [((471, 532), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some integers."""'}), "(description='Process some integers.')\n", (494, 532), False, 'import argparse\n'), ((1090, 1231), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'handlers': 'handlers', 'format': '"""[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s"""'}), "(level=logging.INFO, handlers=handlers, format=\n '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s')\n", (1109, 1231), False, 'import logging\n'), ((1256, 1275), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1273, 1275), False, 'import logging\n'), ((996, 1053), 'logging.FileHandler', 'logging.FileHandler', ([], {'filename': '"""prepare_training_data.log"""'}), "(filename='prepare_training_data.log')\n", (1015, 1053), False, 'import logging\n'), ((1055, 1088), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1076, 1088), False, 'import logging\n'), ((3402, 3419), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3413, 3419), False, 'from collections import defaultdict\n'), ((3438, 3455), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3449, 3455), False, 'from collections import defaultdict\n'), ((9631, 9688), 'bert.tokenization.FullTokenizer', 'FullTokenizer', ([], {'vocab_file': 'vocab_file', 'do_lower_case': '(False)'}), '(vocab_file=vocab_file, do_lower_case=False)\n', (9644, 9688), False, 'from bert.tokenization import FullTokenizer\n'), ((9760, 9821), 'os.path.join', 'os.path.join', (['data_dir', 'f"""{dataset}.{language}.v4_gold_conll"""'], {}), "(data_dir, f'{dataset}.{language}.v4_gold_conll')\n", (9772, 9821), False, 'import os\n'), ((1509, 1551), 're.match', 're.match', (['conll.BEGIN_DOCUMENT_REGEX', 'line'], {}), '(conll.BEGIN_DOCUMENT_REGEX, line)\n', (1517, 1551), False, 'import re\n'), ((9867, 9923), 'os.path.join', 
'os.path.join', (['data_dir', 'f"""{dataset}.{language}.tfrecord"""'], {}), "(data_dir, f'{dataset}.{language}.tfrecord')\n", (9879, 9923), False, 'import os\n'), ((10948, 10980), 'json.dump', 'json.dump', (['doc_map', 'fo'], {'indent': '(2)'}), '(doc_map, fo, indent=2)\n', (10957, 10980), False, 'import json\n'), ((11866, 11901), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'features'}), '(feature=features)\n', (11883, 11901), True, 'import tensorflow as tf\n'), ((10871, 10922), 'os.path.join', 'os.path.join', (['data_dir', 'f"""{dataset}.{language}.map"""'], {}), "(data_dir, f'{dataset}.{language}.map')\n", (10883, 10922), False, 'import os\n')] |
"""
Utilities for making plots
"""
from warnings import warn
import matplotlib.pyplot as plt
from matplotlib.projections import register_projection
from matplotlib.animation import ArtistAnimation
from .parse import Delay, Pulse, PulseSeq
def subplots(*args, **kwargs):
    """
    Wrapper around matplotlib.pyplot.subplots
    Automatically incorporates the PulseProgram projection
    in subplot keywords
    """
    register_projection(PulseProgram)
    subplot_kw = kwargs.setdefault("subplot_kw", {})
    if "projection" in subplot_kw:
        # Any user-specified projection is overridden.
        warn(
            f"Projection will be set to 'PulseProgram' instead of {subplot_kw['projection']}"
        )
    subplot_kw["projection"] = "PulseProgram"
    return plt.subplots(*args, **kwargs)
def subplot_mosaic(*args, **kwargs):
    """
    Wrapper around matplotlib.pyplot.subplot_mosaic
    Automatically incorporates the PulseProgram projection
    in subplot keywords
    """
    register_projection(PulseProgram)
    subplot_kw = kwargs.setdefault("subplot_kw", {})
    if "projection" in subplot_kw:
        # Any user-specified projection is overridden.
        warn(
            f"Projection will be set to 'PulseProgram' instead of {subplot_kw['projection']}"
        )
    subplot_kw["projection"] = "PulseProgram"
    return plt.subplot_mosaic(*args, **kwargs)
def show(*args, **kwargs):
    """
    Thin pass-through to matplotlib.pyplot.show, so that building pulse
    diagrams does not require importing matplotlib.pyplot directly.
    """
    plt.show(*args, **kwargs)
def animation(*args, **kwargs):
    """Pass-through wrapper for matplotlib.animation.ArtistAnimation (saves an import)."""
    return ArtistAnimation(*args, **kwargs)
class PulseProgram(plt.Axes):
    """
    A matplotlib Axes projection with convenience methods for
    plotting elements of a NMR pulse sequence.

    Usage
    -----
    >>> from pulseplot import pplot
    >>> fig, ax = pplot()
    >>> ax.params["p1"] = 0.5
    >>> ax.pulse("p1 pl1 ph1 f1")
    >>> ax.delay(2)
    >>> ax.pulse("p2 pl1 ph2 f1 w")
    >>> ax.pulse("p2 pl1 ph2 f2")
    >>> ax.delay(2)
    >>> ax.pulse("p1 pl1 ph2 f1 w")
    >>> ax.pulse("p1 pl1 ph2 f2 w")
    >>> ax.fid("p1 pl1 phrec f2")

    """

    name = "PulseProgram"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.center_align = False  # vertically center pulse shapes on their channel
        self.spacing = 0.0         # horizontal gap inserted around each pulse
        self.phase_dy = 0.0        # extra vertical offset for phase annotations
        self.text_dy = 0.0         # extra vertical offset for pulse labels
        self.fontsize = None       # default fontsize for labels, if set
        self.time = 0.0            # running position on the horizontal (time) axis
        self.params = {}           # named parameters shared with parsed sequences
        self.limits = {
            "xlow": 10,
            "xhigh": -10,
            "ylow": 10,
            "yhigh": -10,
            "dx": 0.1,
            "dy": 0.1,
        }
        self.set_limits()
        self.axis(False)

    def pulse(self, *args, **kwargs):
        """Draw a pulse and advance self.time (unless the pulse has 'wait' set)."""
        if isinstance(args[0], Pulse):
            p = args[0]
        else:
            p = Pulse(*args, **kwargs, external_params=self.params)
        if p.defer_start_time:
            p.start_time = self.time + self.spacing
            p.plen -= 2 * self.spacing
        if not p.wait:
            self.time = p.end_time() + 2 * self.spacing
        p.text_dy += self.text_dy
        p.phtxt_dy += self.phase_dy
        if self.fontsize:
            if "fontsize" not in p.text_kw:
                p.text_kw["fontsize"] = self.fontsize
            if "fontsize" not in p.phase_kw:
                p.phase_kw["fontsize"] = self.fontsize
        # add the actual pulse
        pulse_patch = p.patch()
        super().add_patch(pulse_patch)
        xarr, yarr = pulse_patch.xy[:, 0], pulse_patch.xy[:, 1]
        if self.center_align:
            # shift the patch so that it is vertically centered on the channel
            center = (yarr.min() + yarr.max()) / 2.0 - yarr.min()
            yarr -= center
            pulse_patch.xy[:, 1] = yarr
            p.text_dy -= center
            p.phtxt_dy -= center
        self.edit_limits(
            xlow=xarr.min(), xhigh=xarr.max(), ylow=yarr.min(), yhigh=yarr.max()
        )
        p.start_time -= self.spacing
        p.plen += 2 * self.spacing
        # Labels are optional: drawing errors are deliberately ignored.
        # Bug fix: label_params/phase_params are methods; their results were
        # previously re-indexed as attributes (p.label_params["x"]), which
        # raised and silently skipped the limit update below.
        try:
            label_kw = p.label_params()
            super().text(**label_kw)
            xpos, ypos = label_kw["x"], label_kw["y"]
            self.edit_limits(xlow=xpos, xhigh=xpos, ylow=ypos, yhigh=ypos)
        except Exception:
            pass
        try:
            phase_kw = p.phase_params()
            super().text(**phase_kw)
            xpos, ypos = phase_kw["x"], phase_kw["y"]
            self.edit_limits(xlow=xpos, xhigh=xpos, ylow=ypos, yhigh=ypos)
        except Exception:
            pass
        p.text_dy -= self.text_dy
        p.phtxt_dy -= self.phase_dy

    def delay(self, *args, **kwargs):
        """Advance self.time by a delay, optionally drawing its label."""
        if isinstance(args[0], Delay):
            d = args[0]
        else:
            d = Delay(*args, **kwargs, external_params=self.params)
        if d.defer_start_time:
            d.start_time = self.time
        self.time += d.time
        try:
            super().text(**d.label_params())
        except Exception:
            # the label is optional
            pass

    def fid(self, *args, **kwargs):
        """Draw a free-induction-decay (an open, untruncated 'fid'-shaped pulse)."""
        self.pulse(
            *args,
            **kwargs,
            shape="fid",
            truncate_off=True,
            open=True,
            facecolor="none",
        )

    def clear(self):
        """
        Removes all channels and resets the time to zero
        """
        self.time = 0.0
        super().clear()

    def draw_channels(self, *args, **kwargs):
        """
        Draws horizontal lines marking the channels.

        Channels may be given as names present in self.params or directly
        as y-axis positions; `limits=(x0, x1)` overrides the line extent.
        """
        defaults = {"color": "k", "linewidth": 1.0, "zorder": -1}
        try:
            x0, x1 = kwargs["limits"]
            kwargs.pop("limits")
        except KeyError:
            x0, x1 = self.limits["xlow"], self.limits["xhigh"]
        defaults = {**defaults, **kwargs}
        for channel in args:
            if channel in self.params.keys():
                super().hlines(self.params[channel], x0, x1, **defaults)
            else:
                try:
                    super().hlines(channel, x0, x1, **defaults)
                except ValueError:
                    raise ValueError(
                        "Channel must be present in parameters, or must be a number"
                    )

    def pseq(self, instruction):
        """
        Parse and draw a full pulse sequence given as a string or PulseSeq.
        """
        if isinstance(instruction, str):
            instruction = PulseSeq(instruction, external_params=self.params)
        for item in instruction.elements:
            if isinstance(item, Pulse):
                self.pulse(item)
            elif isinstance(item, Delay):
                self.delay(item)
        self.sequence = instruction

    def get_time(self, name=None, index=None):
        """Return the start time of a sequence element, looked up by name or index."""
        if name is not None:
            try:
                index_ = self.sequence.named_elements[name]
                x = self.sequence.elements[index_].start_time
            except (KeyError, IndexError):
                raise KeyError(f"Cannot find the element named {name}")
        elif index is not None:
            try:
                x = self.sequence.elements[index].start_time
            # Bug fix: a bad list index raises IndexError (KeyError alone
            # never triggered), and the old message referenced `name`,
            # which is None on this branch.
            except (KeyError, IndexError):
                raise KeyError(f"Cannot find the element at index {index}")
        else:
            raise ValueError("Either a name or an index must be supplied")
        return x

    def set_limits(self, limits=None):
        """Apply self.limits (optionally replaced by `limits`) to the axes."""
        if limits is not None:
            self.limits = limits
        try:
            super().set_xlim(self.limits["xlow"], self.limits["xhigh"])
            super().set_ylim(self.limits["ylow"], self.limits["yhigh"])
        # Bug fix: a malformed limits object raises KeyError (dict) or
        # TypeError (list indexed by string), neither of which the old
        # `except IndexError` caught.
        except (KeyError, TypeError, IndexError):
            raise IndexError("limits should be given as [xlow, xhigh, ylow, yhigh]")

    def edit_limits(self, xlow=None, xhigh=None, ylow=None, yhigh=None):
        """Grow the stored limits to include the given extents, with a small margin."""
        dx, dy = self.limits["dx"], self.limits["dy"]
        if (xlow is not None) and (xlow - dx < self.limits["xlow"]):
            self.limits["xlow"] = xlow - dx
        if (ylow is not None) and (ylow - dy < self.limits["ylow"]):
            self.limits["ylow"] = ylow - dy
        if (xhigh is not None) and (xhigh + dx > self.limits["xhigh"]):
            self.limits["xhigh"] = xhigh + dx
        if (yhigh is not None) and (yhigh + dy > self.limits["yhigh"]):
            self.limits["yhigh"] = yhigh + dy
        # keep the margin proportional to the current extent
        self.limits["dx"] = (self.limits["xhigh"] - self.limits["xlow"]) / 50
        self.limits["dy"] = (self.limits["yhigh"] - self.limits["ylow"]) / 50
        self.set_limits()
| [
"matplotlib.pyplot.subplot_mosaic",
"matplotlib.projections.register_projection",
"matplotlib.animation.ArtistAnimation",
"warnings.warn",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((424, 457), 'matplotlib.projections.register_projection', 'register_projection', (['PulseProgram'], {}), '(PulseProgram)\n', (443, 457), False, 'from matplotlib.projections import register_projection\n'), ((836, 865), 'matplotlib.pyplot.subplots', 'plt.subplots', (['*args'], {}), '(*args, **kwargs)\n', (848, 865), True, 'import matplotlib.pyplot as plt\n'), ((1081, 1114), 'matplotlib.projections.register_projection', 'register_projection', (['PulseProgram'], {}), '(PulseProgram)\n', (1100, 1114), False, 'from matplotlib.projections import register_projection\n'), ((1493, 1528), 'matplotlib.pyplot.subplot_mosaic', 'plt.subplot_mosaic', (['*args'], {}), '(*args, **kwargs)\n', (1511, 1528), True, 'import matplotlib.pyplot as plt\n'), ((1727, 1752), 'matplotlib.pyplot.show', 'plt.show', (['*args'], {}), '(*args, **kwargs)\n', (1735, 1752), True, 'import matplotlib.pyplot as plt\n'), ((1885, 1917), 'matplotlib.animation.ArtistAnimation', 'ArtistAnimation', (['*args'], {}), '(*args, **kwargs)\n', (1900, 1917), False, 'from matplotlib.animation import ArtistAnimation\n'), ((559, 666), 'warnings.warn', 'warn', (['f"""Projection will be set to \'PulseProgram\' instead of {kwargs[\'subplot_kw\'][\'projection\']}"""'], {}), '(\n f"Projection will be set to \'PulseProgram\' instead of {kwargs[\'subplot_kw\'][\'projection\']}"\n )\n', (563, 666), False, 'from warnings import warn\n'), ((1216, 1323), 'warnings.warn', 'warn', (['f"""Projection will be set to \'PulseProgram\' instead of {kwargs[\'subplot_kw\'][\'projection\']}"""'], {}), '(\n f"Projection will be set to \'PulseProgram\' instead of {kwargs[\'subplot_kw\'][\'projection\']}"\n )\n', (1220, 1323), False, 'from warnings import warn\n')] |
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
class CartPage:
    # Page object for the store's shopping-cart page (litecart demo shop).

    def __init__(self, driver):
        self.driver = driver
        self.wait = WebDriverWait(driver, 10)

    def del_from_cart_button(self):
        # "Remove" buttons, one per cart row.
        return self.driver.find_elements_by_css_selector("li.item button[name='remove_cart_item']")

    @property
    def smal_prod_icons(self):
        # Small product shortcut icons shown above the cart items.
        return self.driver.find_elements_by_css_selector("li.shortcut a")

    @property
    def product_image(self):
        # Product image links inside the cart item list.
        return self.driver.find_elements_by_css_selector("li.item a[class='image-wrapper shadow']")

    @property
    def back_main_page_link(self):
        # Link shown in the cart wrapper (e.g. back to the catalogue when the cart is empty).
        return self.driver.find_element_by_css_selector("div#checkout-cart-wrapper a")

    def get_count_product_in_cart(self):
        self.wait.until(EC.presence_of_element_located((By.ID, "checkout-summary-wrapper")))
        # determine the number of products to delete from the cart (the number of rows in the summary table)
        return self.driver.find_elements_by_css_selector("div#checkout-summary-wrapper td.item")

    def delete_all_prod_from_cart(self, prod_count):
        # Remove every product, waiting after each removal for the page to refresh.
        for i in range(prod_count):
            # pick the element whose disappearance we will wait for after the next removal;
            # for the last product the shortcut icons are gone, so wait on the product image instead
            if i != prod_count-1:
                shortcuts = self.smal_prod_icons
                # NOTE(review): clicking the shortcut icon here looks intentional
                # (selects the item before removal) — confirm against the UI flow
                shortcuts[0].click()
            else:
                shortcuts = self.product_image
            products_del = self.del_from_cart_button()
            products_del[0].click()
            # wait until the chosen element goes stale before continuing
            self.wait.until(EC.staleness_of(shortcuts[0]))

    def wait_for_empty_cart(self):
        # The "cart is empty" link appears once all products are removed.
        self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "div#checkout-cart-wrapper a")))
| [
"selenium.webdriver.support.wait.WebDriverWait",
"selenium.webdriver.support.expected_conditions.presence_of_element_located",
"selenium.webdriver.support.expected_conditions.staleness_of"
] | [((266, 291), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (279, 291), False, 'from selenium.webdriver.support.wait import WebDriverWait\n'), ((896, 963), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.ID, 'checkout-summary-wrapper')"], {}), "((By.ID, 'checkout-summary-wrapper'))\n", (926, 963), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1829, 1914), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.CSS_SELECTOR, 'div#checkout-cart-wrapper a')"], {}), "((By.CSS_SELECTOR, 'div#checkout-cart-wrapper a')\n )\n", (1859, 1914), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1738, 1767), 'selenium.webdriver.support.expected_conditions.staleness_of', 'EC.staleness_of', (['shortcuts[0]'], {}), '(shortcuts[0])\n', (1753, 1767), True, 'from selenium.webdriver.support import expected_conditions as EC\n')] |
import time,pyb
# Open UART 3 at 9600 baud: 8 data bits, no parity, 1 stop bit.
uart = pyb. UART(3,9600, bits=8, parity=None, stop=1)
while(True):
    # Poll the serial line ten times per second.
    time.sleep_ms(100)
    # Number of bytes waiting in the receive buffer.
    size = uart.any()
    if (size > 0):
        string = uart.read(size)
        # Keep only the most recent byte and report it as an integer.
        data = int(string[-1])
        print('Data: %3d' % (data))
| [
"pyb.UART",
"time.sleep_ms"
] | [((24, 70), 'pyb.UART', 'pyb.UART', (['(3)', '(9600)'], {'bits': '(8)', 'parity': 'None', 'stop': '(1)'}), '(3, 9600, bits=8, parity=None, stop=1)\n', (32, 70), False, 'import time, pyb\n'), ((89, 107), 'time.sleep_ms', 'time.sleep_ms', (['(100)'], {}), '(100)\n', (102, 107), False, 'import time, pyb\n')] |
from faker import Faker
import random
import parameters
from utils import chance_choose, chance
from uri_generator import gen_path, uri_extensions, gen_uri_useable
# TODO: Continue to expand this list with the proper formats for other application
# layer protocols (e.g. FTP, SSH, SMTP...)
protocols = ['HTTP/1.0', 'HTTP/1.1', 'HTTP/2']
# Verbs generated most of the time (see parameters.frequency['common_http']).
common_methods = ['GET', 'PUT', 'POST', 'DELETE']
# Faker is passed in as an argument to prevent unnecessary re-declaration,
# but is not needed to make this method run.
def gen_req_method(test_mode=False, faker=None):
    """Return an HTTP method name, biased toward the common verbs.

    A Faker instance may be supplied to avoid repeated construction;
    one is created on demand otherwise.
    """
    faker = faker or Faker()
    common_share = parameters.frequency['common_http'] / 100
    return chance_choose(
        random.choice(common_methods),
        faker.http_method(),
        common_share)
def gen_uri_path(test_mode=False):
    """Return a random URI path, either bare or ending in a file extension."""
    # TODO: Continue extending the possible URI paths and file extension types
    # TODO: Add in querystrings
    bare_path = gen_path(test_mode)
    document_path = '{}{}'.format(
        gen_path(test_mode),
        random.choice(uri_extensions)
    )
    return random.choice([bare_path, document_path])
def gen_querystring(test_mode=False):
    """Return a random '?a=b&c=d' querystring, or '' most of the time.

    The empty-string probability comes from parameters.frequency['empty_querystring'].
    """
    if chance(parameters.frequency['empty_querystring']):
        return ''
    pair_count = random.randint(1, parameters.max_val['querystring_elements'])
    pairs = ['{}={}'.format(gen_uri_useable(), gen_uri_useable())
             for _ in range(pair_count)]
    return '?{}'.format('&'.join(pairs))
def gen_req_protocol(test_mode=False):
    """Pick one protocol string at random from the supported list."""
    return random.choice(protocols)
def gen_request(test_mode=False):
    """Generate one random HTTP request line: '<METHOD> <path><query> <protocol>'."""
    fake = Faker()
    # Bug fix: the Faker instance must be passed by keyword — positionally it
    # was bound to test_mode, so gen_req_method built a second Faker and the
    # shared instance was never used. Also forward test_mode consistently.
    method = gen_req_method(test_mode, faker=fake)
    path = gen_uri_path(test_mode)
    querystring = gen_querystring(test_mode)
    protocol = gen_req_protocol(test_mode)
    return '{} {}{} {}'.format(method, path, querystring, protocol)
| [
"utils.chance",
"random.choice",
"uri_generator.gen_uri_useable",
"faker.Faker",
"uri_generator.gen_path",
"random.randint"
] | [((1152, 1179), 'random.choice', 'random.choice', (['path_options'], {}), '(path_options)\n', (1165, 1179), False, 'import random\n'), ((1295, 1344), 'utils.chance', 'chance', (["parameters.frequency['empty_querystring']"], {}), "(parameters.frequency['empty_querystring'])\n", (1301, 1344), False, 'from utils import chance_choose, chance\n'), ((1750, 1774), 'random.choice', 'random.choice', (['protocols'], {}), '(protocols)\n', (1763, 1774), False, 'import random\n'), ((1822, 1829), 'faker.Faker', 'Faker', ([], {}), '()\n', (1827, 1829), False, 'from faker import Faker\n'), ((601, 608), 'faker.Faker', 'Faker', ([], {}), '()\n', (606, 608), False, 'from faker import Faker\n'), ((644, 673), 'random.choice', 'random.choice', (['common_methods'], {}), '(common_methods)\n', (657, 673), False, 'import random\n'), ((1004, 1023), 'uri_generator.gen_path', 'gen_path', (['test_mode'], {}), '(test_mode)\n', (1012, 1023), False, 'from uri_generator import gen_path, uri_extensions, gen_uri_useable\n'), ((1411, 1472), 'random.randint', 'random.randint', (['(1)', "parameters.max_val['querystring_elements']"], {}), "(1, parameters.max_val['querystring_elements'])\n", (1425, 1472), False, 'import random\n'), ((1061, 1080), 'uri_generator.gen_path', 'gen_path', (['test_mode'], {}), '(test_mode)\n', (1069, 1080), False, 'from uri_generator import gen_path, uri_extensions, gen_uri_useable\n'), ((1094, 1123), 'random.choice', 'random.choice', (['uri_extensions'], {}), '(uri_extensions)\n', (1107, 1123), False, 'import random\n'), ((1568, 1585), 'uri_generator.gen_uri_useable', 'gen_uri_useable', ([], {}), '()\n', (1583, 1585), False, 'from uri_generator import gen_path, uri_extensions, gen_uri_useable\n'), ((1603, 1620), 'uri_generator.gen_uri_useable', 'gen_uri_useable', ([], {}), '()\n', (1618, 1620), False, 'from uri_generator import gen_path, uri_extensions, gen_uri_useable\n')] |
"""Test evaluation functionality."""
from moda.evaluators import f_beta
from moda.evaluators.metrics import calculate_metrics_with_shift, _join_metrics
def test_f_beta1():
    """F1 of precision 0.6 / recall 1.0 is 0.75."""
    score = f_beta(0.6, 1.0, 1)
    assert 0.74 < score < 0.76
def test_f_beta3():
    """F3 weighs recall heavily, pushing the score close to recall."""
    score = f_beta(0.6, 1.0, 3)
    assert 0.937 < score < 0.938
def test_calculate_metrics_with_shift_all_zero():
    """No anomalies anywhere: every count must be zero."""
    actual = [0] * 11
    predicted = [0] * 11
    metrics = calculate_metrics_with_shift(predicted, actual, window_size=1)
    assert (metrics['TP'], metrics['FP'], metrics['FN']) == (0, 0, 0)
def test_calculate_metrics_with_shift_actual_zero():
    """Every prediction is a false positive when nothing is actually anomalous."""
    actual = [0] * 11
    cases = [
        ([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0], 2),
        ([1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1], 4),
    ]
    for predicted, expected_fp in cases:
        metrics = calculate_metrics_with_shift(predicted, actual, window_size=1)
        assert metrics['TP'] == 0
        assert metrics['FP'] == expected_fp
        assert metrics['FN'] == 0
def test_calculate_metrics_with_shift_predicted_zero():
    """Every actual anomaly is a false negative when nothing is predicted."""
    predicted = [0] * 11
    cases = [
        ([0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0], 2),
        ([1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1], 4),
    ]
    for actual, expected_fn in cases:
        metrics = calculate_metrics_with_shift(predicted, actual, window_size=1)
        assert metrics['TP'] == 0
        assert metrics['FP'] == 0
        assert metrics['FN'] == expected_fn
def test_calculate_metrics_with_shift_perfect():
    """Identical predicted/actual labels yield only true positives."""
    cases = [
        ([0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0], 2),
        ([1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1], 4),
    ]
    for labels, expected_tp in cases:
        metrics = calculate_metrics_with_shift(list(labels), labels, window_size=1)
        assert metrics['TP'] == expected_tp
        assert metrics['FP'] == 0
        assert metrics['FN'] == 0
def test_calculate_metrics_with_shift_mixed():
    """One hit, one spurious prediction, one missed anomaly."""
    actual = [0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0]
    predicted = [0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0]
    metrics = calculate_metrics_with_shift(predicted, actual, window_size=1)
    assert (metrics['TP'], metrics['FP'], metrics['FN']) == (1, 1, 1)
def test_calculate_metrics_with_shift_in_window():
    """A prediction one step away still counts as a hit with window_size=1."""
    actual = [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
    predicted = [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
    metrics = calculate_metrics_with_shift(predicted, actual, window_size=1)
    assert (metrics['TP'], metrics['FP'], metrics['FN']) == (1, 0, 0)
def test_calculate_metrics_with_shift_in_large_window():
actual = [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
predicted = [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
metrics = calculate_metrics_with_shift(predicted, actual, window_size=4)
assert metrics['TP'] == 1
assert metrics['FP'] == 0
assert metrics['FN'] == 0
def test_join_metrics():
metrics1 = {'cat1': {'TP': 12, 'FP': 7, 'FN': 35, 'num_samples': 1, 'num_values': 2},
'cat2': {'TP': 0, 'FP': 0, 'FN': 0, 'num_samples': 1, 'num_values': 2},
'cat4': {'TP': 9, 'FP': 9, 'FN': 9, 'num_samples': 1, 'num_values': 2}}
metrics2 = {'cat1': {'TP': 10, 'FP': 10, 'FN': 10, 'num_samples': 1, 'num_values': 2},
'cat2': {'TP': 2, 'FP': 2, 'FN': 2, 'num_samples': 1, 'num_values': 2},
'cat3': {'TP': 1, 'FP': 1, 'FN': 1, 'num_samples': 1, 'num_values': 2}}
metrics = _join_metrics(metrics1, metrics2)
assert metrics['cat1']['TP'] == 22
assert metrics['cat1']['FP'] == 17
assert metrics['cat1']['FN'] == 45
assert metrics['cat1']['num_samples'] == 2
assert metrics['cat1']['num_values'] == 4
assert metrics['cat2']['TP'] == 2
assert metrics['cat2']['FP'] == 2
assert metrics['cat2']['FN'] == 2
assert metrics['cat3']['TP'] == 1
assert metrics['cat3']['FP'] == 1
assert metrics['cat3']['FN'] == 1
assert metrics['cat4']['TP'] == 9
assert metrics['cat4']['FP'] == 9
assert metrics['cat4']['FN'] == 9
if __name__ == '__main__':
test_f_beta1()
test_f_beta3()
test_calculate_metrics_with_shift_all_zero()
test_calculate_metrics_with_shift_actual_zero()
test_calculate_metrics_with_shift_predicted_zero()
test_calculate_metrics_with_shift_perfect()
test_calculate_metrics_with_shift_mixed()
test_calculate_metrics_with_shift_in_window()
test_calculate_metrics_with_shift_in_large_window()
test_join_metrics()
| [
"moda.evaluators.metrics._join_metrics",
"moda.evaluators.f_beta",
"moda.evaluators.metrics.calculate_metrics_with_shift"
] | [((232, 263), 'moda.evaluators.f_beta', 'f_beta', (['precision', 'recall', 'beta'], {}), '(precision, recall, beta)\n', (238, 263), False, 'from moda.evaluators import f_beta\n'), ((381, 412), 'moda.evaluators.f_beta', 'f_beta', (['precision', 'recall', 'beta'], {}), '(precision, recall, beta)\n', (387, 412), False, 'from moda.evaluators import f_beta\n'), ((615, 677), 'moda.evaluators.metrics.calculate_metrics_with_shift', 'calculate_metrics_with_shift', (['predicted', 'actual'], {'window_size': '(1)'}), '(predicted, actual, window_size=1)\n', (643, 677), False, 'from moda.evaluators.metrics import calculate_metrics_with_shift, _join_metrics\n'), ((934, 996), 'moda.evaluators.metrics.calculate_metrics_with_shift', 'calculate_metrics_with_shift', (['predicted', 'actual'], {'window_size': '(1)'}), '(predicted, actual, window_size=1)\n', (962, 996), False, 'from moda.evaluators.metrics import calculate_metrics_with_shift, _join_metrics\n'), ((1152, 1214), 'moda.evaluators.metrics.calculate_metrics_with_shift', 'calculate_metrics_with_shift', (['predicted', 'actual'], {'window_size': '(1)'}), '(predicted, actual, window_size=1)\n', (1180, 1214), False, 'from moda.evaluators.metrics import calculate_metrics_with_shift, _join_metrics\n'), ((1474, 1536), 'moda.evaluators.metrics.calculate_metrics_with_shift', 'calculate_metrics_with_shift', (['predicted', 'actual'], {'window_size': '(1)'}), '(predicted, actual, window_size=1)\n', (1502, 1536), False, 'from moda.evaluators.metrics import calculate_metrics_with_shift, _join_metrics\n'), ((1689, 1751), 'moda.evaluators.metrics.calculate_metrics_with_shift', 'calculate_metrics_with_shift', (['predicted', 'actual'], {'window_size': '(1)'}), '(predicted, actual, window_size=1)\n', (1717, 1751), False, 'from moda.evaluators.metrics import calculate_metrics_with_shift, _join_metrics\n'), ((2004, 2066), 'moda.evaluators.metrics.calculate_metrics_with_shift', 'calculate_metrics_with_shift', (['predicted', 'actual'], 
{'window_size': '(1)'}), '(predicted, actual, window_size=1)\n', (2032, 2066), False, 'from moda.evaluators.metrics import calculate_metrics_with_shift, _join_metrics\n'), ((2269, 2331), 'moda.evaluators.metrics.calculate_metrics_with_shift', 'calculate_metrics_with_shift', (['predicted', 'actual'], {'window_size': '(1)'}), '(predicted, actual, window_size=1)\n', (2297, 2331), False, 'from moda.evaluators.metrics import calculate_metrics_with_shift, _join_metrics\n'), ((2582, 2644), 'moda.evaluators.metrics.calculate_metrics_with_shift', 'calculate_metrics_with_shift', (['predicted', 'actual'], {'window_size': '(1)'}), '(predicted, actual, window_size=1)\n', (2610, 2644), False, 'from moda.evaluators.metrics import calculate_metrics_with_shift, _join_metrics\n'), ((2899, 2961), 'moda.evaluators.metrics.calculate_metrics_with_shift', 'calculate_metrics_with_shift', (['predicted', 'actual'], {'window_size': '(1)'}), '(predicted, actual, window_size=1)\n', (2927, 2961), False, 'from moda.evaluators.metrics import calculate_metrics_with_shift, _join_metrics\n'), ((3222, 3284), 'moda.evaluators.metrics.calculate_metrics_with_shift', 'calculate_metrics_with_shift', (['predicted', 'actual'], {'window_size': '(4)'}), '(predicted, actual, window_size=4)\n', (3250, 3284), False, 'from moda.evaluators.metrics import calculate_metrics_with_shift, _join_metrics\n'), ((3951, 3984), 'moda.evaluators.metrics._join_metrics', '_join_metrics', (['metrics1', 'metrics2'], {}), '(metrics1, metrics2)\n', (3964, 3984), False, 'from moda.evaluators.metrics import calculate_metrics_with_shift, _join_metrics\n')] |
from django.db import models,transaction
from contact.models import Customer
from product.models import Stock
from django.urls import reverse
from django.db.models import Sum
# Create your models here.
class Approval(models.Model):
created_at = models.DateTimeField(auto_now_add = True,
editable = False)
updated_at = models.DateTimeField(auto_now = True,
editable = False)
created_by = models.ForeignKey('users.CustomUser', on_delete=models.CASCADE,
null=True,blank = True)
contact = models.ForeignKey(Customer,
related_name = 'contact',on_delete = models.CASCADE)
total_wt = models.DecimalField(max_digits=10,
decimal_places=3,default =0)
total_qty = models.IntegerField(default=0)
posted = models.BooleanField(default = False)
is_billed = models.BooleanField(default = False)
status = models.CharField(max_length = 10,
choices = (('Pending','Pending'),
('Complete','Complete')),default = 'Pending')
class Meta:
ordering = ('created_at',)
def __str__(self):
return f"{self.id}"
def get_absolute_url(self):
return reverse('approval_approval_detail',args=(self.pk,))
def get_update_url(self):
return reverse('approval_approval_update',args = (self.pk,))
@transaction.atomic()
def post(self):
if not self.posted:
for i in self.items.all():
i.post()
self.posted = True
self.save(update_fields=['posted'])
@transaction.atomic()
def unpost(self):
# if is billed cant unpost
if self.posted and not self.is_billed:
for i in self.items.all():
i.unpost()
self.posted = False
self.save(update_fields=['posted'])
def update_status(self):
print('in approval update_Status')
for i in self.items.all():
print(f"{i}-{i.status} ")
if any(i.status == 'Pending' for i in self.items.all()):
self.status ='Pending'
else:
self.status ='Complete'
self.save()
class ApprovalLine(models.Model):
product = models.ForeignKey(Stock,related_name = 'product',
on_delete=models.CASCADE)
quantity = models.IntegerField(default=0)
weight = models.DecimalField(max_digits=10,
decimal_places=3,default = 0.0)
touch = models.DecimalField(max_digits=10,
decimal_places=3,default = 0.0)
approval = models.ForeignKey(Approval,
on_delete = models.CASCADE,
related_name='items')
status = models.CharField(max_length =30,
choices = (
('Pending','Pending'),
('Returned','Returned'),
('Billed','Billed')),
default = 'Pending',
blank = True )
class Meta:
ordering = ('approval',)
def __str__(self):
return f"{self.id}"
def balance(self):
return (self.weight - self.approvallinereturn_set.filter(posted = True).\
aggregate(t = Sum('weight'))['t'])
def post(self):
self.product.remove(self.weight,self.quantity,None,'A')
def unpost(self):
for i in self.approvallinereturn_set.all():
i.unpost()
i.delete()
self.product.add(self.weight, self.quantity, None, 'AR')
def update_status(self):
ret = self.approvallinereturn_set.filter(
posted = True
).aggregate(
qty = Sum('quantity'),
wt = Sum('weight'))
if self.quantity == ret['qty'] and self.weight == ret['wt']:
self.status = 'Returned'
else:
self.status = 'Pending'
self.save()
self.approval.update_status()
class ApprovalLineReturn(models.Model):
created_at = models.DateTimeField(auto_now_add = True)
created_by = models.ForeignKey(
'users.CustomUser', on_delete=models.CASCADE,
null=True, blank=True)
line = models.ForeignKey(ApprovalLine,on_delete = models.CASCADE)
quantity = models.IntegerField(default = 0)
weight = models.DecimalField(max_digits = 10,
decimal_places = 3,default =0.0)
posted = models.BooleanField(default=False)
class Meta:
ordering = ('id',)
def __str__(self):
return f"{self.line.product}"
def post(self):
if not self.posted:
self.line.product.add(self.weight, self.quantity, None, 'AR')
self.posted = True
self.save(update_fields=['posted'])
self.line.update_status()
def unpost(self):
if self.posted:
self.line.product.remove(self.weight, self.quantity, None, 'A')
self.posted = False
self.save(update_fields=['posted'])
self.line.update_status()
| [
"django.db.models.Sum",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.transaction.atomic",
"django.db.models.BooleanField",
"django.urls.reverse",
"django.db.models.DateTimeField",
"django.db.models.DecimalField",
"django.db.models.CharField"
] | [((250, 305), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'editable': '(False)'}), '(auto_now_add=True, editable=False)\n', (270, 305), False, 'from django.db import models, transaction\n'), ((343, 394), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'editable': '(False)'}), '(auto_now=True, editable=False)\n', (363, 394), False, 'from django.db import models, transaction\n'), ((432, 522), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""users.CustomUser"""'], {'on_delete': 'models.CASCADE', 'null': '(True)', 'blank': '(True)'}), "('users.CustomUser', on_delete=models.CASCADE, null=True,\n blank=True)\n", (449, 522), False, 'from django.db import models, transaction\n'), ((555, 632), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Customer'], {'related_name': '"""contact"""', 'on_delete': 'models.CASCADE'}), "(Customer, related_name='contact', on_delete=models.CASCADE)\n", (572, 632), False, 'from django.db import models, transaction\n'), ((667, 730), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(3)', 'default': '(0)'}), '(max_digits=10, decimal_places=3, default=0)\n', (686, 730), False, 'from django.db import models, transaction\n'), ((763, 793), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (782, 793), False, 'from django.db import models, transaction\n'), ((807, 841), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (826, 841), False, 'from django.db import models, transaction\n'), ((860, 894), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (879, 894), False, 'from django.db import models, transaction\n'), ((910, 1025), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'choices': "(('Pending', 'Pending'), 
('Complete', 'Complete'))", 'default': '"""Pending"""'}), "(max_length=10, choices=(('Pending', 'Pending'), (\n 'Complete', 'Complete')), default='Pending')\n", (926, 1025), False, 'from django.db import models, transaction\n'), ((1362, 1382), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (1380, 1382), False, 'from django.db import models, transaction\n'), ((1572, 1592), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (1590, 1592), False, 'from django.db import models, transaction\n'), ((2215, 2289), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Stock'], {'related_name': '"""product"""', 'on_delete': 'models.CASCADE'}), "(Stock, related_name='product', on_delete=models.CASCADE)\n", (2232, 2289), False, 'from django.db import models, transaction\n'), ((2326, 2356), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2345, 2356), False, 'from django.db import models, transaction\n'), ((2370, 2435), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(3)', 'default': '(0.0)'}), '(max_digits=10, decimal_places=3, default=0.0)\n', (2389, 2435), False, 'from django.db import models, transaction\n'), ((2469, 2534), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(3)', 'default': '(0.0)'}), '(max_digits=10, decimal_places=3, default=0.0)\n', (2488, 2534), False, 'from django.db import models, transaction\n'), ((2572, 2647), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Approval'], {'on_delete': 'models.CASCADE', 'related_name': '"""items"""'}), "(Approval, on_delete=models.CASCADE, related_name='items')\n", (2589, 2647), False, 'from django.db import models, transaction\n'), ((2708, 2861), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'choices': "(('Pending', 'Pending'), ('Returned', 'Returned'), ('Billed', 'Billed'))", 
'default': '"""Pending"""', 'blank': '(True)'}), "(max_length=30, choices=(('Pending', 'Pending'), (\n 'Returned', 'Returned'), ('Billed', 'Billed')), default='Pending',\n blank=True)\n", (2724, 2861), False, 'from django.db import models, transaction\n'), ((4035, 4074), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (4055, 4074), False, 'from django.db import models, transaction\n'), ((4094, 4184), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""users.CustomUser"""'], {'on_delete': 'models.CASCADE', 'null': '(True)', 'blank': '(True)'}), "('users.CustomUser', on_delete=models.CASCADE, null=True,\n blank=True)\n", (4111, 4184), False, 'from django.db import models, transaction\n'), ((4209, 4266), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ApprovalLine'], {'on_delete': 'models.CASCADE'}), '(ApprovalLine, on_delete=models.CASCADE)\n', (4226, 4266), False, 'from django.db import models, transaction\n'), ((4283, 4313), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (4302, 4313), False, 'from django.db import models, transaction\n'), ((4329, 4394), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(3)', 'default': '(0.0)'}), '(max_digits=10, decimal_places=3, default=0.0)\n', (4348, 4394), False, 'from django.db import models, transaction\n'), ((4432, 4466), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (4451, 4466), False, 'from django.db import models, transaction\n'), ((1204, 1256), 'django.urls.reverse', 'reverse', (['"""approval_approval_detail"""'], {'args': '(self.pk,)'}), "('approval_approval_detail', args=(self.pk,))\n", (1211, 1256), False, 'from django.urls import reverse\n'), ((1302, 1354), 'django.urls.reverse', 'reverse', (['"""approval_approval_update"""'], {'args': '(self.pk,)'}), 
"('approval_approval_update', args=(self.pk,))\n", (1309, 1354), False, 'from django.urls import reverse\n'), ((3702, 3717), 'django.db.models.Sum', 'Sum', (['"""quantity"""'], {}), "('quantity')\n", (3705, 3717), False, 'from django.db.models import Sum\n'), ((3748, 3761), 'django.db.models.Sum', 'Sum', (['"""weight"""'], {}), "('weight')\n", (3751, 3761), False, 'from django.db.models import Sum\n'), ((3233, 3246), 'django.db.models.Sum', 'Sum', (['"""weight"""'], {}), "('weight')\n", (3236, 3246), False, 'from django.db.models import Sum\n')] |
from pdf2image import convert_from_path, convert_from_bytes
from pdf2image.exceptions import (
PDFInfoNotInstalledError,
PDFPageCountError,
PDFSyntaxError
)
images = convert_from_path('.\git_cheat_sheet.pdf')
| [
"pdf2image.convert_from_path"
] | [((178, 221), 'pdf2image.convert_from_path', 'convert_from_path', (['""".\\\\git_cheat_sheet.pdf"""'], {}), "('.\\\\git_cheat_sheet.pdf')\n", (195, 221), False, 'from pdf2image import convert_from_path, convert_from_bytes\n')] |
from googleapiclient.discovery import build
from uuid import uuid4
from google.auth.transport.requests import Request
from pathlib import Path
from google_auth_oauthlib.flow import InstalledAppFlow
from typing import Dict, List
from pickle import load, dump
class CreateMeet:
def __init__(self, attendees: Dict[str, str], event_time: Dict[str, str], topic):
authe = self._auth()
attendees = [{"email": e} for e in attendees.values()]
self.event_states = self._create_event(attendees, event_time, authe, topic)
@staticmethod
def _create_event(attendees: List[Dict[str, str]], event_time, authe: build, topic):
event = {"conferenceData": {"createRequest": {"requestId": f"{uuid4().hex}", "conferenceSolutionKey": {"type": "hangoutsMeet"}}},
"attendees": attendees,
"start": {"dateTime": event_time["start"], 'timeZone': 'Asia/Kolkata'},
"end": {"dateTime": event_time["end"], 'timeZone': 'Asia/Kolkata'},
"summary": topic,
"reminders": {"useDefault": True}
}
event = authe.events().insert(calendarId="primary", sendNotifications=True, body=event, conferenceDataVersion=1).execute()
return event
@staticmethod
def _auth():
token_file, scopes = Path("./token.pickle"), ["https://www.googleapis.com/auth/calendar"]
credentials = None
if token_file.exists():
with open(token_file, "rb") as token:
credentials = load(token)
if not credentials or not credentials.valid:
if credentials and credentials.expired and credentials.refresh_token:
credentials.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file('credentials.json', scopes)
credentials = flow.run_local_server(port=0)
with open(token_file, "wb") as token:
dump(credentials, token)
calendar_service = build("calendar", "v3", credentials=credentials)
return calendar_service
print('------------------------------')
print('-- Follow YYYY-MM-DD format --')
print('------------------------------')
date = input('Enter date of the meeting : ').strip()
print('------------------------------------')
print('-- Follow HH:MM and 24 hrs format --')
print('------------------------------------')
start = input('Enter starting time : ').strip()
end = input('Enter ending time : ').strip()
emails = list(input('Enter the emails of guests separated by 1 space each : ').strip().split())
topic = input('Enter the topic of the meeting : ')
time = {
'start':date+'T'+start+':00.000000',
'end':date+'T'+end+':00.000000'
}
guests = { email : email for email in emails }
meet = CreateMeet(guests, time, topic)
keys = ['organizer','hangoutLink', 'summary', 'start', 'end', 'attendees']
details = { key: meet.event_states[key] for key in keys }
print('---------------------')
print('-- Meeting Details --')
print('---------------------')
for key in keys:
print(key+' : ', details[key])
| [
"pickle.dump",
"pathlib.Path",
"google.auth.transport.requests.Request",
"pickle.load",
"uuid.uuid4",
"googleapiclient.discovery.build",
"google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file"
] | [((2013, 2061), 'googleapiclient.discovery.build', 'build', (['"""calendar"""', '"""v3"""'], {'credentials': 'credentials'}), "('calendar', 'v3', credentials=credentials)\n", (2018, 2061), False, 'from googleapiclient.discovery import build\n'), ((1322, 1344), 'pathlib.Path', 'Path', (['"""./token.pickle"""'], {}), "('./token.pickle')\n", (1326, 1344), False, 'from pathlib import Path\n'), ((1530, 1541), 'pickle.load', 'load', (['token'], {}), '(token)\n', (1534, 1541), False, 'from pickle import load, dump\n'), ((1765, 1834), 'google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file', 'InstalledAppFlow.from_client_secrets_file', (['"""credentials.json"""', 'scopes'], {}), "('credentials.json', scopes)\n", (1806, 1834), False, 'from google_auth_oauthlib.flow import InstalledAppFlow\n'), ((1961, 1985), 'pickle.dump', 'dump', (['credentials', 'token'], {}), '(credentials, token)\n', (1965, 1985), False, 'from pickle import load, dump\n'), ((1713, 1722), 'google.auth.transport.requests.Request', 'Request', ([], {}), '()\n', (1720, 1722), False, 'from google.auth.transport.requests import Request\n'), ((717, 724), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (722, 724), False, 'from uuid import uuid4\n')] |
import librosa
import numpy as np
import audio
from hparams import hparams
"""
This helps implement a user interface for a vocoder.
Currently this is Griffin-Lim but can be extended to different vocoders.
Required elements for the vocoder UI are:
self.sample_rate
self.source_action
self.vocode_action
"""
class Voicebox_Project:
def __init__(self):
# Property needed for voicebox
self.sample_rate = hparams.sample_rate
# Initialization for project
self.source_spec = None
"""
The following action methods are called by Voicebox on button press
Source: [Load] --> source_action
Vocode: [Vocode] --> vocode_action
"""
def source_action(self, wav):
# The vocoder toolbox also vocodes the spectrogram with Griffin-Lim for comparison.
# Inputs: wav (from voicebox)
# Outputs: spec, wav_GL, spec_GL (to voicebox)
self.source_spec = audio.melspectrogram(wav, hparams)
wav_GL = audio.inv_mel_spectrogram(self.source_spec, hparams)
spec_GL = audio.melspectrogram(wav_GL, hparams)
return self.source_spec.T, wav_GL, spec_GL.T
def vocode_action(self):
# For this sample vocoder project, we will use Griffin-Lim as the vocoder.
# Other projects will substitute an actual neural vocoder.
# Inputs: None
# Outputs: wav, spec (to voicebox)
wav = audio.inv_mel_spectrogram(self.source_spec, hparams)
spec = audio.melspectrogram(wav, hparams)
return wav, spec.T
| [
"audio.inv_mel_spectrogram",
"audio.melspectrogram"
] | [((925, 959), 'audio.melspectrogram', 'audio.melspectrogram', (['wav', 'hparams'], {}), '(wav, hparams)\n', (945, 959), False, 'import audio\n'), ((977, 1029), 'audio.inv_mel_spectrogram', 'audio.inv_mel_spectrogram', (['self.source_spec', 'hparams'], {}), '(self.source_spec, hparams)\n', (1002, 1029), False, 'import audio\n'), ((1048, 1085), 'audio.melspectrogram', 'audio.melspectrogram', (['wav_GL', 'hparams'], {}), '(wav_GL, hparams)\n', (1068, 1085), False, 'import audio\n'), ((1408, 1460), 'audio.inv_mel_spectrogram', 'audio.inv_mel_spectrogram', (['self.source_spec', 'hparams'], {}), '(self.source_spec, hparams)\n', (1433, 1460), False, 'import audio\n'), ((1476, 1510), 'audio.melspectrogram', 'audio.melspectrogram', (['wav', 'hparams'], {}), '(wav, hparams)\n', (1496, 1510), False, 'import audio\n')] |
# -*- coding: utf-8 -*-
import pytest
from src.replace_lcsh import replace_term, lcsh_fields, normalize_subfields
# def test_flip_impacted_fields(fake_bib):
# pass
@pytest.mark.parametrize(
"arg,expectation",
[
(1, ["a", "local term", "x", "subX1", "x", "subX2", "z", "subZ."]),
(3, ["a", "subA", "x", "local term", "x", "subX2", "z", "subZ."]),
(5, ["a", "subA", "x", "subX1", "x", "local term", "z", "subZ."]),
(7, ["a", "subA", "x", "subX1", "x", "subX2", "z", "local term."]),
],
)
def test_replace_term(fake_subfields, arg, expectation):
assert replace_term(fake_subfields, arg, "local term") == expectation
def test_lcsh_fields(fake_subjects):
assert len(lcsh_fields(fake_subjects)) == 2
def test_normalize_subields(fake_subfields):
assert normalize_subfields(fake_subfields) == [
"a",
"suba",
"x",
"subx1",
"x",
"subx2",
"z",
"subz",
]
| [
"pytest.mark.parametrize",
"src.replace_lcsh.lcsh_fields",
"src.replace_lcsh.normalize_subfields",
"src.replace_lcsh.replace_term"
] | [((175, 505), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""arg,expectation"""', "[(1, ['a', 'local term', 'x', 'subX1', 'x', 'subX2', 'z', 'subZ.']), (3, [\n 'a', 'subA', 'x', 'local term', 'x', 'subX2', 'z', 'subZ.']), (5, ['a',\n 'subA', 'x', 'subX1', 'x', 'local term', 'z', 'subZ.']), (7, ['a',\n 'subA', 'x', 'subX1', 'x', 'subX2', 'z', 'local term.'])]"], {}), "('arg,expectation', [(1, ['a', 'local term', 'x',\n 'subX1', 'x', 'subX2', 'z', 'subZ.']), (3, ['a', 'subA', 'x',\n 'local term', 'x', 'subX2', 'z', 'subZ.']), (5, ['a', 'subA', 'x',\n 'subX1', 'x', 'local term', 'z', 'subZ.']), (7, ['a', 'subA', 'x',\n 'subX1', 'x', 'subX2', 'z', 'local term.'])])\n", (198, 505), False, 'import pytest\n'), ((608, 655), 'src.replace_lcsh.replace_term', 'replace_term', (['fake_subfields', 'arg', '"""local term"""'], {}), "(fake_subfields, arg, 'local term')\n", (620, 655), False, 'from src.replace_lcsh import replace_term, lcsh_fields, normalize_subfields\n'), ((816, 851), 'src.replace_lcsh.normalize_subfields', 'normalize_subfields', (['fake_subfields'], {}), '(fake_subfields)\n', (835, 851), False, 'from src.replace_lcsh import replace_term, lcsh_fields, normalize_subfields\n'), ((725, 751), 'src.replace_lcsh.lcsh_fields', 'lcsh_fields', (['fake_subjects'], {}), '(fake_subjects)\n', (736, 751), False, 'from src.replace_lcsh import replace_term, lcsh_fields, normalize_subfields\n')] |
from just_bin_it.exceptions import KafkaException
class SpyProducer:
def __init__(self, brokers=None):
self.messages = []
def publish_message(self, topic, message):
self.messages.append((topic, message))
class StubProducerThatThrows:
def publish_message(self, topic, message):
raise KafkaException("Some Kafka error")
| [
"just_bin_it.exceptions.KafkaException"
] | [((324, 358), 'just_bin_it.exceptions.KafkaException', 'KafkaException', (['"""Some Kafka error"""'], {}), "('Some Kafka error')\n", (338, 358), False, 'from just_bin_it.exceptions import KafkaException\n')] |
import requests
import re
import json
import datetime
import os
DATE_FORMAT = '%Y-%m-%d'
DATA_PATH = 'rupodcast_lengths.json'
def extract_hms(g):
return float(g[0] or 0) * 60 + float(g[1]) + float(g[2]) / 60
def extract_rss(dur):
g = dur.split(':')
while len(g) < 3:
g = [0] + g
return float(g[0] or 0) * 60 + float(g[1]) + float(g[2]) / 60
def parse_soundcloud_date(m):
return None
MONTHS = {m: i+1 for i, m in enumerate(['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])}
def parse_overcast_date(m):
month = MONTHS.get(m[0])
if not month:
return None
year = int(m[2] or datetime.date.today().year)
return year, month, int(m[1])
def parse_rss_date(m):
month = MONTHS.get(m[1])
if not month:
return None
return int(m[2]), month, int(m[0])
class ByHandDurationParser:
@staticmethod
def findall(s):
return [p for p in s.split(',') if 1 <= len(p) < 6]
EXTRACTORS = {
'byhand': {
'duration': (ByHandDurationParser, lambda x: float(x)),
'date': (re.compile(r'(\d\d)(\d\d)(\d\d)'),
lambda m: (2000 + int(m[0]), int(m[1]), int(m[2]))),
},
'overcast': {
'duration': (re.compile(r'•\s+(\d+) min'), lambda x: float(x)),
'date': (re.compile(r'<div class="caption2[^"]+">\s+([A-Z][a-z]{2})\s+'
'(\d+)(?:, (\d{4}))?\s+(?:•[^<]+)?</div>', re.S),
parse_overcast_date),
},
'soundcloud': {
'duration': (re.compile(r'meta itemprop="duration" content="PT(\d\d)H(\d\d)M(\d\d)S"'),
extract_hms),
'date': (re.compile(r'<time pubdate>(\d{4})/(\d\d)/(\d\d) '),
lambda m: (int(m[0]), int(m[1]), int(m[2]))),
},
'vk': {
'duration': (
re.compile(r'<div class="[^"]*audio_row__duration[^"]*">'
'(?:(\d):)?(\d+):(\d+)</div>'.encode()),
extract_hms),
},
'rss': {
'duration': (re.compile(r'<itunes:duration>\s*([\d:]+)\s*</itunes:duration>', re.M | re.S),
extract_rss),
'date': (re.compile(r'<pubDate>[^<\d]*(\d+) ([A-Z][a-z]{2}) (\d{4}) '), parse_rss_date),
},
'spotify': {
'duration': (re.compile(r'<span class="total-duration">(?:(\d+):)?(\d+):(\d+)</span>'),
extract_hms),
'date': (re.compile(r'<span class="artists-albums">(\d\d)/(\d\d)/(\d{4})</span>'),
lambda m: (int(m[2]), int(m[0]), int(m[1]))),
},
}
def download_data():
resp = requests.get('https://russiancast.club/data.json')
return resp.json()
def read_lengths():
if not os.path.exists(DATA_PATH):
return {}
with open(DATA_PATH, 'r') as f:
return json.load(f)
def write_lengths(lengths):
with open(DATA_PATH, 'w') as f:
json.dump(lengths, f, ensure_ascii=False)
def parse_text(ex_name, text):
result = {}
for k, parser in EXTRACTORS[ex_name].items():
m = parser[0].findall(text)
if m and parser:
result[k] = [parser[1](g) for g in m]
return result
def get_durations(podcast):
mins = {}
for name in EXTRACTORS:
if name in ('vk', 'byhand'):
continue
if podcast.get(name):
resp = requests.get(podcast[name])
res = parse_text(name, resp.text)
if res:
mins[name] = res
return mins
def find_longest_mins(lengths):
durs = [v.get('duration', []) for v in lengths.values()]
ll = [[x for x in p if x >= 1] for p in durs]
return sorted(ll, key=lambda d: len(d))[-1]
def find_longest_dates(lengths):
dates = [v.get('date', []) for v in lengths.values()]
return sorted(dates, key=lambda d: len(d))[-1]
def find_medians(mins):
mins.sort()
if len(mins) == 1:
median = round(mins[0])
elif len(mins) % 2 == 0:
median = round((float(mins[len(mins)//2-1]) + mins[len(mins)//2]) / 2.0)
else:
median = round(mins[len(mins)//2])
if len(mins) <= 2:
dmed = 0
elif len(mins) <= 5:
dmed = 1
elif len(mins) <= 10:
dmed = 2
else:
dmed = len(mins) // 5
med_low = round(mins[dmed])
med_high = round(mins[-1-dmed])
return median, med_low, med_high
def format_medians(median, med_low, med_high):
def r5(n):
if n < 14:
return n
return round(n/5.0)*5
def minut(n):
if n % 10 == 1 and n % 100 != 11:
return 'минута'
if n % 10 in (2, 3, 4) and n % 100 not in (12, 13, 14):
return 'минуты'
return 'минут'
need_two = med_high-med_low > 10 and med_high * 1.0 / med_low > 1.5
if need_two:
res = '{}–{} {}'.format(r5(med_low), r5(med_high), minut(r5(med_high)))
else:
res = '{} {}'.format(r5(median), minut(r5(median)))
return res
def get_latest_date(dates):
if not dates:
return None
return datetime.date(*max(dates)).strftime(DATE_FORMAT)
def get_median_interval(dates):
if len(dates) < 2:
return None
today = datetime.date.today()
days = sorted((today - datetime.date(*d)).days for d in dates)
daydiffs = [days[i+1] - days[i] for i in range(len(days)-1)]
daydiffs = [d for d in daydiffs if d > 0]
# print(daydiffs)
if not daydiffs:
return None
# Take last 20, so that format changes do not affect the result
if len(daydiffs) > 20:
daydiffs = daydiffs[:20]
return find_medians(daydiffs)
def format_interval(median, med_low, med_high):
if not median:
return ''
if med_high > 40:
return 'нерегулярно'
if med_high > med_low * 3 and median > 14:
return 'нерегулярно'
if median == 1:
return 'ежедневно'
if median == 2:
return 'через день'
if 3 <= median <= 5:
return 'дважды в неделю'
if 6 <= median <= 9:
return 'еженедельно'
if 10 <= median <= 17:
return 'раз в две недели'
if 18 <= median <= 25:
return 'раз в три недели'
if 26 <= median <= 40:
return 'ежемесячно'
else:
return 'реже раза в месяц'
return 'нерегулярно'
def gen_additional_fields(lengths):
result = {}
if not lengths:
return result
mins = find_longest_mins(lengths)
if mins:
meds = find_medians(mins)
result['duration'] = format_medians(*meds)
dates = find_longest_dates(lengths)
if dates:
meds = get_median_interval(dates)
latest = get_latest_date(dates)
age = (datetime.datetime.now() - datetime.datetime.strptime(
latest, DATE_FORMAT)).days
result['active'] = age <= (32 if not meds else max(32, meds[1] + meds[2]))
if meds:
result['frequency'] = format_interval(*meds)
return result
| [
"os.path.exists",
"re.compile",
"datetime.datetime.strptime",
"requests.get",
"datetime.datetime.now",
"datetime.date",
"json.load",
"datetime.date.today",
"json.dump"
] | [((2647, 2697), 'requests.get', 'requests.get', (['"""https://russiancast.club/data.json"""'], {}), "('https://russiancast.club/data.json')\n", (2659, 2697), False, 'import requests\n'), ((5212, 5233), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (5231, 5233), False, 'import datetime\n'), ((2754, 2779), 'os.path.exists', 'os.path.exists', (['DATA_PATH'], {}), '(DATA_PATH)\n', (2768, 2779), False, 'import os\n'), ((2850, 2862), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2859, 2862), False, 'import json\n'), ((2937, 2978), 'json.dump', 'json.dump', (['lengths', 'f'], {'ensure_ascii': '(False)'}), '(lengths, f, ensure_ascii=False)\n', (2946, 2978), False, 'import json\n'), ((1142, 1180), 're.compile', 're.compile', (['"""(\\\\d\\\\d)(\\\\d\\\\d)(\\\\d\\\\d)"""'], {}), "('(\\\\d\\\\d)(\\\\d\\\\d)(\\\\d\\\\d)')\n", (1152, 1180), False, 'import re\n'), ((1293, 1327), 're.compile', 're.compile', (['"""•\\\\s+(\\\\d+) min"""'], {}), "('•\\\\s+(\\\\d+) min')\n", (1303, 1327), False, 'import re\n'), ((1366, 1493), 're.compile', 're.compile', (['"""<div class="caption2[^"]+">\\\\s+([A-Z][a-z]{2})\\\\s+(\\\\d+)(?:, (\\\\d{4}))?\\\\s+(?:•[^<]+)?</div>"""', 're.S'], {}), '(\n \'<div class="caption2[^"]+">\\\\s+([A-Z][a-z]{2})\\\\s+(\\\\d+)(?:, (\\\\d{4}))?\\\\s+(?:•[^<]+)?</div>\'\n , re.S)\n', (1376, 1493), False, 'import re\n'), ((1599, 1677), 're.compile', 're.compile', (['"""meta itemprop="duration" content="PT(\\\\d\\\\d)H(\\\\d\\\\d)M(\\\\d\\\\d)S\\""""'], {}), '(\'meta itemprop="duration" content="PT(\\\\d\\\\d)H(\\\\d\\\\d)M(\\\\d\\\\d)S"\')\n', (1609, 1677), False, 'import re\n'), ((1726, 1781), 're.compile', 're.compile', (['"""<time pubdate>(\\\\d{4})/(\\\\d\\\\d)/(\\\\d\\\\d) """'], {}), "('<time pubdate>(\\\\d{4})/(\\\\d\\\\d)/(\\\\d\\\\d) ')\n", (1736, 1781), False, 'import re\n'), ((2084, 2163), 're.compile', 're.compile', (['"""<itunes:duration>\\\\s*([\\\\d:]+)\\\\s*</itunes:duration>"""', '(re.M | re.S)'], {}), 
"('<itunes:duration>\\\\s*([\\\\d:]+)\\\\s*</itunes:duration>', re.M | re.S)\n", (2094, 2163), False, 'import re\n'), ((2215, 2278), 're.compile', 're.compile', (['"""<pubDate>[^<\\\\d]*(\\\\d+) ([A-Z][a-z]{2}) (\\\\d{4}) """'], {}), "('<pubDate>[^<\\\\d]*(\\\\d+) ([A-Z][a-z]{2}) (\\\\d{4}) ')\n", (2225, 2278), False, 'import re\n'), ((2340, 2415), 're.compile', 're.compile', (['"""<span class="total-duration">(?:(\\\\d+):)?(\\\\d+):(\\\\d+)</span>"""'], {}), '(\'<span class="total-duration">(?:(\\\\d+):)?(\\\\d+):(\\\\d+)</span>\')\n', (2350, 2415), False, 'import re\n'), ((2467, 2543), 're.compile', 're.compile', (['"""<span class="artists-albums">(\\\\d\\\\d)/(\\\\d\\\\d)/(\\\\d{4})</span>"""'], {}), '(\'<span class="artists-albums">(\\\\d\\\\d)/(\\\\d\\\\d)/(\\\\d{4})</span>\')\n', (2477, 2543), False, 'import re\n'), ((3386, 3413), 'requests.get', 'requests.get', (['podcast[name]'], {}), '(podcast[name])\n', (3398, 3413), False, 'import requests\n'), ((707, 728), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (726, 728), False, 'import datetime\n'), ((6704, 6727), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6725, 6727), False, 'import datetime\n'), ((6730, 6777), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['latest', 'DATE_FORMAT'], {}), '(latest, DATE_FORMAT)\n', (6756, 6777), False, 'import datetime\n'), ((5261, 5278), 'datetime.date', 'datetime.date', (['*d'], {}), '(*d)\n', (5274, 5278), False, 'import datetime\n')] |
from django.conf.urls import patterns, include, url
from . import views
urlpatterns = [
url(r'^$', views.rate_list, name='list'),
url(r'^(?P<pk>\d+)/$', views.rate_as_field, name='as_field'),
url(r'^suggestions/$', views.suggestions, name='suggestions'),
url(r'^create/$', views.create_rate, name='create'),
#url(r'^user_rate/(?P<username>[\w\s@+.-]+)/$', views.edit_user_rate, name='edit_user_rate'),
url(r'^(?P<pk>\d+)/edit/$', views.edit_rate, name='edit'),
url(r'^(?P<pk>\d+)/delete/$', views.delete_rate, name='delete'),
]
| [
"django.conf.urls.url"
] | [((93, 132), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.rate_list'], {'name': '"""list"""'}), "('^$', views.rate_list, name='list')\n", (96, 132), False, 'from django.conf.urls import patterns, include, url\n'), ((139, 199), 'django.conf.urls.url', 'url', (['"""^(?P<pk>\\\\d+)/$"""', 'views.rate_as_field'], {'name': '"""as_field"""'}), "('^(?P<pk>\\\\d+)/$', views.rate_as_field, name='as_field')\n", (142, 199), False, 'from django.conf.urls import patterns, include, url\n'), ((205, 265), 'django.conf.urls.url', 'url', (['"""^suggestions/$"""', 'views.suggestions'], {'name': '"""suggestions"""'}), "('^suggestions/$', views.suggestions, name='suggestions')\n", (208, 265), False, 'from django.conf.urls import patterns, include, url\n'), ((272, 322), 'django.conf.urls.url', 'url', (['"""^create/$"""', 'views.create_rate'], {'name': '"""create"""'}), "('^create/$', views.create_rate, name='create')\n", (275, 322), False, 'from django.conf.urls import patterns, include, url\n'), ((427, 484), 'django.conf.urls.url', 'url', (['"""^(?P<pk>\\\\d+)/edit/$"""', 'views.edit_rate'], {'name': '"""edit"""'}), "('^(?P<pk>\\\\d+)/edit/$', views.edit_rate, name='edit')\n", (430, 484), False, 'from django.conf.urls import patterns, include, url\n'), ((490, 553), 'django.conf.urls.url', 'url', (['"""^(?P<pk>\\\\d+)/delete/$"""', 'views.delete_rate'], {'name': '"""delete"""'}), "('^(?P<pk>\\\\d+)/delete/$', views.delete_rate, name='delete')\n", (493, 553), False, 'from django.conf.urls import patterns, include, url\n')] |
# importing libraries
import numpy as np
import pandas as pd
import random
import torch
def set_seeds(seed=1234):
"""[Set seeds for reproducibility.]
Keyword Arguments:
seed {int} -- [The seed value] (default: {1234})
"""
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
print("[INFO] THE SEED IS ", seed)
def set_device(cuda=True):
"""[To set the type of machine CPU or GPU]
Keyword Arguments:
cuda {bool} -- [To use GPU or not] (default: {True})
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("[INFO] THE DEVICE IS ", device)
return device
paths = {
"train_path1": "data/atis/train/seq.in",
"train_path2": "data/atis/train/seq.out",
"train_path3": "data/atis/train/label",
"valid_path1": "data/atis/dev/seq.in",
"valid_path2": "data/atis/dev/seq.out",
"valid_path3": "data/atis/dev/label",
"test_path1": "data/atis/test/seq.in",
"test_path2": "data/atis/test/seq.out",
"test_path3":"data/atis/test/label"
}
| [
"torch.manual_seed",
"random.seed",
"torch.cuda.is_available",
"numpy.random.seed",
"torch.cuda.manual_seed"
] | [((247, 267), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (261, 267), True, 'import numpy as np\n'), ((272, 289), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (283, 289), False, 'import random\n'), ((294, 317), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (311, 317), False, 'import torch\n'), ((322, 350), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (344, 350), False, 'import torch\n'), ((594, 619), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (617, 619), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_in_serializers
-------------------
Tests of the fields cooperation in the serializer interfaces for serialization, de-serialization,
and validation.
"""
# Django settings:
import os
os.environ['DJANGO_SETTINGS_MODULE'] = __name__
from django.conf.global_settings import CACHES # NOQA
SECRET_KEY = 's3cr3t'
from rest_framework import serializers
from rest_framework.compat import six
from drf_compound_fields.fields import DictField
from drf_compound_fields.fields import ListField
class ListSerializer(serializers.Serializer):
emails = ListField(serializers.EmailField(), required=False)
def test_non_list():
serializer = ListSerializer(data={'emails': 'notAList'})
assert not serializer.is_valid(), 'Non-list value should not be allowed'
assert 'emails' in serializer.errors, 'Non-list value should produce a field error'
assert serializer.errors['emails'], 'Non-list value error should be non-empty'
def test_invalid_list_item():
serializer = ListSerializer(data={'emails': ['<EMAIL>', 'notAnEmail']})
assert not serializer.is_valid(), 'Invalid list-item should not be allowed'
assert 'emails' in serializer.errors, 'Invalid list-item should produce a field error'
assert serializer.errors['emails'], 'Invalid list-item errors should be non-empty {0}'.format(
serializer.errors['emails'])
assert [1] == list(six.iterkeys(serializer.errors['emails'][0]))
def test_empty_list():
serializer = ListSerializer(data={'emails': []})
assert serializer.is_valid(), 'Empty list should be allowed'
def test_valid_list():
serializer = ListSerializer(data={'emails': ['<EMAIL>']})
assert serializer.is_valid(), 'Valid list should be allowed'
class DictSerializer(serializers.Serializer):
emails = DictField(serializers.EmailField(), required=False)
def test_non_dict():
serializer = DictSerializer(data={'emails': 'notADict'})
assert not serializer.is_valid(), 'Non-dict value should not be allowed'
assert 'emails' in serializer.errors, 'Non-dict value should produce a field error'
assert serializer.errors['emails'], 'Non-dict value error should be non-empty'
def test_invalid_dict_value():
serializer = DictSerializer(data={'emails': {'a': '<EMAIL>',
'b': 'notAnEmail'}})
assert not serializer.is_valid(), 'Invalid dict-value should not be allowed'
assert 'emails' in serializer.errors, 'Invalid dict-value should produce a field error'
assert serializer.errors['emails'], 'Invalid dict-value errors should be non-empty {0}'.format(
serializer.errors['emails'])
assert ['b'] == list(six.iterkeys(serializer.errors['emails'][0]))
def test_empty_dict():
serializer = DictSerializer(data={'emails': {}})
assert serializer.is_valid(), 'Empty dict should be allowed'
def test_valid_dict():
serializer = DictSerializer(data={'emails': {'a': '<EMAIL>'}})
assert serializer.is_valid(), 'Valid dict shouild be allowed'
| [
"rest_framework.serializers.EmailField",
"rest_framework.compat.six.iterkeys"
] | [((618, 642), 'rest_framework.serializers.EmailField', 'serializers.EmailField', ([], {}), '()\n', (640, 642), False, 'from rest_framework import serializers\n'), ((1842, 1866), 'rest_framework.serializers.EmailField', 'serializers.EmailField', ([], {}), '()\n', (1864, 1866), False, 'from rest_framework import serializers\n'), ((1430, 1474), 'rest_framework.compat.six.iterkeys', 'six.iterkeys', (["serializer.errors['emails'][0]"], {}), "(serializer.errors['emails'][0])\n", (1442, 1474), False, 'from rest_framework.compat import six\n'), ((2708, 2752), 'rest_framework.compat.six.iterkeys', 'six.iterkeys', (["serializer.errors['emails'][0]"], {}), "(serializer.errors['emails'][0])\n", (2720, 2752), False, 'from rest_framework.compat import six\n')] |
### Copyright 2014, MTA SZTAKI, www.sztaki.hu
###
### Licensed under the Apache License, Version 2.0 (the "License");
### you may not use this file except in compliance with the License.
### You may obtain a copy of the License at
###
### http://www.apache.org/licenses/LICENSE-2.0
###
### Unless required by applicable law or agreed to in writing, software
### distributed under the License is distributed on an "AS IS" BASIS,
### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
### See the License for the specific language governing permissions and
### limitations under the License.
"""
Upkeep algorithms to be used before making an Enactor pass.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import occo.infobroker as ib
import occo.util.factory as factory
import occo.constants.status as nodestate
import logging
log = logging.getLogger('occo.upkeep')
datalog = logging.getLogger('occo.data.upkeep')
class Upkeep(factory.MultiBackend):
def __init__(self):
self.infobroker = ib.main_info_broker
def acquire_dynamic_state(self, infra_id):
raise NotImplementedError()
@factory.register(Upkeep, 'noop')
class DefaultUpkeep(Upkeep):
def acquire_dynamic_state(self, infra_id):
return self.infobroker.get('infrastructure.state', infra_id, True)
@factory.register(Upkeep, 'basic')
class BasicUpkeep(Upkeep):
def __init__(self):
super(BasicUpkeep, self).__init__()
import occo.infobroker
self.uds = occo.infobroker.main_uds
def is_failed(self, node):
return node['state'] == nodestate.FAIL
def is_shutdown(self, node):
return node['state'] == nodestate.SHUTDOWN
def acquire_dynamic_state(self, infra_id):
log.debug('Acquiring state of %r', infra_id)
dynamic_state = self.infobroker.get(
'infrastructure.state', infra_id, True)
datalog.debug('%r', dynamic_state)
log.debug('Processing failed nodes in %r', infra_id)
nodes = [node
for instances in list(dynamic_state.values())
for node in list(instances.values())]
failed_nodes, remove_nodes = [], []
for node in nodes:
failed = self.is_failed(node)
shutdown = self.is_shutdown(node)
if failed or shutdown:
if failed:
failed_nodes.append(node)
remove_nodes.append(node)
del dynamic_state[node['resolved_node_definition']['name']][node['node_id']]
if len(failed_nodes)>0:
log.info('Archiving failed instances of %r: %r',
infra_id, [i['node_id'] for i in failed_nodes])
self.uds.store_failed_nodes(infra_id, *failed_nodes)
if len(remove_nodes)>0:
remove_ids = [i['node_id'] for i in remove_nodes]
log.info('Removing lost instances from %r: %r',
infra_id, remove_ids)
self.uds.remove_nodes(infra_id, *remove_ids)
return dynamic_state, failed_nodes
| [
"logging.getLogger",
"occo.util.factory.register"
] | [((850, 882), 'logging.getLogger', 'logging.getLogger', (['"""occo.upkeep"""'], {}), "('occo.upkeep')\n", (867, 882), False, 'import logging\n'), ((893, 930), 'logging.getLogger', 'logging.getLogger', (['"""occo.data.upkeep"""'], {}), "('occo.data.upkeep')\n", (910, 930), False, 'import logging\n'), ((1124, 1156), 'occo.util.factory.register', 'factory.register', (['Upkeep', '"""noop"""'], {}), "(Upkeep, 'noop')\n", (1140, 1156), True, 'import occo.util.factory as factory\n'), ((1310, 1343), 'occo.util.factory.register', 'factory.register', (['Upkeep', '"""basic"""'], {}), "(Upkeep, 'basic')\n", (1326, 1343), True, 'import occo.util.factory as factory\n')] |
#!/usr/bin/env python3
import sys, re
for line in iter(sys.stdin.readline, ''):
if re.search('agent ', line):
print(line.strip())
| [
"re.search"
] | [((89, 114), 're.search', 're.search', (['"""agent """', 'line'], {}), "('agent ', line)\n", (98, 114), False, 'import sys, re\n')] |
from haptyc import *
from base64 import b64encode, b64decode
import json
class TestLogic(Transform):
#
# test_h1: Decodes base64, fuzzes using random_insert, Re-encodes base64
# Number of tests: 50
#
@ApplyIteration(50)
def test_h1(self, data, state):
data = b64decode(data)
data = random_insert(data,list("'"))
data = b64encode(data)
return data
#
# test_jsonfuzz: Deserialize JSON
# Loop through every key
# Decodes base64
# fuzzes using random_insert
# Re-encodes base64
# Serialize JSON
# Number of tests: 50
#
@ApplyIteration(50)
def test_jsonfuzz(self, data, state):
JA = json.loads(data)
for key in JA:
JA[key] = b64encode(random_insert(b64decode(JA[key]), list("!@#$%^&*()")))
return json.dumps(JA)
def queueRequests(target, wordlists):
engine = RequestEngine(endpoint=target.endpoint, concurrentConnections=1, requestsPerConnection=1, pipeline=0)
TestFactory = TestLogic(target.req)
for test in TestFactory:
engine.queue(test)
def handleResponse(req, interesting):
table.add(req)
| [
"base64.b64encode",
"json.dumps",
"base64.b64decode",
"json.loads"
] | [((304, 319), 'base64.b64decode', 'b64decode', (['data'], {}), '(data)\n', (313, 319), False, 'from base64 import b64encode, b64decode\n'), ((382, 397), 'base64.b64encode', 'b64encode', (['data'], {}), '(data)\n', (391, 397), False, 'from base64 import b64encode, b64decode\n'), ((797, 813), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (807, 813), False, 'import json\n'), ((942, 956), 'json.dumps', 'json.dumps', (['JA'], {}), '(JA)\n', (952, 956), False, 'import json\n'), ((885, 903), 'base64.b64decode', 'b64decode', (['JA[key]'], {}), '(JA[key])\n', (894, 903), False, 'from base64 import b64encode, b64decode\n')] |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from pathlib import Path
app = Flask(__name__)
BASE_DIR = Path(__file__).resolve().parent
DB_PATH = str(BASE_DIR / "one_to_many.sqlite")
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///" + DB_PATH
app.config["SQLALCHEMY_COMMIT_ON_SUBMIT"] = True
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
class Board(db.Model):
# Many
__tablename__ = "amamov_board" # tablename을 지정하지 않으면 class 명의 소문자이다.
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(128))
writer_id = db.Column(
db.Integer, db.ForeignKey("amamov_user.id", ondelete="CASCADE")
)
def __repr__(self):
return f"<Board {self.title}>"
class User(db.Model):
# One
__tablename__ = "amamov_user"
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(128))
boards = db.relationship(
"Board", # 어떤 클래스와 관계를 가지는가
backref="writer", # 상대방이 참조하는 이름
lazy="dynamic", # 어떻게 둘을 연결하는가(연결하는 방식:dynamic)
passive_deletes=True,
)
def __repr__(self):
return f"<User {self.username}>"
db.create_all()
if __name__ == "__main__":
app.run(host="localhost", port=5000, debug=True)
"""
amamov = User(username='amamov')
db.session.add(amamov)
db.session.commit()
board1 = Board(title='hello1', writer_id=amamov.id)
board2 = Board(title='hello2', writer=amamov) # User의 backref="writer"로 접근 가능
db.session.add_all([board1, board2])
db.session.commit()
amamov = User.query.filter(User.username=='amamov').first()
amamov.boards.all()
# [<Board hello1>, <Board hello2>]
"""
| [
"flask_sqlalchemy.SQLAlchemy",
"pathlib.Path",
"flask.Flask"
] | [((97, 112), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (102, 112), False, 'from flask import Flask\n'), ((376, 391), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (386, 391), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((125, 139), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (129, 139), False, 'from pathlib import Path\n')] |
import pandas as pd
import sys
from os import system
sys.path.append('../final_project/')
sys.path.append('../')
def readNames(inputFile='new_poi_names.txt'):
'''
A function to read names data from a file create by a data cache
Returns:
Returns a data frame that contains data from 'poi_names.txt'
'''
#bash_command = 'bash script.sh'
#system(bash_command)
data = pd.read_csv(inputFile, skiprows=2, delimiter=';', header=None, names=['Ans', 'Name'])
return(data) | [
"sys.path.append",
"pandas.read_csv"
] | [((53, 89), 'sys.path.append', 'sys.path.append', (['"""../final_project/"""'], {}), "('../final_project/')\n", (68, 89), False, 'import sys\n'), ((90, 112), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (105, 112), False, 'import sys\n'), ((405, 494), 'pandas.read_csv', 'pd.read_csv', (['inputFile'], {'skiprows': '(2)', 'delimiter': '""";"""', 'header': 'None', 'names': "['Ans', 'Name']"}), "(inputFile, skiprows=2, delimiter=';', header=None, names=['Ans',\n 'Name'])\n", (416, 494), True, 'import pandas as pd\n')] |
# Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tempfile import NamedTemporaryFile
from launch.legacy.exit_handler import ignore_exit_handler
from launch.legacy.output_handler import FileOutput
def launch(launch_descriptor, argv):
counter_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'counter.py')
with NamedTemporaryFile(mode='w', prefix='foo_', delete=False) as h:
foo_filename = h.name
ld = launch_descriptor
ld.add_process(
cmd=[sys.executable, '-u', counter_file, '--limit', '15', '--sleep', '0.5'],
name='foo',
output_handlers=[FileOutput(filename=foo_filename)],
exit_handler=ignore_exit_handler,
)
| [
"os.path.dirname",
"launch.legacy.output_handler.FileOutput",
"tempfile.NamedTemporaryFile"
] | [((914, 971), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'mode': '"""w"""', 'prefix': '"""foo_"""', 'delete': '(False)'}), "(mode='w', prefix='foo_', delete=False)\n", (932, 971), False, 'from tempfile import NamedTemporaryFile\n'), ((862, 887), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (877, 887), False, 'import os\n'), ((1186, 1219), 'launch.legacy.output_handler.FileOutput', 'FileOutput', ([], {'filename': 'foo_filename'}), '(filename=foo_filename)\n', (1196, 1219), False, 'from launch.legacy.output_handler import FileOutput\n')] |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LinearRegression, LogisticRegression
from ... import tensor as mt
from ..wrappers import ParallelPostFit
def test_parallel_post_fit_basic(setup):
raw_x, raw_y = make_classification(n_samples=1000)
X, y = mt.tensor(raw_x, chunk_size=100), mt.tensor(raw_y, chunk_size=100)
clf = ParallelPostFit(GradientBoostingClassifier())
clf.fit(X, y)
assert isinstance(clf.predict(X), mt.Tensor)
assert isinstance(clf.predict_proba(X), mt.Tensor)
result = clf.score(X, y)
expected = clf.estimator.score(X, y)
assert result.fetch() == expected
clf = ParallelPostFit(LinearRegression())
clf.fit(X, y)
with pytest.raises(
AttributeError, match="The wrapped estimator (.|\n)* 'predict_proba' method."
):
clf.predict_proba(X)
def test_parallel_post_fit_predict(setup):
raw_x, raw_y = make_classification(n_samples=1000)
X, y = mt.tensor(raw_x, chunk_size=100), mt.tensor(raw_y, chunk_size=100)
base = LogisticRegression(random_state=0, n_jobs=1, solver="lbfgs")
wrap = ParallelPostFit(LogisticRegression(random_state=0, n_jobs=1, solver="lbfgs"))
base.fit(X, y)
wrap.fit(X, y)
result = wrap.predict(X)
expected = base.predict(X)
np.testing.assert_allclose(result, expected)
result = wrap.predict_proba(X)
expected = base.predict_proba(X)
np.testing.assert_allclose(result, expected)
result = wrap.predict_log_proba(X)
expected = base.predict_log_proba(X)
np.testing.assert_allclose(result, expected)
def test_parallel_post_fit_transform(setup):
raw_x, raw_y = make_classification(n_samples=1000)
X, y = mt.tensor(raw_x, chunk_size=100), mt.tensor(raw_y, chunk_size=100)
base = PCA(random_state=0)
wrap = ParallelPostFit(PCA(random_state=0))
base.fit(raw_x, raw_y)
wrap.fit(X, y)
result = base.transform(X)
expected = wrap.transform(X)
np.testing.assert_allclose(result, expected, atol=0.1)
def test_parallel_post_fit_multiclass(setup):
raw_x, raw_y = make_classification(n_samples=1000)
X, y = mt.tensor(raw_x, chunk_size=100), mt.tensor(raw_y, chunk_size=100)
raw_x, raw_y = make_classification(n_classes=3, n_informative=4)
X, y = mt.tensor(raw_x, chunk_size=50), mt.tensor(raw_y, chunk_size=50)
clf = ParallelPostFit(
LogisticRegression(random_state=0, n_jobs=1, solver="lbfgs", multi_class="auto")
)
clf.fit(X, y)
result = clf.predict(X)
expected = clf.estimator.predict(X)
np.testing.assert_allclose(result, expected)
result = clf.predict_proba(X)
expected = clf.estimator.predict_proba(X)
np.testing.assert_allclose(result, expected)
result = clf.predict_log_proba(X)
expected = clf.estimator.predict_log_proba(X)
np.testing.assert_allclose(result, expected)
| [
"sklearn.decomposition.PCA",
"numpy.testing.assert_allclose",
"sklearn.linear_model.LogisticRegression",
"pytest.raises",
"sklearn.ensemble.GradientBoostingClassifier",
"sklearn.linear_model.LinearRegression",
"sklearn.datasets.make_classification"
] | [((974, 1009), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(1000)'}), '(n_samples=1000)\n', (993, 1009), False, 'from sklearn.datasets import make_classification\n'), ((1651, 1686), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(1000)'}), '(n_samples=1000)\n', (1670, 1686), False, 'from sklearn.datasets import make_classification\n'), ((1776, 1836), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'n_jobs': '(1)', 'solver': '"""lbfgs"""'}), "(random_state=0, n_jobs=1, solver='lbfgs')\n", (1794, 1836), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((2030, 2074), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (2056, 2074), True, 'import numpy as np\n'), ((2152, 2196), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (2178, 2196), True, 'import numpy as np\n'), ((2282, 2326), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (2308, 2326), True, 'import numpy as np\n'), ((2393, 2428), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(1000)'}), '(n_samples=1000)\n', (2412, 2428), False, 'from sklearn.datasets import make_classification\n'), ((2518, 2537), 'sklearn.decomposition.PCA', 'PCA', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2521, 2537), False, 'from sklearn.decomposition import PCA\n'), ((2702, 2756), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {'atol': '(0.1)'}), '(result, expected, atol=0.1)\n', (2728, 2756), True, 'import numpy as np\n'), ((2824, 2859), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(1000)'}), '(n_samples=1000)\n', (2843, 2859), False, 'from sklearn.datasets 
import make_classification\n'), ((2957, 3006), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_classes': '(3)', 'n_informative': '(4)'}), '(n_classes=3, n_informative=4)\n', (2976, 3006), False, 'from sklearn.datasets import make_classification\n'), ((3298, 3342), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (3324, 3342), True, 'import numpy as np\n'), ((3429, 3473), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (3455, 3473), True, 'import numpy as np\n'), ((3568, 3612), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {}), '(result, expected)\n', (3594, 3612), True, 'import numpy as np\n'), ((1114, 1142), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {}), '()\n', (1140, 1142), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((1403, 1421), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1419, 1421), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((1450, 1550), 'pytest.raises', 'pytest.raises', (['AttributeError'], {'match': '"""The wrapped estimator (.|\n)* \'predict_proba\' method."""'}), '(AttributeError, match=\n """The wrapped estimator (.|\n)* \'predict_proba\' method.""")\n', (1463, 1550), False, 'import pytest\n'), ((1864, 1924), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)', 'n_jobs': '(1)', 'solver': '"""lbfgs"""'}), "(random_state=0, n_jobs=1, solver='lbfgs')\n", (1882, 1924), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((2565, 2584), 'sklearn.decomposition.PCA', 'PCA', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2568, 2584), False, 'from sklearn.decomposition import PCA\n'), ((3119, 3204), 'sklearn.linear_model.LogisticRegression', 
'LogisticRegression', ([], {'random_state': '(0)', 'n_jobs': '(1)', 'solver': '"""lbfgs"""', 'multi_class': '"""auto"""'}), "(random_state=0, n_jobs=1, solver='lbfgs', multi_class='auto'\n )\n", (3137, 3204), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n')] |
import cx_Freeze
import sys
base = None
if sys.platform == 'win32':
base = "Win32GUI"
executables = [cx_Freeze.Executable("Speech_Recognizer.py", base=base, icon = "icon.ico")]
cx_Freeze.setup(
name = "Speech Recognizer",
author = "<NAME>",
options = {"build_exe":{"packages":["tkinter", "speech_recognition", "threading", "time"], "include_files":["icon.ico", "wait.ico", "mic.ico", "save.ico"]}},
version = "1.0",
description = "Speech Recognizer cum Text Editor that facilitates voice typing using Google Speech Recognition API",
executables = executables
) | [
"cx_Freeze.Executable",
"cx_Freeze.setup"
] | [((195, 590), 'cx_Freeze.setup', 'cx_Freeze.setup', ([], {'name': '"""Speech Recognizer"""', 'author': '"""<NAME>"""', 'options': "{'build_exe': {'packages': ['tkinter', 'speech_recognition', 'threading',\n 'time'], 'include_files': ['icon.ico', 'wait.ico', 'mic.ico', 'save.ico']}}", 'version': '"""1.0"""', 'description': '"""Speech Recognizer cum Text Editor that facilitates voice typing using Google Speech Recognition API"""', 'executables': 'executables'}), "(name='Speech Recognizer', author='<NAME>', options={\n 'build_exe': {'packages': ['tkinter', 'speech_recognition', 'threading',\n 'time'], 'include_files': ['icon.ico', 'wait.ico', 'mic.ico',\n 'save.ico']}}, version='1.0', description=\n 'Speech Recognizer cum Text Editor that facilitates voice typing using Google Speech Recognition API'\n , executables=executables)\n", (210, 590), False, 'import cx_Freeze\n'), ((116, 188), 'cx_Freeze.Executable', 'cx_Freeze.Executable', (['"""Speech_Recognizer.py"""'], {'base': 'base', 'icon': '"""icon.ico"""'}), "('Speech_Recognizer.py', base=base, icon='icon.ico')\n", (136, 188), False, 'import cx_Freeze\n')] |
#!/usr/bin/env python
from setuptools import setup
LONG_DESCRIPTION = \
'''The program extracts regions of interest from Fasta or Genome Feature Format (GFF) genomes.
This is done given a set of seed sequences given as nucleotide strings in a multi-line fasta file.
The program can output fasta and GFF outputs or regions, and will giv multiple outputs around regions and their evidence.
The program takes in multiple fasta or GFF files at a single time and can also take multiple seed sequence pairs.'''
setup(
name='Magphi',
version='0.1.6',
author='<NAME>',
author_email='<EMAIL>',
packages=['Magphi'],
package_dir={'Magphi': 'Magphi'},
entry_points={
'console_scripts': ['Magphi = Magphi.__main__:main']
},
url='https://github.com/milnus/Magphi',
license='MIT license',
description=('A bioinformatics tool allowing for examnination and extraction of genomic features using seed sequences.'),
long_description=LONG_DESCRIPTION,
install_requires=['biopython==1.79',
'pybedtools'],
keywords=['Genomic', 'extraction', 'bacteria', 'prokaryotes', 'bioinformatics'],
classifiers=[
'Programming Language :: Python :: 3.9',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Development Status :: 4 - Beta']
)
| [
"setuptools.setup"
] | [((512, 1390), 'setuptools.setup', 'setup', ([], {'name': '"""Magphi"""', 'version': '"""0.1.6"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'packages': "['Magphi']", 'package_dir': "{'Magphi': 'Magphi'}", 'entry_points': "{'console_scripts': ['Magphi = Magphi.__main__:main']}", 'url': '"""https://github.com/milnus/Magphi"""', 'license': '"""MIT license"""', 'description': '"""A bioinformatics tool allowing for examnination and extraction of genomic features using seed sequences."""', 'long_description': 'LONG_DESCRIPTION', 'install_requires': "['biopython==1.79', 'pybedtools']", 'keywords': "['Genomic', 'extraction', 'bacteria', 'prokaryotes', 'bioinformatics']", 'classifiers': "['Programming Language :: Python :: 3.9',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Development Status :: 4 - Beta']"}), "(name='Magphi', version='0.1.6', author='<NAME>', author_email=\n '<EMAIL>', packages=['Magphi'], package_dir={'Magphi': 'Magphi'},\n entry_points={'console_scripts': ['Magphi = Magphi.__main__:main']},\n url='https://github.com/milnus/Magphi', license='MIT license',\n description=\n 'A bioinformatics tool allowing for examnination and extraction of genomic features using seed sequences.'\n , long_description=LONG_DESCRIPTION, install_requires=[\n 'biopython==1.79', 'pybedtools'], keywords=['Genomic', 'extraction',\n 'bacteria', 'prokaryotes', 'bioinformatics'], classifiers=[\n 'Programming Language :: Python :: 3.9',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Development Status :: 4 - Beta'])\n", (517, 1390), False, 'from setuptools import setup\n')] |
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
class Generator:
    """TF1-style graph builder for a thermal-image super-resolution generator.

    The network stacks residual blocks with channel attention, three parallel
    densely-connected branches, and two depth-to-space upsampling stages for
    an overall 4x upscale, plus a bicubic global-residual path.
    """
    def __init__(self, learning_rate=1e-4, num_blocks=6):
        # NOTE(review): both attributes are stored but never read by the
        # methods below; presumably consumed by external training code.
        self.learning_rate = learning_rate
        self.num_blocks = num_blocks
    def pelu(self, x):
        """Parametric ELU with trainable alpha/beta, shared per-tensor via AUTO_REUSE."""
        with tf.variable_scope(x.op.name + '_activation', initializer=tf.constant_initializer(1.0), reuse=tf.AUTO_REUSE):
            # 'shape' is computed but never used.
            shape = x.get_shape().as_list()[1:]
            # Constrain both parameters to >= 0.1 so the divisions below stay stable.
            alpha = tf.get_variable('alpha', 1, constraint=lambda t: tf.maximum(t, 0.1))
            beta = tf.get_variable('beta', 1, constraint=lambda t: tf.maximum(t, 0.1))
            # Positive half: scaled ReLU; negative half: exponential saturation.
            positive = tf.nn.relu(x) * alpha / (beta + 1e-9)
            negative = alpha * (tf.exp((-tf.nn.relu(-x)) / (beta + 1e-9)) - 1)
            return negative + positive
    def adaptive_global_average_pool_2d(self, x):
        """Global average pool over the spatial axes, returning a (N, 1, 1, C) map."""
        c = x.get_shape()[-1]
        ADAP2d = tf.reshape(tf.reduce_mean(x, axis=[1, 2]), (-1, 1, 1, c))
        return ADAP2d
    def channel_attention(self, x, f, reduction):
        """Squeeze-and-excitation style channel attention.

        Pools globally, bottlenecks to f // reduction channels, expands back
        to f channels, and rescales the input with the sigmoid gate.
        """
        skip_conn = tf.identity(x, name='identity')
        x = self.adaptive_global_average_pool_2d(x)
        x = tf.layers.conv2d(x, kernel_size=1, filters=f//reduction, strides=1, padding='same')
        x = self.pelu(x)
        x = tf.layers.conv2d(x, kernel_size=1, filters=f, strides=1, padding='same')
        x = tf.nn.sigmoid(x)
        CA = tf.multiply(skip_conn, x)
        return CA
    def ResidualBlock(self, x, kernel_size, filters, strides=1):
        """Residual block with three parallel densely-connected branches.

        Each branch runs three rounds of conv -> pelu -> conv -> pelu -> conv
        -> channel attention, concatenating the result onto the branch input
        (dense connectivity). The branches are fused with a 1x1 conv and added
        to the skip connection.

        NOTE(review): the 'kernel_size' parameter is accepted but unused; all
        convolutions use hard-coded kernel sizes.
        """
        x = tf.layers.conv2d(x, kernel_size=1, filters=filters, strides=1, padding='same')
        skip = x
        x1 = x
        for i in range(3):
            tm1 = slim.conv2d(x1, num_outputs=filters, kernel_size=[3, 3], stride=1)
            tm1 = self.pelu(tm1)
            tm1 = slim.conv2d(tm1, num_outputs=filters, kernel_size=[1, 1], stride=1)
            tm1 = self.pelu(tm1)
            tm1 = slim.conv2d(tm1, num_outputs=filters, kernel_size=[1, 1], stride=1)
            tm1 = self.channel_attention(tm1, f=filters, reduction=4)
            # Dense connection: channel count of x1 grows each iteration.
            x1 = tf.concat([x1,tm1], axis=3)
        x2 = x
        for i in range(3):
            tm2 = slim.conv2d(x2, num_outputs=filters, kernel_size=[3, 3], stride=1)
            tm2 = self.pelu(tm2)
            tm2 = slim.conv2d(tm2, num_outputs=filters, kernel_size=[1, 1], stride=1)
            tm2 = self.pelu(tm2)
            tm2 = slim.conv2d(tm2, num_outputs=filters, kernel_size=[1, 1], stride=1)
            tm2 = self.channel_attention(tm2, f=filters, reduction=4)
            x2 = tf.concat([x2,tm2], axis=3)
        x3 = x
        for i in range(3):
            tm3 = slim.conv2d(x3, num_outputs=filters, kernel_size=[3, 3], stride=1)
            tm3 = self.pelu(tm3)
            tm3 = slim.conv2d(tm3, num_outputs=filters, kernel_size=[1, 1], stride=1)
            tm3 = self.pelu(tm3)
            tm3 = slim.conv2d(tm3, num_outputs=filters, kernel_size=[1, 1], stride=1)
            tm3 = self.channel_attention(tm3, f=filters, reduction=4)
            x3 = tf.concat([x3,tm3], axis=3)
        # Fuse the three branches and project back to 'filters' channels.
        x5 = tf.concat(values=[x1, x2, x3], axis=3, name='stack0')
        x6 = tf.layers.conv2d(x5, kernel_size=1, filters=filters, strides=strides, padding='same', use_bias=False)
        x7 = skip + x6
        return x7
    def Upsample2xBlock(self, x, kernel_size, filters, strides):
        """2x spatial upsample: conv followed by depth-to-space (pixel shuffle) and PELU."""
        #size = tf.shape(x)
        #h = size[1]
        #w = size[2]
        #x = tf.image.resize_nearest_neighbor(x, size=[h * 3, w * 3], align_corners=False, name=None)
        x = tf.layers.conv2d(x, kernel_size=kernel_size, filters=filters, strides=strides, padding='same')
        x = tf.depth_to_space(x, 2)
        x = self.pelu(x)
        return x
    def ThermalSR(self, x, reuse=False, isTraining=True):
        """Full 4x super-resolution forward pass.

        NOTE(review): 'isTraining' and the captured 'scope' are unused.
        """
        with tf.variable_scope("ThermalSR", reuse=reuse) as scope:
            x4 = tf.layers.conv2d(x, kernel_size=7, filters=64, strides=1, padding='same')
            x4 = self.pelu(x4)
            skip = x4
            # Global Residual Learning: a bicubic 4x upscale of the input,
            # projected 64 -> 16 -> 3 channels, added to the network output.
            size = tf.shape(x)
            h = size[1]
            w = size[2]
            x_GRL = tf.image.resize_bicubic(x, size=[h * 4, w * 4], align_corners=False, name=None)
            x_GRL = tf.layers.conv2d(x_GRL, kernel_size=1, filters=64, strides=1, padding='same')
            x_GRL = self.pelu(x_GRL)
            x_GRL = tf.layers.conv2d(x_GRL, kernel_size=1, filters=16, strides=1, padding='same')
            x_GRL = self.pelu(x_GRL)
            x_GRL = tf.layers.conv2d(x_GRL, kernel_size=1, filters=3, strides=1, padding='same')
            x_GRL = self.pelu(x_GRL)
            # Stage 1: four chained residual blocks with a dense skip concat.
            for i in range(4):
                x4 = self.ResidualBlock(x4, kernel_size=1, filters=64, strides=1)
                x4 = tf.layers.conv2d(x4, kernel_size=1, filters=64, strides=1, padding='same', use_bias=False)
                x4 = self.pelu(x4)
                x4 = tf.concat([x4, skip], axis=3)
            x4 = tf.layers.conv2d(x4, kernel_size=3, filters=64, strides=1, padding='same', use_bias=False)
            x4 = self.pelu(x4)
            x4 = x4 + skip
            with tf.variable_scope('Upsamplingconv_stage_1'):
                xUP = self.Upsample2xBlock(x4, kernel_size=3, filters=64, strides = 1)
            xUP = tf.layers.conv2d(xUP, kernel_size=1, filters=64, strides=1, padding='same', use_bias=False)
            xUP = self.pelu(xUP)
            skip1 = xUP
            # NOTE(review): each iteration restarts from 'xUP' rather than
            # chaining 'x5', so the first iteration's result is discarded —
            # possibly a bug (compare with the x4 loop above); confirm intent.
            for i in range(2):
                x5 = self.ResidualBlock(xUP, kernel_size=1, filters=32, strides=1)
                x5 = tf.layers.conv2d(x5, kernel_size=1, filters=32, strides=1, padding='same', use_bias=False)
                x5 = self.pelu(x5)
                x5 = tf.concat([x5, skip1], axis=3)
            x5 = tf.layers.conv2d(x5, kernel_size=3, filters=64, strides=1, padding='same', use_bias=False)
            x5 = self.pelu(x5)
            x5 = x5 + skip1
            with tf.variable_scope('Upsamplingconv_stage_2'):
                x6 = self.Upsample2xBlock(x5, kernel_size=3, filters=64, strides = 1)
            x6 = tf.layers.conv2d(x6, kernel_size=3, filters=64, strides=1, padding='same', name='forward_4')
            x6 = self.pelu(x6)
            x6 = tf.layers.conv2d(x6, kernel_size=3, filters=3, strides=1, padding='same', name='forward_5')
            x6 = self.pelu(x6)
            x_final = x6 + x_GRL
            return x_final
| [
"tensorflow.depth_to_space",
"tensorflow.variable_scope",
"tensorflow.shape",
"tensorflow.nn.relu",
"tensorflow.image.resize_bicubic",
"tensorflow.multiply",
"tensorflow.concat",
"tensorflow.nn.sigmoid",
"tensorflow.layers.conv2d",
"tensorflow.constant_initializer",
"tensorflow.reduce_mean",
"... | [((976, 1007), 'tensorflow.identity', 'tf.identity', (['x'], {'name': '"""identity"""'}), "(x, name='identity')\n", (987, 1007), True, 'import tensorflow as tf\n'), ((1066, 1155), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x'], {'kernel_size': '(1)', 'filters': '(f // reduction)', 'strides': '(1)', 'padding': '"""same"""'}), "(x, kernel_size=1, filters=f // reduction, strides=1,\n padding='same')\n", (1082, 1155), True, 'import tensorflow as tf\n'), ((1180, 1252), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x'], {'kernel_size': '(1)', 'filters': 'f', 'strides': '(1)', 'padding': '"""same"""'}), "(x, kernel_size=1, filters=f, strides=1, padding='same')\n", (1196, 1252), True, 'import tensorflow as tf\n'), ((1261, 1277), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['x'], {}), '(x)\n', (1274, 1277), True, 'import tensorflow as tf\n'), ((1287, 1312), 'tensorflow.multiply', 'tf.multiply', (['skip_conn', 'x'], {}), '(skip_conn, x)\n', (1298, 1312), True, 'import tensorflow as tf\n'), ((1399, 1477), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x'], {'kernel_size': '(1)', 'filters': 'filters', 'strides': '(1)', 'padding': '"""same"""'}), "(x, kernel_size=1, filters=filters, strides=1, padding='same')\n", (1415, 1477), True, 'import tensorflow as tf\n'), ((2805, 2858), 'tensorflow.concat', 'tf.concat', ([], {'values': '[x1, x2, x3]', 'axis': '(3)', 'name': '"""stack0"""'}), "(values=[x1, x2, x3], axis=3, name='stack0')\n", (2814, 2858), True, 'import tensorflow as tf\n'), ((2868, 2973), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x5'], {'kernel_size': '(1)', 'filters': 'filters', 'strides': 'strides', 'padding': '"""same"""', 'use_bias': '(False)'}), "(x5, kernel_size=1, filters=filters, strides=strides,\n padding='same', use_bias=False)\n", (2884, 2973), True, 'import tensorflow as tf\n'), ((3231, 3330), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x'], {'kernel_size': 'kernel_size', 'filters': 'filters', 'strides': 'strides', 
'padding': '"""same"""'}), "(x, kernel_size=kernel_size, filters=filters, strides=\n strides, padding='same')\n", (3247, 3330), True, 'import tensorflow as tf\n'), ((3334, 3357), 'tensorflow.depth_to_space', 'tf.depth_to_space', (['x', '(2)'], {}), '(x, 2)\n', (3351, 3357), True, 'import tensorflow as tf\n'), ((846, 876), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x'], {'axis': '[1, 2]'}), '(x, axis=[1, 2])\n', (860, 876), True, 'import tensorflow as tf\n'), ((1537, 1603), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['x1'], {'num_outputs': 'filters', 'kernel_size': '[3, 3]', 'stride': '(1)'}), '(x1, num_outputs=filters, kernel_size=[3, 3], stride=1)\n', (1548, 1603), True, 'import tensorflow.contrib.slim as slim\n'), ((1643, 1710), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['tm1'], {'num_outputs': 'filters', 'kernel_size': '[1, 1]', 'stride': '(1)'}), '(tm1, num_outputs=filters, kernel_size=[1, 1], stride=1)\n', (1654, 1710), True, 'import tensorflow.contrib.slim as slim\n'), ((1750, 1817), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['tm1'], {'num_outputs': 'filters', 'kernel_size': '[1, 1]', 'stride': '(1)'}), '(tm1, num_outputs=filters, kernel_size=[1, 1], stride=1)\n', (1761, 1817), True, 'import tensorflow.contrib.slim as slim\n'), ((1893, 1921), 'tensorflow.concat', 'tf.concat', (['[x1, tm1]'], {'axis': '(3)'}), '([x1, tm1], axis=3)\n', (1902, 1921), True, 'import tensorflow as tf\n'), ((1972, 2038), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['x2'], {'num_outputs': 'filters', 'kernel_size': '[3, 3]', 'stride': '(1)'}), '(x2, num_outputs=filters, kernel_size=[3, 3], stride=1)\n', (1983, 2038), True, 'import tensorflow.contrib.slim as slim\n'), ((2078, 2145), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['tm2'], {'num_outputs': 'filters', 'kernel_size': '[1, 1]', 'stride': '(1)'}), '(tm2, num_outputs=filters, kernel_size=[1, 1], stride=1)\n', (2089, 2145), True, 'import tensorflow.contrib.slim as slim\n'), ((2185, 2252), 
'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['tm2'], {'num_outputs': 'filters', 'kernel_size': '[1, 1]', 'stride': '(1)'}), '(tm2, num_outputs=filters, kernel_size=[1, 1], stride=1)\n', (2196, 2252), True, 'import tensorflow.contrib.slim as slim\n'), ((2328, 2356), 'tensorflow.concat', 'tf.concat', (['[x2, tm2]'], {'axis': '(3)'}), '([x2, tm2], axis=3)\n', (2337, 2356), True, 'import tensorflow as tf\n'), ((2407, 2473), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['x3'], {'num_outputs': 'filters', 'kernel_size': '[3, 3]', 'stride': '(1)'}), '(x3, num_outputs=filters, kernel_size=[3, 3], stride=1)\n', (2418, 2473), True, 'import tensorflow.contrib.slim as slim\n'), ((2513, 2580), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['tm3'], {'num_outputs': 'filters', 'kernel_size': '[1, 1]', 'stride': '(1)'}), '(tm3, num_outputs=filters, kernel_size=[1, 1], stride=1)\n', (2524, 2580), True, 'import tensorflow.contrib.slim as slim\n'), ((2620, 2687), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['tm3'], {'num_outputs': 'filters', 'kernel_size': '[1, 1]', 'stride': '(1)'}), '(tm3, num_outputs=filters, kernel_size=[1, 1], stride=1)\n', (2631, 2687), True, 'import tensorflow.contrib.slim as slim\n'), ((2763, 2791), 'tensorflow.concat', 'tf.concat', (['[x3, tm3]'], {'axis': '(3)'}), '([x3, tm3], axis=3)\n', (2772, 2791), True, 'import tensorflow as tf\n'), ((3458, 3501), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""ThermalSR"""'], {'reuse': 'reuse'}), "('ThermalSR', reuse=reuse)\n", (3475, 3501), True, 'import tensorflow as tf\n'), ((3523, 3596), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x'], {'kernel_size': '(7)', 'filters': '(64)', 'strides': '(1)', 'padding': '"""same"""'}), "(x, kernel_size=7, filters=64, strides=1, padding='same')\n", (3539, 3596), True, 'import tensorflow as tf\n'), ((3685, 3696), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (3693, 3696), True, 'import tensorflow as tf\n'), ((3747, 3826), 
'tensorflow.image.resize_bicubic', 'tf.image.resize_bicubic', (['x'], {'size': '[h * 4, w * 4]', 'align_corners': '(False)', 'name': 'None'}), '(x, size=[h * 4, w * 4], align_corners=False, name=None)\n', (3770, 3826), True, 'import tensorflow as tf\n'), ((3841, 3918), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x_GRL'], {'kernel_size': '(1)', 'filters': '(64)', 'strides': '(1)', 'padding': '"""same"""'}), "(x_GRL, kernel_size=1, filters=64, strides=1, padding='same')\n", (3857, 3918), True, 'import tensorflow as tf\n'), ((3964, 4041), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x_GRL'], {'kernel_size': '(1)', 'filters': '(16)', 'strides': '(1)', 'padding': '"""same"""'}), "(x_GRL, kernel_size=1, filters=16, strides=1, padding='same')\n", (3980, 4041), True, 'import tensorflow as tf\n'), ((4087, 4163), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x_GRL'], {'kernel_size': '(1)', 'filters': '(3)', 'strides': '(1)', 'padding': '"""same"""'}), "(x_GRL, kernel_size=1, filters=3, strides=1, padding='same')\n", (4103, 4163), True, 'import tensorflow as tf\n'), ((4481, 4575), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x4'], {'kernel_size': '(3)', 'filters': '(64)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(x4, kernel_size=3, filters=64, strides=1, padding='same',\n use_bias=False)\n", (4497, 4575), True, 'import tensorflow as tf\n'), ((4775, 4870), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['xUP'], {'kernel_size': '(1)', 'filters': '(64)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(xUP, kernel_size=1, filters=64, strides=1, padding='same',\n use_bias=False)\n", (4791, 4870), True, 'import tensorflow as tf\n'), ((5200, 5294), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x5'], {'kernel_size': '(3)', 'filters': '(64)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(x5, kernel_size=3, filters=64, strides=1, padding='same',\n use_bias=False)\n", (5216, 5294), 
True, 'import tensorflow as tf\n'), ((5493, 5589), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x6'], {'kernel_size': '(3)', 'filters': '(64)', 'strides': '(1)', 'padding': '"""same"""', 'name': '"""forward_4"""'}), "(x6, kernel_size=3, filters=64, strides=1, padding='same',\n name='forward_4')\n", (5509, 5589), True, 'import tensorflow as tf\n'), ((5622, 5717), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x6'], {'kernel_size': '(3)', 'filters': '(3)', 'strides': '(1)', 'padding': '"""same"""', 'name': '"""forward_5"""'}), "(x6, kernel_size=3, filters=3, strides=1, padding='same',\n name='forward_5')\n", (5638, 5717), True, 'import tensorflow as tf\n'), ((4308, 4402), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x4'], {'kernel_size': '(1)', 'filters': '(64)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(x4, kernel_size=1, filters=64, strides=1, padding='same',\n use_bias=False)\n", (4324, 4402), True, 'import tensorflow as tf\n'), ((4439, 4468), 'tensorflow.concat', 'tf.concat', (['[x4, skip]'], {'axis': '(3)'}), '([x4, skip], axis=3)\n', (4448, 4468), True, 'import tensorflow as tf\n'), ((4630, 4673), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Upsamplingconv_stage_1"""'], {}), "('Upsamplingconv_stage_1')\n", (4647, 4673), True, 'import tensorflow as tf\n'), ((5026, 5120), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', (['x5'], {'kernel_size': '(1)', 'filters': '(32)', 'strides': '(1)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(x5, kernel_size=1, filters=32, strides=1, padding='same',\n use_bias=False)\n", (5042, 5120), True, 'import tensorflow as tf\n'), ((5157, 5187), 'tensorflow.concat', 'tf.concat', (['[x5, skip1]'], {'axis': '(3)'}), '([x5, skip1], axis=3)\n', (5166, 5187), True, 'import tensorflow as tf\n'), ((5350, 5393), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Upsamplingconv_stage_2"""'], {}), "('Upsamplingconv_stage_2')\n", (5367, 5393), True, 'import tensorflow as tf\n'), 
((316, 344), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (339, 344), True, 'import tensorflow as tf\n'), ((599, 612), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (609, 612), True, 'import tensorflow as tf\n'), ((477, 495), 'tensorflow.maximum', 'tf.maximum', (['t', '(0.1)'], {}), '(t, 0.1)\n', (487, 495), True, 'import tensorflow as tf\n'), ((560, 578), 'tensorflow.maximum', 'tf.maximum', (['t', '(0.1)'], {}), '(t, 0.1)\n', (570, 578), True, 'import tensorflow as tf\n'), ((674, 688), 'tensorflow.nn.relu', 'tf.nn.relu', (['(-x)'], {}), '(-x)\n', (684, 688), True, 'import tensorflow as tf\n')] |
import nltk
from .base import BaseTokenizer
from typing import (
Tuple,
Iterator,
)
__all__ = ['NLTKTokenizer']
class NLTKTokenizer(BaseTokenizer):
    """NLTK-based Treebank tokenizer.
    Args:
        sentencizer (str): Name of sentencizer for text.
        chunker (str): Phrase chunker where 'nouns' uses nouns only,
            'noun_chunks' uses basic noun chunking, 'pos_chunks' uses
            parts-of-speech for chunking, None uses window-based
            tokenization. If chunking is enabled then 'window',
            'stopwords', and 'min_token_length' parameters are not used.
        tokenizer (str): Name of tokenizer for sentences.
        lemmatizer (str): Name of lemmatizer for tokens. None = disabled.
        language (str): Language to use for processing corpora.
    """
    NAME = 'nltk'
    # For reference only, these are the universal POS tags.
    # https://spacy.io/api/annotation#pos-universal
    _UNIVERSAL_POS_TAGS = (
        'ADJ', 'ADP', 'ADV', 'AUX', 'CONJ', 'CCONJ', 'DET', 'INTJ',
        'NOUN', 'NUM', 'PART', 'PRON', 'PROPN', 'PUNCT', 'SCONJ',
        'SYM', 'VERB', 'X', 'SPACE',
    )
    # Maps user-facing option names to NLTK classes.
    _SENTENCIZER_MAP = {
        'line': nltk.tokenize.LineTokenizer,
        'punctuation': nltk.tokenize.PunktSentenceTokenizer,
    }
    _TOKENIZER_MAP = {
        # NOTE: The following tokenizers raise 'NotImplementedError'
        # for 'span_tokenize()'.
        # 'nltk': nltk.tokenize.NLTKWordTokenizer,
        # 'toktok': nltk.tokenize.ToktokTokenizer,
        'treebank': nltk.tokenize.TreebankWordTokenizer,
        # NOTE: Will be deprecated in v3.2.5, NLTK recommends
        # nltk.parse.corenlp.CoreNLPTokenizer, but this does not exists.
        # 'stanford': nltk.tokenize.StanfordSegmenter,
        'punctuation': nltk.tokenize.WordPunctTokenizer,
        'space': nltk.tokenize.SpaceTokenizer,
        'whitespace': nltk.tokenize.WhitespaceTokenizer,
    }
    _LEMMATIZER_MAP = {
        'ci': nltk.stem.Cistem,
        'isri': nltk.stem.ISRIStemmer,
        'lancaster': nltk.stem.LancasterStemmer,
        'porter': nltk.stem.PorterStemmer,
        'snowball': nltk.stem.SnowballStemmer,
        'rslps': nltk.stem.RSLPStemmer,
        'wordnet': nltk.stem.WordNetLemmatizer,
    }
    def __init__(
        self,
        *,
        sentencizer: str = 'punctuation',
        chunker: str = None,
        tokenizer: str = 'treebank',
        lemmatizer: str = 'snowball',
        language: str = 'english',
        **kwargs,
    ):
        # Set class's stop words, then initialize base class to allow
        # customization of stop words.
        try:
            type(self)._STOPWORDS = set(nltk.corpus.stopwords.words(language))
        except ValueError as ex:
            raise ex(f"Model for NLTK language '{language}' is invalid.")
        super().__init__(**kwargs)
        # Dispatch table selecting the tokenize strategy by chunker name.
        chunker_func_map = {
            'nouns': self._tokenize_with_nouns,
            'noun_chunks': self._tokenize_with_noun_chunks,
            'pos_chunks': self._tokenize_with_pos_chunks,
            # Let base class handle tokenization window.
            None: super().tokenize,
        }
        self._sentencizer = type(self)._SENTENCIZER_MAP[sentencizer]()
        self._chunker = chunker_func_map[chunker]
        self._tokenizer = type(self)._TOKENIZER_MAP[tokenizer]()
        # NOTE: Need to set 'language' before '_get_lemmatizer()'.
        self._language = language
        self._lemmatizer = self._get_lemmatizer(lemmatizer)
        # Chunk grammar: zero or more adjectives followed by a noun.
        self._parser = nltk.RegexpParser('NP: {<ADJ>*<NOUN>}')
    def _get_lemmatizer(self, lemmatizer: str):
        """Instantiate the named stemmer/lemmatizer, or return None if disabled.

        Normalizes the WordNet API so every returned object exposes 'stem()'.
        """
        if lemmatizer is None:
            return lemmatizer
        # NOTE: This may trigger a LookupError if the stemmer/lemmatizer
        # resource is not found/installed.
        elif lemmatizer == 'snowball':
            # Snowball is the only one that takes a language argument.
            _lemmatizer = (
                type(self)._LEMMATIZER_MAP[lemmatizer](self._language)
            )
        else:
            _lemmatizer = type(self)._LEMMATIZER_MAP[lemmatizer]()
        # NOTE: In NLTK, WordNetLemmatizer API differs from stemmers.
        if lemmatizer == 'wordnet':
            _lemmatizer.stem = _lemmatizer.lemmatize
        # NOTE: This may trigger a LookupError if the stemmer/lemmatizer
        # resource is not found/installed.
        _lemmatizer.stem('testing')
        return _lemmatizer
    def _pos_tag(
        self,
        text: Tuple[int, int, str],
    ) -> Iterator[Tuple[Tuple[int, int], Tuple[str, str]]]:
        """Parts-of-speech tagging."""
        spans = []
        tokens = []
        # Offsets are shifted by text[0] so spans are absolute (end inclusive).
        for begin, end in self._tokenizer.span_tokenize(text[2]):
            spans.append((text[0] + begin, text[0] + end - 1))
            tokens.append(text[2][begin:end])
        # NOTE: Language for nltk.pos_tag() is based on
        # ISO 639-2 (3 letter code). We take the first 3-letters of language
        # set for nltk.stopwords.words() although this is not always correct,
        # but we chose this approach for simplicity.
        yield from zip(
            spans,
            nltk.pos_tag(tokens, tagset='universal', lang=self._language[:3])
        )
    def _is_valid_token(self, token: str):
        """True if the token passes the length filter and is not a stop word."""
        return (
            len(token) >= self._min_token_length
            and token not in self._stopwords
        )
    def tokenize(self, text):
        """Tokenize raw text or a (begin, end, text) sentence triple."""
        # NOTE: Support raw strings to allow invoking directly, that is,
        # it is not necessary to 'sentencize()' first.
        yield from self._chunker(
            (0, len(text) - 1, text)
            if isinstance(text, str)
            else text
        )
    def _sentencize(self, text):
        """Yield (begin, end, sentence) triples with inclusive end offsets."""
        yield from (
            (begin, end - 1, text[begin:end])
            for begin, end in self._sentencizer.span_tokenize(text)
        )
    def _lemmatize(self, text: str) -> str:
        """Stem/lemmatize a token; identity when lemmatization is disabled."""
        return (
            text
            if self._lemmatizer is None
            else self._lemmatizer.stem(text)
        )
    def _tokenize(self, text: Tuple[int, int, str]):
        """Yield (begin, end, lemma) for each valid token in a sentence triple."""
        sentence = text[2]
        # for begin, end in self._tokenizer.span_tokenize(sentence):
        #     token = sentence[begin:end]
        #     if self._is_valid_token(token):
        #         yield (
        #             text[0] + begin,
        #             text[0] + end - 1,
        #             self._lemmatize(token),
        #         )
        yield from (
            (
                text[0] + begin,
                text[0] + end - 1,
                self._lemmatize(sentence[begin:end]),
            )
            for begin, end in self._tokenizer.span_tokenize(sentence)
            if self._is_valid_token(sentence[begin:end])
        )
    def _tokenize_with_nouns(self, text: Tuple[int, int, str]):
        """Tokenizer for single nouns."""
        def is_valid_pos(pos: str):
            return pos in ('NOUN', 'PROPN', 'X')
        yield from (
            (*span, self._lemmatize(token))
            for span, (token, pos) in self._pos_tag(text)
            if is_valid_pos(pos) and self._is_valid_token(token)
        )
    def _tokenize_with_noun_chunks(self, text: Tuple[int, int, str]):
        """Tokenizer for noun chunks."""
        def is_valid_pos(node: 'nltk.tree.Tree'):
            return isinstance(node, nltk.tree.Tree) and node.label() == 'NP'
        # Parser requires tags in an iterable, so we unpack them.
        spans, tags = zip(*self._pos_tag(text))
        # NOTE: Traverse parser tree assuming it has height = 3.
        spans = iter(spans)
        for node in self._parser.parse(tags):
            span = next(spans)
            if is_valid_pos(node):
                begin = span[0]
                # Consume one span per remaining leaf to find the chunk end.
                for _ in range(len(node) - 1):
                    span = next(spans)
                yield (
                    begin,
                    span[1],
                    ' '.join(map(lambda t: self._lemmatize(t[0]), node)),
                )
    def _tokenize_with_pos_chunks(self, text: Tuple[int, int, str]):
        """Phrase tokenizer with parts-of-speech tags for marking bounds."""
        def is_valid_pos(pos: str):
            return pos in (
                'ADJ', 'ADP', 'ADV', 'AUX', 'CONJ', 'DET', 'NOUN', 'PROPN',
                'PART', 'VERB', 'X',
            )
        def is_valid_begin_pos(pos: str):
            return pos in ('ADJ', 'ADV', 'DET', 'NOUN', 'PROPN', 'VERB', 'X')
        def is_valid_middle_pos(pos: str):
            return pos in (
                'ADJ', 'ADP', 'ADV', 'AUX', 'CONJ', 'DET', 'NOUN', 'PROPN',
                'PART', 'VERB', 'X',
            )
        def is_valid_end_pos(pos: str):
            return pos in ('NOUN', 'PROPN', 'VERB', 'X')
        spans = []
        tokens = []
        for span, (token, pos) in self._pos_tag(text):
            if not is_valid_pos(pos):
                continue
            # Flag for not duplicating flush of a single token valid
            # as both, end and begin POS.
            is_end_token = False
            # Check for end token first:
            #   Handle single word tokens
            #   An end token can also be a begin token of another phrase
            if is_valid_end_pos(pos):
                # NOTE: Split based on chunk size to improve performance.
                if len(spans) == 0:
                    if self._is_valid_token(token):
                        is_end_token = True
                        yield (*span, self._lemmatize(token))
                else:
                    is_end_token = True
                    tokens.append(token)
                    yield (
                        spans[0][0],
                        span[1],
                        ' '.join(map(lambda t: self._lemmatize(t), tokens)),
                    )
                    spans = []
                    tokens = []
            if (
                is_valid_begin_pos(pos)
                or (len(tokens) > 0 and is_valid_middle_pos(pos))
            ):
                spans.append(span)
                tokens.append(token)
        # Use remaining chunk span if not a single end token
        if len(spans) > 0 and not is_end_token:
            yield (
                spans[0][0],
                spans[-1][1],
                ' '.join(map(lambda t: self._lemmatize(t), tokens)),
            )
        # spans = []
        # tokens = []
| [
"nltk.pos_tag",
"nltk.RegexpParser",
"nltk.corpus.stopwords.words"
] | [((3525, 3564), 'nltk.RegexpParser', 'nltk.RegexpParser', (['"""NP: {<ADJ>*<NOUN>}"""'], {}), "('NP: {<ADJ>*<NOUN>}')\n", (3542, 3564), False, 'import nltk\n'), ((2674, 2711), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['language'], {}), '(language)\n', (2701, 2711), False, 'import nltk\n'), ((5066, 5131), 'nltk.pos_tag', 'nltk.pos_tag', (['tokens'], {'tagset': '"""universal"""', 'lang': 'self._language[:3]'}), "(tokens, tagset='universal', lang=self._language[:3])\n", (5078, 5131), False, 'import nltk\n')] |
from vcs import vtk_ui
from vcs.colorpicker import ColorPicker
from vcs.vtk_ui import behaviors
from vcs.VCS_validation_functions import checkMarker
import vtk
import vcs.vcs2vtk
from . import priority
import sys
class MarkerEditor(
        behaviors.ClickableMixin, behaviors.DraggableMixin, priority.PriorityEditor):
    """
    Editor for marker objects
    Ctrl + click to drop a new marker, toolbar to configure, priority, draggable + handles on each marker.
    """
    def __init__(self, interactor, marker, index, display, configurator):
        """Build the editor UI: one draggable handle per point, a toolbar for
        color/size/type, and a tooltip with the drop-marker hint."""
        self.interactor = interactor
        self.marker = marker
        self.index = index
        self.configurator = configurator
        # Actors created by the VTK backend for this marker set.
        actors = display.backend["vtk_backend_marker_actors"][index]
        self.glyph, self.glyph_source, self.polydata, self.actor, self.geo = actors
        self.display = display
        self.handles = []
        # One draggable handle per (x, y) point of the edited marker set.
        for ind, x in enumerate(marker.x[index]):
            y = marker.y[index][ind]
            h = vtk_ui.Handle(
                self.interactor, (x, y), dragged=self.adjust, color=(
                    0, 0, 0), normalize=True)
            h.show()
            self.handles.append(h)
        self.toolbar = vtk_ui.toolbar.Toolbar(
            self.interactor,
            "Marker Options")
        self.toolbar.show()
        self.toolbar.add_button(["Change Color"], action=self.change_color)
        self.toolbar.add_slider_button(
            marker.size[index],
            1,
            300,
            "Marker Size",
            update=self.set_size)
        self.type_bar = self.toolbar.add_toolbar(
            "Marker Type",
            open_label="Change")
        # Shape/WMO buttons share state: selecting one resets the other.
        shapes = marker_shapes()
        shapes.insert(0, "Select Shape")
        self.shape_button = self.type_bar.add_button(
            shapes,
            action=self.change_shape)
        wmos = wmo_shapes()
        wmos.insert(0, "Select WMO Marker")
        self.wmo_button = self.type_bar.add_button(
            wmos,
            action=self.change_wmo)
        if self.marker.type[self.index] in shapes:
            self.shape_button.set_state(
                shapes.index(
                    self.marker.type[
                        self.index]))
        else:
            self.wmo_button.set_state(wmos.index(self.marker.type[self.index]))
        # Used to store the color picker when it's active
        self.picker = None
        prop = vtk.vtkTextProperty()
        prop.SetBackgroundColor(.87, .79, .55)
        prop.SetBackgroundOpacity(1)
        prop.SetColor(0, 0, 0)
        self.tooltip = vtk_ui.Label(
            self.interactor,
            "%s + Click to place new markers." %
            ("Cmd" if sys.platform == "darwin" else "Ctrl"),
            textproperty=prop)
        self.tooltip.left = 0
        # Pin the tooltip to the top-left corner of the render window.
        self.tooltip.top = self.interactor.GetRenderWindow(
        ).GetSize()[1] - self.tooltip.get_dimensions()[1]
        self.tooltip.show()
        super(MarkerEditor, self).__init__()
        self.register()
    def get_object(self):
        """Return the marker object being edited."""
        return self.marker
    def handle_click(self, point):
        """True if this editor should consume a click at 'point'."""
        x, y = point
        # Control drops a new instance
        return self.in_bounds(x, y) or self.toolbar.in_toolbar(
            x, y) or self.current_modifiers()["control"]
    def is_object(self, marker):
        """True if 'marker' is the object this editor is attached to."""
        return self.marker == marker
    def place(self):
        """Re-place all UI widgets (after a window resize/redraw)."""
        for h in self.handles:
            h.place()
        self.toolbar.place()
    def render(self):
        """Queue a render via the shared vtk_ui manager."""
        from vcs.vtk_ui.manager import get_manager
        m = get_manager(self.interactor)
        m.queue_render()
    def update_shape(self):
        """Rebuild the glyph after a type/size change and refresh the backend cache."""
        # Update the glyph for the marker to reflect the new shape
        self.glyph_source, self.polydata = vcs.vcs2vtk.prepGlyph(
            self.glyph, self.marker, self.index)
        self.display.backend["vtk_backend_marker_actors"][
            self.index] = (
            self.glyph,
            self.glyph_source,
            self.polydata,
            self.actor,
            self.geo)
        # Have to rescale the glyph now... work that out later with charles
        self.render()
    def change_shape(self, index):
        """Toolbar callback: set a geometric shape; index 0 delegates to WMO."""
        if index != 0:
            self.marker.type[self.index] = marker_shapes()[index - 1]
            self.wmo_button.set_state(0)
            self.update_shape()
        else:
            self.change_wmo(1)
    def change_wmo(self, index):
        """Toolbar callback: set a WMO shape; index 0 delegates to geometric shapes."""
        if index != 0:
            self.marker.type[self.index] = wmo_shapes()[index - 1]
            self.shape_button.set_state(0)
            self.update_shape()
        else:
            self.change_shape(1)
    def set_size(self, size):
        """Slider callback: update the marker size and rebuild the glyph."""
        self.marker.size[self.index] = size
        self.update_shape()
    def change_color(self, state):
        """Toolbar callback: open (or re-focus) the color picker."""
        if self.picker:
            self.picker.make_current()
        else:
            self.picker = ColorPicker(
                500,
                500,
                self.marker.colormap,
                self.marker.color[
                    self.index],
                parent_interactor=self.interactor,
                on_save=self.set_color,
                on_cancel=self.cancel_color)
    def set_color(self, colormap, color):
        """Picker save callback: apply colormap/color and recolor the actor."""
        self.marker.colormap = colormap
        self.marker.color[self.index] = color
        del self.picker
        self.picker = None
        vcs.vcs2vtk.setMarkerColor(
            self.actor.GetProperty(),
            self.marker,
            self.marker.color[
                self.index])
        self.render()
    def cancel_color(self):
        """Picker cancel callback: drop the picker reference."""
        del self.picker
        self.picker = None
    def click_release(self):
        """Mouse-up handler: Ctrl/Cmd+click adds a new point with its own handle."""
        x, y = self.event_position()
        if self.current_modifiers()["control"]:
            h = vtk_ui.Handle(
                self.interactor, (x, y), dragged=self.adjust, color=(
                    0, 0, 0), normalize=True)
            h.show()
            self.handles.append(h)
            self.marker.x[self.index].append(x)
            self.marker.y[self.index].append(y)
            self.sync_positions()
    def adjust(self, handle, dx, dy):
        """Handle-drag callback: move the matching point by (dx, dy)."""
        ind = self.handles.index(handle)
        self.marker.x[self.index][ind] += dx
        self.marker.y[self.index][ind] += dy
        self.sync_positions()
    def in_bounds(self, x, y):
        """True if normalized (x, y) hits a point of this marker set."""
        w, h = self.interactor.GetRenderWindow().GetSize()
        return inside_marker(
            self.marker, x, y, w, h, index=self.index) is not None
    def right_release(self):
        """Right-click handler: delete the point under the cursor; tear down the
        whole set (and the editor, if it was the last one) when empty."""
        x, y = self.event_position()
        if self.in_bounds(x, y):
            points = list(zip(self.marker.x[self.index], self.marker.y[self.index]))
            size = self.marker.size[self.index]
            screen_width, screen_height = self.interactor.GetRenderWindow(
            ).GetSize()
            # Half-extents of the hit box, in normalized coordinates.
            w, h = float(size) / screen_width, float(size) / screen_height
            for ind, point in enumerate(points):
                m_x, m_y = point
                if x > m_x - w and x < m_x + w and y > m_y - h and y < m_y + h:
                    break
            del self.marker.x[self.index][ind]
            del self.marker.y[self.index][ind]
            self.handles[ind].detach()
            del self.handles[ind]
            if len(self.marker.x[self.index]) == 0:
                del self.marker.x[self.index]
                del self.marker.y[self.index]
                del self.marker.type[self.index]
                del self.marker.color[self.index]
                if len(self.marker.x) == 0:
                    self.delete()
                return
            self.sync_positions()
    def detach(self):
        """Tear down all widgets and event registrations owned by this editor."""
        self.unregister()
        if self.picker:
            self.picker.close()
            self.picker = None
        self.toolbar.detach()
        for h in self.handles:
            h.detach()
        self.tooltip.detach()
    def delete(self):
        """Hide the marker actor and deactivate this editor in the configurator."""
        self.actor.SetVisibility(0)
        self.configurator.deactivate(self)
    def update_priority(self):
        """Move the actor to the render layer implied by the marker's priority."""
        maxLayers = self.interactor.GetRenderWindow().GetNumberOfLayers()
        new_layer = self.marker.priority * 10000 + 1 + \
            self.configurator.displays.index(self.display)
        if new_layer + 1 > maxLayers:
            self.interactor.GetRenderWindow().SetNumberOfLayers(new_layer + 1)
        self.actor.SetLayerNumber(new_layer)
        self.render()
    def sync_positions(self):
        """Push the marker's point list into the glyph's VTK point set and redraw."""
        # Sync all points
        points = self.glyph.GetInput().GetPoints()
        for i, (x, y) in enumerate(
                zip(self.marker.x[self.index], self.marker.y[self.index])):
            if i == points.GetNumberOfPoints():
                points.InsertNextPoint(x, y, 0)
            else:
                points.SetPoint(i, x, y, 0)
        self.glyph.GetInput().Modified()
        self.render()
# Memoizes validated marker-type names, keyed by numeric type id.
__shape_cache = {}


def marker_shapes():
    """Return the names of every supported numeric marker type (1-19).

    Types rejected by the validator are silently skipped; successful
    lookups are memoized in __shape_cache.
    """
    names = []
    for type_id in range(1, 20):
        if type_id not in __shape_cache:
            try:
                __shape_cache[type_id] = checkMarker(None, "type", type_id)
            except ValueError:
                continue
        names.append(__shape_cache[type_id])
    return names
def wmo_shapes():
    """Return the names of every supported WMO marker type (100-202).

    Types rejected by the validator are silently skipped; successful
    lookups are memoized in __shape_cache.
    """
    names = []
    for type_id in range(100, 203):
        if type_id not in __shape_cache:
            try:
                __shape_cache[type_id] = checkMarker(None, "type", type_id)
            except ValueError:
                continue
        names.append(__shape_cache[type_id])
    return names
def inside_marker(marker, x, y, screen_width, screen_height, index=None):
    """Hit-test a normalized point against one or all of a marker's point sets.

    Each point gets a box of half-width size/screen_width and half-height
    size/screen_height around it (exclusive bounds). Returns the index of
    the first set containing (x, y), or None if nothing is hit. When
    'index' is given only that set is tested.
    """
    indices = range(len(marker.x)) if index is None else (index,)
    for ind in indices:
        half_w = float(marker.size[ind]) / screen_width
        half_h = float(marker.size[ind]) / screen_height
        for m_x, m_y in zip(marker.x[ind], marker.y[ind]):
            if m_x - half_w < x < m_x + half_w and m_y - half_h < y < m_y + half_h:
                return ind
    return None
| [
"vtk.vtkTextProperty",
"vcs.vtk_ui.Label",
"vcs.vtk_ui.Handle",
"vcs.vtk_ui.manager.get_manager",
"vcs.colorpicker.ColorPicker",
"vcs.VCS_validation_functions.checkMarker",
"vcs.vtk_ui.toolbar.Toolbar"
] | [((1214, 1271), 'vcs.vtk_ui.toolbar.Toolbar', 'vtk_ui.toolbar.Toolbar', (['self.interactor', '"""Marker Options"""'], {}), "(self.interactor, 'Marker Options')\n", (1236, 1271), False, 'from vcs import vtk_ui\n'), ((2440, 2461), 'vtk.vtkTextProperty', 'vtk.vtkTextProperty', ([], {}), '()\n', (2459, 2461), False, 'import vtk\n'), ((2601, 2739), 'vcs.vtk_ui.Label', 'vtk_ui.Label', (['self.interactor', "('%s + Click to place new markers.' % ('Cmd' if sys.platform == 'darwin' else\n 'Ctrl'))"], {'textproperty': 'prop'}), "(self.interactor, '%s + Click to place new markers.' % ('Cmd' if\n sys.platform == 'darwin' else 'Ctrl'), textproperty=prop)\n", (2613, 2739), False, 'from vcs import vtk_ui\n'), ((3562, 3590), 'vcs.vtk_ui.manager.get_manager', 'get_manager', (['self.interactor'], {}), '(self.interactor)\n', (3573, 3590), False, 'from vcs.vtk_ui.manager import get_manager\n'), ((1003, 1099), 'vcs.vtk_ui.Handle', 'vtk_ui.Handle', (['self.interactor', '(x, y)'], {'dragged': 'self.adjust', 'color': '(0, 0, 0)', 'normalize': '(True)'}), '(self.interactor, (x, y), dragged=self.adjust, color=(0, 0, 0),\n normalize=True)\n', (1016, 1099), False, 'from vcs import vtk_ui\n'), ((4875, 5046), 'vcs.colorpicker.ColorPicker', 'ColorPicker', (['(500)', '(500)', 'self.marker.colormap', 'self.marker.color[self.index]'], {'parent_interactor': 'self.interactor', 'on_save': 'self.set_color', 'on_cancel': 'self.cancel_color'}), '(500, 500, self.marker.colormap, self.marker.color[self.index],\n parent_interactor=self.interactor, on_save=self.set_color, on_cancel=\n self.cancel_color)\n', (4886, 5046), False, 'from vcs.colorpicker import ColorPicker\n'), ((5744, 5840), 'vcs.vtk_ui.Handle', 'vtk_ui.Handle', (['self.interactor', '(x, y)'], {'dragged': 'self.adjust', 'color': '(0, 0, 0)', 'normalize': '(True)'}), '(self.interactor, (x, y), dragged=self.adjust, color=(0, 0, 0),\n normalize=True)\n', (5757, 5840), False, 'from vcs import vtk_ui\n'), ((9098, 9126), 
'vcs.VCS_validation_functions.checkMarker', 'checkMarker', (['None', '"""type"""', 'i'], {}), "(None, 'type', i)\n", (9109, 9126), False, 'from vcs.VCS_validation_functions import checkMarker\n'), ((9459, 9487), 'vcs.VCS_validation_functions.checkMarker', 'checkMarker', (['None', '"""type"""', 'i'], {}), "(None, 'type', i)\n", (9470, 9487), False, 'from vcs.VCS_validation_functions import checkMarker\n')] |
from functools import wraps
from typing import Any, Callable, TypeVar, cast
from grpc import Call, RpcError
from grpc.aio import AioRpcError
from .exceptions import AioRequestError, RequestError
from .logging import get_metadata_from_aio_error, get_metadata_from_call, log_error
TFunc = TypeVar("TFunc", bound=Callable[..., Any])
def handle_request_error(name: str):
    """Decorator factory for synchronous gRPC calls.

    Any ``RpcError`` that implements the ``Call`` interface is logged
    (with its tracking id, *name* and status) and re-raised as a
    :class:`RequestError` chained to the original; other errors
    propagate unchanged.
    """

    def decorator(func: TFunc) -> TFunc:
        @wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            try:
                return func(*args, **kwargs)
            except RpcError as err:
                # Only Call-like errors carry metadata worth translating.
                if not issubclass(type(err), Call):
                    raise
                meta = get_metadata_from_call(err)
                log_error(
                    meta.tracking_id if meta else None,
                    name,
                    f"{err.code().name} {err.details()}",  # type:ignore
                )
                raise RequestError(
                    err.code(), err.details(), meta  # type:ignore
                ) from err

        return cast(TFunc, wrapper)

    return decorator
def handle_request_error_gen(name: str):
    """Decorator factory for synchronous gRPC *streaming* calls.

    Same contract as ``handle_request_error`` but for generator
    functions: items are forwarded with ``yield from``, and a
    metadata-bearing ``RpcError`` is logged and re-raised as a
    :class:`RequestError` chained to the original.
    """

    def decorator(func: TFunc) -> TFunc:
        @wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            try:
                yield from func(*args, **kwargs)
            except RpcError as err:
                # Only Call-like errors carry metadata worth translating.
                if not issubclass(type(err), Call):
                    raise
                meta = get_metadata_from_call(err)
                log_error(
                    meta.tracking_id if meta else None,
                    name,
                    f"{err.code().name} {err.details()}",  # type:ignore
                )
                raise RequestError(
                    err.code(), err.details(), meta  # type:ignore
                ) from err

        return cast(TFunc, wrapper)

    return decorator
def handle_aio_request_error(name: str):
    """Decorator factory for asynchronous (aio) gRPC calls.

    An ``AioRpcError`` is logged with its tracking id, *name* and status
    and re-raised as an :class:`AioRequestError` chained to the
    original exception.
    """

    def decorator(func: TFunc) -> TFunc:
        @wraps(func)
        async def wrapper(*args: Any, **kwargs: Any) -> Any:
            try:
                return await func(*args, **kwargs)
            except AioRpcError as exc:
                meta = get_metadata_from_aio_error(exc)
                log_error(
                    meta.tracking_id if meta else None,
                    name,
                    f"{exc.code().name} {exc.details()}",  # type:ignore
                )
                raise AioRequestError(
                    exc.code(), exc.details(), meta  # type:ignore
                ) from exc

        return cast(TFunc, wrapper)

    return decorator
def handle_aio_request_error_gen(name: str):
    """Decorator factory for asynchronous gRPC *streaming* calls.

    Items are forwarded from the wrapped async generator; an
    ``AioRpcError`` is logged and re-raised as an
    :class:`AioRequestError` chained to the original exception.
    """

    def decorator(func: TFunc) -> TFunc:
        @wraps(func)
        async def wrapper(*args: Any, **kwargs: Any) -> Any:
            try:
                async for item in func(*args, **kwargs):
                    yield item
            except AioRpcError as exc:
                meta = get_metadata_from_aio_error(exc)
                log_error(
                    meta.tracking_id if meta else None,
                    name,
                    f"{exc.code().name} {exc.details()}",  # type:ignore
                )
                raise AioRequestError(
                    exc.code(), exc.details(), meta  # type:ignore
                ) from exc

        return cast(TFunc, wrapper)

    return decorator
| [
"typing.cast",
"functools.wraps",
"typing.TypeVar"
] | [((290, 332), 'typing.TypeVar', 'TypeVar', (['"""TFunc"""'], {'bound': 'Callable[..., Any]'}), "('TFunc', bound=Callable[..., Any])\n", (297, 332), False, 'from typing import Any, Callable, TypeVar, cast\n'), ((422, 433), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (427, 433), False, 'from functools import wraps\n'), ((1136, 1156), 'typing.cast', 'cast', (['TFunc', 'wrapper'], {}), '(TFunc, wrapper)\n', (1140, 1156), False, 'from typing import Any, Callable, TypeVar, cast\n'), ((1272, 1283), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1277, 1283), False, 'from functools import wraps\n'), ((1990, 2010), 'typing.cast', 'cast', (['TFunc', 'wrapper'], {}), '(TFunc, wrapper)\n', (1994, 2010), False, 'from typing import Any, Callable, TypeVar, cast\n'), ((2126, 2137), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2131, 2137), False, 'from functools import wraps\n'), ((2755, 2775), 'typing.cast', 'cast', (['TFunc', 'wrapper'], {}), '(TFunc, wrapper)\n', (2759, 2775), False, 'from typing import Any, Callable, TypeVar, cast\n'), ((2895, 2906), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2900, 2906), False, 'from functools import wraps\n'), ((3565, 3585), 'typing.cast', 'cast', (['TFunc', 'wrapper'], {}), '(TFunc, wrapper)\n', (3569, 3585), False, 'from typing import Any, Callable, TypeVar, cast\n')] |
import torch
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import DataLoader, TensorDataset, Dataset
from torch.utils.data.sampler import SubsetRandomSampler
from torch import optim
import pandas as pd
import sys
sys.path.append('./proto')
import trainer_pb2
import trainer_pb2_grpc
import time
from concurrent import futures
import logging
import grpc
import argparse
import base64
import io
import ipfshttpclient
torch.nn.Module.dump_patches = True
def fullmodel2base64(model):
    """Serialize a full torch model (architecture and weights) to a
    base64 text string, suitable for transport as a message payload."""
    sink = io.BytesIO()
    torch.save(model, sink)
    return base64.b64encode(sink.getvalue()).decode()
def base642fullmodel(modbase64):
    """Inverse of ``fullmodel2base64``: decode a base64 string back into
    a full torch model object.

    SECURITY NOTE: ``torch.load`` unpickles arbitrary Python objects —
    only call this on payloads from a trusted source.

    :param modbase64: base64 string produced by ``fullmodel2base64``
    :return: the deserialized model object
    """
    raw = base64.b64decode(modbase64.encode())
    buffer = io.BytesIO(raw)
    try:
        # torch >= 2.6 defaults to weights_only=True, which refuses to
        # rebuild a fully pickled nn.Module; request legacy behaviour.
        return torch.load(buffer, weights_only=False)
    except TypeError:
        # very old torch without the weights_only keyword
        buffer.seek(0)
        return torch.load(buffer)
# class Model(nn.Module):
# def __init__(self):
# super().__init__()
# self.hidden = nn.Linear(784, 20)
# self.output = nn.Linear(20, 10)
# def forward(self, x):
# x = self.hidden(x)
# x = torch.sigmoid(x)
# x = self.output(x)
# return x
class Model(nn.Module):
    """EMNIST-style classifier: two 5x5 conv stages, two 3x3 conv
    stages with pooling/dropout, then a small fully connected head
    producing 47 class logits for 28x28 single-channel inputs."""

    def __init__(self):
        super().__init__()
        feature_layers = [
            nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2),
            nn.Dropout(0.25),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout(0.25),
        ]
        head_layers = [
            nn.Linear(576, 256),  # 576 = 64 channels * 3 * 3 spatial
            nn.Dropout(0.5),
            nn.Linear(256, 47),
        ]
        # attribute names kept as `cnn`/`classifier` so state_dict keys
        # stay compatible with previously saved checkpoints
        self.cnn = nn.Sequential(*feature_layers)
        self.classifier = nn.Sequential(*head_layers)

    def forward(self, x):
        features = self.cnn(x)
        flat = torch.flatten(features, start_dim=1)  # keep batch dim
        return self.classifier(flat)
class Trainer(trainer_pb2_grpc.TrainerServicer):
    """gRPC servicer that trains a received model on a local CSV dataset.

    Models travel as base64 blobs addressed through IPFS: the Train RPC
    receives what appears to be an IPFS content hash, fetches the blob,
    trains for one epoch, publishes the result back to IPFS and returns
    the new hash.
    """
    def __init__(self, csvdata, device, batch):
        # Build the training DataLoader once, up front.
        self.dloader = getdataloader(csvdata, batch=batch)
        self.device = device
        # self.batch = batch
        # Block until the IPFS daemon is reachable, retrying once per
        # second. NOTE(review): the bare except also swallows
        # KeyboardInterrupt — consider catching a narrower exception.
        while True:
            try:
                self.client = ipfshttpclient.connect("/ip4/192.168.127.12/tcp/5001/http")
                break
            except:
                print("Waiting for ipfs services at : 192.168.127.12:5001")
                time.sleep(1)
    def Train(self, request, result):
        # Handle a Train RPC: request.BaseModel is resolved via
        # self.client.cat (presumably an IPFS hash — confirm against the
        # proto definition); the trained model is re-published with
        # add_str and its hash returned to the caller.
        #print(request.BaseModel)
        print("Training...")
        result = trainOneEp(self.client.cat(request.BaseModel).decode(), self.dloader, self.device)
        hashresult = self.client.add_str(result)
        return trainer_pb2.TrainResult(Round=request.Round, Result=hashresult)
def serve(data, port, dev, bat):
    """Stand up the gRPC Trainer service on 0.0.0.0:<port> and block
    until it terminates."""
    print("Read dataset : ", data)
    print("Using : ", dev)
    print("Port : ", port)
    # short grace period before binding (e.g. for sibling services)
    time.sleep(2)
    executor = futures.ThreadPoolExecutor(max_workers=10)
    grpc_server = grpc.server(executor)
    servicer = Trainer(data, dev, bat)
    trainer_pb2_grpc.add_TrainerServicer_to_server(servicer, grpc_server)
    bind_address = '0.0.0.0:' + port
    grpc_server.add_insecure_port(bind_address)
    grpc_server.start()
    grpc_server.wait_for_termination()
def trainOneEp(bmodel, dloader, device):
    """Train a serialized model for one epoch and return it re-encoded.

    :param bmodel: base64 string holding the full serialized model
                   (as produced by ``fullmodel2base64``)
    :param dloader: DataLoader yielding (data, target) mini-batches
    :param device: "GPU" to train on CUDA, anything else stays on CPU
    :return: the updated model serialized back to a base64 string
             (always moved to CPU first so the payload is portable)
    """
    # Deserialize the incoming model. (The previous dead
    # `model = Model()` assignment, immediately overwritten, is gone.)
    model = base642fullmodel(bmodel)
    print(model)
    optimizer = optim.RMSprop(model.parameters(), lr=0.001)
    loss_function = nn.CrossEntropyLoss()
    if (device=="GPU"):
        model.cuda()
    model.train()
    for data, target in dloader:
        if (device=="GPU"):
            data = data.cuda()
            target = target.cuda()
        optimizer.zero_grad()
        output = model(data.float())
        loss = loss_function(output, target)
        loss.backward()
        optimizer.step()
    if (device=="GPU"):
        # serialize from CPU so receivers without CUDA can load it
        model.cpu()
    bmodel_ = fullmodel2base64(model)
    return bmodel_
# def getdataloader(dset = '/home/tedbest/Documents/mnist_train_0.csv'):
# #print(dset)
# train = pd.read_csv(dset)
# train_labels = train['label'].values
# train_data = train.drop(labels = ['label'], axis = 1)
# train_data = train_data.values.reshape(-1,28, 28)
# train_images_tensor = torch.tensor(train_data)/255.0
# train_labels_tensor = torch.tensor(train_labels)
# mnist = TensorDataset(train_images_tensor, train_labels_tensor)
# trainloader = DataLoader(mnist, batch_size=256, shuffle= True)
# return trainloader
class MNISTDataset(Dataset):
    """MNIST dataset wrapping pre-loaded feature/target sequences.

    Args:
        feature: indexable sequence of samples (e.g. image tensors).
        target: indexable sequence of labels, same length as feature.
        transform: optional callable applied eagerly to every sample
            at construction time.
    """
    def __init__(self, feature, target, transform=None):
        self.Y = target
        if transform is not None:
            self.X = [transform(sample) for sample in feature]
        else:
            # Bug fix: previously self.X was left empty when no
            # transform was given, so the dataset reported length 0
            # while still holding targets.
            self.X = list(feature)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return self.X[idx], self.Y[idx]
def getdataloader(dset = './mnist_test.csv', batch=256):
    """Load an MNIST-style CSV (a 'label' column plus 784 pixel
    columns) and return a shuffled, augmenting DataLoader over it."""
    frame = pd.read_csv(dset)
    labels = frame['label'].values
    pixels = frame.drop(labels=['label'], axis=1).values.reshape(-1, 28, 28)
    feature_tensor = torch.from_numpy(pixels)
    target_tensor = torch.from_numpy(labels)
    # Random affine jitter applied once per sample at dataset build time.
    augmentation = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomAffine(degrees=45, translate=(0.1, 0.1), scale=(0.8, 1.2)),
        transforms.ToTensor(),
    ])
    dataset = MNISTDataset(feature_tensor.float(), target_tensor, transform=augmentation)
    return torch.utils.data.DataLoader(dataset, batch_size=batch, shuffle=True, num_workers=4)
if __name__ == '__main__':
    # Command-line entry point: parse options and start the gRPC trainer.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=str, default="/home/tedbest/Documents/mnist_train_0.csv")
    parser.add_argument('--port', type=str, default="63387")
    parser.add_argument('--device', type=str, default="CPU") # GPU/CPU
    parser.add_argument('--batch', type=int, default=256)
    # Accepts and ignores a bare -f flag (presumably so the script can
    # run under Jupyter/ipykernel, which passes -f <conn-file> — confirm).
    parser.add_argument('-f')
    args = parser.parse_args()
    # Refuse to start in GPU mode on a machine without CUDA.
    if (args.device=="GPU"):
        if torch.cuda.is_available():
            print("GPU found.")
        else:
            print("GPU not found.")
            exit(0)
    logging.basicConfig()
    serve(args.data, args.port, args.device, args.batch)
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"torchvision.transforms.ToPILImage",
"base64.b64encode",
"io.BytesIO",
"time.sleep",
"torch.from_numpy",
"torch.cuda.is_available",
"sys.path.append",
"ipfshttpclient.connect",
"argparse.ArgumentParser",
"... | [((246, 272), 'sys.path.append', 'sys.path.append', (['"""./proto"""'], {}), "('./proto')\n", (261, 272), False, 'import sys\n'), ((528, 540), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (538, 540), False, 'import io\n'), ((545, 570), 'torch.save', 'torch.save', (['model', 'buffer'], {}), '(model, buffer)\n', (555, 570), False, 'import torch\n'), ((730, 756), 'base64.b64decode', 'base64.b64decode', (['inputrpc'], {}), '(inputrpc)\n', (746, 756), False, 'import base64\n'), ((3386, 3399), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3396, 3399), False, 'import time\n'), ((4006, 4027), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4025, 4027), True, 'import torch.nn as nn\n'), ((5687, 5704), 'pandas.read_csv', 'pd.read_csv', (['dset'], {}), '(dset)\n', (5698, 5704), True, 'import pandas as pd\n'), ((5884, 5912), 'torch.from_numpy', 'torch.from_numpy', (['train_data'], {}), '(train_data)\n', (5900, 5912), False, 'import torch\n'), ((5932, 5962), 'torch.from_numpy', 'torch.from_numpy', (['train_labels'], {}), '(train_labels)\n', (5948, 5962), False, 'import torch\n'), ((6285, 6374), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_set'], {'batch_size': 'batch', 'shuffle': '(True)', 'num_workers': '(4)'}), '(train_set, batch_size=batch, shuffle=True,\n num_workers=4)\n', (6312, 6374), False, 'import torch\n'), ((6441, 6466), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6464, 6466), False, 'import argparse\n'), ((6990, 7011), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (7009, 7011), False, 'import logging\n'), ((784, 805), 'io.BytesIO', 'io.BytesIO', (['inputrpc_'], {}), '(inputrpc_)\n', (794, 805), False, 'import io\n'), ((3197, 3260), 'trainer_pb2.TrainResult', 'trainer_pb2.TrainResult', ([], {'Round': 'request.Round', 'Result': 'hashresult'}), '(Round=request.Round, Result=hashresult)\n', (3220, 3260), False, 'import trainer_pb2\n'), ((3425, 3467), 
'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': '(10)'}), '(max_workers=10)\n', (3451, 3467), False, 'from concurrent import futures\n'), ((6856, 6881), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6879, 6881), False, 'import torch\n'), ((609, 629), 'base64.b64encode', 'base64.b64encode', (['bg'], {}), '(bg)\n', (625, 629), False, 'import base64\n'), ((1249, 1305), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(32)', 'kernel_size': '(5)'}), '(in_channels=1, out_channels=32, kernel_size=5)\n', (1258, 1305), True, 'import torch.nn as nn\n'), ((1344, 1365), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1351, 1365), True, 'import torch.nn as nn\n'), ((1404, 1461), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(32)', 'kernel_size': '(5)'}), '(in_channels=32, out_channels=32, kernel_size=5)\n', (1413, 1461), True, 'import torch.nn as nn\n'), ((1500, 1521), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1507, 1521), True, 'import torch.nn as nn\n'), ((1560, 1587), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (1572, 1587), True, 'import torch.nn as nn\n'), ((1626, 1642), 'torch.nn.Dropout', 'nn.Dropout', (['(0.25)'], {}), '(0.25)\n', (1636, 1642), True, 'import torch.nn as nn\n'), ((1681, 1738), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(32)', 'out_channels': '(64)', 'kernel_size': '(3)'}), '(in_channels=32, out_channels=64, kernel_size=3)\n', (1690, 1738), True, 'import torch.nn as nn\n'), ((1777, 1798), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1784, 1798), True, 'import torch.nn as nn\n'), ((1837, 1894), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(64)', 'out_channels': '(64)', 'kernel_size': '(3)'}), '(in_channels=64, out_channels=64, kernel_size=3)\n', (1846, 1894), True, 
'import torch.nn as nn\n'), ((1933, 1954), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1940, 1954), True, 'import torch.nn as nn\n'), ((1993, 2030), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (2005, 2030), True, 'import torch.nn as nn\n'), ((2069, 2085), 'torch.nn.Dropout', 'nn.Dropout', (['(0.25)'], {}), '(0.25)\n', (2079, 2085), True, 'import torch.nn as nn\n'), ((2136, 2155), 'torch.nn.Linear', 'nn.Linear', (['(576)', '(256)'], {}), '(576, 256)\n', (2145, 2155), True, 'import torch.nn as nn\n'), ((2196, 2211), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2206, 2211), True, 'import torch.nn as nn\n'), ((2252, 2270), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(47)'], {}), '(256, 47)\n', (2261, 2270), True, 'import torch.nn as nn\n'), ((6018, 6041), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (6039, 6041), False, 'from torchvision import transforms\n'), ((6051, 6126), 'torchvision.transforms.RandomAffine', 'transforms.RandomAffine', ([], {'degrees': '(45)', 'translate': '(0.1, 0.1)', 'scale': '(0.8, 1.2)'}), '(degrees=45, translate=(0.1, 0.1), scale=(0.8, 1.2))\n', (6074, 6126), False, 'from torchvision import transforms\n'), ((6136, 6157), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6155, 6157), False, 'from torchvision import transforms\n'), ((2723, 2782), 'ipfshttpclient.connect', 'ipfshttpclient.connect', (['"""/ip4/192.168.127.12/tcp/5001/http"""'], {}), "('/ip4/192.168.127.12/tcp/5001/http')\n", (2745, 2782), False, 'import ipfshttpclient\n'), ((2917, 2930), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2927, 2930), False, 'import time\n')] |
from django.shortcuts import render
from django.http import HttpResponse
from django.template.loader import get_template
from .models import Poll
from time import timezone
from datetime import date
# Create your views here.
def index(request):
    """Render the index page.

    Fix: ``render()`` expects a template *name* and loads the template
    itself; the old code passed the Template object returned by
    ``get_template`` into ``render``, which breaks the loader lookup.
    The debugging ``print`` of the template object is also gone.
    """
    return render(request, './index.html')
def detail(request, poll_id):
    """Placeholder detail view for a single poll."""
    message = "You're looking at poll %s." % poll_id
    return HttpResponse(message)
def results(request, poll_id):
    """Placeholder results view for a single poll."""
    message = "You're looking at the results of poll %s." % poll_id
    return HttpResponse(message)
# CRUD
def create(request):
    """Create a hard-coded sample poll and report its new id."""
    poll = Poll(question="What's up?", pub_date=date.today())
    poll.save()
    return HttpResponse("You're creating a new poll with id %s." % poll.id)
def update(request, id):
    """Update a poll's question from POSTed form data.

    Non-POST requests get a reminder response instead of a change.
    """
    if request.method != 'POST':
        return HttpResponse("You must send data using POST")
    poll = Poll.objects.get(id=id)
    poll.question = request.POST['question']
    poll.save()
    return HttpResponse("You're updating poll %s." % id)
def delete(request, id):
    """Delete the poll with the given id."""
    Poll.objects.get(id=id).delete()
    return HttpResponse("You're deleting poll %s." % id)
def read(request, id):
    """Fetch a single poll and echo its string representation."""
    myPoll = Poll.objects.get(id=id)
    return HttpResponse("You're reading a poll. %s " % myPoll)

#### SECURITY PENDING TOPICS
# Every connection to the backend should carry a token.
# Every API/CRUD endpoint must have a query rate limit...
"django.shortcuts.render",
"django.http.HttpResponse",
"datetime.date.today",
"django.template.loader.get_template"
] | [((264, 292), 'django.template.loader.get_template', 'get_template', (['"""./index.html"""'], {}), "('./index.html')\n", (276, 292), False, 'from django.template.loader import get_template\n'), ((326, 353), 'django.shortcuts.render', 'render', (['request', 'myTemplate'], {}), '(request, myTemplate)\n', (332, 353), False, 'from django.shortcuts import render\n'), ((396, 448), 'django.http.HttpResponse', 'HttpResponse', (['("You\'re looking at poll %s." % poll_id)'], {}), '("You\'re looking at poll %s." % poll_id)\n', (408, 448), False, 'from django.http import HttpResponse\n'), ((492, 559), 'django.http.HttpResponse', 'HttpResponse', (['("You\'re looking at the results of poll %s." % poll_id)'], {}), '("You\'re looking at the results of poll %s." % poll_id)\n', (504, 559), False, 'from django.http import HttpResponse\n'), ((685, 752), 'django.http.HttpResponse', 'HttpResponse', (['("You\'re creating a new poll with id %s." % newPoll.id)'], {}), '("You\'re creating a new poll with id %s." % newPoll.id)\n', (697, 752), False, 'from django.http import HttpResponse\n'), ((795, 840), 'django.http.HttpResponse', 'HttpResponse', (['"""You must send data using POST"""'], {}), "('You must send data using POST')\n", (807, 840), False, 'from django.http import HttpResponse\n'), ((1167, 1212), 'django.http.HttpResponse', 'HttpResponse', (['("You\'re deleting poll %s." % id)'], {}), '("You\'re deleting poll %s." % id)\n', (1179, 1212), False, 'from django.http import HttpResponse\n'), ((1285, 1336), 'django.http.HttpResponse', 'HttpResponse', (['("You\'re reading a poll. %s " % myPoll)'], {}), '("You\'re reading a poll. %s " % myPoll)\n', (1297, 1336), False, 'from django.http import HttpResponse\n'), ((893, 938), 'django.http.HttpResponse', 'HttpResponse', (['("You\'re updating poll %s." % id)'], {}), '("You\'re updating poll %s." 
% id)\n', (905, 938), False, 'from django.http import HttpResponse\n'), ((641, 653), 'datetime.date.today', 'date.today', ([], {}), '()\n', (651, 653), False, 'from datetime import date\n')] |
#!/usr/bin/python
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import time
from .. import conftest
from ..common_setup import enterprise_no_client
from .common_update import update_image, common_update_procedure
from .mendertesting import MenderTesting
from ..MenderAPI import auth, devauth, deploy, image, logger
from testutils.infra.device import MenderDevice
class TestProvidesDependsEnterprise(MenderTesting):
    """Integration test for Mender artifact provides/depends matching
    against an enterprise backend (tenant + single docker client)."""

    def test_update_provides_depends(self, enterprise_no_client):
        """
        Perform two consecutive updates, the first adds virtual provides
        to the artifact and the second artifact depends on these provides.
        A third deployment with now-insufficient provides must end with
        a 'noartifact' status.
        """
        # Create tenant user
        auth.reset_auth_token()
        auth.new_tenant("admin", "<EMAIL>", "secret-service", "enterprise")
        token = auth.current_tenant["tenant_token"]

        # Create client setup with tenant token
        enterprise_no_client.new_tenant_docker_client("mender-client", token)
        mender_device = MenderDevice(enterprise_no_client.get_mender_clients()[0])
        # NOTE(review): host_ip is assigned but never used below — confirm
        # whether the fixture call has required side effects.
        host_ip = enterprise_no_client.get_virtual_network_host_ip()

        # Wait for ssh to be open
        mender_device.ssh_is_opened()

        # Check that the device has authorized with the backend.
        devauth.get_devices(expected_devices=1)
        devauth.accept_devices(1)
        assert len(devauth.get_devices_status("accepted")) == 1

        # Update client with and artifact with custom provides
        def prepare_provides_artifact(artifact_file, artifact_id):
            # Builds a directory artifact advertising the virtual provide
            # rootfs-image.directory.foo:bar.
            cmd = (
                # Package tests folder in the artifact, just a random folder.
                "directory-artifact-gen -o %s -n %s -t docker-client -d /tmp/test_file_update_module tests -- --provides rootfs-image.directory.foo:bar"
                % (artifact_file, artifact_id)
            )
            logger.info("Executing: " + cmd)
            subprocess.check_call(cmd, shell=True)
            return artifact_file

        deployment_id, _ = common_update_procedure(
            make_artifact=prepare_provides_artifact,
            # We use verify_status=False, because update module updates are so
            # quick that it sometimes races past the 'inprogress' status without
            # the test framework having time to register it. That's not really
            # the part we're interested in though, so just skip it.
            verify_status=False,
        )
        deploy.check_expected_status("finished", deployment_id)

        # Issue another update which depends on the custom provides
        def prepare_depends_artifact(artifact_file, artifact_id):
            # Builds a directory artifact that *depends on* the provide
            # installed by the previous deployment.
            cmd = (
                # Package tests folder in the artifact, just a random folder.
                "directory-artifact-gen -o %s -n %s -t docker-client -d /tmp/test_file_update_module tests -- --depends rootfs-image.directory.foo:bar"
                % (artifact_file, artifact_id)
            )
            logger.info("Executing: " + cmd)
            subprocess.check_call(cmd, shell=True)
            return artifact_file

        deployment_id, _ = common_update_procedure(
            make_artifact=prepare_depends_artifact, verify_status=False,
        )
        deploy.check_expected_status("finished", deployment_id)

        # Issue a third update with the same update as previous, this time
        # with insufficient provides -> no artifact status
        deployment_id, _ = common_update_procedure(
            make_artifact=prepare_depends_artifact, verify_status=False
        )

        # Retry for at most 60 seconds checking for deployment status update
        stat = None
        noartifact = 0
        for i in range(60):
            time.sleep(1)
            stat = deploy.get_statistics(deployment_id)
            if stat.get("noartifact") == 1:
                noartifact = 1
                break

        assert stat is not None
        assert noartifact == 1
| [
"time.sleep",
"subprocess.check_call"
] | [((2490, 2528), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (2511, 2528), False, 'import subprocess\n'), ((3585, 3623), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (3606, 3623), False, 'import subprocess\n'), ((4287, 4300), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4297, 4300), False, 'import time\n')] |
from invoke import task
# invoke task with no docstring at all.
@task
def no_docstring():
    pass
# invoke task whose docstring is a single line (closing quotes on the
# next line) — the docstring text itself is intentionally left as-is.
@task
def one_line():
    """foo
    """
# invoke task with a two-line docstring — the docstring text itself is
# intentionally left as-is.
@task
def two_lines():
    """foo
    bar
    """
# invoke task whose docstring starts with a newline (leading whitespace
# before the text) — the docstring text itself is intentionally left as-is.
@task
def leading_whitespace():
    """
    foo
    """
# invoke task registered under the additional aliases 'a' and 'b'.
@task(aliases=('a', 'b'))
def with_aliases():
    """foo
    """
| [
"invoke.task"
] | [((212, 236), 'invoke.task', 'task', ([], {'aliases': "('a', 'b')"}), "(aliases=('a', 'b'))\n", (216, 236), False, 'from invoke import task\n')] |
from subprocess import PIPE
from subprocess import Popen
def run(command):
    """Run *command* and capture its output.

    :param command: the program and its arguments as a list, e.g.
        ``["ls", "-l"]``. A list is required so arguments are never
        parsed by a shell (shell=False semantics).
    :return: ``(stdout, stderr, returncode)`` with both streams decoded
        as UTF-8.
    :raises TypeError: if *command* is not a list. (The previous
        ``assert`` would be silently stripped under ``python -O``.)
    """
    if not isinstance(command, list):
        raise TypeError("Command must be a list")
    # communicate() waits for the process and avoids pipe deadlocks;
    # the context manager guarantees the pipes are closed.
    with Popen(command, stdout=PIPE, stderr=PIPE) as p:
        stdout, stderr = p.communicate()
    return stdout.decode('utf-8'), stderr.decode('utf-8'), p.returncode
| [
"subprocess.Popen"
] | [((150, 190), 'subprocess.Popen', 'Popen', (['command'], {'stdout': 'PIPE', 'stderr': 'PIPE'}), '(command, stdout=PIPE, stderr=PIPE)\n', (155, 190), False, 'from subprocess import Popen\n')] |
import tensorflow as tf
from tensorflow.keras.layers import Layer
from debugprint import print_debug
from .utils import get_mask_from_lengths
class ParrotLoss():
    def __init__(self, hparams):
        """Cache loss weights and architecture sizes from *hparams*.

        NOTE(review): despite importing keras ``Layer``, this class does
        not subclass it; it is a plain container for loss computation.
        ``hparams`` must support attribute access and ``in`` membership
        tests (dict-like) — confirm against the config object used.
        """
        super(ParrotLoss, self).__init__()
        self.hidden_dim = hparams.encoder_embedding_dim
        self.mel_hidden_dim = hparams.mel_embedding_dim

        # per-term loss weights
        self.contr_w = hparams.contrastive_loss_w
        self.spenc_w = hparams.speaker_encoder_loss_w
        self.texcl_w = hparams.text_classifier_loss_w
        self.spadv_w = hparams.speaker_adversial_loss_w
        self.spcla_w = hparams.speaker_classifier_loss_w

        self.n_symbols = hparams.n_symbols
        self.fine_tune = hparams.fine_tune

        # speaker classif logit from mel hidden can be at text rate or at frame rate
        self.spksclassif_at_mel_rate = hparams.spksclassif_at_mel_rate

        if 'speaker_adversial_loss_type' in hparams:
            # adversarial loss measure ('l2', 'l1' or 'KL')
            self.speaker_adversial_loss_type = hparams.speaker_adversial_loss_type
        else:
            # default (from the original paper/code)
            self.speaker_adversial_loss_type = 'l2'

        print_debug('spk adv loss type: ' + self.speaker_adversial_loss_type)
    def contrastive_loss(self, text_hidden, mel_hidden, mel_lengths, eps=1e-5):
        """
        Zhang's basic contrastive loss.

        Pulls the diagonal of the pairwise (normalized) text/mel hidden
        distance matrix toward 0 and pushes off-diagonal pairs apart up
        to a margin of 1, averaged over the valid (unpadded) frames.

        Args:
            text_hidden: [B, T, emb] text-side hidden states.
            mel_hidden: [B, T, emb] mel-side hidden states (same T).
            mel_lengths: [B] valid lengths used to mask padding.
            eps: small constant guarding the norm division.
        """
        # ### CONTRASTIVE LOSS
        n_frames = mel_hidden.shape[1] # n_frames = T
        # 1) contrastive mask #
        # # [B, T] -> [B, T, T] (tile)
        contrast_mask1 = tf.tile(tf.expand_dims(get_mask_from_lengths(mel_lengths), axis=2), [1, 1, n_frames])
        # # [B, T] -> [B, T, T] (tile)
        contrast_mask2 = tf.tile(tf.expand_dims(get_mask_from_lengths(mel_lengths), axis=1), [1, n_frames, 1])
        # # [B, T, T] — valid only where both row and column frames are valid
        contrast_mask = tf.cast(contrast_mask1 & contrast_mask2, tf.float32)

        # L2-normalize embeddings before measuring distances
        # text_hidden [B, T, emb_size]
        # mel_hidden [B, T, emb_size]
        text_hidden_normed = text_hidden / (tf.norm(text_hidden, axis=2, keepdims=True) + eps)
        mel_hidden_normed = mel_hidden / (tf.norm(mel_hidden, axis=2, keepdims=True) + eps)

        # Expand the squared distance: (x - y) ** 2 = x ** 2 + y ** 2 - 2xy
        # [batch_size, T, 1]
        distance_matrix_xx = tf.reduce_sum(text_hidden_normed ** 2, axis=2, keepdims=True)
        distance_matrix_yy = tf.reduce_sum(mel_hidden_normed ** 2, axis=2)
        # [batch_size, 1, T]
        distance_matrix_yy = tf.expand_dims(distance_matrix_yy, axis=1)

        # [batch_size, T, T]
        distance_matrix_xy = text_hidden_normed @ tf.transpose(mel_hidden_normed, (0, 2, 1))
        # [batch_size, T, T]
        distance_matrix = distance_matrix_xx + distance_matrix_yy - 2 * distance_matrix_xy

        # Diagonal (matched pairs): minimize distance.
        # Off-diagonal (mismatched): hinge toward at least `margin` apart.
        identity_mat = tf.eye(distance_matrix.shape[1])
        margin = 1.
        contrast_loss = identity_mat * distance_matrix + \
            (1. - identity_mat) * tf.maximum(margin - distance_matrix, tf.zeros_like(distance_matrix))
        # Average over valid frame pairs only
        contrast_loss = tf.reduce_sum(contrast_loss*contrast_mask) / tf.reduce_sum(contrast_mask)

        return contrast_loss
def compute_loss(self, model_outputs, targets, speaker_target, input_text=False, eps=1e-5):
(predicted_mel, predicted_mel_post, mel_lengths, text_lengths,
speaker_logit_from_mel, speaker_logit_from_mel_hidden_text_or_mel_rate,
expand_mat_padded, text_input_padded, text_hidden, mel_hidden, mel_hidden_text_or_mel_rate,
text_logit_from_mel_hidden, text_target_text_level, mat_onehot_padded) = model_outputs
mel_target = targets
mel_mask = get_mask_from_lengths(mel_lengths)
mel_mask = tf.expand_dims(mel_mask, axis=1)
# mel_mask = tf.keras.backend.cast(tf.tile(mel_mask, [1, mel_target.shape[1], 1]), dtype='float32')
# replicate mel_mask over mel features axis
mel_mask = tf.tile(tf.keras.backend.cast(mel_mask, dtype='float32'), [1, mel_target.shape[1], 1])
# n_frames = mel_hidden.shape[1] # n_frames = T
recon_loss = tf.reduce_sum(tf.abs(mel_target-predicted_mel)*mel_mask)/tf.reduce_sum(mel_mask)
recon_post_loss = tf.reduce_sum(tf.abs(mel_target-predicted_mel_post)*mel_mask)/tf.reduce_sum(mel_mask)
# contrastive loss
contrast_loss = self.contrastive_loss(text_hidden, mel_hidden, mel_lengths, eps)
if not self.fine_tune:
# speaker classification loss from mel speaker space, at text frame rate
# speaker_logit_from_mel_int = tf.cast(speaker_logit_from_mel, tf.int16)
speaker_encoder_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\
(speaker_target, speaker_logit_from_mel)
predicted_speaker = tf.cast(tf.math.argmax(speaker_logit_from_mel, axis=1), dtype=tf.int16)
speaker_encoder_acc = tf.reduce_sum(tf.cast((predicted_speaker == speaker_target), tf.float32)) \
/ speaker_target.shape[0]
else:
speaker_encoder_loss = tf.convert_to_tensor(0., dtype=tf.dtypes.float32)
speaker_encoder_acc = tf.convert_to_tensor(0., dtype=tf.dtypes.float32)
if self.fine_tune:
n_speakers = 2
else:
n_speakers = speaker_logit_from_mel_hidden_text_or_mel_rate.shape[2]
n_text_frames = speaker_logit_from_mel_hidden_text_or_mel_rate.shape[1]
text_mask = get_mask_from_lengths(text_lengths)
sc_mel_mask = get_mask_from_lengths(mel_lengths) # mask for speaker classifier at mel rate
if not self.fine_tune:
text_mask = tf.expand_dims(text_mask, axis=1)
sc_mel_mask = tf.expand_dims(sc_mel_mask, axis=1)
text_mask_float = tf.keras.backend.cast(text_mask, dtype='float32')
sc_mel_mask_float = tf.keras.backend.cast(sc_mel_mask, dtype='float32')
# # speaker classification losses
# # fader losses
# speaker classification loss from mel linguistic space
if self.spksclassif_at_mel_rate:
sc_mask_float = sc_mel_mask_float
else:
sc_mask_float = text_mask_float
if self.fine_tune:
# there is only 1 dimension for the speaker "code" (2 speakers!)
# these two lines change
speaker_logit_flatten = tf.keras.backend.flatten(speaker_logit_from_mel_hidden_text_or_mel_rate)
predicted_speaker = tf.cast(speaker_logit_flatten > 0., dtype=tf.int16)
speaker_target_ling = tf.tile(tf.expand_dims(speaker_target, axis=1), [1, n_text_frames])
speaker_target_flatten = tf.keras.backend.flatten(speaker_target_ling)
sc_mask_float = tf.keras.backend.flatten(sc_mask_float)
speaker_classification_acc = tf.reduce_sum(tf.cast((predicted_speaker == speaker_target_flatten),
tf.float32) * sc_mask_float) \
/ tf.reduce_sum(sc_mask_float)
# this line changes
loss = tf.keras.losses.BinaryCrossentropy(reduction=tf.keras.losses.Reduction.NONE, from_logits=True) \
(speaker_target_flatten, speaker_logit_flatten)
speaker_classification_loss = tf.reduce_sum(loss * sc_mask_float) / tf.reduce_sum(sc_mask_float)
# speaker adversival loss from mel hidden at frame rate
if self.speaker_adversial_loss_type == 'l2':
loss = tf.math.pow(tf.abs(tf.nn.sigmoid(speaker_logit_flatten) - 0.5), 2)
elif self.speaker_adversial_loss_type == 'l1':
loss = tf.abs(tf.nn.sigmoid(speaker_logit_flatten) - 0.5)
elif self.speaker_adversial_loss_type == 'KL':
# use inverse Kullback-Leibler divergence for 2 speakers = 2 probabilities p and 1-p
epsilon = 1e-12 # to avoid problems with log
ref_prob = 1. / n_speakers
target_prob = (1 - epsilon) * tf.nn.sigmoid(speaker_logit_flatten) + epsilon
loss = (1-target_prob)*tf.math.log((1-target_prob)/ref_prob) + target_prob*tf.math.log(target_prob/ref_prob)
speaker_adversial_loss = tf.reduce_sum(loss * sc_mask_float) / tf.reduce_sum(sc_mask_float)
else:
speaker_logit_flatten = tf.reshape(speaker_logit_from_mel_hidden_text_or_mel_rate, [-1, n_speakers])
predicted_speaker = tf.cast(tf.math.argmax(speaker_logit_flatten, axis=1), dtype=tf.int16)
speaker_target_ling = tf.tile(tf.expand_dims(speaker_target, axis=1), [1, n_text_frames])
speaker_target_flatten = tf.keras.backend.flatten(speaker_target_ling)
speaker_classification_acc = tf.reduce_sum(tf.cast((predicted_speaker == speaker_target_flatten),
tf.float32)*tf.keras.backend.flatten(sc_mask_float))\
/ tf.reduce_sum(sc_mask_float)
loss = tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE,
from_logits=True)\
(speaker_target_flatten, speaker_logit_flatten)
speaker_classification_loss = tf.reduce_sum(loss*tf.keras.backend.flatten(sc_mask_float))\
/ tf.reduce_sum(sc_mask_float)
# speaker adversarial loss from mel hidden at frame rate
flatten_target = 1. / n_speakers # * tf.ones_like(speaker_logit_flatten)
if self.speaker_adversial_loss_type == 'l2':
loss = tf.math.pow(tf.abs(tf.nn.softmax(speaker_logit_flatten, axis=1) - flatten_target), 2)
elif self.speaker_adversial_loss_type == 'l1':
loss = tf.abs(tf.nn.softmax(speaker_logit_flatten, axis=1) - flatten_target)
elif self.speaker_adversial_loss_type == 'KL':
# use inverse Kullback-Leibler divergence
epsilon = 1e-12 # to avoid problems with log
ref_prob = 1. / n_speakers # flatten_target
target_prob = (1 - epsilon) * tf.nn.softmax(speaker_logit_flatten, axis=1) + epsilon
loss = target_prob*tf.math.log(target_prob/ref_prob)
# not sure of this (mask)
mask = tf.reshape(tf.tile(tf.transpose(sc_mask_float, (0, 2, 1)),
[1, 1, n_speakers]), [-1, n_speakers])
speaker_adversial_loss = tf.reduce_sum(loss * mask) / tf.reduce_sum(mask)
# text classification loss
# text classification loss from mel hidden at text rate
# compress from mel rate to text rate (normalize by the phone durations)
text_logit_from_mel_hidden_text_rate = expand_mat_padded @ text_logit_from_mel_hidden
# input the actual text at phone level rather than compress from mel level!
text_logit_flatten = tf.reshape(text_logit_from_mel_hidden_text_rate, [-1, self.n_symbols])
text_target_flatten = tf.keras.backend.flatten(text_target_text_level)
predicted_text = tf.cast(tf.math.argmax(text_logit_flatten, axis=1), dtype=tf.int16)
text_classification_acc = tf.reduce_sum(tf.cast((predicted_text == text_target_flatten),
tf.float32)*tf.keras.backend.flatten(text_mask_float))\
/ tf.reduce_sum(text_mask_float)
loss = tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE,
from_logits=True)\
(text_target_flatten, text_logit_flatten)
text_classification_loss = tf.reduce_sum(loss*tf.keras.backend.flatten(text_mask_float)) / \
tf.reduce_sum(text_mask_float)
loss_list = [recon_loss, recon_post_loss, speaker_encoder_loss, speaker_classification_loss,
speaker_adversial_loss, text_classification_loss, contrast_loss]
accuracy_list = [speaker_encoder_acc, speaker_classification_acc, text_classification_acc]
combined_loss1 = recon_loss + self.spenc_w * speaker_encoder_loss + self.spadv_w * speaker_adversial_loss + \
self.texcl_w * text_classification_loss + self.contr_w * contrast_loss + recon_post_loss
# self.contr_w * contrast_loss + \
# + self.texcl_w * text_classification_loss + \
# self.spadv_w * speaker_adversial_loss
combined_loss2 = self.spcla_w * speaker_classification_loss
return loss_list, accuracy_list, combined_loss1, combined_loss2
| [
"tensorflow.keras.backend.flatten",
"tensorflow.transpose",
"tensorflow.math.log",
"tensorflow.reduce_sum",
"tensorflow.norm",
"tensorflow.nn.softmax",
"tensorflow.cast",
"tensorflow.eye",
"tensorflow.nn.sigmoid",
"tensorflow.keras.backend.cast",
"tensorflow.zeros_like",
"tensorflow.convert_to... | [((1187, 1256), 'debugprint.print_debug', 'print_debug', (["('spk adv loss type: ' + self.speaker_adversial_loss_type)"], {}), "('spk adv loss type: ' + self.speaker_adversial_loss_type)\n", (1198, 1256), False, 'from debugprint import print_debug\n'), ((1866, 1918), 'tensorflow.cast', 'tf.cast', (['(contrast_mask1 & contrast_mask2)', 'tf.float32'], {}), '(contrast_mask1 & contrast_mask2, tf.float32)\n', (1873, 1918), True, 'import tensorflow as tf\n'), ((2290, 2351), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(text_hidden_normed ** 2)'], {'axis': '(2)', 'keepdims': '(True)'}), '(text_hidden_normed ** 2, axis=2, keepdims=True)\n', (2303, 2351), True, 'import tensorflow as tf\n'), ((2381, 2426), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(mel_hidden_normed ** 2)'], {'axis': '(2)'}), '(mel_hidden_normed ** 2, axis=2)\n', (2394, 2426), True, 'import tensorflow as tf\n'), ((2485, 2527), 'tensorflow.expand_dims', 'tf.expand_dims', (['distance_matrix_yy'], {'axis': '(1)'}), '(distance_matrix_yy, axis=1)\n', (2499, 2527), True, 'import tensorflow as tf\n'), ((2794, 2826), 'tensorflow.eye', 'tf.eye', (['distance_matrix.shape[1]'], {}), '(distance_matrix.shape[1])\n', (2800, 2826), True, 'import tensorflow as tf\n'), ((3699, 3731), 'tensorflow.expand_dims', 'tf.expand_dims', (['mel_mask'], {'axis': '(1)'}), '(mel_mask, axis=1)\n', (3713, 3731), True, 'import tensorflow as tf\n'), ((5766, 5815), 'tensorflow.keras.backend.cast', 'tf.keras.backend.cast', (['text_mask'], {'dtype': '"""float32"""'}), "(text_mask, dtype='float32')\n", (5787, 5815), True, 'import tensorflow as tf\n'), ((5844, 5895), 'tensorflow.keras.backend.cast', 'tf.keras.backend.cast', (['sc_mel_mask'], {'dtype': '"""float32"""'}), "(sc_mel_mask, dtype='float32')\n", (5865, 5895), True, 'import tensorflow as tf\n'), ((10985, 11055), 'tensorflow.reshape', 'tf.reshape', (['text_logit_from_mel_hidden_text_rate', '[-1, self.n_symbols]'], {}), '(text_logit_from_mel_hidden_text_rate, 
[-1, self.n_symbols])\n', (10995, 11055), True, 'import tensorflow as tf\n'), ((11086, 11134), 'tensorflow.keras.backend.flatten', 'tf.keras.backend.flatten', (['text_target_text_level'], {}), '(text_target_text_level)\n', (11110, 11134), True, 'import tensorflow as tf\n'), ((2607, 2649), 'tensorflow.transpose', 'tf.transpose', (['mel_hidden_normed', '(0, 2, 1)'], {}), '(mel_hidden_normed, (0, 2, 1))\n', (2619, 2649), True, 'import tensorflow as tf\n'), ((3046, 3090), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(contrast_loss * contrast_mask)'], {}), '(contrast_loss * contrast_mask)\n', (3059, 3090), True, 'import tensorflow as tf\n'), ((3091, 3119), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['contrast_mask'], {}), '(contrast_mask)\n', (3104, 3119), True, 'import tensorflow as tf\n'), ((3920, 3968), 'tensorflow.keras.backend.cast', 'tf.keras.backend.cast', (['mel_mask'], {'dtype': '"""float32"""'}), "(mel_mask, dtype='float32')\n", (3941, 3968), True, 'import tensorflow as tf\n'), ((4134, 4157), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mel_mask'], {}), '(mel_mask)\n', (4147, 4157), True, 'import tensorflow as tf\n'), ((4246, 4269), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mel_mask'], {}), '(mel_mask)\n', (4259, 4269), True, 'import tensorflow as tf\n'), ((5068, 5118), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(0.0)'], {'dtype': 'tf.dtypes.float32'}), '(0.0, dtype=tf.dtypes.float32)\n', (5088, 5118), True, 'import tensorflow as tf\n'), ((5152, 5202), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(0.0)'], {'dtype': 'tf.dtypes.float32'}), '(0.0, dtype=tf.dtypes.float32)\n', (5172, 5202), True, 'import tensorflow as tf\n'), ((5644, 5677), 'tensorflow.expand_dims', 'tf.expand_dims', (['text_mask'], {'axis': '(1)'}), '(text_mask, axis=1)\n', (5658, 5677), True, 'import tensorflow as tf\n'), ((5704, 5739), 'tensorflow.expand_dims', 'tf.expand_dims', (['sc_mel_mask'], {'axis': '(1)'}), '(sc_mel_mask, axis=1)\n', (5718, 5739), True, 
'import tensorflow as tf\n'), ((6350, 6422), 'tensorflow.keras.backend.flatten', 'tf.keras.backend.flatten', (['speaker_logit_from_mel_hidden_text_or_mel_rate'], {}), '(speaker_logit_from_mel_hidden_text_or_mel_rate)\n', (6374, 6422), True, 'import tensorflow as tf\n'), ((6455, 6507), 'tensorflow.cast', 'tf.cast', (['(speaker_logit_flatten > 0.0)'], {'dtype': 'tf.int16'}), '(speaker_logit_flatten > 0.0, dtype=tf.int16)\n', (6462, 6507), True, 'import tensorflow as tf\n'), ((6647, 6692), 'tensorflow.keras.backend.flatten', 'tf.keras.backend.flatten', (['speaker_target_ling'], {}), '(speaker_target_ling)\n', (6671, 6692), True, 'import tensorflow as tf\n'), ((6722, 6761), 'tensorflow.keras.backend.flatten', 'tf.keras.backend.flatten', (['sc_mask_float'], {}), '(sc_mask_float)\n', (6746, 6761), True, 'import tensorflow as tf\n'), ((8347, 8423), 'tensorflow.reshape', 'tf.reshape', (['speaker_logit_from_mel_hidden_text_or_mel_rate', '[-1, n_speakers]'], {}), '(speaker_logit_from_mel_hidden_text_or_mel_rate, [-1, n_speakers])\n', (8357, 8423), True, 'import tensorflow as tf\n'), ((8667, 8712), 'tensorflow.keras.backend.flatten', 'tf.keras.backend.flatten', (['speaker_target_ling'], {}), '(speaker_target_ling)\n', (8691, 8712), True, 'import tensorflow as tf\n'), ((11169, 11211), 'tensorflow.math.argmax', 'tf.math.argmax', (['text_logit_flatten'], {'axis': '(1)'}), '(text_logit_flatten, axis=1)\n', (11183, 11211), True, 'import tensorflow as tf\n'), ((11470, 11500), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['text_mask_float'], {}), '(text_mask_float)\n', (11483, 11500), True, 'import tensorflow as tf\n'), ((11517, 11627), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'reduction': 'tf.keras.losses.Reduction.NONE', 'from_logits': '(True)'}), '(reduction=tf.keras.losses.\n Reduction.NONE, from_logits=True)\n', (11562, 11627), True, 'import tensorflow as tf\n'), ((11875, 11905), 'tensorflow.reduce_sum', 
'tf.reduce_sum', (['text_mask_float'], {}), '(text_mask_float)\n', (11888, 11905), True, 'import tensorflow as tf\n'), ((2041, 2084), 'tensorflow.norm', 'tf.norm', (['text_hidden'], {'axis': '(2)', 'keepdims': '(True)'}), '(text_hidden, axis=2, keepdims=True)\n', (2048, 2084), True, 'import tensorflow as tf\n'), ((2134, 2176), 'tensorflow.norm', 'tf.norm', (['mel_hidden'], {'axis': '(2)', 'keepdims': '(True)'}), '(mel_hidden, axis=2, keepdims=True)\n', (2141, 2176), True, 'import tensorflow as tf\n'), ((4623, 4686), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (4668, 4686), True, 'import tensorflow as tf\n'), ((4785, 4831), 'tensorflow.math.argmax', 'tf.math.argmax', (['speaker_logit_from_mel'], {'axis': '(1)'}), '(speaker_logit_from_mel, axis=1)\n', (4799, 4831), True, 'import tensorflow as tf\n'), ((6550, 6588), 'tensorflow.expand_dims', 'tf.expand_dims', (['speaker_target'], {'axis': '(1)'}), '(speaker_target, axis=1)\n', (6564, 6588), True, 'import tensorflow as tf\n'), ((7009, 7037), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['sc_mask_float'], {}), '(sc_mask_float)\n', (7022, 7037), True, 'import tensorflow as tf\n'), ((7089, 7187), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'reduction': 'tf.keras.losses.Reduction.NONE', 'from_logits': '(True)'}), '(reduction=tf.keras.losses.Reduction.NONE,\n from_logits=True)\n', (7123, 7187), True, 'import tensorflow as tf\n'), ((7292, 7327), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(loss * sc_mask_float)'], {}), '(loss * sc_mask_float)\n', (7305, 7327), True, 'import tensorflow as tf\n'), ((7330, 7358), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['sc_mask_float'], {}), '(sc_mask_float)\n', (7343, 7358), True, 'import tensorflow as tf\n'), ((8229, 8264), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(loss * sc_mask_float)'], {}), '(loss * sc_mask_float)\n', 
(8242, 8264), True, 'import tensorflow as tf\n'), ((8267, 8295), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['sc_mask_float'], {}), '(sc_mask_float)\n', (8280, 8295), True, 'import tensorflow as tf\n'), ((8464, 8509), 'tensorflow.math.argmax', 'tf.math.argmax', (['speaker_logit_flatten'], {'axis': '(1)'}), '(speaker_logit_flatten, axis=1)\n', (8478, 8509), True, 'import tensorflow as tf\n'), ((8570, 8608), 'tensorflow.expand_dims', 'tf.expand_dims', (['speaker_target'], {'axis': '(1)'}), '(speaker_target, axis=1)\n', (8584, 8608), True, 'import tensorflow as tf\n'), ((8971, 8999), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['sc_mask_float'], {}), '(sc_mask_float)\n', (8984, 8999), True, 'import tensorflow as tf\n'), ((9019, 9129), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'reduction': 'tf.keras.losses.Reduction.NONE', 'from_logits': '(True)'}), '(reduction=tf.keras.losses.\n Reduction.NONE, from_logits=True)\n', (9064, 9129), True, 'import tensorflow as tf\n'), ((9400, 9428), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['sc_mask_float'], {}), '(sc_mask_float)\n', (9413, 9428), True, 'import tensorflow as tf\n'), ((10547, 10573), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(loss * mask)'], {}), '(loss * mask)\n', (10560, 10573), True, 'import tensorflow as tf\n'), ((10576, 10595), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask'], {}), '(mask)\n', (10589, 10595), True, 'import tensorflow as tf\n'), ((2990, 3020), 'tensorflow.zeros_like', 'tf.zeros_like', (['distance_matrix'], {}), '(distance_matrix)\n', (3003, 3020), True, 'import tensorflow as tf\n'), ((4091, 4125), 'tensorflow.abs', 'tf.abs', (['(mel_target - predicted_mel)'], {}), '(mel_target - predicted_mel)\n', (4097, 4125), True, 'import tensorflow as tf\n'), ((4198, 4237), 'tensorflow.abs', 'tf.abs', (['(mel_target - predicted_mel_post)'], {}), '(mel_target - predicted_mel_post)\n', (4204, 4237), True, 'import tensorflow as tf\n'), 
((4897, 4953), 'tensorflow.cast', 'tf.cast', (['(predicted_speaker == speaker_target)', 'tf.float32'], {}), '(predicted_speaker == speaker_target, tf.float32)\n', (4904, 4953), True, 'import tensorflow as tf\n'), ((10392, 10430), 'tensorflow.transpose', 'tf.transpose', (['sc_mask_float', '(0, 2, 1)'], {}), '(sc_mask_float, (0, 2, 1))\n', (10404, 10430), True, 'import tensorflow as tf\n'), ((11278, 11336), 'tensorflow.cast', 'tf.cast', (['(predicted_text == text_target_flatten)', 'tf.float32'], {}), '(predicted_text == text_target_flatten, tf.float32)\n', (11285, 11336), True, 'import tensorflow as tf\n'), ((11387, 11428), 'tensorflow.keras.backend.flatten', 'tf.keras.backend.flatten', (['text_mask_float'], {}), '(text_mask_float)\n', (11411, 11428), True, 'import tensorflow as tf\n'), ((11793, 11834), 'tensorflow.keras.backend.flatten', 'tf.keras.backend.flatten', (['text_mask_float'], {}), '(text_mask_float)\n', (11817, 11834), True, 'import tensorflow as tf\n'), ((6817, 6881), 'tensorflow.cast', 'tf.cast', (['(predicted_speaker == speaker_target_flatten)', 'tf.float32'], {}), '(predicted_speaker == speaker_target_flatten, tf.float32)\n', (6824, 6881), True, 'import tensorflow as tf\n'), ((8769, 8833), 'tensorflow.cast', 'tf.cast', (['(predicted_speaker == speaker_target_flatten)', 'tf.float32'], {}), '(predicted_speaker == speaker_target_flatten, tf.float32)\n', (8776, 8833), True, 'import tensorflow as tf\n'), ((8886, 8925), 'tensorflow.keras.backend.flatten', 'tf.keras.backend.flatten', (['sc_mask_float'], {}), '(sc_mask_float)\n', (8910, 8925), True, 'import tensorflow as tf\n'), ((9316, 9355), 'tensorflow.keras.backend.flatten', 'tf.keras.backend.flatten', (['sc_mask_float'], {}), '(sc_mask_float)\n', (9340, 9355), True, 'import tensorflow as tf\n'), ((7527, 7563), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['speaker_logit_flatten'], {}), '(speaker_logit_flatten)\n', (7540, 7563), True, 'import tensorflow as tf\n'), ((7664, 7700), 'tensorflow.nn.sigmoid', 
'tf.nn.sigmoid', (['speaker_logit_flatten'], {}), '(speaker_logit_flatten)\n', (7677, 7700), True, 'import tensorflow as tf\n'), ((9686, 9730), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['speaker_logit_flatten'], {'axis': '(1)'}), '(speaker_logit_flatten, axis=1)\n', (9699, 9730), True, 'import tensorflow as tf\n'), ((9842, 9886), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['speaker_logit_flatten'], {'axis': '(1)'}), '(speaker_logit_flatten, axis=1)\n', (9855, 9886), True, 'import tensorflow as tf\n'), ((10281, 10316), 'tensorflow.math.log', 'tf.math.log', (['(target_prob / ref_prob)'], {}), '(target_prob / ref_prob)\n', (10292, 10316), True, 'import tensorflow as tf\n'), ((8019, 8055), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['speaker_logit_flatten'], {}), '(speaker_logit_flatten)\n', (8032, 8055), True, 'import tensorflow as tf\n'), ((8105, 8146), 'tensorflow.math.log', 'tf.math.log', (['((1 - target_prob) / ref_prob)'], {}), '((1 - target_prob) / ref_prob)\n', (8116, 8146), True, 'import tensorflow as tf\n'), ((8157, 8192), 'tensorflow.math.log', 'tf.math.log', (['(target_prob / ref_prob)'], {}), '(target_prob / ref_prob)\n', (8168, 8192), True, 'import tensorflow as tf\n'), ((10191, 10235), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['speaker_logit_flatten'], {'axis': '(1)'}), '(speaker_logit_flatten, axis=1)\n', (10204, 10235), True, 'import tensorflow as tf\n')] |
import datetime
import json
import os
import random
import re
from urllib.parse import quote
import lxml
import pafy
import requests
import youtube_dl
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.http import (Http404, HttpResponse, HttpResponseRedirect,
JsonResponse)
from django.shortcuts import redirect, render, reverse
from django.template.defaultfilters import filesizeformat
from django.utils.http import urlencode
from django.views.decorators.csrf import csrf_exempt
from pydub import AudioSegment
from utils import ytscrapper
def home(request):
    """Render the site landing page."""
    template_name = 'index.html'
    return render(request, template_name)
def about(request):
    """Render the static "about" page."""
    template_name = 'about.html'
    return render(request, template_name)
def movies(request):
    """Render IMDb trending titles for India and globally.

    Scrapes the two IMDb trending pages. If either HTTP request fails the
    view answers with a plain "Server Error" response instead of raising
    (the original code only guarded the second request).
    """
    try:
        local_titles = _scrape_imdb_trending("https://www.imdb.com/india/released/")
        global_titles = _scrape_imdb_trending("https://www.imdb.com/india/global/")
    except requests.RequestException:
        return HttpResponse("Server Error")
    context = {
        'title': "Trending",
        'local_list_name': "Trending Movies/Web Series (India)",
        'local_list': local_titles,
        'global_list_name': "Trending Movies/Web Series(Global)",
        'global_list': global_titles,
        'week': datetime.date.today(),
    }
    return render(request, 'trending.html', context)


def _scrape_imdb_trending(url):
    # Fetch one IMDb trending page and return [{'title': ...}, ...] pulled
    # from its rank-item containers.  The third line of each container's
    # stripped text holds the title (the pattern the original code used).
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'lxml')
    containers = soup.find_all(
        'div', {"class": "trending-list-rank-item-data-container"})
    return [
        {"title": container.text.strip().split("\n")[2]}
        for container in containers
    ]
def toptwohundred(request):
    """Render the Billboard 200 albums chart scraped from billboard.com."""
    urlOfBB200 = "https://www.billboard.com/charts/billboard-200"
    try:
        requestOfBB200 = requests.get(urlOfBB200)
    except requests.RequestException:  # narrowed from a bare except
        return HttpResponse("Server error")
    soupOfBB200 = BeautifulSoup(requestOfBB200.content, 'lxml')
    rawListOfBB200 = soupOfBB200.find_all(
        'span', {"class": "chart-element__information"})
    week = soupOfBB200.find(
        'button', {"class": "date-selector__button button--link"})
    current_week = week.text.strip()
    # Each chart element's text is "name\nartist\n..." — keep the first two lines.
    finalBB200List = [
        {"name": song.text.strip().split("\n")[0],
         "artist": song.text.strip().split("\n")[1]}
        for song in rawListOfBB200[:200]  # was [:201]; the chart has 200 entries
    ]
    context = {
        'song_list': finalBB200List,
        'week': current_week,
        'list_name': "billboard Top 200 Songs",
        'title': "Billboard 200"
    }
    return render(request, 'toptwohundred.html', context)
def hothundred(request):
    """Render the Billboard Hot 100 songs chart scraped from billboard.com."""
    urlOfHot100 = "https://www.billboard.com/charts/hot-100"
    try:
        requestOfHot100 = requests.get(urlOfHot100)
    except requests.RequestException:  # narrowed from a bare except
        return HttpResponse("server error")
    soupOfHot100 = BeautifulSoup(requestOfHot100.content, 'lxml')
    rawListOfHot100 = soupOfHot100.find_all(
        'span', {"class": "chart-element__information"})
    week = soupOfHot100.find(
        'button', {"class": "date-selector__button button--link"})
    current_week = week.text.strip()
    # Each chart element's text is "name\nartist\n..." — keep the first two lines.
    finalHot100List = [
        {"name": song.text.strip().split("\n")[0],
         "artist": song.text.strip().split("\n")[1]}
        for song in rawListOfHot100[:100]  # was [:201]; the chart has 100 entries
    ]
    context = {
        'song_list': finalHot100List,
        'week': current_week,
        'list_name': "billboard hot 100 Songs",
        # Added for consistency: every sibling chart view supplies a page title.
        'title': "Billboard Hot 100",
    }
    return render(request, 'toptwohundred.html', context)
def kannadatopfifty(request):
    """Render the Kannada weekly top-songs chart scraped from Gaana.

    NOTE(review): the playlist URL says "top-20" while the page is titled
    "Top 50" — confirm which playlist is intended.
    """
    url_for_kannada_topfifty_request = "https://gaana.com/playlist/gaana-dj-kannada-top-20"
    try:
        r = requests.get(url_for_kannada_topfifty_request)
    except requests.RequestException:  # narrowed from a bare except
        return HttpResponse("Server Error")
    try:
        soup = BeautifulSoup(r.content, 'lxml')
    except Exception:
        # Fall back to the stdlib parser when lxml is unavailable.
        soup = BeautifulSoup(r.content, 'html.parser')
    rawKanSongs = soup.find_all(
        'div', {"class": "playlist_thumb_det"})
    anchors_in_kan_songs = [
        song_div.find_all('a') for song_div in rawKanSongs
    ]
    # Removed a leftover debug print of the scraped list.
    final_kan_songs = [
        get_formatted_song(anchor_tags)
        for anchor_tags in anchors_in_kan_songs
    ]
    context = {
        'song_list': final_kan_songs,
        'list_name': "Kannada Weekly Top 50 Songs",
        'week': datetime.date.today(),
        'title': 'Kannada Top 50'
    }
    return render(request, 'toptwohundred.html', context)
def hinditopfifty(request):
    """Render the Bollywood weekly top-50 chart scraped from Gaana."""
    url_hindi_topfifty = "https://gaana.com/playlist/gaana-dj-bollywood-top-50-1"
    try:
        response = requests.get(url_hindi_topfifty)
    except requests.RequestException:  # narrowed from a bare except
        return HttpResponse("Server Error")
    try:
        soup = BeautifulSoup(response.content, 'lxml')
    except Exception:
        # Fall back to the stdlib parser when lxml is unavailable.
        soup = BeautifulSoup(response.content, 'html.parser')
    # Removed an unused local (`date = datetime.date.today()`); the context
    # below computes the date where it is used.
    rawHindiSongs = soup.find_all(
        'div', {"class": "playlist_thumb_det"})
    anchors_in_hindi_songs = [
        song_div.find_all('a') for song_div in rawHindiSongs
    ]
    final_hindi_songs = [
        get_formatted_song(anchor_tags)
        for anchor_tags in anchors_in_hindi_songs
    ]
    context = {
        'song_list': final_hindi_songs,
        'list_name': "Hindi Weekly Top 50 Songs",
        'week': datetime.date.today(),
        'title': 'Hindi Top 50'
    }
    return render(request, 'toptwohundred.html', context)
def ytredirect(request):
    """Resolve the search query to a YouTube URL and redirect the client to it."""
    query = str(request.GET['query'])
    target_url = ytscrapper.getYtUrl(query)
    if target_url is None:
        return HttpResponse("Server Busy! Please Try again")
    return HttpResponseRedirect(target_url)
def download_from_name(request):
    """Find a YouTube video by name and redirect to the downloader page for it."""
    query = str(request.GET['query'])
    found_url = ytscrapper.getYtUrl(query)
    if found_url is None:
        return HttpResponse("Could Not Find Video")
    return redirect(reverse('ytdownloader') + f'?video_url={found_url}')
def youtube(request):
    """Render the form where a user pastes a YouTube URL."""
    template_name = 'youtube_from.html'
    return render(request, template_name)
def ytdownloader(request):
    """Show the available video+audio and audio-only streams for a YouTube URL."""
    pafy.set_api_key(settings.YT_API_KEY)
    video_url = request.GET['video_url']
    try:
        video = pafy.new(video_url)
    except:
        return render(request, 'youtube_from.html', {'error': "invalid url"})

    def describe_video_stream(stream):
        # One template row for a combined video+audio stream.
        return {
            'resolution': stream.resolution.split("x")[1] + "p",  # e.g. 360p, 720p
            'extension': stream.extension,
            'file_size': filesizeformat(stream.get_filesize()),
            'video_url': stream.url + "&title=" + video.title,
        }

    def describe_audio_stream(stream):
        # One template row for an audio-only stream.
        return {
            'bitrate': stream.rawbitrate // 1000,  # bps -> kbps
            'extension': stream.extension,
            'file_size': filesizeformat(stream.get_filesize()),
            'video_url': stream.url + "&title=" + video.title,
        }

    context = {
        'streams': [describe_video_stream(s) for s in video.streams],
        'audio_streams': [describe_audio_stream(s) for s in video.audiostreams],
        'meta': {
            'title': video.title,
            'thumb': video.bigthumbhd.replace("http://", "https://"),
            'duration': video.duration,
            'published': video.published,
            'viewcount': video.viewcount,
            'videoid': video.videoid,
        },
    }
    return render(request, 'download.html', context)
@csrf_exempt
def get_download_url(request):
    """Download the requested stream into MEDIA_ROOT and return its public path.

    Expects a JSON body ``{"videoid": ..., "idx": ..., "stream_type": ...}``
    where ``stream_type`` is one of ``'audio-mp3'``, ``'audio'`` or
    ``'video'``.  Returns ``{'filepath': '/media/<name>'}`` on success, or a
    400 JSON error when the video/stream cannot be fetched.
    """
    pafy.set_api_key(settings.YT_API_KEY)
    req_data = json.loads(request.body.decode('utf-8'))
    videoid = req_data['videoid']
    idx = int(req_data['idx'])
    stream_type = req_data['stream_type']
    try:
        video = pafy.new(videoid)
        if stream_type == 'audio-mp3':
            filepath_temp = _download_audio_as_mp3(video, idx)
        elif stream_type == 'audio':
            filepath_temp = _download_audio(video, idx)
        elif stream_type == 'video':
            filepath_temp = _download_video(video, idx)
    except Exception as e:
        print(e)
        return JsonResponse(status=400, data={'message': "could not find video/audio"})
    return JsonResponse({'filepath': filepath_temp})


def _media_path(filename):
    # Absolute filesystem path inside MEDIA_ROOT for an already-normalized name.
    return os.path.join(settings.MEDIA_ROOT, filename)


def _download_audio_as_mp3(video, idx):
    # Download the chosen audio stream, transcode it to mp3 at the same
    # bitrate, and return its public /media/ path.
    stream = video.audiostreams[idx]
    bitrate_kbps = str(stream.rawbitrate // 1000)
    filename = normalizeFilename(video.title + bitrate_kbps + "." + stream.extension)
    stream.download(filepath=_media_path(filename), quiet=True)
    sound = AudioSegment.from_file(_media_path(filename))
    mp3_name = filename.replace("." + stream.extension, ".mp3")
    sound.export(_media_path(mp3_name), format="mp3", bitrate=bitrate_kbps + "K")
    return "/media/" + mp3_name


def _download_audio(video, idx):
    # Download the chosen audio stream as-is and return its public /media/ path.
    stream = video.audiostreams[idx]
    filename = normalizeFilename(
        video.title + str(stream.rawbitrate // 1000) + "." + stream.extension)
    stream.download(filepath=_media_path(filename), quiet=True)
    return "/media/" + filename


def _download_video(video, idx):
    # Download the chosen video stream and return its public /media/ path.
    stream = video.streams[idx]
    filename = normalizeFilename(
        video.title + stream.resolution.split("x")[1] + "p" + "." + stream.extension)
    stream.download(filepath=_media_path(filename), quiet=False)
    return "/media/" + filename
def normalizeFilename(filename):
    """Strip characters that are illegal in filenames ('/ \\ : * ? " < > |')
    and trim surrounding whitespace."""
    illegal_chars = '/\\:*?"<>|'
    cleaned = filename.translate(str.maketrans('', '', illegal_chars))
    return cleaned.strip()
def get_formatted_song(anchor_tags):
    """Build ``{'name': ..., 'artist': ...}`` from one playlist row's anchor tags.

    An anchor whose href contains 'song' supplies the song name; one whose
    href contains 'artist' supplies the artist.  Anchors without an href are
    skipped (``.get('href')`` may return None, which previously raised a
    TypeError on the ``in`` test).
    """
    formatted_song = {}
    for anchor_tag in anchor_tags:
        href = anchor_tag.get('href') or ''
        if 'song' in href:
            formatted_song['name'] = anchor_tag.text
        if 'artist' in href:
            formatted_song['artist'] = anchor_tag.text
    return formatted_song
| [
"django.shortcuts.render",
"django.http.HttpResponseRedirect",
"json.loads",
"django.http.JsonResponse",
"django.http.HttpResponse",
"os.path.join",
"utils.ytscrapper.getYtUrl",
"requests.get",
"bs4.BeautifulSoup",
"pafy.new",
"django.shortcuts.redirect",
"django.shortcuts.reverse",
"re.sub"... | [((679, 708), 'django.shortcuts.render', 'render', (['request', '"""index.html"""'], {}), "(request, 'index.html')\n", (685, 708), False, 'from django.shortcuts import redirect, render, reverse\n'), ((742, 771), 'django.shortcuts.render', 'render', (['request', '"""about.html"""'], {}), "(request, 'about.html')\n", (748, 771), False, 'from django.shortcuts import redirect, render, reverse\n'), ((879, 906), 'requests.get', 'requests.get', (['urlOfTrending'], {}), '(urlOfTrending)\n', (891, 906), False, 'import requests\n'), ((929, 977), 'bs4.BeautifulSoup', 'BeautifulSoup', (['requestOfTrending.content', '"""lxml"""'], {}), "(requestOfTrending.content, 'lxml')\n", (942, 977), False, 'from bs4 import BeautifulSoup\n'), ((1468, 1522), 'bs4.BeautifulSoup', 'BeautifulSoup', (['requestOfTrendingGlobal.content', '"""lxml"""'], {}), "(requestOfTrendingGlobal.content, 'lxml')\n", (1481, 1522), False, 'from bs4 import BeautifulSoup\n'), ((2143, 2184), 'django.shortcuts.render', 'render', (['request', '"""trending.html"""', 'context'], {}), "(request, 'trending.html', context)\n", (2149, 2184), False, 'from django.shortcuts import redirect, render, reverse\n'), ((2416, 2461), 'bs4.BeautifulSoup', 'BeautifulSoup', (['requestOfBB200.content', '"""lxml"""'], {}), "(requestOfBB200.content, 'lxml')\n", (2429, 2461), False, 'from bs4 import BeautifulSoup\n'), ((3058, 3104), 'django.shortcuts.render', 'render', (['request', '"""toptwohundred.html"""', 'context'], {}), "(request, 'toptwohundred.html', context)\n", (3064, 3104), False, 'from django.shortcuts import redirect, render, reverse\n'), ((3331, 3377), 'bs4.BeautifulSoup', 'BeautifulSoup', (['requestOfHot100.content', '"""lxml"""'], {}), "(requestOfHot100.content, 'lxml')\n", (3344, 3377), False, 'from bs4 import BeautifulSoup\n'), ((3949, 3995), 'django.shortcuts.render', 'render', (['request', '"""toptwohundred.html"""', 'context'], {}), "(request, 'toptwohundred.html', context)\n", (3955, 3995), False, 'from 
django.shortcuts import redirect, render, reverse\n'), ((4891, 4937), 'django.shortcuts.render', 'render', (['request', '"""toptwohundred.html"""', 'context'], {}), "(request, 'toptwohundred.html', context)\n", (4897, 4937), False, 'from django.shortcuts import redirect, render, reverse\n'), ((5319, 5340), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (5338, 5340), False, 'import datetime\n'), ((5843, 5889), 'django.shortcuts.render', 'render', (['request', '"""toptwohundred.html"""', 'context'], {}), "(request, 'toptwohundred.html', context)\n", (5849, 5889), False, 'from django.shortcuts import redirect, render, reverse\n'), ((5979, 6010), 'utils.ytscrapper.getYtUrl', 'ytscrapper.getYtUrl', (['video_name'], {}), '(video_name)\n', (5998, 6010), False, 'from utils import ytscrapper\n'), ((6112, 6146), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['redirect_url'], {}), '(redirect_url)\n', (6132, 6146), False, 'from django.http import Http404, HttpResponse, HttpResponseRedirect, JsonResponse\n'), ((6241, 6272), 'utils.ytscrapper.getYtUrl', 'ytscrapper.getYtUrl', (['video_name'], {}), '(video_name)\n', (6260, 6272), False, 'from utils import ytscrapper\n'), ((6433, 6455), 'django.shortcuts.redirect', 'redirect', (['redirect_url'], {}), '(redirect_url)\n', (6441, 6455), False, 'from django.shortcuts import redirect, render, reverse\n'), ((6491, 6527), 'django.shortcuts.render', 'render', (['request', '"""youtube_from.html"""'], {}), "(request, 'youtube_from.html')\n", (6497, 6527), False, 'from django.shortcuts import redirect, render, reverse\n'), ((6597, 6623), 'pafy.set_api_key', 'pafy.set_api_key', (['ytApiKey'], {}), '(ytApiKey)\n', (6613, 6623), False, 'import pafy\n'), ((7869, 7910), 'django.shortcuts.render', 'render', (['request', '"""download.html"""', 'context'], {}), "(request, 'download.html', context)\n", (7875, 7910), False, 'from django.shortcuts import redirect, render, reverse\n'), ((7996, 8022), 'pafy.set_api_key', 
'pafy.set_api_key', (['ytApiKey'], {}), '(ytApiKey)\n', (8012, 8022), False, 'import pafy\n'), ((8079, 8095), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (8089, 8095), False, 'import json\n'), ((10109, 10150), 'django.http.JsonResponse', 'JsonResponse', (["{'filepath': filepath_temp}"], {}), "({'filepath': filepath_temp})\n", (10121, 10150), False, 'from django.http import Http404, HttpResponse, HttpResponseRedirect, JsonResponse\n'), ((10263, 10289), 're.sub', 're.sub', (['rstr', '""""""', 'filename'], {}), "(rstr, '', filename)\n", (10269, 10289), False, 'import re\n'), ((1350, 1383), 'requests.get', 'requests.get', (['urlOfTrendingGlobal'], {}), '(urlOfTrendingGlobal)\n', (1362, 1383), False, 'import requests\n'), ((2102, 2123), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2121, 2123), False, 'import datetime\n'), ((2316, 2340), 'requests.get', 'requests.get', (['urlOfBB200'], {}), '(urlOfBB200)\n', (2328, 2340), False, 'import requests\n'), ((3229, 3254), 'requests.get', 'requests.get', (['urlOfHot100'], {}), '(urlOfHot100)\n', (3241, 3254), False, 'import requests\n'), ((4142, 4188), 'requests.get', 'requests.get', (['url_for_kannada_topfifty_request'], {}), '(url_for_kannada_topfifty_request)\n', (4154, 4188), False, 'import requests\n'), ((4270, 4302), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.content', '"""lxml"""'], {}), "(r.content, 'lxml')\n", (4283, 4302), False, 'from bs4 import BeautifulSoup\n'), ((4816, 4837), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (4835, 4837), False, 'import datetime\n'), ((5079, 5111), 'requests.get', 'requests.get', (['url_hindi_topfifty'], {}), '(url_hindi_topfifty)\n', (5091, 5111), False, 'import requests\n'), ((5193, 5232), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.content', '"""lxml"""'], {}), "(response.content, 'lxml')\n", (5206, 5232), False, 'from bs4 import BeautifulSoup\n'), ((5770, 5791), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', 
(5789, 5791), False, 'import datetime\n'), ((6055, 6100), 'django.http.HttpResponse', 'HttpResponse', (['"""Server Busy! Please Try again"""'], {}), "('Server Busy! Please Try again')\n", (6067, 6100), False, 'from django.http import Http404, HttpResponse, HttpResponseRedirect, JsonResponse\n'), ((6314, 6350), 'django.http.HttpResponse', 'HttpResponse', (['"""Could Not Find Video"""'], {}), "('Could Not Find Video')\n", (6326, 6350), False, 'from django.http import Http404, HttpResponse, HttpResponseRedirect, JsonResponse\n'), ((6370, 6393), 'django.shortcuts.reverse', 'reverse', (['"""ytdownloader"""'], {}), "('ytdownloader')\n", (6377, 6393), False, 'from django.shortcuts import redirect, render, reverse\n'), ((6691, 6710), 'pafy.new', 'pafy.new', (['video_url'], {}), '(video_url)\n', (6699, 6710), False, 'import pafy\n'), ((8229, 8246), 'pafy.new', 'pafy.new', (['videoid'], {}), '(videoid)\n', (8237, 8246), False, 'import pafy\n'), ((1411, 1439), 'django.http.HttpResponse', 'HttpResponse', (['"""Server Error"""'], {}), "('Server Error')\n", (1423, 1439), False, 'from django.http import Http404, HttpResponse, HttpResponseRedirect, JsonResponse\n'), ((2368, 2396), 'django.http.HttpResponse', 'HttpResponse', (['"""Server error"""'], {}), "('Server error')\n", (2380, 2396), False, 'from django.http import Http404, HttpResponse, HttpResponseRedirect, JsonResponse\n'), ((3282, 3310), 'django.http.HttpResponse', 'HttpResponse', (['"""server error"""'], {}), "('server error')\n", (3294, 3310), False, 'from django.http import Http404, HttpResponse, HttpResponseRedirect, JsonResponse\n'), ((4216, 4244), 'django.http.HttpResponse', 'HttpResponse', (['"""Server Error"""'], {}), "('Server Error')\n", (4228, 4244), False, 'from django.http import Http404, HttpResponse, HttpResponseRedirect, JsonResponse\n'), ((4330, 4369), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.content', '"""html.parser"""'], {}), "(r.content, 'html.parser')\n", (4343, 4369), False, 'from bs4 import 
BeautifulSoup\n'), ((5139, 5167), 'django.http.HttpResponse', 'HttpResponse', (['"""Server Error"""'], {}), "('Server Error')\n", (5151, 5167), False, 'from django.http import Http404, HttpResponse, HttpResponseRedirect, JsonResponse\n'), ((5260, 5306), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.content', '"""html.parser"""'], {}), "(response.content, 'html.parser')\n", (5273, 5306), False, 'from bs4 import BeautifulSoup\n'), ((6803, 6848), 'django.shortcuts.render', 'render', (['request', '"""youtube_from.html"""', 'context'], {}), "(request, 'youtube_from.html', context)\n", (6809, 6848), False, 'from django.shortcuts import redirect, render, reverse\n'), ((8523, 8567), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', '_filename'], {}), '(settings.MEDIA_ROOT, _filename)\n', (8535, 8567), False, 'import os\n'), ((10024, 10096), 'django.http.JsonResponse', 'JsonResponse', ([], {'status': '(400)', 'data': "{'message': 'could not find video/audio'}"}), "(status=400, data={'message': 'could not find video/audio'})\n", (10036, 10096), False, 'from django.http import Http404, HttpResponse, HttpResponseRedirect, JsonResponse\n'), ((8692, 8736), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', '_filename'], {}), '(settings.MEDIA_ROOT, _filename)\n', (8704, 8736), False, 'import os\n'), ((9369, 9413), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', '_filename'], {}), '(settings.MEDIA_ROOT, _filename)\n', (9381, 9413), False, 'import os\n'), ((9804, 9848), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', '_filename'], {}), '(settings.MEDIA_ROOT, _filename)\n', (9816, 9848), False, 'import os\n')] |
from spotdl import handle
from spotdl import const
from spotdl import downloader
import os
import sys
const.args = handle.get_arguments(to_group=True)
track = downloader.Downloader(raw_song=const.args.song[0])
track_title = track.refine_songname(track.content.title)
track_filename = track_title + const.args.output_ext
track_download_path = os.path.join(const.args.folder, track_filename)
print(track_filename) | [
"os.path.join",
"spotdl.handle.get_arguments",
"spotdl.downloader.Downloader"
] | [((117, 152), 'spotdl.handle.get_arguments', 'handle.get_arguments', ([], {'to_group': '(True)'}), '(to_group=True)\n', (137, 152), False, 'from spotdl import handle\n'), ((162, 212), 'spotdl.downloader.Downloader', 'downloader.Downloader', ([], {'raw_song': 'const.args.song[0]'}), '(raw_song=const.args.song[0])\n', (183, 212), False, 'from spotdl import downloader\n'), ((346, 393), 'os.path.join', 'os.path.join', (['const.args.folder', 'track_filename'], {}), '(const.args.folder, track_filename)\n', (358, 393), False, 'import os\n')] |
"""Tests for the cursor module."""
from __future__ import absolute_import
from . import common
from bearfield import cursor, Document, Field, Query
class TestCursor(common.TestCase):
"""Test the Cursor class."""
class Document(Document):
class Meta:
connection = 'test'
index = Field(int)
name = Field(str)
def setUp(self):
super(TestCursor, self).setUp()
self.collection = self.connection['cursor']
self.docs = [
{'index': 1, 'name': 'first'},
{'index': 2, 'name': 'second'},
]
for doc in self.docs:
doc['_id'] = self.collection.insert(doc)
def test_connection(self):
"""Cursor.connection"""
cur = cursor.Cursor(self.Document(), self.collection, None, None, False)
self.assertEqual(cur.connection, self.connection, "cursor connection is incorrect")
def test_find(self):
"""Cursor.find"""
q1 = Query({'index': 1})
q2 = Query({'name': 'first'})
qr = q1 & q2
cur = cursor.Cursor(self.Document(), self.collection, q1, None, False)
cur = cur.find(q2)
self.assertEqual(cur.query.criteria, qr.criteria, "cursor has invalid criteria")
def test_getitem(self):
"""Cursor.__getitem___"""
cur = cursor.Cursor(self.Document(), self.collection, {'index': 1}, None, False)
doc = cur[0]
have = doc._encode()
want = {'_id': doc._id}
want.update(self.docs[0])
self.assertEqual(have, want, "returned document is incorrect")
def test_iter(self):
"""Cursor.__iter__"""
cur = cursor.Cursor(self.Document(), self.collection, {'index': 1}, None, False)
it = cur.__iter__()
self.assertIsInstance(it, cursor.Cursor, "returned value has invalid type")
def test_close(self):
"""Cursor.close"""
cur = cursor.Cursor(self.Document(), self.collection, {'index': 1}, None, False)
cur.close()
cur.count()
cur.close()
| [
"bearfield.Field",
"bearfield.Query"
] | [((317, 327), 'bearfield.Field', 'Field', (['int'], {}), '(int)\n', (322, 327), False, 'from bearfield import cursor, Document, Field, Query\n'), ((343, 353), 'bearfield.Field', 'Field', (['str'], {}), '(str)\n', (348, 353), False, 'from bearfield import cursor, Document, Field, Query\n'), ((973, 992), 'bearfield.Query', 'Query', (["{'index': 1}"], {}), "({'index': 1})\n", (978, 992), False, 'from bearfield import cursor, Document, Field, Query\n'), ((1006, 1030), 'bearfield.Query', 'Query', (["{'name': 'first'}"], {}), "({'name': 'first'})\n", (1011, 1030), False, 'from bearfield import cursor, Document, Field, Query\n')] |
from typing import List, Union
import numpy as np
from .Figure import Figure
def create_position_pdf_plot(*, start_time_sec: np.float32, sampling_frequency: np.float32, pdf: np.ndarray, label: str):
# Nt = pdf.shape[0]
# Np = pdf.shape[1]
A = pdf
B = A / np.reshape(np.repeat(np.max(A, axis=1), A.shape[1]), A.shape)
B = (B * 100).astype(np.uint8)
data = {
'type': 'PositionPdfPlot',
'pdf': B,
'samplingFrequency': sampling_frequency,
'startTimeSec': start_time_sec
}
return Figure(
data=data,
label=label
)
def create_live_position_pdf_plot(*, start_time_sec: np.float32, end_time_sec: np.float32, sampling_frequency: np.float32, num_positions: int, pdf_object: dict, segment_size: int, multiscale_factor: int, label: str):
data = {
'type': 'LivePositionPdfPlot',
'pdfObject': pdf_object,
'startTimeSec': start_time_sec,
'endTimeSec': end_time_sec,
'numPositions': num_positions,
'samplingFrequency': sampling_frequency,
'segmentSize': segment_size,
'multiscaleFactor': multiscale_factor
}
return Figure(
data=data,
label=label
)
# def _get_subsample_inds(timestamps: np.array, sampling_frequency: float):
# dt = 1 / sampling_frequency
# ret = []
# last_t = timestamps[0] - dt * 2
# for i in range(len(timestamps)):
# delta = timestamps[i] - last_t
# if delta >= dt * 0.95:
# ret.append(i)
# last_t = timestamps[i]
# return ret
| [
"numpy.max"
] | [((294, 311), 'numpy.max', 'np.max', (['A'], {'axis': '(1)'}), '(A, axis=1)\n', (300, 311), True, 'import numpy as np\n')] |
# Connect files
from configs import *
# Arrays
items = []
selectedItems = []
# Interface arrays
buttons = []
surfaces = []
# Getting item
def getItemById(ident):
for item in items:
if item.id == ident:
return item
# Removing item
def removeItem(item):
items.remove(item)
# Removing items
def removeItems():
for item in selectedItems:
items.remove(item)
clearSelection()
# Adding items in selection items
def addSelection(item):
item.selected = True
selectedItems.append(item)
# Clear selected items
def clearSelection():
for item in items:
item.selected = False
selectedItems.clear()
# Clear buttons
def clearButtons():
buttons.clear()
# Clear surfaces
def clearSurfaces():
surfaces.clear()
# Import templates
from templates import Worker
# Adding item
def addItem(case, counter, x, y, faction):
if(case == "worker"):
item = Worker(counter, x, y, faction)
if(case == "soldier"):
item = Soldier(counter, x, y, faction)
items.append(item)
| [
"templates.Worker"
] | [((859, 889), 'templates.Worker', 'Worker', (['counter', 'x', 'y', 'faction'], {}), '(counter, x, y, faction)\n', (865, 889), False, 'from templates import Worker\n')] |
import click
import pathlib
import os
from kneejerk.image_server import score_images_in_dir
from kneejerk.data.saver import persist_scores, persist_metadata
from kneejerk.data.transfer import segment_data_from_csv, transfer_normalized_image_data
from kneejerk.data.utils import _get_classes, _get_max_image_dim, _ensure_path_exists
@click.group()
@click.pass_context
def main(ctx):
ctx.obj = dict()
@main.command(help='Cycle through a directory and score images')
@click.option('--input_dir', '-i', help='Location of the images.',
default='.')
@click.option('--output_dir', '-o', help='Location to output .csv file.',
default='.')
@click.option('--shuffle', '-s', help='Shuffle served image order',
default=1)
@click.option('--file-name', '-f', help='Name of .csv file',
default='output.csv')
@click.option('--min', 'min_', help='Minimum acceptable score', default='0')
@click.option('--max', 'max_', help='Maximum acceptable score', default='1')
@click.option('--limit', '-l', help="Limit the number of images to serve")
@click.pass_context
def score(ctx, output_dir, input_dir, file_name, shuffle, min_, max_, limit):
ctx.obj['min_val'] = min_
ctx.obj['max_val'] = max_
if limit:
ctx.obj['limit'] = int(limit)
if file_name[-4:] != '.csv':
file_name += '.csv'
input_dir = pathlib.Path(input_dir).resolve()
output_dir = pathlib.Path(output_dir).resolve()
click.echo(f'Input dir {input_dir}')
click.echo(f'Output dir {output_dir}')
output_path = output_dir.joinpath(file_name)
fpaths, scores = score_images_in_dir(input_dir, shuffle_files=shuffle)
# bit of helpful error handling if user doesn't provide any images
for val in os.listdir(input_dir):
if val[-3:].lower() in ['png', 'jpg']:
break
else:
print("\n\nDidn't find image at directory:", input_dir)
persist_scores(fpaths, scores, output_path)
@main.command(help='Use a kneejerk-generated csv to organize your files')
@click.option('--file_name', '-f', help='Name of .csv file', required=True)
@click.option('--consider_size', '-c', help='Consider the size of the images',
default=0)
@click.option('--rescale_len', '-r', help='Height/width to rescale the data to',
default=200)
@click.option('--trainpct', help='Percentage of data to train on',
default=.70)
@click.option('--testpct', help='Percentage of data to test on',
default=.20)
@click.option('--valpct', help='Percentage of data to validate on',
default=.10)
@click.pass_context
def transfer(ctx, file_name, consider_size, rescale_len, trainpct, testpct, valpct):
ctx.obj['file_name'] = file_name
ctx.obj['consider_size'] = consider_size
ctx.obj['rescale_len'] = rescale_len
ctx.obj['max_image_dim'] = _get_max_image_dim(file_name)
dirname = file_name[:-4]
ctx.obj['dirname'] = dirname
classes = _get_classes(file_name)
data_splits = ['train', 'test']
if valpct:
data_splits += ['val']
for split in data_splits:
for class_ in classes:
_ensure_path_exists(os.path.join(dirname, split, class_))
train, test, cross_val = segment_data_from_csv(trainpct, testpct, valpct)
transfer_normalized_image_data(train, 'train')
transfer_normalized_image_data(test, 'test')
if valpct:
transfer_normalized_image_data(cross_val, 'val')
persist_metadata()
if __name__ == '__main__':
main()
| [
"os.listdir",
"pathlib.Path",
"click.group",
"click.option",
"os.path.join",
"click.echo",
"kneejerk.data.utils._get_classes",
"kneejerk.data.utils._get_max_image_dim",
"kneejerk.image_server.score_images_in_dir",
"kneejerk.data.saver.persist_metadata",
"kneejerk.data.saver.persist_scores",
"k... | [((336, 349), 'click.group', 'click.group', ([], {}), '()\n', (347, 349), False, 'import click\n'), ((474, 552), 'click.option', 'click.option', (['"""--input_dir"""', '"""-i"""'], {'help': '"""Location of the images."""', 'default': '"""."""'}), "('--input_dir', '-i', help='Location of the images.', default='.')\n", (486, 552), False, 'import click\n'), ((568, 657), 'click.option', 'click.option', (['"""--output_dir"""', '"""-o"""'], {'help': '"""Location to output .csv file."""', 'default': '"""."""'}), "('--output_dir', '-o', help='Location to output .csv file.',\n default='.')\n", (580, 657), False, 'import click\n'), ((669, 746), 'click.option', 'click.option', (['"""--shuffle"""', '"""-s"""'], {'help': '"""Shuffle served image order"""', 'default': '(1)'}), "('--shuffle', '-s', help='Shuffle served image order', default=1)\n", (681, 746), False, 'import click\n'), ((762, 848), 'click.option', 'click.option', (['"""--file-name"""', '"""-f"""'], {'help': '"""Name of .csv file"""', 'default': '"""output.csv"""'}), "('--file-name', '-f', help='Name of .csv file', default=\n 'output.csv')\n", (774, 848), False, 'import click\n'), ((859, 934), 'click.option', 'click.option', (['"""--min"""', '"""min_"""'], {'help': '"""Minimum acceptable score"""', 'default': '"""0"""'}), "('--min', 'min_', help='Minimum acceptable score', default='0')\n", (871, 934), False, 'import click\n'), ((936, 1011), 'click.option', 'click.option', (['"""--max"""', '"""max_"""'], {'help': '"""Maximum acceptable score"""', 'default': '"""1"""'}), "('--max', 'max_', help='Maximum acceptable score', default='1')\n", (948, 1011), False, 'import click\n'), ((1013, 1086), 'click.option', 'click.option', (['"""--limit"""', '"""-l"""'], {'help': '"""Limit the number of images to serve"""'}), "('--limit', '-l', help='Limit the number of images to serve')\n", (1025, 1086), False, 'import click\n'), ((2048, 2122), 'click.option', 'click.option', (['"""--file_name"""', '"""-f"""'], {'help': 
'"""Name of .csv file"""', 'required': '(True)'}), "('--file_name', '-f', help='Name of .csv file', required=True)\n", (2060, 2122), False, 'import click\n'), ((2124, 2217), 'click.option', 'click.option', (['"""--consider_size"""', '"""-c"""'], {'help': '"""Consider the size of the images"""', 'default': '(0)'}), "('--consider_size', '-c', help=\n 'Consider the size of the images', default=0)\n", (2136, 2217), False, 'import click\n'), ((2228, 2325), 'click.option', 'click.option', (['"""--rescale_len"""', '"""-r"""'], {'help': '"""Height/width to rescale the data to"""', 'default': '(200)'}), "('--rescale_len', '-r', help=\n 'Height/width to rescale the data to', default=200)\n", (2240, 2325), False, 'import click\n'), ((2336, 2414), 'click.option', 'click.option', (['"""--trainpct"""'], {'help': '"""Percentage of data to train on"""', 'default': '(0.7)'}), "('--trainpct', help='Percentage of data to train on', default=0.7)\n", (2348, 2414), False, 'import click\n'), ((2430, 2506), 'click.option', 'click.option', (['"""--testpct"""'], {'help': '"""Percentage of data to test on"""', 'default': '(0.2)'}), "('--testpct', help='Percentage of data to test on', default=0.2)\n", (2442, 2506), False, 'import click\n'), ((2522, 2601), 'click.option', 'click.option', (['"""--valpct"""'], {'help': '"""Percentage of data to validate on"""', 'default': '(0.1)'}), "('--valpct', help='Percentage of data to validate on', default=0.1)\n", (2534, 2601), False, 'import click\n'), ((1467, 1503), 'click.echo', 'click.echo', (['f"""Input dir {input_dir}"""'], {}), "(f'Input dir {input_dir}')\n", (1477, 1503), False, 'import click\n'), ((1508, 1546), 'click.echo', 'click.echo', (['f"""Output dir {output_dir}"""'], {}), "(f'Output dir {output_dir}')\n", (1518, 1546), False, 'import click\n'), ((1619, 1672), 'kneejerk.image_server.score_images_in_dir', 'score_images_in_dir', (['input_dir'], {'shuffle_files': 'shuffle'}), '(input_dir, shuffle_files=shuffle)\n', (1638, 1672), False, 'from 
kneejerk.image_server import score_images_in_dir\n'), ((1760, 1781), 'os.listdir', 'os.listdir', (['input_dir'], {}), '(input_dir)\n', (1770, 1781), False, 'import os\n'), ((1927, 1970), 'kneejerk.data.saver.persist_scores', 'persist_scores', (['fpaths', 'scores', 'output_path'], {}), '(fpaths, scores, output_path)\n', (1941, 1970), False, 'from kneejerk.data.saver import persist_scores, persist_metadata\n'), ((2875, 2904), 'kneejerk.data.utils._get_max_image_dim', '_get_max_image_dim', (['file_name'], {}), '(file_name)\n', (2893, 2904), False, 'from kneejerk.data.utils import _get_classes, _get_max_image_dim, _ensure_path_exists\n'), ((2983, 3006), 'kneejerk.data.utils._get_classes', '_get_classes', (['file_name'], {}), '(file_name)\n', (2995, 3006), False, 'from kneejerk.data.utils import _get_classes, _get_max_image_dim, _ensure_path_exists\n'), ((3252, 3300), 'kneejerk.data.transfer.segment_data_from_csv', 'segment_data_from_csv', (['trainpct', 'testpct', 'valpct'], {}), '(trainpct, testpct, valpct)\n', (3273, 3300), False, 'from kneejerk.data.transfer import segment_data_from_csv, transfer_normalized_image_data\n'), ((3306, 3352), 'kneejerk.data.transfer.transfer_normalized_image_data', 'transfer_normalized_image_data', (['train', '"""train"""'], {}), "(train, 'train')\n", (3336, 3352), False, 'from kneejerk.data.transfer import segment_data_from_csv, transfer_normalized_image_data\n'), ((3357, 3401), 'kneejerk.data.transfer.transfer_normalized_image_data', 'transfer_normalized_image_data', (['test', '"""test"""'], {}), "(test, 'test')\n", (3387, 3401), False, 'from kneejerk.data.transfer import segment_data_from_csv, transfer_normalized_image_data\n'), ((3479, 3497), 'kneejerk.data.saver.persist_metadata', 'persist_metadata', ([], {}), '()\n', (3495, 3497), False, 'from kneejerk.data.saver import persist_scores, persist_metadata\n'), ((3425, 3473), 'kneejerk.data.transfer.transfer_normalized_image_data', 'transfer_normalized_image_data', (['cross_val', 
'"""val"""'], {}), "(cross_val, 'val')\n", (3455, 3473), False, 'from kneejerk.data.transfer import segment_data_from_csv, transfer_normalized_image_data\n'), ((1376, 1399), 'pathlib.Path', 'pathlib.Path', (['input_dir'], {}), '(input_dir)\n', (1388, 1399), False, 'import pathlib\n'), ((1427, 1451), 'pathlib.Path', 'pathlib.Path', (['output_dir'], {}), '(output_dir)\n', (1439, 1451), False, 'import pathlib\n'), ((3184, 3220), 'os.path.join', 'os.path.join', (['dirname', 'split', 'class_'], {}), '(dirname, split, class_)\n', (3196, 3220), False, 'import os\n')] |
import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob
# Must be in game root folder.
if not os.path.isfile('Ace7Game.exe'):
wait = input('Ace7Game.exe not found in this folder. Press any key to close...')
sys.exit(0)
# Get resolution from OS.
u32 = ctypes.windll.user32
u32.SetProcessDPIAware()
[res_w, res_h] = [u32.GetSystemMetrics(0), u32.GetSystemMetrics(1)]
res_y = 1080
res_x = res_w * (res_y / res_h)
# Get confirmation from user.
print('Your screen size appears to be ' + str(res_w) + 'x' + str(res_h) + '.')
prompt = ''
while prompt.lower() != 'y':
prompt = input('Is that correct? Y to continue, N to cancel:')
if prompt.lower() == 'n':
print('Canceled.')
sys.exit(0)
# Determine FOV hex value.
print('Determining FOV hex value...')
if res_x in [2560, 2304]: # This value is for 2560x1080, 2560x1200 monitors.
fov_hex = 'AA05333C'
elif res_x in [2580, 2322]: # This value is for 3440x1440, 3440x1600 monitors.
fov_hex = 'EDD1333C'
elif res_x in [3840, 3456]: # This value is for dual 16:9, 16:10 monitors.
fov_hex = 'FCCF653C'
elif res_x in [5760, 5184]: # This value is for triple 16:9, 16:10 monitors.
fov_hex = '707B8B3C'
elif res_x in [1920, 1728]: # This value is for single 16:9, 16:10 monitors.
fov_hex = '35FA0E3C'
else:
print('Unknown resolution or aspect ratio. Quitting.')
sys.exit(0)
# Back up the game exe.
print('Backing up the game exe...')
if not os.path.isfile('Ace7Game.exe_orig'):
shutil.copy2('Ace7Game.exe','Ace7Game.exe_orig')
# Overwrite FOV value in game exe.
print('Modifying the game exe...')
with open('Ace7Game.exe','rb+') as exe:
exe.seek(int('DD52E2A', 16)) # address to remove black bars
exe.write(binascii.a2b_hex('00'))
exe.seek(int('258B4D8', 16)) # address of field of view
exe.write(binascii.a2b_hex(fov_hex))
exe.close()
# Check for 3Dmigoto zip file.
print('Checking for 3Dmigoto zip file...')
tdm_regex = '3Dmigoto-*.zip'
tdm_list = glob.glob(tdm_regex)
if not tdm_list:
print('3Dmigoto zip file not found. Quitting.')
sys.exit(0)
tdm_zip = tdm_list[0]
tdm_dir = tdm_zip[:tdm_zip.rfind('.')]
# Unpack 3Dmigoto.
print('Unpacking ' + tdm_zip + '...')
zip_ref = zipfile.ZipFile(tdm_zip, 'r')
zip_ref.extractall(tdm_dir)
zip_ref.close()
# Copy files from x64 folder to game root folder.
print('Installing 3Dmigoto...')
for item in os.listdir(tdm_dir + '/x64'):
tdm_item = tdm_dir + '/x64/' + item
try:
if not os.path.exists(item):
shutil.copytree(tdm_item, item)
except:
if not os.path.exists(item):
shutil.copy2(tdm_item, item)
# Create Mods folder if it doesn't exist.
if not os.path.isdir('Mods'):
os.mkdir('Mods')
# Set up shader filenames.
github_url = 'https://raw.githubusercontent.com/mpm11011/ac7-ultrawide/master/'
hud_filename = '9958a636cbef5557-ps_replace.txt'
map_filename = 'e6f41464a78a35c4-ps_replace.txt'
char_filename = 'f355a6eae7adfe8e-ps_replace.txt'
map_m7_filename = '27f3e07e177ddf67-ps_replace.txt'
char_m7_filename = 'f904af6042b80b52-ps_replace.txt'
mp_hud_filename = '6dcdbf6042a8a27a-ps_replace.txt'
mp_pause_filename = 'c75a35eef5821976-ps_replace.txt'
mp_map_filename = 'ec51646d13b1fd16-ps_replace.txt'
subtitles_filename = 'da86a094e768f000-vs_replace.txt'
subtitles_hud_checker = 'hudtextfix.ini'
# Download shaders.
print('Downloading shader files...')
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + hud_filename, 'ShaderFixes/' + hud_filename)
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + map_filename, 'ShaderFixes/' + map_filename)
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + char_filename, 'ShaderFixes/' + char_filename)
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + map_m7_filename, 'ShaderFixes/' + map_m7_filename)
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + char_m7_filename, 'ShaderFixes/' + char_m7_filename)
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + mp_hud_filename, 'ShaderFixes/' + mp_hud_filename)
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + mp_pause_filename, 'ShaderFixes/' + mp_pause_filename)
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + mp_map_filename, 'ShaderFixes/' + mp_map_filename)
urllib.request.urlretrieve(github_url + 'ShaderFixes/' + subtitles_filename, 'ShaderFixes/' + subtitles_filename)
urllib.request.urlretrieve(github_url + 'Mods/' + subtitles_hud_checker, 'Mods/' + subtitles_hud_checker)
# Modify shader fix for resolution width.
print('Modifying shader files for resolution...')
delta_x = (res_x - 1920) / 3840 # divide by 1920, then divide by 2.
delta_x = round(delta_x, 4)
with open('ShaderFixes/' + hud_filename,'r+') as hud_file:
hud_file.seek(769) # number of bytes to line needing change
hud_file.write(' r1.x -= ' + str(delta_x) + ';')
hud_file.close()
with open('ShaderFixes/' + map_filename,'r+') as map_file:
map_file.seek(1035) # number of bytes to line needing change
map_file.write(' r0.x -= ' + str(delta_x) + ';')
map_file.close()
with open('ShaderFixes/' + char_filename,'r+') as char_file:
char_file.seek(1035) # number of bytes to line needing change
char_file.write(' r0.x -= ' + str(delta_x) + ';')
char_file.close()
with open('ShaderFixes/' + map_m7_filename,'r+') as map_m7_file:
map_m7_file.seek(1038) # number of bytes to line needing change
map_m7_file.write(' r1.x -= ' + str(delta_x) + ';')
map_m7_file.close()
with open('ShaderFixes/' + char_m7_filename,'r+') as char_m7_file:
char_m7_file.seek(1038) # number of bytes to line needing change
char_m7_file.write(' r1.x -= ' + str(delta_x) + ';')
char_m7_file.close()
with open('ShaderFixes/' + mp_hud_filename,'r+') as mp_hud_file:
mp_hud_file.seek(769) # number of bytes to line needing change
mp_hud_file.write(' r1.x -= ' + str(delta_x) + ';')
mp_hud_file.close()
with open('ShaderFixes/' + mp_pause_filename,'r+') as mp_pause_file:
mp_pause_file.seek(1108) # number of bytes to line needing change
mp_pause_file.write(' r0.x -= ' + str(delta_x) + ';')
mp_pause_file.close()
with open('ShaderFixes/' + mp_map_filename,'r+') as mp_map_file:
mp_map_file.seek(1108) # number of bytes to line needing change
mp_map_file.write(' r0.x -= ' + str(delta_x) + ';')
mp_map_file.close()
# Modifying subtitles fix for resolution width.
delta_o = 1 - ((16/9) * (res_h/res_w))
delta_o = round(delta_o, 4)
with open('ShaderFixes/' + subtitles_filename,'r+') as subtitles_file:
subtitles_file.seek(1368) # number of bytes to line needing change
subtitles_file.write(' o0.x+=' + str(delta_o) + ';')
subtitles_file.close()
# Disable shader hunting and enable Mods folder in config file.
print('Modifying d3dx.ini...')
with open('d3dx.ini','r+') as ini:
ini_data = ini.read()
ini.close()
ini_data = ini_data.replace(';include_recursive = Mods','include_recursive = Mods')
ini_data = ini_data.replace('hunting=1','hunting=0')
with open('d3dx.ini','w') as ini:
ini.write(ini_data);
ini.close()
wait = input('Script complete. Press any key to close.')
| [
"os.path.exists",
"os.listdir",
"zipfile.ZipFile",
"shutil.copy2",
"binascii.a2b_hex",
"os.path.isfile",
"shutil.copytree",
"os.path.isdir",
"os.mkdir",
"sys.exit",
"glob.glob"
] | [((2031, 2051), 'glob.glob', 'glob.glob', (['tdm_regex'], {}), '(tdm_regex)\n', (2040, 2051), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((2268, 2297), 'zipfile.ZipFile', 'zipfile.ZipFile', (['tdm_zip', '"""r"""'], {}), "(tdm_zip, 'r')\n", (2283, 2297), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((2437, 2465), 'os.listdir', 'os.listdir', (["(tdm_dir + '/x64')"], {}), "(tdm_dir + '/x64')\n", (2447, 2465), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((117, 147), 'os.path.isfile', 'os.path.isfile', (['"""Ace7Game.exe"""'], {}), "('Ace7Game.exe')\n", (131, 147), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((238, 249), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (246, 249), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((1489, 1524), 'os.path.isfile', 'os.path.isfile', (['"""Ace7Game.exe_orig"""'], {}), "('Ace7Game.exe_orig')\n", (1503, 1524), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((1530, 1579), 'shutil.copy2', 'shutil.copy2', (['"""Ace7Game.exe"""', '"""Ace7Game.exe_orig"""'], {}), "('Ace7Game.exe', 'Ace7Game.exe_orig')\n", (1542, 1579), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((2126, 2137), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2134, 2137), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((2742, 2763), 'os.path.isdir', 'os.path.isdir', (['"""Mods"""'], {}), "('Mods')\n", (2755, 2763), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((2769, 2785), 'os.mkdir', 'os.mkdir', (['"""Mods"""'], {}), "('Mods')\n", (2777, 2785), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((727, 738), 'sys.exit', 
'sys.exit', (['(0)'], {}), '(0)\n', (735, 738), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((1769, 1791), 'binascii.a2b_hex', 'binascii.a2b_hex', (['"""00"""'], {}), "('00')\n", (1785, 1791), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((1868, 1893), 'binascii.a2b_hex', 'binascii.a2b_hex', (['fov_hex'], {}), '(fov_hex)\n', (1884, 1893), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((2536, 2556), 'os.path.exists', 'os.path.exists', (['item'], {}), '(item)\n', (2550, 2556), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((2570, 2601), 'shutil.copytree', 'shutil.copytree', (['tdm_item', 'item'], {}), '(tdm_item, item)\n', (2585, 2601), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((2629, 2649), 'os.path.exists', 'os.path.exists', (['item'], {}), '(item)\n', (2643, 2649), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((2663, 2691), 'shutil.copy2', 'shutil.copy2', (['tdm_item', 'item'], {}), '(tdm_item, item)\n', (2675, 2691), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n'), ((1409, 1420), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1417, 1420), False, 'import sys, os, shutil, binascii, urllib.request, zipfile, ctypes, math, glob\n')] |
# Tkinter is Python's de-facto standard GUI (Graphical User Interface) package.
import tkinter as tk
import keras as kr
import numpy as np
import matplotlib.pyplot as plt
import math
import sklearn.preprocessing as pre
import gzip
import PIL
from PIL import Image, ImageDraw
import os.path
width = 280
height = 280
center = height//2
white = (255, 255, 255)
black = (0,0,0)
def testImage(img):
global result, model
img = np.array(list(img)).reshape(1,784)
result.config(text='You Wrote the Number '+str(model.predict_classes(img)))
def nueralNet():
# global variables - in place of static variables
global model
# Read in images for training
with gzip.open('data/train-images-idx3-ubyte.gz', 'rb') as f:
train_img = f.read()
# read in labels for training
with gzip.open('data/train-labels-idx1-ubyte.gz', 'rb') as f:
train_lbl = f.read()
with gzip.open('data/t10k-images-idx3-ubyte.gz', 'rb') as f:
test_img = f.read()
with gzip.open('data/t10k-labels-idx1-ubyte.gz', 'rb') as f:
test_lbl = f.read()
# Add a hidden layer with 1000 neurons and an input layer with 784.
model.add(kr.layers.Dense(512, input_dim=784, activation="relu", kernel_initializer="normal"))
model.add(kr.layers.Dense(10, activation="softmax", kernel_initializer="normal"))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# reshape the images and labels.
train_img = ~np.array(list(train_img[16:])).reshape(60000, 1, 784).astype(np.uint8)
train_lbl = np.array(list(train_lbl[ 8:])).astype(np.uint8)
train_img = train_img/ 255
train_lbl = kr.utils.to_categorical(train_lbl)
# reshape the image array
inputs = train_img.reshape(60000, 784)
# Binarize labels in a one-vs-all fashion
encoder = pre.LabelBinarizer()
# Trains the model for a fixed number of epochs (iterations on a dataset).
encoder.fit(train_lbl)
outputs = encoder.transform(train_lbl)
# Train the model
model.fit(inputs, outputs, epochs=150, batch_size=100)
test_img = ~np.array(list(test_img[16:])).reshape(10000, 784).astype(np.uint8) / 255.0
test_lbl = np.array(list(test_lbl[ 8:])).astype(np.uint8)
saveModel()
def clearCanvas(event):
# global variables
global image1, draw
# clears the canvas seen by the user
cv.delete("all")
# clear the pillow image that is not seen by the user
image1 = PIL.Image.new("RGB", (width, height), black)
draw = ImageDraw.Draw(image1)
def save():
global image1
# resize the image so it matches the mnist data set conditions
img = image1.resize((28, 28), Image.BICUBIC)
# save the image
img.save("data/image.png")
# read back in the image,
# I chose to do it this way in case i wanted to give it an image
# or have the user do it
img = imageprepare('data/image.png')
# attempt to load the model data
loadModel()
# test our image
testImage(img)
def paint(event):
# creates a line using mouse events
x1, y1 = (event.x - 1), (event.y - 1)
x2, y2 = (event.x + 1), (event.y + 1)
# create a dot using these positions
# pillow image - not seen by user
cv.create_oval(x1, y1, x2, y2, fill="black",width=12)
# canvas image - seen by user
draw.line([x1, y1, x2, y2],fill="white",width=12)
def imageprepare(argv):
# read in an image and greyscale
im = Image.open(argv).convert('L')
# uncomment to view the incoming image
# im.show()
# get the data from the image
tv = list(im.getdata())
# normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.
tv = [(255 - x) * 1.0 / 255.0 for x in tv]
# return the image data
return tv
def saveModel():
global model
# save the current model
kr.models.save_model(
model,
"data/model.h5py",
overwrite=True,
include_optimizer=True
)
def loadModel():
global model
# if the model file exists load it
if os.path.isfile('data/model.h5py'):
model = kr.models.load_model('data/model.h5py')
else:
# if the file doesnt exist
# start the nueral network training curremntly set to 150 epochs
nueralNet()
# Start a neural network, building it by layers.
# using sequential model
model = kr.models.Sequential()
# new pillow image that later will be saved
image1 = PIL.Image.new("RGB", (width, height), black)
draw = ImageDraw.Draw(image1)
# set the position of the windows
root = tk.Tk()
root.geometry("+{xPos}+{yPos}".format(xPos = 0, yPos = 0))
# Tkinter create a canvas to draw on
cv = tk.Canvas(root, width=width, height=height, bg='white')
# pack the gui
cv.pack()
# left click
cv.bind("<B1-Motion>", paint)
# right click
cv.bind('<Button-3>', clearCanvas)
# create text and buttons
button = tk.Button(text="Check Number",command=save)
text1 = tk.Label(text="Left Click Draw")
text2 = tk.Label(text="Right Click Clear")
result = tk.Label(text="You have not Checked a Number yet")
# pack the canvas
text1.pack()
text2.pack()
button.pack()
result.pack()
root.mainloop() | [
"sklearn.preprocessing.LabelBinarizer",
"PIL.Image.open",
"keras.models.load_model",
"gzip.open",
"PIL.Image.new",
"tkinter.Button",
"keras.models.Sequential",
"keras.utils.to_categorical",
"tkinter.Canvas",
"PIL.ImageDraw.Draw",
"tkinter.Tk",
"keras.models.save_model",
"tkinter.Label",
"k... | [((4411, 4433), 'keras.models.Sequential', 'kr.models.Sequential', ([], {}), '()\n', (4431, 4433), True, 'import keras as kr\n'), ((4488, 4532), 'PIL.Image.new', 'PIL.Image.new', (['"""RGB"""', '(width, height)', 'black'], {}), "('RGB', (width, height), black)\n", (4501, 4532), False, 'import PIL\n'), ((4540, 4562), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image1'], {}), '(image1)\n', (4554, 4562), False, 'from PIL import Image, ImageDraw\n'), ((4605, 4612), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (4610, 4612), True, 'import tkinter as tk\n'), ((4715, 4770), 'tkinter.Canvas', 'tk.Canvas', (['root'], {'width': 'width', 'height': 'height', 'bg': '"""white"""'}), "(root, width=width, height=height, bg='white')\n", (4724, 4770), True, 'import tkinter as tk\n'), ((4925, 4969), 'tkinter.Button', 'tk.Button', ([], {'text': '"""Check Number"""', 'command': 'save'}), "(text='Check Number', command=save)\n", (4934, 4969), True, 'import tkinter as tk\n'), ((4977, 5009), 'tkinter.Label', 'tk.Label', ([], {'text': '"""Left Click Draw"""'}), "(text='Left Click Draw')\n", (4985, 5009), True, 'import tkinter as tk\n'), ((5018, 5052), 'tkinter.Label', 'tk.Label', ([], {'text': '"""Right Click Clear"""'}), "(text='Right Click Clear')\n", (5026, 5052), True, 'import tkinter as tk\n'), ((5062, 5112), 'tkinter.Label', 'tk.Label', ([], {'text': '"""You have not Checked a Number yet"""'}), "(text='You have not Checked a Number yet')\n", (5070, 5112), True, 'import tkinter as tk\n'), ((1681, 1715), 'keras.utils.to_categorical', 'kr.utils.to_categorical', (['train_lbl'], {}), '(train_lbl)\n', (1704, 1715), True, 'import keras as kr\n'), ((1851, 1871), 'sklearn.preprocessing.LabelBinarizer', 'pre.LabelBinarizer', ([], {}), '()\n', (1869, 1871), True, 'import sklearn.preprocessing as pre\n'), ((2488, 2532), 'PIL.Image.new', 'PIL.Image.new', (['"""RGB"""', '(width, height)', 'black'], {}), "('RGB', (width, height), black)\n", (2501, 2532), False, 'import PIL\n'), ((2544, 2566), 
'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image1'], {}), '(image1)\n', (2558, 2566), False, 'from PIL import Image, ImageDraw\n'), ((3885, 3975), 'keras.models.save_model', 'kr.models.save_model', (['model', '"""data/model.h5py"""'], {'overwrite': '(True)', 'include_optimizer': '(True)'}), "(model, 'data/model.h5py', overwrite=True,\n include_optimizer=True)\n", (3905, 3975), True, 'import keras as kr\n'), ((688, 738), 'gzip.open', 'gzip.open', (['"""data/train-images-idx3-ubyte.gz"""', '"""rb"""'], {}), "('data/train-images-idx3-ubyte.gz', 'rb')\n", (697, 738), False, 'import gzip\n'), ((818, 868), 'gzip.open', 'gzip.open', (['"""data/train-labels-idx1-ubyte.gz"""', '"""rb"""'], {}), "('data/train-labels-idx1-ubyte.gz', 'rb')\n", (827, 868), False, 'import gzip\n'), ((914, 963), 'gzip.open', 'gzip.open', (['"""data/t10k-images-idx3-ubyte.gz"""', '"""rb"""'], {}), "('data/t10k-images-idx3-ubyte.gz', 'rb')\n", (923, 963), False, 'import gzip\n'), ((1008, 1057), 'gzip.open', 'gzip.open', (['"""data/t10k-labels-idx1-ubyte.gz"""', '"""rb"""'], {}), "('data/t10k-labels-idx1-ubyte.gz', 'rb')\n", (1017, 1057), False, 'import gzip\n'), ((1179, 1267), 'keras.layers.Dense', 'kr.layers.Dense', (['(512)'], {'input_dim': '(784)', 'activation': '"""relu"""', 'kernel_initializer': '"""normal"""'}), "(512, input_dim=784, activation='relu', kernel_initializer=\n 'normal')\n", (1194, 1267), True, 'import keras as kr\n'), ((1279, 1349), 'keras.layers.Dense', 'kr.layers.Dense', (['(10)'], {'activation': '"""softmax"""', 'kernel_initializer': '"""normal"""'}), "(10, activation='softmax', kernel_initializer='normal')\n", (1294, 1349), True, 'import keras as kr\n'), ((4149, 4188), 'keras.models.load_model', 'kr.models.load_model', (['"""data/model.h5py"""'], {}), "('data/model.h5py')\n", (4169, 4188), True, 'import keras as kr\n'), ((3492, 3508), 'PIL.Image.open', 'Image.open', (['argv'], {}), '(argv)\n', (3502, 3508), False, 'from PIL import Image, ImageDraw\n')] |
# -*- coding: utf-8 -*-
"""
Summarise Sound Scattering Layers (SSLs)
@author: <NAME>
"""
## import packages
import matplotlib.pyplot as plt
import gzip
import pickle
import numpy as np
from pyechoplot.plotting import plot_pseudo_SSL, save_png_plot, plot_Sv
## import pyechometrics modules
from pyechometrics.metrics import stats, dims, nasc
## get Sv data and mask
def get_obj(filepath):
    """Load an object from a gzip-compressed pickle file.

    Args:
        filepath (str): path to a gzip-compressed pickle (e.g. '.pklz') file.

    Returns:
        The unpickled object (here: Sv echogram arrays or mask arrays).
    """
    # 'with' guarantees the file handle is closed even if unpickling raises,
    # unlike the open/close pair it replaces.
    with gzip.open(filepath, 'rb') as f:
        # encoding='bytes' keeps pickles written under Python 2 loadable here
        return pickle.load(f, encoding='bytes')
## noise level used by the upstream processing (defined for reference;
## not referenced again in the remainder of this script)
noise_level = -999
## read 18 kHz volume backscattering strength (Sv) echogram
Sv18 = get_obj('./data/PS_Sv18.pklz')
## get SSL mask - see 'ident_SSLs' example in pyechomask
Sv18mask = get_obj('./data/SSL_flag_mask_18.pklz')
## plot raw echogram (top) and masked echogram (bottom) side by side
plt.figure(1)
plt.subplot(211)
plot_Sv(Sv18)
plt.subplot(212)
plot_Sv(Sv18,mask = Sv18mask)
plt.title('SSL identification - 18 kHz echosounder data')
plt.show()
## sample interval in meters for this echogram
sample_int = 0.2 ## in meters
## calculate NASC (Nautical Area Scattering Coefficient) over all SSLs
NASC = nasc(Sv18, sample_int, mask = Sv18mask)
## plot NASC by ping
plt.plot(NASC)
plt.xlabel('ping')
plt.ylabel(r'NASC $m^2nmi^{-2}$')
plt.title('NASC values for SSLs')
plt.show()
## save plot (uncomment to write the PNG)
#save_png_plot('./','NASCexampleWiki')
## make binary mask for a single sound scattering layer (SSL) (Sv18mask == 2)
SSLmask = np.zeros(Sv18mask.shape)
SSLmask[Sv18mask == 2] = 1
## get SSL stats and dimensions for the selected layer
SSL_mean, SSL_median, SSL_std, n = stats(Sv18, mask = SSLmask)
mean_row, mean_height, mean_col, mean_length = dims(Sv18, mask = SSLmask)
## convert row indices to depth using the vertical sample interval
mean_depth = mean_row * sample_int
mean_height = mean_height * sample_int
## plot a pseudo SSL using metrics
## *assume single normal distribution
plot_pseudo_SSL(SSL_mean,SSL_std,mean_height,mean_depth)
plt.ylabel('depth (m)')
plt.xlabel('pings')
plt.title('pseudo DSL produced using summary metrics',fontsize = 16)
plt.show()
## save plot (uncomment to write the PNG)
#save_png_plot('./','exampleWiki')
| [
"matplotlib.pyplot.ylabel",
"gzip.open",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"pyechometrics.metrics.stats",
"pickle.load",
"pyechometrics.metrics.nasc",
"pyechometrics.metrics.dims",
"pyechoplot.plotting.plot_Sv",
"matplotlib.pyplot.figure",
"numpy.zeros",
"pyechoplot.plottin... | [((703, 716), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (713, 716), True, 'import matplotlib.pyplot as plt\n'), ((717, 733), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (728, 733), True, 'import matplotlib.pyplot as plt\n'), ((734, 747), 'pyechoplot.plotting.plot_Sv', 'plot_Sv', (['Sv18'], {}), '(Sv18)\n', (741, 747), False, 'from pyechoplot.plotting import plot_pseudo_SSL, save_png_plot, plot_Sv\n'), ((748, 764), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (759, 764), True, 'import matplotlib.pyplot as plt\n'), ((765, 793), 'pyechoplot.plotting.plot_Sv', 'plot_Sv', (['Sv18'], {'mask': 'Sv18mask'}), '(Sv18, mask=Sv18mask)\n', (772, 793), False, 'from pyechoplot.plotting import plot_pseudo_SSL, save_png_plot, plot_Sv\n'), ((795, 852), 'matplotlib.pyplot.title', 'plt.title', (['"""SSL identification - 18 kHz echosounder data"""'], {}), "('SSL identification - 18 kHz echosounder data')\n", (804, 852), True, 'import matplotlib.pyplot as plt\n'), ((853, 863), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (861, 863), True, 'import matplotlib.pyplot as plt\n'), ((988, 1025), 'pyechometrics.metrics.nasc', 'nasc', (['Sv18', 'sample_int'], {'mask': 'Sv18mask'}), '(Sv18, sample_int, mask=Sv18mask)\n', (992, 1025), False, 'from pyechometrics.metrics import stats, dims, nasc\n'), ((1050, 1064), 'matplotlib.pyplot.plot', 'plt.plot', (['NASC'], {}), '(NASC)\n', (1058, 1064), True, 'import matplotlib.pyplot as plt\n'), ((1065, 1083), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ping"""'], {}), "('ping')\n", (1075, 1083), True, 'import matplotlib.pyplot as plt\n'), ((1084, 1116), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""NASC $m^2nmi^{-2}$"""'], {}), "('NASC $m^2nmi^{-2}$')\n", (1094, 1116), True, 'import matplotlib.pyplot as plt\n'), ((1118, 1151), 'matplotlib.pyplot.title', 'plt.title', (['"""NASC values for SSLs"""'], {}), "('NASC values for SSLs')\n", 
(1127, 1151), True, 'import matplotlib.pyplot as plt\n'), ((1152, 1162), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1160, 1162), True, 'import matplotlib.pyplot as plt\n'), ((1323, 1347), 'numpy.zeros', 'np.zeros', (['Sv18mask.shape'], {}), '(Sv18mask.shape)\n', (1331, 1347), True, 'import numpy as np\n'), ((1455, 1480), 'pyechometrics.metrics.stats', 'stats', (['Sv18'], {'mask': 'SSLmask'}), '(Sv18, mask=SSLmask)\n', (1460, 1480), False, 'from pyechometrics.metrics import stats, dims, nasc\n'), ((1530, 1554), 'pyechometrics.metrics.dims', 'dims', (['Sv18'], {'mask': 'SSLmask'}), '(Sv18, mask=SSLmask)\n', (1534, 1554), False, 'from pyechometrics.metrics import stats, dims, nasc\n'), ((1730, 1789), 'pyechoplot.plotting.plot_pseudo_SSL', 'plot_pseudo_SSL', (['SSL_mean', 'SSL_std', 'mean_height', 'mean_depth'], {}), '(SSL_mean, SSL_std, mean_height, mean_depth)\n', (1745, 1789), False, 'from pyechoplot.plotting import plot_pseudo_SSL, save_png_plot, plot_Sv\n'), ((1787, 1810), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""depth (m)"""'], {}), "('depth (m)')\n", (1797, 1810), True, 'import matplotlib.pyplot as plt\n'), ((1813, 1832), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""pings"""'], {}), "('pings')\n", (1823, 1832), True, 'import matplotlib.pyplot as plt\n'), ((1833, 1900), 'matplotlib.pyplot.title', 'plt.title', (['"""pseudo DSL produced using summary metrics"""'], {'fontsize': '(16)'}), "('pseudo DSL produced using summary metrics', fontsize=16)\n", (1842, 1900), True, 'import matplotlib.pyplot as plt\n'), ((1902, 1912), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1910, 1912), True, 'import matplotlib.pyplot as plt\n'), ((402, 427), 'gzip.open', 'gzip.open', (['filepath', '"""rb"""'], {}), "(filepath, 'rb')\n", (411, 427), False, 'import gzip\n'), ((437, 469), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (448, 469), False, 'import pickle\n')] |
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: <NAME>
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2020 <NAME>
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
import chb.util.graphutil as UG
from typing import Any, Dict, List, Mapping, Optional, Set, Tuple, TYPE_CHECKING
from chb.util.DotGraph import DotGraph
if TYPE_CHECKING:
import chb.app.CfgBlock
import chb.app.Function
import chb.app.Instruction
class DotCfg:
    """Renders a function's control-flow graph (CFG) as a DotGraph.

    Nodes are basic blocks, edges are control-flow successors. The set of
    nodes actually drawn can be restricted to paths reaching a ``sink``
    block or to paths passing through the given ``segments``.
    """

    def __init__(
            self,
            graphname: str,
            fn: "chb.app.Function.Function",
            looplevelcolors: List[str] = [],  # node colors per loop-nesting level
            showpredicates: bool = False,  # show branch predicates on edges
            showcalls: bool = False,  # show call instrs on nodes
            showinstr_opcodes: bool = False,  # show all instrs on nodes
            showinstr_text: bool = False,  # show all instr annotations on nodes
            showstores: bool = False,  # show all STR and STRB and STRH instr annotations
            mips: bool = False,  # for mips subtract 4 from block end addr
            sink: Optional[str] = None,  # restrict paths to basic block destination
            segments: List[str] = [],  # restrict paths to include these basic blocks
            # replacement text for node and edge labels
            replacements: Dict[str, str] = {}) -> None:
        self.fn = fn
        self.graphname = graphname
        self.looplevelcolors = looplevelcolors
        self.showpredicates = showpredicates
        self.showcalls = showcalls
        self.showinstr_opcodes = showinstr_opcodes
        self.showinstr_text = showinstr_text
        self.showstores = showstores
        self.mips = mips
        self.sink = sink
        self.segments = segments
        self.replacements = replacements
        # basic blocks that survive the sink/segments restriction
        self.pathnodes: Set[str] = set([])
        self.dotgraph = DotGraph(graphname)

    def build(self) -> DotGraph:
        """Populate and return the DotGraph for the function's CFG.

        First determines which nodes to keep (sink restriction takes
        precedence over segments; otherwise all blocks), then adds nodes
        and edges.
        """
        if self.sink is not None:
            self.restrict_nodes(self.sink)
        elif len(self.segments) > 0:
            self.restrict_paths(self.segments)
        else:
            self.pathnodes = set(self.fn.cfg.blocks.keys())
        for n in self.fn.cfg.blocks:
            self.add_cfg_node(n)
        for e in self.fn.cfg.edges:
            self.add_cfg_edge(e)
        return self.dotgraph

    def restrict_nodes(self, sink: str) -> None:
        """Restrict pathnodes to blocks on some path from entry to ``sink``.

        Falls back to all blocks when the sink is unknown or no path exists.
        """
        nodes = self.fn.cfg.blocks
        edges = self.fn.cfg.edges  # adjacency list n -> [ n ]
        if sink not in nodes:
            print('Sink ' + sink + ' not found in nodes')
            self.pathnodes = set(nodes.keys())
            return
        g = UG.DirectedGraph(list(nodes.keys()), edges)
        g.find_paths(self.fn.faddr, sink)
        for p in g.paths:
            print('Path: ' + str(p))
            self.pathnodes = self.pathnodes.union(p)
        # no path found: show the full graph rather than an empty one
        if len(self.pathnodes) == 0:
            self.pathnodes = set(nodes.keys())

    def restrict_paths(self, segments: List[str]) -> None:
        """Restrict pathnodes to paths passing through ``segments`` in order.

        The function entry address is prepended, then paths are collected
        pairwise between consecutive segments. Falls back to all blocks when
        a segment is unknown or no path is found.
        """
        nodes = self.fn.cfg.blocks
        edges = self.fn.cfg.edges
        for b in segments:
            if b not in list(nodes.keys()):
                print('Segment ' + b + ' not found in nodes')
                self.pathnodes = set(nodes.keys())
                return
        segments = [self.fn.faddr] + segments
        g = UG.DirectedGraph(list(nodes.keys()), edges)
        for i in range(len(segments) - 1):
            src = segments[i]
            dst = segments[i+1]
            g.find_paths(src, dst)
        for p in g.paths:
            print('Path: ' + str(p))
            self.pathnodes = self.pathnodes.union(p)
        if len(self.pathnodes) == 0:
            self.pathnodes = set(nodes.keys())

    def get_branch_instruction(
            self,
            edge: str) -> "chb.app.Instruction.Instruction":
        """Return the (branch) instruction at the end of block ``edge``.

        Addresses starting with 'B' are context-qualified ('B:<ctxt>_<iaddr>');
        the instruction address part is adjusted by -4 on MIPS to account for
        the branch delay slot.
        """
        srcblock = self.fn.cfg.blocks[edge]
        instraddr = srcblock.lastaddr
        if instraddr.startswith('B'):
            ctxtaddr = instraddr[2:].split('_')
            iaddr_i = int(ctxtaddr[1], 16)
            if self.mips:
                iaddr_i -= 4  # delay slot
            instraddr = 'B:' + ctxtaddr[0] + '_' + hex(iaddr_i)
        else:
            instraddr_i = int(instraddr, 16)
            if self.mips:
                instraddr_i -= 4  # take into account delay slot
            instraddr = hex(instraddr_i)
        return self.fn.instruction(instraddr)

    def to_json(self) -> Dict[str, Any]:
        """Serialize the CFG as {'nodes': [...], 'edges': {src: {tgt: label}}}.

        Edge labels are the branch's fall-through/taken conditions where
        available, 'none' otherwise.
        """
        d: Dict[str, Any] = {}
        d['nodes'] = []
        d['edges'] = {}
        for n in self.fn.cfg.blocks:
            d['nodes'].append(str(n))
        for e in self.fn.cfg.edges:
            d['edges'][str(e)] = {}

            def default() -> None:
                # fallback: label every outgoing edge 'none'
                for tgt in self.fn.cfg.edges[e]:
                    d['edges'][str(e)][str(tgt)] = 'none'

            if len(self.fn.cfg.edges[e]) > 1:
                branchinstr = self.get_branch_instruction(e)
                if branchinstr.is_branch_instruction:
                    ftconditions = branchinstr.ft_conditions
                    if len(ftconditions) > 1:
                        # conditions align positionally with the successor list
                        for i, tgt in enumerate(self.fn.cfg.edges[e]):
                            d['edges'][str(e)][str(tgt)] = ftconditions[i]
                    else:
                        default()
                else:
                    default()
            else:
                default()
        return d

    def replace_text(self, txt: str) -> str:
        """Apply the user-supplied label replacements to ``txt``.

        Longest source strings are replaced first so shorter replacements
        cannot clobber substrings of longer ones.
        """
        result = txt
        for src in sorted(self.replacements, key=lambda x: len(x), reverse=True):
            result = result.replace(src, self.replacements[src])
        return result

    def add_cfg_node(self, n: str) -> None:
        """Add basic block ``n`` to the dotgraph (if it is on a kept path).

        The node label starts with the block address and, depending on the
        show* flags, appends opcodes, annotations, calls and/or stores
        ('\\n' is the dot-format line separator). Node color encodes loop
        nesting level when looplevelcolors is given.
        """
        if n not in self.pathnodes:
            return
        basicblock = self.fn.block(str(n))
        blocktxt = str(n)
        color = 'lightblue'
        if self.showinstr_opcodes:
            instrs = basicblock.instructions.values()
            pinstrs = [i.opcodetext for i in instrs]
            blocktxt = (
                blocktxt
                + "\\n"
                + "\\n".join(pinstrs))
        elif self.showinstr_text:
            instrs = basicblock.instructions.values()
            pinstrs = [i.annotation for i in instrs]
            blocktxt = (
                blocktxt
                + "\\n"
                + "\\n".join(pinstrs))
        elif self.showcalls or self.showstores:
            if self.showcalls:
                callinstrs = basicblock.call_instructions
                pcallinstrs = [i.annotation for i in callinstrs]
                print(' \n'.join([str(a) for a in pcallinstrs]))
                if len(callinstrs) > 0:
                    blocktxt = (
                        blocktxt
                        + '\\n'
                        + '\\n'.join(pcallinstrs))
            if self.showstores:
                storeinstrs = basicblock.store_instructions
                pstoreinstrs = [i.annotation for i in storeinstrs]
                print(' \n'.join([str(a) for a in pstoreinstrs]))
                if len(storeinstrs) > 0:
                    blocktxt = (
                        blocktxt
                        + "\\n"
                        + "\\n".join(pstoreinstrs))
        if len(self.looplevelcolors) > 0:
            looplevels = self.fn.cfg.loop_levels(n)
            if len(looplevels) > 0:
                level = len(looplevels)
                # deeper nesting than colors provided: reuse the last color
                if level > len(self.looplevelcolors):
                    color = self.looplevelcolors[-1]
                else:
                    color = self.looplevelcolors[level-1]
        # if n == self.fn.faddr:
        #     color = 'purple'
        blocktxt = self.replace_text(blocktxt)
        self.dotgraph.add_node(str(n), labeltxt=str(blocktxt), color=color)

    def add_cfg_edge(self, e: str) -> None:
        """Add the outgoing edges of block ``e`` to the dotgraph.

        When showpredicates is set and the block ends in a two-way branch,
        edges are labeled with the fall-through/taken conditions; otherwise
        edges are added without labels.
        """
        if e not in self.pathnodes:
            return

        def default() -> None:
            # unlabeled edges to every kept successor
            for tgt in self.fn.cfg.edges[e]:
                if tgt in self.pathnodes:
                    self.dotgraph.add_edge(str(e), str(tgt), labeltxt=None)

        labeltxt: Optional[str] = None
        if len(self.fn.cfg.edges[e]) > 1:
            if self.showpredicates:
                branchinstr = self.get_branch_instruction(e)
                if branchinstr and branchinstr.is_branch_instruction:
                    ftconditions = branchinstr.ft_conditions
                    if len(ftconditions) == 2:
                        for i, tgt in enumerate(self.fn.cfg.edges[e]):
                            if tgt in self.pathnodes:
                                labeltxt = str(ftconditions[i])
                                labeltxt = self.replace_text(labeltxt)
                                self.dotgraph.add_edge(
                                    str(e), str(tgt), labeltxt=labeltxt)
                    else:
                        default()
                else:
                    default()
            else:
                default()
        else:
            default()
| [
"chb.util.DotGraph.DotGraph"
] | [((3205, 3224), 'chb.util.DotGraph.DotGraph', 'DotGraph', (['graphname'], {}), '(graphname)\n', (3213, 3224), False, 'from chb.util.DotGraph import DotGraph\n')] |
from locust import (
constant,
)
from locust.env import Environment, LoadTestShape
from locust.user import (
User,
task,
)
from locust.user.task import TaskSet
from .testcases import LocustTestCase
from .fake_module1_for_env_test import MyUserWithSameName as MyUserWithSameName1
from .fake_module2_for_env_test import MyUserWithSameName as MyUserWithSameName2
class TestEnvironment(LocustTestCase):
    """Tests for Environment construction and user-class handling."""

    def test_user_classes_count(self):
        """user_classes_by_name maps each class name to its class object."""
        class MyUser1(User):
            wait_time = constant(0)

            @task
            def my_task(self):
                pass

        class MyUser2(User):
            wait_time = constant(0)

            @task
            def my_task(self):
                pass

        environment = Environment(user_classes=[MyUser1, MyUser2])
        self.assertDictEqual({"MyUser1": MyUser1, "MyUser2": MyUser2}, environment.user_classes_by_name)

    def test_user_classes_with_same_name_is_error(self):
        """Two user classes sharing a class name must be rejected."""
        with self.assertRaises(ValueError) as e:
            environment = Environment(user_classes=[MyUserWithSameName1, MyUserWithSameName2])

        self.assertEqual(
            e.exception.args[0],
            "The following user classes have the same class name: locust.test.fake_module1_for_env_test.MyUserWithSameName, locust.test.fake_module2_for_env_test.MyUserWithSameName",
        )

    def test_assign_equal_weights(self):
        """assign_equal_weights flattens nested/weighted tasks to a unique set."""
        def verify_tasks(u, target_tasks):
            # each expected task appears exactly once after flattening
            self.assertEqual(len(u.tasks), len(target_tasks))
            tasks = [t.__name__ for t in u.tasks]
            self.assertEqual(len(tasks), len(set(tasks)))
            self.assertEqual(set(tasks), set(target_tasks))

        # Base case
        class MyUser1(User):
            wait_time = constant(0)

            @task(4)
            def my_task(self):
                pass

            @task(1)
            def my_task_2(self):
                pass

        environment = Environment(user_classes=[MyUser1])
        environment.assign_equal_weights()
        u = environment.user_classes[0]
        verify_tasks(u, ["my_task", "my_task_2"])

        # Testing nested task sets
        class MyUser2(User):
            @task
            class TopLevelTaskSet(TaskSet):
                @task
                class IndexTaskSet(TaskSet):
                    @task(10)
                    def index(self):
                        self.client.get("/")

                    @task
                    def stop(self):
                        self.client.get("/hi")

                @task(2)
                def stats(self):
                    self.client.get("/stats/requests")

        environment = Environment(user_classes=[MyUser2])
        environment.assign_equal_weights()
        u = environment.user_classes[0]
        verify_tasks(u, ["index", "stop", "stats"])

        # Testing task assignment via instance variable
        def outside_task():
            pass

        def outside_task_2():
            pass

        class SingleTaskSet(TaskSet):
            tasks = [outside_task, outside_task, outside_task_2]

        class MyUser3(User):
            tasks = [SingleTaskSet, outside_task]

        environment = Environment(user_classes=[MyUser3])
        environment.assign_equal_weights()
        u = environment.user_classes[0]
        verify_tasks(u, ["outside_task", "outside_task_2"])

        # Testing task assignment via dict
        class DictTaskSet(TaskSet):
            def dict_task_1():
                pass

            def dict_task_2():
                pass

            def dict_task_3():
                pass

            tasks = {
                dict_task_1: 5,
                dict_task_2: 3,
                dict_task_3: 1,
            }

        class MyUser4(User):
            tasks = [DictTaskSet, SingleTaskSet, SingleTaskSet]

        # Assign user tasks in dict
        environment = Environment(user_classes=[MyUser4])
        environment.assign_equal_weights()
        u = environment.user_classes[0]
        verify_tasks(u, ["outside_task", "outside_task_2", "dict_task_1", "dict_task_2", "dict_task_3"])

        class MyUser5(User):
            tasks = {
                DictTaskSet: 5,
                SingleTaskSet: 3,
                outside_task: 6,
            }

        environment = Environment(user_classes=[MyUser5])
        environment.assign_equal_weights()
        u = environment.user_classes[0]
        verify_tasks(u, ["outside_task", "outside_task_2", "dict_task_1", "dict_task_2", "dict_task_3"])

    def test_user_classes_with_zero_weight_are_removed(self):
        """Users with weight 0 are dropped from the environment."""
        class MyUser1(User):
            wait_time = constant(0)
            weight = 0

            @task
            def my_task(self):
                pass

        class MyUser2(User):
            wait_time = constant(0)
            weight = 1

            @task
            def my_task(self):
                pass

        environment = Environment(user_classes=[MyUser1, MyUser2])
        self.assertEqual(len(environment.user_classes), 1)
        self.assertIs(environment.user_classes[0], MyUser2)

    def test_all_user_classes_with_zero_weight_raises_exception(self):
        """An environment with no positively-weighted users is invalid."""
        class MyUser1(User):
            wait_time = constant(0)
            weight = 0

            @task
            def my_task(self):
                pass

        class MyUser2(User):
            wait_time = constant(0)
            weight = 0

            @task
            def my_task(self):
                pass

        with self.assertRaises(ValueError) as e:
            environment = Environment(user_classes=[MyUser1, MyUser2])

        self.assertEqual(
            e.exception.args[0],
            "There are no users with weight > 0.",
        )

    def test_shape_class_attribute(self):
        """shape_class must be a LoadTestShape instance, not a subclass object."""
        class SubLoadTestShape(LoadTestShape):
            """Inherited from locust.env.LoadTestShape"""

        with self.assertRaisesRegex(
            ValueError, r"instance of LoadTestShape or subclass LoadTestShape", msg="exception message is mismatching"
        ):
            Environment(user_classes=[MyUserWithSameName1], shape_class=SubLoadTestShape)
| [
"locust.constant",
"locust.env.Environment",
"locust.user.task"
] | [((748, 792), 'locust.env.Environment', 'Environment', ([], {'user_classes': '[MyUser1, MyUser2]'}), '(user_classes=[MyUser1, MyUser2])\n', (759, 792), False, 'from locust.env import Environment, LoadTestShape\n'), ((1928, 1963), 'locust.env.Environment', 'Environment', ([], {'user_classes': '[MyUser1]'}), '(user_classes=[MyUser1])\n', (1939, 1963), False, 'from locust.env import Environment, LoadTestShape\n'), ((2650, 2685), 'locust.env.Environment', 'Environment', ([], {'user_classes': '[MyUser2]'}), '(user_classes=[MyUser2])\n', (2661, 2685), False, 'from locust.env import Environment, LoadTestShape\n'), ((3178, 3213), 'locust.env.Environment', 'Environment', ([], {'user_classes': '[MyUser3]'}), '(user_classes=[MyUser3])\n', (3189, 3213), False, 'from locust.env import Environment, LoadTestShape\n'), ((3881, 3916), 'locust.env.Environment', 'Environment', ([], {'user_classes': '[MyUser4]'}), '(user_classes=[MyUser4])\n', (3892, 3916), False, 'from locust.env import Environment, LoadTestShape\n'), ((4293, 4328), 'locust.env.Environment', 'Environment', ([], {'user_classes': '[MyUser5]'}), '(user_classes=[MyUser5])\n', (4304, 4328), False, 'from locust.env import Environment, LoadTestShape\n'), ((4922, 4966), 'locust.env.Environment', 'Environment', ([], {'user_classes': '[MyUser1, MyUser2]'}), '(user_classes=[MyUser1, MyUser2])\n', (4933, 4966), False, 'from locust.env import Environment, LoadTestShape\n'), ((505, 516), 'locust.constant', 'constant', (['(0)'], {}), '(0)\n', (513, 516), False, 'from locust import constant\n'), ((642, 653), 'locust.constant', 'constant', (['(0)'], {}), '(0)\n', (650, 653), False, 'from locust import constant\n'), ((1032, 1100), 'locust.env.Environment', 'Environment', ([], {'user_classes': '[MyUserWithSameName1, MyUserWithSameName2]'}), '(user_classes=[MyUserWithSameName1, MyUserWithSameName2])\n', (1043, 1100), False, 'from locust.env import Environment, LoadTestShape\n'), ((1743, 1754), 'locust.constant', 'constant', 
(['(0)'], {}), '(0)\n', (1751, 1754), False, 'from locust import constant\n'), ((1769, 1776), 'locust.user.task', 'task', (['(4)'], {}), '(4)\n', (1773, 1776), False, 'from locust.user import User, task\n'), ((1843, 1850), 'locust.user.task', 'task', (['(1)'], {}), '(1)\n', (1847, 1850), False, 'from locust.user import User, task\n'), ((4633, 4644), 'locust.constant', 'constant', (['(0)'], {}), '(0)\n', (4641, 4644), False, 'from locust import constant\n'), ((4793, 4804), 'locust.constant', 'constant', (['(0)'], {}), '(0)\n', (4801, 4804), False, 'from locust import constant\n'), ((5212, 5223), 'locust.constant', 'constant', (['(0)'], {}), '(0)\n', (5220, 5223), False, 'from locust import constant\n'), ((5372, 5383), 'locust.constant', 'constant', (['(0)'], {}), '(0)\n', (5380, 5383), False, 'from locust import constant\n'), ((5554, 5598), 'locust.env.Environment', 'Environment', ([], {'user_classes': '[MyUser1, MyUser2]'}), '(user_classes=[MyUser1, MyUser2])\n', (5565, 5598), False, 'from locust.env import Environment, LoadTestShape\n'), ((6048, 6125), 'locust.env.Environment', 'Environment', ([], {'user_classes': '[MyUserWithSameName1]', 'shape_class': 'SubLoadTestShape'}), '(user_classes=[MyUserWithSameName1], shape_class=SubLoadTestShape)\n', (6059, 6125), False, 'from locust.env import Environment, LoadTestShape\n'), ((2531, 2538), 'locust.user.task', 'task', (['(2)'], {}), '(2)\n', (2535, 2538), False, 'from locust.user import User, task\n'), ((2312, 2320), 'locust.user.task', 'task', (['(10)'], {}), '(10)\n', (2316, 2320), False, 'from locust.user import User, task\n')] |
import csv
from io import BytesIO
import pandas as pd
from urllib.request import urlopen
from zipfile import ZipFile
def fetch_geds(url, subset=None):
    '''
    Fetches the geds dataset from Canada's Open Data Portal

    Args:
        url:
            A string containing the url to the Canada Open Data Portal web page
            that downloads a zipped csv containing the geds dataset.
        subset:
            A string containing the acronym found in the "Department Acronym"
            field in the geds dataframe (e.g. "ESDC-EDSC") - used to build the
            org chart tool for only a subset of geds.

    Returns:
        df:
            A pandas dataframe containing the original contents of the zipped
            csv file.
    '''
    # Fetch the zip archive; 'with' closes the HTTP response once the body
    # has been read into memory.
    with urlopen(url) as resp:
        zipped_file = ZipFile(BytesIO(resp.read()))
    # The archive contains a single entry (e.g. 'gedsOpenData.csv')
    csv_name = zipped_file.namelist()[0]
    lines = []
    with zipped_file.open(csv_name) as csv_file:
        for line in csv_file:
            # Use the csv module to parse each decoded line: it contains the
            # logic to handle commas enclosed in double quotes.
            decoded = [line.decode('ISO-8859-1')]
            row = next(csv.reader(decoded))
            # There are a few observations (~90) that are not parsed correctly
            # (they do not yield exactly 44 fields) and are dropped - this
            # needs to be investigated further.
            if len(row) == 44:
                lines.append(row)
    # First parsed row is the header
    df = pd.DataFrame(lines[1:], columns=lines[0])
    # Select a subset of the dataframe (if any)
    if subset is not None:
        df = df[df["Department Acronym"] == subset]
    return df
"pandas.DataFrame",
"csv.reader",
"urllib.request.urlopen"
] | [((813, 825), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (820, 825), False, 'from urllib.request import urlopen\n'), ((1748, 1789), 'pandas.DataFrame', 'pd.DataFrame', (['lines[1:]'], {'columns': 'lines[0]'}), '(lines[1:], columns=lines[0])\n', (1760, 1789), True, 'import pandas as pd\n'), ((1496, 1515), 'csv.reader', 'csv.reader', (['decoded'], {}), '(decoded)\n', (1506, 1515), False, 'import csv\n')] |
import os
from tqdm import tqdm
from joblib import Parallel, delayed
try:
import seaborn as sns
except:
pass
import numpy as np
import cv2
from lost_ds.util import get_fs
from lost_ds.geometry.lost_geom import LOSTGeometries
from lost_ds.functional.api import remove_empty
def get_fontscale(fontscale, thickness, img_h, text_max_h_frac=0.04):
    """Resolve the font scale to use for annotation label text.

    Args:
        fontscale (int, float, str): an explicit numeric scale, or 'auto' to
            derive a scale from the image height.
        thickness (int): line thickness the text will be drawn with.
        img_h (int): image height in pixels.
        text_max_h_frac (float): maximum text height as a fraction of img_h
            (only used for 'auto').

    Returns:
        int or float: the resolved font scale.

    Raises:
        ValueError: if fontscale is neither numeric nor 'auto'. (Previously
            such values silently returned None, deferring the failure to the
            drawing code.)
    """
    if isinstance(fontscale, (int, float)):
        return fontscale
    elif fontscale == 'auto':
        # Scale so the text is at most text_max_h_frac of the image height,
        # but never smaller than a 10 px target height.
        text_h = int(text_max_h_frac * img_h)
        fontscale = cv2.getFontScaleFromHeight(cv2.FONT_HERSHEY_SIMPLEX,
                                               max(text_h, 10),
                                               thickness)
        return fontscale
    else:
        raise ValueError(
            "fontscale must be a number or 'auto', got {!r}".format(fontscale))
def get_thickness(line_thickness, img_h, thickness_max_h_frac=0.002):
    """Resolve the line thickness for drawing annotations.

    Args:
        line_thickness (int, str): explicit thickness, or 'auto' to derive
            it from the image height.
        img_h (int): image height in pixels.
        thickness_max_h_frac (float): thickness as a fraction of img_h
            (only used for 'auto').

    Returns:
        int: resolved thickness.
    """
    # Anything other than the 'auto' sentinel is passed through unchanged.
    if line_thickness != 'auto':
        return line_thickness
    return int(thickness_max_h_frac * img_h)
def vis_sample(img, df, line_thickness=3, color=(0, 0, 255),
               lbl_col='anno_lbl', lost_geometries:LOSTGeometries=None,
               blow_up=None, radius=2, fontscale=2):
    '''Draw the annotations of one image onto that image.

    Args:
        img (np.ndarray): image to draw on
        df (pandas.DataFrame): annotations belonging to this image; rows
            with empty anno_data are ignored
        color (tuple, dict of tuple): colors (B,G,R) for all annos if tuple
            or dict for labelwise mapping like {label: color}
        line_thickness (int, dict of int): line thickness for annotations if int
            or dict for anno-type wise mapping like {dtype: thickness}
        lbl_col (str): column containing the labels
        lost_geometries (LOSTGeometries): geometry helper to reuse; a fresh
            one is created when None
        blow_up: currently unused (TODO in original implementation)
        radius (int): radius for point/circle annotations
        fontscale (int, float, str): font scale, or 'auto'

    Returns:
        np.array: the image with annotations painted on it (unchanged when
        there are no non-empty annotations).
    '''
    df = remove_empty(df, 'anno_data')
    if not len(df):
        # nothing to draw
        return img
    geom = lost_geometries if lost_geometries is not None else LOSTGeometries()
    # anno_confidence is optional; pass None when the column is absent
    conf = list(df['anno_confidence']) if hasattr(df, 'anno_confidence') else None
    thickness = get_thickness(line_thickness, img.shape[0])
    fontscale = get_fontscale(fontscale, thickness, img.shape[0])
    # drawing requires a thickness of at least 1
    thickness = max(1, thickness)
    img = geom.draw(img,
                    list(df['anno_data']),
                    conf,
                    list(df[lbl_col]),
                    list(df['anno_dtype']),
                    list(df['anno_style']),
                    list(df['anno_format']),
                    thickness,
                    fontscale,
                    color,
                    radius)
    return img
def vis_and_store(df, out_dir, lbl_col='anno_lbl', color=(0, 0, 255),
                  line_thickness=2, fontscale=2, filesystem=None,
                  radius=2):
    '''Visualize annotations and store them to a folder

    One output image is written per unique img_path; images whose rows carry
    no annotation data are copied to out_dir unchanged.

    Args:
        df (pd.DataFrame): Optional dataset in lost format to visualize
        out_dir (str): Directory to store the visualized annotations
        color (tuple, dict of tuple): colors (B,G,R) for all annos if tuple
            or dict for labelwise mapping like {label: color}
        line_thickness (int, dict of int): line thickness for annotations if int
            or dict for anno-type wise mapping like {dtype: thickness}
        lbl_col (str): column containing the labels
        radius (int): radius to draw for points/circles
        filesystem (fsspec.filesystem, FileMan): filesystem to use. Use local
            if not initialized
    '''
    fs = get_fs(filesystem)
    fs.makedirs(out_dir, exist_ok=True)

    def vis_img(img_path, df_vis):
        # fresh LOSTGeometries per worker call (joblib runs these in parallel)
        geom = LOSTGeometries()
        out_path = os.path.join(out_dir, os.path.basename(img_path))
        if df_vis['anno_data'].notnull().any():
            img = fs.read_img(img_path)
            img = vis_sample(img=img, df=df_vis, line_thickness=line_thickness,
                             color=color, lbl_col=lbl_col, lost_geometries=geom,
                             radius=radius, fontscale=fontscale)
            fs.write_img(img, out_path)
        else:
            # no annotations for this image: pass it through untouched
            fs.copy(img_path, out_path)

    # one parallel job per image (n_jobs=-1: use all cores)
    Parallel(n_jobs=-1)(delayed(vis_img)(path, df_vis)
                        for path, df_vis in tqdm(df.groupby('img_path'),
                                                 desc='visualize'))
    # sequential fallback kept for debugging:
    # for path, df_vis in tqdm(df.groupby('img_path'), desc='visualize'):
    #     vis_img(path, df_vis)
def vis_semantic_segmentation(df, out_dir, n_classes, palette='dark',
                              seg_path_col='seg_path', filesystem=None):
    """Visualize the stored semantic segmentations by coloring it

    NOTE(review): seaborn is imported at module level inside a try/except;
    if it is not installed this function raises NameError on `sns` - confirm
    that seaborn is an intended hard requirement here.

    Args:
        df (pandas.DataFrame): The DataFrame that contains annoations to
            visualize.
        out_dir (str): path to store images
        n_classes (int): number of classes occuring in pixelmaps, number of
            different colors needed for visualization
        palette (str): seaborn color palette i.e. 'dark', 'bright', 'pastel',...
            refer https://seaborn.pydata.org/tutorial/color_palettes.html
        filesystem (fsspec.filesystem, FileMan): filesystem to use. Use local
            if not initialized
    """
    fs = get_fs(filesystem)
    fs.makedirs(out_dir, exist_ok=True)
    # seaborn yields floats in [0, 1]; scale to uint8 BGR/RGB values
    palette = sns.color_palette(palette, n_classes)
    palette = [(np.array(x)*255).astype(np.uint8) for x in palette]
    segmentations = df[seg_path_col].unique()

    def vis_seg(seg_path):
        seg = fs.read_img(seg_path)
        # start from black and paint one color per class id
        vis = np.zeros(seg.shape[:2] + (3,))
        for i in range(n_classes):
            vis = np.where(seg==i, palette[i], vis)
        fs.write_img(vis, os.path.join(out_dir, seg_path.split('/')[-1]))

    # one parallel job per segmentation map (n_jobs=-1: use all cores)
    Parallel(n_jobs=-1)(delayed(vis_seg)(seg_path)
                        for seg_path in tqdm(segmentations, desc='vis sem. seg.'))
| [
"seaborn.color_palette",
"numpy.where",
"tqdm.tqdm",
"lost_ds.util.get_fs",
"joblib.Parallel",
"numpy.zeros",
"numpy.array",
"lost_ds.geometry.lost_geom.LOSTGeometries",
"os.path.basename",
"joblib.delayed",
"lost_ds.functional.api.remove_empty"
] | [((1871, 1900), 'lost_ds.functional.api.remove_empty', 'remove_empty', (['df', '"""anno_data"""'], {}), "(df, 'anno_data')\n", (1883, 1900), False, 'from lost_ds.functional.api import remove_empty\n'), ((3667, 3685), 'lost_ds.util.get_fs', 'get_fs', (['filesystem'], {}), '(filesystem)\n', (3673, 3685), False, 'from lost_ds.util import get_fs\n'), ((5382, 5400), 'lost_ds.util.get_fs', 'get_fs', (['filesystem'], {}), '(filesystem)\n', (5388, 5400), False, 'from lost_ds.util import get_fs\n'), ((5460, 5497), 'seaborn.color_palette', 'sns.color_palette', (['palette', 'n_classes'], {}), '(palette, n_classes)\n', (5477, 5497), True, 'import seaborn as sns\n'), ((3781, 3797), 'lost_ds.geometry.lost_geom.LOSTGeometries', 'LOSTGeometries', ([], {}), '()\n', (3795, 3797), False, 'from lost_ds.geometry.lost_geom import LOSTGeometries\n'), ((4294, 4313), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (4302, 4313), False, 'from joblib import Parallel, delayed\n'), ((5691, 5721), 'numpy.zeros', 'np.zeros', (['(seg.shape[:2] + (3,))'], {}), '(seg.shape[:2] + (3,))\n', (5699, 5721), True, 'import numpy as np\n'), ((5896, 5915), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (5904, 5915), False, 'from joblib import Parallel, delayed\n'), ((2007, 2023), 'lost_ds.geometry.lost_geom.LOSTGeometries', 'LOSTGeometries', ([], {}), '()\n', (2021, 2023), False, 'from lost_ds.geometry.lost_geom import LOSTGeometries\n'), ((3839, 3865), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (3855, 3865), False, 'import os\n'), ((5775, 5810), 'numpy.where', 'np.where', (['(seg == i)', 'palette[i]', 'vis'], {}), '(seg == i, palette[i], vis)\n', (5783, 5810), True, 'import numpy as np\n'), ((4314, 4330), 'joblib.delayed', 'delayed', (['vis_img'], {}), '(vis_img)\n', (4321, 4330), False, 'from joblib import Parallel, delayed\n'), ((5916, 5932), 'joblib.delayed', 'delayed', (['vis_seg'], {}), '(vis_seg)\n', (5923, 
5932), False, 'from joblib import Parallel, delayed\n'), ((5972, 6013), 'tqdm.tqdm', 'tqdm', (['segmentations'], {'desc': '"""vis sem. seg."""'}), "(segmentations, desc='vis sem. seg.')\n", (5976, 6013), False, 'from tqdm import tqdm\n'), ((5514, 5525), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5522, 5525), True, 'import numpy as np\n')] |
from collections import deque
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import QGraphicsRectItem
from cadnano.gui.palette import getNoPen
from cadnano.proxies.cnenum import StrandType
from .pathextras import PreXoverItem
class PreXoverManager(QGraphicsRectItem):
"""Summary
Attributes:
active_pxis (dict): Description
hovered_items (list): Description
HUE_FACTOR (float): Description
KEYMAP (TYPE): Description
neighbor_prexover_items (dict): Description
part_item (TYPE): Description
prexover_item_map (dict): Description
pxi_pool (TYPE): Description
virtual_helix_item (VirtualHelixItem): Description
"""
HUE_FACTOR = 1.6
KEYMAP = {i: getattr(Qt, 'Key_%d' % i) for i in range(10)}
def __init__(self, part_item):
"""Summary
Args:
part_item (TYPE): Description
"""
super(QGraphicsRectItem, self).__init__(part_item)
self.part_item = part_item
self.virtual_helix_item = None
self.setPen(getNoPen())
self._colors = []
# dictionary of tuple of a (PreXoverItem, List[PreXoverItem])
# for activating on hover events
self.prexover_item_map = {}
self.neighbor_prexover_items = {} # just a dictionary of neighbors
self.hovered_items = []
self._key_press_dict = {}
# for reuse of PreXoverItem objects
self.pxi_pool = deque()
self.active_pxis = {}
# end def
def __repr__(self):
return "<{}>".format(self.__class__.__name__)
### ACCESSORS ###
def window(self):
"""Summary
Returns:
TYPE: Description
"""
return self._parent.window()
def virtualHelixItem(self):
"""Summary
Returns:
TYPE: Description
"""
return self.virtual_helix_item
# end def
def addKeyPress(self, key_int, info):
"""Summary
Args:
key_int (TYPE): Description
info (TYPE): Description
Returns:
TYPE: Description
"""
qtkey = self.KEYMAP[key_int]
self._key_press_dict[qtkey] = info
### EVENT HANDLERS ###
### PRIVATE SUPPORT METHODS ###
def updateBasesPerRepeat(self, step_size):
"""Recreates colors, all vhi
Args:
step_size (TYPE): Description
"""
hue_scale = step_size*self.HUE_FACTOR
self._colors = [QColor.fromHsvF(i / hue_scale, 0.75, 0.8).name()
for i in range(int(step_size))]
# self.removeRepeats()
# self.addRepeats()
# end def
def handlePreXoverKeyPress(self, key):
"""Summary
Args:
key (TYPE): Description
Returns:
TYPE: Description
"""
# print("handling key", key, self.KEYMAP.get(key, None))
if key not in self._key_press_dict:
return
# active item
part = self.part_item.part()
active_id_num, a_is_fwd, a_idx, a_to_id = part.active_base_info
a_strand_type = StrandType.FWD if a_is_fwd else StrandType.REV
neighbor_id_num, n_is_fwd, n_idx, n_to_id = self._key_press_dict[key]
n_strand_type = StrandType.FWD if n_is_fwd else StrandType.REV
if not part.hasStrandAtIdx(active_id_num, a_idx)[a_strand_type]:
print("no active strand", key)
return
if not part.hasStrandAtIdx(neighbor_id_num, n_idx)[n_strand_type]:
print("no neighbor strand", key)
return
a_strandset = part.getStrandSets(active_id_num)[a_strand_type]
n_strandset = part.getStrandSets(neighbor_id_num)[n_strand_type]
a_strand = a_strandset.getStrand(a_idx)
n_strand = n_strandset.getStrand(n_idx)
if a_strand.hasXoverAt(a_idx):
return
if n_strand.hasXoverAt(n_idx):
return
# SPECIAL CASE: neighbor already has a 3' end, and active has
# a 5' end, so assume the user wants to install a returning xover
if a_strand.idx5Prime() == a_idx and n_strand.idx3Prime() == n_idx:
part.createXover(n_strand, n_idx, a_strand, a_idx)
return
# DEFAULT CASE: the active strand acts as strand5p,
# install a crossover to the neighbor acting as strand3p
if a_strand_type == n_strand_type:
if a_is_fwd:
if part.isAGreaterThanB_Z(active_id_num, a_idx,
neighbor_id_num, n_idx):
part.createXover(n_strand, n_idx, a_strand, a_idx)
else:
part.createXover(a_strand, a_idx, n_strand, n_idx)
else:
if part.isAGreaterThanB_Z(active_id_num, a_idx,
neighbor_id_num, n_idx):
part.createXover(a_strand, a_idx, n_strand, n_idx)
else:
part.createXover(n_strand, n_idx, a_strand, a_idx)
else:
part.createXover(a_strand, a_idx, n_strand, n_idx)
# end def
def updateTurnsPerRepeat(self):
"""Summary
Returns:
TYPE: Description
"""
# end def
def part(self):
"""Summary
Returns:
TYPE: Description
"""
return self.parentItem().part()
### PUBLIC SUPPORT METHODS ###
def getItem(self, id_num, is_fwd, idx):
"""Summary
Args:
id_num (int): VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
is_fwd (TYPE): Description
idx (int): the base index within the virtual helix
Returns:
TYPE: Description
"""
return self.prexover_item_map[(id_num, is_fwd, idx)]
# end def
def clearPreXoverItems(self):
"""Summary
Returns:
TYPE: Description
"""
# self.deactivateNeighbors()
self.hovered_items = []
pxi_pool = self.pxi_pool
active_pxis = self.active_pxis
while active_pxis:
k, x = active_pxis.popitem()
x.shutdown()
pxi_pool.append(x)
self.prexover_item_map = {}
for x in self.neighbor_prexover_items.values():
x.shutdown()
pxi_pool.append(x)
self._key_press_dict = {}
self.neighbor_prexover_items = {}
# end def
@staticmethod
def getPoolItem(pool, cls, *args):
"""grab an item from a pool if there is one and reconfigure it
otherwise, create a new object of type `cls`
Useful to avoid issues with deleting animations
Args:
pool (TYPE): Description
cls (TYPE): Description
*args (TYPE): Description
"""
if len(pool) > 0:
item = pool.pop()
item.resetItem(*args)
return item
else:
return cls(*args)
# end def
def reset(self):
"""Summary
Returns:
TYPE: Description
"""
self.clearPreXoverItems()
self.virtual_helix_item = None
# end def
def activateVirtualHelix(self, virtual_helix_item, this_idx, per_neighbor_hits):
"""Populate self.prexover_item_map dictionary which maps a tuple
of (id_num, is_fwd, idx) to a given PreXoverItem and a List of neighbor PreXoverItems
This also deactivates any previusly active VirtualHelix
Args:
virtual_helix_item (cadnano.guil.views.pathview.virtualhelixitem.VirtualHelixItem)
this_idx (int): the base index within the virtual helix
per_neighbor_hits (Tuple())
"""
# print("ACTIVATING VH", virtual_helix_item.idNum())
# 1. Clear all PreXoverItems
self.clearPreXoverItems()
pxis = self.prexover_item_map
neighbor_pxis_dict = self.neighbor_prexover_items # for avoiding duplicates
part_item = self.part_item
pxi_pool = self.pxi_pool
getPoolItem = self.getPoolItem
bpr = virtual_helix_item.getProperty('bases_per_repeat')
self.virtual_helix_item = virtual_helix_item
self.updateBasesPerRepeat(bpr)
# the list of neighbors per strand
id_num = virtual_helix_item.idNum()
fwd_st_type, rev_st_type = True, False # for clarity in the call to constructors
# start, length = part_item.part().normalizedRange(id_num, this_idx)
active_pxis = self.active_pxis
# 1. Construct PXIs for the active virtual_helix_item
for neighbor_id, hits in per_neighbor_hits.items():
fwd_axis_hits, rev_axis_hits = hits
# Track active and neighbor idxs in flat list
# so we can look for idx pairs
fwd_active_idxs = [i[0] for i in fwd_axis_hits]
rev_active_idxs = [i[0] for i in rev_axis_hits]
fwd_neighbor_idxs = [j for k in [i[2] for i in fwd_axis_hits] for j in k]
rev_neighbor_idxs = [j for k in [i[1] for i in rev_axis_hits] for j in k]
nvhi = part_item.idToVirtualHelixItem(neighbor_id)
# n_step_size = nvhi.getProperty('bases_per_repeat')
for idx, fwd_idxs, rev_idxs in fwd_axis_hits:
# print("f fwd_active_idxs", fwd_active_idxs)
nearby_idxs = []
if idx-1 in fwd_active_idxs:
nearby_idxs.append(idx-1)
if idx+1 in fwd_active_idxs:
nearby_idxs.append(idx+1)
apxi = getPoolItem(pxi_pool,
PreXoverItem,
virtual_helix_item, fwd_st_type, idx,
nearby_idxs, neighbor_id, self
)
# apxi = active_pxis[(fwd_st_type, idx)]
apxi.enableActive(True, to_vh_id_num=neighbor_id)
active_pxis[(fwd_st_type, idx)] = apxi
neighbor_pxis = []
pxis[(id_num, fwd_st_type, idx)] = (apxi, neighbor_pxis)
# print("f fwd_neighbor_idxs", fwd_neighbor_idxs)
for j in fwd_idxs:
nkey = (neighbor_id, fwd_st_type, j)
npxi = neighbor_pxis_dict.get(nkey)
if npxi is None:
nearby_idxs = []
if j-1 in rev_neighbor_idxs:
nearby_idxs.append(j-1)
if j+1 in rev_neighbor_idxs:
nearby_idxs.append(j+1)
npxi = getPoolItem(pxi_pool,
PreXoverItem,
nvhi, fwd_st_type, j,
nearby_idxs, id_num, self
)
neighbor_pxis_dict[nkey] = npxi
neighbor_pxis.append(npxi)
# print("f rev_neighbor_idxs", rev_neighbor_idxs)
for j in rev_idxs:
nkey = (neighbor_id, rev_st_type, j)
npxi = neighbor_pxis_dict.get(nkey)
if npxi is None:
nearby_idxs = []
if j-1 in fwd_neighbor_idxs:
nearby_idxs.append(j-1)
if j+1 in fwd_neighbor_idxs:
nearby_idxs.append(j+1)
npxi = getPoolItem(pxi_pool,
PreXoverItem,
nvhi, rev_st_type, j,
nearby_idxs, id_num, self
)
neighbor_pxis_dict[nkey] = npxi
neighbor_pxis.append(npxi)
for idx, fwd_idxs, rev_idxs in rev_axis_hits:
# print("r rev_active_idxs", rev_active_idxs)
nearby_idxs = []
if idx-1 in rev_active_idxs:
nearby_idxs.append(idx-1)
if idx+1 in rev_active_idxs:
nearby_idxs.append(idx+1)
apxi = getPoolItem(pxi_pool,
PreXoverItem,
virtual_helix_item, rev_st_type, idx,
nearby_idxs, neighbor_id, self
)
# apxi = active_pxis[(rev_st_type, idx)]
apxi.enableActive(True, to_vh_id_num=neighbor_id)
active_pxis[(rev_st_type, idx)] = apxi
neighbor_pxis = []
pxis[(id_num, rev_st_type, idx)] = (apxi, neighbor_pxis)
# print("r fwd_neighbor_idxs", fwd_neighbor_idxs)
for j in fwd_idxs:
nkey = (neighbor_id, fwd_st_type, j)
npxi = neighbor_pxis_dict.get(nkey)
if npxi is None:
nearby_idxs = []
if j-1 in rev_neighbor_idxs:
nearby_idxs.append(j-1)
if j+1 in rev_neighbor_idxs:
nearby_idxs.append(j+1)
npxi = getPoolItem(pxi_pool,
PreXoverItem,
nvhi, fwd_st_type, j,
nearby_idxs, id_num, self
)
neighbor_pxis_dict[nkey] = npxi
neighbor_pxis.append(npxi)
# print("r rev_neighbor_idxs", rev_neighbor_idxs)
for j in rev_idxs:
nkey = (neighbor_id, rev_st_type, j)
npxi = neighbor_pxis_dict.get(nkey)
if npxi is None:
nearby_idxs = []
if j-1 in fwd_neighbor_idxs:
nearby_idxs.append(j-1)
if j+1 in fwd_neighbor_idxs:
nearby_idxs.append(j+1)
npxi = getPoolItem(pxi_pool,
PreXoverItem,
nvhi, rev_st_type, j,
nearby_idxs, id_num, self
)
neighbor_pxis_dict[nkey] = npxi
neighbor_pxis.append(npxi)
# end for per_neighbor_hits
# end def
def activateNeighbors(self, id_num, is_fwd, idx):
"""Summary
Args:
id_num (int): VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
is_fwd (TYPE): Description
idx (int): the base index within the virtual helix
Returns:
TYPE: Description
"""
# print("ACTIVATING neighbors", id_num, idx)
item = self.prexover_item_map.get((id_num, is_fwd, idx))
if item is None:
apxi = self.active_pxis.get((is_fwd, idx))
if apxi is not None:
apxi.setActiveHovered(True)
self.hovered_items.append(apxi)
else:
pxi, neighbor_list = item
# print("Should have {} neighbors".format(len(neighbor_list)))
for k, npxi in enumerate(neighbor_list):
npxi.activateNeighbor(pxi, shortcut=str(k))
self.addKeyPress(k, npxi.getInfo())
self.hovered_items.append(npxi)
# end def
def deactivateNeighbors(self):
"""Summary
Returns:
TYPE: Description
"""
self._key_press_dict = {}
while self.hovered_items:
self.hovered_items.pop().deactivateNeighbor()
def updateModelActiveBaseInfo(self, pre_xover_info):
"""Notify model of pre_xover_item hover state.
Args:
pre_xover_info (Tuple): from call to getInfo()
"""
self.part_item.part().setActiveBaseInfo(pre_xover_info)
# end def
def isVirtualHelixActive(self, id_num):
"""Summary
Args:
id_num (int): VirtualHelix ID number. See `NucleicAcidPart` for description and related methods.
Returns:
TYPE: Description
"""
return self.part_item.part().isVirtualHelixActive(id_num)
# end def
# end class
| [
"cadnano.gui.palette.getNoPen",
"collections.deque",
"PyQt5.QtGui.QColor.fromHsvF"
] | [((1494, 1501), 'collections.deque', 'deque', ([], {}), '()\n', (1499, 1501), False, 'from collections import deque\n'), ((1095, 1105), 'cadnano.gui.palette.getNoPen', 'getNoPen', ([], {}), '()\n', (1103, 1105), False, 'from cadnano.gui.palette import getNoPen\n'), ((2532, 2573), 'PyQt5.QtGui.QColor.fromHsvF', 'QColor.fromHsvF', (['(i / hue_scale)', '(0.75)', '(0.8)'], {}), '(i / hue_scale, 0.75, 0.8)\n', (2547, 2573), False, 'from PyQt5.QtGui import QColor\n')] |
'''
decorators - decorators to help with flask applications
'''
# standard
from datetime import timedelta
from functools import update_wrapper
# pypi
from flask import make_response, request, current_app
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
'''
crossdomain decorator (from https://web.archive.org/web/20190128010149/http://flask.pocoo.org/snippets/56/)
:param methods: Optionally a list of methods that are allowed for this view. If not provided it will allow all methods that are implemented.
:param headers: Optionally a list of headers that are allowed for this request.
:param origin: '*' to allow all origins, otherwise a string with a URL or a list of URLs that might access the resource.
:param max_age: The number of seconds as integer or timedelta object for which the preflighted request is valid.
:param attach_to_all: True if the decorator should add the access control headers to all HTTP methods or False if it should only add them to OPTIONS responses.
:param automatic_options: If enabled the decorator will use the default Flask OPTIONS response and attach the headers there, otherwise the view function will be called to generate an appropriate response.
'''
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, str):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, str):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator | [
"flask.current_app.make_default_options_response",
"functools.update_wrapper"
] | [((1797, 1840), 'flask.current_app.make_default_options_response', 'current_app.make_default_options_response', ([], {}), '()\n', (1838, 1840), False, 'from flask import make_response, request, current_app\n'), ((2640, 2675), 'functools.update_wrapper', 'update_wrapper', (['wrapped_function', 'f'], {}), '(wrapped_function, f)\n', (2654, 2675), False, 'from functools import update_wrapper\n'), ((2045, 2088), 'flask.current_app.make_default_options_response', 'current_app.make_default_options_response', ([], {}), '()\n', (2086, 2088), False, 'from flask import make_response, request, current_app\n')] |
def secondarybase64_layer5(nearing_the_end_script):
import base64
print("Secondary base64 encrypting")
joe = (nearing_the_end_script)
spliting = joe.encode('utf-8')
spliting = base64.b64encode(spliting)
spliting = spliting.decode('utf-8')
split_strings = []
n = int((len(spliting))/20)
for index in range(0, len(spliting), n):
split_strings.append(spliting[index : index + n])
lmaooo = ('"'+ '"+"'.join(split_strings) + '"')
dude_im_so_done_with_this = '''import base64;exec((base64.b64decode(({lmaooo}).encode('utf-8'))).decode('utf-8'))'''.format(lmaooo=lmaooo)
return(dude_im_so_done_with_this)
| [
"base64.b64encode"
] | [((187, 213), 'base64.b64encode', 'base64.b64encode', (['spliting'], {}), '(spliting)\n', (203, 213), False, 'import base64\n')] |
from flask_wtf import FlaskForm
from wtforms import PasswordField, SubmitField, StringField
from wtforms.validators import DataRequired, Length
class InstagramLoginForm(FlaskForm):
username = StringField('Instagram Username', validators=[DataRequired(),
Length(min=6, max=20)])
password = PasswordField('<PASSWORD>', validators=[DataRequired()])
submit = SubmitField('Save')
| [
"wtforms.validators.Length",
"wtforms.validators.DataRequired",
"wtforms.SubmitField"
] | [((430, 449), 'wtforms.SubmitField', 'SubmitField', (['"""Save"""'], {}), "('Save')\n", (441, 449), False, 'from wtforms import PasswordField, SubmitField, StringField\n'), ((244, 258), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (256, 258), False, 'from wtforms.validators import DataRequired, Length\n'), ((321, 342), 'wtforms.validators.Length', 'Length', ([], {'min': '(6)', 'max': '(20)'}), '(min=6, max=20)\n', (327, 342), False, 'from wtforms.validators import DataRequired, Length\n'), ((400, 414), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (412, 414), False, 'from wtforms.validators import DataRequired, Length\n')] |
# NEEDS FIXING
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: MuadDib
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['onlinemovies.tube', 'watchonline.pro']
self.base_link = 'http://watchonline.pro'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if not str(url).startswith('http'):
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
if 'tvshowtitle' in data:
url = '%s/episode/%s-s%02de%02d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
year = re.findall('(\d{4})', data['premiered'])[0]
url = client.request(url, output='geturl')
if url == None: raise Exception()
r = client.request(url)
y = client.parseDOM(r, 'span', attrs = {'class': 'date'})
y += [i for i in client.parseDOM(r, 'div', attrs = {'class': 'metadatac'}) if 'date' in i]
y = re.findall('(\d{4})', y[0])[0]
if not y == year: raise Exception()
else:
#url = '%s/watch/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
url = '%s/%s-%s/' % (self.base_link, cleantitle.geturl(data['title']), data['year'])
url = client.request(url, output='geturl')
if url == None: raise Exception()
r = client.request(url)
else:
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
links = client.parseDOM(r, 'iframe', ret='src')
for link in links:
try:
url = link.replace('\/', '/')
url = client.replaceHTMLCodes(url)
url = 'http:' + url if url.startswith('//') else url
url = url.encode('utf-8')
if not '.php' in url: raise Exception()
r = client.request(url, timeout='10')
s = re.compile('<script>(.+?)</script>', re.DOTALL).findall(r)
for i in s:
try: r += jsunpack.unpack(i)
except: pass
r = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', r)
for i in r:
try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en', 'url': i, 'direct': True, 'debridonly': False})
except: pass
except:
pass
return sources
except:
return sources
def resolve(self, url):
return directstream.googlepass(url)
| [
"resources.lib.modules.cleantitle.geturl",
"urlparse.urljoin",
"urlparse.parse_qs",
"resources.lib.modules.client.request",
"re.compile",
"resources.lib.modules.directstream.googlepass",
"urllib.urlencode",
"resources.lib.modules.directstream.googletag",
"re.findall",
"resources.lib.modules.client... | [((4768, 4796), 'resources.lib.modules.directstream.googlepass', 'directstream.googlepass', (['url'], {}), '(url)\n', (4791, 4796), False, 'from resources.lib.modules import directstream\n'), ((1221, 1242), 'urllib.urlencode', 'urllib.urlencode', (['url'], {}), '(url)\n', (1237, 1242), False, 'import re, urllib, urlparse\n'), ((1503, 1524), 'urllib.urlencode', 'urllib.urlencode', (['url'], {}), '(url)\n', (1519, 1524), False, 'import re, urllib, urlparse\n'), ((1727, 1749), 'urlparse.parse_qs', 'urlparse.parse_qs', (['url'], {}), '(url)\n', (1744, 1749), False, 'import re, urllib, urlparse\n'), ((1955, 1976), 'urllib.urlencode', 'urllib.urlencode', (['url'], {}), '(url)\n', (1971, 1976), False, 'import re, urllib, urlparse\n'), ((3629, 3668), 'resources.lib.modules.client.parseDOM', 'client.parseDOM', (['r', '"""iframe"""'], {'ret': '"""src"""'}), "(r, 'iframe', ret='src')\n", (3644, 3668), False, 'from resources.lib.modules import client\n'), ((2242, 2264), 'urlparse.parse_qs', 'urlparse.parse_qs', (['url'], {}), '(url)\n', (2259, 2264), False, 'import re, urllib, urlparse\n'), ((3528, 3565), 'urlparse.urljoin', 'urlparse.urljoin', (['self.base_link', 'url'], {}), '(self.base_link, url)\n', (3544, 3565), False, 'import re, urllib, urlparse\n'), ((3587, 3606), 'resources.lib.modules.client.request', 'client.request', (['url'], {}), '(url)\n', (3601, 3606), False, 'from resources.lib.modules import client\n'), ((2647, 2683), 'resources.lib.modules.client.request', 'client.request', (['url'], {'output': '"""geturl"""'}), "(url, output='geturl')\n", (2661, 2683), False, 'from resources.lib.modules import client\n'), ((2763, 2782), 'resources.lib.modules.client.request', 'client.request', (['url'], {}), '(url)\n', (2777, 2782), False, 'from resources.lib.modules import client\n'), ((2808, 2859), 'resources.lib.modules.client.parseDOM', 'client.parseDOM', (['r', '"""span"""'], {'attrs': "{'class': 'date'}"}), "(r, 'span', 
attrs={'class': 'date'})\n", (2823, 2859), False, 'from resources.lib.modules import client\n'), ((3351, 3387), 'resources.lib.modules.client.request', 'client.request', (['url'], {'output': '"""geturl"""'}), "(url, output='geturl')\n", (3365, 3387), False, 'from resources.lib.modules import client\n'), ((3467, 3486), 'resources.lib.modules.client.request', 'client.request', (['url'], {}), '(url)\n', (3481, 3486), False, 'from resources.lib.modules import client\n'), ((3798, 3826), 'resources.lib.modules.client.replaceHTMLCodes', 'client.replaceHTMLCodes', (['url'], {}), '(url)\n', (3821, 3826), False, 'from resources.lib.modules import client\n'), ((4032, 4065), 'resources.lib.modules.client.request', 'client.request', (['url'], {'timeout': '"""10"""'}), "(url, timeout='10')\n", (4046, 4065), False, 'from resources.lib.modules import client\n'), ((4298, 4349), 're.findall', 're.findall', (['"""file\\\\s*:\\\\s*(?:"|\')(.+?)(?:"|\')"""', 'r'], {}), '(\'file\\\\s*:\\\\s*(?:"|\\\')(.+?)(?:"|\\\')\', r)\n', (4308, 4349), False, 'import re, urllib, urlparse\n'), ((2576, 2617), 're.findall', 're.findall', (['"""(\\\\d{4})"""', "data['premiered']"], {}), "('(\\\\d{4})', data['premiered'])\n", (2586, 2617), False, 'import re, urllib, urlparse\n'), ((2997, 3025), 're.findall', 're.findall', (['"""(\\\\d{4})"""', 'y[0]'], {}), "('(\\\\d{4})', y[0])\n", (3007, 3025), False, 'import re, urllib, urlparse\n'), ((2466, 2504), 'resources.lib.modules.cleantitle.geturl', 'cleantitle.geturl', (["data['tvshowtitle']"], {}), "(data['tvshowtitle'])\n", (2483, 2504), False, 'from resources.lib.modules import cleantitle\n'), ((2899, 2954), 'resources.lib.modules.client.parseDOM', 'client.parseDOM', (['r', '"""div"""'], {'attrs': "{'class': 'metadatac'}"}), "(r, 'div', attrs={'class': 'metadatac'})\n", (2914, 2954), False, 'from resources.lib.modules import client\n'), ((3276, 3308), 'resources.lib.modules.cleantitle.geturl', 'cleantitle.geturl', (["data['title']"], {}), 
"(data['title'])\n", (3293, 3308), False, 'from resources.lib.modules import cleantitle\n'), ((4091, 4138), 're.compile', 're.compile', (['"""<script>(.+?)</script>"""', 're.DOTALL'], {}), "('<script>(.+?)</script>', re.DOTALL)\n", (4101, 4138), False, 'import re, urllib, urlparse\n'), ((4459, 4484), 'resources.lib.modules.directstream.googletag', 'directstream.googletag', (['i'], {}), '(i)\n', (4481, 4484), False, 'from resources.lib.modules import directstream\n')] |
import pytest
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.parametrize('name', [
('nodejs'),
])
def test_packages_are_installed(host, name):
package = host.package(name)
assert package.is_installed
@pytest.mark.parametrize('path,user,group', [
('/usr/bin/node', 'root', 'root'),
('/usr/bin/ncu', 'root', 'root'),
('/usr/bin/yarn', 'root', 'root'),
])
def test_binaries_are_installed(host, path, user, group):
binary = host.file(path)
assert binary.exists
assert binary.is_file
assert binary.user == user
assert binary.group == group
| [
"pytest.mark.parametrize"
] | [((192, 235), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', "['nodejs']"], {}), "('name', ['nodejs'])\n", (215, 235), False, 'import pytest\n'), ((356, 512), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""path,user,group"""', "[('/usr/bin/node', 'root', 'root'), ('/usr/bin/ncu', 'root', 'root'), (\n '/usr/bin/yarn', 'root', 'root')]"], {}), "('path,user,group', [('/usr/bin/node', 'root',\n 'root'), ('/usr/bin/ncu', 'root', 'root'), ('/usr/bin/yarn', 'root',\n 'root')])\n", (379, 512), False, 'import pytest\n')] |
"""
created by <NAME> at 1/8/19
"""
import os
import pandas as pd
def export_to_bed(gtf,intermediate_file_dir, lincRNA):
if lincRNA:
lincRNAIDs = pd.read_csv(os.path.join(intermediate_file_dir, 'intersect_total.txt'), names=['ids'], sep='\t')
exons = gtf[(gtf.feature == 'exon') & (gtf.seqname != 'chrM') & (gtf.gene_type ==
'protein_coding') | (gtf.gene_id.isin(lincRNAIDs['ids']))][
['seqname', 'start', 'end', 'strand', 'gene_id', 'gene_name', 'gene_type']]
else:
exons = gtf[(gtf.feature == 'exon') & (gtf.seqname != 'chrM') & (gtf.gene_type ==
'protein_coding')][
['seqname', 'start', 'end', 'strand', 'gene_id', 'gene_name', 'gene_type']]
exons.start = exons.start - 1
exons.to_csv(os.path.join(intermediate_file_dir, 'exon.bed'), index=None, header=False, sep='\t')
def check_gtf_tsv(gtf_tsv, annotation_file):
if not os.path.isfile(gtf_tsv):
from gtfparse import read_gtf
parsed_gtf = read_gtf(annotation_file)
parsed_gtf.to_csv(gtf_tsv, sep='\t', index=None)
| [
"os.path.isfile",
"gtfparse.read_gtf",
"os.path.join"
] | [((901, 948), 'os.path.join', 'os.path.join', (['intermediate_file_dir', '"""exon.bed"""'], {}), "(intermediate_file_dir, 'exon.bed')\n", (913, 948), False, 'import os\n'), ((1042, 1065), 'os.path.isfile', 'os.path.isfile', (['gtf_tsv'], {}), '(gtf_tsv)\n', (1056, 1065), False, 'import os\n'), ((1126, 1151), 'gtfparse.read_gtf', 'read_gtf', (['annotation_file'], {}), '(annotation_file)\n', (1134, 1151), False, 'from gtfparse import read_gtf\n'), ((172, 230), 'os.path.join', 'os.path.join', (['intermediate_file_dir', '"""intersect_total.txt"""'], {}), "(intermediate_file_dir, 'intersect_total.txt')\n", (184, 230), False, 'import os\n')] |
import glob
import numpy as np
import os
import pandas as pd
import yaml
from dask_image.imread import imread
from dlclabel import misc
from itertools import groupby
from napari.layers import Shapes
from napari.plugins._builtins import napari_write_shapes
from napari.types import LayerData
from skimage.io import imsave
from skimage.util import img_as_ubyte
from typing import Any, Dict, List, Optional, Sequence, Union
SUPPORTED_IMAGES = "jpg", "jpeg", "png"
def handle_path(path: Union[str, Sequence[str]]) -> Union[str, Sequence[str]]:
"""Dispatch files in folder to the relevant plugin readers."""
paths = [path] if isinstance(path, str) else path
paths = [os.fspath(path) for path in paths]
if not isinstance(paths, (tuple, list)):
raise ValueError("'path' argument must be a string, list, or tuple")
# Test first whether a 'labeled-data' folder was passed in
if len(paths) == 1:
path = paths[0]
if os.path.isdir(path):
files = os.listdir(path)
images = ""
for file in files:
if any(file.endswith(ext) for ext in SUPPORTED_IMAGES):
images = os.path.join(path, f"*{os.path.splitext(file)[1]}")
break
if not images:
raise IOError("No supported images were found.")
datafile = ""
for file in files:
if file.endswith(".h5"):
datafile = os.path.join(path, "*.h5")
break
if datafile:
return [images, datafile]
return [images]
return paths
def _populate_metadata(
header: misc.DLCHeader,
*,
labels: Optional[Sequence[str]] = None,
ids: Optional[Sequence[str]] = None,
likelihood: Optional[Sequence[float]] = None,
paths: Optional[List[str]] = None,
size: Optional[int] = 8,
pcutoff: Optional[float] = 0.6,
colormap: Optional[str] = "viridis",
) -> Dict:
if labels is None:
labels = header.bodyparts
if ids is None:
ids = header.individuals
if likelihood is None:
likelihood = np.ones(len(labels))
label_colors = misc.build_color_cycle(len(header.bodyparts), colormap)
id_colors = misc.build_color_cycle(len(header.individuals), colormap)
face_color_cycle_maps = {
"label": dict(zip(header.bodyparts, label_colors)),
"id": dict(zip(header.individuals, id_colors)),
}
return {
"name": "keypoints",
"text": "label",
"properties": {
"label": list(labels),
"id": list(ids),
"likelihood": likelihood,
"valid": likelihood > pcutoff,
},
"face_color_cycle": label_colors,
"edge_color": "valid",
"edge_color_cycle": ["black", "red"],
"size": size,
"metadata": {
"header": header,
"face_color_cycle_maps": face_color_cycle_maps,
"paths": paths or [],
},
}
def _load_config(config_path: str):
with open(config_path) as file:
return yaml.safe_load(file)
def read_config(configname: str) -> List[LayerData]:
config = _load_config(configname)
header = misc.DLCHeader.from_config(config)
metadata = _populate_metadata(
header,
size=config["dotsize"],
pcutoff=config["pcutoff"],
colormap=config["colormap"],
)
metadata["name"] = f"CollectedData_{config['scorer']}"
return [(None, metadata, "points")]
def read_images(path: Union[str, List[str]]) -> List[LayerData]:
if isinstance(path, list):
root, ext = os.path.splitext(path[0])
path = os.path.join(os.path.dirname(root), f"*{ext}")
# Retrieve filepaths exactly as parsed by pims
filepaths = []
for filepath in sorted(glob.glob(path)):
_, *relpath = filepath.rsplit(os.sep, 3)
filepaths.append(os.path.join(*relpath))
params = {
"name": "images",
"metadata": {
"paths": filepaths,
"root": os.path.split(path)[0]
}
}
return [(imread(path), params, "image")]
def read_hdf(filename: str) -> List[LayerData]:
    """Load DLC keypoint annotations from HDF file(s) into napari Points layers.

    *filename* may be a glob pattern; one layer tuple is produced per match.
    Each layer's ``data`` is an (n, 3) array of (frame index, y, x) rows.
    """
    layers = []
    # NOTE: the loop variable deliberately rebinds the `filename` parameter.
    for filename in glob.glob(filename):
        temp = pd.read_hdf(filename)
        header = misc.DLCHeader(temp.columns)
        temp = temp.droplevel("scorer", axis=1)
        if "individuals" not in temp.columns.names:
            # Append a fake level to the MultiIndex
            # to make it look like a multi-animal DataFrame
            old_idx = temp.columns.to_frame()
            old_idx.insert(0, "individuals", "")
            temp.columns = pd.MultiIndex.from_frame(old_idx)
        # One row per (frame, individual, bodypart) after stacking.
        df = temp.stack(["individuals", "bodyparts"]).reset_index()
        nrows = df.shape[0]
        data = np.empty((nrows, 3))
        image_paths = df["level_0"]
        if np.issubdtype(image_paths.dtype, np.number):
            # Index is already numeric frame numbers; no path→index map needed.
            image_inds = image_paths.values
            paths2inds = []
        else:
            image_inds, paths2inds = misc.encode_categories(image_paths, return_map=True)
        data[:, 0] = image_inds
        data[:, 1:] = df[["y", "x"]].to_numpy()
        metadata = _populate_metadata(
            header,
            labels=df["bodyparts"],
            ids=df["individuals"],
            likelihood=df.get("likelihood"),
            paths=list(paths2inds),
        )
        # Layer name = file basename without extension; root = its directory.
        metadata["name"] = os.path.split(filename)[1].split(".")[0]
        metadata["metadata"]["root"] = os.path.split(filename)[0]
        layers.append((data, metadata, "points"))
    return layers
def write_hdf(filename: str, data: Any, metadata: Dict) -> Optional[str]:
    """Write a napari Points layer back to a DLC-style HDF5 file.

    Rebuilds the wide (scorer, individuals, bodyparts, coords) MultiIndex
    DataFrame from the (frame, y, x) point array plus per-point properties.
    If the layer name contains "machine", the points are treated as refined
    model predictions and merged into (or named after) the ground-truth
    ``CollectedData_*`` file found in the layer's root folder.

    Returns the name of the file written (relative to ``meta["root"]``).
    """
    # data columns are (frame, y, x); -1:0:-1 picks (x, y).
    temp = pd.DataFrame(data[:, -1:0:-1], columns=["x", "y"])
    properties = metadata["properties"]
    meta = metadata["metadata"]
    temp["bodyparts"] = properties["label"]
    temp["individuals"] = properties["id"]
    temp["inds"] = data[:, 0].astype(int)
    temp["likelihood"] = properties["likelihood"]
    temp["scorer"] = meta["header"].scorer
    # Pivot long→wide: frames as rows, (scorer, individuals, bodyparts, coords)
    # as columns.
    df = temp.set_index(["scorer", "individuals", "bodyparts", "inds"]).stack()
    df.index = df.index.set_names("coords", -1)
    df = df.unstack(["scorer", "individuals", "bodyparts", "coords"])
    df.index.name = None
    # An empty first id means single-animal data: drop the individuals level.
    if not properties["id"][0]:
        df = df.droplevel("individuals", axis=1)
    df = df.reindex(meta["header"].columns, axis=1)
    if meta["paths"]:
        df.index = [meta["paths"][i] for i in df.index]
    name = metadata["name"]
    root = meta["root"]
    if "machine" in name:  # We are attempting to save refined model predictions
        df.drop("likelihood", axis=1, level="coords", inplace=True)
        header = misc.DLCHeader(df.columns)
        gt_file = ""
        for file in os.listdir(root):
            if file.startswith("CollectedData") and file.endswith("h5"):
                gt_file = file
                break
        if gt_file:  # Refined predictions must be merged into the existing data
            df_gt = pd.read_hdf(os.path.join(root, gt_file))
            new_scorer = df_gt.columns.get_level_values("scorer")[0]
            header.scorer = new_scorer
            df.columns = header.columns
            df = pd.concat((df, df_gt))
            # Refined rows (listed first) win over stale ground-truth rows.
            df = df[~df.index.duplicated(keep="first")]
            name = os.path.splitext(gt_file)[0]
        else:
            # Let us fetch the config.yaml file to get the scorer name...
            project_folder = root.rsplit(os.sep, 2)[0]
            config = _load_config(os.path.join(project_folder, "config.yaml"))
            new_scorer = config["scorer"]
            header.scorer = new_scorer
            df.columns = header.columns
            name = f"CollectedData_{new_scorer}"
    df.sort_index(inplace=True)
    filename = name + ".h5"
    df.to_hdf(os.path.join(root, filename), key="df_with_missing")
    return filename
def write_masks(foldername: str, data: Any, metadata: Dict) -> Optional[str]:
    """Save a napari Shapes layer as one binary PNG mask per polygon.

    Masks are written to ``<foldername-without-ext>/<image>_obj_<k>.png``,
    where ``k`` numbers the shapes belonging to the same frame. The raw shape
    vertices are additionally saved to ``vertices.csv`` in the same folder.

    Returns the output folder path.
    """
    folder, _ = os.path.splitext(foldername)
    os.makedirs(folder, exist_ok=True)
    filename = os.path.join(folder, "{}_obj_{}.png")
    shapes = Shapes(data, shape_type="polygon")
    meta = metadata["metadata"]
    # Each shape's frame index is stored in the first coordinate column.
    frame_inds = [int(array[0, 0]) for array in data]
    shape_inds = []
    # groupby over consecutive runs: shapes of the same frame are numbered 0..m-1.
    for _, group in groupby(frame_inds):
        shape_inds += range(sum(1 for _ in group))
    masks = shapes.to_masks(mask_shape=meta["shape"][1:])
    for n, mask in enumerate(masks):
        image_name = os.path.basename(meta["paths"][frame_inds[n]])
        output_path = filename.format(os.path.splitext(image_name)[0], shape_inds[n])
        imsave(output_path, img_as_ubyte(mask).squeeze(), check_contrast=False)
    napari_write_shapes(os.path.join(folder, "vertices.csv"), data, metadata)
    return folder
| [
"pandas.MultiIndex.from_frame",
"dlclabel.misc.DLCHeader",
"os.fspath",
"os.listdir",
"skimage.util.img_as_ubyte",
"os.path.split",
"numpy.issubdtype",
"napari.layers.Shapes",
"os.path.isdir",
"numpy.empty",
"pandas.DataFrame",
"pandas.read_hdf",
"glob.glob",
"os.path.splitext",
"os.path... | [((3236, 3270), 'dlclabel.misc.DLCHeader.from_config', 'misc.DLCHeader.from_config', (['config'], {}), '(config)\n', (3262, 3270), False, 'from dlclabel import misc\n'), ((4235, 4254), 'glob.glob', 'glob.glob', (['filename'], {}), '(filename)\n', (4244, 4254), False, 'import glob\n'), ((5697, 5747), 'pandas.DataFrame', 'pd.DataFrame', (['data[:, -1:0:-1]'], {'columns': "['x', 'y']"}), "(data[:, -1:0:-1], columns=['x', 'y'])\n", (5709, 5747), True, 'import pandas as pd\n'), ((7975, 8003), 'os.path.splitext', 'os.path.splitext', (['foldername'], {}), '(foldername)\n', (7991, 8003), False, 'import os\n'), ((8008, 8042), 'os.makedirs', 'os.makedirs', (['folder'], {'exist_ok': '(True)'}), '(folder, exist_ok=True)\n', (8019, 8042), False, 'import os\n'), ((8058, 8095), 'os.path.join', 'os.path.join', (['folder', '"""{}_obj_{}.png"""'], {}), "(folder, '{}_obj_{}.png')\n", (8070, 8095), False, 'import os\n'), ((8109, 8143), 'napari.layers.Shapes', 'Shapes', (['data'], {'shape_type': '"""polygon"""'}), "(data, shape_type='polygon')\n", (8115, 8143), False, 'from napari.layers import Shapes\n'), ((8270, 8289), 'itertools.groupby', 'groupby', (['frame_inds'], {}), '(frame_inds)\n', (8277, 8289), False, 'from itertools import groupby\n'), ((678, 693), 'os.fspath', 'os.fspath', (['path'], {}), '(path)\n', (687, 693), False, 'import os\n'), ((958, 977), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (971, 977), False, 'import os\n'), ((3109, 3129), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (3123, 3129), False, 'import yaml\n'), ((3649, 3674), 'os.path.splitext', 'os.path.splitext', (['path[0]'], {}), '(path[0])\n', (3665, 3674), False, 'import os\n'), ((3834, 3849), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (3843, 3849), False, 'import glob\n'), ((4271, 4292), 'pandas.read_hdf', 'pd.read_hdf', (['filename'], {}), '(filename)\n', (4282, 4292), True, 'import pandas as pd\n'), ((4310, 4338), 'dlclabel.misc.DLCHeader', 
'misc.DLCHeader', (['temp.columns'], {}), '(temp.columns)\n', (4324, 4338), False, 'from dlclabel import misc\n'), ((4818, 4838), 'numpy.empty', 'np.empty', (['(nrows, 3)'], {}), '((nrows, 3))\n', (4826, 4838), True, 'import numpy as np\n'), ((4886, 4929), 'numpy.issubdtype', 'np.issubdtype', (['image_paths.dtype', 'np.number'], {}), '(image_paths.dtype, np.number)\n', (4899, 4929), True, 'import numpy as np\n'), ((6694, 6720), 'dlclabel.misc.DLCHeader', 'misc.DLCHeader', (['df.columns'], {}), '(df.columns)\n', (6708, 6720), False, 'from dlclabel import misc\n'), ((6762, 6778), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (6772, 6778), False, 'import os\n'), ((7806, 7834), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (7818, 7834), False, 'import os\n'), ((8458, 8504), 'os.path.basename', 'os.path.basename', (["meta['paths'][frame_inds[n]]"], {}), "(meta['paths'][frame_inds[n]])\n", (8474, 8504), False, 'import os\n'), ((8695, 8731), 'os.path.join', 'os.path.join', (['folder', '"""vertices.csv"""'], {}), "(folder, 'vertices.csv')\n", (8707, 8731), False, 'import os\n'), ((999, 1015), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1009, 1015), False, 'import os\n'), ((3703, 3724), 'os.path.dirname', 'os.path.dirname', (['root'], {}), '(root)\n', (3718, 3724), False, 'import os\n'), ((3926, 3948), 'os.path.join', 'os.path.join', (['*relpath'], {}), '(*relpath)\n', (3938, 3948), False, 'import os\n'), ((4117, 4129), 'dask_image.imread.imread', 'imread', (['path'], {}), '(path)\n', (4123, 4129), False, 'from dask_image.imread import imread\n'), ((4673, 4706), 'pandas.MultiIndex.from_frame', 'pd.MultiIndex.from_frame', (['old_idx'], {}), '(old_idx)\n', (4697, 4706), True, 'import pandas as pd\n'), ((5054, 5106), 'dlclabel.misc.encode_categories', 'misc.encode_categories', (['image_paths'], {'return_map': '(True)'}), '(image_paths, return_map=True)\n', (5076, 5106), False, 'from dlclabel import misc\n'), ((5515, 
5538), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (5528, 5538), False, 'import os\n'), ((7213, 7235), 'pandas.concat', 'pd.concat', (['(df, df_gt)'], {}), '((df, df_gt))\n', (7222, 7235), True, 'import pandas as pd\n'), ((4065, 4084), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (4078, 4084), False, 'import os\n'), ((7019, 7046), 'os.path.join', 'os.path.join', (['root', 'gt_file'], {}), '(root, gt_file)\n', (7031, 7046), False, 'import os\n'), ((7311, 7336), 'os.path.splitext', 'os.path.splitext', (['gt_file'], {}), '(gt_file)\n', (7327, 7336), False, 'import os\n'), ((7517, 7560), 'os.path.join', 'os.path.join', (['project_folder', '"""config.yaml"""'], {}), "(project_folder, 'config.yaml')\n", (7529, 7560), False, 'import os\n'), ((8543, 8571), 'os.path.splitext', 'os.path.splitext', (['image_name'], {}), '(image_name)\n', (8559, 8571), False, 'import os\n'), ((1472, 1498), 'os.path.join', 'os.path.join', (['path', '"""*.h5"""'], {}), "(path, '*.h5')\n", (1484, 1498), False, 'import os\n'), ((8619, 8637), 'skimage.util.img_as_ubyte', 'img_as_ubyte', (['mask'], {}), '(mask)\n', (8631, 8637), False, 'from skimage.util import img_as_ubyte\n'), ((5435, 5458), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (5448, 5458), False, 'import os\n'), ((1195, 1217), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (1211, 1217), False, 'import os\n')] |
#!/usr/bin/env python2
# coding=utf-8
"""
The default per-repository configuration
"""
import sys
import json
import string
from os.path import exists, dirname
from gitver.defines import CFGFILE
from termcolors import term, bold
default_config_text = """{
# automatically generated configuration file
#
# These defaults implement Semantic Versioning as described in the latest
# available documentation at http://semver.org/spec/v2.0.0.html
# by default, terminal output is NOT colorized for compatibility with older
# terminal emulators: you may enable this if you like a more modern look
"use_terminal_colors": false,
# prevent gitver from storing any information in its configuration directory
# if the .gitignore file doesn't exclude it from the repository
"safe_mode": true,
# default pre-release metadata when commit count > 0 AND
# no NEXT has been defined
"default_meta_pr_in_next_no_next": "NEXT",
# default pre-release metadata when commit count > 0
"default_meta_pr_in_next": "SNAPSHOT",
# default pre-release metadata prefix
"meta_pr_prefix": "-",
# default commit count prefix
"commit_count_prefix": ".",
# Python-based format string variable names are:
# maj, min, patch, rev, rev_prefix, meta_pr_prefix, meta_pr,
# commit_count_prefix, commit_count, build_id, build_id_full
#
# Note that prefixes will be empty strings if their valued counterpart
# doesn't have a meaningful value (i.e., 0 for commit count, no meta
# pre-release, ..)
# format string used to build the current version string when the
# commit count is 0
"format": "%(maj)s.%(min)s.%(patch)s%(rev_prefix)s%(rev)s%(meta_pr_prefix)s%(meta_pr)s",
# format string used to build the current version string when the
# commit count is > 0
"format_next": "%(maj)s.%(min)s.%(patch)s%(rev_prefix)s%(rev)s%(meta_pr_prefix)s%(meta_pr)s%(commit_count_prefix)s%(commit_count)s+%(build_id)s"
}"""
def remove_comments(text):
    """
    Removes line comments denoted by sub-strings starting with a '#'
    character from the specified string, construct a new text and returns it.
    """
    # str.split/str.startswith replace the long-deprecated (and Python 3-removed)
    # string.split() helper, so this also runs unchanged under Python 3.
    # Note: surviving lines are concatenated WITHOUT newlines — this matches the
    # original behaviour, and json.loads (the only consumer) tolerates it.
    return ''.join(
        line for line in text.split('\n')
        if not line.strip().startswith('#')
    )
default_config = json.loads(remove_comments(default_config_text))
def create_default_configuration_file():
    """
    Creates a default configuration file from the default gitver's
    configuration text string in the predefined gitver's configuration
    directory.

    Returns True if the file was written, False if it already exists or its
    directory is missing.
    """
    # Guard clauses: never overwrite an existing configuration file, and
    # require the configuration directory to already exist.
    if exists(CFGFILE):
        return False
    if not exists(dirname(CFGFILE)):
        return False
    with open(CFGFILE, 'w') as f:
        f.writelines(default_config_text)
    return True
def load_user_config():
    """
    Returns the gitver's configuration: tries to read the stored configuration
    file and merges it with the default one, ensuring a valid configuration is
    always returned.
    """
    try:
        with open(CFGFILE, 'r') as f:
            # Strip '#' line comments before handing the text to json.loads.
            data = ''
            for line in f:
                l = line.strip()
                if not l.startswith('#'):
                    data += l
            user = json.loads(data)
    except IOError:
        # Missing/unreadable file is fine: fall back to pure defaults.
        user = dict()
    except (ValueError, KeyError) as v:
        # NOTE(review): v.message is Python 2-only (this file targets python2
        # per its shebang); it would need str(v) under Python 3.
        term.err("An error occured parsing the configuration file \"" +
                 CFGFILE + "\": " + v.message +
                 "\nPlease check its syntax or rename it and generate the "
                 "default one with the " + bold("gitver init") + " command.")
        sys.exit(1)

    # merge user with defaults
    return dict(default_config, **user)
| [
"os.path.exists",
"json.loads",
"string.split",
"os.path.dirname",
"termcolors.bold",
"sys.exit"
] | [((2214, 2238), 'string.split', 'string.split', (['text', '"""\n"""'], {}), "(text, '\\n')\n", (2226, 2238), False, 'import string\n'), ((2649, 2664), 'os.path.exists', 'exists', (['CFGFILE'], {}), '(CFGFILE)\n', (2655, 2664), False, 'from os.path import exists, dirname\n'), ((2684, 2700), 'os.path.dirname', 'dirname', (['CFGFILE'], {}), '(CFGFILE)\n', (2691, 2700), False, 'from os.path import exists, dirname\n'), ((3282, 3298), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (3292, 3298), False, 'import json\n'), ((3665, 3676), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3673, 3676), False, 'import sys\n'), ((3622, 3641), 'termcolors.bold', 'bold', (['"""gitver init"""'], {}), "('gitver init')\n", (3626, 3641), False, 'from termcolors import term, bold\n')] |
from random import randint
import re; import json
class Passenger:
    """Plain data record for one registered passenger.

    Validation of the fields lives in ValidateField; this class only stores
    them. passengerId is a randomly generated 7-digit id (see registration()).
    """

    def __init__(self, passengerId, passengerName, email, password, address, contact):
        self.passengerId = passengerId
        self.passengerName = passengerName
        self.email = email
        self.password = password
        self.address = address
        self.contact = contact
class ValidateField:
    """Field-level validation for a Passenger record.

    Each ``check*`` method prints an error message and returns None when the
    corresponding field is invalid, and returns True when it passes.
    """

    def __init__(self, passengerObj):
        self.passengerObj = passengerObj

    def checkPassengerName(self):
        """Name must be purely alphabetic (spaces allowed), at most 50 chars."""
        name = self.passengerObj.passengerName
        if not "".join(name.split()).isalpha():
            print("\nEnter a valid name.")
            return None
        if len(name) > 50:
            print("\nName should not exceed 50 characters.")
            return None
        return True

    def checkEmail(self):
        """Email must match a simple user@domain.tld pattern."""
        regex = '^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,4}$'
        if re.match(regex, self.passengerObj.email):
            return True
        print("\nPlease enter a valid email.")
        return None

    def checkPassword(self):
        """Password must be longer than 7 characters."""
        if len(self.passengerObj.password) > 7:
            return True
        print("\nPassword should be greater than 7.")
        return None

    def checkAddress(self):
        """Address must be non-empty and at most 100 characters."""
        address = self.passengerObj.address
        if len(address) > 100:
            print("\nAddress should not exceeds 100 characters.")
            return None
        if not len(address):
            print("\nAddress should not be empty.")
            return None
        return True

    def checkContact(self):
        """Contact number must have at most 10 digits."""
        if len(str(self.passengerObj.contact)) > 10:
            print("\nContact number should not exceeds 10 characters.")
            return None
        return True
def displayPassengers(passengerList):
    """Pretty-print every registered passenger as JSON, or report no data."""
    if not passengerList:
        print("\nNo data found.")
        return
    print(json.dumps([p.__dict__ for p in passengerList], indent=4))
def registration(passengerList):
    """Interactively collect, validate and register one passenger.

    Appends a new Passenger to ``passengerList`` only if every field passes
    validation; any exception (e.g. a non-numeric contact) is printed, never
    raised.
    """
    try:
        # 7-digit random id; the ``or 0000000`` fallback is dead code since
        # randint(1000000, 9999999) is always truthy.
        passengerId = int(randint(1000000,9999999) or 0000000)
        print("\nPassenger ID:", passengerId)
        passengerName = input("Enter the passenger name:")
        email = input("Enter email:")
        password = input("Enter password:")
        address = input("Enter address:").capitalize()
        contact = int(input("Enter contact number:"))
        passengerObj = Passenger(passengerId, passengerName, email, password, address, contact)
        v = ValidateField(passengerObj)
        # Short-circuits: validation stops at (and reports) the first failure.
        if v.checkPassengerName() and v.checkEmail() and v.checkPassword() and v.checkAddress() and v.checkContact():
            passengerList.append(passengerObj)
            print("\nPassenger Registration is Sucessful!")
    except Exception as e:
        print("Error:", e)
if __name__ == '__main__':
    # Simple console menu; loops until the user enters anything other than 1 or 2.
    print("="*52+"\nPASSENGER REGISTRATION\n"+"="*52)
    print("1. Enter 1 to register a passenger.")
    print("2. Enter 2 to display all the registered passengers.")
    print("3. Enter -1 to exit.\n"+"-"*52)
    passengerList = []
    while True:
        # Empty input falls back to -1, which exits the loop.
        c = int(input("Enter you choice:") or -1)
        if c==1:
            registration(passengerList)
        elif c==2:
            displayPassengers(passengerList)
        else:
            break
        print("-"*52)
| [
"json.dumps",
"re.match",
"random.randint"
] | [((899, 939), 're.match', 're.match', (['regex', 'self.passengerObj.email'], {}), '(regex, self.passengerObj.email)\n', (907, 939), False, 'import re\n'), ((1764, 1821), 'json.dumps', 'json.dumps', (['[p.__dict__ for p in passengerList]'], {'indent': '(4)'}), '([p.__dict__ for p in passengerList], indent=4)\n', (1774, 1821), False, 'import json\n'), ((1940, 1965), 'random.randint', 'randint', (['(1000000)', '(9999999)'], {}), '(1000000, 9999999)\n', (1947, 1965), False, 'from random import randint\n')] |
import torch
from torch.distributions import Uniform
from rl_sandbox.constants import CPU
class UniformPrior:
    """Thin wrapper around torch.distributions.Uniform used as a prior.

    Samples are moved onto ``self.device`` after being drawn.
    """

    def __init__(self, low, high, device=torch.device(CPU)):
        # NOTE: the default device object is evaluated once, at class-definition
        # time, not per call.
        self.device = device
        self.dist = Uniform(low=low, high=high)

    def sample(self, num_samples):
        # num_samples is forwarded as ``sample_shape`` — presumably a shape
        # tuple/torch.Size rather than a plain int; TODO confirm with callers.
        return self.dist.rsample(sample_shape=num_samples).to(self.device)

    def lprob(self, samples):
        # Log-probability of ``samples`` under the uniform distribution.
        return self.dist.log_prob(samples)
| [
"torch.distributions.Uniform",
"torch.device"
] | [((155, 172), 'torch.device', 'torch.device', (['CPU'], {}), '(CPU)\n', (167, 172), False, 'import torch\n'), ((224, 251), 'torch.distributions.Uniform', 'Uniform', ([], {'low': 'low', 'high': 'high'}), '(low=low, high=high)\n', (231, 251), False, 'from torch.distributions import Uniform\n')] |
import tweepy
import csv
class dealWithTwitter:
    """Downloads a user's recent tweets via tweepy and dumps them to a CSV."""

    def __init__(self):
        # NOTE(review): access_token/access_token_secret are never read;
        # loadTokens() sets self.access_key / self.access_secret instead, and
        # those are what get_all_tweets() actually uses.
        self.access_token = ""
        self.access_token_secret = ""
        self.consumer_key = ""
        self.consumer_secret = ""
        self.api = ""

    def loadTokens(self):
        """Read the four credentials from pwd.txt, one per line, in order:
        access key, access secret, consumer key, consumer secret.
        """
        tokens = []
        with open('pwd.txt') as pwd_file:
            for line in pwd_file:
                tokens.append(line.strip())
                # NOTE(review): this echoes the secrets to stdout.
                print(line.strip())
        self.access_key = tokens[0]
        self.access_secret = tokens[1]
        self.consumer_key = tokens[2]
        self.consumer_secret = tokens[3]

    def get_all_tweets(self, screen_name):
        """Fetch as many tweets of *screen_name* as the API allows and write
        them to ``<screen_name>_tweets.csv`` (id, created_at, utf-8 text).
        """
        # Twitter only allows access to a users most recent 3240 tweets with this method
        self.loadTokens()
        auth = tweepy.OAuthHandler(self.consumer_key, self.consumer_secret)
        auth.set_access_token(self.access_key, self.access_secret)
        self.api = tweepy.API(auth)
        # initialize a list to hold all the tweepy Tweets
        alltweets = []
        # make initial request for most recent tweets (200 is the maximum allowed count)
        new_tweets = self.api.user_timeline(screen_name=screen_name, count=200)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # save the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        while len(new_tweets) > 0:
            print("getting tweets before %s" % (oldest))
            # all subsiquent requests use the max_id param to prevent duplicates
            new_tweets = self.api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
            # save most recent tweets
            alltweets.extend(new_tweets)
            # update the id of the oldest tweet less one
            oldest = alltweets[-1].id - 1
            print("...%s tweets downloaded so far" % (len(alltweets)))
        # transform the tweepy tweets into a 2D array that will populate the csv
        outtweets = [[tweet.id_str, tweet.created_at, tweet.text.encode("utf-8")] for tweet in alltweets]
        # write the csv
        try:
            # NOTE(review): mode 'wb' with csv.writer only works on Python 2;
            # under Python 3 csv.writer requires a text-mode file object.
            with open('%s_tweets.csv' % screen_name, 'wb') as f:
                writer = csv.writer(f)
                writer.writerow(["id".encode(), "created_at".encode(), "text".encode()])
                writer.writerows(outtweets)
        except:
            # NOTE(review): bare except silently swallows every error,
            # including the CSV write failure it presumably guards against.
            pass
if __name__ == '__main__':
    t = dealWithTwitter()
    # NOTE(review): loadTokens() is also called inside get_all_tweets(), so
    # the token file is read (and echoed) twice here.
    t.loadTokens()
    t.get_all_tweets('realDonaldTrump')
    # t.connectToTwitter()
| [
"tweepy.API",
"csv.writer",
"tweepy.OAuthHandler"
] | [((762, 822), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['self.consumer_key', 'self.consumer_secret'], {}), '(self.consumer_key, self.consumer_secret)\n', (781, 822), False, 'import tweepy\n'), ((909, 925), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (919, 925), False, 'import tweepy\n'), ((2171, 2184), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (2181, 2184), False, 'import csv\n')] |
# TODO:
# - allow inheritance from GDScript class
# - overload native method ?
import pytest
from godot.bindings import ResourceLoader, GDScript, PluginScript
def test_native_method(node):
    """set_name/get_name round-trips on a native node; restores the old name."""
    original_name = node.get_name()
    try:
        node.set_name("foo")
        name = node.get_name()
        assert name == "foo"
    finally:
        node.set_name(original_name)
@pytest.mark.xfail
def test_overloaded_native_method(node, subnode):
    # Expected rendering of print_tree(); overloading a native method is not
    # supported yet (see the module-level TODO), hence the xfail mark.
    expected = """
    *
    ***
    *****
    |
    """
    ret = node.print_tree()
    assert ret == expected
    ret = subnode.print_tree()
    assert ret == expected
def test_node_ready_called(node):
    """The script's _ready hook must have run on the parent node fixture."""
    assert node.is_ready_called()
def test_subnode_ready_called(subnode):
    """Both the parent's and the child's ready hooks must have run."""
    assert subnode.is_ready_called()
    assert subnode.is_sub_ready_called()
def test_method_call(anynode):
    """A script-defined method is callable and echoes its argument."""
    ret = anynode.meth("foo")
    assert ret == "foo"
def test_overloaded_method_call(subnode):
    """The child's override of a parent script method is dispatched."""
    ret = subnode.overloaded_by_child_meth("foo")
    assert ret == "sub:foo"
def test_property_without_default_value(anynode):
    """A script property with no default value reads back as None."""
    value = anynode.prop
    assert value is None
def test_property(anynode):
    """A script property round-trips an assigned value."""
    anynode.prop = 42
    value = anynode.prop
    assert value == 42
@pytest.mark.xfail(reason="default value seems to be only set in .tscn")
def test_overloaded_property_default_value(pynode, pysubnode):
    """Default values of an overloaded property, parent vs child (xfail)."""
    # Parent property
    value = pynode.overloaded_by_child_prop
    assert value == "default"
    # Overloaded property
    value = pysubnode.overloaded_by_child_prop
    assert value == "sub:default"
def test_overloaded_property(pynode, pysubnode):
    """Setting/getting an overloaded property on parent and child instances."""
    # Not supported by GDScript

    # Parent property
    pynode.overloaded_by_child_prop = "foo"
    value = pynode.overloaded_by_child_prop
    assert value == "foo"
    # Overloaded property
    pysubnode.overloaded_by_child_prop = "foo"
    value = pysubnode.overloaded_by_child_prop
    assert value == "sub:foo"
def test_static_method_call(node):
    """A script static method is callable through an instance."""
    value = node.static_meth("foo")
    assert value == "static:foo"
@pytest.mark.parametrize(
    "path,expected_type",
    [("res://gdnode.gd", GDScript), ("res://pynode.py", PluginScript)],
)
def test_load_script(path, expected_type):
    """ResourceLoader yields the right script type and an instantiable script."""
    script = ResourceLoader.load(path, "", False)
    try:
        assert isinstance(script, expected_type)
        assert script.can_instance()
    finally:
        # Manual free: godot resources are not garbage collected here.
        script.free()
| [
"pytest.mark.xfail",
"pytest.mark.parametrize",
"godot.bindings.ResourceLoader.load"
] | [((1218, 1289), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""default value seems to be only set in .tscn"""'}), "(reason='default value seems to be only set in .tscn')\n", (1235, 1289), False, 'import pytest\n'), ((2036, 2154), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""path,expected_type"""', "[('res://gdnode.gd', GDScript), ('res://pynode.py', PluginScript)]"], {}), "('path,expected_type', [('res://gdnode.gd', GDScript\n ), ('res://pynode.py', PluginScript)])\n", (2059, 2154), False, 'import pytest\n'), ((2217, 2253), 'godot.bindings.ResourceLoader.load', 'ResourceLoader.load', (['path', '""""""', '(False)'], {}), "(path, '', False)\n", (2236, 2253), False, 'from godot.bindings import ResourceLoader, GDScript, PluginScript\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
def test_stmt_simplify():
    """CanonicalSimplify should remove the redundant bound check ``i < 12``
    once ``n`` is let-bound to 10 (so every loop iteration satisfies it)."""
    ib = tvm.ir_builder.create()
    A = ib.pointer("float32", name="A")
    C = ib.pointer("float32", name="C")
    n = tvm.var("n")
    with ib.for_range(0, n, name="i") as i:
        with ib.if_scope(i < 12):
            A[i] = C[i]
    body = tvm.stmt.LetStmt(n, 10, ib.get())
    body = tvm.ir_pass.CanonicalSimplify(body)
    # After simplification the guarded store is reachable directly.
    assert isinstance(body.body, tvm.stmt.Store)
assert isinstance(body.body, tvm.stmt.Store)
def test_thread_extent_simplify():
    """Bound checks implied by declared thread extents should also be
    simplified away by CanonicalSimplify."""
    ib = tvm.ir_builder.create()
    A = ib.pointer("float32", name="A")
    C = ib.pointer("float32", name="C")
    n = tvm.var("n")
    tx = tvm.thread_axis("threadIdx.x")
    ty = tvm.thread_axis("threadIdx.y")
    ib.scope_attr(tx, "thread_extent", n)
    # NOTE(review): the thread_extent attribute is attached to tx twice; the
    # duplicate looks redundant (ty gets its own extent below) — confirm intent.
    ib.scope_attr(tx, "thread_extent", n)
    ib.scope_attr(ty, "thread_extent", 1)
    with ib.if_scope(tx + ty < 12):
        A[tx] = C[tx + ty]
    body = tvm.stmt.LetStmt(n, 10, ib.get())
    body = tvm.ir_pass.CanonicalSimplify(body)
    assert isinstance(body.body.body.body, tvm.stmt.Store)
if __name__ == "__main__":
    # Run both simplification tests directly when executed as a script.
    test_stmt_simplify()
    test_thread_extent_simplify()
| [
"tvm.var",
"tvm.ir_pass.CanonicalSimplify",
"tvm.ir_builder.create",
"tvm.thread_axis"
] | [((832, 855), 'tvm.ir_builder.create', 'tvm.ir_builder.create', ([], {}), '()\n', (853, 855), False, 'import tvm\n'), ((944, 956), 'tvm.var', 'tvm.var', (['"""n"""'], {}), "('n')\n", (951, 956), False, 'import tvm\n'), ((1116, 1151), 'tvm.ir_pass.CanonicalSimplify', 'tvm.ir_pass.CanonicalSimplify', (['body'], {}), '(body)\n', (1145, 1151), False, 'import tvm\n'), ((1247, 1270), 'tvm.ir_builder.create', 'tvm.ir_builder.create', ([], {}), '()\n', (1268, 1270), False, 'import tvm\n'), ((1359, 1371), 'tvm.var', 'tvm.var', (['"""n"""'], {}), "('n')\n", (1366, 1371), False, 'import tvm\n'), ((1381, 1411), 'tvm.thread_axis', 'tvm.thread_axis', (['"""threadIdx.x"""'], {}), "('threadIdx.x')\n", (1396, 1411), False, 'import tvm\n'), ((1421, 1451), 'tvm.thread_axis', 'tvm.thread_axis', (['"""threadIdx.y"""'], {}), "('threadIdx.y')\n", (1436, 1451), False, 'import tvm\n'), ((1697, 1732), 'tvm.ir_pass.CanonicalSimplify', 'tvm.ir_pass.CanonicalSimplify', (['body'], {}), '(body)\n', (1726, 1732), False, 'import tvm\n')] |
"""The Worker class, which manages running policy evaluations."""
import datetime
import grpc
import gym
import os
from google.protobuf import empty_pb2
from proto.neuroevolution_pb2 import Evaluation, Individual
from proto.neuroevolution_pb2_grpc import NeuroStub
from worker.policy import Policy
ENVIRONMENT = os.getenv("ENVIRONMENT", "Venture-v4")
HOST = os.getenv("HOST_ADDRESS", "127.0.0.1") + ":" + os.getenv("HOST_PORT", "8080")
MUTATION_STRENGTH = float(os.getenv("MUTATION_STRENGTH", "0.005"))
class Worker:
    """Worker manages the evaluation of candidate policies from the master server.

    Attributes:
        client (NeuroStub): The client stub to the master server.
        env (gym.Env): The gym environment being evaluated.
        policy (Policy): The policy network, with changeable weights.
        strength (float): The genetic mutation strength.
    """

    def __init__(self, env_name=ENVIRONMENT, strength=MUTATION_STRENGTH, host=HOST):
        """Creates a Worker instance.

        Args:
            env_name (string): The valid gym environment name.
            strength (float): The genetic mutation strength.
            host (string): The hostname of the master server.
        """
        self.client = NeuroStub(grpc.insecure_channel(host))
        self.env = gym.make(env_name)
        self.policy = Policy(self.env.action_space.n)
        self.strength = strength
        print("Host:", host)
        print("Environment:", env_name)
        print("Mutation Strength:", strength)

    def seek(self):
        """Gets a new set of seeds to try from the master server.

        Returns:
            seeds (list of ints): The seed sequence defining the next policy
                to try out.
        """
        # 30s RPC timeout so a dead master does not hang the worker forever.
        return self.client.Seek(empty_pb2.Empty(), timeout=30).seeds

    def show(self, seeds, score):
        """Sends the seeds and corresponding score to the master server.

        Args:
            seeds (list of ints): The seed sequence defining a policy.
            score (float): The score it achieved on the environment.
        """
        self.client.Show(
            Evaluation(
                individual=Individual(
                    seeds=seeds,
                ),
                score=score,
            ),
            timeout=30,
        )

    def run_one(self):
        """Gets, evaluates, and reports a policy."""
        t = datetime.datetime.now()
        seeds = self.seek()
        self.policy.set_weights(seeds, self.strength)
        setup_time = datetime.datetime.now() - t
        t = datetime.datetime.now()
        i = 0
        score = 0
        done = False
        state = self.env.reset()
        while not done:
            action = self.policy.act(state)
            state, reward, done, _ = self.env.step(action)
            score += reward
            i += 1
            # Hard cap at 20000 frames per episode.
            if i >= 20000:
                break
        self.show(seeds, score)
        run_time = datetime.datetime.now() - t
        print(
            "Score: ", score,
            "Seeds: ", seeds,
            "Frames: ", i,
            "Setup Time: ", setup_time,
            "Run Time: ", run_time,
            "FPS during run: ", i / run_time.total_seconds()
        )

    def run(self):
        """Repeatedly gets, evaluates, and reports a policy."""
        while True:
            self.run_one()
| [
"proto.neuroevolution_pb2.Individual",
"worker.policy.Policy",
"os.getenv",
"grpc.insecure_channel",
"datetime.datetime.now",
"google.protobuf.empty_pb2.Empty",
"gym.make"
] | [((315, 353), 'os.getenv', 'os.getenv', (['"""ENVIRONMENT"""', '"""Venture-v4"""'], {}), "('ENVIRONMENT', 'Venture-v4')\n", (324, 353), False, 'import os\n'), ((408, 438), 'os.getenv', 'os.getenv', (['"""HOST_PORT"""', '"""8080"""'], {}), "('HOST_PORT', '8080')\n", (417, 438), False, 'import os\n'), ((465, 504), 'os.getenv', 'os.getenv', (['"""MUTATION_STRENGTH"""', '"""0.005"""'], {}), "('MUTATION_STRENGTH', '0.005')\n", (474, 504), False, 'import os\n'), ((361, 399), 'os.getenv', 'os.getenv', (['"""HOST_ADDRESS"""', '"""127.0.0.1"""'], {}), "('HOST_ADDRESS', '127.0.0.1')\n", (370, 399), False, 'import os\n'), ((1295, 1313), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (1303, 1313), False, 'import gym\n'), ((1336, 1367), 'worker.policy.Policy', 'Policy', (['self.env.action_space.n'], {}), '(self.env.action_space.n)\n', (1342, 1367), False, 'from worker.policy import Policy\n'), ((2391, 2414), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2412, 2414), False, 'import datetime\n'), ((2558, 2581), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2579, 2581), False, 'import datetime\n'), ((1247, 1274), 'grpc.insecure_channel', 'grpc.insecure_channel', (['host'], {}), '(host)\n', (1268, 1274), False, 'import grpc\n'), ((2518, 2541), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2539, 2541), False, 'import datetime\n'), ((2942, 2965), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2963, 2965), False, 'import datetime\n'), ((1771, 1788), 'google.protobuf.empty_pb2.Empty', 'empty_pb2.Empty', ([], {}), '()\n', (1786, 1788), False, 'from google.protobuf import empty_pb2\n'), ((2160, 2183), 'proto.neuroevolution_pb2.Individual', 'Individual', ([], {'seeds': 'seeds'}), '(seeds=seeds)\n', (2170, 2183), False, 'from proto.neuroevolution_pb2 import Evaluation, Individual\n')] |
#!/usr/bin/python3
import json
import pprint
import sys
import os
import numpy as np
import traceback
import random
import argparse
import json
import tensorflow
import keras
from keras import optimizers
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.layers.advanced_activations import LeakyReLU
from keras.preprocessing.image import img_to_array, load_img
from keras.callbacks import ModelCheckpoint, History
from PIL import Image
# start with PYTHONHASHSEED=89 (the hash seed must be pinned via the
# environment for fully reproducible runs; the seeds below cover numpy,
# python's random and tensorflow)
np.random.seed(44)
random.seed(22)
tensorflow.set_random_seed(11)
# session_conf = tensorflow.ConfigProto(intra_op_parallelism_threads=1,
#                                       inter_op_parallelism_threads=1)
# tf_sess = tensorflow.Session(graph=tensorflow.get_default_graph(), config=session_conf)
# keras.backend.set_session(tf_sess)
pp = pprint.PrettyPrinter()
# supported run modes; `mode` is validated against this list further down
modes = ['train', 'predict', 'validate']
aparser = argparse.ArgumentParser()
aparser.add_argument('-tSet', help='Choose source training set (from augmentation)')
aparser.add_argument('mode', help=str(modes))
aparser.add_argument('name', help='Name of this particular run')
aparser.add_argument('-augRunName', help='Name of the source augmentation')
aparser.add_argument('-ls', help='List all current models', action='store_true')
aparser.add_argument('-useTdata', help='Use training data for prediction/validation instead of validation data', action='store_true')
aparser.add_argument('-pFile', help='prediction mode: name of the image file to predict')
aparser.add_argument('-pathCap', help='Specify path to capture-output', nargs=1)
aparser.add_argument('-pathModel', help='Specify path to models', nargs=1)
aparser.add_argument('-pathAug', help='Specify path to augmentation-output', nargs=1)
aparser.add_argument('-save', help='Save config into cfg.json', action='store_true')
args = aparser.parse_args()
# persisted configuration: command-line values take precedence over entries
# loaded from cfg.json; missing keys are filled from the arguments
if os.path.exists('cfg.json'):
    with open('cfg.json', 'r') as cfgfile:
        cfg = json.load(cfgfile)
else:
    cfg = {}
if args.pathCap or 'capturepath' not in cfg:
    cfg['capturepath'] = args.pathCap
if args.pathModel or 'modelpath' not in cfg:
    cfg['modelpath'] = args.pathModel
if args.pathAug or 'augpath' not in cfg:
    cfg['augpath'] = args.pathAug
if args.tSet or 'tSet' not in cfg:
    cfg['tSet'] = args.tSet
# NOTE(review): this condition tests args.name (a required positional, hence
# always truthy) but assigns args.augRunName, so cfg['nameOfRun'] is always
# overwritten (possibly with None). The condition was presumably meant to
# test args.augRunName -- confirm before relying on cfg.json round-trips.
if args.name or 'nameOfRun' not in cfg:
    cfg['nameOfRun'] = args.augRunName
if args.save:
    with open('cfg.json', 'w') as cfgfile:
        cfgfile.write(json.dumps(cfg, sort_keys=True, indent=2))
trainingSet = cfg['tSet']
mode = args.mode
nameofrun = args.name
predfile = args.pFile  # image filename used by the 'predict' mode
srcT = args.useTdata   # True: evaluate on training data instead of validation
assert mode in modes
# paths
modelpath = cfg['modelpath']
if args.ls:
    # just list the runs available for this training set and quit
    print('available runs: ' + str(os.listdir(os.path.join(modelpath, trainingSet))))
    sys.exit()
outpath = os.path.join(modelpath, trainingSet, nameofrun)
modelPathBare = os.path.join(outpath, nameofrun)
cpmodelPathBare = os.path.join(outpath, 'chkp')  # checkpoint directory
modelPath = modelPathBare + '.h5'
if not os.path.isdir(outpath):
    os.makedirs(outpath)
if not os.path.isdir(cpmodelPathBare):
    os.makedirs(cpmodelPathBare)
# pick the latest checkpoint: files are named <epoch:03d>.h5, so a plain
# lexicographic sort equals epoch order; None when no checkpoint exists yet
if len(os.listdir(cpmodelPathBare)):
    cpmodelPath = os.path.join(cpmodelPathBare, sorted(os.listdir(cpmodelPathBare))[-1])
    assert cpmodelPath.endswith('.h5')
else:
    cpmodelPath = None
# Build a fresh CNN regressor when neither a saved model nor a checkpoint
# exists; otherwise resume from the saved model (preferred) or the latest
# checkpoint. The net maps a 720x1280 RGB frame to two sigmoid outputs
# (normalized x/y coordinates) and is trained with MSE loss.
if not os.path.isfile(modelPath) and not cpmodelPath:
    model = Sequential()
    model.add(Conv2D(16, (3, 3), input_shape=(720, 1280, 3)))
    model.add(LeakyReLU(alpha=.3))
    # 2x3 pooling first: brings the 16:9 frame closer to square
    model.add(MaxPooling2D(pool_size=(2, 3)))
    model.add(Conv2D(32, (3, 3)))
    model.add(LeakyReLU(alpha=.3))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(32, (3, 3)))
    model.add(LeakyReLU(alpha=.3))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3)))
    model.add(LeakyReLU(alpha=.3))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # the model so far outputs 3D feature maps (height, width, features)
    model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
    model.add(Dense(64, kernel_initializer='random_uniform'))
    model.add(LeakyReLU(alpha=.3))
    #model.add(Dropout(0.5))
    model.add(Dense(2))
    model.add(Activation('sigmoid'))
    adaD = optimizers.Adadelta()
    model.compile(loss='mse', optimizer=adaD)
    startEpoch = 0
else:
    # load model
    if os.path.isfile(modelPath):
        model = load_model(modelPath)
        # load training cfg (cumulative epoch count from a previous run)
        if os.path.isfile(modelPathBare + '.json'):
            with open(modelPathBare + '.json', 'r') as jsonfile:
                modelcfg = json.load(jsonfile)
            startEpoch = modelcfg['epochsTrained']
        else:
            startEpoch = 0
    else:
        # resume from checkpoint; the filename encodes the epoch number
        model = load_model(cpmodelPath)
        startEpoch = int(os.path.basename(cpmodelPath).split('.')[0])
# target coordinates are normalized by these factors before training
# (presumably a dual-1920x1080-monitor setup -- TODO confirm)
scaleX = 1920 * 2
scaleY = 1080
# the capture json maps frame filenames to raw screen coordinates
with open(os.path.join(cfg['capturepath'], trainingSet + '.json')) as jsonfile:
    trainingdata = json.load(jsonfile)
dset = {}
for d in trainingdata:
    dset[d['f'].split('.')[0]] = (float(d['x']) / scaleX,
                                  float(d['y']) / scaleY)
tset = {}
tfiles = []
vset = {}
vfiles = []
trainDir = os.path.join(cfg['augpath'], cfg['nameOfRun'] + '-train', 'images')
valDir = os.path.join(cfg['augpath'], cfg['nameOfRun'] + '-validate', 'images')
# training images carry an augmentation suffix after '_'; strip it to look
# up the label of the source frame
for f in os.listdir(trainDir):
    tset[f] = dset[f.split('.')[0].split('_')[0]]
    tfiles.append(f)
for f in os.listdir(valDir):
    vset[f] = dset[f.split('.')[0]]
    vfiles.append(f)
# cap the batch size at 16 and make sure at least 16 batches fit the data
batch_size = min(16, len(tfiles) // 16)
print('{} training samples, {} validation samples'.format(len(tfiles), len(vfiles)))
print(' -> Batch size chosen: {}'.format(batch_size))
class DataGen(keras.utils.Sequence):
    """Keras Sequence that serves (image, target) batches from disk.

    Each batch is a pair ``(X, Y)``: ``X`` holds `batchSize` images of
    shape ``dim + (nChannels,)`` scaled into [0, 1), ``Y`` the matching
    2-component labels looked up by filename.
    """

    def __init__(self, filenames, path, labels, batchSize, dim, nChannels, shuffle=True):
        self.dim = dim
        self.batchSize = batchSize
        self.labels = labels
        self.filenames = filenames
        self.path = path
        self.nChannels = nChannels
        self.shuffle = shuffle
        # initialise (and possibly shuffle) the index order once up front
        self.on_epoch_end()

    def __len__(self):
        # number of complete batches; a trailing partial batch is dropped
        return len(self.filenames) // self.batchSize

    def __getitem__(self, index):
        start = index * self.batchSize
        batch_ids = self.indexes[start : start + self.batchSize]
        batch_names = [self.filenames[i] for i in batch_ids]
        return self._make_batch(batch_names)

    def on_epoch_end(self):
        # re-randomise the sample order between epochs when requested
        self.indexes = np.arange(len(self.filenames))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def _make_batch(self, batch_names):
        # allocate the batch buffers and fill them image by image
        X = np.empty((self.batchSize, *self.dim, self.nChannels))
        Y = np.empty((self.batchSize, 2))
        for slot, name in enumerate(batch_names):
            pixels = img_to_array(load_img(os.path.join(self.path, name)))
            pixels.reshape((720, 1280, 3))
            pixels *= 1.0/256.0
            X[slot] = pixels
            Y[slot] = np.asarray(self.labels[name])
        return X, Y
# ---- run-mode dispatch: train / predict / validate ----
if mode == 'train':
    # generators stream image batches from disk; training order is shuffled,
    # validation order is kept fixed so losses stay comparable across epochs
    training_generator = DataGen(tfiles, trainDir, tset, batch_size, (720, 1280), 3, shuffle=True)
    validation_generator = DataGen(vfiles, valDir, vset, batch_size, (720, 1280), 3, shuffle=False)
    # only write a checkpoint when validation loss improves
    checkpointer = ModelCheckpoint(filepath=os.path.join(cpmodelPathBare, '{epoch:03d}.h5'), verbose=1, save_best_only=True)
    hist = History()
    try:
        model.fit_generator(training_generator,
                            steps_per_epoch=len(tfiles) // batch_size,
                            epochs=50,
                            validation_data=validation_generator,
                            validation_steps=len(vfiles) // batch_size,
                            max_queue_size=4,
                            workers=4,
                            initial_epoch=startEpoch,
                            callbacks=[checkpointer, hist])
    except:
        # deliberately bare: also catches KeyboardInterrupt so that a manual
        # abort still falls through to the model save in `finally`
        print()
        traceback.print_exc()
    finally:
        print('hist: loss - validation loss')
        if 'loss' in hist.history:
            epochsTrained = len(hist.history['loss'])
            for l, vl in zip(hist.history['loss'], hist.history['val_loss']):
                print('{:.5f} - {:.5f}'.format(l, vl))
        else:
            print('N/A')
            epochsTrained = 0
        # always save your weights after training or during training
        model.save(modelPath)
        print('Saved model as "{}"'.format(modelPath))
        # persist the cumulative epoch count so a later run resumes correctly
        with open(modelPathBare + '.json', 'w') as jsonfile:
            jsonfile.write(json.dumps({'epochsTrained': epochsTrained + startEpoch}, sort_keys = True, indent = 2))
elif mode == 'predict':
    # print(model.summary())
    # pp.pprint(model.get_weights())
    X = np.empty((1, 720, 1280, 3))
    # BUGFIX: the image name used to be read from sys.argv[4], which breaks as
    # soon as optional flags are passed; use the parsed -pFile value instead,
    # which is also what the expected-value lookup below is keyed on.
    img = load_img(os.path.join(trainDir if srcT else valDir, predfile))
    x = img_to_array(img)
    x.reshape((720, 1280, 3))
    x = x / 256.0
    X[0,] = x
    output = model.predict(X, None, verbose=1)[0]
    print('output: ({:.5f}, {:.5f}) - unscaled: ({:5.2f}, {:5.2f})'.format(output[0], output[1], output[0] * scaleX, output[1] * scaleY))
    exp = np.asarray(tset[predfile] if srcT else vset[predfile])
    print('expected: ({:.5f}, {:.5f}) - unscaled: ({:5.2f}, {:5.2f})'.format(exp[0], exp[1], exp[0] * scaleX, exp[1] * scaleY))
elif mode == 'validate':
    # evaluate the whole chosen split and report per-file and mean MSE
    if srcT:
        files = tfiles
        validation_generator = DataGen(files, trainDir, tset, batch_size, (720, 1280), 3, shuffle=False)
    else:
        files = vfiles
        validation_generator = DataGen(files, valDir, vset, batch_size, (720, 1280), 3, shuffle=False)
    predictions = model.predict_generator(validation_generator, verbose=1)
    MSE = 0
    for f, pred in zip(files, predictions):
        exp = np.asarray(tset[f] if srcT else vset[f])
        mse = ((exp[0] - pred[0])**2 + (exp[1] - pred[1])**2) / 2
        print('{}: ({:.3f}, {:.3f}) -> ({:.3f}, {:.3f}) [mse: {:.3f}]'.format(f, exp[0], exp[1], pred[0], pred[1], mse))
        MSE += mse
    print('/MSE: {:.3f}'.format(MSE / len(files)))
| [
"keras.preprocessing.image.img_to_array",
"keras.layers.Conv2D",
"keras.callbacks.History",
"keras.layers.Activation",
"sys.exit",
"keras.layers.Dense",
"tensorflow.set_random_seed",
"keras.optimizers.Adadelta",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"json.dumps",
"numpy.... | [((609, 627), 'numpy.random.seed', 'np.random.seed', (['(44)'], {}), '(44)\n', (623, 627), True, 'import numpy as np\n'), ((628, 643), 'random.seed', 'random.seed', (['(22)'], {}), '(22)\n', (639, 643), False, 'import random\n'), ((644, 674), 'tensorflow.set_random_seed', 'tensorflow.set_random_seed', (['(11)'], {}), '(11)\n', (670, 674), False, 'import tensorflow\n'), ((953, 975), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {}), '()\n', (973, 975), False, 'import pprint\n'), ((1029, 1054), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1052, 1054), False, 'import argparse\n'), ((1991, 2017), 'os.path.exists', 'os.path.exists', (['"""cfg.json"""'], {}), "('cfg.json')\n", (2005, 2017), False, 'import os\n'), ((2913, 2960), 'os.path.join', 'os.path.join', (['modelpath', 'trainingSet', 'nameofrun'], {}), '(modelpath, trainingSet, nameofrun)\n', (2925, 2960), False, 'import os\n'), ((2977, 3009), 'os.path.join', 'os.path.join', (['outpath', 'nameofrun'], {}), '(outpath, nameofrun)\n', (2989, 3009), False, 'import os\n'), ((3028, 3057), 'os.path.join', 'os.path.join', (['outpath', '"""chkp"""'], {}), "(outpath, 'chkp')\n", (3040, 3057), False, 'import os\n'), ((5292, 5359), 'os.path.join', 'os.path.join', (["cfg['augpath']", "(cfg['nameOfRun'] + '-train')", '"""images"""'], {}), "(cfg['augpath'], cfg['nameOfRun'] + '-train', 'images')\n", (5304, 5359), False, 'import os\n'), ((5369, 5439), 'os.path.join', 'os.path.join', (["cfg['augpath']", "(cfg['nameOfRun'] + '-validate')", '"""images"""'], {}), "(cfg['augpath'], cfg['nameOfRun'] + '-validate', 'images')\n", (5381, 5439), False, 'import os\n'), ((5449, 5469), 'os.listdir', 'os.listdir', (['trainDir'], {}), '(trainDir)\n', (5459, 5469), False, 'import os\n'), ((5552, 5570), 'os.listdir', 'os.listdir', (['valDir'], {}), '(valDir)\n', (5562, 5570), False, 'import os\n'), ((2891, 2901), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2899, 2901), False, 'import sys\n'), ((3100, 
3122), 'os.path.isdir', 'os.path.isdir', (['outpath'], {}), '(outpath)\n', (3113, 3122), False, 'import os\n'), ((3128, 3148), 'os.makedirs', 'os.makedirs', (['outpath'], {}), '(outpath)\n', (3139, 3148), False, 'import os\n'), ((3156, 3186), 'os.path.isdir', 'os.path.isdir', (['cpmodelPathBare'], {}), '(cpmodelPathBare)\n', (3169, 3186), False, 'import os\n'), ((3192, 3220), 'os.makedirs', 'os.makedirs', (['cpmodelPathBare'], {}), '(cpmodelPathBare)\n', (3203, 3220), False, 'import os\n'), ((3229, 3256), 'os.listdir', 'os.listdir', (['cpmodelPathBare'], {}), '(cpmodelPathBare)\n', (3239, 3256), False, 'import os\n'), ((3484, 3496), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3494, 3496), False, 'from keras.models import Sequential\n'), ((4346, 4367), 'keras.optimizers.Adadelta', 'optimizers.Adadelta', ([], {}), '()\n', (4365, 4367), False, 'from keras import optimizers\n'), ((4465, 4490), 'os.path.isfile', 'os.path.isfile', (['modelPath'], {}), '(modelPath)\n', (4479, 4490), False, 'import os\n'), ((5067, 5086), 'json.load', 'json.load', (['jsonfile'], {}), '(jsonfile)\n', (5076, 5086), False, 'import json\n'), ((7484, 7493), 'keras.callbacks.History', 'History', ([], {}), '()\n', (7491, 7493), False, 'from keras.callbacks import ModelCheckpoint, History\n'), ((2076, 2094), 'json.load', 'json.load', (['cfgfile'], {}), '(cfgfile)\n', (2085, 2094), False, 'import json\n'), ((3425, 3450), 'os.path.isfile', 'os.path.isfile', (['modelPath'], {}), '(modelPath)\n', (3439, 3450), False, 'import os\n'), ((3512, 3558), 'keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'input_shape': '(720, 1280, 3)'}), '(16, (3, 3), input_shape=(720, 1280, 3))\n', (3518, 3558), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3574, 3594), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.3)'}), '(alpha=0.3)\n', (3583, 3594), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3609, 3639), 
'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 3)'}), '(pool_size=(2, 3))\n', (3621, 3639), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3656, 3674), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (3662, 3674), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3690, 3710), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.3)'}), '(alpha=0.3)\n', (3699, 3710), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3725, 3755), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3737, 3755), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3772, 3790), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (3778, 3790), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3806, 3826), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.3)'}), '(alpha=0.3)\n', (3815, 3826), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3841, 3871), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3853, 3871), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3888, 3906), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (3894, 3906), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((3922, 3942), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.3)'}), '(alpha=0.3)\n', (3931, 3942), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3957, 3987), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3969, 3987), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((4077, 4086), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4084, 4086), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((4161, 
4207), 'keras.layers.Dense', 'Dense', (['(64)'], {'kernel_initializer': '"""random_uniform"""'}), "(64, kernel_initializer='random_uniform')\n", (4166, 4207), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((4223, 4243), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.3)'}), '(alpha=0.3)\n', (4232, 4243), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((4287, 4295), 'keras.layers.Dense', 'Dense', (['(2)'], {}), '(2)\n', (4292, 4295), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((4311, 4332), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (4321, 4332), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((4508, 4529), 'keras.models.load_model', 'load_model', (['modelPath'], {}), '(modelPath)\n', (4518, 4529), False, 'from keras.models import load_model\n'), ((4569, 4608), 'os.path.isfile', 'os.path.isfile', (["(modelPathBare + '.json')"], {}), "(modelPathBare + '.json')\n", (4583, 4608), False, 'import os\n'), ((4840, 4863), 'keras.models.load_model', 'load_model', (['cpmodelPath'], {}), '(cpmodelPath)\n', (4850, 4863), False, 'from keras.models import load_model\n'), ((4978, 5033), 'os.path.join', 'os.path.join', (["cfg['capturepath']", "(trainingSet + '.json')"], {}), "(cfg['capturepath'], trainingSet + '.json')\n", (4990, 5033), False, 'import os\n'), ((6726, 6779), 'numpy.empty', 'np.empty', (['(self.batchSize, *self.dim, self.nChannels)'], {}), '((self.batchSize, *self.dim, self.nChannels))\n', (6734, 6779), True, 'import numpy as np\n'), ((6792, 6821), 'numpy.empty', 'np.empty', (['(self.batchSize, 2)'], {}), '((self.batchSize, 2))\n', (6800, 6821), True, 'import numpy as np\n'), ((8838, 8865), 'numpy.empty', 'np.empty', (['(1, 720, 1280, 3)'], {}), '((1, 720, 1280, 3))\n', (8846, 8865), True, 'import numpy as np\n'), ((8950, 8967), 'keras.preprocessing.image.img_to_array', 'img_to_array', 
(['img'], {}), '(img)\n', (8962, 8967), False, 'from keras.preprocessing.image import img_to_array, load_img\n'), ((9228, 9282), 'numpy.asarray', 'np.asarray', (['(tset[predfile] if srcT else vset[predfile])'], {}), '(tset[predfile] if srcT else vset[predfile])\n', (9238, 9282), True, 'import numpy as np\n'), ((2576, 2617), 'json.dumps', 'json.dumps', (['cfg'], {'sort_keys': '(True)', 'indent': '(2)'}), '(cfg, sort_keys=True, indent=2)\n', (2586, 2617), False, 'import json\n'), ((6637, 6668), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (6654, 6668), True, 'import numpy as np\n'), ((6945, 6962), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (6957, 6962), False, 'from keras.preprocessing.image import img_to_array, load_img\n'), ((7075, 7105), 'numpy.asarray', 'np.asarray', (['self.labels[fname]'], {}), '(self.labels[fname])\n', (7085, 7105), True, 'import numpy as np\n'), ((7392, 7439), 'os.path.join', 'os.path.join', (['cpmodelPathBare', '"""{epoch:03d}.h5"""'], {}), "(cpmodelPathBare, '{epoch:03d}.h5')\n", (7404, 7439), False, 'import os\n'), ((8035, 8056), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (8054, 8056), False, 'import traceback\n'), ((8885, 8940), 'os.path.join', 'os.path.join', (['(trainDir if srcT else valDir)', 'sys.argv[4]'], {}), '(trainDir if srcT else valDir, sys.argv[4])\n', (8897, 8940), False, 'import os\n'), ((3314, 3341), 'os.listdir', 'os.listdir', (['cpmodelPathBare'], {}), '(cpmodelPathBare)\n', (3324, 3341), False, 'import os\n'), ((4702, 4721), 'json.load', 'json.load', (['jsonfile'], {}), '(jsonfile)\n', (4711, 4721), False, 'import json\n'), ((6897, 6927), 'os.path.join', 'os.path.join', (['self.path', 'fname'], {}), '(self.path, fname)\n', (6909, 6927), False, 'import os\n'), ((8650, 8737), 'json.dumps', 'json.dumps', (["{'epochsTrained': epochsTrained + startEpoch}"], {'sort_keys': '(True)', 'indent': '(2)'}), "({'epochsTrained': 
epochsTrained + startEpoch}, sort_keys=True,\n indent=2)\n", (8660, 8737), False, 'import json\n'), ((9860, 9900), 'numpy.asarray', 'np.asarray', (['(tset[f] if srcT else vset[f])'], {}), '(tset[f] if srcT else vset[f])\n', (9870, 9900), True, 'import numpy as np\n'), ((2847, 2883), 'os.path.join', 'os.path.join', (['modelpath', 'trainingSet'], {}), '(modelpath, trainingSet)\n', (2859, 2883), False, 'import os\n'), ((4889, 4918), 'os.path.basename', 'os.path.basename', (['cpmodelPath'], {}), '(cpmodelPath)\n', (4905, 4918), False, 'import os\n')] |
from dataclasses import dataclass, field
from typing import List
from xml.etree.ElementTree import QName
# target namespace of the generated schema binding below
__NAMESPACE__ = "http://schemas.microsoft.com/2003/10/Serialization/"


@dataclass
class Array:
    """Generated binding for the ``Array`` type of the Microsoft
    2003/10 serialization schema."""

    class Meta:
        namespace = "http://schemas.microsoft.com/2003/10/Serialization/"

    # serialized elements of the array; individual entries may be nil
    item: List[object] = field(
        default_factory=list,
        metadata={
            "name": "Item",
            "type": "Element",
            "namespace": "",
            "nillable": True,
        }
    )
    # XML Schema qualified name of the element type (defaults to xs:anyType)
    item_type: QName = field(
        default=QName("{http://www.w3.org/2001/XMLSchema}anyType"),
        metadata={
            "name": "ItemType",
            "type": "Attribute",
            "namespace": "http://schemas.microsoft.com/2003/10/Serialization/",
        }
    )
    # dimension sizes of the serialized array (space-separated token list)
    dimensions: List[int] = field(
        default_factory=lambda: [
            1,
        ],
        metadata={
            "name": "Dimensions",
            "type": "Attribute",
            "tokens": True,
        }
    )
    # per-dimension lower-bound indices (space-separated token list)
    lower_bounds: List[int] = field(
        default_factory=lambda: [
            0,
        ],
        metadata={
            "name": "LowerBounds",
            "type": "Attribute",
            "tokens": True,
        }
    )
| [
"xml.etree.ElementTree.QName",
"dataclasses.field"
] | [((318, 430), 'dataclasses.field', 'field', ([], {'default_factory': 'list', 'metadata': "{'name': 'Item', 'type': 'Element', 'namespace': '', 'nillable': True}"}), "(default_factory=list, metadata={'name': 'Item', 'type': 'Element',\n 'namespace': '', 'nillable': True})\n", (323, 430), False, 'from dataclasses import dataclass, field\n'), ((814, 923), 'dataclasses.field', 'field', ([], {'default_factory': '(lambda : [1])', 'metadata': "{'name': 'Dimensions', 'type': 'Attribute', 'tokens': True}"}), "(default_factory=lambda : [1], metadata={'name': 'Dimensions', 'type':\n 'Attribute', 'tokens': True})\n", (819, 923), False, 'from dataclasses import dataclass, field\n'), ((1041, 1151), 'dataclasses.field', 'field', ([], {'default_factory': '(lambda : [0])', 'metadata': "{'name': 'LowerBounds', 'type': 'Attribute', 'tokens': True}"}), "(default_factory=lambda : [0], metadata={'name': 'LowerBounds', 'type':\n 'Attribute', 'tokens': True})\n", (1046, 1151), False, 'from dataclasses import dataclass, field\n'), ((554, 604), 'xml.etree.ElementTree.QName', 'QName', (['"""{http://www.w3.org/2001/XMLSchema}anyType"""'], {}), "('{http://www.w3.org/2001/XMLSchema}anyType')\n", (559, 604), False, 'from xml.etree.ElementTree import QName\n')] |
from __future__ import print_function
try:
import h5py
WITH_H5PY = True
except ImportError:
WITH_H5PY = False
try:
import zarr
WITH_ZARR = True
from .io import IoZarr
except ImportError:
WITH_ZARR = False
try:
import z5py
WITH_Z5PY = True
from .io import IoN5
except ImportError:
WITH_Z5PY = False
import os
import json
from random import shuffle
import numpy as np
import re
import fnmatch
from .inference import load_input_crop
import dask
import toolz as tz
import logging
def _offset_list(shape, output_shape):
in_list = []
for z in np.arange(0, shape[0], output_shape[0]):
for y in np.arange(0, shape[1], output_shape[1]):
for x in np.arange(0, shape[2], output_shape[2]):
in_list.append([float(z), float(y), float(x)])
return in_list
# NOTE this will not cover the whole volume
def _offset_list_with_shift(shape, output_shape, shift):
in_list = []
for z in np.arange(0, shape[0], output_shape[0]):
for y in np.arange(0, shape[1], output_shape[1]):
for x in np.arange(0, shape[2], output_shape[2]):
in_list.append([min(float(z) + shift[0], shape[0]),
min(float(y) + shift[1], shape[1]),
min(float(x) + shift[2], shape[2])])
return in_list
# this returns the offsets for the given output blocks.
# blocks are padded on the fly during inference if necessary
def get_offset_lists(shape,
                     gpu_list,
                     save_folder,
                     output_shape,
                     randomize=False,
                     shift=None):
    """Partition the offsets of all output blocks tiling `shape` across GPUs.

    One ``list_gpu_<id>.json`` file per entry of `gpu_list` is written to
    `save_folder`; blocks are distributed round-robin so that every GPU
    gets an (almost) equal share.

    :param shape: shape of the full volume
    :param gpu_list: ids of the GPUs to distribute over
    :param save_folder: directory the offset lists are written to
    :param output_shape: shape of a single output block
    :param randomize: shuffle the block order before distributing
    :param shift: optional per-axis shift applied to every offset (clipped
        to the volume extent; the shifted grid will not cover the volume)
    """
    in_list = _offset_list(shape, output_shape) if shift is None else\
        _offset_list_with_shift(shape, output_shape, shift)
    if randomize:
        shuffle(in_list)
    n_splits = len(gpu_list)
    out_list = [in_list[i::n_splits] for i in range(n_splits)]
    if not os.path.exists(save_folder):
        # makedirs (instead of mkdir) also creates missing parent
        # directories, so nested save folders no longer fail
        os.makedirs(save_folder)
    for ii, olist in enumerate(out_list):
        list_name = os.path.join(save_folder, 'list_gpu_%i.json' % gpu_list[ii])
        with open(list_name, 'w') as f:
            json.dump(olist, f)
# this returns the offsets for the given output blocks and bounding box.
# blocks are padded on the fly during inference if necessary
def get_offset_lists_with_bb(shape,
                             gpu_list,
                             save_folder,
                             output_shape,
                             bb_start,
                             bb_stop,
                             randomize=False):
    """Partition the output-block offsets inside a bounding box across GPUs.

    The bounding box ``[bb_start, bb_stop]`` is snapped outward onto the
    output-block grid; one ``list_gpu_<id>.json`` per entry of `gpu_list`
    is written to `save_folder` with a round-robin share of the offsets.

    :param shape: shape of the full volume (unused for block generation,
        kept for interface symmetry with :func:`get_offset_lists`)
    :param gpu_list: ids of the GPUs to distribute over
    :param save_folder: directory the offset lists are written to
    :param output_shape: shape of a single output block
    :param bb_start: inclusive start corner of the bounding box
    :param bb_stop: end corner of the bounding box
    :param randomize: shuffle the block order before distributing
    """
    # zap the bounding box to grid defined by out_blocks
    bb_start_c = [(bbs // outs) * outs for bbs, outs in zip(bb_start, output_shape)]
    bb_stop_c = [(bbs // outs + 1) * outs for bbs, outs in zip(bb_stop, output_shape)]
    in_list = []
    for z in range(bb_start_c[0], bb_stop_c[0], output_shape[0]):
        for y in range(bb_start_c[1], bb_stop_c[1], output_shape[1]):
            for x in range(bb_start_c[2], bb_stop_c[2], output_shape[2]):
                in_list.append([z, y, x])
    if randomize:
        shuffle(in_list)
    n_splits = len(gpu_list)
    out_list = [in_list[i::n_splits] for i in range(n_splits)]
    if not os.path.exists(save_folder):
        # makedirs (instead of mkdir) also creates missing parent
        # directories, so nested save folders no longer fail
        os.makedirs(save_folder)
    for ii, olist in enumerate(out_list):
        list_name = os.path.join(save_folder, 'list_gpu_%i.json' % gpu_list[ii])
        with open(list_name, 'w') as f:
            json.dump(olist, f)
# redistributing offset lists from failed jobs
def redistribute_offset_lists(gpu_list, save_folder):
    """Re-split the blocks that have not been processed yet across `gpu_list`.

    Reads the full per-gpu offset lists (``list_gpu_<id>.json``) and the
    matching ``list_gpu_<id>_*_processed.txt`` progress files found in
    `save_folder`, computes the set of still-unprocessed blocks, backs up
    the current attempt under a ``_try<N>`` suffix and writes fresh
    ``list_gpu_<id>.json`` files containing only the remaining blocks.
    """
    p_full = re.compile("list_gpu_\d+.json")
    p_proc = re.compile("list_gpu_\d+_\S*_processed.txt")
    full_list_jsons = []
    processed_list_files = []
    # collect the original offset lists and the progress logs
    for f in os.listdir(save_folder):
        mo_full = p_full.match(f)
        mo_proc = p_proc.match(f)
        if mo_full is not None:
            full_list_jsons.append(f)
        if mo_proc is not None:
            processed_list_files.append(f)
    # union of all blocks that were ever scheduled
    full_block_list = set()
    for fl in full_list_jsons:
        with open(os.path.join(save_folder, fl), 'r') as f:
            bl = json.load(f)
            full_block_list.update({tuple(coo) for coo in bl})
    # union of all blocks already done; a progress file may have been cut off
    # mid-write, so everything after the last complete ']' is discarded
    processed_block_list = set()
    bls = []
    for pl in processed_list_files:
        with open(os.path.join(save_folder, pl), 'r') as f:
            bl_txt = f.read()
        bl_txt = '[' + bl_txt[:bl_txt.rfind(']') + 1] + ']'
        bls.append(json.loads(bl_txt))
        processed_block_list.update({tuple(coo) for coo in bls[-1]})
    to_be_processed_block_list = list(full_block_list - processed_block_list)
    # number this retry one higher than any previous try found on disk
    previous_tries = []
    p_tries = re.compile("list_gpu_\d+_try\d+.json")
    for f in os.listdir(save_folder):
        mo_tries = p_tries.match(f)
        if mo_tries is not None:
            previous_tries.append(f)
    if len(previous_tries) == 0:
        tryno = 0
    else:
        trynos = []
        for tr in previous_tries:
            trynos.append(int(tr.split('try')[1].split('.json')[0]))
        tryno = max(trynos)+1
    print('Backing up last try ({0:})'.format(tryno))
    # move the current lists and progress files out of the way
    for f in full_list_jsons:
        os.rename(os.path.join(save_folder,f), os.path.join(save_folder, f[:-5] + '_try{0:}.json'.format(tryno)))
    for f in processed_list_files:
        os.rename(os.path.join(save_folder,f), os.path.join(save_folder, f[:-4] + '_try{0:}.txt'.format(tryno)))
    # round-robin distribution of the remaining blocks; gpus with an empty
    # share get no new list file
    n_splits = len(gpu_list)
    out_list = [to_be_processed_block_list[i::n_splits] for i in range(n_splits)]
    for ii, olist in enumerate(out_list):
        if len(olist) > 0:
            list_name = os.path.join(save_folder, 'list_gpu_%i.json' % gpu_list[ii])
            with open(list_name, 'w') as f:
                json.dump(olist, f)
def load_ds(path, key):
    """Open the dataset `key` from the HDF5 / zarr / N5 container at `path`.

    The container format is picked by file extension. The returned dataset
    handle keeps the underlying file open: callers access ``.shape`` and
    ``.attrs`` lazily, so the file must not be closed here.

    :raises ValueError: if the file extension is not recognized
    """
    ext = os.path.splitext(path)[-1]
    if ext.lower() in ('.h5', '.hdf', '.hdf5'):
        assert WITH_H5PY
        # do NOT close the file: an h5py dataset becomes unusable once its
        # file is closed (the old `with` block returned a dead handle)
        f = h5py.File(path, 'r')
        ds = f[key]
    elif ext.lower() in ('.zr', '.zarr', '.n5'):
        assert WITH_Z5PY or WITH_ZARR
        if WITH_ZARR:
            f = zarr.open(path)
            ds = f[key]
        elif WITH_Z5PY:
            f = z5py.File(path)
            ds = f[key]
    else:
        # previously an unknown extension fell through to an opaque
        # NameError on `ds`; fail with a clear message instead
        raise ValueError("Unsupported file extension: {0:}".format(ext))
    return ds
def generate_list_for_mask(offset_file_json, output_shape_wc, path, mask_ds, n_cpus, mask_voxel_size=None):
    """Write the offsets (in world coordinates) of all output blocks that
    intersect the mask dataset to `offset_file_json`.

    The mask crop of every candidate block is loaded lazily via dask using
    `n_cpus` threads; a block is kept when its mask contains any nonzero
    voxel. The voxel size is taken from `mask_voxel_size` if given,
    otherwise read from the dataset attributes ("pixelResolution" or
    "resolution"), defaulting to isotropic 1 with a warning.
    """
    mask = load_ds(path, mask_ds)
    if mask_voxel_size is None:
        if "pixelResolution" in mask.attrs:
            mask_voxel_size = mask.attrs["pixelResolution"]["dimensions"]
        elif "resolution" in mask.attrs:
            mask_voxel_size = mask.attrs["resolution"]
        else:
            mask_voxel_size = (1,) * len(output_shape_wc)
            logging.warning("Did not find resolution information in attributes, defaulting to {0:}".format(mask_voxel_size))
    # dataset extent in world coordinates
    shape_wc = tuple(np.array(mask.shape) * np.array(mask_voxel_size))
    complete_offset_list = _offset_list(shape_wc, output_shape_wc)
    # pick whichever chunked-format IO backend is importable
    if WITH_Z5PY:
        io = IoN5(path, mask_ds, voxel_size=mask_voxel_size, channel_order=None)
    else:
        io = IoZarr(path, mask_ds, voxel_size=mask_voxel_size, channel_order=None)
    @dask.delayed()
    def load_offset(offset_wc):
        # crop the mask block at this offset (no context padding)
        return load_input_crop(io, offset_wc, (0,) * len(output_shape_wc), output_shape_wc, padding_mode="constant")[0]
    @dask.delayed()
    def evaluate_mask(mask_block):
        # keep the block if any mask voxel is set
        if np.sum(mask_block) > 0:
            return True
        else:
            return False
    # build the (offset, lazy-decision) pairs, then evaluate them in parallel
    offsets_mask_eval = []
    for offset_wc in complete_offset_list:
        keep_offset = tz.pipe(offset_wc, load_offset, evaluate_mask)
        offsets_mask_eval.append((offset_wc, keep_offset))
    offsets_mask_eval = dask.compute(*offsets_mask_eval, scheduler="threads", num_workers=n_cpus)
    offsets_in_mask = []
    for o, m in offsets_mask_eval:
        if m:
            offsets_in_mask.append(o)
    logging.info("{0:}/{1:} blocks contained in mask, saving offsets in {2:}".format(len(offsets_in_mask),
                                                                                    len(complete_offset_list),
                                                                                    offset_file_json))
    with open(offset_file_json, 'w') as f:
        json.dump(offsets_in_mask, f)
def generate_full_list(offset_file_json, output_shape_wc, path, raw_ds, raw_voxel_size=None):
    """Write the offsets of every output block tiling the raw dataset to JSON.

    The voxel size is taken from `raw_voxel_size` when given, otherwise
    read from the dataset attributes ("pixelResolution" or "resolution");
    if neither attribute exists, isotropic size 1 is assumed and a warning
    is logged.
    """
    raw = load_ds(path, raw_ds)
    if raw_voxel_size is None:
        attrs = raw.attrs
        if "pixelResolution" in attrs:
            raw_voxel_size = attrs["pixelResolution"]["dimensions"]
        elif "resolution" in attrs:
            raw_voxel_size = attrs["resolution"]
        else:
            raw_voxel_size = (1,) * len(output_shape_wc)
            logging.warning("Did not find resolution information in attributes, defaulting to {0:}".format(raw_voxel_size))
    # dataset extent in world coordinates
    extent_wc = tuple(np.array(raw.shape) * np.array(raw_voxel_size))
    all_offsets = _offset_list(extent_wc, output_shape_wc)
    with open(offset_file_json, "w") as f:
        json.dump(all_offsets, f)
# this returns the offsets for the given output blocks.
# blocks are padded on the fly in the inference if necessary
def offset_list_from_precomputed(input_list,
                                 gpu_list,
                                 save_folder,
                                 list_name_extension='',
                                 randomize=False):
    """Distribute a precomputed list of block offsets across GPUs.

    `input_list` may be the list itself or the path of a JSON file holding
    it. One ``list_gpu_<id><extension>.json`` per entry of `gpu_list` is
    written to `save_folder` with a round-robin share of the offsets.

    :param input_list: list of offsets or path to a JSON file containing it
    :param gpu_list: ids of the GPUs to distribute over
    :param save_folder: directory the offset lists are written to
    :param list_name_extension: optional suffix inserted into the filenames
    :param randomize: shuffle the offsets before distributing
    """
    if isinstance(input_list, str):
        with open(input_list, 'r') as f:
            input_list = json.load(f)
    else:
        assert isinstance(input_list, list)
    if randomize:
        shuffle(input_list)
    n_splits = len(gpu_list)
    out_list = [input_list[i::n_splits] for i in range(n_splits)]
    if not os.path.exists(save_folder):
        # makedirs (instead of mkdir) also creates missing parent
        # directories, so nested save folders no longer fail
        os.makedirs(save_folder)
    print("Original len", len(input_list))
    for ii, olist in enumerate(out_list):
        list_name = os.path.join(save_folder, 'list_gpu_{0:}{1:}.json'.format(gpu_list[ii], list_name_extension))
        print("Dumping list number", ii, "of len", len(olist))
        with open(list_name, 'w') as f:
            json.dump(olist, f)
def stitch_prediction_blocks(save_path,
                             block_folder,
                             shape,
                             key='data',
                             end_channel=None,
                             n_workers=8,
                             chunks=(1, 64, 64, 64)):
    """Assemble per-block prediction files into one HDF5 volume.

    Every ``block_*.h5`` file in `block_folder` encodes its spatial offset
    in the filename (``block_<z>_<y>_<x>.h5``); its ``data`` dataset is
    copied into the matching region of a new dataset `key` of shape `shape`
    at `save_path`. Blocks are written concurrently by `n_workers` threads.

    :param end_channel: if given, only channels ``[0, end_channel)`` are
        written; otherwise all channels
    """
    from concurrent import futures
    if end_channel is None:
        chan_slice = (slice(None),)
    else:
        assert end_channel <= shape[0]
        chan_slice = (slice(0, end_channel),)
    def stitch_block(ds, block_id, block_file, n_blocks):
        # copy one block's data into its target region of the output dataset
        print("Stitching block %i / %i" % (block_id, n_blocks))
        # the offsets are encoded in the filename: block_<z>_<y>_<x>.h5
        offsets = [int(off) for off in block_file[:-3].split('_')[1:]]
        with h5py.File(os.path.join(block_folder, block_file), 'r') as g:
            block_data = g['data'][:]
        block_shape = block_data.shape[1:]
        # Need to add slice for channel dimension
        bb = chan_slice + tuple(slice(off, off + block_shape[ii])
                                for ii, off in enumerate(offsets))
        ds[bb] = block_data
    with h5py.File(save_path, 'w') as f:
        ds = f.create_dataset(key,
                              shape=shape,
                              dtype='float32',
                              compression='gzip',
                              chunks=chunks)
        files = os.listdir(block_folder)
        # filter out invalid filenames
        files = [ff for ff in files if ff.startswith('block')]
        # make sure all blocks are h5 files
        assert all(ff[-3:] == '.h5' for ff in files)
        n_blocks = len(files)
        with futures.ThreadPoolExecutor(max_workers=n_workers) as tp:
            tasks = [tp.submit(stitch_block, ds, block_id, block_file, n_blocks)
                     for block_id, block_file in enumerate(files)]
            # propagate any worker exception
            [t.result() for t in tasks]
def extract_nn_affinities(save_prefix,
                          block_folder,
                          shape,
                          invert_affs=False):
    """Extract nearest-neighbor affinities from per-block prediction files.

    For every ``block_*.h5`` in `block_folder` the first three channels are
    read (offsets are encoded in the filename); the mean of channels 1 and 2
    is written to ``<save_prefix>_xy.h5`` and channel 0 to
    ``<save_prefix>_z.h5``, each as a ``data`` dataset of shape `shape`.

    :param invert_affs: if True, store ``1 - affinity`` instead
    """
    from concurrent import futures
    save_path_xy = save_prefix + '_xy.h5'
    save_path_z = save_prefix + '_z.h5'
    with h5py.File(save_path_xy, 'w') as f_xy, h5py.File(save_path_z, 'w') as f_z:
        ds_xy = f_xy.create_dataset('data',
                                    shape=shape,
                                    dtype='float32',
                                    compression='gzip',
                                    chunks=(56, 56, 56))
        ds_z = f_z.create_dataset('data',
                                  shape=shape,
                                  dtype='float32',
                                  compression='gzip',
                                  chunks=(56, 56, 56))
        files = os.listdir(block_folder)
        def extract_block(i, ff):
            # write one block's xy/z affinities into the output datasets
            print("Stitching block %i / %i" % (i, len(files)))
            # offsets are encoded in the filename: block_<z>_<y>_<x>.h5
            offsets = [int(off) for off in ff[:-3].split('_')[1:]]
            with h5py.File(os.path.join(block_folder, ff), 'r') as g:
                block_data = g['data'][:3]
            if invert_affs:
                block_data = 1. - block_data
            block_shape = block_data.shape[1:]
            # Need to add slice for channel dimension
            bb = tuple(slice(off, off + block_shape[ii]) for ii, off in enumerate(offsets))
            ds_xy[bb] = (block_data[1] + block_data[2]) / 2.
            ds_z[bb] = block_data[0]
        with futures.ThreadPoolExecutor(max_workers=20) as tp:
            tasks = []
            for i, ff in enumerate(files):
                # skip anything that is not a block file
                if not ff.startswith('block'):
                    continue
                assert ff[-3:] == '.h5'
                tasks.append(tp.submit(extract_block, i, ff))
            # propagate any worker exception
            [t.result() for t in tasks]
def reject_empty_batch(data):
    """Return True when the batch should be rejected, i.e. its values sum to zero.

    Note that this is a sum-based check: a batch whose positive and negative
    entries cancel out exactly is also rejected.
    """
    total = np.sum(data)
    return total == 0
| [
"toolz.pipe",
"re.compile",
"numpy.array",
"numpy.arange",
"os.path.exists",
"os.listdir",
"z5py.File",
"os.mkdir",
"json.loads",
"dask.delayed",
"random.shuffle",
"dask.compute",
"os.path.splitext",
"h5py.File",
"zarr.open",
"concurrent.futures.ThreadPoolExecutor",
"os.path.join",
... | [((592, 631), 'numpy.arange', 'np.arange', (['(0)', 'shape[0]', 'output_shape[0]'], {}), '(0, shape[0], output_shape[0])\n', (601, 631), True, 'import numpy as np\n'), ((968, 1007), 'numpy.arange', 'np.arange', (['(0)', 'shape[0]', 'output_shape[0]'], {}), '(0, shape[0], output_shape[0])\n', (977, 1007), True, 'import numpy as np\n'), ((3650, 3682), 're.compile', 're.compile', (['"""list_gpu_\\\\d+.json"""'], {}), "('list_gpu_\\\\d+.json')\n", (3660, 3682), False, 'import re\n'), ((3695, 3741), 're.compile', 're.compile', (['"""list_gpu_\\\\d+_\\\\S*_processed.txt"""'], {}), "('list_gpu_\\\\d+_\\\\S*_processed.txt')\n", (3705, 3741), False, 'import re\n'), ((3808, 3831), 'os.listdir', 'os.listdir', (['save_folder'], {}), '(save_folder)\n', (3818, 3831), False, 'import os\n'), ((4713, 4753), 're.compile', 're.compile', (['"""list_gpu_\\\\d+_try\\\\d+.json"""'], {}), "('list_gpu_\\\\d+_try\\\\d+.json')\n", (4723, 4753), False, 'import re\n'), ((4765, 4788), 'os.listdir', 'os.listdir', (['save_folder'], {}), '(save_folder)\n', (4775, 4788), False, 'import os\n'), ((7194, 7208), 'dask.delayed', 'dask.delayed', ([], {}), '()\n', (7206, 7208), False, 'import dask\n'), ((7367, 7381), 'dask.delayed', 'dask.delayed', ([], {}), '()\n', (7379, 7381), False, 'import dask\n'), ((7739, 7812), 'dask.compute', 'dask.compute', (['*offsets_mask_eval'], {'scheduler': '"""threads"""', 'num_workers': 'n_cpus'}), "(*offsets_mask_eval, scheduler='threads', num_workers=n_cpus)\n", (7751, 7812), False, 'import dask\n'), ((650, 689), 'numpy.arange', 'np.arange', (['(0)', 'shape[1]', 'output_shape[1]'], {}), '(0, shape[1], output_shape[1])\n', (659, 689), True, 'import numpy as np\n'), ((1026, 1065), 'numpy.arange', 'np.arange', (['(0)', 'shape[1]', 'output_shape[1]'], {}), '(0, shape[1], output_shape[1])\n', (1035, 1065), True, 'import numpy as np\n'), ((1833, 1849), 'random.shuffle', 'shuffle', (['in_list'], {}), '(in_list)\n', (1840, 1849), False, 'from random import shuffle\n'), 
((1955, 1982), 'os.path.exists', 'os.path.exists', (['save_folder'], {}), '(save_folder)\n', (1969, 1982), False, 'import os\n'), ((1992, 2013), 'os.mkdir', 'os.mkdir', (['save_folder'], {}), '(save_folder)\n', (2000, 2013), False, 'import os\n'), ((2077, 2137), 'os.path.join', 'os.path.join', (['save_folder', "('list_gpu_%i.json' % gpu_list[ii])"], {}), "(save_folder, 'list_gpu_%i.json' % gpu_list[ii])\n", (2089, 2137), False, 'import os\n'), ((3157, 3173), 'random.shuffle', 'shuffle', (['in_list'], {}), '(in_list)\n', (3164, 3173), False, 'from random import shuffle\n'), ((3279, 3306), 'os.path.exists', 'os.path.exists', (['save_folder'], {}), '(save_folder)\n', (3293, 3306), False, 'import os\n'), ((3316, 3337), 'os.mkdir', 'os.mkdir', (['save_folder'], {}), '(save_folder)\n', (3324, 3337), False, 'import os\n'), ((3401, 3461), 'os.path.join', 'os.path.join', (['save_folder', "('list_gpu_%i.json' % gpu_list[ii])"], {}), "(save_folder, 'list_gpu_%i.json' % gpu_list[ii])\n", (3413, 3461), False, 'import os\n'), ((5838, 5860), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (5854, 5860), False, 'import os\n'), ((7608, 7654), 'toolz.pipe', 'tz.pipe', (['offset_wc', 'load_offset', 'evaluate_mask'], {}), '(offset_wc, load_offset, evaluate_mask)\n', (7615, 7654), True, 'import toolz as tz\n'), ((8301, 8330), 'json.dump', 'json.dump', (['offsets_in_mask', 'f'], {}), '(offsets_in_mask, f)\n', (8310, 8330), False, 'import json\n'), ((9079, 9113), 'json.dump', 'json.dump', (['complete_offset_list', 'f'], {}), '(complete_offset_list, f)\n', (9088, 9113), False, 'import json\n'), ((9671, 9690), 'random.shuffle', 'shuffle', (['input_list'], {}), '(input_list)\n', (9678, 9690), False, 'from random import shuffle\n'), ((9799, 9826), 'os.path.exists', 'os.path.exists', (['save_folder'], {}), '(save_folder)\n', (9813, 9826), False, 'import os\n'), ((9836, 9857), 'os.mkdir', 'os.mkdir', (['save_folder'], {}), '(save_folder)\n', (9844, 9857), False, 'import 
os\n'), ((11262, 11287), 'h5py.File', 'h5py.File', (['save_path', '"""w"""'], {}), "(save_path, 'w')\n", (11271, 11287), False, 'import h5py\n'), ((11530, 11554), 'os.listdir', 'os.listdir', (['block_folder'], {}), '(block_folder)\n', (11540, 11554), False, 'import os\n'), ((12328, 12356), 'h5py.File', 'h5py.File', (['save_path_xy', '"""w"""'], {}), "(save_path_xy, 'w')\n", (12337, 12356), False, 'import h5py\n'), ((12366, 12393), 'h5py.File', 'h5py.File', (['save_path_z', '"""w"""'], {}), "(save_path_z, 'w')\n", (12375, 12393), False, 'import h5py\n'), ((12926, 12950), 'os.listdir', 'os.listdir', (['block_folder'], {}), '(block_folder)\n', (12936, 12950), False, 'import os\n'), ((13987, 13999), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (13993, 13999), True, 'import numpy as np\n'), ((712, 751), 'numpy.arange', 'np.arange', (['(0)', 'shape[2]', 'output_shape[2]'], {}), '(0, shape[2], output_shape[2])\n', (721, 751), True, 'import numpy as np\n'), ((1088, 1127), 'numpy.arange', 'np.arange', (['(0)', 'shape[2]', 'output_shape[2]'], {}), '(0, shape[2], output_shape[2])\n', (1097, 1127), True, 'import numpy as np\n'), ((2190, 2209), 'json.dump', 'json.dump', (['olist', 'f'], {}), '(olist, f)\n', (2199, 2209), False, 'import json\n'), ((3514, 3533), 'json.dump', 'json.dump', (['olist', 'f'], {}), '(olist, f)\n', (3523, 3533), False, 'import json\n'), ((4180, 4192), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4189, 4192), False, 'import json\n'), ((4507, 4525), 'json.loads', 'json.loads', (['bl_txt'], {}), '(bl_txt)\n', (4517, 4525), False, 'import json\n'), ((5213, 5241), 'os.path.join', 'os.path.join', (['save_folder', 'f'], {}), '(save_folder, f)\n', (5225, 5241), False, 'import os\n'), ((5362, 5390), 'os.path.join', 'os.path.join', (['save_folder', 'f'], {}), '(save_folder, f)\n', (5374, 5390), False, 'import os\n'), ((5661, 5721), 'os.path.join', 'os.path.join', (['save_folder', "('list_gpu_%i.json' % gpu_list[ii])"], {}), "(save_folder, 
'list_gpu_%i.json' % gpu_list[ii])\n", (5673, 5721), False, 'import os\n'), ((5950, 5970), 'h5py.File', 'h5py.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (5959, 5970), False, 'import h5py\n'), ((6879, 6899), 'numpy.array', 'np.array', (['mask.shape'], {}), '(mask.shape)\n', (6887, 6899), True, 'import numpy as np\n'), ((6902, 6927), 'numpy.array', 'np.array', (['mask_voxel_size'], {}), '(mask_voxel_size)\n', (6910, 6927), True, 'import numpy as np\n'), ((7428, 7446), 'numpy.sum', 'np.sum', (['mask_block'], {}), '(mask_block)\n', (7434, 7446), True, 'import numpy as np\n'), ((8913, 8932), 'numpy.array', 'np.array', (['raw.shape'], {}), '(raw.shape)\n', (8921, 8932), True, 'import numpy as np\n'), ((8935, 8959), 'numpy.array', 'np.array', (['raw_voxel_size'], {}), '(raw_voxel_size)\n', (8943, 8959), True, 'import numpy as np\n'), ((9577, 9589), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9586, 9589), False, 'import json\n'), ((10173, 10192), 'json.dump', 'json.dump', (['olist', 'f'], {}), '(olist, f)\n', (10182, 10192), False, 'import json\n'), ((11797, 11846), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': 'n_workers'}), '(max_workers=n_workers)\n', (11823, 11846), False, 'from concurrent import futures\n'), ((13610, 13652), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': '(20)'}), '(max_workers=20)\n', (13636, 13652), False, 'from concurrent import futures\n'), ((4121, 4150), 'os.path.join', 'os.path.join', (['save_folder', 'fl'], {}), '(save_folder, fl)\n', (4133, 4150), False, 'import os\n'), ((4356, 4385), 'os.path.join', 'os.path.join', (['save_folder', 'pl'], {}), '(save_folder, pl)\n', (4368, 4385), False, 'import os\n'), ((5782, 5801), 'json.dump', 'json.dump', (['olist', 'f'], {}), '(olist, f)\n', (5791, 5801), False, 'import json\n'), ((6126, 6141), 'zarr.open', 'zarr.open', (['path'], {}), '(path)\n', (6135, 6141), False, 'import zarr\n'), ((10909, 10947), 
'os.path.join', 'os.path.join', (['block_folder', 'block_file'], {}), '(block_folder, block_file)\n', (10921, 10947), False, 'import os\n'), ((13144, 13174), 'os.path.join', 'os.path.join', (['block_folder', 'ff'], {}), '(block_folder, ff)\n', (13156, 13174), False, 'import os\n'), ((6207, 6222), 'z5py.File', 'z5py.File', (['path'], {}), '(path)\n', (6216, 6222), False, 'import z5py\n')] |
"""
Add the taxonomy to the patric metadata file
"""
import os
import sys
import argparse
from taxon import get_taxonomy_db, get_taxonomy
c = get_taxonomy_db()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Append taxonomy to the patric metadata file. This adds it at column 67")
    parser.add_argument('-f', help='patric metadata file', required=True)
    parser.add_argument('-o', help='output file', required=True)
    parser.add_argument('-c', help='taxonomy ID column', required=True, type=int)
    parser.add_argument('-t', help='taxonomy directory (or we will use default)')
    parser.add_argument('-v', help='verbose output', action="store_true")
    args = parser.parse_args()
    # Taxonomic ranks to append, in output column order.
    want = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']
    # read the file once to figure out the longest line
    maxp=0
    with open(args.f, 'r', encoding='utf-8') as f:
        for l in f:
            p = l.strip().split("\t")
            if len(p) > maxp:
                maxp = len(p)
    with open(args.o, 'w', encoding='utf-8') as out:
        with open(args.f, 'r', encoding='utf-8') as f:
            for l in f:
                p = l.strip().split("\t")
                # Pad short rows so every row has the same column count
                # before the taxonomy columns are appended.
                while (len(p) < maxp):
                    p.append("")
                if l.startswith("genome_id"):
                    # Header row: append the rank names as new column headers.
                    out.write("{}\t{}\n".format(l.strip(), "\t".join(want)))
                    continue
                tid = p[args.c]
                level = {}
                # Walk up the lineage from the taxid, collecting the ranks we want.
                t, n = get_taxonomy(tid, c)
                while t and t.parent > 1 and t.parent != 131567:
                    # 131567 is cellular organisms
                    if t.rank in want:
                        level[t.rank] = n.scientific_name
                    t, n = get_taxonomy(t.parent, c)
                # Append one column per requested rank (blank when absent).
                for w in want:
                    if w in level:
                        p.append(level[w])
                    else:
                        p.append("")
                out.write("\t".join(map(str, p)))
                out.write("\n")
| [
"taxon.get_taxonomy_db",
"argparse.ArgumentParser",
"taxon.get_taxonomy"
] | [((144, 161), 'taxon.get_taxonomy_db', 'get_taxonomy_db', ([], {}), '()\n', (159, 161), False, 'from taxon import get_taxonomy_db, get_taxonomy\n'), ((203, 317), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Append taxonomy to the patric metadata file. This adds it at column 67"""'}), "(description=\n 'Append taxonomy to the patric metadata file. This adds it at column 67')\n", (226, 317), False, 'import argparse\n'), ((1531, 1551), 'taxon.get_taxonomy', 'get_taxonomy', (['tid', 'c'], {}), '(tid, c)\n', (1543, 1551), False, 'from taxon import get_taxonomy_db, get_taxonomy\n'), ((1793, 1818), 'taxon.get_taxonomy', 'get_taxonomy', (['t.parent', 'c'], {}), '(t.parent, c)\n', (1805, 1818), False, 'from taxon import get_taxonomy_db, get_taxonomy\n')] |
"""
Writing actual code might be hard to understand for new-learners. Pseudocode is a tool
for writing algorithms without knowing how to code. This module contains classes and
methods for parsing pseudocode to AST and then evaluating it.
Example:
If you installed this module with pip you can run pseudocode from file, i.e. to run
`test.pdc` file type::
$ pdc test.pdc
If you want to parse it by your own you will need `pseudo.lexer.Lexer` instance.::
from pseudo.lexer import Lexer
lex = Lexer("x := 12")
expression = lex.read_next()
print(expression)
If the lexer reaches the end of input, a `pseudo.stream.EndOfFile` exception will be raised.
"""
__author__ = "<NAME>"
__version__ = "0.11.0"
import gc
from pseudo.lexer import Lexer
from pseudo.stream import EndOfFile
from pseudo.utils import append
def compile(text_input: str, range_symbol: str = "...") -> list:
    """Parse pseudocode source text into a list of AST instructions.

    :param text_input: raw pseudocode source
    :param range_symbol: token used to denote ranges (defaults to "...")
    :return: list of parsed instruction nodes
    """
    lexer = Lexer(text_input)
    lexer.range_symbol = range_symbol
    instructions = []
    previous = None
    while True:
        try:
            previous = lexer.read_next(prev=previous)
        except EndOfFile:
            break
        instructions = append(instructions, previous)
    # Drop the lexer and force a collection before returning the AST.
    del lexer
    gc.collect()
    return instructions
| [
"pseudo.utils.append",
"pseudo.lexer.Lexer",
"gc.collect"
] | [((1001, 1018), 'pseudo.lexer.Lexer', 'Lexer', (['text_input'], {}), '(text_input)\n', (1006, 1018), False, 'from pseudo.lexer import Lexer\n'), ((1272, 1284), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1282, 1284), False, 'import gc\n'), ((1230, 1253), 'pseudo.utils.append', 'append', (['instructions', 'x'], {}), '(instructions, x)\n', (1236, 1253), False, 'from pseudo.utils import append\n')] |
import random
import os
import logging
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
# import faiss
################################################################################
# General-purpose #
################################################################################
def str_list(l):
    """Join the items of *l* into a single underscore-separated string."""
    return "_".join(map(str, l))
def set_logger(log_path):
    """Configure the root logger to log to *log_path* and to the console.

    File records carry a timestamp/level prefix; console records are the
    bare message.  Returns the configured root logger.
    """
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    # File output: timestamped, levelled records.
    to_file = logging.FileHandler(log_path)
    to_file.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
    root.addHandler(to_file)
    # Console output: plain messages only.
    to_console = logging.StreamHandler()
    to_console.setFormatter(logging.Formatter('%(message)s'))
    root.addHandler(to_console)
    return root
class Logger(object):
    """Persist a growing list of training records to disk.

    Every call to :meth:`log` appends one record and rewrites the whole
    pickle file at ``path``.
    """

    def __init__(self, path):
        self.path = path
        self.data = []

    def log(self, train_point):
        """Append *train_point* and dump the full history to ``self.path``."""
        self.data.append(train_point)
        with open(self.path, 'wb') as fp:
            pickle.dump(self.data, fp, -1)
class AverageMeter(object):
    """Track the latest value and running average of a scalar quantity."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def get_datetime(time_delta):
    """Format a duration in seconds as 'days:hours:minutes:seconds'.

    Fields are not zero-padded, e.g. ``get_datetime(90061) == '1:1:1:1'``.
    """
    days, remainder = divmod(time_delta, 24 * 3600)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '{}:{}:{}:{}'.format(days, hours, minutes, seconds)
################################################################################
# Metric-related ops #
################################################################################
def _fast_hist(label_true, label_pred, n_class):
mask = (label_true >= 0) & (label_true < n_class) # Exclude unlabelled data.
hist = np.bincount(n_class * label_true[mask] + label_pred[mask],\
minlength=n_class ** 2).reshape(n_class, n_class)
return hist
def scores(label_trues, label_preds, n_class):
    """Accumulate per-image confusion matrices over a whole dataset.

    :param label_trues: iterable of ground-truth label arrays
    :param label_preds: iterable of predicted label arrays (matching shapes)
    :param n_class: number of classes
    :return: (n_class, n_class) summed confusion matrix
    """
    total = np.zeros((n_class, n_class))
    for truth, pred in zip(label_trues, label_preds):
        total += _fast_hist(truth.flatten(), pred.flatten(), n_class)
    return total
def get_result_metrics(histogram):
    """Derive segmentation metrics (as percentages) from a confusion matrix.

    :param histogram: (n_class, n_class) matrix, rows = truth, cols = prediction
    :return: dict with per-class IoU/precision plus their means and pixel accuracy
    """
    true_pos = np.diag(histogram)
    per_class_pred = np.sum(histogram, 0)  # column sums: tp + fp
    per_class_true = np.sum(histogram, 1)  # row sums:    tp + fn
    false_pos = per_class_pred - true_pos
    false_neg = per_class_true - true_pos
    iou = true_pos / (true_pos + false_pos + false_neg)
    prc = true_pos / (true_pos + false_neg)
    opc = np.sum(true_pos) / np.sum(histogram)
    raw = {"iou": iou,
           "mean_iou": np.nanmean(iou),
           "precision_per_class (per class accuracy)": prc,
           "mean_precision (class-avg accuracy)": np.nanmean(prc),
           "overall_precision (pixel accuracy)": opc}
    # Report everything in percent.
    return {k: 100 * v for k, v in raw.items()}
def compute_negative_euclidean(featmap, centroids, metric_function):
    """Return negative squared L2 distances between pixel features and centroids.

    Uses the expansion ``||f - c||^2 = ||f||^2 - 2 f.c + ||c||^2``, where
    ``metric_function(featmap)`` supplies the dot-product term and the feature
    norm term is taken as the constant 1 (features presumably L2-normalised
    -- TODO confirm against the encoder).
    """
    centroids = centroids.unsqueeze(-1).unsqueeze(-1)
    sq_norms = (centroids * centroids).sum(dim=1).unsqueeze(0)
    # negative l2 squared
    return -(1 - 2 * metric_function(featmap) + sq_norms)
def get_metric_as_conv(centroids):
    """Wrap (N, C) centroids as a bias-free 1x1 convolution on the GPU.

    Applying the returned module to a (B, C, H, W) feature map yields the
    dot product between every pixel embedding and every centroid.
    Requires CUDA (the module is wrapped in DataParallel and moved to GPU).
    """
    N, C = centroids.size()
    # Each centroid becomes one (C, 1, 1) conv filter.
    centroids_weight = centroids.unsqueeze(-1).unsqueeze(-1)
    metric_function = nn.Conv2d(C, N, 1, padding=0, stride=1, bias=False)
    metric_function.weight.data = centroids_weight
    metric_function = nn.DataParallel(metric_function)
    metric_function = metric_function.cuda()
    return metric_function
################################################################################
# General torch ops #
################################################################################
def freeze_all(model):
    """Disable gradients for every parameter of a DataParallel-wrapped model.

    :param model: object exposing ``model.module.parameters()``
    """
    for weight in model.module.parameters():
        weight.requires_grad = False
def initialize_classifier(args):
    """Build the linear (1x1 conv) classifier head for training.

    :param args: must provide ``in_dim`` (feature channels) and ``K_train``
        (number of output clusters/classes)
    :return: DataParallel-wrapped classifier on GPU (requires CUDA)
    """
    classifier = get_linear(args.in_dim, args.K_train)
    classifier = nn.DataParallel(classifier)
    classifier = classifier.cuda()
    return classifier
def get_linear(indim, outdim):
    """Create a 1x1 convolution acting as a per-pixel linear classifier.

    Weights are drawn from N(0, 0.01); the bias starts at zero.
    """
    head = nn.Conv2d(indim, outdim, kernel_size=1, stride=1, padding=0, bias=True)
    head.weight.data.normal_(0, 0.01)
    head.bias.data.zero_()
    return head
def feature_flatten(feats):
    """Reshape (B, C, *spatial) feature maps to (B * prod(spatial), C).

    Already-flat 2-D inputs are returned unchanged.
    """
    if feats.dim() == 2:
        # feature already flattened.
        return feats
    batch, channels = feats.size(0), feats.size(1)
    flat = feats.view(batch, channels, -1).transpose(2, 1).contiguous()
    return flat.view(-1, channels)
################################################################################
# Faiss related #
################################################################################
def get_faiss_module(args):
    """Create a flat L2 faiss index on GPU 0 for ``args.in_dim``-dim vectors.

    NOTE(review): ``import faiss`` at the top of this file is commented out,
    so calling this raises NameError until that import is restored.
    """
    res = faiss.StandardGpuResources()
    cfg = faiss.GpuIndexFlatConfig()
    cfg.useFloat16 = False
    cfg.device = 0 #NOTE: Single GPU only.
    idx = faiss.GpuIndexFlatL2(res, args.in_dim, cfg)
    return idx
def get_init_centroids(args, K, featlist, index):
    """Run faiss k-means over ``featlist`` and return (K, in_dim) centroids.

    :param args: provides ``in_dim``, ``seed`` and ``kmeans_n_iter``
    :param K: number of clusters
    :param featlist: flattened feature matrix to cluster
    :param index: faiss index used for the assignment step
    NOTE(review): depends on the commented-out ``import faiss`` above.
    """
    clus = faiss.Clustering(args.in_dim, K)
    # Randomise the clustering seed (bounded by args.seed).
    clus.seed  = np.random.randint(args.seed)
    clus.niter = args.kmeans_n_iter
    clus.max_points_per_centroid = 10000000
    clus.train(featlist, index)
    return faiss.vector_float_to_array(clus.centroids).reshape(K, args.in_dim)
def module_update_centroids(index, centroids):
    """Replace the contents of *index* with *centroids*, in place.

    The index is cleared, repopulated, and returned for convenience.
    """
    index.reset()
    index.add(centroids)
    return index
def fix_seed_for_reproducability(seed):
    """Seed every RNG used in the pipeline (python, numpy, torch, CUDA).

    Unfortunately, backward() of [interpolate] functional seems to be never deterministic.
    Below are related threads:
    https://github.com/pytorch/pytorch/issues/7068
    https://discuss.pytorch.org/t/non-deterministic-behavior-of-pytorch-upsample-interpolate/42842?u=sbelharbi
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Trade cuDNN speed for run-to-run reproducibility.
    cudnn.deterministic = True
    cudnn.benchmark = False
def worker_init_fn(seed):
    """Return a DataLoader ``worker_init_fn`` seeding numpy with seed + worker id."""
    def _init(worker_id):
        np.random.seed(seed + worker_id)
    return _init
################################################################################
# Training Pipelines #
################################################################################
def postprocess_label(args, K, idx, idx_img, scores, n_dual):
    """Convert per-pixel cluster scores to hard labels, save them to disk, and
    return per-cluster pixel counts for loss re-weighting.

    :param args: must provide ``save_model_path``
    :param K: number of clusters (length of the returned count vector)
    :param idx: index of the score map within *scores*
    :param idx_img: image id, used as the saved file's name
    :param scores: sequence of (K, n_pixels)-like score tensors
    :param n_dual: id of the label set (written to the ``label_<n_dual>`` folder)
    :return: float tensor of length K with pixel counts per cluster
    """
    # Hard-assign each pixel to its best-scoring cluster.
    out = scores[idx].topk(1, dim=0)[1].flatten().detach().cpu().numpy()
    # Save labels. exist_ok avoids the check-then-create race the original
    # os.path.exists/os.makedirs pair had when several workers write at once.
    label_dir = os.path.join(args.save_model_path, 'label_' + str(n_dual))
    os.makedirs(label_dir, exist_ok=True)
    torch.save(out, os.path.join(label_dir, '{}.pkl'.format(idx_img)))
    # Count for re-weighting.
    counts = torch.tensor(np.bincount(out, minlength=K)).float()
    return counts
def eqv_transform_if_needed(args, dataloader, indice, input):
    """Apply the dataset's equivariant transform to *input* when enabled.

    When ``args.equiv`` is falsy, the input is returned untouched.
    """
    if not args.equiv:
        return input
    return dataloader.dataset.transform_eqv(indice, input)
def get_transform_params(args):
    """Translate augmentation flags on *args* into transform-name lists.

    :return: (inv_list, eqv_list) -- photometric (invariance) transforms and
        geometric (equivariance) transforms; both empty unless ``args.augment``.
    """
    inv_list, eqv_list = [], []
    if args.augment:
        if args.blur:
            inv_list.append('blur')
        if args.grey:
            inv_list.append('grey')
        if args.jitter:
            inv_list += ['brightness', 'contrast', 'saturation', 'hue']
        if args.equiv:
            if args.h_flip:
                eqv_list.append('h_flip')
            if args.v_flip:
                eqv_list.append('v_flip')
            if args.random_crop:
                eqv_list.append('random_crop')
    return inv_list, eqv_list
def collate_train(batch):
    """Collate function for two-view training samples.

    Each element is ``(index, view1, view2, label1, label2)``.  When the
    last entry is None only ``(indices, view1)`` are returned; otherwise all
    four tensor fields are stacked along a new batch dimension.
    """
    indices = [sample[0] for sample in batch]
    view1 = torch.stack([sample[1] for sample in batch])
    if batch[0][-1] is None:
        return indices, view1
    view2 = torch.stack([sample[2] for sample in batch])
    label1 = torch.stack([sample[3] for sample in batch])
    label2 = torch.stack([sample[4] for sample in batch])
    return indices, view1, view2, label1, label2
def collate_eval(batch):
    """Collate ``(index, image, label)`` evaluation samples into batched tensors."""
    indices = [sample[0] for sample in batch]
    images = torch.stack([sample[1] for sample in batch])
    labels = torch.stack([sample[2] for sample in batch])
    return indices, images, labels
def collate_train_baseline(batch):
    """Collate single-view training samples.

    Falls back to :func:`collate_eval` when labels are present (i.e. the
    last tuple entry is not None); otherwise returns ``(indices, images)``.
    """
    if batch[0][-1] is not None:
        return collate_eval(batch)
    indices = [sample[0] for sample in batch]
    images = torch.stack([sample[1] for sample in batch])
    return indices, images
"logging.getLogger",
"logging.StreamHandler",
"numpy.nanmean",
"logging.FileHandler",
"numpy.random.seed",
"numpy.bincount",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"pickle.dump",
"logging.Formatter",
"torch.stack",
"os.path.join",
"torch.nn.DataParallel",
"random.seed",
"nump... | [((539, 558), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (556, 558), False, 'import logging\n'), ((637, 666), 'logging.FileHandler', 'logging.FileHandler', (['log_path'], {}), '(log_path)\n', (656, 666), False, 'import logging\n'), ((841, 864), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (862, 864), False, 'import logging\n'), ((2734, 2762), 'numpy.zeros', 'np.zeros', (['(n_class, n_class)'], {}), '((n_class, n_class))\n', (2742, 2762), True, 'import numpy as np\n'), ((2938, 2956), 'numpy.diag', 'np.diag', (['histogram'], {}), '(histogram)\n', (2945, 2956), True, 'import numpy as np\n'), ((3861, 3912), 'torch.nn.Conv2d', 'nn.Conv2d', (['C', 'N', '(1)'], {'padding': '(0)', 'stride': '(1)', 'bias': '(False)'}), '(C, N, 1, padding=0, stride=1, bias=False)\n', (3870, 3912), True, 'import torch.nn as nn\n'), ((3986, 4018), 'torch.nn.DataParallel', 'nn.DataParallel', (['metric_function'], {}), '(metric_function)\n', (4001, 4018), True, 'import torch.nn as nn\n'), ((4552, 4579), 'torch.nn.DataParallel', 'nn.DataParallel', (['classifier'], {}), '(classifier)\n', (4567, 4579), True, 'import torch.nn as nn\n'), ((4687, 4758), 'torch.nn.Conv2d', 'nn.Conv2d', (['indim', 'outdim'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(indim, outdim, kernel_size=1, stride=1, padding=0, bias=True)\n', (4696, 4758), True, 'import torch.nn as nn\n'), ((5738, 5766), 'numpy.random.randint', 'np.random.randint', (['args.seed'], {}), '(args.seed)\n', (5755, 5766), True, 'import numpy as np\n'), ((6441, 6458), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (6452, 6458), False, 'import random\n'), ((6508, 6528), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6522, 6528), True, 'import numpy as np\n'), ((6533, 6556), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (6550, 6556), False, 'import torch\n'), ((6561, 6589), 'torch.cuda.manual_seed', 
'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (6583, 6589), False, 'import torch\n'), ((6594, 6626), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (6620, 6626), False, 'import torch\n'), ((8722, 8756), 'torch.stack', 'torch.stack', (['[b[1] for b in batch]'], {}), '([b[1] for b in batch])\n', (8733, 8756), False, 'import torch\n'), ((8857, 8891), 'torch.stack', 'torch.stack', (['[b[1] for b in batch]'], {}), '([b[1] for b in batch])\n', (8868, 8891), False, 'import torch\n'), ((8904, 8938), 'torch.stack', 'torch.stack', (['[b[2] for b in batch]'], {}), '([b[2] for b in batch])\n', (8915, 8938), False, 'import torch\n'), ((9130, 9164), 'torch.stack', 'torch.stack', (['[b[1] for b in batch]'], {}), '([b[1] for b in batch])\n', (9141, 9164), False, 'import torch\n'), ((697, 756), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s:%(levelname)s: %(message)s"""'], {}), "('%(asctime)s:%(levelname)s: %(message)s')\n", (714, 756), False, 'import logging\n'), ((897, 929), 'logging.Formatter', 'logging.Formatter', (['"""%(message)s"""'], {}), "('%(message)s')\n", (914, 929), False, 'import logging\n'), ((2966, 2986), 'numpy.sum', 'np.sum', (['histogram', '(0)'], {}), '(histogram, 0)\n', (2972, 2986), True, 'import numpy as np\n'), ((3001, 3021), 'numpy.sum', 'np.sum', (['histogram', '(1)'], {}), '(histogram, 1)\n', (3007, 3021), True, 'import numpy as np\n'), ((3095, 3105), 'numpy.sum', 'np.sum', (['tp'], {}), '(tp)\n', (3101, 3105), True, 'import numpy as np\n'), ((3108, 3125), 'numpy.sum', 'np.sum', (['histogram'], {}), '(histogram)\n', (3114, 3125), True, 'import numpy as np\n'), ((3178, 3193), 'numpy.nanmean', 'np.nanmean', (['iou'], {}), '(iou)\n', (3188, 3193), True, 'import numpy as np\n'), ((3309, 3324), 'numpy.nanmean', 'np.nanmean', (['prc'], {}), '(prc)\n', (3319, 3324), True, 'import numpy as np\n'), ((6734, 6758), 'numpy.random.seed', 'np.random.seed', (['(seed + x)'], {}), '(seed + x)\n', (6748, 6758), 
True, 'import numpy as np\n'), ((8423, 8457), 'torch.stack', 'torch.stack', (['[b[1] for b in batch]'], {}), '([b[1] for b in batch])\n', (8434, 8457), False, 'import torch\n'), ((8475, 8509), 'torch.stack', 'torch.stack', (['[b[2] for b in batch]'], {}), '([b[2] for b in batch])\n', (8486, 8509), False, 'import torch\n'), ((8527, 8561), 'torch.stack', 'torch.stack', (['[b[3] for b in batch]'], {}), '([b[3] for b in batch])\n', (8538, 8561), False, 'import torch\n'), ((8579, 8613), 'torch.stack', 'torch.stack', (['[b[4] for b in batch]'], {}), '([b[4] for b in batch])\n', (8590, 8613), False, 'import torch\n'), ((1344, 1374), 'pickle.dump', 'pickle.dump', (['self.data', 'fp', '(-1)'], {}), '(self.data, fp, -1)\n', (1355, 1374), False, 'import pickle\n'), ((2520, 2607), 'numpy.bincount', 'np.bincount', (['(n_class * label_true[mask] + label_pred[mask])'], {'minlength': '(n_class ** 2)'}), '(n_class * label_true[mask] + label_pred[mask], minlength=\n n_class ** 2)\n', (2531, 2607), True, 'import numpy as np\n'), ((1294, 1317), 'os.path.join', 'os.path.join', (['self.path'], {}), '(self.path)\n', (1306, 1317), False, 'import os\n'), ((7495, 7524), 'numpy.bincount', 'np.bincount', (['out'], {'minlength': 'K'}), '(out, minlength=K)\n', (7506, 7524), True, 'import numpy as np\n')] |
import feedparser
def read_rss_feed(feed_url):
    """Download and parse the RSS feed at *feed_url*.

    :return: list of trimmed entry dicts (see :func:`trim_entry`)
    """
    parsed = feedparser.parse(feed_url)
    return [trim_entry(item) for item in parsed.entries]
def trim_entry(entry):
    """Reduce a feedparser entry to the fields we display.

    The date is rendered as 'YYYY/M/D' (no zero-padding) from the entry's
    parsed publication time.
    """
    published = entry.published_parsed
    date = "{}/{}/{}".format(published.tm_year, published.tm_mon, published.tm_mday)
    return {
        'date': date,
        'title': entry.title,
        'link': entry.link,
        'author': entry.author,
        'summary': entry.summary
    }
"feedparser.parse"
] | [((59, 85), 'feedparser.parse', 'feedparser.parse', (['feed_url'], {}), '(feed_url)\n', (75, 85), False, 'import feedparser\n')] |
import asyncio
import datetime
import json
import logging
import os
import sys
import typing
from pathlib import Path
from typing import Any, Dict, Optional, Tuple
import discord
import toml
from discord.ext.commands import BadArgument
from pydantic import BaseModel
from pydantic import BaseSettings as PydanticBaseSettings
from pydantic import Field, SecretStr
from pydantic.env_settings import SettingsSourceCallable
from pydantic.types import conint
log = logging.getLogger(__name__)
CONFIG_PATHS: list = [
f"{os.getcwd()}/config.toml",
f"{os.getcwd()}/modmail/config.toml",
"./config.toml",
]
DEFAULT_CONFIG_PATHS = [os.path.join(os.path.dirname(__file__), "config-default.toml")]
def determine_file_path(
    paths: typing.Union[list, tuple], config_type: str = "default"
) -> typing.Union[str, None]:
    """Return the first existing path from *paths* as a ``Path``, or None.

    Bug fix: the original declared ``paths=typing.Union[list, tuple]``,
    which made the *type object* the default value instead of a type
    annotation; a zero-argument call would then try to iterate a typing
    construct.  ``paths`` is now a properly annotated required parameter
    (both call sites in this module already pass it).

    :param paths: candidate file locations, checked in order
    :param config_type: label used only in the debug log message
    """
    for file_path in paths:
        config_file = Path(file_path)
        if config_file.exists():
            log.debug(f"Found {config_type} config at {file_path}")
            return config_file
    return None
DEFAULT_CONFIG_PATH = determine_file_path(DEFAULT_CONFIG_PATHS)
USER_CONFIG_PATH = determine_file_path(CONFIG_PATHS, config_type="")
def toml_default_config_source(settings: PydanticBaseSettings) -> Dict[str, Any]:
    """
    Pydantic settings source returning the bundled default configuration.

    Loads every key from ``config-default.toml`` (shipped next to this
    module at ``DEFAULT_CONFIG_PATH``) and returns it as a plain dict; it
    is registered as the lowest-priority source in ``customise_sources``.
    """
    return dict(**toml.load(DEFAULT_CONFIG_PATH))
def toml_user_config_source(settings: PydanticBaseSettings) -> Dict[str, Any]:
    """
    Pydantic settings source returning the user's configuration, if any.

    Loads ``config.toml`` from ``USER_CONFIG_PATH`` when one was found at
    import time; otherwise contributes an empty dict so lower-priority
    sources (the bundled defaults) provide the values.
    """
    if USER_CONFIG_PATH:
        return dict(**toml.load(USER_CONFIG_PATH))
    else:
        return dict()
class BaseSettings(PydanticBaseSettings):
    # Shared settings base for every config section in this module.
    # Values are resolved in priority order: environment variables,
    # constructor kwargs, secret files, the user's config.toml, and
    # finally the bundled config-default.toml.
    class Config:
        extra = "ignore"
        env_file = ".env"
        env_file_encoding = "utf-8"
        @classmethod
        def customise_sources(
            cls,
            init_settings: SettingsSourceCallable,
            env_settings: SettingsSourceCallable,
            file_secret_settings: SettingsSourceCallable,
        ) -> Tuple[SettingsSourceCallable, ...]:
            # pydantic v1: earlier entries take precedence.
            return (
                env_settings,
                init_settings,
                file_secret_settings,
                toml_user_config_source,
                toml_default_config_source,
            )
class ThreadBaseSettings(BaseSettings):
    # Base class for the Thread*Config sections below.
    # NOTE(review): a dotted env prefix ("thread.") cannot appear in most
    # shells' environment-variable names — confirm this is intentional.
    class Config:
        env_prefix = "thread."
    # @classmethod
    # def alias_generator(cls, string: str) -> str:
    #     return f"thread.{super.__name__}.{string}"
class BotActivityConfig(BaseSettings):
    # Settings for the bot's Discord presence/activity.
    twitch_url: str = "https://www.twitch.tv/discordmodmail/"
class BotConfig(BaseSettings):
    # Core bot configuration.
    prefix: str = "?"
    activity: BotActivityConfig
    token: str = None
    modmail_guild_id: str = None
    guild_id: str = None
    multi_bot: bool = False
    log_url: str = None
    # NOTE(review): no annotation, so pydantic treats this as a plain class
    # attribute rather than a validated field — confirm intended.
    log_url_prefix = "/"
    github_token: SecretStr = None
    database_type: str = "mongodb"  # TODO limit to specific strings
    enable_plugins: bool = True
    enable_eval: bool = True
    # NOTE(review): also un-annotated (see log_url_prefix above).
    data_collection = True
    # NOTE(review): default is the int 1 for a str field; pydantic coerces
    # it to "1" — confirm intended.
    owners: str = 1
    connection_uri: str = None
    level_permissions: dict = None
    class Config:
        # env_prefix = "bot."
        allow_mutation = False
class ColorsConfig(BaseSettings):
    # Embed colours used across the bot; stringified discord.Colour values.
    main_color: str = str(discord.Colour.blurple())
    error_color: str = str(discord.Colour.red())
    recipient_color: str = str(discord.Colour.green())
    mod_color: str = str(discord.Colour.blue())
class ChannelConfig(BaseSettings):
    # Channel / category destinations used by the bot.
    # all of the below should be validated to channels
    # either by name or by int
    main_category: str = None
    fallback_category: str = None
    log_channel: str = None
    mention_channel: str = None
    update_channel: str = None
class DevConfig(BaseSettings):
    """
    Developer specific configuration.
    These settings should not be changed unless you know what you're doing.
    """
    # Constrained to valid python logging levels (0-50). Defaults to the
    # custom NOTICE level if registered on the logging module, else 25.
    log_level: conint(ge=0, le=50) = getattr(logging, "NOTICE", 25)
class EmojiConfig(BaseSettings):
    """
    Standard emojis that the bot uses when a specific emoji is not defined for a specific use.
    """
    # NOTE(review): the doubled backslash stores the literal text
    # "\N{...}" rather than the emoji itself (ThreadConfirmCreationConfig
    # uses the real escape) — presumably resolved elsewhere; confirm.
    sent_emoji: str = "\\N{WHITE HEAVY CHECK MARK}"  # TODO type as a discord emoji
    blocked_emoji: str = "\\N{NO ENTRY SIGN}"  # TODO type as a discord emoji
class InternalConfig(BaseModel):
    """Bot-managed runtime state.

    Do NOT set these yourself — the bot reads and writes them while running.

    Fixes relative to the original:
    - ``blocked_whitelist`` was annotated ``list`` but defaulted to
      ``dict()``; it now defaults to an empty list.
    - ``command_permissions`` and ``level_permissions`` were each declared
      twice with identical annotation and default; the duplicate
      declarations are removed (no behavioral change).
    """

    activity_message: str = None
    activity_type: None = None
    status: None = None
    dm_disabled: int = 0

    # moderation
    blocked: dict = dict()
    blocked_roles: dict = dict()
    blocked_whitelist: list = list()  # was `dict()` — wrong default type for a list field
    command_permissions: dict = dict()
    level_permissions: dict = dict()
    override_command_level: dict = dict()

    # threads
    snippets: dict = dict()
    notifications: dict = dict()
    subscriptions: dict = dict()
    closures: dict = dict()

    # misc
    plugins: list = list()
    aliases: dict = dict()
    auto_triggers: dict = dict()

    class Config:
        arbitrary_types_allowed = True
class MentionConfig(BaseSettings):
    # Behaviour when the bot itself is mentioned.
    alert_on_mention: bool = False
    silent_alert_on_mention: bool = False
    # NOTE(review): also declared (as str) on ChannelConfig — confirm which wins.
    mention_channel: int = None
class SnippetConfig(BaseSettings):
    # Snippet / auto-trigger behaviour.
    # NOTE(review): field name has a typo ("anonmous"); renaming would break
    # existing configs, so it is documented rather than changed here.
    anonmous_snippets: bool = False
    use_regex_autotrigger: bool = False
class ThreadAnonConfig(ThreadBaseSettings):
    # Identity shown on anonymous staff replies.
    username: str = "Response"
    footer: str = "Staff Team"
class ThreadAutoCloseConfig(ThreadBaseSettings):
    # Automatic closing of inactive threads; a time of 0 presumably
    # disables the feature — confirm against the close logic.
    time: datetime.timedelta = 0
    silently: bool = False
    response: str = "This thread has been closed automatically due to inactivity after {timeout}."
class ThreadCloseConfig(ThreadBaseSettings):
    """Messages used when a thread is closed."""
    footer: str = "Replying will create a new thread"
    title: str = "Thread Closed"
    # The `{closer.mention}` placeholder is filled in at send time.
    response: str = "{closer.mention} has closed this Modmail thread."
    on_leave: bool = False  # close automatically when the recipient leaves
    on_leave_reason: str = "The recipient has left the server."
    self_close_response: str = "You have closed this Modmail thread."
class ThreadConfirmCreationConfig(ThreadBaseSettings):
    """Optional confirmation step before a new thread is created."""
    enabled: bool = False
    title: str = "Confirm thread creation"
    response: str = "React to confirm thread creation which will directly contact the moderators"
    # These use real \N{...} escapes (actual emoji characters), unlike the
    # doubled-backslash defaults in EmojiConfig.
    accept_emoji: str = "\N{WHITE HEAVY CHECK MARK}" # TODO type as a discord emoji
    deny_emoji: str = "\N{NO ENTRY SIGN}" # TODO type as a discord emoji
class ThreadCooldownConfig(ThreadBaseSettings):
    """Cooldown between successive threads from the same user."""
    time: datetime.timedelta = 0  # 0 presumably means no cooldown -- confirm
    embed_title: str = "Message not sent!"
    # The `{delta}` placeholder is filled in at send time.
    response: str = "You must wait for {delta} before you can contact me again."
class ThreadCreationConfig(ThreadBaseSettings):
    """Embed shown to the user when their thread is created."""
    response: str = "The staff team will get back to you as soon as possible."
    footer: str = "Your message has been sent"
    title: str = "Thread Created"
class ThreadDisabledConfig(ThreadBaseSettings):
    """Responses used while new threads and/or all messages are disabled."""
    # shown when *new* thread creation is disabled
    new_title: str = "Not Delivered"
    new_response: str = "We are not accepting new threads."
    new_footer: str = "Please try again later..."
    # shown when messaging to *current* threads is disabled as well
    current_title: str = "Not Delivered"
    current_response: str = "We are not accepting any messages."
    current_footer: str = "Please try again later..."
class ThreadMoveConfig(ThreadBaseSettings):
    """Notifications sent when a thread is moved to another channel."""
    title: str = "Thread Moved"
    notify: bool = False  # notify the recipient
    notify_mods: bool = False  # notify the staff side
    response: str = "This thread has been moved."
class ThreadSelfClosableConfig(ThreadBaseSettings):
    """Lets the recipient close their own thread via the lock reaction."""
    enabled: bool = False
    lock_emoji: str = "\N{LOCK}"
    creation_footer: str = "Click the lock to close the thread"
class ThreadConfig(BaseSettings):
    """Top-level thread behaviour settings plus the nested per-feature groups."""
    anon_reply_without_command: bool = False
    reply_without_command: bool = False
    plain_reply_without_command: bool = False
    mention: str = "@here"
    user_typing: bool = False
    mod_typing: bool = False
    transfer_reactions: bool = True
    contact_silently: bool = False
    # NOTE(review): annotated as timedelta but defaulting to the int 0 --
    # presumably coerced by pydantic; confirm.
    account_age: datetime.timedelta = 0
    guild_age: datetime.timedelta = 0
    mod_tag: str = ""
    show_timestamp: bool = True
    # nested per-feature setting groups
    anon: ThreadAnonConfig
    auto_close: ThreadAutoCloseConfig
    close: ThreadCloseConfig
    confirm_creation: ThreadConfirmCreationConfig
    cooldown: ThreadCooldownConfig
    creation: ThreadCreationConfig
    disabled: ThreadDisabledConfig
    move: ThreadMoveConfig
    self_closable: ThreadSelfClosableConfig
class UpdateConfig(BaseSettings):
    """Auto-update behaviour. Immutable; env vars use the "updates." prefix."""
    disable_autoupdates: bool = False
    update_notifications: bool = True
    class Config:
        allow_mutation = False
        env_prefix = "updates."
class ModmailConfig(BaseSettings):
    """Root configuration object aggregating every settings group."""
    bot: BotConfig
    colors: ColorsConfig
    channels: ChannelConfig
    dev: DevConfig
    emoji: EmojiConfig
    mention: MentionConfig
    snippets: SnippetConfig
    thread: ThreadConfig
    updates: UpdateConfig
    shell: str = None  # NOTE(review): typed `str` but defaults to None -- confirm Optional is intended
# Module-level singletons: CONFIG holds user-facing settings, INTERNAL holds
# state managed by the bot itself (see InternalConfig's warning).
CONFIG = ModmailConfig()
INTERNAL = InternalConfig()
| [
"logging.getLogger",
"pydantic.types.conint",
"pathlib.Path",
"discord.Colour.green",
"discord.Colour.red",
"os.getcwd",
"os.path.dirname",
"toml.load",
"discord.Colour.blurple",
"discord.Colour.blue"
] | [((462, 489), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (479, 489), False, 'import logging\n'), ((4295, 4314), 'pydantic.types.conint', 'conint', ([], {'ge': '(0)', 'le': '(50)'}), '(ge=0, le=50)\n', (4301, 4314), False, 'from pydantic.types import conint\n'), ((651, 676), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (666, 676), False, 'import os\n'), ((891, 906), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (895, 906), False, 'from pathlib import Path\n'), ((3660, 3684), 'discord.Colour.blurple', 'discord.Colour.blurple', ([], {}), '()\n', (3682, 3684), False, 'import discord\n'), ((3713, 3733), 'discord.Colour.red', 'discord.Colour.red', ([], {}), '()\n', (3731, 3733), False, 'import discord\n'), ((3766, 3788), 'discord.Colour.green', 'discord.Colour.green', ([], {}), '()\n', (3786, 3788), False, 'import discord\n'), ((3815, 3836), 'discord.Colour.blue', 'discord.Colour.blue', ([], {}), '()\n', (3834, 3836), False, 'import discord\n'), ((521, 532), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (530, 532), False, 'import os\n'), ((555, 566), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (564, 566), False, 'import os\n'), ((1559, 1589), 'toml.load', 'toml.load', (['DEFAULT_CONFIG_PATH'], {}), '(DEFAULT_CONFIG_PATH)\n', (1568, 1589), False, 'import toml\n'), ((1958, 1985), 'toml.load', 'toml.load', (['USER_CONFIG_PATH'], {}), '(USER_CONFIG_PATH)\n', (1967, 1985), False, 'import toml\n')] |
# Copyright 2016, 2017 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by <NAME>
# Python translation by <NAME>, with <NAME> and <NAME>
#
# Revised 5 March 2018 - <NAME> - Fixed call to prop_cubic_conv by
# getting rid of the flattening of the coordinate arrays.
import os
import proper
import numpy as np
from math import sin, cos
# from . import lib_dir
lib_dir = os.path.dirname(proper.__file__)
import scipy.signal as ss
if not proper.use_cubic_conv:
from scipy.ndimage.interpolation import map_coordinates
def prop_dm(wf, dm_z0, dm_xc, dm_yc, spacing = 0., **kwargs):
    """Simulate a deformable mirror of specified actuator spacing, including the
    effects of the DM influence function.

    Parameters
    ----------
    wf : obj
        WaveFront class object

    dm_z0 : str or numpy ndarray
        Either a 2D numpy array containing the surface piston of each DM
        actuator in meters or the name of a 2D FITS image file containing the
        above

    dm_xc, dm_yc : list or numpy ndarray
        The location of the optical axis (center of the wavefront) on the DM in
        actuator units (0 ro num_actuator-1). The center of the first actuator
        is (0.0, 0.0)

    spacing : float
        Defines the spacing in meters between actuators; must not be used when
        n_act_across_pupil is specified.

    Returns
    -------
    dmap : numpy ndarray
        Returns DM surface (not wavefront) map in meters

    Other Parameters
    ----------------
    FIT : bool
        Switch that tells routine that the values in "dm_z" are the desired
        surface heights rather than commanded actuator heights, and so the
        routine should fit this map, accounting for actuator influence functions,
        to determine the necessary actuator heights. An iterative error-minimizing
        loop is used for the fit.

    NO_APPLY : bool
        If set, the DM pattern is not added to the wavefront. Useful if the DM
        surface map is needed but should not be applied to the wavefront

    N_ACT_ACROSS_PUPIL : int
        Specifies the number of actuators that span the X-axis beam diameter. If
        it is a whole number, the left edge of the left pixel is aligned with
        the left edge of the beam, and the right edge of the right pixel with
        the right edge of the beam. This determines the spacing and size of the
        actuators. Should not be used when "spacing" value is specified.

    XTILT, YTILT, ZTILT : float
        Specify the rotation of the DM surface with respect to the wavefront plane
        in degrees about the X, Y, Z axes, respectively, with the origin at the
        center of the wavefront. The DM surface is interpolated and orthographically
        projected onto the wavefront grid. The coordinate system assumes that
        the wavefront and initial DM surface are in the X,Y plane with a lower
        left origin with Z towards the observer. The rotations are left handed.
        The default rotation order is X, Y, then Z unless the /ZYX switch is set.

    XYZ or ZYX : bool
        Specifies the rotation order if two or more of XTILT, YTILT, or ZTILT
        are specified. The default is /XYZ for X, Y, then Z rotations.

    Raises
    ------
    ValueError:
        User cannot specify both actuator spacing and N_ACT_ACROSS_PUPIL

    ValueError:
        User must specify either actuator spacing or N_ACT_ACROSS_PUPIL
    """
    if "ZYX" in kwargs and "XYZ" in kwargs:
        raise ValueError('PROP_DM: Error: Cannot specify both XYZ and ZYX rotation orders. Stopping')
    elif not "ZYX" in kwargs and not 'XYZ' in kwargs:
        XYZ = 1    # default is rotation around X, then Y, then Z
        ZYX = 0
    elif "ZYX" in kwargs:
        ZYX = 1
        XYZ = 0
    elif "XYZ" in kwargs:
        XYZ = 1
        ZYX = 0

    if "XTILT" in kwargs:
        xtilt = kwargs["XTILT"]
    else:
        xtilt = 0.

    if "YTILT" in kwargs:
        ytilt = kwargs["YTILT"]
    else:
        ytilt = 0.

    if "ZTILT" in kwargs:
        ztilt = kwargs["ZTILT"]
    else:
        ztilt = 0.

    if type(dm_z0) == str:
        dm_z = proper.prop_fits_read(dm_z0) # Read DM setting from FITS file
    else:
        dm_z = dm_z0

    n = proper.prop_get_gridsize(wf)
    dx_surf = proper.prop_get_sampling(wf)  # sampling of current surface in meters
    beamradius = proper.prop_get_beamradius(wf)

    # influence function sampling is 0.1 mm, peak at (x,y)=(45,45)
    # Influence function has shape = 1x91x91. Saving it as a 2D array
    # before continuing with processing
    inf = proper.prop_fits_read(os.path.join(lib_dir, "influence_dm5v2.fits"))
    inf = inf[0,:,:]

    s = inf.shape
    nx_inf = s[1]
    ny_inf = s[0]
    xc_inf = int(nx_inf/2)
    yc_inf = int(ny_inf/2)
    dx_inf = 0.1e-3            # influence function spacing in meters
    dx_dm_inf = 1.e-3          # spacing between DM actuators in meters assumed by influence function
    inf_mag = 10

    if spacing != 0 and "N_ACT_ACROSS_PUPIL" in kwargs:
        raise ValueError("PROP_DM: User cannot specify both actuator spacing and N_ACT_ACROSS_PUPIL. Stopping.")

    if spacing == 0 and not "N_ACT_ACROSS_PUPIL" in kwargs:
        raise ValueError("PROP_DM: User must specify either actuator spacing or N_ACT_ACROSS_PUPIL. Stopping.")

    if "N_ACT_ACROSS_PUPIL" in kwargs:
        dx_dm = 2. * beamradius / int(kwargs["N_ACT_ACROSS_PUPIL"])
    else:
        dx_dm = spacing

    dx_inf = dx_inf * dx_dm / dx_dm_inf   # Influence function sampling scaled
                                          # to specified DM actuator spacing

    if "FIT" in kwargs:
        x = (np.arange(5, dtype = np.float64) - 2) * dx_dm

        if proper.use_cubic_conv:
            inf_kernel = proper.prop_cubic_conv(inf.T, x/dx_inf+xc_inf, x/dx_inf+yc_inf, GRID=True)
        else:
            xygrid = np.meshgrid(x/dx_inf+xc_inf, x/dx_inf+yc_inf)
            inf_kernel = map_coordinates(inf.T, xygrid, order = 3, mode = "nearest")

        (dm_z_commanded, dms) = proper.prop_fit_dm(dm_z, inf_kernel)
    else:
        dm_z_commanded = dm_z

    s = dm_z.shape
    nx_dm = s[1]
    ny_dm = s[0]

    # Create subsampled DM grid
    margin = 9 * inf_mag
    nx_grid = nx_dm * inf_mag + 2 * margin
    ny_grid = ny_dm * inf_mag + 2 * margin
    xoff_grid = margin + inf_mag/2           # pixel location of 1st actuator center in subsampled grid
    yoff_grid = xoff_grid
    dm_grid = np.zeros([ny_grid, nx_grid], dtype = np.float64)

    x = np.arange(nx_dm, dtype = np.int16) * int(inf_mag) + int(xoff_grid)
    y = np.arange(ny_dm, dtype = np.int16) * int(inf_mag) + int(yoff_grid)
    dm_grid[np.tile(np.vstack(y), (nx_dm,)), np.tile(x, (ny_dm,1))] = dm_z_commanded
    dm_grid = ss.fftconvolve(dm_grid, inf, mode = 'same')

    # 3D rotate DM grid and project orthogonally onto wavefront
    xdim = int(np.round(np.sqrt(2) * nx_grid * dx_inf / dx_surf))   # grid dimensions (pix) projected onto wavefront
    ydim = int(np.round(np.sqrt(2) * ny_grid * dx_inf / dx_surf))

    if xdim > n: xdim = n

    if ydim > n: ydim = n

    # NOTE: np.int was removed in NumPy 1.24; the builtin int is the
    # equivalent dtype and produces identical arrays.
    x = np.ones((ydim,1), dtype = int) * ((np.arange(xdim) - int(xdim/2)) * dx_surf)
    y = (np.ones((xdim,1), dtype = int) * ((np.arange(ydim) - int(ydim/2)) * dx_surf)).T

    a = xtilt * np.pi / 180
    b = ytilt * np.pi / 180
    g = ztilt * np.pi /180

    if XYZ:
        m = np.array([ [cos(b)*cos(g), -cos(b)*sin(g), sin(b), 0],
            [cos(a)*sin(g) + sin(a)*sin(b)*cos(g), cos(a)*cos(g)-sin(a)*sin(b)*sin(g), -sin(a)*cos(b), 0],
            [sin(a)*sin(g)-cos(a)*sin(b)*cos(g), sin(a)*cos(g)+cos(a)*sin(b)*sin(g), cos(a)*cos(b), 0],
            [0, 0, 0, 1] ])
    else:
        m = np.array([ [cos(b)*cos(g), cos(g)*sin(a)*sin(b)-cos(a)*sin(g), cos(a)*cos(g)*sin(b)+sin(a)*sin(g), 0],
            [cos(b)*sin(g), cos(a)*cos(g)+sin(a)*sin(b)*sin(g), -cos(g)*sin(a)+cos(a)*sin(b)*sin(g), 0],
            [-sin(b), cos(b)*sin(a), cos(a)*cos(b), 0],
            [0, 0, 0, 1] ])

    # Forward project a square
    edge = np.array([[-1.0,-1.0,0.0,0.0], [1.0,-1.0,0.0,0.0], [1.0,1.0,0.0,0.0], [-1.0,1.0,0.0,0.0]])
    new_xyz = np.dot(edge, m)

    # determine backward projection for screen-raster-to-DM-surce computation
    dx_dxs = (new_xyz[0,0] - new_xyz[1,0]) / (edge[0,0] - edge[1,0])
    dx_dys = (new_xyz[1,0] - new_xyz[2,0]) / (edge[1,1] - edge[2,1])
    dy_dxs = (new_xyz[0,1] - new_xyz[1,1]) / (edge[0,0] - edge[1,0])
    dy_dys = (new_xyz[1,1] - new_xyz[2,1]) / (edge[1,1] - edge[2,1])

    xs = ( x/dx_dxs - y*dx_dys/(dx_dxs*dy_dys) ) / ( 1 - dy_dxs*dx_dys/(dx_dxs*dy_dys) )
    ys = ( y/dy_dys - x*dy_dxs/(dx_dxs*dy_dys) ) / ( 1 - dx_dys*dy_dxs/(dx_dxs*dy_dys) )

    xdm = (xs + dm_xc * dx_dm) / dx_inf + xoff_grid
    ydm = (ys + dm_yc * dx_dm) / dx_inf + yoff_grid

    if proper.use_cubic_conv:
        grid = proper.prop_cubic_conv(dm_grid.T, xdm, ydm, GRID = False)
        grid = grid.reshape([xdm.shape[1], xdm.shape[0]])
    else:
        grid = map_coordinates(dm_grid.T, [xdm, ydm], order = 3, mode = "nearest", prefilter = True)

    dmap = np.zeros([n,n], dtype = np.float64)
    nx_grid, ny_grid = grid.shape
    xmin, xmax = int(n/2 - xdim/2), int(n/2 - xdim/2 + nx_grid)
    ymin, ymax = int(n/2 - ydim/2), int(n/2 - ydim/2 + ny_grid)
    dmap[ymin:ymax, xmin:xmax] = grid

    # Random dots sometimes appear in the phase map. This is a little temporary hack to deal with that bug!
    import scipy.ndimage
    sigma = [1, 1]
    # NOTE: scipy.ndimage.filters was deprecated and later removed; call
    # gaussian_filter from the scipy.ndimage namespace directly.
    dmap = scipy.ndimage.gaussian_filter(dmap, sigma, mode='constant')

    if not "NO_APPLY" in kwargs:
        proper.prop_add_phase(wf, 2 * dmap)   # x2 to convert surface to wavefront error

    return dmap
| [
"numpy.sqrt",
"math.cos",
"numpy.array",
"proper.prop_cubic_conv",
"proper.prop_get_sampling",
"numpy.arange",
"proper.prop_get_beamradius",
"proper.prop_get_gridsize",
"scipy.signal.fftconvolve",
"numpy.dot",
"numpy.vstack",
"numpy.meshgrid",
"numpy.tile",
"numpy.ones",
"proper.prop_fit... | [((592, 624), 'os.path.dirname', 'os.path.dirname', (['proper.__file__'], {}), '(proper.__file__)\n', (607, 624), False, 'import os\n'), ((4461, 4489), 'proper.prop_get_gridsize', 'proper.prop_get_gridsize', (['wf'], {}), '(wf)\n', (4485, 4489), False, 'import proper\n'), ((4504, 4532), 'proper.prop_get_sampling', 'proper.prop_get_sampling', (['wf'], {}), '(wf)\n', (4528, 4532), False, 'import proper\n'), ((4591, 4621), 'proper.prop_get_beamradius', 'proper.prop_get_beamradius', (['wf'], {}), '(wf)\n', (4617, 4621), False, 'import proper\n'), ((6679, 6725), 'numpy.zeros', 'np.zeros', (['[ny_grid, nx_grid]'], {'dtype': 'np.float64'}), '([ny_grid, nx_grid], dtype=np.float64)\n', (6687, 6725), True, 'import numpy as np\n'), ((6978, 7019), 'scipy.signal.fftconvolve', 'ss.fftconvolve', (['dm_grid', 'inf'], {'mode': '"""same"""'}), "(dm_grid, inf, mode='same')\n", (6992, 7019), True, 'import scipy.signal as ss\n'), ((8223, 8330), 'numpy.array', 'np.array', (['[[-1.0, -1.0, 0.0, 0.0], [1.0, -1.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [-1.0,\n 1.0, 0.0, 0.0]]'], {}), '([[-1.0, -1.0, 0.0, 0.0], [1.0, -1.0, 0.0, 0.0], [1.0, 1.0, 0.0, \n 0.0], [-1.0, 1.0, 0.0, 0.0]])\n', (8231, 8330), True, 'import numpy as np\n'), ((8328, 8343), 'numpy.dot', 'np.dot', (['edge', 'm'], {}), '(edge, m)\n', (8334, 8343), True, 'import numpy as np\n'), ((9268, 9302), 'numpy.zeros', 'np.zeros', (['[n, n]'], {'dtype': 'np.float64'}), '([n, n], dtype=np.float64)\n', (9276, 9302), True, 'import numpy as np\n'), ((4359, 4387), 'proper.prop_fits_read', 'proper.prop_fits_read', (['dm_z0'], {}), '(dm_z0)\n', (4380, 4387), False, 'import proper\n'), ((4832, 4877), 'os.path.join', 'os.path.join', (['lib_dir', '"""influence_dm5v2.fits"""'], {}), "(lib_dir, 'influence_dm5v2.fits')\n", (4844, 4877), False, 'import os\n'), ((6260, 6296), 'proper.prop_fit_dm', 'proper.prop_fit_dm', (['dm_z', 'inf_kernel'], {}), '(dm_z, inf_kernel)\n', (6278, 6296), False, 'import proper\n'), ((7331, 7363), 
'numpy.ones', 'np.ones', (['(ydim, 1)'], {'dtype': 'np.int'}), '((ydim, 1), dtype=np.int)\n', (7338, 7363), True, 'import numpy as np\n'), ((9029, 9084), 'proper.prop_cubic_conv', 'proper.prop_cubic_conv', (['dm_grid.T', 'xdm', 'ydm'], {'GRID': '(False)'}), '(dm_grid.T, xdm, ydm, GRID=False)\n', (9051, 9084), False, 'import proper\n'), ((9170, 9249), 'scipy.ndimage.interpolation.map_coordinates', 'map_coordinates', (['dm_grid.T', '[xdm, ydm]'], {'order': '(3)', 'mode': '"""nearest"""', 'prefilter': '(True)'}), "(dm_grid.T, [xdm, ydm], order=3, mode='nearest', prefilter=True)\n", (9185, 9249), False, 'from scipy.ndimage.interpolation import map_coordinates\n'), ((9779, 9814), 'proper.prop_add_phase', 'proper.prop_add_phase', (['wf', '(2 * dmap)'], {}), '(wf, 2 * dmap)\n', (9800, 9814), False, 'import proper\n'), ((5986, 6072), 'proper.prop_cubic_conv', 'proper.prop_cubic_conv', (['inf.T', '(x / dx_inf + xc_inf)', '(x / dx_inf + yc_inf)'], {'GRID': '(True)'}), '(inf.T, x / dx_inf + xc_inf, x / dx_inf + yc_inf,\n GRID=True)\n', (6008, 6072), False, 'import proper\n'), ((6096, 6149), 'numpy.meshgrid', 'np.meshgrid', (['(x / dx_inf + xc_inf)', '(x / dx_inf + yc_inf)'], {}), '(x / dx_inf + xc_inf, x / dx_inf + yc_inf)\n', (6107, 6149), True, 'import numpy as np\n'), ((6167, 6222), 'scipy.ndimage.interpolation.map_coordinates', 'map_coordinates', (['inf.T', 'xygrid'], {'order': '(3)', 'mode': '"""nearest"""'}), "(inf.T, xygrid, order=3, mode='nearest')\n", (6182, 6222), False, 'from scipy.ndimage.interpolation import map_coordinates\n'), ((6737, 6769), 'numpy.arange', 'np.arange', (['nx_dm'], {'dtype': 'np.int16'}), '(nx_dm, dtype=np.int16)\n', (6746, 6769), True, 'import numpy as np\n'), ((6812, 6844), 'numpy.arange', 'np.arange', (['ny_dm'], {'dtype': 'np.int16'}), '(ny_dm, dtype=np.int16)\n', (6821, 6844), True, 'import numpy as np\n'), ((6924, 6946), 'numpy.tile', 'np.tile', (['x', '(ny_dm, 1)'], {}), '(x, (ny_dm, 1))\n', (6931, 6946), True, 'import numpy as np\n'), 
((7420, 7452), 'numpy.ones', 'np.ones', (['(xdim, 1)'], {'dtype': 'np.int'}), '((xdim, 1), dtype=np.int)\n', (7427, 7452), True, 'import numpy as np\n'), ((5880, 5910), 'numpy.arange', 'np.arange', (['(5)'], {'dtype': 'np.float64'}), '(5, dtype=np.float64)\n', (5889, 5910), True, 'import numpy as np\n'), ((6899, 6911), 'numpy.vstack', 'np.vstack', (['y'], {}), '(y)\n', (6908, 6911), True, 'import numpy as np\n'), ((7369, 7384), 'numpy.arange', 'np.arange', (['xdim'], {}), '(xdim)\n', (7378, 7384), True, 'import numpy as np\n'), ((7458, 7473), 'numpy.arange', 'np.arange', (['ydim'], {}), '(ydim)\n', (7467, 7473), True, 'import numpy as np\n'), ((7656, 7662), 'math.sin', 'sin', (['b'], {}), '(b)\n', (7659, 7662), False, 'from math import sin, cos\n'), ((7111, 7121), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7118, 7121), True, 'import numpy as np\n'), ((7226, 7236), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7233, 7236), True, 'import numpy as np\n'), ((7625, 7631), 'math.cos', 'cos', (['b'], {}), '(b)\n', (7628, 7631), False, 'from math import sin, cos\n'), ((7632, 7638), 'math.cos', 'cos', (['g'], {}), '(g)\n', (7635, 7638), False, 'from math import sin, cos\n'), ((7648, 7654), 'math.sin', 'sin', (['g'], {}), '(g)\n', (7651, 7654), False, 'from math import sin, cos\n'), ((7760, 7766), 'math.cos', 'cos', (['b'], {}), '(b)\n', (7763, 7766), False, 'from math import sin, cos\n'), ((7853, 7859), 'math.cos', 'cos', (['a'], {}), '(a)\n', (7856, 7859), False, 'from math import sin, cos\n'), ((7860, 7866), 'math.cos', 'cos', (['b'], {}), '(b)\n', (7863, 7866), False, 'from math import sin, cos\n'), ((7930, 7936), 'math.cos', 'cos', (['b'], {}), '(b)\n', (7933, 7936), False, 'from math import sin, cos\n'), ((7937, 7943), 'math.cos', 'cos', (['g'], {}), '(g)\n', (7940, 7943), False, 'from math import sin, cos\n'), ((8024, 8030), 'math.cos', 'cos', (['b'], {}), '(b)\n', (8027, 8030), False, 'from math import sin, cos\n'), ((8031, 8037), 'math.sin', 'sin', 
(['g'], {}), '(g)\n', (8034, 8037), False, 'from math import sin, cos\n'), ((8120, 8126), 'math.sin', 'sin', (['b'], {}), '(b)\n', (8123, 8126), False, 'from math import sin, cos\n'), ((8128, 8134), 'math.cos', 'cos', (['b'], {}), '(b)\n', (8131, 8134), False, 'from math import sin, cos\n'), ((8135, 8141), 'math.sin', 'sin', (['a'], {}), '(a)\n', (8138, 8141), False, 'from math import sin, cos\n'), ((8143, 8149), 'math.cos', 'cos', (['a'], {}), '(a)\n', (8146, 8149), False, 'from math import sin, cos\n'), ((8150, 8156), 'math.cos', 'cos', (['b'], {}), '(b)\n', (8153, 8156), False, 'from math import sin, cos\n'), ((7641, 7647), 'math.cos', 'cos', (['b'], {}), '(b)\n', (7644, 7647), False, 'from math import sin, cos\n'), ((7678, 7684), 'math.cos', 'cos', (['a'], {}), '(a)\n', (7681, 7684), False, 'from math import sin, cos\n'), ((7685, 7691), 'math.sin', 'sin', (['g'], {}), '(g)\n', (7688, 7691), False, 'from math import sin, cos\n'), ((7708, 7714), 'math.cos', 'cos', (['g'], {}), '(g)\n', (7711, 7714), False, 'from math import sin, cos\n'), ((7716, 7722), 'math.cos', 'cos', (['a'], {}), '(a)\n', (7719, 7722), False, 'from math import sin, cos\n'), ((7723, 7729), 'math.cos', 'cos', (['g'], {}), '(g)\n', (7726, 7729), False, 'from math import sin, cos\n'), ((7744, 7750), 'math.sin', 'sin', (['g'], {}), '(g)\n', (7747, 7750), False, 'from math import sin, cos\n'), ((7753, 7759), 'math.sin', 'sin', (['a'], {}), '(a)\n', (7756, 7759), False, 'from math import sin, cos\n'), ((7781, 7787), 'math.sin', 'sin', (['a'], {}), '(a)\n', (7784, 7787), False, 'from math import sin, cos\n'), ((7788, 7794), 'math.sin', 'sin', (['g'], {}), '(g)\n', (7791, 7794), False, 'from math import sin, cos\n'), ((7809, 7815), 'math.cos', 'cos', (['g'], {}), '(g)\n', (7812, 7815), False, 'from math import sin, cos\n'), ((7817, 7823), 'math.sin', 'sin', (['a'], {}), '(a)\n', (7820, 7823), False, 'from math import sin, cos\n'), ((7824, 7830), 'math.cos', 'cos', (['g'], {}), '(g)\n', (7827, 7830), 
False, 'from math import sin, cos\n'), ((7845, 7851), 'math.sin', 'sin', (['g'], {}), '(g)\n', (7848, 7851), False, 'from math import sin, cos\n'), ((7959, 7965), 'math.sin', 'sin', (['b'], {}), '(b)\n', (7962, 7965), False, 'from math import sin, cos\n'), ((7966, 7972), 'math.cos', 'cos', (['a'], {}), '(a)\n', (7969, 7972), False, 'from math import sin, cos\n'), ((7973, 7979), 'math.sin', 'sin', (['g'], {}), '(g)\n', (7976, 7979), False, 'from math import sin, cos\n'), ((7995, 8001), 'math.sin', 'sin', (['b'], {}), '(b)\n', (7998, 8001), False, 'from math import sin, cos\n'), ((8002, 8008), 'math.sin', 'sin', (['a'], {}), '(a)\n', (8005, 8008), False, 'from math import sin, cos\n'), ((8009, 8015), 'math.sin', 'sin', (['g'], {}), '(g)\n', (8012, 8015), False, 'from math import sin, cos\n'), ((8039, 8045), 'math.cos', 'cos', (['a'], {}), '(a)\n', (8042, 8045), False, 'from math import sin, cos\n'), ((8046, 8052), 'math.cos', 'cos', (['g'], {}), '(g)\n', (8049, 8052), False, 'from math import sin, cos\n'), ((8067, 8073), 'math.sin', 'sin', (['g'], {}), '(g)\n', (8070, 8073), False, 'from math import sin, cos\n'), ((8083, 8089), 'math.sin', 'sin', (['a'], {}), '(a)\n', (8086, 8089), False, 'from math import sin, cos\n'), ((8104, 8110), 'math.sin', 'sin', (['g'], {}), '(g)\n', (8107, 8110), False, 'from math import sin, cos\n'), ((7694, 7700), 'math.sin', 'sin', (['a'], {}), '(a)\n', (7697, 7700), False, 'from math import sin, cos\n'), ((7701, 7707), 'math.sin', 'sin', (['b'], {}), '(b)\n', (7704, 7707), False, 'from math import sin, cos\n'), ((7730, 7736), 'math.sin', 'sin', (['a'], {}), '(a)\n', (7733, 7736), False, 'from math import sin, cos\n'), ((7737, 7743), 'math.sin', 'sin', (['b'], {}), '(b)\n', (7740, 7743), False, 'from math import sin, cos\n'), ((7795, 7801), 'math.cos', 'cos', (['a'], {}), '(a)\n', (7798, 7801), False, 'from math import sin, cos\n'), ((7802, 7808), 'math.sin', 'sin', (['b'], {}), '(b)\n', (7805, 7808), False, 'from math import sin, 
cos\n'), ((7831, 7837), 'math.cos', 'cos', (['a'], {}), '(a)\n', (7834, 7837), False, 'from math import sin, cos\n'), ((7838, 7844), 'math.sin', 'sin', (['b'], {}), '(b)\n', (7841, 7844), False, 'from math import sin, cos\n'), ((7945, 7951), 'math.cos', 'cos', (['g'], {}), '(g)\n', (7948, 7951), False, 'from math import sin, cos\n'), ((7952, 7958), 'math.sin', 'sin', (['a'], {}), '(a)\n', (7955, 7958), False, 'from math import sin, cos\n'), ((7981, 7987), 'math.cos', 'cos', (['a'], {}), '(a)\n', (7984, 7987), False, 'from math import sin, cos\n'), ((7988, 7994), 'math.cos', 'cos', (['g'], {}), '(g)\n', (7991, 7994), False, 'from math import sin, cos\n'), ((8053, 8059), 'math.sin', 'sin', (['a'], {}), '(a)\n', (8056, 8059), False, 'from math import sin, cos\n'), ((8060, 8066), 'math.sin', 'sin', (['b'], {}), '(b)\n', (8063, 8066), False, 'from math import sin, cos\n'), ((8076, 8082), 'math.cos', 'cos', (['g'], {}), '(g)\n', (8079, 8082), False, 'from math import sin, cos\n'), ((8090, 8096), 'math.cos', 'cos', (['a'], {}), '(a)\n', (8093, 8096), False, 'from math import sin, cos\n'), ((8097, 8103), 'math.sin', 'sin', (['b'], {}), '(b)\n', (8100, 8103), False, 'from math import sin, cos\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""dataset
Custom dataset.
"""
import numpy as np
from mindspore import dataset as ds
def get_data(num, img_size=(1, 32, 32), num_classes=10, is_onehot=True):
    """Yield ``num`` random (image, label) pairs as float32 arrays.

    Each image is drawn from a standard normal distribution with shape
    ``img_size``. The label is a random class index in ``[0, num_classes)``,
    returned either as a one-hot vector (default) or as a length-1 array.
    """
    for _ in range(num):
        image = np.random.randn(*img_size)
        label_index = np.random.randint(0, num_classes)
        if is_onehot:
            label = np.zeros(shape=(num_classes,))
            label[label_index] = 1
            label = label.astype(np.float32)
        else:
            label = np.array([label_index]).astype(np.float32)
        yield image.astype(np.float32), label
def create_train_dataset(num_data=32768, batch_size=32, repeat_size=1):
    """Build a batched, repeated training dataset of random samples.

    Incomplete final batches are dropped (``drop_remainder=True``).
    """
    samples = list(get_data(num_data))
    dataset = ds.GeneratorDataset(samples, column_names=['data', 'label'])
    dataset = dataset.batch(batch_size, drop_remainder=True)
    return dataset.repeat(repeat_size)
def create_eval_dataset(num_data=2048, batch_size=2048, repeat_size=1):
    """Build a batched, repeated evaluation dataset of random samples.

    Unlike the training dataset, incomplete final batches are kept.
    """
    samples = list(get_data(num_data))
    dataset = ds.GeneratorDataset(samples, column_names=['data', 'label'])
    dataset = dataset.batch(batch_size)
    return dataset.repeat(repeat_size)
| [
"numpy.array",
"numpy.random.randint",
"numpy.random.randn",
"numpy.zeros"
] | [((866, 892), 'numpy.random.randn', 'np.random.randn', (['*img_size'], {}), '(*img_size)\n', (881, 892), True, 'import numpy as np\n'), ((910, 943), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_classes'], {}), '(0, num_classes)\n', (927, 943), True, 'import numpy as np\n'), ((1053, 1083), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_classes,)'}), '(shape=(num_classes,))\n', (1061, 1083), True, 'import numpy as np\n'), ((965, 983), 'numpy.array', 'np.array', (['[target]'], {}), '([target])\n', (973, 983), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2016, 2017, 2018, 2019 Sqreen. All rights reserved.
# Please refer to our terms for more information:
#
# https://www.sqreen.io/terms.html
#
import logging
import traceback
from datetime import datetime
from ..runtime_storage import runtime
from ..utils import is_string
LOGGER = logging.getLogger(__name__)

# Event names starting with this prefix are reserved for agent-internal events.
_SQREEN_EVENT_PREFIX = "sq."
# Option keys accepted by track(); anything else is dropped with a warning.
_TRACK_OPTIONS_FIELDS = frozenset(
    ["properties", "user_identifiers", "timestamp"]
)
# Request payload sections attached to every tracked event.
_TRACK_PAYLOAD_SECTIONS = ("request", "response", "params", "headers")
# Names of events for which a stacktrace is recorded (populated elsewhere).
STACKTRACE_EVENTS = set()
# Maximum number of properties reported per event; extras are dropped.
_MAX_EVENT_PROPERTIES = 16
def _track_unsafe(event, options, storage=runtime):
    """Track an SDK event without user-facing validation.

    Used internally in the agent to send built-in SDK events (e.g. the
    output of security actions); no checks are performed on the event
    name or options, and this is not exposed to the user.
    """
    if "timestamp" not in options:
        # Copy before mutating so the caller's dict is left untouched.
        options = dict(options)
        options["timestamp"] = datetime.utcnow()
    payload = ["track", options["timestamp"], event, options]
    storage.observe(
        "sdk",
        payload,
        payload_sections=_TRACK_PAYLOAD_SECTIONS,
        report=True,
    )
    return True
def track(event, options=None, storage=runtime):
    """Track a user-facing SDK event.

    :param event: event name; names starting with ``sq.`` are reserved,
        such events are ignored (a warning is logged, ``False`` returned).
    :param options: optional dict; recognised keys are ``properties``,
        ``user_identifiers`` and ``timestamp``. Unknown keys are dropped
        with a warning.
    :param storage: runtime storage used to record the event.
    :return: ``True`` if the event was recorded, ``False`` if ignored.
    :raises TypeError: if ``event`` is not a string, or ``timestamp`` is
        not a ``datetime`` instance.
    """
    # Check event type.
    if not is_string(event):
        raise TypeError(
            "event name must be a string, not {}".format(
                event.__class__.__name__
            )
        )

    # Check event name: the "sq." namespace is reserved for internal events.
    if event.startswith(_SQREEN_EVENT_PREFIX):
        LOGGER.warning(
            "Event names starting with %r are reserved, "
            "event %r has been ignored",
            _SQREEN_EVENT_PREFIX,
            event,
        )
        return False

    if options is None:
        options = {}
    else:
        # Copy so dropped keys don't mutate the caller's dict.
        options = dict(options)

    # Check option keys.
    for option_key in list(options):
        if option_key not in _TRACK_OPTIONS_FIELDS:
            LOGGER.warning("Invalid option key %r, skipped", option_key)
            del options[option_key]

    timestamp = options.get("timestamp")
    if timestamp and not isinstance(timestamp, datetime):
        raise TypeError(
            # Fixed: the message previously reported the class of `event`
            # instead of the offending `timestamp` value.
            "timestamp option must be a datetime object, not {}".format(
                timestamp.__class__.__name__
            )
        )

    properties = options.get("properties")
    # Check the number of properties; keep a deterministic (sorted) subset.
    if properties and len(properties) > _MAX_EVENT_PROPERTIES:
        LOGGER.warning(
            "Event %r has %d properties, "
            "only the first %d ones will be reported",
            event,
            len(properties),
            _MAX_EVENT_PROPERTIES,
        )
        options["properties"] = dict(
            sorted(properties.items())[:_MAX_EVENT_PROPERTIES]
        )

    # Store stacktrace if required.
    if event in STACKTRACE_EVENTS:
        LOGGER.debug("Stacktrace recorded for event %s", event)
        options["stacktrace"] = traceback.format_stack()

    return _track_unsafe(event, options, storage=storage)
def track_action(action, output, storage=runtime):
    """Track the output of a security action as an internal SDK event."""
    if not action.send_response:
        return
    event_name = "sq.action.{}".format(action.name)
    properties = {"output": output, "action_id": action.iden}
    return _track_unsafe(
        event_name,
        {"properties": properties},
        storage=storage,
    )
| [
"logging.getLogger",
"traceback.format_stack",
"datetime.datetime.utcnow"
] | [((327, 354), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (344, 354), False, 'import logging\n'), ((973, 990), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (988, 990), False, 'from datetime import datetime\n'), ((2949, 2973), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (2971, 2973), False, 'import traceback\n')] |
import json
import pickle
import re
from copy import copy, deepcopy
from functools import lru_cache
from json import JSONDecodeError
from os import system, walk, sep
from abc import ABC, abstractmethod
from pathlib import Path
import time
from subprocess import check_output
from tempfile import NamedTemporaryFile
from warnings import warn
from .utils import subset_dict_preserving_order, run_command, nice_time
class no_quotes(str):
    """A string whose repr() omits the surrounding quote characters.

    Used to render a CLI argument value without quoting it (see the
    notes in Rule.__init__).
    """

    def __repr__(self):
        quoted = str.__repr__(self)
        # Drop the leading and trailing quote added by str's repr.
        return quoted[1:-1]
class Rule(ABC):
    """Abstract base for all pipeline rules.

    Design principles (or how does it differ from snakemake):
      - fully python3; no strange make/python mishmash
      - prefer verbosity over ambiguity (named inputs/outputs)
      - Jupyter centered
      - interactive graphs
      - implicit passing of arguments to the executing command
    """
    cache_dir: Path
    tmp_dir: Path
    # Set to True by Rule.setup(); run() refuses to execute before that.
    is_setup = False
    # Class-level registry of every created rule, keyed by name and shared
    # by all Rule subclasses.
    rules = {}
    def __init__(self, name, **kwargs):
        """Create and register a rule.

        Accepted keyword arguments: ``input``, ``output``, ``parameters``
        and ``group``; anything else raises an Exception.

        Notes:
            - input and output will be passed in the same order as it appears in kwargs
            - if the input is a dictionary, the keys will be interpreted as argument names;
              empty key can be used to insert a positional argument
            - the arguments will be serialized preserving the Python type, i.e.
                 input={'name': 1}
              may result in:
                 --name 1
              while:
                 input={'name': "1"}
              would result in
                 --name "1"
              You can force string to be displayed without quotes using:
                 input={'name': no_quotes("1")}
        """
        # NOTE(review): `assert` is stripped under `python -O`, so duplicate
        # names would silently overwrite the registry entry in optimized mode.
        assert name not in self.rules
        self.name = name
        self.execution_time = None
        self.rules[name] = self
        extra_kwargs = set(kwargs) - {'output', 'input', 'group', 'parameters'}
        if extra_kwargs:
            raise Exception(f'Unrecognized keyword arguments to {self.__class__.__name__}: {extra_kwargs}')
        # Keep only input/output/parameters, preserving their kwargs order
        # (the order matters when arguments are serialized to a command).
        self.arguments = subset_dict_preserving_order(
            kwargs,
            {'input', 'output', 'parameters'}
        )
        self.group = kwargs.get('group', None)
        self.outputs = {}
        self.inputs = {}
        self.parameters = {}
        if 'output' in kwargs:
            output = kwargs['output']
            # todo support lists of positionals
            # A bare (non-dict) value becomes a single positional argument.
            self.outputs = output if isinstance(output, dict) else {'': output}
        if 'input' in kwargs:
            input = kwargs['input']
            self.inputs = input if isinstance(input, dict) else {'': input}
        if 'parameters' in kwargs:
            self.parameters = kwargs['parameters']
    @property
    def has_inputs(self) -> bool:
        # True when at least one input was declared.
        return len(self.inputs) != 0
    @property
    def has_outputs(self) -> bool:
        # True when at least one output was declared.
        return len(self.outputs) != 0
    @abstractmethod
    def run(self, use_cache: bool) -> int:
        """Execute the rule; subclasses call super().run() for the setup check."""
        if not self.is_setup:
            raise ValueError('Please set up the rules class settings with Rule.setup() first!')
    @classmethod
    def setup(cls, cache_dir: Path, tmp_dir: Path):
        """Configure the class-wide cache/tmp directories; must run before any rule."""
        cls.cache_dir = Path(cache_dir)
        cls.tmp_dir = Path(tmp_dir)
        cls.is_setup = True
    @abstractmethod
    def to_json(self):
        """Return a JSON-serializable representation of the rule."""
        pass
    def __repr__(self):
        # e.g. <ShellRule 'name' (group) with 2 inputs and 1 outputs>
        fragments = [repr(self.name)]
        if self.group:
            fragments.append(f'({self.group})')
        if self.has_inputs or self.has_outputs:
            fragments.append('with')
        if self.has_inputs:
            fragments.append(f'{len(self.inputs)} inputs')
        if self.has_inputs and self.has_outputs:
            fragments.append('and')
        if self.has_outputs:
            fragments.append(f'{len(self.outputs)} outputs')
        fragments = ' '.join(fragments)
        return f'<{self.__class__.__name__} {fragments}>'
class Group:
    """A group of rules, identified by a unique id (typically a path).

    Instances register themselves in the class-level `Group.groups` dict.
    """
    # Registry of all created groups, keyed by their ``id``.
    groups = {}
    def __init__(self, id: str, name: str, color='#cccccc', parent=None):
        """Create a group and register it under *id*.

        Args:
            id: unique identifier (the registry key), e.g. 'dir/subdir'
            name: human-readable label shown in the graph
            color: hex color used when rendering the group
            parent: id of the parent group, or None for a top-level group
        """
        # BUG FIX: the registry is keyed by ``id``, so duplicate detection
        # must test ``id`` too; the original tested ``name``, which let a
        # second group with a known id silently overwrite the first.
        assert id not in self.groups
        self.name = name
        self.id = id
        self.color = color
        self.groups[id] = self
        self.parent = parent
    def to_json(self):
        """Return the JSON-serializable representation used by the graph UI."""
        return {
            'label': self.name,
            'id': self.id,
            'color': self.color,
            'parent': self.parent
        }
class ShellRule(Rule):
    """A rule executed as a shell command.

    Named arguments will be passed in order,
    preceded with a single dash for single letter names
    or a double dash for longer names.
    """
    def __init__(self, name, command, **kwargs):
        """Create a shell rule.

        Args:
            name: unique rule name (registered in `Rule.rules`)
            command: shell command to run; serialized arguments are appended
        """
        # BUG FIX: the original called `super().__init__(self, name, **kwargs)`,
        # passing `self` as an extra positional argument and raising
        # TypeError on every instantiation.
        super().__init__(name, **kwargs)
        self.command = command
    def serialize(self, arguments_group):
        """Serialize one arguments group into command-line text.

        Dict keys become flags (`-k` for single letters, `--key` otherwise;
        an empty key yields a bare positional); values are rendered with
        repr(), so `no_quotes` strings appear unquoted.
        """
        if not isinstance(arguments_group, dict):
            return repr(arguments_group)
        parts = []
        for key, value in arguments_group.items():
            if len(key):
                flag = ('-' if len(key) == 1 else '--') + key
                parts.append(f'{flag} {value!r}')
            else:
                parts.append(repr(value))
        return ' '.join(parts)
    @property
    def serialized_arguments(self):
        """All argument groups serialized, in declaration order.

        BUG FIX: the original joined a *set* comprehension, making the
        relative order of groups on the command line nondeterministic;
        iterate the insertion-ordered dict values instead.
        """
        return ' '.join(
            self.serialize(arguments_group)
            for arguments_group in self.arguments.values()
        )
    def run(self, use_cache=False) -> int:
        """Execute the command, recording wall-clock time.

        Returns the shell exit status (0 on success).
        """
        super().run(use_cache)
        start_time = time.time()
        status = system(f'{self.command} {self.serialized_arguments}')
        self.execution_time = time.time() - start_time
        return status
    def to_json(self):
        """Return the JSON-serializable summary used by the visualisation."""
        return {
            'name': self.command,
            'arguments': self.serialized_arguments,
            'execution_time': self.execution_time,
            'type': 'shell'
        }
def expand_run_magics(notebook):
    """Return a shallow copy of *notebook* with `%run` magics expanded.

    Every code cell containing `%run <path>` lines is split: the plain code
    lines around each `%run` become separate cells, and the cells of the
    referenced notebook are spliced in at the `%run` position. Cells without
    a `%run` magic (including markdown cells) are kept as-is.
    """
    expanded = copy(notebook)
    cells = []
    for cell in notebook['cells']:
        is_code = cell['cell_type'] == 'code'
        has_run = is_code and any(
            line.startswith('%run') for line in cell['source']
        )
        if not has_run:
            cells.append(cell)
            continue
        pending = []
        for line in cell['source']:
            if not line.startswith('%run'):
                pending.append(line)
                continue
            # Flush accumulated plain code as its own cell before splicing.
            if pending:
                fragment = copy(cell)
                fragment['source'] = pending
                cells.append(fragment)
                pending = []
            included_path = line[5:].strip()
            with open(included_path) as handle:
                included = json.load(handle)
            cells.extend(included['cells'])
        # Flush any trailing plain code after the last %run line.
        if pending:
            fragment = copy(cell)
            fragment['source'] = pending
            cells.append(fragment)
    expanded['cells'] = cells
    return expanded
class NotebookRule(Rule):
    # NOTE(review): annotated as `None`, which is not a valid type; looks
    # like a placeholder for a future options mapping — confirm intent.
    options: None
    @property
    def output_nb_dir(self) -> Path:
        # Executed notebooks (papermill output) are written here.
        return self.tmp_dir / 'out'
    @property
    def reference_nb_dir(self) -> Path:
        # Parameter-injected copies of the source notebooks, used as the
        # baseline when diffing against the executed output.
        return self.tmp_dir / 'ref'
    @property
    def stripped_nb_dir(self) -> Path:
        # Copies of the source notebooks with outputs removed, fed to papermill.
        return self.tmp_dir / 'stripped'
    def __init__(
        self, *args, notebook,
        diff=True,
        deduce_io=True,
        deduce_io_from_data_vault=True,
        execute=True,
        **kwargs
    ):
        """Rule for Jupyter Notebooks

        Args:
            notebook: path to the .ipynb file, relative to the working directory
            deduce_io: whether to automatically deduce inputs and outputs from the code cells tagged "inputs" and "outputs";
                local variables defined in the cell will be evaluated and used as inputs or outputs.
                If you want to generate paths with a helper function for brevity, assign a dict of {variable: path}
                to `__inputs__`/`__outputs__` in the tagged cell using `io.create_paths()` helper.
            diff: whether to generate diffs against the current state of the notebook
            deduce_io_from_data_vault: whether to deduce the inputs and outputs from `data_vault` magics
                (`%vault store` and `%vault import`), see https://github.com/krassowski/data-vault
            execute: if False, the notebook will not be run; useful to include final "leaf" notebooks
                which may take too long to run, but are not essential to the overall results
        """
        super().__init__(*args, **kwargs)
        self.todos = []
        self.notebook = notebook
        self.absolute_notebook_path = Path(notebook).absolute()
        self.generate_diff = diff
        self.diff = None
        self.text_diff = None
        self.fidelity = None
        self.images = []
        self.headers = []
        self.status = None
        self.execute = execute
        from datetime import datetime, timedelta
        # Rough activity measure: number of commits touching this notebook
        # during the last 30 days.
        month_ago = (datetime.today() - timedelta(days=30)).timestamp()
        self.changes = run_command(f'git rev-list --max-age {month_ago} HEAD --count {self.notebook}')
        if deduce_io:
            self.deduce_io_from_tags()
        if deduce_io_from_data_vault:
            self.deduce_io_from_data_vault()
    def deduce_io_from_data_vault(self):
        """Populate self.inputs/self.outputs from `%vault` magics in the cells.

        `%vault import` lines become inputs and `%vault store` lines become
        outputs, keyed by (cell index, variable index). Imports of paths that
        were stored earlier in the same notebook are skipped to avoid cycles.
        """
        notebook_json = self.notebook_json
        stored = set()
        for index, cell in enumerate(notebook_json['cells']):
            if 'source' not in cell:
                continue
            for line in cell['source']:
                if line.startswith('%vault'):
                    try:
                        from data_vault import VaultMagics
                        from data_vault.actions import ImportAction, StoreAction
                        from data_vault.parsing import split_variables, unquote
                    except ImportError:
                        warn('Could not deduce I/O from data-vault %vault magics: data_vault not installed')
                        return
                    vault_magics = VaultMagics()
                    # line[7:] strips the leading '%vault ' prefix.
                    arguments = vault_magics.extract_arguments(line[7:])
                    action = vault_magics.select_action(arguments)
                    if isinstance(action, ImportAction):
                        variables = arguments['import']
                        for var_index, variable in enumerate(split_variables(variables)):
                            if 'from' in arguments:
                                import_path = arguments['from'] + '/' + variable
                            else:
                                import_path = unquote(arguments['import'])
                            if import_path in stored:
                                warn(f'Skipping {line} which was previously stored from this notebook to avoid cycles')
                            else:
                                self.inputs[(index, var_index)] = import_path
                    elif isinstance(action, StoreAction):
                        variables = split_variables(arguments['store'])
                        if 'as' in arguments:
                            # 'as' renames a single stored variable.
                            assert len(variables) == 1
                            variables = [arguments['as']]
                        for var_index, variable in enumerate(variables):
                            store_path = arguments['in'] + '/' + variable
                            self.outputs[(index, var_index)] = store_path
                            stored.add(store_path)
    def deduce_io_from_tags(self, io_tags={'inputs', 'outputs'}):
        """Populate self.inputs/self.outputs from cells tagged 'inputs'/'outputs'.

        Values are taken either from the `__inputs__`/`__outputs__` display
        metadata of the tagged cell, or (fallback) by regex-matching simple
        `name = "path"` assignments in the cell source.
        """
        notebook_json = self.notebook_json
        io_cells = {}
        for index, cell in enumerate(notebook_json['cells']):
            if 'tags' in cell['metadata']:
                cell_io_tags = io_tags.intersection(cell['metadata']['tags'])
                if cell_io_tags:
                    # A cell may be tagged as inputs OR outputs, not both.
                    assert len(cell_io_tags) == 1
                    io_cells[list(cell_io_tags)[0]] = cell, index
        for io, (cell, index) in io_cells.items():
            # The rule must not already have inputs/outputs from elsewhere.
            assert not getattr(self, f'has_{io}')
            source = ''.join(cell['source'])
            if f'__{io}__' in source:
                assert len(cell['outputs']) == 1
                # TODO: search through lists
                values = cell['outputs'][0]['metadata']
            else:
                # so we don't want to use eval (we are not within an isolated copy yet!),
                # thus only simple regular expression matching which will fail on multi-line strings
                # (and anything which is dynamically generated)
                assignments = {
                    match.group('key'): match.group('value')
                    for match in re.finditer(r'^\s*(?P<key>.*?)\s*=\s*([\'"])(?P<value>.*)\2', source, re.MULTILINE)
                }
                values = {
                    key: value
                    for key, value in assignments.items()
                    if key.isidentifier() and value
                }
                if len(assignments) != len(values):
                    # TODO: add nice exception or warning
                    # NOTE(review): bare `raise` outside an except block raises
                    # RuntimeError('No active exception to re-raise').
                    raise
            setattr(self, io, values)
    def serialize(self, arguments_group):
        # papermill accepts parameters as repeated `-p name value` pairs.
        return '-p ' + (' -p '.join(
            f'{key} {value}'
            for key, value in arguments_group.items()
        ))
    @property
    def serialized_arguments(self):
        # NOTE(review): this joins a *set* comprehension, so the relative
        # order of multiple argument groups is arbitrary — confirm whether a
        # deterministic command line is required (cf. ShellRule).
        return ' '.join({
            self.serialize(arguments_group)
            for arguments_group in self.arguments.values()
            if arguments_group
        })
    def outline(self, max_depth=3):
        # NOTE(review): max_depth is currently ignored; all collected markdown
        # headers are returned regardless of level.
        return self.headers
    @property
    @lru_cache()
    def notebook_json(self):
        """The notebook parsed as JSON, with %run magics expanded (cached).

        NOTE(review): lru_cache on a method keys on `self` and keeps the
        instance alive for the cache's lifetime (flake8-bugbear B019).
        """
        with open(self.absolute_notebook_path) as f:
            return expand_run_magics(json.load(f))
    def maybe_create_output_dirs(self):
        """Create parent directories for every declared output path."""
        if self.has_outputs:
            for name, output in self.outputs.items():
                path = Path(output)
                path = path.parent
                if not path.exists():
                    print(f'Creating path "{path}" for "{name}" output argument')
                    path.mkdir(parents=True, exist_ok=True)
    def run(self, use_cache=True) -> int:
        """
        Run JupyterNotebook using PaperMill and compare the output with reference using nbdime
        Returns: status code from the papermill run (0 if successful)
        """
        super().run(use_cache)
        notebook = self.notebook
        path = Path(notebook)
        # Mirror the notebook's directory layout under out/, ref/ and stripped/.
        output_nb_dir = self.output_nb_dir / path.parent
        output_nb_dir.mkdir(parents=True, exist_ok=True)
        reference_nb_dir = self.reference_nb_dir / path.parent
        reference_nb_dir.mkdir(parents=True, exist_ok=True)
        stripped_nb_dir = self.stripped_nb_dir / path.parent
        stripped_nb_dir.mkdir(parents=True, exist_ok=True)
        output_nb = output_nb_dir / path.name
        reference_nb = reference_nb_dir / path.name
        stripped_nb = stripped_nb_dir / path.name
        # The cache key is the md5 of the notebook file contents.
        md5 = run_command(f'md5sum {str(self.absolute_notebook_path)}').split()[0]
        cache_dir = self.cache_dir / path.parent
        cache_dir.mkdir(parents=True, exist_ok=True)
        cache_nb_file = cache_dir / f'{md5}.json'
        # Attributes persisted to / restored from the cache file.
        to_cache = ['execution_time', 'fidelity', 'diff', 'text_diff', 'todos', 'headers', 'images']
        if use_cache and cache_nb_file.exists():
            with open(cache_nb_file, 'rb') as f:
                pickled = pickle.load(f)
            print(f'Reusing cached results for {self}')
            for key in to_cache:
                setattr(self, key, pickled[key])
            return 0
        notebook_json = self.notebook_json
        # Collect all PNG outputs, markdown headers and TODO lines for reports.
        self.images = [
            output['data']['image/png']
            for cell in notebook_json['cells']
            for output in cell.get('outputs', [])
            if 'data' in output and 'image/png' in output['data']
        ]
        self.headers = []
        for cell in notebook_json['cells']:
            if cell['cell_type'] == 'markdown':
                for line in cell['source']:
                    if line.startswith('#'):
                        self.headers.append(line)
        for cell in notebook_json['cells']:
            for line in cell.get('source', ''):
                if 'TODO' in line:
                    self.todos.append(line)
        # strip outputs (otherwise if it stops, the diff will be too optimistic)
        # NOTE(review): the deepcopy is taken *before* outputs are cleared, so
        # the loop below mutates the cached notebook_json while the dumped
        # notebook_stripped still contains the outputs — this looks inverted;
        # confirm which object was meant to be stripped.
        notebook_stripped = deepcopy(notebook_json)
        for cell in notebook_json['cells']:
            cell['outputs'] = []
        with open(stripped_nb, 'w') as f:
            json.dump(notebook_stripped, f)
        if self.execute:
            # execute
            start_time = time.time()
            # os.system returns None on some platforms; coerce to 0.
            status = system(f'papermill {stripped_nb} {output_nb} {self.serialized_arguments}') or 0
            self.execution_time = time.time() - start_time
        else:
            status = 0
            warn(f'Skipping {self} (execute != True)')
        if self.execute and self.generate_diff:
            # inject parameters to a "reference" copy (so that we do not have spurious noise in the diff)
            system(
                f'papermill {self.absolute_notebook_path} {reference_nb} {self.serialized_arguments} --prepare-only'
                # do not print "Input Notebook:" and "Output Notebook:" for the second time
                ' --log-level WARNING'
            )
            with NamedTemporaryFile(delete=False) as tf:
                command = f'nbdiff {reference_nb} {output_nb} --ignore-metadata --ignore-details --out {tf.name}'
                result = run_command(command)
                with open(tf.name) as f:
                    try:
                        self.diff = json.load(f)
                    except JSONDecodeError as e:
                        warn(f'Could not load the diff file: {result}, {f.readlines()}')
            # A second, human-readable (ANSI -> HTML) rendering of the diff.
            command = f'nbdiff {reference_nb} {output_nb} --ignore-metadata --ignore-details --no-use-diff --no-git'
            self.text_diff = run_command(command)
            from ansi2html import Ansi2HTMLConverter
            conv = Ansi2HTMLConverter()
            self.text_diff = conv.convert(self.text_diff)
        changes = len(self.diff[0]['diff']) if self.diff else 0
        # TODO: count only the code cells, not markdown cells?
        # Fidelity = percentage of cells unchanged between reference and output.
        total_cells = len(notebook_json['cells'])
        self.fidelity = (total_cells - changes) / total_cells * 100
        if status == 0:
            # Only successful runs are cached.
            with open(cache_nb_file, 'wb') as f:
                pickle.dump({
                    key: getattr(self, key)
                    for key in to_cache
                }, f)
        self.status = status
        return status
    def to_json(self):
        """Return the JSON-serializable summary used by the visualisation."""
        notebook_name = Path(self.notebook).name
        return {
            'name': self.name,
            'arguments': self.serialized_arguments,
            'execution_time': self.execution_time,
            'type': 'notebook',
            'notebook': self.notebook,
            'notebook_name': notebook_name,
            'fidelity': self.fidelity,
            'changes_this_month': self.changes,
            'nice_time': nice_time(self.execution_time),
            'diff': self.diff,
            'text_diff': self.text_diff,
            'images': self.images,
            'label': self.notebook,
            'headers': self.headers,
            'status': self.status,
            'todos': self.todos,
            'group': self.group
            # TODO: requires testing
            # 'is_tracked': is_tracked_in_version_control(self.notebook)
        }
    def to_graphiz(self, changes=False):
        """Return node attributes for a graphviz rendering of this rule.

        NOTE(review): relies on `self.repository_url`, which is not assigned
        anywhere in this class — presumably set externally; confirm.
        """
        data = self.to_json()
        # TODO: move to static_graph
        buttons = []
        if changes:  # TODO allow to activate
            buttons += [f'<td href="{self.repository_url}/commits/master/{self.notebook}">{self.changes} changes this month</td>']
        if self.fidelity is not None:
            buttons += [f'<td href="">Reproducibility: {self.fidelity:.2f}%</td>']
        if self.execution_time is not None:
            buttons += [f'<td>Runtime: {nice_time(self.execution_time)}</td>']
        buttons_html = '\n'.join(buttons)
        if buttons_html:
            buttons_html = f'<tr>{ buttons_html }</tr>'
        # NOTE(review): buttons_html is computed above but never interpolated
        # into the label below — confirm whether `{buttons_html}` is missing
        # from the f-string.
        return {
            **data,
            **{
                'shape': 'plain',
                'label': f"""<<table cellspacing="0">
                <tr><td href="{self.repository_url}/blob/master/{self.notebook}" colspan="{len(buttons)}" title="{data['notebook_name']}">{self.name.replace('&', ' and ')}</td></tr>
                </table>>"""
            }
        }
def is_tracked_in_version_control(file: str):
    """Return the output of `git ls-files` for *file*.

    The returned bytes are non-empty exactly when git tracks the file,
    so the result is truthy iff the file is under version control.
    """
    command = f'git ls-files {file}'
    return check_output(command, shell=True)
def discover_notebooks(root_path='.', ignore=None, ignored_dirs=None, only_tracked_in_git=False, ignore_prefixes=('__', '.')):
    """Useful when working with input/output auto-detection.

    Walks *root_path* recursively and creates one NotebookRule per `*.ipynb`
    file, grouping rules by their directory.

    Args:
        root_path: directory to scan (defaults to the current directory)
        ignore: set of relative notebook paths to skip
        ignored_dirs: set of directory names to skip (at any depth)
        only_tracked_in_git: skip notebooks not known to `git ls-files`
        ignore_prefixes: file-name prefixes to skip (checkpoints, private files)

    Returns:
        dict with 'rules' (list of NotebookRule) and 'groups' (id -> Group)
    """
    ignored_dirs = ignored_dirs or set()
    ignore = ignore or set()
    names = {}
    rules = []
    from typing import Dict
    groups: Dict[str, Group] = {}
    for dirpath, _, files in walk(root_path):
        # Drop the leading root component ('.') so paths stay relative.
        dirs = dirpath.split(sep)[1:]
        if any(dir.startswith('.') or dir in ignored_dirs for dir in dirs):
            continue
        for file in files:
            if any(file.startswith(prefix) for prefix in ignore_prefixes):
                continue
            if not file.endswith('.ipynb'):
                continue
            path = sep.join(dirs + [file])
            if path in ignore:
                continue
            # BUG FIX: the tracking check previously received only the bare
            # file name, so `git ls-files` could never match notebooks in
            # subdirectories and they were all reported as untracked; check
            # the path relative to root_path instead.
            if only_tracked_in_git and not is_tracked_in_version_control(path):
                continue
            # Human-readable rule name: file name without '.ipynb',
            # underscores shown as spaces (the first character is kept as-is).
            name = file[:-6]
            name = name[0] + name[1:].replace('_', ' ')
            if name in names:
                print(name, 'already registered', path, names[name])
            else:
                names[name] = path
                group_id = sep.join(dirs) if dirs else None
                rule = NotebookRule(name, notebook=path, group=group_id)
                rules.append(rule)
                if group_id and group_id not in groups:
                    groups[group_id] = Group(id=group_id, name=dirs[-1], parent=sep.join(dirs[:-1]))
    return {
        'rules': rules,
        'groups': groups
    }
| [
"data_vault.parsing.split_variables",
"os.sep.join",
"data_vault.parsing.unquote",
"copy.deepcopy",
"datetime.datetime.today",
"datetime.timedelta",
"copy.copy",
"os.walk",
"pathlib.Path",
"re.finditer",
"tempfile.NamedTemporaryFile",
"warnings.warn",
"data_vault.VaultMagics",
"subprocess.... | [((5937, 5951), 'copy.copy', 'copy', (['notebook'], {}), '(notebook)\n', (5941, 5951), False, 'from copy import copy, deepcopy\n'), ((13683, 13694), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (13692, 13694), False, 'from functools import lru_cache\n'), ((20806, 20854), 'subprocess.check_output', 'check_output', (['f"""git ls-files {file}"""'], {'shell': '(True)'}), "(f'git ls-files {file}', shell=True)\n", (20818, 20854), False, 'from subprocess import check_output\n'), ((21240, 21255), 'os.walk', 'walk', (['root_path'], {}), '(root_path)\n', (21244, 21255), False, 'from os import system, walk, sep\n'), ((3150, 3165), 'pathlib.Path', 'Path', (['cache_dir'], {}), '(cache_dir)\n', (3154, 3165), False, 'from pathlib import Path\n'), ((3188, 3201), 'pathlib.Path', 'Path', (['tmp_dir'], {}), '(tmp_dir)\n', (3192, 3201), False, 'from pathlib import Path\n'), ((5507, 5518), 'time.time', 'time.time', ([], {}), '()\n', (5516, 5518), False, 'import time\n'), ((5536, 5589), 'os.system', 'system', (['f"""{self.command} {self.serialized_arguments}"""'], {}), "(f'{self.command} {self.serialized_arguments}')\n", (5542, 5589), False, 'from os import system, walk, sep\n'), ((14516, 14530), 'pathlib.Path', 'Path', (['notebook'], {}), '(notebook)\n', (14520, 14530), False, 'from pathlib import Path\n'), ((16518, 16541), 'copy.deepcopy', 'deepcopy', (['notebook_json'], {}), '(notebook_json)\n', (16526, 16541), False, 'from copy import copy, deepcopy\n'), ((5620, 5631), 'time.time', 'time.time', ([], {}), '()\n', (5629, 5631), False, 'import time\n'), ((16674, 16705), 'json.dump', 'json.dump', (['notebook_stripped', 'f'], {}), '(notebook_stripped, f)\n', (16683, 16705), False, 'import json\n'), ((16779, 16790), 'time.time', 'time.time', ([], {}), '()\n', (16788, 16790), False, 'import time\n'), ((17000, 17042), 'warnings.warn', 'warn', (['f"""Skipping {self} (execute != True)"""'], {}), "(f'Skipping {self} (execute != True)')\n", (17004, 17042), False, 
'from warnings import warn\n'), ((17211, 17349), 'os.system', 'system', (['f"""papermill {self.absolute_notebook_path} {reference_nb} {self.serialized_arguments} --prepare-only --log-level WARNING"""'], {}), "(\n f'papermill {self.absolute_notebook_path} {reference_nb} {self.serialized_arguments} --prepare-only --log-level WARNING'\n )\n", (17217, 17349), False, 'from os import system, walk, sep\n'), ((18193, 18213), 'ansi2html.Ansi2HTMLConverter', 'Ansi2HTMLConverter', ([], {}), '()\n', (18211, 18213), False, 'from ansi2html import Ansi2HTMLConverter\n'), ((18847, 18866), 'pathlib.Path', 'Path', (['self.notebook'], {}), '(self.notebook)\n', (18851, 18866), False, 'from pathlib import Path\n'), ((21712, 21735), 'os.sep.join', 'sep.join', (['(dirs + [file])'], {}), '(dirs + [file])\n', (21720, 21735), False, 'from os import system, walk, sep\n'), ((6838, 6848), 'copy.copy', 'copy', (['cell'], {}), '(cell)\n', (6842, 6848), False, 'from copy import copy, deepcopy\n'), ((8685, 8699), 'pathlib.Path', 'Path', (['notebook'], {}), '(notebook)\n', (8689, 8699), False, 'from pathlib import Path\n'), ((13814, 13826), 'json.load', 'json.load', (['f'], {}), '(f)\n', (13823, 13826), False, 'import json\n'), ((13975, 13987), 'pathlib.Path', 'Path', (['output'], {}), '(output)\n', (13979, 13987), False, 'from pathlib import Path\n'), ((15505, 15519), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (15516, 15519), False, 'import pickle\n'), ((16812, 16886), 'os.system', 'system', (['f"""papermill {stripped_nb} {output_nb} {self.serialized_arguments}"""'], {}), "(f'papermill {stripped_nb} {output_nb} {self.serialized_arguments}')\n", (16818, 16886), False, 'from os import system, walk, sep\n'), ((16926, 16937), 'time.time', 'time.time', ([], {}), '()\n', (16935, 16937), False, 'import time\n'), ((17499, 17531), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (17517, 17531), False, 'from tempfile import 
NamedTemporaryFile\n'), ((9009, 9025), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (9023, 9025), False, 'from datetime import datetime, timedelta\n'), ((9028, 9046), 'datetime.timedelta', 'timedelta', ([], {'days': '(30)'}), '(days=30)\n', (9037, 9046), False, 'from datetime import datetime, timedelta\n'), ((10087, 10100), 'data_vault.VaultMagics', 'VaultMagics', ([], {}), '()\n', (10098, 10100), False, 'from data_vault import VaultMagics\n'), ((22056, 22070), 'os.sep.join', 'sep.join', (['dirs'], {}), '(dirs)\n', (22064, 22070), False, 'from os import system, walk, sep\n'), ((6356, 6366), 'copy.copy', 'copy', (['cell'], {}), '(cell)\n', (6360, 6366), False, 'from copy import copy, deepcopy\n'), ((6649, 6661), 'json.load', 'json.load', (['o'], {}), '(o)\n', (6658, 6661), False, 'import json\n'), ((12732, 12824), 're.finditer', 're.finditer', (['"""^\\\\s*(?P<key>.*?)\\\\s*=\\\\s*([\\\\\'"])(?P<value>.*)\\\\2"""', 'source', 're.MULTILINE'], {}), '(\'^\\\\s*(?P<key>.*?)\\\\s*=\\\\s*([\\\\\\\'"])(?P<value>.*)\\\\2\', source,\n re.MULTILINE)\n', (12743, 12824), False, 'import re\n'), ((17801, 17813), 'json.load', 'json.load', (['f'], {}), '(f)\n', (17810, 17813), False, 'import json\n'), ((9936, 10030), 'warnings.warn', 'warn', (['"""Could not deduce I/O from data-vault %vault magics: data_vault not installed"""'], {}), "(\n 'Could not deduce I/O from data-vault %vault magics: data_vault not installed'\n )\n", (9940, 10030), False, 'from warnings import warn\n'), ((10415, 10441), 'data_vault.parsing.split_variables', 'split_variables', (['variables'], {}), '(variables)\n', (10430, 10441), False, 'from data_vault.parsing import split_variables, unquote\n'), ((11066, 11101), 'data_vault.parsing.split_variables', 'split_variables', (["arguments['store']"], {}), "(arguments['store'])\n", (11081, 11101), False, 'from data_vault.parsing import split_variables, unquote\n'), ((22333, 22352), 'os.sep.join', 'sep.join', (['dirs[:-1]'], {}), '(dirs[:-1])\n', 
(22341, 22352), False, 'from os import system, walk, sep\n'), ((10657, 10685), 'data_vault.parsing.unquote', 'unquote', (["arguments['import']"], {}), "(arguments['import'])\n", (10664, 10685), False, 'from data_vault.parsing import split_variables, unquote\n'), ((10772, 10869), 'warnings.warn', 'warn', (['f"""Skipping {line} which was previously stored from this notebook to avoid cycles"""'], {}), "(\n f'Skipping {line} which was previously stored from this notebook to avoid cycles'\n )\n", (10776, 10869), False, 'from warnings import warn\n')] |
import math
from stable_baselines_model_based_rl.wrapper.step_handler import StepRewardDoneHandler
class ContinuousMountainCarStepHandler(StepRewardDoneHandler):
    """Termination and reward logic for the continuous MountainCar task."""

    # The episode is done once the car reaches this position with at least
    # this velocity.
    goal_position = 0.45
    goal_velocity = 0

    def get_done(self, step: int) -> bool:
        """Return True once the car has reached the goal position/velocity."""
        values = self.observation.to_value_list()
        position, velocity = values[0], values[1]
        # bool() converts a possible numpy bool into a plain Python bool.
        return bool(
            position >= self.goal_position and velocity >= self.goal_velocity
        )

    def get_reward(self, step: int) -> float:
        """Return 100 on reaching the goal, minus a quadratic action penalty."""
        throttle = self.action.to_value_list()[0]
        reward = 100.0 if self.get_done(step) else 0
        reward -= 0.1 * math.pow(throttle, 2)
        return reward
| [
"math.pow"
] | [((731, 753), 'math.pow', 'math.pow', (['action[0]', '(2)'], {}), '(action[0], 2)\n', (739, 753), False, 'import math\n')] |
#!/usr/bin/env python3
import os
import sys
import json
from datetime import datetime
from submitty_utils import dateutils
def generatePossibleDatabases():
    """List the Submitty course database names for the current semester.

    Scans the semester's courses directory and returns
    'submitty_<semester>_<course>' for every course subdirectory,
    sorted by course name.
    """
    current = dateutils.get_current_semester()
    prefix = 'submitty_' + current + '_'
    courses_path = "/var/local/submitty/courses/" + current
    databases = []
    for course in sorted(os.listdir(courses_path)):
        if os.path.isdir(courses_path + "/" + course):
            databases.append(prefix + course)
    return databases
if(__name__ == "__main__"):
    # CLI handling: zero extra args -> use the first discovered database;
    # one arg -> use that database name (validated against the discovered
    # list, with --help/-h support).
    num_args = len(sys.argv)
    possible_databases = generatePossibleDatabases()
    database = possible_databases[0]
    if(num_args > 2):
        print('Too many arguments. Use --help for help.')
        sys.exit()
    elif(num_args == 2):
        if(sys.argv[1] == '--help' or sys.argv[1] == '-h'):
            print('This tool can be used to test forum scalability -- pg_dump after execution to save the test data which can be sourced later.')
            print('This tool takes in an optional argument: database, so an example usage is: `python3 input_forum_data.py submitty_f18_blank`')
            print('Note this will delete forum data in the database you specify. The database will default to `submitty_f18_blank` if not specified.')
            sys.exit()
        elif(sys.argv[1] not in possible_databases):
            print('Unknown argument: {:s}, use --help or -h for help.'.format(sys.argv[1]))
            sys.exit()
        database = sys.argv[1]
    # Size of the synthetic forum to generate (negatives are clamped via abs).
    threads = abs(int(input("Enter number of threads (i.e. 1000): ").strip()))
    posts = abs(int(input("Enter number of posts per thread (i.e. 20): ").strip()))
    usr_path = "/usr/local/submitty"
    # Database credentials come from the installation's configuration file.
    settings = json.load(open(os.path.join(usr_path, ".setup", "submitty_conf.json")))
    print("WARNING: This tool is going to delete data from the following tables:\n\tthreads\n\tposts\n\tforum_posts_history\n\tstudent_favorites\n\tviewed_responses\n\tthread_categories\n\tcategories_list")
    answer = input("Do you agree for this data to be removed from {:s}? [yes/no]: ".format(database)).strip()
    if(answer.lower() != "yes"):
        print("Exiting...")
        sys.exit()
    # NOTE(review): the values below are interpolated directly into shell
    # commands and SQL text; they come from a root-owned config file and a
    # validated database name, but this pattern is shell/SQL-injection prone —
    # consider psql variables or a proper driver.
    variables = (settings['database_password'], settings['database_host'], settings['database_user'], database)
    # Wipe existing forum data, then re-create a single 'TESTDATA' category.
    os.system("""PGPASSWORD='{}' psql --host={} --username={} --dbname={} -c \"TRUNCATE TABLE threads RESTART IDENTITY CASCADE\" > /dev/null""".format(*variables))
    os.system("""PGPASSWORD='{}' psql --host={} --username={} --dbname={} -c \"DELETE FROM thread_categories\" > /dev/null""".format(*variables))
    os.system("""PGPASSWORD='{}' psql --host={} --username={} --dbname={} -c \"TRUNCATE TABLE categories_list RESTART IDENTITY CASCADE\" > /dev/null""".format(*variables))
    os.system("""PGPASSWORD='{}' psql --host={} --username={} --dbname={} -c \"INSERT INTO categories_list (category_desc) VALUES ('TESTDATA')\" > /dev/null""".format(*variables))
    print()
    # Insert the requested number of threads; each thread gets `posts` posts,
    # the first being a root post (parent_id = -1), the rest chained to the
    # previous post's id.
    for i in range(threads):
        if((i+1) % 10 == 0):
            print("Completed: {:d}/{:d}".format(i+1, threads))
        os.system("""PGPASSWORD='{}' psql --host={} --username={} --dbname={} -c \"INSERT INTO threads (title, created_by, pinned, deleted, merged_thread_id, merged_post_id, is_visible) VALUES (\'{:s}\', \'{:s}\', false, false, -1, -1, true)\" > /dev/null""".format(*variables, "Thread{:d}".format(i+1), "aphacker"))
        os.system("""PGPASSWORD='{}' psql --host={} --username={} --dbname={} -c \"INSERT INTO thread_categories (thread_id, category_id) VALUES ({:d}, 1)\" > /dev/null""".format(*variables, i+1))
        for pid in range(posts):
            os.system("""PGPASSWORD='{}' psql --host={} --username={} --dbname={} -c \"INSERT INTO posts (thread_id, parent_id, author_user_id, content, timestamp, anonymous, deleted, type, has_attachment) VALUES ({}, {}, {}, {}, \'{}\', false, false, 0, false)\" > /dev/null""".format(*variables, i+1, -1 if pid == 0 else i*posts + pid, "'aphacker'", "'Post{:d}'".format(i*posts + pid+1), datetime.now()))
| [
"os.listdir",
"submitty_utils.dateutils.get_current_semester",
"os.path.join",
"datetime.datetime.now",
"os.path.isdir",
"sys.exit"
] | [((171, 203), 'submitty_utils.dateutils.get_current_semester', 'dateutils.get_current_semester', ([], {}), '()\n', (201, 203), False, 'from submitty_utils import dateutils\n'), ((596, 606), 'sys.exit', 'sys.exit', ([], {}), '()\n', (604, 606), False, 'import sys\n'), ((1933, 1943), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1941, 1943), False, 'import sys\n'), ((348, 380), 'os.path.isdir', 'os.path.isdir', (["(path + '/' + name)"], {}), "(path + '/' + name)\n", (361, 380), False, 'import os\n'), ((1502, 1556), 'os.path.join', 'os.path.join', (['usr_path', '""".setup"""', '"""submitty_conf.json"""'], {}), "(usr_path, '.setup', 'submitty_conf.json')\n", (1514, 1556), False, 'import os\n'), ((327, 343), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (337, 343), False, 'import os\n'), ((1101, 1111), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1109, 1111), False, 'import sys\n'), ((1245, 1255), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1253, 1255), False, 'import sys\n'), ((3738, 3752), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3750, 3752), False, 'from datetime import datetime\n')] |
# Copyright (c) 2017, IGLU consortium
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import time
import multiprocessing
import logging
import numpy as np
import unittest
import matplotlib.pyplot as plt
from home_platform.env import BasicEnvironment
from panda3d.core import LVector3f
# Resolve test fixture directories relative to this file's real location,
# so the tests work regardless of the current working directory.
TEST_DATA_DIR = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), "..", "data")
# The SunCG dataset fixtures live in a subdirectory of the test data.
TEST_SUNCG_DATA_DIR = os.path.join(TEST_DATA_DIR, "suncg")
class TestBasicEnvironment(unittest.TestCase):
    """Smoke tests for BasicEnvironment rendering and multiprocessing."""
    def testRender(self):
        """Render RGB and depth images from a fixed agent pose and display them briefly."""
        env = BasicEnvironment("0004d52d1aeeb8ae6de39d6bd993e992",
                               suncgDatasetRoot=TEST_SUNCG_DATA_DIR, depth=True)
        env.agent.setPos(LVector3f(42, -39, 1))
        env.agent.setHpr(LVector3f(60.0, 0.0, 0.0))
        env.step()
        image = env.renderWorld.getRgbImages()['agent-0']
        depth = env.renderWorld.getDepthImages(mode='distance')['agent-0']
        # Show RGB and (normalized) depth side by side for one second.
        fig = plt.figure(figsize=(16, 8))
        plt.axis("off")
        ax = plt.subplot(121)
        ax.imshow(image)
        ax = plt.subplot(122)
        ax.imshow(depth / np.max(depth), cmap='binary')
        plt.show(block=False)
        time.sleep(1.0)
        plt.close(fig)
        env.destroy()
    def testGenerateSpawnPositions(self):
        """Sample spawn positions and overlay them on the occupancy map."""
        env = BasicEnvironment("0004d52d1aeeb8ae6de39d6bd993e992",
                               suncgDatasetRoot=TEST_SUNCG_DATA_DIR, depth=False)
        occupancyMap, occupancyMapCoord, positions = env.generateSpawnPositions(
            n=10)
        # World-coordinate extent of the occupancy map, for plotting.
        xmin, ymin = np.min(occupancyMapCoord, axis=(0, 1))
        xmax, ymax = np.max(occupancyMapCoord, axis=(0, 1))
        fig = plt.figure()
        plt.axis("on")
        ax = plt.subplot(111)
        ax.imshow(occupancyMap, cmap='gray', extent=[xmin, xmax, ymin, ymax])
        ax.scatter(positions[:, 0], positions[:, 1], s=40, c=[1.0, 0.0, 0.0])
        plt.show(block=False)
        time.sleep(1.0)
        plt.close(fig)
        env.destroy()
    def testMultiprocessing(self):
        """Run two independent simulations in separate processes."""
        # Spawn new process with independent simulations using the
        # multiprocessing module
        # Not supported in OSX for now
        if sys.platform == 'darwin':
            return
        nbProcesses = 2
        nbSteps = 100
        def worker():
            # Each worker builds its own environment and steps it to completion.
            env = BasicEnvironment("0004d52d1aeeb8ae6de39d6bd993e992", suncgDatasetRoot=TEST_SUNCG_DATA_DIR,
                                   depth=False, debug=True)
            env.agent.setPos(LVector3f(45, -42, 1))
            env.agent.setHpr(LVector3f(45.0, 0.0, 0.0))
            # Simulation loop
            for _ in range(nbSteps):
                env.step()
                _ = env.getObservation()
            env.destroy()
        processes = []
        for _ in range(nbProcesses):
            p = multiprocessing.Process(target=worker)
            processes.append(p)
            p.start()
        for p in processes:
            p.join()
if __name__ == '__main__':
    # Surface warnings but keep output terse; make numpy raise on
    # floating-point anomalies instead of silently propagating them.
    logging.basicConfig(level=logging.WARN)
    np.seterr(all='raise')
    unittest.main()
| [
"logging.basicConfig",
"multiprocessing.Process",
"panda3d.core.LVector3f",
"numpy.min",
"time.sleep",
"numpy.max",
"os.path.realpath",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.close",
"home_platform.env.BasicEnvironment",
"unittest.main",
"matplotlib.pyplo... | [((4506, 4545), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARN'}), '(level=logging.WARN)\n', (4525, 4545), False, 'import logging\n'), ((4550, 4572), 'numpy.seterr', 'np.seterr', ([], {'all': '"""raise"""'}), "(all='raise')\n", (4559, 4572), True, 'import numpy as np\n'), ((4577, 4592), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4590, 4592), False, 'import unittest\n'), ((1820, 1846), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1836, 1846), False, 'import os\n'), ((1919, 1945), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1935, 1945), False, 'import os\n'), ((2061, 2168), 'home_platform.env.BasicEnvironment', 'BasicEnvironment', (['"""0004d52d1aeeb8ae6de39d6bd993e992"""'], {'suncgDatasetRoot': 'TEST_SUNCG_DATA_DIR', 'depth': '(True)'}), "('0004d52d1aeeb8ae6de39d6bd993e992', suncgDatasetRoot=\n TEST_SUNCG_DATA_DIR, depth=True)\n", (2077, 2168), False, 'from home_platform.env import BasicEnvironment\n'), ((2465, 2492), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 8)'}), '(figsize=(16, 8))\n', (2475, 2492), True, 'import matplotlib.pyplot as plt\n'), ((2501, 2516), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2509, 2516), True, 'import matplotlib.pyplot as plt\n'), ((2530, 2546), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (2541, 2546), True, 'import matplotlib.pyplot as plt\n'), ((2585, 2601), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (2596, 2601), True, 'import matplotlib.pyplot as plt\n'), ((2666, 2687), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (2674, 2687), True, 'import matplotlib.pyplot as plt\n'), ((2696, 2711), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (2706, 2711), False, 'import time\n'), ((2720, 2734), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', 
(2729, 2734), True, 'import matplotlib.pyplot as plt\n'), ((2816, 2924), 'home_platform.env.BasicEnvironment', 'BasicEnvironment', (['"""0004d52d1aeeb8ae6de39d6bd993e992"""'], {'suncgDatasetRoot': 'TEST_SUNCG_DATA_DIR', 'depth': '(False)'}), "('0004d52d1aeeb8ae6de39d6bd993e992', suncgDatasetRoot=\n TEST_SUNCG_DATA_DIR, depth=False)\n", (2832, 2924), False, 'from home_platform.env import BasicEnvironment\n'), ((3073, 3111), 'numpy.min', 'np.min', (['occupancyMapCoord'], {'axis': '(0, 1)'}), '(occupancyMapCoord, axis=(0, 1))\n', (3079, 3111), True, 'import numpy as np\n'), ((3133, 3171), 'numpy.max', 'np.max', (['occupancyMapCoord'], {'axis': '(0, 1)'}), '(occupancyMapCoord, axis=(0, 1))\n', (3139, 3171), True, 'import numpy as np\n'), ((3187, 3199), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3197, 3199), True, 'import matplotlib.pyplot as plt\n'), ((3208, 3222), 'matplotlib.pyplot.axis', 'plt.axis', (['"""on"""'], {}), "('on')\n", (3216, 3222), True, 'import matplotlib.pyplot as plt\n'), ((3236, 3252), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (3247, 3252), True, 'import matplotlib.pyplot as plt\n'), ((3417, 3438), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (3425, 3438), True, 'import matplotlib.pyplot as plt\n'), ((3447, 3462), 'time.sleep', 'time.sleep', (['(1.0)'], {}), '(1.0)\n', (3457, 3462), False, 'import time\n'), ((3471, 3485), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3480, 3485), True, 'import matplotlib.pyplot as plt\n'), ((2221, 2242), 'panda3d.core.LVector3f', 'LVector3f', (['(42)', '(-39)', '(1)'], {}), '(42, -39, 1)\n', (2230, 2242), False, 'from panda3d.core import LVector3f\n'), ((2269, 2294), 'panda3d.core.LVector3f', 'LVector3f', (['(60.0)', '(0.0)', '(0.0)'], {}), '(60.0, 0.0, 0.0)\n', (2278, 2294), False, 'from panda3d.core import LVector3f\n'), ((3830, 3950), 'home_platform.env.BasicEnvironment', 'BasicEnvironment', 
(['"""0004d52d1aeeb8ae6de39d6bd993e992"""'], {'suncgDatasetRoot': 'TEST_SUNCG_DATA_DIR', 'depth': '(False)', 'debug': '(True)'}), "('0004d52d1aeeb8ae6de39d6bd993e992', suncgDatasetRoot=\n TEST_SUNCG_DATA_DIR, depth=False, debug=True)\n", (3846, 3950), False, 'from home_platform.env import BasicEnvironment\n'), ((4330, 4368), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'worker'}), '(target=worker)\n', (4353, 4368), False, 'import multiprocessing\n'), ((2628, 2641), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (2634, 2641), True, 'import numpy as np\n'), ((4011, 4032), 'panda3d.core.LVector3f', 'LVector3f', (['(45)', '(-42)', '(1)'], {}), '(45, -42, 1)\n', (4020, 4032), False, 'from panda3d.core import LVector3f\n'), ((4063, 4088), 'panda3d.core.LVector3f', 'LVector3f', (['(45.0)', '(0.0)', '(0.0)'], {}), '(45.0, 0.0, 0.0)\n', (4072, 4088), False, 'from panda3d.core import LVector3f\n')] |
#!/usr/bin/env python
"""
_selfupdate_
Util command for updating the cirrus install itself
Supports getting a spefified branch or tag, or defaults to
looking up the latest release and using that instead.
"""
import sys
import argparse
import arrow
import os
import requests
import inspect
import contextlib
from cirrus.invoke_helpers import local
import cirrus
from cirrus.configuration import load_configuration
from cirrus.environment import cirrus_home, virtualenv_home, is_anaconda
from cirrus.github_tools import get_releases
from cirrus.git_tools import update_to_branch, update_to_tag
from cirrus.logger import get_logger
# Module-level logger shared by every step of the selfupdate command.
LOGGER = get_logger()

# PyPI JSON metadata endpoint used to discover the newest cirrus-cli release.
PYPI_JSON_URL = "https://pypi.python.org/pypi/cirrus-cli/json"
@contextlib.contextmanager
def chdir(dirname=None):
    """
    Temporarily change the process working directory.

    Switches to *dirname* (when given) for the duration of the ``with``
    block and always restores the previous working directory afterwards,
    even if the body raises.
    """
    saved_cwd = os.getcwd()
    try:
        if dirname is not None:
            os.chdir(dirname)
        yield
    finally:
        os.chdir(saved_cwd)
def build_parser(argslist):
    """
    _build_parser_

    Build the argument parser for the selfupdate command and parse
    *argslist* with it, returning the resulting namespace.
    """
    parser = argparse.ArgumentParser(
        description=(
            'git cirrus selfupdate command, '
            'used to update cirrus itself'
        )
    )
    parser.add_argument('command', nargs='?')
    parser.add_argument(
        '--version',
        default=None,
        required=False,
        help='specify a tag to install',
    )
    parser.add_argument(
        '--upgrade-setuptools',
        action='store_true',
        default=False,
        help=(
            "upgrade setuptools in cirrus installation "
            "(needed for some conda installations)"
        ),
    )
    parser.add_argument(
        '--branch',
        default=None,
        required=False,
        help='specify a branch to use',
    )
    parser.add_argument(
        '--legacy-repo',
        dest='legacy_repo',
        action='store_true',
        default=False,
        required=False,
        help='Use the old, non pip update process',
    )
    return parser.parse_args(argslist)
def sort_by_date(d1):
    """
    Sort *key* function (not a cmp function): parse the publication
    timestamp, the second element of a ``(tag_name, published_at)``
    tuple, into an arrow datetime so tag lists can be ordered
    chronologically.
    """
    return arrow.get(d1[1])
def latest_release(config):
    """
    _latest_release_

    Pull the list of releases from the GitHub repo and return the tag
    name of the newest one by publication date.

    :param config: cirrus configuration exposing ``organisation_name()``
        and ``package_name()``
    :returns: tag name string of the most recently published release
    """
    releases = get_releases(config.organisation_name(), config.package_name())
    tags = [(release['tag_name'], release['published_at']) for release in releases]
    # Bug fix: the previous code called sorted() and discarded its return
    # value (sorted returns a new list, it does not sort in place), so the
    # first *unsorted* tag was returned.  Pick the newest tag explicitly.
    most_recent_tag = max(tags, key=sort_by_date)[0]
    return most_recent_tag
def latest_pypi_release():
    """
    Query PyPI's JSON metadata endpoint and return the newest published
    cirrus-cli version string.  Raises ``requests.HTTPError`` on a bad
    HTTP status.
    """
    response = requests.get(PYPI_JSON_URL)
    response.raise_for_status()
    return response.json()['info']['version']
def find_cirrus_install():
    """
    _find_cirrus_install_

    Use inspect to locate the root path of the cirrus install so callers
    can cd there and run updates in the right location.  The install
    process checks out the cirrus repo and the cirrus module lives in
    ``<install>/src/cirrus``, hence three ``dirname`` hops upwards.
    """
    module_file = inspect.getsourcefile(cirrus)
    # <install>/src/cirrus/<file> -> <install>
    src_dir = os.path.dirname(os.path.dirname(module_file))
    return os.path.dirname(src_dir)
def setup_develop(config):
    """
    _setup_develop_

    Rebuild the virtualenv and run ``python setup.py develop`` inside it
    via a local shell command.
    """
    LOGGER.info("running setup.py develop...")
    local('git cirrus build --upgrade')
    develop_command = ' . ./{0}/bin/activate && python setup.py develop'.format(
        config.venv_name()
    )
    local(develop_command)
def pip_install(version, update_setuptools=False):
    """
    _pip_install_

    Upgrade cirrus-cli to the requested version inside its virtualenv
    via pip.

    :param version: cirrus-cli version string to install
    :param update_setuptools: when True, upgrade setuptools first
        (needed for some conda installations)
    """
    pip_req = 'cirrus-cli=={0}'.format(version)
    venv_path = virtualenv_home()
    venv_name = os.path.basename(venv_path)
    LOGGER.info("running pip upgrade...")
    # Build the virtualenv activation prefix once; conda and plain
    # virtualenv installs use different activation commands.  Previously
    # this string was duplicated across four branches.
    if is_anaconda():
        activate = 'source {0}/bin/activate {1}'.format(venv_path, venv_path)
    else:
        activate = ' . ./{0}/bin/activate'.format(venv_name)
    if update_setuptools:
        local('{0} && pip install --upgrade setuptools'.format(activate))
    local('{0} && pip install --upgrade {1}'.format(activate, pip_req))
def legacy_update(opts):
    """
    Update a repo-based (legacy, non pip) cirrus install in place.

    Checks out the requested branch or tag (defaulting to the latest
    GitHub release) inside the install directory and re-runs the
    develop setup.

    :raises RuntimeError: when both ``--branch`` and ``--version`` are given
    """
    install = find_cirrus_install()
    with chdir(install):
        config = load_configuration()
        if opts.branch and opts.version:
            raise RuntimeError("Can specify branch -OR- version, not both")
        if opts.branch is not None:
            update_to_branch(opts.branch, config)
            setup_develop(config)
            return
        tag = opts.version if opts.version is not None else latest_release(config)
        LOGGER.info("Retrieved latest tag: {0}".format(tag))
        update_to_tag(tag, config)
        setup_develop(config)
def pip_update(opts):
    """
    Upgrade a pip-installed cirrus to the requested version, or to the
    latest release published on PyPI when no version is given.
    """
    with chdir(cirrus_home()):
        tag = opts.version
        if tag is not None:
            LOGGER.info("tag specified: {0}".format(tag))
        else:
            # should probably be a pip call now...
            tag = latest_pypi_release()
            LOGGER.info("Retrieved latest tag: {0}".format(tag))
        pip_install(tag, opts.upgrade_setuptools)
def main():
    """
    _main_

    Parse command line options and dispatch to the legacy (repo based)
    or pip based self-update routine.  The default behaviour is the pip
    path, which looks up the latest release and installs that.
    """
    opts = build_parser(sys.argv)
    updater = legacy_update if opts.legacy_repo else pip_update
    updater(opts)


if __name__ == '__main__':
    main()
| [
"inspect.getsourcefile",
"cirrus.configuration.load_configuration",
"cirrus.environment.virtualenv_home",
"argparse.ArgumentParser",
"cirrus.git_tools.update_to_branch",
"requests.get",
"os.getcwd",
"cirrus.logger.get_logger",
"os.path.dirname",
"cirrus.environment.is_anaconda",
"arrow.get",
"... | [((644, 656), 'cirrus.logger.get_logger', 'get_logger', ([], {}), '()\n', (654, 656), False, 'from cirrus.logger import get_logger\n'), ((787, 798), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (796, 798), False, 'import os\n'), ((1060, 1163), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""git cirrus selfupdate command, used to update cirrus itself"""'}), "(description=\n 'git cirrus selfupdate command, used to update cirrus itself')\n", (1083, 1163), False, 'import argparse\n'), ((2157, 2173), 'arrow.get', 'arrow.get', (['d1[1]'], {}), '(d1[1])\n', (2166, 2173), False, 'import arrow\n'), ((2662, 2689), 'requests.get', 'requests.get', (['PYPI_JSON_URL'], {}), '(PYPI_JSON_URL)\n', (2674, 2689), False, 'import requests\n'), ((3183, 3210), 'os.path.dirname', 'os.path.dirname', (['cirrus_mod'], {}), '(cirrus_mod)\n', (3198, 3210), False, 'import os\n'), ((3228, 3252), 'os.path.dirname', 'os.path.dirname', (['src_dir'], {}), '(src_dir)\n', (3243, 3252), False, 'import os\n'), ((3439, 3474), 'cirrus.invoke_helpers.local', 'local', (['"""git cirrus build --upgrade"""'], {}), "('git cirrus build --upgrade')\n", (3444, 3474), False, 'from cirrus.invoke_helpers import local\n'), ((3797, 3814), 'cirrus.environment.virtualenv_home', 'virtualenv_home', ([], {}), '()\n', (3812, 3814), False, 'from cirrus.environment import cirrus_home, virtualenv_home, is_anaconda\n'), ((3831, 3858), 'os.path.basename', 'os.path.basename', (['venv_path'], {}), '(venv_path)\n', (3847, 3858), False, 'import os\n'), ((3909, 3922), 'cirrus.environment.is_anaconda', 'is_anaconda', ([], {}), '()\n', (3920, 3922), False, 'from cirrus.environment import cirrus_home, virtualenv_home, is_anaconda\n'), ((5428, 5441), 'cirrus.environment.cirrus_home', 'cirrus_home', ([], {}), '()\n', (5439, 5441), False, 'from cirrus.environment import cirrus_home, virtualenv_home, is_anaconda\n'), ((905, 921), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (913, 921), False, 
'import os\n'), ((3138, 3167), 'inspect.getsourcefile', 'inspect.getsourcefile', (['cirrus'], {}), '(cirrus)\n', (3159, 3167), False, 'import inspect\n'), ((4797, 4817), 'cirrus.configuration.load_configuration', 'load_configuration', ([], {}), '()\n', (4815, 4817), False, 'from cirrus.configuration import load_configuration\n'), ((5295, 5321), 'cirrus.git_tools.update_to_tag', 'update_to_tag', (['tag', 'config'], {}), '(tag, config)\n', (5308, 5321), False, 'from cirrus.git_tools import update_to_branch, update_to_tag\n'), ((852, 869), 'os.chdir', 'os.chdir', (['dirname'], {}), '(dirname)\n', (860, 869), False, 'import os\n'), ((5007, 5044), 'cirrus.git_tools.update_to_branch', 'update_to_branch', (['opts.branch', 'config'], {}), '(opts.branch, config)\n', (5023, 5044), False, 'from cirrus.git_tools import update_to_branch, update_to_tag\n')] |
from base_bot import log
def atoi(text):
    """Convert *text* to an int when it is all digits; otherwise return it unchanged."""
    if text.isdigit():
        return int(text)
    return text
def bool_to_emoticon(value):
    """Map a truthy value to a check-mark emoji and a falsy one to a cross."""
    # Conditional expression instead of the fragile ``cond and a or b`` idiom.
    return "✅" if value else "❌"
# https://stackoverflow.com/questions/7204805/how-to-merge-dictionaries-of-dictionaries
# merges b into a
def merge(a, b, path=None):
    """
    Recursively merge dict *b* into dict *a* (in place) and return *a*.

    Nested dicts are merged key by key; any other clashing value in *a*
    is overwritten by the value from *b*.
    """
    if path is None:
        path = []
    for key in b:
        if key in a and isinstance(a[key], dict) and isinstance(b[key], dict):
            merge(a[key], b[key], path + [str(key)])
        else:
            # Bug fix: keys present only in ``b`` were previously dropped,
            # so nothing new was ever merged into ``a``.
            a[key] = b[key]
    return a
def flatten(*args):
    """
    Flatten the given arguments into a single list.

    Non-empty strings are appended as-is, lists are extended element by
    element, and anything else (including the empty string) is skipped.
    """
    result = []
    for arg in args:
        # isinstance instead of ``type(x) == T`` so subclasses are accepted too.
        if isinstance(arg, str) and arg != '':
            result.append(arg)
        elif isinstance(arg, list):
            result.extend(arg)
    return result
async def pluralize_author(author):
    """Return *author* with an English possessive suffix appended."""
    suffix = "'" if author[-1] == 's' else "'s"
    return author + suffix
def chunks(iterable, chunk_size):
    """Yield successive slices of *iterable*, each at most *chunk_size* items long."""
    for offset in range(0, len(iterable), chunk_size):
        yield iterable[offset:offset + chunk_size]
def debug(message):
    """Log an incoming message at debug level, prefixed with its guild, channel and author."""
    # Messages outside a guild (e.g. DMs) get '-' as the guild marker.
    guild = message.guild.name if message.guild else '-'
    log.debug(f'[{guild}][{message.channel}][{message.author.display_name}] {message.content}')
def convert_color_array(data_object):
    """Return lowercase color names for every enabled entry in the ManaColors mapping."""
    return [
        name.replace('Color', '').lower()
        for name, enabled in data_object['ManaColors'].items()
        if enabled
    ]
| [
"base_bot.log.debug"
] | [((1108, 1209), 'base_bot.log.debug', 'log.debug', (['f"""[{guild}][{message.channel}][{message.author.display_name}] {message.content}"""'], {}), "(\n f'[{guild}][{message.channel}][{message.author.display_name}] {message.content}'\n )\n", (1117, 1209), False, 'from base_bot import log\n')] |
from jsgf import parse_grammar_string
def main(args):
    """
    Round-trip a JSGF grammar: parse the input file, print both the
    input and compiled forms, and write the compiled grammar to the
    output path.
    """
    # Parse input grammar file.
    with open(args.input_file_path, "r") as fp:
        text = fp.read()
    print("\ninput grammar: ")
    print(text)
    grammar = parse_grammar_string(text)

    # Print it.
    print("\noutput grammar: ")
    compiled = grammar.compile()
    print(compiled)

    with open(args.output_file_path, "w") as fp:
        fp.write(compiled)
if __name__ == '__main__':
    # Prepares arguments for the demo.
    # Usage: python jsgf_tags.py test.jsgf out.jsgf
    import argparse

    arg_parser = argparse.ArgumentParser(description='expand JSGF grammar tags.')
    arg_parser.add_argument('input_file_path', type=str, help='Input JSGF grammar file path.')
    arg_parser.add_argument('output_file_path', type=str, help='Output file path.')
    main(arg_parser.parse_args())
| [
"jsgf.parse_grammar_string",
"argparse.ArgumentParser"
] | [((588, 652), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""expand JSGF grammar tags."""'}), "(description='expand JSGF grammar tags.')\n", (611, 652), False, 'import argparse\n'), ((233, 259), 'jsgf.parse_grammar_string', 'parse_grammar_string', (['text'], {}), '(text)\n', (253, 259), False, 'from jsgf import parse_grammar_string\n')] |