Dataset schema:

    code         string  lengths 22 – 1.05M
    apis         list    lengths 1 – 3.31k
    extract_api  string  lengths 75 – 3.25M
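The extract_api column records, for each call found in code, the dotted API name, a repr of its arguments, and character offsets into the original source. Below is a minimal sketch of how such records could be reproduced with Python's ast module (Python 3.8+); extract_api_calls is a hypothetical helper name, and the exact offset and argument-repr conventions of this dataset are assumptions.

import ast

def extract_api_calls(source):
    # Map (lineno, col) pairs to absolute character offsets.
    lines = source.splitlines(keepends=True)
    starts = [0]
    for line in lines:
        starts.append(starts[-1] + len(line))

    def offset(lineno, col):
        return starts[lineno - 1] + col

    calls = []
    for node in ast.walk(ast.parse(source)):
        if not isinstance(node, ast.Call):
            continue
        # Resolve dotted names like os.path.join; skip anything fancier.
        parts, func = [], node.func
        while isinstance(func, ast.Attribute):
            parts.append(func.attr)
            func = func.value
        if isinstance(func, ast.Name):
            parts.append(func.id)
            dotted = ".".join(reversed(parts))
            # end_lineno / end_col_offset require Python 3.8+
            calls.append((dotted,
                          (offset(node.lineno, node.col_offset),
                           offset(node.end_lineno, node.end_col_offset))))
    return calls

print(extract_api_calls("import os\nos.path.join('a', 'b')\n"))
# [('os.path.join', (10, 32))]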
import os
import json
import subprocess
import platform
import base64
import glob

from .__shared import CACHE_DIR, SCRIPT_DIR, concourse_context

STARTER_DIR = "starter"
PYTHON_DIR = "pythonpath"


class Task:
    def __init__(self, fun, jobname, secret_manager, image_resource, script,
                 inputs=[], timeout="5m", privileged=False, outputs=[], secrets={},
                 attempts=1, caches=[], name=None, env={}):
        if not name:
            name = fun.__name__.replace("_", "-")
        self.name = name
        self.timeout = timeout
        self.privileged = privileged
        self.attempts = attempts
        self.caches = caches
        self.secret_manager = secret_manager
        # Concourse task config: secrets become ((var)) params and the starter
        # script is invoked with the job/task names.
        self.config = {
            "platform": "linux",
            "image_resource": image_resource,
            "outputs": [{"name": CACHE_DIR}] + list(map(lambda x: {"name": x}, outputs)),
            "inputs": [{"name": CACHE_DIR}, {"name": SCRIPT_DIR}] + list(map(lambda x: {"name": x}, inputs)),
            "caches": [{"path": cache} for cache in self.caches],
            "params": {
                **dict(map(lambda kv: (str(kv[1]), "(({}))".format(str(kv[1]))), secrets.items())),
                **{
                    "PYTHONPATH": f"{SCRIPT_DIR}/{PYTHON_DIR}:{SCRIPT_DIR}/{STARTER_DIR}:/usr/local/lib/python/garden-tools",
                    "REQUESTS_CA_BUNDLE": "/etc/ssl/certs/ca-certificates.crt",
                },
                **env,
            },
            "run": {
                "path": "/usr/bin/python3",
                "args": [os.path.join(SCRIPT_DIR, STARTER_DIR, os.path.basename(script)),
                         "--job", jobname, "--task", name, "--concourse"],
            },
        }
        cache_file = os.path.join(CACHE_DIR, jobname, name + ".json")

        def fn():
            print(f"Running: {name}")
            kwargs = {}
            for kv in secrets.items():
                kwargs[kv[0]] = self.secret_manager(str(kv[1]))
                if not kwargs[kv[0]] and not isinstance(kv[1], OptionalSecret):
                    raise Exception(f'Secret not available as environment variable "{kv[1]}"')
            for out in outputs:
                if not concourse_context():
                    dir = os.path.join("/tmp", "outputs", jobname, out)
                else:
                    dir = os.path.abspath(out)
                os.makedirs(dir, exist_ok=True)
                kwargs[out] = dir
            result = fun(**kwargs)
            os.makedirs(os.path.dirname(cache_file), exist_ok=True)
            with open(cache_file, "w") as fd:
                json.dump(result, fd)
            return result

        def fn_cached():
            try:
                with open(cache_file, "r") as fd:
                    return json.load(fd)
            except FileNotFoundError:
                try:
                    return fn()
                except Exception as exc:
                    # drop the FileNotFoundError from the chained traceback
                    raise exc from None

        self.fn = fn
        self.fn_cached = fn  # fn_cached: result caching currently disabled

    def concourse(self):
        concourse = {
            "task": self.name.replace("_", "-"),
            "timeout": self.timeout,
            "privileged": self.privileged,
            "config": self.config,
        }
        if self.attempts != 1:
            # explicitly setting attempts to 1 adds clutter to the concourse UI
            concourse["attempts"] = self.attempts
        return concourse


class InitTask:
    def __init__(self, init_dirs, image_resource):
        self.init_dirs = init_dirs
        self.image_resource = image_resource

    def package(self):
        tar = "gtar" if platform.system() == "Darwin" else "tar"
        files = []
        transform = []
        # longest local path first, so transforms for nested dirs apply before their parents
        init_dirs = sorted(list(self.init_dirs.items()), key=lambda d: len(d[1]), reverse=True)
        for dir_concourse, dir_local in init_dirs:
            dir_local = os.path.abspath(dir_local)
            transform.append(f"--transform 's|{dir_local}|{dir_concourse}|g'")
            files = files + list(glob.glob(os.path.join(dir_local, "**", "*.[ps][yh]"), recursive=True))
        # deterministic tarball (sorted names, fixed mtime) so the payload is reproducible
        cmd = f'{tar} cj --sort=name --mtime="UTC 2019-01-01" {" ".join(transform)} --owner=root:0 --group=root:0 -b 1 -P -f - {" ".join(files)}'
        return base64.b64encode(subprocess.check_output(cmd, shell=True)).decode("utf-8")

    def concourse(self):
        return {
            "task": "init",
            "config": {
                "platform": "linux",
                "image_resource": self.image_resource,
                "outputs": [{"name": CACHE_DIR}, {"name": SCRIPT_DIR}],
                "run": {
                    "path": "/bin/bash",
                    "args": [
                        "-ceu",
                        f'echo "{self.package()}" | base64 -d | tar -C {SCRIPT_DIR} -xvjf -',
                    ],
                },
            },
        }


class OptionalSecret:
    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name
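For orientation, a hypothetical use of the Task class above, assuming the module (and its CACHE_DIR/SCRIPT_DIR constants) is importable; the image_resource shape, the env-var-backed secret_manager, and the script name are illustrative assumptions inferred from how __init__ consumes its arguments, not part of the original module.

import os

def build(gcc_version):
    # The task body receives resolved secrets as keyword arguments.
    return {"built-with": gcc_version}

task = Task(
    fun=build,
    jobname="nightly",
    secret_manager=lambda key: os.environ.get(key),  # assumed env-var lookup
    image_resource={"type": "registry-image", "source": {"repository": "python"}},
    script="starter.py",
    secrets={"gcc_version": "GCC_VERSION"},  # kwarg name -> env var / ((GCC_VERSION)) param
)
print(task.concourse()["task"])  # -> "build"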
[ "json.dump", "os.path.abspath", "json.load", "os.makedirs", "os.path.basename", "os.path.dirname", "subprocess.check_output", "platform.system", "os.path.join" ]
[((1696, 1744), 'os.path.join', 'os.path.join', (['CACHE_DIR', 'jobname', "(name + '.json')"], {}), "(CACHE_DIR, jobname, name + '.json')\n", (1708, 1744), False, 'import os\n'), ((3869, 3895), 'os.path.abspath', 'os.path.abspath', (['dir_local'], {}), '(dir_local)\n', (3884, 3895), False, 'import os\n'), ((2363, 2394), 'os.makedirs', 'os.makedirs', (['dir'], {'exist_ok': '(True)'}), '(dir, exist_ok=True)\n', (2374, 2394), False, 'import os\n'), ((2488, 2515), 'os.path.dirname', 'os.path.dirname', (['cache_file'], {}), '(cache_file)\n', (2503, 2515), False, 'import os\n'), ((2594, 2615), 'json.dump', 'json.dump', (['result', 'fd'], {}), '(result, fd)\n', (2603, 2615), False, 'import json\n'), ((3615, 3632), 'platform.system', 'platform.system', ([], {}), '()\n', (3630, 3632), False, 'import platform\n'), ((2232, 2277), 'os.path.join', 'os.path.join', (['"""/tmp"""', '"""outputs"""', 'jobname', 'out'], {}), "('/tmp', 'outputs', jobname, out)\n", (2244, 2277), False, 'import os\n'), ((2326, 2346), 'os.path.abspath', 'os.path.abspath', (['out'], {}), '(out)\n', (2341, 2346), False, 'import os\n'), ((2762, 2775), 'json.load', 'json.load', (['fd'], {}), '(fd)\n', (2771, 2775), False, 'import json\n'), ((4258, 4298), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (4281, 4298), False, 'import subprocess\n'), ((1573, 1597), 'os.path.basename', 'os.path.basename', (['script'], {}), '(script)\n', (1589, 1597), False, 'import os\n'), ((4018, 4061), 'os.path.join', 'os.path.join', (['dir_local', '"""**"""', '"""*.[ps][yh]"""'], {}), "(dir_local, '**', '*.[ps][yh]')\n", (4030, 4061), False, 'import os\n')]
from pydriller import Repository

for commit in Repository('https://github.com/williamsartijose/Trabalho-Cadastro-de-Aluno.git').traverse_commits():
    print(commit.hash)
    print(commit.msg)
    print(commit.author.name)
    print("\n")
    for file in commit.modified_files:
        print(file.filename, ' has changed')
    print("\n\n")
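pydriller's Repository also accepts traversal filters, which avoids walking the full history when only a window is of interest; a small variation on the row above (the dates are placeholders):

from datetime import datetime
from pydriller import Repository

repo_url = 'https://github.com/williamsartijose/Trabalho-Cadastro-de-Aluno.git'
# Only visit commits inside a date window.
for commit in Repository(repo_url,
                       since=datetime(2020, 1, 1),
                       to=datetime(2021, 1, 1)).traverse_commits():
    print(commit.hash, commit.committer_date, len(commit.modified_files))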
[ "pydriller.Repository" ]
[((50, 135), 'pydriller.Repository', 'Repository', (['"""https://github.com/williamsartijose/Trabalho-Cadastro-de-Aluno.git"""'], {}), "('https://github.com/williamsartijose/Trabalho-Cadastro-de-Aluno.git'\n )\n", (60, 135), False, 'from pydriller import Repository\n')]
#!/usr/bin/python
import rospy
from std_msgs.msg import Float32, Bool
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Vector3
from geometry_msgs.msg import Twist
from math import pi


class SpotMicroJoystickControl():
    BUTTON_IDLE = 0
    BUTTON_WALK = 1
    BUTTON_STAND = 2
    BUTTON_ANGLE = 3

    ANGLE_AXES_ROLL = 0
    ANGLE_AXES_HEIGHT = 1
    ANGLE_AXES_YAW = 2
    ANGLE_AXES_PITCH = 3

    WALK_AXES_FORWARD = 1
    WALK_AXES_STRAFE = 0
    WALK_AXES_YAW = 2

    MODE_IDLE = 0
    MODE_STAND = 1
    MODE_ANGLE = 2
    MODE_WALK = 3

    MAX_ROLL_DEG = 45
    MAX_YAW_DEG = 45
    MAX_PATCH_DEG = 45  # (sic) used as the pitch limit
    MAX_FORWARD_SPEED = 0.05
    MAX_STRAFE_SPEED = 0.05
    MAX_YAW_SPEED_DEG = 15

    def __init__(self):
        # Start in idle mode; on_joy_axes reads self.mode, which would
        # otherwise be unset until the first button press
        self.mode = self.MODE_IDLE

        self._angle_cmd_msg = Vector3()
        self._angle_cmd_msg.x = 0
        self._angle_cmd_msg.y = 0
        self._angle_cmd_msg.z = 0

        self._vel_cmd_msg = Twist()
        self._vel_cmd_msg.linear.x = 0
        self._vel_cmd_msg.linear.y = 0
        self._vel_cmd_msg.linear.z = 0
        self._vel_cmd_msg.angular.x = 0
        self._vel_cmd_msg.angular.y = 0
        self._vel_cmd_msg.angular.z = 0

        self._walk_event_cmd_msg = Bool()
        self._walk_event_cmd_msg.data = True  # Mostly acts as an event driven action on receipt of a true message

        self._stand_event_cmd_msg = Bool()
        self._stand_event_cmd_msg.data = True

        self._idle_event_cmd_msg = Bool()
        self._idle_event_cmd_msg.data = True

        rospy.loginfo("Setting Up the Spot Micro Joystick Control Node...")

        # Set up and title the ros node for this code
        rospy.init_node('spot_micro_joystick_control')

        # Create publishers for commanding velocity, angle, and robot states
        self._ros_pub_angle_cmd = rospy.Publisher('/angle_cmd', Vector3, queue_size=1)
        self._ros_pub_vel_cmd = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
        self._ros_pub_walk_cmd = rospy.Publisher('/walk_cmd', Bool, queue_size=1)
        self._ros_pub_stand_cmd = rospy.Publisher('/stand_cmd', Bool, queue_size=1)
        self._ros_pub_idle_cmd = rospy.Publisher('/idle_cmd', Bool, queue_size=1)

        rospy.loginfo("Joystick control node publishers correctly initialized")

    def reset_all_motion_commands_to_zero(self):
        '''Reset body motion cmd states to zero and publish zero value body motion commands'''
        self._vel_cmd_msg.linear.x = 0
        self._vel_cmd_msg.linear.y = 0
        self._vel_cmd_msg.linear.z = 0
        self._vel_cmd_msg.angular.x = 0
        self._vel_cmd_msg.angular.y = 0
        self._vel_cmd_msg.angular.z = 0

        self._ros_pub_vel_cmd.publish(self._vel_cmd_msg)

    def reset_all_angle_commands_to_zero(self):
        '''Reset angle cmd states to zero and publish them'''
        self._angle_cmd_msg.x = 0
        self._angle_cmd_msg.y = 0
        self._angle_cmd_msg.z = 0

        self._ros_pub_angle_cmd.publish(self._angle_cmd_msg)

    def on_joy(self, msg):
        self.on_joy_buttons(msg.buttons)
        self.on_joy_axes(msg.axes)

    def on_joy_buttons(self, buttons):
        if buttons[self.BUTTON_IDLE] == 1:
            self._ros_pub_idle_cmd.publish(self._idle_event_cmd_msg)
            rospy.loginfo('Idle command issued from joystick.')
            self.mode = self.MODE_IDLE
        elif buttons[self.BUTTON_STAND] == 1:
            self._ros_pub_stand_cmd.publish(self._stand_event_cmd_msg)
            rospy.loginfo('Stand command issued from joystick.')
            self.mode = self.MODE_STAND
        elif buttons[self.BUTTON_ANGLE] == 1:
            self.reset_all_angle_commands_to_zero()
            rospy.loginfo('Entering joystick angle command mode.')
            self.mode = self.MODE_ANGLE
        elif buttons[self.BUTTON_WALK] == 1:
            self.reset_all_angle_commands_to_zero()
            self._ros_pub_walk_cmd.publish(self._walk_event_cmd_msg)
            rospy.loginfo('Entering joystick walk command mode.')
            self.mode = self.MODE_WALK

    def on_joy_axes(self, axes):
        if self.mode == self.MODE_ANGLE:
            self.on_joy_angle_mode(axes)
        elif self.mode == self.MODE_WALK:
            self.on_joy_walk_mode(axes)

    def on_joy_walk_mode(self, axes):
        self._vel_cmd_msg.linear.x = axes[self.WALK_AXES_FORWARD] * self.MAX_FORWARD_SPEED
        self._vel_cmd_msg.linear.y = axes[self.WALK_AXES_STRAFE] * self.MAX_STRAFE_SPEED
        self._vel_cmd_msg.angular.z = pi / 180 * axes[self.WALK_AXES_YAW] * self.MAX_YAW_SPEED_DEG

        print('Cmd Values: x speed: %1.3f m/s, y speed: %1.3f m/s, yaw rate: %1.3f deg/s '
              % (self._vel_cmd_msg.linear.x, self._vel_cmd_msg.linear.y,
                 self._vel_cmd_msg.angular.z * 180 / pi))

        self._ros_pub_vel_cmd.publish(self._vel_cmd_msg)

    def on_joy_angle_mode(self, axes):
        self._angle_cmd_msg.x = pi / 180 * axes[self.ANGLE_AXES_ROLL] * self.MAX_ROLL_DEG * -1
        self._angle_cmd_msg.y = pi / 180 * axes[self.ANGLE_AXES_PITCH] * self.MAX_PATCH_DEG * -1
        self._angle_cmd_msg.z = pi / 180 * axes[self.ANGLE_AXES_YAW] * self.MAX_YAW_DEG

        print('Cmd Values: phi: %1.3f deg, theta: %1.3f deg, psi: %1.3f deg '
              % (self._angle_cmd_msg.x * 180 / pi, self._angle_cmd_msg.y * 180 / pi,
                 self._angle_cmd_msg.z * 180 / pi))

        self._ros_pub_angle_cmd.publish(self._angle_cmd_msg)

    def run(self):
        print("green = idle")
        print("yellow = stand")
        print("blue = angle")
        print("red = walk")

        # Publish all body motion commands to 0
        self.reset_all_motion_commands_to_zero()

        rospy.Subscriber("/joy", Joy, self.on_joy)
        rospy.spin()


if __name__ == "__main__":
    smjc = SpotMicroJoystickControl()
    smjc.run()
[ "geometry_msgs.msg.Vector3", "rospy.Subscriber", "rospy.Publisher", "geometry_msgs.msg.Twist", "rospy.loginfo", "rospy.init_node", "rospy.spin", "std_msgs.msg.Bool" ]
[((771, 780), 'geometry_msgs.msg.Vector3', 'Vector3', ([], {}), '()\n', (778, 780), False, 'from geometry_msgs.msg import Vector3\n'), ((912, 919), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (917, 919), False, 'from geometry_msgs.msg import Twist\n'), ((1193, 1199), 'std_msgs.msg.Bool', 'Bool', ([], {}), '()\n', (1197, 1199), False, 'from std_msgs.msg import Float32, Bool\n'), ((1352, 1358), 'std_msgs.msg.Bool', 'Bool', ([], {}), '()\n', (1356, 1358), False, 'from std_msgs.msg import Float32, Bool\n'), ((1441, 1447), 'std_msgs.msg.Bool', 'Bool', ([], {}), '()\n', (1445, 1447), False, 'from std_msgs.msg import Float32, Bool\n'), ((1502, 1569), 'rospy.loginfo', 'rospy.loginfo', (['"""Setting Up the Spot Micro Joystick Control Node..."""'], {}), "('Setting Up the Spot Micro Joystick Control Node...')\n", (1515, 1569), False, 'import rospy\n'), ((1633, 1679), 'rospy.init_node', 'rospy.init_node', (['"""spot_micro_joystick_control"""'], {}), "('spot_micro_joystick_control')\n", (1648, 1679), False, 'import rospy\n'), ((1792, 1844), 'rospy.Publisher', 'rospy.Publisher', (['"""/angle_cmd"""', 'Vector3'], {'queue_size': '(1)'}), "('/angle_cmd', Vector3, queue_size=1)\n", (1807, 1844), False, 'import rospy\n'), ((1877, 1925), 'rospy.Publisher', 'rospy.Publisher', (['"""/cmd_vel"""', 'Twist'], {'queue_size': '(1)'}), "('/cmd_vel', Twist, queue_size=1)\n", (1892, 1925), False, 'import rospy\n'), ((1959, 2007), 'rospy.Publisher', 'rospy.Publisher', (['"""/walk_cmd"""', 'Bool'], {'queue_size': '(1)'}), "('/walk_cmd', Bool, queue_size=1)\n", (1974, 2007), False, 'import rospy\n'), ((2042, 2091), 'rospy.Publisher', 'rospy.Publisher', (['"""/stand_cmd"""', 'Bool'], {'queue_size': '(1)'}), "('/stand_cmd', Bool, queue_size=1)\n", (2057, 2091), False, 'import rospy\n'), ((2125, 2173), 'rospy.Publisher', 'rospy.Publisher', (['"""/idle_cmd"""', 'Bool'], {'queue_size': '(1)'}), "('/idle_cmd', Bool, queue_size=1)\n", (2140, 2173), False, 'import rospy\n'), ((2183, 2255), 'rospy.loginfo', 'rospy.loginfo', (['"""Joystick control node publishers corrrectly initialized"""'], {}), "('Joystick control node publishers corrrectly initialized')\n", (2196, 2255), False, 'import rospy\n'), ((5674, 5716), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/joy"""', 'Joy', 'self.on_joy'], {}), "('/joy', Joy, self.on_joy)\n", (5690, 5716), False, 'import rospy\n'), ((5725, 5737), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (5735, 5737), False, 'import rospy\n'), ((3241, 3292), 'rospy.loginfo', 'rospy.loginfo', (['"""Idle command issued from joystick."""'], {}), "('Idle command issued from joystick.')\n", (3254, 3292), False, 'import rospy\n'), ((3461, 3513), 'rospy.loginfo', 'rospy.loginfo', (['"""Stand command issued from joystick."""'], {}), "('Stand command issued from joystick.')\n", (3474, 3513), False, 'import rospy\n'), ((3664, 3718), 'rospy.loginfo', 'rospy.loginfo', (['"""Entering joystick angle command mode."""'], {}), "('Entering joystick angle command mode.')\n", (3677, 3718), False, 'import rospy\n'), ((3937, 3990), 'rospy.loginfo', 'rospy.loginfo', (['"""Entering joystick walk command mode."""'], {}), "('Entering joystick walk command mode.')\n", (3950, 3990), False, 'import rospy\n')]
import FWCore.ParameterSet.Config as cms

options = cms.untracked.PSet(
    FailPath = cms.untracked.vstring(),
    IgnoreCompletely = cms.untracked.vstring(),
    Rethrow = cms.untracked.vstring(),
    SkipEvent = cms.untracked.vstring(),
    allowUnscheduled = cms.obsolete.untracked.bool,
    canDeleteEarly = cms.untracked.vstring(),
    emptyRunLumiMode = cms.obsolete.untracked.string,
    eventSetup = cms.untracked.PSet(
        forceNumberOfConcurrentIOVs = cms.untracked.PSet(),
        numberOfConcurrentIOVs = cms.untracked.uint32(1)
    ),
    fileMode = cms.untracked.string('FULLMERGE'),
    forceEventSetupCacheClearOnNewRun = cms.untracked.bool(False),
    makeTriggerResults = cms.obsolete.untracked.bool,
    numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(1),
    numberOfConcurrentRuns = cms.untracked.uint32(1),
    numberOfStreams = cms.untracked.uint32(0),
    numberOfThreads = cms.untracked.uint32(4),
    printDependencies = cms.untracked.bool(False),
    sizeOfStackForThreadsInKB = cms.optional.untracked.uint32,
    throwIfIllegalParameter = cms.untracked.bool(True),
    wantSummary = cms.untracked.bool(True)
)
[ "FWCore.ParameterSet.Config.untracked.vstring", "FWCore.ParameterSet.Config.untracked.string", "FWCore.ParameterSet.Config.untracked.bool", "FWCore.ParameterSet.Config.untracked.PSet", "FWCore.ParameterSet.Config.untracked.uint32" ]
[((87, 110), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', ([], {}), '()\n', (108, 110), True, 'import FWCore.ParameterSet.Config as cms\n'), ((135, 158), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', ([], {}), '()\n', (156, 158), True, 'import FWCore.ParameterSet.Config as cms\n'), ((174, 197), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', ([], {}), '()\n', (195, 197), True, 'import FWCore.ParameterSet.Config as cms\n'), ((215, 238), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', ([], {}), '()\n', (236, 238), True, 'import FWCore.ParameterSet.Config as cms\n'), ((313, 336), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', ([], {}), '()\n', (334, 336), True, 'import FWCore.ParameterSet.Config as cms\n'), ((582, 615), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""FULLMERGE"""'], {}), "('FULLMERGE')\n", (602, 615), True, 'import FWCore.ParameterSet.Config as cms\n'), ((657, 682), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (675, 682), True, 'import FWCore.ParameterSet.Config as cms\n'), ((779, 802), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', (['(1)'], {}), '(1)\n', (799, 802), True, 'import FWCore.ParameterSet.Config as cms\n'), ((833, 856), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', (['(1)'], {}), '(1)\n', (853, 856), True, 'import FWCore.ParameterSet.Config as cms\n'), ((880, 903), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', (['(0)'], {}), '(0)\n', (900, 903), True, 'import FWCore.ParameterSet.Config as cms\n'), ((927, 950), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', (['(4)'], {}), '(4)\n', (947, 950), True, 'import FWCore.ParameterSet.Config as cms\n'), ((976, 1001), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (994, 1001), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1096, 1120), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (1114, 1120), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1140, 1164), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (1158, 1164), True, 'import FWCore.ParameterSet.Config as cms\n'), ((472, 492), 'FWCore.ParameterSet.Config.untracked.PSet', 'cms.untracked.PSet', ([], {}), '()\n', (490, 492), True, 'import FWCore.ParameterSet.Config as cms\n'), ((536, 559), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', (['(1)'], {}), '(1)\n', (556, 559), True, 'import FWCore.ParameterSet.Config as cms\n')]
import array
import random

import numpy as np

from deap import algorithms
from deap import base
from deap import creator
from deap import tools

# Parameter definition table (Ax-style spec)
PARAMETERS = [
    {
        "name": "x1",
        "type": "range",
        "bounds": [-10.0, 10.0],
        "value_type": "float",
    },
    {
        "name": "x2",
        "type": "range",
        "bounds": [-10.0, 10.0],
        "value_type": "float",
    },
]

LOCUS = np.array([128, 64, 32, 16, 8, 4, 2, 1])  # bit-weight table for binary-to-number conversion
NLOCUS = len(LOCUS)       # number of bits in the conversion table
NPARAM = len(PARAMETERS)  # number of parameters
NBIT = NLOCUS * NPARAM    # total number of bits

NGEN = 40     # number of generations
NPOP = 300    # population size
CXPB = 0.5    # crossover probability
MUTPB = 0.2   # mutation probability (per individual)
INDPB = 0.05  # mutation probability (per bit)


# Optimizer class
class Optimizer():
    def __init__(self, cb_step=None, cb_end=None):
        self.cb_step = cb_step  # per-step callback
        self.cb_end = cb_end    # end-of-run callback

        # minimization
        creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
        creator.create("Individual", array.array, typecode='b', fitness=creator.FitnessMin)

        toolbox = base.Toolbox()

        # gene type
        # Attribute generator
        toolbox.register("attr_bool", random.randint, 0, 1)  # bit

        # initialization
        # Structure initializers
        # individual
        toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, NBIT)
        # population
        toolbox.register("population", tools.initRepeat, list, toolbox.individual)

        # evaluation function
        toolbox.register("evaluate", self.evaluate)
        # crossover
        toolbox.register("mate", tools.cxTwoPoint)  # two-point crossover
        # mutation
        toolbox.register("mutate", tools.mutFlipBit, indpb=INDPB)  # flip-bit
        # selection
        toolbox.register("select", tools.selTournament, tournsize=3)  # tournament
        # toolbox.register("select", tools.selRoulette)  # roulette
        # toolbox.register("select", tools.selBest)  # ranking

        self.toolbox = toolbox

    # binary-to-number conversion
    def b2n(self, l):
        # l: bit string for one parameter
        return sum(l * LOCUS)

    # range scaling
    def scale(self, a, fromBound, toBound, t):
        # a: source value
        # fromBound, toBound: ranges before and after scaling
        # t: cast type
        (min1, max1) = fromBound
        (min2, max2) = toBound
        ret = a / (max1 - min1) * (max2 - min2) + min2
        ret = t(ret)
        ret = max(min2, ret)
        ret = min(max2, ret)
        return ret

    # convert a genome into a list of real parameter values
    def getX(self, individual):
        # individual: bit string for one individual
        # collect the values of the parameters being optimized into an array
        ls = np.array(individual).reshape([NPARAM, NLOCUS])
        ret = []
        for i, l in enumerate(ls):
            # Ax-style parameter definition table
            p = PARAMETERS[i]
            type = p['type']
            if type == 'range':
                # when type is 'range'
                bounds = p['bounds']          # bounds
                value_type = p['value_type']  # value type
                # cast
                t = eval(value_type)
                xmin = t(bounds[0])
                xmax = t(bounds[1])
                # scale to the real-valued range
                x = self.scale(self.b2n(l), (0, sum(LOCUS)), (xmin, xmax), t)
            elif type == 'choice':
                # when type is 'choice'
                values = p['values']              # choices
                bounds = [values[0], values[-1]]  # bounds
                value_type = p['value_type']      # value type
                # cast
                t = eval(value_type)
                xmin = t(bounds[0])
                xmax = t(bounds[1])
                # scale to the real-valued range
                x = self.scale(self.b2n(l), (0, sum(LOCUS)), (xmin, xmax), t)
                # map onto one of the choices
                n = len(values)
                for j in range(n):
                    a = xmin + (xmax - xmin) / n * j
                    b = xmin + (xmax - xmin) / n * (j + 1)
                    if x >= a and x < b:
                        x = values[j]
                        break
            elif type == 'fixed':
                # when type is 'fixed'
                value = p['value']  # fixed value
                x = value
            else:
                raise ValueError("unknown parameter type", type)
            ret.append(x)
        return np.array(ret)

    # count = 0

    # evaluation function
    def evaluate(self, individual):
        # self.count += 1
        # print(self.count)

        # collect the values of the parameters being optimized into an array
        x = self.getX(individual)

        score = 0
        if self.cb_step:
            # parameter-notification callback:
            # send the current parameters to the client and wait for the score
            score = self.cb_step(x)
        else:
            # standalone operation
            x1 = x[0]
            x2 = x[1]
            # Booth Function
            score = (x1 + 2 * x2 - 7) ** 2 + (2 * x1 + x2 - 5) ** 2
        return score,

    # optimization entry point
    def optimize(self, ngen=NGEN):
        # random.seed(64)

        pop = self.toolbox.population(n=NPOP)
        # elite (hall-of-fame) individual
        hof = tools.HallOfFame(1)
        stats = tools.Statistics(lambda ind: ind.fitness.values)
        stats.register("avg", np.mean)
        stats.register("std", np.std)
        stats.register("min", np.min)
        stats.register("max", np.max)

        pop, log = algorithms.eaSimple(pop, self.toolbox, cxpb=CXPB, mutpb=MUTPB, ngen=ngen,
                                       stats=stats, halloffame=hof, verbose=True)

        # best parameters = the values that got closest to the optimum
        best_ind = hof.items[0]
        print(best_ind)
        print(best_ind.fitness.values)
        best_parameters = self.getX(best_ind)
        print(best_parameters)

        if self.cb_end:
            # end callback: report the best parameters to the Unity app
            self.cb_end(best_parameters)

        # return pop, log, hof


if __name__ == "__main__":
    obj = Optimizer()
    obj.optimize()
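To make the gene encoding concrete, here is a worked decode of a single 8-bit gene, mirroring b2n and scale above (plain arithmetic, no DEAP required):

import numpy as np

LOCUS = np.array([128, 64, 32, 16, 8, 4, 2, 1])

bits = np.array([1, 0, 0, 0, 0, 0, 0, 1])  # one gene of NLOCUS bits
n = sum(bits * LOCUS)                     # b2n: 128 + 1 = 129
# scale() maps (0, sum(LOCUS)) = (0, 255) onto the bounds (-10.0, 10.0)
x = n / (255 - 0) * (10.0 - (-10.0)) + (-10.0)
print(n, x)  # 129 0.1176...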
[ "deap.base.Toolbox", "deap.tools.Statistics", "deap.creator.create", "numpy.array", "deap.algorithms.eaSimple", "deap.tools.HallOfFame" ]
[((441, 480), 'numpy.array', 'np.array', (['[128, 64, 32, 16, 8, 4, 2, 1]'], {}), '([128, 64, 32, 16, 8, 4, 2, 1])\n', (449, 480), True, 'import numpy as np\n'), ((900, 959), 'deap.creator.create', 'creator.create', (['"""FitnessMin"""', 'base.Fitness'], {'weights': '(-1.0,)'}), "('FitnessMin', base.Fitness, weights=(-1.0,))\n", (914, 959), False, 'from deap import creator\n'), ((968, 1056), 'deap.creator.create', 'creator.create', (['"""Individual"""', 'array.array'], {'typecode': '"""b"""', 'fitness': 'creator.FitnessMin'}), "('Individual', array.array, typecode='b', fitness=creator.\n FitnessMin)\n", (982, 1056), False, 'from deap import creator\n'), ((1071, 1085), 'deap.base.Toolbox', 'base.Toolbox', ([], {}), '()\n', (1083, 1085), False, 'from deap import base\n'), ((4108, 4121), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (4116, 4121), True, 'import numpy as np\n'), ((4824, 4843), 'deap.tools.HallOfFame', 'tools.HallOfFame', (['(1)'], {}), '(1)\n', (4840, 4843), False, 'from deap import tools\n'), ((4860, 4908), 'deap.tools.Statistics', 'tools.Statistics', (['(lambda ind: ind.fitness.values)'], {}), '(lambda ind: ind.fitness.values)\n', (4876, 4908), False, 'from deap import tools\n'), ((5082, 5202), 'deap.algorithms.eaSimple', 'algorithms.eaSimple', (['pop', 'self.toolbox'], {'cxpb': 'CXPB', 'mutpb': 'MUTPB', 'ngen': 'ngen', 'stats': 'stats', 'halloffame': 'hof', 'verbose': '(True)'}), '(pop, self.toolbox, cxpb=CXPB, mutpb=MUTPB, ngen=ngen,\n stats=stats, halloffame=hof, verbose=True)\n', (5101, 5202), False, 'from deap import algorithms\n'), ((2506, 2526), 'numpy.array', 'np.array', (['individual'], {}), '(individual)\n', (2514, 2526), True, 'import numpy as np\n')]
"""A setuptools based setup module. See: https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject """ # Prefer setuptools over distutils from setuptools import setup from setuptools import find_packages setup( name="oiio", version="PACKAGE_VERSION", description="OpenImageIO Python package", url="https://github.com/Correct-Syntax/oiio-python", # https://pypi.org/classifiers/ classifiers=[ "Programming Language :: Python :: 3.8", "License :: OSI Approved :: MIT License", ], packages=find_packages(exclude=[]), package_data={ # If any package (!) contains ... files, include them: "": [ "*.pyd", "*.dll", "*.so", ] }, )
[ "setuptools.find_packages" ]
[((575, 600), 'setuptools.find_packages', 'find_packages', ([], {'exclude': '[]'}), '(exclude=[])\n', (588, 600), False, 'from setuptools import find_packages\n')]
from .decorators import (
    onlineChain,
)
import click
from peerplays.asset import Asset
from peerplays.exceptions import AssetDoesNotExistsException
from prettytable import PrettyTable
from .main import main


@main.command()
@click.pass_context
@onlineChain
def assets(ctx):
    "List Assets"
    MAX_ASSET = 100000
    assets = []
    for i in range(0, MAX_ASSET):
        try:
            assets.append(Asset("1.3.{}".format(i)))
        except AssetDoesNotExistsException:
            break
    assetTable = PrettyTable()
    assetTable.field_names = ["ID", "Symbol", "Precision", "Description", "Max Supply"]
    for i in range(0, len(assets)):
        try:
            description = assets[i].description
            if description == "":
                description = "--"
        except AttributeError:
            description = "--"
        assetTable.add_row([assets[i].id, assets[i].symbol, assets[i].precision,
                            description, assets[i].max_supply["amount"]])
    click.echo(assetTable)
[ "click.echo", "prettytable.PrettyTable" ]
[((526, 539), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (537, 539), False, 'from prettytable import PrettyTable\n'), ((991, 1013), 'click.echo', 'click.echo', (['assetTable'], {}), '(assetTable)\n', (1001, 1013), False, 'import click\n')]
""" Module to process and analyse rheology data containing stress ramps Created: March 24th, 2020 Author: <NAME> """ import pandas as pd import numpy as np import matplotlib.pyplot as plt class Rstressramp(): """ Class with the functions relevant to stress ramps Main focus on extracting data from .csv files, computing K' and data visualization """ def readcsv_full2ramp(filename, export = True, file_export = None, action_name = 'viscometry ramp', variables = ['sample des', 'stress', 'strain (sample)'], sep = ',', dec = '.'): """ Function to select the desired data from raw .csv files. TO DO: combine this and Rtimedep.readcsv_full2time to a general function INPUT filename : string, file to read export : if True, the selected data is exported to a .csv file file_export : string, name of the file where the data will be exported. if None, then attaches the suffix '_clean_stress_ramp' to the file name. action_name : string, name of the dataset where the ramp data is variables : list of strings, desired variables to be extracted. The name can be a partial match of the column name, and is case insensitive. If more than one column matches a given variable name, all the corresponding columns are included. sep : string, character used as a delimiter in the .csv file dec : string, character used as a decimal separator in the .csv file OUTPUT select_data : data frame with the selected data. Only returns the value if export = False. When export = True, the function only exports the data without returning any values. """ # Import the file as a data frame data_input = pd.read_csv(filename, sep = sep, decimal = dec) print('\n Successfully imported the file: ') print(filename) # Because there is more than one action in the file, # select only the data for the stress ramp # TO DO: make this selection optional for the user data_frame = Rstressramp.splitaction_ramp(data_input, action_name = action_name) # Find the columns that match the desired variable names # and select the data within. columns = [] for ivar in variables: print('\n Variable to search:', ivar) column_names = [x for x in data_frame.columns if ivar in x.lower()] print('Variables found:', column_names) columns.extend(column_names) select_data = data_frame[columns] # Export the data to the file specified in file_export or # return the data frame if export == False. if export == True: if file_export == None: file_export = filename.replace('.csv','_clean_stress_ramp.csv') select_data.to_csv(file_export, index=False, sep = sep, decimal = dec) print('\n Selected data exported to:', file_export) else: return select_data def splitaction_ramp(data_frame, action_header = 'Action Name', action_name = 'viscometry ramp'): """ Function to extract the stress ramp data from a file with multiple types of measurement INPUT data_frame : pandas data frame with the full data action_header : string with the name of the column containing the type of measurement, or action, action_name : string with the name of the measurement, or action. It accepts a partial match, and is case insensitive. OUTPUT select_data : pandas data frame containing only the stress ram[] data. 
""" print('\n Splitting data by action name: ', action_name) # Gets all the actions within the data frame iaction = [x for x in data_frame[action_header].unique() if action_name in x.lower()] print(iaction) data_frame.set_index(action_header, inplace = True) # Find the location of the desired action, and save to a data frame # If the action name is not found, it prints an error message try: select_data = data_frame.loc[iaction] select_data.reset_index(inplace = True) except IndexError: print('ERROR: Action name not found') select_data = None return select_data def compute_k(stress, strain, show = None, remove_neg = True): """ Function to compute the differential storage modulus from the slope of the stress vs strain curve. INPUT stress : numpy array or list, Shear Stress (in Pa) data strain : numpy array or list, Shear Strain (in %) data show : 'stress', 'strain', 'both', or None. Plots the result remove_neg : if True, removes data where strain is negative OUTPUT stress : numpy array, mean value of the stress (in Pa) where k is computed strain : numpy array, mean value of strain (in %) where k is computed k : numpy array, differential storage modulus, (in Pa) """ # Work with numpy arrays stress = np.array(stress) strain = np.array(strain) # Start by cleaning the data from any NaN value ind_nan = np.isnan(strain) | np.isnan(stress) stress = stress[~ind_nan] strain = strain[~ind_nan] # Clean the data from values after rupture, strain must be # less than 5000% ind_nonrupture = np.where(strain < 5e3)[0] stress = stress[ind_nonrupture] strain = strain[ind_nonrupture] # Remove data where strain is negative. Note that if recording # the absolute strain of the sample, strain can be negative # in the initial interval. This data is tipically not useful # and therefore not desired. if remove_neg == True: ind_positive = np.where(strain >= 0) stress = stress[ind_positive] strain = strain[ind_positive] # Compute the differential values of strain and stress diff_stress = stress[1:] - stress[:-1] diff_strain = strain[1:] - strain[:-1] # Compute k' and the mean values of stress and strain k = diff_stress / diff_strain * 100 # multiplied by 100, because strain is in % stress = (stress[1:] + stress[:-1])/2 strain = (strain[1:] + strain[:-1])/2 # Show the results if desired if show == 'stress': Rstressramp.plot_k([stress], k) elif show == 'strain': Rstressramp.plot_k([strain], k) elif show == 'both': Rstressramp.plot_k([stress, strain], k) elif show is not None: print('Error: cannot plot: ', show) return [stress, strain, k] def plot_k(x, k, linewidth = 1.5, marker = 'o', color = 'k', marker_facecolor = 'k'): """ Function to plot, in log scale, the differential storage modulus, k as a function of stress, strain, or both. 
INPUT x : list of numpy arrays of dependent variables k : numpy array, differential storage modulus linewidth : float, width of the line to plot marker : string, marker of the lineplot, needs to be compatible with matplotlib.pyplot color : color for the lineplot, and marker border, needs to be compatible with matplotlib.pyplot marker_facecolor : color of the marker, compatible with matplotlib.pyplot """ # Plot the first variable x1 = x[0] plt.figure(figsize = (9,5)) plt.plot(x1, k, c = color, lw = linewidth, marker = marker, mec = color, mfc = marker_facecolor) plt.loglog() plt.ylabel('$K\'$ (Pa)') # If there is more than one dependent variable, # Plot also the second variable in a different figure try: x2 = x[1] plt.xlabel('$\sigma$ (Pa)') plt.pause(0.1) plt.figure(figsize =(9, 5)) plt.plot(x2, k, c = color, lw = linewidth, marker = marker, mec = color, mfc = marker_facecolor) plt.loglog() plt.ylabel('$K\'$ (Pa)') plt.xlabel('$\gamma$ (%)') except IndexError: pass def export_kall(data_frame, file_export = None, remove_neg = True, group_header = 'Sample Description', subgroup_header = None, stress_header = 'Shear stress(Pa)', strain_header = 'Shear strain (sample)(%)'): """ Function to compute the differential storage modulus for all the data groups (e.g. samples, interals, experiments) within a data_frame INPUT data_frame : pandas data frame with the full data file_export : string, name of the file where data will be exported if None, it saves to 'All_k_curves.csv' remove_neg : if True, removes data where strain is negative group_header : string, name of the column where the data group label are subgroup_header : string, name of the column where the sub dataset label are stress_header : string, name of the column where the stress data is strain_header : string, name of the column where the strain data is OUTPUT all_data : data frame with the computed stress, strain, k' It also saves the data_rame to file_export. """ groups_all = [] subgroups_all = [] s_all = [] y_all = [] k_all = [] for igroup in data_frame[group_header].unique(): data_group = data_frame.loc[data_frame[group_header] == igroup] try: list_subgroups = data_group[subgroup_header].unique() subset_header = subgroup_header except KeyError: list_subgroups = [igroup] subset_header = group_header for isubset in list_subgroups: data_subgroup = data_group.loc[data_group[subset_header] == isubset] stress = np.array(data_group[stress_header]) strain = np.array(data_group[strain_header]) [s, y, k] = Rstressramp.compute_k(stress, strain, remove_neg = remove_neg) groups_all.extend([igroup]*len(s)) subgroups_all.extend([isubset]*len(s)) s_all.extend(s) y_all.extend(y) k_all.extend(k) all_data = pd.DataFrame() all_data[group_header] = groups_all try: subgroup_header[0]; all_data[subgroup_header] = subgroups_all except TypeError: pass all_data['Stress (Pa)'] = s_all all_data['Strain (%)'] = y_all all_data['K prime (Pa)'] = k_all if file_export is None: file_export = 'All_k_curves.csv' all_data.to_csv(file_export, index = False) return all_data def mean_kall_interp(filename, xvariable,num_interp = 100, show_plot = True, sample_header = 'Sample Description', stress_header = 'Stress (Pa)', strain_header = 'Strain (%)', k_header = 'K prime (Pa)', sep = ',', dec = '.'): """ Function to compute the mean curve for the differential elastic modulus for all the data within a file Note that it is based on interpolation! 
INPUT filename : string, name of the file with the whole data xvariable : string, can be 'stress' or 'strain', indicating over which variable to compute the mean. show_plot : if True, shows the results in a plot sample_header : string, name of the column with the sample label is stress_header : string, name of the column with the shear data strain_header : string, name of the column with the strain data sep : string, character used as delimiter in csv file dec : string, character used as decimal separator in csv file OUTPUT xinterp : numpy array, vector used for interpolation kmean : numpy array, mean curve of k kstd : numpy array, standard deviation curve of k """ # Read data and get all the samples within the data frame data = pd.read_csv(filename, sep = sep, decimal = dec) all_samples = data[sample_header].unique() # Define which dependent variable to extract if 'stress' in xvariable: xvar = stress_header elif 'strain' in xvariable: xvar = strain_header # Loop to get mean values of minimum and maximum xdata for the samples xmin = []; xmax = [] for isample in all_samples: data_sample = data.loc[data[sample_header] == isample] xsample = np.array(data_sample[xvar]) xmin.append(np.min(xsample)) xmax.append(np.max(xsample)) xmin_avg = np.mean(np.array(xmin)) xmax_avg = np.mean(np.array(xmax)) xmax_std = np.std(np.array(xmax)) print('Rupture: ', xmax_avg, '+/-', xmax_std) # Build interpolation vector xmin_log = np.log10(xmin_avg) xmax_log = np.log10(xmax_avg) xinterp = np.logspace(xmin_log, xmax_log, num = num_interp) #Loop to get the interpolated curves for each sample within the file k_all = [] for isample in all_samples: data_sample = data.loc[data[sample_header] == isample] xsample = data_sample[xvar] ksample = data_sample[k_header] k_interp = np.interp(xinterp, xsample, ksample) k_all.append(k_interp) k_all = np.array(k_all) kmean = np.mean(k_all, axis = 0) kstd = np.std(k_all, axis = 0) # Plot the average curve and standard deviation, if desired if show_plot == True: plt.fill_between(xinterp, kmean - kstd, kmean + kstd, color = 'lightgray', alpha = 0.8) plt.plot(xinterp, kmean, c = 'darkgray', marker = 'o', mfc = 'w') plt.ylabel('$K\'$ (Pa)') plt.xlabel(xvar) plt.loglog() return [xinterp, kmean, kstd] def mean_kall_window(filename, xvariable, xmin_log = -1, xmax_log = 5, winavg_number = 50, show_plot = True, sample_header = 'Sample Description', stress_header = 'Stress (Pa)', strain_header = 'Strain (%)', k_header = 'K prime (Pa)', sep = ',', dec = '.'): """ Function to compute the mean curve for the differential elastic modulus for all the data within a file Note that it is based on window averaging, and not interpolation! INPUT filename : string, name of the file with the whole data xvariable : string, can be 'stress' or 'strain', indicating over which variable to compute the mean. 
xmin_log : float, minimum value for average -> 10**xmin xmax_log : float, minimum value for average -> 10**xmax winavg_number : number of windows used to average, in logspace show_plot : if True, shows the results in a plot sample_header : string, name of the column with the sample label is stress_header : string, name of the column with the shear data strain_header : string, name of the column with the strain data sep : string, character used as delimiter in csv file dec : string, character used as decimal separator in csv file OUTPUT xmean : numpy array, mean value of the xvariable kmean : numpy array, mean curve of k kstd : numpy array, standard deviation curve of k """ # Read data and get all the samples within the data frame data = pd.read_csv(filename, sep = sep, decimal = dec) all_samples = data[sample_header].unique() # Define which dependent variable to extract if 'stress' in xvariable: xvar = stress_header elif 'strain' in xvariable: xvar = strain_header xmean = [] kmean = [] kstd = [] # Loop to average all the curves within the window avg_windows = np.logspace(xmin_log, xmax_log, num = winavg_number) avg_windows = [round(x, 3) for x in avg_windows] for dw in range(len(avg_windows)-1): x_all = [] k_all = [] for isample in all_samples: # It extracts the xvariable and the k data from the data # frame for a given sample data_sample = data.loc[data[sample_header]==isample] xdata = data_sample[xvar] kdata = data_sample[k_header] #Selects the data within the avg window and stores it ind_selec = (xdata > avg_windows[dw]) & (xdata <= avg_windows[dw+1]) x_all.extend(xdata[ind_selec]) k_all.extend(kdata[ind_selec]) # Convert list to numpy array for mean and isnan to work properly x_all = np.array(x_all) k_all = np.array(k_all) try: # Get the mean curve, only for non values xmean.append(np.mean(x_all[~np.isnan(x_all)])) kmean.append(np.mean(k_all[~np.isnan(k_all)])) kstd.append(np.std(k_all[~np.isnan(k_all)])) except TypeError: print('Error in mean calculation') # Convert from list to numpy array xmean = np.array(xmean) kmean = np.array(kmean) kstd = np.array(kstd) # Plot the average curve and standard deviation, if desired if show_plot == True: plt.fill_between(xmean, kmean - kstd, kmean + kstd, color = 'lightgray', alpha = 0.8) plt.plot(xmean, kmean, c = 'darkgray', marker = 'o', mfc = 'w') plt.ylabel('$K\'$ (Pa)') plt.xlabel(xvar) plt.loglog() return [xmean, kmean, kstd]
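Assuming the Rstressramp class above is in scope, a minimal end-to-end call of compute_k on made-up stress/strain values (the numbers are illustrative only):

import numpy as np

stress = np.array([1.0, 2.0, 4.0, 8.0, 16.0])  # Pa
strain = np.array([0.5, 1.0, 2.5, 7.0, 20.0])  # %

# K' is computed from consecutive differences, so the returned arrays hold
# midpoint stress/strain values and are one element shorter than the input.
s_mid, y_mid, k = Rstressramp.compute_k(stress, strain, show=None)
print(k)  # d(stress)/d(strain) * 100, in Pa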
[ "matplotlib.pyplot.loglog", "pandas.read_csv", "numpy.logspace", "numpy.isnan", "matplotlib.pyplot.figure", "numpy.mean", "numpy.interp", "matplotlib.pyplot.fill_between", "pandas.DataFrame", "numpy.std", "numpy.max", "numpy.log10", "matplotlib.pyplot.pause", "numpy.min", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "numpy.where", "numpy.array", "matplotlib.pyplot.xlabel" ]
[((2099, 2142), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': 'sep', 'decimal': 'dec'}), '(filename, sep=sep, decimal=dec)\n', (2110, 2142), True, 'import pandas as pd\n'), ((5868, 5884), 'numpy.array', 'np.array', (['stress'], {}), '(stress)\n', (5876, 5884), True, 'import numpy as np\n'), ((5902, 5918), 'numpy.array', 'np.array', (['strain'], {}), '(strain)\n', (5910, 5918), True, 'import numpy as np\n'), ((8340, 8366), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 5)'}), '(figsize=(9, 5))\n', (8350, 8366), True, 'import matplotlib.pyplot as plt\n'), ((8376, 8467), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'k'], {'c': 'color', 'lw': 'linewidth', 'marker': 'marker', 'mec': 'color', 'mfc': 'marker_facecolor'}), '(x1, k, c=color, lw=linewidth, marker=marker, mec=color, mfc=\n marker_facecolor)\n', (8384, 8467), True, 'import matplotlib.pyplot as plt\n'), ((8498, 8510), 'matplotlib.pyplot.loglog', 'plt.loglog', ([], {}), '()\n', (8508, 8510), True, 'import matplotlib.pyplot as plt\n'), ((8519, 8542), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$K\'$ (Pa)"""'], {}), '("$K\'$ (Pa)")\n', (8529, 8542), True, 'import matplotlib.pyplot as plt\n'), ((11421, 11435), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11433, 11435), True, 'import pandas as pd\n'), ((13343, 13386), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': 'sep', 'decimal': 'dec'}), '(filename, sep=sep, decimal=dec)\n', (13354, 13386), True, 'import pandas as pd\n'), ((14192, 14210), 'numpy.log10', 'np.log10', (['xmin_avg'], {}), '(xmin_avg)\n', (14200, 14210), True, 'import numpy as np\n'), ((14230, 14248), 'numpy.log10', 'np.log10', (['xmax_avg'], {}), '(xmax_avg)\n', (14238, 14248), True, 'import numpy as np\n'), ((14267, 14314), 'numpy.logspace', 'np.logspace', (['xmin_log', 'xmax_log'], {'num': 'num_interp'}), '(xmin_log, xmax_log, num=num_interp)\n', (14278, 14314), True, 'import numpy as np\n'), ((14725, 14740), 'numpy.array', 'np.array', (['k_all'], {}), '(k_all)\n', (14733, 14740), True, 'import numpy as np\n'), ((14757, 14779), 'numpy.mean', 'np.mean', (['k_all'], {'axis': '(0)'}), '(k_all, axis=0)\n', (14764, 14779), True, 'import numpy as np\n'), ((14797, 14818), 'numpy.std', 'np.std', (['k_all'], {'axis': '(0)'}), '(k_all, axis=0)\n', (14803, 14818), True, 'import numpy as np\n'), ((16990, 17033), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': 'sep', 'decimal': 'dec'}), '(filename, sep=sep, decimal=dec)\n', (17001, 17033), True, 'import pandas as pd\n'), ((17395, 17445), 'numpy.logspace', 'np.logspace', (['xmin_log', 'xmax_log'], {'num': 'winavg_number'}), '(xmin_log, xmax_log, num=winavg_number)\n', (17406, 17445), True, 'import numpy as np\n'), ((18697, 18712), 'numpy.array', 'np.array', (['xmean'], {}), '(xmean)\n', (18705, 18712), True, 'import numpy as np\n'), ((18729, 18744), 'numpy.array', 'np.array', (['kmean'], {}), '(kmean)\n', (18737, 18744), True, 'import numpy as np\n'), ((18760, 18774), 'numpy.array', 'np.array', (['kstd'], {}), '(kstd)\n', (18768, 18774), True, 'import numpy as np\n'), ((5994, 6010), 'numpy.isnan', 'np.isnan', (['strain'], {}), '(strain)\n', (6002, 6010), True, 'import numpy as np\n'), ((6013, 6029), 'numpy.isnan', 'np.isnan', (['stress'], {}), '(stress)\n', (6021, 6029), True, 'import numpy as np\n'), ((6217, 6242), 'numpy.where', 'np.where', (['(strain < 5000.0)'], {}), '(strain < 5000.0)\n', (6225, 6242), True, 'import numpy as np\n'), ((6631, 6652), 'numpy.where', 'np.where', (['(strain >= 0)'], {}), '(strain >= 0)\n', 
(6639, 6652), True, 'import numpy as np\n'), ((8712, 8740), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\sigma$ (Pa)"""'], {}), "('$\\\\sigma$ (Pa)')\n", (8722, 8740), True, 'import matplotlib.pyplot as plt\n'), ((8752, 8766), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (8761, 8766), True, 'import matplotlib.pyplot as plt\n'), ((8779, 8805), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 5)'}), '(figsize=(9, 5))\n', (8789, 8805), True, 'import matplotlib.pyplot as plt\n'), ((8819, 8910), 'matplotlib.pyplot.plot', 'plt.plot', (['x2', 'k'], {'c': 'color', 'lw': 'linewidth', 'marker': 'marker', 'mec': 'color', 'mfc': 'marker_facecolor'}), '(x2, k, c=color, lw=linewidth, marker=marker, mec=color, mfc=\n marker_facecolor)\n', (8827, 8910), True, 'import matplotlib.pyplot as plt\n'), ((8945, 8957), 'matplotlib.pyplot.loglog', 'plt.loglog', ([], {}), '()\n', (8955, 8957), True, 'import matplotlib.pyplot as plt\n'), ((8970, 8993), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$K\'$ (Pa)"""'], {}), '("$K\'$ (Pa)")\n', (8980, 8993), True, 'import matplotlib.pyplot as plt\n'), ((9007, 9034), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\gamma$ (%)"""'], {}), "('$\\\\gamma$ (%)')\n", (9017, 9034), True, 'import matplotlib.pyplot as plt\n'), ((13842, 13869), 'numpy.array', 'np.array', (['data_sample[xvar]'], {}), '(data_sample[xvar])\n', (13850, 13869), True, 'import numpy as np\n'), ((13980, 13994), 'numpy.array', 'np.array', (['xmin'], {}), '(xmin)\n', (13988, 13994), True, 'import numpy as np\n'), ((14023, 14037), 'numpy.array', 'np.array', (['xmax'], {}), '(xmax)\n', (14031, 14037), True, 'import numpy as np\n'), ((14065, 14079), 'numpy.array', 'np.array', (['xmax'], {}), '(xmax)\n', (14073, 14079), True, 'import numpy as np\n'), ((14624, 14660), 'numpy.interp', 'np.interp', (['xinterp', 'xsample', 'ksample'], {}), '(xinterp, xsample, ksample)\n', (14633, 14660), True, 'import numpy as np\n'), ((14932, 15019), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['xinterp', '(kmean - kstd)', '(kmean + kstd)'], {'color': '"""lightgray"""', 'alpha': '(0.8)'}), "(xinterp, kmean - kstd, kmean + kstd, color='lightgray',\n alpha=0.8)\n", (14948, 15019), True, 'import matplotlib.pyplot as plt\n'), ((15056, 15115), 'matplotlib.pyplot.plot', 'plt.plot', (['xinterp', 'kmean'], {'c': '"""darkgray"""', 'marker': '"""o"""', 'mfc': '"""w"""'}), "(xinterp, kmean, c='darkgray', marker='o', mfc='w')\n", (15064, 15115), True, 'import matplotlib.pyplot as plt\n'), ((15134, 15157), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$K\'$ (Pa)"""'], {}), '("$K\'$ (Pa)")\n', (15144, 15157), True, 'import matplotlib.pyplot as plt\n'), ((15171, 15187), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xvar'], {}), '(xvar)\n', (15181, 15187), True, 'import matplotlib.pyplot as plt\n'), ((15200, 15212), 'matplotlib.pyplot.loglog', 'plt.loglog', ([], {}), '()\n', (15210, 15212), True, 'import matplotlib.pyplot as plt\n'), ((18258, 18273), 'numpy.array', 'np.array', (['x_all'], {}), '(x_all)\n', (18266, 18273), True, 'import numpy as np\n'), ((18294, 18309), 'numpy.array', 'np.array', (['k_all'], {}), '(k_all)\n', (18302, 18309), True, 'import numpy as np\n'), ((18886, 18971), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['xmean', '(kmean - kstd)', '(kmean + kstd)'], {'color': '"""lightgray"""', 'alpha': '(0.8)'}), "(xmean, kmean - kstd, kmean + kstd, color='lightgray',\n alpha=0.8)\n", (18902, 18971), True, 'import matplotlib.pyplot as plt\n'), ((19008, 19065), 
'matplotlib.pyplot.plot', 'plt.plot', (['xmean', 'kmean'], {'c': '"""darkgray"""', 'marker': '"""o"""', 'mfc': '"""w"""'}), "(xmean, kmean, c='darkgray', marker='o', mfc='w')\n", (19016, 19065), True, 'import matplotlib.pyplot as plt\n'), ((19084, 19107), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$K\'$ (Pa)"""'], {}), '("$K\'$ (Pa)")\n', (19094, 19107), True, 'import matplotlib.pyplot as plt\n'), ((19121, 19137), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xvar'], {}), '(xvar)\n', (19131, 19137), True, 'import matplotlib.pyplot as plt\n'), ((19150, 19162), 'matplotlib.pyplot.loglog', 'plt.loglog', ([], {}), '()\n', (19160, 19162), True, 'import matplotlib.pyplot as plt\n'), ((10997, 11032), 'numpy.array', 'np.array', (['data_group[stress_header]'], {}), '(data_group[stress_header])\n', (11005, 11032), True, 'import numpy as np\n'), ((11058, 11093), 'numpy.array', 'np.array', (['data_group[strain_header]'], {}), '(data_group[strain_header])\n', (11066, 11093), True, 'import numpy as np\n'), ((13894, 13909), 'numpy.min', 'np.min', (['xsample'], {}), '(xsample)\n', (13900, 13909), True, 'import numpy as np\n'), ((13935, 13950), 'numpy.max', 'np.max', (['xsample'], {}), '(xsample)\n', (13941, 13950), True, 'import numpy as np\n'), ((18429, 18444), 'numpy.isnan', 'np.isnan', (['x_all'], {}), '(x_all)\n', (18437, 18444), True, 'import numpy as np\n'), ((18492, 18507), 'numpy.isnan', 'np.isnan', (['k_all'], {}), '(k_all)\n', (18500, 18507), True, 'import numpy as np\n'), ((18553, 18568), 'numpy.isnan', 'np.isnan', (['k_all'], {}), '(k_all)\n', (18561, 18568), True, 'import numpy as np\n')]
import logging

from PySide2.QtWidgets import QFrame, QLabel, QVBoxLayout, QHBoxLayout, QScrollArea, QLineEdit,\
    QWidget
from PySide2.QtGui import QPainter, QBrush, QPen
from PySide2.QtCore import Qt, QSize

from ...config import Conf
from .qast_viewer import QASTViewer

l = logging.getLogger('ui.widgets.qregister_viewer')


class AddressPiece(object):

    __slots__ = ['address']

    def __init__(self, address):
        self.address = address


class NewLinePiece(object):
    pass


class QMemoryView(QWidget):
    def __init__(self, state, workspace, parent=None):
        super(QMemoryView, self).__init__(parent)

        self.workspace = workspace
        self.state = state
        self.cols = None
        self.rows = None
        # The current address being displayed. Must be set through .address
        self._address = None

        self._objects = [ ]

    @property
    def address(self):
        return self._address

    @address.setter
    def address(self, v):
        if v != self._address:
            self._address = v
            self._reload_objects()

    def paintEvent(self, event):
        if self.address is None:
            return

        MARGIN_LEFT = 5
        MARGIN_TOP = 5
        LINE_MARGIN = 3

        painter = QPainter(self)
        painter.setPen(QPen(Qt.black, 1))
        painter.setFont(Conf.symexec_font)

        x = MARGIN_LEFT
        y = MARGIN_TOP

        for obj in self._objects:
            obj_type = type(obj)
            if obj_type is NewLinePiece:
                # carriage return
                x = MARGIN_LEFT
                y += Conf.symexec_font_height + LINE_MARGIN
            elif obj_type is AddressPiece:
                # address
                addr_str = "%08x" % obj.address
                painter.drawText(x, y + Conf.symexec_font_ascent, addr_str)
                x += Conf.symexec_font_width * len(addr_str)
                x += 7
            elif obj_type is QASTViewer:
                # AST viewer
                obj.x = x
                obj.y = y
                obj.paint(painter)
                x += obj.width + 2
            else:
                raise TypeError('paintEvent(): Unsupported object type %s.' % obj_type)

    def _reload_objects(self):
        """
        Reload addresses and text pieces to be displayed.

        :return: None
        """
        objects = [ ]

        addr_base = self.address

        for row in range(self.rows):
            addr = addr_base + row * self.cols

            # address
            addr_piece = AddressPiece(addr)
            objects.append(addr_piece)

            # QASTViewer objects
            for col in range(self.cols):
                data = self.state.memory.load(addr + col, 1, inspect=False, disable_actions=True)
                ast_viewer = QASTViewer(data, workspace=self.workspace, custom_painting=True, display_size=False)
                objects.append(ast_viewer)

            # end of the line
            newline_piece = NewLinePiece()
            objects.append(newline_piece)

        self._objects = objects


class QMemoryViewer(QFrame):
    def __init__(self, state, parent, workspace):
        super(QMemoryViewer, self).__init__(parent)

        self.workspace = workspace

        self._scrollarea = None  # type: QScrollArea
        self._txt_addr = None  # type: QLineEdit
        self._view = None  # type: QMemoryView

        self._addr = None  # the address to display
        self.state = state

        self._init_widgets()

        self.state.am_subscribe(self._watch_state)

    @property
    def addr(self):
        return self._addr

    @addr.setter
    def addr(self, v):
        if self._addr != v:
            self._addr = v
            self.reload()

    #
    # Overridden methods
    #

    def sizeHint(self, *args, **kwargs):
        return QSize(100, 100)

    #
    # Public methods
    #

    def reload(self):
        if self.state.am_none():
            return
        if self.addr is None:
            return
        self._refresh_memory_view()

    #
    # Event handlers
    #

    def _on_address_entered(self):
        address_str = self._txt_addr.text()
        try:
            address = int(address_str, 16)
        except ValueError:
            return
        self.addr = address

    #
    # Private methods
    #

    def _init_widgets(self):
        layout = QVBoxLayout()

        # address
        lbl_addr = QLabel()
        lbl_addr.setText("Address")

        txt_addr = QLineEdit()
        txt_addr.returnPressed.connect(self._on_address_entered)
        self._txt_addr = txt_addr

        top_layout = QHBoxLayout()
        top_layout.addWidget(lbl_addr)
        top_layout.addWidget(txt_addr)

        self._view = QMemoryView(self.state, self.workspace)

        area = QScrollArea()
        self._scrollarea = area
        area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        area.setWidgetResizable(True)
        area.setWidget(self._view)

        layout.addLayout(top_layout)
        layout.addWidget(area)
        layout.setContentsMargins(0, 0, 0, 0)

        self.setLayout(layout)

    def _refresh_memory_view(self):
        self._view.cols = 16
        self._view.rows = 10
        self._view.address = self.addr

        self._view.repaint()

    def _watch_state(self, **kwargs):
        self.reload()
[ "PySide2.QtGui.QPainter", "PySide2.QtCore.QSize", "PySide2.QtWidgets.QScrollArea", "PySide2.QtWidgets.QLabel", "PySide2.QtWidgets.QVBoxLayout", "PySide2.QtWidgets.QLineEdit", "PySide2.QtGui.QPen", "logging.getLogger", "PySide2.QtWidgets.QHBoxLayout" ]
[((280, 328), 'logging.getLogger', 'logging.getLogger', (['"""ui.widgets.qregister_viewer"""'], {}), "('ui.widgets.qregister_viewer')\n", (297, 328), False, 'import logging\n'), ((1262, 1276), 'PySide2.QtGui.QPainter', 'QPainter', (['self'], {}), '(self)\n', (1270, 1276), False, 'from PySide2.QtGui import QPainter, QBrush, QPen\n'), ((3848, 3863), 'PySide2.QtCore.QSize', 'QSize', (['(100)', '(100)'], {}), '(100, 100)\n', (3853, 3863), False, 'from PySide2.QtCore import Qt, QSize\n'), ((4388, 4401), 'PySide2.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (4399, 4401), False, 'from PySide2.QtWidgets import QFrame, QLabel, QVBoxLayout, QHBoxLayout, QScrollArea, QLineEdit, QWidget\n'), ((4441, 4449), 'PySide2.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (4447, 4449), False, 'from PySide2.QtWidgets import QFrame, QLabel, QVBoxLayout, QHBoxLayout, QScrollArea, QLineEdit, QWidget\n'), ((4506, 4517), 'PySide2.QtWidgets.QLineEdit', 'QLineEdit', ([], {}), '()\n', (4515, 4517), False, 'from PySide2.QtWidgets import QFrame, QLabel, QVBoxLayout, QHBoxLayout, QScrollArea, QLineEdit, QWidget\n'), ((4639, 4652), 'PySide2.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (4650, 4652), False, 'from PySide2.QtWidgets import QFrame, QLabel, QVBoxLayout, QHBoxLayout, QScrollArea, QLineEdit, QWidget\n'), ((4809, 4822), 'PySide2.QtWidgets.QScrollArea', 'QScrollArea', ([], {}), '()\n', (4820, 4822), False, 'from PySide2.QtWidgets import QFrame, QLabel, QVBoxLayout, QHBoxLayout, QScrollArea, QLineEdit, QWidget\n'), ((1301, 1318), 'PySide2.QtGui.QPen', 'QPen', (['Qt.black', '(1)'], {}), '(Qt.black, 1)\n', (1305, 1318), False, 'from PySide2.QtGui import QPainter, QBrush, QPen\n')]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2020/10/28 16:41
# @Author  : liumin
# @File    : ICNet.py

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision

__all__ = ["ICNet"]


def Conv1x1BN(in_channels, out_channels):
    return nn.Sequential(
        nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, bias=False),
        nn.BatchNorm2d(out_channels)
    )


def Conv1x1BNReLU(in_channels, out_channels):
    return nn.Sequential(
        nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True)
    )


def Conv3x3BN(in_channels, out_channels, stride, dilation=1):
    return nn.Sequential(
        nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride,
                  padding=dilation, dilation=dilation, bias=False),
        nn.BatchNorm2d(out_channels)
    )


def Conv3x3BNReLU(in_channels, out_channels, stride, dilation=1):
    return nn.Sequential(
        nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride,
                  padding=dilation, dilation=dilation, bias=False),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True)
    )


class CascadeFeatureFusion(nn.Module):
    def __init__(self, low_channels, high_channels, out_channels, num_classes):
        super(CascadeFeatureFusion, self).__init__()
        self.conv_low = Conv3x3BNReLU(low_channels, out_channels, 1, dilation=2)
        self.conv_high = Conv3x3BNReLU(high_channels, out_channels, 1, dilation=1)
        self.relu = nn.ReLU(inplace=True)
        self.conv_low_cls = nn.Conv2d(out_channels, num_classes, 1, bias=False)

    def forward(self, x_low, x_high):
        x_low = F.interpolate(x_low, size=x_high.size()[2:], mode='bilinear', align_corners=True)
        x_low = self.conv_low(x_low)
        x_high = self.conv_high(x_high)
        out = self.relu(x_low + x_high)
        x_low_cls = self.conv_low_cls(x_low)
        return out, x_low_cls


class Backbone(nn.Module):
    def __init__(self, pyramids=[1, 2, 3, 6]):
        super(Backbone, self).__init__()
        self.pretrained = torchvision.models.resnet50(pretrained=True)

    def forward(self, x):
        x = self.pretrained.conv1(x)
        x = self.pretrained.bn1(x)
        x = self.pretrained.relu(x)
        x = self.pretrained.maxpool(x)
        c1 = self.pretrained.layer1(x)
        c2 = self.pretrained.layer2(c1)
        c3 = self.pretrained.layer3(c2)
        c4 = self.pretrained.layer4(c3)
        return c1, c2, c3, c4


class PyramidPoolingModule(nn.Module):
    def __init__(self, pyramids=[1, 2, 3, 6]):
        super(PyramidPoolingModule, self).__init__()
        self.pyramids = pyramids

    def forward(self, x):
        feat = x
        height, width = x.shape[2:]
        for bin_size in self.pyramids:
            feat_x = F.adaptive_avg_pool2d(x, output_size=bin_size)
            feat_x = F.interpolate(feat_x, size=(height, width), mode='bilinear', align_corners=True)
            feat = feat + feat_x
        return feat


class ICNet(nn.Module):
    def __init__(self, num_classes):
        super(ICNet, self).__init__()
        self.conv_sub1 = nn.Sequential(
            Conv3x3BNReLU(3, 32, 2),
            Conv3x3BNReLU(32, 32, 2),
            Conv3x3BNReLU(32, 64, 2)
        )
        self.backbone = Backbone()
        self.ppm = PyramidPoolingModule()
        self.cff_12 = CascadeFeatureFusion(128, 64, 128, num_classes)
        self.cff_24 = CascadeFeatureFusion(2048, 512, 128, num_classes)
        self.conv_cls = nn.Conv2d(128, num_classes, 1, bias=False)

    def forward(self, x):
        # sub 1
        x_sub1 = self.conv_sub1(x)
        # sub 2
        x_sub2 = F.interpolate(x, scale_factor=0.5, mode='bilinear')
        _, x_sub2, _, _ = self.backbone(x_sub2)
        # sub 4
        x_sub4 = F.interpolate(x, scale_factor=0.25, mode='bilinear')
        _, _, _, x_sub4 = self.backbone(x_sub4)
        # add PyramidPoolingModule
        x_sub4 = self.ppm(x_sub4)

        outs = list()
        x_cff_24, x_24_cls = self.cff_24(x_sub4, x_sub2)
        outs.append(x_24_cls)
        # x_cff_12, x_12_cls = self.cff_12(x_sub2, x_sub1)
        x_cff_12, x_12_cls = self.cff_12(x_cff_24, x_sub1)
        outs.append(x_12_cls)

        up_x2 = F.interpolate(x_cff_12, scale_factor=2, mode='bilinear')
        up_x2 = self.conv_cls(up_x2)
        outs.append(up_x2)
        up_x8 = F.interpolate(up_x2, scale_factor=4, mode='bilinear')
        outs.append(up_x8)
        # 1 -> 1/4 -> 1/8 -> 1/16
        outs.reverse()
        return outs


if __name__ == '__main__':
    model = ICNet(num_classes=19)
    print(model)

    input = torch.randn(1, 3, 512, 512)
    output = model(input)
    print(output[0].shape)
    print(output[1].shape)
    print(output[2].shape)
    print(output[3].shape)
[ "torch.nn.ReLU", "torch.nn.Conv2d", "torch.randn", "torch.nn.functional.adaptive_avg_pool2d", "torchvision.models.resnet50", "torch.nn.BatchNorm2d", "torch.nn.functional.interpolate" ]
[((4958, 4985), 'torch.randn', 'torch.randn', (['(1)', '(3)', '(512)', '(512)'], {}), '(1, 3, 512, 512)\n', (4969, 4985), False, 'import torch\n'), ((315, 417), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': '(1)', 'stride': '(1)', 'bias': '(False)'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=1,\n stride=1, bias=False)\n', (324, 417), True, 'import torch.nn as nn\n'), ((428, 456), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (442, 456), True, 'import torch.nn as nn\n'), ((556, 658), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': '(1)', 'stride': '(1)', 'bias': '(False)'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=1,\n stride=1, bias=False)\n', (565, 658), True, 'import torch.nn as nn\n'), ((669, 697), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (683, 697), True, 'import torch.nn as nn\n'), ((712, 733), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (719, 733), True, 'import torch.nn as nn\n'), ((849, 993), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': '(3)', 'stride': 'stride', 'padding': 'dilation', 'dilation': 'dilation', 'bias': '(False)'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=3,\n stride=stride, padding=dilation, dilation=dilation, bias=False)\n', (858, 993), True, 'import torch.nn as nn\n'), ((1003, 1031), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (1017, 1031), True, 'import torch.nn as nn\n'), ((1149, 1293), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': '(3)', 'stride': 'stride', 'padding': 'dilation', 'dilation': 'dilation', 'bias': '(False)'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=3,\n stride=stride, padding=dilation, dilation=dilation, bias=False)\n', (1158, 1293), True, 'import torch.nn as nn\n'), ((1303, 1331), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (1317, 1331), True, 'import torch.nn as nn\n'), ((1346, 1367), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1353, 1367), True, 'import torch.nn as nn\n'), ((1740, 1761), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1747, 1761), True, 'import torch.nn as nn\n'), ((1791, 1842), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_channels', 'num_classes', '(1)'], {'bias': '(False)'}), '(out_channels, num_classes, 1, bias=False)\n', (1800, 1842), True, 'import torch.nn as nn\n'), ((2326, 2370), 'torchvision.models.resnet50', 'torchvision.models.resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2353, 2370), False, 'import torchvision\n'), ((3799, 3841), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', 'num_classes', '(1)'], {'bias': '(False)'}), '(128, num_classes, 1, bias=False)\n', (3808, 3841), True, 'import torch.nn as nn\n'), ((3959, 4010), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': '(0.5)', 'mode': '"""bilinear"""'}), "(x, scale_factor=0.5, mode='bilinear')\n", (3972, 4010), True, 'import torch.nn.functional as F\n'), ((4095, 4147), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': '(0.25)', 'mode': '"""bilinear"""'}), "(x, 
scale_factor=0.25, mode='bilinear')\n", (4108, 4147), True, 'import torch.nn.functional as F\n'), ((4554, 4610), 'torch.nn.functional.interpolate', 'F.interpolate', (['x_cff_12'], {'scale_factor': '(2)', 'mode': '"""bilinear"""'}), "(x_cff_12, scale_factor=2, mode='bilinear')\n", (4567, 4610), True, 'import torch.nn.functional as F\n'), ((4694, 4747), 'torch.nn.functional.interpolate', 'F.interpolate', (['up_x2'], {'scale_factor': '(4)', 'mode': '"""bilinear"""'}), "(up_x2, scale_factor=4, mode='bilinear')\n", (4707, 4747), True, 'import torch.nn.functional as F\n'), ((3066, 3112), 'torch.nn.functional.adaptive_avg_pool2d', 'F.adaptive_avg_pool2d', (['x'], {'output_size': 'bin_size'}), '(x, output_size=bin_size)\n', (3087, 3112), True, 'import torch.nn.functional as F\n'), ((3135, 3220), 'torch.nn.functional.interpolate', 'F.interpolate', (['feat_x'], {'size': '(height, width)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(feat_x, size=(height, width), mode='bilinear', align_corners=True\n )\n", (3148, 3220), True, 'import torch.nn.functional as F\n')]
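The PyramidPoolingModule in the ICNet record above is a parameter-free, PSPNet-style context aggregator. A minimal standalone sketch of the same pool/upsample/sum loop, with invented tensor sizes:

import torch
import torch.nn.functional as F

x = torch.randn(1, 8, 16, 16)               # N, C, H, W feature map
feat = x
for bin_size in (1, 2, 3, 6):
    pooled = F.adaptive_avg_pool2d(x, output_size=bin_size)
    feat = feat + F.interpolate(pooled, size=x.shape[2:],
                                mode='bilinear', align_corners=True)
print(feat.shape)                         # torch.Size([1, 8, 16, 16])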
import numpy as np import matplotlib.pyplot as plt from copy import deepcopy from numpy.linalg import inv from scipy.linalg import schur, sqrtm import numpy as np def invSqrt(a,b,c): eps = 1e-12 mask = (b != 0) r1 = mask * (c - a) / (2. * b + eps) t1 = np.sign(r1) / (np.abs(r1) + np.sqrt(1. + r1*r1)); r = 1.0 / np.sqrt( 1. + t1*t1) t = t1*r; r = r * mask + 1.0 * (1.0 - mask); t = t * mask; x = 1. / np.sqrt( r*r*a - 2*r*t*b + t*t*c) z = 1. / np.sqrt( t*t*a + 2*r*t*b + r*r*c) d = np.sqrt( x * z) x = x / d z = z / d new_a = r*r*x + t*t*z new_b = -r*t*x + t*r*z new_c = t*t*x + r*r *z return new_a, new_b, new_c def Ell2LAF(ell): A23 = np.zeros((2,3)) A23[0,2] = ell[0] A23[1,2] = ell[1] a = ell[2] b = ell[3] c = ell[4] sc = np.sqrt(np.sqrt(a*c - b*b)) ia,ib,ic = invSqrt(a,b,c) A = np.array([[ia, ib], [ib, ic]]) / sc sc = np.sqrt(A[0,0] * A[1,1] - A[1,0] * A[0,1]) A23[0:2,0:2] = rectifyAffineTransformationUpIsUp(A / sc) * sc return A23 def rectifyAffineTransformationUpIsUp(A): det = np.sqrt(np.abs(A[0,0]*A[1,1] - A[1,0]*A[0,1] + 1e-10)) b2a2 = np.sqrt(A[0,1] * A[0,1] + A[0,0] * A[0,0]) A_new = np.zeros((2,2)) A_new[0,0] = b2a2 / det A_new[0,1] = 0 A_new[1,0] = (A[1,1]*A[0,1]+A[1,0]*A[0,0])/(b2a2*det) A_new[1,1] = det / b2a2 return A_new def ells2LAFs(ells): LAFs = np.zeros((len(ells), 2,3)) for i in range(len(ells)): LAFs[i,:,:] = Ell2LAF(ells[i,:]) return LAFs def LAF2pts(LAF, n_pts = 50): a = np.linspace(0, 2*np.pi, n_pts); x = [0] x.extend(list(np.sin(a))) x = np.array(x).reshape(1,-1) y = [0] y.extend(list(np.cos(a))) y = np.array(y).reshape(1,-1) HLAF = np.concatenate([LAF, np.array([0,0,1]).reshape(1,3)]) H_pts =np.concatenate([x,y,np.ones(x.shape)]) H_pts_out = np.transpose(np.matmul(HLAF, H_pts)) H_pts_out[:,0] = H_pts_out[:,0] / H_pts_out[:, 2] H_pts_out[:,1] = H_pts_out[:,1] / H_pts_out[:, 2] return H_pts_out[:,0:2] def convertLAFs_to_A23format(LAFs): sh = LAFs.shape if (len(sh) == 3) and (sh[1] == 2) and (sh[2] == 3): # n x 2 x 3 classical [A, (x;y)] matrix work_LAFs = deepcopy(LAFs) elif (len(sh) == 2) and (sh[1] == 7): #flat format, x y scale a11 a12 a21 a22 work_LAFs = np.zeros((sh[0], 2,3)) work_LAFs[:,0,2] = LAFs[:,0] work_LAFs[:,1,2] = LAFs[:,1] work_LAFs[:,0,0] = LAFs[:,2] * LAFs[:,3] work_LAFs[:,0,1] = LAFs[:,2] * LAFs[:,4] work_LAFs[:,1,0] = LAFs[:,2] * LAFs[:,5] work_LAFs[:,1,1] = LAFs[:,2] * LAFs[:,6] elif (len(sh) == 2) and (sh[1] == 6): #flat format, x y s*a11 s*a12 s*a21 s*a22 work_LAFs = np.zeros((sh[0], 2,3)) work_LAFs[:,0,2] = LAFs[:,0] work_LAFs[:,1,2] = LAFs[:,1] work_LAFs[:,0,0] = LAFs[:,2] work_LAFs[:,0,1] = LAFs[:,3] work_LAFs[:,1,0] = LAFs[:,4] work_LAFs[:,1,1] = LAFs[:,5] else: print ('Unknown LAF format') return None return work_LAFs def LAFs2ell(in_LAFs): LAFs = convertLAFs_to_A23format(in_LAFs) ellipses = np.zeros((len(LAFs),5)) for i in range(len(LAFs)): LAF = deepcopy(LAFs[i,:,:]) scale = np.sqrt(LAF[0,0]*LAF[1,1] - LAF[0,1]*LAF[1, 0] + 1e-10) u, W, v = np.linalg.svd(LAF[0:2,0:2] / scale, full_matrices=True) W[0] = 1. / (W[0]*W[0]*scale*scale) W[1] = 1. 
/ (W[1]*W[1]*scale*scale) A = np.matmul(np.matmul(u, np.diag(W)), u.transpose()) ellipses[i,0] = LAF[0,2] ellipses[i,1] = LAF[1,2] ellipses[i,2] = A[0,0] ellipses[i,3] = A[0,1] ellipses[i,4] = A[1,1] return ellipses def visualize_LAFs(img, LAFs): work_LAFs = convertLAFs_to_A23format(LAFs) plt.figure() plt.imshow(255 - img) for i in range(len(work_LAFs)): ell = LAF2pts(work_LAFs[i,:,:]) plt.plot( ell[:,0], ell[:,1], 'r') plt.show() return def readMODS_keypointsFile(fname): mrSize = 3.0 * np.sqrt(3.0) features_dict = {} with open(fname, 'rb') as f: lines = f.readlines() det_num = int(lines[0]) current_pos = 1 for det_idx in range(det_num): dd = lines[current_pos] dd = dd.strip().split(' ') det_name = dd[0] desc_num = int(dd[1]) features_dict[det_name] = {} current_pos +=1 print (det_name, desc_num) for desc_idx in range(desc_num): dd2 = lines[current_pos] dd2 = dd2.strip().split(' ') desc_name = dd2[0] features_num = int(dd2[1]) print (desc_name, features_num) current_pos+=1 desc_len = int(lines[current_pos]) print (desc_len) LAFs = np.zeros((features_num, 7)) if desc_len > 0: descriptors = np.zeros((features_num, desc_len)) else: descriptors = None for feat_idx in range(features_num): current_pos+=1 l = lines[current_pos].strip().split(' ') LAFs[feat_idx,0:2] = np.array(l[0:2]) LAFs[feat_idx,2] = mrSize * np.array(float(l[2])) LAFs[feat_idx,3:] = np.array(l[3:3+4]) if desc_len > 0: descriptors[feat_idx,:] = np.array(l[8:]) features_dict[det_name][desc_name] = (LAFs, descriptors) current_pos+=1 return features_dict def readMODS_ExtractFeaturesFile(fname): mrSize = 3.0 * np.sqrt(3.0) features_dict = {} with open(fname, 'rb') as f: lines = f.readlines() det_num = int(lines[0]) current_pos = 1 for det_idx in range(det_num): dd = lines[current_pos] dd = dd.strip().split(' ') det_name = dd[0] desc_num = int(dd[1]) features_dict[det_name] = {} current_pos +=1 print (det_name, desc_num) for desc_idx in range(desc_num): dd2 = lines[current_pos] dd2 = dd2.strip().split(' ') desc_name = dd2[0] features_num = int(dd2[1]) print (desc_name, features_num) current_pos+=1 desc_len = int(lines[current_pos]) print (desc_len) LAFs = np.zeros((features_num, 7)) if desc_len > 0: descriptors = np.zeros((features_num, desc_len)) else: descriptors = None for feat_idx in range(features_num): current_pos+=1 l = lines[current_pos].strip().split(' ') LAFs[feat_idx,0:2] = np.array(l[14:16]) LAFs[feat_idx,2] = mrSize * np.array(float(l[23])) LAFs[feat_idx,3:] = np.array(l[16:20]) if desc_len > 0: descriptors[feat_idx,:] = np.array(l[26:]) features_dict[det_name][desc_name] = (LAFs, descriptors) current_pos+=1 return features_dict
[ "copy.deepcopy", "matplotlib.pyplot.show", "numpy.abs", "matplotlib.pyplot.plot", "matplotlib.pyplot.imshow", "numpy.zeros", "numpy.ones", "matplotlib.pyplot.figure", "numpy.linalg.svd", "numpy.array", "numpy.sin", "numpy.linspace", "numpy.sign", "numpy.matmul", "numpy.cos", "numpy.diag", "numpy.sqrt" ]
[((550, 564), 'numpy.sqrt', 'np.sqrt', (['(x * z)'], {}), '(x * z)\n', (557, 564), True, 'import numpy as np\n'), ((748, 764), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (756, 764), True, 'import numpy as np\n'), ((974, 1020), 'numpy.sqrt', 'np.sqrt', (['(A[0, 0] * A[1, 1] - A[1, 0] * A[0, 1])'], {}), '(A[0, 0] * A[1, 1] - A[1, 0] * A[0, 1])\n', (981, 1020), True, 'import numpy as np\n'), ((1217, 1263), 'numpy.sqrt', 'np.sqrt', (['(A[0, 1] * A[0, 1] + A[0, 0] * A[0, 0])'], {}), '(A[0, 1] * A[0, 1] + A[0, 0] * A[0, 0])\n', (1224, 1263), True, 'import numpy as np\n'), ((1272, 1288), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (1280, 1288), True, 'import numpy as np\n'), ((1625, 1657), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'n_pts'], {}), '(0, 2 * np.pi, n_pts)\n', (1636, 1657), True, 'import numpy as np\n'), ((3875, 3887), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3885, 3887), True, 'import matplotlib.pyplot as plt\n'), ((3892, 3913), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(255 - img)'], {}), '(255 - img)\n', (3902, 3913), True, 'import matplotlib.pyplot as plt\n'), ((4037, 4047), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4045, 4047), True, 'import matplotlib.pyplot as plt\n'), ((277, 288), 'numpy.sign', 'np.sign', (['r1'], {}), '(r1)\n', (284, 288), True, 'import numpy as np\n'), ((341, 363), 'numpy.sqrt', 'np.sqrt', (['(1.0 + t1 * t1)'], {}), '(1.0 + t1 * t1)\n', (348, 363), True, 'import numpy as np\n'), ((456, 502), 'numpy.sqrt', 'np.sqrt', (['(r * r * a - 2 * r * t * b + t * t * c)'], {}), '(r * r * a - 2 * r * t * b + t * t * c)\n', (463, 502), True, 'import numpy as np\n'), ((503, 549), 'numpy.sqrt', 'np.sqrt', (['(t * t * a + 2 * r * t * b + r * r * c)'], {}), '(t * t * a + 2 * r * t * b + r * r * c)\n', (510, 549), True, 'import numpy as np\n'), ((870, 892), 'numpy.sqrt', 'np.sqrt', (['(a * c - b * b)'], {}), '(a * c - b * b)\n', (877, 892), True, 'import numpy as np\n'), ((929, 959), 'numpy.array', 'np.array', (['[[ia, ib], [ib, ic]]'], {}), '([[ia, ib], [ib, ic]])\n', (937, 959), True, 'import numpy as np\n'), ((1159, 1212), 'numpy.abs', 'np.abs', (['(A[0, 0] * A[1, 1] - A[1, 0] * A[0, 1] + 1e-10)'], {}), '(A[0, 0] * A[1, 1] - A[1, 0] * A[0, 1] + 1e-10)\n', (1165, 1212), True, 'import numpy as np\n'), ((1953, 1975), 'numpy.matmul', 'np.matmul', (['HLAF', 'H_pts'], {}), '(HLAF, H_pts)\n', (1962, 1975), True, 'import numpy as np\n'), ((2288, 2302), 'copy.deepcopy', 'deepcopy', (['LAFs'], {}), '(LAFs)\n', (2296, 2302), False, 'from copy import deepcopy\n'), ((3292, 3315), 'copy.deepcopy', 'deepcopy', (['LAFs[i, :, :]'], {}), '(LAFs[i, :, :])\n', (3300, 3315), False, 'from copy import deepcopy\n'), ((3330, 3392), 'numpy.sqrt', 'np.sqrt', (['(LAF[0, 0] * LAF[1, 1] - LAF[0, 1] * LAF[1, 0] + 1e-10)'], {}), '(LAF[0, 0] * LAF[1, 1] - LAF[0, 1] * LAF[1, 0] + 1e-10)\n', (3337, 3392), True, 'import numpy as np\n'), ((3405, 3461), 'numpy.linalg.svd', 'np.linalg.svd', (['(LAF[0:2, 0:2] / scale)'], {'full_matrices': '(True)'}), '(LAF[0:2, 0:2] / scale, full_matrices=True)\n', (3418, 3461), True, 'import numpy as np\n'), ((3998, 4033), 'matplotlib.pyplot.plot', 'plt.plot', (['ell[:, 0]', 'ell[:, 1]', '"""r"""'], {}), "(ell[:, 0], ell[:, 1], 'r')\n", (4006, 4033), True, 'import matplotlib.pyplot as plt\n'), ((4115, 4127), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (4122, 4127), True, 'import numpy as np\n'), ((5774, 5786), 'numpy.sqrt', 'np.sqrt', (['(3.0)'], {}), '(3.0)\n', (5781, 5786), True, 
'import numpy as np\n'), ((292, 302), 'numpy.abs', 'np.abs', (['r1'], {}), '(r1)\n', (298, 302), True, 'import numpy as np\n'), ((305, 327), 'numpy.sqrt', 'np.sqrt', (['(1.0 + r1 * r1)'], {}), '(1.0 + r1 * r1)\n', (312, 327), True, 'import numpy as np\n'), ((1687, 1696), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (1693, 1696), True, 'import numpy as np\n'), ((1707, 1718), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1715, 1718), True, 'import numpy as np\n'), ((1763, 1772), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (1769, 1772), True, 'import numpy as np\n'), ((1783, 1794), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1791, 1794), True, 'import numpy as np\n'), ((1905, 1921), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (1912, 1921), True, 'import numpy as np\n'), ((2406, 2429), 'numpy.zeros', 'np.zeros', (['(sh[0], 2, 3)'], {}), '((sh[0], 2, 3))\n', (2414, 2429), True, 'import numpy as np\n'), ((2805, 2828), 'numpy.zeros', 'np.zeros', (['(sh[0], 2, 3)'], {}), '((sh[0], 2, 3))\n', (2813, 2828), True, 'import numpy as np\n'), ((3585, 3595), 'numpy.diag', 'np.diag', (['W'], {}), '(W)\n', (3592, 3595), True, 'import numpy as np\n'), ((4951, 4978), 'numpy.zeros', 'np.zeros', (['(features_num, 7)'], {}), '((features_num, 7))\n', (4959, 4978), True, 'import numpy as np\n'), ((6610, 6637), 'numpy.zeros', 'np.zeros', (['(features_num, 7)'], {}), '((features_num, 7))\n', (6618, 6637), True, 'import numpy as np\n'), ((1841, 1860), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (1849, 1860), True, 'import numpy as np\n'), ((5046, 5080), 'numpy.zeros', 'np.zeros', (['(features_num, desc_len)'], {}), '((features_num, desc_len))\n', (5054, 5080), True, 'import numpy as np\n'), ((5333, 5349), 'numpy.array', 'np.array', (['l[0:2]'], {}), '(l[0:2])\n', (5341, 5349), True, 'import numpy as np\n'), ((5462, 5482), 'numpy.array', 'np.array', (['l[3:3 + 4]'], {}), '(l[3:3 + 4])\n', (5470, 5482), True, 'import numpy as np\n'), ((6705, 6739), 'numpy.zeros', 'np.zeros', (['(features_num, desc_len)'], {}), '((features_num, desc_len))\n', (6713, 6739), True, 'import numpy as np\n'), ((6992, 7010), 'numpy.array', 'np.array', (['l[14:16]'], {}), '(l[14:16])\n', (7000, 7010), True, 'import numpy as np\n'), ((7124, 7142), 'numpy.array', 'np.array', (['l[16:20]'], {}), '(l[16:20])\n', (7132, 7142), True, 'import numpy as np\n'), ((5569, 5584), 'numpy.array', 'np.array', (['l[8:]'], {}), '(l[8:])\n', (5577, 5584), True, 'import numpy as np\n'), ((7231, 7247), 'numpy.array', 'np.array', (['l[26:]'], {}), '(l[26:])\n', (7239, 7247), True, 'import numpy as np\n')]
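Numerically, invSqrt in the record above returns the unit-determinant inverse square root of the symmetric matrix [[a, b], [b, c]] (the scale is reapplied separately in Ell2LAF), so S @ M @ S should equal sqrt(det(M)) times the identity. A quick self-check with arbitrary values:

import numpy as np

a, b, c = 4.0, 1.0, 3.0
na, nb, nc = invSqrt(a, b, c)             # function defined in the record above
M = np.array([[a, b], [b, c]])
S = np.array([[na, nb], [nb, nc]])
print(np.isclose(na * nc - nb * nb, 1.0))                        # unit det
print(np.allclose(S @ M @ S, np.sqrt(a * c - b * b) * np.eye(2)))  # True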
# -*- coding: utf-8 -*- # @Time : 2019/3/4 9:29 # @Author : Mr.Robot # @Site : # @File : secl2txt.py # @Software: PyCharm import struct import os # 拼音表偏移, startPy = 0x1540; # 汉语词组表偏移 startChinese = 0x2628; # 全局拼音表 GPy_Table = {} # 解析结果 # 元组(词频,拼音,中文词组)的列表 GTable = [] # 原始字节码转为字符串 def byte2str(data): pos = 0 string = '' while pos < len(data): c = chr(struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0]) if c != chr(0): string += c pos += 2 return string # 获取拼音表 def getPyTable(data): data = data[4:] pos = 0 while pos < len(data): index = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0] pos += 2 lenPy = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0] pos += 2 py = byte2str(data[pos:pos + lenPy]) GPy_Table[index] = py pos += lenPy # 获取一个词组的拼音 def getWordPy(data): pos = 0 ret = '' while pos < len(data): index = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0] ret += GPy_Table[index] pos += 2 return ret # 读取中文表 def getChinese(data): pos = 0 while pos < len(data): # 同音词数量 same = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0] # 拼音索引表长度 pos += 2 py_table_len = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0] # 拼音索引表 pos += 2 py = getWordPy(data[pos: pos + py_table_len]) # 中文词组 pos += py_table_len for i in range(same): # 中文词组长度 c_len = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0] # 中文词组 pos += 2 word = byte2str(data[pos: pos + c_len]) # 扩展数据长度 pos += c_len ext_len = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0] # 词频 pos += 2 count = struct.unpack('H', bytes([data[pos], data[pos + 1]]))[0] # 保存 GTable.append((count, py, word)) # 到下个词的偏移位置 pos += ext_len def scel2txt(file_name): # 分隔符 print('-' * 60) # 读取文件 with open(file_name, 'rb') as scel_f: data = scel_f.read() print("词库名:", byte2str(data[0x130:0x338])) # .encode('GB18030') print("词库类型:", byte2str(data[0x338:0x540])) print("描述信息:", byte2str(data[0x540:0xd40])) print("词库示例:", byte2str(data[0xd40:startPy])) getPyTable(data[startPy:startChinese]) getChinese(data[startChinese:]) if __name__ == '__main__': # scel所在文件夹路径 in_path = "./sogouDict/" out_path = "./transDict/" fin = [fname for fname in os.listdir(in_path) if fname[-5:] == ".scel"] for src_f in fin: src_f_path = os.path.join(in_path, src_f) scel2txt(src_f_path) dest_f_path = out_path + src_f[:-5] + ".txt" dest_f = open(dest_f_path, 'w', encoding='UTF8') for count, py, word in GTable: dest_f.write(word + '\n') dest_f.close() GTable.clear()
[ "os.path.join", "os.listdir" ]
[((2765, 2793), 'os.path.join', 'os.path.join', (['in_path', 'src_f'], {}), '(in_path, src_f)\n', (2777, 2793), False, 'import os\n'), ((2676, 2695), 'os.listdir', 'os.listdir', (['in_path'], {}), '(in_path)\n', (2686, 2695), False, 'import os\n')]
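The byte2str helper in the record above decodes .scel text as little-endian UTF-16, two bytes per code unit. A tiny standalone illustration (the sample string is mine; '<H' makes the byte order explicit where the original relies on the platform default):

import struct

data = '词库'.encode('utf-16-le')
chars = ''.join(chr(struct.unpack('<H', data[i:i + 2])[0])
                for i in range(0, len(data), 2))
print(chars)   # 词库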
import functools from typing import Iterable, List __all__ = ['DocGroundtruthPair'] if False: from . import Document class DocGroundtruthPair: """ Helper class to expose common interface to the traversal logic of the BaseExecutable Driver. It is important to note that it checks the matching structure of `docs` and `groundtruths`. It is important while traversing to ensure that then the driver can be applied at a comparable level of granularity and adjacency. This does not imply that you can't compare at the end a document with 10 matches with a groundtruth with 20 matches :param doc: Target `Document`. :param groundtruth: The :class:`Document` with desired state. """ def __init__(self, doc: 'Document', groundtruth: 'Document'): """Set constructor method. :param doc: actual Document :param groundtruth: groundtruth Document """ self.doc = doc self.groundtruth = groundtruth @property def matches(self) -> Iterable['DocGroundtruthPair']: """Get the pairs between matches and Groundtruth. :yields: DocGroundtruthPair object """ assert len(self.doc.matches) == len(self.groundtruth.matches) for doc, groundtruth in zip(self.doc.matches, self.groundtruth.matches): yield DocGroundtruthPair(doc, groundtruth) @property def chunks(self) -> Iterable['DocGroundtruthPair']: """Get the pairs between chunks and Groundtruth. :yields: DocGroundtruthPair object """ assert len(self.doc.chunks) == len(self.groundtruth.chunks) for doc, groundtruth in zip(self.doc.chunks, self.groundtruth.chunks): yield DocGroundtruthPair(doc, groundtruth) class VersionedMixin: """ Helper class to add versioning to an object. The version number is incremented each time an attribute is set. """ version = 0 ON_GETATTR: List = [] def _increase_version(self): super().__setattr__('version', self.version + 1) def __setattr__(self, attr, value): super().__setattr__(attr, value) self._increase_version() def __delattr__(self, attr): super(VersionedMixin, self).__delattr__(attr) self._increase_version() def versioned(fn): """ Decorator function that increases the version number each time the decorated method is called. The class of the decorated method must be a subclass of :class:`VersionedMixin` :param fn: the method to decorate :return: decorated function """ @functools.wraps(fn) def wrapper(self, *args, **kwargs): self._increase_version() return fn(self, *args, **kwargs) return wrapper
[ "functools.wraps" ]
[((2577, 2596), 'functools.wraps', 'functools.wraps', (['fn'], {}), '(fn)\n', (2592, 2596), False, 'import functools\n')]
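A usage sketch of the mixin and decorator above (class and attribute names are hypothetical): every attribute write, attribute delete, or call to a @versioned method bumps the counter exactly once.

class Doc(VersionedMixin):
    @versioned
    def touch(self):
        pass

d = Doc()
d.text = 'hello'   # __setattr__      -> version 1
d.touch()          # @versioned call  -> version 2
del d.text         # __delattr__      -> version 3
print(d.version)   # 3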
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys sys.path.append('..') from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place import numpy as np from functools import partial from typing import Optional, List, Callable, Dict, Any, Set import unittest import hypothesis import hypothesis.strategies as st from hypothesis import assume def sample_program_configs(draw): #The number of elements in Input(X) should be 1 in_shape = draw(st.lists(st.integers( min_value=1, max_value=1), min_size=1, max_size=1)) step_data = draw(st.floats(min_value=0.1, max_value=0.5)) input_type = draw(st.sampled_from(["type_int", "type_int64", "type_float"])) def generate_input1(*args, **kwargs): return np.random.random(in_shape).astype(np.float32) def generate_input2(*args, **kwargs): return np.random.randint(in_shape).astype(np.int32) def generate_input3(*args, **kwargs): return np.random.randint(in_shape).astype(np.int64) build_ops = OpConfig( type = "increment", inputs = { "X" : ["input_data"], }, outputs = { "Out": ["output_data"], }, attrs = { "step" : step_data, }) if input_type == "type_int": program_config = ProgramConfig( ops=[build_ops], weights={}, inputs={ "input_data": TensorConfig(data_gen=partial(generate_input2)), }, outputs=["output_data"]) elif input_type == "type_int64": program_config = ProgramConfig( ops=[build_ops], weights={}, inputs={ "input_data": TensorConfig(data_gen=partial(generate_input3)), }, outputs=["output_data"]) elif input_type == "type_float": program_config = ProgramConfig( ops=[build_ops], weights={}, inputs={ "input_data": TensorConfig(data_gen=partial(generate_input1)), }, outputs=["output_data"]) return program_config
[ "sys.path.append", "functools.partial", "program_config.OpConfig", "hypothesis.strategies.sampled_from", "numpy.random.random", "numpy.random.randint", "hypothesis.strategies.integers", "hypothesis.strategies.floats" ]
[((622, 643), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (637, 643), False, 'import sys\n'), ((1647, 1769), 'program_config.OpConfig', 'OpConfig', ([], {'type': '"""increment"""', 'inputs': "{'X': ['input_data']}", 'outputs': "{'Out': ['output_data']}", 'attrs': "{'step': step_data}"}), "(type='increment', inputs={'X': ['input_data']}, outputs={'Out': [\n 'output_data']}, attrs={'step': step_data})\n", (1655, 1769), False, 'from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place\n'), ((1193, 1232), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0.1)', 'max_value': '(0.5)'}), '(min_value=0.1, max_value=0.5)\n', (1202, 1232), True, 'import hypothesis.strategies as st\n'), ((1256, 1313), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (["['type_int', 'type_int64', 'type_float']"], {}), "(['type_int', 'type_int64', 'type_float'])\n", (1271, 1313), True, 'import hypothesis.strategies as st\n'), ((1095, 1132), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(1)'}), '(min_value=1, max_value=1)\n', (1106, 1132), True, 'import hypothesis.strategies as st\n'), ((1373, 1399), 'numpy.random.random', 'np.random.random', (['in_shape'], {}), '(in_shape)\n', (1389, 1399), True, 'import numpy as np\n'), ((1476, 1503), 'numpy.random.randint', 'np.random.randint', (['in_shape'], {}), '(in_shape)\n', (1493, 1503), True, 'import numpy as np\n'), ((1578, 1605), 'numpy.random.randint', 'np.random.randint', (['in_shape'], {}), '(in_shape)\n', (1595, 1605), True, 'import numpy as np\n'), ((2076, 2100), 'functools.partial', 'partial', (['generate_input2'], {}), '(generate_input2)\n', (2083, 2100), False, 'from functools import partial\n'), ((2346, 2370), 'functools.partial', 'partial', (['generate_input3'], {}), '(generate_input3)\n', (2353, 2370), False, 'from functools import partial\n'), ((2616, 2640), 'functools.partial', 'partial', (['generate_input1'], {}), '(generate_input1)\n', (2623, 2640), False, 'from functools import partial\n')]
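sample_program_configs above is written in hypothesis' draw style; the PaddleLite harness binds it elsewhere, but the same shape can be exercised standalone with @st.composite (the strategy and test body below are illustrative):

import hypothesis.strategies as st
from hypothesis import given

@st.composite
def step_and_type(draw):
    step = draw(st.floats(min_value=0.1, max_value=0.5))
    kind = draw(st.sampled_from(["type_int", "type_int64", "type_float"]))
    return step, kind

@given(step_and_type())
def check(sample):
    step, kind = sample
    assert 0.1 <= step <= 0.5 and kind.startswith("type_")

check()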
from selenium.webdriver.support.select import Select from model.contact import Contact import re class ContactHelper: def __init__(self, app): self.app = app def check_if_contacts_page(self): wd = self.app.wd if not (len(wd.find_elements_by_link_text("Logout")) > 0 and len(wd.find_elements_by_name("searchstring")) > 0): wd.find_element_by_link_text("home").click() def create(self, contact): wd = self.app.wd self.check_if_contacts_page() # init contact creation wd.find_element_by_link_text("add new").click() # fill contact form wd.find_element_by_name("firstname").clear() wd.find_element_by_name("firstname").send_keys("%s" % contact.firstname) wd.find_element_by_name("lastname").clear() wd.find_element_by_name("lastname").send_keys("%s" % contact.lastname) wd.find_element_by_name("nickname").clear() wd.find_element_by_name("nickname").send_keys("%s" % contact.nickname) wd.find_element_by_name("title").clear() wd.find_element_by_name("title").send_keys("%s" % contact.title) wd.find_element_by_name("company").clear() wd.find_element_by_name("company").send_keys("%s" % contact.company) wd.find_element_by_name("address").clear() wd.find_element_by_name("address").send_keys(u"%s" % contact.address) wd.find_element_by_name("home").clear() wd.find_element_by_name("home").send_keys("+%s" % contact.homephone) wd.find_element_by_name("mobile").clear() wd.find_element_by_name("mobile").send_keys("+%s" % contact.mobilephone) wd.find_element_by_name("email").clear() wd.find_element_by_name("email").send_keys("%s" % contact.email) Select(wd.find_element_by_name("bday")).select_by_visible_text("15") wd.find_element_by_xpath("//option[@value='15']").click() wd.find_element_by_name("bmonth").click() Select(wd.find_element_by_name("bmonth")).select_by_visible_text("October") wd.find_element_by_xpath("//option[@value='October']").click() wd.find_element_by_name("byear").clear() wd.find_element_by_name("byear").send_keys("%s" % contact.byear) # submit contact creation wd.find_element_by_xpath("(//input[@name='submit'])[2]").click() self.contact_cache = None def select_first_contact(self): wd = self.app.wd wd.find_element_by_name("selected[]").click() def update_value(self, contact_field_name, text): wd = self.app.wd if text is not None: wd.find_element_by_name(contact_field_name).click() wd.find_element_by_name(contact_field_name).clear() wd.find_element_by_name(contact_field_name).send_keys(text) def fill_contact_form(self, contact): wd = self.app.wd self.update_value("firstname", contact.firstname) self.update_value("email", contact.email) def select_contact_by_index(self, index): wd = self.app.wd wd.find_elements_by_name("selected[]")[index].click() def modify_first_contact(self): self.modify_contact_by_index(0) def modify_contact_by_index(self, index, new_contact_data): wd = self.app.wd self.check_if_contacts_page() self.select_contact_by_index(index) # submit edition wd.find_elements_by_css_selector("img[alt='Edit']")[index].click() # fill contact form self.fill_contact_form(new_contact_data) # submit contact update wd.find_element_by_xpath("(//input[@name='update'])[2]").click() self.contact_cache = None def delete_first_contact(self): self.delete_contact_by_index(0) def delete_contact_by_index(self, index): wd = self.app.wd self.check_if_contacts_page() self.select_contact_by_index(index) # submit deletion wd.find_element_by_xpath("//input[@value='Delete']").click() # close dialog wd.switch_to.alert.accept() self.contact_cache = None def return_to_contacts_page(self): wd = self.app.wd 
wd.find_element_by_link_text("home page").click()

    def count(self):
        wd = self.app.wd
        self.check_if_contacts_page()
        return len(wd.find_elements_by_name("selected[]"))

    contact_cache = None

    def get_contact_list(self):
        if self.contact_cache is None:
            wd = self.app.wd
            self.check_if_contacts_page()
            self.contact_cache = []
            for row in wd.find_elements_by_name("entry"):
                cells = row.find_elements_by_tag_name("td")
                lastname = cells[1].text
                firstname = cells[2].text
                id = cells[0].find_element_by_tag_name("input").get_attribute("value")
                all_phones = cells[5].text
                all_emails = cells[4].text
                self.contact_cache.append(Contact(firstname=firstname, lastname=lastname, id=id,
                                                  all_phones_from_home_page=all_phones,
                                                  all_emails_from_home_page=all_emails))
        return list(self.contact_cache)

    def open_contact_to_edit_by_index(self, index):
        wd = self.app.wd
        self.check_if_contacts_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[7]
        cell.find_element_by_tag_name("a").click()

    def open_contact_view_by_index(self, index):
        wd = self.app.wd
        self.check_if_contacts_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[6]
        cell.find_element_by_tag_name("a").click()

    def get_contact_info_from_edit_page(self, index):
        wd = self.app.wd
        self.open_contact_to_edit_by_index(index)
        firstname = wd.find_element_by_name("firstname").get_attribute("value")
        lastname = wd.find_element_by_name("lastname").get_attribute("value")
        id = wd.find_element_by_name("id").get_attribute("value")
        homephone = wd.find_element_by_name("home").get_attribute("value")
        workphone = wd.find_element_by_name("work").get_attribute("value")
        mobilephone = wd.find_element_by_name("mobile").get_attribute("value")
        secondaryphone = wd.find_element_by_name("phone2").get_attribute("value")
        email = wd.find_element_by_name("email").get_attribute("value")
        email2 = wd.find_element_by_name("email2").get_attribute("value")
        email3 = wd.find_element_by_name("email3").get_attribute("value")
        return Contact(firstname=firstname, lastname=lastname, id=id,
                       homephone=homephone, workphone=workphone,
                       mobilephone=mobilephone, secondaryphone=secondaryphone,
                       email=email, email2=email2, email3=email3)

    def get_contact_from_view_page(self, index):
        wd = self.app.wd
        self.open_contact_view_by_index(index)
        text = wd.find_element_by_id("content").text
        homephone = re.search("H: (.*)", text)
        if homephone:
            homephone = homephone.group(1)
        mobilephone = re.search("M: (.*)", text)
        if mobilephone:
            mobilephone = mobilephone.group(1)
        workphone = re.search("W: (.*)", text)
        if workphone:
            workphone = workphone.group(1)
        secondaryphone = re.search("P: (.*)", text)
        if secondaryphone:
            secondaryphone = secondaryphone.group(1)
        return Contact(homephone=homephone, workphone=workphone,
                       mobilephone=mobilephone, secondaryphone=secondaryphone)

    def get_email_from_view_page(self, index):
        wd = self.app.wd
        self.open_contact_view_by_index(index)
        # the addresses are most likely the link text of the anchors inside
        # the view block; the previous get_attribute("value").group(1) chain
        # could not work (anchors have no value attribute, and strings have
        # no .group method), so read .text instead
        email = wd.find_element_by_xpath("//*[@id='content']/a[1]").text
        email2 = wd.find_element_by_xpath("//*[@id='content']/a[2]").text
        email3 = wd.find_element_by_xpath("//*[@id='content']/a[3]").text
        return Contact(email=email, email2=email2, email3=email3)
[ "model.contact.Contact", "re.search" ]
[((6617, 6821), 'model.contact.Contact', 'Contact', ([], {'firstname': 'firstname', 'lastname': 'lastname', 'id': 'id', 'homephone': 'homephone', 'workphone': 'workphone', 'mobilephone': 'mobilephone', 'secondaryphone': 'secondaryphone', 'email': 'email', 'email2': 'email2', 'email3': 'email3'}), '(firstname=firstname, lastname=lastname, id=id, homephone=homephone,\n workphone=workphone, mobilephone=mobilephone, secondaryphone=\n secondaryphone, email=email, email2=email2, email3=email3)\n', (6624, 6821), False, 'from model.contact import Contact\n'), ((7031, 7057), 're.search', 're.search', (['"""H: (.*)"""', 'text'], {}), "('H: (.*)', text)\n", (7040, 7057), False, 'import re\n'), ((7146, 7172), 're.search', 're.search', (['"""M: (.*)"""', 'text'], {}), "('M: (.*)', text)\n", (7155, 7172), False, 'import re\n'), ((7265, 7291), 're.search', 're.search', (['"""W: (.*)"""', 'text'], {}), "('W: (.*)', text)\n", (7274, 7291), False, 'import re\n'), ((7383, 7409), 're.search', 're.search', (['"""P: (.*)"""', 'text'], {}), "('P: (.*)', text)\n", (7392, 7409), False, 'import re\n'), ((7506, 7615), 'model.contact.Contact', 'Contact', ([], {'homephone': 'homephone', 'workphone': 'workphone', 'mobilephone': 'mobilephone', 'secondaryphone': 'secondaryphone'}), '(homephone=homephone, workphone=workphone, mobilephone=mobilephone,\n secondaryphone=secondaryphone)\n', (7513, 7615), False, 'from model.contact import Contact\n'), ((8863, 8913), 'model.contact.Contact', 'Contact', ([], {'email': 'email', 'email2': 'email2', 'email3': 'email3'}), '(email=email, email2=email2, email3=email3)\n', (8870, 8913), False, 'from model.contact import Contact\n'), ((4996, 5130), 'model.contact.Contact', 'Contact', ([], {'firstname': 'firstname', 'lastname': 'lastname', 'id': 'id', 'all_phones_from_home_page': 'all_phones', 'all_emails_from_home_page': 'all_emails'}), '(firstname=firstname, lastname=lastname, id=id,\n all_phones_from_home_page=all_phones, all_emails_from_home_page=all_emails)\n', (5003, 5130), False, 'from model.contact import Contact\n')]
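A small standalone demo (sample text invented) of the guard pattern used in get_contact_from_view_page above: re.search returns None when nothing matches, so the result must be checked before calling .group(1).

import re

text = "H: +111\nM: +222"
for label in ("H", "M", "W"):
    m = re.search("%s: (.*)" % label, text)
    print(label, m.group(1) if m else None)   # H +111 / M +222 / W None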
from dash.dependencies import Input, Output
from dash import dcc, html
from app import app

from layouts.home import home_layout

# expose the underlying Flask server (e.g. for a WSGI runner such as gunicorn)
server = app.server

app.layout = html.Div([
    dcc.Location(id='url', refresh=False),
    html.Div(id='page-content')
])


@app.callback(Output('page-content', 'children'),
              Input('url', 'pathname'))
def display_page(pathname):
    if pathname in ('/', '', '/home'):
        return home_layout()
    return '404'


if __name__ == '__main__':
    app.run_server(debug=True)
[ "layouts.home.home_layout", "dash.html.Div", "dash.dcc.Location", "dash.dependencies.Input", "app.app.run_server", "dash.dependencies.Output" ]
[((301, 335), 'dash.dependencies.Output', 'Output', (['"""page-content"""', '"""children"""'], {}), "('page-content', 'children')\n", (307, 335), False, 'from dash.dependencies import Input, Output\n'), ((351, 375), 'dash.dependencies.Input', 'Input', (['"""url"""', '"""pathname"""'], {}), "('url', 'pathname')\n", (356, 375), False, 'from dash.dependencies import Input, Output\n'), ((583, 609), 'app.app.run_server', 'app.run_server', ([], {'debug': '(True)'}), '(debug=True)\n', (597, 609), False, 'from app import app\n'), ((211, 248), 'dash.dcc.Location', 'dcc.Location', ([], {'id': '"""url"""', 'refresh': '(False)'}), "(id='url', refresh=False)\n", (223, 248), False, 'from dash import dcc\n'), ((254, 281), 'dash.html.Div', 'html.Div', ([], {'id': '"""page-content"""'}), "(id='page-content')\n", (262, 281), False, 'from dash import html\n'), ((505, 518), 'layouts.home.home_layout', 'home_layout', ([], {}), '()\n', (516, 518), False, 'from layouts.home import home_layout\n')]
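Because Dash callbacks are plain functions, the router above can be exercised without starting a server. A sketch (it assumes home_layout() does not return the literal string '404'):

def test_display_page_routes():
    assert display_page('/unknown') == '404'
    assert display_page('/home') != '404'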
# from turtle import Turtle, Screen
# # accessed this way
# turtle = Turtle()

# # or
# from turtle import *
# # accessed this way
# Turtle()

# # or
# import turtle
# # accessed this way
# tim = turtle.Turtle()

# # or
# import turtle as tur
# # accessed this way
# tim = tur.Turtle()

# turtle.shape("turtle")
# turtle.color("red")

# Draw a square with the turtle
# turtle.forward(100)
# turtle.left(90)
# turtle.forward(100)
# turtle.left(90)
# turtle.forward(100)
# turtle.left(90)
# turtle.forward(100)

# The same square using a loop
# for i in range(4):
#     turtle.forward(100)
#     turtle.left(90)

# Modules that aren't packaged with the Python standard library are
# installed with pip (Python packages). See: https://pypi.org
# terminal: 'pip install heroes'
# import heroes
# print(heroes.gen())

# Assignment: "Draw a dashed line for 50 paces"
from turtle import Turtle, Screen

a = Turtle()

# for i in range(25):
#     a.pd()
#     a.pensize(2)
#     a.forward(5)
#     for j in range(25):
#         a.pu()
#         a.forward(0.5)

# or

for _ in range(15):
    a.pd()
    a.pensize(1)
    a.forward(5)
    a.pu()
    a.forward(5)

screen = Screen()
screen.exitonclick()
[ "turtle.Screen", "turtle.Turtle" ]
[((882, 890), 'turtle.Turtle', 'Turtle', ([], {}), '()\n', (888, 890), False, 'from turtle import Turtle, Screen\n'), ((1142, 1150), 'turtle.Screen', 'Screen', ([], {}), '()\n', (1148, 1150), False, 'from turtle import Turtle, Screen\n')]
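A reusable variant of the dashed-line exercise above (the function name is my own), parameterised by dash count and length:

from turtle import Turtle, Screen

def dashed_line(t, dashes=15, length=5):
    for _ in range(dashes):
        t.pendown()
        t.forward(length)
        t.penup()
        t.forward(length)

tim = Turtle()
dashed_line(tim)
Screen().exitonclick()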
import pytest
from polzybackend import create_app, models, db
from config import Config
from copy import deepcopy
import polzyFunctions
import os

if not os.path.basename(os.getcwd()) == "tests":
    os.chdir(os.path.join(os.path.dirname(polzyFunctions.__file__), "tests"))


@pytest.fixture(scope="session", params=["pqa"])
def stage(request):
    return request.param


@pytest.fixture
def user(stage=stage, email="<EMAIL>"):
    """
    A user is read from the database. If nothing is stated, it will be
    <EMAIL> from stage PQA.

    :param stage:
    :param email:
    :return:
    """
    app = create_app(Config)
    app.app_context().push()
    with db.session.no_autoflush:
        user = deepcopy(db.session.query(models.User).filter_by(email=email).first())
    user.stage = stage
    yield user
    db.session.remove()
[ "polzybackend.create_app", "os.getcwd", "os.path.dirname", "polzybackend.db.session.remove", "pytest.fixture", "polzybackend.db.session.query" ]
[((277, 324), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'params': "['pqa']"}), "(scope='session', params=['pqa'])\n", (291, 324), False, 'import pytest\n'), ((591, 609), 'polzybackend.create_app', 'create_app', (['Config'], {}), '(Config)\n', (601, 609), False, 'from polzybackend import create_app, models, db\n'), ((814, 833), 'polzybackend.db.session.remove', 'db.session.remove', ([], {}), '()\n', (831, 833), False, 'from polzybackend import create_app, models, db\n'), ((171, 182), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (180, 182), False, 'import os\n'), ((222, 262), 'os.path.dirname', 'os.path.dirname', (['polzyFunctions.__file__'], {}), '(polzyFunctions.__file__)\n', (237, 262), False, 'import os\n'), ((698, 727), 'polzybackend.db.session.query', 'db.session.query', (['models.User'], {}), '(models.User)\n', (714, 727), False, 'from polzybackend import create_app, models, db\n')]
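A hypothetical test consuming the fixtures above: pytest injects `user` by argument name, already bound to the session-scoped, parametrised `stage`.

def test_user_has_stage(user):
    assert user.stage == "pqa"
    assert user.email is not None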
import numpy as np from sklearn.metrics import f1_score def optimize_f1(x: float, y_true: np.ndarray, y_pred: np.ndarray) -> float: return -f1_score(y_true, y_pred >= x)
[ "sklearn.metrics.f1_score" ]
[((146, 175), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', '(y_pred >= x)'], {}), '(y_true, y_pred >= x)\n', (154, 175), False, 'from sklearn.metrics import f1_score\n')]
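An illustrative threshold search (toy data) driving the objective above with scipy; the sign flip is why optimize_f1 negates the score. Note that sample F1 is piecewise constant in the threshold, so a grid over the sorted scores is often more robust than a 1-D minimiser.

import numpy as np
from scipy.optimize import minimize_scalar

y_true = np.array([0, 0, 1, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.7])
res = minimize_scalar(optimize_f1, bounds=(0.0, 1.0), method='bounded',
                      args=(y_true, y_score))
print(res.x, -res.fun)   # chosen threshold and its F1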
import datetime import pytz from django.conf import settings from django.test import TestCase from mock import ANY, patch from periods import models as period_models from periods.management.commands import notify_upcoming_period from periods.tests.factories import FlowEventFactory class TestCommand(TestCase): EMAIL_FOOTER = ('Check your calendar: http://example.com/calendar/\nFound a bug? Have a ' 'feature request? Please let us know: https://github.com/jessamynsmith/' 'eggtimer-server/issues\nDisable email notifications: ' 'http://example.com/accounts/profile/\n') def setUp(self): self.command = notify_upcoming_period.Command() flow_event = FlowEventFactory() self.user = flow_event.user FlowEventFactory(user=self.user, timestamp=pytz.utc.localize(datetime.datetime(2014, 2, 28))) @patch('django.core.mail.EmailMultiAlternatives.send') def test_notify_upcoming_period_no_periods(self, mock_send): period_models.FlowEvent.objects.all().delete() self.command.handle() self.assertFalse(mock_send.called) @patch('django.core.mail.EmailMultiAlternatives.send') @patch('periods.models.today') def test_notify_upcoming_period_send_disabled(self, mock_today, mock_send): mock_today.return_value = pytz.utc.localize(datetime.datetime(2014, 3, 14)) self.user.send_emails = False self.user.save() self.command.handle() self.assertFalse(mock_send.called) @patch('periods.email_sender.send') @patch('periods.models.today') def test_notify_upcoming_period_no_events(self, mock_today, mock_send): mock_today.return_value = pytz.utc.localize(datetime.datetime(2014, 3, 13)) self.command.handle() self.assertFalse(mock_send.called) @patch('periods.email_sender.send') @patch('periods.models.today') def test_notify_upcoming_period_all_events_in_future(self, mock_today, mock_send): mock_today.return_value = pytz.utc.localize(datetime.datetime(2014, 1, 15)) self.command.handle() self.assertFalse(mock_send.called) @patch('periods.email_sender.send') @patch('periods.models.today') def test_notify_upcoming_period_ovulation(self, mock_today, mock_send): mock_today.return_value = pytz.utc.localize(datetime.datetime(2014, 3, 14)) self.command.handle() email_text = ('Hello Jessamyn,\n\nYou are probably ovulating today, ' 'Friday March 14, 2014!\n\nCheers!\n%s\n\n%s' % (settings.ADMINS[0][0], self.EMAIL_FOOTER)) mock_send.assert_called_once_with(self.user, 'Ovulation today!', email_text, ANY) @patch('periods.email_sender.send') @patch('periods.models.today') def test_notify_upcoming_period_expected_soon(self, mock_today, mock_send): mock_today.return_value = pytz.utc.localize(datetime.datetime(2014, 3, 25)) self.command.handle() email_text = ('Hello Jessamyn,\n\nYou should be getting your period in 3 days, on Friday ' 'March 28, 2014.\n\nCheers!\n%s\n\n%s' % (settings.ADMINS[0][0], self.EMAIL_FOOTER)) mock_send.assert_called_once_with(self.user, 'Period starting', email_text, ANY) @patch('periods.email_sender.send') @patch('periods.models.today') def test_notify_upcoming_period_expected_today(self, mock_today, mock_send): mock_today.return_value = pytz.utc.localize(datetime.datetime(2014, 3, 28)) self.command.handle() email_text = ('Hello Jessamyn,\n\nYou should be getting your period today, Friday March ' '28, 2014!\n\nCheers!\n%s\n\n%s' % (settings.ADMINS[0][0], self.EMAIL_FOOTER)) mock_send.assert_called_once_with(self.user, 'Period today!', email_text, ANY) @patch('periods.email_sender.send') @patch('periods.models.today') def test_notify_upcoming_period_overdue(self, mock_today, mock_send): mock_today.return_value = 
pytz.utc.localize(datetime.datetime(2014, 3, 29)) self.command.handle() email_text = ('Hello Jessamyn,\n\nYou should have gotten your period 1 day ago, on ' 'Friday March 28, 2014.\nDid you forget to add your last period?\n\n' 'Cheers!\n%s\n\n%s' % (settings.ADMINS[0][0], self.EMAIL_FOOTER)) mock_send.assert_called_once_with(self.user, 'Period was expected 1 day ago', email_text, ANY)
[ "periods.management.commands.notify_upcoming_period.Command", "mock.patch", "datetime.datetime", "periods.models.FlowEvent.objects.all", "periods.tests.factories.FlowEventFactory" ]
[((927, 980), 'mock.patch', 'patch', (['"""django.core.mail.EmailMultiAlternatives.send"""'], {}), "('django.core.mail.EmailMultiAlternatives.send')\n", (932, 980), False, 'from mock import ANY, patch\n'), ((1182, 1235), 'mock.patch', 'patch', (['"""django.core.mail.EmailMultiAlternatives.send"""'], {}), "('django.core.mail.EmailMultiAlternatives.send')\n", (1187, 1235), False, 'from mock import ANY, patch\n'), ((1241, 1270), 'mock.patch', 'patch', (['"""periods.models.today"""'], {}), "('periods.models.today')\n", (1246, 1270), False, 'from mock import ANY, patch\n'), ((1579, 1613), 'mock.patch', 'patch', (['"""periods.email_sender.send"""'], {}), "('periods.email_sender.send')\n", (1584, 1613), False, 'from mock import ANY, patch\n'), ((1619, 1648), 'mock.patch', 'patch', (['"""periods.models.today"""'], {}), "('periods.models.today')\n", (1624, 1648), False, 'from mock import ANY, patch\n'), ((1890, 1924), 'mock.patch', 'patch', (['"""periods.email_sender.send"""'], {}), "('periods.email_sender.send')\n", (1895, 1924), False, 'from mock import ANY, patch\n'), ((1930, 1959), 'mock.patch', 'patch', (['"""periods.models.today"""'], {}), "('periods.models.today')\n", (1935, 1959), False, 'from mock import ANY, patch\n'), ((2212, 2246), 'mock.patch', 'patch', (['"""periods.email_sender.send"""'], {}), "('periods.email_sender.send')\n", (2217, 2246), False, 'from mock import ANY, patch\n'), ((2252, 2281), 'mock.patch', 'patch', (['"""periods.models.today"""'], {}), "('periods.models.today')\n", (2257, 2281), False, 'from mock import ANY, patch\n'), ((2784, 2818), 'mock.patch', 'patch', (['"""periods.email_sender.send"""'], {}), "('periods.email_sender.send')\n", (2789, 2818), False, 'from mock import ANY, patch\n'), ((2824, 2853), 'mock.patch', 'patch', (['"""periods.models.today"""'], {}), "('periods.models.today')\n", (2829, 2853), False, 'from mock import ANY, patch\n'), ((3373, 3407), 'mock.patch', 'patch', (['"""periods.email_sender.send"""'], {}), "('periods.email_sender.send')\n", (3378, 3407), False, 'from mock import ANY, patch\n'), ((3413, 3442), 'mock.patch', 'patch', (['"""periods.models.today"""'], {}), "('periods.models.today')\n", (3418, 3442), False, 'from mock import ANY, patch\n'), ((3954, 3988), 'mock.patch', 'patch', (['"""periods.email_sender.send"""'], {}), "('periods.email_sender.send')\n", (3959, 3988), False, 'from mock import ANY, patch\n'), ((3994, 4023), 'mock.patch', 'patch', (['"""periods.models.today"""'], {}), "('periods.models.today')\n", (3999, 4023), False, 'from mock import ANY, patch\n'), ((685, 717), 'periods.management.commands.notify_upcoming_period.Command', 'notify_upcoming_period.Command', ([], {}), '()\n', (715, 717), False, 'from periods.management.commands import notify_upcoming_period\n'), ((739, 757), 'periods.tests.factories.FlowEventFactory', 'FlowEventFactory', ([], {}), '()\n', (755, 757), False, 'from periods.tests.factories import FlowEventFactory\n'), ((1403, 1433), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(3)', '(14)'], {}), '(2014, 3, 14)\n', (1420, 1433), False, 'import datetime\n'), ((1777, 1807), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(3)', '(13)'], {}), '(2014, 3, 13)\n', (1794, 1807), False, 'import datetime\n'), ((2099, 2129), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(1)', '(15)'], {}), '(2014, 1, 15)\n', (2116, 2129), False, 'import datetime\n'), ((2410, 2440), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(3)', '(14)'], {}), '(2014, 3, 14)\n', (2427, 2440), False, 'import 
datetime\n'), ((2986, 3016), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(3)', '(25)'], {}), '(2014, 3, 25)\n', (3003, 3016), False, 'import datetime\n'), ((3576, 3606), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(3)', '(28)'], {}), '(2014, 3, 28)\n', (3593, 3606), False, 'import datetime\n'), ((4150, 4180), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(3)', '(29)'], {}), '(2014, 3, 29)\n', (4167, 4180), False, 'import datetime\n'), ((1054, 1091), 'periods.models.FlowEvent.objects.all', 'period_models.FlowEvent.objects.all', ([], {}), '()\n', (1089, 1091), True, 'from periods import models as period_models\n'), ((888, 918), 'datetime.datetime', 'datetime.datetime', (['(2014)', '(2)', '(28)'], {}), '(2014, 2, 28)\n', (905, 918), False, 'import datetime\n')]
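A standalone sketch of the stacked-@patch ordering used throughout the test case above (unittest.mock here, while the file imports the external mock package; the behaviour is the same): decorators apply bottom-up, so the bottom patch arrives as the first mock argument.

from unittest.mock import patch

@patch('os.getcwd')            # injected second
@patch('os.getpid')            # injected first
def demo(mock_getpid, mock_getcwd):
    mock_getpid.return_value = 4242
    mock_getcwd.return_value = '/tmp'
    import os
    return os.getpid(), os.getcwd()

print(demo())   # (4242, '/tmp')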
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import numpy as np import paddle.fluid as fluid import paddle.fluid.layers as layers from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard from paddle.fluid.dygraph.learning_rate_scheduler import LearningRateDecay from config import word_emb_param_names, pos_enc_param_names def position_encoding_init(n_position, d_pos_vec): """ Generate the initial values for the sinusoid position encoding table. """ channels = d_pos_vec position = np.arange(n_position) num_timescales = channels // 2 log_timescale_increment = (np.log(float(1e4) / float(1)) / (num_timescales - 1)) inv_timescales = np.exp( np.arange(num_timescales)) * -log_timescale_increment scaled_time = np.expand_dims(position, 1) * np.expand_dims( inv_timescales, 0) signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1) signal = np.pad(signal, [[0, 0], [0, np.mod(channels, 2)]], 'constant') position_enc = signal return position_enc.astype("float32") class NoamDecay(LearningRateDecay): """ learning rate scheduler """ def __init__(self, d_model, warmup_steps, static_lr=2.0, begin=1, step=1, dtype='float32'): super(NoamDecay, self).__init__(begin, step, dtype) self.d_model = d_model self.warmup_steps = warmup_steps self.static_lr = static_lr def step(self): a = self.create_lr_var(self.step_num**-0.5) b = self.create_lr_var((self.warmup_steps**-1.5) * self.step_num) lr_value = (self.d_model**-0.5) * layers.elementwise_min( a, b) * self.static_lr return lr_value class PrePostProcessLayer(Layer): """ PrePostProcessLayer """ def __init__(self, process_cmd, normalized_shape=None): super(PrePostProcessLayer, self).__init__() for cmd in process_cmd: if cmd == "n": self._layer_norm = LayerNorm( normalized_shape = normalized_shape, param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(1.)), bias_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(0.))) def forward(self, prev_out, out, process_cmd, dropout_rate=0.): """ forward :param prev_out: :param out: :param process_cmd: :param dropout_rate: :return: """ for cmd in process_cmd: if cmd == "a": # add residual connection out = out + prev_out if prev_out else out elif cmd == "n": # add layer normalization out = self._layer_norm(out) elif cmd == "d": # add dropout if dropout_rate: out = layers.dropout(out, dropout_prob=dropout_rate, is_test=False) return out class PositionwiseFeedForwardLayer(Layer): """ PositionwiseFeedForwardLayer """ def __init__(self, input_hid, d_inner_hid, d_hid, dropout_rate): super(PositionwiseFeedForwardLayer, self).__init__() self._i2h = Linear( input_dim= input_hid, output_dim=d_inner_hid, act="relu") self._h2o = Linear( input_dim = d_inner_hid, output_dim=d_hid) self._dropout_rate = dropout_rate def forward(self, x): """ forward :param x: :return: """ hidden = self._i2h(x) if self._dropout_rate: hidden = layers.dropout(hidden, dropout_prob=self._dropout_rate, 
                is_test=False)
        out = self._h2o(hidden)
        return out


class MultiHeadAttentionLayer(Layer):
    """
    MultiHeadAttentionLayer
    """

    def __init__(self, d_key, d_value, d_model, n_head=1, dropout_rate=0.,
                 cache=None, gather_idx=None, static_kv=False):
        super(MultiHeadAttentionLayer, self).__init__()
        self._n_head = n_head
        self._d_key = d_key
        self._d_value = d_value
        self._d_model = d_model
        self._dropout_rate = dropout_rate
        self._q_fc = Linear(input_dim=d_model, output_dim=d_key * n_head, bias_attr=False)
        self._k_fc = Linear(input_dim=d_model, output_dim=d_key * n_head, bias_attr=False)
        self._v_fc = Linear(input_dim=d_model, output_dim=d_value * n_head, bias_attr=False)
        self._proj_fc = Linear(input_dim=d_model, output_dim=self._d_model, bias_attr=False)

    def forward(self, queries, keys, values, attn_bias, cache=None, gather_idx=None):
        """
        forward
        :param queries:
        :param keys:
        :param values:
        :param attn_bias:
        :return:
        """
        # compute q, k, v
        keys = queries if keys is None else keys
        values = keys if values is None else values
        q = self._q_fc(queries)
        k = self._k_fc(keys)
        v = self._v_fc(values)

        # split heads
        reshaped_q = layers.reshape(
            x=q, shape=[q.shape[0], q.shape[1], self._n_head, self._d_key],
            inplace=False)
        transpose_q = layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3])
        reshaped_k = layers.reshape(
            x=k, shape=[k.shape[0], k.shape[1], self._n_head, self._d_key],
            inplace=False)
        transpose_k = layers.transpose(x=reshaped_k, perm=[0, 2, 1, 3])
        reshaped_v = layers.reshape(
            x=v, shape=[v.shape[0], v.shape[1], self._n_head, self._d_value],
            inplace=False)
        transpose_v = layers.transpose(x=reshaped_v, perm=[0, 2, 1, 3])

        if cache is not None:
            cache_k, cache_v = cache["k"], cache["v"]
            transpose_k = layers.concat([cache_k, transpose_k], axis=2)
            transpose_v = layers.concat([cache_v, transpose_v], axis=2)
            cache["k"], cache["v"] = transpose_k, transpose_v

        # scaled dot-product attention
        product = layers.matmul(x=transpose_q, y=transpose_k, transpose_y=True,
                                alpha=self._d_model**-0.5)
        if attn_bias:
            product += attn_bias
        weights = layers.softmax(product)
        if self._dropout_rate:
            weights_droped = layers.dropout(weights,
                                            dropout_prob=self._dropout_rate,
                                            is_test=False)
            out = layers.matmul(weights_droped, transpose_v)
        else:
            out = layers.matmul(weights, transpose_v)

        # combine heads
        if len(out.shape) != 4:
            raise ValueError("Input(x) should be a 4-D Tensor.")
        trans_x = layers.transpose(out, perm=[0, 2, 1, 3])
        final_out = layers.reshape(
            x=trans_x,
            shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
            inplace=False)

        # fc to output
        proj_out = self._proj_fc(final_out)
        return proj_out


class EncoderSubLayer(Layer):
    """
    EncoderSubLayer
    """

    def __init__(self, n_head, d_key, d_value, d_model, d_inner_hid,
                 prepostprocess_dropout, attention_dropout, relu_dropout,
                 preprocess_cmd="n", postprocess_cmd="da"):
        super(EncoderSubLayer, self).__init__()
        self._preprocess_cmd = preprocess_cmd
        self._postprocess_cmd = postprocess_cmd
        self._prepostprocess_dropout = prepostprocess_dropout
        self._preprocess_layer = PrePostProcessLayer(self._preprocess_cmd, [d_model])
        self._multihead_attention_layer = MultiHeadAttentionLayer(
            d_key, d_value, d_model, n_head, attention_dropout)
        self._postprocess_layer = PrePostProcessLayer(self._postprocess_cmd, None)
        self._preprocess_layer2 = PrePostProcessLayer(self._preprocess_cmd, [d_model])
        self._positionwise_feed_forward = PositionwiseFeedForwardLayer(
            d_model, d_inner_hid, d_model, relu_dropout)
        self._postprocess_layer2 = PrePostProcessLayer(self._postprocess_cmd, None)

    def forward(self, enc_input, attn_bias):
        """
        forward
        :param enc_input:
        :param attn_bias:
        :return:
        """
        pre_process_multihead = self._preprocess_layer(
            None, enc_input, self._preprocess_cmd, self._prepostprocess_dropout)
        attn_output = self._multihead_attention_layer(pre_process_multihead,
                                                      None, None, attn_bias)
        attn_output = self._postprocess_layer(enc_input, attn_output,
                                              self._postprocess_cmd,
                                              self._prepostprocess_dropout)
        pre_process2_output = self._preprocess_layer2(
            None, attn_output, self._preprocess_cmd, self._prepostprocess_dropout)
        ffd_output = self._positionwise_feed_forward(pre_process2_output)
        return self._postprocess_layer2(attn_output, ffd_output,
                                        self._postprocess_cmd,
                                        self._prepostprocess_dropout)


class EncoderLayer(Layer):
    """
    encoder
    """

    def __init__(self, n_layer, n_head, d_key, d_value, d_model, d_inner_hid,
                 prepostprocess_dropout, attention_dropout, relu_dropout,
                 preprocess_cmd="n", postprocess_cmd="da"):
        super(EncoderLayer, self).__init__()
        self._preprocess_cmd = preprocess_cmd
        self._encoder_sublayers = list()
        self._prepostprocess_dropout = prepostprocess_dropout
        self._n_layer = n_layer
        self._preprocess_layer = PrePostProcessLayer(self._preprocess_cmd, [d_model])
        for i in range(n_layer):
            self._encoder_sublayers.append(
                self.add_sublayer(
                    'esl_%d' % i,
                    EncoderSubLayer(n_head, d_key, d_value, d_model,
                                    d_inner_hid, prepostprocess_dropout,
                                    attention_dropout, relu_dropout,
                                    preprocess_cmd, postprocess_cmd)))

    def forward(self, enc_input, attn_bias):
        """
        forward
        :param enc_input:
        :param attn_bias:
        :return:
        """
        for i in range(self._n_layer):
            enc_output = self._encoder_sublayers[i](enc_input, attn_bias)
            enc_input = enc_output
        return self._preprocess_layer(None, enc_output, self._preprocess_cmd,
                                      self._prepostprocess_dropout)


class PrepareEncoderDecoderLayer(Layer):
    """
    PrepareEncoderDecoderLayer
    """

    def __init__(self, src_vocab_size, src_emb_dim, src_max_len, dropout_rate,
                 word_emb_param_name=None, pos_enc_param_name=None):
        super(PrepareEncoderDecoderLayer, self).__init__()
        self._src_max_len = src_max_len
        self._src_emb_dim = src_emb_dim
        self._src_vocab_size = src_vocab_size
        self._dropout_rate = dropout_rate
        self._input_emb = Embedding(
            size=[src_vocab_size, src_emb_dim],
            padding_idx=0,
            param_attr=fluid.ParamAttr(
                name=word_emb_param_name,
                initializer=fluid.initializer.Normal(0., src_emb_dim**-0.5)))
        pos_inp = position_encoding_init(src_max_len, src_emb_dim)
        self._pos_emb = Embedding(
            size=[self._src_max_len, src_emb_dim],
            param_attr=fluid.ParamAttr(
                name=pos_enc_param_name,
                initializer=fluid.initializer.NumpyArrayInitializer(pos_inp),
                trainable=False))
        # use in dygraph_mode to fit different length batch
        # self._pos_emb._w = to_variable(
        #     position_encoding_init(self._src_max_len, self._src_emb_dim))

    def forward(self, src_word, src_pos):
        """
        forward
        :param src_word:
        :param src_pos:
        :return:
        """
        # print("here")
        # print(self._input_emb._w._numpy().shape)
        src_word_emb = self._input_emb(src_word)
        src_word_emb = layers.scale(x=src_word_emb, scale=self._src_emb_dim**0.5)
        # TODO: change this to fit dynamic-length input
        src_pos_emb = self._pos_emb(src_pos)
        src_pos_emb.stop_gradient = True
        enc_input = src_word_emb + src_pos_emb
        enc_input = layers.reshape(
            enc_input, shape=[enc_input.shape[0], enc_input.shape[1], -1])
        return layers.dropout(
            enc_input, dropout_prob=self._dropout_rate,
            is_test=False) if self._dropout_rate else enc_input


class WrapEncoderLayer(Layer):
    """
    encoderlayer
    """

    def __init__(self, src_vocab_size, max_length, n_layer, n_head, d_key,
                 d_value, d_model, d_inner_hid, prepostprocess_dropout,
                 attention_dropout, relu_dropout, preprocess_cmd,
                 postprocess_cmd, weight_sharing):
        """
        The wrapper assembles together all needed layers for the encoder.
        """
        super(WrapEncoderLayer, self).__init__()
        self._prepare_encoder_layer = PrepareEncoderDecoderLayer(
            src_vocab_size, d_model, max_length, prepostprocess_dropout,
            word_emb_param_name=word_emb_param_names[0],
            pos_enc_param_name=pos_enc_param_names[0])
        self._encoder = EncoderLayer(n_layer, n_head, d_key, d_value, d_model,
                                     d_inner_hid, prepostprocess_dropout,
                                     attention_dropout, relu_dropout,
                                     preprocess_cmd, postprocess_cmd)

    def forward(self, enc_inputs):
        """forward"""
        src_word, src_pos, src_slf_attn_bias = enc_inputs
        enc_input = self._prepare_encoder_layer(src_word, src_pos)
        enc_output = self._encoder(enc_input, src_slf_attn_bias)
        return enc_output


class DecoderSubLayer(Layer):
    """
    decoder
    """

    def __init__(self, n_head, d_key, d_value, d_model, d_inner_hid,
                 prepostprocess_dropout, attention_dropout, relu_dropout,
                 preprocess_cmd, postprocess_cmd):
        super(DecoderSubLayer, self).__init__()
        self._postprocess_cmd = postprocess_cmd
        self._preprocess_cmd = preprocess_cmd
        self._prepostprocess_dropout = prepostprocess_dropout
        self._pre_process_layer = PrePostProcessLayer(preprocess_cmd, [d_model])
        self._multihead_attention_layer = MultiHeadAttentionLayer(
            d_key, d_value, d_model, n_head, attention_dropout)
        self._post_process_layer = PrePostProcessLayer(postprocess_cmd, None)
        self._pre_process_layer2 = PrePostProcessLayer(preprocess_cmd, [d_model])
        self._multihead_attention_layer2 = MultiHeadAttentionLayer(
            d_key, d_value, d_model, n_head, attention_dropout)
        self._post_process_layer2 = PrePostProcessLayer(postprocess_cmd, [d_model])
        self._pre_process_layer3 = PrePostProcessLayer(preprocess_cmd, [d_model])
        self._positionwise_feed_forward_layer = PositionwiseFeedForwardLayer(
            d_model, d_inner_hid, d_model, relu_dropout)
        self._post_process_layer3 = PrePostProcessLayer(postprocess_cmd, None)

    def forward(self, dec_input, enc_output, slf_attn_bias, dec_enc_attn_bias,
                cache=None, gather_idx=None):
        """
        forward
        :param dec_input:
        :param enc_output:
        :param slf_attn_bias:
        :param dec_enc_attn_bias:
        :return:
        """
        pre_process_rlt = self._pre_process_layer(
            None, dec_input, self._preprocess_cmd, self._prepostprocess_dropout)
        slf_attn_output = self._multihead_attention_layer(
            pre_process_rlt, None, None, slf_attn_bias, cache, gather_idx)
        slf_attn_output_pp = self._post_process_layer(
            dec_input, slf_attn_output, self._postprocess_cmd,
            self._prepostprocess_dropout)
        pre_process_rlt2 = self._pre_process_layer2(
            None, slf_attn_output_pp, self._preprocess_cmd,
            self._prepostprocess_dropout)
        enc_attn_output_pp = self._multihead_attention_layer2(
            pre_process_rlt2, enc_output, enc_output, dec_enc_attn_bias)
        enc_attn_output = self._post_process_layer2(
            slf_attn_output_pp, enc_attn_output_pp, self._postprocess_cmd,
            self._prepostprocess_dropout)
        pre_process_rlt3 = self._pre_process_layer3(
            None, enc_attn_output, self._preprocess_cmd,
            self._prepostprocess_dropout)
        ffd_output = self._positionwise_feed_forward_layer(pre_process_rlt3)
        dec_output = self._post_process_layer3(
            enc_attn_output, ffd_output, self._postprocess_cmd,
            self._prepostprocess_dropout)
        return dec_output


class DecoderLayer(Layer):
    """
    decoder
    """

    def __init__(self, n_layer, n_head, d_key, d_value, d_model, d_inner_hid,
                 prepostprocess_dropout, attention_dropout, relu_dropout,
                 preprocess_cmd, postprocess_cmd):
        super(DecoderLayer, self).__init__()
        self._pre_process_layer = PrePostProcessLayer(preprocess_cmd, [d_model])
        self._decoder_sub_layers = list()
        self._n_layer = n_layer
        self._preprocess_cmd = preprocess_cmd
        self._prepostprocess_dropout = prepostprocess_dropout
        for i in range(n_layer):
            self._decoder_sub_layers.append(
                self.add_sublayer(
                    'dsl_%d' % i,
                    DecoderSubLayer(n_head, d_key, d_value, d_model,
                                    d_inner_hid, prepostprocess_dropout,
                                    attention_dropout, relu_dropout,
                                    preprocess_cmd, postprocess_cmd)))

    def forward(self, dec_input, enc_output, dec_slf_attn_bias,
                dec_enc_attn_bias, caches=None, gather_idx=None):
        """
        forward
        :param dec_input:
        :param enc_output:
        :param dec_slf_attn_bias:
        :param dec_enc_attn_bias:
        :return:
        """
        for i in range(self._n_layer):
            tmp_dec_output = self._decoder_sub_layers[i](
                dec_input, enc_output, dec_slf_attn_bias, dec_enc_attn_bias,
                None if caches is None else caches[i], gather_idx)
            dec_input = tmp_dec_output
        dec_output = self._pre_process_layer(None, tmp_dec_output,
                                             self._preprocess_cmd,
                                             self._prepostprocess_dropout)
        return dec_output


class WrapDecoderLayer(Layer):
    """
    decoder
    """

    def __init__(self, trg_vocab_size, max_length, n_layer, n_head, d_key,
                 d_value, d_model, d_inner_hid, prepostprocess_dropout,
                 attention_dropout, relu_dropout, preprocess_cmd,
                 postprocess_cmd, weight_sharing, gather_idx=None):
        """
        The wrapper assembles together all needed layers for the decoder.
        """
        super(WrapDecoderLayer, self).__init__()
        self._prepare_decoder_layer = PrepareEncoderDecoderLayer(
            trg_vocab_size, d_model, max_length, prepostprocess_dropout,
            word_emb_param_name=word_emb_param_names[1],
            pos_enc_param_name=pos_enc_param_names[1])
        self._decoder_layer = DecoderLayer(n_layer, n_head, d_key, d_value,
                                           d_model, d_inner_hid,
                                           prepostprocess_dropout,
                                           attention_dropout, relu_dropout,
                                           preprocess_cmd, postprocess_cmd)
        self._weight_sharing = weight_sharing
        if not weight_sharing:
            self._fc = Linear(input_dim=d_model, output_dim=trg_vocab_size,
                              bias_attr=False)

    def forward(self, dec_inputs, enc_output, caches=None, gather_idx=None):
        """
        forward
        :param dec_inputs:
        :param enc_output:
        :return:
        """
        trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias = dec_inputs
        dec_input = self._prepare_decoder_layer(trg_word, trg_pos)
        dec_output = self._decoder_layer(dec_input, enc_output,
                                         trg_slf_attn_bias, trg_src_attn_bias,
                                         caches, gather_idx)
        dec_output_reshape = layers.reshape(
            dec_output, shape=[-1, dec_output.shape[-1]], inplace=False)
        if self._weight_sharing:
            predict = layers.matmul(
                x=dec_output_reshape,
                y=self._prepare_decoder_layer._input_emb.weight,
                transpose_y=True)
        else:
            predict = self._fc(dec_output_reshape)
        if dec_inputs is None:
            # Return probs for independent decoder program.
            predict_out = layers.softmax(predict)
            return predict_out
        return predict


class TransFormer(Layer):
    """
    model
    """

    def __init__(self, src_vocab_size, trg_vocab_size, max_length, n_layer,
                 n_head, d_key, d_value, d_model, d_inner_hid,
                 prepostprocess_dropout, attention_dropout, relu_dropout,
                 preprocess_cmd, postprocess_cmd, weight_sharing,
                 label_smooth_eps=0.0):
        super(TransFormer, self).__init__()
        self._label_smooth_eps = label_smooth_eps
        self._trg_vocab_size = trg_vocab_size
        if weight_sharing:
            assert src_vocab_size == trg_vocab_size, (
                "Vocabularies in source and target should be same for weight sharing."
            )
        self._wrap_encoder_layer = WrapEncoderLayer(
            src_vocab_size, max_length, n_layer, n_head, d_key, d_value,
            d_model, d_inner_hid, prepostprocess_dropout, attention_dropout,
            relu_dropout, preprocess_cmd, postprocess_cmd, weight_sharing)
        self._wrap_decoder_layer = WrapDecoderLayer(
            trg_vocab_size, max_length, n_layer, n_head, d_key, d_value,
            d_model, d_inner_hid, prepostprocess_dropout, attention_dropout,
            relu_dropout, preprocess_cmd, postprocess_cmd, weight_sharing)
        if weight_sharing:
            self._wrap_decoder_layer._prepare_decoder_layer._input_emb.weight = \
                self._wrap_encoder_layer._prepare_encoder_layer._input_emb.weight
        self.n_layer = n_layer
        self.n_head = n_head
        self.d_key = d_key
        self.d_value = d_value

    def forward(self, enc_inputs, dec_inputs, label, weights):
        """
        forward
        :param enc_inputs:
        :param dec_inputs:
        :param label:
        :param weights:
        :return:
        """
        enc_output = self._wrap_encoder_layer(enc_inputs)
        predict = self._wrap_decoder_layer(dec_inputs, enc_output)
        if self._label_smooth_eps:
            label_out = layers.label_smooth(
                label=layers.one_hot(input=label, depth=self._trg_vocab_size),
                epsilon=self._label_smooth_eps)
        else:
            # fall back to the hard labels when smoothing is disabled
            # (the original sample left label_out undefined in this branch)
            label_out = label
        cost = layers.softmax_with_cross_entropy(
            logits=predict, label=label_out,
            soft_label=True if self._label_smooth_eps else False)
        weighted_cost = cost * weights
        sum_cost = layers.reduce_sum(weighted_cost)
        token_num = layers.reduce_sum(weights)
        token_num.stop_gradient = True
        avg_cost = sum_cost / token_num
        return sum_cost, avg_cost, predict, token_num

    def beam_search(self, enc_inputs, dec_inputs, bos_id=0, eos_id=1,
                    beam_size=4, max_len=30, alpha=0.6):
        """
        Beam search with two queues, alive and finished, each with a capacity
        of beam size. It includes `grow_topk`, `grow_alive` and `grow_finish`
        as steps.
        1. `grow_topk` selects the top `2*beam_size` candidates to avoid all
           getting EOS.
        2. `grow_alive` selects the top `beam_size` non-EOS candidates as the
           inputs of the next decoding step.
        3. `grow_finish` compares the already finished candidates in the
           finished queue and newly added finished candidates from
           `grow_topk`, and selects the top `beam_size` finished candidates.
        """

        def expand_to_beam_size(tensor, beam_size):
            tensor = layers.reshape(tensor, [tensor.shape[0], 1] + tensor.shape[1:])
            tile_dims = [1] * len(tensor.shape)
            tile_dims[1] = beam_size
            return layers.expand(tensor, tile_dims)

        def merge_beam_dim(tensor):
            return layers.reshape(tensor, [-1] + tensor.shape[2:])

        # run encoder
        enc_output = self._wrap_encoder_layer(enc_inputs)

        # constant number
        inf = float(1. * 1e7)
        batch_size = enc_output.shape[0]

        ### initialize states of beam search ###
        ## init for the alive ##
        initial_ids, trg_src_attn_bias = dec_inputs
        # (batch_size, 1)
        initial_log_probs = to_variable(
            np.array([[0.] + [-inf] * (beam_size - 1)], dtype="float32"))
        alive_log_probs = layers.expand(initial_log_probs, [batch_size, 1])
        alive_seq = to_variable(
            np.tile(np.array([[[bos_id]]], dtype="int64"),
                    (batch_size, beam_size, 1)))

        ## init for the finished ##
        finished_scores = to_variable(
            np.array([[-inf] * beam_size], dtype="float32"))
        finished_scores = layers.expand(finished_scores, [batch_size, 1])
        finished_seq = to_variable(
            np.tile(np.array([[[bos_id]]], dtype="int64"),
                    (batch_size, beam_size, 1)))
        finished_flags = layers.zeros_like(finished_scores)

        ### initialize inputs and states of transformer decoder ###
        ## init inputs for decoder, shaped `[batch_size*beam_size, ...]`
        trg_word = layers.reshape(alive_seq[:, :, -1],
                                  [batch_size * beam_size, 1, 1])
        trg_pos = layers.zeros_like(trg_word)
        trg_src_attn_bias = merge_beam_dim(
            expand_to_beam_size(trg_src_attn_bias, beam_size))
        enc_output = merge_beam_dim(expand_to_beam_size(enc_output, beam_size))

        ## init states (caches) for transformer, need to be updated according to selected beam
        caches = [{
            "k": layers.fill_constant(
                shape=[batch_size * beam_size, self.n_head, 0, self.d_key],
                dtype=enc_output.dtype,
                value=0),
            "v": layers.fill_constant(
                shape=[batch_size * beam_size, self.n_head, 0, self.d_value],
                dtype=enc_output.dtype,
                value=0),
        } for i in range(self.n_layer)]

        def update_states(caches, beam_idx, beam_size):
            for cache in caches:
                cache["k"] = gather_2d_by_gather(cache["k"], beam_idx,
                                                 beam_size, batch_size, False)
                cache["v"] = gather_2d_by_gather(cache["v"], beam_idx,
                                                 beam_size, batch_size, False)
            return caches

        def gather_2d_by_gather(tensor_nd, beam_idx, beam_size, batch_size,
                                need_flat=True):
            batch_idx = layers.range(0, batch_size, 1, dtype="int64") * beam_size
            flat_tensor = merge_beam_dim(tensor_nd) if need_flat else tensor_nd
            idx = layers.reshape(
                layers.elementwise_add(beam_idx, batch_idx, 0), [-1])
            new_flat_tensor = layers.gather(flat_tensor, idx)
            new_tensor_nd = layers.reshape(
                new_flat_tensor,
                shape=[batch_size, beam_idx.shape[1]] +
                tensor_nd.shape[2:]) if need_flat else new_flat_tensor
            return new_tensor_nd

        def early_finish(alive_log_probs, finished_scores,
                         finished_in_finished):
            max_length_penalty = np.power(((5. + max_len) / 6.), alpha)
            # The best possible score of the most likely alive sequence
            lower_bound_alive_scores = alive_log_probs[:, 0] / max_length_penalty
            # Now to compute the lowest score of a finished sequence in
            # finished. If the sequence isn't finished, we multiply its score
            # by 0; since scores are all negative, taking the min will give us
            # the score of the lowest finished item.
            lowest_score_of_finished_in_finished = layers.reduce_min(
                finished_scores * finished_in_finished, 1)
            # If none of the sequences have finished, then the min will be 0
            # and we have to replace it by -inf if it is. The score of any
            # sequence in alive will be much higher than -inf and the
            # termination condition will not be met.
            lowest_score_of_finished_in_finished += (
                1. - layers.reduce_max(finished_in_finished, 1)) * -inf
            bound_is_met = layers.reduce_all(
                layers.greater_than(lowest_score_of_finished_in_finished,
                                    lower_bound_alive_scores))
            return bound_is_met

        def grow_topk(i, logits, alive_seq, alive_log_probs, states):
            logits = layers.reshape(logits, [batch_size, beam_size, -1])
            candidate_log_probs = layers.log(layers.softmax(logits, axis=2))
            log_probs = layers.elementwise_add(candidate_log_probs,
                                                alive_log_probs, 0)
            length_penalty = np.power(5.0 + (i + 1.0) / 6.0, alpha)
            curr_scores = log_probs / length_penalty
            flat_curr_scores = layers.reshape(curr_scores, [batch_size, -1])
            topk_scores, topk_ids = layers.topk(flat_curr_scores,
                                                k=beam_size * 2)
            print("topk ids", topk_ids)
            topk_log_probs = topk_scores * length_penalty
            topk_beam_index = topk_ids // self._trg_vocab_size
            topk_ids = topk_ids % self._trg_vocab_size
            print("topk ids2", topk_ids)
            # use gather as gather_nd, TODO: use gather_nd
            topk_seq = gather_2d_by_gather(alive_seq, topk_beam_index,
                                            beam_size, batch_size)
            print("topk ids", topk_ids)
            reshape_temp = layers.reshape(topk_ids, topk_ids.shape + [1])
            topk_seq = layers.concat([topk_seq, reshape_temp], axis=2)
            states = update_states(states, topk_beam_index, beam_size)
            eos = layers.fill_constant(shape=topk_ids.shape, dtype="int64",
                                       value=eos_id)
            topk_finished = layers.cast(layers.equal(topk_ids, eos), "float32")
            # topk_seq: [batch_size, 2*beam_size, i+1]
            # topk_log_probs, topk_scores, topk_finished: [batch_size, 2*beam_size]
            return topk_seq, topk_log_probs, topk_scores, topk_finished, states

        def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished,
                       states):
            curr_scores += curr_finished * -inf
            _, topk_indexes = layers.topk(curr_scores, k=beam_size)
            alive_seq = gather_2d_by_gather(curr_seq, topk_indexes,
                                            beam_size * 2, batch_size)
            alive_log_probs = gather_2d_by_gather(curr_log_probs, topk_indexes,
                                                  beam_size * 2, batch_size)
            states = update_states(states, topk_indexes, beam_size * 2)
            return alive_seq, alive_log_probs, states

        def grow_finished(finished_seq, finished_scores, finished_flags,
                          curr_seq, curr_scores, curr_finished):
            # finished scores
            finished_seq = layers.concat([
                finished_seq,
                layers.fill_constant(shape=[batch_size, beam_size, 1],
                                     dtype="int64", value=eos_id)
            ], axis=2)
            # Set the scores of the unfinished seq in curr_seq to large
            # negative values
            curr_scores += (1. - curr_finished) * -inf
            # concatenating the sequences and scores along beam axis
            curr_finished_seq = layers.concat([finished_seq, curr_seq], axis=1)
            curr_finished_scores = layers.concat([finished_scores, curr_scores],
                                                 axis=1)
            curr_finished_flags = layers.concat([finished_flags, curr_finished],
                                                axis=1)
            _, topk_indexes = layers.topk(curr_finished_scores, k=beam_size)
            finished_seq = gather_2d_by_gather(curr_finished_seq, topk_indexes,
                                               beam_size * 3, batch_size)
            finished_scores = gather_2d_by_gather(curr_finished_scores,
                                                  topk_indexes, beam_size * 3,
                                                  batch_size)
            finished_flags = gather_2d_by_gather(curr_finished_flags,
                                                 topk_indexes, beam_size * 3,
                                                 batch_size)
            return finished_seq, finished_scores, finished_flags

        for i in range(max_len):
            logits = self._wrap_decoder_layer(
                (trg_word, trg_pos, None, trg_src_attn_bias), enc_output, caches)
            topk_seq, topk_log_probs, topk_scores, topk_finished, states = grow_topk(
                i, logits, alive_seq, alive_log_probs, caches)
            alive_seq, alive_log_probs, states = grow_alive(
                topk_seq, topk_scores, topk_log_probs, topk_finished, states)
            finished_seq, finished_scores, finished_flags = grow_finished(
                finished_seq, finished_scores, finished_flags, topk_seq,
                topk_scores, topk_finished)
            trg_word = layers.reshape(alive_seq[:, :, -1],
                                      [batch_size * beam_size, 1, 1])
            trg_pos = layers.fill_constant(shape=trg_word.shape,
                                           dtype="int64", value=i)
            if early_finish(alive_log_probs, finished_scores,
                            finished_flags).numpy():
                break

        return finished_seq, finished_scores
[ "paddle.fluid.layers.reduce_min", "paddle.fluid.initializer.Constant", "numpy.sin", "numpy.arange", "paddle.fluid.layers.transpose", "paddle.fluid.layers.softmax_with_cross_entropy", "paddle.fluid.layers.concat", "paddle.fluid.layers.reduce_sum", "paddle.fluid.layers.greater_than", "paddle.fluid.layers.range", "paddle.fluid.layers.scale", "paddle.fluid.layers.one_hot", "numpy.power", "paddle.fluid.dygraph.Linear", "paddle.fluid.layers.softmax", "paddle.fluid.layers.fill_constant", "paddle.fluid.initializer.NumpyArrayInitializer", "paddle.fluid.layers.zeros_like", "paddle.fluid.layers.equal", "paddle.fluid.layers.reshape", "numpy.mod", "paddle.fluid.layers.gather", "numpy.cos", "paddle.fluid.layers.elementwise_add", "paddle.fluid.layers.expand", "paddle.fluid.layers.topk", "paddle.fluid.layers.matmul", "paddle.fluid.layers.reduce_max", "numpy.expand_dims", "paddle.fluid.layers.elementwise_min", "numpy.array", "paddle.fluid.initializer.Normal", "paddle.fluid.layers.dropout" ]
[((1145, 1166), 'numpy.arange', 'np.arange', (['n_position'], {}), '(n_position)\n', (1154, 1166), True, 'import numpy as np\n'), ((1427, 1454), 'numpy.expand_dims', 'np.expand_dims', (['position', '(1)'], {}), '(position, 1)\n', (1441, 1454), True, 'import numpy as np\n'), ((1457, 1490), 'numpy.expand_dims', 'np.expand_dims', (['inv_timescales', '(0)'], {}), '(inv_timescales, 0)\n', (1471, 1490), True, 'import numpy as np\n'), ((4008, 4071), 'paddle.fluid.dygraph.Linear', 'Linear', ([], {'input_dim': 'input_hid', 'output_dim': 'd_inner_hid', 'act': '"""relu"""'}), "(input_dim=input_hid, output_dim=d_inner_hid, act='relu')\n", (4014, 4071), False, 'from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard\n'), ((4141, 4188), 'paddle.fluid.dygraph.Linear', 'Linear', ([], {'input_dim': 'd_inner_hid', 'output_dim': 'd_hid'}), '(input_dim=d_inner_hid, output_dim=d_hid)\n', (4147, 4188), False, 'from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard\n'), ((5218, 5287), 'paddle.fluid.dygraph.Linear', 'Linear', ([], {'input_dim': 'd_model', 'output_dim': '(d_key * n_head)', 'bias_attr': '(False)'}), '(input_dim=d_model, output_dim=d_key * n_head, bias_attr=False)\n', (5224, 5287), False, 'from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard\n'), ((5361, 5430), 'paddle.fluid.dygraph.Linear', 'Linear', ([], {'input_dim': 'd_model', 'output_dim': '(d_key * n_head)', 'bias_attr': '(False)'}), '(input_dim=d_model, output_dim=d_key * n_head, bias_attr=False)\n', (5367, 5430), False, 'from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard\n'), ((5504, 5575), 'paddle.fluid.dygraph.Linear', 'Linear', ([], {'input_dim': 'd_model', 'output_dim': '(d_value * n_head)', 'bias_attr': '(False)'}), '(input_dim=d_model, output_dim=d_value * n_head, bias_attr=False)\n', (5510, 5575), False, 'from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard\n'), ((5652, 5720), 'paddle.fluid.dygraph.Linear', 'Linear', ([], {'input_dim': 'd_model', 'output_dim': 'self._d_model', 'bias_attr': '(False)'}), '(input_dim=d_model, output_dim=self._d_model, bias_attr=False)\n', (5658, 5720), False, 'from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard\n'), ((6384, 6482), 'paddle.fluid.layers.reshape', 'layers.reshape', ([], {'x': 'q', 'shape': '[q.shape[0], q.shape[1], self._n_head, self._d_key]', 'inplace': '(False)'}), '(x=q, shape=[q.shape[0], q.shape[1], self._n_head, self.\n _d_key], inplace=False)\n', (6398, 6482), True, 'import paddle.fluid.layers as layers\n'), ((6573, 6622), 'paddle.fluid.layers.transpose', 'layers.transpose', ([], {'x': 'reshaped_q', 'perm': '[0, 2, 1, 3]'}), '(x=reshaped_q, perm=[0, 2, 1, 3])\n', (6589, 6622), True, 'import paddle.fluid.layers as layers\n'), ((6644, 6742), 'paddle.fluid.layers.reshape', 'layers.reshape', ([], {'x': 'k', 'shape': '[k.shape[0], k.shape[1], self._n_head, self._d_key]', 'inplace': '(False)'}), '(x=k, shape=[k.shape[0], k.shape[1], self._n_head, self.\n _d_key], inplace=False)\n', (6658, 6742), True, 'import paddle.fluid.layers as layers\n'), ((6833, 6882), 'paddle.fluid.layers.transpose', 'layers.transpose', ([], {'x': 'reshaped_k', 'perm': '[0, 2, 1, 3]'}), '(x=reshaped_k, perm=[0, 2, 1, 3])\n', (6849, 6882), True, 'import paddle.fluid.layers as layers\n'), ((6904, 7004), 'paddle.fluid.layers.reshape', 'layers.reshape', ([], {'x': 'v', 'shape': '[v.shape[0], v.shape[1], 
self._n_head, self._d_value]', 'inplace': '(False)'}), '(x=v, shape=[v.shape[0], v.shape[1], self._n_head, self.\n _d_value], inplace=False)\n', (6918, 7004), True, 'import paddle.fluid.layers as layers\n'), ((7095, 7144), 'paddle.fluid.layers.transpose', 'layers.transpose', ([], {'x': 'reshaped_v', 'perm': '[0, 2, 1, 3]'}), '(x=reshaped_v, perm=[0, 2, 1, 3])\n', (7111, 7144), True, 'import paddle.fluid.layers as layers\n'), ((7493, 7588), 'paddle.fluid.layers.matmul', 'layers.matmul', ([], {'x': 'transpose_q', 'y': 'transpose_k', 'transpose_y': '(True)', 'alpha': '(self._d_model ** -0.5)'}), '(x=transpose_q, y=transpose_k, transpose_y=True, alpha=self.\n _d_model ** -0.5)\n', (7506, 7588), True, 'import paddle.fluid.layers as layers\n'), ((7751, 7774), 'paddle.fluid.layers.softmax', 'layers.softmax', (['product'], {}), '(product)\n', (7765, 7774), True, 'import paddle.fluid.layers as layers\n'), ((8264, 8304), 'paddle.fluid.layers.transpose', 'layers.transpose', (['out'], {'perm': '[0, 2, 1, 3]'}), '(out, perm=[0, 2, 1, 3])\n', (8280, 8304), True, 'import paddle.fluid.layers as layers\n'), ((8325, 8420), 'paddle.fluid.layers.reshape', 'layers.reshape', ([], {'x': 'trans_x', 'shape': '[0, 0, trans_x.shape[2] * trans_x.shape[3]]', 'inplace': '(False)'}), '(x=trans_x, shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],\n inplace=False)\n', (8339, 8420), True, 'import paddle.fluid.layers as layers\n'), ((14495, 14555), 'paddle.fluid.layers.scale', 'layers.scale', ([], {'x': 'src_word_emb', 'scale': '(self._src_emb_dim ** 0.5)'}), '(x=src_word_emb, scale=self._src_emb_dim ** 0.5)\n', (14507, 14555), True, 'import paddle.fluid.layers as layers\n'), ((14801, 14878), 'paddle.fluid.layers.reshape', 'layers.reshape', (['enc_input'], {'shape': '[enc_input.shape[0], enc_input.shape[1], -1]'}), '(enc_input, shape=[enc_input.shape[0], enc_input.shape[1], -1])\n', (14815, 14878), True, 'import paddle.fluid.layers as layers\n'), ((24251, 24326), 'paddle.fluid.layers.reshape', 'layers.reshape', (['dec_output'], {'shape': '[-1, dec_output.shape[-1]]', 'inplace': '(False)'}), '(dec_output, shape=[-1, dec_output.shape[-1]], inplace=False)\n', (24265, 24326), True, 'import paddle.fluid.layers as layers\n'), ((27277, 27401), 'paddle.fluid.layers.softmax_with_cross_entropy', 'layers.softmax_with_cross_entropy', ([], {'logits': 'predict', 'label': 'label_out', 'soft_label': '(True if self._label_smooth_eps else False)'}), '(logits=predict, label=label_out,\n soft_label=True if self._label_smooth_eps else False)\n', (27310, 27401), True, 'import paddle.fluid.layers as layers\n'), ((27493, 27525), 'paddle.fluid.layers.reduce_sum', 'layers.reduce_sum', (['weighted_cost'], {}), '(weighted_cost)\n', (27510, 27525), True, 'import paddle.fluid.layers as layers\n'), ((27546, 27572), 'paddle.fluid.layers.reduce_sum', 'layers.reduce_sum', (['weights'], {}), '(weights)\n', (27563, 27572), True, 'import paddle.fluid.layers as layers\n'), ((29483, 29532), 'paddle.fluid.layers.expand', 'layers.expand', (['initial_log_probs', '[batch_size, 1]'], {}), '(initial_log_probs, [batch_size, 1])\n', (29496, 29532), True, 'import paddle.fluid.layers as layers\n'), ((29837, 29884), 'paddle.fluid.layers.expand', 'layers.expand', (['finished_scores', '[batch_size, 1]'], {}), '(finished_scores, [batch_size, 1])\n', (29850, 29884), True, 'import paddle.fluid.layers as layers\n'), ((30054, 30088), 'paddle.fluid.layers.zeros_like', 'layers.zeros_like', (['finished_scores'], {}), '(finished_scores)\n', (30071, 30088), True, 'import 
paddle.fluid.layers as layers\n'), ((30250, 30317), 'paddle.fluid.layers.reshape', 'layers.reshape', (['alive_seq[:, :, -1]', '[batch_size * beam_size, 1, 1]'], {}), '(alive_seq[:, :, -1], [batch_size * beam_size, 1, 1])\n', (30264, 30317), True, 'import paddle.fluid.layers as layers\n'), ((30370, 30397), 'paddle.fluid.layers.zeros_like', 'layers.zeros_like', (['trg_word'], {}), '(trg_word)\n', (30387, 30397), True, 'import paddle.fluid.layers as layers\n'), ((1355, 1380), 'numpy.arange', 'np.arange', (['num_timescales'], {}), '(num_timescales)\n', (1364, 1380), True, 'import numpy as np\n'), ((1529, 1548), 'numpy.sin', 'np.sin', (['scaled_time'], {}), '(scaled_time)\n', (1535, 1548), True, 'import numpy as np\n'), ((1550, 1569), 'numpy.cos', 'np.cos', (['scaled_time'], {}), '(scaled_time)\n', (1556, 1569), True, 'import numpy as np\n'), ((4441, 4511), 'paddle.fluid.layers.dropout', 'layers.dropout', (['hidden'], {'dropout_prob': 'self._dropout_rate', 'is_test': '(False)'}), '(hidden, dropout_prob=self._dropout_rate, is_test=False)\n', (4455, 4511), True, 'import paddle.fluid.layers as layers\n'), ((7256, 7301), 'paddle.fluid.layers.concat', 'layers.concat', (['[cache_k, transpose_k]'], {'axis': '(2)'}), '([cache_k, transpose_k], axis=2)\n', (7269, 7301), True, 'import paddle.fluid.layers as layers\n'), ((7328, 7373), 'paddle.fluid.layers.concat', 'layers.concat', (['[cache_v, transpose_v]'], {'axis': '(2)'}), '([cache_v, transpose_v], axis=2)\n', (7341, 7373), True, 'import paddle.fluid.layers as layers\n'), ((7835, 7906), 'paddle.fluid.layers.dropout', 'layers.dropout', (['weights'], {'dropout_prob': 'self._dropout_rate', 'is_test': '(False)'}), '(weights, dropout_prob=self._dropout_rate, is_test=False)\n', (7849, 7906), True, 'import paddle.fluid.layers as layers\n'), ((8013, 8055), 'paddle.fluid.layers.matmul', 'layers.matmul', (['weights_droped', 'transpose_v'], {}), '(weights_droped, transpose_v)\n', (8026, 8055), True, 'import paddle.fluid.layers as layers\n'), ((8088, 8123), 'paddle.fluid.layers.matmul', 'layers.matmul', (['weights', 'transpose_v'], {}), '(weights, transpose_v)\n', (8101, 8123), True, 'import paddle.fluid.layers as layers\n'), ((14896, 14969), 'paddle.fluid.layers.dropout', 'layers.dropout', (['enc_input'], {'dropout_prob': 'self._dropout_rate', 'is_test': '(False)'}), '(enc_input, dropout_prob=self._dropout_rate, is_test=False)\n', (14910, 14969), True, 'import paddle.fluid.layers as layers\n'), ((23560, 23629), 'paddle.fluid.dygraph.Linear', 'Linear', ([], {'input_dim': 'd_model', 'output_dim': 'trg_vocab_size', 'bias_attr': '(False)'}), '(input_dim=d_model, output_dim=trg_vocab_size, bias_attr=False)\n', (23566, 23629), False, 'from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, to_variable, Layer, guard\n'), ((24471, 24578), 'paddle.fluid.layers.matmul', 'layers.matmul', ([], {'x': 'dec_output_reshape', 'y': 'self._prepare_decoder_layer._input_emb.weight', 'transpose_y': '(True)'}), '(x=dec_output_reshape, y=self._prepare_decoder_layer.\n _input_emb.weight, transpose_y=True)\n', (24484, 24578), True, 'import paddle.fluid.layers as layers\n'), ((24829, 24852), 'paddle.fluid.layers.softmax', 'layers.softmax', (['predict'], {}), '(predict)\n', (24843, 24852), True, 'import paddle.fluid.layers as layers\n'), ((28668, 28731), 'paddle.fluid.layers.reshape', 'layers.reshape', (['tensor', '([tensor.shape[0], 1] + tensor.shape[1:])'], {}), '(tensor, [tensor.shape[0], 1] + tensor.shape[1:])\n', (28682, 28731), True, 'import paddle.fluid.layers as layers\n'), 
((28872, 28904), 'paddle.fluid.layers.expand', 'layers.expand', (['tensor', 'tile_dims'], {}), '(tensor, tile_dims)\n', (28885, 28904), True, 'import paddle.fluid.layers as layers\n'), ((28961, 29008), 'paddle.fluid.layers.reshape', 'layers.reshape', (['tensor', '([-1] + tensor.shape[2:])'], {}), '(tensor, [-1] + tensor.shape[2:])\n', (28975, 29008), True, 'import paddle.fluid.layers as layers\n'), ((29395, 29456), 'numpy.array', 'np.array', (['[[0.0] + [-inf] * (beam_size - 1)]'], {'dtype': '"""float32"""'}), "([[0.0] + [-inf] * (beam_size - 1)], dtype='float32')\n", (29403, 29456), True, 'import numpy as np\n'), ((29762, 29809), 'numpy.array', 'np.array', (['[[-inf] * beam_size]'], {'dtype': '"""float32"""'}), "([[-inf] * beam_size], dtype='float32')\n", (29770, 29809), True, 'import numpy as np\n'), ((32115, 32146), 'paddle.fluid.layers.gather', 'layers.gather', (['flat_tensor', 'idx'], {}), '(flat_tensor, idx)\n', (32128, 32146), True, 'import paddle.fluid.layers as layers\n'), ((32525, 32563), 'numpy.power', 'np.power', (['((5.0 + max_len) / 6.0)', 'alpha'], {}), '((5.0 + max_len) / 6.0, alpha)\n', (32533, 32563), True, 'import numpy as np\n'), ((33048, 33108), 'paddle.fluid.layers.reduce_min', 'layers.reduce_min', (['(finished_scores * finished_in_finished)', '(1)'], {}), '(finished_scores * finished_in_finished, 1)\n', (33065, 33108), True, 'import paddle.fluid.layers as layers\n'), ((33838, 33889), 'paddle.fluid.layers.reshape', 'layers.reshape', (['logits', '[batch_size, beam_size, -1]'], {}), '(logits, [batch_size, beam_size, -1])\n', (33852, 33889), True, 'import paddle.fluid.layers as layers\n'), ((33991, 34054), 'paddle.fluid.layers.elementwise_add', 'layers.elementwise_add', (['candidate_log_probs', 'alive_log_probs', '(0)'], {}), '(candidate_log_probs, alive_log_probs, 0)\n', (34013, 34054), True, 'import paddle.fluid.layers as layers\n'), ((34132, 34170), 'numpy.power', 'np.power', (['(5.0 + (i + 1.0) / 6.0)', 'alpha'], {}), '(5.0 + (i + 1.0) / 6.0, alpha)\n', (34140, 34170), True, 'import numpy as np\n'), ((34255, 34300), 'paddle.fluid.layers.reshape', 'layers.reshape', (['curr_scores', '[batch_size, -1]'], {}), '(curr_scores, [batch_size, -1])\n', (34269, 34300), True, 'import paddle.fluid.layers as layers\n'), ((34338, 34384), 'paddle.fluid.layers.topk', 'layers.topk', (['flat_curr_scores'], {'k': '(beam_size * 2)'}), '(flat_curr_scores, k=beam_size * 2)\n', (34349, 34384), True, 'import paddle.fluid.layers as layers\n'), ((34976, 35022), 'paddle.fluid.layers.reshape', 'layers.reshape', (['topk_ids', '(topk_ids.shape + [1])'], {}), '(topk_ids, topk_ids.shape + [1])\n', (34990, 35022), True, 'import paddle.fluid.layers as layers\n'), ((35046, 35093), 'paddle.fluid.layers.concat', 'layers.concat', (['[topk_seq, reshape_temp]'], {'axis': '(2)'}), '([topk_seq, reshape_temp], axis=2)\n', (35059, 35093), True, 'import paddle.fluid.layers as layers\n'), ((35233, 35304), 'paddle.fluid.layers.fill_constant', 'layers.fill_constant', ([], {'shape': 'topk_ids.shape', 'dtype': '"""int64"""', 'value': 'eos_id'}), "(shape=topk_ids.shape, dtype='int64', value=eos_id)\n", (35253, 35304), True, 'import paddle.fluid.layers as layers\n'), ((35869, 35906), 'paddle.fluid.layers.topk', 'layers.topk', (['curr_scores'], {'k': 'beam_size'}), '(curr_scores, k=beam_size)\n', (35880, 35906), True, 'import paddle.fluid.layers as layers\n'), ((37068, 37115), 'paddle.fluid.layers.concat', 'layers.concat', (['[finished_seq, curr_seq]'], {'axis': '(1)'}), '([finished_seq, curr_seq], axis=1)\n', (37081, 
37115), True, 'import paddle.fluid.layers as layers\n'), ((37151, 37204), 'paddle.fluid.layers.concat', 'layers.concat', (['[finished_scores, curr_scores]'], {'axis': '(1)'}), '([finished_scores, curr_scores], axis=1)\n', (37164, 37204), True, 'import paddle.fluid.layers as layers\n'), ((37288, 37342), 'paddle.fluid.layers.concat', 'layers.concat', (['[finished_flags, curr_finished]'], {'axis': '(1)'}), '([finished_flags, curr_finished], axis=1)\n', (37301, 37342), True, 'import paddle.fluid.layers as layers\n'), ((37421, 37467), 'paddle.fluid.layers.topk', 'layers.topk', (['curr_finished_scores'], {'k': 'beam_size'}), '(curr_finished_scores, k=beam_size)\n', (37432, 37467), True, 'import paddle.fluid.layers as layers\n'), ((38791, 38858), 'paddle.fluid.layers.reshape', 'layers.reshape', (['alive_seq[:, :, -1]', '[batch_size * beam_size, 1, 1]'], {}), '(alive_seq[:, :, -1], [batch_size * beam_size, 1, 1])\n', (38805, 38858), True, 'import paddle.fluid.layers as layers\n'), ((38919, 38985), 'paddle.fluid.layers.fill_constant', 'layers.fill_constant', ([], {'shape': 'trg_word.shape', 'dtype': '"""int64"""', 'value': 'i'}), "(shape=trg_word.shape, dtype='int64', value=i)\n", (38939, 38985), True, 'import paddle.fluid.layers as layers\n'), ((1621, 1640), 'numpy.mod', 'np.mod', (['channels', '(2)'], {}), '(channels, 2)\n', (1627, 1640), True, 'import numpy as np\n'), ((2360, 2388), 'paddle.fluid.layers.elementwise_min', 'layers.elementwise_min', (['a', 'b'], {}), '(a, b)\n', (2382, 2388), True, 'import paddle.fluid.layers as layers\n'), ((29586, 29623), 'numpy.array', 'np.array', (['[[[bos_id]]]'], {'dtype': '"""int64"""'}), "([[[bos_id]]], dtype='int64')\n", (29594, 29623), True, 'import numpy as np\n'), ((29941, 29978), 'numpy.array', 'np.array', (['[[[bos_id]]]'], {'dtype': '"""int64"""'}), "([[[bos_id]]], dtype='int64')\n", (29949, 29978), True, 'import numpy as np\n'), ((30729, 30847), 'paddle.fluid.layers.fill_constant', 'layers.fill_constant', ([], {'shape': '[batch_size * beam_size, self.n_head, 0, self.d_key]', 'dtype': 'enc_output.dtype', 'value': '(0)'}), '(shape=[batch_size * beam_size, self.n_head, 0, self.\n d_key], dtype=enc_output.dtype, value=0)\n', (30749, 30847), True, 'import paddle.fluid.layers as layers\n'), ((30922, 31042), 'paddle.fluid.layers.fill_constant', 'layers.fill_constant', ([], {'shape': '[batch_size * beam_size, self.n_head, 0, self.d_value]', 'dtype': 'enc_output.dtype', 'value': '(0)'}), '(shape=[batch_size * beam_size, self.n_head, 0, self.\n d_value], dtype=enc_output.dtype, value=0)\n', (30942, 31042), True, 'import paddle.fluid.layers as layers\n'), ((31790, 31835), 'paddle.fluid.layers.range', 'layers.range', (['(0)', 'batch_size', '(1)'], {'dtype': '"""int64"""'}), "(0, batch_size, 1, dtype='int64')\n", (31802, 31835), True, 'import paddle.fluid.layers as layers\n'), ((31998, 32044), 'paddle.fluid.layers.elementwise_add', 'layers.elementwise_add', (['beam_idx', 'batch_idx', '(0)'], {}), '(beam_idx, batch_idx, 0)\n', (32020, 32044), True, 'import paddle.fluid.layers as layers\n'), ((32175, 32271), 'paddle.fluid.layers.reshape', 'layers.reshape', (['new_flat_tensor'], {'shape': '([batch_size, beam_idx.shape[1]] + tensor_nd.shape[2:])'}), '(new_flat_tensor, shape=[batch_size, beam_idx.shape[1]] +\n tensor_nd.shape[2:])\n', (32189, 32271), True, 'import paddle.fluid.layers as layers\n'), ((33591, 33679), 'paddle.fluid.layers.greater_than', 'layers.greater_than', (['lowest_score_of_fininshed_in_finished', 'lower_bound_alive_scores'], {}), 
'(lowest_score_of_fininshed_in_finished,\n lower_bound_alive_scores)\n', (33610, 33679), True, 'import paddle.fluid.layers as layers\n'), ((33935, 33965), 'paddle.fluid.layers.softmax', 'layers.softmax', (['logits'], {'axis': '(2)'}), '(logits, axis=2)\n', (33949, 33965), True, 'import paddle.fluid.layers as layers\n'), ((35423, 35450), 'paddle.fluid.layers.equal', 'layers.equal', (['topk_ids', 'eos'], {}), '(topk_ids, eos)\n', (35435, 35450), True, 'import paddle.fluid.layers as layers\n'), ((27111, 27166), 'paddle.fluid.layers.one_hot', 'layers.one_hot', ([], {'input': 'label', 'depth': 'self._trg_vocab_size'}), '(input=label, depth=self._trg_vocab_size)\n', (27125, 27166), True, 'import paddle.fluid.layers as layers\n'), ((33478, 33520), 'paddle.fluid.layers.reduce_max', 'layers.reduce_max', (['finished_in_finished', '(1)'], {}), '(finished_in_finished, 1)\n', (33495, 33520), True, 'import paddle.fluid.layers as layers\n'), ((36588, 36676), 'paddle.fluid.layers.fill_constant', 'layers.fill_constant', ([], {'shape': '[batch_size, beam_size, 1]', 'dtype': '"""int64"""', 'value': 'eos_id'}), "(shape=[batch_size, beam_size, 1], dtype='int64', value\n =eos_id)\n", (36608, 36676), True, 'import paddle.fluid.layers as layers\n'), ((13577, 13627), 'paddle.fluid.initializer.Normal', 'fluid.initializer.Normal', (['(0.0)', '(src_emb_dim ** -0.5)'], {}), '(0.0, src_emb_dim ** -0.5)\n', (13601, 13627), True, 'import paddle.fluid as fluid\n'), ((13935, 13983), 'paddle.fluid.initializer.NumpyArrayInitializer', 'fluid.initializer.NumpyArrayInitializer', (['pos_inp'], {}), '(pos_inp)\n', (13974, 13983), True, 'import paddle.fluid as fluid\n'), ((3601, 3662), 'paddle.fluid.layers.dropout', 'layers.dropout', (['out'], {'dropout_prob': 'dropout_rate', 'is_test': '(False)'}), '(out, dropout_prob=dropout_rate, is_test=False)\n', (3615, 3662), True, 'import paddle.fluid.layers as layers\n'), ((2877, 2908), 'paddle.fluid.initializer.Constant', 'fluid.initializer.Constant', (['(1.0)'], {}), '(1.0)\n', (2903, 2908), True, 'import paddle.fluid as fluid\n'), ((2993, 3024), 'paddle.fluid.initializer.Constant', 'fluid.initializer.Constant', (['(0.0)'], {}), '(0.0)\n', (3019, 3024), True, 'import paddle.fluid as fluid\n')]
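The sample above only defines the model classes. As a rough illustration of how such a dygraph model might be driven, here is a minimal sketch; all dimension values are made up, and the sample's truncated header is assumed to define word_emb_param_names, pos_enc_param_names and position_encoding_init, so treat this as scaffolding rather than the sample's own test harness.

# Hypothetical driver for the TransFormer sample above; not part of the
# original file. Hyper-parameter values are illustrative only.
import paddle.fluid as fluid

with fluid.dygraph.guard():
    model = TransFormer(
        src_vocab_size=1000, trg_vocab_size=1000, max_length=50, n_layer=2,
        n_head=4, d_key=16, d_value=16, d_model=64, d_inner_hid=128,
        prepostprocess_dropout=0.1, attention_dropout=0.1, relu_dropout=0.1,
        preprocess_cmd="n", postprocess_cmd="da", weight_sharing=True)
    # enc_inputs/dec_inputs are (word ids, position ids, attention bias)
    # tuples built from a real batch; see the forward() signatures above.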
#!/usr/bin/env python3

import re


def rearrange_name(name):
    result = re.search(r"^([\w .]*), ([\w .]*)$", name)
    if result is None:
        return result
    return "{} {}".format(result[2], result[1])
[ "re.search" ]
[((70, 113), 're.search', 're.search', (['"""^([\\\\w .]*), ([\\\\w .]*)$"""', 'name'], {}), "('^([\\\\w .]*), ([\\\\w .]*)$', name)\n", (79, 113), False, 'import re\n')]
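A quick illustration of the regex's behavior; the example names are made up.

# Hypothetical usage of rearrange_name() above.
print(rearrange_name("Lovelace, Ada"))    # -> "Ada Lovelace"
print(rearrange_name("Ritchie, Dennis"))  # -> "Dennis Ritchie"
print(rearrange_name("Madonna"))          # -> None (no comma, so the regex does not match)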
#!/usr/bin/env python3
# To the extent possible under law, the libtcod maintainers have waived all
# copyright and related or neighboring rights for this example. This work is
# published from: United States.
# https://creativecommons.org/publicdomain/zero/1.0/
"""A demonstration of event handling using the tcod.event module."""
from typing import List

import tcod

WIDTH, HEIGHT = 720, 480


def main() -> None:
    """Example program for tcod.event"""
    event_log: List[str] = []
    motion_desc = ""
    with tcod.context.new(width=WIDTH, height=HEIGHT) as context:
        console = context.new_console()
        while True:
            # Display all event items.
            console.clear()
            console.print(0, console.height - 1, motion_desc)
            for i, item in enumerate(event_log[::-1]):
                y = console.height - 3 - i
                if y < 0:
                    break
                console.print(0, y, item)
            context.present(console, integer_scaling=True)
            # Handle events.
            for event in tcod.event.wait():
                context.convert_event(event)  # Set tile coordinates for event.
                print(repr(event))
                if isinstance(event, tcod.event.Quit):
                    raise SystemExit()
                if isinstance(event, tcod.event.WindowResized) and event.type == "WINDOWRESIZED":
                    console = context.new_console()
                if isinstance(event, tcod.event.MouseMotion):
                    motion_desc = str(event)
                else:
                    event_log.append(str(event))


if __name__ == "__main__":
    main()
[ "tcod.event.wait", "tcod.context.new" ]
[((522, 566), 'tcod.context.new', 'tcod.context.new', ([], {'width': 'WIDTH', 'height': 'HEIGHT'}), '(width=WIDTH, height=HEIGHT)\n', (538, 566), False, 'import tcod\n'), ((1074, 1091), 'tcod.event.wait', 'tcod.event.wait', ([], {}), '()\n', (1089, 1091), False, 'import tcod\n')]
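Purely as an illustration of the same event API, and not part of the sample, the loop above could be extended to react to a specific key; this sketch assumes the older tcod constant names that match the sample's API era.

# Hypothetical extension: quit on Escape inside the event loop above.
for event in tcod.event.wait():
    if isinstance(event, tcod.event.KeyDown) and event.sym == tcod.event.K_ESCAPE:
        raise SystemExit()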
from django.urls import path, include
from rest_framework import routers

from . import views

router = routers.DefaultRouter()
router.register(r'js_error', views.JSErrorViewSet)

app_name = "js_error_logger"

urlpatterns = [
    path('', include(router.urls)),
]
[ "rest_framework.routers.DefaultRouter", "django.urls.include" ]
[((105, 128), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ([], {}), '()\n', (126, 128), False, 'from rest_framework import routers\n'), ((239, 259), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (246, 259), False, 'from django.urls import path, include\n')]
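The ViewSet referenced here lives in a views.py that is not part of this sample; a plausible minimal counterpart, purely as an assumption, would be:

# Hypothetical views.py for the router registration above; the JSError model
# and JSErrorSerializer names are assumptions, not part of the sample.
from rest_framework import viewsets

from .models import JSError
from .serializers import JSErrorSerializer


class JSErrorViewSet(viewsets.ModelViewSet):
    queryset = JSError.objects.all()
    serializer_class = JSErrorSerializer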
from aiohttp import web

import tomodachi
from tomodachi.transport.http import http


@tomodachi.service
class HttpServiceOne(tomodachi.Service):
    name = 'test_http1'
    options = {
        'http': {
            'port': 54322,
        }
    }

    @http('GET', r'/test/?')
    async def test(self, request: web.Request) -> str:
        return 'test'


@tomodachi.service
class HttpServiceTwo(tomodachi.Service):
    name = 'test_http2'
    options = {
        'http': {
            'port': 54322,
        }
    }

    @http('GET', r'/test/?')
    async def test(self, request: web.Request) -> str:
        return 'test'
[ "tomodachi.transport.http.http" ]
[((252, 274), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/test/?"""'], {}), "('GET', '/test/?')\n", (256, 274), False, 'from tomodachi.transport.http import http\n'), ((522, 544), 'tomodachi.transport.http.http', 'http', (['"""GET"""', '"""/test/?"""'], {}), "('GET', '/test/?')\n", (526, 544), False, 'from tomodachi.transport.http import http\n')]
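Both services bind the same port, so this sample presumably exercises port-conflict handling; assuming whichever service wins is running locally, the endpoint can be probed like this.

# Hypothetical client for the service above, assuming it is running locally.
import urllib.request

with urllib.request.urlopen('http://127.0.0.1:54322/test') as resp:
    print(resp.read().decode())  # 'test'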
'''
Copyright (c) 2015 by <NAME>

This file is part of Statistical Parameter Estimation Tool (SPOTPY).

:author: <NAME>
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import multiprocessing as mp


class ForEach(object):
    def __init__(self, process):
        self.size = mp.cpu_count()
        self.pool = mp.Pool()
        self.process = process

    def is_idle(self):
        return False

    def terminate(self):
        # A pool must be closed before it can be joined.
        self.pool.close()
        self.pool.join()

    def __call__(self, jobs):
        for result in self.pool.imap_unordered(self.process, jobs):
            yield result


def proc(j):
    # busy work so each job is non-trivial (`xrange` in the original is
    # Python 2 only; `range` keeps the demo runnable on Python 3)
    for i in range(10000):
        q = i, i ** 2
    return j, j ** 2


if __name__ == '__main__':
    fe = ForEach(proc)
    jobs = range(10000)
    for j, q in fe(jobs):
        print(j)
[ "multiprocessing.Pool", "multiprocessing.cpu_count" ]
[((378, 392), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (390, 392), True, 'import multiprocessing as mp\n'), ((413, 422), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (420, 422), True, 'import multiprocessing as mp\n')]
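One design constraint worth showing: mp.Pool pickles the worker callable, so the function handed to ForEach must be importable at module level (a lambda would fail), and on Windows the call must run under the __main__ guard just like the demo above. A small hypothetical variation:

# Hypothetical second use of ForEach; `square` is a made-up worker.
def square(j):
    return j, j ** 2

if __name__ == '__main__':
    results = sorted(ForEach(square)(range(5)))
    print(results)  # [(0, 0), (1, 1), (2, 4), (3, 9), (4, 16)]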
#!/usr/bin/env python

"""
This file will create a config file for use with the
TRExFitter/TtHFitter package

*** REQUIRES PYTHON3 ***

author: <NAME> <<EMAIL>>
"""

import sys
import subprocess

import aidapy.meta

f = open('aida.config', 'w')

print('Job: "myAIDAfit"', file=f)
print(' CmeLabel: "13 TeV"', file=f)
print(' POI: "mu_TTBAR"', file=f)
print(' ReadFrom: HIST', file=f)
print(' HistoPath: "/Users/ddavis/Software/aidapy"', file=f)
print(' Lumi: 1.0', file=f)
print(' LumiLabel: "36.1 fb^{-1}"', file=f)
print(' MCstatThreshold: 0.01', file=f)
print(' ImageFormat: "pdf"', file=f)
print(' SystControlPlots: TRUE', file=f)
print(' DebugLevel: 1', file=f)
print(' SystPruningShape: .001', file=f)
print(' SystPruningNorm: .001', file=f)
print(' RankingMaxNP: 20', file=f)
print(' RankingPlot: Systs', file=f)
print('', file=f)
print('Fit: "fit"', file=f)
print(' FitType: SPLUSB', file=f)
print(' FitRegion: CRSR', file=f)
print(' FitBlind: TRUE', file=f)
print(' NumCPU: 2', file=f)
print('', file=f)
print('Region: "met_2pj"', file=f)
print(' Type: SIGNAL', file=f)
print(' HistoFile: "aida_histograms_met_2pj"', file=f)
print(' VariableTitle: "#it{E}_{T}^{miss} [GeV]"', file=f)
print(' ShortLabel: "2pj"', file=f)
print(' Label: "#geq 2 jet"', file=f)
print(' TexLabel: "$N_{\mathrm{jets}} \geq 2$"', file=f)
print('', file=f)
print('Region: "met_1j1b"', file=f)
print(' Type: SIGNAL', file=f)
print(' HistoFile: "aida_histograms_met_1j1b"', file=f)
print(' VariableTitle: "#it{E}_{T}^{miss} [GeV]"', file=f)
print(' ShortLabel: "1j"', file=f)
print(' Label: "1 jet"', file=f)
print(' TexLabel: "$N_{\mathrm{bjets}} = 1$"', file=f)
print('', file=f)
print('Region: "met_0j"', file=f)
print(' Type: SIGNAL', file=f)
print(' HistoFile: "aida_histograms_met_0j"', file=f)
print(' VariableTitle: "#it{E}_{T}^{miss} [GeV]"', file=f)
print(' ShortLabel: "No jets"', file=f)
print(' Label: "No jets"', file=f)
print(' TexLabel: "$N_{\mathrm{jets}} = 0$"', file=f)
print('', file=f)
print('Sample: "Data"', file=f)
print(' Title: "Data"', file=f)
print(' Type: DATA', file=f)
print(' HistoName: "Data"', file=f)
print(' Regions: "met_2pj","met_0j","met_1j1b"', file=f)
print('', file=f)
print('Sample: "ttbar"', file=f)
print(' Type: SIGNAL', file=f)
print(' Title: "t#bar{t}"', file=f)
print(' FillColor: 0', file=f)
print(' LineColor: 1', file=f)
print(' HistoName: "ttbar_FULL_main"', file=f)
print(' NormFactor: "mu_TTBAR",1,-10,100', file=f)
print(' Regions: "met_2pj","met_0j","met_1j1b"', file=f)
print('', file=f)
print('Sample: "WtHSGHOST"', file=f)
print(' Type: GHOST', file=f)
print(' Title: "WtHSGHOST"', file=f)
print(' HistoName: "Wt_FULL_sysHS1"', file=f)
print(' Regions: "met_2pj","met_0j","met_1j1b"', file=f)
print('', file=f)
print('Sample: "Wt"', file=f)
print(' Type: SIGNAL', file=f)
print(' Title: "Wt"', file=f)
print(' FillColor: 62', file=f)
print(' LineColor: 1', file=f)
print(' HistoName: "Wt_FULL_main"', file=f)
print(' NormFactor: "mu_WT",1,-10,100', file=f)
print(' Regions: "met_2pj","met_0j","met_1j1b"', file=f)
print('', file=f)
print('Sample: "Ztautau"', file=f)
print(' Type: SIGNAL', file=f)
print(' Title: "Z#rightarrow#tau#tau"', file=f)
print(' FillColor: 32', file=f)
print(' LineColor: 1', file=f)
print(' HistoName: "Ztautau_FULL_main"', file=f)
print(' NormFactor: "mu_ZTAUTAU",1,-10,100', file=f)
print(' Regions: "met_2pj","met_0j"', file=f)
print('', file=f)
print('Sample: "WW"', file=f)
print(' Type: SIGNAL', file=f)
print(' Title: "WW"', file=f)
print(' FillColor: 42', file=f)
print(' LineColor: 1', file=f)
print(' HistoName: "WW_FULL_main"', file=f)
print(' NormFactor: "mu_WW",1,-10,100', file=f)
print(' Regions: "met_2pj","met_0j"', file=f)
print('', file=f)
print('Sample: "Diboson"', file=f)
print(' Type: BACKGROUND', file=f)
print(' Title: "Diboson"', file=f)
print(' FillColor: 22', file=f)
print(' LineColor: 1', file=f)
print(' HistoName: "Diboson_FULL_main"', file=f)
print(' Regions: "met_2pj","met_0j"', file=f)
print('', file=f)
print('Sample: "Fakes"', file=f)
print(' Type: BACKGROUND', file=f)
print(' Title: "Fakes"', file=f)
print(' FillColor: 72', file=f)
print(' LineColor: 1', file=f)
print(' HistoName: "Fakes_FULL_main"', file=f)
print(' Regions: "met_2pj","met_0j"', file=f)
print('', file=f)
print('Sample: "RareSM"', file=f)
print(' Type: BACKGROUND', file=f)
print(' Title: "RareSM"', file=f)
print(' FillColor: 12', file=f)
print(' LineColor: 1', file=f)
print(' HistoName: "RareSM_FULL_main"', file=f)
print(' Regions: "met_2pj","met_0j"', file=f)
print('', file=f)
print('NormFactor: "mu_TTBAR"', file=f)
print(' Title: "#mu_{t#bar{t}}"', file=f)
print(' Nominal: 1', file=f)
print(' Min: -10', file=f)
print(' Max: 100', file=f)
print(' Samples: ttbar', file=f)
print('', file=f)
print('NormFactor: "mu_WT"', file=f)
print(' Title: "#mu_{Wt}"', file=f)
print(' Nominal: 1', file=f)
print(' Min: -10', file=f)
print(' Max: 100', file=f)
print(' Samples: Wt', file=f)
print('', file=f)
print('NormFactor: "mu_ZTAUTAU"', file=f)
print(' Title: "#mu_{Z#rightarrow#tau#tau}"', file=f)
print(' Nominal: 1', file=f)
print(' Min: -10', file=f)
print(' Max: 100', file=f)
print(' Samples: Ztautau', file=f)
print('', file=f)
print('NormFactor: "mu_WW"', file=f)
print(' Title: "#mu_{WW}"', file=f)
print(' Nominal: 1', file=f)
print(' Min: -10', file=f)
print(' Max: 100', file=f)
print(' Samples: WW', file=f)
print('', file=f)
print('Systematic: "Luminosity"', file=f)
print(' Title: "Luminosity"', file=f)
print(' Type: OVERALL', file=f)
print(' OverallUp: 0.03', file=f)
print(' OverallDown: -0.03', file=f)
print(' Samples: ttbar,Wt,WW,Ztautau,Diboson,Fakes,RareSM', file=f)
print(' Category: Instrumental', file=f)
print('', file=f)
print('Systematic: "ttbar_AR"', file=f)
print(' Title: "ttbar Additional Radiation"', file=f)
print(' Type: HISTO', file=f)
print(' Samples: ttbar', file=f)
print(' HistoNameUp: "ttbar_FULL_sysARup"', file=f)
print(' HistoNameDown: "ttbar_FULL_sysARdown"', file=f)
print(' Regions: met_2pj,met_0j,met_1j1b', file=f)
print(' Symmetrisation: TWOSIDED', file=f)
print(' Category: Theory', file=f)
print('', file=f)
print('Systematic: "ttbar_HS"', file=f)
print(' Title: "ttbar Hard Scatter"', file=f)
print(' Type: HISTO', file=f)
print(' Samples: ttbar', file=f)
print(' HistoNameUp: "ttbar_FULL_sysHS"', file=f)
print(' Symmetrisation: ONESIDED', file=f)
print(' Regions: met_2pj,met_0j,met_1j1b', file=f)
print(' Category: Theory', file=f)
print('', file=f)
print('Systematic: "ttbar_FH"', file=f)
print(' Title: "ttbar Factorization/Hadronization"', file=f)
print(' Type: HISTO', file=f)
print(' Samples: ttbar', file=f)
print(' HistoNameUp: "ttbar_FULL_sysFH"', file=f)
print(' Symmetrisation: ONESIDED', file=f)
print(' Regions: met_2pj,met_0j,met_1j1b', file=f)
print(' Category: Theory', file=f)
print('', file=f)
print('Systematic: "Wt_AR"', file=f)
print(' Title: "Wt Additional Radiation"', file=f)
print(' Type: HISTO', file=f)
print(' Samples: Wt', file=f)
print(' HistoNameUp: "Wt_FULL_sysARup"', file=f)
print(' HistoNameDown: "Wt_FULL_sysARdown"', file=f)
print(' Regions: met_2pj,met_0j,met_1j1b', file=f)
print(' Symmetrisation: TWOSIDED', file=f)
print(' Category: Theory', file=f)
print('', file=f)
print('Systematic: "Wt_DR_DS"', file=f)
print(' Title: "Wt DR vs DS"', file=f)
print(' Type: HISTO', file=f)
print(' Samples: Wt', file=f)
print(' HistoNameUp: "Wt_FULL_sysDS"', file=f)
print(' Symmetrisation: ONESIDED', file=f)
print(' Regions: met_2pj,met_0j,met_1j1b', file=f)
print(' Category: Theory', file=f)
print('', file=f)
print('Systematic: "Wt_FH"', file=f)
print(' Title: "Wt Factorization/Hadronization"', file=f)
print(' Type: HISTO', file=f)
print(' Samples: Wt', file=f)
print(' HistoNameUp: "Wt_FULL_sysFH"', file=f)
print(' Symmetrisation: ONESIDED', file=f)
print(' Regions: met_2pj,met_0j,met_1j1b', file=f)
print(' Category: Theory', file=f)
print('', file=f)
print('Systematic: "Wt_HS"', file=f)
print(' Title: "Wt Hard Scatter"', file=f)
print(' Type: HISTO', file=f)
print(' Samples: Wt', file=f)
print(' HistoNameUp: "Wt_FULL_sysHS2"', file=f)
#print(' HistoNameDown: "Wt_FULL_sysHS1"', file=f)
print(' ReferenceSample: WtHSGHOST', file=f)
print(' Symmetrisation: ONESIDED', file=f)
print(' Regions: met_2pj,met_0j,met_1j1b', file=f)
print(' Category: Theory', file=f)
print('', file=f)

for s in aidapy.meta._systematic_ud_prefixes:
    up = '__1up"'
    down = '__1down"'
    if 'MET_' in s[0]:
        up = 'Up"'
        down = 'Down"'
    print('Systematic: "'+s[0]+'"', file=f)
    print(' Title: "'+s[1]+'"', file=f)
    print(' Samples: ttbar,Wt,WW,Ztautau,Diboson,Fakes,RareSM', file=f)
    print(' Type: HISTO', file=f)
    print(' HistoNameSufUp: "_'+s[0]+up, file=f)
    print(' HistoNameSufDown: "_'+s[0]+down, file=f)
    print(' Regions: met_2pj,met_0j,met_1j1b', file=f)
    print(' Symmetrisation: TWOSIDED', file=f)
    print(' Category: Instrumental', file=f)
    print('', file=f)

for s in aidapy.meta._systematic_singles:
    print('Systematic: "'+s[0]+'"', file=f)
    print(' Title: "'+s[1]+'"', file=f)
    print(' Samples: ttbar,Wt,WW,Ztautau,Diboson,Fakes,RareSM', file=f)
    print(' Regions: met_2pj,met_0j,met_1j1b', file=f)
    print(' Type: HISTO', file=f)
    print(' HistoNameSufUp: "_'+s[0]+'"', file=f)
    print(' Symmetrisation: ONESIDED', file=f)
    print(' Category: Instrumental', file=f)
    print('', file=f)

for s in aidapy.meta._systematic_weights:
    up, down, title = s[0], s[1], s[2]
    name = up.split('_UP')[0].split('weightSyswLum_')[-1]
    print('Systematic: "'+name+'"', file=f)
    print(' Title: "'+title+'"', file=f)
    print(' Samples: ttbar,Wt,WW,Ztautau,Diboson,Fakes,RareSM', file=f)
    print(' Regions: met_2pj,met_0j,met_1j1b', file=f)
    print(' Type: HISTO', file=f)
    print(' Symmetrisation: TWOSIDED', file=f)
    print(' HistoNameSufUp: "_'+up+'"', file=f)
    print(' HistoNameSufDown: "_'+down+'"', file=f)
    print('', file=f)

for s in aidapy.meta._systematic_btag_weights:
    title = s[2]
    up = s[0]
    down = s[1]
    name = str(s[0].split('weightSyswLum_')[-1]).replace('_up_', '')
    print('Systematic: "'+name+'"', file=f)
    print(' Title: "'+title+'"', file=f)
    print(' Samples: ttbar,Wt', file=f)
    print(' Regions: met_1j1b', file=f)
    print(' Type: HISTO', file=f)
    print(' Symmetrisation: TWOSIDED', file=f)
    print(' HistoNameSufUp: "_'+up+'"', file=f)
    print(' HistoNameSufDown: "_'+down+'"', file=f)
    print('', file=f)

f.close()

if 'andrun' in sys.argv:
    subprocess.call('rm -rf myAIDAfit', shell=True)
    print('myFit.exe h aida.config')
    print('myFit.exe w aida.config')
    print('myFit.exe f aida.config')
    print('myFit.exe d aida.config')
    print('myFit.exe p aida.config')
[ "subprocess.call" ]
[((10781, 10828), 'subprocess.call', 'subprocess.call', (['"""rm -rf myAIDAfit"""'], {'shell': '(True)'}), "('rm -rf myAIDAfit', shell=True)\n", (10796, 10828), False, 'import subprocess\n')]
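Since every line of the output file is determined by the prints above, a quick sanity check can be derived directly from the script; this snippet assumes the generator has already been run in the same directory.

# Hypothetical sanity check of the generated aida.config.
with open('aida.config') as fh:
    head = [next(fh).rstrip() for _ in range(3)]
print(head)  # ['Job: "myAIDAfit"', ' CmeLabel: "13 TeV"', ' POI: "mu_TTBAR"']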
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'AleksNeStu'

# Task05-02:
# Create a simple threaded http server like in *task02-01.py* which can be
# extended using a decorator-based plug-in model for handling requests.
# Example:
# ```python
# >>> from datetime import datetime
# >>> from server import run, get_handler
# >>> @get_handler('/date')
# ... def date():
# ...     return datetime.now()
# ...
# >>> run()
# ```
# And at the same time:
# ```bash
# $ curl -s 'http://192.168.1.2:8080/date'
# 2015-03-10 12:24:43.492631
# ```
# Additional info:

from datetime import datetime

from EPAM.task02 import Get_handler, Server


@Get_handler('/date')
def Date():
    return datetime.now()


if __name__ == '__main__':
    Server()
[ "EPAM.task02.Get_handler", "datetime.datetime.now", "EPAM.task02.Server" ]
[((636, 656), 'EPAM.task02.Get_handler', 'Get_handler', (['"""/date"""'], {}), "('/date')\n", (647, 656), False, 'from EPAM.task02 import Get_handler, Server\n'), ((680, 694), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (692, 694), False, 'from datetime import datetime\n'), ((728, 736), 'EPAM.task02.Server', 'Server', ([], {}), '()\n', (734, 736), False, 'from EPAM.task02 import Get_handler, Server\n')]
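Get_handler and Server live in EPAM.task02, which is not part of this row. A minimal Python 3 sketch of how such a decorator-based handler registry could work (the names and behavior here are assumptions for illustration, not the actual EPAM.task02 implementation):

from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer

_routes = {}  # URL path -> callable producing the response body

def get_handler(path):
    # Decorator: register a function as the GET handler for a path.
    def register(func):
        _routes[path] = func
        return func
    return register

class _Handler(BaseHTTPRequestHandler):
    def do_GET(self):
        func = _routes.get(self.path)
        if func is None:
            self.send_error(404)
            return
        self.send_response(200)
        self.end_headers()
        self.wfile.write(str(func()).encode('utf-8'))

def run(port=8080):
    # ThreadingHTTPServer handles each request on its own thread.
    ThreadingHTTPServer(('', port), _Handler).serve_forever()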
import re

from epcpy.epc_schemes.base_scheme import EPCScheme
from epcpy.utils.common import ConvertException
from epcpy.utils.regex import IMOVN_URI

IMOVN_URI_REGEX = re.compile(IMOVN_URI)


class IMOVN(EPCScheme):
    """IMOVN EPC scheme implementation.

    IMOVN pure identities are of the form:
        urn:epc:id:imovn:<IMOvesselNumber>

    Example:
        urn:epc:id:imovn:9176187

    This class can be created using EPC pure identities via its constructor
    """

    def __init__(self, epc_uri) -> None:
        super().__init__()

        if not IMOVN_URI_REGEX.fullmatch(epc_uri):
            raise ConvertException(message=f"Invalid IMOVN URI {epc_uri}")

        self.epc_uri = epc_uri
        self._vessel_number = epc_uri.split(":")[4]
[ "epcpy.utils.common.ConvertException", "re.compile" ]
[((170, 191), 're.compile', 're.compile', (['IMOVN_URI'], {}), '(IMOVN_URI)\n', (180, 191), False, 'import re\n'), ((616, 672), 'epcpy.utils.common.ConvertException', 'ConvertException', ([], {'message': 'f"""Invalid IMOVN URI {epc_uri}"""'}), "(message=f'Invalid IMOVN URI {epc_uri}')\n", (632, 672), False, 'from epcpy.utils.common import ConvertException\n')]
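A short usage sketch for the scheme class above; the URI is the example from its own docstring (reading the private _vessel_number attribute is only to show what the constructor parses):

imovn = IMOVN('urn:epc:id:imovn:9176187')
print(imovn.epc_uri)         # urn:epc:id:imovn:9176187
print(imovn._vessel_number)  # 9176187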
from __future__ import unicode_literals

import urlparse


class ContentType(object):
    def detect(self, url, content_type):
        pass

    def render(self):
        return None


class Video(ContentType):
    def detect(self, url, content_type):
        return 'video'


class Image(ContentType):
    def detect(self, url, content_type):
        return 'image'


class YouTube(Video):
    def detect(self, url, content_type):
        url_data = urlparse.urlparse(url)
        query = urlparse.parse_qs(url_data.query)
        if len(query.get("v", [])) > 0:
            # return query["v"][0]
            return 'youtube'

    def youtube_video_id(self, url):
        url_data = urlparse.urlparse(url)
        query = urlparse.parse_qs(url_data.query)
        if len(query.get("v", [])) > 0:
            return query["v"][0]


class Imgur(ContentType):
    def detect(self, url, content_type):
        url_data = urlparse.urlparse(url)
        if url_data.netloc == 'imgur.com':
            return 'imgur'


class Link(ContentType):
    def detect(self, url, content_type):
        return 'link'


class GifV(ContentType):
    def detect(self, url, content_type):
        url_data = urlparse.urlparse(url)
        if ((url_data.netloc == 'imgur.com' or url_data.netloc == 'i.imgur.com')
                and url[-5:] == '.gifv'):
            return 'gifv'
[ "urlparse.urlparse", "urlparse.parse_qs" ]
[((450, 472), 'urlparse.urlparse', 'urlparse.urlparse', (['url'], {}), '(url)\n', (467, 472), False, 'import urlparse\n'), ((489, 522), 'urlparse.parse_qs', 'urlparse.parse_qs', (['url_data.query'], {}), '(url_data.query)\n', (506, 522), False, 'import urlparse\n'), ((684, 706), 'urlparse.urlparse', 'urlparse.urlparse', (['url'], {}), '(url)\n', (701, 706), False, 'import urlparse\n'), ((723, 756), 'urlparse.parse_qs', 'urlparse.parse_qs', (['url_data.query'], {}), '(url_data.query)\n', (740, 756), False, 'import urlparse\n'), ((918, 940), 'urlparse.urlparse', 'urlparse.urlparse', (['url'], {}), '(url)\n', (935, 940), False, 'import urlparse\n'), ((1188, 1210), 'urlparse.urlparse', 'urlparse.urlparse', (['url'], {}), '(url)\n', (1205, 1210), False, 'import urlparse\n')]
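Each detector returns a label or None, so the URL-based ones compose naturally into a first-match dispatch chain. A hedged sketch of such a loop (the ordering and the classify helper are assumptions, not part of the row above; Video and Image always match, so they would presumably be selected on content_type instead):

DETECTORS = [GifV(), YouTube(), Imgur(), Link()]

def classify(url, content_type=None):
    # First detector that returns a truthy label wins.
    for detector in DETECTORS:
        label = detector.detect(url, content_type)
        if label:
            return label

print(classify('http://www.youtube.com/watch?v=dQw4w9WgXcQ'))  # youtube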
import numpy as np
from scipy import optimize
import logging

logger = logging.getLogger(__name__)


def scipyFit(x, y, method, p0=None, boundaries=(-np.inf, np.inf), sigma=None):
    if boundaries is not None and len(boundaries) != 2:
        raise ValueError("Boundaries need to be a 2-tuple")

    if p0 is not None and boundaries is not None and boundaries != (-np.inf, np.inf) \
            and len(p0) != len(boundaries[0]):
        raise ValueError("p0 and boundaries have to have the same length")

    popt, pcov = optimize.curve_fit(method, x, y, p0=p0, bounds=boundaries, sigma=sigma)
    perr = np.sqrt(np.diag(pcov))
    return popt, perr


def gaussian_amp(x, y0, amp, cen, wid):
    r'''
    Fitting function used. Fits a Gaussian using the following function:

    .. math::
        y(x)=y_0+\frac{amp}{\sqrt{2\pi}\,wid}\text{exp}(-\frac{(x-cen)^2}{2 wid^2})

    :param x: x-Axis against which we will approximate the function
    :type x: 1-D numpy array
    :param y0: y-Offset of the function
    :type y0: float
    :param amp: Amplitude of the gaussian
    :type amp: float
    :param cen: x-Value of center of distribution
    :type cen: float
    :param wid: Standard deviation of the distribution
    :type wid: float
    :return: y-Array of a gaussian distribution
    :rtype: 1-D numpy array
    '''
    return y0 + (amp / (np.sqrt(2 * np.pi) * wid)) * np.exp(-(x - cen) ** 2 / (2 * wid ** 2))


def gaussian(x, y0, cen, wid):
    r'''
    Fitting function used. Fits a unit-amplitude Gaussian using the following function:

    .. math::
        y(x)=y_0+\frac{1}{\sqrt{2\pi}\,wid}\text{exp}(-\frac{(x-cen)^2}{2 wid^2})

    :param x: x-Axis against which we will approximate the function
    :type x: 1-D numpy array
    :param y0: y-Offset of the function
    :type y0: float
    :param cen: x-Value of center of distribution
    :type cen: float
    :param wid: Standard deviation of the distribution
    :type wid: float
    :return: y-Array of a gaussian distribution
    :rtype: 1-D numpy array
    '''
    return y0 + (1 / (np.sqrt(2 * np.pi) * wid)) * np.exp(-(x - cen) ** 2 / (2 * wid ** 2))


def sinOffset(x, amp, tau, offset, phi):
    return offset + amp * np.sin(2 * np.pi * x / tau + phi)


def linearPolynomial(x, a, b):
    return a + b * x


def exponentialDistribution(x, A, B, u):
    return A + B * np.exp(-x * u) * u


def quadraticPolynomial(x, a, b, c, d):
    # Note: despite the name, this polynomial includes a cubic term.
    return a + b * x + c * x ** 2 + d * x ** 3


def sin(x, amp, tau):
    '''
    Represents the used sin within our Fit

    :param x: x-Axis against which we will approximate the function
    :type x: 1-D numpy array
    :param amp: Amplitude of sin
    :type amp: float
    :param tau: Period parameter of the oscillation
    :type tau: float
    :return: the functional values for the array x
    :rtype: 1-D numpy array
    '''
    return amp * np.sin(2 * np.pi * 4 * x / tau)


def sinc(x, a, tau_acf):
    '''
    Represents the used sinc within our Fit

    :param x: 1-D numpy array
    :param a: float, amplitude of the sinc
    :param tau_acf: float
    :return: the functional value for the array x
    :rtype: 1-D numpy array
    '''
    return a * np.sinc(4 * x / tau_acf) ** 2


def sinc_sin(x, a, tau, a_s):
    return sinc(x, a, tau) + sin(x, a_s, tau)


def trismooth(x, window_width):
    '''
    This function is implemented to create a similar function to the
    Trismooth function of IDL.

    :param x: The array containing the data which should be filtered.
              In our case this represents the flux within the lightCurve.
    :type x: 1-D numpy array
    :param window_width: The bin size which the function will look at
    :type window_width: int
    :return: The smoothed variant of x
    :rtype: 1-D numpy array
    '''
    if window_width % 2 != 0:
        window_width = window_width + 1

    lend = len(x) - 1
    if (lend + 1) < window_width:
        raise ValueError("window_width cannot be bigger than length - 1")

    halfWeights = np.arange(window_width / 2)
    weights = np.append(halfWeights, [window_width / 2])
    weights = np.append(weights, halfWeights[::-1])
    weights += 1
    tot = np.sum(weights)

    smoothed = np.zeros(lend + 1)
    offset = int(window_width / 2)

    for i in range(offset, lend - offset):
        smoothed[i] = np.sum(x[i - offset:i + offset + 1] * weights)
    smoothed /= tot

    for i in range(0, offset):
        smoothed[i] = np.sum(x[0:i + offset + 1] * weights[offset - i:]) / np.sum(weights[offset - i:])

    for i in range(lend - offset, lend - 1, -1):
        smoothed[i] = np.sum(x[i - offset:] * weights[0:offset + (lend - i)]) / np.sum(weights[0:offset + (lend - i)])

    return smoothed
[ "numpy.sum", "numpy.zeros", "scipy.optimize.curve_fit", "numpy.append", "numpy.sinc", "numpy.sin", "numpy.arange", "numpy.exp", "numpy.diag", "logging.getLogger", "numpy.sqrt" ]
[((71, 98), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (88, 98), False, 'import logging\n'), ((519, 590), 'scipy.optimize.curve_fit', 'optimize.curve_fit', (['method', 'x', 'y'], {'p0': 'p0', 'bounds': 'boundaries', 'sigma': 'sigma'}), '(method, x, y, p0=p0, bounds=boundaries, sigma=sigma)\n', (537, 590), False, 'from scipy import optimize\n'), ((3802, 3829), 'numpy.arange', 'np.arange', (['(window_width / 2)'], {}), '(window_width / 2)\n', (3811, 3829), True, 'import numpy as np\n'), ((3842, 3884), 'numpy.append', 'np.append', (['halfWeights', '[window_width / 2]'], {}), '(halfWeights, [window_width / 2])\n', (3851, 3884), True, 'import numpy as np\n'), ((3896, 3933), 'numpy.append', 'np.append', (['weights', 'halfWeights[::-1]'], {}), '(weights, halfWeights[::-1])\n', (3905, 3933), True, 'import numpy as np\n'), ((3959, 3974), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (3965, 3974), True, 'import numpy as np\n'), ((3991, 4009), 'numpy.zeros', 'np.zeros', (['(lend + 1)'], {}), '(lend + 1)\n', (3999, 4009), True, 'import numpy as np\n'), ((609, 622), 'numpy.diag', 'np.diag', (['pcov'], {}), '(pcov)\n', (616, 622), True, 'import numpy as np\n'), ((2713, 2744), 'numpy.sin', 'np.sin', (['(2 * np.pi * 4 * x / tau)'], {}), '(2 * np.pi * 4 * x / tau)\n', (2719, 2744), True, 'import numpy as np\n'), ((4102, 4148), 'numpy.sum', 'np.sum', (['(x[i - offset:i + offset + 1] * weights)'], {}), '(x[i - offset:i + offset + 1] * weights)\n', (4108, 4148), True, 'import numpy as np\n'), ((1357, 1397), 'numpy.exp', 'np.exp', (['(-(x - cen) ** 2 / (2 * wid ** 2))'], {}), '(-(x - cen) ** 2 / (2 * wid ** 2))\n', (1363, 1397), True, 'import numpy as np\n'), ((2098, 2138), 'numpy.exp', 'np.exp', (['(-(x - cen) ** 2 / (2 * wid ** 2))'], {}), '(-(x - cen) ** 2 / (2 * wid ** 2))\n', (2104, 2138), True, 'import numpy as np\n'), ((2199, 2232), 'numpy.sin', 'np.sin', (['(2 * np.pi * x / tau + phi)'], {}), '(2 * np.pi * x / tau + phi)\n', (2205, 2232), True, 'import numpy as np\n'), ((3015, 3039), 'numpy.sinc', 'np.sinc', (['(4 * x / tau_acf)'], {}), '(4 * x / tau_acf)\n', (3022, 3039), True, 'import numpy as np\n'), ((4214, 4264), 'numpy.sum', 'np.sum', (['(x[0:i + offset + 1] * weights[offset - i:])'], {}), '(x[0:i + offset + 1] * weights[offset - i:])\n', (4220, 4264), True, 'import numpy as np\n'), ((4259, 4287), 'numpy.sum', 'np.sum', (['weights[offset - i:]'], {}), '(weights[offset - i:])\n', (4265, 4287), True, 'import numpy as np\n'), ((4352, 4407), 'numpy.sum', 'np.sum', (['(x[i - offset:] * weights[0:offset + (lend - i)])'], {}), '(x[i - offset:] * weights[0:offset + (lend - i)])\n', (4358, 4407), True, 'import numpy as np\n'), ((4402, 4440), 'numpy.sum', 'np.sum', (['weights[0:offset + (lend - i)]'], {}), '(weights[0:offset + (lend - i)])\n', (4408, 4440), True, 'import numpy as np\n'), ((2333, 2347), 'numpy.exp', 'np.exp', (['(-x * u)'], {}), '(-x * u)\n', (2339, 2347), True, 'import numpy as np\n'), ((1328, 1346), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1335, 1346), True, 'import numpy as np\n'), ((2069, 2087), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2076, 2087), True, 'import numpy as np\n')]
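A short end-to-end usage sketch for the helpers above, fitting gaussian_amp to synthetic noisy data (the sample values and seed are made up for illustration):

rng = np.random.default_rng(0)
xs = np.linspace(-5, 5, 200)
ys = gaussian_amp(xs, y0=1.0, amp=4.0, cen=0.5, wid=1.2) \
    + 0.05 * rng.standard_normal(xs.size)

popt, perr = scipyFit(xs, ys, gaussian_amp, p0=[1, 3, 0, 1])
print(popt)  # should recover roughly [1.0, 4.0, 0.5, 1.2]
print(perr)  # one-sigma errors from the covariance diagonal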
from linkace_cli.api.base import APIBase
from linkace_cli import models


class Lists(APIBase):
    """CRUD interaction for all things list-based"""

    def get(self, id: int = None, order_by: models.OrderBy = None, order_dir: models.OrderDir = None):
        """
        Get all lists or a single list's details. The order can be modified
        using the enums in models. If a numeric ID for a list is provided,
        it will only return details for that list.
        """
        order_dir = order_dir.value if order_dir else None
        order_by = order_by.value if order_by else None

        if not id:
            resp = self.api.get('lists', {'order_by': order_by, 'order_dir': order_dir})
            resp = models.ListsPagination().load(resp)
            lists = resp['data']

            while resp['next_page_url']:
                resp = models.ListsPagination().load(self.api.get(resp['next_page_url']))
                lists.extend(resp['data'])

            return lists

        return models.List().load(self.api.get(f'lists/{id}'))

    def create(self, link: models.List):
        return self.api.post('lists', link)

    def delete(self, id: int):
        return self.api.delete(f'lists/{id}')

    def update(self, id: int, link: models.List):
        return self.api.patch(f'lists/{id}', link)

    def links(self, id: int):
        resp = self.api.get(f'lists/{id}/links')
        resp = models.LinksPagination().load(resp)
        links = resp['data']

        while resp['next_page_url']:
            resp = models.LinksPagination().load(self.api.get(resp['next_page_url']))
            links.extend(resp['data'])

        return links
[ "linkace_cli.models.LinksPagination", "linkace_cli.models.List", "linkace_cli.models.ListsPagination" ]
[((999, 1012), 'linkace_cli.models.List', 'models.List', ([], {}), '()\n', (1010, 1012), False, 'from linkace_cli import models\n'), ((1408, 1432), 'linkace_cli.models.LinksPagination', 'models.LinksPagination', ([], {}), '()\n', (1430, 1432), False, 'from linkace_cli import models\n'), ((714, 738), 'linkace_cli.models.ListsPagination', 'models.ListsPagination', ([], {}), '()\n', (736, 738), False, 'from linkace_cli import models\n'), ((1531, 1555), 'linkace_cli.models.LinksPagination', 'models.LinksPagination', ([], {}), '()\n', (1553, 1555), False, 'from linkace_cli import models\n'), ((849, 873), 'linkace_cli.models.ListsPagination', 'models.ListsPagination', ([], {}), '()\n', (871, 873), False, 'from linkace_cli import models\n')]
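get() and links() above repeat the same follow-the-next-page loop. A hedged refactor sketch of that pattern as a shared helper (the _paginate name and its placement on the class are suggestions, not part of linkace_cli):

def _paginate(self, schema, first_resp):
    # Drain a paginated endpoint by chasing next_page_url.
    page = schema().load(first_resp)
    items = page['data']
    while page['next_page_url']:
        page = schema().load(self.api.get(page['next_page_url']))
        items.extend(page['data'])
    return items

# e.g. links() would then reduce to:
#     return self._paginate(models.LinksPagination, self.api.get(f'lists/{id}/links'))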
""" Neural net encoder/decoder (seperate) (Not sure if I still want this...) """ from __future__ import absolute_import from __future__ import division import random import numpy as np import tensorflow as tf from tensorflow.python.framework import ops from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import rnn from tensorflow.python.ops import rnn_cell from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops.math_ops import sigmoid from tensorflow.python.ops.math_ops import tanh import exps.nlc_env class GRUCellAttn(rnn_cell.GRUCell): def __init__(self, num_units, encoder_output, scope=None): self.hs = encoder_output with vs.variable_scope(scope or type(self).__name__): with vs.variable_scope("Attn1"): hs2d = tf.reshape(self.hs, [-1, num_units]) phi_hs2d = tanh(rnn_cell._linear(hs2d, num_units, True, 1.0)) self.phi_hs = tf.reshape(phi_hs2d, tf.shape(self.hs)) super(GRUCellAttn, self).__init__(num_units) def __call__(self, inputs, state, scope=None): gru_out, gru_state = super(GRUCellAttn, self).__call__(inputs, state, scope) with vs.variable_scope(scope or type(self).__name__): with vs.variable_scope("Attn2"): gamma_h = tanh(rnn_cell._linear(gru_out, self._num_units, True, 1.0)) weights = tf.reduce_sum(self.phi_hs * gamma_h, reduction_indices=2, keep_dims=True) weights = tf.exp(weights - tf.reduce_max(weights, reduction_indices=0, keep_dims=True)) weights = weights / (1e-6 + tf.reduce_sum(weights, reduction_indices=0, keep_dims=True)) context = tf.reduce_sum(self.hs * weights, reduction_indices=0) with vs.variable_scope("AttnConcat"): out = tf.nn.relu(rnn_cell._linear([context, gru_out], self._num_units, True, 1.0)) self.attn_map = tf.squeeze(tf.slice(weights, [0, 0, 0], [-1, -1, 1])) return (out, out) class Encoder(object): def __init__(self, vocab_size, size, num_layers, max_gradient_norm, batch_size, learning_rate, learning_rate_decay_factor, dropout, forward_only=False): self.size = size self.vocab_size = vocab_size self.batch_size = batch_size self.num_layers = num_layers self.keep_prob_config = 1.0 - dropout self.learning_rate = tf.Variable(float(learning_rate), trainable=False) self.learning_rate_decay_op = self.learning_rate.assign(self.learning_rate * learning_rate_decay_factor) self.global_step = tf.Variable(0, trainable=False) self.keep_prob = tf.placeholder(tf.float32) self.source_tokens = tf.placeholder(tf.int32, shape=[None, None]) self.target_tokens = tf.placeholder(tf.int32, shape=[None, None]) self.source_mask = tf.placeholder(tf.int32, shape=[None, None]) self.target_mask = tf.placeholder(tf.int32, shape=[None, None]) self.beam_size = tf.placeholder(tf.int32) self.target_length = tf.reduce_sum(self.target_mask, reduction_indices=0) self.decoder_state_input, self.decoder_state_output = [], [] for i in xrange(num_layers): self.decoder_state_input.append(tf.placeholder(tf.float32, shape=[None, size])) with tf.variable_scope("Encoder", initializer=tf.uniform_unit_scaling_initializer(1.0)): self.setup_embeddings() self.setup_encoder() # calculate/setup loss later # self.setup_decoder() # self.setup_loss() # self.setup_beam() # params = tf.trainable_variables() # if not forward_only: # opt = tf.train.AdamOptimizer(self.learning_rate) # # gradients = tf.gradients(self.losses, params) # clipped_gradients, _ = tf.clip_by_global_norm(gradients, max_gradient_norm) # # self.gradient_norm = 
tf.global_norm(clipped_gradients) # self.gradient_norm = tf.global_norm(gradients) # self.param_norm = tf.global_norm(params) # self.updates = opt.apply_gradients( # zip(clipped_gradients, params), global_step=self.global_step) # # self.saver = tf.train.Saver(tf.all_variables(), max_to_keep=0) def setup_embeddings(self): with vs.variable_scope("embeddings"): self.L_enc = tf.get_variable("L_enc", [self.vocab_size, self.size]) # self.L_dec = tf.get_variable("L_dec", [self.vocab_size, self.size]) self.encoder_inputs = embedding_ops.embedding_lookup(self.L_enc, self.source_tokens) # self.decoder_inputs = embedding_ops.embedding_lookup(self.L_dec, self.target_tokens) def setup_encoder(self): self.encoder_cell = rnn_cell.GRUCell(self.size) with vs.variable_scope("PryamidEncoder"): inp = self.encoder_inputs mask = self.source_mask out = None for i in xrange(self.num_layers): with vs.variable_scope("EncoderCell%d" % i) as scope: srclen = tf.reduce_sum(mask, reduction_indices=0) out, _ = self.bidirectional_rnn(self.encoder_cell, inp, srclen, scope=scope) dropin, mask = self.downscale(out, mask) inp = self.dropout(dropin) self.encoder_output = out def dropout(self, inp): return tf.nn.dropout(inp, self.keep_prob) def downscale(self, inp, mask): with vs.variable_scope("Downscale"): inshape = tf.shape(inp) T, batch_size, dim = inshape[0], inshape[1], inshape[2] inp2d = tf.reshape(tf.transpose(inp, perm=[1, 0, 2]), [-1, 2 * self.size]) out2d = rnn_cell._linear(inp2d, self.size, True, 1.0) out3d = tf.reshape(out2d, tf.pack((batch_size, tf.to_int32(T / 2), dim))) out3d = tf.transpose(out3d, perm=[1, 0, 2]) out3d.set_shape([None, None, self.size]) out = tanh(out3d) mask = tf.transpose(mask) mask = tf.reshape(mask, [-1, 2]) mask = tf.cast(mask, tf.bool) mask = tf.reduce_any(mask, reduction_indices=1) mask = tf.to_int32(mask) mask = tf.reshape(mask, tf.pack([batch_size, -1])) mask = tf.transpose(mask) return out, mask def bidirectional_rnn(self, cell, inputs, lengths, scope=None): name = scope.name or "BiRNN" # Forward direction with vs.variable_scope(name + "_FW") as fw_scope: output_fw, output_state_fw = rnn.dynamic_rnn(cell, inputs, time_major=True, dtype=dtypes.float32, sequence_length=lengths, scope=fw_scope) # Backward direction inputs_bw = tf.reverse_sequence(inputs, tf.to_int64(lengths), seq_dim=0, batch_dim=1) with vs.variable_scope(name + "_BW") as bw_scope: output_bw, output_state_bw = rnn.dynamic_rnn(cell, inputs_bw, time_major=True, dtype=dtypes.float32, sequence_length=lengths, scope=bw_scope) output_bw = tf.reverse_sequence(output_bw, tf.to_int64(lengths), seq_dim=0, batch_dim=1) outputs = output_fw + output_bw output_state = output_state_fw + output_state_bw return (outputs, output_state) class Decoder(object): def __init__(self, vocab_size, size, num_layers, encoder_output, batch_size, dropout): self.size = size self.vocab_size = vocab_size self.batch_size = batch_size self.num_layers = num_layers self.keep_prob_config = 1.0 - dropout self.encoder_output = encoder_output self.target_tokens = tf.placeholder(tf.int32, shape=[None, None]) self.target_mask = tf.placeholder(tf.int32, shape=[None, None]) self.keep_prob = tf.placeholder(tf.float32) self.target_length = tf.reduce_sum(self.target_mask, reduction_indices=0) self.decoder_state_input, self.decoder_state_output = [], [] for i in xrange(num_layers): self.decoder_state_input.append(tf.placeholder(tf.float32, shape=[None, size])) def dropout(self, inp): return tf.nn.dropout(inp, self.keep_prob) def setup_embeddings(self): with 
vs.variable_scope("embeddings"): self.L_dec = tf.get_variable("L_dec", [self.vocab_size, self.size]) self.decoder_inputs = embedding_ops.embedding_lookup(self.L_dec, self.target_tokens) def setup_decoder(self): if self.num_layers > 1: self.decoder_cell = rnn_cell.GRUCell(self.size) self.attn_cell = GRUCellAttn(self.size, self.encoder_output, scope="DecoderAttnCell") with vs.variable_scope("Decoder"): inp = self.decoder_inputs for i in xrange(self.num_layers - 1): with vs.variable_scope("DecoderCell%d" % i) as scope: out, state_output = rnn.dynamic_rnn(self.decoder_cell, inp, time_major=True, dtype=dtypes.float32, sequence_length=self.target_length, scope=scope, initial_state=self.decoder_state_input[i]) inp = self.dropout(out) self.decoder_state_output.append(state_output) with vs.variable_scope("DecoderAttnCell") as scope: out, state_output = rnn.dynamic_rnn(self.attn_cell, inp, time_major=True, dtype=dtypes.float32, sequence_length=self.target_length, scope=scope, initial_state=self.decoder_state_input[i + 1]) self.decoder_output = self.dropout(out) self.decoder_state_output.append(state_output)
[ "tensorflow.reduce_sum", "tensorflow.reshape", "tensorflow.Variable", "tensorflow.python.ops.math_ops.tanh", "tensorflow.reduce_max", "tensorflow.get_variable", "tensorflow.python.ops.rnn_cell._linear", "tensorflow.to_int64", "tensorflow.placeholder", "tensorflow.cast", "tensorflow.to_int32", "tensorflow.pack", "tensorflow.reduce_any", "tensorflow.uniform_unit_scaling_initializer", "tensorflow.transpose", "tensorflow.python.ops.rnn.dynamic_rnn", "tensorflow.python.ops.embedding_ops.embedding_lookup", "tensorflow.shape", "tensorflow.slice", "tensorflow.python.ops.rnn_cell.GRUCell", "tensorflow.nn.dropout", "tensorflow.python.ops.variable_scope.variable_scope" ]
[((2771, 2802), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (2782, 2802), True, 'import tensorflow as tf\n'), ((2829, 2855), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2843, 2855), True, 'import tensorflow as tf\n'), ((2885, 2929), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]'}), '(tf.int32, shape=[None, None])\n', (2899, 2929), True, 'import tensorflow as tf\n'), ((2959, 3003), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]'}), '(tf.int32, shape=[None, None])\n', (2973, 3003), True, 'import tensorflow as tf\n'), ((3031, 3075), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]'}), '(tf.int32, shape=[None, None])\n', (3045, 3075), True, 'import tensorflow as tf\n'), ((3103, 3147), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]'}), '(tf.int32, shape=[None, None])\n', (3117, 3147), True, 'import tensorflow as tf\n'), ((3173, 3197), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {}), '(tf.int32)\n', (3187, 3197), True, 'import tensorflow as tf\n'), ((3227, 3279), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.target_mask'], {'reduction_indices': '(0)'}), '(self.target_mask, reduction_indices=0)\n', (3240, 3279), True, 'import tensorflow as tf\n'), ((5034, 5061), 'tensorflow.python.ops.rnn_cell.GRUCell', 'rnn_cell.GRUCell', (['self.size'], {}), '(self.size)\n', (5050, 5061), False, 'from tensorflow.python.ops import rnn_cell\n'), ((5682, 5716), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['inp', 'self.keep_prob'], {}), '(inp, self.keep_prob)\n', (5695, 5716), True, 'import tensorflow as tf\n'), ((8032, 8076), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]'}), '(tf.int32, shape=[None, None])\n', (8046, 8076), True, 'import tensorflow as tf\n'), ((8104, 8148), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, None]'}), '(tf.int32, shape=[None, None])\n', (8118, 8148), True, 'import tensorflow as tf\n'), ((8174, 8200), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (8188, 8200), True, 'import tensorflow as tf\n'), ((8231, 8283), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.target_mask'], {'reduction_indices': '(0)'}), '(self.target_mask, reduction_indices=0)\n', (8244, 8283), True, 'import tensorflow as tf\n'), ((8527, 8561), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['inp', 'self.keep_prob'], {}), '(inp, self.keep_prob)\n', (8540, 8561), True, 'import tensorflow as tf\n'), ((1557, 1630), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.phi_hs * gamma_h)'], {'reduction_indices': '(2)', 'keep_dims': '(True)'}), '(self.phi_hs * gamma_h, reduction_indices=2, keep_dims=True)\n', (1570, 1630), True, 'import tensorflow as tf\n'), ((1854, 1907), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.hs * weights)'], {'reduction_indices': '(0)'}), '(self.hs * weights, reduction_indices=0)\n', (1867, 1907), True, 'import tensorflow as tf\n'), ((4585, 4616), 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (['"""embeddings"""'], {}), "('embeddings')\n", (4602, 4616), True, 'from tensorflow.python.ops import variable_scope as vs\n'), ((4643, 4697), 'tensorflow.get_variable', 'tf.get_variable', (['"""L_enc"""', '[self.vocab_size, self.size]'], {}), "('L_enc', [self.vocab_size, self.size])\n", (4658, 4712), True, 'import tensorflow as tf\n'), ((4814, 4876), 'tensorflow.python.ops.embedding_ops.embedding_lookup', 'embedding_ops.embedding_lookup', (['self.L_enc', 'self.source_tokens'], {}), '(self.L_enc, self.source_tokens)\n', (4844, 4876), False, 'from tensorflow.python.ops import embedding_ops\n'), ((5075, 5110), 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (['"""PryamidEncoder"""'], {}), "('PryamidEncoder')\n", (5092, 5110), True, 'from tensorflow.python.ops import variable_scope as vs\n'), ((5767, 5797), 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (['"""Downscale"""'], {}), "('Downscale')\n", (5784, 5797), True, 'from tensorflow.python.ops import variable_scope as vs\n'), ((5821, 5834), 'tensorflow.shape', 'tf.shape', (['inp'], {}), '(inp)\n', (5829, 5834), True, 'import tensorflow as tf\n'), ((6010, 6055), 'tensorflow.python.ops.rnn_cell._linear', 'rnn_cell._linear', (['inp2d', 'self.size', '(True)', '(1.0)'], {}), '(inp2d, self.size, True, 1.0)\n', (6026, 6055), False, 'from tensorflow.python.ops import rnn_cell\n'), ((6162, 6197), 'tensorflow.transpose', 'tf.transpose', (['out3d'], {'perm': '[1, 0, 2]'}), '(out3d, perm=[1, 0, 2])\n', (6174, 6197), True, 'import tensorflow as tf\n'), ((6269, 6280), 'tensorflow.python.ops.math_ops.tanh', 'tanh', (['out3d'], {}), '(out3d)\n', (6273, 6280), False, 'from tensorflow.python.ops.math_ops import tanh\n'), ((6301, 6319), 'tensorflow.transpose', 'tf.transpose', (['mask'], {}), '(mask)\n', (6313, 6319), True, 'import tensorflow as tf\n'), ((6339, 6364), 'tensorflow.reshape', 'tf.reshape', (['mask', '[-1, 2]'], {}), '(mask, [-1, 2])\n', (6349, 6364), True, 'import tensorflow as tf\n'), ((6384, 6406), 'tensorflow.cast', 'tf.cast', (['mask', 'tf.bool'], {}), '(mask, tf.bool)\n', (6391, 6406), True, 'import tensorflow as tf\n'), ((6426, 6466), 'tensorflow.reduce_any', 'tf.reduce_any', (['mask'], {'reduction_indices': '(1)'}), '(mask, reduction_indices=1)\n', (6439, 6466), True, 'import tensorflow as tf\n'), ((6486, 6503), 'tensorflow.to_int32', 'tf.to_int32', (['mask'], {}), '(mask)\n', (6497, 6503), True, 'import tensorflow as tf\n'), ((6586, 6604), 'tensorflow.transpose', 'tf.transpose', (['mask'], {}), '(mask)\n', (6598, 6604), True, 'import tensorflow as tf\n'), ((6778, 6809), 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (["(name + '_FW')"], {}), "(name + '_FW')\n", (6795, 6809), True, 'from tensorflow.python.ops import variable_scope as vs\n'), ((6864, 6977), 'tensorflow.python.ops.rnn.dynamic_rnn', 'rnn.dynamic_rnn', (['cell', 'inputs'], {'time_major': '(True)', 'dtype': 'dtypes.float32', 'sequence_length': 'lengths', 'scope': 'fw_scope'}), '(cell, inputs, time_major=True, dtype=dtypes.float32,\n sequence_length=lengths, scope=fw_scope)\n', (6879, 6977), False, 'from tensorflow.python.ops import rnn\n'), ((7108, 7128), 'tensorflow.to_int64', 'tf.to_int64', (['lengths'], {}), '(lengths)\n', (7119, 7128), True, 'import tensorflow as tf\n'), ((7167, 7198), 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (["(name + '_BW')"], {}), "(name + '_BW')\n", (7184, 7198), True, 'from tensorflow.python.ops import variable_scope as vs\n'), ((7253, 7369), 'tensorflow.python.ops.rnn.dynamic_rnn', 'rnn.dynamic_rnn', (['cell', 'inputs_bw'], {'time_major': '(True)', 'dtype': 'dtypes.float32', 'sequence_length': 'lengths', 'scope': 'bw_scope'}), '(cell, inputs_bw, time_major=True, dtype=dtypes.float32,\n sequence_length=lengths, scope=bw_scope)\n', (7268, 7369), False, 'from tensorflow.python.ops import rnn\n'), ((7475, 7495), 'tensorflow.to_int64', 'tf.to_int64', (['lengths'], {}), '(lengths)\n', (7486, 7495), True, 'import tensorflow as tf\n'), ((8608, 8639), 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (['"""embeddings"""'], {}), "('embeddings')\n", (8625, 8639), True, 'from tensorflow.python.ops import variable_scope as vs\n'), ((8666, 8720), 'tensorflow.get_variable', 'tf.get_variable', (['"""L_dec"""', '[self.vocab_size, self.size]'], {}), "('L_dec', [self.vocab_size, self.size])\n", (8681, 8720), True, 'import tensorflow as tf\n'), ((8755, 8817), 'tensorflow.python.ops.embedding_ops.embedding_lookup', 'embedding_ops.embedding_lookup', (['self.L_dec', 'self.target_tokens'], {}), '(self.L_dec, self.target_tokens)\n', (8785, 8817), False, 'from tensorflow.python.ops import embedding_ops\n'), ((8912, 8939), 'tensorflow.python.ops.rnn_cell.GRUCell', 'rnn_cell.GRUCell', (['self.size'], {}), '(self.size)\n', (8928, 8939), False, 'from tensorflow.python.ops import rnn_cell\n'), ((9048, 9076), 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (['"""Decoder"""'], {}), "('Decoder')\n", (9065, 9076), True, 'from tensorflow.python.ops import variable_scope as vs\n'), ((916, 942), 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (['"""Attn1"""'], {}), "('Attn1')\n", (933, 942), True, 'from tensorflow.python.ops import variable_scope as vs\n'), ((967, 1003), 'tensorflow.reshape', 'tf.reshape', (['self.hs', '[-1, num_units]'], {}), '(self.hs, [-1, num_units])\n', (977, 1003), True, 'import tensorflow as tf\n'), ((1421, 1447), 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (['"""Attn2"""'], {}), "('Attn2')\n", (1438, 1447), True, 'from tensorflow.python.ops import variable_scope as vs\n'), ((1925, 1956), 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (['"""AttnConcat"""'], {}), "('AttnConcat')\n", (1942, 1956), True, 'from tensorflow.python.ops import variable_scope as vs\n'), ((2096, 2137), 'tensorflow.slice', 'tf.slice', (['weights', '[0, 0, 0]', '[-1, -1, 1]'], {}), '(weights, [0, 0, 0], [-1, -1, 1])\n', (2104, 2137), True, 'import tensorflow as tf\n'), ((3431, 3477), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, size]'}), '(tf.float32, shape=[None, size])\n', (3445, 3477), True, 'import tensorflow as tf\n'), ((5934, 5967), 'tensorflow.transpose', 'tf.transpose', (['inp'], {'perm': '[1, 0, 2]'}), '(inp, perm=[1, 0, 2])\n', (5946, 5967), True, 'import tensorflow as tf\n'), ((6540, 6565), 'tensorflow.pack', 'tf.pack', (['[batch_size, -1]'], {}), '([batch_size, -1])\n', (6547, 6565), True, 'import tensorflow as tf\n'), ((8435, 8481), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, size]'}), '(tf.float32, shape=[None, size])\n', (8449, 8481), True, 'import tensorflow as tf\n'), ((9688, 9724), 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (['"""DecoderAttnCell"""'], {}), "('DecoderAttnCell')\n", (9705, 9724), True, 'from tensorflow.python.ops import variable_scope as vs\n'), ((9771, 9951), 'tensorflow.python.ops.rnn.dynamic_rnn', 'rnn.dynamic_rnn', (['self.attn_cell', 'inp'], {'time_major': '(True)', 'dtype': 'dtypes.float32', 'sequence_length': 'self.target_length', 'scope': 'scope', 'initial_state': 'self.decoder_state_input[i + 1]'}), '(self.attn_cell, inp, time_major=True, dtype=dtypes.float32,\n sequence_length=self.target_length, scope=scope, initial_state=self.\n decoder_state_input[i + 1])\n', (9786, 9951), False, 'from tensorflow.python.ops import rnn\n'), ((1036, 1080), 'tensorflow.python.ops.rnn_cell._linear', 'rnn_cell._linear', (['hs2d', 'num_units', '(True)', '(1.0)'], {}), '(hs2d, num_units, True, 1.0)\n', (1052, 1080), False, 'from tensorflow.python.ops import rnn_cell\n'), ((1133, 1150), 'tensorflow.shape', 'tf.shape', (['self.hs'], {}), '(self.hs)\n', (1141, 1150), True, 'import tensorflow as tf\n'), ((1480, 1533), 'tensorflow.python.ops.rnn_cell._linear', 'rnn_cell._linear', (['gru_out', 'self._num_units', '(True)', '(1.0)'], {}), '(gru_out, self._num_units, True, 1.0)\n', (1496, 1533), False, 'from tensorflow.python.ops import rnn_cell\n'), ((1670, 1729), 'tensorflow.reduce_max', 'tf.reduce_max', (['weights'], {'reduction_indices': '(0)', 'keep_dims': '(True)'}), '(weights, reduction_indices=0, keep_dims=True)\n', (1683, 1729), True, 'import tensorflow as tf\n'), ((1771, 1830), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weights'], {'reduction_indices': '(0)', 'keep_dims': '(True)'}), '(weights, reduction_indices=0, keep_dims=True)\n', (1784, 1830), True, 'import tensorflow as tf\n'), ((1991, 2055), 'tensorflow.python.ops.rnn_cell._linear', 'rnn_cell._linear', (['[context, gru_out]', 'self._num_units', '(True)', '(1.0)'], {}), '([context, gru_out], self._num_units, True, 1.0)\n', (2007, 2055), False, 'from tensorflow.python.ops import rnn_cell\n'), ((3534, 3574), 'tensorflow.uniform_unit_scaling_initializer', 'tf.uniform_unit_scaling_initializer', (['(1.0)'], {}), '(1.0)\n', (3569, 3574), True, 'import tensorflow as tf\n'), ((5276, 5314), 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (["('EncoderCell%d' % i)"], {}), "('EncoderCell%d' % i)\n", (5293, 5314), True, 'from tensorflow.python.ops import variable_scope as vs\n'), ((5354, 5394), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['mask'], {'reduction_indices': '(0)'}), '(mask, reduction_indices=0)\n', (5367, 5394), True, 'import tensorflow as tf\n'), ((9187, 9225), 'tensorflow.python.ops.variable_scope.variable_scope', 'vs.variable_scope', (["('DecoderCell%d' % i)"], {}), "('DecoderCell%d' % i)\n", (9204, 9225), True, 'from tensorflow.python.ops import variable_scope as vs\n'), ((9276, 9456), 'tensorflow.python.ops.rnn.dynamic_rnn', 'rnn.dynamic_rnn', (['self.decoder_cell', 'inp'], {'time_major': '(True)', 'dtype': 'dtypes.float32', 'sequence_length': 'self.target_length', 'scope': 'scope', 'initial_state': 'self.decoder_state_input[i]'}), '(self.decoder_cell, inp, time_major=True, dtype=dtypes.\nfloat32, sequence_length=self.target_length, scope=scope, initial_state\n=self.decoder_state_input[i])\n', (9291, 9456), False, 'from tensorflow.python.ops import rnn\n'), ((6115, 6133), 'tensorflow.to_int32', 'tf.to_int32', (['(T / 2)'], {}), '(T / 2)\n', (6126, 6133), True, 'import tensorflow as tf\n')]
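The attention step in GRUCellAttn above is a softmax over encoder time steps of the inner product between projected encoder states and the projected GRU output. A plain-NumPy sketch of that weighting, following the [time, batch, units] layout used above (an illustration of the math, not the TF graph itself):

import numpy as np

def attention_mix(phi_hs, gamma_h, hs):
    # phi_hs: [T, B, U] projected encoder states; gamma_h: [B, U]; hs: [T, B, U]
    scores = np.sum(phi_hs * gamma_h, axis=2, keepdims=True)        # [T, B, 1]
    scores = np.exp(scores - scores.max(axis=0, keepdims=True))     # stabilized exp
    weights = scores / (1e-6 + scores.sum(axis=0, keepdims=True))  # softmax over time
    return np.sum(hs * weights, axis=0)                            # [B, U] context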
import os
import warnings
import logging

import numpy as np

from activitysim.core import inject

import pandas as pd
import yaml

from activitysim.core import pipeline
from activitysim.core import config

warnings.filterwarnings('ignore', category=pd.io.pytables.PerformanceWarning)
pd.options.mode.chained_assignment = None

logger = logging.getLogger(__name__)


@inject.injectable(cache=True)
def store(data_dir, settings):
    if 'store' not in settings:
        logger.error("store file name not specified in settings")
        raise RuntimeError("store file name not specified in settings")
    fname = os.path.join(data_dir, settings["store"])
    if not os.path.exists(fname):
        logger.error("store file not found: %s" % fname)
        raise RuntimeError("store file not found: %s" % fname)

    file = pd.HDFStore(fname, mode='r')
    pipeline.close_on_exit(file, fname)

    return file


@inject.injectable(cache=True)
def trace_zones(settings):
    zones = settings.get('trace_zones', None)
    if zones and not (isinstance(zones, list) and all(isinstance(x, int) for x in zones)):
        logger.warn("setting trace_zones is wrong type, should be a list of integers, but was %s" % zones)
        zones = None
    return zones
[ "pandas.HDFStore", "warnings.filterwarnings", "os.path.exists", "activitysim.core.inject.injectable", "os.path.join", "activitysim.core.pipeline.close_on_exit", "logging.getLogger" ]
[((207, 284), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'pd.io.pytables.PerformanceWarning'}), "('ignore', category=pd.io.pytables.PerformanceWarning)\n", (230, 284), False, 'import warnings\n'), ((337, 364), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (354, 364), False, 'import logging\n'), ((368, 397), 'activitysim.core.inject.injectable', 'inject.injectable', ([], {'cache': '(True)'}), '(cache=True)\n', (385, 397), False, 'from activitysim.core import inject\n'), ((908, 937), 'activitysim.core.inject.injectable', 'inject.injectable', ([], {'cache': '(True)'}), '(cache=True)\n', (925, 937), False, 'from activitysim.core import inject\n'), ((611, 652), 'os.path.join', 'os.path.join', (['data_dir', "settings['store']"], {}), "(data_dir, settings['store'])\n", (623, 652), False, 'import os\n'), ((819, 847), 'pandas.HDFStore', 'pd.HDFStore', (['fname'], {'mode': '"""r"""'}), "(fname, mode='r')\n", (830, 847), True, 'import pandas as pd\n'), ((852, 887), 'activitysim.core.pipeline.close_on_exit', 'pipeline.close_on_exit', (['file', 'fname'], {}), '(file, fname)\n', (874, 887), False, 'from activitysim.core import pipeline\n'), ((664, 685), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (678, 685), False, 'import os\n')]
# Copyright 2008-2018 pydicom authors. See LICENSE file for details.
"""
Use the jpeg_ls (CharPyLS) python package to decode pixel transfer syntaxes.
"""

try:
    import numpy
    HAVE_NP = True
except ImportError:
    HAVE_NP = False

try:
    import jpeg_ls
    HAVE_JPEGLS = True
except ImportError:
    HAVE_JPEGLS = False

import pydicom.encaps
from pydicom.pixel_data_handlers.util import dtype_corrected_for_endianness
import pydicom.uid

HANDLER_NAME = 'JPEG-LS'

DEPENDENCIES = {
    'numpy': ('http://www.numpy.org/', 'NumPy'),
    'jpeg_ls': ('https://github.com/Who8MyLunch/CharPyLS', 'CharPyLS'),
}

SUPPORTED_TRANSFER_SYNTAXES = [
    pydicom.uid.JPEGLSLossless,
    pydicom.uid.JPEGLSLossy,
]


def is_available():
    """Return True if the handler has its dependencies met."""
    return HAVE_NP and HAVE_JPEGLS


def needs_to_convert_to_RGB(dicom_dataset):
    return False


def should_change_PhotometricInterpretation_to_RGB(dicom_dataset):
    should_change = dicom_dataset.SamplesPerPixel == 3
    return False


def supports_transfer_syntax(transfer_syntax):
    """
    Returns
    -------
    bool
        True if this pixel data handler might support this transfer syntax.

        False to prevent any attempt to try to use this handler
        to decode the given transfer syntax
    """
    return transfer_syntax in SUPPORTED_TRANSFER_SYNTAXES


def get_pixeldata(dicom_dataset):
    """
    Use the jpeg_ls package to decode the PixelData attribute

    Returns
    -------
    numpy.ndarray
        A correctly sized (but not shaped) numpy array
        of the entire data volume

    Raises
    ------
    ImportError
        if the required packages are not available

    NotImplementedError
        if the transfer syntax is not supported

    TypeError
        if the pixel data type is unsupported
    """
    if (dicom_dataset.file_meta.TransferSyntaxUID
            not in SUPPORTED_TRANSFER_SYNTAXES):
        msg = ("The jpeg_ls does not support "
               "this transfer syntax {0}.".format(
                   dicom_dataset.file_meta.TransferSyntaxUID.name))
        raise NotImplementedError(msg)

    if not HAVE_JPEGLS:
        msg = ("The jpeg_ls package is required to use pixel_array "
               "for this transfer syntax {0}, and jpeg_ls could not "
               "be imported.".format(
                   dicom_dataset.file_meta.TransferSyntaxUID.name))
        raise ImportError(msg)

    # Make NumPy format code, e.g. "uint16", "int32" etc
    # from two pieces of info:
    #    dicom_dataset.PixelRepresentation -- 0 for unsigned, 1 for signed;
    #    dicom_dataset.BitsAllocated -- 8, 16, or 32
    if dicom_dataset.PixelRepresentation == 0:
        format_str = 'uint{}'.format(dicom_dataset.BitsAllocated)
    elif dicom_dataset.PixelRepresentation == 1:
        format_str = 'int{}'.format(dicom_dataset.BitsAllocated)
    else:
        format_str = 'bad_pixel_representation'
    try:
        numpy_format = numpy.dtype(format_str)
    except TypeError:
        msg = ("Data type not understood by NumPy: "
               "format='{}', PixelRepresentation={}, "
               "BitsAllocated={}".format(
                   format_str,
                   dicom_dataset.PixelRepresentation,
                   dicom_dataset.BitsAllocated))
        raise TypeError(msg)

    numpy_format = dtype_corrected_for_endianness(
        dicom_dataset.is_little_endian, numpy_format)

    # decompress here
    UncompressedPixelData = bytearray()
    if ('NumberOfFrames' in dicom_dataset
            and dicom_dataset.NumberOfFrames > 1):
        # multiple compressed frames
        CompressedPixelDataSeq = pydicom.encaps.decode_data_sequence(
            dicom_dataset.PixelData)
        # print len(CompressedPixelDataSeq)
        for frame in CompressedPixelDataSeq:
            decompressed_image = jpeg_ls.decode(
                numpy.frombuffer(frame, dtype=numpy.uint8))
            UncompressedPixelData.extend(decompressed_image.tobytes())
    else:
        # single compressed frame
        CompressedPixelData = pydicom.encaps.defragment_data(
            dicom_dataset.PixelData)
        decompressed_image = jpeg_ls.decode(
            numpy.frombuffer(CompressedPixelData, dtype=numpy.uint8))
        UncompressedPixelData.extend(decompressed_image.tobytes())

    pixel_array = numpy.frombuffer(UncompressedPixelData, numpy_format)

    if should_change_PhotometricInterpretation_to_RGB(dicom_dataset):
        dicom_dataset.PhotometricInterpretation = "RGB"

    return pixel_array
[ "numpy.frombuffer", "numpy.dtype", "pydicom.pixel_data_handlers.util.dtype_corrected_for_endianness" ]
[((3358, 3434), 'pydicom.pixel_data_handlers.util.dtype_corrected_for_endianness', 'dtype_corrected_for_endianness', (['dicom_dataset.is_little_endian', 'numpy_format'], {}), '(dicom_dataset.is_little_endian, numpy_format)\n', (3388, 3434), False, 'from pydicom.pixel_data_handlers.util import dtype_corrected_for_endianness\n'), ((4357, 4410), 'numpy.frombuffer', 'numpy.frombuffer', (['UncompressedPixelData', 'numpy_format'], {}), '(UncompressedPixelData, numpy_format)\n', (4373, 4410), False, 'import numpy\n'), ((2979, 3002), 'numpy.dtype', 'numpy.dtype', (['format_str'], {}), '(format_str)\n', (2990, 3002), False, 'import numpy\n'), ((4213, 4269), 'numpy.frombuffer', 'numpy.frombuffer', (['CompressedPixelData'], {'dtype': 'numpy.uint8'}), '(CompressedPixelData, dtype=numpy.uint8)\n', (4229, 4269), False, 'import numpy\n'), ((3898, 3940), 'numpy.frombuffer', 'numpy.frombuffer', (['frame'], {'dtype': 'numpy.uint8'}), '(frame, dtype=numpy.uint8)\n', (3914, 3940), False, 'import numpy\n')]
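The handler above builds the NumPy dtype string from PixelRepresentation and BitsAllocated; a quick standalone check of that mapping (the sample values are chosen for illustration):

import numpy as np

for pixel_representation, bits in [(0, 8), (0, 16), (1, 16)]:
    prefix = 'uint' if pixel_representation == 0 else 'int'
    print(np.dtype('{}{}'.format(prefix, bits)))  # uint8, uint16, int16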
# -*- coding: utf-8 -*-
from sqlalchemy import Table, Column, Integer, String, \
    MetaData, ForeignKey, ForeignKeyConstraint, UniqueConstraint
from sqlalchemy.orm import relationship, backref

from config.config import db, metadata
from schema.users import User
from schema.admingroup import AdminGroup

__tablename__ = 'usersmadmingroup'


class UsersMAdminGroup(db.Model):
    __tablename__ = __tablename__

    id = Column(Integer, primary_key=True)
    #user_id = Column(Integer)
    #admingroup_id = Column(Integer)
    user_id = Column(Integer, ForeignKey(User.id), primary_key=True)
    admingroup_id = Column(Integer, ForeignKey(AdminGroup.id), primary_key=True)
    #user_id = Column(Integer, ForeignKey(User.id))
    #admingroup_id = Column(Integer, ForeignKey(AdminGroup.id))

    __table_args__ = (UniqueConstraint('user_id', 'admingroup_id', name='_user_admingroup'), )

    # If the user is deleted, this mapping row is deleted too.
    user_obj = relationship('User',
                            backref=backref('users', uselist=True, cascade='delete,all', lazy='dynamic'))
    admingroup_obj = relationship('AdminGroup',
                                  backref=backref('admingroups', lazy='dynamic'))
    #user_obj = relationship('User', lazy='dynamic', cascade='all')
    #admingroup_obj = relationship('AdminGroup', lazy='dynamic', cascade='all')

    #__table_args__ = (ForeignKeyConstraint(
    #    #[user_id, admingroup_id], [User.id, AdminGroup.id]), {})
    #    [user_id, admingroup_id], ['user.id', 'admingroup.id']), {})


SchemaUsersMAdminGroup = Table(
    __tablename__, metadata,
    Column('id', Integer, primary_key=True),
    Column('user_id', None, ForeignKey('users.id')),
    #ForeignKey("new.new_id", onupdate="CASCADE",
    #           ondelete="CASCADE"),
    #Column('location_code', Unicode(10)),
    Column('admingroup_id', None, ForeignKey('admingroup.id')),
    UniqueConstraint('user_id', 'admingroup_id')
    #ForeignKeyConstraint(['user_id', 'admingroup_id'], ['user.id',
    #                     'admingroup.id'])
)
[ "sqlalchemy.orm.backref", "sqlalchemy.UniqueConstraint", "sqlalchemy.ForeignKey", "sqlalchemy.Column" ]
[((424, 457), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (430, 457), False, 'from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, ForeignKeyConstraint, UniqueConstraint\n'), ((1601, 1640), 'sqlalchemy.Column', 'Column', (['"""id"""', 'Integer'], {'primary_key': '(True)'}), "('id', Integer, primary_key=True)\n", (1607, 1640), False, 'from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, ForeignKeyConstraint, UniqueConstraint\n'), ((1916, 1960), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""user_id"""', '"""admingroup_id"""'], {}), "('user_id', 'admingroup_id')\n", (1932, 1960), False, 'from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, ForeignKeyConstraint, UniqueConstraint\n'), ((556, 575), 'sqlalchemy.ForeignKey', 'ForeignKey', (['User.id'], {}), '(User.id)\n', (566, 575), False, 'from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, ForeignKeyConstraint, UniqueConstraint\n'), ((631, 656), 'sqlalchemy.ForeignKey', 'ForeignKey', (['AdminGroup.id'], {}), '(AdminGroup.id)\n', (641, 656), False, 'from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, ForeignKeyConstraint, UniqueConstraint\n'), ((814, 883), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""user_id"""', '"""admingroup_id"""'], {'name': '"""_user_admingroup"""'}), "('user_id', 'admingroup_id', name='_user_admingroup')\n", (830, 883), False, 'from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, ForeignKeyConstraint, UniqueConstraint\n'), ((1670, 1692), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""users.id"""'], {}), "('users.id')\n", (1680, 1692), False, 'from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, ForeignKeyConstraint, UniqueConstraint\n'), ((1882, 1909), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""admingroup.id"""'], {}), "('admingroup.id')\n", (1892, 1909), False, 'from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, ForeignKeyConstraint, UniqueConstraint\n'), ((999, 1067), 'sqlalchemy.orm.backref', 'backref', (['"""users"""'], {'uselist': '(True)', 'cascade': '"""delete,all"""', 'lazy': '"""dynamic"""'}), "('users', uselist=True, cascade='delete,all', lazy='dynamic')\n", (1006, 1067), False, 'from sqlalchemy.orm import relationship, backref\n'), ((1170, 1208), 'sqlalchemy.orm.backref', 'backref', (['"""admingroups"""'], {'lazy': '"""dynamic"""'}), "('admingroups', lazy='dynamic')\n", (1177, 1208), False, 'from sqlalchemy.orm import relationship, backref\n')]
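A hedged usage sketch for the association model above (the session handle comes from the Flask-SQLAlchemy style db imported in the row; some_user and some_group are assumed to be existing User/AdminGroup rows):

# Hypothetical: link an existing user to an existing admin group.
link = UsersMAdminGroup(user_id=some_user.id, admingroup_id=some_group.id)
db.session.add(link)
db.session.commit()  # the UniqueConstraint rejects duplicate user/group pairs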
""" This script implements the "sandbox" AWS provisioning method, using device certificate from ECC. It is intended to be invoked from iotprovison, but can also be run stand-alone. """ import os from logging import getLogger import hashlib import binascii from cryptography import x509 from cryptography.hazmat.primitives import serialization from pytrustplatform.ecc_cert_builder import build_certs_from_ecc from pyawsutils.aws_cloudformation import MCHP_SANDBOX_ENDPOINT class AwsSandboxProvisioner: """ Provides "sandbox" provisioning for AWS cloud :param signer_cert_file: Path to file containing the signer certificate :type signer_cert_file: str (path) :param device_cert_file: Path to the file to write the generated device certificate to :type device_cert_file: str (path) :param force_new_device_certificate: Force creation of new device certificate even if it exists already :type force_new_device_certificate: boolean, optional """ def __init__(self, signer_cert_file, device_cert_file="device_aws_sandbox.pem", force_new_device_certificate=False): """ """ self.logger = getLogger(__name__) self.signer_cert_file = signer_cert_file self.device_cert_file = device_cert_file self.force_new_device_certificate = force_new_device_certificate def provision(self, fwinterface): """ Do the actual provisioning Read out device certificate from kit, save it to file, extract "thing name" (AKA subject key identifier), save these items to WINC flash for easy access by application. :param fwinterface: Firmware interface :type fwinterface: :class: ProvisioningFirmwareInterface :return: "Thing name" (Subject Key Identifier) if successful, else None :rtype: str """ thing_name = None self.logger.info("Erasing WINC TLS certificate sector") fwinterface.winc_erase_tls_certificate_sector() if self.force_new_device_certificate or not os.path.isfile(self.device_cert_file): self.logger.info("Generating certificates") device_cert, signer_cert = build_certs_from_ecc(fwinterface.get_firmware_driver(), self.signer_cert_file, self.device_cert_file, force=self.force_new_device_certificate) try: ski = device_cert.extensions.get_extension_for_oid( x509.oid.ExtensionOID.SUBJECT_KEY_IDENTIFIER).value.digest thing_name = binascii.b2a_hex(ski).decode() except x509.ExtensionNotFound: pubkey = device_cert.public_key().public_bytes(encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo) thing_name = hashlib.sha1(pubkey[-65:]).hexdigest() # FIXME: The WINC specific code should be split out, so we can # use this for cellular provisioning also. # Add device certificate for storage in WINC self.logger.info("Sending Device Certificate") fwinterface.winc_add_client_certificate(device_cert.public_bytes(encoding=serialization.Encoding.DER).hex()) # Add signer certifate for storage in WINC self.logger.info("Sending Signer Certificate") fwinterface.winc_add_client_certificate(signer_cert.public_bytes(encoding=serialization.Encoding.DER).hex()) self.logger.info("Transferring certificates to WINC") fwinterface.winc_write_tls_certificates_sector() self.logger.info("Saving thing name in WINC") fwinterface.winc_write_thing_name(thing_name) self.logger.debug("Locking ECC slots 10-12") for slot in [10, 11, 12]: fwinterface.ecc_lock_slot(slot) #Endpoint for Microchip sandbox account endpoint = MCHP_SANDBOX_ENDPOINT self.logger.info("Saving AWS endpoint in WINC") fwinterface.winc_write_endpoint_name(endpoint) return thing_name
[ "hashlib.sha1", "os.path.isfile", "logging.getLogger", "binascii.b2a_hex" ]
[((1168, 1187), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (1177, 1187), False, 'from logging import getLogger\n'), ((2054, 2091), 'os.path.isfile', 'os.path.isfile', (['self.device_cert_file'], {}), '(self.device_cert_file)\n', (2068, 2091), False, 'import os\n'), ((2672, 2693), 'binascii.b2a_hex', 'binascii.b2a_hex', (['ski'], {}), '(ski)\n', (2688, 2693), False, 'import binascii\n'), ((2978, 3004), 'hashlib.sha1', 'hashlib.sha1', (['pubkey[-65:]'], {}), '(pubkey[-65:])\n', (2990, 3004), False, 'import hashlib\n')]
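The thing name above is the certificate's Subject Key Identifier when present, falling back to a SHA-1 over the last 65 bytes of the DER-encoded public key (the raw uncompressed EC point for P-256). A standalone sketch of that fallback using a throwaway key (hypothetical, for illustration; assumes a recent cryptography release where the backend argument is optional):

import hashlib
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec

pubkey = ec.generate_private_key(ec.SECP256R1()).public_key()
der = pubkey.public_bytes(encoding=serialization.Encoding.DER,
                          format=serialization.PublicFormat.SubjectPublicKeyInfo)
# For P-256, the DER SubjectPublicKeyInfo ends with the 65-byte uncompressed point.
print(hashlib.sha1(der[-65:]).hexdigest())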
# Generated by Django 2.0.5 on 2019-02-25 23:13

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('mirari', '0016_auto_20181219_1659'),
    ]

    operations = [
        migrations.AddField(
            model_name='organization',
            name='DASHBOARD_LOGO_WIDTH',
            field=models.PositiveIntegerField(blank=True, default='0', help_text='px', null=True),
        ),
    ]
[ "django.db.models.PositiveIntegerField" ]
[((353, 432), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'blank': '(True)', 'default': '"""0"""', 'help_text': '"""px"""', 'null': '(True)'}), "(blank=True, default='0', help_text='px', null=True)\n", (380, 432), False, 'from django.db import migrations, models\n')]
import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
import torch
import os
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# test
outName = 'Silica64-Y8090-00955-opt1'
master = basins.loadMaster(outName)
dataName = master['dataName']
wqData = waterQuality.DataModelWQ(dataName)
trainset = '00955-Y8090'
testset = '00955-Y0010'
if master['varY'] is not None:
    plotVar = ['00060', '00955']
else:
    plotVar = ['00955']

# point test
yP1, ycP1 = basins.testModel(outName, trainset, wqData=wqData)
errMatC1 = wqData.errBySiteC(ycP1, subset=trainset, varC=master['varYC'])
if master['varY'] is not None:
    errMatQ1 = wqData.errBySiteQ(yP1, subset=trainset, varQ=master['varY'])
yP2, ycP2 = basins.testModel(outName, testset, wqData=wqData)
errMatC2 = wqData.errBySiteC(ycP2, subset=testset, varC=master['varYC'])
if master['varY'] is not None:
    errMatQ2 = wqData.errBySiteQ(yP2, subset=testset, varQ=master['varY'])

# box
dataBox = list()
for k in range(2):
    for var in plotVar:
        if var == '00060':
            temp = [errMatQ1[:, 0, k], errMatQ2[:, 0, k]]
        else:
            ic = master['varYC'].index(var)
            temp = [errMatC1[:, ic, k], errMatC2[:, ic, k]]
        dataBox.append(temp)
fig = figplot.boxPlot(dataBox, label1=['RMSE', 'Corr'],
                      label2=['train', 'test'], sharey=False)
fig.show()

# seq test
siteNoLst = wqData.info['siteNo'].unique().tolist()
basins.testModelSeq(outName, siteNoLst, wqData=wqData)

# time series map
dfCrd = gageII.readData(
    varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
lat = dfCrd['LAT_GAGE'].values
lon = dfCrd['LNG_GAGE'].values
codePdf = usgs.codePdf


def funcMap():
    nM = len(plotVar)
    figM, axM = plt.subplots(nM, 1, figsize=(8, 6))
    axM = np.array([axM]) if nM == 1 else axM
    for k, var in enumerate(plotVar):
        if var == '00060':
            axplot.mapPoint(axM[k], lat, lon, errMatQ2[:, 0, 1], s=12)
            axM[k].set_title('streamflow')
        else:
            ic = master['varYC'].index(var)
            shortName = codePdf.loc[var]['shortName']
            title = '{} {}'.format(shortName, var)
            axplot.mapPoint(axM[k], lat, lon, errMatC2[:, ic, 1], s=12)
            axM[k].set_title(title)
    figP, axP = plt.subplots(nM, 1, figsize=(8, 6))
    axP = np.array([axP]) if nM == 1 else axP
    return figM, axM, figP, axP, lon, lat


def funcPoint(iP, axP):
    siteNo = siteNoLst[iP]
    dfPred, dfObs = basins.loadSeq(outName, siteNo)
    t = dfPred.index.values.astype(np.datetime64)
    tBar = np.datetime64('2000-01-01')
    info1 = wqData.subsetInfo(trainset)
    info2 = wqData.subsetInfo(testset)
    ind1 = info1[info1['siteNo'] == siteNo].index
    ind2 = info2[info2['siteNo'] == siteNo].index
    t1 = info1['date'][ind1].values.astype(np.datetime64)
    t2 = info2['date'][ind2].values.astype(np.datetime64)
    tp = np.concatenate([t1, t2])
    yp = np.concatenate([ycP1[ind1], ycP2[ind2]])
    for k, var in enumerate(plotVar):
        rmse, corr = waterQuality.calErrSeq(dfPred[var], dfObs[var])
        tStr = '{}, rmse [{:.2f} {:.2f}], corr [{:.2f} {:.2f}]'.format(
            siteNo, rmse[0], rmse[1], corr[0], corr[1])
        if var == '00060':
            styLst = '--'
            title = 'streamflow ' + tStr
            axplot.plotTS(axP[k], t, [dfPred[var], dfObs[var]], tBar=tBar,
                          legLst=['LSTM', 'observation'], styLst=styLst, cLst='br')
            axP[k].set_title(title)
        else:
            styLst = '-*'
            shortName = codePdf.loc[var]['shortName']
            title = shortName + ' ' + tStr
            axplot.plotTS(axP[k], t, dfPred[var], tBar=tBar,
                          legLst=['LSTM-sequence'], styLst='-', cLst='b')
            axplot.plotTS(axP[k], tp, yp, legLst=['LSTM-sample'], styLst='*', cLst='g')
            axplot.plotTS(axP[k], t, dfObs[var], legLst=['observation'], styLst='*', cLst='r')
            axP[k].set_title(title)


importlib.reload(figplot)
figM, figP = figplot.clickMap(funcMap, funcPoint)
for ax in figP.axes:
    ax.set_xlim(np.datetime64('2015-01-01'), np.datetime64('2020-01-01'))
figP.canvas.draw()
for ax in figP.axes:
    ax.set_xlim(np.datetime64('1990-01-01'), np.datetime64('1995-01-01'))
figP.canvas.draw()
for ax in figP.axes:
    ax.set_xlim(np.datetime64('1980-01-01'), np.datetime64('2020-01-01'))
figP.canvas.draw()
[ "hydroDL.post.axplot.plotTS", "hydroDL.app.waterQuality.calErrSeq", "hydroDL.post.figplot.clickMap", "numpy.datetime64", "hydroDL.master.basins.testModel", "hydroDL.app.waterQuality.DataModelWQ", "hydroDL.master.basins.loadSeq", "importlib.reload", "hydroDL.master.basins.loadMaster", "numpy.array", "hydroDL.post.axplot.mapPoint", "hydroDL.post.figplot.boxPlot", "hydroDL.data.gageII.readData", "matplotlib.pyplot.subplots", "hydroDL.master.basins.testModelSeq", "numpy.concatenate" ]
[((389, 415), 'hydroDL.master.basins.loadMaster', 'basins.loadMaster', (['outName'], {}), '(outName)\n', (406, 415), False, 'from hydroDL.master import basins\n'), ((455, 489), 'hydroDL.app.waterQuality.DataModelWQ', 'waterQuality.DataModelWQ', (['dataName'], {}), '(dataName)\n', (479, 489), False, 'from hydroDL.app import waterQuality\n'), ((659, 709), 'hydroDL.master.basins.testModel', 'basins.testModel', (['outName', 'trainset'], {'wqData': 'wqData'}), '(outName, trainset, wqData=wqData)\n', (675, 709), False, 'from hydroDL.master import basins\n'), ((903, 952), 'hydroDL.master.basins.testModel', 'basins.testModel', (['outName', 'testset'], {'wqData': 'wqData'}), '(outName, testset, wqData=wqData)\n', (919, 952), False, 'from hydroDL.master import basins\n'), ((1441, 1534), 'hydroDL.post.figplot.boxPlot', 'figplot.boxPlot', (['dataBox'], {'label1': "['RMSE', 'Corr']", 'label2': "['train', 'test']", 'sharey': '(False)'}), "(dataBox, label1=['RMSE', 'Corr'], label2=['train', 'test'],\n sharey=False)\n", (1456, 1534), False, 'from hydroDL.post import axplot, figplot\n'), ((1629, 1683), 'hydroDL.master.basins.testModelSeq', 'basins.testModelSeq', (['outName', 'siteNoLst'], {'wqData': 'wqData'}), '(outName, siteNoLst, wqData=wqData)\n', (1648, 1683), False, 'from hydroDL.master import basins\n'), ((1711, 1780), 'hydroDL.data.gageII.readData', 'gageII.readData', ([], {'varLst': "['LAT_GAGE', 'LNG_GAGE']", 'siteNoLst': 'siteNoLst'}), "(varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)\n", (1726, 1780), False, 'from hydroDL.data import gageII, usgs\n'), ((4231, 4256), 'importlib.reload', 'importlib.reload', (['figplot'], {}), '(figplot)\n', (4247, 4256), False, 'import importlib\n'), ((4270, 4306), 'hydroDL.post.figplot.clickMap', 'figplot.clickMap', (['funcMap', 'funcPoint'], {}), '(funcMap, funcPoint)\n', (4286, 4306), False, 'from hydroDL.post import axplot, figplot\n'), ((1926, 1961), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nM', '(1)'], {'figsize': '(8, 6)'}), '(nM, 1, figsize=(8, 6))\n', (1938, 1961), True, 'import matplotlib.pyplot as plt\n'), ((2474, 2509), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nM', '(1)'], {'figsize': '(8, 6)'}), '(nM, 1, figsize=(8, 6))\n', (2486, 2509), True, 'import matplotlib.pyplot as plt\n'), ((2671, 2702), 'hydroDL.master.basins.loadSeq', 'basins.loadSeq', (['outName', 'siteNo'], {}), '(outName, siteNo)\n', (2685, 2702), False, 'from hydroDL.master import basins\n'), ((2764, 2791), 'numpy.datetime64', 'np.datetime64', (['"""2000-01-01"""'], {}), "('2000-01-01')\n", (2777, 2791), True, 'import numpy as np\n'), ((3097, 3121), 'numpy.concatenate', 'np.concatenate', (['[t1, t2]'], {}), '([t1, t2])\n', (3111, 3121), True, 'import numpy as np\n'), ((3131, 3171), 'numpy.concatenate', 'np.concatenate', (['[ycP1[ind1], ycP2[ind2]]'], {}), '([ycP1[ind1], ycP2[ind2]])\n', (3145, 3171), True, 'import numpy as np\n'), ((1972, 1987), 'numpy.array', 'np.array', (['[axM]'], {}), '([axM])\n', (1980, 1987), True, 'import numpy as np\n'), ((2520, 2535), 'numpy.array', 'np.array', (['[axP]'], {}), '([axP])\n', (2528, 2535), True, 'import numpy as np\n'), ((3232, 3279), 'hydroDL.app.waterQuality.calErrSeq', 'waterQuality.calErrSeq', (['dfPred[var]', 'dfObs[var]'], {}), '(dfPred[var], dfObs[var])\n', (3254, 3279), False, 'from hydroDL.app import waterQuality\n'), ((4345, 4372), 'numpy.datetime64', 'np.datetime64', (['"""2015-01-01"""'], {}), "('2015-01-01')\n", (4358, 4372), True, 'import numpy as np\n'), ((4374, 4401), 'numpy.datetime64', 'np.datetime64', 
(['"""2020-01-01"""'], {}), "('2020-01-01')\n", (4387, 4401), True, 'import numpy as np\n'), ((4460, 4487), 'numpy.datetime64', 'np.datetime64', (['"""1990-01-01"""'], {}), "('1990-01-01')\n", (4473, 4487), True, 'import numpy as np\n'), ((4489, 4516), 'numpy.datetime64', 'np.datetime64', (['"""1995-01-01"""'], {}), "('1995-01-01')\n", (4502, 4516), True, 'import numpy as np\n'), ((4575, 4602), 'numpy.datetime64', 'np.datetime64', (['"""1980-01-01"""'], {}), "('1980-01-01')\n", (4588, 4602), True, 'import numpy as np\n'), ((4604, 4631), 'numpy.datetime64', 'np.datetime64', (['"""2020-01-01"""'], {}), "('2020-01-01')\n", (4617, 4631), True, 'import numpy as np\n'), ((2085, 2143), 'hydroDL.post.axplot.mapPoint', 'axplot.mapPoint', (['axM[k]', 'lat', 'lon', 'errMatQ2[:, 0, 1]'], {'s': '(12)'}), '(axM[k], lat, lon, errMatQ2[:, 0, 1], s=12)\n', (2100, 2143), False, 'from hydroDL.post import axplot, figplot\n'), ((2362, 2421), 'hydroDL.post.axplot.mapPoint', 'axplot.mapPoint', (['axM[k]', 'lat', 'lon', 'errMatC2[:, ic, 1]'], {'s': '(12)'}), '(axM[k], lat, lon, errMatC2[:, ic, 1], s=12)\n', (2377, 2421), False, 'from hydroDL.post import axplot, figplot\n'), ((3512, 3637), 'hydroDL.post.axplot.plotTS', 'axplot.plotTS', (['axP[k]', 't', '[dfPred[var], dfObs[var]]'], {'tBar': 'tBar', 'legLst': "['LSTM', 'observation']", 'styLst': 'styLst', 'cLst': '"""br"""'}), "(axP[k], t, [dfPred[var], dfObs[var]], tBar=tBar, legLst=[\n 'LSTM', 'observation'], styLst=styLst, cLst='br')\n", (3525, 3637), False, 'from hydroDL.post import axplot, figplot\n'), ((3844, 3944), 'hydroDL.post.axplot.plotTS', 'axplot.plotTS', (['axP[k]', 't', 'dfPred[var]'], {'tBar': 'tBar', 'legLst': "['LSTM-sequence']", 'styLst': '"""-"""', 'cLst': '"""b"""'}), "(axP[k], t, dfPred[var], tBar=tBar, legLst=['LSTM-sequence'],\n styLst='-', cLst='b')\n", (3857, 3944), False, 'from hydroDL.post import axplot, figplot\n'), ((3979, 4054), 'hydroDL.post.axplot.plotTS', 'axplot.plotTS', (['axP[k]', 'tp', 'yp'], {'legLst': "['LSTM-sample']", 'styLst': '"""*"""', 'cLst': '"""g"""'}), "(axP[k], tp, yp, legLst=['LSTM-sample'], styLst='*', cLst='g')\n", (3992, 4054), False, 'from hydroDL.post import axplot, figplot\n'), ((4084, 4170), 'hydroDL.post.axplot.plotTS', 'axplot.plotTS', (['axP[k]', 't', 'dfObs[var]'], {'legLst': "['observation']", 'styLst': '"""*"""', 'cLst': '"""r"""'}), "(axP[k], t, dfObs[var], legLst=['observation'], styLst='*',\n cLst='r')\n", (4097, 4170), False, 'from hydroDL.post import axplot, figplot\n')]
import os
import typing

import numpy as np
from aocd import get_data
from dotenv import load_dotenv
from utils import timeit


def get_session() -> str:
    load_dotenv()
    return os.getenv('SESSION_COOKIE')


def get_list(data: str = None, day: int = None, year: int = None) -> typing.List:
    if not data:
        aoc_input = [int(x) for x in get_data(get_session(), day=day, year=year).split(',')]
    else:
        aoc_input = [int(x) for x in data.split(',')]
    return aoc_input


# This method works for 80 days and does not scale for 256 days
@timeit
def part1(aoc_input: typing.List, days: int) -> int:
    aoc_input_copy = []
    aoc_input_copy = flash(aoc_input, aoc_input_copy, days)
    return int(len(aoc_input_copy))


def flash(aoc_input: list, aoc_input_copy: typing.List, days: int) -> typing.List:
    for day in range(days):
        aoc_input_copy = []
        for timer in aoc_input:
            if timer == 0:
                # Each day 0 becomes a 6 and adds a new 8 to the end of the list
                aoc_input_copy.append(6)
                aoc_input_copy.append(8)
            else:
                aoc_input_copy.append(timer - 1)  # Decrease the timer of each fish after each day
        aoc_input = aoc_input_copy
    return aoc_input_copy


@timeit
def part2(aoc_input: typing.List, days: int) -> int:
    np_array = np.zeros(9, dtype=np.float64)  # Get a 1D array of 0's
    for x in aoc_input:
        np_array[x] += 1  # Count the timers
    for day in range(days):
        np_array = np.roll(np_array, -1)  # np.roll is awesome.
        # Saves inserting, deleting, shifting
        np_array[6] += np_array[8]
    return int(np.sum(np_array))


if __name__ == '__main__':
    print(f'Part 1: {part1(get_list(data=None, day=6, year=2021), 80)}')
    print(f'Part 2: {part2(get_list(data=None, day=6, year=2021), 256)}')
[ "numpy.sum", "numpy.roll", "numpy.zeros", "dotenv.load_dotenv", "os.getenv" ]
[((159, 172), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (170, 172), False, 'from dotenv import load_dotenv\n'), ((184, 211), 'os.getenv', 'os.getenv', (['"""SESSION_COOKIE"""'], {}), "('SESSION_COOKIE')\n", (193, 211), False, 'import os\n'), ((1342, 1371), 'numpy.zeros', 'np.zeros', (['(9)'], {'dtype': 'np.float64'}), '(9, dtype=np.float64)\n', (1350, 1371), True, 'import numpy as np\n'), ((1513, 1534), 'numpy.roll', 'np.roll', (['np_array', '(-1)'], {}), '(np_array, -1)\n', (1520, 1534), True, 'import numpy as np\n'), ((1661, 1677), 'numpy.sum', 'np.sum', (['np_array'], {}), '(np_array)\n', (1667, 1677), True, 'import numpy as np\n')]
import pickle
from keras.models import load_model
from sklearn.preprocessing import MultiLabelBinarizer
from gensim import models
from nltk.tokenize import RegexpTokenizer
from stop_words import get_stop_words
import numpy as np
import subprocess

subprocess.call(['sh', 'src/models/get_word2vec.sh'])
with open('data/processed/Genredict.pkl','rb') as f:
    Genre_ID_to_name=pickle.load(f)

model_textual = load_model('models/overview_nn.h5')
w2v_model = models.KeyedVectors.load_word2vec_format('data/external/GoogleNews-vectors-negative300-SLIM.bin', binary=True)
tokenizer = RegexpTokenizer(r'\w+')
en_stop = get_stop_words('en')
with open('models/mlb.pkl','rb') as f:
    mlb=pickle.load(f)

genre_list=sorted(list(Genre_ID_to_name.keys()))


def nn_predict(input_string):
    movie_mean_wordvec=np.zeros((1,300))
    tokens = tokenizer.tokenize(input_string)
    stopped_tokens = [k for k in tokens if not k in en_stop]
    count_in_vocab=0
    s=0
    for tok in stopped_tokens:
        if tok.lower() in w2v_model.vocab:
            count_in_vocab+=1
            s+=w2v_model[tok.lower()]
    if count_in_vocab!=0:
        movie_mean_wordvec[0]=s/float(count_in_vocab)
    pred_array = model_textual.predict(movie_mean_wordvec)
    predicted = np.argsort(pred_array[0])[::-1][:3]
    predicted_genre_Y = np.array([[1 if k in predicted else 0 for k in range(len(pred_array[0]))]])
    predicted_genre_ids = mlb.inverse_transform(predicted_genre_Y)[0]
    predicted_genres = list(map(Genre_ID_to_name.get, predicted_genre_ids))
    return predicted_genres
[ "keras.models.load_model", "nltk.tokenize.RegexpTokenizer", "stop_words.get_stop_words", "numpy.zeros", "numpy.argsort", "pickle.load", "subprocess.call", "gensim.models.KeyedVectors.load_word2vec_format" ]
[((248, 301), 'subprocess.call', 'subprocess.call', (["['sh', 'src/models/get_word2vec.sh']"], {}), "(['sh', 'src/models/get_word2vec.sh'])\n", (263, 301), False, 'import subprocess\n'), ((413, 448), 'keras.models.load_model', 'load_model', (['"""models/overview_nn.h5"""'], {}), "('models/overview_nn.h5')\n", (423, 448), False, 'from keras.models import load_model\n'), ((461, 576), 'gensim.models.KeyedVectors.load_word2vec_format', 'models.KeyedVectors.load_word2vec_format', (['"""data/external/GoogleNews-vectors-negative300-SLIM.bin"""'], {'binary': '(True)'}), "(\n 'data/external/GoogleNews-vectors-negative300-SLIM.bin', binary=True)\n", (501, 576), False, 'from gensim import models\n'), ((584, 607), 'nltk.tokenize.RegexpTokenizer', 'RegexpTokenizer', (['"""\\\\w+"""'], {}), "('\\\\w+')\n", (599, 607), False, 'from nltk.tokenize import RegexpTokenizer\n'), ((618, 638), 'stop_words.get_stop_words', 'get_stop_words', (['"""en"""'], {}), "('en')\n", (632, 638), False, 'from stop_words import get_stop_words\n'), ((377, 391), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (388, 391), False, 'import pickle\n'), ((686, 700), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (697, 700), False, 'import pickle\n'), ((806, 824), 'numpy.zeros', 'np.zeros', (['(1, 300)'], {}), '((1, 300))\n', (814, 824), True, 'import numpy as np\n'), ((1257, 1282), 'numpy.argsort', 'np.argsort', (['pred_array[0]'], {}), '(pred_array[0])\n', (1267, 1282), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 17 02:27:29 2018

@author: james
"""

# -*- coding: utf-8 -*-
"""
Created on Thu Nov 29 15:00:40 2018

@author: JamesChiou
"""

import os
import random
import time

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import transforms
import math
import matplotlib.pyplot as plt


# Define dataset class
class ImageDataset(Dataset):
    def __init__(self, file_path, transform = None):
        df = pd.read_csv(file_path)
        if 'label' in df.columns:
            self.is_train = True
        else:
            self.is_train = False
        if self.is_train:
            # training data
            self.X = df.iloc[:,1:].values.reshape((-1,28,28)).astype(np.uint8)[:,:,:,None]
            self.y = torch.from_numpy(df.iloc[:,0].values)
        else:
            # test data
            self.X = df.iloc[:,1:].values.reshape((-1,28,28)).astype(np.uint8)[:,:,:,None]
            self.y = None
        self.transform = transform

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        if self.y is not None:
            return self.transform(self.X[idx]), self.y[idx]
        else:
            return self.transform(self.X[idx])


# Define WideResNet network ##############################
class BasicBlock(nn.Module):
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                               padding=0, bias=False) or None

    def forward(self, x):
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)


class NetworkBlock(nn.Module):
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(nb_layers):
            layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)


class WideResNet(nn.Module):
    def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
        assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
        n = (depth - 4) // 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(1, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 7)
        out = out.view(-1, self.nChannels)
        return self.fc(out)


def wrn(**kwargs):
    """
    Constructs a Wide Residual Networks.
    """
    model = WideResNet(**kwargs)
    return model


# Model save dir
if not os.path.isdir('models'):
    os.mkdir('models')
modeldir = 'models'

# Use cuda or cpu
if torch.cuda.is_available():
    device = torch.device('cuda:0')
else:
    device = torch.device('cpu')

# Fix random seed for reproducibility
randomSeed = 2018
random.seed(randomSeed)
torch.manual_seed(randomSeed)
np.random.seed(randomSeed)

# Best valid accuracy
best_acc = 0
best_epoch = 0

# Training parameters
n_epoches = 300

# Record
losses = np.zeros((n_epoches))
valid_losses = np.zeros((int(n_epoches/2)))
valid_accuracy = np.zeros((int(n_epoches/2),2))

y_pred = []


def main():
    global best_acc,best_epoch,losses,valid_losses,valid_accuracy
    global n_epoches
    global y_pred

    # Dataset transforms
    transform1 = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomHorizontalFlip(p=1.),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])
    transform2 = transforms.Compose([
        transforms.ToPILImage(),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))])

    # Load dataset
    print('Start loading data')
    test_dataset1 = ImageDataset('data/test.csv', transform1)
    test_dataset2 = ImageDataset('data/test.csv', transform2)

    # Load dataloader
    testloader1 = DataLoader(test_dataset1,batch_size=100,
                             num_workers=2,shuffle=False,pin_memory=True)
    testloader2 = DataLoader(test_dataset2,batch_size=100,
                             num_workers=2,shuffle=False,pin_memory=True)

    y_pred_probs = []
    print('Start predict')

    #1
    model = wrn(num_classes=10, depth=28, widen_factor=10, dropRate=0.,).to(device)
    best_para = torch.load('models/best/CC_epoch_258_nodropout.pth')
    y_pred_prob1 = test(testloader1,model,None,device,best_para,best_epoch)
    y_pred_prob2 = test(testloader2,model,None,device,best_para,best_epoch)
    y_pred_prob = (y_pred_prob1+y_pred_prob2)/2
    y_pred_probs.append(y_pred_prob)
    print('predict complete: 1')

    #2
    model = wrn(num_classes=10, depth=28, widen_factor=10, dropRate=0.3,).to(device)
    best_para = torch.load('models/best/CC_epoch_184_1211_9602.pth')
    y_pred_prob1 = test(testloader1,model,None,device,best_para,best_epoch)
    y_pred_prob2 = test(testloader2,model,None,device,best_para,best_epoch)
    y_pred_prob = (y_pred_prob1+y_pred_prob2)/2
    y_pred_probs.append(y_pred_prob)
    print('predict complete: 2')

    #3
    model = wrn(num_classes=10, depth=28, widen_factor=10, dropRate=0.3,).to(device)
    best_para = torch.load('models/best/CC_epoch_192_1212_9580.pth')
    y_pred_prob1 = test(testloader1,model,None,device,best_para,best_epoch)
    y_pred_prob2 = test(testloader2,model,None,device,best_para,best_epoch)
    y_pred_prob = (y_pred_prob1+y_pred_prob2)/2
    y_pred_probs.append(y_pred_prob)
    print('predict complete: 3')

    #4
    model = wrn(num_classes=10, depth=28, widen_factor=10, dropRate=0.3,).to(device)
    best_para = torch.load('models/best/CC_epoch_240_1217_9595_pseudolabel.pth')
    y_pred_prob1 = test(testloader1,model,None,device,best_para,best_epoch)
    y_pred_prob2 = test(testloader2,model,None,device,best_para,best_epoch)
    y_pred_prob = (y_pred_prob1+y_pred_prob2)/2
    y_pred_probs.append(y_pred_prob)
    print('predict complete: 4')

    y_pred_probs_mean = (y_pred_probs[0]+y_pred_probs[1]+y_pred_probs[2]+y_pred_probs[3])/4
    y_pred_probs_mean = y_pred_probs_mean.cpu().data.numpy()
    y_pred = np.argmax(y_pred_probs_mean, axis=1)
    sub = pd.DataFrame(y_pred, columns=['label'])
    sub.index.name='id'
    sub.to_csv('ensemble_4_hflip.csv', index=True)


def test(testloader,model,criterion,device,best_param,best_epoch):
    # custom select model
    model.load_state_dict(best_param)
    model.eval()
    y_pred_prob = []
    for i_batch, data in enumerate(testloader):
        images = data.to(device)
        outputs = model(images).detach()
        prob = torch.nn.Softmax(dim=1)(outputs)
        y_pred_prob.append(prob)
    y_pred_prob = torch.cat(y_pred_prob)
    #print(y_pred_prob)
    return y_pred_prob


'''
y_pred = y_pred.astype(int)
sub = pd.DataFrame(y_pred, columns=['label'])
sub.index.name='id'
sub.to_csv('answer_wrn_28_10_%d.csv'%best_epoch, index=True)
'''


if __name__ == '__main__':
    main()
[ "os.mkdir", "numpy.random.seed", "numpy.argmax", "pandas.read_csv", "torch.nn.functional.dropout", "torch.cat", "torch.nn.Softmax", "torch.device", "torchvision.transforms.Normalize", "pandas.DataFrame", "torch.utils.data.DataLoader", "torch.nn.functional.avg_pool2d", "torch.load", "torchvision.transforms.ToPILImage", "random.seed", "torch.nn.Linear", "torchvision.transforms.RandomHorizontalFlip", "math.sqrt", "torch.manual_seed", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "torch.cuda.is_available", "torch.from_numpy", "torch.nn.ReLU", "torch.nn.Sequential", "os.path.isdir", "numpy.zeros", "torchvision.transforms.ToTensor" ]
[((5450, 5475), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5473, 5475), False, 'import torch\n'), ((5609, 5632), 'random.seed', 'random.seed', (['randomSeed'], {}), '(randomSeed)\n', (5620, 5632), False, 'import random\n'), ((5633, 5662), 'torch.manual_seed', 'torch.manual_seed', (['randomSeed'], {}), '(randomSeed)\n', (5650, 5662), False, 'import torch\n'), ((5663, 5689), 'numpy.random.seed', 'np.random.seed', (['randomSeed'], {}), '(randomSeed)\n', (5677, 5689), True, 'import numpy as np\n'), ((5799, 5818), 'numpy.zeros', 'np.zeros', (['n_epoches'], {}), '(n_epoches)\n', (5807, 5818), True, 'import numpy as np\n'), ((5360, 5383), 'os.path.isdir', 'os.path.isdir', (['"""models"""'], {}), "('models')\n", (5373, 5383), False, 'import os\n'), ((5389, 5407), 'os.mkdir', 'os.mkdir', (['"""models"""'], {}), "('models')\n", (5397, 5407), False, 'import os\n'), ((5490, 5512), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (5502, 5512), False, 'import torch\n'), ((5532, 5551), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5544, 5551), False, 'import torch\n'), ((6714, 6806), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset1'], {'batch_size': '(100)', 'num_workers': '(2)', 'shuffle': '(False)', 'pin_memory': '(True)'}), '(test_dataset1, batch_size=100, num_workers=2, shuffle=False,\n pin_memory=True)\n', (6724, 6806), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((6846, 6938), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset2'], {'batch_size': '(100)', 'num_workers': '(2)', 'shuffle': '(False)', 'pin_memory': '(True)'}), '(test_dataset2, batch_size=100, num_workers=2, shuffle=False,\n pin_memory=True)\n', (6856, 6938), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((7169, 7221), 'torch.load', 'torch.load', (['"""models/best/CC_epoch_258_nodropout.pth"""'], {}), "('models/best/CC_epoch_258_nodropout.pth')\n", (7179, 7221), False, 'import torch\n'), ((7648, 7700), 'torch.load', 'torch.load', (['"""models/best/CC_epoch_184_1211_9602.pth"""'], {}), "('models/best/CC_epoch_184_1211_9602.pth')\n", (7658, 7700), False, 'import torch\n'), ((8127, 8179), 'torch.load', 'torch.load', (['"""models/best/CC_epoch_192_1212_9580.pth"""'], {}), "('models/best/CC_epoch_192_1212_9580.pth')\n", (8137, 8179), False, 'import torch\n'), ((8606, 8670), 'torch.load', 'torch.load', (['"""models/best/CC_epoch_240_1217_9595_pseudolabel.pth"""'], {}), "('models/best/CC_epoch_240_1217_9595_pseudolabel.pth')\n", (8616, 8670), False, 'import torch\n'), ((9112, 9148), 'numpy.argmax', 'np.argmax', (['y_pred_probs_mean'], {'axis': '(1)'}), '(y_pred_probs_mean, axis=1)\n', (9121, 9148), True, 'import numpy as np\n'), ((9159, 9198), 'pandas.DataFrame', 'pd.DataFrame', (['y_pred'], {'columns': "['label']"}), "(y_pred, columns=['label'])\n", (9171, 9198), True, 'import pandas as pd\n'), ((9670, 9692), 'torch.cat', 'torch.cat', (['y_pred_prob'], {}), '(y_pred_prob)\n', (9679, 9692), False, 'import torch\n'), ((686, 708), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {}), '(file_path)\n', (697, 708), True, 'import pandas as pd\n'), ((1682, 1707), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['in_planes'], {}), '(in_planes)\n', (1696, 1707), True, 'import torch.nn as nn\n'), ((1729, 1750), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1736, 1750), True, 'import torch.nn as nn\n'), ((1772, 1861), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': 
'(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,\n bias=False)\n', (1781, 1861), True, 'import torch.nn as nn\n'), ((1908, 1934), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_planes'], {}), '(out_planes)\n', (1922, 1934), True, 'import torch.nn as nn\n'), ((1956, 1977), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1963, 1977), True, 'import torch.nn as nn\n'), ((1999, 2085), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=\n False)\n', (2008, 2085), True, 'import torch.nn as nn\n'), ((3352, 3374), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (3365, 3374), True, 'import torch.nn as nn\n'), ((3834, 3908), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', 'nChannels[0]'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(1, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)\n', (3843, 3908), True, 'import torch.nn as nn\n'), ((4325, 4353), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nChannels[3]'], {}), '(nChannels[3])\n', (4339, 4353), True, 'import torch.nn as nn\n'), ((4374, 4395), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4381, 4395), True, 'import torch.nn as nn\n'), ((4414, 4450), 'torch.nn.Linear', 'nn.Linear', (['nChannels[3]', 'num_classes'], {}), '(nChannels[3], num_classes)\n', (4423, 4450), True, 'import torch.nn as nn\n'), ((5116, 5136), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['out', '(7)'], {}), '(out, 7)\n', (5128, 5136), True, 'import torch.nn.functional as F\n'), ((999, 1037), 'torch.from_numpy', 'torch.from_numpy', (['df.iloc[:, 0].values'], {}), '(df.iloc[:, 0].values)\n', (1015, 1037), False, 'import torch\n'), ((2656, 2711), 'torch.nn.functional.dropout', 'F.dropout', (['out'], {'p': 'self.droprate', 'training': 'self.training'}), '(out, p=self.droprate, training=self.training)\n', (2665, 2711), True, 'import torch.nn.functional as F\n'), ((6127, 6150), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (6148, 6150), False, 'from torchvision import transforms\n'), ((6168, 6206), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {'p': '(1.0)'}), '(p=1.0)\n', (6199, 6206), False, 'from torchvision import transforms\n'), ((6223, 6244), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6242, 6244), False, 'from torchvision import transforms\n'), ((6262, 6304), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (6282, 6304), False, 'from torchvision import transforms\n'), ((6362, 6385), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (6383, 6385), False, 'from torchvision import transforms\n'), ((6404, 6425), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6423, 6425), False, 'from torchvision import transforms\n'), ((6444, 6486), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (6464, 6486), False, 'from torchvision import transforms\n'), ((9581, 9604), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (9597, 9604), False, 'import torch\n'), ((2251, 2340), 'torch.nn.Conv2d', 
'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(1)', 'stride': 'stride', 'padding': '(0)', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=1, stride=stride, padding=0,\n bias=False)\n', (2260, 2340), True, 'import torch.nn as nn\n'), ((4678, 4696), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (4687, 4696), False, 'import math\n')]
import pymetry
pymetry.circle(60, "brown", 4)
[ "pymetry.circle" ]
[((15, 45), 'pymetry.circle', 'pymetry.circle', (['(60)', '"""brown"""', '(4)'], {}), "(60, 'brown', 4)\n", (29, 45), False, 'import pymetry\n')]
import json import mock import pytest from click.testing import CliRunner from gradient.api_sdk import sdk_exceptions from gradient.api_sdk.clients.http_client import default_headers from gradient.cli import cli from tests import MockResponse, example_responses EXPECTED_HEADERS = default_headers.copy() EXPECTED_HEADERS["ps_client_name"] = "gradient-cli" EXPECTED_HEADERS_WITH_CHANGED_API_KEY = EXPECTED_HEADERS.copy() EXPECTED_HEADERS_WITH_CHANGED_API_KEY["X-API-Key"] = "some_key" @pytest.fixture def basic_options_metrics_stream_websocket_connection_iterator(): def generator(self): yield """{"handle":"nrwed38p","object_type":"notebook","chart_name":"memoryUsage", "pod_metrics":{"nrwed38p":{"time_stamp":1588066152,"value":"54013952"}}}""" yield """{"handle":"nrwed38p","object_type":"notebook","chart_name":"cpuPercentage", "pod_metrics":{"nrwed38p":{"time_stamp":1588066152,"value":"0.006907773333334353"}}}""" yield """{"handle":"nrwed38p","object_type":"notebook","chart_name":"memoryUsage", "pod_metrics":{"nrwed38p":{"time_stamp":1588066155,"value":"12345667"}}}""" raise sdk_exceptions.GradientSdkError() return generator @pytest.fixture def all_options_metrics_stream_websocket_connection_iterator(): def generator(self): yield """{"handle":"nrwed38p","object_type":"notebook","chart_name":"gpuMemoryFree", "pod_metrics":{"nrwed38p":{"time_stamp":1588068626,"value":"1234"}}}""" yield """{"handle":"nrwed38p","object_type":"notebook","chart_name":"gpuMemoryUsed", "pod_metrics":{"nrwed38p":{"time_stamp":1588068646,"value":"32"}}}""" yield """{"handle":"nrwed38p","object_type":"notebook","chart_name":"gpuMemoryFree", "pod_metrics":{"nrwed38p":{"time_stamp":1588068646,"value":"2345"}}}""" raise sdk_exceptions.GradientSdkError() return generator class TestNotebooksCreate(object): URL = "https://api.paperspace.io/notebooks/v2/createNotebook" COMMAND = [ "notebooks", "create", "--machineType", "P5000", "--container", "jupyter/notebook", "--clusterId", "321" ] EXPECTED_REQUEST_JSON = { "vmTypeLabel": "P5000", "containerName": "jupyter/notebook", "clusterId": "321", 'isPreemptible': False, 'isPublic': False, } EXPECTED_RESPONSE_JSON = { "handle": "some_id", "notebookToken": None, "jobId": 20163, "isPublic": False, "id": 1811, "containerName": "jupyter/notebook", } EXPECTED_STDOUT = "Created new notebook with id: some_id\n" \ "https://www.paperspace.com/some_namespace/notebook/prg284tu2\n" COMMAND_WITH_API_KEY_USED = [ "notebooks", "create", "--machineType", "P5000", "--container", "jupyter/notebook", "--clusterId", "321", "--apiKey", "some_key", ] COMMAND_WITH_ALL_OPTIONS = [ "notebooks", "create", "--machineType", "P5000", "--container", "jupyter/notebook", "--clusterId", "321", "--name", "some_notebook_name", "--registryUsername", "some_username", "--registryPassword", "<PASSWORD>", "--command", "some_entrypoint", "--containerUser", "some_container_user", "--shutdownTimeout", "8", "--isPreemptible", ] EXPECTED_REQUEST_JSON_WITH_ALL_OPTIONS = { "vmTypeLabel": "P5000", "containerName": "jupyter/notebook", "clusterId": "321", "name": "some_notebook_name", "registryUsername": "some_username", "registryPassword": "<PASSWORD>", "defaultEntrypoint": "c29tZV9lbnRyeXBvaW50", "containerUser": "some_container_user", "shutdownTimeout": 8, "isPreemptible": True, "isPublic": False, } COMMAND_WITH_OPTIONS_FILE_USED = ["notebooks", "create", "--optionsFile", ] # path added in test RESPONSE_JSON_WITH_WRONG_API_TOKEN = {"status": 400, "message": "Invalid API token"} EXPECTED_STDOUT_WITH_WRONG_API_TOKEN = "Failed to create 
resource: Invalid API token\n" @mock.patch("gradient.api_sdk.clients.http_client.requests.get") @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_send_post_request_and_print_notebook_id(self, post_patched, get_patched): post_patched.return_value = MockResponse(self.EXPECTED_RESPONSE_JSON) get_patched.return_value = MockResponse(example_responses.NOTEBOOK_GET_RESPONSE) runner = CliRunner() result = runner.invoke(cli.cli, self.COMMAND) assert result.output == self.EXPECTED_STDOUT, result.exc_info post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) @mock.patch("gradient.api_sdk.clients.http_client.requests.get") @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_send_changed_headers_when_api_key_option_was_used(self, post_patched, get_patched): post_patched.return_value = MockResponse(self.EXPECTED_RESPONSE_JSON) get_patched.return_value = MockResponse(example_responses.NOTEBOOK_GET_RESPONSE) runner = CliRunner() result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY_USED) assert result.output == self.EXPECTED_STDOUT, result.exc_info post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) @mock.patch("gradient.api_sdk.clients.http_client.requests.get") @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_send_post_request_and_print_notebook_id_when_all_options_were_used(self, post_patched, get_patched): post_patched.return_value = MockResponse(self.EXPECTED_RESPONSE_JSON) get_patched.return_value = MockResponse(example_responses.NOTEBOOK_GET_RESPONSE) runner = CliRunner() result = runner.invoke(cli.cli, self.COMMAND_WITH_ALL_OPTIONS) assert result.output == self.EXPECTED_STDOUT, result.exc_info post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS, json=self.EXPECTED_REQUEST_JSON_WITH_ALL_OPTIONS, data=None, files=None, params=None) @mock.patch("gradient.api_sdk.clients.http_client.requests.get") @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_read_option_from_yaml_file(self, post_patched, get_patched, notebooks_create_config_path): post_patched.return_value = MockResponse(self.EXPECTED_RESPONSE_JSON) get_patched.return_value = MockResponse(example_responses.NOTEBOOK_GET_RESPONSE) command = self.COMMAND_WITH_OPTIONS_FILE_USED[:] + [notebooks_create_config_path] runner = CliRunner() result = runner.invoke(cli.cli, command) assert result.output == self.EXPECTED_STDOUT, result.exc_info post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY, json=self.EXPECTED_REQUEST_JSON_WITH_ALL_OPTIONS, data=None, files=None, params=None) @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_print_valid_error_message_when_command_was_used_with_invalid_api_token(self, post_patched): post_patched.return_value = MockResponse(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400) cli_runner = CliRunner() result = cli_runner.invoke(cli.cli, self.COMMAND) assert result.output == self.EXPECTED_STDOUT_WITH_WRONG_API_TOKEN, result.exc_info post_patched.assert_called_with(self.URL, headers=EXPECTED_HEADERS, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) assert result.exit_code == 0 @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def 
test_should_print_valid_error_message_when_no_content_was_received_in_response(self, post_patched): post_patched.return_value = MockResponse(status_code=400) cli_runner = CliRunner() result = cli_runner.invoke(cli.cli, self.COMMAND) assert result.output == "Failed to create resource\n", result.exc_info post_patched.assert_called_with(self.URL, headers=EXPECTED_HEADERS, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) assert result.exit_code == 0 # TODO: Add test case for creating notebook with tag class TestNotebooksFork(object): URL = "https://api.paperspace.io/notebooks/v2/forkNotebook" COMMAND = [ "notebooks", "fork", "--id", "n1234", ] EXPECTED_REQUEST_JSON = { "notebookId": "n1234", } EXPECTED_RESPONSE_JSON = { "handle": "n1234", "notebookToken": None, "jobId": 20163, "isPublic": False, "id": 1811, } EXPECTED_STDOUT = "Notebook forked to id: n1234\n" COMMAND_WITH_API_KEY_USED = [ "notebooks", "fork", "--id", "n1234", "--apiKey", "some_key", ] RESPONSE_JSON_WITH_WRONG_API_TOKEN = {"status": 400, "message": "Invalid API token"} EXPECTED_STDOUT_WITH_WRONG_API_TOKEN = "Failed to fork notebook: Invalid API token\n" @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_send_post_request_and_print_notebook_id(self, post_patched): post_patched.return_value = MockResponse(self.EXPECTED_RESPONSE_JSON) runner = CliRunner() result = runner.invoke(cli.cli, self.COMMAND) assert result.output == self.EXPECTED_STDOUT, result.exc_info post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_send_changed_headers_when_api_key_option_was_used(self, post_patched): post_patched.return_value = MockResponse(self.EXPECTED_RESPONSE_JSON) runner = CliRunner() result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY_USED) assert result.output == self.EXPECTED_STDOUT, result.exc_info post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_print_valid_error_message_when_command_was_used_with_invalid_api_token(self, post_patched): post_patched.return_value = MockResponse(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400) cli_runner = CliRunner() result = cli_runner.invoke(cli.cli, self.COMMAND) assert result.output == self.EXPECTED_STDOUT_WITH_WRONG_API_TOKEN, result.exc_info post_patched.assert_called_with(self.URL, headers=EXPECTED_HEADERS, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) assert result.exit_code == 0 @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_print_valid_error_message_when_no_content_was_received_in_response(self, post_patched): post_patched.return_value = MockResponse(status_code=400) cli_runner = CliRunner() result = cli_runner.invoke(cli.cli, self.COMMAND) assert result.output == "Failed to fork notebook\n", result.exc_info post_patched.assert_called_with(self.URL, headers=EXPECTED_HEADERS, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) assert result.exit_code == 0 class TestNotebooksStart(object): URL = "https://api.paperspace.io/notebooks/v2/startNotebook" COMMAND = [ "notebooks", "start", "--id", "n123", "--machineType", "c5.xlarge", "--clusterId", "cl123", ] EXPECTED_REQUEST_JSON = { "notebookId": "n123", 
"vmTypeLabel": "c5.xlarge", "clusterId": "cl123", "isPreemptible": False, } EXPECTED_RESPONSE_JSON = { "handle": "n123", "notebookToken": None, "jobId": 20163, "isPublic": False, "id": 1811, "containerId": 123, } EXPECTED_STDOUT = "Started notebook with id: n123\n" COMMAND_WITH_API_KEY_USED = [ "notebooks", "start", "--id", "n123", "--machineType", "c5.xlarge", "--clusterId", "cl123", "--apiKey", "some_key", ] RESPONSE_JSON_WITH_WRONG_API_TOKEN = {"status": 400, "message": "Invalid API token"} EXPECTED_STDOUT_WITH_WRONG_API_TOKEN = "Failed to create resource: Invalid API token\n" EXPECTED_STDOUT_WITH_KEY = "Started notebook with id: n123\n" \ "https://www.paperspace.com/some_namespace/notebook/prg284tu2\n" @mock.patch("gradient.api_sdk.clients.http_client.requests.get") @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_send_changed_headers_when_api_key_option_was_used(self, post_patched, get_patched): post_patched.return_value = MockResponse(self.EXPECTED_RESPONSE_JSON) get_patched.return_value = MockResponse(example_responses.NOTEBOOK_GET_RESPONSE) runner = CliRunner() result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY_USED) assert result.output == self.EXPECTED_STDOUT_WITH_KEY, result.exc_info post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_print_valid_error_message_when_command_was_used_with_invalid_api_token(self, post_patched): post_patched.return_value = MockResponse(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400) cli_runner = CliRunner() result = cli_runner.invoke(cli.cli, self.COMMAND) assert result.output == self.EXPECTED_STDOUT_WITH_WRONG_API_TOKEN, result.exc_info post_patched.assert_called_with(self.URL, headers=EXPECTED_HEADERS, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) assert result.exit_code == 0 @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_print_valid_error_message_when_no_content_was_received_in_response(self, post_patched): post_patched.return_value = MockResponse(status_code=400) cli_runner = CliRunner() result = cli_runner.invoke(cli.cli, self.COMMAND) assert result.output == "Failed to create resource\n", result.exc_info post_patched.assert_called_with(self.URL, headers=EXPECTED_HEADERS, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) assert result.exit_code == 0 class TestNotebooksStop(object): URL = "https://api.paperspace.io/notebooks/v2/stopNotebook" COMMAND = [ "notebooks", "stop", "--id", "n123", ] EXPECTED_REQUEST_JSON = { "notebookId": 'n123', } EXPECTED_STDOUT = "Stopping notebook with id: n123\n" COMMAND_WITH_API_KEY_USED = [ "notebooks", "stop", "--id", "n123", "--apiKey", "some_key", ] RESPONSE_JSON_WITH_WRONG_API_TOKEN = {"status": 400, "message": "Invalid API token"} EXPECTED_STDOUT_WITH_WRONG_API_TOKEN = "Unable to stop instance: Invalid API token\n" @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_send_post_request_and_print_notebook_id(self, post_patched): post_patched.return_value = MockResponse(example_responses.NOTEBOOK_GET_RESPONSE) runner = CliRunner() result = runner.invoke(cli.cli, self.COMMAND) assert result.output == self.EXPECTED_STDOUT, result.exc_info post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) 
@mock.patch("gradient.api_sdk.clients.http_client.requests.get") @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_send_changed_headers_when_api_key_option_was_used(self, post_patched, get_patched): post_patched.return_value = MockResponse(example_responses.NOTEBOOK_GET_RESPONSE) get_patched.return_value = MockResponse(example_responses.NOTEBOOK_GET_RESPONSE) runner = CliRunner() result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY_USED) assert result.output == self.EXPECTED_STDOUT, result.exc_info post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_print_valid_error_message_when_command_was_used_with_invalid_api_token(self, post_patched): post_patched.return_value = MockResponse(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400) cli_runner = CliRunner() result = cli_runner.invoke(cli.cli, self.COMMAND) assert result.output == self.EXPECTED_STDOUT_WITH_WRONG_API_TOKEN, result.exc_info post_patched.assert_called_with(self.URL, headers=EXPECTED_HEADERS, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) assert result.exit_code == 0 @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_print_valid_error_message_when_no_content_was_received_in_response(self, post_patched): post_patched.return_value = MockResponse(status_code=400) cli_runner = CliRunner() result = cli_runner.invoke(cli.cli, self.COMMAND) assert result.output == "Unable to stop instance\n", result.exc_info post_patched.assert_called_with(self.URL, headers=EXPECTED_HEADERS, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) assert result.exit_code == 0 class TestListNotebookArtifacts(object): runner = CliRunner() URL = "https://api.paperspace.io/notebooks/artifactsList" @mock.patch("gradient.api_sdk.clients.http_client.requests.get") def test_should_send_valid_get_request_with_all_parameters_for_a_list_of_artifacts(self, get_patched): get_patched.return_value = MockResponse() notebook_id = "some_notebook_id" result = self.runner.invoke(cli.cli, ["notebooks", "artifacts", "list", "--id", notebook_id, "--apiKey", "some_key", "--size", "--links", "--files", "foo"]) get_patched.assert_called_with(self.URL, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY, json=None, params={"notebookId": notebook_id, "size": True, "links": True, "files": "foo"}) assert result.exit_code == 0 @mock.patch("gradient.api_sdk.clients.http_client.requests.get") @pytest.mark.parametrize('option,param', [("--size", "size"), ("-s", "size"), ("--links", "links"), ("-l", "links")]) def test_should_send_valid_get_request_with_valid_param_for_a_list_of_artifacts_for_both_formats_of_param(self, get_patched, option, param): get_patched.return_value = MockResponse(status_code=200) notebook_id = "some_notebook_id" result = self.runner.invoke(cli.cli, ["notebooks", "artifacts", "list", "--id", notebook_id, "--apiKey", "some_key"] + [option]) get_patched.assert_called_with(self.URL, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY, json=None, params={"notebookId": notebook_id, param: True}) assert result.exit_code == 0 class TestNotebooksDelete(object): URL = "https://api.paperspace.io/notebooks/v2/deleteNotebook" COMMAND = [ "notebooks", "delete", "--id", "some_id", ] EXPECTED_REQUEST_JSON = {"notebookId": "some_id"} EXPECTED_STDOUT = "Notebook deleted\n" COMMAND_WITH_API_KEY_USED = [ 
"notebooks", "delete", "--id", "some_id", "--apiKey", "some_key", ] COMMAND_WITH_OPTIONS_FILE_USED = ["notebooks", "delete", "--optionsFile", ] # path added in test RESPONSE_JSON_WITH_WRONG_API_TOKEN = {"status": 400, "message": "Invalid API token"} EXPECTED_STDOUT_WITH_WRONG_API_TOKEN = "Failed to delete resource: Invalid API token\n" @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_send_post_request_and_print_notebook_id(self, post_patched): post_patched.return_value = MockResponse(status_code=204) runner = CliRunner() result = runner.invoke(cli.cli, self.COMMAND) assert result.output == self.EXPECTED_STDOUT, result.exc_info post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_send_changed_headers_when_api_key_option_was_used(self, post_patched): post_patched.return_value = MockResponse(status_code=204) runner = CliRunner() result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY_USED) assert result.output == self.EXPECTED_STDOUT, result.exc_info post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_read_option_from_yaml_file(self, post_patched, notebooks_delete_config_path): post_patched.return_value = MockResponse(status_code=204) command = self.COMMAND_WITH_OPTIONS_FILE_USED[:] + [notebooks_delete_config_path] runner = CliRunner() result = runner.invoke(cli.cli, command) assert result.output == self.EXPECTED_STDOUT, result.exc_info post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_print_valid_error_message_when_command_was_used_with_invalid_api_token(self, get_patched): get_patched.return_value = MockResponse(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400) cli_runner = CliRunner() result = cli_runner.invoke(cli.cli, self.COMMAND) assert result.output == self.EXPECTED_STDOUT_WITH_WRONG_API_TOKEN, result.exc_info get_patched.assert_called_with(self.URL, headers=EXPECTED_HEADERS, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) assert result.exit_code == 0 @mock.patch("gradient.api_sdk.clients.http_client.requests.post") def test_should_print_valid_error_message_when_no_content_was_received_in_response(self, get_patched): get_patched.return_value = MockResponse(status_code=400) cli_runner = CliRunner() result = cli_runner.invoke(cli.cli, self.COMMAND) assert result.output == "Failed to delete resource\n", result.exc_info get_patched.assert_called_with(self.URL, headers=EXPECTED_HEADERS, json=self.EXPECTED_REQUEST_JSON, data=None, files=None, params=None) assert result.exit_code == 0 class TestNotebooksdetails(object): URL = "https://api.paperspace.io/notebooks/getNotebook" COMMAND = ["notebooks", "details", "--id", "some_id"] EXPECTED_STDOUT = """+---------+-----------------------------------+ | Name | some_name | +---------+-----------------------------------+ | ID | ngw7piq9 | | VM Type | K80 | | State | Running | | FQDN | ngw7piq9.dgradient.paperspace.com | | Tags | | +---------+-----------------------------------+ """ EXPECTED_STDOUT_WITH_TAGS = 
"""+---------+-----------------------------------+ | Name | some_name | +---------+-----------------------------------+ | ID | ngw7piq9 | | VM Type | K80 | | State | Running | | FQDN | ngw7piq9.dgradient.paperspace.com | | Tags | tag1, tag2 | +---------+-----------------------------------+ """ RESPONSE_JSON = example_responses.NOTEBOOK_GET_RESPONSE RESPONSE_JSON_WITH_TAGS = example_responses.NOTEBOOK_GET_RESPONSE_WITH_TAGS COMMAND_WITH_API_KEY_USED = ["notebooks", "details", "--id", "some_id", "--apiKey", "some_key"] COMMAND_WITH_OPTIONS_FILE_USED = ["notebooks", "details", "--optionsFile", ] # path added in test RESPONSE_JSON_WITH_WRONG_API_TOKEN = {"status": 400, "message": "Invalid API token"} EXPECTED_STDOUT_WITH_WRONG_API_TOKEN = "Failed to fetch data: Invalid API token\n" @mock.patch("gradient.api_sdk.clients.http_client.requests.get") def test_should_send_post_request_and_print_notebook_details(self, post_patched): post_patched.return_value = MockResponse(self.RESPONSE_JSON) runner = CliRunner() result = runner.invoke(cli.cli, self.COMMAND) assert result.output == self.EXPECTED_STDOUT, result.exc_info post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS, json={"notebookId": "some_id"}, params=None) @mock.patch("gradient.api_sdk.clients.http_client.requests.get") def test_should_send_post_request_and_print_notebook_details_with_tags(self, post_patched): post_patched.return_value = MockResponse(self.RESPONSE_JSON_WITH_TAGS) runner = CliRunner() result = runner.invoke(cli.cli, self.COMMAND) assert result.output == self.EXPECTED_STDOUT_WITH_TAGS, result.exc_info post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS, json={"notebookId": "some_id"}, params=None) @mock.patch("gradient.api_sdk.clients.http_client.requests.get") def test_should_send_changed_headers_when_api_key_option_was_used(self, post_patched): post_patched.return_value = MockResponse(self.RESPONSE_JSON) runner = CliRunner() result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY_USED) assert result.output == self.EXPECTED_STDOUT, result.exc_info post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY, json={"notebookId": "some_id"}, params=None) @mock.patch("gradient.api_sdk.clients.http_client.requests.get") def test_should_read_option_from_yaml_file(self, post_patched, notebooks_show_config_path): post_patched.return_value = MockResponse(self.RESPONSE_JSON) command = self.COMMAND_WITH_OPTIONS_FILE_USED[:] + [notebooks_show_config_path] runner = CliRunner() result = runner.invoke(cli.cli, command) assert result.output == self.EXPECTED_STDOUT, result.exc_info post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY, json={"notebookId": "some_id"}, params=None) @mock.patch("gradient.api_sdk.clients.http_client.requests.get") def test_should_print_valid_error_message_when_command_was_used_with_invalid_api_token(self, get_patched): get_patched.return_value = MockResponse(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400) cli_runner = CliRunner() result = cli_runner.invoke(cli.cli, self.COMMAND) assert result.output == self.EXPECTED_STDOUT_WITH_WRONG_API_TOKEN, result.exc_info get_patched.assert_called_with(self.URL, headers=EXPECTED_HEADERS, json={"notebookId": "some_id"}, params=None) assert result.exit_code == 0 @mock.patch("gradient.api_sdk.clients.http_client.requests.get") def test_should_print_valid_error_message_when_no_content_was_received_in_response(self, get_patched): get_patched.return_value = 
MockResponse(status_code=400)
        cli_runner = CliRunner()

        result = cli_runner.invoke(cli.cli, self.COMMAND)

        assert result.output == "Failed to fetch data\n", result.exc_info
        get_patched.assert_called_with(self.URL, headers=EXPECTED_HEADERS,
                                       json={"notebookId": "some_id"}, params=None)
        assert result.exit_code == 0


class TestNotebooksList(object):
    URL = "https://api.paperspace.io/notebooks/getNotebooks"
    COMMAND = ["notebooks", "list"]
    COMMAND_WITH_FILTERING_BY_TAGS = [
        "notebooks", "list",
        "--tag", "tag1",
        "--tag", "tag2",
    ]
    EXPECTED_STDOUT = """+--------------------+----------+
| Name               | ID       |
+--------------------+----------+
| job 1              | n1vmfj6x |
| job 1              | nhdf8zf3 |
| My Notebook 123    | nslk5r03 |
| My Notebook 123    | ng9a3tp4 |
| some_name          | ngw7piq9 |
| some_notebook_name | n8h0d5lf |
| some_notebook_name | nl0b6cn0 |
| some_notebook_name | njmq1zju |
| some_notebook_name | nfcuwqu5 |
+--------------------+----------+
"""
    RESPONSE_JSON = example_responses.NOTEBOOKS_LIST_RESPONSE_JSON

    COMMAND_WITH_API_KEY_USED = ["notebooks", "list", "--apiKey", "some_key"]
    COMMAND_WITH_OPTIONS_FILE_USED = ["notebooks", "list", "--optionsFile", ]  # path added in test

    EXPECTED_FILTERS = {
        "filter": {
            "where": {
                "dtDeleted": None,
            },
            "limit": 20,
            "order": "jobId desc",
            "offset": 0,
        },
    }

    RESPONSE_JSON_WITH_WRONG_API_TOKEN = {"status": 400, "message": "Invalid API token"}
    EXPECTED_STDOUT_WITH_WRONG_API_TOKEN = "Failed to fetch data: Invalid API token\n"

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_send_post_request_and_print_notebook_details(self, post_patched):
        post_patched.return_value = MockResponse(self.RESPONSE_JSON)

        runner = CliRunner()
        result = runner.invoke(cli.cli, self.COMMAND)

        assert result.output == self.EXPECTED_STDOUT, result.exc_info
        post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS,
                                             json=None, params=mock.ANY)
        params = post_patched.call_args.kwargs["params"]
        filter_params = params["filter"]
        filter_params = json.loads(filter_params)
        assert filter_params == self.EXPECTED_FILTERS
        assert "tagFilter[0]" not in params

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_send_post_request_and_print_notebook_details_when_filtering_by_tags(self, post_patched):
        post_patched.return_value = MockResponse(self.RESPONSE_JSON)

        runner = CliRunner()
        result = runner.invoke(cli.cli, self.COMMAND_WITH_FILTERING_BY_TAGS)

        assert result.output == self.EXPECTED_STDOUT, result.exc_info
        post_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS,
                                             json=None, params=mock.ANY)
        params = post_patched.call_args.kwargs["params"]
        filter_params = params["filter"]
        filter_params = json.loads(filter_params)
        assert filter_params == self.EXPECTED_FILTERS
        assert "tagFilter[0]" in params
        assert params["tagFilter[0]"] in ("tag1", "tag2")
        assert params["tagFilter[1]"] in ("tag1", "tag2")
        assert params["tagFilter[0]"] != params["tagFilter[1]"]

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_send_changed_headers_when_api_key_option_was_used(self, get_patched):
        get_patched.return_value = MockResponse(self.RESPONSE_JSON)

        runner = CliRunner()
        result = runner.invoke(cli.cli, self.COMMAND_WITH_API_KEY_USED)

        assert result.output == self.EXPECTED_STDOUT, result.exc_info
        get_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                                            json=None, params=mock.ANY)
        params = get_patched.call_args.kwargs["params"]
        filter_params = params["filter"]
        filter_params = json.loads(filter_params)
        assert filter_params == self.EXPECTED_FILTERS

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_read_option_from_yaml_file(self, get_patched, notebooks_list_config_path):
        get_patched.return_value = MockResponse(self.RESPONSE_JSON)
        command = self.COMMAND_WITH_OPTIONS_FILE_USED[:] + [notebooks_list_config_path]

        runner = CliRunner()
        result = runner.invoke(cli.cli, command)

        assert result.output == self.EXPECTED_STDOUT, result.exc_info
        get_patched.assert_called_once_with(self.URL, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                                            json=None, params=mock.ANY)
        params = get_patched.call_args.kwargs["params"]
        filter_params = params["filter"]
        filter_params = json.loads(filter_params)
        assert filter_params == self.EXPECTED_FILTERS

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_print_valid_error_message_when_command_was_used_with_invalid_api_token(self, get_patched):
        get_patched.return_value = MockResponse(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400)
        cli_runner = CliRunner()

        result = cli_runner.invoke(cli.cli, self.COMMAND)

        assert result.output == self.EXPECTED_STDOUT_WITH_WRONG_API_TOKEN, result.exc_info
        get_patched.assert_called_with(self.URL, headers=EXPECTED_HEADERS,
                                       json=None, params=mock.ANY)
        params = get_patched.call_args.kwargs["params"]
        filter_params = params["filter"]
        filter_params = json.loads(filter_params)
        assert filter_params == self.EXPECTED_FILTERS
        assert result.exit_code == 0

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_print_valid_error_message_when_no_content_was_received_in_response(self, get_patched):
        get_patched.return_value = MockResponse(status_code=400)
        cli_runner = CliRunner()

        result = cli_runner.invoke(cli.cli, self.COMMAND)

        assert result.output == "Failed to fetch data\n", result.exc_info
        get_patched.assert_called_with(self.URL, headers=EXPECTED_HEADERS,
                                       json=None, params=mock.ANY)
        params = get_patched.call_args.kwargs["params"]
        filter_params = params["filter"]
        filter_params = json.loads(filter_params)
        assert filter_params == self.EXPECTED_FILTERS
        assert result.exit_code == 0


class TestNotebooksMetricsGetCommand(object):
    GET_NOTEBOOK_URL = "https://api.paperspace.io/notebooks/getNotebook"
    GET_METRICS_URL = "https://aws-testing.paperspace.io/metrics/api/v1/range"
    BASIC_OPTIONS_COMMAND = [
        "notebooks", "metrics", "get",
        "--id", "ngw7piq9",
    ]
    ALL_OPTIONS_COMMAND = [
        "notebooks", "metrics", "get",
        "--id", "ngw7piq9",
        "--metric", "gpuMemoryFree",
        "--metric", "gpuMemoryUsed",
        "--interval", "20s",
        "--start", "2020-04-01",
        "--end", "2020-04-02 21:37:00",
        "--apiKey", "some_key",
    ]
    FULL_OPTIONS_COMMAND_WITH_OPTIONS_FILE = [
        "notebooks", "metrics", "get",
        "--optionsFile",  # path added in test
    ]

    GET_NOTEBOOK_REQUEST_JSON = {"notebookId": "ngw7piq9"}
    BASIC_COMMAND_GET_METRICS_REQUEST_PARAMS = {
        "start": "2019-09-03T11:10:36Z",
        "handle": "ngw7piq9",
        "interval": "30s",
        "charts": "cpuPercentage,memoryUsage",
        "objecttype": "notebook",
    }
    ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS = {
        "start": "2020-04-01T00:00:00Z",
        "handle": "ngw7piq9",
        "interval": "20s",
        "charts": "gpuMemoryFree,gpuMemoryUsed",
        "objecttype": "notebook",
        "end": "2020-04-02T21:37:00Z",
    }

    GET_NOTEBOOK_RESPONSE_JSON = example_responses.NOTEBOOK_GET_RESPONSE
    GET_METRICS_RESPONSE_JSON = example_responses.NOTEBOOKS_METRICS_GET_RESPONSE
    EXPECTED_STDOUT = """{
    "cpuPercentage": {
        "npmnnm6e": [
            {"time_stamp": 1587993000, "value": "0"},
            {"time_stamp": 1587993030, "value": "0"},
            {"time_stamp": 1587993060, "value": "0"},
            {"time_stamp": 1587993090, "value": "0"},
            {"time_stamp": 1587993120, "value": "0"},
            {"time_stamp": 1587993150, "value": "0"},
            {"time_stamp": 1587993180, "value": "0"},
            {"time_stamp": 1587993210, "value": "0"},
            {"time_stamp": 1587993240, "value": "0"},
            {"time_stamp": 1587993270, "value": "0"},
            {"time_stamp": 1587993300, "value": "0"},
            {"time_stamp": 1587993330, "value": "0"},
            {"time_stamp": 1587993360, "value": "0"}
        ]
    },
    "memoryUsage": {
        "npmnnm6e": [
            {"time_stamp": 1587992970, "value": "0"},
            {"time_stamp": 1587993000, "value": "782336"},
            {"time_stamp": 1587993030, "value": "782336"},
            {"time_stamp": 1587993060, "value": "782336"},
            {"time_stamp": 1587993090, "value": "782336"},
            {"time_stamp": 1587993120, "value": "782336"},
            {"time_stamp": 1587993150, "value": "782336"},
            {"time_stamp": 1587993180, "value": "782336"},
            {"time_stamp": 1587993210, "value": "782336"},
            {"time_stamp": 1587993240, "value": "782336"},
            {"time_stamp": 1587993270, "value": "782336"},
            {"time_stamp": 1587993300, "value": "782336"},
            {"time_stamp": 1587993330, "value": "782336"},
            {"time_stamp": 1587993360, "value": "782336"}
        ]
    }
}
"""
    EXPECTED_STDOUT_WHEN_INVALID_API_KEY_WAS_USED = "Failed to fetch data: Invalid API token\n"
    EXPECTED_STDOUT_WHEN_EXPERIMENT_WAS_NOT_FOUND = "Failed to fetch data: Not found. " \
                                                    "Please contact <EMAIL> for help.\n"
    EXPECTED_STDOUT_WHEN_NO_METRICS_WERE_FOUND = """{
    "cpuPercentage": null,
    "memoryUsage": null
}
"""
    EXPECTED_STDOUT_WHEN_ERROR_CODE_WAS_RETURNED_WITHOUT_ERROR_MESSAGE = "Failed to fetch data\n"

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_read_all_available_metrics_when_metrics_get_command_was_used_with_basic_options(self, get_patched):
        get_patched.side_effect = [
            MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON),
            MockResponse(self.GET_METRICS_RESPONSE_JSON),
        ]

        runner = CliRunner()
        result = runner.invoke(cli.cli, self.BASIC_OPTIONS_COMMAND)

        assert json.loads(result.output.strip()) == json.loads(self.EXPECTED_STDOUT.strip()), \
            str(result.output) + str(result.exc_info)
        get_patched.assert_has_calls(
            [
                mock.call(
                    self.GET_NOTEBOOK_URL,
                    json=self.GET_NOTEBOOK_REQUEST_JSON,
                    params=None,
                    headers=EXPECTED_HEADERS,
                ),
                mock.call(
                    self.GET_METRICS_URL,
                    json=None,
                    params=self.BASIC_COMMAND_GET_METRICS_REQUEST_PARAMS,
                    headers=EXPECTED_HEADERS,
                ),
            ]
        )
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_read_metrics_when_metrics_get_command_was_used_with_all_options(self, get_patched):
        get_patched.side_effect = [
            MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON),
            MockResponse(self.GET_METRICS_RESPONSE_JSON),
        ]

        runner = CliRunner()
        result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)

        # comparing objects instead of strings because Py2 and Py3 produce slightly different outputs
        assert json.loads(result.output.strip()) == json.loads(self.EXPECTED_STDOUT.strip()), result.exc_info
        get_patched.assert_has_calls(
            [
                mock.call(
                    self.GET_NOTEBOOK_URL,
                    json=self.GET_NOTEBOOK_REQUEST_JSON,
                    params=None,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
                mock.call(
                    self.GET_METRICS_URL,
                    json=None,
                    params=self.ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
            ]
        )
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_read_metrics_when_metrics_get_was_executed_and_options_file_was_used(
            self, get_patched, notebooks_metrics_get_config_path):
        get_patched.side_effect = [
            MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON),
            MockResponse(self.GET_METRICS_RESPONSE_JSON),
        ]
        command = self.FULL_OPTIONS_COMMAND_WITH_OPTIONS_FILE[:] + [notebooks_metrics_get_config_path]

        runner = CliRunner()
        result = runner.invoke(cli.cli, command)

        # comparing objects instead of strings because Py2 and Py3 produce slightly different outputs
        assert json.loads(result.output.strip()) == json.loads(self.EXPECTED_STDOUT.strip()), result.exc_info
        get_patched.assert_has_calls(
            [
                mock.call(
                    self.GET_NOTEBOOK_URL,
                    json=self.GET_NOTEBOOK_REQUEST_JSON,
                    params=None,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
                mock.call(
                    self.GET_METRICS_URL,
                    json=None,
                    params=self.ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
            ]
        )
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_print_valid_error_message_when_invalid_api_key_was_used(self, get_patched):
        get_patched.return_value = MockResponse({"status": 400, "message": "Invalid API token"}, status_code=403)

        runner = CliRunner()
        result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)

        assert result.output == self.EXPECTED_STDOUT_WHEN_INVALID_API_KEY_WAS_USED, result.exc_info
        get_patched.assert_called_once_with(
            self.GET_NOTEBOOK_URL,
            json=self.GET_NOTEBOOK_REQUEST_JSON,
            params=None,
            headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
        )
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_print_valid_error_message_when_deployment_was_not_found(self, get_patched):
        get_patched.side_effect = [
            MockResponse({"error": {"name": "ApplicationError", "status": 404,
                                    "message": "Not found. Please contact <EMAIL> for help."}},
                         status_code=404),
        ]

        runner = CliRunner()
        result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)

        assert result.output == self.EXPECTED_STDOUT_WHEN_EXPERIMENT_WAS_NOT_FOUND, result.exc_info
        get_patched.assert_has_calls(
            [
                mock.call(
                    self.GET_NOTEBOOK_URL,
                    json=self.GET_NOTEBOOK_REQUEST_JSON,
                    params=None,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
            ]
        )
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_print_valid_message_when_was_no_metrics_were_returned(self, get_patched):
        get_patched.side_effect = [
            MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON),
            MockResponse(example_responses.NOTEBOOKS_METRICS_GET_RESPONSE_WHEN_NO_METRICS_WERE_FOUND),
        ]

        runner = CliRunner()
        result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)

        assert json.loads(result.output.strip()) == json.loads(self.EXPECTED_STDOUT_WHEN_NO_METRICS_WERE_FOUND.strip()), \
            str(result.output) + str(result.exc_info)
        get_patched.assert_has_calls(
            [
                mock.call(
                    self.GET_NOTEBOOK_URL,
                    json=self.GET_NOTEBOOK_REQUEST_JSON,
                    params=None,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
                mock.call(
                    self.GET_METRICS_URL,
                    json=None,
                    params=self.ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
            ]
        )
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_print_valid_error_message_when_error_code_was_returned_without_error_message(self, get_patched):
        get_patched.side_effect = [
            MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON),
            MockResponse(status_code=500),
        ]

        runner = CliRunner()
        result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)

        assert result.output == self.EXPECTED_STDOUT_WHEN_ERROR_CODE_WAS_RETURNED_WITHOUT_ERROR_MESSAGE, \
            result.exc_info
        get_patched.assert_has_calls(
            [
                mock.call(
                    self.GET_NOTEBOOK_URL,
                    json=self.GET_NOTEBOOK_REQUEST_JSON,
                    params=None,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
                mock.call(
                    self.GET_METRICS_URL,
                    json=None,
                    params=self.ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS,
                    headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
                ),
            ]
        )
        assert result.exit_code == 0, result.exc_info


class TestExperimentsMetricsStreamCommand(object):
    GET_NOTEBOOK_URL = "https://api.paperspace.io/notebooks/getNotebook"
    GET_METRICS_URL = "https://aws-testing.paperspace.io/metrics/api/v1/stream"
    BASIC_OPTIONS_COMMAND = [
        "notebooks", "metrics", "stream",
        "--id", "ngw7piq9",
    ]
    ALL_OPTIONS_COMMAND = [
        "notebooks", "metrics", "stream",
        "--id", "ngw7piq9",
        "--metric", "gpuMemoryFree",
        "--metric", "gpuMemoryUsed",
        "--interval", "20s",
        "--apiKey", "some_key",
    ]
    ALL_OPTIONS_COMMAND_WITH_OPTIONS_FILE = [
        "notebooks", "metrics", "stream",
        "--optionsFile",  # path added in test
    ]

    GET_NOTEBOOK_REQUEST_JSON = {"notebookId": "ngw7piq9"}
    BASIC_COMMAND_CHART_DESCRIPTOR = '{"chart_names": ["cpuPercentage", "memoryUsage"], "handles": ["ngw7piq9"' \
                                     '], "object_type": "notebook", "poll_interval": "30s"}'
    ALL_COMMANDS_CHART_DESCRIPTOR = '{"chart_names": ["gpuMemoryFree", "gpuMemoryUsed"], "handles": ["ngw7piq9' \
                                    '"], "object_type": "notebook", "poll_interval": "20s"}'

    GET_NOTEBOOK_RESPONSE_JSON = example_responses.NOTEBOOK_GET_RESPONSE
    GET_NOTEBOOK_RESPONSE_JSON_WHEN_NOTEBOOK_NOT_FOUND = {
        "error": {
            "name": "ApplicationError",
            "status": 404,
            "message": "Not found. Please contact <EMAIL> for help.",
        },
    }

    EXPECTED_TABLE_1 = """+----------+---------------+-------------+
| Pod      | cpuPercentage | memoryUsage |
+----------+---------------+-------------+
| nrwed38p |               | 54013952    |
+----------+---------------+-------------+
"""
    EXPECTED_TABLE_2 = """+----------+----------------------+-------------+
| Pod      | cpuPercentage        | memoryUsage |
+----------+----------------------+-------------+
| nrwed38p | 0.006907773333334353 | 54013952    |
+----------+----------------------+-------------+
"""
    EXPECTED_TABLE_3 = """+----------+----------------------+-------------+
| Pod      | cpuPercentage        | memoryUsage |
+----------+----------------------+-------------+
| nrwed38p | 0.006907773333334353 | 12345667    |
+----------+----------------------+-------------+
"""
    ALL_OPTIONS_EXPECTED_TABLE_1 = """+----------+---------------+---------------+
| Pod      | gpuMemoryFree | gpuMemoryUsed |
+----------+---------------+---------------+
| nrwed38p | 1234          |               |
+----------+---------------+---------------+
"""
    ALL_OPTIONS_EXPECTED_TABLE_2 = """+----------+---------------+---------------+
| Pod      | gpuMemoryFree | gpuMemoryUsed |
+----------+---------------+---------------+
| nrwed38p | 1234          |               |
+----------+---------------+---------------+
"""
    ALL_OPTIONS_EXPECTED_TABLE_3 = """+----------+---------------+---------------+
| Pod      | gpuMemoryFree | gpuMemoryUsed |
+----------+---------------+---------------+
| nrwed38p | 2345          | 32            |
+----------+---------------+---------------+
"""
    EXPECTED_STDOUT_WHEN_INVALID_API_KEY_WAS_USED = "Failed to fetch data: Incorrect API Key provided\nForbidden\n"
    EXPECTED_STDOUT_WHEN_DEPLOYMENT_WAS_NOT_FOUND = "Failed to fetch data: Not found. Please contact " \
                                                    "<EMAIL> for help.\n"

    @mock.patch("gradient.api_sdk.repositories.common.websocket.create_connection")
    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_read_all_available_metrics_when_metrics_get_command_was_used_with_basic_options(
            self, get_patched, create_ws_connection_patched,
            basic_options_metrics_stream_websocket_connection_iterator):
        get_patched.return_value = MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON)
        ws_connection_instance_mock = mock.MagicMock()
        ws_connection_instance_mock.__iter__ = basic_options_metrics_stream_websocket_connection_iterator
        create_ws_connection_patched.return_value = ws_connection_instance_mock

        runner = CliRunner()
        result = runner.invoke(cli.cli, self.BASIC_OPTIONS_COMMAND)

        assert self.EXPECTED_TABLE_1 in result.output, result.exc_info
        assert self.EXPECTED_TABLE_2 in result.output, result.exc_info
        assert self.EXPECTED_TABLE_3 in result.output, result.exc_info
        get_patched.assert_called_once_with(
            self.GET_NOTEBOOK_URL,
            json=self.GET_NOTEBOOK_REQUEST_JSON,
            params=None,
            headers=EXPECTED_HEADERS,
        )
        ws_connection_instance_mock.send.assert_called_once_with(self.BASIC_COMMAND_CHART_DESCRIPTOR)
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.api_sdk.repositories.common.websocket.create_connection")
    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_read_metrics_when_metrics_get_command_was_used_with_all_options(
            self, get_patched, create_ws_connection_patched,
            all_options_metrics_stream_websocket_connection_iterator):
        get_patched.return_value = MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON)
        ws_connection_instance_mock = mock.MagicMock()
        ws_connection_instance_mock.__iter__ = all_options_metrics_stream_websocket_connection_iterator
        create_ws_connection_patched.return_value = ws_connection_instance_mock

        runner = CliRunner()
        result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)

        assert self.ALL_OPTIONS_EXPECTED_TABLE_1 in result.output, result.exc_info
        assert self.ALL_OPTIONS_EXPECTED_TABLE_2 in result.output, result.exc_info
        assert self.ALL_OPTIONS_EXPECTED_TABLE_3 in result.output, result.exc_info
        get_patched.assert_called_once_with(
            self.GET_NOTEBOOK_URL,
            json=self.GET_NOTEBOOK_REQUEST_JSON,
            params=None,
            headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
        )
        ws_connection_instance_mock.send.assert_called_once_with(self.ALL_COMMANDS_CHART_DESCRIPTOR)
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.api_sdk.repositories.common.websocket.create_connection")
    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_read_metrics_when_metrics_get_was_executed_and_options_file_was_used(
            self, get_patched, create_ws_connection_patched,
            all_options_metrics_stream_websocket_connection_iterator,
            notebooks_metrics_stream_config_path):
        get_patched.return_value = MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON)
        ws_connection_instance_mock = mock.MagicMock()
        ws_connection_instance_mock.__iter__ = all_options_metrics_stream_websocket_connection_iterator
        create_ws_connection_patched.return_value = ws_connection_instance_mock
        command = self.ALL_OPTIONS_COMMAND_WITH_OPTIONS_FILE[:] + [notebooks_metrics_stream_config_path]

        runner = CliRunner()
        result = runner.invoke(cli.cli, command)

        assert self.ALL_OPTIONS_EXPECTED_TABLE_1 in result.output, result.exc_info
        assert self.ALL_OPTIONS_EXPECTED_TABLE_2 in result.output, result.exc_info
        assert self.ALL_OPTIONS_EXPECTED_TABLE_3 in result.output, result.exc_info
        get_patched.assert_called_once_with(
            self.GET_NOTEBOOK_URL,
            json=self.GET_NOTEBOOK_REQUEST_JSON,
            params=None,
            headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
        )
        ws_connection_instance_mock.send.assert_called_once_with(self.ALL_COMMANDS_CHART_DESCRIPTOR)
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.api_sdk.repositories.common.websocket.create_connection")
    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_print_valid_error_message_when_invalid_api_key_was_used(
            self, get_patched, create_ws_connection_patched):
        get_patched.return_value = MockResponse({"status": 400, "message": "Invalid API token"}, 400)

        runner = CliRunner()
        result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)

        assert "Failed to fetch data: Invalid API token\n" == result.output, result.exc_info
        get_patched.assert_called_once_with(
            self.GET_NOTEBOOK_URL,
            json=self.GET_NOTEBOOK_REQUEST_JSON,
            params=None,
            headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
        )
        create_ws_connection_patched.assert_not_called()
        assert result.exit_code == 0, result.exc_info

    @mock.patch("gradient.api_sdk.repositories.common.websocket.create_connection")
    @mock.patch("gradient.api_sdk.clients.http_client.requests.get")
    def test_should_print_valid_error_message_when_deployment_was_not_found(
            self, get_patched, create_ws_connection_patched):
        get_patched.return_value = MockResponse(self.GET_NOTEBOOK_RESPONSE_JSON_WHEN_NOTEBOOK_NOT_FOUND, 404)

        runner = CliRunner()
        result = runner.invoke(cli.cli, self.ALL_OPTIONS_COMMAND)

        assert result.output == self.EXPECTED_STDOUT_WHEN_DEPLOYMENT_WAS_NOT_FOUND, result.exc_info
        get_patched.assert_called_once_with(
            self.GET_NOTEBOOK_URL,
            json=self.GET_NOTEBOOK_REQUEST_JSON,
            params=None,
            headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY,
        )
        create_ws_connection_patched.assert_not_called()
        assert result.exit_code == 0, result.exc_info
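# --- Illustrative sketch (not part of the original test suite). The tests
# above import MockResponse from the tests package; assuming it only has to
# mimic the parts of requests.Response that the HTTP client reads, a minimal
# stand-in could look roughly like this (names and defaults are guesses,
# kept commented out so it does not shadow the real import):
#
# class MockResponse(object):
#     def __init__(self, json_data=None, status_code=200):
#         self.json_data = json_data
#         self.status_code = status_code
#         self.ok = status_code < 400
#
#     def json(self):
#         if self.json_data is None:
#             raise ValueError("No JSON content")  # mirrors requests raising on empty bodies
#         return self.json_data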
[ "gradient.api_sdk.clients.http_client.default_headers.copy", "json.loads", "mock.call", "gradient.api_sdk.sdk_exceptions.GradientSdkError", "mock.patch", "mock.MagicMock", "tests.MockResponse", "pytest.mark.parametrize", "click.testing.CliRunner" ]
[((284, 306), 'gradient.api_sdk.clients.http_client.default_headers.copy', 'default_headers.copy', ([], {}), '()\n', (304, 306), False, 'from gradient.api_sdk.clients.http_client import default_headers\n'), ((4187, 4250), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (4197, 4250), False, 'import mock\n'), ((4256, 4320), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (4266, 4320), False, 'import mock\n'), ((5118, 5181), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (5128, 5181), False, 'import mock\n'), ((5187, 5251), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (5197, 5251), False, 'import mock\n'), ((6098, 6161), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (6108, 6161), False, 'import mock\n'), ((6167, 6231), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (6177, 6231), False, 'import mock\n'), ((7090, 7153), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (7100, 7153), False, 'import mock\n'), ((7159, 7223), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (7169, 7223), False, 'import mock\n'), ((8161, 8225), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (8171, 8225), False, 'import mock\n'), ((9005, 9069), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (9015, 9069), False, 'import mock\n'), ((10643, 10707), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (10653, 10707), False, 'import mock\n'), ((11403, 11467), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (11413, 11467), False, 'import mock\n'), ((12212, 12276), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (12222, 12276), False, 'import mock\n'), ((13056, 13120), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (13066, 13120), False, 'import mock\n'), ((15068, 15131), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (15078, 15131), False, 'import mock\n'), ((15137, 15201), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (15147, 
15201), False, 'import mock\n'), ((16057, 16121), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (16067, 16121), False, 'import mock\n'), ((16901, 16965), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (16911, 16965), False, 'import mock\n'), ((18318, 18382), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (18328, 18382), False, 'import mock\n'), ((19090, 19153), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (19100, 19153), False, 'import mock\n'), ((19159, 19223), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (19169, 19223), False, 'import mock\n'), ((20082, 20146), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (20092, 20146), False, 'import mock\n'), ((20926, 20990), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (20936, 20990), False, 'import mock\n'), ((21773, 21784), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (21782, 21784), False, 'from click.testing import CliRunner\n'), ((21853, 21916), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (21863, 21916), False, 'import mock\n'), ((22880, 22943), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (22890, 22943), False, 'import mock\n'), ((22949, 23069), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""option,param"""', "[('--size', 'size'), ('-s', 'size'), ('--links', 'links'), ('-l', 'links')]"], {}), "('option,param', [('--size', 'size'), ('-s', 'size'),\n ('--links', 'links'), ('-l', 'links')])\n", (22972, 23069), False, 'import pytest\n'), ((25034, 25098), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (25044, 25098), False, 'import mock\n'), ((25782, 25846), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (25792, 25846), False, 'import mock\n'), ((26579, 26643), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (26589, 26643), False, 'import mock\n'), ((27450, 27514), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (27460, 27514), False, 'import mock\n'), ((28286, 28350), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.post"""'], {}), "('gradient.api_sdk.clients.http_client.requests.post')\n", (28296, 28350), False, 'import mock\n'), ((30687, 30750), 'mock.patch', 'mock.patch', 
(['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (30697, 30750), False, 'import mock\n'), ((31328, 31391), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (31338, 31391), False, 'import mock\n'), ((31999, 32062), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (32009, 32062), False, 'import mock\n'), ((32684, 32747), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (32694, 32747), False, 'import mock\n'), ((33439, 33502), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (33449, 33502), False, 'import mock\n'), ((34172, 34235), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (34182, 34235), False, 'import mock\n'), ((36228, 36291), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (36238, 36291), False, 'import mock\n'), ((37098, 37161), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (37108, 37161), False, 'import mock\n'), ((38190, 38253), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (38200, 38253), False, 'import mock\n'), ((39053, 39116), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (39063, 39116), False, 'import mock\n'), ((39986, 40049), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (39996, 40049), False, 'import mock\n'), ((40903, 40966), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (40913, 40966), False, 'import mock\n'), ((45918, 45981), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (45928, 45981), False, 'import mock\n'), ((47115, 47178), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (47125, 47178), False, 'import mock\n'), ((48397, 48460), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (48407, 48460), False, 'import mock\n'), ((49817, 49880), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (49827, 49880), False, 'import mock\n'), ((50621, 50684), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (50631, 50684), False, 'import 
mock\n'), ((51626, 51689), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (51636, 51689), False, 'import mock\n'), ((52909, 52972), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (52919, 52972), False, 'import mock\n'), ((57477, 57555), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.repositories.common.websocket.create_connection"""'], {}), "('gradient.api_sdk.repositories.common.websocket.create_connection')\n", (57487, 57555), False, 'import mock\n'), ((57561, 57624), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (57571, 57624), False, 'import mock\n'), ((58860, 58938), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.repositories.common.websocket.create_connection"""'], {}), "('gradient.api_sdk.repositories.common.websocket.create_connection')\n", (58870, 58938), False, 'import mock\n'), ((58944, 59007), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (58954, 59007), False, 'import mock\n'), ((60278, 60356), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.repositories.common.websocket.create_connection"""'], {}), "('gradient.api_sdk.repositories.common.websocket.create_connection')\n", (60288, 60356), False, 'import mock\n'), ((60362, 60425), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (60372, 60425), False, 'import mock\n'), ((61838, 61916), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.repositories.common.websocket.create_connection"""'], {}), "('gradient.api_sdk.repositories.common.websocket.create_connection')\n", (61848, 61916), False, 'import mock\n'), ((61922, 61985), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (61932, 61985), False, 'import mock\n'), ((62759, 62837), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.repositories.common.websocket.create_connection"""'], {}), "('gradient.api_sdk.repositories.common.websocket.create_connection')\n", (62769, 62837), False, 'import mock\n'), ((62843, 62906), 'mock.patch', 'mock.patch', (['"""gradient.api_sdk.clients.http_client.requests.get"""'], {}), "('gradient.api_sdk.clients.http_client.requests.get')\n", (62853, 62906), False, 'import mock\n'), ((1151, 1184), 'gradient.api_sdk.sdk_exceptions.GradientSdkError', 'sdk_exceptions.GradientSdkError', ([], {}), '()\n', (1182, 1184), False, 'from gradient.api_sdk import sdk_exceptions\n'), ((1846, 1879), 'gradient.api_sdk.sdk_exceptions.GradientSdkError', 'sdk_exceptions.GradientSdkError', ([], {}), '()\n', (1877, 1879), False, 'from gradient.api_sdk import sdk_exceptions\n'), ((4451, 4492), 'tests.MockResponse', 'MockResponse', (['self.EXPECTED_RESPONSE_JSON'], {}), '(self.EXPECTED_RESPONSE_JSON)\n', (4463, 4492), False, 'from tests import MockResponse, example_responses\n'), ((4528, 4581), 'tests.MockResponse', 'MockResponse', (['example_responses.NOTEBOOK_GET_RESPONSE'], {}), '(example_responses.NOTEBOOK_GET_RESPONSE)\n', (4540, 4581), False, 'from tests import MockResponse, example_responses\n'), ((4600, 4611), 
'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (4609, 4611), False, 'from click.testing import CliRunner\n'), ((5392, 5433), 'tests.MockResponse', 'MockResponse', (['self.EXPECTED_RESPONSE_JSON'], {}), '(self.EXPECTED_RESPONSE_JSON)\n', (5404, 5433), False, 'from tests import MockResponse, example_responses\n'), ((5469, 5522), 'tests.MockResponse', 'MockResponse', (['example_responses.NOTEBOOK_GET_RESPONSE'], {}), '(example_responses.NOTEBOOK_GET_RESPONSE)\n', (5481, 5522), False, 'from tests import MockResponse, example_responses\n'), ((5541, 5552), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (5550, 5552), False, 'from click.testing import CliRunner\n'), ((6389, 6430), 'tests.MockResponse', 'MockResponse', (['self.EXPECTED_RESPONSE_JSON'], {}), '(self.EXPECTED_RESPONSE_JSON)\n', (6401, 6430), False, 'from tests import MockResponse, example_responses\n'), ((6466, 6519), 'tests.MockResponse', 'MockResponse', (['example_responses.NOTEBOOK_GET_RESPONSE'], {}), '(example_responses.NOTEBOOK_GET_RESPONSE)\n', (6478, 6519), False, 'from tests import MockResponse, example_responses\n'), ((6538, 6549), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (6547, 6549), False, 'from click.testing import CliRunner\n'), ((7371, 7412), 'tests.MockResponse', 'MockResponse', (['self.EXPECTED_RESPONSE_JSON'], {}), '(self.EXPECTED_RESPONSE_JSON)\n', (7383, 7412), False, 'from tests import MockResponse, example_responses\n'), ((7448, 7501), 'tests.MockResponse', 'MockResponse', (['example_responses.NOTEBOOK_GET_RESPONSE'], {}), '(example_responses.NOTEBOOK_GET_RESPONSE)\n', (7460, 7501), False, 'from tests import MockResponse, example_responses\n'), ((7610, 7621), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (7619, 7621), False, 'from click.testing import CliRunner\n'), ((8374, 8432), 'tests.MockResponse', 'MockResponse', (['self.RESPONSE_JSON_WITH_WRONG_API_TOKEN', '(400)'], {}), '(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400)\n', (8386, 8432), False, 'from tests import MockResponse, example_responses\n'), ((8455, 8466), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (8464, 8466), False, 'from click.testing import CliRunner\n'), ((9214, 9243), 'tests.MockResponse', 'MockResponse', ([], {'status_code': '(400)'}), '(status_code=400)\n', (9226, 9243), False, 'from tests import MockResponse, example_responses\n'), ((9266, 9277), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (9275, 9277), False, 'from click.testing import CliRunner\n'), ((10825, 10866), 'tests.MockResponse', 'MockResponse', (['self.EXPECTED_RESPONSE_JSON'], {}), '(self.EXPECTED_RESPONSE_JSON)\n', (10837, 10866), False, 'from tests import MockResponse, example_responses\n'), ((10885, 10896), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (10894, 10896), False, 'from click.testing import CliRunner\n'), ((11595, 11636), 'tests.MockResponse', 'MockResponse', (['self.EXPECTED_RESPONSE_JSON'], {}), '(self.EXPECTED_RESPONSE_JSON)\n', (11607, 11636), False, 'from tests import MockResponse, example_responses\n'), ((11655, 11666), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (11664, 11666), False, 'from click.testing import CliRunner\n'), ((12425, 12483), 'tests.MockResponse', 'MockResponse', (['self.RESPONSE_JSON_WITH_WRONG_API_TOKEN', '(400)'], {}), '(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400)\n', (12437, 12483), False, 'from tests import MockResponse, example_responses\n'), ((12506, 12517), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (12515, 
12517), False, 'from click.testing import CliRunner\n'), ((13265, 13294), 'tests.MockResponse', 'MockResponse', ([], {'status_code': '(400)'}), '(status_code=400)\n', (13277, 13294), False, 'from tests import MockResponse, example_responses\n'), ((13317, 13328), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (13326, 13328), False, 'from click.testing import CliRunner\n'), ((15342, 15383), 'tests.MockResponse', 'MockResponse', (['self.EXPECTED_RESPONSE_JSON'], {}), '(self.EXPECTED_RESPONSE_JSON)\n', (15354, 15383), False, 'from tests import MockResponse, example_responses\n'), ((15419, 15472), 'tests.MockResponse', 'MockResponse', (['example_responses.NOTEBOOK_GET_RESPONSE'], {}), '(example_responses.NOTEBOOK_GET_RESPONSE)\n', (15431, 15472), False, 'from tests import MockResponse, example_responses\n'), ((15491, 15502), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (15500, 15502), False, 'from click.testing import CliRunner\n'), ((16270, 16328), 'tests.MockResponse', 'MockResponse', (['self.RESPONSE_JSON_WITH_WRONG_API_TOKEN', '(400)'], {}), '(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400)\n', (16282, 16328), False, 'from tests import MockResponse, example_responses\n'), ((16351, 16362), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (16360, 16362), False, 'from click.testing import CliRunner\n'), ((17110, 17139), 'tests.MockResponse', 'MockResponse', ([], {'status_code': '(400)'}), '(status_code=400)\n', (17122, 17139), False, 'from tests import MockResponse, example_responses\n'), ((17162, 17173), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (17171, 17173), False, 'from click.testing import CliRunner\n'), ((18500, 18553), 'tests.MockResponse', 'MockResponse', (['example_responses.NOTEBOOK_GET_RESPONSE'], {}), '(example_responses.NOTEBOOK_GET_RESPONSE)\n', (18512, 18553), False, 'from tests import MockResponse, example_responses\n'), ((18572, 18583), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (18581, 18583), False, 'from click.testing import CliRunner\n'), ((19364, 19417), 'tests.MockResponse', 'MockResponse', (['example_responses.NOTEBOOK_GET_RESPONSE'], {}), '(example_responses.NOTEBOOK_GET_RESPONSE)\n', (19376, 19417), False, 'from tests import MockResponse, example_responses\n'), ((19453, 19506), 'tests.MockResponse', 'MockResponse', (['example_responses.NOTEBOOK_GET_RESPONSE'], {}), '(example_responses.NOTEBOOK_GET_RESPONSE)\n', (19465, 19506), False, 'from tests import MockResponse, example_responses\n'), ((19525, 19536), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (19534, 19536), False, 'from click.testing import CliRunner\n'), ((20295, 20353), 'tests.MockResponse', 'MockResponse', (['self.RESPONSE_JSON_WITH_WRONG_API_TOKEN', '(400)'], {}), '(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400)\n', (20307, 20353), False, 'from tests import MockResponse, example_responses\n'), ((20376, 20387), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (20385, 20387), False, 'from click.testing import CliRunner\n'), ((21135, 21164), 'tests.MockResponse', 'MockResponse', ([], {'status_code': '(400)'}), '(status_code=400)\n', (21147, 21164), False, 'from tests import MockResponse, example_responses\n'), ((21187, 21198), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (21196, 21198), False, 'from click.testing import CliRunner\n'), ((22059, 22073), 'tests.MockResponse', 'MockResponse', ([], {}), '()\n', (22071, 22073), False, 'from tests import MockResponse, example_responses\n'), ((23714, 23743), 
'tests.MockResponse', 'MockResponse', ([], {'status_code': '(200)'}), '(status_code=200)\n', (23726, 23743), False, 'from tests import MockResponse, example_responses\n'), ((25216, 25245), 'tests.MockResponse', 'MockResponse', ([], {'status_code': '(204)'}), '(status_code=204)\n', (25228, 25245), False, 'from tests import MockResponse, example_responses\n'), ((25264, 25275), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (25273, 25275), False, 'from click.testing import CliRunner\n'), ((25974, 26003), 'tests.MockResponse', 'MockResponse', ([], {'status_code': '(204)'}), '(status_code=204)\n', (25986, 26003), False, 'from tests import MockResponse, example_responses\n'), ((26022, 26033), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (26031, 26033), False, 'from click.testing import CliRunner\n'), ((26778, 26807), 'tests.MockResponse', 'MockResponse', ([], {'status_code': '(204)'}), '(status_code=204)\n', (26790, 26807), False, 'from tests import MockResponse, example_responses\n'), ((26916, 26927), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (26925, 26927), False, 'from click.testing import CliRunner\n'), ((27661, 27719), 'tests.MockResponse', 'MockResponse', (['self.RESPONSE_JSON_WITH_WRONG_API_TOKEN', '(400)'], {}), '(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400)\n', (27673, 27719), False, 'from tests import MockResponse, example_responses\n'), ((27742, 27753), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (27751, 27753), False, 'from click.testing import CliRunner\n'), ((28493, 28522), 'tests.MockResponse', 'MockResponse', ([], {'status_code': '(400)'}), '(status_code=400)\n', (28505, 28522), False, 'from tests import MockResponse, example_responses\n'), ((28545, 28556), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (28554, 28556), False, 'from click.testing import CliRunner\n'), ((30873, 30905), 'tests.MockResponse', 'MockResponse', (['self.RESPONSE_JSON'], {}), '(self.RESPONSE_JSON)\n', (30885, 30905), False, 'from tests import MockResponse, example_responses\n'), ((30924, 30935), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (30933, 30935), False, 'from click.testing import CliRunner\n'), ((31524, 31566), 'tests.MockResponse', 'MockResponse', (['self.RESPONSE_JSON_WITH_TAGS'], {}), '(self.RESPONSE_JSON_WITH_TAGS)\n', (31536, 31566), False, 'from tests import MockResponse, example_responses\n'), ((31585, 31596), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (31594, 31596), False, 'from click.testing import CliRunner\n'), ((32190, 32222), 'tests.MockResponse', 'MockResponse', (['self.RESPONSE_JSON'], {}), '(self.RESPONSE_JSON)\n', (32202, 32222), False, 'from tests import MockResponse, example_responses\n'), ((32241, 32252), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (32250, 32252), False, 'from click.testing import CliRunner\n'), ((32880, 32912), 'tests.MockResponse', 'MockResponse', (['self.RESPONSE_JSON'], {}), '(self.RESPONSE_JSON)\n', (32892, 32912), False, 'from tests import MockResponse, example_responses\n'), ((33019, 33030), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (33028, 33030), False, 'from click.testing import CliRunner\n'), ((33649, 33707), 'tests.MockResponse', 'MockResponse', (['self.RESPONSE_JSON_WITH_WRONG_API_TOKEN', '(400)'], {}), '(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400)\n', (33661, 33707), False, 'from tests import MockResponse, example_responses\n'), ((33730, 33741), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (33739, 33741), 
False, 'from click.testing import CliRunner\n'), ((34378, 34407), 'tests.MockResponse', 'MockResponse', ([], {'status_code': '(400)'}), '(status_code=400)\n', (34390, 34407), False, 'from tests import MockResponse, example_responses\n'), ((34430, 34441), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (34439, 34441), False, 'from click.testing import CliRunner\n'), ((36414, 36446), 'tests.MockResponse', 'MockResponse', (['self.RESPONSE_JSON'], {}), '(self.RESPONSE_JSON)\n', (36426, 36446), False, 'from tests import MockResponse, example_responses\n'), ((36465, 36476), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (36474, 36476), False, 'from click.testing import CliRunner\n'), ((36968, 36993), 'json.loads', 'json.loads', (['filter_params'], {}), '(filter_params)\n', (36978, 36993), False, 'import json\n'), ((37307, 37339), 'tests.MockResponse', 'MockResponse', (['self.RESPONSE_JSON'], {}), '(self.RESPONSE_JSON)\n', (37319, 37339), False, 'from tests import MockResponse, example_responses\n'), ((37358, 37369), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (37367, 37369), False, 'from click.testing import CliRunner\n'), ((37884, 37909), 'json.loads', 'json.loads', (['filter_params'], {}), '(filter_params)\n', (37894, 37909), False, 'import json\n'), ((38379, 38411), 'tests.MockResponse', 'MockResponse', (['self.RESPONSE_JSON'], {}), '(self.RESPONSE_JSON)\n', (38391, 38411), False, 'from tests import MockResponse, example_responses\n'), ((38430, 38441), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (38439, 38441), False, 'from click.testing import CliRunner\n'), ((38967, 38992), 'json.loads', 'json.loads', (['filter_params'], {}), '(filter_params)\n', (38977, 38992), False, 'import json\n'), ((39247, 39279), 'tests.MockResponse', 'MockResponse', (['self.RESPONSE_JSON'], {}), '(self.RESPONSE_JSON)\n', (39259, 39279), False, 'from tests import MockResponse, example_responses\n'), ((39386, 39397), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (39395, 39397), False, 'from click.testing import CliRunner\n'), ((39900, 39925), 'json.loads', 'json.loads', (['filter_params'], {}), '(filter_params)\n', (39910, 39925), False, 'import json\n'), ((40196, 40254), 'tests.MockResponse', 'MockResponse', (['self.RESPONSE_JSON_WITH_WRONG_API_TOKEN', '(400)'], {}), '(self.RESPONSE_JSON_WITH_WRONG_API_TOKEN, 400)\n', (40208, 40254), False, 'from tests import MockResponse, example_responses\n'), ((40277, 40288), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (40286, 40288), False, 'from click.testing import CliRunner\n'), ((40780, 40805), 'json.loads', 'json.loads', (['filter_params'], {}), '(filter_params)\n', (40790, 40805), False, 'import json\n'), ((41109, 41138), 'tests.MockResponse', 'MockResponse', ([], {'status_code': '(400)'}), '(status_code=400)\n', (41121, 41138), False, 'from tests import MockResponse, example_responses\n'), ((41161, 41172), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (41170, 41172), False, 'from click.testing import CliRunner\n'), ((41647, 41672), 'json.loads', 'json.loads', (['filter_params'], {}), '(filter_params)\n', (41657, 41672), False, 'import json\n'), ((46283, 46294), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (46292, 46294), False, 'from click.testing import CliRunner\n'), ((47464, 47475), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (47473, 47475), False, 'from click.testing import CliRunner\n'), ((48901, 48912), 'click.testing.CliRunner', 'CliRunner', ([], {}), 
'()\n', (48910, 48912), False, 'from click.testing import CliRunner\n'), ((50012, 50090), 'tests.MockResponse', 'MockResponse', (["{'status': 400, 'message': 'Invalid API token'}"], {'status_code': '(403)'}), "({'status': 400, 'message': 'Invalid API token'}, status_code=403)\n", (50024, 50090), False, 'from tests import MockResponse, example_responses\n'), ((50157, 50168), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (50166, 50168), False, 'from click.testing import CliRunner\n'), ((51063, 51074), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (51072, 51074), False, 'from click.testing import CliRunner\n'), ((52010, 52021), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (52019, 52021), False, 'from click.testing import CliRunner\n'), ((53256, 53267), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (53265, 53267), False, 'from click.testing import CliRunner\n'), ((57895, 57940), 'tests.MockResponse', 'MockResponse', (['self.GET_NOTEBOOK_RESPONSE_JSON'], {}), '(self.GET_NOTEBOOK_RESPONSE_JSON)\n', (57907, 57940), False, 'from tests import MockResponse, example_responses\n'), ((57980, 57996), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (57994, 57996), False, 'import mock\n'), ((58201, 58212), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (58210, 58212), False, 'from click.testing import CliRunner\n'), ((59260, 59305), 'tests.MockResponse', 'MockResponse', (['self.GET_NOTEBOOK_RESPONSE_JSON'], {}), '(self.GET_NOTEBOOK_RESPONSE_JSON)\n', (59272, 59305), False, 'from tests import MockResponse, example_responses\n'), ((59345, 59361), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (59359, 59361), False, 'import mock\n'), ((59564, 59575), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (59573, 59575), False, 'from click.testing import CliRunner\n'), ((60733, 60778), 'tests.MockResponse', 'MockResponse', (['self.GET_NOTEBOOK_RESPONSE_JSON'], {}), '(self.GET_NOTEBOOK_RESPONSE_JSON)\n', (60745, 60778), False, 'from tests import MockResponse, example_responses\n'), ((60817, 60833), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (60831, 60833), False, 'import mock\n'), ((61141, 61152), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (61150, 61152), False, 'from click.testing import CliRunner\n'), ((62160, 62226), 'tests.MockResponse', 'MockResponse', (["{'status': 400, 'message': 'Invalid API token'}", '(400)'], {}), "({'status': 400, 'message': 'Invalid API token'}, 400)\n", (62172, 62226), False, 'from tests import MockResponse, example_responses\n'), ((62245, 62256), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (62254, 62256), False, 'from click.testing import CliRunner\n'), ((63081, 63155), 'tests.MockResponse', 'MockResponse', (['self.GET_NOTEBOOK_RESPONSE_JSON_WHEN_NOTEBOOK_NOT_FOUND', '(404)'], {}), '(self.GET_NOTEBOOK_RESPONSE_JSON_WHEN_NOTEBOOK_NOT_FOUND, 404)\n', (63093, 63155), False, 'from tests import MockResponse, example_responses\n'), ((63174, 63185), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (63183, 63185), False, 'from click.testing import CliRunner\n'), ((46150, 46195), 'tests.MockResponse', 'MockResponse', (['self.GET_NOTEBOOK_RESPONSE_JSON'], {}), '(self.GET_NOTEBOOK_RESPONSE_JSON)\n', (46162, 46195), False, 'from tests import MockResponse, example_responses\n'), ((46209, 46253), 'tests.MockResponse', 'MockResponse', (['self.GET_METRICS_RESPONSE_JSON'], {}), '(self.GET_METRICS_RESPONSE_JSON)\n', (46221, 46253), False, 'from tests import MockResponse, 
example_responses\n'), ((47331, 47376), 'tests.MockResponse', 'MockResponse', (['self.GET_NOTEBOOK_RESPONSE_JSON'], {}), '(self.GET_NOTEBOOK_RESPONSE_JSON)\n', (47343, 47376), False, 'from tests import MockResponse, example_responses\n'), ((47390, 47434), 'tests.MockResponse', 'MockResponse', (['self.GET_METRICS_RESPONSE_JSON'], {}), '(self.GET_METRICS_RESPONSE_JSON)\n', (47402, 47434), False, 'from tests import MockResponse, example_responses\n'), ((48666, 48711), 'tests.MockResponse', 'MockResponse', (['self.GET_NOTEBOOK_RESPONSE_JSON'], {}), '(self.GET_NOTEBOOK_RESPONSE_JSON)\n', (48678, 48711), False, 'from tests import MockResponse, example_responses\n'), ((48725, 48769), 'tests.MockResponse', 'MockResponse', (['self.GET_METRICS_RESPONSE_JSON'], {}), '(self.GET_METRICS_RESPONSE_JSON)\n', (48737, 48769), False, 'from tests import MockResponse, example_responses\n'), ((50829, 50981), 'tests.MockResponse', 'MockResponse', (["{'error': {'name': 'ApplicationError', 'status': 404, 'message':\n 'Not found. Please contact <EMAIL> for help.'}}"], {'status_code': '(404)'}), "({'error': {'name': 'ApplicationError', 'status': 404,\n 'message': 'Not found. Please contact <EMAIL> for help.'}}, status_code=404\n )\n", (50841, 50981), False, 'from tests import MockResponse, example_responses\n'), ((51832, 51877), 'tests.MockResponse', 'MockResponse', (['self.GET_NOTEBOOK_RESPONSE_JSON'], {}), '(self.GET_NOTEBOOK_RESPONSE_JSON)\n', (51844, 51877), False, 'from tests import MockResponse, example_responses\n'), ((51891, 51985), 'tests.MockResponse', 'MockResponse', (['example_responses.NOTEBOOKS_METRICS_GET_RESPONSE_WHEN_NO_METRICS_WERE_FOUND'], {}), '(example_responses.\n NOTEBOOKS_METRICS_GET_RESPONSE_WHEN_NO_METRICS_WERE_FOUND)\n', (51903, 51985), False, 'from tests import MockResponse, example_responses\n'), ((53138, 53183), 'tests.MockResponse', 'MockResponse', (['self.GET_NOTEBOOK_RESPONSE_JSON'], {}), '(self.GET_NOTEBOOK_RESPONSE_JSON)\n', (53150, 53183), False, 'from tests import MockResponse, example_responses\n'), ((53197, 53226), 'tests.MockResponse', 'MockResponse', ([], {'status_code': '(500)'}), '(status_code=500)\n', (53209, 53226), False, 'from tests import MockResponse, example_responses\n'), ((46582, 46694), 'mock.call', 'mock.call', (['self.GET_NOTEBOOK_URL'], {'json': 'self.GET_NOTEBOOK_REQUEST_JSON', 'params': 'None', 'headers': 'EXPECTED_HEADERS'}), '(self.GET_NOTEBOOK_URL, json=self.GET_NOTEBOOK_REQUEST_JSON,\n params=None, headers=EXPECTED_HEADERS)\n', (46591, 46694), False, 'import mock\n'), ((46807, 46934), 'mock.call', 'mock.call', (['self.GET_METRICS_URL'], {'json': 'None', 'params': 'self.BASIC_COMMAND_GET_METRICS_REQUEST_PARAMS', 'headers': 'EXPECTED_HEADERS'}), '(self.GET_METRICS_URL, json=None, params=self.\n BASIC_COMMAND_GET_METRICS_REQUEST_PARAMS, headers=EXPECTED_HEADERS)\n', (46816, 46934), False, 'import mock\n'), ((47823, 47956), 'mock.call', 'mock.call', (['self.GET_NOTEBOOK_URL'], {'json': 'self.GET_NOTEBOOK_REQUEST_JSON', 'params': 'None', 'headers': 'EXPECTED_HEADERS_WITH_CHANGED_API_KEY'}), '(self.GET_NOTEBOOK_URL, json=self.GET_NOTEBOOK_REQUEST_JSON,\n params=None, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY)\n', (47832, 47956), False, 'import mock\n'), ((48069, 48221), 'mock.call', 'mock.call', (['self.GET_METRICS_URL'], {'json': 'None', 'params': 'self.ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS', 'headers': 'EXPECTED_HEADERS_WITH_CHANGED_API_KEY'}), '(self.GET_METRICS_URL, json=None, params=self.\n ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS, headers=\n 
EXPECTED_HEADERS_WITH_CHANGED_API_KEY)\n', (48078, 48221), False, 'import mock\n'), ((49243, 49376), 'mock.call', 'mock.call', (['self.GET_NOTEBOOK_URL'], {'json': 'self.GET_NOTEBOOK_REQUEST_JSON', 'params': 'None', 'headers': 'EXPECTED_HEADERS_WITH_CHANGED_API_KEY'}), '(self.GET_NOTEBOOK_URL, json=self.GET_NOTEBOOK_REQUEST_JSON,\n params=None, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY)\n', (49252, 49376), False, 'import mock\n'), ((49489, 49641), 'mock.call', 'mock.call', (['self.GET_METRICS_URL'], {'json': 'None', 'params': 'self.ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS', 'headers': 'EXPECTED_HEADERS_WITH_CHANGED_API_KEY'}), '(self.GET_METRICS_URL, json=None, params=self.\n ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS, headers=\n EXPECTED_HEADERS_WITH_CHANGED_API_KEY)\n', (49498, 49641), False, 'import mock\n'), ((51311, 51444), 'mock.call', 'mock.call', (['self.GET_NOTEBOOK_URL'], {'json': 'self.GET_NOTEBOOK_REQUEST_JSON', 'params': 'None', 'headers': 'EXPECTED_HEADERS_WITH_CHANGED_API_KEY'}), '(self.GET_NOTEBOOK_URL, json=self.GET_NOTEBOOK_REQUEST_JSON,\n params=None, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY)\n', (51320, 51444), False, 'import mock\n'), ((52335, 52468), 'mock.call', 'mock.call', (['self.GET_NOTEBOOK_URL'], {'json': 'self.GET_NOTEBOOK_REQUEST_JSON', 'params': 'None', 'headers': 'EXPECTED_HEADERS_WITH_CHANGED_API_KEY'}), '(self.GET_NOTEBOOK_URL, json=self.GET_NOTEBOOK_REQUEST_JSON,\n params=None, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY)\n', (52344, 52468), False, 'import mock\n'), ((52581, 52733), 'mock.call', 'mock.call', (['self.GET_METRICS_URL'], {'json': 'None', 'params': 'self.ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS', 'headers': 'EXPECTED_HEADERS_WITH_CHANGED_API_KEY'}), '(self.GET_METRICS_URL, json=None, params=self.\n ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS, headers=\n EXPECTED_HEADERS_WITH_CHANGED_API_KEY)\n', (52590, 52733), False, 'import mock\n'), ((53525, 53658), 'mock.call', 'mock.call', (['self.GET_NOTEBOOK_URL'], {'json': 'self.GET_NOTEBOOK_REQUEST_JSON', 'params': 'None', 'headers': 'EXPECTED_HEADERS_WITH_CHANGED_API_KEY'}), '(self.GET_NOTEBOOK_URL, json=self.GET_NOTEBOOK_REQUEST_JSON,\n params=None, headers=EXPECTED_HEADERS_WITH_CHANGED_API_KEY)\n', (53534, 53658), False, 'import mock\n'), ((53771, 53923), 'mock.call', 'mock.call', (['self.GET_METRICS_URL'], {'json': 'None', 'params': 'self.ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS', 'headers': 'EXPECTED_HEADERS_WITH_CHANGED_API_KEY'}), '(self.GET_METRICS_URL, json=None, params=self.\n ALL_COMMANDS_GET_METRICS_REQUEST_PARAMS, headers=\n EXPECTED_HEADERS_WITH_CHANGED_API_KEY)\n', (53780, 53923), False, 'import mock\n')]
# Generated by Django 2.2.1 on 2019-09-08 17:58

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('images', '0015_auto_20190909_0253'),
    ]

    operations = [
        migrations.RenameField(
            model_name='comment',
            old_name='refer',
            new_name='referComment',
        ),
    ]
[ "django.db.migrations.RenameField" ]
[((226, 318), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""comment"""', 'old_name': '"""refer"""', 'new_name': '"""referComment"""'}), "(model_name='comment', old_name='refer', new_name=\n 'referComment')\n", (248, 318), False, 'from django.db import migrations\n')]
#
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import RandomUtils

from DV.riscv.trees.instruction_tree import RV_G_map, RV32_G_map
from base.Sequence import Sequence
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV


# This test performs some basic checks with thread locking and shared memory.
class MainSequence(Sequence):
    def generate(self, **kargs):
        shared_phys_addresses_name = "Shared PAs"

        with self.threadLockingContext():
            # One thread needs to generate the shared physical addresses
            if not self.hasSharedThreadObject(shared_phys_addresses_name):
                shared_phys_addresses = []
                for _ in range(3):
                    phys_addr = self.genPA(Size=8, Align=8, Type="D", Shared=1)
                    shared_phys_addresses.append(phys_addr)

                self.setSharedThreadObject(shared_phys_addresses_name, shared_phys_addresses)

        for _ in range(RandomUtils.random32(2, 5)):
            with self.threadLockingContext():
                shared_phys_addresses = self.getSharedThreadObject(shared_phys_addresses_name)
                self._genSharedLoadInstruction(self.choice(shared_phys_addresses))

            self._genRandomInstructions()

    # Generate a load instruction to a shared address and assert that the
    # destination register is marked unpredictable.
    #
    #  @param aSharedPhysAddr A shared physical address to target with the
    #       load instruction.
    def _genSharedLoadInstruction(self, aSharedPhysAddr):
        target_addr = self.genVAforPA(Size=8, Align=8, Type="D", PA=aSharedPhysAddr)
        if self.getGlobalState("AppRegisterWidth") == 32:
            instr = "LW##RISCV"
        else:
            instr = "LD##RISCV"
        instr_id = self.genInstruction(instr, {"LSTarget": target_addr})
        instr_record = self.queryInstructionRecord(instr_id)
        dest_reg_index = instr_record["Dests"]["rd"]
        dest_reg_name = "x%d" % dest_reg_index
        if (dest_reg_index != 0) and (
            not self.isRegisterReserved(dest_reg_name, access="Read", resv_type="Unpredictable")
        ):
            self.error("Destination register %s was not marked as unpredictable" % dest_reg_name)

    # Generate a random number of a wide variety of instructions.
    def _genRandomInstructions(self):
        for _ in range(RandomUtils.random32(0, 10)):
            if self.getGlobalState("AppRegisterWidth") == 32:
                instr = RV32_G_map.pick(self.genThread)
            else:
                instr = RV_G_map.pick(self.genThread)

            self.genInstruction(instr)


MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
[ "DV.riscv.trees.instruction_tree.RV32_G_map.pick", "RandomUtils.random32", "DV.riscv.trees.instruction_tree.RV_G_map.pick" ]
[((1537, 1563), 'RandomUtils.random32', 'RandomUtils.random32', (['(2)', '(5)'], {}), '(2, 5)\n', (1557, 1563), False, 'import RandomUtils\n'), ((2956, 2983), 'RandomUtils.random32', 'RandomUtils.random32', (['(0)', '(10)'], {}), '(0, 10)\n', (2976, 2983), False, 'import RandomUtils\n'), ((3072, 3103), 'DV.riscv.trees.instruction_tree.RV32_G_map.pick', 'RV32_G_map.pick', (['self.genThread'], {}), '(self.genThread)\n', (3087, 3103), False, 'from DV.riscv.trees.instruction_tree import RV_G_map, RV32_G_map\n'), ((3146, 3175), 'DV.riscv.trees.instruction_tree.RV_G_map.pick', 'RV_G_map.pick', (['self.genThread'], {}), '(self.genThread)\n', (3159, 3175), False, 'from DV.riscv.trees.instruction_tree import RV_G_map, RV32_G_map\n')]
import pathlib import matplotlib.pyplot as plt import numpy as np import imageio def correct_line_shift(img: np.ndarray, value: int): """Corrects the lineshift of a given image.""" rolled = np.roll(img[::2, :], value, axis=1) img[::2, :] = rolled return img def show_corrected_image(img: np.ndarray): fig, ax = plt.subplots() ax.imshow(img, cmap="gray") ax.axis("off") return fig, ax if __name__ == "__main__": fname = pathlib.Path("/data/Amit_QNAP/Calcium_FXS/x10/") images = [ next((fname / "WT_674").glob("AVG*WT*.png")), next((fname / "FXS_614").glob("AVG*FXS*.png")), ] for image in images: data = imageio.imread(image) corrected = correct_line_shift(data, 3) fig, ax = show_corrected_image(corrected) fig.savefig(image.with_suffix(".corrected.png"), transparent=True, dpi=300)
[ "imageio.imread", "pathlib.Path", "matplotlib.pyplot.subplots", "numpy.roll" ]
[((201, 236), 'numpy.roll', 'np.roll', (['img[::2, :]', 'value'], {'axis': '(1)'}), '(img[::2, :], value, axis=1)\n', (208, 236), True, 'import numpy as np\n'), ((336, 350), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (348, 350), True, 'import matplotlib.pyplot as plt\n'), ((468, 516), 'pathlib.Path', 'pathlib.Path', (['"""/data/Amit_QNAP/Calcium_FXS/x10/"""'], {}), "('/data/Amit_QNAP/Calcium_FXS/x10/')\n", (480, 516), False, 'import pathlib\n'), ((688, 709), 'imageio.imread', 'imageio.imread', (['image'], {}), '(image)\n', (702, 709), False, 'import imageio\n')]
#! /usr/bin/env python # vim: set fenc=utf8 ts=4 sw=4 et : # # Layer 2 network neighbourhood discovery tool # written by <NAME> (mail at <EMAIL>) from __future__ import absolute_import, division, print_function import logging import scapy.config import scapy.layers.l2 import scapy.route import socket import netifaces import math import errno import pycurl import os, sys import json import ast import couchdb from StringIO import StringIO class Region: def __init__(self,rab_user,rab_pass,vhost,c_user,c_pass): self.rab_user=rab_user self.rab_pass=rab_pass self.rab_vhost=vhost self.c_user=c_user self.c_pass=c_pass self.couch=couchdb.Server('http://'+c_user+':'+c_pass+'@127.0.0.1:5984/') def myIp(self): inter=netifaces.interfaces() for i in inter: try: res=netifaces.ifaddresses(i)[2][0]['addr'] if res.split(".")[0]!="127": return res except KeyError: pass return None def myMac(self): inter=netifaces.interfaces() for i in inter: try: res=netifaces.ifaddresses(i) if res[2][0]['addr'].split(".")[0]!="127": return res[17][0]['addr'] except KeyError: pass return None def addGwToDatabase(self,name,ip,mac): db=self.couch['admin'] look=db.view('views/docs_by_type') for p in look['cluster']: doc=db[p.value] doc['nodes_mac'].append(mac) doc['nodes_name'].append(name) doc['nodes_ip'].append(ip) db[doc.id]=doc def initClustDatabase(self,name,api,c_name,ip,mac): db=self.couch['admin'] look=db.view('views/docs_by_type') for p in look['cluster']: doc=db[p.value] db.delete(doc) db.save({'type':'cluster','reg_api':api,'reg_name':name,'nodes_name':[c_name],'nodes_ip':[ip],'nodes_mac':[mac],'master':[ip,c_name]}) def addCouchNode(self,ip): buffer=StringIO() c=pycurl.Curl() host="http://localhost:5986/_nodes/couchdb@"+ip c.setopt(c.URL,host) c.setopt(c.WRITEDATA,buffer) c.setopt(c.USERPWD,'%s:%s' %(self.c_user,self.c_pass)) data = '{}' c.setopt(c.POSTFIELDS,data) c.setopt(c.CUSTOMREQUEST,"PUT") c.perform() resp=c.getinfo(c.RESPONSE_CODE) c.close() if resp==201: return "ok" else: print(resp) return "Error" def removeCouchNode(self,ip): #Does not Work! buffer=StringIO() c=pycurl.Curl() host="http://localhost:5986/_nodes/couchdb@"+ip c.setopt(c.URL,host) c.setopt(c.WRITEDATA,buffer) c.setopt(c.USERPWD,'%s:%s' %(self.c_user,self.c_pass)) c.setopt(c.CUSTOMREQUEST,"GET") c.perform() resp=c.getinfo(c.RESPONSE_CODE) c.close() val=ast.literal_eval(buffer.getvalue()) rev=val["_rev"] buffer=StringIO() c=pycurl.Curl() host="http://localhost:5986/_nodes/couchdb@"+ip+"?rev="+rev c.setopt(c.URL,host) c.setopt(c.WRITEDATA,buffer) c.setopt(c.USERPWD,'%s:%s' %(self.c_user,self.c_pass)) c.setopt(c.CUSTOMREQUEST,"DELETE") c.perform() resp=c.getinfo(c.RESPONSE_CODE) c.close() if resp==200: return "ok" else: return "Error" def getCouchNodes(self): buffer=StringIO() c=pycurl.Curl() host="http://localhost:5984/_membership" c.setopt(c.URL,host) c.setopt(c.WRITEDATA,buffer) c.setopt(c.USERPWD,'%s:%s' %(self.c_user,self.c_pass)) c.setopt(c.CUSTOMREQUEST,"GET") c.perform() resp=c.getinfo(c.RESPONSE_CODE) c.close() if resp==200: return buffer.getvalue() else: return "Error" def setClustQueue(self,name): c=pycurl.Curl() #c.setopt(c.URL,"http://localhost:15672/api/paremeters/federation-upstream/%2f/my-upstream") c.setopt(c.URL,"http://localhost:15672/api/exchanges/"+self.rab_vhost+"/federation."+name) c.setopt(c.CUSTOMREQUEST,"PUT") c.setopt(pycurl.HTTPHEADER,['Content-type: application/json']) data = '{"auto_delete":false,"durable":true}' c.setopt(c.POSTFIELDS,data) c.setopt(c.USERPWD,'%s:%s' %(self.rab_user,self.rab_pass)) c.perform() resp=c.getinfo(c.HTTP_CODE) c.close() if resp==204: return "ok" else: return "Error" def createFedPolicy(self,name): buffer=StringIO() c=pycurl.Curl() #c.setopt(c.URL,"http://localhost:15672/api/paremeters/federation-upstream/%2f/my-upstream") host="http://localhost:15672/api/policies/"+self.rab_vhost+"/federate-me" c.setopt(c.URL,host) c.setopt(c.WRITEDATA,buffer) c.setopt(c.USERPWD,'%s:%s' %(self.rab_user,self.rab_pass)) c.setopt(pycurl.HTTPHEADER,['Content-type: application/json']) data2 = json.dumps({"pattern":"^federation."+name,"definition":{"federation-upstream-set":"all"},"apply-to":"exchanges"}) c.setopt(pycurl.POSTFIELDS,data2) c.setopt(c.CUSTOMREQUEST,"PUT") c.perform() resp=c.getinfo(c.RESPONSE_CODE) c.close() if resp==204: return "ok" else: return "Error" def addUpstream(self,user,passw,addr,virt): buffer=StringIO() c=pycurl.Curl() host="http://localhost:15672/api/parameters/federation-upstream/"+self.rab_vhost+"/Fed-upstream-addr" c.setopt(c.URL,host) c.setopt(c.WRITEDATA,buffer) c.setopt(c.USERPWD,'%s:%s' %(self.rab_user,self.rab_pass)) c.setopt(pycurl.HTTPHEADER,['Content-type: application/json']) data2 = json.dumps({"value":{"uri":"amqp://"+user+":"+passw+"@"+addr+"/"+virt,"ack-mode":"on-confirm","trust-user-id":True}}) c.setopt(pycurl.POSTFIELDS,data2) c.setopt(c.CUSTOMREQUEST,"PUT") c.perform() resp=c.getinfo(c.RESPONSE_CODE) c.close() if resp==204: return "ok" else: return "Error" ## Below only for Discovery of Stuff def long2net(self,arg): if (arg <= 0 or arg >= 0xFFFFFFFF): raise ValueError("illegal netmask value", hex(arg)) return 32 - int(round(math.log(0xFFFFFFFF - arg, 2))) def to_CIDR_notation(self,bytes_network, bytes_netmask): network = scapy.utils.ltoa(bytes_network) netmask = self.long2net(bytes_netmask) net = "%s/%s" % (network, netmask) if netmask < 16: print("%s is too big. skipping" % net) return None return net def scan_and_print_neighbors(self,net, interface, ref, timeout=1): results=[] try: ans, unans = scapy.layers.l2.arping(net, iface=interface, timeout=timeout, verbose=False) for s, r in ans.res: try: hostname = socket.gethostbyaddr(r.psrc) if (r.sprintf("%Ether.src%")[0:8].upper()==ref.upper()): results.append([r.sprintf("%Ether.src%"),r.sprintf("%ARP.psrc%"),hostname[0]]) except socket.herror: # failed to resolve pass except socket.error as e: if e.errno == errno.EPERM: # Operation not permitted print("%s. Did you run as root?" % e.strerror) else: raise return results def getNetw(self): networks=[] for network, netmask, _, interface, address in scapy.config.conf.route.routes: if network == 0 or interface == 'lo' or address == '127.0.0.1' or address == '0.0.0.0': continue if netmask <= 0 or netmask == 0xFFFFFFFF: continue if interface != scapy.config.conf.iface: # see http://trac.secdev.org/scapy/ticket/537 print("skipping %s because scapy currently doesn't support arping on non-primary network interfaces" % interface) continue net = self.to_CIDR_notation(network, netmask) if net: networks.append([net,interface]) return networks def getDevsOnWan(self,ref): networks = self.getNetw() res=[] for [net,interface] in networks: for item in self.scan_and_print_neighbors(net, interface,ref): res.append(item[0:2]) return res def getExchangeInfo(self,name): buffer=StringIO() c=pycurl.Curl() host="http://localhost:15672/api/exchanges/test/"+name+"?lengths_age=30" c.setopt(c.URL,host) c.setopt(c.WRITEDATA,buffer) c.setopt(c.USERPWD,'%s:%s' %(self.c_user,self.c_pass)) c.setopt(c.CUSTOMREQUEST,"GET") c.perform() resp=c.getinfo(c.RESPONSE_CODE) c.close() if resp==200: data=json.loads(buffer.getvalue()) msg={} if data['outgoing']!=None: for data in data['outgoing']: cnt=data['stats']['publish'] que=data['queue']['name'] msg[que]=cnt return(msg) else: return "Error" def checkDevsApp(self,dev): db=self.couch['apps'] look=db.view('views/app_for_dev') apps=[] for p in look[dev]: apps.append(p.value) return apps def checkDatabForApp(self): db=self.couch['apps'] databs={} app_n=[] for doc in db: name=db[doc].get('name') if (name!=None): app_n.append(name) for d in self.couch: if d[0:4]=="app_": for a in app_n: if d[4:]==a.lower(): databs[a]=self.couch[d].info().get('disk_size') return databs def saveMonitoring(self,save): db=self.couch['monitoring'] db.save(save) if __name__ == "__main__": reg=Region("admin","hunter","test","admin","hunter") #print(reg.getDevsOnWan("B8:27:EB")) #reg.addGwToDatabase("the_Great_test","192.168.0.2","Random MAc") #reg.initClustDatabase("Reg_name","Reg_api","My_name","My-ip","My_MAc") #print(reg.removeCouchNode("10.0.0.199")) print(reg.addCouchNode("10.0.0.78")) #print(reg.getCouchNodes()) #print(reg.setClustQueue('test')) #print(reg.createFedPolicy()) # This is Raspi #print(reg.addUpstream("admin","hunter","10.0.0.68","test")) #print(reg.getExchangeInfo("apps")) #print(reg.getExchangeInfo("cloud")) #print(reg.myIp()) #print(reg.myMac()) #print(reg.checkDevsApp("UUID1")) #print(reg.checkDatabForApp())
[ "netifaces.interfaces", "couchdb.Server", "json.dumps", "math.log", "netifaces.ifaddresses", "socket.gethostbyaddr", "pycurl.Curl", "StringIO.StringIO" ]
[((683, 753), 'couchdb.Server', 'couchdb.Server', (["('http://' + c_user + ':' + c_pass + '@127.0.0.1:5984/')"], {}), "('http://' + c_user + ':' + c_pass + '@127.0.0.1:5984/')\n", (697, 753), False, 'import couchdb\n'), ((781, 803), 'netifaces.interfaces', 'netifaces.interfaces', ([], {}), '()\n', (801, 803), False, 'import netifaces\n'), ((1090, 1112), 'netifaces.interfaces', 'netifaces.interfaces', ([], {}), '()\n', (1110, 1112), False, 'import netifaces\n'), ((2121, 2131), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (2129, 2131), False, 'from StringIO import StringIO\n'), ((2142, 2155), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (2153, 2155), False, 'import pycurl\n'), ((2709, 2719), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (2717, 2719), False, 'from StringIO import StringIO\n'), ((2730, 2743), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (2741, 2743), False, 'import pycurl\n'), ((3134, 3144), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (3142, 3144), False, 'from StringIO import StringIO\n'), ((3155, 3168), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (3166, 3168), False, 'import pycurl\n'), ((3633, 3643), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (3641, 3643), False, 'from StringIO import StringIO\n'), ((3654, 3667), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (3665, 3667), False, 'import pycurl\n'), ((4118, 4131), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (4129, 4131), False, 'import pycurl\n'), ((4813, 4823), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (4821, 4823), False, 'from StringIO import StringIO\n'), ((4834, 4847), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (4845, 4847), False, 'import pycurl\n'), ((5251, 5377), 'json.dumps', 'json.dumps', (["{'pattern': '^federation.' + name, 'definition': {'federation-upstream-set':\n 'all'}, 'apply-to': 'exchanges'}"], {}), "({'pattern': '^federation.' + name, 'definition': {\n 'federation-upstream-set': 'all'}, 'apply-to': 'exchanges'})\n", (5261, 5377), False, 'import json\n'), ((5676, 5686), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (5684, 5686), False, 'from StringIO import StringIO\n'), ((5697, 5710), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (5708, 5710), False, 'import pycurl\n'), ((6041, 6182), 'json.dumps', 'json.dumps', (["{'value': {'uri': 'amqp://' + user + ':' + passw + '@' + addr + '/' + virt,\n 'ack-mode': 'on-confirm', 'trust-user-id': True}}"], {}), "({'value': {'uri': 'amqp://' + user + ':' + passw + '@' + addr +\n '/' + virt, 'ack-mode': 'on-confirm', 'trust-user-id': True}})\n", (6051, 6182), False, 'import json\n'), ((8864, 8874), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (8872, 8874), False, 'from StringIO import StringIO\n'), ((8885, 8898), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (8896, 8898), False, 'import pycurl\n'), ((1174, 1198), 'netifaces.ifaddresses', 'netifaces.ifaddresses', (['i'], {}), '(i)\n', (1195, 1198), False, 'import netifaces\n'), ((6630, 6659), 'math.log', 'math.log', (['(4294967295 - arg)', '(2)'], {}), '(4294967295 - arg, 2)\n', (6638, 6659), False, 'import math\n'), ((7276, 7304), 'socket.gethostbyaddr', 'socket.gethostbyaddr', (['r.psrc'], {}), '(r.psrc)\n', (7296, 7304), False, 'import socket\n'), ((865, 889), 'netifaces.ifaddresses', 'netifaces.ifaddresses', (['i'], {}), '(i)\n', (886, 889), False, 'import netifaces\n')]
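The Region class above re-creates the same pycurl boilerplate in every method. A sketch of how that could be factored into a single helper (the method name and defaults are mine, not from the source; it keeps the Python 2 StringIO the file already uses):

def _request(self, host, method="GET", data=None, user=None, passw=None):
    # Shared request path: build the handle, run it, return (status, body).
    buffer = StringIO()
    c = pycurl.Curl()
    c.setopt(c.URL, host)
    c.setopt(c.WRITEDATA, buffer)
    c.setopt(c.USERPWD, '%s:%s' % (user or self.c_user, passw or self.c_pass))
    if data is not None:
        c.setopt(c.POSTFIELDS, data)
    c.setopt(c.CUSTOMREQUEST, method)
    c.perform()
    resp = c.getinfo(c.RESPONSE_CODE)
    c.close()
    return resp, buffer.getvalue()

Each endpoint method would then reduce to one call, e.g. resp, body = self._request("http://localhost:5984/_membership").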
from __future__ import annotations import os import random import re import time import discord import pygame from discord.errors import HTTPException from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils from pgbot.commands.base import BaseCommand, CodeBlock class UserCommand(BaseCommand): """ Base class to handle user commands. """ async def cmd_version(self): """ ->type Other commands ->signature pg!version ->description Get the version of <@&822580851241123860> ----- Implement pg!version, to report bot version """ await embed_utils.replace( self.response_msg, "Current bot's version", f"`{common.VERSION}`" ) async def cmd_clock(self): """ ->type Get help ->signature pg!clock ->description 24 Hour Clock showing <@&778205389942030377> 's who are available to help ----- Implement pg!clock, to display a clock of helpfulies/mods/wizards """ t = time.time() pygame.image.save(clock.user_clock(t), f"temp{t}.png") common.cmd_logs[self.invoke_msg.id] = await self.response_msg.channel.send( file=discord.File(f"temp{t}.png") ) await self.response_msg.delete() os.remove(f"temp{t}.png") async def _cmd_doc(self, modname, page=0, msg=None): """ Helper function for doc, handle pg!refresh stuff """ if not msg: msg = self.response_msg await docs.put_doc(modname, msg, self.invoke_msg.author, page) async def cmd_doc(self, name: str): """ ->type Get help ->signature pg!doc [module.Class.method] ->description Look up the docstring of a Python/Pygame object, e.g. str or pygame.Rect ----- Implement pg!doc, to view documentation """ await self._cmd_doc(name) async def cmd_exec(self, code: CodeBlock): """ ->type Run code ->signature pg!exec [python code block] ->description Run python code in an isolated environment. ->extended description Import is not available. Various methods of builtin objects have been disabled for security reasons. The available preimported modules are: `math, cmath, random, re, time, string, itertools, pygame` To show an image, overwrite `output.img` to a surface (see example command). To make it easier to read and write code use code blocks (see [HERE](https://discord.com/channels/772505616680878080/774217896971730974/785510505728311306)). ->example command pg!exec \\`\\`\\`py ```py # Draw a red rectangle on a transparent surface output.img = pygame.Surface((200, 200)).convert_alpha() output.img.fill((0, 0, 0, 0)) pygame.draw.rect(output.img, (200, 0, 0), (50, 50, 100, 100))``` \\`\\`\\` ----- Implement pg!exec, for execution of python code """ tstamp = time.perf_counter_ns() returned = await sandbox.exec_sandbox( code.code, tstamp, 10 if self.is_priv else 5 ) dur = returned.duration # the execution time of the script alone if returned.exc is None: if returned.img: if os.path.getsize(f"temp{tstamp}.png") < 2 ** 22: await self.response_msg.channel.send( file=discord.File(f"temp{tstamp}.png") ) else: await embed_utils.replace( self.response_msg, "Image cannot be sent:", "The image file size is above 4MiB", ) os.remove(f"temp{tstamp}.png") await embed_utils.replace( self.response_msg, f"Returned text (code executed in {utils.format_time(dur)}):", utils.code_block(returned.text) ) else: await embed_utils.replace( self.response_msg, common.EXC_TITLES[1], utils.code_block(", ".join(map(str, returned.exc.args))) ) async def _cmd_help(self, argname, page=0, msg=None): """ Helper function for pg!help, handle pg!refresh stuff """ if not msg: msg = self.response_msg if argname is None: await utils.send_help_message( msg, self.invoke_msg.author, self.cmds_and_funcs, page=page ) else: await utils.send_help_message( msg, self.invoke_msg.author, self.cmds_and_funcs, argname ) async def cmd_help(self, name: str = None): """ ->type Get help ->signature pg!help [command] ->description Ask me for help ->example command pg!help help ----- Implement pg!help, to display a help message """ await self._cmd_help(name) async def cmd_pet(self): """ ->type Play With Me :snake: ->signature pg!pet ->description Pet me :3 . Don't pet me too much or I will get mad. ----- Implement pg!pet, to pet the bot """ emotion.pet_anger -= (time.time() - emotion.last_pet - common.PET_INTERVAL) * ( emotion.pet_anger / common.JUMPSCARE_THRESHOLD ) - common.PET_COST if emotion.pet_anger < common.PET_COST: emotion.pet_anger = common.PET_COST emotion.last_pet = time.time() fname = "die.gif" if emotion.pet_anger > common.JUMPSCARE_THRESHOLD else "pet.gif" await embed_utils.replace( self.response_msg, "", "", 0xFFFFAA, "https://raw.githubusercontent.com/PygameCommunityDiscord/" + f"PygameCommunityBot/main/assets/images/{fname}" ) async def cmd_vibecheck(self): """ ->type Play With Me :snake: ->signature pg!vibecheck ->description Check my mood. ----- Implement pg!vibecheck, to check if the bot is angry """ await embed_utils.replace( self.response_msg, "Vibe Check, snek?", f"Previous petting anger: {emotion.pet_anger:.2f}/{common.JUMPSCARE_THRESHOLD:.2f}" + f"\nIt was last pet {utils.format_long_time(round(time.time() - emotion.last_pet))} ago", ) async def cmd_sorry(self): """ ->type Play With Me :snake: ->signature pg!sorry ->description You were hitting me <:pg_bonk:780423317718302781> and you're now trying to apologize? Let's see what I'll say :unamused: ----- Implement pg!sorry, to ask forgiveness from the bot after bonccing it """ if not emotion.boncc_count: await embed_utils.replace( self.response_msg, "Ask forgiveness from snek?", "Snek is happy. Awww, don't be sorry." ) return num = random.randint(0, 3) if num: emotion.boncc_count -= num if emotion.boncc_count < 0: emotion.boncc_count = 0 await embed_utils.replace( self.response_msg, "Ask forgiveness from snek?", "Your pythonic lord accepts your apology.\n" + f"Now go to code again.\nThe boncc count is {emotion.boncc_count}" ) else: await embed_utils.replace( self.response_msg, "Ask forgiveness from snek?", "How did you dare to boncc a snake?\nBold of you to assume " + "I would apologize to you, two-feet-standing being!\nThe " + f"boncc count is {emotion.boncc_count}" ) async def cmd_bonkcheck(self): """ ->type Play With Me :snake: ->signature pg!bonkcheck ->description Check how many times you have done me harm. ----- Implement pg!bonkcheck, to check how much the snek has been boncced """ if emotion.boncc_count: await embed_utils.replace( self.response_msg, "The snek is hurt and angry:", f"The boncc count is {emotion.boncc_count}" ) else: await embed_utils.replace( self.response_msg, "The snek is right", "Please, don't hit the snek" ) async def cmd_refresh(self, msg_id: int): """ ->type Other commands ->signature pg!refresh [message_id] ->description Refresh a message which support pages. ----- Implement pg!refresh, to refresh a message which supports pages """ try: msg = await self.invoke_msg.channel.fetch_message(msg_id) except (discord.errors.NotFound, discord.errors.HTTPException): await embed_utils.replace( self.response_msg, "Message not found", "Message was not found. Make sure that the id is correct and that " "you are in the same channel as the message." ) return if not msg.embeds or not msg.embeds[0].footer or not msg.embeds[0].footer.text: await embed_utils.replace( self.response_msg, "Message does not support pages", "The message specified does not support pages. Make sure " "the id of the message is correct." ) return data = msg.embeds[0].footer.text.split("\n") page = re.search(r'\d+', data[0]).group() command = data[2].replace("Command: ", "").split() if not page or not command or not self.cmds_and_funcs.get(command[0]): await embed_utils.replace( self.response_msg, "Message does not support pages", "The message specified does not support pages. Make sure " "the id of the message is correct." ) return await self.response_msg.delete() await self.invoke_msg.delete() if command[0] == "help": if len(command) == 1: command.append(None) await self._cmd_help( command[1], page=int(page) - 1, msg=msg ) elif command[0] == "doc": await self._cmd_doc( command[1], page=int(page) - 1, msg=msg )
[ "os.remove", "pgbot.utils.send_help_message", "random.randint", "pgbot.utils.code_block", "discord.File", "pgbot.docs.put_doc", "os.path.getsize", "pgbot.utils.format_time", "time.time", "pgbot.clock.user_clock", "re.search", "time.perf_counter_ns", "pgbot.embed_utils.replace", "pgbot.sandbox.exec_sandbox" ]
[((1044, 1055), 'time.time', 'time.time', ([], {}), '()\n', (1053, 1055), False, 'import time\n'), ((1308, 1333), 'os.remove', 'os.remove', (['f"""temp{t}.png"""'], {}), "(f'temp{t}.png')\n", (1317, 1333), False, 'import os\n'), ((3032, 3054), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (3052, 3054), False, 'import time\n'), ((5700, 5711), 'time.time', 'time.time', ([], {}), '()\n', (5709, 5711), False, 'import time\n'), ((7242, 7262), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (7256, 7262), False, 'import random\n'), ((630, 720), 'pgbot.embed_utils.replace', 'embed_utils.replace', (['self.response_msg', '"""Current bot\'s version"""', 'f"""`{common.VERSION}`"""'], {}), '(self.response_msg, "Current bot\'s version",\n f\'`{common.VERSION}`\')\n', (649, 720), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((1082, 1101), 'pgbot.clock.user_clock', 'clock.user_clock', (['t'], {}), '(t)\n', (1098, 1101), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((1544, 1600), 'pgbot.docs.put_doc', 'docs.put_doc', (['modname', 'msg', 'self.invoke_msg.author', 'page'], {}), '(modname, msg, self.invoke_msg.author, page)\n', (1556, 1600), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((3080, 3146), 'pgbot.sandbox.exec_sandbox', 'sandbox.exec_sandbox', (['code.code', 'tstamp', '(10 if self.is_priv else 5)'], {}), '(code.code, tstamp, 10 if self.is_priv else 5)\n', (3100, 3146), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((5818, 5995), 'pgbot.embed_utils.replace', 'embed_utils.replace', (['self.response_msg', '""""""', '""""""', '(16777130)', "('https://raw.githubusercontent.com/PygameCommunityDiscord/' + f'PygameCommunityBot/main/assets/images/{fname}')"], {}), "(self.response_msg, '', '', 16777130, \n 'https://raw.githubusercontent.com/PygameCommunityDiscord/' +\n f'PygameCommunityBot/main/assets/images/{fname}')\n", (5837, 5995), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((3776, 3806), 'os.remove', 'os.remove', (['f"""temp{tstamp}.png"""'], {}), "(f'temp{tstamp}.png')\n", (3785, 3806), False, 'import os\n'), ((4484, 4572), 'pgbot.utils.send_help_message', 'utils.send_help_message', (['msg', 'self.invoke_msg.author', 'self.cmds_and_funcs'], {'page': 'page'}), '(msg, self.invoke_msg.author, self.cmds_and_funcs,\n page=page)\n', (4507, 4572), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((4679, 4765), 'pgbot.utils.send_help_message', 'utils.send_help_message', (['msg', 'self.invoke_msg.author', 'self.cmds_and_funcs', 'argname'], {}), '(msg, self.invoke_msg.author, self.cmds_and_funcs,\n argname)\n', (4702, 4765), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((7037, 7149), 'pgbot.embed_utils.replace', 'embed_utils.replace', (['self.response_msg', '"""Ask forgiveness from snek?"""', '"""Snek is happy. Awww, don\'t be sorry."""'], {}), '(self.response_msg, \'Ask forgiveness from snek?\',\n "Snek is happy. Awww, don\'t be sorry.")\n', (7056, 7149), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((7416, 7614), 'pgbot.embed_utils.replace', 'embed_utils.replace', (['self.response_msg', '"""Ask forgiveness from snek?"""', '(\'Your pythonic lord accepts your apology.\\n\' +\n f"""Now go to code again.\nThe boncc count is {emotion.boncc_count}""")'], {}), '(self.response_msg, \'Ask forgiveness from snek?\', \n """Your pythonic lord accepts your apology.\n""" +\n f"""Now go to code again.\nThe boncc count is {emotion.boncc_count}""")\n', (7435, 7614), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((7710, 7962), 'pgbot.embed_utils.replace', 'embed_utils.replace', (['self.response_msg', '"""Ask forgiveness from snek?"""', '("""How did you dare to boncc a snake?\nBold of you to assume """ +\n """I would apologize to you, two-feet-standing being!\nThe """ +\n f'boncc count is {emotion.boncc_count}')'], {}), '(self.response_msg, \'Ask forgiveness from snek?\', \n """How did you dare to boncc a snake?\nBold of you to assume """ +\n """I would apologize to you, two-feet-standing being!\nThe """ +\n f'boncc count is {emotion.boncc_count}')\n', (7729, 7962), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((8373, 8491), 'pgbot.embed_utils.replace', 'embed_utils.replace', (['self.response_msg', '"""The snek is hurt and angry:"""', 'f"""The boncc count is {emotion.boncc_count}"""'], {}), "(self.response_msg, 'The snek is hurt and angry:',\n f'The boncc count is {emotion.boncc_count}')\n", (8392, 8491), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((8582, 8675), 'pgbot.embed_utils.replace', 'embed_utils.replace', (['self.response_msg', '"""The snek is right"""', '"""Please, don\'t hit the snek"""'], {}), '(self.response_msg, \'The snek is right\',\n "Please, don\'t hit the snek")\n', (8601, 8675), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((9578, 9752), 'pgbot.embed_utils.replace', 'embed_utils.replace', (['self.response_msg', '"""Message does not support pages"""', '"""The message specified does not support pages. Make sure the id of the message is correct."""'], {}), "(self.response_msg, 'Message does not support pages',\n 'The message specified does not support pages. Make sure the id of the message is correct.'\n )\n", (9597, 9752), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((9914, 9940), 're.search', 're.search', (['"""\\\\d+"""', 'data[0]'], {}), "('\\\\d+', data[0])\n", (9923, 9940), False, 'import re\n'), ((10106, 10280), 'pgbot.embed_utils.replace', 'embed_utils.replace', (['self.response_msg', '"""Message does not support pages"""', '"""The message specified does not support pages. Make sure the id of the message is correct."""'], {}), "(self.response_msg, 'Message does not support pages',\n 'The message specified does not support pages. Make sure the id of the message is correct.'\n )\n", (10125, 10280), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((1220, 1248), 'discord.File', 'discord.File', (['f"""temp{t}.png"""'], {}), "(f'temp{t}.png')\n", (1232, 1248), False, 'import discord\n'), ((3325, 3361), 'os.path.getsize', 'os.path.getsize', (['f"""temp{tstamp}.png"""'], {}), "(f'temp{tstamp}.png')\n", (3340, 3361), False, 'import os\n'), ((3977, 4008), 'pgbot.utils.code_block', 'utils.code_block', (['returned.text'], {}), '(returned.text)\n', (3993, 4008), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((9199, 9379), 'pgbot.embed_utils.replace', 'embed_utils.replace', (['self.response_msg', '"""Message not found"""', '"""Message was not found. Make sure that the id is correct and that you are in the same channel as the message."""'], {}), "(self.response_msg, 'Message not found',\n 'Message was not found. Make sure that the id is correct and that you are in the same channel as the message.'\n )\n", (9218, 9379), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((3564, 3668), 'pgbot.embed_utils.replace', 'embed_utils.replace', (['self.response_msg', '"""Image cannot be sent:"""', '"""The image file size is above 4MiB"""'], {}), "(self.response_msg, 'Image cannot be sent:',\n 'The image file size is above 4MiB')\n", (3583, 3668), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((5431, 5442), 'time.time', 'time.time', ([], {}), '()\n', (5440, 5442), False, 'import time\n'), ((3933, 3955), 'pgbot.utils.format_time', 'utils.format_time', (['dur'], {}), '(dur)\n', (3950, 3955), False, 'from pgbot import clock, common, docs, embed_utils, emotion, sandbox, utils\n'), ((3460, 3493), 'discord.File', 'discord.File', (['f"""temp{tstamp}.png"""'], {}), "(f'temp{tstamp}.png')\n", (3472, 3493), False, 'import discord\n'), ((6569, 6580), 'time.time', 'time.time', ([], {}), '()\n', (6578, 6580), False, 'import time\n')]
from collections import defaultdict from typing import Any, Callable, Dict, Iterable, Sequence import numpy as np import pandas as pd import scipy.optimize from invoice_net.parsers import ( parses_as_full_date, parses_as_amount, parses_as_invoice_number, ) from invoice_net.data_handler import DataHandler def __inner_filter_out_mistakes( tokens: Iterable[str], filter_func: Callable[[str], Any], ignore_exceptions: bool = False, ) -> np.ndarray: mask = [] for token in tokens: try: mask.append(bool(filter_func(token))) except Exception: if ignore_exceptions: mask.append(False) else: raise return np.array(mask) def _filter_out_mistakes(token_predictions: pd.DataFrame) -> pd.DataFrame: """Filter out obvious mistakes, like Foo bar -> date prediction""" filters_table: Dict[str, Callable[[str], Any]] = defaultdict( lambda: lambda x: x ) filters_table["document_date"] = parses_as_full_date filters_table["document_id"] = parses_as_invoice_number filters_table["amount_total"] = parses_as_amount groups = [] for prediction, group in token_predictions.groupby("pred"): groups.append( group[ __inner_filter_out_mistakes( group.word, filters_table[prediction] ) ] ) return pd.concat(groups) def _get_token_predictions( predictions: np.ndarray, raw_text: Sequence[str], file_names: pd.Series ) -> pd.DataFrame: """Take model predictions and flatten to prediction per token.""" assert predictions.shape[0] == len(raw_text) == len(file_names), ( f"Number of samples does not match; ({predictions.shape[0]}, " f"{len(raw_text)}, {len(file_names)})" ) assert predictions.ndim == 3 candidates = np.where(predictions > 0.5) tokens = [line.split() for line in raw_text] tmp = [] for sample_idx, token_idx, class_idx in zip(*candidates): # if prediction is not for padding text if len(tokens[sample_idx]) > token_idx: tmp.append( { "word": tokens[sample_idx][token_idx], "pred": class_idx, "confidence": predictions[sample_idx, token_idx, class_idx], "file_name": file_names.iloc[sample_idx], } ) return pd.DataFrame.from_records(tmp) def hungarian_prediction(token_predictions): predictions = defaultdict(dict) for file_name, df in token_predictions.groupby("file_name"): hungarian_table = pd.pivot_table( df, values=["cost"], index=["word"], columns=["pred"], aggfunc=np.min, fill_value=1, ) row_idxs, col_idxs = scipy.optimize.linear_sum_assignment( hungarian_table ) for row_idx, col_idx in zip(row_idxs, col_idxs): col_name = hungarian_table.columns[col_idx][1] predictions[file_name][col_name] = ( hungarian_table.iloc[row_idx].name, 1 - hungarian_table.iloc[row_idx, col_idx], ) predictions_df = pd.DataFrame(predictions).transpose() return predictions_df.reindex(columns=sorted(predictions_df.columns)) def get_predicted_classes( predictions: np.ndarray, data_handler: DataHandler ) -> pd.DataFrame: """Get one predicted label per one file""" token_predictions = _get_token_predictions( predictions, data_handler.data.raw_text, data_handler.data.file_name ) token_predictions["cost"] = 1 - token_predictions["confidence"] token_predictions["pred"] = data_handler.to_human_readable_classes( token_predictions.pred ) token_predictions.drop( token_predictions[token_predictions.pred == "unclassified"].index, inplace=True, ) token_predictions = _filter_out_mistakes(token_predictions) return hungarian_prediction(token_predictions)
[ "pandas.DataFrame", "pandas.pivot_table", "collections.defaultdict", "numpy.where", "numpy.array", "pandas.DataFrame.from_records", "pandas.concat" ]
[((723, 737), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (731, 737), True, 'import numpy as np\n'), ((939, 972), 'collections.defaultdict', 'defaultdict', (['(lambda : lambda x: x)'], {}), '(lambda : lambda x: x)\n', (950, 972), False, 'from collections import defaultdict\n'), ((1434, 1451), 'pandas.concat', 'pd.concat', (['groups'], {}), '(groups)\n', (1443, 1451), True, 'import pandas as pd\n'), ((1893, 1920), 'numpy.where', 'np.where', (['(predictions > 0.5)'], {}), '(predictions > 0.5)\n', (1901, 1920), True, 'import numpy as np\n'), ((2468, 2498), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['tmp'], {}), '(tmp)\n', (2493, 2498), True, 'import pandas as pd\n'), ((2564, 2581), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (2575, 2581), False, 'from collections import defaultdict\n'), ((2673, 2776), 'pandas.pivot_table', 'pd.pivot_table', (['df'], {'values': "['cost']", 'index': "['word']", 'columns': "['pred']", 'aggfunc': 'np.min', 'fill_value': '(1)'}), "(df, values=['cost'], index=['word'], columns=['pred'],\n aggfunc=np.min, fill_value=1)\n", (2687, 2776), True, 'import pandas as pd\n'), ((3273, 3298), 'pandas.DataFrame', 'pd.DataFrame', (['predictions'], {}), '(predictions)\n', (3285, 3298), True, 'import pandas as pd\n')]
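hungarian_prediction above builds a word-by-label cost table per file and solves a one-to-one assignment over it. A tiny numeric sketch of the underlying call (the costs are made up for illustration, not data from the model):

import numpy as np
import scipy.optimize

cost = np.array([[0.1, 0.9],    # word 0: cheap as label 0
                 [0.8, 0.2]])   # word 1: cheap as label 1
rows, cols = scipy.optimize.linear_sum_assignment(cost)
# rows -> array([0, 1]), cols -> array([0, 1]): each label gets exactly one
# word, minimising the total cost (0.1 + 0.2).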
#!/usr/bin/env python # # Copyright (c) 2013-2015 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # # vim: tabstop=4 shiftwidth=4 softtabstop=4 # All Rights Reserved. # from cgtsclient.common import utils from cgtsclient import exc from cgtsclient.v1 import ihost as ihost_utils def _print_port_show(port): fields = ['name', 'namedisplay', 'type', 'pciaddr', 'dev_id', 'numa_node', 'sriov_totalvfs', 'sriov_numvfs', 'sriov_vfs_pci_address', 'driver', 'pclass', 'pvendor', 'pdevice', 'capabilities', 'uuid', 'host_uuid', 'interface_uuid', 'dpdksupport', 'created_at', 'updated_at'] labels = ['name', 'namedisplay', 'type', 'pciaddr', 'dev_id', 'processor', 'sriov_totalvfs', 'sriov_numvfs', 'sriov_vfs_pci_address', 'driver', 'pclass', 'pvendor', 'pdevice', 'capabilities', 'uuid', 'host_uuid', 'interface_uuid', 'accelerated', 'created_at', 'updated_at'] data = [(f, getattr(port, f, '')) for f in fields] utils.print_tuple_list(data, labels) def _find_port(cc, ihost, portnameoruuid): ports = cc.port.list(ihost.uuid) for p in ports: if p.name == portnameoruuid or p.uuid == portnameoruuid: break else: raise exc.CommandError('Port not found: host %s port %s' % (ihost.id, portnameoruuid)) return p @utils.arg('hostnameorid', metavar='<hostname or id>', help="Name or ID of host") @utils.arg('pnameoruuid', metavar='<port name or uuid>', help="Name or UUID of port") def do_host_port_show(cc, args): """Show host port details.""" ihost = ihost_utils._find_ihost(cc, args.hostnameorid) port = _find_port(cc, ihost, args.pnameoruuid) _print_port_show(port) @utils.arg('hostnameorid', metavar='<hostname or id>', help="Name or ID of host") def do_host_port_list(cc, args): """List host ports.""" from cgtsclient.common import wrapping_formatters terminal_width = utils.get_terminal_size()[0] ihost = ihost_utils._find_ihost(cc, args.hostnameorid) ports = cc.port.list(ihost.uuid) field_labels = ['uuid', 'name', 'type', 'pci address', 'device', 'processor', 'accelerated', 'device type'] fields = ['uuid', 'name', 'type', 'pciaddr', 'dev_id', 'numa_node', 'dpdksupport', 'pdevice'] format_spec = wrapping_formatters.build_best_guess_formatters_using_average_widths(ports, fields, field_labels, no_wrap_fields=['pciaddr']) # best-guess formatter does not make a good guess for # proper width of pdevice until terminal is > 155 # We override that width here. pdevice_width = None if terminal_width <= 130: pdevice_width = .1 elif 131 >= terminal_width <= 150: pdevice_width = .13 elif 151 >= terminal_width <= 155: pdevice_width = .14 if pdevice_width and format_spec["pdevice"] > pdevice_width: format_spec["pdevice"] = pdevice_width formatters = wrapping_formatters.build_wrapping_formatters(ports, fields, field_labels, format_spec) utils.print_list(ports, fields, field_labels, formatters=formatters, sortby=1)
[ "cgtsclient.common.utils.print_tuple_list", "cgtsclient.v1.ihost._find_ihost", "cgtsclient.common.wrapping_formatters.build_best_guess_formatters_using_average_widths", "cgtsclient.common.utils.arg", "cgtsclient.common.wrapping_formatters.build_wrapping_formatters", "cgtsclient.common.utils.get_terminal_size", "cgtsclient.common.utils.print_list", "cgtsclient.exc.CommandError" ]
[((1511, 1596), 'cgtsclient.common.utils.arg', 'utils.arg', (['"""hostnameorid"""'], {'metavar': '"""<hostname or id>"""', 'help': '"""Name or ID of host"""'}), "('hostnameorid', metavar='<hostname or id>', help='Name or ID of host'\n )\n", (1520, 1596), False, 'from cgtsclient.common import utils\n'), ((1615, 1704), 'cgtsclient.common.utils.arg', 'utils.arg', (['"""pnameoruuid"""'], {'metavar': '"""<port name or uuid>"""', 'help': '"""Name or UUID of port"""'}), "('pnameoruuid', metavar='<port name or uuid>', help=\n 'Name or UUID of port')\n", (1624, 1704), False, 'from cgtsclient.common import utils\n'), ((1907, 1992), 'cgtsclient.common.utils.arg', 'utils.arg', (['"""hostnameorid"""'], {'metavar': '"""<hostname or id>"""', 'help': '"""Name or ID of host"""'}), "('hostnameorid', metavar='<hostname or id>', help='Name or ID of host'\n )\n", (1916, 1992), False, 'from cgtsclient.common import utils\n'), ((1168, 1204), 'cgtsclient.common.utils.print_tuple_list', 'utils.print_tuple_list', (['data', 'labels'], {}), '(data, labels)\n', (1190, 1204), False, 'from cgtsclient.common import utils\n'), ((1779, 1825), 'cgtsclient.v1.ihost._find_ihost', 'ihost_utils._find_ihost', (['cc', 'args.hostnameorid'], {}), '(cc, args.hostnameorid)\n', (1802, 1825), True, 'from cgtsclient.v1 import ihost as ihost_utils\n'), ((2189, 2235), 'cgtsclient.v1.ihost._find_ihost', 'ihost_utils._find_ihost', (['cc', 'args.hostnameorid'], {}), '(cc, args.hostnameorid)\n', (2212, 2235), True, 'from cgtsclient.v1 import ihost as ihost_utils\n'), ((2538, 2667), 'cgtsclient.common.wrapping_formatters.build_best_guess_formatters_using_average_widths', 'wrapping_formatters.build_best_guess_formatters_using_average_widths', (['ports', 'fields', 'field_labels'], {'no_wrap_fields': "['pciaddr']"}), "(ports,\n fields, field_labels, no_wrap_fields=['pciaddr'])\n", (2606, 2667), False, 'from cgtsclient.common import wrapping_formatters\n'), ((3245, 3336), 'cgtsclient.common.wrapping_formatters.build_wrapping_formatters', 'wrapping_formatters.build_wrapping_formatters', (['ports', 'fields', 'field_labels', 'format_spec'], {}), '(ports, fields, field_labels,\n format_spec)\n', (3290, 3336), False, 'from cgtsclient.common import wrapping_formatters\n'), ((3338, 3416), 'cgtsclient.common.utils.print_list', 'utils.print_list', (['ports', 'fields', 'field_labels'], {'formatters': 'formatters', 'sortby': '(1)'}), '(ports, fields, field_labels, formatters=formatters, sortby=1)\n', (3354, 3416), False, 'from cgtsclient.common import utils\n'), ((1414, 1499), 'cgtsclient.exc.CommandError', 'exc.CommandError', (["('Port not found: host %s port %s' % (ihost.id, portnameoruuid))"], {}), "('Port not found: host %s port %s' % (ihost.id, portnameoruuid)\n )\n", (1430, 1499), False, 'from cgtsclient import exc\n'), ((2147, 2172), 'cgtsclient.common.utils.get_terminal_size', 'utils.get_terminal_size', ([], {}), '()\n', (2170, 2172), False, 'from cgtsclient.common import utils\n')]
# Install the packages from the terminal like this: python -m pip install pymongo dnspython import os import pymongo from pokemon_dataset import dataset from dotenv import load_dotenv load_dotenv() mongoDBString = os.getenv('MONGO_DB_LINK') class Database: def __init__(self): self.clusterConnection = pymongo.MongoClient( mongoDBString, # In case the [SSL_INVALID_CERTIFICATE] error occurs tlsAllowInvalidCertificates=True ) self.db = self.clusterConnection['pokemon'] self.db.drop_collection('pokedex') self.collection = self.db['pokedex'] self.collection.insert_many(dataset) def executeQuery(self, filters, optionalFilters): response = self.collection.find(filters, optionalFilters) pokemons = [] for pokemon in response: pokemons.append(pokemon) return pokemons def getAllPokemons(self): response = self.collection.find({}, {"name": 1, "_id": 0}) pokemons = [] for pokemon in response: pokemons.append(pokemon) return pokemons def getPokemonByName(self, name: str): response = self.collection.find({"name": name}, {"_id": 0, "name": 1, "next_evolution": 1, "prev_evolution": 1, "type": 1, "weaknesses": 1}) result = {} for pokemon in response: result = pokemon return result def getPokemonsByType(self, type: list): response = self.collection.find({"type": {"$all": type}}, { "_id": 0, "name": 1, "type": 1}) result = [] for pokemon in response: result.append(pokemon) return result def getPokemonEvolutionsByName(self, name: str): pokemon = self.getPokemonByName(name) evolutions = [pokemon['name']] hasNextEvolutions = ('next_evolution' in pokemon) hasPrevEvolutions = ('prev_evolution' in pokemon) if hasNextEvolutions: nextEvolutions = list(pokemon['next_evolution']) for evolution in nextEvolutions: evolution = self.getPokemonByName(evolution['name']) evolutions.append(evolution['name']) if hasPrevEvolutions: previousEvolutions = list(pokemon['prev_evolution']) for evolution in previousEvolutions: evolution = self.getPokemonByName(evolution['name']) evolutions.append(evolution['name']) return evolutions
[ "dotenv.load_dotenv", "pymongo.MongoClient", "os.getenv" ]
[((180, 193), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (191, 193), False, 'from dotenv import load_dotenv\n'), ((210, 236), 'os.getenv', 'os.getenv', (['"""MONGO_DB_LINK"""'], {}), "('MONGO_DB_LINK')\n", (219, 236), False, 'import os\n'), ((311, 379), 'pymongo.MongoClient', 'pymongo.MongoClient', (['mongoDBString'], {'tlsAllowInvalidCertificates': '(True)'}), '(mongoDBString, tlsAllowInvalidCertificates=True)\n', (330, 379), False, 'import pymongo\n')]
import re import time import requests from pyquery import PyQuery as pq from fetchers.BaseFetcher import BaseFetcher class KaiXinFetcher(BaseFetcher): """ http://www.kxdaili.com/dailiip.html Code contributed by [Zealot666](https://github.com/Zealot666) """ def fetch(self): """ Perform one crawl and return a list whose elements are (protocol, ip, port); protocol is the protocol name, currently mainly http. Example return value: [('http', '127.0.0.1', 8080), ('http', '127.0.0.1', 1234)] """ urls = [] urls = urls + [f'http://www.kxdaili.com/dailiip/1/{page}.html' for page in range(1, 11)] urls = urls + [f'http://www.kxdaili.com/dailiip/2/{page}.html' for page in range(1, 11)] proxies = [] ip_regex = re.compile(r'^\d+\.\d+\.\d+\.\d+$') port_regex = re.compile(r'^\d+$') for url in urls: html = requests.get(url, timeout=10, verify=False).text doc = pq(html) for line in doc('tr').items(): tds = list(line('td').items()) if len(tds) >= 2: ip = tds[0].text().strip() port = tds[1].text().strip() if re.match(ip_regex, ip) is not None and re.match(port_regex, port) is not None: proxies.append(('http', ip, int(port))) return proxies if __name__ == '__main__': f = KaiXinFetcher() ps = f.fetch() print(ps)
[ "pyquery.PyQuery", "re.match", "requests.get", "re.compile" ]
[((707, 748), 're.compile', 're.compile', (['"""^\\\\d+\\\\.\\\\d+\\\\.\\\\d+\\\\.\\\\d+$"""'], {}), "('^\\\\d+\\\\.\\\\d+\\\\.\\\\d+\\\\.\\\\d+$')\n", (717, 748), False, 'import re\n'), ((764, 784), 're.compile', 're.compile', (['"""^\\\\d+$"""'], {}), "('^\\\\d+$')\n", (774, 784), False, 'import re\n'), ((897, 905), 'pyquery.PyQuery', 'pq', (['html'], {}), '(html)\n', (899, 905), True, 'from pyquery import PyQuery as pq\n'), ((830, 873), 'requests.get', 'requests.get', (['url'], {'timeout': '(10)', 'verify': '(False)'}), '(url, timeout=10, verify=False)\n', (842, 873), False, 'import requests\n'), ((1149, 1171), 're.match', 're.match', (['ip_regex', 'ip'], {}), '(ip_regex, ip)\n', (1157, 1171), False, 'import re\n'), ((1188, 1214), 're.match', 're.match', (['port_regex', 'port'], {}), '(port_regex, port)\n', (1196, 1214), False, 'import re\n')]
# IMPORT LIBRARIES import random # For random number generation import matplotlib.pyplot as plt # For plotting, use shorthand from matplotlib.animation import FuncAnimation # For animation import tkinter # For GUI programming import csv # For reading in csv files import agentframework1 # Contains the Agent class import matplotlib # For mapping matplotlib.use('TkAgg') # For creating GUI # SET VARIABLES # For the number of sheep (agents) num_of_agents = 25 # For the number of times the model iterates num_of_iterations = 70 # For the proximity of sheep to share resources with others neighbourhood = 20 # Create empty environment list environment = [] # Create empty list for agents agents = [] # CREATE THE ENVIRONMENT - FIELD FOR SHEEP # Use CSV reader code to open and read in text file f = open('in.txt', newline = '') reader = csv.reader(f, quoting = csv.QUOTE_NONNUMERIC) # Shift environment into a 2.D. list for row in reader: # For every row rowlist = [] # Create an empty rowlist for value in row: rowlist.append(value) # Attach all values to rowlist environment.append(rowlist) # Attach rowlist to environment plt.savefig('Field') # Save the figure f.close() # Good practice to close file # CREATE THE AGENTS - SHEEP TO GRAZE FIELD # Give each agent access to info on others for i in range(num_of_agents): agents.append(agentframework1.Agent(environment, agents, neighbourhood)) # Check the agentframework file is connected using print function a = agentframework1.Agent(environment, agents, neighbourhood) # print(a._y, a._x) a.move() # Check the co-ordinates move # print(a._y, a._x) # SET UP THE MODEL fig = plt.figure(figsize=(14, 14)) # Set figure size using matplotlib def update(frame_number): # Define update function by frame_number global carry_on fig.clear() # Clear figure plt.imshow(environment) # Show field random.shuffle(agents) # Randomise order of actions # For each agent initialize behaviour from agentframework1 for i in range(num_of_agents): agents[i].move() agents[i].eat() agents[i].share_with_neighbours(neighbourhood) agents[i].sick() # Plot the sheep for i in range(num_of_agents): # For every agent plt.scatter(agents[i]._x,agents[i]._y, marker='*', color="white", s=100) # Plot using a white star # Edit how the figure looks plt.ylim(0,100) # Y axis length plt.xlim(0,100) # X axis length plt.ylabel("Grass field") # Set y axis label plt.xlabel("Grass field") # Set x axis label plt.title("An Agent Based Model depicting sheep grazing a field", fontsize='xx-large') # Choose title plt.legend(["Sheep"], loc = "upper center") # Choose legend and location # ANIMATE THE MODEL def run(): # Define run function # Use FuncAnimation to set the frames to number of iterations animation = matplotlib.animation.FuncAnimation(fig, update, frames= num_of_iterations, repeat = False) canvas.draw() # Draw the figure # SET UP GUI # Build the main window root = tkinter.Tk() root.wm_title("ABM") # Set window title to ABM canvas = matplotlib.backends.backend_tkagg.FigureCanvasTkAgg(fig, master=root) # canvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1) # Build menu bar with option to run model menu_bar = tkinter.Menu(root) root.config(menu=menu_bar) model_menu = tkinter.Menu(menu_bar) menu_bar.add_cascade(label="Model", menu=model_menu) model_menu.add_command(label="Run model", command=run) tkinter.mainloop() # Sets the GUI waiting for events # Write out environment as file f2= open('environmentout.csv', 'a', newline='') # Open csv file writer= csv.writer(f2, delimiter=',') # Use csv writer for row in environment: writer.writerow(row) f2.close() # Close file # Work out total stored by all agents totalstore = 0 for agent in agents: totalstore = agent.store + totalstore print("Total consumption:", totalstore) # Print total store
[ "matplotlib.pyplot.title", "csv.reader", "random.shuffle", "matplotlib.animation.FuncAnimation", "matplotlib.pyplot.figure", "matplotlib.pyplot.imshow", "tkinter.Tk", "tkinter.Menu", "csv.writer", "tkinter.mainloop", "matplotlib.pyplot.ylim", "matplotlib.pyplot.legend", "matplotlib.use", "matplotlib.pyplot.ylabel", "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "matplotlib.pyplot.xlim", "matplotlib.pyplot.scatter", "agentframework1.Agent", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.savefig" ]
[((356, 379), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (370, 379), False, 'import matplotlib\n'), ((1068, 1111), 'csv.reader', 'csv.reader', (['f'], {'quoting': 'csv.QUOTE_NONNUMERIC'}), '(f, quoting=csv.QUOTE_NONNUMERIC)\n', (1078, 1111), False, 'import csv\n'), ((1751, 1808), 'agentframework1.Agent', 'agentframework1.Agent', (['environment', 'agents', 'neighbourhood'], {}), '(environment, agents, neighbourhood)\n', (1772, 1808), False, 'import agentframework1\n'), ((1922, 1950), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 14)'}), '(figsize=(14, 14))\n', (1932, 1950), True, 'import matplotlib.pyplot as plt\n'), ((3525, 3537), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (3535, 3537), False, 'import tkinter\n'), ((3596, 3665), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', (['fig'], {'master': 'root'}), '(fig, master=root)\n', (3647, 3665), False, 'import matplotlib\n'), ((3795, 3813), 'tkinter.Menu', 'tkinter.Menu', (['root'], {}), '(root)\n', (3807, 3813), False, 'import tkinter\n'), ((3856, 3878), 'tkinter.Menu', 'tkinter.Menu', (['menu_bar'], {}), '(menu_bar)\n', (3868, 3878), False, 'import tkinter\n'), ((3992, 4010), 'tkinter.mainloop', 'tkinter.mainloop', ([], {}), '()\n', (4008, 4010), False, 'import tkinter\n'), ((4158, 4187), 'csv.writer', 'csv.writer', (['f2'], {'delimiter': '""","""'}), "(f2, delimiter=',')\n", (4168, 4187), False, 'import csv\n'), ((1391, 1411), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Field"""'], {}), "('Field')\n", (1402, 1411), True, 'import matplotlib.pyplot as plt\n'), ((2126, 2149), 'matplotlib.pyplot.imshow', 'plt.imshow', (['environment'], {}), '(environment)\n', (2136, 2149), True, 'import matplotlib.pyplot as plt\n'), ((2175, 2197), 'random.shuffle', 'random.shuffle', (['agents'], {}), '(agents)\n', (2189, 2197), False, 'import random\n'), ((2780, 2796), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(100)'], {}), '(0, 100)\n', (2788, 2796), True, 'import matplotlib.pyplot as plt\n'), ((2817, 2833), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(100)'], {}), '(0, 100)\n', (2825, 2833), True, 'import matplotlib.pyplot as plt\n'), ((2854, 2879), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Grass field"""'], {}), "('Grass field')\n", (2864, 2879), True, 'import matplotlib.pyplot as plt\n'), ((2904, 2929), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Grass field"""'], {}), "('Grass field')\n", (2914, 2929), True, 'import matplotlib.pyplot as plt\n'), ((2954, 3045), 'matplotlib.pyplot.title', 'plt.title', (['"""An Agent Based Model depicting sheep grazing a field"""'], {'fontsize': '"""xx-large"""'}), "('An Agent Based Model depicting sheep grazing a field', fontsize=\n 'xx-large')\n", (2963, 3045), True, 'import matplotlib.pyplot as plt\n'), ((3061, 3102), 'matplotlib.pyplot.legend', 'plt.legend', (["['Sheep']"], {'loc': '"""upper center"""'}), "(['Sheep'], loc='upper center')\n", (3071, 3102), True, 'import matplotlib.pyplot as plt\n'), ((3281, 3372), 'matplotlib.animation.FuncAnimation', 'matplotlib.animation.FuncAnimation', (['fig', 'update'], {'frames': 'num_of_iterations', 'repeat': '(False)'}), '(fig, update, frames=num_of_iterations,\n repeat=False)\n', (3315, 3372), False, 'import matplotlib\n'), ((1618, 1675), 'agentframework1.Agent', 'agentframework1.Agent', (['environment', 'agents', 'neighbourhood'], {}), '(environment, agents, neighbourhood)\n', (1639, 1675), False, 'import agentframework1\n'), ((2599, 2672), 'matplotlib.pyplot.scatter', 'plt.scatter', (['agents[i]._x', 'agents[i]._y'], {'marker': '"""*"""', 'color': '"""white"""', 's': '(100)'}), "(agents[i]._x, agents[i]._y, marker='*', color='white', s=100)\n", (2610, 2672), True, 'import matplotlib.pyplot as plt\n')]
#!/usr/bin/python # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os import sys import torch from dlrm.data.datasets import SyntheticDataset from dlrm.model.single import Dlrm from dlrm.utils.checkpointing.serial import make_serial_checkpoint_loader from triton import deployer_lib sys.path.append('../') def get_model_args(model_args): parser = argparse.ArgumentParser() parser.add_argument("--batch_size", default=1, type=int) parser.add_argument("--fp16", action="store_true", default=False) parser.add_argument("--dump_perf_data", type=str, default=None) parser.add_argument("--model_checkpoint", type=str, default=None) parser.add_argument("--num_numerical_features", type=int, default=13) parser.add_argument("--embedding_dim", type=int, default=128) parser.add_argument("--embedding_type", type=str, default="joint", choices=["joint", "multi_table"]) parser.add_argument("--top_mlp_sizes", type=int, nargs="+", default=[1024, 1024, 512, 256, 1]) parser.add_argument("--bottom_mlp_sizes", type=int, nargs="+", default=[512, 256, 128]) parser.add_argument("--interaction_op", type=str, default="dot", choices=["dot", "cat"]) parser.add_argument("--cpu", default=False, action="store_true") parser.add_argument("--dataset", type=str, required=True) return parser.parse_args(model_args) def initialize_model(args, categorical_sizes): ''' return model, ready to trace ''' base_device = "cuda:0" if not args.cpu else "cpu" model_config = { 'top_mlp_sizes': args.top_mlp_sizes, 'bottom_mlp_sizes': args.bottom_mlp_sizes, 'embedding_dim': args.embedding_dim, 'interaction_op': args.interaction_op, 'categorical_feature_sizes': categorical_sizes, 'num_numerical_features': args.num_numerical_features, 'embedding_type': args.embedding_type, 'hash_indices': False, 'use_cpp_mlp': False, 'fp16': args.fp16, 'base_device': base_device, } model = Dlrm.from_dict(model_config) model.to(base_device) if args.model_checkpoint: checkpoint_loader = make_serial_checkpoint_loader(range(len(categorical_sizes)), device="cpu") checkpoint_loader.load_checkpoint(model, args.model_checkpoint) model.to(base_device) if args.fp16: model = model.half() return model def get_dataloader(args, categorical_sizes): dataset_test = SyntheticDataset(num_entries=2000, batch_size=args.batch_size, numerical_features=args.num_numerical_features, categorical_feature_sizes=categorical_sizes, device="cpu" if args.cpu else "cuda:0") class RemoveOutput: def __init__(self, dataset): self.dataset = dataset def __getitem__(self, idx): value = self.dataset[idx] if args.fp16: value = (value[0].half(), value[1].long(), value[2]) else: value = (value[0], value[1].long(), value[2]) return value[:-1] def __len__(self): return len(self.dataset) test_loader = torch.utils.data.DataLoader(RemoveOutput(dataset_test), batch_size=None, num_workers=0, pin_memory=False) return test_loader if __name__=='__main__': # deploys and returns removed deployer arguments deployer, model_args = deployer_lib.create_deployer(sys.argv[1:], get_model_args) with open(os.path.join(model_args.dataset, "model_size.json")) as f: categorical_sizes = list(json.load(f).values()) categorical_sizes = [s + 1 for s in categorical_sizes] model = initialize_model(model_args, categorical_sizes) dataloader = get_dataloader(model_args, categorical_sizes) if model_args.dump_perf_data: input_0, input_1 = next(iter(dataloader)) if model_args.fp16: input_0 = input_0.half() os.makedirs(model_args.dump_perf_data, exist_ok=True) input_0.detach().cpu().numpy()[0].tofile(os.path.join(model_args.dump_perf_data, "input__0")) input_1.detach().cpu().numpy()[0].tofile(os.path.join(model_args.dump_perf_data, "input__1")) deployer.deploy(dataloader, model)
[ "sys.path.append", "json.load", "argparse.ArgumentParser", "dlrm.data.datasets.SyntheticDataset", "os.makedirs", "dlrm.model.single.Dlrm.from_dict", "triton.deployer_lib.create_deployer", "os.path.join" ]
[((885, 907), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (900, 907), False, 'import sys\n'), ((955, 980), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (978, 980), False, 'import argparse\n'), ((2687, 2715), 'dlrm.model.single.Dlrm.from_dict', 'Dlrm.from_dict', (['model_config'], {}), '(model_config)\n', (2701, 2715), False, 'from dlrm.model.single import Dlrm\n'), ((3110, 3317), 'dlrm.data.datasets.SyntheticDataset', 'SyntheticDataset', ([], {'num_entries': '(2000)', 'batch_size': 'args.batch_size', 'numerical_features': 'args.num_numerical_features', 'categorical_feature_sizes': 'categorical_sizes', 'device': "('cpu' if args.cpu else 'cuda:0')"}), "(num_entries=2000, batch_size=args.batch_size,\n numerical_features=args.num_numerical_features,\n categorical_feature_sizes=categorical_sizes, device='cpu' if args.cpu else\n 'cuda:0')\n", (3126, 3317), False, 'from dlrm.data.datasets import SyntheticDataset\n'), ((4285, 4343), 'triton.deployer_lib.create_deployer', 'deployer_lib.create_deployer', (['sys.argv[1:]', 'get_model_args'], {}), '(sys.argv[1:], get_model_args)\n', (4313, 4343), False, 'from triton import deployer_lib\n'), ((4876, 4929), 'os.makedirs', 'os.makedirs', (['model_args.dump_perf_data'], {'exist_ok': '(True)'}), '(model_args.dump_perf_data, exist_ok=True)\n', (4887, 4929), False, 'import os\n'), ((4415, 4466), 'os.path.join', 'os.path.join', (['model_args.dataset', '"""model_size.json"""'], {}), "(model_args.dataset, 'model_size.json')\n", (4427, 4466), False, 'import os\n'), ((4979, 5030), 'os.path.join', 'os.path.join', (['model_args.dump_perf_data', '"""input__0"""'], {}), "(model_args.dump_perf_data, 'input__0')\n", (4991, 5030), False, 'import os\n'), ((5081, 5132), 'os.path.join', 'os.path.join', (['model_args.dump_perf_data', '"""input__1"""'], {}), "(model_args.dump_perf_data, 'input__1')\n", (5093, 5132), False, 'import os\n'), ((4507, 4519), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4516, 4519), False, 'import json\n')]
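A small aside on the categorical-size handling in the script above: it reads model_size.json and adds one to every size before building the model, presumably to reserve an extra embedding index (e.g. for a missing or out-of-range category). A minimal sketch of that round trip; the file contents here are hypothetical:

import json

# Hypothetical contents of <dataset>/model_size.json
raw = '{"cat_0": 7912, "cat_1": 33823}'
categorical_sizes = list(json.loads(raw).values())
categorical_sizes = [s + 1 for s in categorical_sizes]
assert categorical_sizes == [7913, 33824]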
import os from setuptools import setup, find_packages # allow setup.py to be run from any path os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) def getReqs(reqfile='requirements.txt'): reqs = [] with open(os.path.join(os.path.dirname(__file__), reqfile)) as fp: lines = fp.readlines() for line in lines: line = line.strip() if len(line) and line[0] != "#": reqs.append(line) return reqs REQUIREMENTS = getReqs() setup( name='do_cli', version='0.0.2', packages=find_packages(), include_package_data=True, description='manage digitalocean stuff', long_description='manage digitalocean stuff', url='https://github.com/meganlkm/do-cli', author='Dev<EMAIL>.IO', author_email='<EMAIL>', install_requires=REQUIREMENTS, entry_points=""" [console_scripts] do-cli=do_cli.cli:cli """ )
[ "os.path.abspath", "os.path.dirname", "setuptools.find_packages" ]
[((577, 592), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (590, 592), False, 'from setuptools import setup, find_packages\n'), ((135, 160), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (150, 160), False, 'import os\n'), ((259, 284), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (274, 284), False, 'import os\n')]
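For reference, the getReqs helper in the setup script above just drops blank lines and # comments from requirements.txt. A self-contained sketch of the same filtering on an in-memory sample (the package names are hypothetical):

def parse_reqs(text):
    reqs = []
    for line in text.splitlines():
        line = line.strip()
        if len(line) and line[0] != "#":
            reqs.append(line)
    return reqs

sample = "# pinned deps\nclick>=7.0\n\npython-digitalocean\n"
assert parse_reqs(sample) == ["click>=7.0", "python-digitalocean"]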
import sqlite3
import asyncio
import discord
import sys
import datetime
import os

from utils import models
from utils.models import db
from configparser import ConfigParser

SQLITE_FILE = './data/kurisu.sqlite'

IS_DOCKER = os.environ.get('IS_DOCKER', 0)

if IS_DOCKER:
    db_user_file = os.environ.get('DB_USER')
    db_password_file = os.environ.get('DB_PASSWORD')
    if db_user_file and db_password_file:
        with open(db_user_file, 'r', encoding='utf-8') as f:
            db_user = f.readline().strip()
        with open(db_password_file, 'r', encoding='utf-8') as f:
            db_password = f.readline().strip()
        DATABASE_URL = f"postgresql://{db_user}:{db_password}@db/{db_user}"
    else:
        sys.exit('Database user and database password file paths need to be provided')
else:
    config = ConfigParser()
    config.read("data/config.ini")
    DATABASE_URL = config['Main']['database_url']


def has_seconds(str_timestamp):
    return "." in str_timestamp.split(":")[2]


async def main():
    await db.set_bind(DATABASE_URL)
    await db.gino.drop_all()
    await db.gino.create_all()
    conn = sqlite3.connect(SQLITE_FILE)
    c = conn.cursor()
    users = []

    # Friend Codes
    c.execute('SELECT * FROM friend_codes')
    data = c.fetchall()
    fc_entries = []
    if data:
        for entry in data:
            fc_entries.append(dict(id=entry[0], fc_3ds=entry[1]))
            users.append(entry[0])

    # permanent roles
    c.execute('SELECT * FROM permanent_roles')
    data = c.fetchall()
    perm_entries = []
    roles = []
    if data:
        for entry in data:
            if entry[1] not in roles:
                roles.append(entry[1])
            if entry[0] not in users:
                users.append(entry[0])
            # There are duplicate entries for some reason
            if entry not in perm_entries:
                perm_entries.append(entry)

    await models.Role.insert().gino.all([dict(id=id) for id in roles])

    # softban
    c.execute('SELECT * FROM softbans')
    data = c.fetchall()
    softban_entries = []
    if data:
        for entry in data:
            softban_entries.append(
                dict(id=discord.utils.time_snowflake(
                    datetime.datetime.strptime(entry[3], '%Y-%m-%d %H:%M:%S.%f' if has_seconds(entry[3]) else '%Y-%m-%d %H:%M:%S')),
                     user=entry[0], issuer=entry[1], reason=entry[2]))
            if entry[0] not in users:
                users.append(entry[0])
            if entry[1] not in users:
                users.append(entry[1])

    # Timed Restrictions
    c.execute('SELECT * FROM timed_restrictions')
    data = c.fetchall()
    timeres_entries = []
    if data:
        for i, entry in enumerate(data):
            timeres_entries.append(
                dict(id=discord.utils.time_snowflake(datetime.datetime.now() + datetime.timedelta(0, i)),
                     user=entry[0], type=entry[2],
                     end_date=datetime.datetime.strptime(entry[1], '%Y-%m-%d %H:%M:%S.%f' if has_seconds(entry[1]) else '%Y-%m-%d %H:%M:%S'),
                     alerted=bool(entry[3])))
            if entry[0] not in users:
                users.append(entry[0])

    # Warns
    c.execute('SELECT * FROM warns')
    data = c.fetchall()
    warn_entries = []
    if data:
        for entry in data:
            warn_entries.append(
                dict(id=entry[0], user=entry[1], issuer=entry[2], reason=entry[3]))
            if entry[1] not in users:
                users.append(entry[1])
            if entry[2] not in users:
                users.append(entry[2])

    # Staff
    c.execute('SELECT * FROM staff')
    data = c.fetchall()
    staff_entries = []
    staff = []
    if data:
        for entry in data:
            staff_entries.append(dict(id=entry[0], position=entry[1], console=None))
            staff.append(entry[0])
            if entry[0] not in users:
                users.append(entry[0])

    # Helpers
    c.execute('SELECT * FROM helpers')
    data = c.fetchall()
    helper_entries = []
    if data:
        for entry in data:
            found = False
            for a in staff_entries:
                if entry[0] in a.values():
                    a['console'] = entry[1]
                    found = True
                    break
            if not found:
                helper_entries.append(dict(id=entry[0], position='Helper', console=entry[1]))
            if entry[0] not in users:
                users.append(entry[0])

    # channels
    c.execute('SELECT * FROM nofilter')
    data = c.fetchall()
    if data:
        channel_entries = [dict(id=entry[0], nofilter=True) for entry in data]
        await models.Channel.insert().gino.all(channel_entries)

    # wordfilter
    c.execute('SELECT * FROM wordfilter')
    data = c.fetchall()
    if data:
        word_entries = [dict(word=entry[0], kind=entry[1]) for entry in data]
        await models.FilteredWord.insert().gino.all(word_entries)

    # invitefilter
    c.execute('SELECT * FROM invitefilter')
    data = c.fetchall()
    if data:
        invite_entries = [dict(code=entry[0], uses=entry[3], alias=entry[2]) for entry in data]
        await models.ApprovedInvite.insert().gino.all(invite_entries)

    # flags
    c.execute('SELECT * FROM flags')
    data = c.fetchall()
    if data:
        flag_entries = [dict(name=entry[0], value=bool(entry[1])) for entry in data]
        await models.Flag.insert().gino.all(flag_entries)

    await models.Member.insert().gino.all([dict(id=id) for id in users])
    await models.FriendCode.insert().gino.all(fc_entries)
    await models.Staff.insert().gino.all(staff_entries)
    await models.TimedRestriction.insert().gino.all(timeres_entries)
    await models.Warn.insert().gino.all(warn_entries)
    await models.PermanentRole.insert().gino.all([dict(user_id=user, role_id=role) for user, role in perm_entries])
    await models.Softban.insert().gino.all(softban_entries)
    await models.Staff.insert().gino.all(helper_entries)

    # Watchlist
    c.execute('SELECT * FROM watchlist')
    data = c.fetchall()
    mb_entries = []
    if data:
        async with db.transaction():
            for entry in data:
                if entry[0] in users:
                    await db.status(f"UPDATE members SET watched='true' where id={entry[0]}")
                else:
                    mb_entries.append(dict(id=entry[0], watched=True))

    await models.Member.insert().gino.all(mb_entries)


asyncio.get_event_loop().run_until_complete(main())
[ "utils.models.Flag.insert", "utils.models.Warn.insert", "utils.models.FriendCode.insert", "utils.models.TimedRestriction.insert", "datetime.timedelta", "utils.models.db.set_bind", "utils.models.db.gino.create_all", "configparser.ConfigParser", "utils.models.FilteredWord.insert", "datetime.datetime.now", "asyncio.get_event_loop", "utils.models.db.transaction", "utils.models.ApprovedInvite.insert", "utils.models.db.gino.drop_all", "sqlite3.connect", "sys.exit", "utils.models.Member.insert", "utils.models.Channel.insert", "os.environ.get", "utils.models.Staff.insert", "utils.models.PermanentRole.insert", "utils.models.db.status", "utils.models.Softban.insert", "utils.models.Role.insert" ]
[((224, 254), 'os.environ.get', 'os.environ.get', (['"""IS_DOCKER"""', '(0)'], {}), "('IS_DOCKER', 0)\n", (238, 254), False, 'import os\n'), ((289, 314), 'os.environ.get', 'os.environ.get', (['"""DB_USER"""'], {}), "('DB_USER')\n", (303, 314), False, 'import os\n'), ((338, 367), 'os.environ.get', 'os.environ.get', (['"""DB_PASSWORD"""'], {}), "('DB_PASSWORD')\n", (352, 367), False, 'import os\n'), ((819, 833), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (831, 833), False, 'from configparser import ConfigParser\n'), ((1126, 1154), 'sqlite3.connect', 'sqlite3.connect', (['SQLITE_FILE'], {}), '(SQLITE_FILE)\n', (1141, 1154), False, 'import sqlite3\n'), ((720, 799), 'sys.exit', 'sys.exit', (['"""Database user and database password files paths need to be provided"""'], {}), "('Database user and database password files paths need to be provided')\n", (728, 799), False, 'import sys\n'), ((1029, 1054), 'utils.models.db.set_bind', 'db.set_bind', (['DATABASE_URL'], {}), '(DATABASE_URL)\n', (1040, 1054), False, 'from utils.models import db\n'), ((1065, 1083), 'utils.models.db.gino.drop_all', 'db.gino.drop_all', ([], {}), '()\n', (1081, 1083), False, 'from utils.models import db\n'), ((1094, 1114), 'utils.models.db.gino.create_all', 'db.gino.create_all', ([], {}), '()\n', (1112, 1114), False, 'from utils.models import db\n'), ((6539, 6563), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (6561, 6563), False, 'import asyncio\n'), ((6205, 6221), 'utils.models.db.transaction', 'db.transaction', ([], {}), '()\n', (6219, 6221), False, 'from utils.models import db\n'), ((5538, 5560), 'utils.models.Member.insert', 'models.Member.insert', ([], {}), '()\n', (5558, 5560), False, 'from utils import models\n'), ((5611, 5637), 'utils.models.FriendCode.insert', 'models.FriendCode.insert', ([], {}), '()\n', (5635, 5637), False, 'from utils import models\n'), ((5669, 5690), 'utils.models.Staff.insert', 'models.Staff.insert', ([], {}), '()\n', (5688, 5690), False, 'from utils import models\n'), ((5725, 5757), 'utils.models.TimedRestriction.insert', 'models.TimedRestriction.insert', ([], {}), '()\n', (5755, 5757), False, 'from utils import models\n'), ((5794, 5814), 'utils.models.Warn.insert', 'models.Warn.insert', ([], {}), '()\n', (5812, 5814), False, 'from utils import models\n'), ((5848, 5877), 'utils.models.PermanentRole.insert', 'models.PermanentRole.insert', ([], {}), '()\n', (5875, 5877), False, 'from utils import models\n'), ((5964, 5987), 'utils.models.Softban.insert', 'models.Softban.insert', ([], {}), '()\n', (5985, 5987), False, 'from utils import models\n'), ((6024, 6045), 'utils.models.Staff.insert', 'models.Staff.insert', ([], {}), '()\n', (6043, 6045), False, 'from utils import models\n'), ((1923, 1943), 'utils.models.Role.insert', 'models.Role.insert', ([], {}), '()\n', (1941, 1943), False, 'from utils import models\n'), ((4739, 4762), 'utils.models.Channel.insert', 'models.Channel.insert', ([], {}), '()\n', (4760, 4762), False, 'from utils import models\n'), ((4978, 5006), 'utils.models.FilteredWord.insert', 'models.FilteredWord.insert', ([], {}), '()\n', (5004, 5006), False, 'from utils import models\n'), ((5241, 5271), 'utils.models.ApprovedInvite.insert', 'models.ApprovedInvite.insert', ([], {}), '()\n', (5269, 5271), False, 'from utils import models\n'), ((5483, 5503), 'utils.models.Flag.insert', 'models.Flag.insert', ([], {}), '()\n', (5501, 5503), False, 'from utils import models\n'), ((6318, 6385), 'utils.models.db.status', 'db.status', (['f"""UPDATE 
members SET watched=\'true\' where id={entry[0]}"""'], {}), '(f"UPDATE members SET watched=\'true\' where id={entry[0]}")\n', (6327, 6385), False, 'from utils.models import db\n'), ((6493, 6515), 'utils.models.Member.insert', 'models.Member.insert', ([], {}), '()\n', (6513, 6515), False, 'from utils import models\n'), ((2889, 2912), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2910, 2912), False, 'import datetime\n'), ((2915, 2939), 'datetime.timedelta', 'datetime.timedelta', (['(0)', 'i'], {}), '(0, i)\n', (2933, 2939), False, 'import datetime\n')]
#!/usr/bin/env python3

import fire


class String():
    """Performs operations on strings."""

    def strip(self, s: str):
        """Removes leading and trailing spaces."""
        return s.strip()

    def startswith(self, s: str, prefix: str):
        """Returns True if the string starts with the given prefix."""
        return s.startswith(prefix)


if __name__ == "__main__":
    fire.Fire(String)
[ "fire.Fire" ]
[((421, 438), 'fire.Fire', 'fire.Fire', (['String'], {}), '(String)\n', (430, 438), False, 'import fire\n')]
# Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from akg.utils import kernel_exec as utils import numpy as np from akg.topi.util import get_const_tuple from tests.common.test_op import prelu from tests.common.tensorio import compare_tensor from tests.common.gen_random import random_gaussian def prelu_run(shape, w_shape, dtype, rtol, attrs): if 'tuning' in attrs.keys(): t = attrs.get("tuning", False) kernel_name = attrs.get("kernel_name", False) mod = utils.op_build_test(prelu.prelu, [shape, w_shape], [dtype, dtype], kernel_name=kernel_name, attrs=attrs, tuning=t) if t: expect, input_data, w_data = gen_data(dtype, shape, w_shape) return mod, expect, (input_data, w_data, expect) else: return mod else: mod = utils.op_build_test(prelu.prelu, [shape, w_shape], [dtype, dtype], kernel_name='prelu', attrs=attrs) expect, input_data, w_data = gen_data(dtype, shape, w_shape) output = utils.mod_launch(mod, (input_data, w_data, expect), expect=expect) # #ctx.sync() # reshape_output = output_b.reshape(output_b.size) # reshape_output_np = output_np.reshape(output_np.size) # errorcount = 0 # for i in range(reshape_output.size): # limitError = abs(reshape_output[i] * rtol) # if abs(reshape_output[i] - reshape_output_np[i]) > limitError: # errorcount += 1 return (input_data, w_data), output, expect, compare_tensor(output, expect, rtol=rtol) def gen_data(dtype, shape, w_shape): # input_data = random_gaussian(shape, miu=1, sigma=50.0).astype(dtype.lower()) input_data = np.random.uniform(low=-1.0, high=1.0, size=get_const_tuple(shape)).astype(dtype) w_data = random_gaussian(w_shape, miu=1, sigma=2.0).astype(dtype.lower()) # expect = input_data * (input_data > 0) + input_data * (input_data < 0) * w_data[0] if w_shape[0] == 1: # pass expect = input_data * (input_data > 0) + input_data * (input_data < 0) * w_data[0] else: w_reshape = w_data.reshape(1, w_shape[0], 1, 1) w_broadcast = np.broadcast_to(w_reshape, shape) expect = input_data * (input_data > 0) + input_data * (input_data < 0) * w_broadcast # pass # for j in range(shape[1]): # for i in range(shape[0]): # for k in range(shape[2]): # for l in range(shape[3]): # expect[i, j, k, l] = input_data[i, j, k, l] * (input_data[i, j, k, l] > 0) + input_data[i, j, k, l] * (input_data[i, j, k, l] < 0) * w_data[j] return expect, input_data, w_data
[ "tests.common.tensorio.compare_tensor", "tests.common.gen_random.random_gaussian", "akg.utils.kernel_exec.op_build_test", "numpy.broadcast_to", "akg.topi.util.get_const_tuple", "akg.utils.kernel_exec.mod_launch" ]
[((1025, 1143), 'akg.utils.kernel_exec.op_build_test', 'utils.op_build_test', (['prelu.prelu', '[shape, w_shape]', '[dtype, dtype]'], {'kernel_name': 'kernel_name', 'attrs': 'attrs', 'tuning': 't'}), '(prelu.prelu, [shape, w_shape], [dtype, dtype],\n kernel_name=kernel_name, attrs=attrs, tuning=t)\n', (1044, 1143), True, 'from akg.utils import kernel_exec as utils\n'), ((1349, 1453), 'akg.utils.kernel_exec.op_build_test', 'utils.op_build_test', (['prelu.prelu', '[shape, w_shape]', '[dtype, dtype]'], {'kernel_name': '"""prelu"""', 'attrs': 'attrs'}), "(prelu.prelu, [shape, w_shape], [dtype, dtype],\n kernel_name='prelu', attrs=attrs)\n", (1368, 1453), True, 'from akg.utils import kernel_exec as utils\n'), ((1536, 1602), 'akg.utils.kernel_exec.mod_launch', 'utils.mod_launch', (['mod', '(input_data, w_data, expect)'], {'expect': 'expect'}), '(mod, (input_data, w_data, expect), expect=expect)\n', (1552, 1602), True, 'from akg.utils import kernel_exec as utils\n'), ((2689, 2722), 'numpy.broadcast_to', 'np.broadcast_to', (['w_reshape', 'shape'], {}), '(w_reshape, shape)\n', (2704, 2722), True, 'import numpy as np\n'), ((2042, 2083), 'tests.common.tensorio.compare_tensor', 'compare_tensor', (['output', 'expect'], {'rtol': 'rtol'}), '(output, expect, rtol=rtol)\n', (2056, 2083), False, 'from tests.common.tensorio import compare_tensor\n'), ((2317, 2359), 'tests.common.gen_random.random_gaussian', 'random_gaussian', (['w_shape'], {'miu': '(1)', 'sigma': '(2.0)'}), '(w_shape, miu=1, sigma=2.0)\n', (2332, 2359), False, 'from tests.common.gen_random import random_gaussian\n'), ((2266, 2288), 'akg.topi.util.get_const_tuple', 'get_const_tuple', (['shape'], {}), '(shape)\n', (2281, 2288), False, 'from akg.topi.util import get_const_tuple\n')]
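As a sanity aid for the expected-value construction in gen_data above: the per-channel PReLU it builds is equivalent to a single np.where once the weight is broadcast to the NCHW input. A minimal numpy sketch; the shapes are illustrative:

import numpy as np

def prelu_reference(x, w):
    # w holds one negative-slope per channel; broadcast it over the NCHW input
    w_b = np.broadcast_to(w.reshape(1, -1, 1, 1), x.shape)
    return np.where(x > 0, x, w_b * x)

x = np.random.uniform(-1.0, 1.0, size=(2, 3, 4, 4)).astype("float32")
w = np.array([0.1, 0.2, 0.3], dtype="float32")
out = prelu_reference(x, w)
assert out.shape == x.shape
# matches the masked form used in gen_data
assert np.allclose(out, x * (x > 0) + x * (x < 0) * w.reshape(1, -1, 1, 1))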
#!/usr/bin/env python # # Copyright (c) 2015 Intel Corporation. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=F0401 import optparse import os import shutil import sys import subprocess GYP_ANDROID_DIR = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, 'build', 'android', 'gyp') sys.path.append(GYP_ANDROID_DIR) from util import build_utils def DoCompress(dest_path, sources): build_utils.DeleteDirectory(dest_path) build_utils.MakeDirectory(dest_path) for source in sources: shutil.copy(source, dest_path) file_to_compress = os.path.join(dest_path, os.path.basename(source)) subprocess.check_call(['lzma', '-f', file_to_compress]) def DoShowOutputNames(dest_path, sources): for source in sources: print('%s.lzma' % os.path.join(dest_path, os.path.basename(source))) def main(): parser = optparse.OptionParser() parser.add_option('--dest-path', help='Destination directory for compressed files') parser.add_option('--mode', choices=('compress', 'show-output-names'), help='Whether to compress the files or show their ' 'compressed names') parser.add_option('--sources', help='The list of files to be compressed') options, _ = parser.parse_args(sys.argv) sources = build_utils.ParseGypList(options.sources) if options.mode == 'compress': return DoCompress(options.dest_path, sources) else: return DoShowOutputNames(options.dest_path, sources) if __name__ == '__main__': sys.exit(main())
[ "sys.path.append", "subprocess.check_call", "optparse.OptionParser", "os.path.basename", "os.path.dirname", "util.build_utils.DeleteDirectory", "util.build_utils.ParseGypList", "util.build_utils.MakeDirectory", "shutil.copy" ]
[((524, 556), 'sys.path.append', 'sys.path.append', (['GYP_ANDROID_DIR'], {}), '(GYP_ANDROID_DIR)\n', (539, 556), False, 'import sys\n'), ((313, 338), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (328, 338), False, 'import os\n'), ((627, 665), 'util.build_utils.DeleteDirectory', 'build_utils.DeleteDirectory', (['dest_path'], {}), '(dest_path)\n', (654, 665), False, 'from util import build_utils\n'), ((668, 704), 'util.build_utils.MakeDirectory', 'build_utils.MakeDirectory', (['dest_path'], {}), '(dest_path)\n', (693, 704), False, 'from util import build_utils\n'), ((1067, 1090), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (1088, 1090), False, 'import optparse\n'), ((1514, 1555), 'util.build_utils.ParseGypList', 'build_utils.ParseGypList', (['options.sources'], {}), '(options.sources)\n', (1538, 1555), False, 'from util import build_utils\n'), ((735, 765), 'shutil.copy', 'shutil.copy', (['source', 'dest_path'], {}), '(source, dest_path)\n', (746, 765), False, 'import shutil\n'), ((843, 898), 'subprocess.check_call', 'subprocess.check_call', (["['lzma', '-f', file_to_compress]"], {}), "(['lzma', '-f', file_to_compress])\n", (864, 898), False, 'import subprocess\n'), ((813, 837), 'os.path.basename', 'os.path.basename', (['source'], {}), '(source)\n', (829, 837), False, 'import os\n'), ((1015, 1039), 'os.path.basename', 'os.path.basename', (['source'], {}), '(source)\n', (1031, 1039), False, 'import os\n')]
#!/usr/bin/python3.8 # node to transform positive number to negative from basics.srv import convert_positive_to_negative, convert_positive_to_negativeRequest, convert_positive_to_negativeResponse import rospy # Service callback function. def process_service_request(req): # Instantiate the response message object. res = convert_positive_to_negativeResponse() # Perform sanity check. Allow only positive real numbers. # Compose the response message accordingly. if(req.positive_number <= 0): res.success = False res.negative_number = 0 else: res.negative_number = -req.positive_number res.success = True #Return the response message. return res def positive_to_negative_server(): # ROS node for the service server. rospy.init_node('positive_to_negative_server', anonymous = False) # Create a ROS service type. service = rospy.Service('positive_to_negative', convert_positive_to_negative, process_service_request) # Log message about service availability. rospy.loginfo('Convert positive to negative service is now available.') rospy.spin() if __name__ == "__main__": positive_to_negative_server()
[ "basics.srv.convert_positive_to_negativeResponse", "rospy.loginfo", "rospy.init_node", "rospy.spin", "rospy.Service" ]
[((332, 370), 'basics.srv.convert_positive_to_negativeResponse', 'convert_positive_to_negativeResponse', ([], {}), '()\n', (368, 370), False, 'from basics.srv import convert_positive_to_negative, convert_positive_to_negativeRequest, convert_positive_to_negativeResponse\n'), ((793, 856), 'rospy.init_node', 'rospy.init_node', (['"""positive_to_negative_server"""'], {'anonymous': '(False)'}), "('positive_to_negative_server', anonymous=False)\n", (808, 856), False, 'import rospy\n'), ((907, 1003), 'rospy.Service', 'rospy.Service', (['"""positive_to_negative"""', 'convert_positive_to_negative', 'process_service_request'], {}), "('positive_to_negative', convert_positive_to_negative,\n process_service_request)\n", (920, 1003), False, 'import rospy\n'), ((1051, 1122), 'rospy.loginfo', 'rospy.loginfo', (['"""Convert positive to negative service is now available."""'], {}), "('Convert positive to negative service is now available.')\n", (1064, 1122), False, 'import rospy\n'), ((1127, 1139), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1137, 1139), False, 'import rospy\n')]
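A minimal client sketch for exercising the service above; it assumes the same basics package is importable and that the server node is running. Error handling (rospy.ServiceException) is omitted for brevity:

#!/usr/bin/python3.8
# Minimal client for the positive_to_negative service defined above.
import rospy
from basics.srv import convert_positive_to_negative

if __name__ == "__main__":
    rospy.init_node('positive_to_negative_client', anonymous=False)
    # Block until the service is advertised, then call it.
    rospy.wait_for_service('positive_to_negative')
    proxy = rospy.ServiceProxy('positive_to_negative', convert_positive_to_negative)
    res = proxy(positive_number=42.0)
    rospy.loginfo('success=%s, negative_number=%s', res.success, res.negative_number)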
from bisect import bisect_right from itertools import combinations import sys input = sys.stdin.readline N, C = map(int, input().split()) W = list(map(int, input().split())) def possible_weight(arr): ret = [] for i in range(len(arr)+1): for c in combinations(arr, i): ret.append(sum(c)) return ret def target_weight(left, right, v): right = list(sorted(right)) ret = 0 for l in left: bi = bisect_right(right, v-l) ret += bi print(ret) m = N // 2 left = possible_weight(W[:m]) right = possible_weight(W[m:]) target_weight(left, right, C)
[ "bisect.bisect_right", "itertools.combinations" ]
[((264, 284), 'itertools.combinations', 'combinations', (['arr', 'i'], {}), '(arr, i)\n', (276, 284), False, 'from itertools import combinations\n'), ((444, 470), 'bisect.bisect_right', 'bisect_right', (['right', '(v - l)'], {}), '(right, v - l)\n', (456, 470), False, 'from bisect import bisect_right\n')]
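The script above is a classic meet-in-the-middle count: it enumerates every subset sum of each half of the weights, sorts the right half, and for each left sum binary-searches how many right sums keep the total within C. A self-contained cross-check of the same idea against brute force on a tiny instance:

from bisect import bisect_right
from itertools import combinations

def subset_sums(arr):
    return [sum(c) for i in range(len(arr) + 1) for c in combinations(arr, i)]

def count_within(weights, cap):
    m = len(weights) // 2
    left = subset_sums(weights[:m])
    right = sorted(subset_sums(weights[m:]))
    # every (left subset, right subset) pair with combined sum <= cap
    return sum(bisect_right(right, cap - l) for l in left)

weights, cap = [1, 2, 3, 4], 5
brute = sum(1 for i in range(len(weights) + 1)
            for c in combinations(weights, i) if sum(c) <= cap)
assert count_within(weights, cap) == brute  # both count subsets with sum <= cap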
#!/usr/bin/env python from zipline import TradingAlgorithm from zipline.transforms import MovingAverage from zipline.utils.factory import load_from_yahoo from datetime import datetime import pytz import matplotlib.pyplot as plt class DualMovingAverage(TradingAlgorithm): """Dual Moving Average Crossover algorithm. This algorithm buys apple once its short moving average crosses its long moving average (indicating upwards momentum) and sells its shares once the averages cross again (indicating downwards momentum). """ def initialize(self, short_window=100, long_window=400): # Add 2 mavg transforms, one with a long window, one # with a short window. self.add_transform(MovingAverage, 'short_mavg', ['price'], window_length=short_window) self.add_transform(MovingAverage, 'long_mavg', ['price'], window_length=long_window) # To keep track of whether we invested in the stock or not self.invested = False def handle_data(self, data): short_mavg = data['AAPL'].short_mavg['price'] long_mavg = data['AAPL'].long_mavg['price'] buy = False sell = False # Has short mavg crossed long mavg? if short_mavg > long_mavg and not self.invested: self.order('AAPL', 100) self.invested = True buy = True elif short_mavg < long_mavg and self.invested: self.order('AAPL', -100) self.invested = False sell = True # Record state variables. A column for each # variable will be added to the performance # DataFrame returned by .run() self.record(short_mavg=short_mavg, long_mavg=long_mavg, buy=buy, sell=sell) # Load data start = datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc) end = datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc) data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start, end=end, adjusted=False) # Run algorithm dma = DualMovingAverage() perf = dma.run(data) # Plot results fig = plt.figure() ax1 = fig.add_subplot(211, ylabel='Price in $') data['AAPL'].plot(ax=ax1, color='r', lw=2.) perf[['short_mavg', 'long_mavg']].plot(ax=ax1, lw=2.) ax1.plot(perf.ix[perf.buy].index, perf.short_mavg[perf.buy], '^', markersize=10, color='m') ax1.plot(perf.ix[perf.sell].index, perf.short_mavg[perf.sell], 'v', markersize=10, color='k') ax2 = fig.add_subplot(212, ylabel='Portfolio value in $') perf.portfolio_value.plot(ax=ax2, lw=2.) ax2.plot(perf.ix[perf.buy].index, perf.portfolio_value[perf.buy], '^', markersize=10, color='m') ax2.plot(perf.ix[perf.sell].index, perf.portfolio_value[perf.sell], 'v', markersize=10, color='k')
[ "zipline.utils.factory.load_from_yahoo", "matplotlib.pyplot.figure", "datetime.datetime" ]
[((1861, 1903), 'datetime.datetime', 'datetime', (['(1990)', '(1)', '(1)', '(0)', '(0)', '(0)', '(0)', 'pytz.utc'], {}), '(1990, 1, 1, 0, 0, 0, 0, pytz.utc)\n', (1869, 1903), False, 'from datetime import datetime\n'), ((1910, 1952), 'datetime.datetime', 'datetime', (['(2002)', '(1)', '(1)', '(0)', '(0)', '(0)', '(0)', 'pytz.utc'], {}), '(2002, 1, 1, 0, 0, 0, 0, pytz.utc)\n', (1918, 1952), False, 'from datetime import datetime\n'), ((1960, 2047), 'zipline.utils.factory.load_from_yahoo', 'load_from_yahoo', ([], {'stocks': "['AAPL']", 'indexes': '{}', 'start': 'start', 'end': 'end', 'adjusted': '(False)'}), "(stocks=['AAPL'], indexes={}, start=start, end=end, adjusted\n =False)\n", (1975, 2047), False, 'from zipline.utils.factory import load_from_yahoo\n'), ((2148, 2160), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2158, 2160), True, 'import matplotlib.pyplot as plt\n')]
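Stripped of zipline, the buy/sell rule in handle_data above is a two-state machine over the pair of moving averages; a minimal dependency-free sketch:

def crossover_signals(short_mavg, long_mavg, invested=False):
    signals = []
    for s, l in zip(short_mavg, long_mavg):
        if s > l and not invested:
            signals.append('buy')
            invested = True
        elif s < l and invested:
            signals.append('sell')
            invested = False
        else:
            signals.append('hold')
    return signals

# short crosses above long (buy), then back below it (sell)
assert crossover_signals([1, 3, 1], [2, 2, 2]) == ['hold', 'buy', 'sell']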
from directory_manager import base_Directory, firstTExe, default_VE_NAME
from tfl_backend import universal_Path_database

with open(firstTExe) as file:
    try:
        terminator = int(file.read())
    except:
        terminator = 0


def update_basedir_and_default_ve():
    """Check whether the software was previously installed.

    Returns 'redundant' if yes, or 0 if no (after registering the default
    virtual environment path).
    """
    global terminator
    if not terminator:
        universal_Path_database('add', default_VE_NAME, base_Directory)
        terminator = 1
        writeTerminator(terminator)
        return 0
    else:
        return 'redundant'


def writeTerminator(terminatorval):
    with open(firstTExe, 'w') as files:
        files.write(str(terminatorval))

# update_basedir_and_default_ve()
[ "tfl_backend.universal_Path_database" ]
[((457, 520), 'tfl_backend.universal_Path_database', 'universal_Path_database', (['"""add"""', 'default_VE_NAME', 'base_Directory'], {}), "('add', default_VE_NAME, base_Directory)\n", (480, 520), False, 'from tfl_backend import universal_Path_database\n')]
import gensim
import pandas as pd
import numpy as np
from gensim.models.wrappers import LdaMallet
import sys

"""
This class creates a list of n recommendations that are the most similar
to a list of paintings liked by the user.
It uses a Latent Dirichlet Allocation approach which expresses the paintings as a distribution
of topics. Topics are themselves a distribution of words.
"""


class QueryLdaModel:

    path_to_model = 'resources/models/lda.model'
    path_to_cos_mat = 'resources/matrices/lda/cosine-mat.npy'
    path_to_topdoc_mat = 'resources/matrices/lda/lda-output.npy'
    painting_df = pd.read_csv('resources/datasets/ng-dataset.csv')

    def __init__(self, painting_list, n):
        self.painting_list = painting_list
        self.n = n

    def load_model(self, path_to_model):
        """Load the LDA model"""
        lda_model = LdaMallet.load(path_to_model)
        return lda_model

    def load_cosine_matrix(self, path_to_cos_mat):
        """Load the cosine similarity matrix"""
        cos_sim_mat = np.load(path_to_cos_mat)
        return cos_sim_mat

    def load_topdoc_matrix(self, path_to_topdoc_mat):
        """Load the topic-document matrix"""
        topdoc_mat = np.load(path_to_topdoc_mat)
        return topdoc_mat

    def pid2index(self, painting_df, painting_id):
        """From the painting ID, returns the index of the painting in the painting dataframe
        Input:
            painting_df: dataframe of paintings
            painting_id: painting ID (e.g. '000-02T4-0000')
        Output:
            index: index of the painting in the dataframe (e.g. 32)
        """
        try:
            index = painting_df.loc[painting_df['painting_id'] == painting_id].index[0]
        except IndexError as ie:
            index = "Painting ID '" + painting_id + "' not found in dataset."
        return index

    def pidlist2indexlist(self, painting_df, painting_list):
        """From a list of painting IDs, returns the indexes of the paintings
        Input:
            painting_df: dataframe of paintings
            painting_list: list of painting IDs (e.g. ['000-02T4-0000', '000-03WC-0000', ...])
        Output:
            index_list: list of the painting indexes in the dataframe (e.g. [32, 45, ...])
        """
        index_list = [self.pid2index(painting_df, painting_id) for painting_id in painting_list]
        return index_list

    def index2pid(self, painting_df, index):
        """From the index, returns the painting ID from the paintings dataframe
        Input:
            painting_df: dataframe of paintings
            index: index of the painting in the dataframe
        Output:
            pid: the painting ID (e.g. '000-02T4-0000')
        """
        try:
            pid = painting_df.iloc[index].painting_id
        except IndexError as ie:
            pid = "Index '" + str(index) + "' not found in dataset."
        return pid

    def indexlist2pidlist(self, painting_df, index_list):
        """From a list of indexes, returns the painting IDs
        Input:
            painting_df: dataframe of paintings
            index_list: list of the painting indexes in the dataframe
        Output:
            pids_list: list of painting IDs
        """
        pids_list = [self.index2pid(painting_df, index) for index in index_list]
        return pids_list

    def recommend_paintings(self, painting_df, painting_list, cos_mat, n):
        """Recommend paintings for a user based on a list of items that were liked
        Input:
            painting_df: dataframe of paintings
            painting_list: list of painting IDs liked by a user
            cos_mat: Cosine Similarity Matrix
            n: number of recommendations wanted
        Output:
            a list of painting IDs for recommended paintings
        """
        n_painting = len(painting_list)
        score_list = []
        index_list = self.pidlist2indexlist(painting_df, painting_list)
        for index in index_list:
            score = cos_mat[index]
            score[index] = 0
            score_list.append(score)
        score_list = np.sum(score_list, 0) / n_painting
        top_n_index = sorted(range(len(score_list)), key=lambda i: score_list[i], reverse=True)[:n]
        top_n_pids = self.indexlist2pidlist(painting_df, top_n_index)
        return top_n_pids

    def main(self):
        model = self.load_model(self.path_to_model)
        cos_mat = self.load_cosine_matrix(self.path_to_cos_mat)
        topdoc_mat = self.load_topdoc_matrix(self.path_to_topdoc_mat)
        pids_list = self.recommend_paintings(self.painting_df, self.painting_list, cos_mat, self.n)
        return pids_list


if __name__ == "__main__":
    lda = QueryLdaModel(['000-01DF-0000', '000-0168-0000', '000-019M-0000', '000-043Q-0000'], 10)
    pids_list = lda.main()
    print(pids_list)
[ "pandas.read_csv", "numpy.load", "numpy.sum", "gensim.models.wrappers.LdaMallet.load" ]
[((636, 684), 'pandas.read_csv', 'pd.read_csv', (['"""resources/datasets/ng-dataset.csv"""'], {}), "('resources/datasets/ng-dataset.csv')\n", (647, 684), True, 'import pandas as pd\n'), ((943, 972), 'gensim.models.wrappers.LdaMallet.load', 'LdaMallet.load', (['path_to_model'], {}), '(path_to_model)\n', (957, 972), False, 'from gensim.models.wrappers import LdaMallet\n'), ((1144, 1168), 'numpy.load', 'np.load', (['path_to_cos_mat'], {}), '(path_to_cos_mat)\n', (1151, 1168), True, 'import numpy as np\n'), ((1341, 1368), 'numpy.load', 'np.load', (['path_to_topdoc_mat'], {}), '(path_to_topdoc_mat)\n', (1348, 1368), True, 'import numpy as np\n'), ((4690, 4711), 'numpy.sum', 'np.sum', (['score_list', '(0)'], {}), '(score_list, 0)\n', (4696, 4711), True, 'import numpy as np\n')]
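Numerically, recommend_paintings above averages each liked painting's cosine-similarity row (with self-similarity zeroed) and takes the n highest-scoring indexes. A minimal numpy sketch of that scoring step; note that the original zeroes the row in the loaded matrix itself, whereas copying the row, as here, avoids mutating shared state across calls:

import numpy as np

def top_n_indexes(cos_mat, liked_indexes, n):
    scores = np.zeros(cos_mat.shape[0])
    for i in liked_indexes:
        row = cos_mat[i].copy()   # copy so the shared matrix is not mutated
        row[i] = 0.0
        scores += row
    scores /= len(liked_indexes)
    return list(np.argsort(scores)[::-1][:n])

cos = np.array([[1.0, 0.9, 0.1],
                [0.9, 1.0, 0.2],
                [0.1, 0.2, 1.0]])
assert top_n_indexes(cos, [0], 2) == [1, 2]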
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class BaseDataSourcePlugin(object):
    """Provide basic hooks for data source plugins"""

    def __init__(self):
        self.source_type = None
        self.source_name = None
        super().__init__()

    @abc.abstractmethod
    def set_config_opts(self, conf):
        """Placeholder to set configuration options specific to each plugin.

        :param dict conf: Configuration options as dict

        Example: conf = {'excel_spec': 'spec1.yaml', 'excel_path': 'excel.xls'}

        Each plugin will have their own config opts.
        """
        return

    @abc.abstractmethod
    def get_racks(self, zone):
        """Return list of racks in the zone

        :param string zone: Zone name
        :returns: list of rack names
        :rtype: list

        Example: ['rack01', 'rack02']
        """
        return []

    @abc.abstractmethod
    def get_hosts(self, zone, rack):
        """Return list of hosts in the zone

        :param string zone: Zone name
        :param string rack: Rack name
        :returns: list of hosts information
        :rtype: list of dict

        Example: [
                     {'name': 'host01', 'type': 'controller', 'host_profile': 'hp_01'},
                     {'name': 'host02', 'type': 'compute', 'host_profile': 'hp_02'}
                 ]
        """
        return []

    @abc.abstractmethod
    def get_networks(self, zone):
        """Return list of networks in the zone

        :param string zone: Zone name
        :returns: list of networks and their vlans
        :rtype: list of dict

        Example: [
                     {'name': 'oob', 'vlan': '41', 'subnet': '192.168.1.0/24', 'gateway': '192.168.1.1'},
                     {'name': 'pxe', 'vlan': '42', 'subnet': '192.168.2.0/24', 'gateway': '192.168.2.1'},
                     {'name': 'oam', 'vlan': '43', 'subnet': '192.168.3.0/24', 'gateway': '192.168.3.1'},
                     {'name': 'ksn', 'vlan': '44', 'subnet': '192.168.4.0/24', 'gateway': '192.168.4.1'},
                     {'name': 'storage', 'vlan': '45', 'subnet': '192.168.5.0/24', 'gateway': '192.168.5.1'},
                     {'name': 'overlay', 'vlan': '45', 'subnet': '192.168.6.0/24', 'gateway': '192.168.6.1'}
                 ]
        """
        # TODO(nh863p): Can we fix the network names here so that plugin
        # will return exactly with same network names?
        # TODO(nh863p): Expand the return type if they are rack level subnets
        # TODO(nh863p): Can ingress information be provided here?
        return []

    @abc.abstractmethod
    def get_ips(self, zone, host):
        """Return list of IPs on the host

        :param string zone: Zone name
        :param string host: Host name
        :returns: Dict of IPs per network on the host
        :rtype: dict

        Example: {'oob': {'ipv4': '192.168.1.10'}, 'pxe': {'ipv4': '192.168.2.10'}}

        The network name from get_networks is expected to be the keys of this
        dict. In case some networks are missed, they are expected to be either
        DHCP or internally generated in the next steps by the design rules.
""" return {} @abc.abstractmethod def get_dns_servers(self, zone): """Return the DNS servers :param string zone: Zone name :returns: List of DNS servers to be configured on host :rtype: List Example: ['8.8.8.8', '172.16.31.10'] """ return [] @abc.abstractmethod def get_ntp_servers(self, zone): """Return the NTP servers :param string zone: Zone name :returns: List of NTP servers to be configured on host :rtype: List Example: ['ntp1.ubuntu1.example', 'ntp2.ubuntu.example'] """ return [] @abc.abstractmethod def get_ldap_information(self, zone): """Return the LDAP server information :param string zone: Zone name :returns: LDAP server information :rtype: Dict Example: {'url': 'ldap.example.com', 'common_name': 'ldap-site1', 'domain': 'test', 'subdomain': 'test_sub1'} """ return {} @abc.abstractmethod def get_location_information(self, zone): """Return location information :param string zone: Zone name :returns: Dict of location information :rtype: dict Example: {'name': 'Dallas', 'physical_location': 'DAL01', 'state': 'Texas', 'country': 'US', 'corridor': 'CR1'} """ return {} @abc.abstractmethod def get_domain_name(self, zone): """Return the Domain name :param string zone: Zone name :returns: Domain name :rtype: string Example: example.com """ return "" @abc.abstractmethod def get_region_name(self, zone): """Return the Region name :param string zone: Zone name :returns: Region name :rtype: string Example: "RegionOne" """ return ""
[ "six.add_metaclass" ]
[((641, 671), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (658, 671), False, 'import six\n')]
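Because every hook on BaseDataSourcePlugin is abstract, a plugin class is only instantiable once all of them are overridden. A toy sketch, assuming the BaseDataSourcePlugin class above is importable; all names and returned values are hypothetical placeholders:

class StaticDataSourcePlugin(BaseDataSourcePlugin):
    """Illustrative plugin that serves fixed data; real plugins query a backend."""

    def set_config_opts(self, conf):
        self.conf = conf

    def get_racks(self, zone):
        return ['rack01']

    def get_hosts(self, zone, rack):
        return [{'name': 'host01', 'type': 'controller', 'host_profile': 'hp_01'}]

    def get_networks(self, zone):
        return [{'name': 'oob', 'vlan': '41', 'subnet': '192.168.1.0/24',
                 'gateway': '192.168.1.1'}]

    def get_ips(self, zone, host):
        return {'oob': {'ipv4': '192.168.1.10'}}

    def get_dns_servers(self, zone):
        return ['8.8.8.8']

    def get_ntp_servers(self, zone):
        return ['ntp1.example.com']

    def get_ldap_information(self, zone):
        return {'url': 'ldap.example.com', 'common_name': 'ldap-site1',
                'domain': 'test', 'subdomain': 'test_sub1'}

    def get_location_information(self, zone):
        return {'name': 'Dallas', 'physical_location': 'DAL01', 'state': 'Texas',
                'country': 'US', 'corridor': 'CR1'}

    def get_domain_name(self, zone):
        return 'example.com'

    def get_region_name(self, zone):
        return 'RegionOne'


plugin = StaticDataSourcePlugin()  # raises TypeError if any abstract hook is missing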
import re
import logging

from rdflib import Graph
from rdflib.namespace import Namespace, NamespaceManager
from lxml import etree as et
import nltk.data

# language to be used by nltk
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')

# enable logging for RDF
logging.basicConfig(level=logging.INFO)

# Global variables
SEMLANCET_NS = "http://www.semanticlancet.eu/resource/"
SEMLANCET_URI_PRO_ROLE_AUTHOR = "http://purl.org/spar/pro/author"
SUMMARY_FILENAME = "_CITATION_CONTEXTS_SUMMARY.csv"
REPORT_FILENAME = "_REPORT.txt"
NO_CROSS_REFS_LIST = "_NO_CROSS_REFS.txt"
RDF_EXTENSION = "ttl"
RDF_SERIALIZATION_FORMAT = "turtle"
NON_DECIMAL = re.compile(r'[^\d.]+')

# namespaces
CE = "http://www.elsevier.com/xml/common/dtd"
NS_MAP = {'ce': CE}
cross_ref_tag_name = et.QName(CE, 'cross-ref')
cross_refs_tag_name = '{http://www.elsevier.com/xml/common/dtd}cross-refs'

# rdf namespaces
frbrNS = Namespace('http://purl.org/vocab/frbr/core#')
coNS = Namespace('http://purl.org/co/')
foafNS = Namespace('http://xmlns.com/foaf/0.1/')
c4oNS = Namespace('http://purl.org/spar/c4o/')
proNS = Namespace('http://purl.org/spar/pro/')
docoNS = Namespace('http://purl.org/spar/doco/')

ns_mgr = NamespaceManager(Graph())
ns_mgr.bind('frbr', frbrNS, override=False)
ns_mgr.bind('co', coNS, override=False)
ns_mgr.bind('foaf', foafNS, override=False)
ns_mgr.bind('c4o', c4oNS, override=False)
ns_mgr.bind('pro', proNS, override=False)
ns_mgr.bind('doco', docoNS, override=False)

# simple namespace defs
c4o = Namespace('http://purl.org/spar/c4o/')
frbr = Namespace('http://purl.org/vocab/frbr/core#')
doco = Namespace('http://purl.org/spar/doco/')

NMSPCS = {'xocs': 'http://www.elsevier.com/xml/xocs/dtd',
          'ce': 'http://www.elsevier.com/xml/common/dtd',
          'xmlns': "http://www.elsevier.com/xml/svapi/article/dtd",
          'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance",
          'xmlns:prism': "http://prismstandard.org/namespaces/basic/2.0/",
          'xmlns:dc': "http://purl.org/dc/elements/1.1/",
          'xmlns:xocs': "http://www.elsevier.com/xml/xocs/dtd",
          'xmlns:xlink': "http://www.w3.org/1999/xlink",
          'xmlns:tb': "http://www.elsevier.com/xml/common/table/dtd",
          'xmlns:sb': "http://www.elsevier.com/xml/common/struct-bib/dtd",
          'xmlns:sa': "http://www.elsevier.com/xml/common/struct-aff/dtd",
          'xmlns:mml': "http://www.w3.org/1998/Math/MathML",
          'xmlns:ja': "http://www.elsevier.com/xml/ja/dtd",
          'xmlns:ce': "http://www.elsevier.com/xml/common/dtd",
          'xmlns:cals': "http://www.elsevier.com/xml/common/cals/dtd",
          }
[ "rdflib.Graph", "logging.basicConfig", "rdflib.namespace.Namespace", "lxml.etree.QName", "re.compile" ]
[((280, 319), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (299, 319), False, 'import logging\n'), ((660, 682), 're.compile', 're.compile', (['"""[^\\\\d.]+"""'], {}), "('[^\\\\d.]+')\n", (670, 682), False, 'import re\n'), ((784, 809), 'lxml.etree.QName', 'et.QName', (['CE', '"""cross-ref"""'], {}), "(CE, 'cross-ref')\n", (792, 809), True, 'from lxml import etree as et\n'), ((911, 956), 'rdflib.namespace.Namespace', 'Namespace', (['"""http://purl.org/vocab/frbr/core#"""'], {}), "('http://purl.org/vocab/frbr/core#')\n", (920, 956), False, 'from rdflib.namespace import Namespace, NamespaceManager\n'), ((964, 996), 'rdflib.namespace.Namespace', 'Namespace', (['"""http://purl.org/co/"""'], {}), "('http://purl.org/co/')\n", (973, 996), False, 'from rdflib.namespace import Namespace, NamespaceManager\n'), ((1006, 1045), 'rdflib.namespace.Namespace', 'Namespace', (['"""http://xmlns.com/foaf/0.1/"""'], {}), "('http://xmlns.com/foaf/0.1/')\n", (1015, 1045), False, 'from rdflib.namespace import Namespace, NamespaceManager\n'), ((1054, 1092), 'rdflib.namespace.Namespace', 'Namespace', (['"""http://purl.org/spar/c4o/"""'], {}), "('http://purl.org/spar/c4o/')\n", (1063, 1092), False, 'from rdflib.namespace import Namespace, NamespaceManager\n'), ((1101, 1139), 'rdflib.namespace.Namespace', 'Namespace', (['"""http://purl.org/spar/pro/"""'], {}), "('http://purl.org/spar/pro/')\n", (1110, 1139), False, 'from rdflib.namespace import Namespace, NamespaceManager\n'), ((1149, 1188), 'rdflib.namespace.Namespace', 'Namespace', (['"""http://purl.org/spar/doco/"""'], {}), "('http://purl.org/spar/doco/')\n", (1158, 1188), False, 'from rdflib.namespace import Namespace, NamespaceManager\n'), ((1511, 1549), 'rdflib.namespace.Namespace', 'Namespace', (['"""http://purl.org/spar/c4o/"""'], {}), "('http://purl.org/spar/c4o/')\n", (1520, 1549), False, 'from rdflib.namespace import Namespace, NamespaceManager\n'), ((1557, 1602), 'rdflib.namespace.Namespace', 'Namespace', (['"""http://purl.org/vocab/frbr/core#"""'], {}), "('http://purl.org/vocab/frbr/core#')\n", (1566, 1602), False, 'from rdflib.namespace import Namespace, NamespaceManager\n'), ((1610, 1649), 'rdflib.namespace.Namespace', 'Namespace', (['"""http://purl.org/spar/doco/"""'], {}), "('http://purl.org/spar/doco/')\n", (1619, 1649), False, 'from rdflib.namespace import Namespace, NamespaceManager\n'), ((1216, 1223), 'rdflib.Graph', 'Graph', ([], {}), '()\n', (1221, 1223), False, 'from rdflib import Graph\n')]
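For reference, bound namespaces like the ones above are typically used as attribute-style URI builders when emitting triples with rdflib. A minimal sketch; the resource URI and literal text are hypothetical, and serialize() returns a str in rdflib >= 6 (bytes in older releases):

from rdflib import Graph, Literal, URIRef
from rdflib.namespace import Namespace

c4o = Namespace('http://purl.org/spar/c4o/')
g = Graph()
g.bind('c4o', c4o)

# hypothetical citation-context resource under the Semantic Lancet base URI
ctx = URIRef('http://www.semanticlancet.eu/resource/citation-context/1')
g.add((ctx, c4o.hasContent, Literal('Example citation context text.')))

print(g.serialize(format='turtle'))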
from django.db.models.aggregates import Count, Sum
from rest_framework import serializers

from .models import DoubleCountingAgreement, DoubleCountingProduction, DoubleCountingSourcing, DoubleCountingDocFile
from core.models import Entity, MatierePremiere, Biocarburant, Pays


class EntitySerializer(serializers.ModelSerializer):
    class Meta:
        model = Entity
        fields = ['id', 'name', 'entity_type', 'has_mac', 'has_trading', 'has_direct_deliveries', 'has_stocks',
                  'legal_name', 'registration_id', 'sustainability_officer_phone_number', 'sustainability_officer', 'registered_address']


class FeedStockSerializer(serializers.ModelSerializer):
    class Meta:
        model = MatierePremiere
        fields = ['name', 'name_en', 'code', 'category', 'is_double_compte']


class BiofuelSerializer(serializers.ModelSerializer):
    class Meta:
        model = Biocarburant
        fields = ['name', 'name_en', 'code']


class CountrySerializer(serializers.ModelSerializer):
    class Meta:
        model = Pays
        fields = ['name', 'name_en', 'code_pays']


class DoubleCountingProductionSerializer(serializers.ModelSerializer):
    biofuel = BiofuelSerializer(read_only=True)
    feedstock = FeedStockSerializer(read_only=True)

    class Meta:
        model = DoubleCountingProduction
        fields = ['id', 'year', 'biofuel', 'feedstock', 'max_production_capacity', 'estimated_production', 'requested_quota', 'approved_quota']


class DoubleCountingSourcingSerializer(serializers.ModelSerializer):
    feedstock = FeedStockSerializer(read_only=True)
    origin_country = CountrySerializer(read_only=True)
    supply_country = CountrySerializer(read_only=True)
    transit_country = CountrySerializer(read_only=True)

    class Meta:
        model = DoubleCountingSourcing
        fields = ['id', 'year', 'feedstock', 'origin_country', 'supply_country', 'transit_country', 'metric_tonnes']


class DoubleCountingAggregatedSourcingSerializer(serializers.ModelSerializer):
    feedstock = FeedStockSerializer(read_only=True)
    sum = serializers.SerializerMethodField()

    def get_sum(self, queryset):
        print(queryset)
        return queryset.objects.values('year', 'feedstock').annotate(sum=Sum('metric_tonnes'))

    class Meta:
        model = DoubleCountingSourcing
        fields = ['year', 'feedstock', 'sum']


class DoubleCountingAgreementFullSerializer(serializers.ModelSerializer):
    production_site = serializers.SlugRelatedField(
        read_only=True,
        slug_field='name'
    )
    producer_user = serializers.SlugRelatedField(
        read_only=True,
        slug_field='email'
    )
    dgec_validator = serializers.SlugRelatedField(
        read_only=True,
        slug_field='name'
    )
    dgpe_validator = serializers.SlugRelatedField(
        read_only=True,
        slug_field='name'
    )
    dgddi_validator = serializers.SlugRelatedField(
        read_only=True,
        slug_field='name'
    )
    producer = EntitySerializer(read_only=True)

    class Meta:
        model = DoubleCountingAgreement
        fields = ['id', 'creation_date', 'producer', 'producer_user', 'production_site', 'period_start', 'period_end',
                  'status', 'dgec_validated', 'dgec_validator', 'dgec_validated_dt', 'dgddi_validated', 'dgddi_validator',
                  'dgddi_validated_dt', 'dgpe_validated', 'dgpe_validator', 'dgpe_validated_dt']


class DoubleCountingDocFileSerializer(serializers.ModelSerializer):
    class Meta:
        model = DoubleCountingDocFile
        fields = ['id', 'file_name', 'file_type']


class DoubleCountingAgreementFullSerializerWithForeignKeys(serializers.ModelSerializer):
    production_site = serializers.SlugRelatedField(
        read_only=True,
        slug_field='name'
    )
    producer_user = \
serializers.SlugRelatedField( read_only=True, slug_field='email' ) dgec_validator = serializers.SlugRelatedField( read_only=True, slug_field='name' ) dgpe_validator = serializers.SlugRelatedField( read_only=True, slug_field='name' ) dgddi_validator = serializers.SlugRelatedField( read_only=True, slug_field='name' ) sourcing = DoubleCountingSourcingSerializer(many=True, read_only=True) aggregated_sourcing = serializers.SerializerMethodField() production = DoubleCountingProductionSerializer(many=True, read_only=True) producer = EntitySerializer(read_only=True) documents = DoubleCountingDocFileSerializer(many=True, read_only=True) def get_aggregated_sourcing(self, dca): agg = dca.sourcing.all().values('year', 'feedstock').annotate(sum=Sum('metric_tonnes'), count=Count('metric_tonnes')) feedstock_ids = set(list([a['feedstock'] for a in agg])) feedstocks = {f.id: f for f in MatierePremiere.objects.filter(id__in=feedstock_ids)} for a in agg: s = FeedStockSerializer(feedstocks[a['feedstock']]) a['feedstock'] = s.data return [a for a in agg] class Meta: model = DoubleCountingAgreement fields = ['id', 'creation_date', 'producer', 'producer_user', 'production_site', 'period_start', 'period_end', 'status', 'dgec_validated', 'dgec_validator', 'dgec_validated_dt', 'dgddi_validated', 'dgddi_validator', 'dgddi_validated_dt', 'dgpe_validated', 'dgpe_validator', 'dgpe_validated_dt', 'sourcing', 'aggregated_sourcing', 'production', 'documents'] class DoubleCountingAgreementPartialSerializer(serializers.ModelSerializer): production_site = serializers.SlugRelatedField( read_only=True, slug_field='name' ) class Meta: model = DoubleCountingAgreement fields = ['id', 'creation_date', 'producer', 'production_site', 'period_start', 'period_end', 'status'] class DoubleCountingAgreementPartialSerializerWithForeignKeys(serializers.ModelSerializer): production_site = serializers.SlugRelatedField( read_only=True, slug_field='name' ) producer = EntitySerializer(read_only=True) production = DoubleCountingProductionSerializer(many=True, read_only=True) sourcing = DoubleCountingSourcingSerializer(many=True, read_only=True) documents = DoubleCountingDocFileSerializer(many=True, read_only=True) aggregated_sourcing = serializers.SerializerMethodField() def get_aggregated_sourcing(self, dca): agg = dca.sourcing.all().values('year', 'feedstock').annotate(sum=Sum('metric_tonnes'), count=Count('metric_tonnes')) feedstock_ids = set(list([a['feedstock'] for a in agg])) feedstocks = {f.id: f for f in MatierePremiere.objects.filter(id__in=feedstock_ids)} for a in agg: s = FeedStockSerializer(feedstocks[a['feedstock']]) a['feedstock'] = s.data return [a for a in agg] class Meta: model = DoubleCountingAgreement fields = ['id', 'creation_date', 'producer', 'production_site', 'period_start', 'period_end', 'status', 'production', 'sourcing', 'aggregated_sourcing', 'documents']
[ "rest_framework.serializers.SerializerMethodField", "django.db.models.aggregates.Count", "core.models.MatierePremiere.objects.filter", "django.db.models.aggregates.Sum", "rest_framework.serializers.SlugRelatedField" ]
[((2232, 2267), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (2265, 2267), False, 'from rest_framework import serializers\n'), ((2622, 2685), 'rest_framework.serializers.SlugRelatedField', 'serializers.SlugRelatedField', ([], {'read_only': '(True)', 'slug_field': '"""name"""'}), "(read_only=True, slug_field='name')\n", (2650, 2685), False, 'from rest_framework import serializers\n'), ((2728, 2792), 'rest_framework.serializers.SlugRelatedField', 'serializers.SlugRelatedField', ([], {'read_only': '(True)', 'slug_field': '"""email"""'}), "(read_only=True, slug_field='email')\n", (2756, 2792), False, 'from rest_framework import serializers\n'), ((2836, 2899), 'rest_framework.serializers.SlugRelatedField', 'serializers.SlugRelatedField', ([], {'read_only': '(True)', 'slug_field': '"""name"""'}), "(read_only=True, slug_field='name')\n", (2864, 2899), False, 'from rest_framework import serializers\n'), ((2943, 3006), 'rest_framework.serializers.SlugRelatedField', 'serializers.SlugRelatedField', ([], {'read_only': '(True)', 'slug_field': '"""name"""'}), "(read_only=True, slug_field='name')\n", (2971, 3006), False, 'from rest_framework import serializers\n'), ((3051, 3114), 'rest_framework.serializers.SlugRelatedField', 'serializers.SlugRelatedField', ([], {'read_only': '(True)', 'slug_field': '"""name"""'}), "(read_only=True, slug_field='name')\n", (3079, 3114), False, 'from rest_framework import serializers\n'), ((3830, 3893), 'rest_framework.serializers.SlugRelatedField', 'serializers.SlugRelatedField', ([], {'read_only': '(True)', 'slug_field': '"""name"""'}), "(read_only=True, slug_field='name')\n", (3858, 3893), False, 'from rest_framework import serializers\n'), ((3936, 4000), 'rest_framework.serializers.SlugRelatedField', 'serializers.SlugRelatedField', ([], {'read_only': '(True)', 'slug_field': '"""email"""'}), "(read_only=True, slug_field='email')\n", (3964, 4000), False, 'from rest_framework import serializers\n'), ((4044, 4107), 'rest_framework.serializers.SlugRelatedField', 'serializers.SlugRelatedField', ([], {'read_only': '(True)', 'slug_field': '"""name"""'}), "(read_only=True, slug_field='name')\n", (4072, 4107), False, 'from rest_framework import serializers\n'), ((4151, 4214), 'rest_framework.serializers.SlugRelatedField', 'serializers.SlugRelatedField', ([], {'read_only': '(True)', 'slug_field': '"""name"""'}), "(read_only=True, slug_field='name')\n", (4179, 4214), False, 'from rest_framework import serializers\n'), ((4259, 4322), 'rest_framework.serializers.SlugRelatedField', 'serializers.SlugRelatedField', ([], {'read_only': '(True)', 'slug_field': '"""name"""'}), "(read_only=True, slug_field='name')\n", (4287, 4322), False, 'from rest_framework import serializers\n'), ((4446, 4481), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (4479, 4481), False, 'from rest_framework import serializers\n'), ((5689, 5752), 'rest_framework.serializers.SlugRelatedField', 'serializers.SlugRelatedField', ([], {'read_only': '(True)', 'slug_field': '"""name"""'}), "(read_only=True, slug_field='name')\n", (5717, 5752), False, 'from rest_framework import serializers\n'), ((6059, 6122), 'rest_framework.serializers.SlugRelatedField', 'serializers.SlugRelatedField', ([], {'read_only': '(True)', 'slug_field': '"""name"""'}), "(read_only=True, slug_field='name')\n", (6087, 6122), False, 'from rest_framework import serializers\n'), ((6448, 6483), 
'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (6481, 6483), False, 'from rest_framework import serializers\n'), ((2399, 2419), 'django.db.models.aggregates.Sum', 'Sum', (['"""metric_tonnes"""'], {}), "('metric_tonnes')\n", (2402, 2419), False, 'from django.db.models.aggregates import Count, Sum\n'), ((4803, 4823), 'django.db.models.aggregates.Sum', 'Sum', (['"""metric_tonnes"""'], {}), "('metric_tonnes')\n", (4806, 4823), False, 'from django.db.models.aggregates import Count, Sum\n'), ((4831, 4853), 'django.db.models.aggregates.Count', 'Count', (['"""metric_tonnes"""'], {}), "('metric_tonnes')\n", (4836, 4853), False, 'from django.db.models.aggregates import Count, Sum\n'), ((4959, 5011), 'core.models.MatierePremiere.objects.filter', 'MatierePremiere.objects.filter', ([], {'id__in': 'feedstock_ids'}), '(id__in=feedstock_ids)\n', (4989, 5011), False, 'from core.models import Entity, MatierePremiere, Biocarburant, Pays\n'), ((6603, 6623), 'django.db.models.aggregates.Sum', 'Sum', (['"""metric_tonnes"""'], {}), "('metric_tonnes')\n", (6606, 6623), False, 'from django.db.models.aggregates import Count, Sum\n'), ((6631, 6653), 'django.db.models.aggregates.Count', 'Count', (['"""metric_tonnes"""'], {}), "('metric_tonnes')\n", (6636, 6653), False, 'from django.db.models.aggregates import Count, Sum\n'), ((6759, 6811), 'core.models.MatierePremiere.objects.filter', 'MatierePremiere.objects.filter', ([], {'id__in': 'feedstock_ids'}), '(id__in=feedstock_ids)\n', (6789, 6811), False, 'from core.models import Entity, MatierePremiere, Biocarburant, Pays\n')]
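Outside the ORM, get_aggregated_sourcing above is a plain group-by over (year, feedstock) with Sum and Count; a dependency-free sketch of the same aggregation on in-memory rows (the sample values are hypothetical):

from collections import defaultdict

def aggregate_sourcing(rows):
    # rows: iterable of dicts with 'year', 'feedstock' and 'metric_tonnes' keys
    agg = defaultdict(lambda: {'sum': 0, 'count': 0})
    for r in rows:
        key = (r['year'], r['feedstock'])
        agg[key]['sum'] += r['metric_tonnes']
        agg[key]['count'] += 1
    return [{'year': y, 'feedstock': f, **v} for (y, f), v in agg.items()]

rows = [{'year': 2021, 'feedstock': 1, 'metric_tonnes': 10},
        {'year': 2021, 'feedstock': 1, 'metric_tonnes': 5}]
assert aggregate_sourcing(rows) == [{'year': 2021, 'feedstock': 1, 'sum': 15, 'count': 2}]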
import discord
import sqlite3
from pokedb import PokeSQL
import os

# Housekeeping for login information
TOKEN_FILE_PATH = 'token.txt'

# The Discord client.
client = discord.Client()

# Command prefix.
COMMAND_PREFIX = '!'

scdir = os.path.dirname(os.path.abspath(__file__))
conn = sqlite3.connect(os.path.join(scdir, 'pokedex.sqlite'))
c = conn.cursor()
db = PokeSQL(c)


@client.event
async def on_ready():
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')


@client.event
async def on_message(message: discord.Message):
    content = message.content
    if not content.startswith(COMMAND_PREFIX):
        return
    args = content[len(COMMAND_PREFIX):].split(' ')
    display_limit = 100
    if args[0] == 'learnset':
        name = args[1].replace('_', ' ')
        lv = 100
        if len(args) > 2:
            lv = int(args[2])
        p_id = db.moveNum(name)
        if p_id == -1:
            # Format the string itself before sending it
            await client.send_message(message.channel, content="Cannot find `{}`".format(name))
            return
        res = db.learnSet(p_id, lv)
        post = "```Pokémon that can learn #{:03d} {} by level {}:\n".format(p_id, name, lv)
        for ii in range(min(display_limit, len(res))):
            lvs = ["{:02d}".format(x) for x in res[ii][2]]
            post += "\n#{:03d}\t{:<14}\tLv.{}".format(res[ii][0], res[ii][1], "/".join(lvs))
        if len(res) > display_limit:
            post += "\nAnd {} more.".format(len(res) - display_limit)
        post += "```"
        await client.send_message(message.channel, content=post)
    elif args[0] == 'movepool':
        name = args[1].replace('_', ' ')
        lv = 100
        if len(args) > 2:
            lv = int(args[2])
        p_id = db.dexNum(name)
        if p_id == -1:
            await client.send_message(message.channel, content="Cannot find `{}`".format(name))
            return
        res = db.movePool(p_id, lv)
        post = "```#{:03d} {}'s level-up moves (up to Lv.{}):\n".format(p_id, name, lv)
        for ii in range(min(display_limit, len(res))):
            post += "\n#{:03d}\t{:<14}\tLv.{:02d}".format(res[ii][0], res[ii][1], res[ii][2])
        if len(res) > display_limit:
            post += "\nAnd {} more.".format(len(res) - display_limit)
        post += "```"
        await client.send_message(message.channel, content=post)
    elif args[0] == 'abilityset':
        name = args[1].replace('_', ' ')
        p_id = db.abilityNum(name)
        if p_id == -1:
            await client.send_message(message.channel, content="Cannot find `{}`".format(name))
            return
        res = db.abilitySet(p_id)
        post = "```Pokémon with the ability #{:03d} {}:".format(p_id, name)
        for ii in range(min(display_limit, len(res))):
            post += "\n#{:03d}\t{:<14}".format(res[ii][0], res[ii][1])
            if res[ii][2]:
                post += " [HA]"
        if len(res) > display_limit:
            post += "\nAnd {} more.".format(len(res) - display_limit)
        post += "```"
        await client.send_message(message.channel, content=post)
    elif args[0] == 'abilitypool':
        name = args[1].replace('_', ' ')
        p_id = db.dexNum(name)
        if p_id == -1:
            await client.send_message(message.channel, content="Cannot find `{}`".format(name))
            return
        res = db.abilityPool(p_id)
        post = "```#{:03d} {}'s abilities:".format(p_id, name)
        for ii in range(min(display_limit, len(res))):
            post += "\n#{:03d}\t{:<14}".format(res[ii][0], res[ii][1])
            if res[ii][2]:
                post += " [HA]"
        if len(res) > display_limit:
            post += "\nAnd {} more.".format(len(res) - display_limit)
        post += "```"
        await client.send_message(message.channel, content=post)
    elif args[0] == 'typeset':
        name = args[1].replace('_', ' ')
        p_id = db.typeNum(name)
        if p_id == -1:
            await client.send_message(message.channel, content="Cannot find `{}`".format(name))
            return
        res = db.typeSet(p_id)
        post = "```Pokémon of the {}-type:".format(name)
        for ii in range(min(display_limit, len(res))):
            post += "\n#{:03d}\t{:<14}".format(res[ii][0], res[ii][1])
        if len(res) > display_limit:
            post += "\nAnd {} more.".format(len(res) - display_limit)
        post += "```"
        await client.send_message(message.channel, content=post)


with open(os.path.join(scdir, TOKEN_FILE_PATH)) as token_file:
    token = token_file.readline().strip()

client.run(token)
[ "pokedb.PokeSQL", "os.path.abspath", "os.path.join", "discord.Client" ]
[((167, 183), 'discord.Client', 'discord.Client', ([], {}), '()\n', (181, 183), False, 'import discord\n'), ((362, 372), 'pokedb.PokeSQL', 'PokeSQL', (['c'], {}), '(c)\n', (369, 372), False, 'from pokedb import PokeSQL\n'), ((249, 274), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (264, 274), False, 'import os\n'), ((300, 337), 'os.path.join', 'os.path.join', (['scdir', '"""pokedex.sqlite"""'], {}), "(scdir, 'pokedex.sqlite')\n", (312, 337), False, 'import os\n'), ((4560, 4596), 'os.path.join', 'os.path.join', (['scdir', 'TOKEN_FILE_PATH'], {}), '(scdir, TOKEN_FILE_PATH)\n', (4572, 4596), False, 'import os\n')]
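The five command branches in the bot above share one shape: resolve an id, run a query, cap the output at display_limit, and send the formatted result. A minimal, self-contained refactor sketch of that pattern follows; the stand-in resolver and query lambdas and the sample rows are illustrative, not the real PokeSQL lookups.

def paginate(rows, fmt, header, display_limit=100):
    """Format up to display_limit rows and note how many were cut off."""
    lines = [header] + [fmt(row) for row in rows[:display_limit]]
    if len(rows) > display_limit:
        lines.append("And {} more.".format(len(rows) - display_limit))
    return "```" + "\n".join(lines) + "```"


# command name -> (id resolver, query, row formatter); one entry per command
COMMANDS = {
    'typeset': (
        lambda name: 1,                                  # stand-in for db.typeNum
        lambda p_id: [(25, 'Pikachu'), (26, 'Raichu')],  # stand-in for db.typeSet
        lambda row: "#{:03d}\t{:<14}".format(row[0], row[1]),
    ),
}


def handle(command, name):
    resolver, query, fmt = COMMANDS[command]
    p_id = resolver(name)
    if p_id == -1:
        return "Cannot find `{}`".format(name)
    return paginate(query(p_id), fmt, "Pokémon of the {}-type:".format(name))


print(handle('typeset', 'electric'))

With this layout, adding a new command is a one-line dictionary entry rather than another copy of the branch body.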
#!/usr/bin/env python3 # https://discordapp.com/oauth2/authorize?client_id=633799032862408704&permissions=8&scope=bot import discord import wikiquotes from discord.ext import commands client = commands.Bot(command_prefix="<") token = 'TOKEN_HERE' @client.event async def on_ready(): print("Bot is online.") await client.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name="Type <usage for help.")) @client.event async def on_message(message): if message.content.startswith("<usage"): await message.channel.send("Type `<search author-name` to search for an author. Type `<day` to display the Wikiquotes quote of the day.") if message.content.startswith("<day"): await message.channel.send(wikiquotes.quote_of_the_day("english")) if message.content.startswith("<pacer"): await message.channel.send("""The FitnessGram Pacer Test is a multistage aerobic capacity test that progressively gets more difficult as it continues. The 20 meter pacer test will begin in 30 seconds. Line up at the start. The running speed starts slowly but gets faster each minute after you hear this signal. A single lap should be completed every time you hear this sound. Remember to run in a straight line and run as long as possible. The second time you fail to complete a lap before the sound, your test is over. The test will begin on the word start. On your mark. Get ready!… Start.""") await client.process_commands(message) @client.command(pass_context=True) async def search(ctx, author): await ctx.send(wikiquotes.random_quote(author, "english")) client.run(token)
[ "discord.Activity", "wikiquotes.quote_of_the_day", "wikiquotes.random_quote", "discord.ext.commands.Bot" ]
[((197, 229), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""<"""'}), "(command_prefix='<')\n", (209, 229), False, 'from discord.ext import commands\n'), ((1578, 1620), 'wikiquotes.random_quote', 'wikiquotes.random_quote', (['author', '"""english"""'], {}), "(author, 'english')\n", (1601, 1620), False, 'import wikiquotes\n'), ((359, 445), 'discord.Activity', 'discord.Activity', ([], {'type': 'discord.ActivityType.playing', 'name': '"""Type <usage for help."""'}), "(type=discord.ActivityType.playing, name=\n 'Type <usage for help.')\n", (375, 445), False, 'import discord\n'), ((758, 796), 'wikiquotes.quote_of_the_day', 'wikiquotes.quote_of_the_day', (['"""english"""'], {}), "('english')\n", (785, 796), False, 'import wikiquotes\n')]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 3 2017

Webscrape

@author: dhingratul
"""
# Import the submodule explicitly so urllib.error is guaranteed to exist
import urllib.error
import sys
sys.path.insert(0, '../tools/')
import utils

i_start = 1
i_end = 401
mdir = '../data/Andaman/'
base_url = "http://as1.and.nic.in/newElection/AllPdf/"
for i in range(i_start, i_end + 1):
    print("\n", i)
    url = base_url + str(i) + '.pdf'
    fid = "PART" + "_" + str(i) + '.pdf'
    try:
        flag = utils.download_file(url, mdir, fid)
        if flag == 0:
            with open("Andaman.txt", "a") as myfile:
                myfile.write(url + '\n')
    except urllib.error.HTTPError:
        with open("Andaman.txt", "a") as myfile:
            myfile.write(url + '\n')
[ "sys.path.insert", "utils.download_file" ]
[((139, 170), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../tools/"""'], {}), "(0, '../tools/')\n", (154, 170), False, 'import sys\n'), ((448, 483), 'utils.download_file', 'utils.download_file', (['url', 'mdir', 'fid'], {}), '(url, mdir, fid)\n', (467, 483), False, 'import utils\n')]
from sqlalchemy import Column, Integer, String, DateTime from app.models import Base class AuthUser(Base): __tablename__ = 'auth_user' id = Column(Integer, primary_key=True) password = Column(String(128), nullable=False) last_login = Column(DateTime) is_superuser = Column(Integer, nullable=False) username = Column(String(150), nullable=False, unique=True) first_name = Column(String(30), nullable=False) last_name = Column(String(30), nullable=False) email = Column(String(254), nullable=False) is_staff = Column(Integer, nullable=False) is_active = Column(Integer, nullable=False) date_joined = Column(DateTime, nullable=False)
[ "sqlalchemy.String", "sqlalchemy.Column" ]
[((152, 185), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (158, 185), False, 'from sqlalchemy import Column, Integer, String, DateTime\n'), ((254, 270), 'sqlalchemy.Column', 'Column', (['DateTime'], {}), '(DateTime)\n', (260, 270), False, 'from sqlalchemy import Column, Integer, String, DateTime\n'), ((290, 321), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (296, 321), False, 'from sqlalchemy import Column, Integer, String, DateTime\n'), ((552, 583), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (558, 583), False, 'from sqlalchemy import Column, Integer, String, DateTime\n'), ((600, 631), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (606, 631), False, 'from sqlalchemy import Column, Integer, String, DateTime\n'), ((650, 682), 'sqlalchemy.Column', 'Column', (['DateTime'], {'nullable': '(False)'}), '(DateTime, nullable=False)\n', (656, 682), False, 'from sqlalchemy import Column, Integer, String, DateTime\n'), ((208, 219), 'sqlalchemy.String', 'String', (['(128)'], {}), '(128)\n', (214, 219), False, 'from sqlalchemy import Column, Integer, String, DateTime\n'), ((344, 355), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (350, 355), False, 'from sqlalchemy import Column, Integer, String, DateTime\n'), ((410, 420), 'sqlalchemy.String', 'String', (['(30)'], {}), '(30)\n', (416, 420), False, 'from sqlalchemy import Column, Integer, String, DateTime\n'), ((461, 471), 'sqlalchemy.String', 'String', (['(30)'], {}), '(30)\n', (467, 471), False, 'from sqlalchemy import Column, Integer, String, DateTime\n'), ((508, 519), 'sqlalchemy.String', 'String', (['(254)'], {}), '(254)\n', (514, 519), False, 'from sqlalchemy import Column, Integer, String, DateTime\n')]
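A hypothetical usage sketch for the mapped table above: open a session and query by the unique username column. The SQLite URL and the queried username are illustrative assumptions, not part of the model file, and the sketch assumes the AuthUser class defined above is importable.

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite:///example.db')  # illustrative connection URL
Session = sessionmaker(bind=engine)
session = Session()

# Fetch one active user by the unique username column.
user = (
    session.query(AuthUser)
    .filter(AuthUser.username == 'admin', AuthUser.is_active == 1)
    .one_or_none()
)
if user is not None:
    print(user.email)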
#!/usr/bin/python
"""
Write random data to the data.txt file

takes in a universe size M, writes n random digits from the universe M
"""
import argparse
import numpy as np


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--M', metavar='M', type=int, default=100,
                        help='The size of the universe - data written ranges from 0, ..., M-1')
    parser.add_argument('--n', metavar='n', type=int, default=300,
                        help='The number of integers to write to the data.txt file')
    args = parser.parse_args()
    File_object = open('data.txt', 'w+')
    M = args.M
    n = args.n
    # randint's upper bound is exclusive, so draw from [0, M) to cover
    # the documented range 0, ..., M-1
    datastream = np.random.randint(0, M, n)
    for data in datastream:
        File_object.write(str(data) + ' ')
    File_object.close()


if __name__ == '__main__':
    main()
[ "numpy.random.randint", "argparse.ArgumentParser" ]
[((196, 221), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (219, 221), False, 'import argparse\n'), ((603, 633), 'numpy.random.randint', 'np.random.randint', (['(0)', '(M - 1)', 'n'], {}), '(0, M - 1, n)\n', (620, 633), True, 'import numpy as np\n')]
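A small optional check, not part of the original script: read data.txt back and confirm every value falls in the documented universe 0, ..., M-1. The M value is an assumption that must match the --M used when generating the file.

import numpy as np

M = 100  # must match the --M passed to the generator
values = np.loadtxt('data.txt', dtype=int)
assert values.min() >= 0 and values.max() <= M - 1
print('all {} values lie in [0, {}]'.format(values.size, M - 1))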
import os from ai import DDPGAI from client.AIExchangeService import get_service from client.aiExchangeMessages_pb2 import SimStateResponse, Control, SimulationID, VehicleID, DataRequest service = get_service() try: username = os.environ['DRIVEBUILD_USER'] password = os.environ['DRIVEBUILD_PASSWORD'] except KeyError: print("No user data found") exit(1) # Send tests sids = service.run_tests(username, password, "client/xmls/criteriaA.dbc.xml", "client/xmls/environmentA.dbe.xml") # -> Response status: 500 print(sids) print("Tests sent") # Interact with a simulation if not sids: exit(1) vid = VehicleID() vid.vid = "ego" sid = SimulationID() sid.sid = sids.sids[0] ai = DDPGAI() ai.start(sid, vid)
[ "client.aiExchangeMessages_pb2.SimulationID", "client.aiExchangeMessages_pb2.VehicleID", "ai.DDPGAI", "client.AIExchangeService.get_service" ]
[((200, 213), 'client.AIExchangeService.get_service', 'get_service', ([], {}), '()\n', (211, 213), False, 'from client.AIExchangeService import get_service\n'), ((623, 634), 'client.aiExchangeMessages_pb2.VehicleID', 'VehicleID', ([], {}), '()\n', (632, 634), False, 'from client.aiExchangeMessages_pb2 import SimStateResponse, Control, SimulationID, VehicleID, DataRequest\n'), ((658, 672), 'client.aiExchangeMessages_pb2.SimulationID', 'SimulationID', ([], {}), '()\n', (670, 672), False, 'from client.aiExchangeMessages_pb2 import SimStateResponse, Control, SimulationID, VehicleID, DataRequest\n'), ((703, 711), 'ai.DDPGAI', 'DDPGAI', ([], {}), '()\n', (709, 711), False, 'from ai import DDPGAI\n')]
# -*- coding: utf-8 -*-
import os
from distutils.util import strtobool

from dotenv import load_dotenv

load_dotenv()

BOT_NAME = "Book_Crawler"

SPIDER_MODULES = ["spiders"]
NEWSPIDER_MODULE = "spiders"
COMMANDS_MODULE = "commands"

START_URL = os.getenv("START_URL", "")
BASE_URL = os.getenv("BASE_URL", "")

PROXY = os.getenv("PROXY", "")
PROXY_AUTH = os.getenv("PROXY_AUTH", "")
# Parse the flag the same way as HTTPCACHE_ENABLED below
try:
    PROXY_ENABLED = bool(strtobool(os.getenv("PROXY_ENABLED", "False")))
except ValueError:
    PROXY_ENABLED = False

COOKIES_JAR = ""

CONCURRENT_REQUESTS = os.getenv("CONCURRENT_REQUESTS", 16)
CONCURRENT_REQUESTS_PER_DOMAIN = os.getenv("CONCURRENT_REQUESTS_PER_DOMAIN", 8)
DOWNLOAD_DELAY = os.getenv("DOWNLOAD_DELAY", 0)
DOWNLOAD_TIMEOUT = os.getenv("DOWNLOAD_TIMEOUT", 180)

ROBOTSTXT_OBEY = False
COOKIES_ENABLED = True

TELNETCONSOLE_ENABLED = False
TELNETCONSOLE_PASSWORD = "password"

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    "Accept-Language": "en-US,en;q=0.5",
    "Cache-Control": "max-age=0",
}

DOWNLOADER_MIDDLEWARES = {
    "scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware": None,
    "middlewares.HttpProxyMiddleware": 543,
    "middlewares.LogErrorsMiddleware": 550,
}

LOG_LEVEL = os.getenv("LOG_LEVEL", "DEBUG")
LOG_FILE = os.getenv("LOG_FILE") if os.getenv("LOG_FILE", "") else None

ITEM_PIPELINES = {
    'pipelines.MongoPipeline': 300,
}

MONGO_URI = os.getenv("MONGO_URI")

PIKA_LOG_LEVEL = os.getenv("PIKA_LOG_LEVEL", "WARN")
RABBITMQ_HOST = os.getenv("RABBITMQ_HOST", "localhost")
RABBITMQ_PORT = os.getenv("RABBITMQ_PORT", 5672)
# RabbitMQ's stock broker defaults: virtual host "/", user/pass guest/guest
RABBITMQ_VIRTUAL_HOST = os.getenv("RABBITMQ_VIRTUAL_HOST", "/")
RABBITMQ_USER = os.getenv("RABBITMQ_USER", "guest")
RABBITMQ_PASS = os.getenv("RABBITMQ_PASS", "guest")

try:
    HTTPCACHE_ENABLED = strtobool(os.getenv("HTTPCACHE_ENABLED", "False"))
except ValueError:
    HTTPCACHE_ENABLED = False

HTTPCACHE_IGNORE_HTTP_CODES = list(
    map(int, (s for s in os.getenv("HTTPCACHE_IGNORE_HTTP_CODES", "").split(",") if s))
)
[ "dotenv.load_dotenv", "os.getenv" ]
[((105, 118), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (116, 118), False, 'from dotenv import load_dotenv\n'), ((247, 273), 'os.getenv', 'os.getenv', (['"""START_URL"""', '""""""'], {}), "('START_URL', '')\n", (256, 273), False, 'import os\n'), ((285, 310), 'os.getenv', 'os.getenv', (['"""BASE_URL"""', '""""""'], {}), "('BASE_URL', '')\n", (294, 310), False, 'import os\n'), ((320, 342), 'os.getenv', 'os.getenv', (['"""PROXY"""', '""""""'], {}), "('PROXY', '')\n", (329, 342), False, 'import os\n'), ((356, 383), 'os.getenv', 'os.getenv', (['"""PROXY_AUTH"""', '""""""'], {}), "('PROXY_AUTH', '')\n", (365, 383), False, 'import os\n'), ((400, 433), 'os.getenv', 'os.getenv', (['"""PROXY_ENABLED"""', '(False)'], {}), "('PROXY_ENABLED', False)\n", (409, 433), False, 'import os\n'), ((574, 610), 'os.getenv', 'os.getenv', (['"""CONCURRENT_REQUESTS"""', '(16)'], {}), "('CONCURRENT_REQUESTS', 16)\n", (583, 610), False, 'import os\n'), ((644, 690), 'os.getenv', 'os.getenv', (['"""CONCURRENT_REQUESTS_PER_DOMAIN"""', '(8)'], {}), "('CONCURRENT_REQUESTS_PER_DOMAIN', 8)\n", (653, 690), False, 'import os\n'), ((708, 738), 'os.getenv', 'os.getenv', (['"""DOWNLOAD_DELAY"""', '(0)'], {}), "('DOWNLOAD_DELAY', 0)\n", (717, 738), False, 'import os\n'), ((758, 792), 'os.getenv', 'os.getenv', (['"""DOWNLOAD_TIMEOUT"""', '(180)'], {}), "('DOWNLOAD_TIMEOUT', 180)\n", (767, 792), False, 'import os\n'), ((1256, 1287), 'os.getenv', 'os.getenv', (['"""LOG_LEVEL"""', '"""DEBUG"""'], {}), "('LOG_LEVEL', 'DEBUG')\n", (1265, 1287), False, 'import os\n'), ((1431, 1453), 'os.getenv', 'os.getenv', (['"""MONGO_URI"""'], {}), "('MONGO_URI')\n", (1440, 1453), False, 'import os\n'), ((1472, 1507), 'os.getenv', 'os.getenv', (['"""PIKA_LOG_LEVEL"""', '"""WARN"""'], {}), "('PIKA_LOG_LEVEL', 'WARN')\n", (1481, 1507), False, 'import os\n'), ((1524, 1563), 'os.getenv', 'os.getenv', (['"""RABBITMQ_HOST"""', '"""localhost"""'], {}), "('RABBITMQ_HOST', 'localhost')\n", (1533, 1563), False, 'import os\n'), ((1580, 1612), 'os.getenv', 'os.getenv', (['"""RABBITMQ_PORT"""', '(5672)'], {}), "('RABBITMQ_PORT', 5672)\n", (1589, 1612), False, 'import os\n'), ((1637, 1680), 'os.getenv', 'os.getenv', (['"""RABBITMQ_VIRTUAL_HOST"""', '"""guest"""'], {}), "('RABBITMQ_VIRTUAL_HOST', 'guest')\n", (1646, 1680), False, 'import os\n'), ((1697, 1732), 'os.getenv', 'os.getenv', (['"""RABBITMQ_USER"""', '"""guest"""'], {}), "('RABBITMQ_USER', 'guest')\n", (1706, 1732), False, 'import os\n'), ((1749, 1780), 'os.getenv', 'os.getenv', (['"""RABBITMQ_PASS"""', '"""/"""'], {}), "('RABBITMQ_PASS', '/')\n", (1758, 1780), False, 'import os\n'), ((1324, 1349), 'os.getenv', 'os.getenv', (['"""LOG_FILE"""', '""""""'], {}), "('LOG_FILE', '')\n", (1333, 1349), False, 'import os\n'), ((1299, 1320), 'os.getenv', 'os.getenv', (['"""LOG_FILE"""'], {}), "('LOG_FILE')\n", (1308, 1320), False, 'import os\n'), ((1821, 1860), 'os.getenv', 'os.getenv', (['"""HTTPCACHE_ENABLED"""', '"""False"""'], {}), "('HTTPCACHE_ENABLED', 'False')\n", (1830, 1860), False, 'import os\n'), ((1973, 2017), 'os.getenv', 'os.getenv', (['"""HTTPCACHE_IGNORE_HTTP_CODES"""', '""""""'], {}), "('HTTPCACHE_IGNORE_HTTP_CODES', '')\n", (1982, 2017), False, 'import os\n')]
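A sketch of the boolean-from-environment pattern the settings above repeat for PROXY_ENABLED and HTTPCACHE_ENABLED; env_bool is a hypothetical helper, not part of the project.

import os
from distutils.util import strtobool


def env_bool(name, default='False'):
    """Parse an environment variable as a boolean, falling back to False."""
    try:
        return bool(strtobool(os.getenv(name, default)))
    except ValueError:
        return False


os.environ['HTTPCACHE_ENABLED'] = 'yes'  # strtobool accepts yes/no, 1/0, true/false
print(env_bool('HTTPCACHE_ENABLED'))      # True
print(env_bool('PROXY_ENABLED'))          # False (unset -> default 'False')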
import os
from collections import Counter
from itertools import islice, combinations
from multiprocessing import Pool, cpu_count

from tqdm import tqdm
import numpy as np
import pandas as pd
import nltk

try:
    nltk.pos_tag(nltk.word_tokenize('This is a test sentence.'))
except LookupError:
    print('Installing nltk perceptron tagger.')
    nltk.download('averaged_perceptron_tagger')


class CalculateScores():
    """Calculates ngram scores for documents.

    Considered parts of speech are (see `nltk` docs for details)

    - Nouns: 'NN', 'NNS', 'NNP', 'NNPS'
    - Adjectives: 'JJ', 'JJR', 'JJS'

    All texts of the corpus are tokenized and POS tags are generated. A global
    dictionary of counts of different ngrams is built in `allNGrams`. The
    ngram relations of every text are listed in `outputDict`.

    Scoring counts the occurrence of different words left and right of each
    single token in each ngram, weighted by ngram size.

    :param sourceDataframe: Dataframe containing the basic corpus
    :type sourceDataframe: class:`pandas.DataFrame`
    :param textColumn: Column name to use for ngram calculation
    :type textColumn: str
    :param pubIDColumn: Column name to use for publication identification (assumed to be unique)
    :type pubIDColumn: str
    :param yearColumn: Column name for temporal ordering publications, used during writing the scoring files
    :type yearColumn: str
    :param ngramsize: Maximum of considered ngrams (default: 5-gram)
    :type ngramsize: int
    """

    def __init__(
        self, sourceDataframe,
        textColumn="text",
        pubIDColumn="pubID",
        yearColumn='year',
        ngramsize=5,
        debug=False
    ):
        self.baseDF = sourceDataframe
        self.textCol = textColumn
        self.pubIDCol = pubIDColumn
        self.yearCol = yearColumn
        self.ngramEnd = ngramsize
        self.outputDict = {}
        self.allNGrams = []
        self.counts = {}
        self.corpussize = 1
        self.uniqueNGrams = ()
        self.debug = debug

    def getTermPatterns(self):
        """Create dictionaries of occurring ngrams."""
        allNGrams = {x: [] for x in range(1, self.ngramEnd + 1, 1)}
        pos_tag = ["NN", "NNS", "NNP", "NNPS", "JJ", "JJR", "JJS"]
        for _, row in tqdm(self.baseDF.iterrows()):
            tokens = nltk.word_tokenize(row[self.textCol])
            pos = nltk.pos_tag(tokens)
            nnJJtokens = [x[0].lower() for x in pos if x[1] in pos_tag]
            tempNGram = []
            for i in range(1, self.ngramEnd + 1, 1):
                val = allNGrams[i]
                newngrams = list(nltk.ngrams(nnJJtokens, i))
                val.extend(newngrams)
                tempNGram.extend(newngrams)
                allNGrams.update({i: val})
            self.outputDict[row[self.pubIDCol]] = tempNGram
        self.allNGrams = allNGrams
        allgrams = [x for y in [y for x, y in self.allNGrams.items()] for x in y]
        self.corpussize = len(allgrams)
        self.counts = Counter(allgrams)
        self.uniqueNGrams = set(allgrams)

    def getScore(self, target):
        """Calculate ngram score."""
        valueList = []
        for _, subgram in enumerate(target):
            contains = [x for x in self.allNGrams[2] if subgram in x]
            rvalue = len(set(x for x in contains if x[0] == subgram))
            lvalue = len(set(x for x in contains if x[1] == subgram))
            valueList.append((lvalue + 1) * (rvalue + 1))
        return {
            target: 1 / self.counts[target] * (np.prod(valueList)) ** (1 / (2.0 * len(target)))
        }

    def _calcBatch(self, batch):
        res = []
        for elem in tqdm(batch):
            res.append(self.getScore(elem))
        return res

    def run(self, write=False, outpath='./', recreate=False, limitCPUs=True):
        """Get score for all documents."""
        scores = {}
        self.getTermPatterns()
        if self.debug is True:
            print(f'Found {len(self.uniqueNGrams)} unique {self.ngramEnd}-grams.')
        # Guard against Pool(0) on machines with few cores
        if limitCPUs is True:
            ncores = max(1, cpu_count() // 4)
        else:
            ncores = max(1, cpu_count() - 2)
        pool = Pool(ncores)
        chunk_size = max(1, int(len(self.uniqueNGrams) / ncores))
        batches = [
list(self.uniqueNGrams)[i:i + chunk_size] for i in range(0, len(self.uniqueNGrams), chunk_size) ] ncoresResults = pool.map(self._calcBatch, batches) results = [x for y in ncoresResults for x in y] for elem in results: scores.update(elem) for key, val in self.outputDict.items(): tmpList = [] for elem in val: tmpList.append([elem, scores[elem]]) self.outputDict.update({key: tmpList}) if write is True: for year, df in self.baseDF.groupby(self.yearCol): filePath = f'{outpath}{str(year)}.tsv' if os.path.isfile(filePath): if recreate is False: raise IOError( f'File at {filePath} exists. Set recreate = True to rewrite file.' ) if recreate is True: os.remove(filePath) with open(filePath, 'a') as yearfile: for pub in df[self.pubIDCol].unique(): for elem in self.outputDict[pub]: yearfile.write(f'{pub}\t{elem[0]}\t{elem[1]}\n') return scores, self.outputDict class LinksOverTime(): """Create multilayer pajek files for corpus. To keep track of nodes over time, we need a global register of node names. This class takes care of this, by adding new keys of authors, papers or ngrams to the register. :param dataframe: Source dataframe containing metadata of texts (authors, publicationID and year) :type dataframe: class:`pandas.DataFrame` :param authorColumn: Column name for author information :param pubIDColumn: Column name to identify publications :param yearColumn: Column name with year information """ def __init__( self, dataframe, authorColumn='authors', pubIDColumn="pubID", yearColumn='year', debug=False ): self.dataframe = dataframe self.authorCol = authorColumn self.pubIDCol = pubIDColumn self.yearColumn = yearColumn self.nodeMap = {} self.debug = debug def _window(self, seq, n): """Return a sliding window (of width n) over data from the iterable. s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... 
""" it = iter(seq) result = tuple(islice(it, n)) if len(result) == n: yield result for elem in it: result = result[1:] + (elem,) yield result def _createSlices(self, windowsize): slices = [] years = sorted(self.dataframe[self.yearColumn].unique()) for x in self._window(years, windowsize): slices.append(x) return slices def createNodeRegister(self, sl, scorePath, scoreLimit): """Create multilayer node register for time slice.""" if self.debug is True: print(f'Slice: {sl[0]}') dataframe = self.dataframe[self.dataframe[self.yearColumn].isin(sl)] dfNgramsList = [pd.read_csv( scorePath + str(slN) + '.tsv', sep='\t', header=None ) for slN in sl] ngramdataframe = pd.concat(dfNgramsList) ngramdataframe = ngramdataframe[ngramdataframe[2] > scoreLimit] authorList = [x for y in [x.split(';') for x in dataframe[self.authorCol].values] for x in y] authors = [x for x in set(authorList) if x] pubs = dataframe[self.pubIDCol].fillna('None').unique() ngrams = ngramdataframe[1].unique() for authorval in authors: if not self.nodeMap.values(): self.nodeMap.update({authorval: 1}) else: if authorval not in self.nodeMap.keys(): self.nodeMap.update( {authorval: max(self.nodeMap.values()) + 1} ) for pubval in list(pubs): if pubval not in self.nodeMap.keys(): self.nodeMap.update({pubval: max(self.nodeMap.values()) + 1}) for ngramval in list(ngrams): if ngramval not in self.nodeMap.keys(): self.nodeMap.update({ngramval: max(self.nodeMap.values()) + 1}) if self.debug is True: print( '\tNumber of vertices (authors, papers and ngrams) {0}'.format( max(self.nodeMap.values()) ) ) def writeLinks(self, sl, scorePath, scoreLimit, outpath='./', recreate=False): """Write multilayer links to file in Pajek format.""" dataframe = self.dataframe[self.dataframe[self.yearColumn].isin(sl)] filePath = outpath + 'multilayerPajek_{0}.net'.format(sl[0]) if os.path.isfile(filePath): if recreate is False: raise IOError( f'File at {filePath} exists. Set recreate = True to rewrite file.' 
) if recreate is True: os.remove(filePath) dfNgramsList = [pd.read_csv( scorePath + str(slN) + '.tsv', sep='\t', header=None ) for slN in sl] ngramdataframe = pd.concat(dfNgramsList) ngramdataframe = ngramdataframe[ngramdataframe[2] > scoreLimit] with open(filePath, 'a') as file: file.write("# A network in a general multiplex format\n") file.write("*Vertices {0}\n".format(max(self.nodeMap.values()))) for x, y in self.nodeMap.items(): tmpStr = '{0} "{1}"\n'.format(y, x) if tmpStr: file.write(tmpStr) file.write("*Multiplex\n") file.write("# layer node layer node [weight]\n") if self.debug is True: print('\tWriting inter-layer links to file.') for _, row in dataframe.fillna('').iterrows(): authors = row[self.authorCol].split(';') paper = row[self.pubIDCol] if paper not in self.nodeMap.keys(): print(f'Cannot find {paper}') ngramsList = ngramdataframe[ngramdataframe[0] == paper] paperNr = self.nodeMap[paper] if len(authors) >= 2: # pairs = [x for x in combinations(authors, 2)] for pair in combinations(authors, 2): # pairs: file.write( '{0} {1} {2} {3} 1\n'.format( 1, self.nodeMap[pair[0]], 1, self.nodeMap[pair[1]] ) ) for author in authors: try: authNr = self.nodeMap[author] file.write( '{0} {1} {2} {3} 1\n'.format( 1, authNr, 2, paperNr ) ) except KeyError: pass for _, ngramrow in ngramsList.iterrows(): try: ngramNr = self.nodeMap[ngramrow[1]] weight = ngramrow[2] file.write( '{0} {1} {2} {3} {4}\n'.format( 2, paperNr, 3, ngramNr, weight ) ) except KeyError: pass def run(self, recreate=False, windowsize=1, scorePath='./', outPath='./', scoreLimit=1.0): """Create data for all slices.""" for sl in tqdm(self._createSlices(windowsize)): self.createNodeRegister(sl, scorePath, scoreLimit) self.writeLinks(sl, scorePath, scoreLimit, outpath=outPath, recreate=recreate)
[ "tqdm.tqdm", "os.remove", "nltk.pos_tag", "collections.Counter", "nltk.ngrams", "os.path.isfile", "itertools.combinations", "itertools.islice", "multiprocessing.Pool", "nltk.download", "pandas.concat", "nltk.word_tokenize", "numpy.prod", "multiprocessing.cpu_count" ]
[((225, 271), 'nltk.word_tokenize', 'nltk.word_tokenize', (['"""This is a test sentence."""'], {}), "('This is a test sentence.')\n", (243, 271), False, 'import nltk\n'), ((345, 388), 'nltk.download', 'nltk.download', (['"""averaged_perceptron_tagger"""'], {}), "('averaged_perceptron_tagger')\n", (358, 388), False, 'import nltk\n'), ((3045, 3062), 'collections.Counter', 'Counter', (['allgrams'], {}), '(allgrams)\n', (3052, 3062), False, 'from collections import Counter\n'), ((3705, 3716), 'tqdm.tqdm', 'tqdm', (['batch'], {}), '(batch)\n', (3709, 3716), False, 'from tqdm import tqdm\n'), ((4210, 4222), 'multiprocessing.Pool', 'Pool', (['ncores'], {}), '(ncores)\n', (4214, 4222), False, 'from multiprocessing import Pool, cpu_count\n'), ((7577, 7600), 'pandas.concat', 'pd.concat', (['dfNgramsList'], {}), '(dfNgramsList)\n', (7586, 7600), True, 'import pandas as pd\n'), ((9117, 9141), 'os.path.isfile', 'os.path.isfile', (['filePath'], {}), '(filePath)\n', (9131, 9141), False, 'import os\n'), ((9559, 9582), 'pandas.concat', 'pd.concat', (['dfNgramsList'], {}), '(dfNgramsList)\n', (9568, 9582), True, 'import pandas as pd\n'), ((2356, 2393), 'nltk.word_tokenize', 'nltk.word_tokenize', (['row[self.textCol]'], {}), '(row[self.textCol])\n', (2374, 2393), False, 'import nltk\n'), ((2412, 2432), 'nltk.pos_tag', 'nltk.pos_tag', (['tokens'], {}), '(tokens)\n', (2424, 2432), False, 'import nltk\n'), ((6744, 6757), 'itertools.islice', 'islice', (['it', 'n'], {}), '(it, n)\n', (6750, 6757), False, 'from itertools import islice, combinations\n'), ((4179, 4190), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (4188, 4190), False, 'from multiprocessing import Pool, cpu_count\n'), ((4965, 4989), 'os.path.isfile', 'os.path.isfile', (['filePath'], {}), '(filePath)\n', (4979, 4989), False, 'import os\n'), ((9362, 9381), 'os.remove', 'os.remove', (['filePath'], {}), '(filePath)\n', (9371, 9381), False, 'import os\n'), ((2653, 2679), 'nltk.ngrams', 'nltk.ngrams', (['nnJJtokens', 'i'], {}), '(nnJJtokens, i)\n', (2664, 2679), False, 'import nltk\n'), ((3575, 3593), 'numpy.prod', 'np.prod', (['valueList'], {}), '(valueList)\n', (3582, 3593), True, 'import numpy as np\n'), ((10724, 10748), 'itertools.combinations', 'combinations', (['authors', '(2)'], {}), '(authors, 2)\n', (10736, 10748), False, 'from itertools import islice, combinations\n'), ((4123, 4134), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (4132, 4134), False, 'from multiprocessing import Pool, cpu_count\n'), ((5258, 5277), 'os.remove', 'os.remove', (['filePath'], {}), '(filePath)\n', (5267, 5277), False, 'import os\n')]
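A self-contained worked example of the score that CalculateScores.getScore above computes: for each token in the ngram, count the distinct bigrams with that token on the left (rvalue) and on the right (lvalue), then combine as (1/count) * prod((l+1)*(r+1)) ** (1/(2*len)). The toy bigram list and counts below are illustrative, not real corpus data.

from collections import Counter
from math import prod

bigrams = [('deep', 'learning'), ('deep', 'model'), ('machine', 'learning')]
counts = Counter({('deep', 'learning'): 2})

target = ('deep', 'learning')
valueList = []
for token in target:
    contains = [b for b in bigrams if token in b]
    rvalue = len({b for b in contains if b[0] == token})  # distinct right neighbours
    lvalue = len({b for b in contains if b[1] == token})  # distinct left neighbours
    valueList.append((lvalue + 1) * (rvalue + 1))

# 'deep': (0+1)*(2+1) = 3; 'learning': (2+1)*(0+1) = 3
score = 1 / counts[target] * prod(valueList) ** (1 / (2.0 * len(target)))
print(score)  # prod = 9, 9**0.25 ~= 1.732, score ~= 0.866

The geometric-mean exponent 1/(2*len) keeps longer ngrams comparable to shorter ones, while dividing by the raw count penalizes very frequent ngrams.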
from django.shortcuts import render # Create your views here. def responsivehome(request): context = {} return render(request, 'design/responsivehome.html', context) def responsiveproduct(request): context = {} return render(request, 'design/responsiveproduct.html', context) def responsivepeople(request): context = {} return render(request, 'design/responsivepeople.html', context) def responsivecontactus(request): context = {} return render(request, 'design/responsivecontactus.html', context)
[ "django.shortcuts.render" ]
[((121, 175), 'django.shortcuts.render', 'render', (['request', '"""design/responsivehome.html"""', 'context'], {}), "(request, 'design/responsivehome.html', context)\n", (127, 175), False, 'from django.shortcuts import render\n'), ((237, 294), 'django.shortcuts.render', 'render', (['request', '"""design/responsiveproduct.html"""', 'context'], {}), "(request, 'design/responsiveproduct.html', context)\n", (243, 294), False, 'from django.shortcuts import render\n'), ((355, 411), 'django.shortcuts.render', 'render', (['request', '"""design/responsivepeople.html"""', 'context'], {}), "(request, 'design/responsivepeople.html', context)\n", (361, 411), False, 'from django.shortcuts import render\n'), ((475, 534), 'django.shortcuts.render', 'render', (['request', '"""design/responsivecontactus.html"""', 'context'], {}), "(request, 'design/responsivecontactus.html', context)\n", (481, 534), False, 'from django.shortcuts import render\n')]
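A hypothetical urls.py wiring for the four views above, using Django's standard path() routing; the route strings and names are illustrative, not taken from the project.

from django.urls import path

from . import views

urlpatterns = [
    path('', views.responsivehome, name='responsivehome'),
    path('product/', views.responsiveproduct, name='responsiveproduct'),
    path('people/', views.responsivepeople, name='responsivepeople'),
    path('contact/', views.responsivecontactus, name='responsivecontactus'),
]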
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
import logging

from six import itervalues
from flask_login import current_user
from flask_restx._http import HTTPStatus
from flask_marshmallow import Schema, base_fields
from marshmallow import validate, validates_schema, ValidationError
import sqlalchemy as sa

log = logging.getLogger(__name__)  # pylint: disable=invalid-name


class Parameters(Schema):
    class Meta:
        ordered = True

    def __init__(self, **kwargs):
        super(Parameters, self).__init__(strict=True, **kwargs)
        # This is an ad-hoc implementation of a feature which didn't make it
        # into Marshmallow upstream:
        # https://github.com/marshmallow-code/marshmallow/issues/344
        for required_field_name in getattr(self.Meta, 'required', []):
            self.fields[required_field_name].required = True

    def __contains__(self, field):
        return field in self.fields

    def make_instance(self, data):
        # pylint: disable=unused-argument
        """
        This is a no-op function which shadows the ``ModelSchema.make_instance``
        method (when inherited classes inherit from ``ModelSchema``). Thus, we
        avoid creating a new instance, which is undesirable behaviour for
        parameters (they can be used not only for saving new instances).
        """
        return

    def items(self):
        return self.fields.items()


class PostFormParameters(Parameters):
    def __init__(self, *args, **kwargs):
        super(PostFormParameters, self).__init__(*args, **kwargs)
        for field in itervalues(self.fields):
            if field.dump_only:
                continue
            if not field.metadata.get('location'):
                field.metadata['location'] = 'form'


class PatchJSONParameters(Parameters):
    """
    Base parameters class for handling PATCH arguments according to RFC 6902.
    """

    # All operations described in RFC 6902
    OP_ADD = 'add'
    OP_REMOVE = 'remove'
    OP_REPLACE = 'replace'
    OP_MOVE = 'move'
    OP_COPY = 'copy'
    OP_TEST = 'test'

    # However, we use only those which make sense in RESTful API
    OPERATION_CHOICES = (
        OP_TEST,
        OP_ADD,
        OP_REMOVE,
        OP_REPLACE,
    )
    op = base_fields.String(required=True)  # pylint: disable=invalid-name

    PATH_CHOICES = None
    path = base_fields.String(required=True)

    NO_VALUE_OPERATIONS = (OP_REMOVE,)
    value = base_fields.Raw(required=False)

    def __init__(self, *args, **kwargs):
        if 'many' in kwargs:
            assert kwargs['many'], "PATCH Parameters must be marked as 'many'"
        kwargs['many'] = True
        super(PatchJSONParameters, self).__init__(*args, **kwargs)
        if not self.PATH_CHOICES:
            raise ValueError('%s.PATH_CHOICES has to be set' % self.__class__.__name__)
        # Make a copy of `validators` as otherwise we will modify the behaviour
        # of all `marshmallow.Schema`-based classes
        self.fields['op'].validators = self.fields['op'].validators + [
            validate.OneOf(self.OPERATION_CHOICES)
        ]
        self.fields['path'].validators = self.fields['path'].validators + [
            validate.OneOf(self.PATH_CHOICES)
        ]

    @validates_schema
    def validate_patch_structure(self, data):
        """
        Common validation of PATCH structure.

        Checks that 'value' is present in all operations that expect it, and
        that 'path' is present. 'path' can be absent if provided without '/'
        at the start. It is assumed that if 'path' is present, it is prepended
        with '/'; the leading '/' is removed here to simplify usage in the
        resource.
""" if 'op' not in data: raise ValidationError('operation not supported') if data['op'] not in self.NO_VALUE_OPERATIONS and 'value' not in data: raise ValidationError('value is required') if 'path' not in data: raise ValidationError('Path is required and must always begin with /') else: data['field_name'] = data['path'][1:] @classmethod def perform_patch(cls, operations, obj, state=None): """ Performs all necessary operations by calling class methods with corresponding names. """ if state is None: state = {} for operation in operations: if not cls._process_patch_operation(operation, obj=obj, state=state): log.info( '%s patching has been stopped because of unknown operation %s', obj.__class__.__name__, operation, ) raise ValidationError( 'Failed to update %s details. Operation %s could not succeed.' % (obj.__class__.__name__, operation) ) return True @classmethod def _process_patch_operation(cls, operation, obj, state): """ Args: operation (dict): one patch operation in RFC 6902 format. obj (object): an instance which is needed to be patched. state (dict): inter-operations state storage Returns: processing_status (bool): True if operation was handled, otherwise False. """ field_operaion = operation['op'] if field_operaion == cls.OP_REPLACE: return cls.replace( obj, operation['field_name'], operation['value'], state=state ) elif field_operaion == cls.OP_TEST: return cls.test(obj, operation['field_name'], operation['value'], state=state) elif field_operaion == cls.OP_ADD: return cls.add(obj, operation['field_name'], operation['value'], state=state) elif field_operaion == cls.OP_MOVE: return cls.move(obj, operation['field_name'], operation['value'], state=state) elif field_operaion == cls.OP_COPY: return cls.copy(obj, operation['field_name'], operation['value'], state=state) elif field_operaion == cls.OP_REMOVE: # This deviates from RFC 6902 to permit field and value based removal. # This is used for multiple relationship tables within houston return cls.remove( obj, operation['field_name'], operation.get('value', None), state=state ) return False @classmethod def replace(cls, obj, field, value, state): """ This is method for replace operation. It is separated to provide a possibility to easily override it in your Parameters. Args: obj (object): an instance to change. field (str): field name value (str): new value state (dict): inter-operations state storage Returns: processing_status (bool): True """ # Check for existence if not hasattr(obj, field): raise ValidationError( "Field '%s' does not exist, so it cannot be patched" % field ) # Check for Enum objects try: obj_cls = obj.__class__ obj_column = getattr(obj_cls, field) obj_column_type = obj_column.expression.type if isinstance(obj_column_type, sa.sql.sqltypes.Enum): enum_values = obj_column_type.enums if value not in enum_values: args = (field, value, enum_values) raise ValidationError( "Field '%s' is an Enum and does not recognize the value '%s'. Please select one of %r" % args ) except (AttributeError): pass # Set the value setattr(obj, field, value) return True @classmethod def test(cls, obj, field, value, state): """ This is method for test operation. It is separated to provide a possibility to easily override it in your Parameters. Args: obj (object): an instance to change. 
            field (str): field name
            value (str): new value
            state (dict): inter-operations state storage

        Returns:
            processing_status (bool): True
        """
        return getattr(obj, field) == value

    @classmethod
    def add(cls, obj, field, value, state):
        raise NotImplementedError()

    @classmethod
    def remove(cls, obj, field, value, state):
        """
        This is the method for the removal operation. It is kept separate so
        that it can easily be overridden in your Parameters.

        Args:
            obj (object): an instance to change.
            field (str): field name
            value (str): [optional] item to remove for lists; extension of RFC 6902
            state (dict): inter-operations state storage

        Returns:
            processing_status (bool): True
        """
        raise NotImplementedError()

    @classmethod
    def move(cls, obj, field, value, state):
        raise NotImplementedError()

    @classmethod
    def copy(cls, obj, field, value, state):
        raise NotImplementedError()


# noinspection PyAbstractClass
class PatchJSONParametersWithPassword(PatchJSONParameters):
    """
    Base parameters class for handling PATCH arguments according to RFC 6902
    with specific handling for password validation for some sensitive fields.

    Provides test, add and remove methods.
    """

    # Some classes may require all fields to be password validated, some may require some.
    # If the SENSITIVE_FIELDS array is left as None, all fields are password protected
    SENSITIVE_FIELDS = None

    @classmethod
    def test(cls, obj, field, value, state):
        from wbia.web.extensions.api import abort

        if field == 'current_password':
            if current_user.password == value:
                state['current_password'] = value
                return True
            else:
                abort(code=HTTPStatus.FORBIDDEN, message='Wrong password')

        return PatchJSONParameters.test(obj, field, value, state)

    @classmethod
    def add(cls, obj, field, value, state):
        """
        Some or all fields require extra permissions to be changed.
        """
        from wbia.web.extensions.api import abort

        if not cls.SENSITIVE_FIELDS or field in cls.SENSITIVE_FIELDS:
            if 'current_password' not in state:
                abort(
                    code=HTTPStatus.FORBIDDEN,
                    message='Updating database requires `current_password` test operation.',
                )

    @classmethod
    def remove(cls, obj, field, value, state):
        from wbia.web.extensions.api import abort

        if not cls.SENSITIVE_FIELDS or field in cls.SENSITIVE_FIELDS:
            if 'current_password' not in state:
                abort(
                    code=HTTPStatus.FORBIDDEN,
                    message='Updating database requires `current_password` test operation.',
                )

    @classmethod
    def replace(cls, obj, field, value, state):
        from wbia.web.extensions.api import abort

        if not cls.SENSITIVE_FIELDS or field in cls.SENSITIVE_FIELDS:
            if 'current_password' not in state:
                abort(
                    code=HTTPStatus.FORBIDDEN,
                    message='Updating database requires `current_password` test operation.',
                )
[ "marshmallow.validate.OneOf", "marshmallow.ValidationError", "flask_marshmallow.base_fields.String", "wbia.web.extensions.api.abort", "six.itervalues", "flask_marshmallow.base_fields.Raw", "logging.getLogger" ]
[((332, 359), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (349, 359), False, 'import logging\n'), ((2271, 2304), 'flask_marshmallow.base_fields.String', 'base_fields.String', ([], {'required': '(True)'}), '(required=True)\n', (2289, 2304), False, 'from flask_marshmallow import Schema, base_fields\n'), ((2374, 2407), 'flask_marshmallow.base_fields.String', 'base_fields.String', ([], {'required': '(True)'}), '(required=True)\n', (2392, 2407), False, 'from flask_marshmallow import Schema, base_fields\n'), ((2461, 2492), 'flask_marshmallow.base_fields.Raw', 'base_fields.Raw', ([], {'required': '(False)'}), '(required=False)\n', (2476, 2492), False, 'from flask_marshmallow import Schema, base_fields\n'), ((1594, 1617), 'six.itervalues', 'itervalues', (['self.fields'], {}), '(self.fields)\n', (1604, 1617), False, 'from six import itervalues\n'), ((3772, 3814), 'marshmallow.ValidationError', 'ValidationError', (['"""operation not supported"""'], {}), "('operation not supported')\n", (3787, 3814), False, 'from marshmallow import validate, validates_schema, ValidationError\n'), ((3913, 3949), 'marshmallow.ValidationError', 'ValidationError', (['"""value is required"""'], {}), "('value is required')\n", (3928, 3949), False, 'from marshmallow import validate, validates_schema, ValidationError\n'), ((4000, 4064), 'marshmallow.ValidationError', 'ValidationError', (['"""Path is required and must always begin with /"""'], {}), "('Path is required and must always begin with /')\n", (4015, 4064), False, 'from marshmallow import validate, validates_schema, ValidationError\n'), ((7013, 7090), 'marshmallow.ValidationError', 'ValidationError', (['("Field \'%s\' does not exist, so it cannot be patched" % field)'], {}), '("Field \'%s\' does not exist, so it cannot be patched" % field)\n', (7028, 7090), False, 'from marshmallow import validate, validates_schema, ValidationError\n'), ((3078, 3116), 'marshmallow.validate.OneOf', 'validate.OneOf', (['self.OPERATION_CHOICES'], {}), '(self.OPERATION_CHOICES)\n', (3092, 3116), False, 'from marshmallow import validate, validates_schema, ValidationError\n'), ((3215, 3248), 'marshmallow.validate.OneOf', 'validate.OneOf', (['self.PATH_CHOICES'], {}), '(self.PATH_CHOICES)\n', (3229, 3248), False, 'from marshmallow import validate, validates_schema, ValidationError\n'), ((4722, 4849), 'marshmallow.ValidationError', 'ValidationError', (["('Failed to update %s details. Operation %s could not succeed.' % (obj.\n __class__.__name__, operation))"], {}), "(\n 'Failed to update %s details. Operation %s could not succeed.' 
% (obj.\n __class__.__name__, operation))\n", (4737, 4849), False, 'from marshmallow import validate, validates_schema, ValidationError\n'), ((10069, 10127), 'wbia.web.extensions.api.abort', 'abort', ([], {'code': 'HTTPStatus.FORBIDDEN', 'message': '"""Wrong password"""'}), "(code=HTTPStatus.FORBIDDEN, message='Wrong password')\n", (10074, 10127), False, 'from wbia.web.extensions.api import abort\n'), ((10533, 10643), 'wbia.web.extensions.api.abort', 'abort', ([], {'code': 'HTTPStatus.FORBIDDEN', 'message': '"""Updating database requires `current_password` test operation."""'}), "(code=HTTPStatus.FORBIDDEN, message=\n 'Updating database requires `current_password` test operation.')\n", (10538, 10643), False, 'from wbia.web.extensions.api import abort\n'), ((10948, 11058), 'wbia.web.extensions.api.abort', 'abort', ([], {'code': 'HTTPStatus.FORBIDDEN', 'message': '"""Updating database requires `current_password` test operation."""'}), "(code=HTTPStatus.FORBIDDEN, message=\n 'Updating database requires `current_password` test operation.')\n", (10953, 11058), False, 'from wbia.web.extensions.api import abort\n'), ((11364, 11474), 'wbia.web.extensions.api.abort', 'abort', ([], {'code': 'HTTPStatus.FORBIDDEN', 'message': '"""Updating database requires `current_password` test operation."""'}), "(code=HTTPStatus.FORBIDDEN, message=\n 'Updating database requires `current_password` test operation.')\n", (11369, 11474), False, 'from wbia.web.extensions.api import abort\n'), ((7553, 7674), 'marshmallow.ValidationError', 'ValidationError', (['("Field \'%s\' is an Enum and does not recognize the value \'%s\'. Please select one of %r"\n % args)'], {}), '(\n "Field \'%s\' is an Enum and does not recognize the value \'%s\'. Please select one of %r"\n % args)\n', (7568, 7674), False, 'from marshmallow import validate, validates_schema, ValidationError\n')]
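A hypothetical end-to-end sketch of the RFC 6902 dispatch implemented by PatchJSONParameters.perform_patch above: after validation, each {'op', 'field_name', 'value'} dict is routed to the class method named after its op. SimpleNamespace and the 'email' field are illustrative stand-ins; real subclasses set PATH_CHOICES and may override replace or test.

from types import SimpleNamespace

operations = [
    # after validation, 'path': '/email' becomes 'field_name': 'email'
    {'op': 'test', 'field_name': 'email', 'value': 'old@example.org'},
    {'op': 'replace', 'field_name': 'email', 'value': 'new@example.org'},
]

obj = SimpleNamespace(email='old@example.org')
state = {}
for operation in operations:
    # mirrors the test/replace branches of _process_patch_operation
    if operation['op'] == 'test':
        assert getattr(obj, operation['field_name']) == operation['value']
    elif operation['op'] == 'replace':
        setattr(obj, operation['field_name'], operation['value'])

print(obj.email)  # new@example.org

Running the 'test' operation first is what lets PatchJSONParametersWithPassword gate sensitive replacements on a prior current_password check stored in state.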
#!/usr/bin/env python
# coding: utf-8

# <img style="float: left;" src="earth-lab-logo-rgb.png" width="150" height="150" />
#
# # Earth Analytics Education - EA Python Course Spring 2021

# ## Important - Assignment Guidelines
#
# 1. Before you submit your assignment to GitHub, make sure to run the entire notebook with a fresh kernel. To do this first, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart & Run All)
# 2. Always replace the `raise NotImplementedError()` code with your code that addresses the activity challenge. If you don't replace that code, your notebook will not run.
#
# ```
# # YOUR CODE HERE
# raise NotImplementedError()
# ```
#
# 3. Any open ended questions will have a "YOUR ANSWER HERE" within a markdown cell. Replace that text with your answer also formatted using Markdown.
# 4. **DO NOT RENAME THIS NOTEBOOK File!** If the file name changes, the autograder will not grade your assignment properly.
# 5. When you create a figure, comment out `plt.show()` to ensure the autograder can grade your plots. For figure cells, DO NOT DELETE the code that says `DO NOT REMOVE LINE BELOW`.
#
# ```
# ### DO NOT REMOVE LINE BELOW ###
# student_plot1_ax = nb.convert_axes(plt)
# ```
#
# * Only include the package imports, code, and outputs that are required to run your homework assignment.
# * Be sure that your code can be run on any operating system. This means that:
#    1. the data should be downloaded in the notebook to ensure it's reproducible
#    2. all paths should be created dynamically using the `os.path.join`
#
# ## Follow the PEP 8 Syntax Guidelines & Documentation
#
# * Run the `autopep8` tool on all cells prior to submitting (HINT: hit shift + the tool to run it on all cells at once!)
# * Use clear and expressive names for variables.
# * Organize your code to support readability.
# * Check for code line length
# * Use comments and white space sparingly where it is needed
# * Make sure all python imports are at the top of your notebook and follow PEP 8 order conventions
# * Spell check your Notebook before submitting it.
#
# For all of the plots below, be sure to do the following:
#
# * Make sure each plot has a clear TITLE and, where appropriate, label the x and y axes. Be sure to include UNITS in your labels.
#
# ### Add Your Name Below
# **Your Name:** <NAME>

# <img style="float: left;" src="colored-bar.png"/>

# ---
# # Week 04 and 05 Homework - Automate NDVI Workflow
#
# For this assignment, you will write code to generate a plot of the mean normalized difference vegetation index (NDVI) for two different sites in the United States across one year of data:
#
# * San Joaquin Experimental Range (SJER) in Southern California, United States
# * Harvard Forest (HARV) in the Northeastern United States
#
# The data that you will use for this week is available from **earthpy** using the following download:
#
# `et.data.get_data('ndvi-automation')`
#
# ## Assignment Goals
#
# Your goal in this assignment is to create the most efficient and concise workflow that you can that allows for:
#
# 1. The code to scale if you added new sites or more time periods to the analysis.
# 2. Someone else to understand your workflow.
# 3. The LEAST and most efficient (i.e. runs fast, minimize repetition) amount of code that completes the task.
#
# ### HINTS
#
# * Remove values outside of the landsat valid range of values as specified in the metadata, as needed.
# * Keep any output files SEPARATE FROM input files. Outputs should be created in an outputs directory that is created in the code (if needed) and/or tested for.
# * Use the functions that we demonstrated during class to make your workflow more efficient.
# * BONUS - if you choose - you can export your data as a csv file. You will get bonus points for doing this.
#
# ## Assignment Requirements
#
# Your submission to the GitHub repository should include:
# * This Jupyter Notebook file (.ipynb) with:
#     * The code to create a plot of mean NDVI across a year for 2 NEON Field Sites:
#         * NDVI on the y axis and formatted dates on the x for both NEON sites on one figure/axis object
#         * The **data should be cleaned to remove the influence of clouds**. See the [earthdatascience website for an example of what your plot might look like with and without removal of clouds](https://www.earthdatascience.org/courses/earth-analytics-python/create-efficient-data-workflows/).
#     * BONUS: Create one output `.csv` file that has 3 columns - NDVI, Date and Site Name - with values for SJER and HARV.
#
# Your notebook should:
# * Have *at least* 2 well documented and well named functions with docstrings.
# * Include a Markdown cell at the top of the notebook that outlines the overall workflow using pseudocode (i.e. plain language, not code)
# * Include additional Markdown cells throughout the notebook to describe:
#     * the data that you used - and where it is from
#     * how the data are being processed
#     * how the code is optimized to run fast and be more concise

# Replace this cell with your pseudocode for this workflow
#
# If you happen to be a diagram person a diagram is ok too
#
# 1. Import required packages
# 2. Download the data
# 3. Set the working directory.
# 4. Create paths to sites
# 5. Create cloud mask – get the cloud pixel values from earthpy
# 6. Create a function to extract site name and datetime from directory path names, using the path to the directory that contains the information of interest and the date and site name location within that directory path as index lists as the function parameters
# 7. Create a function that will open, crop and specify valid ranges of a landsat band, using the path to the band, the cropping extent, and the valid range as function parameters
# 8. Create dataframe of mean NDVI
#     a. Create an empty list that will hold site, date, and mean NDVI information
#     b. Create a for loop to loop through site paths
#         i. Get list of scene paths of both sites using glob
#         ii. Get shapefiles for each site using glob and pulling out index 0
#         iii. Open shapefiles
#         iv. Create a nested for loop to loop through each scene
#             1. Go through each scene directory and pull out date and site information using the function created earlier in the notebook
#             2. Go through each scene and create sorted list of bands in each scene using glob. Only bands 4 and 5 are needed for calculating NDVI
#             3. Go through each scene and get qa pixel layers using glob and pulling out index 0. This will pop out each qa pixel layer as the loop loops through each scene so that it's not in list form and can be worked with
#             4. Open the qa layer
#             5. Crop the qa layer using the shapefile opened in the first level of the loop
#             6. Create an empty list that will hold bands 4 and 5 once they are cleaned and free of clouds
#             7. Create another for loop inside the already nested loop
#                 a. Clean the bands using the previously created function that will open the band, crop it using its associated shapefile, and specify landsat's valid range
#                 b. Apply cloud mask to band
#                 c. Append the cloud-free bands to a list. This list will be used to calculate mean NDVI
#             8. Calculate mean NDVI
#             9. Append the mean NDVI to the list holding the site information (the function that pulled site and date information from scene directory paths created a list as the output)
#             10. Append this list of lists to the empty list created outside the for loop at the top
# 9. Convert list into a pandas dataframe
# 10. Set index on date
# 11. Create figure
#     a. Set figure space
#     b. Create overall figure title
#     c. Create a for loop to loop through dataframe and create individual dataframes grouped by site for plotting
#     d. Set axes labels
#     e. Format date on x axis
#     f. Create a legend
# 12. Drop na values from dataframe for exporting
# 13. Export pandas dataframe to .csv file
# 14. Create a figure that displays mean NDVI at the HARV and SJER locations over a year, with mean NDVI on the y-axis and the month on the x-axis using the pandas dataframe created in the previous step.

# In[1]:


# Autograding imports - do not modify this cell
import matplotcheck.autograde as ag
import matplotcheck.notebook as nb
import matplotcheck.timeseries as ts
from datetime import datetime


# In[2]:


# Import needed packages in PEP 8 order
# and no unused imports listed (10 points total)
import os
from glob import glob

import matplotlib.pyplot as plt
import pandas as pd
import rioxarray as rxr
import xarray as xr
import geopandas as gpd
import earthpy as et
import earthpy.mask as em
from datetime import datetime
import numpy as np
from matplotlib.dates import DateFormatter

# Download the data
et.data.get_data('ndvi-automation')

# Create a path to the directory
directory_path = os.path.join(et.io.HOME, "earth-analytics", "data")

# Set working directory
os.chdir(directory_path)


# In[3]:


# DO NOT MODIFY THIS CELL
# Tests that the working directory is set to earth-analytics/data

path = os.path.normpath(os.getcwd())
student_wd_parts = path.split(os.sep)

if student_wd_parts[-2:] == ['earth-analytics', 'data']:
    print("\u2705 Great - it looks like your working directory is set correctly to ~/earth-analytics/data")
else:
    print("\u274C Oops, the autograder will not run unless your working directory is set to earth-analytics/data")


# In[4]:


# Create paths to sites
site_paths = glob(os.path.join("ndvi-automation", "sites", "*"))
site_paths

# Create cloud mask
# Get the cloud pixel values from earthpy
high_cloud_confidence = (
    em.pixel_flags["pixel_qa"]["L8"]["High Cloud Confidence"])
cloud = em.pixel_flags["pixel_qa"]["L8"]["Cloud"]
cloud_shadow = em.pixel_flags["pixel_qa"]["L8"]["Cloud Shadow"]

all_masked_values = cloud_shadow + cloud + high_cloud_confidence


# # Figure 1: Plot 1 - Mean NDVI For Each Site Across the Year (50 points)
#
# Create a plot of the mean normalized difference vegetation index (NDVI) for the two different sites in the United States across the year:
#
# * NDVI on the y axis and formatted dates on the x for both NEON sites on one figure/axis object.
# * Each site should be identified with a different color in the plot and legend.
# * The final plot **data should be cleaned to remove the influence of clouds**.
# * Be sure to include appropriate title and axes labels.
#
# Add additional cells as needed for processing data (e.g. defining functions, etc), but be sure to:
# * follow the instructions in the code cells that have been provided to ensure that you are able to use the sanity check tests that are provided.
# * include only the plot code in the cell identified for the final plot code below

# ## Task 1:
#
# In the cell below, create a single dataframe containing MEAN NDVI, the site name,
# and the date of the data for the HARV site
# scene `HARV/landsat-crop/LC080130302017031701T1-SC20181023151837`. The column names for the final
# DataFrame should be `mean_ndvi` and `site`, and the data should be **indexed on the date**.
#
# Use the functions that we reviewed in class (or create your own versions of them) to implement your code

# ### In the Cell below Place All Functions Needed to Run this Notebook (20 points)

# In[5]:


### DO NOT REMOVE THIS LINE OR EDIT / MOVE THIS CELL ###
start_time = datetime.now()


# Create functions to extract site name and datetime from directory path names and open, crop and specify valid ranges of a landsat band.

# In[6]:


# In this cell place all of the functions needed to run your notebook
# You will be graded here on function application, docstrings, efficiency so ensure
# All functions are placed here!

# Function to extract sitename and datetime from directory path names
def extract_sitename_date(directory_path, sitename_location, datetime_location):
    """Extract sitename and datetime from directory path name.

    Parameters
    -----------
    directory_path : string
        A path to the directory name
    sitename_location : index list
        Index of sitename location in directory path name
    datetime_location : index list
        Index of datetime location in directory path name

    Returns
    -----------
    list : list of site names and datetime information
    """
    # Create an empty list to append sitename and date information
    site_name_date_list = []

    # Assign datetime location to an object
    date_location = directory_path[datetime_location[0]: datetime_location[1]]

    # Specify datetime format
    date_format = "%Y%m%d"

    # Use datetime and the format to create the date variable
    date = datetime.strptime(date_location, date_format)

    # Assign sitename information to a variable
    site = directory_path[sitename_location[0]: sitename_location[1]]

    # Append site variable to list
    site_name_date_list.append(site)

    # Append date variable to list
    site_name_date_list.append(date)

    return site_name_date_list


# Function to clean landsat bands
def open_clean_bands(band_path, crop_extent, valid_range=None):
    """Open, crop and specify valid ranges of a landsat band.

    Parameters
    -----------
    band_path : string
        A path to the array to be opened
    crop_extent : geopandas GeoDataFrame
        A geopandas dataframe with the boundary used to crop the band
    valid_range : tuple (optional)
        A tuple of min and max range of values for the data.
Default = None Returns ----------- arr : xarray DataArray An xarray DataArray with values that should be masked set to 1 for True (Boolean) """ # TODO add tests to ensure the arrays are the same .shape band = rxr.open_rasterio(band_path, masked=True).rio.clip(crop_extent.geometry, from_disk=True).squeeze() # Only run this step if a valid range tuple is provided if valid_range: mask = ((band < valid_range[0]) | (band > valid_range[1])) band = band.where(~xr.where(mask, True, False)) return band # In[7]: # Create dataframe of mean NDVI in this cell using the functions created above # Create path to HARV data harv_path = glob(os.path.join("ndvi-automation", "sites", "HARV")) # Open and clean all HARV bands harv_scene_info = [] # Create a loop to establish the scene directory path # glob is not necessary here, however it is for the larger workflow, which is # what is being demonstrated here for path in harv_path: # Establish the scene directory path that is of interest scene_path = glob(os.path.join(path, "landsat-crop", "LC080130302017031701T1-SC20181023151837")) # Set the path to the associated shapefile bound = os.path.join(path, "vector", "HARV-crop.shp") # Open the shapefile harv_boundary = gpd.read_file(bound) # Create a nested for loop to be able to work with each .tif file (band) # in the scene, again this is necessary when working with multiple scenes for tif in scene_path: # Get site and date info from the scene directory path site_info = extract_sitename_date(tif, [22, 26], [50, 58]) # Sort the bands using glob so that they are in the right order # Only bands 4 and 5 are needed harv_bands = sorted(glob(os.path.join(tif, "*band[4-5]*"))) # Set the path to the qa layer in the scene directory qa_layer_path = os.path.join(tif, "LC08_L1TP_013030_20170317_20170328_01_T1_pixel_qa.tif") # Open the qa layer opened_layer = rxr.open_rasterio(qa_layer_path, masked=True) # Crop the qa layer using the boundary associated with the scene and # opened in a previous step cropped_layer = opened_layer.rio.clip(harv_boundary.geometry).squeeze() # Create an empty list to store bands after they are cleaned of clouds tif_bands = [] # Create an additional loop that is nested inside the other two that will # be used to work with each band inside the scene directory for a_band in harv_bands: # Clean the band using the previously created function # The function opens, crops, and sets landsat's valid range clean_band = open_clean_bands( a_band, harv_boundary, valid_range=(0, 10000)) # Apply the cloud mask to the clean band cloud_free_band = clean_band.where( ~cropped_layer.isin(all_masked_values)) # The band to the empty list that will be used to calculate mean NDVI tif_bands.append(cloud_free_band) # Calculate mean NDVI using the list that is storing the clean bands # that are free of clouds mean_ndvi = np.nanmean( (tif_bands[1]-tif_bands[0]) / (tif_bands[1]+tif_bands[0])) # Append the mean NDVI to the list that was the result of the function # that grabbed site and date information from the scene directory path name site_info.append(mean_ndvi) # Append this lists of lists to the list outside of the nested for # loops at the top harv_scene_info.append(site_info) # Convert list into a pandas dataframe harv_info_df = pd.DataFrame(harv_scene_info, columns=[ "site", "date", "mean_ndvi"]) # Set index harv_date_as_index = harv_info_df.set_index("date") # Call dataframe harv_date_as_index # In[8]: # This cell is testing your data output above student_ndvi_ts_single_site = _ single_scene_points = 0 # Ensure the data 
is stored in a dataframe. if isinstance(student_ndvi_ts_single_site, pd.DataFrame): print('\u2705 Your data is stored in a DataFrame!') single_scene_points += 1 else: print('\u274C It appears your data is not stored in a DataFrame. ', 'To see what type of object your data is stored in, check its type with type(object)') # Ensure that the date column is the index if isinstance(student_ndvi_ts_single_site.index, pd.core.indexes.datetimes.DatetimeIndex): print('\u2705 You have the index set to the date column!') single_scene_points += 2 else: print('\u274C You do not have the index set to the date column.') # Ensure that the date column is datetime if isinstance(student_ndvi_ts_single_site.index[0], pd._libs.tslibs.timestamps.Timestamp): print('\u2705 The data in your date column is datetime!') single_scene_points += 2 else: print('\u274C The data in your date column is not datetime.') # Ensure the site name is correct if student_ndvi_ts_single_site.site.values[0] == 'HARV': print('\u2705 You have the correct site name!') single_scene_points += 5 else: print('\u274C You do not have the correct site name.') if np.allclose(0.281131628228094, student_ndvi_ts_single_site.mean_ndvi.values[0]): print('\u2705 You have the correct mean NDVI value!') single_scene_points += 5 else: print('\u274C You do not have the correct mean ndvi value.') print("\n \u27A1 You received {} out of 15 points for creating a dataframe.".format( single_scene_points)) single_scene_points # ## Task 2: # # In the cell below, process all of the landsat scenes. Create a DataFrame that contains the following # information for each scene # # # | | index | site | mean_ndvi | # |---|---|---|---| # | Date | | | | # | 2017-01-07 | 0 | SJER | .4 | # # Be sure to call your dataframe at the end of the cell to ensure autograding works. # HINT: FOR THIS STEP, leave any rows containing missing values (`NAN`). # 1. Create dataframe of mean NDVI a. Create an empty list that will hold site, date, and mean NDVI information b. Create a for loop to loop through site paths # i. Get list of scene paths of both sites using glob # ii. Get shapefiles for each site using glob and pulling out index 0 # iii. Open shapefiles # iv. Create a nested for loop to loop through each scene # 1. Go through each scene directory and pull out date and site information using the function created earlier in the notebook # 2. Go through each scene and create sorted list of bands in each scene using glob. Only bands 4 and 5 are needed for calculating NDVI # 3. Go through each scene and get qa pixel layers using glob and pulling out index 0. This will pop out each qa pixel layer as the loop loops through each scene so that it's not in list form and can be worked with # 4. Open the qa layer # 5. Crop the qa layer using the shapefile opened in the first layer of the loop # 6. Create an empty list that will hold bands 4 and 5 once they are cleaned and free of clouds # 7. Create another for loop inside the already nested loop # a. Clean the bands using the previously created function that will open the band, crop it using its associate shapefile, and specify landsat's valid range # b. Apply cloud mask to band # c. Append list so that it holds the cloud free bands. This list will be used to calculate mean NDVI # 8. Calculate mean NDVI # 9. Append the mean NDVI to the list holding the site information (the function that pulled site and date information from scene directory paths created a list as the output) # 10. 
Append this list of lists to the empty list created outside the for loop at the top # # The below cell runs quickly and efficiently by using loops and functions to process data, which minimize repetition. # In[9]: # Create dataframe of NDVI including the cleaning data to deal with clouds # Create an empty list that will hold site, date, and mean ndvi information all_site_info = [] # Create a for loop to loop through site paths for site in site_paths: # Get list of scene paths of both sites using glob dirs = glob(os.path.join(site, "landsat-crop", "*")) # Get shapefiles for each site using glob and pulling out index 0 bounds = glob(os.path.join(site, "vector", "*-crop.shp"))[0] # Open shapefiles opened_bound = gpd.read_file(bounds) # Create a nested for loop to loop through each scene for all_dirs in dirs: # Go through each scene directory and pull out date and site # information using the function created earlier in the notebook site_info = extract_sitename_date(all_dirs, [22, 26], [50, 58]) # Go through each scene and create sorted list of bands in each scene # using glob. Only bands 4 and 5 are needed for calculating NDVI scene_bands = sorted(glob(os.path.join(all_dirs, "*band[4-5]*"))) # Go through each scene and get qa pixel layers using glob and pulling # out index 0. This will pop out each qa pixel layer as the loop loops # through each scene so that it's not in list form and can be worked with qa_layer_paths = glob(os.path.join(all_dirs, "*pixel_qa*"))[0] # Open the qa layer opened_layer = rxr.open_rasterio(qa_layer_paths, masked=True) # Crop the qa layer using the shapefile opened in the first layer of # the loop cropped_layer = opened_layer.rio.clip(opened_bound.geometry).squeeze() # Create an empty list that will hold bands 4 and 5 once they are # cleaned and free of clouds site_bands = [] # Create another for loop inside the already nested loop for band in scene_bands: # Clean the bands using the previously created function that will # open the band, crop it using its associate shapefile, and specify # landsat's valid range clean_band = open_clean_bands( band, opened_bound, valid_range=(0, 10000)) # Apply cloud mask to band cloud_free_band = clean_band.where( ~cropped_layer.isin(all_masked_values)) # Append list so that it holds the cloud free bands. This list will # be used to calculate mean NDVI site_bands.append(cloud_free_band) # Calculate mean NDVI mean_ndvi = np.nanmean( (site_bands[1]-site_bands[0]) / (site_bands[1]+site_bands[0])) # Append the mean NDVI to the list holding the site information (the # function that pulled site and date information from scene directory # paths created a list as the output) site_info.append(mean_ndvi) # Append this list of lists to the empty list created outside the for # loop at the top all_site_info.append(site_info) # Convert list into a pandas dataframe site_info_df = pd.DataFrame(all_site_info, columns=[ "site", "date", "mean_ndvi"]) # Set index on date indexed_site_info_df = site_info_df.set_index("date") # Call dataframe indexed_site_info_df # In[10]: # Last sanity check before creating your plot (10 points) # Ensure that you call your dataframe at the bottom of the cell above # and that it has columns called: mean_ndvi and site # Ensure the data is stored in a dataframe. student_ndvi_df = _ df_points = 0 if isinstance(student_ndvi_df, pd.DataFrame): print('\u2705 Your data is stored in a DataFrame!') df_points += 2 else: print('\u274C It appears your data is not stored in a DataFrame. 
', 'To see what type of object your data is stored in, check its type with type(object)') # Check that dataframe contains the appropriate number of NAN values if student_ndvi_df.mean_ndvi.isna().sum() == 15: print('\u2705 Correct number of masked data values!') df_points += 2 else: print('\u274C The amount of null data in your dataframe is incorrect.') # Ensure that the date column is the index if isinstance(student_ndvi_df.index, pd.core.indexes.datetimes.DatetimeIndex): print('\u2705 You have the index set to the date column!') df_points += 3 else: print('\u274C You do not have the index set to the date column.') # Ensure that the date column is datetime if isinstance(student_ndvi_df.index[0], pd._libs.tslibs.timestamps.Timestamp): print('\u2705 The data in your date column is datetime!') df_points += 3 else: print('\u274C The data in your date column is not datetime.') # Output for timer, # DO NOT MODIFY end_time = datetime.now() total_time = end_time - start_time print( "Your total run time for processing the data was {0}.".format(total_time)) print("\n \u27A1 You received {} out of 10 points for creating a dataframe.".format( df_points)) df_points # Create a figure that displays mean NDVI at the HARV and SJER locations over a year, with mean NDVI on the y-axis and the month on the x-axis using the pandas dataframe created above. # In[11]: # Add only the plot code to this cell # Set figure space fig, ax = plt.subplots(figsize=(12, 7)) # Create overall figure title fig.suptitle( "Mean Normalized Difference Vegetaion Index (NDVI) \nJan 2017 - Dec 2017 \nLandsat 8 with Clouds Removed") # Create a for loop to loop through dataframe and create individual dataframes # grouped by site for plotting for site, site_name_df in indexed_site_info_df.dropna().groupby("site"): ax.plot(site_name_df.index, site_name_df.mean_ndvi, marker="o", label=site) # Set axes labels ax.set(xlabel="Month", ylabel="Mean NDVI") # Format date on x axis ax.xaxis.set_major_formatter(DateFormatter("%b")) # Create a legend ax.legend() ### DO NOT REMOVE LINES BELOW ### final_masked_solution = nb.convert_axes(plt, which_axes="current") # In[12]: # Ignore this cell for the autograding tests # In[13]: # Ignore this cell for the autograding tests # # Question 1 (10 points) # # Imagine that you are planning NEON’s upcoming flight season to capture remote sensing data in these locations and want to ensure that you fly the area when the vegetation is the most green. # # When would you recommend the flights take place for each site? # # Answer the question in 2-3 sentences in the Markdown cell below. # I would recommend that the flights take place in April for the SJER site. I would recommend that HARV flights take place in July. # # Question 2 (10 points) # # How could you modify your workflow to look at vegetation changes over time in each site? # # Answer the question in 2-3 sentences in the Markdown cell below. # I could possibly create NDVI difference maps to examine changes between time points (months, years, etc.). Due to the way my code is set up, I could also continue to add data to the HARV and SJER directories as it becomes available and run this same code to continue to monitor changes. # # Do not edit this cell! (10 points) # # The notebook includes: # * additional Markdown cells throughout the notebook to describe: # * the data that you used - and where it is from # * how data are being processing # * how the code is optimized to run fast and be more concise # # Do not edit this cell! 
(20 points) # # The notebook will also be checked for overall clean code requirements as specified at the **top** of this notebook. Some of these requirements include (review the top cells for more specifics): # # * Notebook begins at cell [1] and runs on any machine in its entirety. # * PEP 8 format is applied throughout (including lengths of comment and code lines). # * No additional code or imports in the notebook that is not needed for the workflow. # * Notebook is fully reproducible. This means: # * reproducible paths using the os module. # * data downloaded using code in the notebook. # * all imports at top of notebook. # ## BONUS - Export a .CSV File to Share (10 points possible) # # This is optional - if you export a **.csv** file with the columns specified above: Site, Date and NDVI Value you can get an additional 10 points. # # * FULL CREDIT: File exists in csv format and contains the columns specified. # We will check your github repo for this file! # # In[14]: # Drop na values from dataframe for exporting no_nan_df = indexed_site_info_df.dropna() # Export pandas dataframe to csv file # Reproducible output no_nan_df.to_csv(os.path.join(directory_path, "ndvi-automation", "outputs", "ndvi_df.csv")) # Export to my local repository # no_nan_df.to_csv(os.path.join(et.io.HOME, "earth-analytics", # "2022_spring", # "assignments", # "04_assignment", # "ea-2022-04-ndvi-automation-rami8797", # "ndvi_df.csv"))
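
# In[15]:

# Illustrative sketch (an addition, not part of the graded workflow): the
# scene-level statistic computed above is the standard NDVI formula,
# (NIR - Red) / (NIR + Red), averaged with np.nanmean so that cloud-masked
# (NaN) pixels are ignored. The toy arrays below are hypothetical values,
# not data from the HARV or SJER scenes.
toy_red = np.array([[0.2, 0.3], [np.nan, 0.25]])   # band 4 (red), NaN = cloud
toy_nir = np.array([[0.5, 0.6], [np.nan, 0.55]])   # band 5 (near infrared)

# Single scene-level mean, analogous to the value appended to site_info
toy_mean_ndvi = np.nanmean((toy_nir - toy_red) / (toy_nir + toy_red))
toy_mean_ndvi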
[ "pandas.DataFrame", "earthpy.data.get_data", "rioxarray.open_rasterio", "os.getcwd", "numpy.allclose", "matplotcheck.notebook.convert_axes", "datetime.datetime.now", "matplotlib.pyplot.subplots", "datetime.datetime.strptime", "matplotlib.dates.DateFormatter", "numpy.nanmean", "xarray.where", "os.path.join", "os.chdir", "geopandas.read_file" ]
[((9028, 9063), 'earthpy.data.get_data', 'et.data.get_data', (['"""ndvi-automation"""'], {}), "('ndvi-automation')\n", (9044, 9063), True, 'import earthpy as et\n'), ((9115, 9166), 'os.path.join', 'os.path.join', (['et.io.HOME', '"""earth-analytics"""', '"""data"""'], {}), "(et.io.HOME, 'earth-analytics', 'data')\n", (9127, 9166), False, 'import os\n'), ((9192, 9216), 'os.chdir', 'os.chdir', (['directory_path'], {}), '(directory_path)\n', (9200, 9216), False, 'import os\n'), ((11636, 11650), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11648, 11650), False, 'from datetime import datetime\n'), ((17529, 17597), 'pandas.DataFrame', 'pd.DataFrame', (['harv_scene_info'], {'columns': "['site', 'date', 'mean_ndvi']"}), "(harv_scene_info, columns=['site', 'date', 'mean_ndvi'])\n", (17541, 17597), True, 'import pandas as pd\n'), ((19052, 19131), 'numpy.allclose', 'np.allclose', (['(0.281131628228094)', 'student_ndvi_ts_single_site.mean_ndvi.values[0]'], {}), '(0.281131628228094, student_ndvi_ts_single_site.mean_ndvi.values[0])\n', (19063, 19131), True, 'import numpy as np\n'), ((25027, 25093), 'pandas.DataFrame', 'pd.DataFrame', (['all_site_info'], {'columns': "['site', 'date', 'mean_ndvi']"}), "(all_site_info, columns=['site', 'date', 'mean_ndvi'])\n", (25039, 25093), True, 'import pandas as pd\n'), ((26693, 26707), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (26705, 26707), False, 'from datetime import datetime\n'), ((27210, 27239), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (27222, 27239), True, 'import matplotlib.pyplot as plt\n'), ((27894, 27936), 'matplotcheck.notebook.convert_axes', 'nb.convert_axes', (['plt'], {'which_axes': '"""current"""'}), "(plt, which_axes='current')\n", (27909, 27936), True, 'import matplotcheck.notebook as nb\n'), ((9347, 9358), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9356, 9358), False, 'import os\n'), ((9740, 9785), 'os.path.join', 'os.path.join', (['"""ndvi-automation"""', '"""sites"""', '"""*"""'], {}), "('ndvi-automation', 'sites', '*')\n", (9752, 9785), False, 'import os\n'), ((12972, 13012), 'datetime.datetime.strptime', 'datetime.strptime', (['date_location', 'format'], {}), '(date_location, format)\n', (12989, 13012), False, 'from datetime import datetime\n'), ((14468, 14516), 'os.path.join', 'os.path.join', (['"""ndvi-automation"""', '"""sites"""', '"""HARV"""'], {}), "('ndvi-automation', 'sites', 'HARV')\n", (14480, 14516), False, 'import os\n'), ((15005, 15050), 'os.path.join', 'os.path.join', (['path', '"""vector"""', '"""HARV-crop.shp"""'], {}), "(path, 'vector', 'HARV-crop.shp')\n", (15017, 15050), False, 'import os\n'), ((15096, 15116), 'geopandas.read_file', 'gpd.read_file', (['bound'], {}), '(bound)\n', (15109, 15116), True, 'import geopandas as gpd\n'), ((22476, 22497), 'geopandas.read_file', 'gpd.read_file', (['bounds'], {}), '(bounds)\n', (22489, 22497), True, 'import geopandas as gpd\n'), ((27783, 27802), 'matplotlib.dates.DateFormatter', 'DateFormatter', (['"""%b"""'], {}), "('%b')\n", (27796, 27802), False, 'from matplotlib.dates import DateFormatter\n'), ((30529, 30602), 'os.path.join', 'os.path.join', (['directory_path', '"""ndvi-automation"""', '"""outputs"""', '"""ndvi_df.csv"""'], {}), "(directory_path, 'ndvi-automation', 'outputs', 'ndvi_df.csv')\n", (30541, 30602), False, 'import os\n'), ((14845, 14922), 'os.path.join', 'os.path.join', (['path', '"""landsat-crop"""', '"""LC080130302017031701T1-SC20181023151837"""'], {}), "(path, 'landsat-crop', 
'LC080130302017031701T1-SC20181023151837')\n", (14857, 14922), False, 'import os\n'), ((15696, 15770), 'os.path.join', 'os.path.join', (['tif', '"""LC08_L1TP_013030_20170317_20170328_01_T1_pixel_qa.tif"""'], {}), "(tif, 'LC08_L1TP_013030_20170317_20170328_01_T1_pixel_qa.tif')\n", (15708, 15770), False, 'import os\n'), ((15859, 15904), 'rioxarray.open_rasterio', 'rxr.open_rasterio', (['qa_layer_path'], {'masked': '(True)'}), '(qa_layer_path, masked=True)\n', (15876, 15904), True, 'import rioxarray as rxr\n'), ((17047, 17120), 'numpy.nanmean', 'np.nanmean', (['((tif_bands[1] - tif_bands[0]) / (tif_bands[1] + tif_bands[0]))'], {}), '((tif_bands[1] - tif_bands[0]) / (tif_bands[1] + tif_bands[0]))\n', (17057, 17120), True, 'import numpy as np\n'), ((22259, 22298), 'os.path.join', 'os.path.join', (['site', '"""landsat-crop"""', '"""*"""'], {}), "(site, 'landsat-crop', '*')\n", (22271, 22298), False, 'import os\n'), ((23384, 23430), 'rioxarray.open_rasterio', 'rxr.open_rasterio', (['qa_layer_paths'], {'masked': '(True)'}), '(qa_layer_paths, masked=True)\n', (23401, 23430), True, 'import rioxarray as rxr\n'), ((24504, 24581), 'numpy.nanmean', 'np.nanmean', (['((site_bands[1] - site_bands[0]) / (site_bands[1] + site_bands[0]))'], {}), '((site_bands[1] - site_bands[0]) / (site_bands[1] + site_bands[0]))\n', (24514, 24581), True, 'import numpy as np\n'), ((22388, 22430), 'os.path.join', 'os.path.join', (['site', '"""vector"""', '"""*-crop.shp"""'], {}), "(site, 'vector', '*-crop.shp')\n", (22400, 22430), False, 'import os\n'), ((14286, 14313), 'xarray.where', 'xr.where', (['mask', '(True)', '(False)'], {}), '(mask, True, False)\n', (14294, 14313), True, 'import xarray as xr\n'), ((15575, 15607), 'os.path.join', 'os.path.join', (['tif', '"""*band[4-5]*"""'], {}), "(tif, '*band[4-5]*')\n", (15587, 15607), False, 'import os\n'), ((22982, 23019), 'os.path.join', 'os.path.join', (['all_dirs', '"""*band[4-5]*"""'], {}), "(all_dirs, '*band[4-5]*')\n", (22994, 23019), False, 'import os\n'), ((23292, 23328), 'os.path.join', 'os.path.join', (['all_dirs', '"""*pixel_qa*"""'], {}), "(all_dirs, '*pixel_qa*')\n", (23304, 23328), False, 'import os\n'), ((13950, 13991), 'rioxarray.open_rasterio', 'rxr.open_rasterio', (['band_path'], {'masked': '(True)'}), '(band_path, masked=True)\n', (13967, 13991), True, 'import rioxarray as rxr\n')]
"""Route declaration.""" from flask import Flask from flask import render_template app = Flask(__name__) @app.route('/') def home(): """Landing page.""" nav = [ {'name': 'Home', 'url': 'https://example.com/1'}, {'name': 'About', 'url': 'https://example.com/2'}, {'name': 'Pics', 'url': 'https://example.com/3'} ] return render_template( 'home.html', nav=nav, title="Jinja Demo Site", description="Smarter page templates with Flask & Jinja.", status={'active': True} ) if __name__ == '__main__': app.run(host='0.0.0.0', port=5001, debug=True)
[ "flask.Flask", "flask.render_template" ]
[((91, 106), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (96, 106), False, 'from flask import Flask\n'), ((364, 514), 'flask.render_template', 'render_template', (['"""home.html"""'], {'nav': 'nav', 'title': '"""Jinja Demo Site"""', 'description': '"""Smarter page templates with Flask & Jinja."""', 'status': "{'active': True}"}), "('home.html', nav=nav, title='Jinja Demo Site', description=\n 'Smarter page templates with Flask & Jinja.', status={'active': True})\n", (379, 514), False, 'from flask import render_template\n')]
from rest_framework import serializers

from tests.testapp.models import Book, Course, Student, Phone

from django_restql.fields import NestedField, DynamicSerializerMethodField
from django_restql.mixins import DynamicFieldsMixin
from django_restql.serializers import NestedModelSerializer


######## Serializers for Data Querying And Mutations Testing ##########
class BookSerializer(DynamicFieldsMixin, serializers.ModelSerializer):
    class Meta:
        model = Book
        fields = ['title', 'author']


class PhoneSerializer(DynamicFieldsMixin, serializers.ModelSerializer):
    class Meta:
        model = Phone
        fields = ['number', 'type', 'student']


################# Serializers for Data Querying Testing ################
class CourseSerializer(DynamicFieldsMixin, serializers.ModelSerializer):
    books = BookSerializer(many=True, read_only=True)

    class Meta:
        model = Course
        fields = ['name', 'code', 'books']


class CourseWithDisableDynamicFieldsKwargSerializer(DynamicFieldsMixin,
                                                    serializers.ModelSerializer):
    books = BookSerializer(many=True, read_only=True,
                           disable_dynamic_fields=True)

    class Meta:
        model = Course
        fields = ['name', 'code', 'books']


class CourseWithReturnPkkwargSerializer(CourseSerializer):
    books = BookSerializer(many=True, read_only=True, return_pk=True)

    class Meta:
        model = Course
        fields = ['name', 'code', 'books']


class CourseWithFieldsKwargSerializer(CourseSerializer):
    books = BookSerializer(many=True, read_only=True, fields=["title"])

    class Meta(CourseSerializer.Meta):
        pass


class CourseWithExcludeKwargSerializer(CourseSerializer):
    books = BookSerializer(many=True, read_only=True, exclude=["author"])

    class Meta(CourseSerializer.Meta):
        pass


class CourseWithAliasedBooksSerializer(CourseSerializer):
    tomes = BookSerializer(source="books", many=True, read_only=True)

    class Meta:
        model = Course
        fields = ['name', 'code', 'tomes']


class CourseWithDynamicSerializerMethodField(CourseSerializer):
    tomes = DynamicSerializerMethodField()

    class Meta:
        model = Course
        fields = ['name', 'code', 'tomes']

    def get_tomes(self, obj, query):
        books = obj.books.all()
        serializer = BookSerializer(
            books, query=query, many=True, read_only=True,
            context=self.context)
        return serializer.data


class StudentSerializer(DynamicFieldsMixin, serializers.ModelSerializer):
    course = CourseSerializer(many=False, read_only=True)
    phone_numbers = PhoneSerializer(many=True, read_only=True)

    class Meta:
        model = Student
        fields = ['name', 'age', 'course', 'phone_numbers']


class StudentWithAliasSerializer(DynamicFieldsMixin,
                                 serializers.ModelSerializer):
    program = CourseSerializer(source="course", many=False, read_only=True)
    phone_numbers = PhoneSerializer(many=True, read_only=True)

    class Meta:
        model = Student
        fields = ['name', 'age', 'program', 'phone_numbers']


############### Serializers for Nested Data Mutation Testing ##############
class WritableCourseSerializer(NestedModelSerializer):
    books = NestedField(BookSerializer, many=True, required=False)

    class Meta:
        model = Course
        fields = ['name', 'code', 'books']


class ReplaceableCourseSerializer(NestedModelSerializer):
    books = NestedField(BookSerializer, accept_pk=True, many=True,
                        required=False)

    class Meta:
        model = Course
        fields = ['name', 'code', 'books']


class ReplaceableStudentSerializer(NestedModelSerializer):
    course = NestedField(WritableCourseSerializer, accept_pk=True,
                         allow_null=True, required=False)
    phone_numbers = PhoneSerializer(many=True, read_only=True)

    class Meta:
        model = Student
        fields = ['name', 'age', 'course', 'phone_numbers']


class ReplaceableStudentWithAliasSerializer(NestedModelSerializer):
    full_name = serializers.CharField(source="name")
    program = NestedField(WritableCourseSerializer, source="course",
                          accept_pk=True, allow_null=True, required=False)
    contacts = NestedField(PhoneSerializer, source="phone_numbers",
                           accept_pk=True, many=True, required=False)

    class Meta:
        model = Student
        fields = ['full_name', 'age', 'program', 'contacts']


class WritableStudentSerializer(NestedModelSerializer):
    course = NestedField(WritableCourseSerializer, allow_null=True,
                         required=False)
    phone_numbers = NestedField(PhoneSerializer, many=True, required=False)

    class Meta:
        model = Student
        fields = ['name', 'age', 'course', 'phone_numbers']


class WritableStudentWithAliasSerializer(NestedModelSerializer):
    program = NestedField(WritableCourseSerializer, source="course",
                          allow_null=True, required=False)
    contacts = NestedField(PhoneSerializer, source="phone_numbers",
                           many=True, required=False)

    class Meta:
        model = Student
        fields = ['name', 'age', 'program', 'contacts']
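
# Usage sketch (hypothetical data; an addition, not part of the original
# module). It assumes the tests.testapp Django project is configured, so it
# is shown as a commented example rather than executable module-level code.
# NestedField makes the nested course writable through the parent
# serializer, so a single save() builds the related Course row as well:
#
#   data = {
#       'name': 'Ada',
#       'age': 30,
#       'course': {'name': 'Computer Science', 'code': 'CS101'},
#   }
#   serializer = WritableStudentSerializer(data=data)
#   serializer.is_valid(raise_exception=True)
#   student = serializer.save()  # creates both the Student and its Course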
[ "django_restql.fields.DynamicSerializerMethodField", "django_restql.fields.NestedField", "rest_framework.serializers.CharField" ]
[((2097, 2127), 'django_restql.fields.DynamicSerializerMethodField', 'DynamicSerializerMethodField', ([], {}), '()\n', (2125, 2127), False, 'from django_restql.fields import NestedField, DynamicSerializerMethodField\n'), ((3186, 3240), 'django_restql.fields.NestedField', 'NestedField', (['BookSerializer'], {'many': '(True)', 'required': '(False)'}), '(BookSerializer, many=True, required=False)\n', (3197, 3240), False, 'from django_restql.fields import NestedField, DynamicSerializerMethodField\n'), ((3404, 3474), 'django_restql.fields.NestedField', 'NestedField', (['BookSerializer'], {'accept_pk': '(True)', 'many': '(True)', 'required': '(False)'}), '(BookSerializer, accept_pk=True, many=True, required=False)\n', (3415, 3474), False, 'from django_restql.fields import NestedField, DynamicSerializerMethodField\n'), ((3640, 3730), 'django_restql.fields.NestedField', 'NestedField', (['WritableCourseSerializer'], {'accept_pk': '(True)', 'allow_null': '(True)', 'required': '(False)'}), '(WritableCourseSerializer, accept_pk=True, allow_null=True,\n required=False)\n', (3651, 3730), False, 'from django_restql.fields import NestedField, DynamicSerializerMethodField\n'), ((3977, 4013), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""name"""'}), "(source='name')\n", (3998, 4013), False, 'from rest_framework import serializers\n'), ((4028, 4135), 'django_restql.fields.NestedField', 'NestedField', (['WritableCourseSerializer'], {'source': '"""course"""', 'accept_pk': '(True)', 'allow_null': '(True)', 'required': '(False)'}), "(WritableCourseSerializer, source='course', accept_pk=True,\n allow_null=True, required=False)\n", (4039, 4135), False, 'from django_restql.fields import NestedField, DynamicSerializerMethodField\n'), ((4147, 4247), 'django_restql.fields.NestedField', 'NestedField', (['PhoneSerializer'], {'source': '"""phone_numbers"""', 'accept_pk': '(True)', 'many': '(True)', 'required': '(False)'}), "(PhoneSerializer, source='phone_numbers', accept_pk=True, many=\n True, required=False)\n", (4158, 4247), False, 'from django_restql.fields import NestedField, DynamicSerializerMethodField\n'), ((4417, 4487), 'django_restql.fields.NestedField', 'NestedField', (['WritableCourseSerializer'], {'allow_null': '(True)', 'required': '(False)'}), '(WritableCourseSerializer, allow_null=True, required=False)\n', (4428, 4487), False, 'from django_restql.fields import NestedField, DynamicSerializerMethodField\n'), ((4508, 4563), 'django_restql.fields.NestedField', 'NestedField', (['PhoneSerializer'], {'many': '(True)', 'required': '(False)'}), '(PhoneSerializer, many=True, required=False)\n', (4519, 4563), False, 'from django_restql.fields import NestedField, DynamicSerializerMethodField\n'), ((4746, 4837), 'django_restql.fields.NestedField', 'NestedField', (['WritableCourseSerializer'], {'source': '"""course"""', 'allow_null': '(True)', 'required': '(False)'}), "(WritableCourseSerializer, source='course', allow_null=True,\n required=False)\n", (4757, 4837), False, 'from django_restql.fields import NestedField, DynamicSerializerMethodField\n'), ((4849, 4928), 'django_restql.fields.NestedField', 'NestedField', (['PhoneSerializer'], {'source': '"""phone_numbers"""', 'many': '(True)', 'required': '(False)'}), "(PhoneSerializer, source='phone_numbers', many=True, required=False)\n", (4860, 4928), False, 'from django_restql.fields import NestedField, DynamicSerializerMethodField\n')]
import argparse
import codecs
import os
import sys
import re
from pathlib import Path
from collections import defaultdict

from graphviz import Digraph

include_regex = re.compile(r'#include\s+["<](.*)[">]')
valid_headers = [['.h', '.hpp'], 'red']
valid_sources = [['.c', '.cc', '.cpp'], 'blue']
valid_extensions = valid_headers[0] + valid_sources[0]


def normalize(path):
    """ Return the name of the node that will represent the file at path. """
    filename = os.path.basename(path)
    end = filename.rfind('.')
    end = end if end != -1 else len(filename)
    return filename[:end]


def get_extension(path):
    """ Return the extension of the file targeted by path. """
    return path[path.rfind('.'):]


def find_all_files(path, recursive=True):
    """
    Return a list of all the files in the folder.
    If recursive is True, the function will search recursively.
    """
    files = []
    path = Path(path)
    if path.is_dir():
        dirs = [entry for entry in path.iterdir() if entry.is_dir()]
        files += [entry for entry in path.iterdir()
                  if entry.is_file() and entry.suffix in valid_extensions]
        if recursive:
            for dir in dirs:
                files += find_all_files(str(dir))
    elif path.is_file() and path.suffix in valid_extensions:
        files.append(path)
    files = [str(file) for file in files]
    return files


def find_neighbors(path):
    """ Find all the other nodes included by the file targeted by path. """
    f = codecs.open(path, 'r', "utf-8", "ignore")
    code = f.read()
    f.close()
    return [normalize(include) for include in include_regex.findall(code)]


def create_graph(folder, create_cluster, strict):
    """ Create a graph from a folder. """
    # Find nodes and clusters
    files = []
    for f in folder:
        files += find_all_files(f)
    folder_to_files = defaultdict(list)
    for path in files:
        folder_to_files[os.path.dirname(path)].append(path)
    nodes = {normalize(path) for path in files}
    # Create graph
    graph = Digraph(strict=strict)
    # Find edges and create clusters
    for folder in folder_to_files:
        with graph.subgraph(name='cluster_{}'.format(folder)) as cluster:
            for path in folder_to_files[folder]:
                color = 'black'
                node = normalize(path)
                ext = Path(path).suffix
                if ext in valid_headers[0]:
                    color = valid_headers[1]
                if ext in valid_sources[0]:
                    color = valid_sources[1]
                if create_cluster:
                    cluster.node(node)
                else:
                    graph.node(node)
                neighbors = find_neighbors(path)
                for neighbor in neighbors:
                    if neighbor != node and neighbor in nodes:
                        graph.edge(node, neighbor, color=color)
    return graph


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-o', '--output',
        help='Path of the output file without the extension')
    parser.add_argument('-f', '--format',
                        help='Format of the output', default='dot',
                        choices=['bmp', 'gif', 'jpg', 'png', 'pdf', 'svg',
                                 'dot'])
    parser.add_argument('-v', '--view', action='store_true',
                        help='View the graph')
    parser.add_argument('-c', '--cluster', action='store_true',
                        help='Create a cluster for each subfolder',
                        default=False)
    parser.add_argument('-s', '--strict', action='store_true',
                        help='Rendering should merge multi-edges',
                        default=True)
    parser.add_argument('folder', help='Path to the folder to scan',
                        nargs='+')
    args = parser.parse_args()
    graph = create_graph(args.folder, args.cluster, args.strict)
    graph.format = args.format
    if args.output is not None:
        graph.render(args.output, cleanup=True, view=args.view)
    else:
        print(graph)
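
# Usage sketch (an addition; the script name and paths below are
# hypothetical, while the flags match the argparse options defined above):
#
#   python dependency_graph.py src/ include/ -f png -o deps -c
#
# The same graph can also be built programmatically. This assumes a 'src'
# directory exists and that the Graphviz 'dot' binary is installed for
# rendering:
#
#   graph = create_graph(['src'], create_cluster=True, strict=True)
#   graph.format = 'svg'
#   graph.render('deps', cleanup=True)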
[ "codecs.open", "argparse.ArgumentParser", "os.path.basename", "os.path.dirname", "collections.defaultdict", "pathlib.Path", "graphviz.Digraph", "re.compile" ]
[((169, 208), 're.compile', 're.compile', (['"""#include\\\\s+["<"](.*)[">]"""'], {}), '(\'#include\\\\s+["<"](.*)[">]\')\n', (179, 208), False, 'import re\n'), ((467, 489), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (483, 489), False, 'import os\n'), ((916, 926), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (920, 926), False, 'from pathlib import Path\n'), ((1626, 1667), 'codecs.open', 'codecs.open', (['path', '"""r"""', '"""utf-8"""', '"""ignore"""'], {}), "(path, 'r', 'utf-8', 'ignore')\n", (1637, 1667), False, 'import codecs\n'), ((1995, 2012), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2006, 2012), False, 'from collections import defaultdict\n'), ((2175, 2197), 'graphviz.Digraph', 'Digraph', ([], {'strict': 'strict'}), '(strict=strict)\n', (2182, 2197), False, 'from graphviz import Digraph\n'), ((3093, 3118), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3116, 3118), False, 'import argparse\n'), ((2060, 2081), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (2075, 2081), False, 'import os\n'), ((2486, 2496), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (2490, 2496), False, 'from pathlib import Path\n')]
# -*- coding: utf-8 -*-
"""Calendar is a dictionary like Python object that can render itself as
VCAL files according to rfc2445.

These are the defined components.

"""
from datetime import datetime, timedelta
from icalendar.caselessdict import CaselessDict
from icalendar.parser import Contentline
from icalendar.parser import Contentlines
from icalendar.parser import Parameters
from icalendar.parser import q_join
from icalendar.parser import q_split
from icalendar.parser_tools import DEFAULT_ENCODING
from icalendar.prop import TypesFactory
from icalendar.prop import vText, vDDDLists
from icalendar.timezone_cache import _timezone_cache

import pytz
import dateutil.rrule
from pytz.tzinfo import DstTzInfo

from icalendar.compat import unicode_type


######################################
# The component factory

class ComponentFactory(CaselessDict):
    """All components defined in rfc 2445 are registered in this factory
    class. To get a component you can use it like this.
    """

    def __init__(self, *args, **kwargs):
        """Set keys to upper for initial dict.
        """
        super(ComponentFactory, self).__init__(*args, **kwargs)
        self['VEVENT'] = Event
        self['VTODO'] = Todo
        self['VJOURNAL'] = Journal
        self['VFREEBUSY'] = FreeBusy
        self['VTIMEZONE'] = Timezone
        self['STANDARD'] = TimezoneStandard
        self['DAYLIGHT'] = TimezoneDaylight
        self['VALARM'] = Alarm
        self['VCALENDAR'] = Calendar


# These Properties have multiple property values inlined in one propertyline
# separated by comma. Use CaselessDict as simple caseless set.
INLINE = CaselessDict({
    'CATEGORIES': 1,
    'RESOURCES': 1,
    'FREEBUSY': 1,
})

_marker = []


class Component(CaselessDict):
    """Component is the base object for calendar, Event and the other
    components defined in RFC 2445.

    Normally you will not use this class directly, but rather one of the
    subclasses.
    """

    name = None         # should be defined in each component
    required = ()       # These properties are required
    singletons = ()     # These properties must only appear once
    multiple = ()       # may occur more than once
    exclusive = ()      # These properties are mutually exclusive
    inclusive = ()      # if any occurs the other(s) MUST occur
                        # ('duration', 'repeat')
    ignore_exceptions = False   # if True, and we cannot parse this
                                # component, we will silently ignore
                                # it, rather than let the exception
                                # propagate upwards
    # not_compliant = ['']  # List of non-compliant properties.

    def __init__(self, *args, **kwargs):
        """Set keys to upper for initial dict.
        """
        super(Component, self).__init__(*args, **kwargs)
        # set parameters here for properties that use non-default values
        self.subcomponents = []  # Components can be nested.
        self.errors = []  # If we ignored exception(s) while
                          # parsing a property, contains error strings

    # def is_compliant(self, name):
    #     """Returns True if the given property name is compliant with the
    #     icalendar implementation.
    #
    #     If the parser is too strict it might prevent parsing erroneous but
    #     otherwise compliant properties. So the parser is pretty lax, but it
    #     is possible to test for non-compliance by calling this method.
    #     """
    #     return name in not_compliant

    def __bool__(self):
        """Returns True, CaselessDict would return False if it had no items.
        """
        return True

    # python 2 compatibility
    __nonzero__ = __bool__

    def is_empty(self):
        """Returns True if Component has no items or subcomponents, else
        False.
        """
        return True if not (list(self.values()) + self.subcomponents) else False  # noqa

    @property
    def is_broken(self):
        return bool(self.errors)

    #############################
    # handling of property values

    def _encode(self, name, value, parameters=None, encode=1):
        """Encode values to icalendar property values.

        :param name: Name of the property.
        :type name: string

        :param value: Value of the property. Either a basic Python type or
                      any of icalendar's own property types.
        :type value: Python native type or icalendar property type.

        :param parameters: Property parameter dictionary for the value. Only
                           available, if encode is set to True.
        :type parameters: Dictionary

        :param encode: True, if the value should be encoded to one of
                       icalendar's own property types (Fallback is "vText")
                       or False, if not.
        :type encode: Boolean

        :returns: icalendar property value
        """
        if not encode:
            return value
        if isinstance(value, types_factory.all_types):
            # Don't encode already encoded values.
            return value
        klass = types_factory.for_property(name)
        obj = klass(value)
        if parameters:
            if isinstance(parameters, dict):
                params = Parameters()
                for key, item in parameters.items():
                    params[key] = item
                parameters = params
            assert isinstance(parameters, Parameters)
            obj.params = parameters
        return obj

    def add(self, name, value, parameters=None, encode=1):
        """Add a property.

        :param name: Name of the property.
        :type name: string

        :param value: Value of the property. Either a basic Python type or
                      any of icalendar's own property types.
        :type value: Python native type or icalendar property type.

        :param parameters: Property parameter dictionary for the value. Only
                           available, if encode is set to True.
        :type parameters: Dictionary

        :param encode: True, if the value should be encoded to one of
                       icalendar's own property types (Fallback is "vText")
                       or False, if not.
        :type encode: Boolean

        :returns: None
        """
        if isinstance(value, datetime) and\
                name.lower() in ('dtstamp', 'created', 'last-modified'):
            # RFC expects UTC for those... force value conversion.
            if getattr(value, 'tzinfo', False) and value.tzinfo is not None:
                value = value.astimezone(pytz.utc)
            else:
                # assume UTC for naive datetime instances
                value = pytz.utc.localize(value)

        # encode value
        if encode and isinstance(value, list) \
                and name.lower() not in ['rdate', 'exdate', 'categories']:
            # Individually convert each value to an ical type except rdate and
            # exdate, where lists of dates might be passed to vDDDLists.
            value = [self._encode(name, v, parameters, encode)
                     for v in value]
        else:
            value = self._encode(name, value, parameters, encode)

        # set value
        if name in self:
            # If property already exists, append it.
            oldval = self[name]
            if isinstance(oldval, list):
                if isinstance(value, list):
                    value = oldval + value
                else:
                    oldval.append(value)
                    value = oldval
            else:
                value = [oldval, value]
        self[name] = value

    def _decode(self, name, value):
        """Internal for decoding property values.
        """

        # TODO: Currently the decoded method calls the icalendar.prop
        # instances from_ical. We probably want to decode properties into
        # Python native types here. But when parsing from an ical string
        # with from_ical, we want to encode the string into a real
        # icalendar.prop property.
        if isinstance(value, vDDDLists):
            # TODO: Workaround unfinished decoding
            return value
        decoded = types_factory.from_ical(name, value)
        # TODO: remove when proper decoded is implemented in every prop.* class
        # Workaround to decode vText properly
        if isinstance(decoded, vText):
            decoded = decoded.encode(DEFAULT_ENCODING)
        return decoded

    def decoded(self, name, default=_marker):
        """Returns decoded value of property.
        """
        # XXX: fail. what's this function supposed to do in the end?
        # -rnix
        if name in self:
            value = self[name]
            if isinstance(value, list):
                return [self._decode(name, v) for v in value]
            return self._decode(name, value)
        else:
            if default is _marker:
                raise KeyError(name)
            else:
                return default

    ########################################################################
    # Inline values. A few properties have multiple values inlined in one
    # property line. These methods are used for splitting and joining these.

    def get_inline(self, name, decode=1):
        """Returns a list of values (split on comma).
        """
        vals = [v.strip('" ') for v in q_split(self[name])]
        if decode:
            return [self._decode(name, val) for val in vals]
        return vals

    def set_inline(self, name, values, encode=1):
        """Converts a list of values into comma separated string and sets
        value to that.
        """
        if encode:
            values = [self._encode(name, value, encode=1)
                      for value in values]
        self[name] = types_factory['inline'](q_join(values))

    #########################
    # Handling of components

    def add_component(self, component):
        """Add a subcomponent to this component.
        """
        self.subcomponents.append(component)

    def _walk(self, name):
        """Walk to given component.
        """
        result = []
        if name is None or self.name == name:
            result.append(self)
        for subcomponent in self.subcomponents:
            result += subcomponent._walk(name)
        return result

    def walk(self, name=None):
        """Recursively traverses component and subcomponents. Returns
        sequence of same. If name is passed, only components with name
        will be returned.
        """
        if name is not None:
            name = name.upper()
        return self._walk(name)

    #####################
    # Generation

    def property_items(self, recursive=True, sorted=True):
        """Returns properties in this component and subcomponents as:
        [(name, value), ...]
        """
        vText = types_factory['text']
        properties = [('BEGIN', vText(self.name).to_ical())]
        if sorted:
            property_names = self.sorted_keys()
        else:
            property_names = self.keys()

        for name in property_names:
            values = self[name]
            if isinstance(values, list):
                # normally one property is one line
                for value in values:
                    properties.append((name, value))
            else:
                properties.append((name, values))
        if recursive:
            # recursion is fun!
            for subcomponent in self.subcomponents:
                properties += subcomponent.property_items(sorted=sorted)
        properties.append(('END', vText(self.name).to_ical()))
        return properties

    @classmethod
    def from_ical(cls, st, multiple=False):
        """Populates the component recursively from a string.
        """
        stack = []  # a stack of components
        comps = []
        for line in Contentlines.from_ical(st):  # raw parsing
            if not line:
                continue

            try:
                name, params, vals = line.parts()
            except ValueError as e:
                # if unable to parse a line within a component
                # that ignores exceptions, mark the component
                # as broken and skip the line. otherwise raise.
                component = stack[-1] if stack else None
                if not component or not component.ignore_exceptions:
                    raise
                component.errors.append((None, unicode_type(e)))
                continue

            uname = name.upper()
            # check for start of component
            if uname == 'BEGIN':
                # try and create one of the components defined in the spec,
                # otherwise get a general Component for robustness.
                c_name = vals.upper()
                c_class = component_factory.get(c_name, Component)
                # If component factory cannot resolve ``c_name``, the generic
                # ``Component`` class is used which does not have the name set.
                # That's opposed to the usage of ``cls``, which represents a
                # more concrete subclass with a name set (e.g. VCALENDAR).
                component = c_class()
                if not getattr(component, 'name', ''):  # undefined components
                    component.name = c_name
                stack.append(component)
            # check for end of event
            elif uname == 'END':
                # we are done adding properties to this component
                # so pop it from the stack and add it to the new top.
                component = stack.pop()
                if not stack:  # we are at the end
                    comps.append(component)
                else:
                    stack[-1].add_component(component)
                if vals == 'VTIMEZONE' and \
                        'TZID' in component and \
                        component['TZID'] not in pytz.all_timezones and \
                        component['TZID'] not in _timezone_cache:
                    _timezone_cache[component['TZID']] = component.to_tz()
            # we are adding properties to the current top of the stack
            else:
                factory = types_factory.for_property(name)
                component = stack[-1] if stack else None
                if not component:
                    raise ValueError('Property "{prop}" does not have '
                                     'a parent component.'.format(prop=name))
                datetime_names = ('DTSTART', 'DTEND', 'RECURRENCE-ID', 'DUE',
                                  'FREEBUSY', 'RDATE', 'EXDATE')
                try:
                    if name in datetime_names and 'TZID' in params:
                        vals = factory(factory.from_ical(vals,
                                                         params['TZID']))
                    else:
                        vals = factory(factory.from_ical(vals))
                except ValueError as e:
                    if not component.ignore_exceptions:
                        raise
                    component.errors.append((uname, unicode_type(e)))
                    component.add(name, None, encode=0)
                else:
                    vals.params = params
                    component.add(name, vals, encode=0)

        if multiple:
            return comps
        if len(comps) > 1:
            raise ValueError('Found multiple components where '
                             'only one is allowed: {st!r}'.format(**locals()))
        if len(comps) < 1:
            raise ValueError('Found no components where '
                             'exactly one is required: '
                             '{st!r}'.format(**locals()))
        return comps[0]

    def content_line(self, name, value, sorted=True):
        """Returns property as content line.
        """
        params = getattr(value, 'params', Parameters())
        return Contentline.from_parts(name, params, value, sorted=sorted)

    def content_lines(self, sorted=True):
        """Converts the Component and subcomponents into content lines.
        """
        contentlines = Contentlines()
        for name, value in self.property_items(sorted=sorted):
            cl = self.content_line(name, value, sorted=sorted)
            contentlines.append(cl)
        contentlines.append('')  # remember the empty string in the end
        return contentlines

    def to_ical(self, sorted=True):
        '''
        :param sorted: Whether parameters and properties should be
                       lexicographically sorted.
        '''
        content_lines = self.content_lines(sorted=sorted)
        return content_lines.to_ical()

    def __repr__(self):
        """String representation of class with all of its subcomponents.
        """
        subs = ', '.join([str(it) for it in self.subcomponents])
        return '%s(%s%s)' % (
            self.name or type(self).__name__,
            dict(self),
            ', %s' % subs if subs else ''
        )


#######################################
# components defined in RFC 5545

class Event(Component):

    name = 'VEVENT'

    canonical_order = (
        'SUMMARY', 'DTSTART', 'DTEND', 'DURATION', 'DTSTAMP',
        'UID', 'RECURRENCE-ID', 'SEQUENCE',
        'RRULE', 'RDATE', 'EXDATE',
    )

    required = ('UID', 'DTSTAMP',)
    singletons = (
        'CLASS', 'CREATED', 'DESCRIPTION', 'DTSTART', 'GEO',
        'LAST-MODIFIED', 'LOCATION', 'ORGANIZER', 'PRIORITY', 'DTSTAMP',
        'SEQUENCE', 'STATUS', 'SUMMARY', 'TRANSP', 'URL', 'RECURRENCE-ID',
        'DTEND', 'DURATION', 'UID',
        'CATEGORIES',
    )
    exclusive = ('DTEND', 'DURATION',)
    multiple = (
        'ATTACH', 'ATTENDEE', 'COMMENT', 'CONTACT', 'EXDATE',
        'RSTATUS', 'RELATED', 'RESOURCES', 'RDATE', 'RRULE'
    )
    ignore_exceptions = True


class Todo(Component):

    name = 'VTODO'

    required = ('UID', 'DTSTAMP',)
    singletons = (
        'CLASS', 'COMPLETED', 'CREATED', 'DESCRIPTION', 'DTSTAMP',
        'DTSTART', 'GEO', 'LAST-MODIFIED', 'LOCATION', 'ORGANIZER',
        'PERCENT-COMPLETE', 'PRIORITY', 'RECURRENCE-ID', 'SEQUENCE',
        'STATUS', 'SUMMARY', 'UID', 'URL', 'DUE', 'DURATION',
    )
    exclusive = ('DUE', 'DURATION',)
    multiple = (
        'ATTACH', 'ATTENDEE', 'CATEGORIES', 'COMMENT', 'CONTACT', 'EXDATE',
        'RSTATUS', 'RELATED', 'RESOURCES', 'RDATE', 'RRULE'
    )


class Journal(Component):

    name = 'VJOURNAL'

    required = ('UID', 'DTSTAMP',)
    singletons = (
        'CLASS', 'CREATED', 'DTSTART', 'DTSTAMP', 'LAST-MODIFIED',
        'ORGANIZER', 'RECURRENCE-ID', 'SEQUENCE', 'STATUS', 'SUMMARY',
        'UID', 'URL',
    )
    multiple = (
        'ATTACH', 'ATTENDEE', 'CATEGORIES', 'COMMENT', 'CONTACT', 'EXDATE',
        'RELATED', 'RDATE', 'RRULE', 'RSTATUS', 'DESCRIPTION',
    )


class FreeBusy(Component):

    name = 'VFREEBUSY'

    required = ('UID', 'DTSTAMP',)
    singletons = (
        'CONTACT', 'DTSTART', 'DTEND', 'DTSTAMP', 'ORGANIZER', 'UID', 'URL',
    )
    multiple = ('ATTENDEE', 'COMMENT', 'FREEBUSY', 'RSTATUS',)


class Timezone(Component):
    name = 'VTIMEZONE'
    canonical_order = ('TZID',)
    required = ('TZID',)  # it also requires one of components DAYLIGHT and STANDARD
    singletons = ('TZID', 'LAST-MODIFIED', 'TZURL',)

    @staticmethod
    def _extract_offsets(component, tzname):
        """extract offsets and transition times from a VTIMEZONE component

        :param component: a STANDARD or DAYLIGHT component
        :param tzname: the name of the zone
        """
        offsetfrom = component['TZOFFSETFROM'].td
        offsetto = component['TZOFFSETTO'].td
        dtstart = component['DTSTART'].dt

        # offsets need to be rounded to the next minute, we might lose up
        # to 30 seconds accuracy, but it can't be helped (datetime
        # supposedly cannot handle smaller offsets)
        offsetto_s = int((offsetto.seconds + 30) / 60) * 60
        offsetto = timedelta(days=offsetto.days, seconds=offsetto_s)
        offsetfrom_s = int((offsetfrom.seconds + 30) / 60) * 60
        offsetfrom = timedelta(days=offsetfrom.days, seconds=offsetfrom_s)

        # expand recurrences
        if 'RRULE' in component:
            rrulestr = component['RRULE'].to_ical().decode('utf-8')
            rrule = dateutil.rrule.rrulestr(rrulestr, dtstart=dtstart)
            if not {'UNTIL', 'COUNT'}.intersection(component['RRULE'].keys()):
                # pytz.timezones don't know any transition dates after 2038
                # either
                rrule._until = datetime(2038, 12, 31)
            elif 'UNTIL' in component['RRULE'] and rrule._until.tzinfo:
                rrule._until = rrule._until.replace(tzinfo=None)
            transtimes = rrule
        # or rdates
        elif 'RDATE' in component:
            if not isinstance(component['RDATE'], list):
                rdates = [component['RDATE']]
            else:
                rdates = component['RDATE']
            transtimes = [dtstart] + [leaf.dt for tree in rdates for leaf
                                      in tree.dts]
        else:
            transtimes = [dtstart]

        transitions = [(transtime, offsetfrom, offsetto, tzname)
                       for transtime in set(transtimes)]

        if component.name == 'STANDARD':
            is_dst = 0
        elif component.name == 'DAYLIGHT':
            is_dst = 1
        return is_dst, transitions

    @staticmethod
    def _make_unique_tzname(tzname, tznames):
        """
        :param tzname: Candidate tzname
        :param tznames: Other tznames
        """
        # TODO better way of making sure tznames are unique
        while tzname in tznames:
            tzname += '_1'
        tznames.add(tzname)
        return tzname

    def to_tz(self):
        """convert this VTIMEZONE component to a pytz.timezone object
        """
        try:
            zone = str(self['TZID'])
        except UnicodeEncodeError:
            zone = self['TZID'].encode('ascii', 'replace')

        transitions = []
        dst = {}
        tznames = set()
        for component in self.walk():
            if type(component) == Timezone:
                continue
            assert isinstance(component['DTSTART'].dt, datetime), (
                "VTIMEZONEs sub-components' DTSTART must be of type datetime, not date"
            )
            try:
                tzname = str(component['TZNAME'])
            except UnicodeEncodeError:
                tzname = component['TZNAME'].encode('ascii', 'replace')
                tzname = self._make_unique_tzname(tzname, tznames)
            except KeyError:
                tzname = '{0}_{1}_{2}_{3}'.format(
                    zone,
                    component['DTSTART'].to_ical().decode('utf-8'),
                    component['TZOFFSETFROM'].to_ical(),  # for whatever reason this is str/unicode
                    component['TZOFFSETTO'].to_ical(),  # for whatever reason this is str/unicode
                )
                tzname = self._make_unique_tzname(tzname, tznames)

            dst[tzname], component_transitions = self._extract_offsets(
                component, tzname
            )
            transitions.extend(component_transitions)

        transitions.sort()
        transition_times = [
            transtime - osfrom for transtime, osfrom, _, _ in transitions
        ]

        # transition_info is a list with tuples in the format
        # (utcoffset, dstoffset, name)
        # dstoffset = 0, if current transition is to standard time
        #           = this_utcoffset - prev_standard_utcoffset, otherwise
        transition_info = []
        for num, (transtime, osfrom, osto, name) in enumerate(transitions):
            dst_offset = False
            if not dst[name]:
                dst_offset = timedelta(seconds=0)
            else:
                # go back in time until we find a transition to dst
                for index in range(num - 1, -1, -1):
                    if not dst[transitions[index][3]]:  # [3] is the name
                        dst_offset = osto - transitions[index][2]  # [2] is osto  # noqa
                        break
                # when the first transition is to dst, we didn't find anything
                # in the past, so we have to look into the future
                if not dst_offset:
                    for index in range(num, len(transitions)):
                        if not dst[transitions[index][3]]:  # [3] is the name
                            dst_offset = osto - transitions[index][2]  # [2] is osto  # noqa
                            break
            assert dst_offset is not False
            transition_info.append((osto, dst_offset, name))

        cls = type(zone, (DstTzInfo,), {
            'zone': zone,
            '_utc_transition_times': transition_times,
            '_transition_info': transition_info
        })
        return cls()


class TimezoneStandard(Component):

    name = 'STANDARD'
    required = ('DTSTART', 'TZOFFSETTO', 'TZOFFSETFROM')
    singletons = ('DTSTART', 'TZOFFSETTO', 'TZOFFSETFROM',)
    multiple = ('COMMENT', 'RDATE', 'TZNAME', 'RRULE', 'EXDATE')


class TimezoneDaylight(Component):

    name = 'DAYLIGHT'
    required = TimezoneStandard.required
    singletons = TimezoneStandard.singletons
    multiple = TimezoneStandard.multiple


class Alarm(Component):

    name = 'VALARM'
    # some properties MAY/MUST/MUST NOT appear depending on ACTION value
    required = ('ACTION', 'TRIGGER',)
    singletons = (
        'ATTACH', 'ACTION', 'DESCRIPTION', 'SUMMARY', 'TRIGGER', 'DURATION',
        'REPEAT',
    )
    inclusive = (('DURATION', 'REPEAT',), ('SUMMARY', 'ATTENDEE',))
    multiple = ('ATTENDEE', 'ATTACH')


class Calendar(Component):
    """This is the base object for an iCalendar file.
    """
    name = 'VCALENDAR'
    canonical_order = ('VERSION', 'PRODID', 'CALSCALE', 'METHOD',)
    required = ('PRODID', 'VERSION', )
    singletons = ('PRODID', 'VERSION', 'CALSCALE', 'METHOD')


# These are read-only singletons, so one instance is enough for the module
types_factory = TypesFactory()
component_factory = ComponentFactory()
[ "icalendar.prop.TypesFactory", "icalendar.parser.Contentline.from_parts", "icalendar.compat.unicode_type", "icalendar.parser.Parameters", "icalendar.prop.vText", "datetime.datetime", "pytz.utc.localize", "icalendar.parser.q_join", "datetime.timedelta", "icalendar.parser.q_split", "icalendar.parser.Contentlines", "icalendar.parser.Contentlines.from_ical", "icalendar.caselessdict.CaselessDict" ]
[((1637, 1699), 'icalendar.caselessdict.CaselessDict', 'CaselessDict', (["{'CATEGORIES': 1, 'RESOURCES': 1, 'FREEBUSY': 1}"], {}), "({'CATEGORIES': 1, 'RESOURCES': 1, 'FREEBUSY': 1})\n", (1649, 1699), False, 'from icalendar.caselessdict import CaselessDict\n'), ((26129, 26143), 'icalendar.prop.TypesFactory', 'TypesFactory', ([], {}), '()\n', (26141, 26143), False, 'from icalendar.prop import TypesFactory\n'), ((11889, 11915), 'icalendar.parser.Contentlines.from_ical', 'Contentlines.from_ical', (['st'], {}), '(st)\n', (11911, 11915), False, 'from icalendar.parser import Contentlines\n'), ((15902, 15960), 'icalendar.parser.Contentline.from_parts', 'Contentline.from_parts', (['name', 'params', 'value'], {'sorted': 'sorted'}), '(name, params, value, sorted=sorted)\n', (15924, 15960), False, 'from icalendar.parser import Contentline\n'), ((16111, 16125), 'icalendar.parser.Contentlines', 'Contentlines', ([], {}), '()\n', (16123, 16125), False, 'from icalendar.parser import Contentlines\n'), ((19961, 20010), 'datetime.timedelta', 'timedelta', ([], {'days': 'offsetto.days', 'seconds': 'offsetto_s'}), '(days=offsetto.days, seconds=offsetto_s)\n', (19970, 20010), False, 'from datetime import datetime, timedelta\n'), ((20096, 20149), 'datetime.timedelta', 'timedelta', ([], {'days': 'offsetfrom.days', 'seconds': 'offsetfrom_s'}), '(days=offsetfrom.days, seconds=offsetfrom_s)\n', (20105, 20149), False, 'from datetime import datetime, timedelta\n'), ((9836, 9850), 'icalendar.parser.q_join', 'q_join', (['values'], {}), '(values)\n', (9842, 9850), False, 'from icalendar.parser import q_join\n'), ((15873, 15885), 'icalendar.parser.Parameters', 'Parameters', ([], {}), '()\n', (15883, 15885), False, 'from icalendar.parser import Parameters\n'), ((5289, 5301), 'icalendar.parser.Parameters', 'Parameters', ([], {}), '()\n', (5299, 5301), False, 'from icalendar.parser import Parameters\n'), ((6754, 6778), 'pytz.utc.localize', 'pytz.utc.localize', (['value'], {}), '(value)\n', (6771, 6778), False, 'import pytz\n'), ((9412, 9431), 'icalendar.parser.q_split', 'q_split', (['self[name]'], {}), '(self[name])\n', (9419, 9431), False, 'from icalendar.parser import q_split\n'), ((20563, 20585), 'datetime.datetime', 'datetime', (['(2038)', '(12)', '(31)'], {}), '(2038, 12, 31)\n', (20571, 20585), False, 'from datetime import datetime, timedelta\n'), ((23821, 23841), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (23830, 23841), False, 'from datetime import datetime, timedelta\n'), ((10931, 10947), 'icalendar.prop.vText', 'vText', (['self.name'], {}), '(self.name)\n', (10936, 10947), False, 'from icalendar.prop import vText, vDDDLists\n'), ((11615, 11631), 'icalendar.prop.vText', 'vText', (['self.name'], {}), '(self.name)\n', (11620, 11631), False, 'from icalendar.prop import vText, vDDDLists\n'), ((12474, 12489), 'icalendar.compat.unicode_type', 'unicode_type', (['e'], {}), '(e)\n', (12486, 12489), False, 'from icalendar.compat import unicode_type\n'), ((15085, 15100), 'icalendar.compat.unicode_type', 'unicode_type', (['e'], {}), '(e)\n', (15097, 15100), False, 'from icalendar.compat import unicode_type\n')]
import OpenGraph as og from OpenGraph.tests import( assert_graphs_equal, assert_edges_equal, assert_nodes_equal ) def test_passing(): assert (1, 2, 3) == (1, 2, 3) # thanks to numpy for this GenericTest class (numpy/testing/test_utils.py) class _GenericTest: @classmethod def _test_equal(cls, a, b): cls._assert_func(a, b) @classmethod def _test_not_equal(cls, a, b): try: cls._assert_func(a, b) passed = True except AssertionError: pass else: raise AssertionError("a and b are found equal but are not") # def test_equal1(): # G = og.classes.Graph() # G.add_nodes([1, 2, 3]) # H = og.classes.Graph() # H.add_nodes([1, 2, 3]) # #assert G == H class TestNodesEqual(_GenericTest): _assert_func = assert_nodes_equal def test_nodes_equal(self): a = [1, 2, 5, 4] b = [4, 5, 1, 2] self._test_equal(a, b) def test_nodes_not_equal(self): a = [1, 2, 5, 4] b = [4, 5, 1, 3] self._test_not_equal(a, b) def test_nodes_with_data_equal(self): G = og.Graph() G.add_nodes([1, 2, 3]) H = og.Graph() H.add_nodes([1, 2, 3]) self._test_equal(G.nodes, H.nodes) def test_edges_with_data_not_equal(self): G = og.Graph() G.add_nodes([1, 2, 3]) H = og.Graph() H.add_nodes([1, 4, 3]) self._test_not_equal(G.nodes, H.nodes) class TestEdgesEqual(_GenericTest): _assert_func = assert_edges_equal def test_edges_equal(self): a = [(1, 2), (5, 4)] b = [(4, 5), (1, 2)] self._test_equal(a, b) def test_edges_not_equal(self): a = [(1, 2), (5, 4)] b = [(4, 5), (1, 3)] self._test_not_equal(a, b) def test_duplicate_edges(self): a = [(1, 2), (5, 4), (1, 2)] b = [(4, 5), (1, 2)] self._test_not_equal(a, b) def test_duplicate_edges_with_data(self): a = [(1, 2, {'weight': 10}), (5, 4), (1, 2, {'weight': 1})] b = [(4, 5), (1, 2), (1, 2, {'weight': 1})] self._test_not_equal(a, b) def test_order_of_edges_with_data(self): a = [(1, 2, {'weight': 10}), (1, 2, {'weight': 1})] b = [(1, 2, {'weight': 1}), (1, 2, {'weight': 10})] self._test_equal(a, b) def test_order_of_multiedges(self): wt1 = {'weight': 1} wt2 = {'weight': 2} a = [(1, 2, wt1), (1, 2, wt1), (1, 2, wt2)] b = [(1, 2, wt1), (1, 2, wt2), (1, 2, wt2)] self._test_not_equal(a, b) def test_order_of_edges_with_keys(self): a = [(1, 2, 0, {'weight': 10}), (1, 2, 1, {'weight': 1}), (1, 2, 2)] b = [(1, 2, 1, {'weight': 1}), (1, 2, 2), (1, 2, 0, {'weight': 10})] self._test_equal(a, b) a = [(1, 2, 1, {'weight': 10}), (1, 2, 0, {'weight': 1}), (1, 2, 2)] b = [(1, 2, 1, {'weight': 1}), (1, 2, 2), (1, 2, 0, {'weight': 10})] self._test_not_equal(a, b) class TestGraphsEqual(_GenericTest): _assert_func = assert_graphs_equal def test_graphs_equal(self): G = og.Graph() H = og.Graph() edges1 = [(1, 2), (2, 3), (1, 3), (3, 4), (4, 5), (4, 6), (5, 6)] edges2 = [(3, 7), (4, 7), (10, 7), (11, 7)] edges3 = [(8, 9), (8, 10), (9, 10), (10, 11), (11, 12), (11, 13), (12, 13)] G.add_edges(edges1) G.add_edges(edges2) G.add_edges(edges3) H.add_edges(edges1) H.add_edges(edges2) H.add_edges(edges3) self._test_equal(G, H) def test_graphs_not_equal(self): G = og.Graph() H = og.Graph() edges1 = [(1, 2), (2, 3), (1, 3), (3, 4), (4, 5), (4, 6), (5, 6)] edges2 = [(3, 7), (4, 7), (10, 7), (11, 7)] edges3 = [(8, 9), (8, 10), (9, 10), (10, 11), (11, 12), (11, 13), (12, 13)] G.add_edges(edges1) G.add_edges(edges2) G.add_edges(edges3) H.add_edges(edges1) H.add_edges(edges2) self._test_not_equal(G, H)
[ "OpenGraph.Graph" ]
[((1148, 1158), 'OpenGraph.Graph', 'og.Graph', ([], {}), '()\n', (1156, 1158), True, 'import OpenGraph as og\n'), ((1202, 1212), 'OpenGraph.Graph', 'og.Graph', ([], {}), '()\n', (1210, 1212), True, 'import OpenGraph as og\n'), ((1346, 1356), 'OpenGraph.Graph', 'og.Graph', ([], {}), '()\n', (1354, 1356), True, 'import OpenGraph as og\n'), ((1400, 1410), 'OpenGraph.Graph', 'og.Graph', ([], {}), '()\n', (1408, 1410), True, 'import OpenGraph as og\n'), ((3133, 3143), 'OpenGraph.Graph', 'og.Graph', ([], {}), '()\n', (3141, 3143), True, 'import OpenGraph as og\n'), ((3156, 3166), 'OpenGraph.Graph', 'og.Graph', ([], {}), '()\n', (3164, 3166), True, 'import OpenGraph as og\n'), ((3626, 3636), 'OpenGraph.Graph', 'og.Graph', ([], {}), '()\n', (3634, 3636), True, 'import OpenGraph as og\n'), ((3649, 3659), 'OpenGraph.Graph', 'og.Graph', ([], {}), '()\n', (3657, 3659), True, 'import OpenGraph as og\n')]
#!/usr/bin/env python3 """ Update the S3 bucket with new config files and assets. """ import glob import json import os import re import shutil import subprocess import sys import time import boto3 # Types of assets ASSET_TYPES = { 'json': ['.json'], 'image': ['.png', '.gif', '.jpg'], 'text': ['.txt'], } def build_empty_config(desc_en='', desc_fr=''): """ Get a basic empty config. For consistency. :rtype: `dict` """ return { 'files': [], 'lastUpdatedAt': int(time.time()), 'whatsNew': { 'description_en': desc_en, 'description_fr': desc_fr, }, } def get_total_config_size(config): """ Given a config file, determine the total size of assets, zipped assets, and the total of both. :param config: The config file to parse :type config: `dict` :rtype: `int`, `int`, `int` """ total_base_size = total_zipped_size = 0 for asset_details in config['files']: total_base_size += asset_details['size'] if 'zsize' in asset_details: total_zipped_size += asset_details['zsize'] return total_base_size, total_zipped_size, total_base_size + total_zipped_size def get_all_assets(asset_dir): """ Get all available asset names in the base directory and the subdirectory they are in. First item in tuple is the asset directory, second is the asset name. :param asset_dir: Base directory to begin search from :type asset_dir: `str` :rtype: `list` of (`str`, `str`) """ assets = [] for file_path in glob.iglob(os.path.join(asset_dir, '**', '*'), recursive=True): directory, filename = file_path[:file_path.rfind(os.path.sep) + 1], \ file_path[file_path.rfind(os.path.sep) + 1:] if filename.find('.') > 0 and 'config' not in filename: assets.append((directory, filename)) assets.sort(key=lambda s: s[1]) return assets def get_asset_type(asset_name): """ Gets the asset type from ASSET_TYPES of an asset given its name. :param asset_name: Name of the asset :type asset_name: `str` :rtype: `str` or None """ filetype = asset_name[asset_name.rfind('.'):].lower() for asset_type in ASSET_TYPES: if filetype in ASSET_TYPES[asset_type]: return asset_type return None def build_dev_config(asset_dir, output_dir, app_config_dir, filename, description): """ Builds a config for a dev environment. :param asset_dir: Location of assets in filesystem :type asset_dir: `str` :param output_dir: Output location for config file :type output_dir: `str` :param app_config_dir: Output location for assets for application bundling :type app_config_dir: `dict` :param filename: Output filename for config file :type filename: `str` :param description: Description of the update :type description: `dict` """ # pylint:disable=R0914 assets = get_all_assets(asset_dir) print('Retrieved {0} assets'.format(len(assets))) print('Creating output directory `{0}`'.format(output_dir)) os.makedirs(output_dir) for platform in app_config_dir: print('Creating app asset directory `{0}`'.format(app_config_dir[platform])) if os.path.exists(app_config_dir[platform]): shutil.rmtree(app_config_dir[platform]) os.makedirs(app_config_dir[platform]) config_ios = build_empty_config(desc_en=description['en'], desc_fr=description['fr']) config_android = build_empty_config(desc_en=description['en'], desc_fr=description['fr']) for dev_asset in assets: asset_folder = dev_asset[0] asset_name = dev_asset[1] if asset_name[-3:] == '.gz': continue asset_type = get_asset_type(dev_asset[1]) asset_zurl_exists = os.path.exists(os.path.join(asset_folder, '{}.gz'.format(asset_name))) for platform in app_config_dir: if not os.path.exists(os.path.join(app_config_dir[platform], asset_type)): os.makedirs(os.path.join(app_config_dir[platform], asset_type)) shutil.copy( os.path.join(asset_folder, asset_name), os.path.join(app_config_dir[platform], asset_type, asset_name) ) file_ios = { 'name': '/{}'.format(asset_name), 'size': os.path.getsize(os.path.join(asset_folder, asset_name)), 'type': asset_type, 'url': 'http://localhost:8080/{0}/{1}'.format(asset_type, asset_name), 'version': 1, } file_android = { 'name': '/{}'.format(asset_name), 'size': os.path.getsize(os.path.join(asset_folder, asset_name)), 'type': asset_type, 'url': 'http://10.0.2.2:8080/{0}/{1}'.format(asset_type, asset_name), 'version': 1, } if asset_zurl_exists: file_ios['zurl'] = 'http://localhost:8080/{0}/{1}'.format( asset_type, '{}.gz'.format(asset_name) ) file_ios['zsize'] = os.path.getsize(os.path.join(asset_folder, '{}.gz'.format(asset_name))) config_ios['files'].append(file_ios) config_android['files'].append(file_android) total_base_size, total_zipped_size, total_size = get_total_config_size(config_ios) print('Config total download size: {0}/{1} ({2})'.format( total_base_size / 1000, total_zipped_size / 1000, total_size / 1000 )) filename_ios = '{0}.ios.{1}'.format(filename[:filename.rindex('.')], filename[filename.rindex('.') + 1:]) filename_android = '{0}.android.{1}'.format(filename[:filename.rindex('.')], filename[filename.rindex('.') + 1:]) print('Dumping iOS config to `{0}{1}`'.format(output_dir, filename_ios)) with open(os.path.join(output_dir, filename_ios), 'w') as config_file: json.dump(config_ios, config_file, sort_keys=True, ensure_ascii=False, indent=2) if 'ios' in app_config_dir: print('Dumping iOS config to `{0}/{1}`'.format(app_config_dir['ios'], 'base_config.json')) with open(os.path.join(app_config_dir['ios'], 'base_config.json'), 'w') as config_file: json.dump(config_ios, config_file, sort_keys=True, ensure_ascii=False, indent=2) print('Dumping Android config to `{0}{1}`'.format(output_dir, filename_android)) with open(os.path.join(output_dir, filename_android), 'w') as config_file: json.dump(config_android, config_file, sort_keys=True, ensure_ascii=False, indent=2) if 'android' in app_config_dir: print('Dumping Android config to `{0}/{1}`'.format( app_config_dir['android'], 'base_config.json')) with open(os.path.join(app_config_dir['android'], 'base_config.json'), 'w') as config_file: json.dump(config_android, config_file, sort_keys=True, ensure_ascii=False, indent=2) def get_most_recent_config(bucket): """ Given an S3 bucket, find the most recent config file version in that bucket and return its version as an array of 3 integers. If no config files are found, returns [0, 0, 0]. :param bucket: the S3 bucket to examine :type bucket: :class:S3.Bucket :rtype: `list` of `int` """ objects = bucket.objects.all() max_version = [0, 0, 0] for item in objects: if item.key[:7] != 'config/' or len(item.key) <= 7: continue item_version = list(map(int, item.key.split('/')[1].split('.')[:3])) for i, _ in enumerate(item_version): if item_version[i] > max_version[i]: max_version = item_version break print('Found most recent config version: {0}'.format(max_version)) return max_version def get_release_config_version(bucket, version): """ Gets a string for the config version to build. :param bucket: the s3 bucket to examine for the most recent config version, if necessary :type bucket: :class:S3.Bucket :param version: Either the major.minor.patch build number for the config, or 'major', 'minor', or 'patch' to update from the most recent config version :type version: `str` """ if re.match(r'[0-9]+[.][0-9]+[.][0-9]+', version): return version last_version = get_most_recent_config(bucket) if version == 'major': last_version[0] = last_version[0] + 1 last_version[1] = 0 last_version[2] = 0 elif version == 'minor': last_version[1] = last_version[1] + 1 last_version[2] = 0 elif version == 'patch': last_version[2] = last_version[2] + 1 else: raise ValueError('`version` must be one of "major", "minor", "patch", or match "X.Y.Z"') last_version = [str(x) for x in last_version] return '.'.join(last_version) def update_asset( bucket, name, asset_type, content, version, zcontent=None, compatible=False, configs={}, upload_file=True): """ Upload an asset to S3 bucket. Override existing versions, and update any config files that contain the asset. Returns the URL to access the asset. :param bucket: S3 bucket to upload to :type bucket: :class:S3.Bucket :param name: Filename of the asset :type name: `str` :param asset_type: Type of the asset :type asset_type: `str` :param content: Content of the asset :type content: `str` :param version: Version number for asset :type version: `int` :param zcontent: Zipped content of the asset :type zcontent: `str` :param compatible: If True, then previous configs will be checked if they contain the file and their versions updated :type compatible: `bool` :param configs: List of existing configs to check and update :type configs: `list` of `json` :param upload_file: True to upload the file, false to skip :type upload_file: `bool` :rtype: `str` """ # pylint:disable=W0102,R0912,R0913,R0914 global S3 # pylint:disable=W0603 global REGION # pylint:disable=W0603 content_type = 'application/json; charset=utf-8' if asset_type == 'image': if name[-3:] == 'png': content_type = 'image/png' elif name[-3:] == 'jpg': content_type = 'image/jpeg' elif name[-3:] == 'gif': content_type = 'image/gif' elif asset_type == 'text': content_type = 'text/plain; charset=utf-8' object_kwargs = { 'ACL': 'public-read', 'ContentType': content_type, 'Metadata': { 'version': str(version), }, } if upload_file: print('Uploading asset `{0}`'.format('assets{0}'.format(name))) bucket.put_object(Key='assets{0}'.format(name), Body=content, **object_kwargs) base_object = S3.Object(bucket.name, 'assets{0}'.format(name)).get() size = base_object['ContentLength'] version = int(base_object['Metadata']['version']) url = 'https://s3.{0}.amazonaws.com/{1}/assets{2}?versionId={3}'.format( REGION, bucket.name, name, base_object['VersionId'] ) updated_asset = { 'size': size, 'url': url, 'version': version, } if zcontent: if upload_file: print('Uploading asset `{0}`'.format('assets{0}.gz'.format(name))) bucket.put_object( Key='assets{0}.gz'.format(name), Body=zcontent, ContentEncoding='gzip', **object_kwargs ) zipped_object = S3.Object(bucket.name, 'assets{0}.gz'.format(name)).get() updated_asset['zsize'] = zipped_object['ContentLength'] updated_asset['zurl'] = 'https://s3.{}.amazonaws.com/{}/assets{}.gz?versionId={}'.format( REGION, bucket.name, name, zipped_object['VersionId'] ) if compatible: for config in configs: updated = False for file in configs[config]['content']['files']: if file['name'] != name or file['version'] != version - 1: continue file['size'] = updated_asset['size'] file['url'] = updated_asset['url'] file['version'] = version if 'zsize' in file: if 'zsize' in updated_asset: file['zsize'] = updated_asset['zsize'] file['zurl'] = updated_asset['zurl'] else: file.pop('zsize', None) file.pop('zurl', None) updated = True if updated: configs[config]['updated'] = True configs[config]['content']['lastUpdatedAt'] = int(time.time()) return updated_asset def parse_existing_config(item, existing_configs): """ Parse the content of a config and add it to the existing configs. :param item: An object from S3 :type item: :class:S3.Object :param existing_configs: The existing configs :type existing_configs: `dict` """ item_key = item.key existing_config = item.get() existing_configs[item_key] = { 'content': json.loads(existing_config['Body'].read()), 'key': item_key, 'updated': False, } print('Parsed existing config `{0}`'.format(item_key)) def parse_existing_asset(item, existing_assets): """ Parse the content of an asset and add it to the existing assets. :param item: An object from S3 :type item: :class:S3.Object :param existing_assets: The existing assets :type existing_assets: `dict` """ item_key = item.key[6:] if item_key[-3:] == '.gz': item_key = item_key[:-3] existing_assets[item_key]['zipped'] = True return existing_asset = item.get() existing_assets[item_key] = { 'content': existing_asset['Body'].read(), 'version': existing_asset['Metadata']['version'], 'versionId': existing_asset['VersionId'], 'zipped': False, } print('Parsed existing asset `{0}`'.format(item_key)) def update_changed_assets(bucket, asset_dir, output_dir, only, compatible=False): """ Update assets which have changed from those versions already in the bucket. Also upload new assets not yet in the bucket. Returns a dict with updated assets and a dict of configs which may or may not have been updated due to the new assets. :param bucket: An S3 bucket to retrieve existing assets and configs from :type bucket: :class:S3.Bucket :param asset_dir: Asset directory :type asset_dir: `str` :param output_dir: Output directory for minified assets and config :type output_dir: `str` :param only: Set of asset names which should be updated, and all others skipped, or None. :type only: `set` :param compatible: If True, update existing configs to accept the new version. :type compatible: `bool` :rtype: `dict`, `dict` """ # pylint:disable=R0914 # Minify assets print('Cleaning output directory `{0}`'.format(output_dir)) if os.path.exists(output_dir): shutil.rmtree(output_dir) print('Beginning minify subprocess, from `{0}` to `{1}`'.format(asset_dir, output_dir)) subprocess.run(['./script/minify.sh', asset_dir, output_dir]) # Get existing assets from bucket bucket_objects = bucket.objects.all() existing_assets = {} existing_configs = {} for item in bucket_objects: if item.key[:7] == 'config/' and len(item.key) > 7: parse_existing_config(item, existing_configs) elif item.key[:7] == 'assets/' and len(item.key) > 7: parse_existing_asset(item, existing_assets) # Get local assets and filter for only those specified to be updated assets = get_all_assets(output_dir) assets = [x for x in assets if only is None or '/{}'.format(x[1]) in only] print('Retrieved {0} assets'.format(len(assets))) changed_assets = {} for existing_asset in assets: asset_folder = existing_asset[0] asset_name = existing_asset[1] slash_asset_name = '/{}'.format(asset_name) asset_type = get_asset_type(existing_asset[1]) if asset_name[-3:] == '.gz': continue last_version = 0 asset_content = None asset_zcontent = None upload_file = True with open(os.path.join(asset_folder, asset_name), 'rb') as asset_file: asset_content = asset_file.read() if os.path.exists(os.path.join(asset_folder, '{}.gz'.format(asset_name))): with open(os.path.join(asset_folder, '{}.gz'.format(asset_name)), 'rb') as asset_zfile: asset_zcontent = asset_zfile.read() if slash_asset_name in existing_assets: if existing_assets[slash_asset_name]['content'] == asset_content: upload_file = False else: last_version = int(existing_assets[slash_asset_name]['version']) asset_details = update_asset( bucket, slash_asset_name, asset_type, asset_content, last_version + 1, zcontent=asset_zcontent, compatible=compatible, configs=existing_configs, upload_file=upload_file ) built_asset = { 'name': slash_asset_name, 'size': asset_details['size'], 'type': asset_type, 'url': asset_details['url'], 'version': asset_details['version'], } if 'zurl' in asset_details and 'zsize' in asset_details: built_asset['zsize'] = asset_details['zsize'] built_asset['zurl'] = asset_details['zurl'] changed_assets[slash_asset_name] = built_asset return changed_assets, existing_configs def build_release_config(assets, version, description): """ Build a config for release. :param assets: Asset names and details for the config :type assets: `dict` :param version: Version for config :type version: `int` :param description: Description of the update :type description: `dict` :rtype: `str`, `dict` """ config = build_empty_config(desc_en=description['en'], desc_fr=description['fr']) for release_asset in assets: config['files'].append(assets[release_asset]) config_key = 'config/{0}.json'.format(version) config_details = { 'content': config, 'key': config_key, 'updated': True, } print('Built config file `{0}`'.format(config_key)) total_base_size, total_zipped_size, total_size = get_total_config_size(config) print('Config total download size: {0}/{1} ({2})'.format( total_base_size / 1000, total_zipped_size / 1000, total_size / 1000 )) return config_key, config_details def update_changed_configs(bucket, configs): """ Update only config files in `configs` which have the key 'updated' set to True. :param bucket: S3 bucket which all configs exist in :type bucket: :class:S3.Bucket :param configs: Dictionary of config names and details :type configs: `dict` """ for config in configs: if not configs[config]['updated']: continue print('Uploading config `{0}`'.format(configs[config]['key'])) bucket.put_object( Key=configs[config]['key'], Body=json.dumps(configs[config]['content']), ACL='public-read' ) DESCRIPTION = {'en': '', 'fr': ''} # Input validation if len(sys.argv) >= 2 and sys.argv[1] == '--dev': DEV_ASSET_DIR = '../assets_dev/' if len(sys.argv) < 3 else sys.argv[2] DEV_OUTPUT_DIR = '../assets_dev/config' if len(sys.argv) < 4 else sys.argv[3] DEV_FILENAME = 'public.json' if len(sys.argv) < 5 else sys.argv[4] DEV_APP_DIR = {} if '--ios' in sys.argv: DEV_APP_DIR['ios'] = sys.argv[sys.argv.index('--ios') + 1] if '--android' in sys.argv: DEV_APP_DIR['android'] = sys.argv[sys.argv.index('--android') + 1] if '--desc' in sys.argv: DESC_IDX = sys.argv.index('--desc') DESCRIPTION = {'en': sys.argv[DESC_IDX + 1], 'fr': sys.argv[DESC_IDX + 2]} else: DESCRIPTION = {'en': 'Test update.', 'fr': 'Mise à jour test.'} build_dev_config(DEV_ASSET_DIR, DEV_OUTPUT_DIR, DEV_APP_DIR, DEV_FILENAME, DESCRIPTION) exit() elif len(sys.argv) < 5: print('\n\tCampus Guide - Release Manager') print('\tUsage: release_manager.py', end='') print(' <bucket_name> <asset_dir> <output_dir> <#.#.#|major|minor|patch> [options]') print('\tAlt: release_manager.py', end='') print(' --dev <asset_dir> <config_dir> <config_name>', end='') print(' [--ios <config_dir>]', end='') print(' [--android <config_dir>]') print('\tExample: release_manager.py', end='') print(' <bucket_name> assets/ assets_release/ patch [options]') print('\tOptions:') print('\t--dev\t\t\tBuild a config file for dev based on the given directory') print('\t--no-new-config\t\tPush changed assets and only update configs which exist') print('\t--only <name1,...>\tUpdate only assets with the given names. Otherwise, update all') print('\t--region <region>\tAWS region') print('\t--compatible\t\tSpecify that assets changed are compatible with existing configs') print('\t--desc <en> <fr>\tEnglish and French descriptions of the config changes') print() exit() # Parse arguments BUCKET_NAME = sys.argv[1] ASSET_DIR = sys.argv[2] OUTPUT_DIR = sys.argv[3] NEW_VERSION = sys.argv[4] BUILD_CONFIG = True REGION = 'ca-central-1' ONLY_UPGRADE = None COMPATIBLE = False SKIP_ARGS = 0 if len(sys.argv) > 5: for (index, arg) in enumerate(sys.argv[5:], 5): if SKIP_ARGS > 0: SKIP_ARGS -= 1 continue if arg == '--only': SKIP_ARGS = 1 ONLY_UPGRADE = set() for asset in sys.argv[index + 1].split(','): ONLY_UPGRADE.add(asset) elif arg == '--region': SKIP_ARGS = 1 REGION = sys.argv[index + 1] elif arg == '--no-new-config': BUILD_CONFIG = False elif arg == '--compatible': COMPATIBLE = True elif arg == '--desc': DESCRIPTION = { 'en': sys.argv[index + 1], 'fr': sys.argv[index + 2], } SKIP_ARGS = 2 S3 = boto3.resource('s3') BUCKET = S3.Bucket(BUCKET_NAME) UPDATED_ASSETS, UPDATED_CONFIGS = update_changed_assets( BUCKET, ASSET_DIR, OUTPUT_DIR, ONLY_UPGRADE, compatible=COMPATIBLE) if COMPATIBLE: update_changed_configs(BUCKET, UPDATED_CONFIGS) if BUILD_CONFIG: CONFIG_VERSION = get_release_config_version(BUCKET, NEW_VERSION) CONFIG_KEY, CONFIG_DETAILS = build_release_config(UPDATED_ASSETS, CONFIG_VERSION, DESCRIPTION) update_changed_configs(BUCKET, {CONFIG_KEY: CONFIG_DETAILS})
[ "subprocess.run", "json.dump", "os.makedirs", "os.path.exists", "re.match", "json.dumps", "time.time", "boto3.resource", "sys.argv.index", "shutil.rmtree", "os.path.join" ]
[((23587, 23607), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (23601, 23607), False, 'import boto3\n'), ((3306, 3329), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (3317, 3329), False, 'import os\n'), ((8613, 8658), 're.match', 're.match', (['"""[0-9]+[.][0-9]+[.][0-9]+"""', 'version'], {}), "('[0-9]+[.][0-9]+[.][0-9]+', version)\n", (8621, 8658), False, 'import re\n'), ((15989, 16015), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (16003, 16015), False, 'import os\n'), ((16147, 16208), 'subprocess.run', 'subprocess.run', (["['./script/minify.sh', asset_dir, output_dir]"], {}), "(['./script/minify.sh', asset_dir, output_dir])\n", (16161, 16208), False, 'import subprocess\n'), ((1655, 1689), 'os.path.join', 'os.path.join', (['asset_dir', '"""**"""', '"""*"""'], {}), "(asset_dir, '**', '*')\n", (1667, 1689), False, 'import os\n'), ((3462, 3502), 'os.path.exists', 'os.path.exists', (['app_config_dir[platform]'], {}), '(app_config_dir[platform])\n', (3476, 3502), False, 'import os\n'), ((3564, 3601), 'os.makedirs', 'os.makedirs', (['app_config_dir[platform]'], {}), '(app_config_dir[platform])\n', (3575, 3601), False, 'import os\n'), ((6247, 6332), 'json.dump', 'json.dump', (['config_ios', 'config_file'], {'sort_keys': '(True)', 'ensure_ascii': '(False)', 'indent': '(2)'}), '(config_ios, config_file, sort_keys=True, ensure_ascii=False, indent=2\n )\n', (6256, 6332), False, 'import json\n'), ((6820, 6908), 'json.dump', 'json.dump', (['config_android', 'config_file'], {'sort_keys': '(True)', 'ensure_ascii': '(False)', 'indent': '(2)'}), '(config_android, config_file, sort_keys=True, ensure_ascii=False,\n indent=2)\n', (6829, 6908), False, 'import json\n'), ((16025, 16050), 'shutil.rmtree', 'shutil.rmtree', (['output_dir'], {}), '(output_dir)\n', (16038, 16050), False, 'import shutil\n'), ((21241, 21265), 'sys.argv.index', 'sys.argv.index', (['"""--desc"""'], {}), "('--desc')\n", (21255, 21265), False, 'import sys\n'), ((527, 538), 'time.time', 'time.time', ([], {}), '()\n', (536, 538), False, 'import time\n'), ((3516, 3555), 'shutil.rmtree', 'shutil.rmtree', (['app_config_dir[platform]'], {}), '(app_config_dir[platform])\n', (3529, 3555), False, 'import shutil\n'), ((6178, 6216), 'os.path.join', 'os.path.join', (['output_dir', 'filename_ios'], {}), '(output_dir, filename_ios)\n', (6190, 6216), False, 'import os\n'), ((6567, 6652), 'json.dump', 'json.dump', (['config_ios', 'config_file'], {'sort_keys': '(True)', 'ensure_ascii': '(False)', 'indent': '(2)'}), '(config_ios, config_file, sort_keys=True, ensure_ascii=False, indent=2\n )\n', (6576, 6652), False, 'import json\n'), ((6747, 6789), 'os.path.join', 'os.path.join', (['output_dir', 'filename_android'], {}), '(output_dir, filename_android)\n', (6759, 6789), False, 'import os\n'), ((7185, 7273), 'json.dump', 'json.dump', (['config_android', 'config_file'], {'sort_keys': '(True)', 'ensure_ascii': '(False)', 'indent': '(2)'}), '(config_android, config_file, sort_keys=True, ensure_ascii=False,\n indent=2)\n', (7194, 7273), False, 'import json\n'), ((4343, 4381), 'os.path.join', 'os.path.join', (['asset_folder', 'asset_name'], {}), '(asset_folder, asset_name)\n', (4355, 4381), False, 'import os\n'), ((4399, 4461), 'os.path.join', 'os.path.join', (['app_config_dir[platform]', 'asset_type', 'asset_name'], {}), '(app_config_dir[platform], asset_type, asset_name)\n', (4411, 4461), False, 'import os\n'), ((4580, 4618), 'os.path.join', 'os.path.join', (['asset_folder', 'asset_name'], {}), '(asset_folder, asset_name)\n', (4592, 4618), False, 'import os\n'), ((4879, 4917), 'os.path.join', 'os.path.join', (['asset_folder', 'asset_name'], {}), '(asset_folder, asset_name)\n', (4891, 4917), False, 'import os\n'), ((6477, 6532), 'os.path.join', 'os.path.join', (["app_config_dir['ios']", '"""base_config.json"""'], {}), "(app_config_dir['ios'], 'base_config.json')\n", (6489, 6532), False, 'import os\n'), ((7091, 7150), 'os.path.join', 'os.path.join', (["app_config_dir['android']", '"""base_config.json"""'], {}), "(app_config_dir['android'], 'base_config.json')\n", (7103, 7150), False, 'import os\n'), ((17291, 17329), 'os.path.join', 'os.path.join', (['asset_folder', 'asset_name'], {}), '(asset_folder, asset_name)\n', (17303, 17329), False, 'import os\n'), ((20554, 20592), 'json.dumps', 'json.dumps', (["configs[config]['content']"], {}), "(configs[config]['content'])\n", (20564, 20592), False, 'import json\n'), ((21057, 21080), 'sys.argv.index', 'sys.argv.index', (['"""--ios"""'], {}), "('--ios')\n", (21071, 21080), False, 'import sys\n'), ((21160, 21187), 'sys.argv.index', 'sys.argv.index', (['"""--android"""'], {}), "('--android')\n", (21174, 21187), False, 'import sys\n'), ((4169, 4219), 'os.path.join', 'os.path.join', (['app_config_dir[platform]', 'asset_type'], {}), '(app_config_dir[platform], asset_type)\n', (4181, 4219), False, 'import os\n'), ((4250, 4300), 'os.path.join', 'os.path.join', (['app_config_dir[platform]', 'asset_type'], {}), '(app_config_dir[platform], asset_type)\n', (4262, 4300), False, 'import os\n'), ((13474, 13485), 'time.time', 'time.time', ([], {}), '()\n', (13483, 13485), False, 'import time\n')]
import subprocess import argparse import os from datetime import date import time import shutil import stat import sys import json import pprint import webbrowser import MachineConfigs as machine_configs import Helpers as helpers import WriteTestResultsToHTML as write_test_results_to_html class TestsSetError(Exception): pass def get_executable_directory(configuration, test_set, runAsCollection): if runAsCollection: exe_dir = os.path.join(test_set, 'Bin') else: exe_dir = 'Bin' if os.name == 'nt': exe_dir = os.path.join(exe_dir, 'x64') if configuration.lower() == 'released3d12' or configuration.lower() == 'releasevk' : config = 'Release' else: config = 'Debug' return os.path.join(exe_dir, config) else: return exe_dir def build_solution(cloned_dir, relative_solution_filepath, configuration, rebuild): if os.name == 'nt': windows_build_script = "BuildSolution.bat" try: # Build the Batch Args. buildType = "build" if rebuild: buildType = "rebuild" batch_args = [windows_build_script, buildType, relative_solution_filepath, configuration.lower()] # Build Solution. if subprocess.call(batch_args) == 0: return 0 else: raise TestsSetError("Error building solution : " + relative_solution_filepath + " with configuration : " + configuration.lower()) except subprocess.CalledProcessError as subprocess_error: raise TestsSetError("Error building solution : " + relative_solution_filepath + " with configuration : " + configuration.lower()) else: prevDir = os.getcwd() #Call Makefile os.chdir(cloned_dir) subprocess.call(['make', 'PreBuild', '-j8']) subprocess.call(['make', 'All', '-j8']) os.chdir(prevDir) def run_test_run(executable_filepath, current_arguments, output_file_base_name, output_directory): try: # Start the process and record the time. cmd_line = executable_filepath + ' ' + current_arguments + ' -outputfilename ' + output_file_base_name + ' -outputdir ' + output_directory process = subprocess.Popen(cmd_line.split()) start_time = time.time() run_results = (True, "") # Wait for the process to finish. while process.returncode is None: process.poll() if process.returncode is not None and process.returncode > 1: run_results = (False, "Process crashed or encountered an error.") break current_time = time.time() difference_time = current_time - start_time # If the process has taken too long, kill it. if difference_time > machine_configs.machine_process_default_kill_time: print("Kill Process") process.kill() run_results = (False, "Process ran for too long, had to kill it. Please verify that the program finishes within its hang time, and that it does not crash") break return run_results except (NameError, IOError, OSError) as e: print(e.args) raise TestsSetError('Error when trying to run ' + executable_filepath + ' ' + current_arguments + ' ' + 'with outputfilename ' + output_file_base_name + ' and outputdir ' + output_directory) # Run the tests set. def run_tests_set(main_directory, rebuild, json_filepath, results_directory, reference_directory, runAsCollection): tests_set_run_data = {} # Whether all test collections in this set have succeeded tests_set_run_data['Success'] = True tests_set_run_data['Error'] = None json_data = None print(json_filepath) # Try and open and parse the json file. try: jsonfile = open(json_filepath) json_data = json.load(jsonfile) except (IOError, OSError, ValueError, json.decoder.JSONDecodeError) as e: tests_set_run_data['Success'] = False tests_set_run_data['Error'] = "Error reading test set JSON: " + str(e.args[0]) return tests_set_run_data # Try and parse the data from the json file. tests_set_run_data['Name'] = os.path.splitext(os.path.basename(json_filepath))[0] tests_set_run_data['Directory'] = os.path.dirname(json_filepath) tests_set_run_data['Tests Groups'] = json_data['Tests Groups'] tests_set_run_data['Solution Target'] = json_data['Solution Target'] tests_set_run_data['Configuration Target'] = json_data['Configuration Target'] tests_set_run_data['Reference Directory'] = os.path.join(reference_directory, tests_set_run_data['Name']) tests_set_run_data['Results Directory'] = os.path.join(results_directory, tests_set_run_data['Name']) # Build solution unless disabled by command line argument try: # Try and Build the Solution. solution_path = os.path.join(main_directory, tests_set_run_data['Solution Target']) build_solution(main_directory, solution_path, tests_set_run_data['Configuration Target'], rebuild) except TestsSetError as tests_set_error: tests_set_run_data['Error'] = tests_set_error.args tests_set_run_data['Success'] = False return tests_set_run_data # Absolute path. absolutepath = os.path.abspath(os.path.dirname(main_directory)) for current_tests_group_name in tests_set_run_data['Tests Groups']: current_tests_group = tests_set_run_data['Tests Groups'][current_tests_group_name] # Check if the test is enabled. if current_tests_group['Enabled'] == True: current_tests_group['Results'] = {} current_tests_group['Results']['Errors'] = {} # Get the executable directory. executable_directory = get_executable_directory(tests_set_run_data['Configuration Target'], tests_set_run_data['Name'], runAsCollection) executable_directory = os.path.join(absolutepath, executable_directory) # Get the results directory. current_results_directory = os.path.join(tests_set_run_data['Results Directory'], current_tests_group_name) # Create the directory, or clean it. if helpers.directory_clean_or_make(current_results_directory) is None: current_tests_group['Results']['Errors']['Global'] = "Could not clean or make required results directory. Please try manually deleting : " + current_results_directory break # Initialize all the results. current_tests_group['Results']['Run Results'] = {} current_tests_group['Results']['Directory'] = current_results_directory current_tests_group['Results']['Filename'] = {} current_tests_group['Results']['Success'] = True # Run each test. for index, current_test_args in enumerate(current_tests_group['Project Tests Args']) : # Initialize the expected filename current_tests_group['Results']['Filename'][index] = current_tests_group_name + str(index) + '.json' # Try running the test. try: executable_file = os.path.join(executable_directory, current_tests_group['Project Name']) if os.name == 'nt': executable_file += '.exe' current_test_run_result = run_test_run(executable_file, current_test_args, current_tests_group_name + str(index), current_results_directory) current_tests_group['Results']['Run Results'][index] = current_test_run_result # Update overall test set success based on whether there was an error running it. # Updating based on pass/fail checks happen later in analyze_tests_group() current_tests_group['Results']['Success'] &= current_test_run_result[0] if current_test_run_result[0] != True: current_tests_group['Results']['Errors'][index] = current_test_run_result[1] # Check if an error occurred. except (TestsSetError, IOError, OSError) as tests_set_error: current_tests_group['Results']['Errors'][index] = tests_set_error.args tests_set_run_data['Success'] = False return tests_set_run_data # Analyze test run output and fill out the dict with results def get_tests_set_results(tests_set_run_data): # Check the json results for each one that is enabled. tests_groups = tests_set_run_data['Tests Groups'] for current_tests_group_name in tests_groups: if tests_groups[current_tests_group_name]['Enabled'] == True: analyze_tests_group(tests_set_run_data, current_tests_group_name) # Check the json results for a single test. def analyze_tests_group(tests_set_data, current_test_group_name): current_test_group = tests_set_data['Tests Groups'][current_test_group_name] # current_test_group['Results']['Performance Checks'] = {} # current_test_group['Results']['Memory Checks'] = {} current_test_group['Results']['Screen Capture Checks'] = [] for index, current_test_args in enumerate(current_test_group['Project Tests Args']): if index in current_test_group['Results']['Errors']: screen_capture_checks = {} screen_capture_checks['Success'] = False screen_capture_checks['Frame Screen Captures'] = [] screen_capture_checks['Time Screen Captures'] = [] else: current_test_reference_directory = os.path.join(tests_set_data['Reference Directory'], current_test_group_name) current_test_result_directory = str(current_test_group['Results']['Directory']) result_json_filepath = str(current_test_result_directory + current_test_group['Results']['Filename'][index]) # Try and parse the data from the json file. try: result_json_file = open(result_json_filepath) result_json_data = json.load(result_json_file) except (IOError, OSError, ValueError, json.decoder.JSONDecodeError) as e: current_test_group['Results']['Errors'][index] = e.args continue # Analyze the screen captures. Assume Screen Capture test if no config specified if 'Test Config' not in current_test_group or current_test_group['Test Config']['Type'] == "Image Compare": tolerance = 0.0 if 'Test Config' in current_test_group and 'Tolerance' in current_test_group['Test Config']: tolerance = current_test_group['Test Config']['Tolerance'] screen_capture_checks = analyze_screen_captures(tolerance, result_json_data, current_test_result_directory, current_test_reference_directory) # # Analyze the performance checks. # if current_test_group['Test Config']['Type'] == "Performance Test": # performance_checks = analyze_performance_checks(result_json_data) # current_test_group['Results']['Performance Checks'][index] = performance_checks # # When check implemented, update success # # Analyze the memory checks. # if current_test_group['Test Config']['Type'] == "Memory Test": # memory_checks = analyze_memory_checks(result_json_data) # current_test_group['Results']['Memory Checks'][index] = memory_checks # # When check implemented, update success current_test_group['Results']['Screen Capture Checks'].append(screen_capture_checks) tests_set_data['Success'] &= screen_capture_checks['Success'] # Analyze the Performance Checks. def analyze_performance_checks(result_json_data): return [] # Analyze the Memory Checks. def analyze_memory_checks(result_json_data): return [] def analyze_screen_captures(tolerance, result_json_data, current_test_result_directory, current_test_reference_directory): screen_captures_results = {} screen_captures_results['Success'] = True for key in ['Frame Screen Captures', 'Time Screen Captures']: screen_captures_results[key] = [] for index, frame_screen_captures in enumerate(result_json_data[key]): # Get the test result image. test_result_image_filename = os.path.join(current_test_result_directory, str(frame_screen_captures['Filename'])) # Get the reference image. test_reference_image_filename = os.path.join(current_test_reference_directory, str(frame_screen_captures['Filename'])) # Create the test compare image. test_compare_image_filepath = os.path.join(current_test_result_directory, os.path.splitext(frame_screen_captures['Filename'])[0] + '_Compare.png') # Run ImageMagick image_compare_command = ['magick', 'compare', '-metric', 'MSE', '-compose', 'Src', '-highlight-color', 'White', '-lowlight-color', 'Black', test_result_image_filename, test_reference_image_filename, test_compare_image_filepath] if os.name == 'nt': image_compare_process = subprocess.Popen(image_compare_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) else: #don't need "magick" first or shell=True if on linux image_compare_command.pop(0) image_compare_process = subprocess.Popen(image_compare_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) image_compare_result = image_compare_process.communicate()[0] # Decode if image compare result is a "binary" string try: image_compare_result = image_compare_result.decode('ascii') except AttributeError: pass # Keep the Return Code and the Result. result = {} # Image compare succeeded if image_compare_process.returncode <= 1: # 0: Success, 1: Does not match, 2: File not found, or other error? result_str = image_compare_result[:image_compare_result.find(' ')] result['Compare Result'] = result_str result['Test Passed'] = float(result_str) <= tolerance # Error else: result['Compare Result'] = "Error" result['Test Passed'] = False result['Return Code'] = image_compare_process.returncode result['Source Filename'] = test_result_image_filename result['Reference Filename'] = test_reference_image_filename screen_captures_results['Success'] &= result['Test Passed'] screen_captures_results[key].append(result) return screen_captures_results def main(): # Argument Parser. parser = argparse.ArgumentParser() # Add the Argument for which solution. parser.add_argument('-md', '--main_directory', action='store', help='Specify the path to the top level directory of Falcor. The path in the Tests Set file is assumed to be relative to that.') # Add the Argument for which configuration. parser.add_argument('-rb', '--rebuild', action='store_true', help='Specify whether or not to rebuild the solution.') # Add the Argument for which Tests Set to run. parser.add_argument('-ts', '--tests_set', action='store', help='Specify the Tests Set file.') #Which branch to compare to parser.add_argument('-br', '--branch', action='store', help='Branch of reference to compare to') #Which machine name to use for reference comparison parser.add_argument('-mn', '--machine_name', action='store', help='Which machine name to use as reference') # Parse the Arguments. args = parser.parse_args() # Get the machine constants. main_results_directory = machine_configs.machine_relative_checkin_local_results_directory if not args.tests_set: print('ERROR: No test set supplied.') return main_directory = '' if not args.main_directory: print('No Main directory supplied, assuming running from test dir, assuming default ' + str(machine_configs.default_main_dir)) main_directory = machine_configs.default_main_dir else: main_directory = args.main_directory branch = '' if not args.branch: print('No Branch supplied, using default: ' + str(machine_configs.default_reference_branch_name)) branch = 'master' else: branch = str(args.branch) ref_machine_name = '' if not args.machine_name: print('No reference machine name supplied, using default: ' + str(machine_configs.default_reference_machine_name)) ref_machine_name = machine_configs.default_reference_machine_name else: ref_machine_name = str(args.machine_name) main_reference_directory = os.path.join(machine_configs.machine_reference_directory, ref_machine_name, branch) # Run the Test Set. tests_set_data = run_tests_set(main_directory, args.rebuild, args.tests_set, main_results_directory, main_reference_directory, False) if tests_set_data['Success'] == False: print(tests_set_data['Error']) return # Build the Tests Results. get_tests_set_results(tests_set_data) # Write the Tests Results to HTML. html_file_content = write_test_results_to_html.write_test_set_results_to_html(tests_set_data) # Output the file to disk. # Build path and filename html_file_path = os.path.join(machine_configs.machine_relative_checkin_local_results_directory, helpers.build_html_filename(tests_set_data)) html_file = open(html_file_path, 'w') html_file.write(html_file_content) html_file.close() # Open it up. if os.name == 'nt': os.system("start " + html_file_path) else: webbrowser.open('file://' + os.path.abspath(html_file_path)) if __name__ == '__main__': main()
[ "subprocess.Popen", "json.load", "os.path.abspath", "argparse.ArgumentParser", "os.path.basename", "os.getcwd", "os.path.dirname", "os.system", "time.time", "subprocess.call", "Helpers.build_html_filename", "Helpers.directory_clean_or_make", "WriteTestResultsToHTML.write_test_set_results_to_html", "os.path.splitext", "os.path.join", "os.chdir" ]
[((4376, 4406), 'os.path.dirname', 'os.path.dirname', (['json_filepath'], {}), '(json_filepath)\n', (4391, 4406), False, 'import os\n'), ((4678, 4739), 'os.path.join', 'os.path.join', (['reference_directory', "tests_set_run_data['Name']"], {}), "(reference_directory, tests_set_run_data['Name'])\n", (4690, 4739), False, 'import os\n'), ((4786, 4845), 'os.path.join', 'os.path.join', (['results_directory', "tests_set_run_data['Name']"], {}), "(results_directory, tests_set_run_data['Name'])\n", (4798, 4845), False, 'import os\n'), ((14994, 15019), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (15017, 15019), False, 'import argparse\n'), ((17060, 17147), 'os.path.join', 'os.path.join', (['machine_configs.machine_reference_directory', 'ref_machine_name', 'branch'], {}), '(machine_configs.machine_reference_directory, ref_machine_name,\n branch)\n', (17072, 17147), False, 'import os\n'), ((17543, 17616), 'WriteTestResultsToHTML.write_test_set_results_to_html', 'write_test_results_to_html.write_test_set_results_to_html', (['tests_set_data'], {}), '(tests_set_data)\n', (17600, 17616), True, 'import WriteTestResultsToHTML as write_test_results_to_html\n'), ((451, 480), 'os.path.join', 'os.path.join', (['test_set', '"""Bin"""'], {}), "(test_set, 'Bin')\n", (463, 480), False, 'import os\n'), ((566, 594), 'os.path.join', 'os.path.join', (['exe_dir', '"""x64"""'], {}), "(exe_dir, 'x64')\n", (578, 594), False, 'import os\n'), ((777, 806), 'os.path.join', 'os.path.join', (['exe_dir', 'config'], {}), '(exe_dir, config)\n', (789, 806), False, 'import os\n'), ((1759, 1770), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1768, 1770), False, 'import os\n'), ((1802, 1822), 'os.chdir', 'os.chdir', (['cloned_dir'], {}), '(cloned_dir)\n', (1810, 1822), False, 'import os\n'), ((1831, 1875), 'subprocess.call', 'subprocess.call', (["['make', 'PreBuild', '-j8']"], {}), "(['make', 'PreBuild', '-j8'])\n", (1846, 1875), False, 'import subprocess\n'), ((1884, 1923), 'subprocess.call', 'subprocess.call', (["['make', 'All', '-j8']"], {}), "(['make', 'All', '-j8'])\n", (1899, 1923), False, 'import subprocess\n'), ((1932, 1949), 'os.chdir', 'os.chdir', (['prevDir'], {}), '(prevDir)\n', (1940, 1949), False, 'import os\n'), ((2330, 2341), 'time.time', 'time.time', ([], {}), '()\n', (2339, 2341), False, 'import time\n'), ((3937, 3956), 'json.load', 'json.load', (['jsonfile'], {}), '(jsonfile)\n', (3946, 3956), False, 'import json\n'), ((4980, 5047), 'os.path.join', 'os.path.join', (['main_directory', "tests_set_run_data['Solution Target']"], {}), "(main_directory, tests_set_run_data['Solution Target'])\n", (4992, 5047), False, 'import os\n'), ((5397, 5428), 'os.path.dirname', 'os.path.dirname', (['main_directory'], {}), '(main_directory)\n', (5412, 5428), False, 'import os\n'), ((17780, 17823), 'Helpers.build_html_filename', 'helpers.build_html_filename', (['tests_set_data'], {}), '(tests_set_data)\n', (17807, 17823), True, 'import Helpers as helpers\n'), ((17979, 18015), 'os.system', 'os.system', (["('start ' + html_file_path)"], {}), "('start ' + html_file_path)\n", (17988, 18015), False, 'import os\n'), ((2695, 2706), 'time.time', 'time.time', ([], {}), '()\n', (2704, 2706), False, 'import time\n'), ((4302, 4333), 'os.path.basename', 'os.path.basename', (['json_filepath'], {}), '(json_filepath)\n', (4318, 4333), False, 'import os\n'), ((6021, 6069), 'os.path.join', 'os.path.join', (['absolutepath', 'executable_directory'], {}), '(absolutepath, executable_directory)\n', (6033, 6069), False, 'import os\n'), ((6151, 6230), 'os.path.join', 'os.path.join', (["tests_set_run_data['Results Directory']", 'current_tests_group_name'], {}), "(tests_set_run_data['Results Directory'], current_tests_group_name)\n", (6163, 6230), False, 'import os\n'), ((9705, 9781), 'os.path.join', 'os.path.join', (["tests_set_data['Reference Directory']", 'current_test_group_name'], {}), "(tests_set_data['Reference Directory'], current_test_group_name)\n", (9717, 9781), False, 'import os\n'), ((1299, 1326), 'subprocess.call', 'subprocess.call', (['batch_args'], {}), '(batch_args)\n', (1314, 1326), False, 'import subprocess\n'), ((6296, 6354), 'Helpers.directory_clean_or_make', 'helpers.directory_clean_or_make', (['current_results_directory'], {}), '(current_results_directory)\n', (6327, 6354), True, 'import Helpers as helpers\n'), ((10167, 10194), 'json.load', 'json.load', (['result_json_file'], {}), '(result_json_file)\n', (10176, 10194), False, 'import json\n'), ((13320, 13426), 'subprocess.Popen', 'subprocess.Popen', (['image_compare_command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT', 'shell': '(True)'}), '(image_compare_command, stdout=subprocess.PIPE, stderr=\n subprocess.STDOUT, shell=True)\n', (13336, 13426), False, 'import subprocess\n'), ((13607, 13701), 'subprocess.Popen', 'subprocess.Popen', (['image_compare_command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(image_compare_command, stdout=subprocess.PIPE, stderr=\n subprocess.STDOUT)\n', (13623, 13701), False, 'import subprocess\n'), ((18062, 18093), 'os.path.abspath', 'os.path.abspath', (['html_file_path'], {}), '(html_file_path)\n', (18077, 18093), False, 'import os\n'), ((7276, 7347), 'os.path.join', 'os.path.join', (['executable_directory', "current_tests_group['Project Name']"], {}), "(executable_directory, current_tests_group['Project Name'])\n", (7288, 7347), False, 'import os\n'), ((12903, 12954), 'os.path.splitext', 'os.path.splitext', (["frame_screen_captures['Filename']"], {}), "(frame_screen_captures['Filename'])\n", (12919, 12954), False, 'import os\n')]
import requests from lxml import etree import time import os file_dir = r"D:\A\blog_img" def crawl(url): response = requests.get(url=url) response.encoding = "GBK" html = etree.HTML(response.text) urls = html.xpath("//table/tr/td/table/tbody/tr/td/table/tr[1]/td/div/a/@href") for url in urls: print(url) response = requests.get(url=url) response.encoding = "GBK" html = etree.HTML(response.text) try: down_url = html.xpath("//div[@class='down']/a[2]/@href")[0] img_name = html.xpath("//div[@class='PhotoDiv']/h2/text()")[0] except Exception as e: print(e) continue print(down_url) print(img_name) time.sleep(2) response = requests.get(url=down_url) file_name = os.path.join(file_dir, img_name + ".rar") with open(file_name, "wb") as f: f.write(response.content) time.sleep(2) if __name__ == '__main__': root_url_a = "http://so.sccnn.com/search/%E9%A3%8E%E6%99%AF/" root_url_b = ".html" for i in range(150): root_url = root_url_a + str(i) + root_url_b crawl(root_url)
[ "os.path.join", "lxml.etree.HTML", "requests.get", "time.sleep" ]
[((124, 145), 'requests.get', 'requests.get', ([], {'url': 'url'}), '(url=url)\n', (136, 145), False, 'import requests\n'), ((188, 213), 'lxml.etree.HTML', 'etree.HTML', (['response.text'], {}), '(response.text)\n', (198, 213), False, 'from lxml import etree\n'), ((359, 380), 'requests.get', 'requests.get', ([], {'url': 'url'}), '(url=url)\n', (371, 380), False, 'import requests\n'), ((430, 455), 'lxml.etree.HTML', 'etree.HTML', (['response.text'], {}), '(response.text)\n', (440, 455), False, 'from lxml import etree\n'), ((747, 760), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (757, 760), False, 'import time\n'), ((781, 807), 'requests.get', 'requests.get', ([], {'url': 'down_url'}), '(url=down_url)\n', (793, 807), False, 'import requests\n'), ((829, 870), 'os.path.join', 'os.path.join', (['file_dir', "(img_name + '.rar')"], {}), "(file_dir, img_name + '.rar')\n", (841, 870), False, 'import os\n'), ((960, 973), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (970, 973), False, 'import time\n')]
import struct #import toml def bitness(): return struct.calcsize("P") * 8 #def load_config(): # with open('Config/config.toml') as f: # return toml.load(f)
[ "struct.calcsize" ]
[((54, 74), 'struct.calcsize', 'struct.calcsize', (['"""P"""'], {}), "('P')\n", (69, 74), False, 'import struct\n')]
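One plausible way to derive an apis column like the one above from a code field is a short walk over the AST's call expressions. The sketch below is an assumption about how such data could be produced, not the dataset's actual pipeline; it recovers call targets as written in the source (struct.calcsize here), so aliased imports such as 'import OpenGraph as og' would still need the import statement kept in the last field of each extract_api record to resolve og back to OpenGraph:

import ast

def called_names(code):
    # Collect call targets, joining dotted attribute chains
    # (e.g. struct.calcsize, og.Graph, os.path.join).
    names = set()
    for node in ast.walk(ast.parse(code)):
        if not isinstance(node, ast.Call):
            continue
        parts, target = [], node.func
        while isinstance(target, ast.Attribute):
            parts.append(target.attr)
            target = target.value
        if isinstance(target, ast.Name):
            parts.append(target.id)
            names.add('.'.join(reversed(parts)))
    return sorted(names)

# called_names('import struct\ndef bitness():\n    return struct.calcsize("P") * 8')
# -> ['struct.calcsize']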
# -*- coding: utf-8 -*- # @Time : 2018/8/23 22:21 # @Author : zhoujun import os import cv2 import numpy as np import torch from utils import CTCLabelConverter,AttnLabelConverter from data_loader import get_transforms class PytorchNet: def __init__(self, model_path, gpu_id=None): """ Initialize the model :param model_path: path to the model checkpoint :param gpu_id: which GPU to run on """ checkpoint = torch.load(model_path) print(f"load {checkpoint['epoch']} epoch params") config = checkpoint['config'] alphabet = config['dataset']['alphabet'] if gpu_id is not None and isinstance(gpu_id, int) and torch.cuda.is_available(): self.device = torch.device("cuda:%s" % gpu_id) else: self.device = torch.device("cpu") print('device:', self.device) self.transform = [] for t in config['dataset']['train']['dataset']['args']['transforms']: if t['type'] in ['ToTensor', 'Normalize']: self.transform.append(t) self.transform = get_transforms(self.transform) self.gpu_id = gpu_id img_h, img_w = 32, 100 for process in config['dataset']['train']['dataset']['args']['pre_processes']: if process['type'] == "Resize": img_h = process['args']['img_h'] img_w = process['args']['img_w'] break self.img_w = img_w self.img_h = img_h self.img_mode = config['dataset']['train']['dataset']['args']['img_mode'] self.alphabet = alphabet img_channel = 3 if config['dataset']['train']['dataset']['args']['img_mode'] != 'GRAY' else 1 if config['arch']['args']['prediction']['type'] == 'CTC': self.converter = CTCLabelConverter(config['dataset']['alphabet']) elif config['arch']['args']['prediction']['type'] == 'Attn': self.converter = AttnLabelConverter(config['dataset']['alphabet']) self.net = get_model(img_channel, len(self.converter.character), config['arch']['args']) self.net.load_state_dict(checkpoint['state_dict']) # self.net = torch.jit.load('crnn_lite_gpu.pt') self.net.to(self.device) self.net.eval() sample_input = torch.zeros((2, img_channel, img_h, img_w)).to(self.device) self.net.get_batch_max_length(sample_input) def predict(self, img_path, model_save_path=None): """ Run prediction on the given image; supports an image path or a numpy array :param img_path: path to the image :return: """ assert os.path.exists(img_path), 'file does not exist' img = self.pre_processing(img_path) tensor = self.transform(img) tensor = tensor.unsqueeze(dim=0) tensor = tensor.to(self.device) preds, tensor_img = self.net(tensor) preds = preds.softmax(dim=2).detach().cpu().numpy() # result = decode(preds, self.alphabet, raw=True) # print(result) result = self.converter.decode(preds) if model_save_path is not None: # Export the model for deployment save(self.net, tensor, model_save_path) return result, tensor_img def pre_processing(self, img_path): """ Preprocess the image: resize to the target height first; if the resulting width is below the target width, pad with black pixels, otherwise force-scale to the target width :param img_path: path to the image :return: """ img = cv2.imread(img_path, 1 if self.img_mode != 'GRAY' else 0) if self.img_mode == 'RGB': img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) h, w = img.shape[:2] ratio_h = float(self.img_h) / h new_w = int(w * ratio_h) if new_w < self.img_w: img = cv2.resize(img, (new_w, self.img_h)) step = np.zeros((self.img_h, self.img_w - new_w, img.shape[-1]), dtype=img.dtype) img = np.column_stack((img, step)) else: img = cv2.resize(img, (self.img_w, self.img_h)) return img def save(net, input, save_path): # A model traced on GPU can only be used on GPU, and one traced on CPU only on CPU net.eval() traced_script_module = torch.jit.trace(net, input) traced_script_module.save(save_path) if __name__ == '__main__': from modeling import get_model import time from matplotlib import pyplot as plt from matplotlib.font_manager import FontProperties font = FontProperties(fname=r"msyh.ttc", size=14) img_path = '0.jpg' model_path = 'crnn_None_VGG_RNN_Attn/checkpoint/model_latest.pth' crnn_net = PytorchNet(model_path=model_path, gpu_id=0) start = time.time() for i in range(1): result, img = crnn_net.predict(img_path) break print((time.time() - start) *1000/ 1) label = result[0][0] print(result) # plt.title(label, fontproperties=font) # plt.imshow(img.detach().cpu().numpy().squeeze().transpose((1, 2, 0)), cmap='gray') # plt.show()
[ "torch.jit.trace", "matplotlib.font_manager.FontProperties", "cv2.cvtColor", "torch.load", "data_loader.get_transforms", "os.path.exists", "utils.CTCLabelConverter", "numpy.zeros", "time.time", "numpy.column_stack", "cv2.imread", "utils.AttnLabelConverter", "torch.cuda.is_available", "torch.device", "torch.zeros", "cv2.resize" ]
[((4020, 4047), 'torch.jit.trace', 'torch.jit.trace', (['net', 'input'], {}), '(net, input)\n', (4035, 4047), False, 'import torch\n'), ((4277, 4318), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {'fname': '"""msyh.ttc"""', 'size': '(14)'}), "(fname='msyh.ttc', size=14)\n", (4291, 4318), False, 'from matplotlib.font_manager import FontProperties\n'), ((4486, 4497), 'time.time', 'time.time', ([], {}), '()\n', (4495, 4497), False, 'import time\n'), ((415, 437), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (425, 437), False, 'import torch\n'), ((1057, 1087), 'data_loader.get_transforms', 'get_transforms', (['self.transform'], {}), '(self.transform)\n', (1071, 1087), False, 'from data_loader import get_transforms\n'), ((2544, 2568), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (2558, 2568), False, 'import os\n'), ((3333, 3390), 'cv2.imread', 'cv2.imread', (['img_path', "(1 if self.img_mode != 'GRAY' else 0)"], {}), "(img_path, 1 if self.img_mode != 'GRAY' else 0)\n", (3343, 3390), False, 'import cv2\n'), ((645, 670), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (668, 670), False, 'import torch\n'), ((698, 730), 'torch.device', 'torch.device', (["('cuda:%s' % gpu_id)"], {}), "('cuda:%s' % gpu_id)\n", (710, 730), False, 'import torch\n'), ((771, 790), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (783, 790), False, 'import torch\n'), ((1767, 1815), 'utils.CTCLabelConverter', 'CTCLabelConverter', (["config['dataset']['alphabet']"], {}), "(config['dataset']['alphabet'])\n", (1784, 1815), False, 'from utils import CTCLabelConverter, AttnLabelConverter\n'), ((3444, 3480), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (3456, 3480), False, 'import cv2\n'), ((3633, 3669), 'cv2.resize', 'cv2.resize', (['img', '(new_w, self.img_h)'], {}), '(img, (new_w, self.img_h))\n', (3643, 3669), False, 'import cv2\n'), ((3689, 3763), 'numpy.zeros', 'np.zeros', (['(self.img_h, self.img_w - new_w, img.shape[-1])'], {'dtype': 'img.dtype'}), '((self.img_h, self.img_w - new_w, img.shape[-1]), dtype=img.dtype)\n', (3697, 3763), True, 'import numpy as np\n'), ((3782, 3810), 'numpy.column_stack', 'np.column_stack', (['(img, step)'], {}), '((img, step))\n', (3797, 3810), True, 'import numpy as np\n'), ((3843, 3884), 'cv2.resize', 'cv2.resize', (['img', '(self.img_w, self.img_h)'], {}), '(img, (self.img_w, self.img_h))\n', (3853, 3884), False, 'import cv2\n'), ((1914, 1963), 'utils.AttnLabelConverter', 'AttnLabelConverter', (["config['dataset']['alphabet']"], {}), "(config['dataset']['alphabet'])\n", (1932, 1963), False, 'from utils import CTCLabelConverter, AttnLabelConverter\n'), ((2256, 2299), 'torch.zeros', 'torch.zeros', (['(2, img_channel, img_h, img_w)'], {}), '((2, img_channel, img_h, img_w))\n', (2267, 2299), False, 'import torch\n'), ((4595, 4606), 'time.time', 'time.time', ([], {}), '()\n', (4604, 4606), False, 'import time\n')]
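The resize-and-pad step in pre_processing above is the piece most worth lifting out on its own: scale to a fixed height, then right-pad with black pixels or squash to fit. A minimal standalone sketch of the same idea, assuming a 3-channel H x W x C input; the function name and default sizes are illustrative, not from the source.

import cv2
import numpy as np


def resize_and_pad(img: np.ndarray, img_h: int = 32, img_w: int = 100) -> np.ndarray:
    h, w = img.shape[:2]
    new_w = int(w * img_h / h)  # keep the aspect ratio at the target height
    if new_w < img_w:
        img = cv2.resize(img, (new_w, img_h))
        pad = np.zeros((img_h, img_w - new_w, img.shape[-1]), dtype=img.dtype)
        img = np.column_stack((img, pad))  # pad the right edge with black
    else:
        img = cv2.resize(img, (img_w, img_h))  # too wide: squash to fit
    return img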
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 24 20:37:03 2020

@author: Shiro
"""

import pandas as pd
import copy
import numpy as np

# put the CSV files holding each model's predictions into `listes`
listes = ["model_pl_A-5folds-CV-seed42-bs16-mixup.csv", "model_pl_B-5folds-CV-seed42-bs16-mixup.csv"]

df = [pd.read_csv(l) for l in listes]

cols = df[0].columns[1:]
arr = np.stack([d[cols].values for d in df]).mean(0)

final = copy.deepcopy(df[0])
final[cols] = arr

# csv filename output
final.to_csv("submission_ensemblingv7-PL.csv", index=False)
[ "pandas.read_csv", "copy.deepcopy", "numpy.stack" ]
[((460, 480), 'copy.deepcopy', 'copy.deepcopy', (['df[0]'], {}), '(df[0])\n', (473, 480), False, 'import copy\n'), ((337, 351), 'pandas.read_csv', 'pd.read_csv', (['l'], {}), '(l)\n', (348, 351), True, 'import pandas as pd\n'), ((402, 440), 'numpy.stack', 'np.stack', (['[d[cols].values for d in df]'], {}), '([d[cols].values for d in df])\n', (410, 440), True, 'import numpy as np\n')]
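The two-file blend above generalises to any number of prediction CSVs. A small sketch, assuming column 0 is an id column and the remaining columns are the per-class predictions to average; the function name is illustrative.

import numpy as np
import pandas as pd


def ensemble_csv(paths, out_path):
    # average the prediction columns of several CSVs that share an id column
    frames = [pd.read_csv(p) for p in paths]
    pred_cols = frames[0].columns[1:]
    blended = frames[0].copy()
    blended[pred_cols] = np.stack([f[pred_cols].values for f in frames]).mean(axis=0)
    blended.to_csv(out_path, index=False)
    return blended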
import pygame import adventure import adventure.sound LEFT_BLOCK = [[1, 0]] RIGHT_BLOCK = [[-1, 0]] TOP_BLOCK = [[0, 1]] BOTTOM_BLOCK = [[0, -1]] ALL_BLOCK = LEFT_BLOCK + RIGHT_BLOCK + TOP_BLOCK + BOTTOM_BLOCK SPRITE_FALL = "fall" SPRITE_JUMP = "jump" SPRITE_DJUMP = "djump" SPRITE_IDLE = "idle" SPRITE_RUN = "run" MOV_SPEED = 320 JUMP_POWER = 380 DJUMP_POWER = 280 class Character: w = 0 h = 0 x = 0 y = 0 vx = 0 vy = 0 status = None sprite = None delay = 0 target = None last_y = -99999999 last_blocks = None last_test = None djump = True def __init__(self, x, y, w, h): self.x = x self.y = y self.w = w self.h = h self.status = StatusIdle() self.sprite = SPRITE_IDLE self.last_test = LEFT_BLOCK def handle(self, inputs): self.status.handle(self, inputs) if inputs[pygame.K_o]: print("x:",int(self.x), "xb:", int(self.x/32),"y:",int(self.y),"yb:",int(self.y/32)) self.vx = 0 if inputs[pygame.K_a]: self.vx = -MOV_SPEED elif inputs[pygame.K_d]: self.vx = MOV_SPEED def update(self, delay): self.delay = delay self.status.update(self, delay) dvx = (self.vx * delay) if dvx != 0.0000: test = LEFT_BLOCK if dvx < 0 else RIGHT_BLOCK self.last_test = test else: test = self.last_test self.update_target(test) if self.target is not None: size = adventure.default.block_size x, y = self.target t_x = (((x + 1) * size + 1) if dvx < 0 else (x * size) - 1) w = 0 if dvx < 0 else self.w if test == LEFT_BLOCK: if (self.x + dvx) - t_x < 0: self.x = t_x - w dvx = 0 else: if (self.x + w + dvx) - t_x > 0: self.x = t_x - w dvx = 0 self.x += dvx self.y += (self.vy * delay) def draw(self, canvas): r = self.last_test == LEFT_BLOCK if self.status.name == "idle" or self.status.name == "run" or self.status.name == "djump": i = self.status.index % self.status.count p = adventure.default.texture.get_texture(self.sprite, r) s = p.subsurface((i * self.w, 0, self.w, self.h)) else: s = adventure.default.texture.get_texture(self.sprite, r) canvas.blit(s, (self.x, self.y)) def get_current_block(self): block_size = adventure.default.block_size x = self.x / block_size y = self.y / block_size c_w = self.x + self.w c_h = self.y + self.h c_w1 = c_w / block_size - x c_h1 = c_h / block_size - y w = c_w1 + 1 if ((c_w % block_size) > 0.5) else c_w1 h = c_h1 + 1 if ((c_h % block_size) > 0.5) else c_h1 return pygame.Rect(x, y, w, h) def get_surrounding_block(self, test): r = self.get_current_block() test_w = range(-1, r.w * 2) test_h = range(-1, r.h * 2) result = [] for offset_y in range(0, r.h * 2 + r.h % 2): pos_y = r.y + test_h[offset_y] for offset_x in range(0, r.w * 2 + r.w % 2): pos_x = r.x + test_w[offset_x] for t in test: mx, my = t if r.collidepoint(pos_x + mx, pos_y + my): result.append((pos_x, pos_y)) return result def update_target(self, test): self.last_blocks = self.get_surrounding_block(test) adventure.default.draw_blocks() self.target = None flag = False; for block in self.last_blocks: x, y = block b = adventure.default.get_block_id(x, y) if b is not None and flag is False: self.target = block flag = True def on_collision(self, b_id): block = adventure.default.blocks[b_id] if "collision" in block: if block["collision"] == "died": adventure.default.restart() elif block["collision"] == "jump": self.status = StatusDJump() self.sprite = SPRITE_DJUMP self.djump = False self.vy = -600 elif block["collision"] == "collectMusic": adventure.default.sound_master.add_rhythm_beat(block["musicID"]) block["draw"] = "hidden" if "transport" in block: adventure.default.load_level(block["transport"]) class StatusIdle: name = "idle" index = 0 count = 11 
delay_sum = 0 def handle(self, instance, inputs=None): if inputs is not None: if inputs[pygame.K_w] or inputs[pygame.K_SPACE]: instance.status = StatusJump() instance.sprite = SPRITE_JUMP instance.vy = -JUMP_POWER if inputs[pygame.K_d] or inputs[pygame.K_a]: instance.status = StatusRun() instance.sprite = SPRITE_RUN def update(self, instance, delay): if self.delay_sum > 0.05: self.index += 1; self.delay_sum = 0 self.delay_sum += delay block = instance.get_surrounding_block(BOTTOM_BLOCK) fall = True for b in block: x, y = b b_id = adventure.default.get_block_id(x, y) if b_id is not None: instance.on_collision(b_id) fall = False if fall: instance.vy = 10 instance.status = StatusFall() instance.sprite = SPRITE_FALL class StatusJump: name = "jump" target = None release = False def handle(self, instance, inputs=None): if inputs is not None: if inputs[pygame.K_w] or inputs[pygame.K_SPACE]: if self.release and instance.djump: instance.status = StatusDJump() instance.sprite = SPRITE_DJUMP instance.vy = -DJUMP_POWER instance.djump = False else: self.release = True def update(self, instance, delay): instance.vy += (adventure.GRAVITY * delay) if instance.vy > 0: instance.status = StatusFall() instance.sprite = SPRITE_FALL else: self.update_target(instance) if self.target is not None: size = adventure.default.block_size x, y = self.target t_y = (y + 1) * size if (instance.y - (adventure.GRAVITY * delay)) - t_y < 0: instance.y = t_y + 1 instance.status = StatusFall() instance.sprite = SPRITE_FALL b = adventure.default.get_block_id(x, y) instance.on_collision(b) instance.vy = 1 def update_target(self, instance): blocks = instance.get_surrounding_block(TOP_BLOCK) self.target = None for block in blocks: x, y = block b = adventure.default.get_block_id(x, y) if b is not None: self.target = block break class StatusFall: name = "fall" target = None def handle(self, instance, inputs=None): if inputs is not None: if (inputs[pygame.K_w] or inputs[pygame.K_SPACE]) and instance.djump: instance.status = StatusDJump() instance.sprite = SPRITE_DJUMP instance.vy = -DJUMP_POWER instance.djump = False def update(self, instance: Character, delay): instance.vy += (adventure.GRAVITY * delay) self.update_target(instance) size = adventure.default.block_size if self.target is not None: x, y = self.target t_y = y * size if ((instance.y + instance.h) + (adventure.GRAVITY * delay)) - t_y > 0: instance.y = t_y - instance.h - 1 instance.status = StatusIdle() instance.sprite = SPRITE_IDLE instance.djump = True instance.vy = 0 def update_target(self, instance): blocks = instance.get_surrounding_block(BOTTOM_BLOCK) self.target = None for block in blocks: x, y = block b = adventure.default.get_block_id(x, y) if b is not None: self.target = block break class StatusRun: name = "run" index = 0 count = 12 delay_sum = 0 def handle(self, instance, inputs=None): if inputs is not None: if (inputs[pygame.K_w] or inputs[pygame.K_SPACE]) or inputs[pygame.K_SPACE]: instance.status = StatusJump() instance.sprite = SPRITE_JUMP instance.vy = -JUMP_POWER elif not inputs[pygame.K_a] and not inputs[pygame.K_d]: instance.status = StatusIdle() instance.sprite = SPRITE_IDLE def update(self, instance, delay): if self.delay_sum > 0.05: self.index += 1; self.delay_sum = 0 self.delay_sum += delay block = instance.get_surrounding_block(BOTTOM_BLOCK) fall = True for b in block: x, y = b b_id = adventure.default.get_block_id(x, y) if b_id is not None: fall = False instance.on_collision(b_id) if fall: instance.vy = 10 instance.status = StatusFall() 
instance.sprite = SPRITE_FALL class StatusDJump: name = "djump" index = 0 count = 6 delay_sum = 0 target = None def handle(self, instance, inputs): pass def update(self, instance, delay): if self.delay_sum > 0.05: self.index += 1; self.delay_sum = 0 self.delay_sum += delay instance.vy += (adventure.GRAVITY * delay) if instance.vy > 0: instance.status = StatusFall() instance.sprite = SPRITE_FALL else: self.update_target(instance) if self.target is not None: size = adventure.default.block_size x, y = self.target t_y = (y + 1) * size if (instance.y - (adventure.GRAVITY * delay)) - t_y < 0: b = adventure.default.get_block_id(x, y) instance.on_collision(b) instance.y = t_y + 1 instance.status = StatusFall() instance.sprite = SPRITE_FALL instance.vy = 1 def update_target(self, instance): blocks = instance.get_surrounding_block(TOP_BLOCK) self.target = None for block in blocks: x, y = block b = adventure.default.get_block_id(x, y) if b is not None: self.target = block break
[ "adventure.default.draw_blocks", "adventure.default.texture.get_texture", "pygame.Rect", "adventure.default.sound_master.add_rhythm_beat", "adventure.default.restart", "adventure.default.get_block_id", "adventure.default.load_level" ]
[((2976, 2999), 'pygame.Rect', 'pygame.Rect', (['x', 'y', 'w', 'h'], {}), '(x, y, w, h)\n', (2987, 2999), False, 'import pygame\n'), ((3679, 3710), 'adventure.default.draw_blocks', 'adventure.default.draw_blocks', ([], {}), '()\n', (3708, 3710), False, 'import adventure\n'), ((2318, 2371), 'adventure.default.texture.get_texture', 'adventure.default.texture.get_texture', (['self.sprite', 'r'], {}), '(self.sprite, r)\n', (2355, 2371), False, 'import adventure\n'), ((2464, 2517), 'adventure.default.texture.get_texture', 'adventure.default.texture.get_texture', (['self.sprite', 'r'], {}), '(self.sprite, r)\n', (2501, 2517), False, 'import adventure\n'), ((3840, 3876), 'adventure.default.get_block_id', 'adventure.default.get_block_id', (['x', 'y'], {}), '(x, y)\n', (3870, 3876), False, 'import adventure\n'), ((4616, 4664), 'adventure.default.load_level', 'adventure.default.load_level', (["block['transport']"], {}), "(block['transport'])\n", (4644, 4664), False, 'import adventure\n'), ((5483, 5519), 'adventure.default.get_block_id', 'adventure.default.get_block_id', (['x', 'y'], {}), '(x, y)\n', (5513, 5519), False, 'import adventure\n'), ((7248, 7284), 'adventure.default.get_block_id', 'adventure.default.get_block_id', (['x', 'y'], {}), '(x, y)\n', (7278, 7284), False, 'import adventure\n'), ((8536, 8572), 'adventure.default.get_block_id', 'adventure.default.get_block_id', (['x', 'y'], {}), '(x, y)\n', (8566, 8572), False, 'import adventure\n'), ((9518, 9554), 'adventure.default.get_block_id', 'adventure.default.get_block_id', (['x', 'y'], {}), '(x, y)\n', (9548, 9554), False, 'import adventure\n'), ((11054, 11090), 'adventure.default.get_block_id', 'adventure.default.get_block_id', (['x', 'y'], {}), '(x, y)\n', (11084, 11090), False, 'import adventure\n'), ((4165, 4192), 'adventure.default.restart', 'adventure.default.restart', ([], {}), '()\n', (4190, 4192), False, 'import adventure\n'), ((6934, 6970), 'adventure.default.get_block_id', 'adventure.default.get_block_id', (['x', 'y'], {}), '(x, y)\n', (6964, 6970), False, 'import adventure\n'), ((10598, 10634), 'adventure.default.get_block_id', 'adventure.default.get_block_id', (['x', 'y'], {}), '(x, y)\n', (10628, 10634), False, 'import adventure\n'), ((4464, 4528), 'adventure.default.sound_master.add_rhythm_beat', 'adventure.default.sound_master.add_rhythm_beat', (["block['musicID']"], {}), "(block['musicID'])\n", (4510, 4528), False, 'import adventure\n')]
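Most of the collision logic above boils down to mapping a pixel-space box onto the tile grid. A compact sketch of that mapping, assuming a fixed 32-pixel tile (the game reads the real value from adventure.default.block_size); the rounding here is the usual floor-division form rather than the exact threshold test get_current_block uses.

import pygame

BLOCK = 32  # assumed tile size


def occupied_cells(x, y, w, h, block=BLOCK):
    # grid cells an axis-aligned box overlaps, as a Rect in tile coordinates
    left, top = int(x // block), int(y // block)
    right = int((x + w - 1) // block)
    bottom = int((y + h - 1) // block)
    return pygame.Rect(left, top, right - left + 1, bottom - top + 1)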
from configparser import ConfigParser
import logging
from pathlib import Path
import sys

import config
import helper_functions
from interactive_cli import InvalidConfigFileCommandLineInterface
from conversion_settings import ConversionSettings


def what_module_is_this():
    return __name__


class ConfigData(ConfigParser):
    """
    Read, verify, update and write config.ini files

    Attributes
    ----------
    logger : logging object
        used for program logging
    config_file : str
        name of the configuration file to be parsed and to use for storing config data
    default_quick_setting : str
        setting to be used if the config.ini is found to be invalid and the user chooses to use the default
    conversion_settings : Child of ConversionSettings
        holds config data in a format to be used outside of the class
    validation_values : dict
        holds the values required to validate a config file read from disk

    """

    def __init__(self, config_file, default_quick_setting, **kwargs):
        super().__init__(**kwargs)
        # Note: allow_no_value=True allows for #comments in the ini file
        self.logger = logging.getLogger(f'{config.yanom_globals.app_name}.'
                                        f'{what_module_is_this()}.'
                                        f'{self.__class__.__name__}'
                                        )
        self.logger.setLevel(config.yanom_globals.logger_level)
        self._config_file = config_file
        self._default_quick_setting = default_quick_setting
        self._conversion_settings = ConversionSettings()
        self._conversion_settings.set_quick_setting('manual')
        self._validation_values = self._conversion_settings.validation_values

    def parse_config_file(self):
        self.read_config_file()
        valid_config = self.validate_config_file()
        if not valid_config:
            self.ask_user_to_choose_new_default_config_file()
        self.generate_conversion_settings_from_parsed_config_file_data()
        self.logger.info(f"Settings from config.ini are {self._conversion_settings}")

    def generate_conversion_settings_using_quick_settings_string(self, value):
        if value in self._conversion_settings.valid_quick_settings:
            self._conversion_settings.set_quick_setting(value)
            self._load_and_save_settings()
            return

        self.logger.error(f"Passed invalid value - {value} - is not a recognised quick setting string")
        raise ValueError(f"Conversion setting parameter must be a valid quick setting string "
                         f"{self._conversion_settings.valid_quick_settings}, received '{value}'")

    def generate_conversion_settings_using_quick_settings_object(self, value):
        if isinstance(value, ConversionSettings):
            self._conversion_settings = value
            self._load_and_save_settings()
            return

        self.logger.error(f"Passed invalid value - {value}")
        raise TypeError(f"Conversion setting parameter must be a valid quick setting "
                        f"{self._conversion_settings.valid_quick_settings} string or a ConversionSettings object")

    def validate_config_file(self) -> bool:
        """
        Validate config data read from config.ini.  Errors in the data will trigger a prompt
        asking the user to create a new default configuration or exit the program.
""" self.logger.debug("attempting to validate config file") try: self._validate_config() self.logger.debug("config file validated") return True except ValueError as e: self.logger.warning(f"Config file invalid \n{e}") return False def ask_user_to_choose_new_default_config_file(self): ask_what_to_do = InvalidConfigFileCommandLineInterface() what_to_do = ask_what_to_do.run_cli() if what_to_do == 'exit': sys.exit(0) self.logger.info("User chose to create a default file") self._conversion_settings.set_quick_setting(self._default_quick_setting) self._load_and_save_settings() def _validate_config(self): """ Validate the current Config Data for any errors by comparing to a set of validation values. Errors will raise a ValueError """ for section, keys in self._validation_values.items(): if section not in self: raise ValueError(f'Missing section {section} in the config ini file') for key, values in keys.items(): if key not in self[section]: raise ValueError(f'Missing entry for {key} under section {section} in the config ini file') if values: if self[section][key] not in values: raise ValueError(f'Invalid value of "{self[section][key]}" for {key} under section {section} ' f'in the config file') def generate_conversion_settings_from_parsed_config_file_data(self): """ Transcribe the values in a ConversionSettings object into the ConfigParser format used by this class """ self._conversion_settings.conversion_input = self['conversion_inputs']['conversion_input'] self._conversion_settings.markdown_conversion_input = \ self['markdown_conversion_inputs']['markdown_conversion_input'] self._conversion_settings.quick_setting = self['quick_settings']['quick_setting'] self._conversion_settings.export_format = self['export_formats']['export_format'] self._conversion_settings.front_matter_format = self['meta_data_options']['front_matter_format'] if self._conversion_settings.export_format == 'pandoc_markdown': self._conversion_settings.front_matter_format = 'yaml' self._conversion_settings.metadata_schema = self['meta_data_options']['metadata_schema'] self._conversion_settings.tag_prefix = self['meta_data_options']['tag_prefix'] self._conversion_settings.spaces_in_tags = self.getboolean('meta_data_options', 'spaces_in_tags') self._conversion_settings.split_tags = self.getboolean('meta_data_options', 'split_tags') self._conversion_settings.first_row_as_header = self.getboolean('table_options', 'first_row_as_header') self._conversion_settings.first_column_as_header = self.getboolean('table_options', 'first_column_as_header') self._conversion_settings.chart_image = self.getboolean('chart_options', 'chart_image') self._conversion_settings.chart_csv = self.getboolean('chart_options', 'chart_csv') self._conversion_settings.chart_data_table = self.getboolean('chart_options', 'chart_data_table') self._conversion_settings.source = self['file_options']['source'] if self['file_options']['export_folder'] == '': self['file_options']['export_folder'] = 'notes' self._conversion_settings.export_folder = self['file_options']['export_folder'] if self['file_options']['attachment_folder_name'] == '': self['file_options']['attachment_folder_name'] = 'attachments' self._conversion_settings.attachment_folder_name = self['file_options']['attachment_folder_name'] self._conversion_settings._creation_time_in_exported_file_name = \ self.getboolean('file_options', 'creation_time_in_exported_file_name') self._conversion_settings.allow_spaces_in_filenames = \ self.getboolean('file_options', 'allow_spaces_in_filenames') 
self._conversion_settings.allow_unicode_in_filenames = \ self.getboolean('file_options', 'allow_unicode_in_filenames') self._conversion_settings.allow_uppercase_in_filenames = \ self.getboolean('file_options', 'allow_uppercase_in_filenames') self._conversion_settings.allow_non_alphanumeric_in_filenames = \ self.getboolean('file_options', 'allow_non_alphanumeric_in_filenames') self._conversion_settings.filename_spaces_replaced_by = self['file_options']['filename_spaces_replaced_by'] self._conversion_settings.max_file_or_directory_name_length = self['file_options']['max_file_or_directory_name_length'] self._conversion_settings.orphans = self['file_options']['orphans'] self._conversion_settings.make_absolute = self.getboolean('file_options', 'make_absolute') def _write_config_file(self): try: with open(Path(self.conversion_settings.working_directory, self._config_file), 'w') as config_file: self.write(config_file) self.logger.info("Saving configuration file") except FileNotFoundError as e: if not Path(self.conversion_settings.working_directory).is_dir(): message = f"Unable to save config.ini file '{self.conversion_settings.working_directory}' " \ f"is not a directory. {e.strerror}" self.logger.error(message) self.logger.error(helper_functions.log_traceback(e)) if not config.yanom_globals.is_silent: print(message) else: message = f"Unable to save config.ini file " \ f"- '{Path(self.conversion_settings.working_directory, self._config_file)}' " \ f"- {e.strerror}" self.logger.error(message) self.logger.error(helper_functions.log_traceback(e)) if not config.yanom_globals.is_silent: print(message) except IOError as e: message = f"Unable to save config.ini file `{self.conversion_settings.working_directory}`.\n{e}" self.logger.error(message) self.logger.error(helper_functions.log_traceback(e)) if not config.yanom_globals.is_silent: print(message) def read_config_file(self): """ Read config file. If file is missing generate a new one using default quick setting. If config file is missing set the conversion_settings to the default quick setting values and use that to generate a config data set. """ self.logger.debug('reading config file') path = Path(self.conversion_settings.working_directory, self._config_file) if path.is_file(): self.read(path) self.logger.info(f'Data read from INI file is {self.__repr__()}') else: self.logger.warning(f'config.ini missing at {path}, generating new file and settings set to default.') if not config.yanom_globals.is_silent: print("config.ini missing, generating new file.") self.conversion_settings = self._default_quick_setting def _load_and_save_settings(self): """ Read a dictionary of config data, formatted for config file generation and store the new config file. """ self._wipe_current_config() self.read_dict(self._generate_conversion_dict()) self.logger.info(f"Quick setting {self['quick_settings']['quick_setting']} loaded") self._write_config_file() def _wipe_current_config(self): """ Wipe the current config sections. When using read_dict new sections are added to the end of the current config. so when written to file they are out of place compared to the validation data. This method is here to save having to manually delete the ini file when coding changes to the conversion dict. That means it is here to save me having to remember to delete the ini file :-) """ for section, keys in self._validation_values.items(): self.remove_section(section) pass def _generate_conversion_dict(self): """ Generate a dictionary of the current conversion settings. 
Returns ------- Dict Dictionary, formatted for config file creation, using values from a ConversionSettings object """ # comments are treated as 'values' with no value (value is set to None) i.e. they are dict entries # where the key is the #comment string and the value is None return { 'conversion_inputs': { f' # Valid entries are {", ".join(self._conversion_settings.valid_conversion_inputs)}': None, ' # nsx = synology Note Station Export file': None, ' # html = simple html based notes pages, no complex CSS or javascript': None, ' # markdown = text files in markdown format': None, 'conversion_input': self._conversion_settings.conversion_input }, 'markdown_conversion_inputs': { f' # Valid entries are {", ".join(self._conversion_settings.valid_markdown_conversion_inputs)}': None, 'markdown_conversion_input': self._conversion_settings.markdown_conversion_input }, 'quick_settings': { f' # Valid entries are {", ".join(self._conversion_settings.valid_export_formats)}': None, ' # use manual to use the manual settings in the sections below': None, ' # NOTE if an option other than - manual - is used the rest of the ': None, ' # settings in this file will be set automatically': None, ' #': None, "quick_setting": self._conversion_settings.quick_setting, ' # ': None, ' # The following sections only apply if the above is set to manual': None, ' # ': None }, 'export_formats': { f' # Valid entries are {", ".join(self._conversion_settings.valid_export_formats)}': None, "export_format": self._conversion_settings.export_format }, 'meta_data_options': { ' # Note: front_matter_format sets the presence and type of the section with metadata ': None, ' #retrieved from the source': None, f' # Valid entries are {", ".join(self._conversion_settings.valid_front_matter_formats)}': None, ' # no entry will result in no front matter section': None, 'front_matter_format': self._conversion_settings.front_matter_format, ' # metadata schema is a comma separated list of metadata keys that you wish to ': None, ' # restrict the retrieved metadata keys. for example ': None, ' # title, tags will return those two if they are found': None, ' # If left blank any meta data found will be used': None, ' # The useful available keys in an nsx file are title, ctime, mtime, tag': None, 'metadata_schema': ",".join(self._conversion_settings.metadata_schema), ' # tag prefix is a character you wish to be added to the front of any tag values ': None, ' # retrieved from metadata. 
NOTE Use this if using front matter format "text" ': None,
                ' # or if your markdown system uses a prefix in a front matter section (most will not use a prefix) ': None,
                'tag_prefix': self._conversion_settings.tag_prefix,
                ' # spaces_in_tags if True will maintain spaces in tag words, if False spaces are replaced by a dash -': None,
                'spaces_in_tags': self._conversion_settings.spaces_in_tags,
                ' # split tags will split grouped tags into individual tags if True': None,
                ' # "Tag1", "Tag1/Sub Tag2" will become "Tag1", "Sub Tag2"': None,
                ' # grouped tags are only split where a "/" character is found': None,
                'split_tags': self._conversion_settings.split_tags
            },
            'table_options': {
                ' # These two table options apply to NSX files ONLY': None,
                'first_row_as_header': self._conversion_settings.first_row_as_header,
                'first_column_as_header': self._conversion_settings.first_column_as_header
            },
            'chart_options': {
                ' # These three chart options apply to NSX files ONLY': None,
                'chart_image': self._conversion_settings.chart_image,
                'chart_csv': self._conversion_settings.chart_csv,
                'chart_data_table': self._conversion_settings.chart_data_table
            },
            'file_options': {
                'source': self._conversion_settings.source,
                'export_folder': self._conversion_settings.export_folder,
                'attachment_folder_name': self._conversion_settings.attachment_folder_name,
                ' # The following options apply to file and directory names, and currently only apply to filenames in NSX conversions.': None,
                'allow_spaces_in_filenames': self._conversion_settings.allow_spaces_in_filenames,
                'filename_spaces_replaced_by': self._conversion_settings.filename_spaces_replaced_by,
                'allow_unicode_in_filenames': self._conversion_settings.allow_unicode_in_filenames,
                'allow_uppercase_in_filenames': self._conversion_settings.allow_uppercase_in_filenames,
                'allow_non_alphanumeric_in_filenames': self._conversion_settings.allow_non_alphanumeric_in_filenames,
                'creation_time_in_exported_file_name': self._conversion_settings._creation_time_in_exported_file_name,
                ' # If True creation time as `yyyymmddhhmm-` will be added as a prefix to the file name': None,
                'max_file_or_directory_name_length': self._conversion_settings.max_file_or_directory_name_length,
                ' # The following options apply to directory names, and currently only apply to html and markdown conversions.': None,
                'orphans': self._conversion_settings.orphans,
                ' # orphans are files that are not linked to any notes. Valid values are': None,
                ' # ignore - orphan files are left where they are and are not moved to an export folder.': None,
                ' # copy - orphan files are copied to the export folder in the same relative locations as the source.': None,
                ' # orphan - orphan files are moved to a directory named orphan in the export folder.': None,
                'make_absolute': self._conversion_settings.make_absolute,
                ' # Links to files that are not in the path forwards of the source directory will be ': None,
                ' # changed to absolute links if set to True. For example "../../someplace/some_file.pdf"': None,
                ' # becomes "/root/path/to/someplace/some_file.pdf"': None,
                ' # False will leave these links unchanged as relative links': None,
            }
        }

    @property
    def conversion_settings(self):
        return self._conversion_settings

    @conversion_settings.setter
    def conversion_settings(self, value):
        """
        Receive a conversion setting as a quick setting string or a ConversionSettings object,
        set the _conversion_settings, generate and save a config ini file for the setting received.

        Parameters
        ----------
        value : str or ConversionSettings
            If str must be a valid quick_setting string value.
""" if type(value) is str: self.generate_conversion_settings_using_quick_settings_string(value) return self.generate_conversion_settings_using_quick_settings_object(value) def __str__(self): display_dict = str({section: dict(self[section]) for section in self.sections()}) return str(f'{self.__class__.__name__}{display_dict}') def __repr__(self): display_dict = str({section: dict(self[section]) for section in self.sections()}) return str(f'{self.__class__.__name__}{display_dict}')
[ "interactive_cli.InvalidConfigFileCommandLineInterface", "conversion_settings.ConversionSettings", "helper_functions.log_traceback", "pathlib.Path", "sys.exit" ]
[((1593, 1613), 'conversion_settings.ConversionSettings', 'ConversionSettings', ([], {}), '()\n', (1611, 1613), False, 'from conversion_settings import ConversionSettings\n'), ((3832, 3871), 'interactive_cli.InvalidConfigFileCommandLineInterface', 'InvalidConfigFileCommandLineInterface', ([], {}), '()\n', (3869, 3871), False, 'from interactive_cli import InvalidConfigFileCommandLineInterface\n'), ((10349, 10416), 'pathlib.Path', 'Path', (['self.conversion_settings.working_directory', 'self._config_file'], {}), '(self.conversion_settings.working_directory, self._config_file)\n', (10353, 10416), False, 'from pathlib import Path\n'), ((3963, 3974), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3971, 3974), False, 'import sys\n'), ((8543, 8610), 'pathlib.Path', 'Path', (['self.conversion_settings.working_directory', 'self._config_file'], {}), '(self.conversion_settings.working_directory, self._config_file)\n', (8547, 8610), False, 'from pathlib import Path\n'), ((9866, 9899), 'helper_functions.log_traceback', 'helper_functions.log_traceback', (['e'], {}), '(e)\n', (9896, 9899), False, 'import helper_functions\n'), ((9101, 9134), 'helper_functions.log_traceback', 'helper_functions.log_traceback', (['e'], {}), '(e)\n', (9131, 9134), False, 'import helper_functions\n'), ((9534, 9567), 'helper_functions.log_traceback', 'helper_functions.log_traceback', (['e'], {}), '(e)\n', (9564, 9567), False, 'import helper_functions\n'), ((8793, 8841), 'pathlib.Path', 'Path', (['self.conversion_settings.working_directory'], {}), '(self.conversion_settings.working_directory)\n', (8797, 8841), False, 'from pathlib import Path\n'), ((9339, 9406), 'pathlib.Path', 'Path', (['self.conversion_settings.working_directory', 'self._config_file'], {}), '(self.conversion_settings.working_directory, self._config_file)\n', (9343, 9406), False, 'from pathlib import Path\n')]
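The schema check in _validate_config is independent of the rest of the class and easy to exercise in isolation. A minimal sketch with a hypothetical two-section schema; an empty tuple of allowed values means any value passes, mirroring the `if values:` guard above.

from configparser import ConfigParser

SCHEMA = {
    'export_formats': {'export_format': ('markdown', 'html')},
    'file_options': {'source': ()},  # hypothetical example schema
}


def validate(cfg: ConfigParser, schema=SCHEMA):
    for section, keys in schema.items():
        if section not in cfg:
            raise ValueError(f'missing section {section}')
        for key, allowed in keys.items():
            if key not in cfg[section]:
                raise ValueError(f'missing {section}.{key}')
            if allowed and cfg[section][key] not in allowed:
                raise ValueError(f'bad value {cfg[section][key]!r} for {section}.{key}')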
# -*- coding: utf-8 -*- import argparse import pdb import traceback from itertools import permutations from typing import List, Tuple from intcode import Intcode def solve(program: List[int]) -> Tuple[int, int]: phases = (0, 1, 2, 3, 4) signals = [0] * (len(phases) + 1) results = [] vms = [Intcode(program) for _ in phases] for trie in permutations(phases): for idx, phase in enumerate(trie): vms[idx].reset() vms[idx].set_inputs([phase, signals[idx]]) while not vms[idx].execute(): pass signals[idx + 1] = vms[idx].last_output results.append((trie, signals[-1])) one = max([r[1] for r in results]) phases = (5, 6, 7, 8, 9) results = [] for phase in permutations(phases): signals = [0] * (len(phases) + 1) more_work = [True] * len(phases) vms = [Intcode(program) for _ in phases] for idx in range(len(vms)): vms[idx].set_inputs([phase[idx], signals[idx]]) more_work[idx] = vms[idx].execute() signals[idx + 1] = vms[idx].last_output signals[0] = signals[-1] while any(more_work): for idx in range(len(vms)): if not more_work[idx]: continue vms[idx].set_inputs([signals[idx]]) more_work[idx] = vms[idx].execute() signals[idx + 1] = vms[idx].last_output signals[0] = signals[-1] results.append((phase, signals[-1])) two = max([r[1] for r in results]) for idx, vm in enumerate(vms): print(f"vm trace {idx}:") addrs = list(vm.execution_trace.keys()) addrs.sort() for addr in addrs: print(f"{vm.execution_trace[addr]}") for ip in range(addrs[-1] + 1, len(vm.program)): print(f"{ip:5d}: {vm.program[ip]}") return (one, two) if __name__ == "__main__": parser = argparse.ArgumentParser( description="Advent of Code - 2019 - Day 7 - Amplification Circuit." ) parser.add_argument( "input", type=str, default="input.txt", nargs="?", help="The puzzle input. (Default %(default)s)", ) args = parser.parse_args() program: List[int] = [] with open(args.input) as inf: for line in inf: program += list(map(int, line.strip().split(","))) try: print(solve(program)) except Exception: traceback.print_exc() pdb.post_mortem()
[ "traceback.print_exc", "pdb.post_mortem", "argparse.ArgumentParser", "itertools.permutations", "intcode.Intcode" ]
[((361, 381), 'itertools.permutations', 'permutations', (['phases'], {}), '(phases)\n', (373, 381), False, 'from itertools import permutations\n'), ((772, 792), 'itertools.permutations', 'permutations', (['phases'], {}), '(phases)\n', (784, 792), False, 'from itertools import permutations\n'), ((1962, 2060), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Advent of Code - 2019 - Day 7 - Amplification Circuit."""'}), "(description=\n 'Advent of Code - 2019 - Day 7 - Amplification Circuit.')\n", (1985, 2060), False, 'import argparse\n'), ((311, 327), 'intcode.Intcode', 'Intcode', (['program'], {}), '(program)\n', (318, 327), False, 'from intcode import Intcode\n'), ((892, 908), 'intcode.Intcode', 'Intcode', (['program'], {}), '(program)\n', (899, 908), False, 'from intcode import Intcode\n'), ((2492, 2513), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2511, 2513), False, 'import traceback\n'), ((2522, 2539), 'pdb.post_mortem', 'pdb.post_mortem', ([], {}), '()\n', (2537, 2539), False, 'import pdb\n')]
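Stripped of the Intcode machinery, part one's search is just "try every phase ordering and feed each amplifier's output into the next". A sketch with the VM replaced by a placeholder callable so the shape of the search is visible; amplify is an assumed stand-in, not part of the source.

from itertools import permutations


def best_signal(amplify, phases=(0, 1, 2, 3, 4)):
    best = 0
    for order in permutations(phases):
        signal = 0
        for phase in order:
            signal = amplify(phase, signal)  # one Intcode run per amplifier
        best = max(best, signal)
    return best


# e.g. with a toy amplifier: best_signal(lambda phase, signal: signal * 2 + phase)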
from typing import Optional, Sequence import pytorch_lightning as pl from hydra.utils import instantiate from omegaconf import DictConfig from torch.utils.data import DataLoader, Dataset from torchmeta.transforms import ClassSplitter from torchmeta.utils.data import BatchMetaDataLoader from torchvision.transforms import Compose class CustomBatchMetaDataLoader(BatchMetaDataLoader): def __init__(self, dataset, num_epochs=1, batch_size=1, shuffle=True, sampler=None, num_workers=0, pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None): self.num_epochs = num_epochs super(CustomBatchMetaDataLoader, self).__init__(dataset, batch_size=batch_size, shuffle=shuffle, sampler=sampler, num_workers=num_workers, pin_memory=pin_memory, drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn) def __len__(self): return 1 class MetaDataModule(pl.LightningDataModule): def __init__( self, nway: int, datasets: DictConfig, num_workers: DictConfig, kshot: DictConfig, transforms: DictConfig, target_transform: DictConfig, class_augmentations: DictConfig, batch_size: DictConfig, num_inner_steps: DictConfig, cfg: DictConfig, ): super().__init__() self.cfg = cfg self.datasets = datasets self.num_workers = num_workers self.batch_size = batch_size self.kshot = kshot self.nway = nway self.random_seed = cfg.train.random_seed self.transforms = transforms self.class_augmentations = class_augmentations self.target_transform = target_transform self.num_inner_steps = num_inner_steps self.train_dataset: Optional[Dataset] = None self.val_datasets: Optional[Sequence[Dataset]] = None self.test_datasets: Optional[Sequence[Dataset]] = None def prepare_data(self) -> None: # download only pass def setup(self, stage: Optional[str] = None): transform_compose = Compose([instantiate(transform) for transform in self.transforms]) target_transform = instantiate(self.target_transform, num_classes=self.nway) class_augmentations = [ instantiate(augmentation) for augmentation in self.class_augmentations] # Here you should instantiate your datasets, you may also split the train into train and validation if needed. 
if stage is None or stage == "fit": self.train_dataset = instantiate( self.datasets.train, num_classes_per_task=self.nway, transform=transform_compose, target_transform=target_transform, class_augmentations=class_augmentations) self.train_dataset.seed(self.random_seed) self.train_dataset = ClassSplitter(self.train_dataset, shuffle=True, random_state_seed=self.random_seed, num_support_per_class=self.kshot.support, num_query_per_class=self.kshot.query) self.val_dataset = instantiate(self.datasets.val, num_classes_per_task=self.nway, transform=transform_compose, target_transform=target_transform, class_augmentations=class_augmentations) self.val_dataset.seed(self.random_seed) self.val_dataset = ClassSplitter(self.val_dataset, shuffle=True, random_state_seed=self.random_seed, num_support_per_class=self.kshot.support, num_query_per_class=self.kshot.query) if stage is None or stage == "test": self.test_dataset = instantiate(self.datasets.test, num_classes_per_task=self.nway, transform=transform_compose, target_transform=target_transform, class_augmentations=class_augmentations) self.test_dataset.seed(self.random_seed) self.test_dataset = ClassSplitter(self.test_dataset, shuffle=True, random_state_seed=self.random_seed, num_support_per_class=self.kshot.support, num_query_per_class=self.kshot.query) def train_dataloader(self) -> DataLoader: return CustomBatchMetaDataLoader( self.train_dataset, shuffle=True, batch_size=self.batch_size.train, num_workers=self.num_workers.train, ) def val_dataloader(self) -> DataLoader: return CustomBatchMetaDataLoader( self.val_dataset, shuffle=False, batch_size=self.batch_size.val, num_workers=self.num_workers.val, ) def test_dataloader(self) -> DataLoader: return CustomBatchMetaDataLoader( self.test_dataset, shuffle=False, batch_size=self.batch_size.test, num_workers=self.num_workers.test, ) def __repr__(self) -> str: return ( f"{self.__class__.__name__}(" f"{self.datasets}, " f"{self.num_workers}, " f"{self.batch_size})" )
[ "hydra.utils.instantiate", "torchmeta.transforms.ClassSplitter" ]
[((2714, 2771), 'hydra.utils.instantiate', 'instantiate', (['self.target_transform'], {'num_classes': 'self.nway'}), '(self.target_transform, num_classes=self.nway)\n', (2725, 2771), False, 'from hydra.utils import instantiate\n'), ((2855, 2880), 'hydra.utils.instantiate', 'instantiate', (['augmentation'], {}), '(augmentation)\n', (2866, 2880), False, 'from hydra.utils import instantiate\n'), ((3147, 3325), 'hydra.utils.instantiate', 'instantiate', (['self.datasets.train'], {'num_classes_per_task': 'self.nway', 'transform': 'transform_compose', 'target_transform': 'target_transform', 'class_augmentations': 'class_augmentations'}), '(self.datasets.train, num_classes_per_task=self.nway, transform=\n transform_compose, target_transform=target_transform,\n class_augmentations=class_augmentations)\n', (3158, 3325), False, 'from hydra.utils import instantiate\n'), ((3485, 3657), 'torchmeta.transforms.ClassSplitter', 'ClassSplitter', (['self.train_dataset'], {'shuffle': '(True)', 'random_state_seed': 'self.random_seed', 'num_support_per_class': 'self.kshot.support', 'num_query_per_class': 'self.kshot.query'}), '(self.train_dataset, shuffle=True, random_state_seed=self.\n random_seed, num_support_per_class=self.kshot.support,\n num_query_per_class=self.kshot.query)\n', (3498, 3657), False, 'from torchmeta.transforms import ClassSplitter\n'), ((3868, 4044), 'hydra.utils.instantiate', 'instantiate', (['self.datasets.val'], {'num_classes_per_task': 'self.nway', 'transform': 'transform_compose', 'target_transform': 'target_transform', 'class_augmentations': 'class_augmentations'}), '(self.datasets.val, num_classes_per_task=self.nway, transform=\n transform_compose, target_transform=target_transform,\n class_augmentations=class_augmentations)\n', (3879, 4044), False, 'from hydra.utils import instantiate\n'), ((4291, 4461), 'torchmeta.transforms.ClassSplitter', 'ClassSplitter', (['self.val_dataset'], {'shuffle': '(True)', 'random_state_seed': 'self.random_seed', 'num_support_per_class': 'self.kshot.support', 'num_query_per_class': 'self.kshot.query'}), '(self.val_dataset, shuffle=True, random_state_seed=self.\n random_seed, num_support_per_class=self.kshot.support,\n num_query_per_class=self.kshot.query)\n', (4304, 4461), False, 'from torchmeta.transforms import ClassSplitter\n'), ((4711, 4888), 'hydra.utils.instantiate', 'instantiate', (['self.datasets.test'], {'num_classes_per_task': 'self.nway', 'transform': 'transform_compose', 'target_transform': 'target_transform', 'class_augmentations': 'class_augmentations'}), '(self.datasets.test, num_classes_per_task=self.nway, transform=\n transform_compose, target_transform=target_transform,\n class_augmentations=class_augmentations)\n', (4722, 4888), False, 'from hydra.utils import instantiate\n'), ((5141, 5312), 'torchmeta.transforms.ClassSplitter', 'ClassSplitter', (['self.test_dataset'], {'shuffle': '(True)', 'random_state_seed': 'self.random_seed', 'num_support_per_class': 'self.kshot.support', 'num_query_per_class': 'self.kshot.query'}), '(self.test_dataset, shuffle=True, random_state_seed=self.\n random_seed, num_support_per_class=self.kshot.support,\n num_query_per_class=self.kshot.query)\n', (5154, 5312), False, 'from torchmeta.transforms import ClassSplitter\n'), ((2592, 2614), 'hydra.utils.instantiate', 'instantiate', (['transform'], {}), '(transform)\n', (2603, 2614), False, 'from hydra.utils import instantiate\n')]
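The only behaviour CustomBatchMetaDataLoader changes is __len__. Torchmeta task loaders can yield batches more or less indefinitely, so reporting a length of 1 appears intended to cap each Lightning epoch at a single meta-batch. The same trick in isolation, on a plain DataLoader with an illustrative name:

from torch.utils.data import DataLoader


class OneBatchPerEpochLoader(DataLoader):
    def __len__(self):
        # under-report the length so length-driven training loops stop after one batch
        return 1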
import sys from PyQt5.QtCore import * from PyQt5.QtGui import * from PyQt5.QtWidgets import * import json, ast try: import RPi.GPIO as GPIO #incase we are in test mode except: print('could not import RPi.GPIO') #from Event_Functions import EventFunctions from Widget_Styles import * class RelayWidgets(): def __init__(self): super().__init__() def __updateRelayHardware(self): try: self.parameters.relayHardware except: self.parameters.relayHardware = set() self.parameters.relayHardware.add(self.name) #adds basic relay to the GUI def addRelay(self,dock): self.__updateRelayHardware() self.hwStatus['relayButtonGUI']=False gb = groupBox('Relay') # VLayout = QVBoxLayout() HLayout = QHBoxLayout() currentPins = bodyLabel('Relay pins attached --> no relays attached') HLayout.addWidget(currentPins) switch = bodyButton() switch.setText(self.name+' - Off') switch.setCheckable(True) switch.clicked.connect(lambda ignore, a=self.name:self.whichbtn(a)) HLayout.addWidget(switch) # VLayout.addLayout(HLayout) gb.setLayout(HLayout) dock.addThisWidget(gb) #update widget dictionary with all widgets we created relayGroupBox = {'widget':gb, 'QLabelCurrentPins':{'widget':currentPins,'value':'no relays attached'}, 'QPushButton':{'widget':switch,'value':False} } self.parameters.brewGUI[self.name]['relayGroupBox'] = relayGroupBox #handles the QPushButton click event def whichbtn(self,hardware): b = self.parameters.brewGUI[hardware]['relayGroupBox']['QPushButton']['widget'] hw = self.name # print('hw is {} and self.name is {}'.format(hw,self.name)) if b.isChecked(): b.setText(b.text()[:-6]+' - On') switch = True else: b.setText(b.text()[:-5]+' - Off') switch = False #updating the status dictionary self.hwStatus['relayButtonGUI']=switch # print('hwstatus is {} and self.hwstatus is {}'.format(self.hwStatus,self.parameters.brewGUI[hw]['object'].hwStatus)) print('Trying to switch {}{}'.format(hw,b.text()[b.text().find('-')+1:])) #check to see if any pins are connected to the hardware pins = self.pinList['relay'] print(pins) if not pins: print('Warning, no relays connected') else: self.checkRelayPinStatus() def checkRelayPinStatus(self): """takes a list of pins and checks their status and status of parent HW switching on and off as required""" pins = self.pinList['relay'] for pin in pins: self.updateStatus() # print('no hardware is associated with pin {}, try setting a connection for the pin in the connections tab'.format(pin)) self.setRelay(pin) def setRelay(self,pin): """toggle the relay on or off and update the text in the GUI""" hw = self.name #set the hardare switch = self.status #set the switch status (true = on, false = off) if not self.parameters.test: GPIO.setmode(GPIO.BCM) RELAIS_1_GPIO = int(pin) GPIO.setup(RELAIS_1_GPIO, GPIO.OUT) # GPIO Assign mode #get the current text of the widget so we can update it text=self.parameters.brewGUI[hw]['relayGroupBox']['QLabelCurrentPins']['widget'].text() if switch: print('switching on relay connected to pin {}'.format(pin)) # self.parameters.relayPins[pin][0]=True self.lastRelayState=True text = text.replace(str(pin),'<a style="color:red;"><strong>{}</strong></a>'.format(pin)) if not self.parameters.test: GPIO.output(RELAIS_1_GPIO, GPIO.LOW) # turn on else: print('switching off relay connected to pin {}'.format(pin)) # self.parameters.relayPins[pin][0]=False self.lastRelayState=False text = text.replace('<a style="color:red;"><strong>'+str(pin)+'</strong></a>','{}'.format(pin)) if not self.parameters.test: GPIO.output(RELAIS_1_GPIO, GPIO.HIGH) # turn on 
self.parameters.brewGUI[hw]['relayGroupBox']['QLabelCurrentPins']['widget'].setText(text)
[ "RPi.GPIO.setup", "RPi.GPIO.setmode", "RPi.GPIO.output" ]
[((3326, 3348), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (3338, 3348), True, 'import RPi.GPIO as GPIO\n'), ((3399, 3434), 'RPi.GPIO.setup', 'GPIO.setup', (['RELAIS_1_GPIO', 'GPIO.OUT'], {}), '(RELAIS_1_GPIO, GPIO.OUT)\n', (3409, 3434), True, 'import RPi.GPIO as GPIO\n'), ((3953, 3989), 'RPi.GPIO.output', 'GPIO.output', (['RELAIS_1_GPIO', 'GPIO.LOW'], {}), '(RELAIS_1_GPIO, GPIO.LOW)\n', (3964, 3989), True, 'import RPi.GPIO as GPIO\n'), ((4343, 4380), 'RPi.GPIO.output', 'GPIO.output', (['RELAIS_1_GPIO', 'GPIO.HIGH'], {}), '(RELAIS_1_GPIO, GPIO.HIGH)\n', (4354, 4380), True, 'import RPi.GPIO as GPIO\n')]
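A self-contained version of the active-low switching in setRelay, with the same off-Pi test-mode fallback the import guard at the top of the module hints at; the function name is illustrative.

try:
    import RPi.GPIO as GPIO
    HAVE_GPIO = True
except ImportError:
    HAVE_GPIO = False  # test mode, e.g. when running off the Pi


def set_relay(pin: int, on: bool):
    # active-low relay board: LOW energises the coil, HIGH releases it
    if not HAVE_GPIO:
        print(f"[test mode] relay on pin {pin} -> {'on' if on else 'off'}")
        return
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(pin, GPIO.OUT)
    GPIO.output(pin, GPIO.LOW if on else GPIO.HIGH)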
from __future__ import absolute_import from sentry.mediators import Mediator, Param class Destroyer(Mediator): service_hook = Param('sentry.models.ServiceHook') def call(self): self._destroy_service_hook() return self.service_hook def _destroy_service_hook(self): self.service_hook.delete()
[ "sentry.mediators.Param" ]
[((133, 167), 'sentry.mediators.Param', 'Param', (['"""sentry.models.ServiceHook"""'], {}), "('sentry.models.ServiceHook')\n", (138, 167), False, 'from sentry.mediators import Mediator, Param\n')]
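If this follows Sentry's usual Mediator contract, where Mediator.run(**params) builds an instance and invokes call(), usage would look roughly like the commented sketch below; hook_id is a placeholder.

# from sentry.models import ServiceHook
#
# hook = ServiceHook.objects.get(id=hook_id)  # hook_id is a placeholder
# Destroyer.run(service_hook=hook)            # deletes the hook and returns it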
""" AWS Lambda code base for the weather-api. """ import os import logging import pymysql LOGGER = logging.getLogger() LOGGER.setLevel(logging.INFO) STATION_SQL = """ INSERT INTO `station` (`abs_pressure`, `hum_in`, `hum_out`, `rain`, `rain_day`, `tdate`, `temp_apprt`, `temp_dewpt`, `temp_in`, `temp_out`, `ttime`, `wind_ave`, `wind_chill`, `wind_dir`, `wind_gust`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) """ SOIL_TEMPS_SQL = """ INSERT INTO `soil_temps` (`temp_concrete`, `temp_grass`, `soil_d`, `soil_m`, `soil_s`, `temp_system`, `tdate`, `ttime`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s) """ CLIMATE_SQL = """ INSERT INTO `climate` (`airflow`, `humidity`, `light`, `sound`, `tdate`, `temp`, `ttime`) VALUES (%s, %s, %s, %s, %s, %s, %s) """ NETSPEED_SQL = """ INSERT INTO `netspeed` (`bytes_received`, `bytes_sent`, `cc`, `country`, `d`, `download`, `host`, `lat`, `latency`, `lon`, `name`, `ping`, `server_id`, `sponsor`, `tdate`, `ttime`,`upload`, `url`, `url2`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) """ ################################################################################ # UTILITY FUNCTIONS # ################################################################################ def get_connection(): """ Returns a connection to the database """ return pymysql.connect( host=os.environ['host'], user=os.environ['user'], password=os.environ['password'], db=os.environ['db'], charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor ) def process_reading(data, sql_template): """ Processes handled weather station data using data and SQL template parameters """ try: LOGGER.info('Processing data: {}'.format(data)) LOGGER.info('Received template: {}'.format(sql_template)) # Get the keys in order keys = [f for f in iter(data.keys())] keys.sort() insert_data = [data.get(f, 'NULL') for f in keys] connection = get_connection() try: with connection.cursor() as cursor: cursor.execute(sql_template, (insert_data)) connection.commit() finally: connection.close() except Exception as exc: LOGGER.error('Exception occurred: {0}'.format(exc)) return 'FAIL' else: LOGGER.info('Processed data successfully for [{0} {1}]'.format( data['tdate'], data['ttime'])) return None def last_reading(table): """ Get last reading timestamp from table parameter """ LOGGER.info('Getting last TS from {}'.format(table)) try: connection = get_connection() cursor = connection.cursor() sql = """ SELECT concat(s.tdate, ' ', MAX(s.ttime)) AS lastreading FROM (SELECT tdate, ttime FROM {0} WHERE tdate = (SELECT MAX(tdate) FROM `{0}`)) AS s """.format(table) cursor.execute(sql) data = cursor.fetchone() except Exception as exc: LOGGER.error('Exception occurred: {0}'.format(exc)) return 'FAIL' else: LOGGER.info('Processed data successfully for {0}'.format( data['lastreading'])) return data['lastreading'] finally: connection.close() ################################################################################ # LAMBDA HANDLERS # ################################################################################ def post_weather_station(event, context): """ Weather station data ingestion handler """ return process_reading(event['query'], STATION_SQL) def post_climate(event, context): """ Climate system data ingestion handler """ return process_reading(event['query'], CLIMATE_SQL) def post_soil_temps(event, context): """ Soil temperature station data ingestion handler """ return process_reading(event['query'], SOIL_TEMPS_SQL) def get_last_climate_reading_ts(event, context): """ 
Return timestamp of last climate reading in database """ return last_reading('climate') def get_last_soil_reading_ts(event, context): """ Return timestamp of last soil temperature station reading in database """ return last_reading('soil_temps') def get_last_netspeed_reading_ts(event, context): """ Return timestamp of last speed test reading in database """ return last_reading('netspeed') def post_netspeed(event, context): """ Speed test data ingestion handler """ return process_reading(event['query'], NETSPEED_SQL)
[ "pymysql.connect", "logging.getLogger" ]
[((101, 120), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (118, 120), False, 'import logging\n'), ((1479, 1666), 'pymysql.connect', 'pymysql.connect', ([], {'host': "os.environ['host']", 'user': "os.environ['user']", 'password': "os.environ['password']", 'db': "os.environ['db']", 'charset': '"""utf8mb4"""', 'cursorclass': 'pymysql.cursors.DictCursor'}), "(host=os.environ['host'], user=os.environ['user'], password=\n os.environ['password'], db=os.environ['db'], charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n", (1494, 1666), False, 'import pymysql\n')]
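The handlers above rely on a quiet invariant: process_reading sorts the payload keys alphabetically, so each INSERT template must list its columns in the same alphabetical order (which they do). A toy payload for the climate handler, with made-up values:

event = {"query": {"airflow": 1.2, "humidity": 55.0, "light": 300,
                   "sound": 40, "tdate": "2020-01-01", "temp": 21.5,
                   "ttime": "12:00:00"}}

# post_climate(event, None) would then bind these values, in sorted key order,
# to CLIMATE_SQL's (airflow, humidity, light, sound, tdate, temp, ttime) columns.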
# -*- coding: utf-8 -*-

"""Youtubedlg module to update youtube-dl binary.

Attributes:
    UPDATE_PUB_TOPIC (string): wxPublisher subscription topic of the
        UpdateThread thread.

"""

import json
import os.path

from threading import Thread
from urllib.request import urlopen
from urllib.error import URLError, HTTPError

from wx import CallAfter
from pubsub import pub as Publisher

from .utils import (
    YOUTUBEDL_BIN,
    check_path
)


UPDATE_PUB_TOPIC = 'update'


class UpdateThread(Thread):

    """Python Thread that downloads youtube-dl binary.

    Attributes:
        LATEST_YOUTUBE_DL (string): URL with the latest youtube-dl binary.

        DOWNLOAD_TIMEOUT (int): Download timeout in seconds.

    Args:
        download_path (string): Absolute path where UpdateThread will download
            the latest youtube-dl.

        quiet (boolean): If True UpdateThread won't send the finish signal
            back to the caller. The finish signal can be used to make sure that
            the UpdateThread has completed in an asynchronous way.

    """

    LATEST_YOUTUBE_DL = 'https://yt-dl.org/latest/'
    GITHUB_API = "https://api.github.com/"
    LATEST_YOUTUBE_DL_API = GITHUB_API + 'repos/ytdl-org/youtube-dl/releases/latest'
    # LATEST_PICTA_DL_API = GITHUB_API + 'repos/oleksis/youtube-dl/releases/latest'
    DOWNLOAD_TIMEOUT = 10

    def __init__(self, download_path, quiet=False):
        super(UpdateThread, self).__init__()
        self.download_path = download_path
        self.quiet = quiet
        self.start()

    def get_latest_sourcefile(self):
        # fall back to the legacy yt-dl.org mirror if the GitHub API lookup fails
        source_file = self.LATEST_YOUTUBE_DL + YOUTUBEDL_BIN

        try:
            stream = urlopen(self.LATEST_YOUTUBE_DL_API, timeout=self.DOWNLOAD_TIMEOUT)
            latest_json = json.load(stream)
            latest_assets = latest_json["assets"]

            for asset in latest_assets:
                if asset["name"] == YOUTUBEDL_BIN:
                    source_file = asset["browser_download_url"]
                    break

        except (HTTPError, URLError, json.JSONDecodeError) as error:
            self._talk_to_gui('error', error)

        return source_file

    def run(self):
        self._talk_to_gui('download')

        source_file = self.get_latest_sourcefile()
        destination_file = os.path.join(self.download_path, YOUTUBEDL_BIN)

        check_path(self.download_path)

        try:
            stream = urlopen(source_file, timeout=self.DOWNLOAD_TIMEOUT)

            with open(destination_file, 'wb') as dest_file:
                dest_file.write(stream.read())

            self._talk_to_gui('correct')
        except (HTTPError, URLError, IOError) as error:
            self._talk_to_gui('error', error)

        if not self.quiet:
            self._talk_to_gui('finish')

    def _talk_to_gui(self, signal, data=None):
        """Communicate with the GUI using wxCallAfter and wxPublisher.

        Args:
            signal (string): Unique signal string that informs the GUI for the
                update process.

            data (string): Can be any string data to pass along with the
                given signal. Default is None.

        Note:
            UpdateThread supports 4 signals.
                1) download: The update process started
                2) correct: The update process completed successfully
                3) error: An error occurred while downloading youtube-dl binary
                4) finish: The update thread is ready to join

        """
        CallAfter(Publisher.sendMessage, UPDATE_PUB_TOPIC, signal=signal, data=data)
[ "wx.CallAfter", "json.load", "urllib.request.urlopen" ]
[((3562, 3638), 'wx.CallAfter', 'CallAfter', (['Publisher.sendMessage', 'UPDATE_PUB_TOPIC'], {'signal': 'signal', 'data': 'data'}), '(Publisher.sendMessage, UPDATE_PUB_TOPIC, signal=signal, data=data)\n', (3571, 3638), False, 'from wx import CallAfter\n'), ((1666, 1732), 'urllib.request.urlopen', 'urlopen', (['self.LATEST_YOUTUBE_DL_API'], {'timeout': 'self.DOWNLOAD_TIMEOUT'}), '(self.LATEST_YOUTUBE_DL_API, timeout=self.DOWNLOAD_TIMEOUT)\n', (1673, 1732), False, 'from urllib.request import urlopen\n'), ((1760, 1777), 'json.load', 'json.load', (['stream'], {}), '(stream)\n', (1769, 1777), False, 'import json\n'), ((2476, 2527), 'urllib.request.urlopen', 'urlopen', (['source_file'], {'timeout': 'self.DOWNLOAD_TIMEOUT'}), '(source_file, timeout=self.DOWNLOAD_TIMEOUT)\n', (2483, 2527), False, 'from urllib.request import urlopen\n')]
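get_latest_sourcefile can be reduced to a standalone helper that asks the GitHub releases API for one named asset's download URL. A minimal sketch; the function name and defaults are illustrative.

import json
from urllib.request import urlopen


def latest_asset_url(repo='ytdl-org/youtube-dl', asset_name='youtube-dl', timeout=10):
    url = f'https://api.github.com/repos/{repo}/releases/latest'
    with urlopen(url, timeout=timeout) as resp:
        release = json.load(resp)
    for asset in release['assets']:
        if asset['name'] == asset_name:
            return asset['browser_download_url']
    raise LookupError(f'no asset named {asset_name!r} in the latest release')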
import csv


class Validator(object):
    '''
    validate csv file data type and str length
    '''

    def validate_data_type(self, file_input, types):
        with open(file_input) as csv_file:
            csv_reader = csv.reader(csv_file)
            header = next(csv_reader)
            for line in csv_reader:
                row = [data_type(value) for data_type, value in zip(types, line)]

    def validate_str_length(self, file_input, lengths):
        with open(file_input) as csv_file:
            csv_reader = csv.reader(csv_file)
            header = next(csv_reader)
            for line in csv_reader:
                row = [len(value) for data_type, value in zip(lengths, line) \
                    if type(value) == str and len(value) <= data_type]
[ "csv.reader" ]
[((213, 233), 'csv.reader', 'csv.reader', (['csv_file'], {}), '(csv_file)\n', (223, 233), False, 'import csv\n'), ((515, 535), 'csv.reader', 'csv.reader', (['csv_file'], {}), '(csv_file)\n', (525, 535), False, 'import csv\n')]
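In the sample above, validate_str_length binds each per-column limit to a variable named data_type and keeps only the cells that fit the limit, which reads as inverted for a validator; nothing is returned or reported either way. A corrected sketch — an interpretation of the assumed intent, not the original author's code — that surfaces overlong cells instead:

import csv

# Interpretation of the Validator sample above (assumed intent, not the
# original code): report the cells that violate their length limit.
def find_overlong_fields(file_input, lengths):
    """Yield (row_number, column_index, value) for each overlong cell."""
    with open(file_input, newline="") as csv_file:
        csv_reader = csv.reader(csv_file)
        next(csv_reader)  # skip the header row
        for row_number, line in enumerate(csv_reader, start=2):
            for column_index, (limit, value) in enumerate(zip(lengths, line)):
                if len(value) > limit:
                    yield row_number, column_index, value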
from django.core.cache import cache
from django.test import Client, TestCase
from django.urls.base import reverse

from posts.models import Follow, Group, Post, User

SLUG = 'test-slug'
TEXT = 'Тестовый текст'
TEXT_2 = 'Тестовый текст 2'
TEXT_3 = 'Тестовый текст 3'
USER = 'Name'
AUTHOR = 'V'
TITLE = 'Тестовое название'
DESCRIPTION = 'Тестовое описание'
NEW_USER = 'NewUser'
INDEX_URL = reverse('posts:index')
CREATE_POST_URL = reverse('posts:post_create')
PROFILE_URL = reverse('posts:profile', kwargs={'username': AUTHOR})
GROUP_URL = reverse('posts:group_list', kwargs={'slug': SLUG})
LOGIN_URL = reverse('users:login')
FOLLOW_INDEX_URL = reverse('posts:follow_index')
PROFILE_FOLLOW_URL = reverse(
    'posts:profile_follow', kwargs={'username': AUTHOR}
)
PROFILE_UNFOLLOW_URL = reverse(
    'posts:profile_unfollow', kwargs={'username': AUTHOR}
)


class URLTests(TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.user = User.objects.create_user(username=USER)
        cls.author_post = User.objects.create_user(username=AUTHOR)
        cls.new_user = User.objects.create_user(username=NEW_USER)
        cls.post = Post.objects.create(
            text=TEXT,
            author=cls.author_post,
        )
        cls.POST_EDIT_URL = reverse(
            'posts:post_edit', kwargs={'post_id': cls.post.id})
        cls.POST_DETAIL_URL = reverse(
            'posts:post_detail', kwargs={'post_id': cls.post.id})
        Group.objects.create(
            title=TITLE,
            slug=SLUG,
            description=DESCRIPTION
        )
        cls.guest = Client()
        cls.another = Client()
        cls.author = Client()
        cls.new_client = Client()
        cls.another.force_login(cls.user)
        cls.author.force_login(cls.author_post)
        cls.new_client.force_login(cls.new_user)

    def setUp(self):
        cache.clear()

    def test_pages_urls_exist_at_desired_location_posts(self):
        Follow.objects.create(user=self.user, author=self.author_post)
        cases = [
            [INDEX_URL, self.guest, 200],
            [GROUP_URL, self.guest, 200],
            [PROFILE_URL, self.another, 200],
            [self.POST_DETAIL_URL, self.guest, 200],
            [CREATE_POST_URL, self.another, 200],
            [self.POST_EDIT_URL, self.author, 200],
            ['/unexisting_page/', self.guest, 404],
            [CREATE_POST_URL, self.guest, 302],
            [self.POST_EDIT_URL, self.another, 302],
            [self.POST_EDIT_URL, self.guest, 302],
            [FOLLOW_INDEX_URL, self.another, 200],
            [FOLLOW_INDEX_URL, self.guest, 302],
            [PROFILE_FOLLOW_URL, self.new_client, 302],
            [PROFILE_FOLLOW_URL, self.guest, 302],
            [PROFILE_UNFOLLOW_URL, self.another, 302],
            [PROFILE_UNFOLLOW_URL, self.guest, 302],
        ]
        for url, client, code in cases:
            with self.subTest(url=url, client=client):
                self.assertEqual(client.get(url).status_code, code)

    def test_urls_redirects_posts(self):
        cases = [
            [CREATE_POST_URL, self.guest,
                LOGIN_URL + '?next=' + CREATE_POST_URL],
            [self.POST_EDIT_URL, self.guest,
                LOGIN_URL + '?next=' + self.POST_EDIT_URL],
            [self.POST_EDIT_URL, self.another, self.POST_DETAIL_URL],
            [PROFILE_FOLLOW_URL, self.new_client, PROFILE_URL],
            [PROFILE_FOLLOW_URL, self.guest,
                LOGIN_URL + '?next=' + PROFILE_FOLLOW_URL],
            [PROFILE_FOLLOW_URL, self.another, PROFILE_URL],
            [PROFILE_UNFOLLOW_URL, self.guest,
                LOGIN_URL + '?next=' + PROFILE_UNFOLLOW_URL],
        ]
        for url, client, redirect_url in cases:
            with self.subTest(url=url, client=client):
                self.assertRedirects(
                    client.get(url, follow=True), redirect_url)

    def test_urls_use_correct_templates_posts(self):
        templates_url_names = {
            INDEX_URL: 'posts/index.html',
            GROUP_URL: 'posts/group_list.html',
            PROFILE_URL: 'posts/profile.html',
            self.POST_DETAIL_URL: 'posts/post_detail.html',
            CREATE_POST_URL: 'posts/create_post.html',
            self.POST_EDIT_URL: 'posts/create_post.html',
            FOLLOW_INDEX_URL: 'posts/follow.html'
        }
        for adress, template in templates_url_names.items():
            with self.subTest(adress=adress):
                self.assertTemplateUsed(
                    self.author.get(adress), template
                )
[ "django.core.cache.cache.clear", "django.test.Client", "posts.models.Follow.objects.create", "posts.models.User.objects.create_user", "posts.models.Post.objects.create", "django.urls.base.reverse", "posts.models.Group.objects.create" ]
[((389, 411), 'django.urls.base.reverse', 'reverse', (['"""posts:index"""'], {}), "('posts:index')\n", (396, 411), False, 'from django.urls.base import reverse\n'), ((430, 458), 'django.urls.base.reverse', 'reverse', (['"""posts:post_create"""'], {}), "('posts:post_create')\n", (437, 458), False, 'from django.urls.base import reverse\n'), ((473, 526), 'django.urls.base.reverse', 'reverse', (['"""posts:profile"""'], {'kwargs': "{'username': AUTHOR}"}), "('posts:profile', kwargs={'username': AUTHOR})\n", (480, 526), False, 'from django.urls.base import reverse\n'), ((539, 589), 'django.urls.base.reverse', 'reverse', (['"""posts:group_list"""'], {'kwargs': "{'slug': SLUG}"}), "('posts:group_list', kwargs={'slug': SLUG})\n", (546, 589), False, 'from django.urls.base import reverse\n'), ((602, 624), 'django.urls.base.reverse', 'reverse', (['"""users:login"""'], {}), "('users:login')\n", (609, 624), False, 'from django.urls.base import reverse\n'), ((644, 673), 'django.urls.base.reverse', 'reverse', (['"""posts:follow_index"""'], {}), "('posts:follow_index')\n", (651, 673), False, 'from django.urls.base import reverse\n'), ((695, 755), 'django.urls.base.reverse', 'reverse', (['"""posts:profile_follow"""'], {'kwargs': "{'username': AUTHOR}"}), "('posts:profile_follow', kwargs={'username': AUTHOR})\n", (702, 755), False, 'from django.urls.base import reverse\n'), ((789, 851), 'django.urls.base.reverse', 'reverse', (['"""posts:profile_unfollow"""'], {'kwargs': "{'username': AUTHOR}"}), "('posts:profile_unfollow', kwargs={'username': AUTHOR})\n", (796, 851), False, 'from django.urls.base import reverse\n'), ((981, 1020), 'posts.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': 'USER'}), '(username=USER)\n', (1005, 1020), False, 'from posts.models import Follow, Group, Post, User\n'), ((1047, 1088), 'posts.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': 'AUTHOR'}), '(username=AUTHOR)\n', (1071, 1088), False, 'from posts.models import Follow, Group, Post, User\n'), ((1112, 1155), 'posts.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': 'NEW_USER'}), '(username=NEW_USER)\n', (1136, 1155), False, 'from posts.models import Follow, Group, Post, User\n'), ((1175, 1229), 'posts.models.Post.objects.create', 'Post.objects.create', ([], {'text': 'TEXT', 'author': 'cls.author_post'}), '(text=TEXT, author=cls.author_post)\n', (1194, 1229), False, 'from posts.models import Follow, Group, Post, User\n'), ((1293, 1352), 'django.urls.base.reverse', 'reverse', (['"""posts:post_edit"""'], {'kwargs': "{'post_id': cls.post.id}"}), "('posts:post_edit', kwargs={'post_id': cls.post.id})\n", (1300, 1352), False, 'from django.urls.base import reverse\n'), ((1408, 1469), 'django.urls.base.reverse', 'reverse', (['"""posts:post_detail"""'], {'kwargs': "{'post_id': cls.post.id}"}), "('posts:post_detail', kwargs={'post_id': cls.post.id})\n", (1415, 1469), False, 'from django.urls.base import reverse\n'), ((1503, 1572), 'posts.models.Group.objects.create', 'Group.objects.create', ([], {'title': 'TITLE', 'slug': 'SLUG', 'description': 'DESCRIPTION'}), '(title=TITLE, slug=SLUG, description=DESCRIPTION)\n', (1523, 1572), False, 'from posts.models import Follow, Group, Post, User\n'), ((1639, 1647), 'django.test.Client', 'Client', ([], {}), '()\n', (1645, 1647), False, 'from django.test import Client, TestCase\n'), ((1670, 1678), 'django.test.Client', 'Client', ([], {}), '()\n', (1676, 1678), False, 'from django.test import Client, TestCase\n'), ((1700, 1708), 'django.test.Client', 'Client', ([], {}), '()\n', (1706, 1708), False, 'from django.test import Client, TestCase\n'), ((1734, 1742), 'django.test.Client', 'Client', ([], {}), '()\n', (1740, 1742), False, 'from django.test import Client, TestCase\n'), ((1912, 1925), 'django.core.cache.cache.clear', 'cache.clear', ([], {}), '()\n', (1923, 1925), False, 'from django.core.cache import cache\n'), ((1998, 2060), 'posts.models.Follow.objects.create', 'Follow.objects.create', ([], {'user': 'self.user', 'author': 'self.author_post'}), '(user=self.user, author=self.author_post)\n', (2019, 2060), False, 'from posts.models import Follow, Group, Post, User\n')]
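Across all three records the apis column appears to be the deduplicated set of dotted names taken from the second field of the extract_api tuples. A short sketch, assuming the tuples are parsed into Python values as shown:

from collections import Counter

def api_frequency(extract_api_records):
    """Count how often each fully qualified API name is called in one sample."""
    return Counter(record[1] for record in extract_api_records)

# For the Django sample above this yields
# Counter({'django.urls.base.reverse': 10, 'django.test.Client': 4, ...});
# deduplicating the keys recovers the sample's apis list.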