code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
"""Prime Adam Int Check"""
import math
def prime_check(number: int) -> bool:
    """Return True when ``number`` is prime.

    A number is prime if it has exactly two divisors: 1 and itself.
    Example: 2, 3, 5, 7

    :param number: the integer to test.
    :return: True when prime, False otherwise.
    """
    if number < 2:
        # 0, 1 and negatives are not prime by definition.
        return False
    # Trial division up to the integer square root is sufficient: any factor
    # above it pairs with one below it.  math.isqrt avoids the float rounding
    # and overflow problems of int(math.sqrt(number)) on very large ints.
    for i in range(2, math.isqrt(number) + 1):
        if number % i == 0:
            # Found a third divisor, so the number is not prime.
            return False
    return True
def prime_adam_check(number: int) -> bool:
    """
    Check whether ``number`` is a Prime Adam integer.

    A Prime Adam integer is both prime and an Adam number: the square of
    the number and the square of its digit-reversal are reversals of each
    other.  Example: 13 (13^2 = 169 and 31^2 = 961 are reversals).
    """
    # Guard clause: a non-prime can never be a Prime Adam integer.
    if not prime_check(number):
        return False
    # Digit-reverse the number, then square both forms.
    mirrored = int(str(number)[::-1])
    squared = str(number * number)
    mirrored_squared = str(mirrored * mirrored)
    # Adam condition: the two squares are digit-reversals of each other.
    return squared == mirrored_squared[::-1]
if __name__ == "__main__":
    # Interactive entry point: read one integer from stdin and report
    # whether it is a Prime Adam integer.
    print("Program to check whether a number is a Prime Adam Int or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if prime_adam_check(number) else 'not '}a Prime Adam Int.")
| [
"math.sqrt"
] | [((299, 316), 'math.sqrt', 'math.sqrt', (['number'], {}), '(number)\n', (308, 316), False, 'import math\n')] |
from setuptools import setup
setup(
    name="ml-lineage-helper",
    version="0.1",
    description="A wrapper around SageMaker ML Lineage Tracking extending ML Lineage to end-to-end ML lifecycles, including additional capabilities around Feature Store groups, queries, and other relevant artifacts.",
    url="https://github.com/aws-samples/ml-lineage-helper",
    author="<NAME>",
    author_email="<EMAIL>",
    license="Apache-2.0",
    packages=["ml_lineage_helper"],
    # FIX: "numpy" was listed twice; duplicates in install_requires are
    # harmless to pip but misleading to readers, so it is listed once.
    install_requires=[
        "numpy",
        "boto3>=1.17.74",
        "sagemaker>2.49.1",
        "pandas",
        "networkx",
        "matplotlib",
    ],
)
| [
"setuptools.setup"
] | [((30, 576), 'setuptools.setup', 'setup', ([], {'name': '"""ml-lineage-helper"""', 'version': '"""0.1"""', 'description': '"""A wrapper around SageMaker ML Lineage Tracking extending ML Lineage to end-to-end ML lifecycles, including additional capabilities around Feature Store groups, queries, and other relevant artifacts."""', 'url': '"""https://github.com/aws-samples/ml-lineage-helper"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""Apache-2.0"""', 'packages': "['ml_lineage_helper']", 'install_requires': "['numpy', 'boto3>=1.17.74', 'sagemaker>2.49.1', 'pandas', 'networkx',\n 'matplotlib', 'numpy']"}), "(name='ml-lineage-helper', version='0.1', description=\n 'A wrapper around SageMaker ML Lineage Tracking extending ML Lineage to end-to-end ML lifecycles, including additional capabilities around Feature Store groups, queries, and other relevant artifacts.'\n , url='https://github.com/aws-samples/ml-lineage-helper', author=\n '<NAME>', author_email='<EMAIL>', license='Apache-2.0', packages=[\n 'ml_lineage_helper'], install_requires=['numpy', 'boto3>=1.17.74',\n 'sagemaker>2.49.1', 'pandas', 'networkx', 'matplotlib', 'numpy'])\n", (35, 576), False, 'from setuptools import setup\n')] |
# ARZINA: RUNNING THIS CODE IN GEO GIVES YOU A DIFFERENT OUTPUT TO WHAT IS EXPECTED (ON THE TASK IT GIVES EXAMPLE OUTPUT,
# ARZINA: BUT MY OUTPUT IS NOT THE SAME'
# ARZINA: I SUSPECT IT IS THE HAVERSINE FORMULA WHICH I'M USING WRONG
from floodsystem.stationdata import build_station_list
from floodsystem.geo import stations_within_radius
def run():
    """Print the monitoring stations within 10 km of central Cambridge."""
    all_stations = build_station_list()
    # (latitude, longitude) of the reference point, and the search radius.
    cambridge_centre = (52.2053, 0.1218)
    radius = 10
    nearby_stations = stations_within_radius(all_stations, cambridge_centre, radius)
    print(nearby_stations)
if __name__ == "__main__":
    print("*** Task 1C: CUED Part IA Flood Warning System ***")
    run()

# NOTE(review): leftover scratch code kept for reference; `stations_in_radius`
# only exists inside run(), so these lines would not work at module level.
# names = sorted(map(lambda station: station.name, stations_in_radius))
# print(names)
# names = sorted(map(lambda station: station.name, stations_in_radius))
# print(names)
| [
"floodsystem.stationdata.build_station_list",
"floodsystem.geo.stations_within_radius"
] | [((367, 387), 'floodsystem.stationdata.build_station_list', 'build_station_list', ([], {}), '()\n', (385, 387), False, 'from floodsystem.stationdata import build_station_list\n'), ((455, 498), 'floodsystem.geo.stations_within_radius', 'stations_within_radius', (['stations', 'centre', 'r'], {}), '(stations, centre, r)\n', (477, 498), False, 'from floodsystem.geo import stations_within_radius\n')] |
import requests
from requests.auth import HTTPBasicAuth

# Explicit HTTP Basic auth object.
r = requests.get('http://localhost:5000', auth=HTTPBasicAuth('username', 'password'))
# Shorthand: a (user, password) tuple defaults to HTTP Basic auth.
r = requests.get('http://localhost:5000', auth=('username', 'password'))
print(r.status_code)

from requests_oauthlib import OAuth1

url = ''
auth = OAuth1('APP_key', 'App_secret', 'user_auth_key', 'user-token_seleted')
# FIX: `auth` must be passed by keyword.  Passed positionally it becomes the
# `params` argument of requests.get(), and no authentication is applied.
requests.get(url, auth=auth)
"requests_oauthlib.OAuth1",
"requests.auth.HTTPBasicAuth",
"requests.get"
] | [((147, 215), 'requests.get', 'requests.get', (['"""http://localhost:5000"""'], {'auth': "('username', 'password')"}), "('http://localhost:5000', auth=('username', 'password'))\n", (159, 215), False, 'import requests\n'), ((302, 372), 'requests_oauthlib.OAuth1', 'OAuth1', (['"""APP_key"""', '"""App_secret"""', '"""user_auth_key"""', '"""user-token_seleted"""'], {}), "('APP_key', 'App_secret', 'user_auth_key', 'user-token_seleted')\n", (308, 372), False, 'from requests_oauthlib import OAuth1\n'), ((370, 393), 'requests.get', 'requests.get', (['url', 'auth'], {}), '(url, auth)\n', (382, 393), False, 'import requests\n'), ((104, 141), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['"""username"""', '"""password"""'], {}), "('username', 'password')\n", (117, 141), False, 'from requests.auth import HTTPBasicAuth\n')] |
#!/usr/bin/python
# Google File Finder - Finished in Dec, 2016
# Powered by <NAME> [<EMAIL>]
import os
import googlesearch
""" --------------------------------------------------------------------------- """
def cut_string(text, limit):
    """Return ``text`` truncated just before the first character equal to
    ``limit``; return ``text`` unchanged when no character matches.

    Note: the comparison is per single character, so a multi-character
    ``limit`` never matches and the text is returned as-is.
    """
    for position, character in enumerate(text):
        if character == limit:
            return text[:position]
    return text
""" --------------------------------------------------------------------------- """
def listFilesFromResult(all_files, log):
    """Print each result URL (numbered from 1), append it to ``log``, and
    return the updated ``log`` list."""
    print('\nListing files URLs:\n')
    for position, url in enumerate(all_files, start=1):
        # Drop any trailing query string after the first '&'.
        url = cut_string(url, '&')
        # Google redirect links look like "?url=<real target>"; unwrap them.
        if url[1:4] == 'url':
            url = url[7:]
        print('-------> [' + str(position) + "] " + url)
        log.append(url)
    print('\n')
    return log
""" --------------------------------------------------------------------------- """
def saveResultsInFile(log_list):
    """Interactively offer to write every entry of ``log_list`` (numbered
    from 0) to a user-chosen file.

    FIX: the file is now opened with a ``with`` block so it is closed even
    if a write fails (the original open/close pair leaked on exception).
    """
    save_results = input("\n\n>> Do you want to save the results to a file? [Y/n] ")
    if save_results in ('', 'Y', 'y'):
        path_file = input("\n>> Enter the filename to save: ")
        if path_file != '':
            with open(path_file, 'w') as f:
                for n, log in enumerate(log_list):
                    f.write("\n" + "[" + str(n) + "] " + log)
            print("\nFile " + path_file + " saved!\n\n")
""" --------------------------------------------------------------------------- """
def downloadFiles(log_list):
    """Interactively download all URLs in ``log_list``, or individual ones
    chosen by their printed position, using ``wget``.

    SECURITY FIX: the original built a shell command with string
    concatenation (``os.system("wget " + url ...)``); a search-result URL
    containing shell metacharacters could inject commands.  Arguments are
    now passed as a list with no shell.

    NOTE(review): ``log_list[0]`` is a header line, not a URL (see main());
    "download all" therefore also feeds that header to wget — confirm
    whether that is intended before changing it.
    """
    import subprocess  # local import: only needed for the wget invocations

    do_wget = input("\n>> Do you want download (with wget) all files? [Y/n] ")
    if do_wget == '' or do_wget == 'Y' or do_wget == 'y':
        dir_save = input("\n>> Enter dir for save the files: ")
        if dir_save != "":
            for log in log_list:
                subprocess.run(["wget", log, "-P", dir_save])
    else:
        do_wget = input("\n>> Do you want download (with wget) any file? [Y/n] ")
        if do_wget == '' or do_wget == 'Y' or do_wget == 'y':
            url_position = input("\n>> Enter the url position for download or 0 for exit (Ex.: 1,2,3...): ")
            while int(url_position) > 0:
                dir_save = input("\n>> Enter dir for save the file: ")
                if dir_save != "":
                    subprocess.run(["wget", log_list[int(url_position)], "-P", dir_save])
                url_position = input("\n>> Enter the url position for download or 0 for exit (Ex.: 1,2,3...): ")
""" --------------------------------------------------------------------------- """
def printHeader():
    """Clear the terminal (POSIX ``clear``) and print the program banner."""
    os.system("clear")
    print('================================================================')
    print('/                                                              /')
    print('/                      GOOGLE FILE FINDER                      /')
    print('/                      by <NAME>                               /')
    print('================================================================')
""" --------------------------------------------------------------------------- """
def main():
    """Interactive driver: prompt for a keyword and file type, search
    Google for matching files, then list/save/download the result URLs."""
    try:
        printHeader()
        # Input user search
        search = input("\n-> Enter a key word: ")
        log = []
        # The first log entry is a header line, not a URL; downstream code
        # numbers URLs from position 1, which relies on this.
        log.append("Key Word of File Search -> " + search + "\n\n")
        search = search.replace(" ","+")
        file_type = input("-> Enter the file type (Ex.: pdf, txt, docx): ")
        amount_results = int(input("-> Enter the max amount of results (Max = 1000000): "))
        # Start Search
        print('\n\n[-] Searching files in google...')
        search = googlesearch.search_google(search, amount_results, 1, file_type)
        search.process()
        all_files = search.get_files()
        # Listing files from results
        log = listFilesFromResult(all_files, log)
        # Save Results
        saveResultsInFile(log)
        # Download files
        downloadFiles(log)
        print("\n\nFinish!\n")
    except KeyboardInterrupt:
        # Ctrl+C exits cleanly instead of dumping a traceback.
        print('\n Files Finder closed..Thanks for use')
""" --------------------------------------------------------------------------- """
if __name__ == "__main__":
    # Script entry point.
    main()
| [
"os.system",
"googlesearch.search_google"
] | [((2334, 2352), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (2343, 2352), False, 'import os\n'), ((3254, 3318), 'googlesearch.search_google', 'googlesearch.search_google', (['search', 'amount_results', '(1)', 'file_type'], {}), '(search, amount_results, 1, file_type)\n', (3280, 3318), False, 'import googlesearch\n'), ((1656, 1700), 'os.system', 'os.system', (["('wget ' + log + ' -P ' + dir_save)"], {}), "('wget ' + log + ' -P ' + dir_save)\n", (1665, 1700), False, 'import os\n')] |
#!/usr/bin/env python2
import vxi11
import time
import argparse
if __name__ == "__main__":
    # Command-line interface for a VXI-11 power-supply channel:
    # toggle/on/off the output, set voltage/current, and read measurements.
    ap = argparse.ArgumentParser()
    ap.add_argument("DEVICE", nargs=1, help="Device to connect to")
    ap.add_argument("--toggle", "-t",
                    action="store_const", dest="action", const="toggle",
                    default=None, help="Toggle channel")
    ap.add_argument("--wait", "-w", type=float, help="Seconds to wait after changes", default=1)
    ap.add_argument("--on", action="store_const", dest="action", const="on")
    ap.add_argument("--off", action="store_const", dest="action", const="off")
    ap.add_argument("--channel", "-c", type=int, help="Select channel", default=1)
    ap.add_argument("--volt", "-v", type=float, help="Set voltage", default=None)
    ap.add_argument("--amp", "-a", type=float, help="Set current", default=None)
    args = ap.parse_args()
    # Connect over VXI-11 and print the instrument identification string.
    instrument = vxi11.Instrument(args.DEVICE[0])
    print(instrument.ask("*IDN?"))
    print("CHANNEL {}".format(args.channel))
    if args.action is not None:
        # "toggle" queries the current output state and flips it.
        if args.action == "toggle":
            current_state = instrument.ask(":OUTPUT:STATE? CH{}".format(args.channel)).upper()
            new_state = "ON" if current_state == "OFF" else "OFF"
        elif args.action == "on":
            new_state = "ON"
        elif args.action == "off":
            new_state = "OFF"
        instrument.write(":OUTPUT:STATE CH{},{}".format(args.channel, new_state))
        # Give the supply time to settle before reading the state back.
        time.sleep(args.wait)
        print("output " + instrument.ask(":OUTPUT:STATE? CH{}".format(args.channel)).upper())
    if args.volt is not None:
        instrument.write(":SOURCE{}:VOLTAGE {}".format(args.channel, args.volt))
        print("set volt " + instrument.ask(":SOURCE{}:VOLTAGE?".format(args.channel)))
    if args.amp is not None:
        instrument.write(":SOURCE{}:CURRENT {}".format(args.channel, args.amp))
        print("set amp " + instrument.ask(":SOURCE{}:CURRENT?".format(args.channel)))
    # MEASURE:ALL returns "volt,amp,watt" as a comma-separated string.
    measured = instrument.ask(":MEASURE:ALL? CH{}".format(args.channel)).split(",")
    print("measured volt {}".format(measured[0]))
    print("measured amp {}".format(measured[1]))
    print("measured watt {}".format(measured[2]))
| [
"vxi11.Instrument",
"time.sleep",
"argparse.ArgumentParser"
] | [((104, 129), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (127, 129), False, 'import argparse\n'), ((910, 942), 'vxi11.Instrument', 'vxi11.Instrument', (['args.DEVICE[0]'], {}), '(args.DEVICE[0])\n', (926, 942), False, 'import vxi11\n'), ((1474, 1495), 'time.sleep', 'time.sleep', (['args.wait'], {}), '(args.wait)\n', (1484, 1495), False, 'import time\n')] |
import io
import errno
import types
import importlib
import inspect
from pathlib import Path
from dynaconf import default_settings
from dynaconf.utils.files import find_file
from dynaconf.utils import DynaconfDict, object_merge, raw_logger
def load(obj, settings_module, identifier='py', silent=False, key=None):
    """Load UPPERCASE attributes from a python module into ``obj``.

    :param obj: settings object being populated (uses .logger, .set,
        ._loaded_files).
    :param settings_module: dotted module path or file path to load.
    :param identifier: loader identifier recorded with every value.
    :param silent: passed through to the file loader to suppress errors.
    :param key: when given, load only this single setting name.
    """
    mod, loaded_from = get_module(obj, settings_module, silent)
    if mod and loaded_from:
        obj.logger.debug(
            "py_loader: {}".format(mod)
        )
    else:
        # Module could not be resolved at all: nothing to load.
        obj.logger.debug('py_loader: %s (Ignoring, Not Found)',
                         settings_module)
        return
    for setting in dir(mod):
        # Only UPPERCASE names are treated as settings.
        if setting.isupper():
            if key is None or key == setting:
                setting_value = getattr(mod, setting)
                obj.logger.debug(
                    'py_loader: loading %s: %s (%s)',
                    setting,
                    # Mask values from files whose name contains "secret"
                    # so they do not leak into debug logs.
                    '*****' if 'secret' in settings_module else setting_value,
                    identifier
                )
                obj.set(setting, setting_value, loader_identifier=identifier)
    obj._loaded_files.append(mod.__file__)
def get_module(obj, filename, silent=False):
    """Resolve a settings module either as an importable dotted path or,
    failing that, as a file on disk.

    Returns a ``(module, origin)`` pair where ``origin`` is ``'module'``,
    ``'filename'``, or ``None`` when file loading also failed.
    """
    logger = raw_logger()
    try:
        logger.debug('Trying to import %s', filename)
        imported = importlib.import_module(filename)
        origin = 'module'
    except (ImportError, TypeError):
        # Not importable as a dotted path: fall back to loading the file.
        logger.debug('Cant import %s trying to load from file', filename)
        imported = import_from_filename(obj, filename, silent=silent)
        origin = 'filename' if imported and not imported._is_error else None
    return imported, origin
def import_from_filename(obj, filename, silent=False):  # pragma: no cover
    """If settings_module is a filename path import it."""
    # Guard against the settings file loading dynaconf itself, which would
    # recurse back into this loader: refuse when the filename is already on
    # the interpreter call stack.
    if filename in [item.filename for item in inspect.stack()]:
        raise ImportError(
            'Looks like you are loading dynaconf '
            'from inside the {} file and then it is trying '
            'to load itself entering in a circular reference '
            'problem. To solve it you have to '
            'invoke your program from another root folder '
            'or rename your program file.'
            .format(filename)
        )
    # The settings object may provide its own file locator; fall back to
    # the package-level find_file helper.
    _find_file = getattr(obj, 'find_file', find_file)
    if not filename.endswith('.py'):
        filename = '{0}.py'.format(filename)
    # Default settings module names are allowed to be absent.
    if filename in default_settings.SETTINGS_MODULE_FOR_DYNACONF:
        silent = True
    # Build an empty module object and execute the file's code inside it.
    mod = types.ModuleType(filename.rstrip('.py'))
    mod.__file__ = filename
    mod._is_error = False
    try:
        with io.open(
            _find_file(filename),
            encoding=default_settings.ENCODING_FOR_DYNACONF
        ) as config_file:
            exec(
                compile(config_file.read(), filename, 'exec'),
                mod.__dict__
            )
    except IOError as e:
        e.strerror = (
            'py_loader: error loading file (%s %s)\n'
        ) % (e.strerror, filename)
        # Missing files are tolerated in silent mode; other IO errors are
        # recorded on the module via the _is_error flag.
        if silent and e.errno in (errno.ENOENT, errno.EISDIR):
            return
        raw_logger().debug(e.strerror)
        mod._is_error = True
    return mod
def write(settings_path, settings_data, merge=True):
    """Write data to a settings file.
    :param settings_path: the filepath
    :param settings_data: a dictionary with data
    :param merge: boolean if existing file should be merged with new data
    """
    settings_path = Path(settings_path)
    if settings_path.exists() and merge:  # pragma: no cover
        # Load the current file into a throwaway dict and merge it into the
        # incoming data so existing keys are preserved.
        existing = DynaconfDict()
        load(existing, str(settings_path))
        object_merge(
            existing,
            settings_data
        )
    with io.open(
        str(settings_path), 'w',
        encoding=default_settings.ENCODING_FOR_DYNACONF
    ) as f:
        # One "KEY = repr(value)" assignment per line.
        f.writelines(
            ["{} = {}\n".format(k.upper(), repr(v))
             for k, v in settings_data.items()]
        )
| [
"importlib.import_module",
"inspect.stack",
"pathlib.Path",
"dynaconf.utils.object_merge",
"dynaconf.utils.raw_logger",
"dynaconf.utils.DynaconfDict"
] | [((1244, 1256), 'dynaconf.utils.raw_logger', 'raw_logger', ([], {}), '()\n', (1254, 1256), False, 'from dynaconf.utils import DynaconfDict, object_merge, raw_logger\n'), ((3508, 3527), 'pathlib.Path', 'Path', (['settings_path'], {}), '(settings_path)\n', (3512, 3527), False, 'from pathlib import Path\n'), ((1334, 1367), 'importlib.import_module', 'importlib.import_module', (['filename'], {}), '(filename)\n', (1357, 1367), False, 'import importlib\n'), ((3608, 3622), 'dynaconf.utils.DynaconfDict', 'DynaconfDict', ([], {}), '()\n', (3620, 3622), False, 'from dynaconf.utils import DynaconfDict, object_merge, raw_logger\n'), ((3674, 3711), 'dynaconf.utils.object_merge', 'object_merge', (['existing', 'settings_data'], {}), '(existing, settings_data)\n', (3686, 3711), False, 'from dynaconf.utils import DynaconfDict, object_merge, raw_logger\n'), ((1905, 1920), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (1918, 1920), False, 'import inspect\n'), ((3149, 3161), 'dynaconf.utils.raw_logger', 'raw_logger', ([], {}), '()\n', (3159, 3161), False, 'from dynaconf.utils import DynaconfDict, object_merge, raw_logger\n')] |
# coding = UTF-8
import time
import re
import js2py
import requests
import json
import KuGou
from KuGou.Requirement import Header
class MusicList(object):
    """Fetch the list of search results for a song from KuGou."""
    def __init__(self, MusicName: str) -> None:
        """Initialize the instance:
        validate the argument, create a timestamp, initialize the signature
        and data containers, and set up the JS namespace (loading KuGou's
        signature-building function into it).
        ::Usage:
            >>> var = MusicList("匆匆那年 王菲")
        :param MusicName: the song name to search for; must be a str.
        """
        # The song name must be a str.
        assert isinstance(MusicName, str)
        # Timestamp in milliseconds (epoch seconds * 1000, truncated).
        self.__TimeStamp = int(time.time() * 1000)
        self.__MusicName = MusicName  # Bind the song name.
        self.__Signature = ""  # Container for the request signature.
        self.__GotData = {}  # Container for the request payload.
        # Initialize the JavaScript namespace.
        self.__JSNameSpace = js2py.EvalJs()
        # Load KuGou's own JavaScript signature-building function into it.
        self.__JSNameSpace.execute(KuGou.Requirement.GetSignFunction)
    def SetMusicName(self, MusicName: str) -> str:
        """Set or reset the song name:
        validates the argument and stores it on the instance.
        ::Usage:
            >>>MusicList.SetMusicName("匆匆那年 王菲")
        :param MusicName: the song name to search for; must be a str.
        :return: the song name that was passed in, as a str.
        """
        # The song name must be a str.
        assert isinstance(MusicName, str)
        # Store the new name on the instance.
        self.__MusicName = MusicName
        return self.__MusicName
    def __SetTimeStamp(self) -> int:
        """Refresh the timestamp:
        sets __TimeStamp to the current time in milliseconds.
        :return: the timestamp that was set, as an int.
        """
        # Timestamp in milliseconds (epoch seconds * 1000, truncated).
        self.__TimeStamp = int(time.time() * 1000)
        return self.__TimeStamp
    def __CreateMusicSignature(self) -> str:
        """Create the request signature:
        runs KuGou's signature function (via js2py) over the request fields.
        :return: the created signature, as a str.
        """
        # The fields that get folded into the signature.
        DataDict = [
            KuGou.Requirement.Key,
            "bitrate=0",
            "callback=callback123",
            f"clienttime={self.__TimeStamp}",
            "clientver=2000",
            "dfid=-",
            "inputtype=0",
            "iscorrection=1",
            "isfuzzy=0",
            f"keyword={self.__MusicName}",
            f"mid={self.__TimeStamp}",
            "page=1",
            "pagesize=30",
            "platform=WebFilter",
            "privilege_filter=0",
            "srcappid=2919",
            "tag=em",
            "userid=-1",
            f"uuid={self.__TimeStamp}",
            KuGou.Requirement.Key,
        ]
        # Bind the data inside the JS namespace.
        MusicSign = "o=" + str(DataDict)
        self.__JSNameSpace.execute(MusicSign)
        # Run the signature-building function.
        self.__JSNameSpace.execute(KuGou.Requirement.GetSign)
        # Read the resulting signature back out of the JS namespace.
        MusicSign = self.__JSNameSpace.signature
        # Cache it on the instance.
        self.__Signature = MusicSign
        # Return the signature.
        return MusicSign
    def __CreateParams(self) -> dict:
        """Create the query parameters required for the search request:
        :return: the created parameters, as a dict.
        """
        self.__GotData = {
            'callback': 'callback123',
            'keyword': self.__MusicName,
            'page': "1",
            'pagesize': "30",
            'bitrate': '0',
            'isfuzzy': '0',
            'tag': 'em',
            'inputtype': '0',
            'platform': 'WebFilter',
            'userid': '-1',
            'clientver': '2000',
            'iscorrection': '1',
            'privilege_filter': '0',
            'srcappid': '2919',
            'clienttime': self.__TimeStamp,
            'mid': self.__TimeStamp,
            'uuid': self.__TimeStamp,
            'dfid': '-',
            'signature': self.__Signature,
        }
        return self.__GotData
    def __GetResponse(self) -> list:
        """Fetch the search results from KuGou:
        perform the request, decode as UTF-8, strip the JSONP wrapper,
        check the status code, extract the result list, and clean it.
        :return: the cleaned results (list of KuGou.Music).
        """
        OneHeader = Header.GetHeader(Referrer=Header.REFERRER_KUGOU_SEARCH)
        Response = requests.get(
            'https://complexsearch.kugou.com/v2/search/song?',
            headers=OneHeader, params=self.__GotData
        )  # Fetch the data.
        String_1 = Response.content.decode('UTF-8')  # Decode as UTF-8.
        String_2 = String_1[String_1.find('(') + 1:-2]  # Strip the JSONP callback wrapper.
        Data = json.loads(String_2)  # Parse with the json module.
        if Data["status"] != 1:  # Check the status code.
            raise Exception("酷狗官网的返回状态码有误(不为1)。")  # (KuGou returned a bad status code.)
        # Extract the part we need: the list of search results.
        GotMusicList = Data["data"]["lists"]
        if len(GotMusicList) == 0:  # Check whether the list is empty.
            raise Exception("酷狗官网的返回结果数量为0个。")
        return MusicList.CleanData(GotMusicList)  # Clean the data and return it.
    @classmethod
    def CleanData(cls, Data: list) -> list:
        """Clean the raw data fetched from KuGou:
        for every song, extract the name (FileName), file hash (FileHash)
        and album id (AlbumID) into a KuGou.Music object, and return all
        of them as a list.
        ::Usage:
            >>>MusicList.CleanData(list())
        :param Data: the raw result data.
        :return: one entry per song, as a list.
        """
        Buffer = []  # Accumulator for the cleaned entries.
        for OneSongInfo in Data:  # Walk every song record.
            OneMusic = KuGou.Music()
            OneMusic.AlbumID = OneSongInfo["AlbumID"]  # Album id of the song.
            OneMusic.FileId = OneSongInfo["FileHash"]  # File hash of the song.
            Name = OneSongInfo["SongName"].replace("<em>", "").replace("</em>", "")  # Strip the <em> highlight tags.
            OneMusic.Name = Name
            for Id, SingerName in zip(OneSongInfo["SingerId"], OneSongInfo["SingerName"].split("、")):
                SingerName = SingerName.replace("<em>", "").replace("</em>", "")
                OneMusic.Author.Append(KuGou.SUPPORTED.KuGou, Id, SingerName)
            Buffer.append(OneMusic)  # Collect the song entry.
        return Buffer
    def GetMusicList(self) -> list:
        """Run the full search:
        refresh the timestamp, create the signature, build the payload,
        then fetch the result list.
        ::Usage:
            >>>var1 = MusicList("匆匆那年 王菲")
            >>>var2 = var1.GetMusicList()
        :return: the search results, as a list.
        """
        self.__SetTimeStamp()  # Refresh the timestamp.
        self.__CreateMusicSignature()  # Create the signature.
        self.__CreateParams()  # Build the request payload.
        return self.__GetResponse()  # Fetch and return the results.
class MusicInfo(object):
    """Fetch the detail data for one song from the KuGou site."""
    def __init__(self, MusicItem) -> None:
        # Base timestamp in milliseconds.
        self.__TimeStamp = int(time.time() * 1000)
        # Container for the HTTP request payload.
        self.__Params = {}
        MusicItem: KuGou.Music
        self.__Music = MusicItem
    def __SetTimeStamp(self) -> int:
        """Refresh the request timestamp.
        :return: the timestamp that was set (milliseconds).
        """
        self.__TimeStamp = int(time.time() * 1000)
        return self.__TimeStamp
    def __CreateParams(self) -> dict:
        """Build the query payload required for the detail request.
        :return: the payload that was built.
        """
        self.__Params = {
            "r": "play/getdata",
            "callback": "jQuery19100824172432511463_1612781797757",
            "hash": self.__Music.FileId,
            "dfid": "073Nfk3nSl6t0sst5p3fjWxH",
            "mid": "578a45450e07d9022528599a86a22d26",
            "platid": 4,
            "album_id": self.__Music.AlbumID,
            "_": str(self.__TimeStamp)
        }
        return self.__Params
    def __GetResponse(self) -> dict:
        # Fetch the JSONP payload and strip the callback wrapper before
        # parsing it as JSON.
        OneHeader = Header.GetHeader(Referrer=Header.REFERRER_KUGOU_SEARCH)
        Response = requests.get(
            "https://wwwapi.kugou.com/yy/index.php",
            headers=OneHeader, params=self.__Params
        )
        String_1 = Response.content.decode('utf-8')
        String_2 = String_1[String_1.find('(') + 1:-2]
        Data = json.loads(String_2)
        if Data["status"] != 1:
            # NOTE(review): bare `raise` outside an except block raises
            # RuntimeError here — probably meant to raise a real exception.
            raise
        Data = Data["data"]
        return Data
    def CleanData(self, Data):
        """Copy album, artwork, authors, lyrics and stream URL from the raw
        response onto the bound KuGou.Music instance."""
        if Data["have_album"] == 1:
            self.__Music.Album = Data["album_name"]
        self.__Music.PictureSource = Data["img"]
        for OneSinger in Data["authors"]:
            self.__Music.Author.Append(
                KuGou.SUPPORTED.KuGou,
                OneSinger["author_id"],
                OneSinger["author_name"],
                (OneSinger.get("avatar"),),
                True
            )
        self.__Music.Lyrics = Data["lyrics"]
        if Data.get("play_url") is not None:
            self.__Music.MusicSource = Data.get("play_url")
        else:
            if Data.get("play_backup_url") is not None:
                self.__Music.MusicSource = Data.get("play_backup_url")
            else:
                # No playable URL at all; hand back an empty Music object.
                # NOTE(review): callers ignore this return value — verify.
                return KuGou.Music()
        self.__Music.ReloadInfo()
        # "Artist - Title" pattern: keep only the title part when present.
        String = re.match("(.*?)( - )(.*?)(-)", Data["audio_name"] + "-")
        if String:
            self.__Music.Name = String.group(3).replace("/", "-").replace("\\", "-")
        else:
            self.__Music.Name = Data["audio_name"]
        return None
    def GetMusicInfo(self):
        """Run the full fetch: refresh timestamp, build the payload, fetch
        the response and copy it onto the Music instance.
        :return: the populated KuGou.Music instance.
        """
        self.__SetTimeStamp()
        self.__CreateParams()
        self.CleanData(self.__GetResponse())
        return self.__Music
| [
"json.loads",
"KuGou.Requirement.Header.GetHeader",
"js2py.EvalJs",
"KuGou.Music",
"re.match",
"requests.get",
"time.time"
] | [((776, 790), 'js2py.EvalJs', 'js2py.EvalJs', ([], {}), '()\n', (788, 790), False, 'import js2py\n'), ((3798, 3853), 'KuGou.Requirement.Header.GetHeader', 'Header.GetHeader', ([], {'Referrer': 'Header.REFERRER_KUGOU_SEARCH'}), '(Referrer=Header.REFERRER_KUGOU_SEARCH)\n', (3814, 3853), False, 'from KuGou.Requirement import Header\n'), ((3873, 3983), 'requests.get', 'requests.get', (['"""https://complexsearch.kugou.com/v2/search/song?"""'], {'headers': 'OneHeader', 'params': 'self.__GotData'}), "('https://complexsearch.kugou.com/v2/search/song?', headers=\n OneHeader, params=self.__GotData)\n", (3885, 3983), False, 'import requests\n'), ((4176, 4196), 'json.loads', 'json.loads', (['String_2'], {}), '(String_2)\n', (4186, 4196), False, 'import json\n'), ((7065, 7120), 'KuGou.Requirement.Header.GetHeader', 'Header.GetHeader', ([], {'Referrer': 'Header.REFERRER_KUGOU_SEARCH'}), '(Referrer=Header.REFERRER_KUGOU_SEARCH)\n', (7081, 7120), False, 'from KuGou.Requirement import Header\n'), ((7140, 7238), 'requests.get', 'requests.get', (['"""https://wwwapi.kugou.com/yy/index.php"""'], {'headers': 'OneHeader', 'params': 'self.__Params'}), "('https://wwwapi.kugou.com/yy/index.php', headers=OneHeader,\n params=self.__Params)\n", (7152, 7238), False, 'import requests\n'), ((7391, 7411), 'json.loads', 'json.loads', (['String_2'], {}), '(String_2)\n', (7401, 7411), False, 'import json\n'), ((8358, 8414), 're.match', 're.match', (['"""(.*?)( - )(.*?)(-)"""', "(Data['audio_name'] + '-')"], {}), "('(.*?)( - )(.*?)(-)', Data['audio_name'] + '-')\n", (8366, 8414), False, 'import re\n'), ((4990, 5003), 'KuGou.Music', 'KuGou.Music', ([], {}), '()\n', (5001, 5003), False, 'import KuGou\n'), ((573, 584), 'time.time', 'time.time', ([], {}), '()\n', (582, 584), False, 'import time\n'), ((1545, 1556), 'time.time', 'time.time', ([], {}), '()\n', (1554, 1556), False, 'import time\n'), ((6166, 6177), 'time.time', 'time.time', ([], {}), '()\n', (6175, 6177), False, 'import time\n'), ((6433, 
6444), 'time.time', 'time.time', ([], {}), '()\n', (6442, 6444), False, 'import time\n'), ((8293, 8306), 'KuGou.Music', 'KuGou.Music', ([], {}), '()\n', (8304, 8306), False, 'import KuGou\n')] |
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path  # NOTE(review): bare name — `path` must be injected by the grading environment before this runs.
#Code starts here
data=pd.read_csv(path)
# Rename the medals-total column for clarity throughout the script.
data.rename(columns={'Total':'Total_Medals'},inplace=True)
data.head(10)
# --------------
#Code starts here
# Label each country by which season yielded more medals ('Both' on a tie).
data['Better_Event']=np.where(data['Total_Summer']==data['Total_Winter'],'Both',(np.where(data['Total_Summer']>data['Total_Winter'],'Summer','Winter')) )
better_event=data['Better_Event'].value_counts().idxmax()
# --------------
#Code starts here
# Keep the name and the three medal counts; drop the last row (totals row).
top_countries=data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]
top_countries.drop(top_countries.index[-1],inplace=True)
def top_ten(variable1, variable2):
    """Return the first column (the country names) of the 10 rows of
    ``variable1`` with the largest values in column ``variable2``.

    FIX: removed the dead ``country_list = []`` assignment that was
    immediately overwritten.

    :param variable1: a pandas DataFrame whose first column is 'Country_Name'.
    :param variable2: the column name to rank by.
    :return: a pandas Series of up to 10 country names, largest first.
    """
    # NOTE: .iloc[:, 0] relies on 'Country_Name' being the first column.
    return variable1.nlargest(10, variable2).iloc[:, 0]
# Top-10 country lists per ranking column, and their intersection.
top_10_summer=list(top_ten(top_countries,'Total_Summer'))
top_10_winter=list(top_ten(top_countries,'Total_Winter'))
top_10=list(top_ten(top_countries,'Total_Medals'))
common=list(set(top_10_summer) & set(top_10_winter) & set(top_10))
# --------------
#Code starts here
# Slice the full data down to each top-10 group and plot medal totals.
summer_df=data[data['Country_Name'].isin(top_10_summer)]
winter_df=data[data['Country_Name'].isin(top_10_winter)]
print(winter_df)
top_df=data[data['Country_Name'].isin(top_10)]
print(top_df)
fig, (ax_1,ax_2,ax_3)=plt.subplots(3,1)
summer_df.plot(x='Country_Name',y='Total_Medals',kind='bar',ax=ax_1)
winter_df.plot(x='Country_Name',y='Total_Medals',kind='bar',ax=ax_2)
top_df.plot(x='Country_Name',y='Total_Medals',kind='bar',ax=ax_3)
# --------------
#Code starts here
# Gold ratio = gold medals / total medals; find the top country per group.
# NOTE(review): assigning a new column on these filtered slices can raise
# pandas' SettingWithCopyWarning — consider .copy() when slicing above.
summer_df['Golden_Ratio']=summer_df['Gold_Summer'] / summer_df['Total_Summer']
summer_max_ratio =summer_df['Golden_Ratio'].max()
summer_country_gold=summer_df.loc[summer_df['Golden_Ratio']==summer_max_ratio,'Country_Name'].iloc[0]
winter_df['Golden_Ratio']=winter_df['Gold_Winter'] / winter_df['Total_Winter']
winter_max_ratio =winter_df['Golden_Ratio'].max()
winter_country_gold=winter_df.loc[winter_df['Golden_Ratio']==winter_max_ratio,'Country_Name'].iloc[0]
top_df['Golden_Ratio']=top_df['Gold_Total'] / top_df['Total_Medals']
top_max_ratio =top_df['Golden_Ratio'].max()
top_country_gold=top_df.loc[top_df['Golden_Ratio']==top_max_ratio,'Country_Name'].iloc[0]
# --------------
#Code starts here
# Weighted points (gold=3, silver=2, bronze=1) excluding the totals row.
data_1=data.drop(data.index[-1])
data_1['Total_Points']=(data_1['Gold_Total']*3) + (data_1['Silver_Total']*2)+data_1['Bronze_Total']
most_points=data_1['Total_Points'].max()
best_country=data_1.loc[data_1['Total_Points']==most_points,'Country_Name'].iloc[0]
# --------------
#Code starts here
# Stacked medal breakdown for the best country.
best=data[data['Country_Name']==best_country]
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot.bar(stacked=True)
plt.xlabel('United States')
plt.ylabel('Medals Tally')
plt.xticks(rotation=45)
| [
"matplotlib.pyplot.xticks",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots"
] | [((169, 186), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (180, 186), True, 'import pandas as pd\n'), ((1371, 1389), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {}), '(3, 1)\n', (1383, 1389), True, 'import matplotlib.pyplot as plt\n'), ((2807, 2834), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""United States"""'], {}), "('United States')\n", (2817, 2834), True, 'import matplotlib.pyplot as plt\n'), ((2836, 2862), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Medals Tally"""'], {}), "('Medals Tally')\n", (2846, 2862), True, 'import matplotlib.pyplot as plt\n'), ((2864, 2887), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (2874, 2887), True, 'import matplotlib.pyplot as plt\n'), ((387, 460), 'numpy.where', 'np.where', (["(data['Total_Summer'] > data['Total_Winter'])", '"""Summer"""', '"""Winter"""'], {}), "(data['Total_Summer'] > data['Total_Winter'], 'Summer', 'Winter')\n", (395, 460), True, 'import numpy as np\n')] |
# Lint as: python3
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for shll-specific fuzzing."""
import random
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from xls.common import runfiles
from xls.dslx.fuzzer import ast_generator
from xls.dslx.fuzzer import run_fuzz
from xls.dslx.python import cpp_ast as ast
flags.DEFINE_boolean('update_golden', False,
'Whether to update golden reference files.')
FLAGS = flags.FLAGS
class RunFuzzShllTest(parameterized.TestCase):
  """Fuzzes DSLX samples restricted to the SHLL binary op and compares the
  first few (seed, sample) pairs against checked-in golden files."""

  # Keyword arguments forwarded to run_fuzz.run_fuzz for every seed.
  KWARGS = {
      'calls_per_sample': 4,
      'save_temps': False,
      'sample_count': 4,
      'return_samples': True,
      'codegen': False,
  }
  # Path template of the golden reference sample files.
  GOLDEN_REFERENCE_FMT = 'xls/dslx/fuzzer/testdata/run_fuzz_shll_test.seed_{seed}_sample_{sample}.x'
  # Only the first few (seed, sample) pairs have golden files.
  SEED_TO_CHECK_LIMIT = 2
  SAMPLE_TO_CHECK_LIMIT = 1

  @parameterized.named_parameters(*tuple(
      dict(testcase_name='seed_{}'.format(x), seed=x) for x in range(50)))
  def test_first_n_seeds(self, seed):
    if FLAGS.update_golden and seed >= self.SEED_TO_CHECK_LIMIT:
      # Skip running unnecessary tests if updating golden because the test is
      # slow and runs unsharded.
      return
    rng = random.Random(seed)
    # Generate samples that only use the SHLL binop (division disallowed).
    samples = run_fuzz.run_fuzz(
        rng,
        ast_generator.AstGeneratorOptions(
            disallow_divide=True, binop_allowlist=[ast.BinopKind.SHLL]),
        **self.KWARGS)
    for i in range(self.KWARGS['sample_count']):
      if seed < self.SEED_TO_CHECK_LIMIT and i < self.SAMPLE_TO_CHECK_LIMIT:
        path = self.GOLDEN_REFERENCE_FMT.format(seed=seed, sample=i)
        if FLAGS.update_golden:
          # Regenerate the golden file instead of comparing against it.
          with open(path, 'w') as f:
            f.write(samples[i].input_text)
        else:
          # rstrip to avoid miscompares from trailing newline at EOF.
          expected = runfiles.get_contents_as_text(path).rstrip()
          self.assertMultiLineEqual(expected, samples[i].input_text)
if __name__ == '__main__':
absltest.main()
| [
"xls.common.runfiles.get_contents_as_text",
"random.Random",
"absl.testing.absltest.main",
"absl.flags.DEFINE_boolean",
"xls.dslx.fuzzer.ast_generator.AstGeneratorOptions"
] | [((904, 997), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""update_golden"""', '(False)', '"""Whether to update golden reference files."""'], {}), "('update_golden', False,\n 'Whether to update golden reference files.')\n", (924, 997), False, 'from absl import flags\n'), ((2509, 2524), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (2522, 2524), False, 'from absl.testing import absltest\n'), ((1747, 1766), 'random.Random', 'random.Random', (['seed'], {}), '(seed)\n', (1760, 1766), False, 'import random\n'), ((1821, 1919), 'xls.dslx.fuzzer.ast_generator.AstGeneratorOptions', 'ast_generator.AstGeneratorOptions', ([], {'disallow_divide': '(True)', 'binop_allowlist': '[ast.BinopKind.SHLL]'}), '(disallow_divide=True, binop_allowlist=[\n ast.BinopKind.SHLL])\n', (1854, 1919), False, 'from xls.dslx.fuzzer import ast_generator\n'), ((2364, 2399), 'xls.common.runfiles.get_contents_as_text', 'runfiles.get_contents_as_text', (['path'], {}), '(path)\n', (2393, 2399), False, 'from xls.common import runfiles\n')] |
try:
from enum import Enum
from pathlib import Path as path
import os
except ImportError as err:
print("Unable to import: {}".format(err))
exit()
class Directory(Enum):
DEFAULT_WINDOWS_FIREFOX = "{}\\Roaming\\Mozilla\\Firefox\\Profiles".format(
os.getenv('APPDATA'))
DEFAULT_WINDOWS_CHROME = "{}\\Local\\Google\\Chrome\\User Data".format(
os.getenv('APPDATA'))
DEFAULT_WINDOWS_EDGE = "{}\\Local\\Microsoft\\Edge\\User Data\\Default".format(
os.getenv('APPDATA'))
DEFAULT_LINUX_FIREFOX = "{}/.mozilla/firefox/".format(path.home())
DEFAULT_LINUX_CHROME = "{}/.config/google-chrome/default".format(path.home())
DEFAULT_LINUX_EDGE = "{}\\Local\\Microsoft\\Edge\\User Data\\Default".format(
path.home())
def __str__(self):
return self.value
| [
"pathlib.Path.home",
"os.getenv"
] | [((280, 300), 'os.getenv', 'os.getenv', (['"""APPDATA"""'], {}), "('APPDATA')\n", (289, 300), False, 'import os\n'), ((386, 406), 'os.getenv', 'os.getenv', (['"""APPDATA"""'], {}), "('APPDATA')\n", (395, 406), False, 'import os\n'), ((500, 520), 'os.getenv', 'os.getenv', (['"""APPDATA"""'], {}), "('APPDATA')\n", (509, 520), False, 'import os\n'), ((581, 592), 'pathlib.Path.home', 'path.home', ([], {}), '()\n', (590, 592), True, 'from pathlib import Path as path\n'), ((663, 674), 'pathlib.Path.home', 'path.home', ([], {}), '()\n', (672, 674), True, 'from pathlib import Path as path\n'), ((766, 777), 'pathlib.Path.home', 'path.home', ([], {}), '()\n', (775, 777), True, 'from pathlib import Path as path\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
from utils.period import todate
from utils.shortcut import (rebuild_html,
render)
class Serializer:
# 一定要传form={}进来
def __init__(self, **kwargs):
self.data = self.serialize(kwargs.get('form'))
self.exclude = ()
self.is_valid()
def serialize(self, dit):
return dit
def is_valid(self):
for key in self.data:
if self.data[key] is None and key not in self.exclude:
# print('invalid')
return False
# print('valid')
return True
class ArticleSer(Serializer):
def __init__(self, **kwargs):
super(ArticleSer, self).__init__(**kwargs)
self.exclude = ('id', 'updated_date', 'pic_address', 'axis_y', 'desc', 'citation')
def serialize(self, form):
# TODO:考虑更新和创建
form['created_time'] = (
str(int(time.time())) if form.get('time') is None or form.get('time') == '' else form['time'])
if form.get('edit'):
form['updated_time'] = form['created_time']
if form.get('update') == 'on':
form['updated_time'] = str(int(time.time()))
form['html'], form['desc'] = rebuild_html(render(form['text']))
return dict(
id=None if form.get('id') == '' else form.get('id'),
created_time=form.get('created_time'),
updated_time=form.get('updated_time'),
date=todate(form['created_time'], '%b.%d %Y'), # form.get('date') or
# updated_date=form.get('updated_date') or todate(form['updated_time'], '%b.%d %Y %H:%M:%S'),
title=form.get('title'),
tag=form.get('tag'),
author=form.get('author'),
category=form.get('category'),
text=form.get('text'),
html=form['html'],
desc=form['desc'],
desc_text=((form.get('text'))[:(form.get('text')).find('-----', 1)]).replace('\n', ' ').replace('\"', '\''),
citation=form['citation'] if form.get('citation') else None,
top=form.get('top'),
open=form.get('open'),
pic=form.get('pic'),
pic_address=form.get('pic_address'),
axis_y=form.get('axis_y'),
comments=form.get('comments') or []
)
class ArchiveSer(Serializer):
def __init__(self, **kwargs):
super(ArchiveSer, self).__init__(**kwargs)
self.exclude = ()
def serialize(self, form):
return dict(
id=form.get('id'),
title=form.get('title'),
category=form.get('category'),
created_time=form.get('created_time'),
)
class LinkSer(Serializer):
def __init__(self, **kwargs):
super(LinkSer, self).__init__(**kwargs)
self.exclude = ()
def serialize(self, form):
return dict()
class ConfigSer(Serializer):
def __init__(self, **kwargs):
super(ConfigSer, self).__init__(**kwargs)
self.exclude = ()
def serialize(self, form):
return dict()
| [
"utils.period.todate",
"utils.shortcut.render",
"time.time"
] | [((1261, 1281), 'utils.shortcut.render', 'render', (["form['text']"], {}), "(form['text'])\n", (1267, 1281), False, 'from utils.shortcut import rebuild_html, render\n'), ((1489, 1529), 'utils.period.todate', 'todate', (["form['created_time']", '"""%b.%d %Y"""'], {}), "(form['created_time'], '%b.%d %Y')\n", (1495, 1529), False, 'from utils.period import todate\n'), ((942, 953), 'time.time', 'time.time', ([], {}), '()\n', (951, 953), False, 'import time\n'), ((1196, 1207), 'time.time', 'time.time', ([], {}), '()\n', (1205, 1207), False, 'import time\n')] |
from numpy import ndarray, array
from electripy.physics.charges import PointCharge
class _ChargesSet:
"""
A _ChargesSet instance is a group of charges. The electric
field at a given point can be calculated as the sum of each
electric field at that point for every charge in the charge
set.
"""
def __init__(self, charges: list[PointCharge]) -> None:
self.charges = charges
def electric_field(self, point: ndarray) -> ndarray:
"""
Returns the electric field at the specified point.
"""
ef = array([0.0, 0.0])
for charge in self.charges:
ef += charge.electric_field(point)
return ef
def electric_force(self, charge: PointCharge) -> ndarray:
"""
Returns the force of the electric field exerted
on the charge.
"""
ef = self.electric_field(charge.position)
return ef * charge.charge
def __getitem__(self, index):
return self.charges[index]
class ChargeDistribution:
def __init__(self):
"""
There is one group for each charge in charges.
Each group is a two dimensional vector. The first element is
a charge, and the second element is the ChargeSet instance
containing all charges in charges except the charge itself.
"""
self.groups = []
self.charges_set = _ChargesSet([])
def add_charge(self, charge: PointCharge) -> None:
"""
Adds the charge to charges_set and updates the groups.
"""
self.charges_set.charges.append(charge)
self._update_groups(self.charges_set.charges)
def remove_charge(self, charge: PointCharge) -> None:
"""
Removes the charge to charges_set and updates the groups.
"""
self.charges_set.charges.remove(charge)
self._update_groups(self.charges_set.charges)
def _update_groups(self, charges: list[PointCharge]) -> None:
"""
Let X be a charge from the charge distribution. Computing X electric
force involves computing the electric force exerted on X by all
the other charges on the charge distribution.
This means that, in order to compute the electric force of X,
we need a two dimensional vector where the first component is
the charge X itself and the second component is a ChargeSet
instance cointaning all charges on the charge distribution except
X. This vector is called 'group'.
"""
self.groups = []
for charge in charges:
self.groups.append(
[
charge,
_ChargesSet([c for c in charges if c is not charge]),
]
)
def get_electric_forces(self) -> list[tuple[PointCharge, ndarray]]:
"""
Returns a list of electric forces. There is one electric force for
each charge in charges. Each electric force is a two dimensional
vector. The first element is the charge and the second element is
the electric force the other charges make on it.
"""
electric_forces = []
for group in self.groups:
electric_forces.append((group[0], group[1].electric_force(group[0])))
return electric_forces
def get_electric_field(self, position: ndarray) -> ndarray:
"""
Returns the electric force array at the given point.
"""
return self.charges_set.electric_field(position)
def __len__(self):
return len(self.charges_set.charges)
def __getitem__(self, index):
return self.charges_set[index]
| [
"numpy.array"
] | [((566, 583), 'numpy.array', 'array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (571, 583), False, 'from numpy import ndarray, array\n')] |
# -*- coding:utf8 -*-
import logging
from twisted.internet import reactor
from pygps.server.handlers import ProtocalTCPFactory, ProtocalUDPHandler
from pygps.server.pusher import ThreadQueuePusher, DalPusher
from pygps.goodhope.dal import GPSDal
from pygps.protocol.bsj import A5, Km
from pygps.protocol.longhan import Longhan16m
from pygps.protocol.xinan import Xinan
from pygps.protocol.qnm import Qnm
from pygps.protocol.xinji import Xinji
def init_log(level, name, path='', dir='/tmp/logs'):
import logging.handlers
import os.path
file_folder = os.path.join(dir, path)
if not os.path.exists(file_folder):
os.makedirs(file_folder)
file_path = os.path.join(file_folder, name)
handler = logging.handlers.RotatingFileHandler(
file_path, maxBytes=1048576, backupCount=5)
handler.setLevel(level)
handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelno)s %(thread)d %(pathname)s %(lineno)d %(funcName)s %(message)s'))
logging.getLogger().addHandler(handler)
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelno)s %(message)s'))
logging.getLogger().addHandler(handler)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='启动GPS前置机服务')
parser.add_argument('translator', help="协议名称:A5,Km,Longhan16m,Xinan,Qnm,Xinji")
parser.add_argument('connection', help="数据库连接字符串")
parser.add_argument('-p', help="监听端口号", default='3007')
parser.add_argument('-P', help="使用TCP or UDP 缺省为TCP", default='TCP')
parser.add_argument('-T', help="使用TCP时的超时秒", default=None, type=int)
args = parser.parse_args()
init_log(logging.DEBUG, '{translator}_{protocol}_{port}.log'.format(
translator=args.translator, protocol=args.P, port=args.p
), dir='./logs')
logging.info('start with %s', args)
#
dal1 = GPSDal(args.connection)
dal2 = GPSDal(args.connection)
pusher = DalPusher(dal=dal1)#ThreadQueuePusher(dal=dal1)
translator = globals()[args.translator]()
port = int(args.p)
timeout = args.T
if args.P == 'TCP':
reactor.listenTCP(port, ProtocalTCPFactory(translator=translator, pusher=pusher, user_signal=dal2, timeout=timeout))
else:
reactor.listenUDP(port,ProtocalUDPHandler(translator=translator, pusher=pusher, user_signal=dal2))
reactor.run()
| [
"logging.getLogger",
"logging.StreamHandler",
"argparse.ArgumentParser",
"logging.Formatter",
"logging.handlers.RotatingFileHandler",
"pygps.server.pusher.DalPusher",
"pygps.goodhope.dal.GPSDal",
"pygps.server.handlers.ProtocalTCPFactory",
"twisted.internet.reactor.run",
"pygps.server.handlers.Pro... | [((725, 810), 'logging.handlers.RotatingFileHandler', 'logging.handlers.RotatingFileHandler', (['file_path'], {'maxBytes': '(1048576)', 'backupCount': '(5)'}), '(file_path, maxBytes=1048576, backupCount=5\n )\n', (761, 810), False, 'import logging\n'), ((1042, 1065), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1063, 1065), False, 'import logging\n'), ((1292, 1341), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""启动GPS前置机服务"""'}), "(description='启动GPS前置机服务')\n", (1315, 1341), False, 'import argparse\n'), ((1884, 1919), 'logging.info', 'logging.info', (['"""start with %s"""', 'args'], {}), "('start with %s', args)\n", (1896, 1919), False, 'import logging\n'), ((1938, 1961), 'pygps.goodhope.dal.GPSDal', 'GPSDal', (['args.connection'], {}), '(args.connection)\n', (1944, 1961), False, 'from pygps.goodhope.dal import GPSDal\n'), ((1973, 1996), 'pygps.goodhope.dal.GPSDal', 'GPSDal', (['args.connection'], {}), '(args.connection)\n', (1979, 1996), False, 'from pygps.goodhope.dal import GPSDal\n'), ((2010, 2029), 'pygps.server.pusher.DalPusher', 'DalPusher', ([], {'dal': 'dal1'}), '(dal=dal1)\n', (2019, 2029), False, 'from pygps.server.pusher import ThreadQueuePusher, DalPusher\n'), ((2420, 2433), 'twisted.internet.reactor.run', 'reactor.run', ([], {}), '()\n', (2431, 2433), False, 'from twisted.internet import reactor\n'), ((868, 982), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelno)s %(thread)d %(pathname)s %(lineno)d %(funcName)s %(message)s"""'], {}), "(\n '%(asctime)s %(levelno)s %(thread)d %(pathname)s %(lineno)d %(funcName)s %(message)s'\n )\n", (885, 982), False, 'import logging\n'), ((1119, 1175), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelno)s %(message)s"""'], {}), "('%(asctime)s %(levelno)s %(message)s')\n", (1136, 1175), False, 'import logging\n'), ((987, 1006), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1004, 
1006), False, 'import logging\n'), ((1190, 1209), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1207, 1209), False, 'import logging\n'), ((2205, 2300), 'pygps.server.handlers.ProtocalTCPFactory', 'ProtocalTCPFactory', ([], {'translator': 'translator', 'pusher': 'pusher', 'user_signal': 'dal2', 'timeout': 'timeout'}), '(translator=translator, pusher=pusher, user_signal=dal2,\n timeout=timeout)\n', (2223, 2300), False, 'from pygps.server.handlers import ProtocalTCPFactory, ProtocalUDPHandler\n'), ((2339, 2413), 'pygps.server.handlers.ProtocalUDPHandler', 'ProtocalUDPHandler', ([], {'translator': 'translator', 'pusher': 'pusher', 'user_signal': 'dal2'}), '(translator=translator, pusher=pusher, user_signal=dal2)\n', (2357, 2413), False, 'from pygps.server.handlers import ProtocalTCPFactory, ProtocalUDPHandler\n')] |
#!/usr/bin/env python
from setuptools import setup, find_packages, Extension
extensions = [
Extension('cyhll.hyperloglog', ['cyhll/hyperloglog.pyx']),
Extension('cyhll.murmer3', ['cyhll/murmer3.pyx']),
]
try:
from Cython.Build import cythonize
extensions = cythonize(extensions,
compiler_directives={
'language_level': 3
})
except ImportError:
pass
if __name__ == '__main__':
setup(
name='cyhll',
version='0.1.4',
url='https://github.com/bndl/cyhll',
description='Hyperloglog in Cython',
long_description=open('README.rst').read(),
author='<NAME>',
author_email='<EMAIL>',
packages=(
find_packages()
),
include_package_data=True,
zip_safe=False,
install_requires=[],
extras_require=dict(
dev=[
'cython<0.25',
'pytest',
],
),
ext_modules=extensions,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| [
"setuptools.Extension",
"Cython.Build.cythonize",
"setuptools.find_packages"
] | [((98, 155), 'setuptools.Extension', 'Extension', (['"""cyhll.hyperloglog"""', "['cyhll/hyperloglog.pyx']"], {}), "('cyhll.hyperloglog', ['cyhll/hyperloglog.pyx'])\n", (107, 155), False, 'from setuptools import setup, find_packages, Extension\n'), ((161, 210), 'setuptools.Extension', 'Extension', (['"""cyhll.murmer3"""', "['cyhll/murmer3.pyx']"], {}), "('cyhll.murmer3', ['cyhll/murmer3.pyx'])\n", (170, 210), False, 'from setuptools import setup, find_packages, Extension\n'), ((276, 340), 'Cython.Build.cythonize', 'cythonize', (['extensions'], {'compiler_directives': "{'language_level': 3}"}), "(extensions, compiler_directives={'language_level': 3})\n", (285, 340), False, 'from Cython.Build import cythonize\n'), ((775, 790), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (788, 790), False, 'from setuptools import setup, find_packages, Extension\n')] |
import random
square = [[1,2,3],[4,5,6],[7,8,9]]
def get_square():
line =[i for i in range(1,10)]
lst = random.sample(line, 9)
square = [lst[i:i + 3] for i in range(0, len(lst), 3)]
return square
def valid_line(line):
if sorted(line) == [1,2,3,4,5,6,7,8,9]: return True
else: return False
def get_3_lines():
square_list= [get_square(),get_square(),get_square()]
lines = []
for i in range(0,3):
lines.append(
square_list[0][i]+square_list[1][i]+square_list[2][i])
return lines
def get_board():
board = get_3_lines()+get_3_lines()+get_3_lines()
return board
def all_valid_lines(board):
for i in board:
if valid_line(i) == False:
return False
return True
def all_valid_columms(board):
transposed_Board= [[board[j][i] for j in range(len(board))] for i in range(len(board[0]))]
return all_valid_lines(transposed_Board)
x = 0
counter=0
while x == 0:
counter+=1
b = get_board()
if all_valid_lines(b) and all_valid_columms(b) == True:
print(b)
print(counter)
x=1
print("ok")
| [
"random.sample"
] | [((114, 136), 'random.sample', 'random.sample', (['line', '(9)'], {}), '(line, 9)\n', (127, 136), False, 'import random\n')] |
# Copyright (c) 2016-2020 <NAME>
# Licensed under the zlib/libpng License
# https://opensource.org/licenses/Zlib
import math
__all__ = ('slownie', 'slownie_zl', 'slownie_zl100gr')
ZERO_LITERALLY = "zero"
MINUS_LITERALLY = "minus "
HUNDREDS_LITERALLY = [
"",
"sto ",
"dwie\u015Bcie ",
"trzysta ",
"czterysta ",
"pi\u0119\u0107set ",
"sze\u015B\u0107set ",
"siedemset ",
"osiemset ",
"dziewi\u0119\u0107set "
]
TENS_LITERALLY = [
"",
"",
"dwadzie\u015Bcia ",
"trzydzie\u015Bci ",
"czterdzie\u015Bci ",
"pi\u0119\u0107dziesi\u0105t ",
"sze\u015B\u0107dziesi\u0105t ",
"siedemdziesi\u0105t ",
"osiemdziesi\u0105t ",
"dziewi\u0119\u0107dziesi\u0105t "
]
UNITIES_LITERALLY = [
"",
"jeden ",
"dwa ",
"trzy ",
"cztery ",
"pi\u0119\u0107 ",
"sze\u015B\u0107 ",
"siedem ",
"osiem ",
"dziewi\u0119\u0107 ",
"dziesi\u0119\u0107 ",
"jedena\u015Bcie ",
"dwana\u015Bcie ",
"trzyna\u015Bcie ",
"czterna\u015Bcie ",
"pi\u0119tna\u015Bcie ",
"szesna\u015Bcie ",
"siedemna\u015Bcie ",
"osiemna\u015Bcie ",
"dziewi\u0119tna\u015Bcie "
]
PARTS_LITERALLY = [
["", "", "", ""],
["", "tysi\u0105c ", "tysi\u0105ce ", "tysi\u0119cy "],
["", "milion ", "miliony ", "milion\u00F3w "],
["", "miliard ", "miliardy ", "miliard\u00F3w "],
["", "bilion ", "biliony ", "bilion\u00F3w "],
["", "biliard ", "biliardy ", "biliard\u00F3w "],
["", "trylion ", "tryliony ", "trylion\u00F3w "],
["", "tryliard ", "tryliardy ", "tryliard\u00F3w "],
["", "kwadrylion ", "kwadryliony ", "kwadrylion\u00F3w "],
["", "kwadryliard ", "kwadryliardy ", "kwadryliard\u00F3w "],
]
GROSZE_LITERALLY = [
" groszy",
" grosz",
" grosze",
" groszy"
]
ZLOTE_LITERALLY = [
" z\u0142otych",
" z\u0142oty",
" z\u0142ote",
" z\u0142otych"
]
def slownie(value):
""" """
if value == 0.0:
return ZERO_LITERALLY
literally = "" if value >= 0.0 else (MINUS_LITERALLY + " ")
value = abs(value)
for k in range(len(PARTS_LITERALLY) - 1, -1, -1):
part = int((value % 1000.0**(k + 1)) / 1000.0**k)
hundreds, tens, unities, declension = _split(part)
literally += HUNDREDS_LITERALLY[hundreds]
literally += TENS_LITERALLY[tens]
literally += UNITIES_LITERALLY[unities]
literally += PARTS_LITERALLY[k][declension]
return literally[:-1]
def slownie_zl(amount):
""" """
grosze, zlote = math.modf(amount)
grosze = int(abs(grosze) * 100.0 + 0.5)
literally = _slownie(zlote, ZLOTE_LITERALLY)
if grosze:
literally += " "
literally += _slownie(grosze, GROSZE_LITERALLY)
return literally
def slownie_zl100gr(amount):
""" """
grosze, zlote = math.modf(amount)
grosze = int(abs(grosze) * 100.0 + 0.5)
literally = _slownie(zlote, ZLOTE_LITERALLY)
literally += " %02d/100" % grosze
return literally
def _slownie(amount, LITERALLY):
literally = slownie(amount)
amount = int(abs(amount) + 0.5)
_, _, _, declension = _split(amount)
literally += LITERALLY[declension]
return literally
def _split(value):
hundreds, rest = divmod(value, 100)
tens, unities = divmod(rest, 10)
if tens == 1: tens, unities = 0, rest
if unities == 0:
declension = 3 if hundreds or tens else 0
elif unities == 1:
declension = 3 if hundreds or tens else 1
elif unities in (2, 3, 4):
declension = 2
else: # unities >= 5:
declension = 3
return (hundreds, tens, unities, declension)
| [
"math.modf"
] | [((2624, 2641), 'math.modf', 'math.modf', (['amount'], {}), '(amount)\n', (2633, 2641), False, 'import math\n'), ((2917, 2934), 'math.modf', 'math.modf', (['amount'], {}), '(amount)\n', (2926, 2934), False, 'import math\n')] |
#!/usr/bin/env python
import contextlib
import os
import platform
from typing import Iterator
from typing import Optional
import uuid
from absl.testing import absltest
from grr_response_client.client_actions.windows import pipes
if platform.system() == "Windows":
# pylint: disable=g-import-not-at-top
# pytype: disable=import-error
import win32pipe
# pytype: enable=import-error
# pylint: enable=g-import-not-at-top
@absltest.skipUnless(
platform.system() == "Windows",
reason="Windows-only action.",
)
class ListNamedPipesTest(absltest.TestCase):
def testSinglePipe(self) -> None:
pipe_name = str(uuid.uuid4())
pipe_spec = NamedPipeSpec(pipe_name)
with pipe_spec.Create():
results = list(pipes.ListNamedPipes())
names = set(result.name for result in results)
self.assertIn(pipe_name, names)
def testMultiplePipes(self) -> None:
pipe_name_1 = str(uuid.uuid4())
pipe_name_2 = str(uuid.uuid4())
pipe_spec_1 = NamedPipeSpec(pipe_name_1)
pipe_spec_2 = NamedPipeSpec(pipe_name_2)
with pipe_spec_1.Create():
with pipe_spec_2.Create():
results = list(pipes.ListNamedPipes())
names = set(result.name for result in results)
self.assertIn(pipe_name_1, names)
self.assertIn(pipe_name_2, names)
def testPipeTypeByte(self) -> None:
self._testPipeType(win32pipe.PIPE_TYPE_BYTE)
def testPipeTypeMessage(self) -> None:
self._testPipeType(win32pipe.PIPE_TYPE_MESSAGE)
def _testPipeType(self, pipe_type: int) -> None: # pylint: disable=invalid-name
pipe_name = str(uuid.uuid4())
pipe_spec = NamedPipeSpec(pipe_name)
pipe_spec.pipe_mode = pipe_type
with pipe_spec.Create():
results = list(pipes.ListNamedPipes())
results_by_name = {result.name: result for result in results}
result = results_by_name[pipe_name]
self.assertEqual(result.flags & pipe_type, pipe_type)
def testMaxInstanceCountLimited(self) -> None:
self._testMaxInstanceCount(42)
def testMaxInstanceCountUnlimited(self) -> None:
self._testMaxInstanceCount(win32pipe.PIPE_UNLIMITED_INSTANCES)
def _testMaxInstanceCount(self, count: int) -> None: # pylint: disable=invalid-name
pipe_name = str(uuid.uuid4())
pipe_spec = NamedPipeSpec(pipe_name)
pipe_spec.max_instance_count = count
with pipe_spec.Create():
results = list(pipes.ListNamedPipes())
results_by_name = {result.name: result for result in results}
result = results_by_name[pipe_name]
self.assertEqual(result.max_instance_count, count)
def testCurInstanceCount(self) -> None:
pipe_name = str(uuid.uuid4())
pipe_spec = NamedPipeSpec(pipe_name)
with pipe_spec.Create():
with pipe_spec.Create():
with pipe_spec.Create():
results = list(pipes.ListNamedPipes())
results_by_name = {result.name: result for result in results}
result = results_by_name[pipe_name]
self.assertEqual(result.cur_instance_count, 3)
def testBufferSize(self) -> None:
pipe_name = str(uuid.uuid4())
pipe_spec = NamedPipeSpec(pipe_name)
pipe_spec.in_buffer_size = 42
pipe_spec.out_buffer_size = 108
with pipe_spec.Create():
results = list(pipes.ListNamedPipes())
results_by_name = {result.name: result for result in results}
result = results_by_name[pipe_name]
self.assertEqual(result.in_buffer_size, 42)
self.assertEqual(result.out_buffer_size, 108)
def testPid(self) -> None:
pipe_name = str(uuid.uuid4())
pipe_spec = NamedPipeSpec(pipe_name)
with pipe_spec.Create():
results = list(pipes.ListNamedPipes())
results_by_name = {result.name: result for result in results}
result = results_by_name[pipe_name]
self.assertEqual(result.server_pid, os.getpid())
self.assertEqual(result.client_pid, os.getpid())
class NamedPipeSpec:
"""A class with named pipe specification."""
name: str
open_mode: Optional[int] = None
pipe_mode: Optional[int] = None
max_instance_count: Optional[int] = None
in_buffer_size: int = 0
out_buffer_size: int = 0
default_timeout_millis: int = 0
def __init__(self, name: str) -> None:
self.name = name
@contextlib.contextmanager
def Create(self) -> Iterator[None]:
"""Creates a named pipe context conforming to the specification."""
if self.max_instance_count is not None:
max_instance_count = self.max_instance_count
else:
max_instance_count = win32pipe.PIPE_UNLIMITED_INSTANCES
if self.open_mode is not None:
open_mode = self.open_mode
else:
open_mode = win32pipe.PIPE_ACCESS_DUPLEX
if self.pipe_mode is not None:
pipe_mode = self.pipe_mode
else:
pipe_mode = 0
handle = win32pipe.CreateNamedPipe(
f"\\\\.\\pipe\\{self.name}",
open_mode,
pipe_mode,
max_instance_count,
self.in_buffer_size,
self.out_buffer_size,
self.default_timeout_millis,
None,
)
with contextlib.closing(handle):
yield
if __name__ == "__main__":
absltest.main()
| [
"absl.testing.absltest.main",
"uuid.uuid4",
"win32pipe.CreateNamedPipe",
"platform.system",
"contextlib.closing",
"os.getpid",
"grr_response_client.client_actions.windows.pipes.ListNamedPipes"
] | [((235, 252), 'platform.system', 'platform.system', ([], {}), '()\n', (250, 252), False, 'import platform\n'), ((5051, 5066), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (5064, 5066), False, 'from absl.testing import absltest\n'), ((458, 475), 'platform.system', 'platform.system', ([], {}), '()\n', (473, 475), False, 'import platform\n'), ((4724, 4907), 'win32pipe.CreateNamedPipe', 'win32pipe.CreateNamedPipe', (['f"""\\\\\\\\.\\\\pipe\\\\{self.name}"""', 'open_mode', 'pipe_mode', 'max_instance_count', 'self.in_buffer_size', 'self.out_buffer_size', 'self.default_timeout_millis', 'None'], {}), "(f'\\\\\\\\.\\\\pipe\\\\{self.name}', open_mode, pipe_mode,\n max_instance_count, self.in_buffer_size, self.out_buffer_size, self.\n default_timeout_millis, None)\n", (4749, 4907), False, 'import win32pipe\n'), ((629, 641), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (639, 641), False, 'import uuid\n'), ((909, 921), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (919, 921), False, 'import uuid\n'), ((945, 957), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (955, 957), False, 'import uuid\n'), ((1576, 1588), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1586, 1588), False, 'import uuid\n'), ((2221, 2233), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2231, 2233), False, 'import uuid\n'), ((2620, 2632), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2630, 2632), False, 'import uuid\n'), ((3034, 3046), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3044, 3046), False, 'import uuid\n'), ((3491, 3503), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3501, 3503), False, 'import uuid\n'), ((3769, 3780), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3778, 3780), False, 'import os\n'), ((3822, 3833), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3831, 3833), False, 'import os\n'), ((4980, 5006), 'contextlib.closing', 'contextlib.closing', (['handle'], {}), '(handle)\n', (4998, 5006), False, 'import contextlib\n'), ((735, 757), 
'grr_response_client.client_actions.windows.pipes.ListNamedPipes', 'pipes.ListNamedPipes', ([], {}), '()\n', (755, 757), False, 'from grr_response_client.client_actions.windows import pipes\n'), ((1719, 1741), 'grr_response_client.client_actions.windows.pipes.ListNamedPipes', 'pipes.ListNamedPipes', ([], {}), '()\n', (1739, 1741), False, 'from grr_response_client.client_actions.windows import pipes\n'), ((2370, 2392), 'grr_response_client.client_actions.windows.pipes.ListNamedPipes', 'pipes.ListNamedPipes', ([], {}), '()\n', (2390, 2392), False, 'from grr_response_client.client_actions.windows import pipes\n'), ((3211, 3233), 'grr_response_client.client_actions.windows.pipes.ListNamedPipes', 'pipes.ListNamedPipes', ([], {}), '()\n', (3231, 3233), False, 'from grr_response_client.client_actions.windows import pipes\n'), ((3597, 3619), 'grr_response_client.client_actions.windows.pipes.ListNamedPipes', 'pipes.ListNamedPipes', ([], {}), '()\n', (3617, 3619), False, 'from grr_response_client.client_actions.windows import pipes\n'), ((1138, 1160), 'grr_response_client.client_actions.windows.pipes.ListNamedPipes', 'pipes.ListNamedPipes', ([], {}), '()\n', (1158, 1160), False, 'from grr_response_client.client_actions.windows import pipes\n'), ((2794, 2816), 'grr_response_client.client_actions.windows.pipes.ListNamedPipes', 'pipes.ListNamedPipes', ([], {}), '()\n', (2814, 2816), False, 'from grr_response_client.client_actions.windows import pipes\n')] |
from arrays import DynamicArray
import fileinput
import random
class WordController:
'''
Class representation of WordController
'''
def __init__(self, fl):
'''
Creates new WordController
:type fl: str
:param fl: user txt file
'''
self._file = fl
def read_from_file(self):
'''
Reads information from file.
Returns tuple of two lists: words with translation and definition,
korean words.
'''
fl = open(self._file, "r")
learned_words_full = DynamicArray()
learned_words = DynamicArray()
for line in fl:
line = line.strip()
if '===' not in line and line != '' and line[0] not in '123456789':
line = line.split(',')
learned_words_full.append(line)
learned_words.append(line[0])
return learned_words_full, learned_words
def write_to_file(self):
'''
Writes information about new word to the file.
Returns string representation of this information.
'''
fl1 = open('translations.txt', 'r')
fl2 = open(self._file, "r")
word_num = int(fl2.readline())
info = None
for line in fl1:
line = line.strip().split(",")
if line[0] == str(word_num + 1):
info = "Word: "+line[1]+"\nTranslation: "+ line[3]+\
"\nDefinition: "+line[4]
fl2 = open(self._file, "a")
fl2.write(line[1] + "," + line[3] + "," + line[4] + "\n")
break
fl2 = open(self._file, "r")
if info != None:
for line in fileinput.FileInput(self._file, inplace=1):
n = str(word_num)
m = str(word_num + 1)
line=line.replace(n,m)
print(line)
return info
def word_string(self):
'''
Reads all information from file and
returns string representation of it
'''
fl = open(self._file, 'r')
words = ''
for line in fl:
line = line.strip()
if line not in '123456789' and '===' not in line and line != '':
words += line +'\n'
return words
class User:
'''
Class representation of WordController
'''
def __init__(self, fl = None):
'''
Creates new User
:type fl: str
:param fl: user txt file
'''
self._file = fl
self.word_controller = WordController(fl)
def learn_new_word(self):
'''
Writes information about one new word to user file.
Returns string representation of this information.
'''
info = self.word_controller.write_to_file()
print(info)
return info
def test_yourself(self, number):
'''
Returns tuple of korean word, translation of it and list
of false translations
:type number: int
:param number: number of answer generation
'''
learned_words_full, learned_words = self.word_controller.read_from_file()
if len(learned_words) < 2:
return False
else:
check_word = random.choice(learned_words)
index = learned_words.index(check_word)
true_trans = learned_words_full[index][1]
false_answ = [true_trans]
for time in range(number):
word = learned_words_full[index]
for var in range(3):
if len(learned_words_full) > 1:
learned_words_full.remove(word)
word = random.choice(learned_words_full)
trans = word[1]
false_answ.append(trans)
false_answ.sort()
return (true_trans, false_answ, check_word)
def see_word_list(self):
'''
Reads all information from file and
returns string representation of it
'''
words = self.word_controller.word_string()
return words
def set_file(self, fl):
'''
Sets users file
:type fl: str
:param fl: user txt file
'''
self._file = fl
| [
"fileinput.FileInput",
"random.choice",
"arrays.DynamicArray"
] | [((565, 579), 'arrays.DynamicArray', 'DynamicArray', ([], {}), '()\n', (577, 579), False, 'from arrays import DynamicArray\n'), ((604, 618), 'arrays.DynamicArray', 'DynamicArray', ([], {}), '()\n', (616, 618), False, 'from arrays import DynamicArray\n'), ((1699, 1741), 'fileinput.FileInput', 'fileinput.FileInput', (['self._file'], {'inplace': '(1)'}), '(self._file, inplace=1)\n', (1718, 1741), False, 'import fileinput\n'), ((3259, 3287), 'random.choice', 'random.choice', (['learned_words'], {}), '(learned_words)\n', (3272, 3287), False, 'import random\n'), ((3692, 3725), 'random.choice', 'random.choice', (['learned_words_full'], {}), '(learned_words_full)\n', (3705, 3725), False, 'import random\n')] |
"""
Created on Tue Mar 17 03:23:32 2019
script: /pyleaves/pyleaves/train/csv_datasets_train.py
@author: JacobARose
"""
def main(experiment_config, experiment_results_dir):
############################################
#TODO: Moving towards defining most or all run parameters in separate config files
############################################
domain = experiment_config.domain
label_mapping_filepath = experiment_config['label_mappings']
label_encoder = LabelEncoder(filepath=label_mapping_filepath)
print(label_encoder)
trainer = CSVTrainer(experiment_config, label_encoder=label_encoder)
trainer.init_model_builder()
model_filepath = os.path.join(trainer.model_manager.model_dir,trainer.model_name+'_'+domain+'_model.h5')
train_data = trainer.get_data_loader(subset='train')
val_data = trainer.get_data_loader(subset= 'val')
test_data = trainer.get_data_loader(subset= 'test')
#Get parameters for fitting and callbacks
fit_params = trainer.get_fit_params()
callbacks = get_callbacks(weights_best=os.path.join(trainer.model_manager.model_dir,trainer.model_name+'_'+domain+'_model_weights_best.h5'),
logs_dir=os.path.join(experiment_results_dir,'tensorboard_logs'),
restore_best_weights=True)
history = trainer.fit(train_data,
steps_per_epoch = fit_params['steps_per_epoch'],
epochs=fit_params['epochs'],
validation_data=val_data,
validation_steps=fit_params['validation_steps'],
callbacks=callbacks) #,
# history_name=domain
# )
trainer.histories[domain] = history
trainer.save_model(filepath=model_filepath)
#######################################################################
# TARGET DOMAIN
#trainer.load_model(filepath=source_model_filepath)
num_test_samples = trainer.metadata_splits['test']['num_samples']
num_steps = num_test_samples//trainer.config['batch_size']
test_results = [trainer.evaluate(test_data, steps=num_steps, log_name='test')]#'trained-on-source_train--evaluated-on-source_test')]
trainer.test_results = test_results
return trainer
if __name__=='__main__':
'''
Example:
python /home/jacob/pyleaves/pyleaves/train/2-stage_transfer-learning_main.py -d PNAS Fossil -m vgg16 -gpu 0 -bsz 64 -lr 1e-4 --color_type grayscale -thresh 20 -r l2 -r_p 0.001 --experiment TransferBaselines
python /home/jacob/pyleaves/pyleaves/train/2-stage_transfer-learning_main.py -d Leaves2020 PNAS -m resnet_50_v2 -gpu 2 -bsz 64 -lr 1e-4 --color_type grayscale -thresh 20 -r l2 -r_p 0.001 --experiment TransferBaselines
python /home/jacob/pyleaves/pyleaves/train/csv_datasets_train.py --run_name PNAS -m resnet_50_v2 --experiment_root_dir /media/data_cifs/jacob/Fossil_Project/replication_data/single-domain_experiments --experiment BaselinesCSV -gpu 0
python /home/jacob/pyleaves/pyleaves/train/csv_datasets_train.py --run_name Leaves -m resnet_50_v2 --experiment_root_dir /media/data_cifs/jacob/Fossil_Project/replication_data/single-domain_experiments --experiment BaselinesCSV -gpu 5
python /home/jacob/pyleaves/pyleaves/train/csv_datasets_train.py --run_name Fossil -m resnet_50_v2 --experiment_root_dir /media/data_cifs/jacob/Fossil_Project/replication_data/single-domain_experiments --experiment BaselinesCSV -gpu 6
Possible models:
[
'shallow',
'vgg16',
'xception',
'resnet_50_v2',
'resnet_101_v2'
]
'''
import argparse
import datetime
import json
import numpy as np
import os
import itertools
import random
from collections import OrderedDict
random.seed(6)
parser = argparse.ArgumentParser()
# parser.add_argument('--dataset_config', default='', type=str, nargs='?', help='Requires 2 args identifying datasets by name in order of input to the pipeline. Stage 1: train + validate, Stage 2: finetune + validate + test')
# parser.add_argument('-m', '--model_config', default='vgg16', type=str, help='Name of model to train')
parser.add_argument('--run_name', type=str, default='PNAS')
# parser.add_argument('--dataset_name', type=str, default='PNAS')
parser.add_argument('--experiment_root_dir', type=str, default=r'/media/data_cifs/jacob/Fossil_Project/replication_data/single-domain_experiments')
parser.add_argument('-m', '--model_name', default='vgg16', type=str, nargs='*', help='Name of model to train')
parser.add_argument('-gpu', '--gpu_id', default='0', type=str, help='integer number of gpu to train on')
# parser.add_argument('-ch', '--num_channels', default=3, type=int, help='Number of input channels, either 1 for grayscale, or 3 for rgb')
parser.add_argument('-c', '--color_type', default='grayscale', type=str, help='grayscale or rgb')
parser.add_argument('-bsz', '--batch_size', default=64, type=int, nargs='*', help='Batch size. What else do you need to know?')
parser.add_argument('-lr', '--base_learning_rate', default=1e-4, nargs='*', type=float, help="Starting learning rate, <float> for a single value or 'all' to loop through a hardcoded range of values")
parser.add_argument('-thresh', '--low_class_count_thresh', default=10, type=int)
parser.add_argument('-r', '--regularizations', default='l2', type=str, help='comma separated list of regularizers to search through. Enter combinations of l1 and l2, enter anything else for None.')
parser.add_argument('-r_p', '--r_params', default='0.001', type=str, nargs='*', help='comma separated list of regularizer strengths to search through. Enter combinations of floats.') #3
parser.add_argument('-epochs', '--num_epochs', default=200, type=int, help='Number of epochs')
parser.add_argument('-exp', '--experiment', default='Baselines', type=str, help=r"Name of new or existing MLFlow experiment to log results into. TODO: Add None option")
parser.add_argument('-tracking_dir', '--mlflow_tracking_dir', default=r'/media/data/jacob/Fossil_Project/experiments/mlflow', type=str, help=r"Absolute path of MLFlow tracking dir for logging this experiment.")
parser.add_argument('--data_db_path', default=r'/home/jacob/pyleaves/pyleaves/leavesdb/resources/leavesdb.db', type=str, help='Directory in which to save/load models and/or model weights')
parser.add_argument('--model_dir', default=r'/media/data_cifs/jacob/Fossil_Project/models', type=str, help='Directory in which to save/load models and/or model weights')
parser.add_argument('-tfrec', '--tfrecord_dir', default=r'/media/data/jacob/Fossil_Project/tfrecord_data', type=str, help=r"Parent dir above the location that's intended for saving the TFRecords for this dataset")
parser.add_argument('-f',default='')
args = parser.parse_args()
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id) ####SHOULD THIS BE AN INT???
tf.compat.v1.enable_eager_execution()
import pyleaves
from pyleaves.utils import ensure_dir_exists, process_hparam_args
####
from pyleaves.data_pipeline.preprocessing import LabelEncoder
from pyleaves.leavesdb.tf_utils.tf_utils import reset_eager_session
from pyleaves.utils.csv_utils import gather_run_data, load_csv_data
from pyleaves.train.callbacks import get_callbacks
from pyleaves.config import DatasetConfig, TrainConfig, ExperimentConfig, CSVDomainDataConfig, CSVFrozenRunDataConfig
from pyleaves.train.csv_trainer import CSVTrainer
from pyleaves.analysis.mlflow_utils import mlflow_log_params_dict, mlflow_log_history, mlflow_log_best_history
import mlflow
import mlflow.tensorflow
ensure_dir_exists(args.mlflow_tracking_dir)
mlflow.set_tracking_uri(args.mlflow_tracking_dir)
mlflow.set_experiment(args.experiment)
# print(mlflow.tracking.get_tracking_uri())
############################
#########################################
search_params=['run_name','base_learning_rate','batch_size']
if args.model_name == 'all':
args.model_name = ['resnet_50_v2','resnet_152_v2', 'vgg16']
elif type(args.model_name)==str:
search_params.append('model_name')
#########################################
#########################################
regularizer = {args.regularizations:args.r_params}
new_args = process_hparam_args(args, search_params=search_params)
hparams = OrderedDict({
'model_names':args.model_name,
'run_names':args.run_name,
'learning_rates':args.base_learning_rate,
'batch_sizes':args.batch_size
}
)
hparams_labeled = OrderedDict()
for k, v in hparams.items():
hparams_labeled[k] = list(itertools.product([k],v))
hparam_sampler = list(
itertools.product(*list(hparams_labeled.values()))
)
print('BEGINNING HPARAM SEARCH THROUGH A TOTAL OF ',len(hparam_sampler),' INDIVIDUAL HPARAM PERMUTATIONS.')
print('#'*20)
print('#'*20)
#########################################
for num_finished, hparam in enumerate(hparam_sampler):
hparam = {k:v for k,v in hparam}
args.model_name = hparam['model_names']
args.run_name = hparam['run_names']
args.dataset_name = args.run_name
args.domain = args.run_name
args.base_learning_rate = hparam['learning_rates']
args.batch_size = hparam['batch_sizes']
mlflow_run_name=f'{args.model_name}-{args.run_name}-{args.color_type}-lr_{args.base_learning_rate}-bsz_{args.batch_size}'
with mlflow.start_run(run_name=mlflow_run_name, nested=True):
current_time = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
experiment_name = os.path.basename(args.experiment_root_dir)
experiment_results_dir = os.path.join(args.experiment_root_dir,
'results',
'-'.join([args.model_name,args.color_type]),
args.dataset_name,
f'lr-{args.base_learning_rate}-bsz_{args.batch_size}',
current_time)
# experiment_records = gather_experiment_data(experiment_root_dir, return_type='records')
# get_records_attribute(experiment_records, attribute_key='run')
run_records = gather_run_data(args.experiment_root_dir, run=args.run_name, return_type='records')
# get_records_attribute(run_records, attribute_key='run')
# domain_config_0 = CSVDomainDataConfig(experiment_name=experiment_name,
# **run_records[0],
# grayscale=True,
# color_type='grayscale',
# num_channels=1,
# low_class_count_thresh=10,
# data_splits={'val_size':0.2,'test_size':0.2},
# num_shards=10)
# dataset_config_domain = CSVFrozenRunDataConfig(experiment_name=experiment_name, #"single-domain_experiments",
# run=args.run_name, #"Leaves",
# experiment_root_dir=args.experiment_root_dir,
# tfrecord_root_dir=args.tfrecord_dir,
# low_class_count_thresh=10,
# data_configs={
# args.domain: domain_config_0
# })
# dataset_config_domain.init_config_file()
dataset_config = DatasetConfig(experiment_name=experiment_name,
**run_records[0],
experiment_root_dir=args.experiment_root_dir,
label_col='family',
# target_size=target_size,
# num_channels=num_channels,
grayscale=(args.color_type=='grayscale'),
color_type=args.color_type,
low_class_count_thresh=args.low_class_count_thresh,
data_splits={'val_size':0.0,'test_size':0.5},
tfrecord_root_dir=args.tfrecord_dir,
num_shards=10)
train_config = TrainConfig(model_name=args.model_name,
model_dir=args.model_dir,
batch_size=args.batch_size,
frozen_layers=None,
base_learning_rate=args.base_learning_rate,
buffer_size=500,
num_epochs=args.num_epochs,
preprocessing=True,
x_col='x',
y_col='y',
augment_images=True,
augmentations=['rotate','flip'],
regularization=regularizer,
seed=5,
verbose=True)
experiment_config = ExperimentConfig(dataset_config=dataset_config,
train_config=train_config)
reset_eager_session()
mlflow.tensorflow.autolog()
# mlflow.log_params(experiment_config)
print(f'BEGINNING: DATASET:{args.dataset_name}|MODEL:{args.model_name}|bsz:{args.batch_size}|lr:{args.base_learning_rate}|Color_type={args.color_type}|regularizer={regularizer}')
print('-'*30)
trainer = main(experiment_config, experiment_results_dir)
histories = trainer.histories
mlflow.log_params(args.__dict__)
try:
mlflow_log_params_dict(trainer.config)
# for k, v in trainer.configs.items():
# mlflow.log_params(v)
# print('logged', k)
except:
mlflow_log_params_dict(experiment_config)
| [
"pyleaves.config.ExperimentConfig",
"mlflow.set_experiment",
"pyleaves.utils.csv_utils.gather_run_data",
"pyleaves.analysis.mlflow_utils.mlflow_log_params_dict",
"pyleaves.train.csv_trainer.CSVTrainer",
"argparse.ArgumentParser",
"mlflow.set_tracking_uri",
"mlflow.tensorflow.autolog",
"itertools.pro... | [((496, 541), 'pyleaves.data_pipeline.preprocessing.LabelEncoder', 'LabelEncoder', ([], {'filepath': 'label_mapping_filepath'}), '(filepath=label_mapping_filepath)\n', (508, 541), False, 'from pyleaves.data_pipeline.preprocessing import LabelEncoder\n'), ((581, 639), 'pyleaves.train.csv_trainer.CSVTrainer', 'CSVTrainer', (['experiment_config'], {'label_encoder': 'label_encoder'}), '(experiment_config, label_encoder=label_encoder)\n', (591, 639), False, 'from pyleaves.train.csv_trainer import CSVTrainer\n'), ((696, 794), 'os.path.join', 'os.path.join', (['trainer.model_manager.model_dir', "(trainer.model_name + '_' + domain + '_model.h5')"], {}), "(trainer.model_manager.model_dir, trainer.model_name + '_' +\n domain + '_model.h5')\n", (708, 794), False, 'import os\n'), ((3842, 3856), 'random.seed', 'random.seed', (['(6)'], {}), '(6)\n', (3853, 3856), False, 'import random\n'), ((3875, 3900), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3898, 3900), False, 'import argparse\n'), ((7077, 7114), 'tensorflow.compat.v1.enable_eager_execution', 'tf.compat.v1.enable_eager_execution', ([], {}), '()\n', (7112, 7114), True, 'import tensorflow as tf\n'), ((7826, 7869), 'pyleaves.utils.ensure_dir_exists', 'ensure_dir_exists', (['args.mlflow_tracking_dir'], {}), '(args.mlflow_tracking_dir)\n', (7843, 7869), False, 'from pyleaves.utils import ensure_dir_exists, process_hparam_args\n'), ((7874, 7923), 'mlflow.set_tracking_uri', 'mlflow.set_tracking_uri', (['args.mlflow_tracking_dir'], {}), '(args.mlflow_tracking_dir)\n', (7897, 7923), False, 'import mlflow\n'), ((7928, 7966), 'mlflow.set_experiment', 'mlflow.set_experiment', (['args.experiment'], {}), '(args.experiment)\n', (7949, 7966), False, 'import mlflow\n'), ((8526, 8580), 'pyleaves.utils.process_hparam_args', 'process_hparam_args', (['args'], {'search_params': 'search_params'}), '(args, search_params=search_params)\n', (8545, 8580), False, 'from pyleaves.utils import 
ensure_dir_exists, process_hparam_args\n'), ((8600, 8752), 'collections.OrderedDict', 'OrderedDict', (["{'model_names': args.model_name, 'run_names': args.run_name,\n 'learning_rates': args.base_learning_rate, 'batch_sizes': args.batch_size}"], {}), "({'model_names': args.model_name, 'run_names': args.run_name,\n 'learning_rates': args.base_learning_rate, 'batch_sizes': args.batch_size})\n", (8611, 8752), False, 'from collections import OrderedDict\n'), ((8936, 8949), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8947, 8949), False, 'from collections import OrderedDict\n'), ((1088, 1199), 'os.path.join', 'os.path.join', (['trainer.model_manager.model_dir', "(trainer.model_name + '_' + domain + '_model_weights_best.h5')"], {}), "(trainer.model_manager.model_dir, trainer.model_name + '_' +\n domain + '_model_weights_best.h5')\n", (1100, 1199), False, 'import os\n'), ((1234, 1290), 'os.path.join', 'os.path.join', (['experiment_results_dir', '"""tensorboard_logs"""'], {}), "(experiment_results_dir, 'tensorboard_logs')\n", (1246, 1290), False, 'import os\n'), ((9017, 9042), 'itertools.product', 'itertools.product', (['[k]', 'v'], {}), '([k], v)\n', (9034, 9042), False, 'import itertools\n'), ((9900, 9955), 'mlflow.start_run', 'mlflow.start_run', ([], {'run_name': 'mlflow_run_name', 'nested': '(True)'}), '(run_name=mlflow_run_name, nested=True)\n', (9916, 9955), False, 'import mlflow\n'), ((10070, 10112), 'os.path.basename', 'os.path.basename', (['args.experiment_root_dir'], {}), '(args.experiment_root_dir)\n', (10086, 10112), False, 'import os\n'), ((10802, 10890), 'pyleaves.utils.csv_utils.gather_run_data', 'gather_run_data', (['args.experiment_root_dir'], {'run': 'args.run_name', 'return_type': '"""records"""'}), "(args.experiment_root_dir, run=args.run_name, return_type=\n 'records')\n", (10817, 10890), False, 'from pyleaves.utils.csv_utils import gather_run_data, load_csv_data\n'), ((12521, 12895), 'pyleaves.config.DatasetConfig', 'DatasetConfig', 
([], {'experiment_name': 'experiment_name', 'experiment_root_dir': 'args.experiment_root_dir', 'label_col': '"""family"""', 'grayscale': "(args.color_type == 'grayscale')", 'color_type': 'args.color_type', 'low_class_count_thresh': 'args.low_class_count_thresh', 'data_splits': "{'val_size': 0.0, 'test_size': 0.5}", 'tfrecord_root_dir': 'args.tfrecord_dir', 'num_shards': '(10)'}), "(experiment_name=experiment_name, **run_records[0],\n experiment_root_dir=args.experiment_root_dir, label_col='family',\n grayscale=args.color_type == 'grayscale', color_type=args.color_type,\n low_class_count_thresh=args.low_class_count_thresh, data_splits={\n 'val_size': 0.0, 'test_size': 0.5}, tfrecord_root_dir=args.tfrecord_dir,\n num_shards=10)\n", (12534, 12895), False, 'from pyleaves.config import DatasetConfig, TrainConfig, ExperimentConfig, CSVDomainDataConfig, CSVFrozenRunDataConfig\n'), ((13477, 13847), 'pyleaves.config.TrainConfig', 'TrainConfig', ([], {'model_name': 'args.model_name', 'model_dir': 'args.model_dir', 'batch_size': 'args.batch_size', 'frozen_layers': 'None', 'base_learning_rate': 'args.base_learning_rate', 'buffer_size': '(500)', 'num_epochs': 'args.num_epochs', 'preprocessing': '(True)', 'x_col': '"""x"""', 'y_col': '"""y"""', 'augment_images': '(True)', 'augmentations': "['rotate', 'flip']", 'regularization': 'regularizer', 'seed': '(5)', 'verbose': '(True)'}), "(model_name=args.model_name, model_dir=args.model_dir,\n batch_size=args.batch_size, frozen_layers=None, base_learning_rate=args\n .base_learning_rate, buffer_size=500, num_epochs=args.num_epochs,\n preprocessing=True, x_col='x', y_col='y', augment_images=True,\n augmentations=['rotate', 'flip'], regularization=regularizer, seed=5,\n verbose=True)\n", (13488, 13847), False, 'from pyleaves.config import DatasetConfig, TrainConfig, ExperimentConfig, CSVDomainDataConfig, CSVFrozenRunDataConfig\n'), ((14429, 14503), 'pyleaves.config.ExperimentConfig', 'ExperimentConfig', ([], {'dataset_config': 
'dataset_config', 'train_config': 'train_config'}), '(dataset_config=dataset_config, train_config=train_config)\n', (14445, 14503), False, 'from pyleaves.config import DatasetConfig, TrainConfig, ExperimentConfig, CSVDomainDataConfig, CSVFrozenRunDataConfig\n'), ((14566, 14587), 'pyleaves.leavesdb.tf_utils.tf_utils.reset_eager_session', 'reset_eager_session', ([], {}), '()\n', (14585, 14587), False, 'from pyleaves.leavesdb.tf_utils.tf_utils import reset_eager_session\n'), ((14601, 14628), 'mlflow.tensorflow.autolog', 'mlflow.tensorflow.autolog', ([], {}), '()\n', (14626, 14628), False, 'import mlflow\n'), ((15026, 15058), 'mlflow.log_params', 'mlflow.log_params', (['args.__dict__'], {}), '(args.__dict__)\n', (15043, 15058), False, 'import mlflow\n'), ((15105, 15143), 'pyleaves.analysis.mlflow_utils.mlflow_log_params_dict', 'mlflow_log_params_dict', (['trainer.config'], {}), '(trainer.config)\n', (15127, 15143), False, 'from pyleaves.analysis.mlflow_utils import mlflow_log_params_dict, mlflow_log_history, mlflow_log_best_history\n'), ((9985, 10008), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10006, 10008), False, 'import datetime\n'), ((15319, 15360), 'pyleaves.analysis.mlflow_utils.mlflow_log_params_dict', 'mlflow_log_params_dict', (['experiment_config'], {}), '(experiment_config)\n', (15341, 15360), False, 'from pyleaves.analysis.mlflow_utils import mlflow_log_params_dict, mlflow_log_history, mlflow_log_best_history\n')] |
# Generated by Django 3.1.4 on 2020-12-07 21:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('CustomerApps', '0003_auto_20201207_1922'),
]
operations = [
migrations.RemoveField(
model_name='customerapp',
name='paid_status',
),
migrations.AddField(
model_name='customerapp',
name='status',
field=models.CharField(default='Test', max_length=20),
),
migrations.AlterField(
model_name='customerapp',
name='token',
field=models.CharField(max_length=10, unique=True),
),
]
| [
"django.db.migrations.RemoveField",
"django.db.models.CharField"
] | [((240, 308), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""customerapp"""', 'name': '"""paid_status"""'}), "(model_name='customerapp', name='paid_status')\n", (262, 308), False, 'from django.db import migrations, models\n'), ((457, 504), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""Test"""', 'max_length': '(20)'}), "(default='Test', max_length=20)\n", (473, 504), False, 'from django.db import migrations, models\n'), ((630, 674), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'unique': '(True)'}), '(max_length=10, unique=True)\n', (646, 674), False, 'from django.db import migrations, models\n')] |
import pytest
import time
import xml.etree.ElementTree as etree
import shakedown
import sdk_cmd as cmd
import sdk_hosts as hosts
import sdk_install as install
import sdk_marathon as marathon
import sdk_plan as plan
import sdk_tasks as tasks
import sdk_utils as utils
from tests.config import *
def setup_module(module):
install.uninstall(FOLDERED_SERVICE_NAME, package_name=PACKAGE_NAME)
utils.gc_frameworks()
install.install(
PACKAGE_NAME,
DEFAULT_TASK_COUNT,
service_name=FOLDERED_SERVICE_NAME,
additional_options={"service": { "name": FOLDERED_SERVICE_NAME } })
plan.wait_for_completed_deployment(FOLDERED_SERVICE_NAME)
def setup_function(function):
check_healthy()
def teardown_module(module):
install.uninstall(FOLDERED_SERVICE_NAME, package_name=PACKAGE_NAME)
@pytest.mark.sanity
def test_endpoints():
# check that we can reach the scheduler via admin router, and that returned endpoints are sanitized:
core_site = etree.fromstring(cmd.run_cli('hdfs --name={} endpoints core-site.xml'.format(FOLDERED_SERVICE_NAME)))
check_properties(core_site, {
'ha.zookeeper.parent-znode': '/dcos-service-test__integration__hdfs/hadoop-ha'
})
hdfs_site = etree.fromstring(cmd.run_cli('hdfs --name={} endpoints hdfs-site.xml'.format(FOLDERED_SERVICE_NAME)))
expect = {
'dfs.namenode.shared.edits.dir': 'qjournal://' + ';'.join([
hosts.autoip_host(FOLDERED_SERVICE_NAME, 'journal-{}-node'.format(i), 8485) for i in range(3)]) + '/hdfs',
}
for i in range(2):
expect['dfs.namenode.rpc-address.hdfs.name-{}-node'.format(i)] = hosts.autoip_host(FOLDERED_SERVICE_NAME, 'name-{}-node'.format(i), 9001)
expect['dfs.namenode.http-address.hdfs.name-{}-node'.format(i)] = hosts.autoip_host(FOLDERED_SERVICE_NAME, 'name-{}-node'.format(i), 9002)
check_properties(hdfs_site, expect)
def check_properties(xml, expect):
found = {}
for prop in xml.findall('property'):
name = prop.find('name').text
if name in expect:
found[name] = prop.find('value').text
utils.out('expect: {}\nfound: {}'.format(expect, found))
assert expect == found
@pytest.mark.skip(reason="HDFS-451")
@pytest.mark.data_integrity
@pytest.mark.sanity
def test_integrity_on_data_node_failure():
write_some_data('data-0-node', TEST_FILE_1_NAME)
# gives chance for write to succeed and replication to occur
time.sleep(9)
tasks.kill_task_with_pattern("DataNode", hosts.system_host(FOLDERED_SERVICE_NAME, 'data-0-node'))
tasks.kill_task_with_pattern("DataNode", hosts.system_host(FOLDERED_SERVICE_NAME, 'data-1-node'))
time.sleep(1) # give DataNode a chance to die
read_some_data('data-2-node', TEST_FILE_1_NAME)
check_healthy()
@pytest.mark.skip(reason="HDFS-451")
@pytest.mark.data_integrity
@pytest.mark.sanity
def test_integrity_on_name_node_failure():
"""
The first name node (name-0-node) is the active name node by default when HDFS gets installed.
This test checks that it is possible to write and read data after the first name node fails.
"""
tasks.kill_task_with_pattern("NameNode", hosts.system_host(FOLDERED_SERVICE_NAME, 'name-0-node'))
time.sleep(1) # give NameNode a chance to die
write_some_data('data-0-node', TEST_FILE_2_NAME)
read_some_data('data-2-node', TEST_FILE_2_NAME)
check_healthy()
@pytest.mark.recovery
def test_kill_journal_node():
journal_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'journal-0')
name_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'name')
data_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'data')
tasks.kill_task_with_pattern('journalnode', hosts.system_host(FOLDERED_SERVICE_NAME, 'journal-0-node'))
check_healthy()
tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, 'journal', journal_ids)
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'name', name_ids)
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'data', data_ids)
@pytest.mark.sanity
@pytest.mark.recovery
def test_kill_name_node():
name_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'name-0')
journal_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'journal')
data_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'data')
tasks.kill_task_with_pattern('namenode', hosts.system_host(FOLDERED_SERVICE_NAME, 'name-0-node'))
check_healthy()
tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, 'name', name_ids)
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'journal', journal_ids)
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'data', data_ids)
@pytest.mark.sanity
@pytest.mark.recovery
def test_kill_data_node():
data_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'data-0')
journal_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'journal')
name_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'name')
tasks.kill_task_with_pattern('datanode', hosts.system_host(FOLDERED_SERVICE_NAME, 'data-0-node'))
check_healthy()
tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, 'data', data_ids)
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'journal', journal_ids)
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'name', name_ids)
@pytest.mark.sanity
@pytest.mark.recovery
def test_kill_scheduler():
tasks.kill_task_with_pattern('hdfs.scheduler.Main', shakedown.get_service_ips('marathon').pop())
check_healthy()
@pytest.mark.sanity
@pytest.mark.recovery
def test_kill_all_journalnodes():
journal_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'journal')
data_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'data')
for host in shakedown.get_service_ips(FOLDERED_SERVICE_NAME):
tasks.kill_task_with_pattern('journalnode', host)
check_healthy()
# name nodes fail and restart, so don't check those
tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, 'journal', journal_ids)
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'data', data_ids)
@pytest.mark.sanity
@pytest.mark.recovery
def test_kill_all_namenodes():
journal_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'journal')
name_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'name')
data_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'data')
for host in shakedown.get_service_ips(FOLDERED_SERVICE_NAME):
tasks.kill_task_with_pattern('namenode', host)
check_healthy()
tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, 'name', name_ids)
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'journal', journal_ids)
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'data', data_ids)
@pytest.mark.sanity
@pytest.mark.recovery
def test_kill_all_datanodes():
journal_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'journal')
name_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'name')
data_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'data')
for host in shakedown.get_service_ips(FOLDERED_SERVICE_NAME):
tasks.kill_task_with_pattern('datanode', host)
check_healthy()
tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, 'data', data_ids)
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'journal', journal_ids)
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'name', name_ids)
@pytest.mark.sanity
@pytest.mark.recovery
def test_permanently_replace_namenodes():
replace_name_node(0)
replace_name_node(1)
replace_name_node(0)
@pytest.mark.sanity
@pytest.mark.recovery
def test_permanent_and_transient_namenode_failures_0_1():
check_healthy()
name_0_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'name-0')
name_1_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'name-1')
journal_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'journal')
data_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'data')
cmd.run_cli('hdfs --name={} pods replace name-0'.format(FOLDERED_SERVICE_NAME))
cmd.run_cli('hdfs --name={} pods restart name-1'.format(FOLDERED_SERVICE_NAME))
check_healthy()
tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, 'name-0', name_0_ids)
tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, 'name-1', name_1_ids)
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'journal', journal_ids)
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'data', data_ids)
@pytest.mark.sanity
@pytest.mark.recovery
def test_permanent_and_transient_namenode_failures_1_0():
check_healthy()
name_0_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'name-0')
name_1_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'name-1')
journal_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'journal')
data_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'data')
cmd.run_cli('hdfs --name={} pods replace name-1'.format(FOLDERED_SERVICE_NAME))
cmd.run_cli('hdfs --name={} pods restart name-0'.format(FOLDERED_SERVICE_NAME))
check_healthy()
tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, 'name-0', name_0_ids)
tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, 'name-1', name_1_ids)
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'journal', journal_ids)
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'data', data_ids)
@pytest.mark.smoke
def test_install():
check_healthy()
@pytest.mark.sanity
def test_bump_journal_cpus():
journal_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'journal')
utils.out('journal ids: ' + str(journal_ids))
marathon.bump_cpu_count_config(FOLDERED_SERVICE_NAME, 'JOURNAL_CPUS')
tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, 'journal', journal_ids)
check_healthy()
@pytest.mark.sanity
def test_bump_data_nodes():
data_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'data')
utils.out('data ids: ' + str(data_ids))
marathon.bump_task_count_config(FOLDERED_SERVICE_NAME, 'DATA_COUNT')
check_healthy(DEFAULT_TASK_COUNT + 1)
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'data', data_ids)
@pytest.mark.sanity
def test_modify_app_config():
app_config_field = 'TASKCFG_ALL_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_SIZE_EXPIRY_MS'
journal_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'journal')
name_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'name')
config = marathon.get_config(FOLDERED_SERVICE_NAME)
utils.out('marathon config: ')
utils.out(config)
expiry_ms = int(config['env'][app_config_field])
config['env'][app_config_field] = str(expiry_ms + 1)
marathon.update_app(FOLDERED_SERVICE_NAME, config)
# All tasks should be updated because hdfs-site.xml has changed
check_healthy()
tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, 'journal', journal_ids)
tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, 'name', name_ids)
tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, 'data', journal_ids)
@pytest.mark.sanity
def test_modify_app_config_rollback():
app_config_field = 'TASKCFG_ALL_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_SIZE_EXPIRY_MS'
journal_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'journal')
data_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'data')
old_config = marathon.get_config(FOLDERED_SERVICE_NAME)
config = marathon.get_config(FOLDERED_SERVICE_NAME)
utils.out('marathon config: ')
utils.out(config)
expiry_ms = int(config['env'][app_config_field])
utils.out('expiry ms: ' + str(expiry_ms))
config['env'][app_config_field] = str(expiry_ms + 1)
marathon.update_app(FOLDERED_SERVICE_NAME, config)
# Wait for journal nodes to be affected by the change
tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, 'journal', journal_ids)
journal_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'journal')
utils.out('old config: ')
utils.out(old_config)
# Put the old config back (rollback)
marathon.update_app(FOLDERED_SERVICE_NAME, old_config)
# Wait for the journal nodes to return to their old configuration
tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, 'journal', journal_ids)
check_healthy()
config = marathon.get_config(FOLDERED_SERVICE_NAME)
assert int(config['env'][app_config_field]) == expiry_ms
# Data tasks should not have been affected
tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'data', data_ids)
def replace_name_node(index):
    """Replace the ``name-<index>`` pod and verify that journal and data
    tasks keep running untouched while only the name node is restarted."""
    check_healthy()
    pod_name = 'name-' + str(index)
    old_name_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, pod_name)
    old_journal_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'journal')
    old_data_ids = tasks.get_task_ids(FOLDERED_SERVICE_NAME, 'data')

    cmd.run_cli('hdfs --name={} pods replace {}'.format(FOLDERED_SERVICE_NAME, pod_name))
    check_healthy()

    # Only the replaced name node should have been restarted.
    tasks.check_tasks_updated(FOLDERED_SERVICE_NAME, pod_name, old_name_ids)
    tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'journal', old_journal_ids)
    tasks.check_tasks_not_updated(FOLDERED_SERVICE_NAME, 'data', old_data_ids)
def write_some_data(data_node_name, file_name):
    """Retry writing TEST_CONTENT_SMALL to ``/<file_name>`` on the given
    data node until it succeeds or HDFS_CMD_TIMEOUT_SEC elapses."""
    def attempt_write():
        put_cmd = "echo '{}' | ./bin/hdfs dfs -put - /{}".format(TEST_CONTENT_SMALL, file_name)
        rc, _ = run_hdfs_command(data_node_name, put_cmd)
        # A truthy rc indicates the remote command succeeded.
        return rc

    shakedown.wait_for(attempt_write, timeout_seconds=HDFS_CMD_TIMEOUT_SEC)
def read_some_data(data_node_name, file_name):
    """Retry reading ``/<file_name>`` on the given data node until its
    content matches TEST_CONTENT_SMALL or HDFS_CMD_TIMEOUT_SEC elapses."""
    def attempt_read():
        cat_cmd = "./bin/hdfs dfs -cat /{}".format(file_name)
        rc, output = run_hdfs_command(data_node_name, cat_cmd)
        return rc and output.rstrip() == TEST_CONTENT_SMALL

    shakedown.wait_for(attempt_read, timeout_seconds=HDFS_CMD_TIMEOUT_SEC)
def run_hdfs_command(task_name, command):
    """
    Go into the Data Node hdfs directory, set JAVA_HOME, and execute the command.

    Runs *command* over SSH on the agent that hosts *task_name* and
    returns the (rc, output) pair from shakedown.run_command_on_agent.
    """
    host = hosts.system_host(FOLDERED_SERVICE_NAME, task_name)
    java_home = find_java_home(host)

    # Find hdfs home directory by looking up the Data Node process.
    # Hdfs directory is found in an arg to the java command.
    # NOTE: the embedded whitespace is part of the shell command string;
    # the shell tolerates it, so do not reflow these literals.
    hdfs_dir_cmd = """ps -ef | grep hdfs | grep DataNode \
                    | awk 'BEGIN {RS=" "}; /-Dhadoop.home.dir/' | sed s/-Dhadoop.home.dir=//"""

    # Compose: cd into the discovered hdfs dir, export JAVA_HOME, then
    # run the caller's command — all in one remote shell invocation.
    full_command = """cd $({}) &&
                   export JAVA_HOME={} &&
                   {}""".format(hdfs_dir_cmd, java_home, command)

    rc, output = shakedown.run_command_on_agent(host, full_command)
    return rc, output
def find_java_home(host):
    """
    Find java home by looking up the Data Node process.
    Java home is found in the process command.

    Asserts that the remote lookup succeeded and returns the JAVA_HOME
    path as a string.
    """
    # Extract the java binary path from the DataNode process listing and
    # strip the trailing /bin/java to obtain JAVA_HOME. The embedded
    # whitespace is part of the shell command string; do not reflow it.
    java_home_cmd = """ps -ef | grep hdfs | grep DataNode | grep -v grep \
                    | awk '{print $8}' | sed s:/bin/java::"""
    rc, output = shakedown.run_command_on_agent(host, java_home_cmd)
    assert rc
    java_home = output.rstrip()
    utils.out("java_home: {}".format(java_home))
    return java_home
def check_healthy(count=DEFAULT_TASK_COUNT):
    """Wait (up to 20 minutes each) for deployment and recovery plans to
    complete, then verify *count* tasks are running."""
    timeout = 20 * 60
    plan.wait_for_completed_deployment(FOLDERED_SERVICE_NAME, timeout_seconds=timeout)
    plan.wait_for_completed_recovery(FOLDERED_SERVICE_NAME, timeout_seconds=timeout)
    tasks.check_running(FOLDERED_SERVICE_NAME, count)
| [
"sdk_tasks.check_tasks_updated",
"sdk_marathon.bump_task_count_config",
"sdk_tasks.check_tasks_not_updated",
"time.sleep",
"sdk_plan.wait_for_completed_recovery",
"sdk_marathon.get_config",
"sdk_utils.out",
"sdk_hosts.system_host",
"sdk_plan.wait_for_completed_deployment",
"sdk_install.uninstall",... | [((2207, 2242), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""HDFS-451"""'}), "(reason='HDFS-451')\n", (2223, 2242), False, 'import pytest\n'), ((2804, 2839), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""HDFS-451"""'}), "(reason='HDFS-451')\n", (2820, 2839), False, 'import pytest\n'), ((328, 395), 'sdk_install.uninstall', 'install.uninstall', (['FOLDERED_SERVICE_NAME'], {'package_name': 'PACKAGE_NAME'}), '(FOLDERED_SERVICE_NAME, package_name=PACKAGE_NAME)\n', (345, 395), True, 'import sdk_install as install\n'), ((400, 421), 'sdk_utils.gc_frameworks', 'utils.gc_frameworks', ([], {}), '()\n', (419, 421), True, 'import sdk_utils as utils\n'), ((426, 585), 'sdk_install.install', 'install.install', (['PACKAGE_NAME', 'DEFAULT_TASK_COUNT'], {'service_name': 'FOLDERED_SERVICE_NAME', 'additional_options': "{'service': {'name': FOLDERED_SERVICE_NAME}}"}), "(PACKAGE_NAME, DEFAULT_TASK_COUNT, service_name=\n FOLDERED_SERVICE_NAME, additional_options={'service': {'name':\n FOLDERED_SERVICE_NAME}})\n", (441, 585), True, 'import sdk_install as install\n'), ((617, 674), 'sdk_plan.wait_for_completed_deployment', 'plan.wait_for_completed_deployment', (['FOLDERED_SERVICE_NAME'], {}), '(FOLDERED_SERVICE_NAME)\n', (651, 674), True, 'import sdk_plan as plan\n'), ((762, 829), 'sdk_install.uninstall', 'install.uninstall', (['FOLDERED_SERVICE_NAME'], {'package_name': 'PACKAGE_NAME'}), '(FOLDERED_SERVICE_NAME, package_name=PACKAGE_NAME)\n', (779, 829), True, 'import sdk_install as install\n'), ((2457, 2470), 'time.sleep', 'time.sleep', (['(9)'], {}), '(9)\n', (2467, 2470), False, 'import time\n'), ((2680, 2693), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2690, 2693), False, 'import time\n'), ((3249, 3262), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3259, 3262), False, 'import time\n'), ((3496, 3550), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""journal-0"""'], {}), 
"(FOLDERED_SERVICE_NAME, 'journal-0')\n", (3514, 3550), True, 'import sdk_tasks as tasks\n'), ((3566, 3615), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""name"""'], {}), "(FOLDERED_SERVICE_NAME, 'name')\n", (3584, 3615), True, 'import sdk_tasks as tasks\n'), ((3631, 3680), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""data"""'], {}), "(FOLDERED_SERVICE_NAME, 'data')\n", (3649, 3680), True, 'import sdk_tasks as tasks\n'), ((3814, 3886), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', '"""journal"""', 'journal_ids'], {}), "(FOLDERED_SERVICE_NAME, 'journal', journal_ids)\n", (3839, 3886), True, 'import sdk_tasks as tasks\n'), ((3891, 3961), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""name"""', 'name_ids'], {}), "(FOLDERED_SERVICE_NAME, 'name', name_ids)\n", (3920, 3961), True, 'import sdk_tasks as tasks\n'), ((3966, 4036), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""data"""', 'data_ids'], {}), "(FOLDERED_SERVICE_NAME, 'data', data_ids)\n", (3995, 4036), True, 'import sdk_tasks as tasks\n'), ((4123, 4174), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""name-0"""'], {}), "(FOLDERED_SERVICE_NAME, 'name-0')\n", (4141, 4174), True, 'import sdk_tasks as tasks\n'), ((4193, 4245), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""journal"""'], {}), "(FOLDERED_SERVICE_NAME, 'journal')\n", (4211, 4245), True, 'import sdk_tasks as tasks\n'), ((4261, 4310), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""data"""'], {}), "(FOLDERED_SERVICE_NAME, 'data')\n", (4279, 4310), True, 'import sdk_tasks as tasks\n'), ((4438, 4504), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', '"""name"""', 'name_ids'], {}), "(FOLDERED_SERVICE_NAME, 
'name', name_ids)\n", (4463, 4504), True, 'import sdk_tasks as tasks\n'), ((4509, 4585), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""journal"""', 'journal_ids'], {}), "(FOLDERED_SERVICE_NAME, 'journal', journal_ids)\n", (4538, 4585), True, 'import sdk_tasks as tasks\n'), ((4590, 4660), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""data"""', 'data_ids'], {}), "(FOLDERED_SERVICE_NAME, 'data', data_ids)\n", (4619, 4660), True, 'import sdk_tasks as tasks\n'), ((4747, 4798), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""data-0"""'], {}), "(FOLDERED_SERVICE_NAME, 'data-0')\n", (4765, 4798), True, 'import sdk_tasks as tasks\n'), ((4817, 4869), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""journal"""'], {}), "(FOLDERED_SERVICE_NAME, 'journal')\n", (4835, 4869), True, 'import sdk_tasks as tasks\n'), ((4885, 4934), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""name"""'], {}), "(FOLDERED_SERVICE_NAME, 'name')\n", (4903, 4934), True, 'import sdk_tasks as tasks\n'), ((5062, 5128), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', '"""data"""', 'data_ids'], {}), "(FOLDERED_SERVICE_NAME, 'data', data_ids)\n", (5087, 5128), True, 'import sdk_tasks as tasks\n'), ((5133, 5209), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""journal"""', 'journal_ids'], {}), "(FOLDERED_SERVICE_NAME, 'journal', journal_ids)\n", (5162, 5209), True, 'import sdk_tasks as tasks\n'), ((5214, 5284), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""name"""', 'name_ids'], {}), "(FOLDERED_SERVICE_NAME, 'name', name_ids)\n", (5243, 5284), True, 'import sdk_tasks as tasks\n'), ((5573, 5625), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', 
(['FOLDERED_SERVICE_NAME', '"""journal"""'], {}), "(FOLDERED_SERVICE_NAME, 'journal')\n", (5591, 5625), True, 'import sdk_tasks as tasks\n'), ((5641, 5690), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""data"""'], {}), "(FOLDERED_SERVICE_NAME, 'data')\n", (5659, 5690), True, 'import sdk_tasks as tasks\n'), ((5708, 5756), 'shakedown.get_service_ips', 'shakedown.get_service_ips', (['FOLDERED_SERVICE_NAME'], {}), '(FOLDERED_SERVICE_NAME)\n', (5733, 5756), False, 'import shakedown\n'), ((5897, 5969), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', '"""journal"""', 'journal_ids'], {}), "(FOLDERED_SERVICE_NAME, 'journal', journal_ids)\n", (5922, 5969), True, 'import sdk_tasks as tasks\n'), ((5974, 6044), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""data"""', 'data_ids'], {}), "(FOLDERED_SERVICE_NAME, 'data', data_ids)\n", (6003, 6044), True, 'import sdk_tasks as tasks\n'), ((6138, 6190), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""journal"""'], {}), "(FOLDERED_SERVICE_NAME, 'journal')\n", (6156, 6190), True, 'import sdk_tasks as tasks\n'), ((6206, 6255), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""name"""'], {}), "(FOLDERED_SERVICE_NAME, 'name')\n", (6224, 6255), True, 'import sdk_tasks as tasks\n'), ((6271, 6320), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""data"""'], {}), "(FOLDERED_SERVICE_NAME, 'data')\n", (6289, 6320), True, 'import sdk_tasks as tasks\n'), ((6338, 6386), 'shakedown.get_service_ips', 'shakedown.get_service_ips', (['FOLDERED_SERVICE_NAME'], {}), '(FOLDERED_SERVICE_NAME)\n', (6363, 6386), False, 'import shakedown\n'), ((6468, 6534), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', '"""name"""', 'name_ids'], {}), "(FOLDERED_SERVICE_NAME, 'name', name_ids)\n", (6493, 6534), True, 
'import sdk_tasks as tasks\n'), ((6539, 6615), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""journal"""', 'journal_ids'], {}), "(FOLDERED_SERVICE_NAME, 'journal', journal_ids)\n", (6568, 6615), True, 'import sdk_tasks as tasks\n'), ((6620, 6690), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""data"""', 'data_ids'], {}), "(FOLDERED_SERVICE_NAME, 'data', data_ids)\n", (6649, 6690), True, 'import sdk_tasks as tasks\n'), ((6784, 6836), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""journal"""'], {}), "(FOLDERED_SERVICE_NAME, 'journal')\n", (6802, 6836), True, 'import sdk_tasks as tasks\n'), ((6852, 6901), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""name"""'], {}), "(FOLDERED_SERVICE_NAME, 'name')\n", (6870, 6901), True, 'import sdk_tasks as tasks\n'), ((6917, 6966), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""data"""'], {}), "(FOLDERED_SERVICE_NAME, 'data')\n", (6935, 6966), True, 'import sdk_tasks as tasks\n'), ((6984, 7032), 'shakedown.get_service_ips', 'shakedown.get_service_ips', (['FOLDERED_SERVICE_NAME'], {}), '(FOLDERED_SERVICE_NAME)\n', (7009, 7032), False, 'import shakedown\n'), ((7114, 7180), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', '"""data"""', 'data_ids'], {}), "(FOLDERED_SERVICE_NAME, 'data', data_ids)\n", (7139, 7180), True, 'import sdk_tasks as tasks\n'), ((7185, 7261), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""journal"""', 'journal_ids'], {}), "(FOLDERED_SERVICE_NAME, 'journal', journal_ids)\n", (7214, 7261), True, 'import sdk_tasks as tasks\n'), ((7266, 7336), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""name"""', 'name_ids'], {}), "(FOLDERED_SERVICE_NAME, 'name', name_ids)\n", (7295, 
7336), True, 'import sdk_tasks as tasks\n'), ((7637, 7688), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""name-0"""'], {}), "(FOLDERED_SERVICE_NAME, 'name-0')\n", (7655, 7688), True, 'import sdk_tasks as tasks\n'), ((7706, 7757), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""name-1"""'], {}), "(FOLDERED_SERVICE_NAME, 'name-1')\n", (7724, 7757), True, 'import sdk_tasks as tasks\n'), ((7776, 7828), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""journal"""'], {}), "(FOLDERED_SERVICE_NAME, 'journal')\n", (7794, 7828), True, 'import sdk_tasks as tasks\n'), ((7844, 7893), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""data"""'], {}), "(FOLDERED_SERVICE_NAME, 'data')\n", (7862, 7893), True, 'import sdk_tasks as tasks\n'), ((8088, 8158), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', '"""name-0"""', 'name_0_ids'], {}), "(FOLDERED_SERVICE_NAME, 'name-0', name_0_ids)\n", (8113, 8158), True, 'import sdk_tasks as tasks\n'), ((8163, 8233), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', '"""name-1"""', 'name_1_ids'], {}), "(FOLDERED_SERVICE_NAME, 'name-1', name_1_ids)\n", (8188, 8233), True, 'import sdk_tasks as tasks\n'), ((8238, 8314), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""journal"""', 'journal_ids'], {}), "(FOLDERED_SERVICE_NAME, 'journal', journal_ids)\n", (8267, 8314), True, 'import sdk_tasks as tasks\n'), ((8319, 8389), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""data"""', 'data_ids'], {}), "(FOLDERED_SERVICE_NAME, 'data', data_ids)\n", (8348, 8389), True, 'import sdk_tasks as tasks\n'), ((8528, 8579), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""name-0"""'], {}), "(FOLDERED_SERVICE_NAME, 'name-0')\n", (8546, 
8579), True, 'import sdk_tasks as tasks\n'), ((8597, 8648), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""name-1"""'], {}), "(FOLDERED_SERVICE_NAME, 'name-1')\n", (8615, 8648), True, 'import sdk_tasks as tasks\n'), ((8667, 8719), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""journal"""'], {}), "(FOLDERED_SERVICE_NAME, 'journal')\n", (8685, 8719), True, 'import sdk_tasks as tasks\n'), ((8735, 8784), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""data"""'], {}), "(FOLDERED_SERVICE_NAME, 'data')\n", (8753, 8784), True, 'import sdk_tasks as tasks\n'), ((8979, 9049), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', '"""name-0"""', 'name_0_ids'], {}), "(FOLDERED_SERVICE_NAME, 'name-0', name_0_ids)\n", (9004, 9049), True, 'import sdk_tasks as tasks\n'), ((9054, 9124), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', '"""name-1"""', 'name_1_ids'], {}), "(FOLDERED_SERVICE_NAME, 'name-1', name_1_ids)\n", (9079, 9124), True, 'import sdk_tasks as tasks\n'), ((9129, 9205), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""journal"""', 'journal_ids'], {}), "(FOLDERED_SERVICE_NAME, 'journal', journal_ids)\n", (9158, 9205), True, 'import sdk_tasks as tasks\n'), ((9210, 9280), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""data"""', 'data_ids'], {}), "(FOLDERED_SERVICE_NAME, 'data', data_ids)\n", (9239, 9280), True, 'import sdk_tasks as tasks\n'), ((9412, 9464), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""journal"""'], {}), "(FOLDERED_SERVICE_NAME, 'journal')\n", (9430, 9464), True, 'import sdk_tasks as tasks\n'), ((9520, 9589), 'sdk_marathon.bump_cpu_count_config', 'marathon.bump_cpu_count_config', (['FOLDERED_SERVICE_NAME', '"""JOURNAL_CPUS"""'], {}), 
"(FOLDERED_SERVICE_NAME, 'JOURNAL_CPUS')\n", (9550, 9589), True, 'import sdk_marathon as marathon\n'), ((9595, 9667), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', '"""journal"""', 'journal_ids'], {}), "(FOLDERED_SERVICE_NAME, 'journal', journal_ids)\n", (9620, 9667), True, 'import sdk_tasks as tasks\n'), ((9753, 9802), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""data"""'], {}), "(FOLDERED_SERVICE_NAME, 'data')\n", (9771, 9802), True, 'import sdk_tasks as tasks\n'), ((9852, 9920), 'sdk_marathon.bump_task_count_config', 'marathon.bump_task_count_config', (['FOLDERED_SERVICE_NAME', '"""DATA_COUNT"""'], {}), "(FOLDERED_SERVICE_NAME, 'DATA_COUNT')\n", (9883, 9920), True, 'import sdk_marathon as marathon\n'), ((9968, 10038), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""data"""', 'data_ids'], {}), "(FOLDERED_SERVICE_NAME, 'data', data_ids)\n", (9997, 10038), True, 'import sdk_tasks as tasks\n'), ((10201, 10253), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""journal"""'], {}), "(FOLDERED_SERVICE_NAME, 'journal')\n", (10219, 10253), True, 'import sdk_tasks as tasks\n'), ((10269, 10318), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""name"""'], {}), "(FOLDERED_SERVICE_NAME, 'name')\n", (10287, 10318), True, 'import sdk_tasks as tasks\n'), ((10333, 10375), 'sdk_marathon.get_config', 'marathon.get_config', (['FOLDERED_SERVICE_NAME'], {}), '(FOLDERED_SERVICE_NAME)\n', (10352, 10375), True, 'import sdk_marathon as marathon\n'), ((10380, 10410), 'sdk_utils.out', 'utils.out', (['"""marathon config: """'], {}), "('marathon config: ')\n", (10389, 10410), True, 'import sdk_utils as utils\n'), ((10415, 10432), 'sdk_utils.out', 'utils.out', (['config'], {}), '(config)\n', (10424, 10432), True, 'import sdk_utils as utils\n'), ((10547, 10597), 'sdk_marathon.update_app', 
'marathon.update_app', (['FOLDERED_SERVICE_NAME', 'config'], {}), '(FOLDERED_SERVICE_NAME, config)\n', (10566, 10597), True, 'import sdk_marathon as marathon\n'), ((10691, 10763), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', '"""journal"""', 'journal_ids'], {}), "(FOLDERED_SERVICE_NAME, 'journal', journal_ids)\n", (10716, 10763), True, 'import sdk_tasks as tasks\n'), ((10768, 10834), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', '"""name"""', 'name_ids'], {}), "(FOLDERED_SERVICE_NAME, 'name', name_ids)\n", (10793, 10834), True, 'import sdk_tasks as tasks\n'), ((10839, 10908), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', '"""data"""', 'journal_ids'], {}), "(FOLDERED_SERVICE_NAME, 'data', journal_ids)\n", (10864, 10908), True, 'import sdk_tasks as tasks\n'), ((11080, 11132), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""journal"""'], {}), "(FOLDERED_SERVICE_NAME, 'journal')\n", (11098, 11132), True, 'import sdk_tasks as tasks\n'), ((11148, 11197), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""data"""'], {}), "(FOLDERED_SERVICE_NAME, 'data')\n", (11166, 11197), True, 'import sdk_tasks as tasks\n'), ((11216, 11258), 'sdk_marathon.get_config', 'marathon.get_config', (['FOLDERED_SERVICE_NAME'], {}), '(FOLDERED_SERVICE_NAME)\n', (11235, 11258), True, 'import sdk_marathon as marathon\n'), ((11272, 11314), 'sdk_marathon.get_config', 'marathon.get_config', (['FOLDERED_SERVICE_NAME'], {}), '(FOLDERED_SERVICE_NAME)\n', (11291, 11314), True, 'import sdk_marathon as marathon\n'), ((11319, 11349), 'sdk_utils.out', 'utils.out', (['"""marathon config: """'], {}), "('marathon config: ')\n", (11328, 11349), True, 'import sdk_utils as utils\n'), ((11354, 11371), 'sdk_utils.out', 'utils.out', (['config'], {}), '(config)\n', (11363, 11371), True, 'import sdk_utils as utils\n'), ((11532, 
11582), 'sdk_marathon.update_app', 'marathon.update_app', (['FOLDERED_SERVICE_NAME', 'config'], {}), '(FOLDERED_SERVICE_NAME, config)\n', (11551, 11582), True, 'import sdk_marathon as marathon\n'), ((11646, 11718), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', '"""journal"""', 'journal_ids'], {}), "(FOLDERED_SERVICE_NAME, 'journal', journal_ids)\n", (11671, 11718), True, 'import sdk_tasks as tasks\n'), ((11737, 11789), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""journal"""'], {}), "(FOLDERED_SERVICE_NAME, 'journal')\n", (11755, 11789), True, 'import sdk_tasks as tasks\n'), ((11795, 11820), 'sdk_utils.out', 'utils.out', (['"""old config: """'], {}), "('old config: ')\n", (11804, 11820), True, 'import sdk_utils as utils\n'), ((11825, 11846), 'sdk_utils.out', 'utils.out', (['old_config'], {}), '(old_config)\n', (11834, 11846), True, 'import sdk_utils as utils\n'), ((11892, 11946), 'sdk_marathon.update_app', 'marathon.update_app', (['FOLDERED_SERVICE_NAME', 'old_config'], {}), '(FOLDERED_SERVICE_NAME, old_config)\n', (11911, 11946), True, 'import sdk_marathon as marathon\n'), ((12022, 12094), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', '"""journal"""', 'journal_ids'], {}), "(FOLDERED_SERVICE_NAME, 'journal', journal_ids)\n", (12047, 12094), True, 'import sdk_tasks as tasks\n'), ((12129, 12171), 'sdk_marathon.get_config', 'marathon.get_config', (['FOLDERED_SERVICE_NAME'], {}), '(FOLDERED_SERVICE_NAME)\n', (12148, 12171), True, 'import sdk_marathon as marathon\n'), ((12285, 12355), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""data"""', 'data_ids'], {}), "(FOLDERED_SERVICE_NAME, 'data', data_ids)\n", (12314, 12355), True, 'import sdk_tasks as tasks\n'), ((12464, 12521), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', 'name_node_name'], {}), '(FOLDERED_SERVICE_NAME, 
name_node_name)\n', (12482, 12521), True, 'import sdk_tasks as tasks\n'), ((12540, 12592), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""journal"""'], {}), "(FOLDERED_SERVICE_NAME, 'journal')\n", (12558, 12592), True, 'import sdk_tasks as tasks\n'), ((12608, 12657), 'sdk_tasks.get_task_ids', 'tasks.get_task_ids', (['FOLDERED_SERVICE_NAME', '"""data"""'], {}), "(FOLDERED_SERVICE_NAME, 'data')\n", (12626, 12657), True, 'import sdk_tasks as tasks\n'), ((12780, 12853), 'sdk_tasks.check_tasks_updated', 'tasks.check_tasks_updated', (['FOLDERED_SERVICE_NAME', 'name_node_name', 'name_id'], {}), '(FOLDERED_SERVICE_NAME, name_node_name, name_id)\n', (12805, 12853), True, 'import sdk_tasks as tasks\n'), ((12858, 12934), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""journal"""', 'journal_ids'], {}), "(FOLDERED_SERVICE_NAME, 'journal', journal_ids)\n", (12887, 12934), True, 'import sdk_tasks as tasks\n'), ((12939, 13009), 'sdk_tasks.check_tasks_not_updated', 'tasks.check_tasks_not_updated', (['FOLDERED_SERVICE_NAME', '"""data"""', 'data_ids'], {}), "(FOLDERED_SERVICE_NAME, 'data', data_ids)\n", (12968, 13009), True, 'import sdk_tasks as tasks\n'), ((13938, 13989), 'sdk_hosts.system_host', 'hosts.system_host', (['FOLDERED_SERVICE_NAME', 'task_name'], {}), '(FOLDERED_SERVICE_NAME, task_name)\n', (13955, 13989), True, 'import sdk_hosts as hosts\n'), ((14438, 14488), 'shakedown.run_command_on_agent', 'shakedown.run_command_on_agent', (['host', 'full_command'], {}), '(host, full_command)\n', (14468, 14488), False, 'import shakedown\n'), ((14800, 14851), 'shakedown.run_command_on_agent', 'shakedown.run_command_on_agent', (['host', 'java_home_cmd'], {}), '(host, java_home_cmd)\n', (14830, 14851), False, 'import shakedown\n'), ((15019, 15106), 'sdk_plan.wait_for_completed_deployment', 'plan.wait_for_completed_deployment', (['FOLDERED_SERVICE_NAME'], {'timeout_seconds': '(20 * 60)'}), 
'(FOLDERED_SERVICE_NAME, timeout_seconds=\n 20 * 60)\n', (15053, 15106), True, 'import sdk_plan as plan\n'), ((15106, 15191), 'sdk_plan.wait_for_completed_recovery', 'plan.wait_for_completed_recovery', (['FOLDERED_SERVICE_NAME'], {'timeout_seconds': '(20 * 60)'}), '(FOLDERED_SERVICE_NAME, timeout_seconds=20 * 60\n )\n', (15138, 15191), True, 'import sdk_plan as plan\n'), ((15191, 15240), 'sdk_tasks.check_running', 'tasks.check_running', (['FOLDERED_SERVICE_NAME', 'count'], {}), '(FOLDERED_SERVICE_NAME, count)\n', (15210, 15240), True, 'import sdk_tasks as tasks\n'), ((2517, 2572), 'sdk_hosts.system_host', 'hosts.system_host', (['FOLDERED_SERVICE_NAME', '"""data-0-node"""'], {}), "(FOLDERED_SERVICE_NAME, 'data-0-node')\n", (2534, 2572), True, 'import sdk_hosts as hosts\n'), ((2619, 2674), 'sdk_hosts.system_host', 'hosts.system_host', (['FOLDERED_SERVICE_NAME', '"""data-1-node"""'], {}), "(FOLDERED_SERVICE_NAME, 'data-1-node')\n", (2636, 2674), True, 'import sdk_hosts as hosts\n'), ((3188, 3243), 'sdk_hosts.system_host', 'hosts.system_host', (['FOLDERED_SERVICE_NAME', '"""name-0-node"""'], {}), "(FOLDERED_SERVICE_NAME, 'name-0-node')\n", (3205, 3243), True, 'import sdk_hosts as hosts\n'), ((3730, 3788), 'sdk_hosts.system_host', 'hosts.system_host', (['FOLDERED_SERVICE_NAME', '"""journal-0-node"""'], {}), "(FOLDERED_SERVICE_NAME, 'journal-0-node')\n", (3747, 3788), True, 'import sdk_hosts as hosts\n'), ((4357, 4412), 'sdk_hosts.system_host', 'hosts.system_host', (['FOLDERED_SERVICE_NAME', '"""name-0-node"""'], {}), "(FOLDERED_SERVICE_NAME, 'name-0-node')\n", (4374, 4412), True, 'import sdk_hosts as hosts\n'), ((4981, 5036), 'sdk_hosts.system_host', 'hosts.system_host', (['FOLDERED_SERVICE_NAME', '"""data-0-node"""'], {}), "(FOLDERED_SERVICE_NAME, 'data-0-node')\n", (4998, 5036), True, 'import sdk_hosts as hosts\n'), ((5766, 5815), 'sdk_tasks.kill_task_with_pattern', 'tasks.kill_task_with_pattern', (['"""journalnode"""', 'host'], {}), "('journalnode', host)\n", (5794, 
5815), True, 'import sdk_tasks as tasks\n'), ((6396, 6442), 'sdk_tasks.kill_task_with_pattern', 'tasks.kill_task_with_pattern', (['"""namenode"""', 'host'], {}), "('namenode', host)\n", (6424, 6442), True, 'import sdk_tasks as tasks\n'), ((7042, 7088), 'sdk_tasks.kill_task_with_pattern', 'tasks.kill_task_with_pattern', (['"""datanode"""', 'host'], {}), "('datanode', host)\n", (7070, 7088), True, 'import sdk_tasks as tasks\n'), ((5412, 5449), 'shakedown.get_service_ips', 'shakedown.get_service_ips', (['"""marathon"""'], {}), "('marathon')\n", (5437, 5449), False, 'import shakedown\n')] |
import os
import pathlib
import sys
import subprocess
# Make the FPChecker parser package importable relative to this test file.
sys.path.insert(1, str(pathlib.Path(__file__).parent.absolute())+"/../../../../parser")
#sys.path.insert(1, '/usr/workspace/wsa/laguna/fpchecker/FPChecker/parser')
from tokenizer import Tokenizer
# Instrumented CUDA source file examined by test_1 below.
source = "compute_inst.cu"
def setup_module(module):
    """Switch the working directory to this test file's own directory so
    relative paths (Makefile, instrumented source) resolve correctly."""
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
def teardown_module(module):
    """Remove build artifacts once every test in this module has run.

    Raises subprocess.CalledProcessError if ``make clean`` fails.
    """
    # Pass the command as a single string: with shell=True, a list's first
    # element becomes the command and the rest become arguments to the
    # shell itself, which is platform-dependent and misleading.
    subprocess.check_output("make clean", stderr=subprocess.STDOUT, shell=True)
def test_1():
    """Build the instrumented target and verify the source contains
    exactly one ``_FPC_CHECK_D_(`` transformation.

    Raises subprocess.CalledProcessError if the build fails.
    """
    # Pass the command as a single string: with shell=True a list is
    # platform-dependent (only the first element reaches the shell).
    subprocess.check_output("make", stderr=subprocess.STDOUT, shell=True)

    # Count instrumented statements; `with` guarantees the file handle is
    # closed even if an I/O error or the assertion fires.
    with open(source, "r") as fd:
        transformations = sum(1 for line in fd if "_FPC_CHECK_D_(" in line)

    assert transformations == 1
if __name__ == '__main__':
    # Allow running this test file directly, outside of pytest.
    test_1()
| [
"os.chdir",
"subprocess.check_output",
"os.path.abspath",
"pathlib.Path"
] | [((364, 382), 'os.chdir', 'os.chdir', (['THIS_DIR'], {}), '(THIS_DIR)\n', (372, 382), False, 'import os\n'), ((451, 517), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'stderr': 'subprocess.STDOUT', 'shell': '(True)'}), '(cmd, stderr=subprocess.STDOUT, shell=True)\n', (474, 517), False, 'import subprocess\n'), ((564, 630), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'stderr': 'subprocess.STDOUT', 'shell': '(True)'}), '(cmd, stderr=subprocess.STDOUT, shell=True)\n', (587, 630), False, 'import subprocess\n'), ((335, 360), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (350, 360), False, 'import os\n'), ((78, 100), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (90, 100), False, 'import pathlib\n')] |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, TrafficLightArray , TrafficLight
from std_msgs.msg import Int32
import numpy as np
from threading import Thread, Lock
from copy import deepcopy
class GT_TL_Pub(object):
def __init__(self):
rospy.init_node('gt_TL_Publisher')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.gt_traffic_cb)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
self.gt_TL_pub = rospy.Publisher('traffic_waypoint', Int32, queue_size=1)
self.mutex = Lock()
self.base_waypoints = None
self.current_pose = None
self.next_waypoint_id = None
self.traffic_light_waypoint_id = None
self.gt_tl_waypoint_id = -1
self.t_0 = rospy.get_time()
# Loop Event for updating final_waypoints
rate = rospy.Rate(40)
while not rospy.is_shutdown():
self.mutex.acquire()
self.publish_gt_TL_waypoint()
self.mutex.release()
rate.sleep()
def publish_gt_TL_waypoint(self):
if self.gt_tl_waypoint_id is not None:
self.gt_TL_pub.publish(data=self.gt_tl_waypoint_id)
# rospy.loginfo("tl waypoint id = %d", self.gt_tl_waypoint_id)
def nearest_waypoint(self,x,y,waypoints_list):
min_dist = float('inf')
nearest_point_id = -1
for id , waypoint in enumerate(waypoints_list.waypoints):
waypoint_x = waypoint.pose.pose.position.x
waypoint_y = waypoint.pose.pose.position.y
dist = (waypoint_x-x)**2 + (waypoint_y-y)**2
if dist < min_dist:
min_dist = dist
nearest_point_id = id
return nearest_point_id
def gt_traffic_cb(self,msg):
# t_0 = rospy.get_time()
self.mutex.acquire()
# process ground truth information to get nearest Traffic light and its corrosponding waypoint id
self.gt_tl_waypoint_id = -1
trafficlight_array = msg.lights
# rospy.loginfo("state = {}".format(np.uint8(trafficlight_array[0].state)))
if self.base_waypoints is not None and self.current_pose is not None: #and not trafficlight_array[0].state:
current_pose_x = self.current_pose.pose.position.x
current_pose_y = self.current_pose.pose.position.y
min_dist = float('inf')
nearest_point_id = -1
for id in range(len(trafficlight_array)):
tl_x = trafficlight_array[id].pose.pose.position.x
tl_y = trafficlight_array[id].pose.pose.position.y
dist = (current_pose_x - tl_x) ** 2 + (current_pose_y - tl_y) ** 2
if dist < min_dist:
min_dist = dist
nearest_point_id = id
if nearest_point_id != -1 and not np.uint8(trafficlight_array[0].state):
self.gt_tl_waypoint_id = self.nearest_waypoint(
trafficlight_array[nearest_point_id].pose.pose.position.x,
trafficlight_array[nearest_point_id].pose.pose.position.y,
self.base_waypoints)
elif np.uint8(trafficlight_array[0].state):
self.gt_tl_waypoint_id = -1
self.mutex.release()
# rospy.loginfo("processig time = {}".format(t_0 - rospy.get_time()))
def pose_cb(self, msg):
    """Cache the latest vehicle pose (/current_pose subscriber callback)."""
    self.current_pose = msg
def waypoints_cb(self, waypoints):
    """Cache the static track waypoints (/base_waypoints subscriber callback)."""
    self.base_waypoints = waypoints
if __name__ == '__main__':
    try:
        # Constructing the node starts it; the publish loop presumably runs
        # inside the constructor until ROS shutdown (confirm in __init__).
        GT_TL_Pub()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start GT_TL_Pub node.')
"rospy.logerr",
"numpy.uint8",
"rospy.Subscriber",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.get_time",
"threading.Lock",
"rospy.Rate",
"rospy.Publisher"
] | [((312, 346), 'rospy.init_node', 'rospy.init_node', (['"""gt_TL_Publisher"""'], {}), "('gt_TL_Publisher')\n", (327, 346), False, 'import rospy\n'), ((356, 416), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/current_pose"""', 'PoseStamped', 'self.pose_cb'], {}), "('/current_pose', PoseStamped, self.pose_cb)\n", (372, 416), False, 'import rospy\n'), ((425, 485), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/base_waypoints"""', 'Lane', 'self.waypoints_cb'], {}), "('/base_waypoints', Lane, self.waypoints_cb)\n", (441, 485), False, 'import rospy\n'), ((494, 581), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/vehicle/traffic_lights"""', 'TrafficLightArray', 'self.gt_traffic_cb'], {}), "('/vehicle/traffic_lights', TrafficLightArray, self.\n gt_traffic_cb)\n", (510, 581), False, 'import rospy\n'), ((688, 744), 'rospy.Publisher', 'rospy.Publisher', (['"""traffic_waypoint"""', 'Int32'], {'queue_size': '(1)'}), "('traffic_waypoint', Int32, queue_size=1)\n", (703, 744), False, 'import rospy\n'), ((767, 773), 'threading.Lock', 'Lock', ([], {}), '()\n', (771, 773), False, 'from threading import Thread, Lock\n'), ((980, 996), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (994, 996), False, 'import rospy\n'), ((1063, 1077), 'rospy.Rate', 'rospy.Rate', (['(40)'], {}), '(40)\n', (1073, 1077), False, 'import rospy\n'), ((1096, 1115), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (1113, 1115), False, 'import rospy\n'), ((3831, 3878), 'rospy.logerr', 'rospy.logerr', (['"""Could not start GT_TL_Pub node."""'], {}), "('Could not start GT_TL_Pub node.')\n", (3843, 3878), False, 'import rospy\n'), ((3392, 3429), 'numpy.uint8', 'np.uint8', (['trafficlight_array[0].state'], {}), '(trafficlight_array[0].state)\n', (3400, 3429), True, 'import numpy as np\n'), ((3073, 3110), 'numpy.uint8', 'np.uint8', (['trafficlight_array[0].state'], {}), '(trafficlight_array[0].state)\n', (3081, 3110), True, 'import numpy as np\n')] |
import collections
import datetime
import typing as t
import attr
from .. import targets
from ._base import Rotator, Verdict
class _SupportsLessThan(t.Protocol):
    """Structural protocol for anything comparable with ``<``."""

    def __lt__(self, __other: t.Any) -> bool:
        ...


#: Type variable for orderable values handled by the bucket machinery.
TValue = t.TypeVar("TValue", bound=_SupportsLessThan)

#: Either an explicit keep-count or the literal string "all".
Count = t.Union[int, t.Literal["all"]]
@attr.s(auto_attribs=True, kw_only=True)
class Bucket(t.Generic[TValue]):
    """Collects values into groups and keeps one winner per group.

    ``grouper`` maps a value to its group key; when two values share a key,
    ``decider`` picks which one survives.  ``count`` caps how many group
    winners ``get_winners`` returns ("all" means no cap).
    """
    name: str
    count: Count
    decider: t.Callable[[TValue, TValue], TValue]
    grouper: t.Callable[[TValue], t.Hashable]
    _groups: t.Dict[t.Hashable, TValue] = attr.ib(factory=dict)

    def add(self, value: TValue) -> None:
        """Register *value*, keeping whichever value the decider prefers."""
        key = self.grouper(value)
        if key in self._groups:
            # Preserve argument order: new value first, incumbent second.
            self._groups[key] = self.decider(value, self._groups[key])
        else:
            self._groups[key] = value

    def get_winners(self) -> t.List[TValue]:
        """Return the group winners, largest first, truncated to ``count``."""
        ranked = sorted(self._groups.values(), reverse=True)
        return ranked if self.count == 'all' else ranked[:self.count]
def _year(bucket: targets.Backup) -> int:
    """Grouping key: calendar year of the backup timestamp."""
    # Fix: the local was named `t`, shadowing the module's `typing as t` alias.
    ts = bucket.timestamp
    return ts.year


def _month(bucket: targets.Backup) -> t.Tuple[int, int]:
    """Grouping key: (year, month)."""
    ts = bucket.timestamp
    return ts.year, ts.month


def _week(bucket: targets.Backup) -> t.Tuple[int, int]:
    """Grouping key: (ISO year, ISO week number)."""
    ts = bucket.timestamp
    return ts.isocalendar()[:2]


def _day(bucket: targets.Backup) -> t.Tuple[int, int, int]:
    """Grouping key: (year, month, day)."""
    ts = bucket.timestamp
    return ts.year, ts.month, ts.day


def _hour(bucket: targets.Backup) -> t.Tuple[int, int, int, int]:
    """Grouping key: (year, month, day, hour)."""
    ts = bucket.timestamp
    return ts.year, ts.month, ts.day, ts.hour
@attr.s(auto_attribs=True, kw_only=True)
class DateBucket(Rotator):
    """
    Each backup is sorted into time-based buckets (hour/day/week/month/year).
    A backup kept by at least one bucket -- or the latest backup -- survives;
    anything kept by no bucket is dropped.
    """
    name: str
    #: How many hourly backups to keep.
    hour: t.Optional[Count] = None
    #: How many daily backups to keep.
    day: t.Optional[Count] = None
    #: How many weekly (ISO week) backups to keep.
    week: t.Optional[Count] = None
    #: How many monthly (calendar month) backups to keep.
    month: t.Optional[Count] = None
    #: How many yearly backups to keep.
    year: t.Optional[Count] = None
    #: Keep the newest backup of each bucket instead of the oldest.
    prefer_newest: bool = False

    @classmethod
    def from_options(cls, name: str, rotator: dict) -> 'DateBucket':
        """Construct a DateBucket from a configuration mapping."""
        return cls(name=name, **rotator)

    def rotate_backups(
        self,
        timestamp: datetime.datetime,
        backups: t.List[targets.Backup],
    ) -> t.Iterable[t.Tuple[targets.Backup, Verdict, str]]:
        """Yield a (backup, verdict, explanation) triple for every backup."""
        winners = self.get_winners(backups)
        for backup_date in backups:
            if backup_date in winners:
                verdict = Verdict.keep
                explanation = f"Kept by buckets: {', '.join(winners[backup_date])}"
            else:
                verdict = Verdict.drop
                explanation = "Not kept by any bucket"
            yield backup_date, verdict, explanation

    def get_winners(self, backups: t.List[targets.Backup]) -> t.Dict[targets.Backup, t.List[str]]:
        """Map every backup worth keeping to the bucket labels that keep it."""
        winners: t.Dict[targets.Backup, t.List[str]] = collections.defaultdict(list)
        # Fix: with no backups there is no "Latest"; the original raised
        # IndexError on backups[-1] below.
        if not backups:
            return winners
        backups = sorted(backups)
        decider = max if self.prefer_newest else min
        buckets: t.List[Bucket[targets.Backup]] = []
        if self.year:
            buckets.append(Bucket(name="Year", count=self.year, decider=decider, grouper=_year))
        if self.month:
            buckets.append(Bucket(name="Month", count=self.month, decider=decider, grouper=_month))
        if self.week:
            buckets.append(Bucket(name="Week", count=self.week, decider=decider, grouper=_week))
        if self.day:
            buckets.append(Bucket(name="Day", count=self.day, decider=decider, grouper=_day))
        if self.hour:
            buckets.append(Bucket(name="Hour", count=self.hour, decider=decider, grouper=_hour))
        for backup in buckets and backups or backups:
            for bucket in buckets:
                bucket.add(backup)
        for bucket in buckets:
            for nth, to_keep in enumerate(bucket.get_winners(), start=1):
                winners[to_keep].append(f"{bucket.name} ({nth})")
        winners[backups[-1]].append("Latest")
        return winners
| [
"typing.isocalendar",
"attr.s",
"collections.defaultdict",
"typing.TypeVar",
"attr.ib"
] | [((227, 271), 'typing.TypeVar', 't.TypeVar', (['"""TValue"""'], {'bound': '_SupportsLessThan'}), "('TValue', bound=_SupportsLessThan)\n", (236, 271), True, 'import typing as t\n'), ((314, 353), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)', 'kw_only': '(True)'}), '(auto_attribs=True, kw_only=True)\n', (320, 353), False, 'import attr\n'), ((1654, 1693), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)', 'kw_only': '(True)'}), '(auto_attribs=True, kw_only=True)\n', (1660, 1693), False, 'import attr\n'), ((556, 577), 'attr.ib', 'attr.ib', ([], {'factory': 'dict'}), '(factory=dict)\n', (563, 577), False, 'import attr\n'), ((1375, 1390), 'typing.isocalendar', 't.isocalendar', ([], {}), '()\n', (1388, 1390), True, 'import typing as t\n'), ((4047, 4076), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (4070, 4076), False, 'import collections\n')] |
# Copyright 2019 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
from typ import artifacts
class _FakeFileManager(object):
def __init__(self, disc):
self.disc = disc
def open(self, path, _):
self.path = path
self.disc[path] = ''
return self
def exists(self, path):
return path in self.disc
def join(self, *parts):
return os.path.join(*parts)
def write(self, content):
self.disc[self.path] += content
def maybe_make_directory(self, *path):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
pass
class ArtifactsArtifactCreationTests(unittest.TestCase):
    """Tests for Artifacts.CreateArtifact file placement and overwrite rules."""

    def _VerifyPathAndContents(
            self, output_dir, file_rel_path, contents, iteration=0, test_base_dir='',
            intial_results_base_dir=False):
        """Assert the artifact exists with *contents* at the expected path.

        Expected layout:
        output_dir[/test_base_dir][/retry_<iteration> | /initial]/file_rel_path
        """
        path = output_dir
        if test_base_dir:
            path = os.path.join(path, test_base_dir)
        if iteration:
            path = os.path.join(path, 'retry_%d' % iteration)
        elif intial_results_base_dir:
            path = os.path.join(path, 'initial')
        path = os.path.join(path, file_rel_path)
        self.assertTrue(os.path.exists(path))
        # Fix: artifacts are written as bytes, so read back in binary mode;
        # text mode would compare str against the bytes `contents` and fail
        # under Python 3.
        with open(path, 'rb') as f:
            self.assertEqual(f.read(), contents)

    def test_create_artifact_writes_to_disk_iteration_0_no_test_dir(self):
        """Tests CreateArtifact will write to disk at the correct location."""
        tempdir = tempfile.mkdtemp()
        try:
            ar = artifacts.Artifacts(tempdir)
            file_rel_path = os.path.join('stdout', 'text.txt')
            with ar.CreateArtifact('artifact_name', file_rel_path) as f:
                f.write(b'contents')
            self._VerifyPathAndContents(tempdir, file_rel_path, b'contents')
        finally:
            shutil.rmtree(tempdir)

    def test_create_artifact_writes_to_disk_iteration_1_no_test_dir(self):
        """Tests CreateArtifact will write to disk at the correct location."""
        tempdir = tempfile.mkdtemp()
        try:
            ar = artifacts.Artifacts(tempdir, iteration=1)
            file_rel_path = os.path.join('stdout', 'text.txt')
            with ar.CreateArtifact('artifact_name', file_rel_path) as f:
                f.write(b'contents')
            self._VerifyPathAndContents(tempdir, file_rel_path, b'contents', iteration=1)
        finally:
            shutil.rmtree(tempdir)

    def test_create_artifact_writes_to_disk_iteration_1_test_dir(self):
        """Tests CreateArtifact will write to disk at the correct location."""
        tempdir = tempfile.mkdtemp()
        try:
            ar = artifacts.Artifacts(tempdir, iteration=1, test_name='a.b.c')
            file_rel_path = os.path.join('stdout', 'text.txt')
            with ar.CreateArtifact('artifact_name', file_rel_path) as f:
                f.write(b'contents')
            self._VerifyPathAndContents(
                tempdir, file_rel_path, b'contents', iteration=1, test_base_dir='a.b.c')
        finally:
            shutil.rmtree(tempdir)

    def test_overwriting_artifact_raises_value_error(self):
        """Tests that writing to an already-existing artifact path raises."""
        tempdir = tempfile.mkdtemp()
        try:
            ar = artifacts.Artifacts(tempdir, iteration=1, test_name='a.b.c')
            file_rel_path = os.path.join('stdout', 'text.txt')
            with ar.CreateArtifact('artifact_name', file_rel_path) as f:
                f.write(b'contents')
            # An iteration-0 instance writing into 'retry_1/...' collides with
            # the artifact the iteration-1 instance just wrote.
            ar = artifacts.Artifacts(tempdir, iteration=0, test_name='a.b.c')
            file_rel_path = os.path.join('retry_1', 'stdout', 'text.txt')
            with self.assertRaises(ValueError) as ve:
                with ar.CreateArtifact('artifact_name', file_rel_path) as f:
                    f.write(b'contents')
            self.assertIn('already exists.', str(ve.exception))
        finally:
            shutil.rmtree(tempdir)

    def test_force_overwriting_artifact_does_not_raise_error(self):
        """Tests that force_overwrite=True replaces an existing artifact."""
        tempdir = tempfile.mkdtemp()
        try:
            ar = artifacts.Artifacts(tempdir, iteration=1, test_name='a.b.c')
            file_rel_path = os.path.join('stdout', 'text.txt')
            with ar.CreateArtifact('artifact_name', file_rel_path) as f:
                f.write(b'contents')
            with ar.CreateArtifact(
                    'artifact_name', file_rel_path, force_overwrite=True) as f:
                f.write(b'overwritten contents')
            self._VerifyPathAndContents(
                tempdir, file_rel_path, b'overwritten contents', iteration=1,
                test_base_dir='a.b.c')
        finally:
            shutil.rmtree(tempdir)

    def test_create_artifact_writes_to_disk_initial_results_dir(self):
        """Tests CreateArtifact writes under the 'initial' results directory."""
        tempdir = tempfile.mkdtemp()
        try:
            ar = artifacts.Artifacts(
                tempdir, iteration=0, test_name='a.b.c', intial_results_base_dir=True)
            file_rel_path = os.path.join('stdout', 'text.txt')
            with ar.CreateArtifact('artifact_name', file_rel_path) as f:
                f.write(b'contents')
            self._VerifyPathAndContents(
                tempdir, file_rel_path, b'contents', iteration=0, test_base_dir='a.b.c',
                intial_results_base_dir=True)
        finally:
            shutil.rmtree(tempdir)

    def test_file_manager_writes_file(self):
        """Tests that CreateArtifact writes through a custom file manager."""
        disc = {}
        ar = artifacts.Artifacts('tmp', iteration=0, file_manager=_FakeFileManager(disc))
        file_path = os.path.join('failures', 'stderr.txt')
        with ar.CreateArtifact('artifact_name', file_path) as f:
            f.write('hello world')
        self.assertEqual(disc, {os.path.join('tmp', file_path): 'hello world'})

    def test_finds_duplicates_in_file_manager_(self):
        """Tests that duplicate artifact paths are detected via a file manager."""
        disc = {}
        ar = artifacts.Artifacts('tmp', iteration=0, file_manager=_FakeFileManager(disc))
        file_path = os.path.join('failures', 'stderr.txt')
        with ar.CreateArtifact('artifact1', file_path) as f:
            f.write('hello world')
        with self.assertRaises(ValueError) as ve:
            with ar.CreateArtifact('artifact2', file_path) as f:
                f.write('Goodbye world')
        self.assertIn('already exists', str(ve.exception))
class ArtifactsLinkCreationTests(unittest.TestCase):
    """Validation of Artifacts.CreateLink URL handling."""

    def test_create_link(self):
        """A well-formed https URL is stored under the link name."""
        art = artifacts.Artifacts(None)
        art.CreateLink('link', 'https://testsite.com')
        self.assertEqual(art.artifacts, {'link': ['https://testsite.com']})

    def test_create_link_invalid_url(self):
        """A malformed URL is rejected with ValueError."""
        art = artifacts.Artifacts(None)
        with self.assertRaises(ValueError):
            art.CreateLink('link', 'https:/malformedurl.com')

    def test_create_link_non_https(self):
        """Plain http links are rejected with ValueError."""
        art = artifacts.Artifacts(None)
        with self.assertRaises(ValueError):
            art.CreateLink('link', 'http://testsite.com')

    def test_create_link_newlines(self):
        """URLs containing newlines are rejected with ValueError."""
        art = artifacts.Artifacts(None)
        with self.assertRaises(ValueError):
            art.CreateLink('link', 'https://some\nbadurl.com')
| [
"os.path.exists",
"typ.artifacts.Artifacts",
"os.path.join",
"tempfile.mkdtemp",
"shutil.rmtree"
] | [((979, 999), 'os.path.join', 'os.path.join', (['*parts'], {}), '(*parts)\n', (991, 999), False, 'import os\n'), ((1702, 1735), 'os.path.join', 'os.path.join', (['path', 'file_rel_path'], {}), '(path, file_rel_path)\n', (1714, 1735), False, 'import os\n'), ((2015, 2033), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2031, 2033), False, 'import tempfile\n'), ((2512, 2530), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2528, 2530), False, 'import tempfile\n'), ((3032, 3050), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3048, 3050), False, 'import tempfile\n'), ((3593, 3611), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3609, 3611), False, 'import tempfile\n'), ((4396, 4414), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (4412, 4414), False, 'import tempfile\n'), ((5131, 5149), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (5147, 5149), False, 'import tempfile\n'), ((5783, 5821), 'os.path.join', 'os.path.join', (['"""failures"""', '"""stderr.txt"""'], {}), "('failures', 'stderr.txt')\n", (5795, 5821), False, 'import os\n'), ((6157, 6195), 'os.path.join', 'os.path.join', (['"""failures"""', '"""stderr.txt"""'], {}), "('failures', 'stderr.txt')\n", (6169, 6195), False, 'import os\n'), ((6569, 6594), 'typ.artifacts.Artifacts', 'artifacts.Artifacts', (['None'], {}), '(None)\n', (6588, 6594), False, 'from typ import artifacts\n'), ((6768, 6793), 'typ.artifacts.Artifacts', 'artifacts.Artifacts', (['None'], {}), '(None)\n', (6787, 6793), False, 'from typ import artifacts\n'), ((6939, 6964), 'typ.artifacts.Artifacts', 'artifacts.Artifacts', (['None'], {}), '(None)\n', (6958, 6964), False, 'from typ import artifacts\n'), ((7105, 7130), 'typ.artifacts.Artifacts', 'artifacts.Artifacts', (['None'], {}), '(None)\n', (7124, 7130), False, 'from typ import artifacts\n'), ((1502, 1535), 'os.path.join', 'os.path.join', (['path', 'test_base_dir'], {}), '(path, test_base_dir)\n', (1514, 1535), False, 
'import os\n'), ((1569, 1611), 'os.path.join', 'os.path.join', (['path', "('retry_%d' % iteration)"], {}), "(path, 'retry_%d' % iteration)\n", (1581, 1611), False, 'import os\n'), ((1756, 1776), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1770, 1776), False, 'import os\n'), ((2054, 2082), 'typ.artifacts.Artifacts', 'artifacts.Artifacts', (['tempdir'], {}), '(tempdir)\n', (2073, 2082), False, 'from typ import artifacts\n'), ((2105, 2139), 'os.path.join', 'os.path.join', (['"""stdout"""', '"""text.txt"""'], {}), "('stdout', 'text.txt')\n", (2117, 2139), False, 'import os\n'), ((2326, 2348), 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), '(tempdir)\n', (2339, 2348), False, 'import shutil\n'), ((2551, 2592), 'typ.artifacts.Artifacts', 'artifacts.Artifacts', (['tempdir'], {'iteration': '(1)'}), '(tempdir, iteration=1)\n', (2570, 2592), False, 'from typ import artifacts\n'), ((2615, 2649), 'os.path.join', 'os.path.join', (['"""stdout"""', '"""text.txt"""'], {}), "('stdout', 'text.txt')\n", (2627, 2649), False, 'import os\n'), ((2849, 2871), 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), '(tempdir)\n', (2862, 2871), False, 'import shutil\n'), ((3071, 3131), 'typ.artifacts.Artifacts', 'artifacts.Artifacts', (['tempdir'], {'iteration': '(1)', 'test_name': '"""a.b.c"""'}), "(tempdir, iteration=1, test_name='a.b.c')\n", (3090, 3131), False, 'from typ import artifacts\n'), ((3154, 3188), 'os.path.join', 'os.path.join', (['"""stdout"""', '"""text.txt"""'], {}), "('stdout', 'text.txt')\n", (3166, 3188), False, 'import os\n'), ((3422, 3444), 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), '(tempdir)\n', (3435, 3444), False, 'import shutil\n'), ((3632, 3692), 'typ.artifacts.Artifacts', 'artifacts.Artifacts', (['tempdir'], {'iteration': '(1)', 'test_name': '"""a.b.c"""'}), "(tempdir, iteration=1, test_name='a.b.c')\n", (3651, 3692), False, 'from typ import artifacts\n'), ((3715, 3749), 'os.path.join', 'os.path.join', (['"""stdout"""', 
'"""text.txt"""'], {}), "('stdout', 'text.txt')\n", (3727, 3749), False, 'import os\n'), ((3857, 3917), 'typ.artifacts.Artifacts', 'artifacts.Artifacts', (['tempdir'], {'iteration': '(0)', 'test_name': '"""a.b.c"""'}), "(tempdir, iteration=0, test_name='a.b.c')\n", (3876, 3917), False, 'from typ import artifacts\n'), ((3940, 3985), 'os.path.join', 'os.path.join', (['"""retry_1"""', '"""stdout"""', '"""text.txt"""'], {}), "('retry_1', 'stdout', 'text.txt')\n", (3952, 3985), False, 'import os\n'), ((4217, 4239), 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), '(tempdir)\n', (4230, 4239), False, 'import shutil\n'), ((4435, 4495), 'typ.artifacts.Artifacts', 'artifacts.Artifacts', (['tempdir'], {'iteration': '(1)', 'test_name': '"""a.b.c"""'}), "(tempdir, iteration=1, test_name='a.b.c')\n", (4454, 4495), False, 'from typ import artifacts\n'), ((4518, 4552), 'os.path.join', 'os.path.join', (['"""stdout"""', '"""text.txt"""'], {}), "('stdout', 'text.txt')\n", (4530, 4552), False, 'import os\n'), ((4949, 4971), 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), '(tempdir)\n', (4962, 4971), False, 'import shutil\n'), ((5170, 5264), 'typ.artifacts.Artifacts', 'artifacts.Artifacts', (['tempdir'], {'iteration': '(0)', 'test_name': '"""a.b.c"""', 'intial_results_base_dir': '(True)'}), "(tempdir, iteration=0, test_name='a.b.c',\n intial_results_base_dir=True)\n", (5189, 5264), False, 'from typ import artifacts\n'), ((5292, 5326), 'os.path.join', 'os.path.join', (['"""stdout"""', '"""text.txt"""'], {}), "('stdout', 'text.txt')\n", (5304, 5326), False, 'import os\n'), ((5600, 5622), 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), '(tempdir)\n', (5613, 5622), False, 'import shutil\n'), ((1661, 1690), 'os.path.join', 'os.path.join', (['path', '"""initial"""'], {}), "(path, 'initial')\n", (1673, 1690), False, 'import os\n'), ((5940, 5970), 'os.path.join', 'os.path.join', (['"""tmp"""', 'file_path'], {}), "('tmp', file_path)\n", (5952, 5970), False, 'import os\n')] |
"""
LC 480
Given an array of numbers and a number ‘k’, find the median of all the ‘k’ sized sub-arrays (or windows) of the array.
Example 1:
Input: nums=[1, 2, -1, 3, 5], k = 2
Output: [1.5, 0.5, 1.0, 4.0]
Explanation: Let's consider all windows of size ‘2’:
[1, 2, -1, 3, 5] -> median is 1.5
[1, 2, -1, 3, 5] -> median is 0.5
[1, 2, -1, 3, 5] -> median is 1.0
[1, 2, -1, 3, 5] -> median is 4.0
Example 2:
Input: nums=[1, 2, -1, 3, 5], k = 3
Output: [1.0, 2.0, 3.0]
Explanation: Let's consider all windows of size ‘3’:
[1, 2, -1, 3, 5] -> median is 1.0
[1, 2, -1, 3, 5] -> median is 2.0
[1, 2, -1, 3, 5] -> median is 3.0
"""
from heapq import *
import heapq
class SlidingWindowMedian:
    """Median of every k-sized window, maintained with two heaps.

    ``min_heap`` holds the upper half of the window; ``max_heap`` holds the
    lower half, stored negated so Python's min-heap acts as a max-heap.
    After ``balance()``, len(min_heap) - len(max_heap) is 0 or 1.
    """

    def find_sliding_window_median(self, nums, k):
        """Return a list with the median of each k-sized window of *nums*.

        Time O(N*K) (the linear delete scan dominates); space O(K).
        """
        self.min_heap = []
        self.max_heap = []
        result = []
        for i, n in enumerate(nums):
            self.insert(n)
            if i >= k - 1:
                result.append(self.median())
                # Evict the element sliding out of the window.
                self.delete(nums[i - k + 1])
        return result

    def delete(self, n):
        """Remove one occurrence of *n* from whichever heap contains it."""
        if self.min_heap and self.min_heap[0] <= n:
            heap, target = self.min_heap, n
        else:
            # Values in max_heap are stored negated.
            heap, target = self.max_heap, -n
        i = heap.index(target)  # O(K) scan for the element.
        heap[i] = heap[-1]      # Fill the hole with the last element...
        heap.pop()
        # ...then restore the invariant with the public API instead of the
        # private heapq._siftup/_siftdown helpers the original relied on
        # (same O(K) cost per window, and safe across Python versions).
        heapq.heapify(heap)
        self.balance()

    def balance(self):
        """Rebalance so min_heap holds the same count as max_heap, or one more."""
        if len(self.min_heap) - len(self.max_heap) > 1:
            heapq.heappush(self.max_heap, -heapq.heappop(self.min_heap))
        if len(self.min_heap) < len(self.max_heap):
            heapq.heappush(self.min_heap, -heapq.heappop(self.max_heap))

    def insert(self, n):
        """Push *n* onto the appropriate heap and rebalance."""
        if not self.min_heap or self.min_heap[0] <= n:
            heapq.heappush(self.min_heap, n)
        else:
            heapq.heappush(self.max_heap, -n)
        self.balance()

    def median(self):
        """Median of the current window contents."""
        if len(self.min_heap) == len(self.max_heap):
            # Even count: mean of the two middle values.
            return 0.5 * (self.min_heap[0] - self.max_heap[0])
        else:
            return self.min_heap[0]
def main():
    """Demo: print the sliding-window medians of a sample list for k=2 and k=3."""
    for window_size in (2, 3):
        medians = SlidingWindowMedian().find_sliding_window_median(
            [1, 2, -1, 3, 5], window_size)
        print("Sliding window medians are: " + str(medians))


main()
"""
Time O(NK): find the index takes O(K)
Space O(K)
"""
| [
"heapq._siftdown",
"heapq._siftup"
] | [((1337, 1359), 'heapq._siftup', 'heapq._siftup', (['heap', 'i'], {}), '(heap, i)\n', (1350, 1359), False, 'import heapq\n'), ((1392, 1419), 'heapq._siftdown', 'heapq._siftdown', (['heap', '(0)', 'i'], {}), '(heap, 0, i)\n', (1407, 1419), False, 'import heapq\n')] |
import pandas as pd
import re
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
#from bokeh.plotting import figure, output_file, show
# Load the listings and order them chronologically; drop the first row
# (presumably a stray header/outlier -- confirm against out_4.1.csv).
houses = pd.read_csv("out_4.1.csv", index_col=0)
houses.sort_values('具体日期', inplace=True)
houses = houses.iloc[1:]
# Sanity check: print the year-month part of one date value.
print(re.search(r"(\d+-\d+)(-\d+)", houses['具体日期'][40]).group(1))

# Tick positions/labels shared by every date-axis figure.
DATE_TICKS = range(1, 340, 34)
DATE_LABELS = ('2018-01', '2018-03', '2018-05', '2018-07', '2018-09',
               '2018-11', '2019-01', '2019-03', '2019-05', '2019-07')


def _decorate_date_axis(with_legend=True):
    """Apply the shared tick labels, axis labels and title to the current figure."""
    plt.xticks(DATE_TICKS, DATE_LABELS, rotation=30, ha='right')
    plt.xlabel("Date")
    plt.ylabel("Price")
    plt.title("Rent of Apartment")
    if with_legend:
        plt.legend(loc='upper right')


def _plot_amenity_split(fig_no, column, label):
    """Scatter rent over time, split by presence ('有') / absence ('无') of *column*."""
    plt.figure(fig_no, figsize=(10, 10))
    houses["租金1"] = houses["租金"].loc[houses[column] == '有']
    houses["租金2"] = houses["租金"].loc[houses[column] == '无']
    plt.scatter(houses['具体日期'], houses['租金1'], s=3, color='red',
                label='With ' + label)
    plt.scatter(houses['具体日期'], houses['租金2'], s=3, color='blue',
                label='Without ' + label)
    _decorate_date_axis()
    # plt.savefig('./house_img/figure%d.png' % fig_no)  # disabled in original


# Figures 0-9: one amenity per figure (same order/numbers as the original).
_AMENITIES = [
    ('电视', 'Television'),
    ('冰箱', 'Refrigerator'),
    ('洗衣机', 'Washing Machine'),
    ('空调', 'Air Conditioner'),
    ('热水器', 'Water Heater'),
    ('床', 'Bed'),
    ('暖气', 'Heating'),
    ('宽带', 'Wifi'),
    ('衣柜', 'Wardrobe'),
    ('天然气', 'Natural Gas'),
]
for fig_no, (column, label) in enumerate(_AMENITIES):
    _plot_amenity_split(fig_no, column, label)

# Figure 10: every listing, no amenity split.
plt.figure(10, figsize=(10, 10))
# NOTE: the label below is a copy-paste leftover; it is never displayed
# because this figure draws no legend.
plt.scatter(houses['具体日期'], houses['租金'], s=3, color='red',
            label='With Natural Gas')
_decorate_date_axis(with_legend=False)

# Figure 11: rent split by whether a subway-station distance is recorded
# (NaN != NaN, so the second mask selects rows with a missing distance).
plt.figure(11, figsize=(10, 10))
houses["租金1"] = houses["租金"].loc[houses['距离地铁站距离'] == houses['距离地铁站距离']]
houses["租金2"] = houses["租金"].loc[houses['距离地铁站距离'] != houses['距离地铁站距离']]
plt.scatter(houses['具体日期'], houses['租金1'], s=3, color='red',
            label='With Station Nearby')
plt.scatter(houses['具体日期'], houses['租金2'], s=3, color='blue',
            label='Without Station Nearby')
_decorate_date_axis()
plt.savefig('./house_img/figure_subway.png')
plt.show()

# Figure 12: rent versus floor area.  Fix: the original reused figure
# number 10, which overplotted the area scatter onto the date figure and
# corrupted figure_area.png.
plt.figure(12, figsize=(10, 10))
houses.sort_values('面积', inplace=True)
plt.scatter(houses['面积'], houses['租金'], s=3, color='red',
            label='With Natural Gas')  # label unused (no legend drawn)
plt.xticks(range(1, 180, 18),
           ('10', '60', '90', '120', '150', '180', '210', '240', '270', '310'),
           rotation=30, ha='right')
plt.xlabel("Area")  # fix: was mislabelled "Date" in the original
plt.ylabel("Price")
plt.title("Rent of Apartment")
plt.savefig('./house_img/figure_area.png')
plt.show()

# Figure 13: the subway split again, saved under a different name.  Fix: the
# original reused figure number 11, double-plotting every point in
# figure11.png.
plt.figure(13, figsize=(10, 10))
houses["租金1"] = houses["租金"].loc[houses['距离地铁站距离'] == houses['距离地铁站距离']]
houses["租金2"] = houses["租金"].loc[houses['距离地铁站距离'] != houses['距离地铁站距离']]
plt.scatter(houses['具体日期'], houses['租金1'], s=3, color='red',
            label='With Station Nearby')
plt.scatter(houses['具体日期'], houses['租金2'], s=3, color='blue',
            label='Without Station Nearby')
_decorate_date_axis()
plt.savefig('./house_img/figure11.png')
"re.search",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((165, 204), 'pandas.read_csv', 'pd.read_csv', (['"""out_4.1.csv"""'], {'index_col': '(0)'}), "('out_4.1.csv', index_col=0)\n", (176, 204), True, 'import pandas as pd\n'), ((735, 766), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {'figsize': '(10, 10)'}), '(0, figsize=(10, 10))\n', (745, 766), True, 'import matplotlib.pyplot as plt\n'), ((897, 987), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金1']"], {'s': '(3)', 'color': '"""red"""', 'label': '"""With Television"""'}), "(houses['具体日期'], houses['租金1'], s=3, color='red', label=\n 'With Television')\n", (908, 987), True, 'import matplotlib.pyplot as plt\n'), ((981, 1075), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金2']"], {'s': '(3)', 'color': '"""blue"""', 'label': '"""Without Television"""'}), "(houses['具体日期'], houses['租金2'], s=3, color='blue', label=\n 'Without Television')\n", (992, 1075), True, 'import matplotlib.pyplot as plt\n'), ((1069, 1228), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "('2018-01', '2018-03', '2018-05', '2018-07', '2018-09', '2018-11',\n '2019-01', '2019-03', '2019-05', '2019-07')"], {'rotation': '(30)', 'ha': '"""right"""'}), "(x, ('2018-01', '2018-03', '2018-05', '2018-07', '2018-09',\n '2018-11', '2019-01', '2019-03', '2019-05', '2019-07'), rotation=30, ha\n ='right')\n", (1079, 1228), True, 'import matplotlib.pyplot as plt\n'), ((1209, 1227), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (1219, 1227), True, 'import matplotlib.pyplot as plt\n'), ((1229, 1248), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (1239, 1248), True, 'import matplotlib.pyplot as plt\n'), ((1250, 1280), 'matplotlib.pyplot.title', 'plt.title', (['"""Rent of Apartment"""'], {}), "('Rent of Apartment')\n", (1259, 1280), True, 'import matplotlib.pyplot as plt\n'), ((1282, 1311), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper 
right')\n", (1292, 1311), True, 'import matplotlib.pyplot as plt\n'), ((1370, 1401), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(10, 10)'}), '(1, figsize=(10, 10))\n', (1380, 1401), True, 'import matplotlib.pyplot as plt\n'), ((1532, 1624), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金1']"], {'s': '(3)', 'color': '"""red"""', 'label': '"""With Refrigerator"""'}), "(houses['具体日期'], houses['租金1'], s=3, color='red', label=\n 'With Refrigerator')\n", (1543, 1624), True, 'import matplotlib.pyplot as plt\n'), ((1618, 1714), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金2']"], {'s': '(3)', 'color': '"""blue"""', 'label': '"""Without Refrigerator"""'}), "(houses['具体日期'], houses['租金2'], s=3, color='blue', label=\n 'Without Refrigerator')\n", (1629, 1714), True, 'import matplotlib.pyplot as plt\n'), ((1708, 1867), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "('2018-01', '2018-03', '2018-05', '2018-07', '2018-09', '2018-11',\n '2019-01', '2019-03', '2019-05', '2019-07')"], {'rotation': '(30)', 'ha': '"""right"""'}), "(x, ('2018-01', '2018-03', '2018-05', '2018-07', '2018-09',\n '2018-11', '2019-01', '2019-03', '2019-05', '2019-07'), rotation=30, ha\n ='right')\n", (1718, 1867), True, 'import matplotlib.pyplot as plt\n'), ((1848, 1866), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (1858, 1866), True, 'import matplotlib.pyplot as plt\n'), ((1868, 1887), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (1878, 1887), True, 'import matplotlib.pyplot as plt\n'), ((1889, 1919), 'matplotlib.pyplot.title', 'plt.title', (['"""Rent of Apartment"""'], {}), "('Rent of Apartment')\n", (1898, 1919), True, 'import matplotlib.pyplot as plt\n'), ((1921, 1950), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (1931, 1950), True, 'import matplotlib.pyplot as plt\n'), ((2008, 2039), 
'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {'figsize': '(10, 10)'}), '(2, figsize=(10, 10))\n', (2018, 2039), True, 'import matplotlib.pyplot as plt\n'), ((2172, 2267), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金1']"], {'s': '(3)', 'color': '"""red"""', 'label': '"""With Washing Machine"""'}), "(houses['具体日期'], houses['租金1'], s=3, color='red', label=\n 'With Washing Machine')\n", (2183, 2267), True, 'import matplotlib.pyplot as plt\n'), ((2261, 2360), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金2']"], {'s': '(3)', 'color': '"""blue"""', 'label': '"""Without Washing Machine"""'}), "(houses['具体日期'], houses['租金2'], s=3, color='blue', label=\n 'Without Washing Machine')\n", (2272, 2360), True, 'import matplotlib.pyplot as plt\n'), ((2354, 2513), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "('2018-01', '2018-03', '2018-05', '2018-07', '2018-09', '2018-11',\n '2019-01', '2019-03', '2019-05', '2019-07')"], {'rotation': '(30)', 'ha': '"""right"""'}), "(x, ('2018-01', '2018-03', '2018-05', '2018-07', '2018-09',\n '2018-11', '2019-01', '2019-03', '2019-05', '2019-07'), rotation=30, ha\n ='right')\n", (2364, 2513), True, 'import matplotlib.pyplot as plt\n'), ((2494, 2512), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (2504, 2512), True, 'import matplotlib.pyplot as plt\n'), ((2514, 2533), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (2524, 2533), True, 'import matplotlib.pyplot as plt\n'), ((2535, 2565), 'matplotlib.pyplot.title', 'plt.title', (['"""Rent of Apartment"""'], {}), "('Rent of Apartment')\n", (2544, 2565), True, 'import matplotlib.pyplot as plt\n'), ((2567, 2596), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (2577, 2596), True, 'import matplotlib.pyplot as plt\n'), ((2654, 2685), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {'figsize': '(10, 10)'}), '(3, 
figsize=(10, 10))\n', (2664, 2685), True, 'import matplotlib.pyplot as plt\n'), ((2795, 2890), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金1']"], {'s': '(3)', 'color': '"""red"""', 'label': '"""With Air Conditioner"""'}), "(houses['具体日期'], houses['租金1'], s=3, color='red', label=\n 'With Air Conditioner')\n", (2806, 2890), True, 'import matplotlib.pyplot as plt\n'), ((2884, 2983), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金2']"], {'s': '(3)', 'color': '"""blue"""', 'label': '"""Without Air Conditioner"""'}), "(houses['具体日期'], houses['租金2'], s=3, color='blue', label=\n 'Without Air Conditioner')\n", (2895, 2983), True, 'import matplotlib.pyplot as plt\n'), ((2977, 3136), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "('2018-01', '2018-03', '2018-05', '2018-07', '2018-09', '2018-11',\n '2019-01', '2019-03', '2019-05', '2019-07')"], {'rotation': '(30)', 'ha': '"""right"""'}), "(x, ('2018-01', '2018-03', '2018-05', '2018-07', '2018-09',\n '2018-11', '2019-01', '2019-03', '2019-05', '2019-07'), rotation=30, ha\n ='right')\n", (2987, 3136), True, 'import matplotlib.pyplot as plt\n'), ((3117, 3135), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (3127, 3135), True, 'import matplotlib.pyplot as plt\n'), ((3137, 3156), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (3147, 3156), True, 'import matplotlib.pyplot as plt\n'), ((3158, 3188), 'matplotlib.pyplot.title', 'plt.title', (['"""Rent of Apartment"""'], {}), "('Rent of Apartment')\n", (3167, 3188), True, 'import matplotlib.pyplot as plt\n'), ((3190, 3219), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (3200, 3219), True, 'import matplotlib.pyplot as plt\n'), ((3277, 3308), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {'figsize': '(10, 10)'}), '(4, figsize=(10, 10))\n', (3287, 3308), True, 'import matplotlib.pyplot as plt\n'), 
((3441, 3533), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金1']"], {'s': '(3)', 'color': '"""red"""', 'label': '"""With Water Heater"""'}), "(houses['具体日期'], houses['租金1'], s=3, color='red', label=\n 'With Water Heater')\n", (3452, 3533), True, 'import matplotlib.pyplot as plt\n'), ((3527, 3623), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金2']"], {'s': '(3)', 'color': '"""blue"""', 'label': '"""Without Water Heater"""'}), "(houses['具体日期'], houses['租金2'], s=3, color='blue', label=\n 'Without Water Heater')\n", (3538, 3623), True, 'import matplotlib.pyplot as plt\n'), ((3617, 3776), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "('2018-01', '2018-03', '2018-05', '2018-07', '2018-09', '2018-11',\n '2019-01', '2019-03', '2019-05', '2019-07')"], {'rotation': '(30)', 'ha': '"""right"""'}), "(x, ('2018-01', '2018-03', '2018-05', '2018-07', '2018-09',\n '2018-11', '2019-01', '2019-03', '2019-05', '2019-07'), rotation=30, ha\n ='right')\n", (3627, 3776), True, 'import matplotlib.pyplot as plt\n'), ((3757, 3775), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (3767, 3775), True, 'import matplotlib.pyplot as plt\n'), ((3777, 3796), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (3787, 3796), True, 'import matplotlib.pyplot as plt\n'), ((3798, 3828), 'matplotlib.pyplot.title', 'plt.title', (['"""Rent of Apartment"""'], {}), "('Rent of Apartment')\n", (3807, 3828), True, 'import matplotlib.pyplot as plt\n'), ((3830, 3859), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (3840, 3859), True, 'import matplotlib.pyplot as plt\n'), ((3917, 3948), 'matplotlib.pyplot.figure', 'plt.figure', (['(5)'], {'figsize': '(10, 10)'}), '(5, figsize=(10, 10))\n', (3927, 3948), True, 'import matplotlib.pyplot as plt\n'), ((4077, 4155), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金1']"], 
{'s': '(3)', 'color': '"""red"""', 'label': '"""With Bed"""'}), "(houses['具体日期'], houses['租金1'], s=3, color='red', label='With Bed')\n", (4088, 4155), True, 'import matplotlib.pyplot as plt\n'), ((4154, 4241), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金2']"], {'s': '(3)', 'color': '"""blue"""', 'label': '"""Without Bed"""'}), "(houses['具体日期'], houses['租金2'], s=3, color='blue', label=\n 'Without Bed')\n", (4165, 4241), True, 'import matplotlib.pyplot as plt\n'), ((4235, 4394), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "('2018-01', '2018-03', '2018-05', '2018-07', '2018-09', '2018-11',\n '2019-01', '2019-03', '2019-05', '2019-07')"], {'rotation': '(30)', 'ha': '"""right"""'}), "(x, ('2018-01', '2018-03', '2018-05', '2018-07', '2018-09',\n '2018-11', '2019-01', '2019-03', '2019-05', '2019-07'), rotation=30, ha\n ='right')\n", (4245, 4394), True, 'import matplotlib.pyplot as plt\n'), ((4375, 4393), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (4385, 4393), True, 'import matplotlib.pyplot as plt\n'), ((4395, 4414), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (4405, 4414), True, 'import matplotlib.pyplot as plt\n'), ((4416, 4446), 'matplotlib.pyplot.title', 'plt.title', (['"""Rent of Apartment"""'], {}), "('Rent of Apartment')\n", (4425, 4446), True, 'import matplotlib.pyplot as plt\n'), ((4448, 4477), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (4458, 4477), True, 'import matplotlib.pyplot as plt\n'), ((4535, 4566), 'matplotlib.pyplot.figure', 'plt.figure', (['(6)'], {'figsize': '(10, 10)'}), '(6, figsize=(10, 10))\n', (4545, 4566), True, 'import matplotlib.pyplot as plt\n'), ((4697, 4784), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金1']"], {'s': '(3)', 'color': '"""red"""', 'label': '"""With Heating"""'}), "(houses['具体日期'], houses['租金1'], s=3, color='red', label=\n 'With 
Heating')\n", (4708, 4784), True, 'import matplotlib.pyplot as plt\n'), ((4778, 4869), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金2']"], {'s': '(3)', 'color': '"""blue"""', 'label': '"""Without Heating"""'}), "(houses['具体日期'], houses['租金2'], s=3, color='blue', label=\n 'Without Heating')\n", (4789, 4869), True, 'import matplotlib.pyplot as plt\n'), ((4863, 5022), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "('2018-01', '2018-03', '2018-05', '2018-07', '2018-09', '2018-11',\n '2019-01', '2019-03', '2019-05', '2019-07')"], {'rotation': '(30)', 'ha': '"""right"""'}), "(x, ('2018-01', '2018-03', '2018-05', '2018-07', '2018-09',\n '2018-11', '2019-01', '2019-03', '2019-05', '2019-07'), rotation=30, ha\n ='right')\n", (4873, 5022), True, 'import matplotlib.pyplot as plt\n'), ((5003, 5021), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (5013, 5021), True, 'import matplotlib.pyplot as plt\n'), ((5023, 5042), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (5033, 5042), True, 'import matplotlib.pyplot as plt\n'), ((5044, 5074), 'matplotlib.pyplot.title', 'plt.title', (['"""Rent of Apartment"""'], {}), "('Rent of Apartment')\n", (5053, 5074), True, 'import matplotlib.pyplot as plt\n'), ((5076, 5105), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (5086, 5105), True, 'import matplotlib.pyplot as plt\n'), ((5163, 5194), 'matplotlib.pyplot.figure', 'plt.figure', (['(7)'], {'figsize': '(10, 10)'}), '(7, figsize=(10, 10))\n', (5173, 5194), True, 'import matplotlib.pyplot as plt\n'), ((5325, 5404), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金1']"], {'s': '(3)', 'color': '"""red"""', 'label': '"""With Wifi"""'}), "(houses['具体日期'], houses['租金1'], s=3, color='red', label='With Wifi')\n", (5336, 5404), True, 'import matplotlib.pyplot as plt\n'), ((5403, 5491), 'matplotlib.pyplot.scatter', 
'plt.scatter', (["houses['具体日期']", "houses['租金2']"], {'s': '(3)', 'color': '"""blue"""', 'label': '"""Without Wifi"""'}), "(houses['具体日期'], houses['租金2'], s=3, color='blue', label=\n 'Without Wifi')\n", (5414, 5491), True, 'import matplotlib.pyplot as plt\n'), ((5485, 5644), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "('2018-01', '2018-03', '2018-05', '2018-07', '2018-09', '2018-11',\n '2019-01', '2019-03', '2019-05', '2019-07')"], {'rotation': '(30)', 'ha': '"""right"""'}), "(x, ('2018-01', '2018-03', '2018-05', '2018-07', '2018-09',\n '2018-11', '2019-01', '2019-03', '2019-05', '2019-07'), rotation=30, ha\n ='right')\n", (5495, 5644), True, 'import matplotlib.pyplot as plt\n'), ((5625, 5643), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (5635, 5643), True, 'import matplotlib.pyplot as plt\n'), ((5645, 5664), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (5655, 5664), True, 'import matplotlib.pyplot as plt\n'), ((5666, 5696), 'matplotlib.pyplot.title', 'plt.title', (['"""Rent of Apartment"""'], {}), "('Rent of Apartment')\n", (5675, 5696), True, 'import matplotlib.pyplot as plt\n'), ((5698, 5727), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (5708, 5727), True, 'import matplotlib.pyplot as plt\n'), ((5785, 5816), 'matplotlib.pyplot.figure', 'plt.figure', (['(8)'], {'figsize': '(10, 10)'}), '(8, figsize=(10, 10))\n', (5795, 5816), True, 'import matplotlib.pyplot as plt\n'), ((5947, 6035), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金1']"], {'s': '(3)', 'color': '"""red"""', 'label': '"""With Wardrobe"""'}), "(houses['具体日期'], houses['租金1'], s=3, color='red', label=\n 'With Wardrobe')\n", (5958, 6035), True, 'import matplotlib.pyplot as plt\n'), ((6029, 6121), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金2']"], {'s': '(3)', 'color': '"""blue"""', 'label': '"""Without 
Wardrobe"""'}), "(houses['具体日期'], houses['租金2'], s=3, color='blue', label=\n 'Without Wardrobe')\n", (6040, 6121), True, 'import matplotlib.pyplot as plt\n'), ((6115, 6274), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "('2018-01', '2018-03', '2018-05', '2018-07', '2018-09', '2018-11',\n '2019-01', '2019-03', '2019-05', '2019-07')"], {'rotation': '(30)', 'ha': '"""right"""'}), "(x, ('2018-01', '2018-03', '2018-05', '2018-07', '2018-09',\n '2018-11', '2019-01', '2019-03', '2019-05', '2019-07'), rotation=30, ha\n ='right')\n", (6125, 6274), True, 'import matplotlib.pyplot as plt\n'), ((6255, 6273), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (6265, 6273), True, 'import matplotlib.pyplot as plt\n'), ((6275, 6294), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (6285, 6294), True, 'import matplotlib.pyplot as plt\n'), ((6296, 6326), 'matplotlib.pyplot.title', 'plt.title', (['"""Rent of Apartment"""'], {}), "('Rent of Apartment')\n", (6305, 6326), True, 'import matplotlib.pyplot as plt\n'), ((6328, 6357), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (6338, 6357), True, 'import matplotlib.pyplot as plt\n'), ((6415, 6446), 'matplotlib.pyplot.figure', 'plt.figure', (['(9)'], {'figsize': '(10, 10)'}), '(9, figsize=(10, 10))\n', (6425, 6446), True, 'import matplotlib.pyplot as plt\n'), ((6579, 6670), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金1']"], {'s': '(3)', 'color': '"""red"""', 'label': '"""With Natural Gas"""'}), "(houses['具体日期'], houses['租金1'], s=3, color='red', label=\n 'With Natural Gas')\n", (6590, 6670), True, 'import matplotlib.pyplot as plt\n'), ((6664, 6759), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金2']"], {'s': '(3)', 'color': '"""blue"""', 'label': '"""Without Natural Gas"""'}), "(houses['具体日期'], houses['租金2'], s=3, color='blue', label=\n 'Without Natural Gas')\n", 
(6675, 6759), True, 'import matplotlib.pyplot as plt\n'), ((6753, 6912), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "('2018-01', '2018-03', '2018-05', '2018-07', '2018-09', '2018-11',\n '2019-01', '2019-03', '2019-05', '2019-07')"], {'rotation': '(30)', 'ha': '"""right"""'}), "(x, ('2018-01', '2018-03', '2018-05', '2018-07', '2018-09',\n '2018-11', '2019-01', '2019-03', '2019-05', '2019-07'), rotation=30, ha\n ='right')\n", (6763, 6912), True, 'import matplotlib.pyplot as plt\n'), ((6893, 6911), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (6903, 6911), True, 'import matplotlib.pyplot as plt\n'), ((6913, 6932), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (6923, 6932), True, 'import matplotlib.pyplot as plt\n'), ((6934, 6964), 'matplotlib.pyplot.title', 'plt.title', (['"""Rent of Apartment"""'], {}), "('Rent of Apartment')\n", (6943, 6964), True, 'import matplotlib.pyplot as plt\n'), ((6966, 6995), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (6976, 6995), True, 'import matplotlib.pyplot as plt\n'), ((7054, 7086), 'matplotlib.pyplot.figure', 'plt.figure', (['(10)'], {'figsize': '(10, 10)'}), '(10, figsize=(10, 10))\n', (7064, 7086), True, 'import matplotlib.pyplot as plt\n'), ((7107, 7197), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金']"], {'s': '(3)', 'color': '"""red"""', 'label': '"""With Natural Gas"""'}), "(houses['具体日期'], houses['租金'], s=3, color='red', label=\n 'With Natural Gas')\n", (7118, 7197), True, 'import matplotlib.pyplot as plt\n'), ((7191, 7350), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "('2018-01', '2018-03', '2018-05', '2018-07', '2018-09', '2018-11',\n '2019-01', '2019-03', '2019-05', '2019-07')"], {'rotation': '(30)', 'ha': '"""right"""'}), "(x, ('2018-01', '2018-03', '2018-05', '2018-07', '2018-09',\n '2018-11', '2019-01', '2019-03', '2019-05', '2019-07'), rotation=30, ha\n 
='right')\n", (7201, 7350), True, 'import matplotlib.pyplot as plt\n'), ((7331, 7349), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (7341, 7349), True, 'import matplotlib.pyplot as plt\n'), ((7351, 7370), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (7361, 7370), True, 'import matplotlib.pyplot as plt\n'), ((7372, 7402), 'matplotlib.pyplot.title', 'plt.title', (['"""Rent of Apartment"""'], {}), "('Rent of Apartment')\n", (7381, 7402), True, 'import matplotlib.pyplot as plt\n'), ((7462, 7494), 'matplotlib.pyplot.figure', 'plt.figure', (['(11)'], {'figsize': '(10, 10)'}), '(11, figsize=(10, 10))\n', (7472, 7494), True, 'import matplotlib.pyplot as plt\n'), ((7663, 7757), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金1']"], {'s': '(3)', 'color': '"""red"""', 'label': '"""With Station Nearby"""'}), "(houses['具体日期'], houses['租金1'], s=3, color='red', label=\n 'With Station Nearby')\n", (7674, 7757), True, 'import matplotlib.pyplot as plt\n'), ((7751, 7849), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金2']"], {'s': '(3)', 'color': '"""blue"""', 'label': '"""Without Station Nearby"""'}), "(houses['具体日期'], houses['租金2'], s=3, color='blue', label=\n 'Without Station Nearby')\n", (7762, 7849), True, 'import matplotlib.pyplot as plt\n'), ((7843, 8002), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "('2018-01', '2018-03', '2018-05', '2018-07', '2018-09', '2018-11',\n '2019-01', '2019-03', '2019-05', '2019-07')"], {'rotation': '(30)', 'ha': '"""right"""'}), "(x, ('2018-01', '2018-03', '2018-05', '2018-07', '2018-09',\n '2018-11', '2019-01', '2019-03', '2019-05', '2019-07'), rotation=30, ha\n ='right')\n", (7853, 8002), True, 'import matplotlib.pyplot as plt\n'), ((7983, 8001), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (7993, 8001), True, 'import matplotlib.pyplot as plt\n'), ((8003, 8022), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (8013, 8022), True, 'import matplotlib.pyplot as plt\n'), ((8024, 8054), 'matplotlib.pyplot.title', 'plt.title', (['"""Rent of Apartment"""'], {}), "('Rent of Apartment')\n", (8033, 8054), True, 'import matplotlib.pyplot as plt\n'), ((8056, 8085), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (8066, 8085), True, 'import matplotlib.pyplot as plt\n'), ((8087, 8131), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./house_img/figure_subway.png"""'], {}), "('./house_img/figure_subway.png')\n", (8098, 8131), True, 'import matplotlib.pyplot as plt\n'), ((8133, 8143), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8141, 8143), True, 'import matplotlib.pyplot as plt\n'), ((8149, 8181), 'matplotlib.pyplot.figure', 'plt.figure', (['(10)'], {'figsize': '(10, 10)'}), '(10, figsize=(10, 10))\n', (8159, 8181), True, 'import matplotlib.pyplot as plt\n'), ((8241, 8329), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['面积']", "houses['租金']"], {'s': '(3)', 'color': '"""red"""', 'label': '"""With Natural Gas"""'}), "(houses['面积'], houses['租金'], s=3, color='red', label=\n 'With Natural Gas')\n", (8252, 8329), True, 'import matplotlib.pyplot as plt\n'), ((8323, 8434), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "('10', '60', '90', '120', '150', '180', '210', '240', '270', '310')"], {'rotation': '(30)', 'ha': '"""right"""'}), "(x, ('10', '60', '90', '120', '150', '180', '210', '240', '270',\n '310'), rotation=30, ha='right')\n", (8333, 8434), True, 'import matplotlib.pyplot as plt\n'), ((8420, 8438), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (8430, 8438), True, 'import matplotlib.pyplot as plt\n'), ((8440, 8459), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (8450, 8459), True, 'import matplotlib.pyplot as plt\n'), ((8461, 8491), 'matplotlib.pyplot.title', 
'plt.title', (['"""Rent of Apartment"""'], {}), "('Rent of Apartment')\n", (8470, 8491), True, 'import matplotlib.pyplot as plt\n'), ((8493, 8535), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./house_img/figure_area.png"""'], {}), "('./house_img/figure_area.png')\n", (8504, 8535), True, 'import matplotlib.pyplot as plt\n'), ((8537, 8547), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8545, 8547), True, 'import matplotlib.pyplot as plt\n'), ((8551, 8583), 'matplotlib.pyplot.figure', 'plt.figure', (['(11)'], {'figsize': '(10, 10)'}), '(11, figsize=(10, 10))\n', (8561, 8583), True, 'import matplotlib.pyplot as plt\n'), ((8752, 8846), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金1']"], {'s': '(3)', 'color': '"""red"""', 'label': '"""With Station Nearby"""'}), "(houses['具体日期'], houses['租金1'], s=3, color='red', label=\n 'With Station Nearby')\n", (8763, 8846), True, 'import matplotlib.pyplot as plt\n'), ((8840, 8938), 'matplotlib.pyplot.scatter', 'plt.scatter', (["houses['具体日期']", "houses['租金2']"], {'s': '(3)', 'color': '"""blue"""', 'label': '"""Without Station Nearby"""'}), "(houses['具体日期'], houses['租金2'], s=3, color='blue', label=\n 'Without Station Nearby')\n", (8851, 8938), True, 'import matplotlib.pyplot as plt\n'), ((8932, 9091), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "('2018-01', '2018-03', '2018-05', '2018-07', '2018-09', '2018-11',\n '2019-01', '2019-03', '2019-05', '2019-07')"], {'rotation': '(30)', 'ha': '"""right"""'}), "(x, ('2018-01', '2018-03', '2018-05', '2018-07', '2018-09',\n '2018-11', '2019-01', '2019-03', '2019-05', '2019-07'), rotation=30, ha\n ='right')\n", (8942, 9091), True, 'import matplotlib.pyplot as plt\n'), ((9072, 9090), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Date"""'], {}), "('Date')\n", (9082, 9090), True, 'import matplotlib.pyplot as plt\n'), ((9092, 9111), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (9102, 9111), True, 'import 
matplotlib.pyplot as plt\n'), ((9113, 9143), 'matplotlib.pyplot.title', 'plt.title', (['"""Rent of Apartment"""'], {}), "('Rent of Apartment')\n", (9122, 9143), True, 'import matplotlib.pyplot as plt\n'), ((9145, 9174), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (9155, 9174), True, 'import matplotlib.pyplot as plt\n'), ((9176, 9215), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./house_img/figure11.png"""'], {}), "('./house_img/figure11.png')\n", (9187, 9215), True, 'import matplotlib.pyplot as plt\n'), ((278, 329), 're.search', 're.search', (['"""(\\\\d+-\\\\d+)(-\\\\d+)"""', "houses['具体日期'][40]"], {}), "('(\\\\d+-\\\\d+)(-\\\\d+)', houses['具体日期'][40])\n", (287, 329), False, 'import re\n')] |
import multiprocessing as mp
import os
import subprocess
import time
import pytest
from solarforecastarbiter.io import fetch
def badfun():
    """Worker task that fails unconditionally with ``ValueError``.

    Used as a parametrized fixture to check that exceptions raised inside
    the executor propagate back to the awaiting caller.
    """
    raise ValueError
def bad_subprocess():
    """Worker task that fails with ``subprocess.CalledProcessError``.

    Runs ``cat`` on a path that does not exist; ``check=True`` turns the
    non-zero exit status into a raised exception.
    """
    doomed = ['cat', '/nowaythisworks']
    subprocess.run(doomed, check=True, capture_output=True)
@pytest.mark.asyncio
@pytest.mark.parametrize('bad,err', [
    (badfun, ValueError),
    (bad_subprocess, subprocess.CalledProcessError)
])
async def test_cluster_error(bad, err):
    """An exception raised inside the executor reaches the awaiting caller
    with its original type, both for a plain ``raise`` and for a failed
    subprocess call."""
    # The executor backend is an optional extra; skip cleanly if absent.
    pytest.importorskip("loky", reason="requires [fetch] packages")
    with pytest.raises(err):
        await fetch.run_in_executor(bad)
def getpid():  # pragma: no cover
    """Return the PID of the process executing this call.

    Submitted to the executor so the test can learn a worker's PID.
    """
    worker = mp.current_process()
    return worker.pid
def longrunning():  # pragma: no cover
    """Block for a few seconds so the test has time to kill the worker
    while this task is still running."""
    time.sleep(3)
@pytest.mark.asyncio
@pytest.mark.timeout(5, method='thread')
async def test_cluster_external_kill():
    """SIGKILL-ing a worker process surfaces as ``TerminatedWorkerError``
    on the pending future instead of hanging the event loop (the 5 second
    timeout guards against a hang)."""
    pytest.importorskip("loky", reason="requires [fetch] packages")
    from loky.process_executor import TerminatedWorkerError
    # Learn a worker PID, then submit a long task without awaiting it yet.
    # NOTE(review): this presumes the long task lands on (or is affected
    # by the death of) the same worker/pool -- confirm against the
    # executor's pool semantics.
    pid = await fetch.run_in_executor(getpid)
    long = fetch.run_in_executor(longrunning)
    # Kill the worker out from under the executor (signal 9 = SIGKILL).
    os.kill(pid, 9)
    with pytest.raises(TerminatedWorkerError):
        await long
| [
"os.kill",
"solarforecastarbiter.io.fetch.run_in_executor",
"subprocess.run",
"time.sleep",
"pytest.mark.parametrize",
"pytest.importorskip",
"pytest.raises",
"pytest.mark.timeout",
"multiprocessing.current_process"
] | [((293, 404), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""bad,err"""', '[(badfun, ValueError), (bad_subprocess, subprocess.CalledProcessError)]'], {}), "('bad,err', [(badfun, ValueError), (bad_subprocess,\n subprocess.CalledProcessError)])\n", (316, 404), False, 'import pytest\n'), ((744, 783), 'pytest.mark.timeout', 'pytest.mark.timeout', (['(5)'], {'method': '"""thread"""'}), "(5, method='thread')\n", (763, 783), False, 'import pytest\n'), ((193, 268), 'subprocess.run', 'subprocess.run', (["['cat', '/nowaythisworks']"], {'check': '(True)', 'capture_output': '(True)'}), "(['cat', '/nowaythisworks'], check=True, capture_output=True)\n", (207, 268), False, 'import subprocess\n'), ((455, 518), 'pytest.importorskip', 'pytest.importorskip', (['"""loky"""'], {'reason': '"""requires [fetch] packages"""'}), "('loky', reason='requires [fetch] packages')\n", (474, 518), False, 'import pytest\n'), ((706, 719), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (716, 719), False, 'import time\n'), ((828, 891), 'pytest.importorskip', 'pytest.importorskip', (['"""loky"""'], {'reason': '"""requires [fetch] packages"""'}), "('loky', reason='requires [fetch] packages')\n", (847, 891), False, 'import pytest\n'), ((1009, 1043), 'solarforecastarbiter.io.fetch.run_in_executor', 'fetch.run_in_executor', (['longrunning'], {}), '(longrunning)\n', (1030, 1043), False, 'from solarforecastarbiter.io import fetch\n'), ((1048, 1063), 'os.kill', 'os.kill', (['pid', '(9)'], {}), '(pid, 9)\n', (1055, 1063), False, 'import os\n'), ((528, 546), 'pytest.raises', 'pytest.raises', (['err'], {}), '(err)\n', (541, 546), False, 'import pytest\n'), ((636, 656), 'multiprocessing.current_process', 'mp.current_process', ([], {}), '()\n', (654, 656), True, 'import multiprocessing as mp\n'), ((968, 997), 'solarforecastarbiter.io.fetch.run_in_executor', 'fetch.run_in_executor', (['getpid'], {}), '(getpid)\n', (989, 997), False, 'from solarforecastarbiter.io import fetch\n'), ((1073, 1109), 
'pytest.raises', 'pytest.raises', (['TerminatedWorkerError'], {}), '(TerminatedWorkerError)\n', (1086, 1109), False, 'import pytest\n'), ((562, 588), 'solarforecastarbiter.io.fetch.run_in_executor', 'fetch.run_in_executor', (['bad'], {}), '(bad)\n', (583, 588), False, 'from solarforecastarbiter.io import fetch\n')] |
from fastapi import APIRouter, Depends
from fastapi.responses import ORJSONResponse
from di import GetPrizeInteractorFactory
from domain.entity import Prize, PrizeResponse, ValidationErrorResponse, now
from usecase.interactor import GetPrizeInteractor
# Router for the prize endpoints defined below (tagged "prize").
router = APIRouter()
@router.get(
    "/{user_id}",
    response_class=ORJSONResponse,
    response_model=PrizeResponse,
    responses={422: {"model": ValidationErrorResponse}},
    tags=["prize"],
)
async def get_prizes(
    user_id: str,
    interactor: GetPrizeInteractor = Depends(GetPrizeInteractorFactory.get),
):
    """Return the prize for ``user_id`` wrapped with the current server time.

    The interactor is injected by FastAPI through the factory dependency.
    """
    result: Prize = await interactor.execute(user_id=user_id)
    payload = {"servertime": now(), "content": result}
    return payload
| [
"domain.entity.now",
"fastapi.APIRouter",
"fastapi.Depends"
] | [((263, 274), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (272, 274), False, 'from fastapi import APIRouter, Depends\n'), ((533, 571), 'fastapi.Depends', 'Depends', (['GetPrizeInteractorFactory.get'], {}), '(GetPrizeInteractorFactory.get)\n', (540, 571), False, 'from fastapi import APIRouter, Depends\n'), ((663, 668), 'domain.entity.now', 'now', ([], {}), '()\n', (666, 668), False, 'from domain.entity import Prize, PrizeResponse, ValidationErrorResponse, now\n')] |
import webbrowser
import pyautogui as magic
from datetime import datetime
import time
import sys
import yaml
# Suppress traceback frames so runtime errors print only the message line.
sys.tracebacklimit=0
# Path to the YAML configuration file with timings and meeting details.
settings_path="setting.yaml"
with open(settings_path) as f:
    # NOTE(review): yaml.safe_load would be the safer default if this file
    # could ever come from an untrusted source.
    settings = yaml.load(f, Loader=yaml.FullLoader)
# Schedule section: class start times, end times, and 'range'
# (presumably the number of schedule entries -- verify against the YAML).
alltimings = settings['alltimings']
timing = alltimings['starttime']
endtime = alltimings['endtime']
columns = alltimings['range']
# Google Meet section: per-class meeting links and/or meeting codes.
google_meet = settings["google meets"]
link = google_meet["link"]
code = google_meet['code']
def preemeeting():
    """Replay the recorded tab/enter key sequence on Meet's pre-join screen
    to reach and press the join control, then block in waitingforend()
    until the scheduled end time."""
    print('Going to enter the meeting')
    key_sequence = (
        'tab', 'tab', 'tab', 'tab', 'enter',
        'tab', 'enter',
        'tab', 'tab', 'tab', 'tab', 'enter',
    )
    for key in key_sequence:
        magic.press(key)
    waitingforend()
def preemeeting2():
    """Variant of preemeeting() with one fewer trailing tab, for the
    alternative pre-join layout; ends by blocking in waitingforend()."""
    print('Going to enter the meeting')
    key_sequence = (
        'tab', 'tab', 'tab', 'tab', 'enter',
        'tab', 'enter',
        'tab', 'tab', 'tab', 'enter',
    )
    for key in key_sequence:
        magic.press(key)
    waitingforend()
def entermeeting():
    """Click the on-screen 'stream' button, tab to the join control,
    confirm, and hand off to waiting().

    NOTE(review): if the screenshot asset is not found on screen,
    locateCenterOnScreen returns None (or raises, depending on the
    pyautogui version) -- confirm the asset exists at runtime.
    """
    # Raw string: '\s' is an invalid escape sequence and triggers a
    # SyntaxWarning on modern Python; the runtime byte content is unchanged.
    blank = magic.locateCenterOnScreen(r'core\stream.png', confidence=0.8)
    magic.moveTo(blank)
    magic.click()
    magic.click()
    # Four tabs reach the join button, then confirm with enter.
    for _ in range(4):
        magic.press('tab')
    magic.press('enter')
    time.sleep(10)  # give the meeting page time to load
    waiting()
def waiting():
    """Poll until Meet's 'waiting for the meeting to start' banner
    disappears, then join via preemeeting().

    Bug fix: the original captured the screenshot match once before its
    loop and never refreshed it, so the loop condition could never change
    and the page kept reloading forever even after the host started the
    meeting.  The match is now re-taken on every iteration.  The raw
    string also fixes the invalid '\\w' escape (value unchanged).
    """
    wait = magic.locateCenterOnScreen(r'core\waiting-for-meeting.png',
                                       confidence=0.8)
    if wait is None:
        # No waiting banner: the meeting is already live -- join it.
        preemeeting()
        return
    print('Waiting for the meeting to be started by the teacher')
    while wait is not None:
        # Still waiting: reload the page, pause, and re-check the screen.
        magic.hotkey('ctrl', 'r')
        time.sleep(15)
        wait = magic.locateCenterOnScreen(r'core\waiting-for-meeting.png',
                                           confidence=0.8)
    preemeeting()
def googlemeets():
    """Poll the clock until a configured start time is reached, then open
    the class meeting via Classroom, a meeting code, or a direct link.

    NOTE(review): the outer ``while True`` has no sleep on the non-matching
    path, so this busy-waits at full speed until a start time matches.
    """
    print('Waiting for the start time')
    while True:
        # Compare only hours:minutes against the configured start times.
        curr = datetime.now().strftime("%H:%M")
        for now in timing:
            if now == curr:
                # Find the index of the matching slot; 'i' selects the
                # corresponding classroom tab / code / link below.
                # NOTE(review): 'while i <= columns' can index past the end
                # of 'timing' if 'range' >= len(timing) -- verify config.
                i = 0
                while i <= columns:
                    if curr == timing[i]:
                        print(i)
                        break
                    i += 1
                if link == [] and code == []:
                    # Neither links nor codes configured: go through Google
                    # Classroom and tab to the i-th class entry.
                    webbrowser.get('windows-default').open('https://classroom.google.com')
                    time.sleep(15)
                    for z in range(0,3):
                        magic.press('tab')
                    if i == 0:
                        magic.press('enter')
                    else:
                        # Four extra tabs per class card to skip to entry i.
                        for b in range(0,i):
                            magic.press('tab')
                            magic.press('tab')
                            magic.press('tab')
                            magic.press('tab')
                        magic.press('enter')
                    time.sleep(10)
                    entermeeting()
                elif link == []:
                    # No links: open Meet and type the i-th meeting code.
                    webbrowser.get('windows-default').open('https://meet.google.com/')
                    time.sleep(15)
                    # NOTE(review): '\e' in this literal is an invalid escape
                    # sequence (SyntaxWarning on modern Python); consider a
                    # raw string.
                    textbox= magic.locateCenterOnScreen('core\enter-meeting-id.png')
                    magic.moveTo(textbox)
                    magic.click()
                    magic.write(code[i])
                    magic.press('enter')
                    time.sleep(10)
                    preemeeting2()
                elif code == []:
                    # No codes: open the i-th direct meeting link.
                    webbrowser.get('windows-default').open(link[i])
                    time.sleep(15)
                    preemeeting2()
                else:
                    # Both lists populated is treated as a config error.
                    print('Please enter either the meeting code or the meeting link')
def waitingforend():
    """Watch the clock and, once a scheduled end time is reached, leave the call and close the window."""
    print('waiting for the end time')
    while True:
        curr2 = datetime.now().strftime("%H:%M")
        for now2 in endtime:
            if now2 == curr2:
                # Eight tabs reach the 'leave call' control, enter activates it.
                magic.press('tab')
                magic.press('tab')
                magic.press('tab')
                magic.press('tab')
                magic.press('tab')
                magic.press('tab')
                magic.press('tab')
                magic.press('tab')
                magic.press('enter')
                time.sleep(2)
                # Close the browser window entirely.
                magic.hotkey('alt', 'f4')
                # NOTE(review): recursing back into googlemeets() grows the call
                # stack on every class; a top-level loop would avoid an eventual
                # RecursionError on long school days.
                googlemeets()
        # for/else: there was no break above, so this runs after every full
        # scan of endtime — i.e. the loop re-checks the clock every 2 seconds.
        else:
            time.sleep(2)
# Script entry: start the scheduler loop immediately on import/run.
googlemeets()
| [
"pyautogui.hotkey",
"pyautogui.write",
"pyautogui.locateCenterOnScreen",
"pyautogui.press",
"pyautogui.moveTo",
"webbrowser.get",
"yaml.load",
"time.sleep",
"pyautogui.click",
"datetime.datetime.now"
] | [((218, 254), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (227, 254), False, 'import yaml\n'), ((567, 585), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (578, 585), True, 'import pyautogui as magic\n'), ((591, 609), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (602, 609), True, 'import pyautogui as magic\n'), ((615, 633), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (626, 633), True, 'import pyautogui as magic\n'), ((639, 657), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (650, 657), True, 'import pyautogui as magic\n'), ((663, 683), 'pyautogui.press', 'magic.press', (['"""enter"""'], {}), "('enter')\n", (674, 683), True, 'import pyautogui as magic\n'), ((689, 707), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (700, 707), True, 'import pyautogui as magic\n'), ((713, 733), 'pyautogui.press', 'magic.press', (['"""enter"""'], {}), "('enter')\n", (724, 733), True, 'import pyautogui as magic\n'), ((739, 757), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (750, 757), True, 'import pyautogui as magic\n'), ((763, 781), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (774, 781), True, 'import pyautogui as magic\n'), ((787, 805), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (798, 805), True, 'import pyautogui as magic\n'), ((811, 829), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (822, 829), True, 'import pyautogui as magic\n'), ((835, 855), 'pyautogui.press', 'magic.press', (['"""enter"""'], {}), "('enter')\n", (846, 855), True, 'import pyautogui as magic\n'), ((946, 964), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (957, 964), True, 'import pyautogui as magic\n'), ((970, 988), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (981, 988), True, 'import pyautogui as magic\n'), 
((994, 1012), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (1005, 1012), True, 'import pyautogui as magic\n'), ((1018, 1036), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (1029, 1036), True, 'import pyautogui as magic\n'), ((1042, 1062), 'pyautogui.press', 'magic.press', (['"""enter"""'], {}), "('enter')\n", (1053, 1062), True, 'import pyautogui as magic\n'), ((1068, 1086), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (1079, 1086), True, 'import pyautogui as magic\n'), ((1092, 1112), 'pyautogui.press', 'magic.press', (['"""enter"""'], {}), "('enter')\n", (1103, 1112), True, 'import pyautogui as magic\n'), ((1118, 1136), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (1129, 1136), True, 'import pyautogui as magic\n'), ((1142, 1160), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (1153, 1160), True, 'import pyautogui as magic\n'), ((1166, 1184), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (1177, 1184), True, 'import pyautogui as magic\n'), ((1190, 1210), 'pyautogui.press', 'magic.press', (['"""enter"""'], {}), "('enter')\n", (1201, 1210), True, 'import pyautogui as magic\n'), ((1268, 1330), 'pyautogui.locateCenterOnScreen', 'magic.locateCenterOnScreen', (['"""core\\\\stream.png"""'], {'confidence': '(0.8)'}), "('core\\\\stream.png', confidence=0.8)\n", (1294, 1330), True, 'import pyautogui as magic\n'), ((1337, 1356), 'pyautogui.moveTo', 'magic.moveTo', (['blank'], {}), '(blank)\n', (1349, 1356), True, 'import pyautogui as magic\n'), ((1362, 1375), 'pyautogui.click', 'magic.click', ([], {}), '()\n', (1373, 1375), True, 'import pyautogui as magic\n'), ((1381, 1394), 'pyautogui.click', 'magic.click', ([], {}), '()\n', (1392, 1394), True, 'import pyautogui as magic\n'), ((1400, 1418), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (1411, 1418), True, 'import pyautogui as magic\n'), ((1424, 1442), 'pyautogui.press', 
'magic.press', (['"""tab"""'], {}), "('tab')\n", (1435, 1442), True, 'import pyautogui as magic\n'), ((1448, 1466), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (1459, 1466), True, 'import pyautogui as magic\n'), ((1472, 1490), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (1483, 1490), True, 'import pyautogui as magic\n'), ((1496, 1516), 'pyautogui.press', 'magic.press', (['"""enter"""'], {}), "('enter')\n", (1507, 1516), True, 'import pyautogui as magic\n'), ((1522, 1536), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1532, 1536), False, 'import time\n'), ((1586, 1661), 'pyautogui.locateCenterOnScreen', 'magic.locateCenterOnScreen', (['"""core\\\\waiting-for-meeting.png"""'], {'confidence': '(0.8)'}), "('core\\\\waiting-for-meeting.png', confidence=0.8)\n", (1612, 1661), True, 'import pyautogui as magic\n'), ((1728, 1753), 'pyautogui.hotkey', 'magic.hotkey', (['"""ctrl"""', '"""r"""'], {}), "('ctrl', 'r')\n", (1740, 1753), True, 'import pyautogui as magic\n'), ((1966, 1991), 'pyautogui.hotkey', 'magic.hotkey', (['"""ctrl"""', '"""r"""'], {}), "('ctrl', 'r')\n", (1978, 1991), True, 'import pyautogui as magic\n'), ((2009, 2023), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (2019, 2023), False, 'import time\n'), ((2136, 2150), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2148, 2150), False, 'from datetime import datetime\n'), ((4008, 4022), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4020, 4022), False, 'from datetime import datetime\n'), ((4119, 4137), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (4130, 4137), True, 'import pyautogui as magic\n'), ((4155, 4173), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (4166, 4173), True, 'import pyautogui as magic\n'), ((4191, 4209), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (4202, 4209), True, 'import pyautogui as magic\n'), ((4227, 4245), 'pyautogui.press', 
'magic.press', (['"""tab"""'], {}), "('tab')\n", (4238, 4245), True, 'import pyautogui as magic\n'), ((4263, 4281), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (4274, 4281), True, 'import pyautogui as magic\n'), ((4299, 4317), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (4310, 4317), True, 'import pyautogui as magic\n'), ((4335, 4353), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (4346, 4353), True, 'import pyautogui as magic\n'), ((4371, 4389), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (4382, 4389), True, 'import pyautogui as magic\n'), ((4407, 4427), 'pyautogui.press', 'magic.press', (['"""enter"""'], {}), "('enter')\n", (4418, 4427), True, 'import pyautogui as magic\n'), ((4445, 4458), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4455, 4458), False, 'import time\n'), ((4476, 4501), 'pyautogui.hotkey', 'magic.hotkey', (['"""alt"""', '"""f4"""'], {}), "('alt', 'f4')\n", (4488, 4501), True, 'import pyautogui as magic\n'), ((4569, 4582), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4579, 4582), False, 'import time\n'), ((2582, 2596), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (2592, 2596), False, 'import time\n'), ((3093, 3107), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (3103, 3107), False, 'import time\n'), ((2664, 2682), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (2675, 2682), True, 'import pyautogui as magic\n'), ((2740, 2760), 'pyautogui.press', 'magic.press', (['"""enter"""'], {}), "('enter')\n", (2751, 2760), True, 'import pyautogui as magic\n'), ((3051, 3071), 'pyautogui.press', 'magic.press', (['"""enter"""'], {}), "('enter')\n", (3062, 3071), True, 'import pyautogui as magic\n'), ((3287, 3301), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (3297, 3301), False, 'import time\n'), ((3332, 3388), 'pyautogui.locateCenterOnScreen', 'magic.locateCenterOnScreen', 
(['"""core\\\\enter-meeting-id.png"""'], {}), "('core\\\\enter-meeting-id.png')\n", (3358, 3388), True, 'import pyautogui as magic\n'), ((3409, 3430), 'pyautogui.moveTo', 'magic.moveTo', (['textbox'], {}), '(textbox)\n', (3421, 3430), True, 'import pyautogui as magic\n'), ((3452, 3465), 'pyautogui.click', 'magic.click', ([], {}), '()\n', (3463, 3465), True, 'import pyautogui as magic\n'), ((3487, 3507), 'pyautogui.write', 'magic.write', (['code[i]'], {}), '(code[i])\n', (3498, 3507), True, 'import pyautogui as magic\n'), ((3529, 3549), 'pyautogui.press', 'magic.press', (['"""enter"""'], {}), "('enter')\n", (3540, 3549), True, 'import pyautogui as magic\n'), ((3571, 3585), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (3581, 3585), False, 'import time\n'), ((2490, 2523), 'webbrowser.get', 'webbrowser.get', (['"""windows-default"""'], {}), "('windows-default')\n", (2504, 2523), False, 'import webbrowser\n'), ((2863, 2881), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (2874, 2881), True, 'import pyautogui as magic\n'), ((2911, 2929), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (2922, 2929), True, 'import pyautogui as magic\n'), ((2959, 2977), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (2970, 2977), True, 'import pyautogui as magic\n'), ((3007, 3025), 'pyautogui.press', 'magic.press', (['"""tab"""'], {}), "('tab')\n", (3018, 3025), True, 'import pyautogui as magic\n'), ((3746, 3760), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (3756, 3760), False, 'import time\n'), ((3199, 3232), 'webbrowser.get', 'webbrowser.get', (['"""windows-default"""'], {}), "('windows-default')\n", (3213, 3232), False, 'import webbrowser\n'), ((3677, 3710), 'webbrowser.get', 'webbrowser.get', (['"""windows-default"""'], {}), "('windows-default')\n", (3691, 3710), False, 'import webbrowser\n')] |
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
#
# PlayonCloud recorder
#
# update-alternatives --install /usr/bin/python python /usr/bin/python3.7 2
# sudo apt-get install chromium-chromedriver
# sudo apt-get install libxml2-dev libxslt-dev python-dev
# which python3 (make sure that path is /usr/bin/python3)
#
# Finally: crontab -e => 0 23 * * * /usr/bin/python3 /plex/media/Media/DownloadPlayonRecordings.py
# a.k.a. automatically run every day at 11:00 p.m.
#
# For bonus points, setup playon to automatically update:
# bash -c "$(wget -qO - https://raw.githubusercontent.com/mrworf/plexupdate/master/extras/installer.sh)"
# --- Module-level configuration --------------------------------------------
# 'exists' is used later when creating per-provider folders.
from genericpath import exists
import logging, os, configparser
# All paths, credentials, and tunables live in an ini file next to the script.
g_iniPath = 'PlayonDownloader.ini'
# ExtendedInterpolation means ini can use ${} instead of %()s, and lets you refer to other sections besides default if needed
g_config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
g_config.read(g_iniPath)
g_paths = g_config['Paths']
g_creds = g_config['Credentials']
g_settings = g_config['Settings']
# NOTE(review): 'install_requires' looks like a leftover from a setup.py; it
# is never read below and only documents the third-party packages required.
install_requires = [
    'beautifulsoup4',
    'IMDbPY',
    'selenium'
]
logging.basicConfig(filename=os.path.join(g_paths['mediaroot'], g_settings['logfile']), level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
class PlayonVideo:
    """Metadata for one recording row scraped from the Playon 'recordings' table."""

    def __init__(self, tr=None):
        # A bare PlayonVideo() is used when rebuilding the list from files
        # already on disk (see GenerateDownloadList elsewhere in this file).
        if tr is None:
            self.Provider = "Default"
            return
        # tr is a row of BeautifulSoup <td> cells from the recordings table.
        self.DownloadButtonId = tr[0].i['id']
        # Normalizes the title up front; Chrome/IMDB both tolerate the mapping.
        self.CreateRightName(tr[1].text)
        self.Provider = tr[2].text
        self.Size = tr[3].text
        self.Duration = tr[4].text
        self.Created = self.ConvertFromPvTime(tr[5].text)
        self.Expires = self.ConvertFromPvTime(tr[6].text)

    def ConvertFromPvTime(self, tm):
        """Parse Playon's 'Jan 17, 2022' style date string into a datetime."""
        from datetime import datetime
        return datetime.strptime(tm, '%b %d, %Y')

    def CreateRightName(self, title):
        """Split *title* into show/season/episode parts (TV) or sanitize it (movie)."""
        import re
        episode_parts = re.match(r"(.*)([Ss]\d{2})([Ee]\d{2})(.*)", title)
        if episode_parts is None:
            # No SxxEyy marker: treat as a movie; keep the name filesystem-safe.
            self.Title = title.replace(':', '_').replace('/', '_')
            self.VideoType = "Movie"
            return
        show, season_tag, episode_tag, tail = episode_parts.groups()
        cleaned = show.replace(':', ' ').replace('_', ' ').strip()
        # Drop the trailing separator dash left between show name and SxxEyy.
        self.ShowTitle = cleaned.rstrip('-').strip()
        self.Season = season_tag[1:]
        self.Episode = episode_tag[1:]
        self.EpisodeTitle = tail.replace(':', ' ').replace('-', ' ').strip()
        self.VideoType = "TvShow"
        self.Title = self.ShowTitle + ' - ' + season_tag + episode_tag + ' - ' + self.EpisodeTitle
def LogInToPlayon(driver):
    """Drive the Playon login form: fill credentials from g_creds, submit, wait for the list page."""
    import time
    from selenium.webdriver.common.by import By
    logging.debug('Entering LogInToPlayon')
    driver.get('https://www.playonrecorder.com/list')
    time.sleep(10)  # long sleep: let the login page render

    def _fill_field(field_id, value):
        # Click first so send_keys lands in the intended input.
        box = driver.find_element(By.ID, field_id)
        box.click()
        box.send_keys(value)
        time.sleep(1)

    _fill_field("email", g_creds['playonusername'])
    _fill_field('password', g_creds['<PASSWORD>'])
    driver.find_element(By.ID, 'login-button').click()
    time.sleep(10)  # long sleep: let the 'Your recordings' page load
    logging.debug('Exiting LogInToPlayon (and presuming success)')
def CheckForNewVideos(driver):
    """Parse the Playon recordings page and return the list of PlayonVideo worth handling.

    A recording is skipped when a matching file already exists under the plex
    movie/TV trees. Recordings already sitting in the Chrome download folder
    are still returned (so a crashed prior run can resume tracking them), but
    are not re-downloaded.
    """
    from bs4 import BeautifulSoup
    import os
    bs = BeautifulSoup(driver.page_source, features='html.parser')
    tbl = bs.find(id='recording-list')
    # Playon renders a single placeholder row when there is nothing recorded.
    if len(tbl) == 1 and tbl.findChild().text == 'You currently have no recordings to download':
        return []
    download_list = []
    for row in tbl:
        # Look at each recorded object. Is it already saved? if not, download
        cols = row.find_all('td')
        pv = PlayonVideo(cols)
        needToDownload = True
        # Recursively search for the expected file in previously downloaded & handled
        if pv.VideoType == "Movie":
            for root, subFolders, files in os.walk(g_paths['playonroot']):
                if pv.Provider.lower() in root.lower():
                    for file in files:
                        if pv.Title.lower() == os.path.splitext(file.lower())[0]:
                            logging.debug(pv.Title + ' should already be available in plex.')
                            needToDownload = False
                            # NOTE: this break leaves only the inner files loop;
                            # os.walk keeps scanning the remaining directories.
                            break
        elif pv.VideoType == "TvShow":
            for root, subFolders, files in os.walk(g_paths['tvroot']):
                if pv.ShowTitle.lower() in root.lower():
                    for file in files:
                        if pv.Title.lower() == os.path.splitext(file.lower())[0]:
                            logging.debug(pv.Title + ' should already be available in plex.')
                            needToDownload = False
                            break
        # Recursively search for the expected file in active downloads
        for root, subFolders, files in os.walk(g_paths['downloadfolder']):
            for file in files:
                fnameLow = os.path.splitext(file.lower())[0]
                if pv.Title.lower() == fnameLow:
                    # File is downloaded (or downloading). We will add it to file mgmt list
                    # incase previous execution crashed, but no need to download a 2nd time
                    logging.debug(pv.Title + ' is already being downloaded.')
                    needToDownload = False
                    download_list.append(pv)
                    break
        if needToDownload:
            logging.info('Want to download: ' + pv.Title)
            download_list.append(pv)
    logging.debug('Required downloads queued')
    # What if someone downloads both versions of "Beauty and the beast" at the same time?
    # Later duplicates get ' (1)', ' (2)', ... suffixes so filenames stay unique.
    for i in range(len(download_list)):
        duplicate_count = 0
        for j in range(i+1, len(download_list)):
            if download_list[i].Title == download_list[j].Title:
                duplicate_count += 1
                download_list[j].Title += " (" + str(duplicate_count) + ")"
    return download_list
def SortPvByExpiration(e):
    """Sort key for PlayonVideo objects: orders earliest-expiring first with list.sort()."""
    return getattr(e, 'Expires')
def DownloadVideos(driver, download_list):
    """Queue downloads on the Playon page, bounded by concurrency and a nightly deadline.

    Videos are processed earliest-expiring first. Items already finished on
    disk are moved into plex immediately; items mid-download are tracked but
    not re-queued. New downloads are started while either (a) something
    expires within two days, or (b) there is still time before the configured
    morning cutoff.

    Fix: the failure path used ``raise "..."`` — raising a str is a TypeError
    in Python 3, masking the real error. It now raises RuntimeError.
    """
    from selenium.webdriver.common.by import By
    from datetime import datetime
    from datetime import timedelta
    import time
    logging.debug('Entering DownloadVideos')
    # Sort so earliest expiring video is queued for download
    download_list.sort(key=SortPvByExpiration)
    min_downloads_required = 0
    # Set the stop time to 2hr before work day. This is hopefully enough time to finish queue
    stop_queue_time = datetime.today() + timedelta(days=1)
    stop_queue_time = stop_queue_time.replace(hour=int(g_settings['morningstoptime']) - 2, minute=0)
    finished, downloading_list = GetFinishedDownloads(download_list)
    if len(finished) > 0:
        logging.info('Count of old downloads to be moved into plex: ' + str(len(finished)))
        MoveDownloadsToPlayonFolder(finished)
        for item in finished:
            if item in download_list:
                download_list.remove(item)
    for item in downloading_list:
        download_list.remove(item) # Remove from download_list since it's now tracked by inprogress
    # Force download any item expiring soon (Playon holds for about a week, so 2 days until expirey means we are behind schedule)
    for item in download_list:
        if item.Expires < datetime.today() + timedelta(days=2):
            min_downloads_required += 1
    while True:
        # Should queue another download if:
        # 1) We have stuff expiring soon and can't delay, OR
        # 2) there is time for it to finish
        await_all = False
        if len(download_list) > 0 and (min_downloads_required > 0 or datetime.today() < stop_queue_time):
            while len(downloading_list) < g_settings.getint('maxconcurrentdownloads', 5) and len(download_list) > 0:
                next_video = download_list.pop(0)
                downloadBtn = driver.find_element(By.ID, next_video.DownloadButtonId)
                downloadBtn.click()
                min_downloads_required -= 1
                downloading_list.append(next_video)
                # Give a few seconds for download to start
                time.sleep(5)
        elif len(downloading_list) > 0:
            # Need to let other downloads finish. However, we have nothing left to download so we should switch to "await all"
            logging.debug('No downloads left to queue, just need to await remaining')
            await_all = True
        else:
            # We have no active downloads, and no required downloads / time left
            logging.debug('All active downloads finished, and nothing left to queue for download (or out of time)')
            break
        # Wait for a download to finish, then move into appropriate folder structure
        logging.debug('Active download count: ' + str(len(downloading_list)))
        finished = WaitForDownloads(driver, downloading_list, await_all)
        logging.info('Finished ' + str(len(finished)) + ' downloads')
        if len(finished) == 0:
            logging.critical("WaitForDownloads returned with nothing finished in Download Videos! This should never be the case, so failing to prevent explosion of log")
            # BUGFIX: raising a bare string is invalid in Python 3.
            raise RuntimeError("DownloadVideos failed to wait")
        MoveDownloadsToPlayonFolder(finished)
        for item in finished:
            if item in downloading_list:
                downloading_list.remove(item)
    logging.debug('Exiting DownloadVideos')
def GetFinishedDownloads(download_list):
    """Scan the Chrome download folder and split *download_list* into (finished, in-progress).

    Chrome names an in-flight download 'Title.mp4.crdownload'; stripping both
    suffixes recovers the queued video's title. Returns two lists of the
    PlayonVideo objects found on disk: fully downloaded and still downloading.

    Fix: the original indexed ``video_map[fnameLow]`` / ``video_map[inProgName]``
    after an *or* membership test, so a file matching only one of the two keys
    could raise KeyError; each branch now checks its own key.
    """
    finished_downloads = []
    inprogress = []
    # Map lower-cased titles to their PlayonVideo for O(1) lookup per file.
    video_map = {video.Title.lower(): video for video in download_list}
    for filename in os.listdir(g_paths['downloadfolder']):
        fnameLow, extension = os.path.splitext(filename.lower())
        # 'title.mp4.crdownload' -> fnameLow 'title.mp4' -> inProgName 'title'
        inProgName = os.path.splitext(fnameLow)[0]
        if extension == '.crdownload':
            if inProgName in video_map:
                inprogress.append(video_map[inProgName])
        elif fnameLow in video_map:
            # No .crdownload suffix left: the download has completed.
            finished_downloads.append(video_map[fnameLow])
    return finished_downloads, inprogress
def WaitForDownloads(driver, download_list, await_all):
    """Poll the download folder until downloads in *download_list* finish.

    Returns the finished PlayonVideo objects: all of them when everything is
    done, the first finished batch when *await_all* is False, or [] when no
    download is in progress at all. *driver* is unused here but kept for
    signature symmetry with the other selenium helpers.
    """
    # Would be a better method than just raw sleep, but ... too much effort
    # https://newbedev.com/selenium-python-waiting-for-a-download-process-to-complete-using-chrome-web-driver
    import os, time
    if len(download_list) == 0:
        return []
    logging.debug('Entering WaitForDownloads, waiting on: ' + str([playon.Title for playon in download_list]))
    infinite_loop = True
    while infinite_loop:
        finished_downloads, inprogress = GetFinishedDownloads(download_list)
        if len(finished_downloads) > 0:
            if len(finished_downloads) == len(download_list):
                logging.info('All Downloads complete!')
                return finished_downloads
            if not await_all:
                logging.debug("Returning downloads that finished (since not awaiting all)")
                return finished_downloads
        if len(inprogress) == 0:
            # No downloads in progress
            logging.debug("No in progress downloads, returning")
            return []
        # Re-check the folder every 30 seconds.
        time.sleep(30)
def GetMovieData(name):
    """Search IMDB for *name* and return the .data dict of the exact-title match, else None.

    The ':' -> '_' replacement mirrors how CreateRightName sanitizes movie
    titles, so the comparison is made in the same namespace.
    """
    import imdb
    client = imdb.IMDb()
    for candidate in client.search_movie(name):
        if candidate.data['title'].replace(':', '_') == name:
            return candidate.data
    return None
def MoveDownloadsToPlayonFolder(download_list):
    """Route each finished download to the movie or TV mover based on its VideoType."""
    movies, tv_shows = [], []
    for video in download_list:
        if video.VideoType == "Movie":
            movies.append(video)
        elif video.VideoType == "TvShow":
            tv_shows.append(video)
        else:
            # Unrecognized type: log it and leave the file in the download folder.
            logging.error("Unknown video type: " + video.VideoType)
    MoveMoviesToPlayonFolder(movies)
    MoveTvShowsToPlayonFolder(tv_shows)
def MoveMoviesToPlayonFolder(download_list):
    """Wrap each downloaded movie in a 'Title (Year)' folder and move it under its provider.

    The release year comes from IMDB; when lookup fails or finds nothing, the
    current year is assumed and a Guesswork.txt note is left in the folder.

    Fix: ``movie_date = []`` was a typo — ``movie_data`` was never initialized,
    so an exception inside GetMovieData left it undefined and the later
    ``if not movie_data:`` raised NameError. Bare ``except:`` clauses are also
    narrowed to ``except Exception`` so Ctrl-C still interrupts.
    """
    import os, shutil, re, time
    from datetime import date
    if len(download_list) == 0:
        return
    # Correct file might look something like #_Title.mp4, but just in case it's only Title.mp4, this will still match
    playonFileRe = re.compile('\d*_?(.*)\.mp4')
    # Make sure a provider ('hbo max', 'disney plus') folder exists for all recorded videos
    for video in download_list:
        src_path = os.path.join(g_paths['playonroot'], video.Provider)
        if not exists(src_path):
            os.makedirs(src_path)
    # Iterate through download folder looking for our new videos
    for file in os.listdir(g_paths['downloadfolder']):
        results = re.match(playonFileRe, file)
        if not results:
            continue
        title = results[1]
        for video in download_list:
            if title == video.Title:
                # Create proper folder with name + year (if movie)
                logging.info('Attempting to move download (' + title + ') to appropriate folder')
                year = ''
                movie_data = None  # BUGFIX: was 'movie_date = []' — left movie_data undefined on exception
                try:
                    movie_data = GetMovieData(title)
                    if movie_data:
                        year = str(movie_data['year'])
                    else:
                        # Video is so new (or so under reported) it doesn't have a year yet. We'll try the
                        # current year, but can always come back later and fix if it isn't appearing correctly
                        year = str(date.today().year)
                except Exception:
                    logging.error('Exception generated from imdb! Defaulting to current year I guess')
                    year = str(date.today().year)
                folder_title = title + ' (' + year + ')'
                movie_folder = os.path.join(g_paths['downloadfolder'], folder_title)
                os.mkdir(movie_folder)
                if not movie_data:
                    logging.warning('Unable to find ' + title + ' on IMDB :( ')
                    f = open(os.path.join(movie_folder, 'Guesswork.txt'), mode='x')
                    f.write("Couldn't find the file in IMDB. Chose to assume it is " + year + ", but if not the case, please correct!")
                    f.close()
                # Move the downloaded file into it's corresponding movie folder
                shutil.move(os.path.join(g_paths['downloadfolder'], file), movie_folder)
                # Move the movie folder to the playon subdirectory for that provider
                final_location = os.path.join(g_paths['playonroot'], video.Provider, folder_title)
                # What if someone records "Beauty and the Beast" and then also recorded the live-action version?
                attempt_count = 0
                failed = True
                true_location = ""
                while failed:
                    try:
                        true_location = final_location if attempt_count == 0 else final_location + "_" + str(attempt_count)
                        shutil.move(movie_folder, true_location)
                        failed = False
                    except Exception:
                        # NOTE(review): a persistent failure (e.g. permissions)
                        # loops forever here — consider capping attempt_count.
                        attempt_count += 1
                logging.info(movie_folder + ' => ' + true_location)
                break
def MoveTvShowsToPlayonFolder(download_list):
    """Move each downloaded episode into <tvroot>/<Provider>/<ShowTitle>/Season NN/.

    Matches files in the download folder against the queued PlayonVideo titles
    and creates any missing show/season directories before moving.
    """
    import os, shutil, re, time
    from datetime import date
    if len(download_list) == 0:
        return
    # Correct file might look something like #_Title.mp4, but just in case it's only Title.mp4, this will still match
    playonFileRe = re.compile('\d*_?(.*)\.mp4')
    # Components specific to tv episodes:
    # video.ShowTitle
    # video.Season
    # video.Episode
    # video.EpisodeTitle
    # Iterate through download folder looking for our new videos
    for file in os.listdir(g_paths['downloadfolder']):
        results = re.match(playonFileRe, file)
        if not results:
            continue
        title = results[1]
        for video in download_list:
            if title == video.Title:
                # Create proper folder
                logging.info('Attempting to move download (' + title + ') to appropriate folder')
                final_show_path = os.path.join(g_paths['tvroot'], video.Provider, video.ShowTitle)
                final_season_path = os.path.join(final_show_path, 'Season ' + video.Season)
                orig_file_path = os.path.join(g_paths['downloadfolder'], file)
                if not os.path.exists(final_season_path):
                    logging.debug('Something missing in path, creating: ' + final_season_path)
                    os.makedirs(final_season_path)
                logging.info(orig_file_path + ' => ' + final_season_path)
                shutil.move(orig_file_path, final_season_path)
                break
def GenerateDownloadList():
    """Rebuild a PlayonVideo list from files already sitting in the download folder.

    Used for manual recovery when downloads completed but were never filed
    (see the commented-out calls under __main__).
    """
    import os, re
    pattern = re.compile("\d*_(.*)\.mp4")
    rebuilt = []
    for entry in os.listdir(g_paths["downloadfolder"]):
        matched = pattern.match(entry)
        if not matched:
            continue
        video = PlayonVideo()
        video.CreateRightName(matched[1])
        rebuilt.append(video)
    return rebuilt
def main():
    """Log into Playon, download any new recordings, and file them into plex.

    Fix: the two bare ``except:`` clauses also swallowed KeyboardInterrupt and
    SystemExit; they are narrowed to ``except Exception``, and the top-level
    handler now uses ``logging.exception`` so the traceback is recorded.
    """
    from selenium import webdriver
    import subprocess
    # Some modules (looking at you webdriver) are exceptionally noisy for debug purposes
    # this sets them to only log if warning or higher
    for log_name in logging.Logger.manager.loggerDict:
        if log_name != __name__:
            logging.getLogger(log_name).setLevel(logging.WARNING)
    driver = {}
    try:
        # Prefer a chromedriver on PATH; fall back to the configured location.
        driver = webdriver.Chrome()
    except Exception:
        driver = webdriver.Chrome(g_paths['chromewebdriver'])
    try:
        LogInToPlayon(driver)
        dl = CheckForNewVideos(driver)
        if len(dl) > 0:
            DownloadVideos(driver, dl)
            WaitForDownloads(driver, dl, True)
            MoveDownloadsToPlayonFolder(dl)
            # Eventually playon will pickup these changes.. but why wait?
            subprocess.run([g_paths['mediascanner'], '--scan'])
            logging.info('Finished sucessfully! Just need to cleanup')
        else:
            logging.info('No videos to download today.')
    except Exception:
        # logging.exception records the traceback, unlike the old plain message.
        logging.exception('Some kind of fatal exception caught by main!')
    finally:
        driver.close()
if __name__ == '__main__':
    # Manual-recovery helpers, normally left disabled:
    #dl = GenerateDownloadList()
    #MoveDownloadsToPlayonFolder(dl)
    main()
"logging.getLogger",
"logging.debug",
"re.compile",
"time.sleep",
"datetime.datetime.today",
"datetime.timedelta",
"logging.info",
"logging.error",
"os.walk",
"os.path.exists",
"os.listdir",
"shutil.move",
"subprocess.run",
"os.mkdir",
"logging.critical",
"logging.Logger.manager.logger... | [((3139, 3178), 'logging.debug', 'logging.debug', (['"""Entering LogInToPlayon"""'], {}), "('Entering LogInToPlayon')\n", (3152, 3178), False, 'import logging, os, configparser\n'), ((3307, 3321), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (3317, 3321), False, 'import os, shutil, re, time\n'), ((3449, 3462), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3459, 3462), False, 'import os, shutil, re, time\n'), ((3598, 3611), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3608, 3611), False, 'import os, shutil, re, time\n'), ((3689, 3703), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (3699, 3703), False, 'import os, shutil, re, time\n'), ((3760, 3822), 'logging.debug', 'logging.debug', (['"""Exiting LogInToPlayon (and presuming success)"""'], {}), "('Exiting LogInToPlayon (and presuming success)')\n", (3773, 3822), False, 'import logging, os, configparser\n'), ((3912, 3969), 'bs4.BeautifulSoup', 'BeautifulSoup', (['driver.page_source'], {'features': '"""html.parser"""'}), "(driver.page_source, features='html.parser')\n", (3925, 3969), False, 'from bs4 import BeautifulSoup\n'), ((6200, 6242), 'logging.debug', 'logging.debug', (['"""Required downloads queued"""'], {}), "('Required downloads queued')\n", (6213, 6242), False, 'import logging, os, configparser\n'), ((6905, 6945), 'logging.debug', 'logging.debug', (['"""Entering DownloadVideos"""'], {}), "('Entering DownloadVideos')\n", (6918, 6945), False, 'import logging, os, configparser\n'), ((10134, 10173), 'logging.debug', 'logging.debug', (['"""Exiting DownloadVideos"""'], {}), "('Exiting DownloadVideos')\n", (10147, 10173), False, 'import logging, os, configparser\n'), ((10426, 10463), 'os.listdir', 'os.listdir', (["g_paths['downloadfolder']"], {}), "(g_paths['downloadfolder'])\n", (10436, 10463), False, 'import os, re\n'), ((12143, 12154), 'imdb.IMDb', 'imdb.IMDb', ([], {}), '()\n', (12152, 12154), False, 'import imdb\n'), ((13051, 13081), 
're.compile', 're.compile', (['"""\\\\d*_?(.*)\\\\.mp4"""'], {}), "('\\\\d*_?(.*)\\\\.mp4')\n", (13061, 13081), False, 'import re\n'), ((13433, 13470), 'os.listdir', 'os.listdir', (["g_paths['downloadfolder']"], {}), "(g_paths['downloadfolder'])\n", (13443, 13470), False, 'import os, re\n'), ((16400, 16430), 're.compile', 're.compile', (['"""\\\\d*_?(.*)\\\\.mp4"""'], {}), "('\\\\d*_?(.*)\\\\.mp4')\n", (16410, 16430), False, 'import re\n'), ((16651, 16688), 'os.listdir', 'os.listdir', (["g_paths['downloadfolder']"], {}), "(g_paths['downloadfolder'])\n", (16661, 16688), False, 'import os, re\n'), ((17878, 17907), 're.compile', 're.compile', (['"""\\\\d*_(.*)\\\\.mp4"""'], {}), "('\\\\d*_(.*)\\\\.mp4')\n", (17888, 17907), False, 'import re\n'), ((17919, 17956), 'os.listdir', 'os.listdir', (["g_paths['downloadfolder']"], {}), "(g_paths['downloadfolder'])\n", (17929, 17956), False, 'import os, re\n'), ((18383, 18424), 'logging.Logger.manager.loggerDict.items', 'logging.Logger.manager.loggerDict.items', ([], {}), '()\n', (18422, 18424), False, 'import logging, os, configparser\n'), ((912, 948), 'configparser.ExtendedInterpolation', 'configparser.ExtendedInterpolation', ([], {}), '()\n', (946, 948), False, 'import logging, os, configparser\n'), ((1177, 1234), 'os.path.join', 'os.path.join', (["g_paths['mediaroot']", "g_settings['logfile']"], {}), "(g_paths['mediaroot'], g_settings['logfile'])\n", (1189, 1234), False, 'import os, re\n'), ((2156, 2190), 'datetime.datetime.strptime', 'datetime.strptime', (['tm', '"""%b %d, %Y"""'], {}), "(tm, '%b %d, %Y')\n", (2173, 2190), False, 'from datetime import datetime\n'), ((2272, 2318), 're.compile', 're.compile', (['"""(.*)([Ss]\\\\d{2})([Ee]\\\\d{2})(.*)"""'], {}), "('(.*)([Ss]\\\\d{2})([Ee]\\\\d{2})(.*)')\n", (2282, 2318), False, 'import re\n'), ((2342, 2368), 're.match', 're.match', (['tv_filter', 'title'], {}), '(tv_filter, title)\n', (2350, 2368), False, 'import re\n'), ((5518, 5552), 'os.walk', 'os.walk', 
(["g_paths['downloadfolder']"], {}), "(g_paths['downloadfolder'])\n", (5525, 5552), False, 'import os, re\n'), ((7207, 7223), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (7221, 7223), False, 'from datetime import datetime\n'), ((7226, 7243), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (7235, 7243), False, 'from datetime import timedelta\n'), ((12078, 12092), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (12088, 12092), False, 'import os, shutil, re, time\n'), ((13228, 13279), 'os.path.join', 'os.path.join', (["g_paths['playonroot']", 'video.Provider'], {}), "(g_paths['playonroot'], video.Provider)\n", (13240, 13279), False, 'import os, re\n'), ((13490, 13518), 're.match', 're.match', (['playonFileRe', 'file'], {}), '(playonFileRe, file)\n', (13498, 13518), False, 'import re\n'), ((16708, 16736), 're.match', 're.match', (['playonFileRe', 'file'], {}), '(playonFileRe, file)\n', (16716, 16736), False, 'import re\n'), ((17970, 17987), 're.match', 're.match', (['comp', 'f'], {}), '(comp, f)\n', (17978, 17987), False, 'import re\n'), ((18572, 18590), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (18588, 18590), False, 'from selenium import webdriver\n'), ((4524, 4554), 'os.walk', 'os.walk', (["g_paths['playonroot']"], {}), "(g_paths['playonroot'])\n", (4531, 4554), False, 'import os, re\n'), ((6112, 6157), 'logging.info', 'logging.info', (["('Want to download: ' + pv.Title)"], {}), "('Want to download: ' + pv.Title)\n", (6124, 6157), False, 'import logging, os, configparser\n'), ((9750, 9917), 'logging.critical', 'logging.critical', (['"""WaitForDownloads returned with nothing finished in Download Videos! This should never be the case, so failing to prevent explosion of log"""'], {}), "(\n 'WaitForDownloads returned with nothing finished in Download Videos! 
This should never be the case, so failing to prevent explosion of log'\n )\n", (9766, 9917), False, 'import logging, os, configparser\n'), ((10551, 10577), 'os.path.splitext', 'os.path.splitext', (['fnameLow'], {}), '(fnameLow)\n', (10567, 10577), False, 'import os, re\n'), ((11995, 12047), 'logging.debug', 'logging.debug', (['"""No in progress downloads, returning"""'], {}), "('No in progress downloads, returning')\n", (12008, 12047), False, 'import logging, os, configparser\n'), ((13295, 13311), 'genericpath.exists', 'exists', (['src_path'], {}), '(src_path)\n', (13301, 13311), False, 'from genericpath import exists\n'), ((13325, 13346), 'os.makedirs', 'os.makedirs', (['src_path'], {}), '(src_path)\n', (13336, 13346), False, 'import os, re\n'), ((18620, 18664), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (["g_paths['chromewebdriver']"], {}), "(g_paths['chromewebdriver'])\n", (18636, 18664), False, 'from selenium import webdriver\n'), ((18985, 19036), 'subprocess.run', 'subprocess.run', (["[g_paths['mediascanner'], '--scan']"], {}), "([g_paths['mediascanner'], '--scan'])\n", (18999, 19036), False, 'import subprocess\n'), ((19049, 19107), 'logging.info', 'logging.info', (['"""Finished sucessfully! Just need to cleanup"""'], {}), "('Finished sucessfully! 
Just need to cleanup')\n", (19061, 19107), False, 'import logging, os, configparser\n'), ((19134, 19178), 'logging.info', 'logging.info', (['"""No videos to download today."""'], {}), "('No videos to download today.')\n", (19146, 19178), False, 'import logging, os, configparser\n'), ((19199, 19260), 'logging.error', 'logging.error', (['"""Some kind of fatal exception caught by main!"""'], {}), "('Some kind of fatal exception caught by main!')\n", (19212, 19260), False, 'import logging, os, configparser\n'), ((4994, 5020), 'os.walk', 'os.walk', (["g_paths['tvroot']"], {}), "(g_paths['tvroot'])\n", (5001, 5020), False, 'import os, re\n'), ((8028, 8044), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (8042, 8044), False, 'from datetime import datetime\n'), ((8047, 8064), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (8056, 8064), False, 'from datetime import timedelta\n'), ((8866, 8879), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (8876, 8879), False, 'import os, shutil, re, time\n'), ((9059, 9132), 'logging.debug', 'logging.debug', (['"""No downloads left to queue, just need to await remaining"""'], {}), "('No downloads left to queue, just need to await remaining')\n", (9072, 9132), False, 'import logging, os, configparser\n'), ((9269, 9382), 'logging.debug', 'logging.debug', (['"""All active downloads finished, and nothing left to queue for download (or out of time)"""'], {}), "(\n 'All active downloads finished, and nothing left to queue for download (or out of time)'\n )\n", (9282, 9382), False, 'import logging, os, configparser\n'), ((11665, 11704), 'logging.info', 'logging.info', (['"""All Downloads complete!"""'], {}), "('All Downloads complete!')\n", (11677, 11704), False, 'import logging, os, configparser\n'), ((11793, 11868), 'logging.debug', 'logging.debug', (['"""Returning downloads that finished (since not awaiting all)"""'], {}), "('Returning downloads that finished (since not awaiting all)')\n", (11806, 
11868), False, 'import logging, os, configparser\n'), ((12616, 12671), 'logging.error', 'logging.error', (["('Unknown video type: ' + video.VideoType)"], {}), "('Unknown video type: ' + video.VideoType)\n", (12629, 12671), False, 'import logging, os, configparser\n'), ((13747, 13832), 'logging.info', 'logging.info', (["('Attempting to move download (' + title + ') to appropriate folder')"], {}), "('Attempting to move download (' + title +\n ') to appropriate folder')\n", (13759, 13832), False, 'import logging, os, configparser\n'), ((14629, 14682), 'os.path.join', 'os.path.join', (["g_paths['downloadfolder']", 'folder_title'], {}), "(g_paths['downloadfolder'], folder_title)\n", (14641, 14682), False, 'import os, re\n'), ((14699, 14721), 'os.mkdir', 'os.mkdir', (['movie_folder'], {}), '(movie_folder)\n', (14707, 14721), False, 'import os, re\n'), ((15380, 15445), 'os.path.join', 'os.path.join', (["g_paths['playonroot']", 'video.Provider', 'folder_title'], {}), "(g_paths['playonroot'], video.Provider, folder_title)\n", (15392, 15445), False, 'import os, re\n'), ((16028, 16079), 'logging.info', 'logging.info', (["(movie_folder + ' => ' + true_location)"], {}), "(movie_folder + ' => ' + true_location)\n", (16040, 16079), False, 'import logging, os, configparser\n'), ((16938, 17023), 'logging.info', 'logging.info', (["('Attempting to move download (' + title + ') to appropriate folder')"], {}), "('Attempting to move download (' + title +\n ') to appropriate folder')\n", (16950, 17023), False, 'import logging, os, configparser\n'), ((17054, 17118), 'os.path.join', 'os.path.join', (["g_paths['tvroot']", 'video.Provider', 'video.ShowTitle'], {}), "(g_paths['tvroot'], video.Provider, video.ShowTitle)\n", (17066, 17118), False, 'import os, re\n'), ((17155, 17210), 'os.path.join', 'os.path.join', (['final_show_path', "('Season ' + video.Season)"], {}), "(final_show_path, 'Season ' + video.Season)\n", (17167, 17210), False, 'import os, re\n'), ((17261, 17306), 'os.path.join', 
'os.path.join', (["g_paths['downloadfolder']", 'file'], {}), "(g_paths['downloadfolder'], file)\n", (17273, 17306), False, 'import os, re\n'), ((17562, 17619), 'logging.info', 'logging.info', (["(orig_file_path + ' => ' + final_season_path)"], {}), "(orig_file_path + ' => ' + final_season_path)\n", (17574, 17619), False, 'import logging, os, configparser\n'), ((17636, 17682), 'shutil.move', 'shutil.move', (['orig_file_path', 'final_season_path'], {}), '(orig_file_path, final_season_path)\n', (17647, 17682), False, 'import os, shutil, re, time\n'), ((5900, 5957), 'logging.debug', 'logging.debug', (["(pv.Title + ' is already being downloaded.')"], {}), "(pv.Title + ' is already being downloaded.')\n", (5913, 5957), False, 'import logging, os, configparser\n'), ((8369, 8385), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (8383, 8385), False, 'from datetime import datetime\n'), ((14778, 14837), 'logging.warning', 'logging.warning', (["('Unable to find ' + title + ' on IMDB :( ')"], {}), "('Unable to find ' + title + ' on IMDB :( ')\n", (14793, 14837), False, 'import logging, os, configparser\n'), ((15200, 15245), 'os.path.join', 'os.path.join', (["g_paths['downloadfolder']", 'file'], {}), "(g_paths['downloadfolder'], file)\n", (15212, 15245), False, 'import os, re\n'), ((17348, 17381), 'os.path.exists', 'os.path.exists', (['final_season_path'], {}), '(final_season_path)\n', (17362, 17381), False, 'import os, re\n'), ((17403, 17477), 'logging.debug', 'logging.debug', (["('Something missing in path, creating: ' + final_season_path)"], {}), "('Something missing in path, creating: ' + final_season_path)\n", (17416, 17477), False, 'import logging, os, configparser\n'), ((17498, 17528), 'os.makedirs', 'os.makedirs', (['final_season_path'], {}), '(final_season_path)\n', (17509, 17528), False, 'import os, re\n'), ((18471, 18498), 'logging.getLogger', 'logging.getLogger', (['log_name'], {}), '(log_name)\n', (18488, 18498), False, 'import logging, os, 
configparser\n'), ((14395, 14482), 'logging.error', 'logging.error', (['"""Exception generated from imdb! Defaulting to current year I guess"""'], {}), "(\n 'Exception generated from imdb! Defaulting to current year I guess')\n", (14408, 14482), False, 'import logging, os, configparser\n'), ((14868, 14911), 'os.path.join', 'os.path.join', (['movie_folder', '"""Guesswork.txt"""'], {}), "(movie_folder, 'Guesswork.txt')\n", (14880, 14911), False, 'import os, re\n'), ((15861, 15901), 'shutil.move', 'shutil.move', (['movie_folder', 'true_location'], {}), '(movie_folder, true_location)\n', (15872, 15901), False, 'import os, shutil, re, time\n'), ((4761, 4826), 'logging.debug', 'logging.debug', (["(pv.Title + ' should already be available in plex.')"], {}), "(pv.Title + ' should already be available in plex.')\n", (4774, 4826), False, 'import logging, os, configparser\n'), ((5228, 5293), 'logging.debug', 'logging.debug', (["(pv.Title + ' should already be available in plex.')"], {}), "(pv.Title + ' should already be available in plex.')\n", (5241, 5293), False, 'import logging, os, configparser\n'), ((14332, 14344), 'datetime.date.today', 'date.today', ([], {}), '()\n', (14342, 14344), False, 'from datetime import date\n'), ((14509, 14521), 'datetime.date.today', 'date.today', ([], {}), '()\n', (14519, 14521), False, 'from datetime import date\n')] |
import numpy as np
import torch
class ModuleMixin(object):
    """
    Mixin that adds convenience helpers to a ``torch.nn.Module`` subclass.
    """

    def number_of_parameters(self, trainable=True):
        """
        Count this module's parameters.

        Args:
            trainable (bool): if True, only count parameters that require
                gradients. Defaults to True.

        Returns:
            int: total number of (trainable) parameters
        """
        return number_of_parameters(self, trainable=trainable)
def number_of_parameters(model, trainable=True):
    """
    Returns the number of parameters in a torch module.

    Args:
        model (torch.nn.Module): module to inspect
        trainable (bool): if True, count only parameters that require
            gradients. Defaults to True.

    Returns:
        int: total number of (trainable) parameter elements

    Example:
        >>> import netharn as nh
        >>> model = nh.models.ToyNet2d()
        >>> number_of_parameters(model)
        824
    """
    if trainable:
        model_parameters = (p for p in model.parameters() if p.requires_grad)
    else:
        model_parameters = model.parameters()
    # Tensor.numel() gives the element count directly; no need to route
    # the shape through numpy just to multiply it out.
    return sum(p.numel() for p in model_parameters)
class grad_context(object):
    """
    Context manager that toggles autograd on or off for the enclosed block.

    On torch versions older than 0.4 (which lack ``set_grad_enabled``) the
    manager silently degrades to a no-op.
    """

    def __init__(self, flag):
        version = tuple(map(int, torch.__version__.split('.')[0:2]))
        if version < (0, 4):
            # Legacy torch: grad toggling is unsupported, remember nothing.
            self.prev = None
        else:
            # Remember the current autograd state so it can be restored.
            self.prev = torch.is_grad_enabled()
        self.flag = flag

    def __enter__(self):
        if self.prev is not None:
            torch.set_grad_enabled(self.flag)

    def __exit__(self, *args):
        if self.prev is not None:
            torch.set_grad_enabled(self.prev)
        return False
class DisableBatchNorm(object):
    """
    Context manager that puts every batch-norm layer of ``model`` into
    eval mode for the duration of the ``with`` block, then restores each
    layer's original ``training`` flag on exit.

    Args:
        model (torch.nn.Module): module whose batch-norm layers to freeze
        enabled (bool): if False the manager is a no-op. Defaults to True.
    """

    def __init__(self, model, enabled=True):
        self.model = model
        self.enabled = enabled
        self.previous_state = None

    def _batchnorm_layers(self):
        # Yield (name, layer) for every batch-norm layer in the model.
        for name, layer in trainable_layers(self.model, names=True):
            if isinstance(layer, torch.nn.modules.batchnorm._BatchNorm):
                yield name, layer

    def __enter__(self):
        if self.enabled:
            self.previous_state = {}
            for name, layer in self._batchnorm_layers():
                self.previous_state[name] = layer.training
                layer.training = False
        return self

    def __exit__(self, *args):
        if self.previous_state:
            # Only batch-norm layers were recorded, so restoring over the
            # batch-norm iterator touches exactly the saved layers.
            for name, layer in self._batchnorm_layers():
                if name in self.previous_state:
                    layer.training = self.previous_state[name]
def trainable_layers(model, names=False):
    """
    Iterate over the layers of ``model`` that carry trainable parameters.

    A layer qualifies if it is a convolution, a batch-norm layer, or any
    module that defines ``reset_parameters``.

    Args:
        model (torch.nn.Module): module to traverse
        names (bool): if True, yield ``(dotted_name, layer)`` pairs via a
            depth-first traversal; otherwise yield bare layers via a
            breadth-first traversal. Defaults to False.

    Yields:
        torch.nn.Module | Tuple[str, torch.nn.Module]

    Example:
        >>> import torchvision
        >>> model = torchvision.models.AlexNet()
        >>> list(trainable_layers(model, names=True))
    """
    def _is_trainable(layer):
        # Single definition of "trainable layer" so the two traversal
        # branches below cannot drift apart.
        return (isinstance(layer, torch.nn.modules.conv._ConvNd) or
                isinstance(layer, torch.nn.modules.batchnorm._BatchNorm) or
                hasattr(layer, 'reset_parameters'))

    if names:
        # Depth-first traversal tracking the dotted module path.
        stack = [('', '', model)]
        while stack:
            prefix, basename, item = stack.pop()
            name = '.'.join([p for p in [prefix, basename] if p])
            if _is_trainable(item):
                yield name, item
            # Push children reversed so they pop in declaration order.
            for child_basename, child_item in list(item.named_children())[::-1]:
                stack.append((name, child_basename, child_item))
    else:
        # Breadth-first traversal (matches the historic yield order).
        queue = [model]
        while queue:
            item = queue.pop(0)
            if _is_trainable(item):
                yield item
            for child in item.children():
                queue.append(child)
def one_hot_embedding(labels, num_classes, dtype=None):
    """
    Embedding labels to one-hot form.

    Args:
        labels: (LongTensor or ndarray) class labels, sized [N,].
        num_classes: (int) number of classes.
        dtype: optional dtype of the result. Defaults to float64 for
            numpy inputs and ``torch.float`` for tensor inputs.

    Returns:
        (tensor or ndarray) encoded labels, sized [N, #classes].

    References:
        https://discuss.pytorch.org/t/convert-int-into-one-hot-format/507/4

    CommandLine:
        python -m netharn.loss one_hot_embedding

    Example:
        >>> # each element in target has to have 0 <= value < C
        >>> labels = torch.LongTensor([0, 0, 1, 4, 2, 3])
        >>> num_classes = max(labels) + 1
        >>> t = one_hot_embedding(labels, num_classes)
        >>> assert all(row[y] == 1 for row, y in zip(t.numpy(), labels.numpy()))
        >>> import ubelt as ub
        >>> print(ub.repr2(t.numpy().tolist()))
        [
            [1.0, 0.0, 0.0, 0.0, 0.0],
            [1.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 1.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 1.0],
            [0.0, 0.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 1.0, 0.0],
        ]
        >>> t2 = one_hot_embedding(labels.numpy(), num_classes)
        >>> assert np.all(t2 == t.numpy())
        >>> if torch.cuda.is_available():
        >>>     t3 = one_hot_embedding(labels.to(0), num_classes)
        >>>     assert np.all(t3.cpu().numpy() == t.numpy())
    """
    if isinstance(labels, np.ndarray):
        # BUG FIX: ``np.float`` was removed in numpy>=1.20; the builtin
        # ``float`` is the same float64 default it used to alias.
        dtype = dtype or float
        y = np.eye(num_classes, dtype=dtype)
        y_onehot = y[labels]
    else:  # if torch.is_tensor(labels):
        dtype = dtype or torch.float
        y = torch.eye(num_classes, device=labels.device, dtype=dtype)
        y_onehot = y[labels]
    return y_onehot
def one_hot_lookup(probs, labels):
    """
    Return probability of a particular label (usually the true label) for
    each item.

    Each item in labels corresponds to a row in probs. Returns the value
    at the specified column of each row.

    Args:
        probs (ndarray): [N, C] array of per-class values
        labels (ndarray): [N,] integer column index for each row

    Returns:
        ndarray: [N,] the chosen entry of each row

    Example:
        >>> probs = np.array([
        >>>     [0, 1, 2],
        >>>     [3, 4, 5],
        >>>     [6, 7, 8],
        >>>     [9, 10, 11],
        >>> ])
        >>> labels = np.array([0, 1, 2, 1])
        >>> one_hot_lookup(probs, labels)
        array([ 0,  4,  8, 10])
    """
    # BUG FIX: ``np.bool`` was removed in numpy>=1.20; the builtin ``bool``
    # produces the same boolean mask dtype.
    return probs[np.eye(probs.shape[1], dtype=bool)[labels]]
| [
"numpy.eye",
"torch.__version__.split",
"torch.eye",
"torch.set_grad_enabled",
"torch.is_grad_enabled"
] | [((5383, 5415), 'numpy.eye', 'np.eye', (['num_classes'], {'dtype': 'dtype'}), '(num_classes, dtype=dtype)\n', (5389, 5415), True, 'import numpy as np\n'), ((5535, 5592), 'torch.eye', 'torch.eye', (['num_classes'], {'device': 'labels.device', 'dtype': 'dtype'}), '(num_classes, device=labels.device, dtype=dtype)\n', (5544, 5592), False, 'import torch\n'), ((1043, 1066), 'torch.is_grad_enabled', 'torch.is_grad_enabled', ([], {}), '()\n', (1064, 1066), False, 'import torch\n'), ((1168, 1201), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['self.flag'], {}), '(self.flag)\n', (1190, 1201), False, 'import torch\n'), ((1280, 1313), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['self.prev'], {}), '(self.prev)\n', (1302, 1313), False, 'import torch\n'), ((6180, 6217), 'numpy.eye', 'np.eye', (['probs.shape[1]'], {'dtype': 'np.bool'}), '(probs.shape[1], dtype=np.bool)\n', (6186, 6217), True, 'import numpy as np\n'), ((901, 929), 'torch.__version__.split', 'torch.__version__.split', (['"""."""'], {}), "('.')\n", (924, 929), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 16 22:08:33 2018
@author: <NAME>
"""
import urllib.request
import json
# BUG FIX: the original used ``.replace('', '+')``, which inserts a '+'
# between every character of the string (and mangled the API key too).
# urlencode handles spaces and all other special characters correctly.
# Also: the Places *Text Search* endpoint takes ``query=``, not ``input=``
# (``input`` belongs to Find Place From Text).
import urllib.parse

endpoint = 'https://maps.googleapis.com/maps/api/place/textsearch/json?'
search_key = input('What do you want to search for?: ')
api_key = input('Enter your API Key: ').strip()
search_req = urllib.parse.urlencode({'query': search_key, 'key': api_key})
request = endpoint + search_req
response = urllib.request.urlopen(request).read()
output = json.loads(response)
print(output)
"json.loads"
] | [((489, 509), 'json.loads', 'json.loads', (['response'], {}), '(response)\n', (499, 509), False, 'import json\n')] |
from Tokenizer import Tokenizer
from writer import MKDocsWriter
import json
import jsonutils
class Parser:
    """
    Recursive-descent parser for endpoint docstrings.

    Walks the token stream produced by ``Tokenizer`` and renders markdown
    documentation through ``MKDocsWriter``. All state (tokens, output
    file, writer, base path) is kept on the class itself, so every method
    is invoked on the class rather than on an instance.
    """

    def run(code, file, path):
        """Parse ``code`` and write the rendered markdown into ``file``.

        Args:
            code: raw docstring text to tokenize.
            file: open, writable file object receiving the markdown.
            path: base directory used to resolve schema-file references.
        """
        Parser.tokens = Tokenizer(code)
        Parser.file = file
        Parser.writer = MKDocsWriter()
        Parser.path = path
        ret = Parser.parseDocstring()
        return ret

    def parseDocstring():
        """Main loop: dispatch on the section titles until EOF."""
        while Parser.tokens.actual.type != 'EOF':
            if Parser.tokens.actual.value == 'Description':
                Parser.parseDescription()
                continue
            if Parser.tokens.actual.value == 'Parameters':
                Parser.parseParameters()
                continue
            if Parser.tokens.actual.value == 'Response':
                Parser.parseResponse()
                continue
            Parser.tokens.select_next()

    def parseParameters():
        """Parse the 'Parameters' section (Header / Body / Path subsections).

        Returns:
            dict: mapping of subsection name to its parsed contents.
        """
        parameters = {}
        Parser.tokens.select_next()
        Parser.file.write(Parser.writer.heading('Parameters:', level=4))
        while Parser.tokens.actual.type != 'TITLE':
            if Parser.tokens.actual.value == 'Header':
                Parser.tokens.select_next()
                header = Parser.parseHeader()
                parameters['header'] = header
            if Parser.tokens.actual.value == 'Body':
                Parser.tokens.select_next()
                body = Parser.parseBody()
                parameters['body'] = body
            # NOTE: only the 'Path' check carries an else; any other token
            # at this point ends the section immediately.
            if Parser.tokens.actual.value == 'Path':
                Parser.tokens.select_next()
                path = Parser.parsePath()
                parameters['path'] = path
            else:
                return parameters
        return parameters

    def parsePath():
        """Parse 'key - description' lines of the Path subsection and emit
        them as a markdown table.

        Returns:
            dict: path parameter name -> description.
        """
        path = {}
        Parser.file.write(Parser.writer.heading('Path parameters:', level=5))
        while Parser.tokens.actual.type not in ['EOF', 'TITLE', 'SUB']:
            key = Parser.tokens.actual.value
            Parser.tokens.select_next()
            if Parser.tokens.actual.type != 'SEPARATOR':
                raise SyntaxError(f'Missing - between key and value for path, instead got {Parser.tokens.actual.value}')
            Parser.tokens.select_next()
            value = []
            while Parser.tokens.actual.type != 'ENDLINE':
                value.append(Parser.tokens.actual.value)
                Parser.tokens.select_next()
            value.append(Parser.tokens.actual.value)
            value = ' '.join(value)
            Parser.tokens.select_next()
            path[key] = value
        Parser.file.write(Parser.writer.table(path, keyname='Name', valuename='Description'))
        return path

    def parseHeader():
        """Parse 'key - description' lines of the Header subsection and emit
        them as a markdown table.

        Returns:
            dict: header name -> description.
        """
        header = {}
        Parser.file.write(Parser.writer.heading('Header parameters:', level=5))
        while Parser.tokens.actual.type not in ['EOF', 'TITLE', 'SUB']:
            key = Parser.tokens.actual.value
            Parser.tokens.select_next()
            if Parser.tokens.actual.type != 'SEPARATOR':
                raise SyntaxError(f'Missing - between key and value for header, instead got {Parser.tokens.actual.value}')
            Parser.tokens.select_next()
            value = []
            while Parser.tokens.actual.type != 'ENDLINE':
                # BUG FIX: append the token's value, not the Token object;
                # ' '.join() over Token objects raised TypeError (parsePath
                # already did this correctly).
                value.append(Parser.tokens.actual.value)
                Parser.tokens.select_next()
            value.append(Parser.tokens.actual.value)
            value = ' '.join(value)
            Parser.tokens.select_next()
            header[key] = value
        Parser.file.write(Parser.writer.table(header))
        return header

    def parseSchema():
        """Load the JSON schema file referenced by the current FILE token.

        Returns:
            dict: parsed JSON content.

        Raises:
            FileNotFoundError: if the referenced file does not exist.
        """
        try:
            file_path = Parser.tokens.actual.value
            if Parser.tokens.actual.value[:2] == './':
                file_path = Parser.tokens.actual.value[2:]
            with open(Parser.path + file_path) as f:
                data = json.load(f)
            return data
        except FileNotFoundError as err:
            raise err

    def parseSchemaBody():
        """Build an example payload from the schema file referenced by the
        current FILE token.

        Raises:
            FileNotFoundError: if the referenced file does not exist.
        """
        try:
            file_path = Parser.tokens.actual.value
            if Parser.tokens.actual.value[:2] == './':
                file_path = Parser.tokens.actual.value[2:]
            body = jsonutils.make_payload(Parser.path + file_path)
            return body
        except FileNotFoundError as err:
            raise err

    def parseBody():
        """Parse the Body subsection, either from a schema FILE reference or
        from inline 'key - description' lines, emitting it as a JSON block.

        Returns:
            dict: body parameter name -> description.
        """
        body = {}
        Parser.file.write(Parser.writer.heading('Body parameters:', level=5))
        if Parser.tokens.actual.type == 'FILE':
            Parser.tokens.select_next()
            body = Parser.parseSchemaBody()
            Parser.file.write(Parser.writer.json_code(json.dumps(body)))
            return body
        while Parser.tokens.actual.type not in ['EOF', 'TITLE', 'SUB']:
            key = Parser.tokens.actual
            Parser.tokens.select_next()
            if Parser.tokens.actual.type != 'SEPARATOR':
                raise SyntaxError(f'Missing - between key and value for body, instead got {Parser.tokens.actual.value}')
            Parser.tokens.select_next()
            value = []
            while Parser.tokens.actual.type != 'ENDLINE':
                # BUG FIX: append the token's value, not the Token object;
                # ' '.join() over Token objects raised TypeError.
                value.append(Parser.tokens.actual.value)
                Parser.tokens.select_next()
            value.append(Parser.tokens.actual.value)
            value = ' '.join(value)
            Parser.tokens.select_next()
            body[key.value] = value
        Parser.file.write(Parser.writer.json_code(json.dumps(body)))
        return body

    def parseResponse():
        """Parse the Response section: one or more numeric status codes,
        each optionally followed by a schema FILE or inline key/value lines.

        Returns:
            dict: status code (str) -> response body dict.

        Raises:
            SyntaxError: if a response entry does not start with a code.
        """
        response = {}
        Parser.tokens.select_next()
        Parser.file.write(Parser.writer.heading('Response:', level=4))
        while Parser.tokens.actual.type not in ['EOF', 'TITLE']:
            actual = Parser.tokens.actual.value
            if not actual.isnumeric():
                raise SyntaxError(f'Response must have a code, instead got {Parser.tokens.actual.value}')
            try:
                color = Parser.writer.RESPONSE_COLOR[str(actual)]
            except KeyError:
                # Status codes without a configured color render in red.
                color = 'red'
            Parser.file.write(Parser.writer.heading(Parser.writer.text_color(f'Code: {actual}', color=color), level=5))
            key_reponse = str(actual)
            Parser.tokens.select_next()
            body = {}
            if Parser.tokens.actual.type == 'FILE':
                Parser.tokens.select_next()
                body = Parser.parseSchema()
                Parser.tokens.select_next()
                response[key_reponse] = body
            while Parser.tokens.actual.type not in ['EOF', 'TITLE', 'SUB']:
                key = Parser.tokens.actual.value
                Parser.tokens.select_next()
                if Parser.tokens.actual.type != 'SEPARATOR':
                    raise SyntaxError(f'Missing - between key and value for reponse, instead got {Parser.tokens.actual.value}')
                Parser.tokens.select_next()
                value = []
                while Parser.tokens.actual.type != 'ENDLINE':
                    value.append(Parser.tokens.actual.value)
                    Parser.tokens.select_next()
                value.append(Parser.tokens.actual.value)
                value = ' '.join(value)
                Parser.tokens.select_next()
                body[key] = value
                response[key_reponse] = body
            try:
                color = Parser.writer.RESPONSE_COLOR[str(actual)]
                Parser.file.write(Parser.writer.RESPONSE_NOTATION[str(actual)](Parser.writer.code(json.dumps(body))))
            except KeyError:
                Parser.file.write(Parser.writer.failure(Parser.writer.code(json.dumps(body))))
        return response

    def parseDescription():
        """Parse the free-text Description section and emit it verbatim.

        Returns:
            str: the concatenated description text.
        """
        description = []
        Parser.tokens.select_next()
        Parser.file.write(Parser.writer.heading('Description:', level=4))
        while Parser.tokens.actual.type != 'TITLE':
            description.append(Parser.tokens.actual.value)
            Parser.tokens.select_next()
        description = ' '.join(description)
        Parser.file.write(Parser.writer.text(description))
        return description
"jsonutils.make_payload",
"writer.MKDocsWriter",
"json.dumps",
"Tokenizer.Tokenizer",
"json.load"
] | [((163, 178), 'Tokenizer.Tokenizer', 'Tokenizer', (['code'], {}), '(code)\n', (172, 178), False, 'from Tokenizer import Tokenizer\n'), ((230, 244), 'writer.MKDocsWriter', 'MKDocsWriter', ([], {}), '()\n', (242, 244), False, 'from writer import MKDocsWriter\n'), ((4183, 4230), 'jsonutils.make_payload', 'jsonutils.make_payload', (['(Parser.path + file_path)'], {}), '(Parser.path + file_path)\n', (4205, 4230), False, 'import jsonutils\n'), ((3854, 3866), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3863, 3866), False, 'import json\n'), ((5425, 5441), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (5435, 5441), False, 'import json\n'), ((4622, 4638), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (4632, 4638), False, 'import json\n'), ((7469, 7485), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (7479, 7485), False, 'import json\n'), ((7593, 7609), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (7603, 7609), False, 'import json\n')] |
#!/usr/bin/env python3
'''
test all the class and function
'''
import unittest
from StudentList import StudentList, StudentList15
from Textprocessor import Textprocessor
from NetworkSolution import NetworkSolution
class TestStudentList(unittest.TestCase):
    """
    Exercise the student-list containers, the text processor and the
    network-solution calculator.
    """

    def test_student_list(self):
        """
        Populate a StudentList (including a duplicate id) and print it.
        """
        roster = StudentList()
        for sid, name in [(11510493, '<NAME>'),
                          (11610001, 'Alice'),
                          (11510001, 'Nancy'),
                          (11510001, 'Nancy'),
                          (11510002, 'Nancy1')]:
            roster.add_student(sid, name)
        roster.print_list()

    def test_student_list15(self):
        """
        Same scenario against the StudentList15 variant.
        """
        roster = StudentList15()
        for sid, name in [(11510493, '<NAME>'),
                          (11610001, 'Alice'),
                          (11510001, 'Nancy'),
                          (11510001, 'Nancy'),
                          (11510002, 'Nancy1')]:
            roster.add_student(sid, name)
        roster.print_list()

    def test_file_processor(self):
        """
        Append, delete and edit text in a file, printing after each step.
        """
        processor = Textprocessor("test.txt")
        processor.append_text("abcdefghijk")
        processor.read_print_all()
        processor.delete_text(7, 4)
        processor.read_print_all()
        processor.edit_text(0, "sustech")
        processor.read_print_all()

    def test_solution3(self):
        """
        question3
        filesize: 160MB
        """
        solver = NetworkSolution(160e6 * 8)
        print(solver.get_answer1(),
              solver.get_answer2(), solver.get_answer3())
if __name__ == "__main__":
    # Discover and run all TestStudentList cases when executed as a script.
    unittest.main()
| [
"Textprocessor.Textprocessor",
"StudentList.StudentList15",
"NetworkSolution.NetworkSolution",
"StudentList.StudentList",
"unittest.main"
] | [((1640, 1655), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1653, 1655), False, 'import unittest\n'), ((372, 385), 'StudentList.StudentList', 'StudentList', ([], {}), '()\n', (383, 385), False, 'from StudentList import StudentList, StudentList15\n'), ((715, 730), 'StudentList.StudentList15', 'StudentList15', ([], {}), '()\n', (728, 730), False, 'from StudentList import StudentList, StudentList15\n'), ((1070, 1095), 'Textprocessor.Textprocessor', 'Textprocessor', (['"""test.txt"""'], {}), "('test.txt')\n", (1083, 1095), False, 'from Textprocessor import Textprocessor\n'), ((1468, 1500), 'NetworkSolution.NetworkSolution', 'NetworkSolution', (['(160000000.0 * 8)'], {}), '(160000000.0 * 8)\n', (1483, 1500), False, 'from NetworkSolution import NetworkSolution\n')] |
# ******************************************************
## Revision "$LastChangedDate: 2018-07-08 18:08:17 +0200 (zo, 08 jul 2018) $"
## Date "$LastChangedRevision: 1 $"
## Author "$LastChangedBy: arthurbeusen $"
## URL "$HeadURL: https://pbl.sliksvn.com/dgnm/core/make_y0.py $"
## Copyright 2019, PBL Netherlands Environmental Assessment Agency and Utrecht University.
## Reuse permitted under Gnu Public License, GPL v3.
# ******************************************************
# Import local modules.
import general_func
def make_y0(params, species):
    """
    Build the initial state vector (y0) for the solver.

    Args:
        params: model parameter object; currently unused, kept for
            interface compatibility (it was only needed by the disabled
            spool/rpool handling).
        species: species objects handed to ``general_func.get_amount``.

    Returns:
        list: initial amount for every species.
    """
    # NOTE: earlier revisions also appended spool/rpool values for
    # phytoplankton indices and dissolved POC fractions here; that code
    # was commented out and has been removed.
    return general_func.get_amount(species)
| [
"general_func.get_amount"
] | [((566, 598), 'general_func.get_amount', 'general_func.get_amount', (['species'], {}), '(species)\n', (589, 598), False, 'import general_func\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 16 17:58:56 2020
@author: pmchozas
"""
import nltk
from nltk.stem.snowball import SnowballStemmer
import re

# Load the list of legal verbs, one per line.
with open('legal_verbs.txt', 'r', encoding='utf-8') as f:
    verblist = [line.strip('\n') for line in f]

# Stem every verb with the Spanish Snowball stemmer; matching on the stem
# lets us catch conjugated forms of each verb in the text.
snstemmer = SnowballStemmer(language='spanish')
stemlist = [snstemmer.stem(verb) for verb in verblist]

with open('estatuto_es_span.txt', 'r', encoding='utf-8') as span:
    estatuto = span.read()

# BUG FIX: the original inner character loop lacked a break, so once the
# end of the matched word was found it kept re-tagging the same span on
# every remaining iteration. A single regex substitution per stem tags
# each word that starts with that stem exactly once.
for stem in stemlist:
    pattern = re.compile(r'\b%s\w*' % re.escape(stem))
    estatuto = pattern.sub(
        lambda m: '<span class="verb">%s</span>' % m.group(0), estatuto)

with open('estatuto_es_verb.txt', 'w') as new:
    new.write(estatuto)
| [
"nltk.stem.snowball.SnowballStemmer"
] | [((475, 510), 'nltk.stem.snowball.SnowballStemmer', 'SnowballStemmer', ([], {'language': '"""spanish"""'}), "(language='spanish')\n", (490, 510), False, 'from nltk.stem.snowball import SnowballStemmer\n')] |
from parlai.agents.programr.parser.template.nodes.base import TemplateNode
# from parlai.agents.programr.utils.logging.ylogger import YLogger
import parlai.utils.logging as logging
from parlai.agents.programr.utils.text.text import TextUtils
class TemplateListNode(TemplateNode):
    """
    Template node that renders its children as an XML ``<list>`` of
    ``<item>`` elements.
    """

    def __init__(self):
        super().__init__()
        self._items = []

    def resolve_list_items(self, client_context):
        """Resolve every item and concatenate them as ``<item>`` elements."""
        # str.join avoids the quadratic cost of building the string with +=.
        return "".join("<item>%s</item>" % item.resolve(client_context)
                       for item in self._items)

    def resolve_to_string(self, client_context):
        """Render the full ``<list>...</list>`` element."""
        return "<list>%s</list>" % self.resolve_list_items(client_context)

    def resolve(self, brain):
        """Resolve this node, returning "" on any failure."""
        try:
            return self.resolve_to_string(brain)
        except Exception as excep:
            # YLogger.exception(brain, "Failed to resolve", excep)
            logging.error(f"Failed to resolve {excep}")
            return ""

    def to_string(self):
        return "[LIST] %d" % (len(self._items))

    def to_xml(self, client_context):
        return self.resolve_to_string(client_context)

    #######################################################################################################
    #
    def parse_expression(self, graph, expression):
        """Parse the XML ``<list>`` element, collecting ``<item>`` children
        and delegating any other tag to the graph parser."""
        head_text = self.get_text_from_element(expression)
        self.parse_text(graph, head_text)
        for child in expression:
            tag_name = TextUtils.tag_from_text(child.tag)
            if tag_name == 'item':
                item = self.parse_children_as_word_node(graph, child)
                self._items.append(item)
            else:
                graph.parse_tag_expression(child, self)
            tail_text = self.get_tail_from_element(child)
            self.parse_text(graph, tail_text)
| [
"parlai.agents.programr.utils.text.text.TextUtils.tag_from_text",
"parlai.utils.logging.error"
] | [((1482, 1516), 'parlai.agents.programr.utils.text.text.TextUtils.tag_from_text', 'TextUtils.tag_from_text', (['child.tag'], {}), '(child.tag)\n', (1505, 1516), False, 'from parlai.agents.programr.utils.text.text import TextUtils\n'), ((924, 967), 'parlai.utils.logging.error', 'logging.error', (['f"""Failed to resolve {excep}"""'], {}), "(f'Failed to resolve {excep}')\n", (937, 967), True, 'import parlai.utils.logging as logging\n')] |
# After tensorflow 2, keras is being used in Backend
# comment shows an alternative way to run the command
#Documentation:- https://keras.io/api/datasets/cifar10/
# Version for your reference, Downgrade/ Ugrade/ Reinstall Accordingly
import keras
print(keras.__version__) #2.2.4 in my case
from keras.datasets import cifar10
# load_data() downloads CIFAR-10 on first use and returns the
# (train, test) splits as numpy arrays.
(x_train,y_train), (x_test, y_test) = cifar10.load_data()
# x_train holds the training images (50,000 32x32 RGB images per the
# CIFAR-10 dataset documentation).
print(len(x_train))
print(x_train[0])
# Screenshot of Output, For Your Reference :-
# Using Tf2 :- Recommended
#pip install tensorflow
# from tensorflow.keras.datasets import cifar10
# (x_train,y_train), (x_test, y_test) = cifar10.load_data()
# print(len(x_train))
# print(x_train[0])
| [
"keras.datasets.cifar10.load_data"
] | [((376, 395), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (393, 395), False, 'from keras.datasets import cifar10\n')] |
"""AddreceiptOnRegistrationTable
Revision ID: 1468fd5ca2be
Revises: 3dd<PASSWORD>4<PASSWORD>
Create Date: 2022-02-24 22:33:14.628454
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1468fd5ca2be'
# NOTE(review): the down-revision below contains a redacted '<PASSWORD>'
# placeholder — confirm the real revision id against the migration
# history before running downgrades.
down_revision = '3dd70974<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable ``receipt_generated`` column (with index) to
    ``user_registrations``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user_registrations', sa.Column('receipt_generated', sa.VARCHAR(length=250), nullable=True))
    op.create_index(op.f('ix_user_registrations_receipt_generated'), 'user_registrations', ['receipt_generated'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``receipt_generated`` index and column from
    ``user_registrations`` (reverses :func:`upgrade`)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_user_registrations_receipt_generated'), table_name='user_registrations')
    op.drop_column('user_registrations', 'receipt_generated')
    # ### end Alembic commands ###
| [
"alembic.op.f",
"alembic.op.drop_column",
"sqlalchemy.VARCHAR"
] | [((877, 934), 'alembic.op.drop_column', 'op.drop_column', (['"""user_registrations"""', '"""receipt_generated"""'], {}), "('user_registrations', 'receipt_generated')\n", (891, 934), False, 'from alembic import op\n'), ((545, 592), 'alembic.op.f', 'op.f', (['"""ix_user_registrations_receipt_generated"""'], {}), "('ix_user_registrations_receipt_generated')\n", (549, 592), False, 'from alembic import op\n'), ((791, 838), 'alembic.op.f', 'op.f', (['"""ix_user_registrations_receipt_generated"""'], {}), "('ix_user_registrations_receipt_generated')\n", (795, 838), False, 'from alembic import op\n'), ((485, 507), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(250)'}), '(length=250)\n', (495, 507), True, 'import sqlalchemy as sa\n')] |
# -*- coding: utf-8 -*-
"""Cp2kMultistageDdecWorkChain workchain"""
from aiida.plugins import CalculationFactory, DataFactory, WorkflowFactory
from aiida.common import AttributeDict
from aiida.engine import WorkChain, ToContext
from aiida_lsmo.utils import aiida_dict_merge
# import sub-workchains
Cp2kMultistageWorkChain = WorkflowFactory('lsmo.cp2k_multistage') # pylint: disable=invalid-name
Cp2kDdecWorkChain = WorkflowFactory('ddec.cp2k_ddec') # pylint: disable=invalid-name
# import calculations
DdecCalculation = CalculationFactory('ddec') # pylint: disable=invalid-name
# import aiida data
Dict = DataFactory('dict') # pylint: disable=invalid-name
CifData = DataFactory('cif') # pylint: disable=invalid-name
class Cp2kMultistageDdecWorkChain(WorkChain):
    """A workchain that combines: Cp2kMultistageWorkChain + Cp2kDdecWorkChain"""

    @classmethod
    def define(cls, spec):
        """Define workflow specification."""
        super().define(spec)
        # Expose all Multistage inputs; Ddec inputs except cp2k_base, which is
        # taken from the Multistage namespace in run_cp2kddec below.
        spec.expose_inputs(Cp2kMultistageWorkChain)
        spec.expose_inputs(Cp2kDdecWorkChain, exclude=['cp2k_base'])

        # specify the chain of calculations to be performed
        spec.outline(cls.run_cp2kmultistage, cls.run_cp2kddec, cls.return_results)

        # output_structure is superseded by the DDEC-charged structure.
        spec.expose_outputs(Cp2kMultistageWorkChain, exclude=['output_structure'])
        spec.expose_outputs(Cp2kDdecWorkChain, include=['structure_ddec'])

    def run_cp2kmultistage(self):
        """Run CP2K-Multistage"""
        cp2k_ms_inputs = AttributeDict(self.exposed_inputs(Cp2kMultistageWorkChain))
        cp2k_ms_inputs['metadata']['call_link_label'] = 'call_cp2kmultistage'

        running = self.submit(Cp2kMultistageWorkChain, **cp2k_ms_inputs)
        self.report('Running Cp2MultistageWorkChain to move the structure')
        # Wait for the sub-workchain; result is available as self.ctx.ms_wc.
        return ToContext(ms_wc=running)

    def run_cp2kddec(self):
        """Pass the Cp2kMultistageWorkChain outputs as inputs for
        Cp2kDdecWorkChain: cp2k_base (metadata), cp2k_params, structure and WFN.
        """
        cp2k_ddec_inputs = AttributeDict(self.exposed_inputs(Cp2kDdecWorkChain))
        cp2k_ddec_inputs['cp2k_base'] = self.exposed_inputs(Cp2kMultistageWorkChain)['cp2k_base']
        # Restart the SCF from the wavefunction left by the Multistage run.
        cp2k_params_modify = Dict(
            dict={
                'FORCE_EVAL': {
                    'DFT': {
                        'WFN_RESTART_FILE_NAME': './parent_calc/aiida-RESTART.wfn',
                        'SCF': {
                            'SCF_GUESS': 'RESTART'
                        }
                    }
                }
            })
        cp2k_params = aiida_dict_merge(self.ctx.ms_wc.outputs.last_input_parameters, cp2k_params_modify)
        cp2k_ddec_inputs['cp2k_base']['cp2k']['parameters'] = cp2k_params
        if 'output_structure' in self.ctx.ms_wc.outputs:
            cp2k_ddec_inputs['cp2k_base']['cp2k']['structure'] = self.ctx.ms_wc.outputs.output_structure
        else:  # no output structure from a CP2K ENERGY calculation, use the input one.
            inp_structure = self.exposed_inputs(Cp2kMultistageWorkChain)['structure']
            cp2k_ddec_inputs['cp2k_base']['cp2k']['structure'] = inp_structure
        # parent_calc_folder lets CP2K find the restart wavefunction file.
        cp2k_ddec_inputs['cp2k_base']['cp2k']['parent_calc_folder'] = self.ctx.ms_wc.outputs.remote_folder
        cp2k_ddec_inputs['metadata']['call_link_label'] = 'call_cp2kddec'

        running = self.submit(Cp2kDdecWorkChain, **cp2k_ddec_inputs)
        return ToContext(cp2k_ddec_wc=running)

    def return_results(self):
        """Return exposed outputs and print the pk of the CifData w/DDEC"""
        self.out_many(self.exposed_outputs(self.ctx.ms_wc, Cp2kMultistageWorkChain))
        self.out_many(self.exposed_outputs(self.ctx.cp2k_ddec_wc, Cp2kDdecWorkChain))
| [
"aiida_lsmo.utils.aiida_dict_merge",
"aiida.engine.ToContext",
"aiida.plugins.CalculationFactory",
"aiida.plugins.DataFactory",
"aiida.plugins.WorkflowFactory"
] | [((326, 365), 'aiida.plugins.WorkflowFactory', 'WorkflowFactory', (['"""lsmo.cp2k_multistage"""'], {}), "('lsmo.cp2k_multistage')\n", (341, 365), False, 'from aiida.plugins import CalculationFactory, DataFactory, WorkflowFactory\n'), ((418, 451), 'aiida.plugins.WorkflowFactory', 'WorkflowFactory', (['"""ddec.cp2k_ddec"""'], {}), "('ddec.cp2k_ddec')\n", (433, 451), False, 'from aiida.plugins import CalculationFactory, DataFactory, WorkflowFactory\n'), ((525, 551), 'aiida.plugins.CalculationFactory', 'CalculationFactory', (['"""ddec"""'], {}), "('ddec')\n", (543, 551), False, 'from aiida.plugins import CalculationFactory, DataFactory, WorkflowFactory\n'), ((612, 631), 'aiida.plugins.DataFactory', 'DataFactory', (['"""dict"""'], {}), "('dict')\n", (623, 631), False, 'from aiida.plugins import CalculationFactory, DataFactory, WorkflowFactory\n'), ((674, 692), 'aiida.plugins.DataFactory', 'DataFactory', (['"""cif"""'], {}), "('cif')\n", (685, 692), False, 'from aiida.plugins import CalculationFactory, DataFactory, WorkflowFactory\n'), ((1794, 1818), 'aiida.engine.ToContext', 'ToContext', ([], {'ms_wc': 'running'}), '(ms_wc=running)\n', (1803, 1818), False, 'from aiida.engine import WorkChain, ToContext\n'), ((2572, 2658), 'aiida_lsmo.utils.aiida_dict_merge', 'aiida_dict_merge', (['self.ctx.ms_wc.outputs.last_input_parameters', 'cp2k_params_modify'], {}), '(self.ctx.ms_wc.outputs.last_input_parameters,\n cp2k_params_modify)\n', (2588, 2658), False, 'from aiida_lsmo.utils import aiida_dict_merge\n'), ((3411, 3442), 'aiida.engine.ToContext', 'ToContext', ([], {'cp2k_ddec_wc': 'running'}), '(cp2k_ddec_wc=running)\n', (3420, 3442), False, 'from aiida.engine import WorkChain, ToContext\n')] |
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
from geoevents.core.models import Setting
import json
def app_settings(request):
    """Global values to pass to templates.

    Returns a dict with one UPPER_CASED key per Setting row plus a
    ``settings`` key holding a JSON array of ``{name, value}`` objects.
    """
    settings_dict = {}
    settings = []
    for obj in Setting.objects.all():
        settings_dict[obj.name.upper()] = obj.value
        # append a plain dict instead of the original `settings += [{...}]`
        settings.append({'name': obj.name, 'value': obj.value})
    settings_dict['settings'] = json.dumps(settings)
    # settings_dict is a local dict already; the extra dict(...) copy was redundant.
    return settings_dict
"geoevents.core.models.Setting.objects.all",
"json.dumps"
] | [((399, 420), 'geoevents.core.models.Setting.objects.all', 'Setting.objects.all', ([], {}), '()\n', (418, 420), False, 'from geoevents.core.models import Setting\n'), ((635, 655), 'json.dumps', 'json.dumps', (['settings'], {}), '(settings)\n', (645, 655), False, 'import json\n')] |
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for pyvo.dal.datalink
"""
from functools import partial
from urllib.parse import parse_qsl
from pyvo.dal.adhoc import DatalinkResults
from pyvo.dal.params import find_param_by_keyword, get_converter
from pyvo.dal.exceptions import DALServiceError
import pytest
import numpy as np
import astropy.units as u
from astropy.utils.data import get_pkg_data_contents, get_pkg_data_fileobj
# Bind the test package and binary encoding once so the helpers below can be
# called with just a relative data path.
get_pkg_data_contents = partial(
    get_pkg_data_contents, package=__package__, encoding='binary')
get_pkg_data_fileobj = partial(
    get_pkg_data_fileobj, package=__package__, encoding='binary')
@pytest.fixture()
def proc(mocker):
    """Mock the datalink endpoint, answering with the canned proc.xml votable."""
    def respond(request, context):
        return get_pkg_data_contents('data/datalink/proc.xml')

    with mocker.register_uri('GET', 'http://example.com/proc',
                             content=respond) as mock:
        yield mock
@pytest.fixture()
def proc_ds(mocker):
    """Mock the dataset endpoint with an empty byte response."""
    def respond(request, context):
        return b''

    with mocker.register_uri('GET', 'http://example.com/proc',
                             content=respond) as mock:
        yield mock
@pytest.fixture()
def proc_units(mocker):
    """Mock the datalink endpoint with the proc_units.xml votable."""
    def respond(request, context):
        return get_pkg_data_contents('data/datalink/proc_units.xml')

    with mocker.register_uri('GET', 'http://example.com/proc_units',
                             content=respond) as mock:
        yield mock
@pytest.fixture()
def proc_units_ds(mocker):
    """Mock the dataset endpoint, asserting the band range arrives in metres."""
    expected_band = '6.000000000000001e-07 8.000000000000001e-06'

    def respond(request, context):
        params = dict(parse_qsl(request.query))
        if 'band' in params:
            assert params['band'] == expected_band
        return b''

    with mocker.register_uri('GET', 'http://example.com/proc_units_ds',
                             content=respond) as mock:
        yield mock
@pytest.fixture()
def proc_inf(mocker):
    """Mock the datalink endpoint with the proc_inf.xml votable."""
    def respond(request, context):
        return get_pkg_data_contents('data/datalink/proc_inf.xml')

    with mocker.register_uri('GET', 'http://example.com/proc_inf',
                             content=respond) as mock:
        yield mock
@pytest.fixture()
def proc_inf_ds(mocker):
    """Mock the dataset endpoint, asserting an open-ended band upper bound."""
    expected_band = '6.000000000000001e-07 +Inf'

    def respond(request, context):
        params = dict(parse_qsl(request.query))
        if 'band' in params:
            assert params['band'] == expected_band
        return b''

    with mocker.register_uri('GET', 'http://example.com/proc_inf_ds',
                             content=respond) as mock:
        yield mock
@pytest.mark.usefixtures('proc')
@pytest.mark.filterwarnings("ignore::astropy.io.votable.exceptions.W06")
@pytest.mark.filterwarnings("ignore::astropy.io.votable.exceptions.W48")
@pytest.mark.filterwarnings("ignore::astropy.io.votable.exceptions.E02")
def test_find_param_by_keyword():
    """Parameter lookup must be case-insensitive on the keyword."""
    datalink = DatalinkResults.from_result_url('http://example.com/proc')
    first_record = datalink[0]
    params = {param.name: param for param in first_record.input_params}

    for keyword in ('polygon', 'circle'):
        assert (find_param_by_keyword(keyword, params)
                == find_param_by_keyword(keyword.upper(), params))
@pytest.mark.usefixtures('proc')
@pytest.mark.filterwarnings("ignore::astropy.io.votable.exceptions.W06")
@pytest.mark.filterwarnings("ignore::astropy.io.votable.exceptions.W48")
@pytest.mark.filterwarnings("ignore::astropy.io.votable.exceptions.E02")
def test_serialize():
    """Converters turn tuples/arrays into space-separated strings."""
    datalink = DatalinkResults.from_result_url('http://example.com/proc')
    params = {param.name: param for param in datalink[0].input_params}

    def conv(name):
        return get_converter(find_param_by_keyword(name, params))

    assert conv('polygon').serialize((1, 2, 3)) == "1 2 3"
    assert conv('polygon').serialize(np.array((1, 2, 3))) == "1 2 3"
    assert conv('circle').serialize((1.1, 2.2, 3.3)) == "1.1 2.2 3.3"
    assert conv('circle').serialize(np.array((1.1, 2.2, 3.3))) == "1.1 2.2 3.3"
    assert conv('scale').serialize(1) == "1"
    assert conv('kind').serialize("DATA") == "DATA"
@pytest.mark.usefixtures('proc')
@pytest.mark.usefixtures('proc_ds')
def test_serialize_exceptions():
    """Values of the wrong arity must raise DALServiceError on serialize."""
    datalink = DatalinkResults.from_result_url('http://example.com/proc')
    params = {param.name: param for param in datalink[0].input_params}

    def conv(name):
        return get_converter(find_param_by_keyword(name, params))

    with pytest.raises(DALServiceError):
        conv('polygon').serialize((1, 2, 3, 4))

    with pytest.raises(DALServiceError):
        conv('circle').serialize((1, 2, 3, 4))

    with pytest.raises(DALServiceError):
        conv('band').serialize((1, 2, 3))
@pytest.mark.usefixtures('proc_units')
@pytest.mark.usefixtures('proc_units_ds')
def test_units():
    """Quantity band limits are accepted and forwarded by process()."""
    results = DatalinkResults.from_result_url('http://example.com/proc_units')
    record = results[0]
    record.process(band=(6000*u.Angstrom, 80000*u.Angstrom))
@pytest.mark.usefixtures('proc_inf')
@pytest.mark.usefixtures('proc_inf_ds')
def test_inf():
    """An infinite upper band limit is accepted by process()."""
    results = DatalinkResults.from_result_url('http://example.com/proc_inf')
    record = results[0]
    record.process(band=(6000, +np.inf) * u.Angstrom)
| [
"pytest.mark.filterwarnings",
"pyvo.dal.params.find_param_by_keyword",
"pyvo.dal.adhoc.DatalinkResults.from_result_url",
"astropy.utils.data.get_pkg_data_contents",
"numpy.array",
"functools.partial",
"pytest.mark.usefixtures",
"pytest.raises",
"pytest.fixture",
"urllib.parse.parse_qsl"
] | [((505, 575), 'functools.partial', 'partial', (['get_pkg_data_contents'], {'package': '__package__', 'encoding': '"""binary"""'}), "(get_pkg_data_contents, package=__package__, encoding='binary')\n", (512, 575), False, 'from functools import partial\n'), ((605, 674), 'functools.partial', 'partial', (['get_pkg_data_fileobj'], {'package': '__package__', 'encoding': '"""binary"""'}), "(get_pkg_data_fileobj, package=__package__, encoding='binary')\n", (612, 674), False, 'from functools import partial\n'), ((683, 699), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (697, 699), False, 'import pytest\n'), ((950, 966), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (964, 966), False, 'import pytest\n'), ((1176, 1192), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1190, 1192), False, 'import pytest\n'), ((1461, 1477), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1475, 1477), False, 'import pytest\n'), ((1876, 1892), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1890, 1892), False, 'import pytest\n'), ((2155, 2171), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2169, 2171), False, 'import pytest\n'), ((2549, 2580), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""proc"""'], {}), "('proc')\n", (2572, 2580), False, 'import pytest\n'), ((2582, 2653), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::astropy.io.votable.exceptions.W06"""'], {}), "('ignore::astropy.io.votable.exceptions.W06')\n", (2608, 2653), False, 'import pytest\n'), ((2655, 2726), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::astropy.io.votable.exceptions.W48"""'], {}), "('ignore::astropy.io.votable.exceptions.W48')\n", (2681, 2726), False, 'import pytest\n'), ((2728, 2799), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::astropy.io.votable.exceptions.E02"""'], {}), "('ignore::astropy.io.votable.exceptions.E02')\n", (2754, 2799), False, 'import pytest\n'), ((3353, 
3384), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""proc"""'], {}), "('proc')\n", (3376, 3384), False, 'import pytest\n'), ((3386, 3457), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::astropy.io.votable.exceptions.W06"""'], {}), "('ignore::astropy.io.votable.exceptions.W06')\n", (3412, 3457), False, 'import pytest\n'), ((3459, 3530), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::astropy.io.votable.exceptions.W48"""'], {}), "('ignore::astropy.io.votable.exceptions.W48')\n", (3485, 3530), False, 'import pytest\n'), ((3532, 3603), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::astropy.io.votable.exceptions.E02"""'], {}), "('ignore::astropy.io.votable.exceptions.E02')\n", (3558, 3603), False, 'import pytest\n'), ((4505, 4536), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""proc"""'], {}), "('proc')\n", (4528, 4536), False, 'import pytest\n'), ((4538, 4572), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""proc_ds"""'], {}), "('proc_ds')\n", (4561, 4572), False, 'import pytest\n'), ((5293, 5330), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""proc_units"""'], {}), "('proc_units')\n", (5316, 5330), False, 'import pytest\n'), ((5332, 5372), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""proc_units_ds"""'], {}), "('proc_units_ds')\n", (5355, 5372), False, 'import pytest\n'), ((5557, 5592), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""proc_inf"""'], {}), "('proc_inf')\n", (5580, 5592), False, 'import pytest\n'), ((5594, 5632), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""proc_inf_ds"""'], {}), "('proc_inf_ds')\n", (5617, 5632), False, 'import pytest\n'), ((2849, 2907), 'pyvo.dal.adhoc.DatalinkResults.from_result_url', 'DatalinkResults.from_result_url', (['"""http://example.com/proc"""'], {}), "('http://example.com/proc')\n", (2880, 2907), False, 'from pyvo.dal.adhoc import DatalinkResults\n'), 
((3022, 3068), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""polygon"""', 'input_params'], {}), "('polygon', input_params)\n", (3043, 3068), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((3089, 3135), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""POLYGON"""', 'input_params'], {}), "('POLYGON', input_params)\n", (3110, 3135), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((3156, 3201), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""circle"""', 'input_params'], {}), "('circle', input_params)\n", (3177, 3201), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((3221, 3266), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""CIRCLE"""', 'input_params'], {}), "('CIRCLE', input_params)\n", (3242, 3266), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((3641, 3699), 'pyvo.dal.adhoc.DatalinkResults.from_result_url', 'DatalinkResults.from_result_url', (['"""http://example.com/proc"""'], {}), "('http://example.com/proc')\n", (3672, 3699), False, 'from pyvo.dal.adhoc import DatalinkResults\n'), ((4621, 4679), 'pyvo.dal.adhoc.DatalinkResults.from_result_url', 'DatalinkResults.from_result_url', (['"""http://example.com/proc"""'], {}), "('http://example.com/proc')\n", (4652, 4679), False, 'from pyvo.dal.adhoc import DatalinkResults\n'), ((5406, 5470), 'pyvo.dal.adhoc.DatalinkResults.from_result_url', 'DatalinkResults.from_result_url', (['"""http://example.com/proc_units"""'], {}), "('http://example.com/proc_units')\n", (5437, 5470), False, 'from pyvo.dal.adhoc import DatalinkResults\n'), ((5664, 5726), 'pyvo.dal.adhoc.DatalinkResults.from_result_url', 'DatalinkResults.from_result_url', (['"""http://example.com/proc_inf"""'], {}), "('http://example.com/proc_inf')\n", (5695, 5726), False, 'from pyvo.dal.adhoc import DatalinkResults\n'), ((769, 816), 
'astropy.utils.data.get_pkg_data_contents', 'get_pkg_data_contents', (['"""data/datalink/proc.xml"""'], {}), "('data/datalink/proc.xml')\n", (790, 816), False, 'from astropy.utils.data import get_pkg_data_contents, get_pkg_data_fileobj\n'), ((1268, 1321), 'astropy.utils.data.get_pkg_data_contents', 'get_pkg_data_contents', (['"""data/datalink/proc_units.xml"""'], {}), "('data/datalink/proc_units.xml')\n", (1289, 1321), False, 'from astropy.utils.data import get_pkg_data_contents, get_pkg_data_fileobj\n'), ((1966, 2017), 'astropy.utils.data.get_pkg_data_contents', 'get_pkg_data_contents', (['"""data/datalink/proc_inf.xml"""'], {}), "('data/datalink/proc_inf.xml')\n", (1987, 2017), False, 'from astropy.utils.data import get_pkg_data_contents, get_pkg_data_fileobj\n'), ((3836, 3882), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""polygon"""', 'input_params'], {}), "('polygon', input_params)\n", (3857, 3882), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((3925, 3970), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""circle"""', 'input_params'], {}), "('circle', input_params)\n", (3946, 3970), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((4012, 4056), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""scale"""', 'input_params'], {}), "('scale', input_params)\n", (4033, 4056), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((4097, 4140), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""kind"""', 'input_params'], {}), "('kind', input_params)\n", (4118, 4140), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((4816, 4862), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""polygon"""', 'input_params'], {}), "('polygon', input_params)\n", (4837, 4862), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((4905, 
4950), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""circle"""', 'input_params'], {}), "('circle', input_params)\n", (4926, 4950), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((4991, 5034), 'pyvo.dal.params.find_param_by_keyword', 'find_param_by_keyword', (['"""band"""', 'input_params'], {}), "('band', input_params)\n", (5012, 5034), False, 'from pyvo.dal.params import find_param_by_keyword, get_converter\n'), ((5046, 5076), 'pytest.raises', 'pytest.raises', (['DALServiceError'], {}), '(DALServiceError)\n', (5059, 5076), False, 'import pytest\n'), ((5133, 5163), 'pytest.raises', 'pytest.raises', (['DALServiceError'], {}), '(DALServiceError)\n', (5146, 5163), False, 'import pytest\n'), ((5219, 5249), 'pytest.raises', 'pytest.raises', (['DALServiceError'], {}), '(DALServiceError)\n', (5232, 5249), False, 'import pytest\n'), ((1561, 1585), 'urllib.parse.parse_qsl', 'parse_qsl', (['request.query'], {}), '(request.query)\n', (1570, 1585), False, 'from urllib.parse import parse_qsl\n'), ((2253, 2277), 'urllib.parse.parse_qsl', 'parse_qsl', (['request.query'], {}), '(request.query)\n', (2262, 2277), False, 'from urllib.parse import parse_qsl\n'), ((4233, 4252), 'numpy.array', 'np.array', (['(1, 2, 3)'], {}), '((1, 2, 3))\n', (4241, 4252), True, 'import numpy as np\n'), ((4366, 4391), 'numpy.array', 'np.array', (['(1.1, 2.2, 3.3)'], {}), '((1.1, 2.2, 3.3))\n', (4374, 4391), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import unicode_literals
import six
import yaml
from compose.config import types
def serialize_config_type(dumper, data):
    """Represent a compose config type to YAML via its ``repr()`` string."""
    if six.PY3:
        representer = dumper.represent_str
    else:
        representer = dumper.represent_unicode
    return representer(data.repr())
# Register repr()-based serialization for the volume spec types so
# yaml.safe_dump can emit them.
yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
def serialize_config(config):
    """Render a parsed compose config object back to a YAML string.

    NOTE(review): ``service.pop('name')`` mutates the service dicts held by
    ``config.services`` as a side effect — confirm callers do not reuse
    ``config`` after serializing.
    """
    output = {
        'version': config.version,
        'services': {service.pop('name'): service for service in config.services},
        'networks': config.networks,
        'volumes': config.volumes,
    }
    return yaml.safe_dump(
        output,
        default_flow_style=False,
        indent=2,
        width=80)
| [
"yaml.safe_dump",
"yaml.SafeDumper.add_representer"
] | [((298, 374), 'yaml.SafeDumper.add_representer', 'yaml.SafeDumper.add_representer', (['types.VolumeFromSpec', 'serialize_config_type'], {}), '(types.VolumeFromSpec, serialize_config_type)\n', (329, 374), False, 'import yaml\n'), ((375, 447), 'yaml.SafeDumper.add_representer', 'yaml.SafeDumper.add_representer', (['types.VolumeSpec', 'serialize_config_type'], {}), '(types.VolumeSpec, serialize_config_type)\n', (406, 447), False, 'import yaml\n'), ((702, 770), 'yaml.safe_dump', 'yaml.safe_dump', (['output'], {'default_flow_style': '(False)', 'indent': '(2)', 'width': '(80)'}), '(output, default_flow_style=False, indent=2, width=80)\n', (716, 770), False, 'import yaml\n')] |
import os
import sys
import tempfile
import json
# Positional argv parsing: expects either `--key K` (read) or a store call
# with both `--key K` and `--val V` in either order (5 argv entries total).
if '--key' not in sys.argv or len(sys.argv) < 3:
    print('No key specified')
    sys.exit(-1)

# The flag pair may appear in either order; pick the value next to its flag.
key_name = sys.argv[2] if sys.argv[1] == '--key' else sys.argv[4]

val = None
if '--val' in sys.argv:
    val = sys.argv[2] if sys.argv[1] == '--val' else sys.argv[4]

# Exactly 5 argv entries means both --key and --val were given -> store mode.
wanna_store = (len(sys.argv) == 5)

# JSON store kept in the system temp directory; missing file = empty store.
storage_path = os.path.join(tempfile.gettempdir(), 'storage.data')
kv_dict = {}
if os.path.exists(storage_path):
    with open(storage_path, 'r') as f:
        kv_dict = json.loads(f.read())

if wanna_store:
    # Append the value to the key's list, creating the list on first use.
    if key_name not in kv_dict:
        kv_dict[key_name] = []
    kv_dict[key_name].append(val)
    with open(storage_path, 'w') as f:
        f.write(json.dumps(kv_dict))
else:
    # Read mode: print all stored values, or 'None' for an unknown key.
    if kv_dict.get(key_name) is None:
        print('None')
    else:
        print(', '.join(kv_dict[key_name]))
#Teacher's solution:
import argparse
import json
import os
import tempfile
storage_path = os.path.join(tempfile.gettempdir(), 'storage.data')
def clear():
    """Remove the storage file; a missing file counts as already cleared.

    The original unconditional ``os.remove`` raised FileNotFoundError when
    ``--clear`` ran before anything had been stored.
    """
    if os.path.exists(storage_path):
        os.remove(storage_path)
def get_data():
    """Load the whole key/value store; missing or empty file yields ``{}``."""
    if not os.path.exists(storage_path):
        return {}
    with open(storage_path, 'r') as f:
        contents = f.read()
    return json.loads(contents) if contents else {}
def put(key, value):
    """Append ``value`` to the list stored under ``key`` and persist the store."""
    data = get_data()
    data.setdefault(key, []).append(value)
    with open(storage_path, 'w') as f:
        f.write(json.dumps(data))
def get(key):
    """Return the list of values stored under ``key``, or None if absent."""
    return get_data().get(key)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--key', help='Key')
    parser.add_argument('--val', help='Value')
    parser.add_argument('--clear', action='store_true', help='Clear')
    args = parser.parse_args()
    # Precedence: --clear wins; --key with --val stores; --key alone reads.
    if args.clear:
        clear()
    elif args.key and args.val:
        put(args.key, args.val)
    elif args.key:
        print(get(args.key))
    else:
        print('Wrong command')
| [
"os.path.exists",
"json.loads",
"argparse.ArgumentParser",
"json.dumps",
"tempfile.gettempdir",
"sys.exit",
"os.remove"
] | [((433, 461), 'os.path.exists', 'os.path.exists', (['storage_path'], {}), '(storage_path)\n', (447, 461), False, 'import os\n'), ((133, 145), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (141, 145), False, 'import sys\n'), ((376, 397), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (395, 397), False, 'import tempfile\n'), ((959, 980), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (978, 980), False, 'import tempfile\n'), ((1016, 1039), 'os.remove', 'os.remove', (['storage_path'], {}), '(storage_path)\n', (1025, 1039), False, 'import os\n'), ((1579, 1604), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1602, 1604), False, 'import argparse\n'), ((1069, 1097), 'os.path.exists', 'os.path.exists', (['storage_path'], {}), '(storage_path)\n', (1083, 1097), False, 'import os\n'), ((710, 729), 'json.dumps', 'json.dumps', (['kv_dict'], {}), '(kv_dict)\n', (720, 729), False, 'import json\n'), ((1225, 1245), 'json.loads', 'json.loads', (['raw_data'], {}), '(raw_data)\n', (1235, 1245), False, 'import json\n'), ((1456, 1472), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1466, 1472), False, 'import json\n')] |
#!/usr/bin/env python
import uproot
class DAQFile:
    def __init__(self, infpn):
        '''
        Constructor in charge of loading a data file.

        Reads the "mppc" tree from the ROOT file at infpn into a pandas
        DataFrame (presumably one row per event — TODO confirm schema).
        '''
        tr_mppc = uproot.open(infpn)['mppc']
        self.df = tr_mppc.arrays(library='pd')
        # store the input file pathname
        self.infpn = infpn
        # store the FEB SN
        self.feb_sn = None
if __name__ == '__main__':
    # Smoke test: load a sample ROOT file and show the resulting dataframe.
    my_file = DAQFile('../data//root/led/20210615_200000_new_ch24_thr205_gain56_temp22.75_trig24_feb12808_olddaq/mppc_56.5V.root')
    print(my_file.df)
"uproot.open"
] | [((180, 198), 'uproot.open', 'uproot.open', (['infpn'], {}), '(infpn)\n', (191, 198), False, 'import uproot\n')] |
from pathlib import Path
from glob import glob
import pickle
from dataclasses import dataclass
import pandas as pd
import numpy as np
from tqdm import tqdm
from db_models import session, Item
@dataclass
class ItemData:
    """Data for Items.

    All fields default to "empty" values so an instance can be filled in
    incrementally while the item page's tables are parsed.
    """
    name: str = ""
    item_id: int = 0
    type: str = ""
    # Details
    weight: int = 0
    npc_buy: int = 0
    npc_sell: int = 0
    refineable: bool = None  # None = not parsed yet; True/False afterwards
    equip_locations: str = ""
    # Stats
    range: int = 0
    defense: int = 0
    attack: int = 0
    weapon_level: int = 0
    slots: int = 0
    # Restrictions
    level_range: str = ""
    usage: str = ""
    trade: str = ""
    job_class_type: str = ""  # kept for backward compatibility
    # The parser and the DB row builder both use the plural name; previously
    # it only existed as a dynamically-set attribute, never as a field.
    job_class_types: str = ""
    job_classes: str = ""
    gender: str = ""
    # Scripts
    use_script: str = ""
    equip_script: str = ""
    unequip_script: str = ""
unequip_script: str = ""
def process_item(item):
    """Collapse one item page's list of pandas tables into an ItemData.

    ``item`` is the list returned by ``pd.read_html`` for a single item page;
    each table is recognised by its columns or first-column labels (layout
    assumed to match the scraped site's item pages — TODO confirm if the
    page markup changes).
    """
    item_data = ItemData()
    for table in item:
        if "Use Script" in table.columns:
            # Script table: rows 0 / 2 / 4 hold use / equip / unequip scripts.
            item_data.use_script = table["Use Script"][0]
            item_data.equip_script = table["Use Script"][2]
            item_data.unequip_script = table["Use Script"][4]
        elif "ID" in table.columns:
            # Table keyed by "ID" carries nothing we need; skip it.
            pass
        elif table[0][0] == "Name":
            item_data.name = table[1][0]
            # Cell looks like "<id> ..." — keep only the leading numeric id.
            item_data.item_id = int(table[1][1].split()[0])
            item_data.type = table[1][2]
        elif table[0][0] == "Weight":
            item_data.weight = int(table[1][0])
            # Price cells look like "1,000 Zeny" — drop label and commas.
            item_data.npc_buy = int(table[1][1].split()[0].replace(",", ""))
            item_data.npc_sell = int(table[1][2].split()[0].replace(",", ""))
            item_data.refineable = True if table[1][3] == "Yes" else False
            item_data.equip_locations = table[1][4]
        elif table[0][0] == "Range":
            item_data.range = int(table[1][0])
            item_data.defense = int(table[1][1])
            item_data.attack = int(table[1][2])
            item_data.weapon_level = int(table[1][3])
            item_data.slots = int(table[1][4])
        elif table[0][0] == "Level Range":
            item_data.level_range = table[1][0]
            item_data.usage = table[1][1]
            item_data.trade = table[1][2]
            # NOTE(review): plural attribute name here vs the singular
            # ``job_class_type`` field declared on the dataclass — verify
            # they are meant to match.
            item_data.job_class_types = table[1][3]
            item_data.job_classes = table[1][4]
            item_data.gender = table[1][5]
    return item_data
# Collect the locally mirrored item pages (one HTML file per item id).
BASE_DIR = Path.cwd()

items_pages = glob(f"{BASE_DIR / 'data/cp.originsro.org/item/view/index.html?id=*'}")

# Parse every page with pandas (slow), caching the raw tables in a pickle so
# re-runs can skip the HTML parsing step entirely.
if Path(BASE_DIR / "data/items_data.pkl").exists():
    with open(BASE_DIR / "data/items_data.pkl", "rb") as f:
        items_data = pickle.load(f)
else:
    items_data = {}
    for page in tqdm(items_pages):
        # Item id is the query-string value in the cached filename.
        item_id = page.split("=")[1]
        items_data[int(item_id)] = pd.read_html(page)

    with open(BASE_DIR / "data/items_data.pkl", "wb") as f:
        pickle.dump(items_data, f)

# Convert each parsed page into an Item ORM row.
items = []
for item_id in tqdm(sorted(list(items_data.keys()))):
    item_data = process_item(items_data[item_id])
    item = Item(
        # Basic Info
        name=item_data.name,
        item_id=item_data.item_id,
        type=item_data.type,
        # Details
        weight=item_data.weight,
        npc_buy=item_data.npc_buy,
        npc_sell=item_data.npc_sell,
        refineable=item_data.refineable,
        equip_locations=item_data.equip_locations,
        # Stats
        range=item_data.range,
        defense=item_data.defense,
        attack=item_data.attack,
        weapon_level=item_data.weapon_level,
        slots=item_data.slots,
        # Restrictions
        level_range=item_data.level_range,
        usage=item_data.usage,
        trade=item_data.trade,
        job_class_types=item_data.job_class_types,
        job_classes=item_data.job_classes,
        gender=item_data.gender,
        # Scripts
        use_script=item_data.use_script,
        equip_script=item_data.equip_script,
        unequip_script=item_data.unequip_script,
    )
    items.append(item)

# Bulk insert in one transaction.
session.bulk_save_objects(items)
session.commit()
| [
"db_models.Item",
"pickle.dump",
"pathlib.Path",
"pathlib.Path.cwd",
"db_models.session.commit",
"tqdm.tqdm",
"pickle.load",
"db_models.session.bulk_save_objects",
"pandas.read_html",
"glob.glob"
] | [((2348, 2358), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (2356, 2358), False, 'from pathlib import Path\n'), ((2374, 2445), 'glob.glob', 'glob', (['f"""{BASE_DIR / \'data/cp.originsro.org/item/view/index.html?id=*\'}"""'], {}), '(f"{BASE_DIR / \'data/cp.originsro.org/item/view/index.html?id=*\'}")\n', (2378, 2445), False, 'from glob import glob\n'), ((3933, 3965), 'db_models.session.bulk_save_objects', 'session.bulk_save_objects', (['items'], {}), '(items)\n', (3958, 3965), False, 'from db_models import session, Item\n'), ((3966, 3982), 'db_models.session.commit', 'session.commit', ([], {}), '()\n', (3980, 3982), False, 'from db_models import session, Item\n'), ((2637, 2654), 'tqdm.tqdm', 'tqdm', (['items_pages'], {}), '(items_pages)\n', (2641, 2654), False, 'from tqdm import tqdm\n'), ((2970, 3671), 'db_models.Item', 'Item', ([], {'name': 'item_data.name', 'item_id': 'item_data.item_id', 'type': 'item_data.type', 'weight': 'item_data.weight', 'npc_buy': 'item_data.npc_buy', 'npc_sell': 'item_data.npc_sell', 'refineable': 'item_data.refineable', 'equip_locations': 'item_data.equip_locations', 'range': 'item_data.range', 'defense': 'item_data.defense', 'attack': 'item_data.attack', 'weapon_level': 'item_data.weapon_level', 'slots': 'item_data.slots', 'level_range': 'item_data.level_range', 'usage': 'item_data.usage', 'trade': 'item_data.trade', 'job_class_types': 'item_data.job_class_types', 'job_classes': 'item_data.job_classes', 'gender': 'item_data.gender', 'use_script': 'item_data.use_script', 'equip_script': 'item_data.equip_script', 'unequip_script': 'item_data.unequip_script'}), '(name=item_data.name, item_id=item_data.item_id, type=item_data.type,\n weight=item_data.weight, npc_buy=item_data.npc_buy, npc_sell=item_data.\n npc_sell, refineable=item_data.refineable, equip_locations=item_data.\n equip_locations, range=item_data.range, defense=item_data.defense,\n attack=item_data.attack, weapon_level=item_data.weapon_level, slots=\n 
item_data.slots, level_range=item_data.level_range, usage=item_data.\n usage, trade=item_data.trade, job_class_types=item_data.job_class_types,\n job_classes=item_data.job_classes, gender=item_data.gender, use_script=\n item_data.use_script, equip_script=item_data.equip_script,\n unequip_script=item_data.unequip_script)\n', (2974, 3671), False, 'from db_models import session, Item\n'), ((2450, 2488), 'pathlib.Path', 'Path', (["(BASE_DIR / 'data/items_data.pkl')"], {}), "(BASE_DIR / 'data/items_data.pkl')\n", (2454, 2488), False, 'from pathlib import Path\n'), ((2580, 2594), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2591, 2594), False, 'import pickle\n'), ((2728, 2746), 'pandas.read_html', 'pd.read_html', (['page'], {}), '(page)\n', (2740, 2746), True, 'import pandas as pd\n'), ((2816, 2842), 'pickle.dump', 'pickle.dump', (['items_data', 'f'], {}), '(items_data, f)\n', (2827, 2842), False, 'import pickle\n')] |
"""
Allows manipulation of custom jobs.
Jobs include creating predictions or preferences for a custom sequence.
"""
import uuid
import datetime
from pred.queries.dbutil import update_database, read_database
from pred.webserver.customresult import CustomResultData
class JobStatus(object):
    """
    Enumerates the lifecycle states a background job moves through
    (NEW -> RUNNING -> COMPLETE or ERROR). Values are stored as-is
    in the job table's status column.
    """
    NEW = 'NEW'            # created, not yet picked up by a worker
    RUNNING = 'RUNNING'    # currently being processed
    COMPLETE = 'COMPLETE'  # finished successfully
    ERROR = 'ERROR'        # failed; error_msg holds the reason
class CustomJob(object):
    """
    CRUD helper for background job requests stored in the ``job`` table.

    A job performs some operation for a user in the background and records
    its status transitions plus created/finished timestamps as it runs.
    """
    # Every column except the primary key, in the order _load_non_key expects.
    NON_KEY_FIELDS = 'type, seq_id, model_name, status, created, finished, error_msg'

    def __init__(self, job_uuid):
        """
        Set up a job object for the given uuid (other fields start empty).
        :param job_uuid: str: uuid key of this job
        """
        if not job_uuid:
            raise ValueError("CustomJob uuid must have a value yours:'{}'.".format(job_uuid))
        self.uuid = job_uuid
        self.type = None
        self.model_name = None
        self.sequence_list = None
        self.status = JobStatus.NEW
        self.created = None
        self.finished = None
        self.error_msg = None

    def insert(self, db):
        """
        Persist this job as a new row. type, sequence_list, model_name and
        status must all be set first.
        :param db: DatabaseConnection: database to insert into
        """
        if not all((self.type, self.sequence_list, self.status, self.model_name)):
            raise ValueError("Type, sequence_list, model_name, and status properties "
                             "must be filled in before calling insert.")
        insert_sql = "insert into job(id, type, model_name, seq_id, status) values(%s, %s, %s, %s, %s)"
        update_database(db, insert_sql, [self.uuid, self.type, self.model_name, self.sequence_list, self.status])

    def delete(self, cur):
        """
        Remove this job's row using an existing cursor.
        :param cur: database cursor
        """
        cur.execute("delete from job where id = %s", [self.uuid])

    def load(self, db):
        """
        Populate this object's fields from the database row for self.uuid.
        :param db: DatabaseConnection: database to read from
        """
        select_sql = "select {} from job where id = %s".format(self.NON_KEY_FIELDS)
        records = read_database(db, select_sql, [self.uuid])
        self._load_non_key(records[0])

    def _load_non_key(self, row):
        # Unpack a NON_KEY_FIELDS-ordered row into attributes
        # (seq_id column maps to the sequence_list attribute).
        (self.type, self.sequence_list, self.model_name, self.status,
         self.created, self.finished, self.error_msg) = row

    def get_dict(self):
        """
        Get properties as a dictionary (includes the current UTC time so
        callers can judge how long the job has been running).
        :return: dict of job properties
        """
        return {
            'id': self.uuid,
            'status': self.status,
            'type': self.type,
            'sequence_list': self.sequence_list,
            'created': self.created,
            'finished': self.finished,
            'error_msg': self.error_msg,
            'model_name': self.model_name,
            'current_time': datetime.datetime.utcnow(),
        }

    @staticmethod
    def create_job(db, job_type, sequence_list, model_name):
        """
        Create and return a job with the specified properties.
        :param db: DatabaseConnection: database we will create the job in
        :param job_type: str: type of job to perform
        :param sequence_list: str: uuid of the custom sequence to use in this job
        :param model_name: str: name of the model to generate data with
        :return: CustomJob: job persisted to the database in NEW state.
        """
        job = CustomJob(str(uuid.uuid1()))
        job.type = job_type
        job.sequence_list = sequence_list
        job.model_name = model_name
        job.insert(db)
        return job

    @staticmethod
    def read_job(db, job_uuid):
        """
        Load a job based on its uuid.
        :param db: DatabaseConnection: database we will read the job details from
        :param job_uuid: str: uuid of this job
        :return: CustomJob with properties loaded from database
        """
        job = CustomJob(job_uuid)
        job.load(db)
        return job

    @staticmethod
    def set_job_running(db, job_uuid):
        """
        Transition a job from NEW to RUNNING; raises if it is not at NEW.
        :param db: DatabaseConnection: database to update
        :param job_uuid: str: uuid of this job
        """
        update_sql = "update job set status = %s where id = %s and status = %s"
        affected = update_database(db, update_sql, [JobStatus.RUNNING, job_uuid, JobStatus.NEW])
        if affected == 0:
            raise ValueError("No job found for {} at status {}".format(job_uuid, JobStatus.NEW))

    @staticmethod
    def set_job_complete(db, job_uuid):
        """
        Transition a job from RUNNING to COMPLETE and stamp the finished time;
        raises if it is not at RUNNING.
        :param db: DatabaseConnection: database to update
        :param job_uuid: str: uuid of this job
        """
        update_sql = "update job set status = %s, finished = CURRENT_TIMESTAMP where id = %s and status = %s"
        affected = update_database(db, update_sql, [JobStatus.COMPLETE, job_uuid, JobStatus.RUNNING])
        if affected == 0:
            raise ValueError("No job found for {} at status {}".format(job_uuid, JobStatus.RUNNING))

    @staticmethod
    def set_job_as_error(db, job_uuid, error_message):
        """
        Set a job to ERROR state (from any state) and stamp the finished time.
        :param db: DatabaseConnection: database to update
        :param job_uuid: str: uuid of this job
        :param error_message: str: error associated with the failure
        """
        if not error_message:
            raise ValueError("Missing required error_message.")
        update_sql = "update job set status = %s, error_msg = %s, finished = CURRENT_TIMESTAMP where id = %s"
        affected = update_database(db, update_sql, [JobStatus.ERROR, error_message, job_uuid])
        if affected == 0:
            raise ValueError("No job found for {}.".format(job_uuid))

    @staticmethod
    def find_jobs(db, job_status):
        """
        Find jobs ordered by creation time, optionally filtered by status.
        :param db: DatabaseConnection: database to read
        :param job_status: str: JobStatus property to filter by (falsy = all)
        :return: [CustomJob] jobs found
        """
        select_sql = "select id, {} from job".format(CustomJob.NON_KEY_FIELDS)
        params = []
        if job_status:
            select_sql += " WHERE status = %s"
            params.append(job_status)
        select_sql += " order by created "
        jobs = []
        for record in read_database(db, select_sql, params):
            job = CustomJob(record[0])
            job._load_non_key(record[1:])
            jobs.append(job)
        return jobs

    @staticmethod
    def find_existing_job(db, job_type, sequence_list, model_name):
        """
        Find a single job for the specified properties.
        :param db: DatabaseConnection: database to read
        :param job_type: str: type of job we are looking for
        :param sequence_list: str: uuid of sequence
        :param model_name: str: name of the model
        :return: CustomJob or None if not found
        """
        select_sql = "select id, {} from job " \
                     " WHERE seq_id = %s and type = %s and model_name = %s".format(CustomJob.NON_KEY_FIELDS)
        params = [sequence_list, job_type, model_name]
        for record in read_database(db, select_sql, params):
            job = CustomJob(record[0])
            job._load_non_key(record[1:])
            return job
        return None

    @staticmethod
    def find_old_jobs(cur, hours):
        """
        Find jobs that finished more than ``hours`` hours ago.
        :param cur: database cursor
        :param hours: int: age threshold in hours
        :return: [CustomJob] jobs found
        """
        select_sql = "select id, {} from job " \
                     "where CURRENT_TIMESTAMP - finished > interval '{} hours'".format(CustomJob.NON_KEY_FIELDS, hours)
        cur.execute(select_sql, [])
        jobs = []
        for record in cur.fetchall():
            job = CustomJob(record[0])
            job._load_non_key(record[1:])
            jobs.append(job)
        return jobs

    @staticmethod
    def delete_old_jobs(cur, hours):
        """
        Delete jobs (and their associated custom results) that finished more
        than ``hours`` hours ago.
        :param cur: database cursor
        :param hours: int: age threshold in hours
        """
        for stale_job in CustomJob.find_old_jobs(cur, hours):
            CustomResultData.delete_for_job(cur, stale_job.uuid)
            stale_job.delete(cur)
        return None
| [
"pred.queries.dbutil.read_database",
"datetime.datetime.utcnow",
"pred.webserver.customresult.CustomResultData.delete_for_job",
"pred.queries.dbutil.update_database",
"uuid.uuid1"
] | [((1602, 1711), 'pred.queries.dbutil.update_database', 'update_database', (['db', 'insert_sql', '[self.uuid, self.type, self.model_name, self.sequence_list, self.status]'], {}), '(db, insert_sql, [self.uuid, self.type, self.model_name,\n self.sequence_list, self.status])\n', (1617, 1711), False, 'from pred.queries.dbutil import update_database, read_database\n'), ((1926, 1968), 'pred.queries.dbutil.read_database', 'read_database', (['db', 'select_sql', '[self.uuid]'], {}), '(db, select_sql, [self.uuid])\n', (1939, 1968), False, 'from pred.queries.dbutil import update_database, read_database\n'), ((4172, 4249), 'pred.queries.dbutil.update_database', 'update_database', (['db', 'update_sql', '[JobStatus.RUNNING, job_uuid, JobStatus.NEW]'], {}), '(db, update_sql, [JobStatus.RUNNING, job_uuid, JobStatus.NEW])\n', (4187, 4249), False, 'from pred.queries.dbutil import update_database, read_database\n'), ((4811, 4898), 'pred.queries.dbutil.update_database', 'update_database', (['db', 'update_sql', '[JobStatus.COMPLETE, job_uuid, JobStatus.RUNNING]'], {}), '(db, update_sql, [JobStatus.COMPLETE, job_uuid, JobStatus.\n RUNNING])\n', (4826, 4898), False, 'from pred.queries.dbutil import update_database, read_database\n'), ((5588, 5663), 'pred.queries.dbutil.update_database', 'update_database', (['db', 'update_sql', '[JobStatus.ERROR, error_message, job_uuid]'], {}), '(db, update_sql, [JobStatus.ERROR, error_message, job_uuid])\n', (5603, 5663), False, 'from pred.queries.dbutil import update_database, read_database\n'), ((6338, 6375), 'pred.queries.dbutil.read_database', 'read_database', (['db', 'select_sql', 'params'], {}), '(db, select_sql, params)\n', (6351, 6375), False, 'from pred.queries.dbutil import update_database, read_database\n'), ((7171, 7208), 'pred.queries.dbutil.read_database', 'read_database', (['db', 'select_sql', 'params'], {}), '(db, select_sql, params)\n', (7184, 7208), False, 'from pred.queries.dbutil import update_database, read_database\n'), ((2614, 
2640), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2638, 2640), False, 'import datetime\n'), ((7899, 7949), 'pred.webserver.customresult.CustomResultData.delete_for_job', 'CustomResultData.delete_for_job', (['cur', 'old_job.uuid'], {}), '(cur, old_job.uuid)\n', (7930, 7949), False, 'from pred.webserver.customresult import CustomResultData\n'), ((3217, 3229), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (3227, 3229), False, 'import uuid\n')] |
import string
from rt import *
from listener import Listener
import traceback
import pytz
from datetime import datetime
class Ticket:
    """
    Slack bot component for RT tickets: expands "#1234" references in chat
    into ticket links and serves statistics commands (!response, !update,
    !last_updated, !untagged, !touch).
    """
    def __init__(self, client):
        self.client = client
        Listener.register(self.on_ready, "on_ready")
        Listener.register(self.on_message, "on_message")
        Listener.register(self.on_loop, "on_loop")
        self.rt_stat = RT_Stat()
        self.ticket_url = "https://support.oit.pdx.edu/Ticket/Display.html?id="
        self.update_thread = None
        self.current_day = 0 # To detect day change
        self.updated_today = 0 # To check if the bot have updated today.

    def send_message(self, *args):
        """ Shortener. """
        return self.client.rtm_send_message(*args)

    def on_ready(self):
        pass

    def on_loop(self):
        """ Periodic housekeeping: refresh the RT cache once per day and
        report when a background update finishes. """
        if not self.updated_today:
            self.updated_today = 1
            self.update_thread = RT.update_cache()
        current_time = datetime.now(pytz.timezone('US/Pacific') )
        if current_time.weekday() != self.current_day:
            self.current_day = current_time.weekday()
            # BUG FIX: reset the daily flag on day change; previously the
            # cache was only updated once over the bot's lifetime.
            self.updated_today = 0
        if self.update_thread and not self.update_thread.is_alive():
            # BUG FIX: keep a reference to the finished thread; the old code
            # set update_thread to None first, so hasattr(None, 'channel')
            # was always False and the completion message never sent.
            finished_thread = self.update_thread
            self.update_thread = None
            if hasattr(finished_thread, 'channel'):
                # If a channel is bundled, then send a message to the channel.
                error_count = finished_thread.result.get()
                response = "Done updating\n"
                if error_count:
                    response += "There were {} errors found. Check the error log to see what they were.".format(error_count)
                self.send_message(finished_thread.channel, response)
            print("Done updating!")

    def on_message(self, ctx):
        """ Handle an incoming message: link ticket numbers and dispatch commands.
        :param ctx: message context carrying .message, .command, .args, .channel
        """
        try: # Don't exit the bot when an error happens.
            if ctx.command and ctx.command[0] != '!':
                # Ticket linker.
                try:
                    ticket_list = self.parse_message_for_tickets(ctx.message)
                    response = ""
                    for ticket_number in ticket_list:
                        ticket = RT.get_ticket(ticket_number)
                        response += self.ticket_url + str(ticket_number) + "\n" + \
                                    "Subject: " + ticket.content['Subject'] + "\n"
                    self.send_message(ctx.channel, response)
                except Exception:
                    traceback.print_exc()
            if ctx.command in ["!response"]:
                if len(ctx.args) == 1:
                    try:
                        days_ago = int(ctx.args[0])
                    except ValueError:
                        traceback.print_exc()
                        # BUG FIX: the old code referenced an undefined
                        # "channel_id" and then used an unbound "days_ago".
                        self.send_message(ctx.channel, "Invalid value. Please enter amount of days.")
                        return
                    self.response_command(ctx.channel, days_ago)
            if ctx.command in ["!update"]:
                pre_response = "Updating {} tickets since {}".format(RT.get_amount_to_update(), RT.get_last_updated())
                self.send_message(ctx.channel, pre_response)
                self.update_thread = RT.update_cache()
                self.update_thread.channel = ctx.channel
            if ctx.command in ["!last_updated"]:
                response = "There are {} tickets to update since {}".format(RT.get_amount_to_update(), RT.get_last_updated())
                self.send_message(ctx.channel, response)
            if ctx.command in ["!untagged"]:
                untagged = self.rt_stat.untag_blame()
                if not untagged:
                    response = ":smile: Woo! All the tickets are tagged! :smile:"
                    self.send_message(ctx.channel, response)
                    return
                response = ":angry: Hey! You guys didn't tag your tickets!!! :angry:\n"
                for person in untagged.keys():
                    response += "{}: {}.\n".format(person, ", ".join(map(str, untagged[person])))
                response += "(This is only for fun, it's not designed to place blame on anyone!)"
                self.send_message(ctx.channel, response)
            if ctx.command in ['!touch', '!touches', '!tt']:
                # BUG FIX: the old code accepted a single argument and then
                # indexed ctx.args[1], raising IndexError.
                if len(ctx.args) >= 2:
                    username = ctx.args[0]
                    try:
                        days_ago = int(ctx.args[1])
                    except ValueError:
                        traceback.print_exc()
                        self.send_message(ctx.channel, "Invalid value. Please enter amount of days.")
                        return
                    self.ticket_touch_command(ctx.channel, days_ago, username)
                elif ctx.args:
                    self.send_message(ctx.channel, "Usage: !touch <username> <days>")
        except Exception:
            traceback.print_exc()
            self.send_message(ctx.channel, "An error has occured in the bot... :thinking_face:")

    def response_command(self, channel_id, days_ago):
        """ Report average/slowest/fastest first-response times for the last days_ago days. """
        # BUG FIX: honor the validation result; it was previously ignored.
        if not self.validate_days_ago(channel_id, days_ago):
            return
        response = self.rt_stat.get_average_response_time(days_ago)
        if response == None:
            self.send_message(channel_id, "No tickets found for the last {} days. Do !update to update cache.".format(days_ago))
            return
        avg_time, slowest, fastest, no_response, no_response_list = response
        avg_time = self.hms(int(avg_time))
        response = "Response time in the last " + str(days_ago) + " days:\n" + \
                   "Average time: {:.0f}h, {:.0f}m, {:.0f}s.".format(*avg_time) + \
                   "\nSlowest time: {:.0f}h, {:.0f}m, {:.0f}s, ticket #{}\n".format(*self.hms(slowest[1]), slowest[0]) + \
                   "Fastest time: {:.0f}h, {:.0f}m, {:.0f}s, ticket #{}\n".format(*self.hms(fastest[1]), fastest[0]) + \
                   "No response: {} out of {}.\n".format(*no_response) + \
                   "No response tickets: {}.\n".format(' '.join(["#" + str(s) for s in no_response_list])) + \
                   "(Note that this does not include weekends while calculating time)"
        self.send_message(channel_id, response)

    def ticket_touch_command(self, channel_id, days_ago, username=None):
        """ Report how many ticket touches a user made in the last days_ago days. """
        # BUG FIX: honor the validation result; it was previously ignored.
        if not self.validate_days_ago(channel_id, days_ago):
            return
        touch_dict = self.rt_stat.ticket_touches(days_ago, username)
        if username:
            touch_count = touch_dict # Rename.
            response = "{} ticket touches for {}".format(touch_count, username)
            self.send_message(channel_id, response)

    def validate_days_ago(self, channel_id, days_ago):
        """ Generic responder for invalid days_ago input. Returns False for invalid input."""
        if days_ago < 0:
            self.send_message(channel_id, "Positive numbers please!")
            return False
        if days_ago > 365:
            self.send_message(channel_id, "Sorry I only have tickets up to 1 year old... :cry:")
            return False
        return True

    def parse_message_for_tickets(self, message):
        """ Parse a message and create a sorted, de-duplicated integer list
        of "#1234"-style ticket numbers. """
        message_split = message.split(" ")
        ticket_list = []
        for word in message_split:
            if not word:
                continue
            if word[0] == '#':
                try:
                    # Strip punctuation so "#123," still parses as 123.
                    ticket_number = int(word[1:].translate(str.maketrans('', '', string.punctuation)))
                except ValueError:
                    continue
                if ticket_number < 0 or ticket_number in ticket_list:
                    continue
                ticket_list.append(ticket_number)
        ticket_list.sort()
        return ticket_list

    def hms(self, seconds):
        """
        Convert seconds to H:M:S in a tuple.
        """
        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        return (h, m, s)
| [
"pytz.timezone",
"listener.Listener.register",
"traceback.print_exc"
] | [((205, 249), 'listener.Listener.register', 'Listener.register', (['self.on_ready', '"""on_ready"""'], {}), "(self.on_ready, 'on_ready')\n", (222, 249), False, 'from listener import Listener\n'), ((258, 306), 'listener.Listener.register', 'Listener.register', (['self.on_message', '"""on_message"""'], {}), "(self.on_message, 'on_message')\n", (275, 306), False, 'from listener import Listener\n'), ((315, 357), 'listener.Listener.register', 'Listener.register', (['self.on_loop', '"""on_loop"""'], {}), "(self.on_loop, 'on_loop')\n", (332, 357), False, 'from listener import Listener\n'), ((979, 1006), 'pytz.timezone', 'pytz.timezone', (['"""US/Pacific"""'], {}), "('US/Pacific')\n", (992, 1006), False, 'import pytz\n'), ((4817, 4838), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4836, 4838), False, 'import traceback\n'), ((2436, 2457), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2455, 2457), False, 'import traceback\n'), ((2683, 2704), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2702, 2704), False, 'import traceback\n'), ((4521, 4542), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4540, 4542), False, 'import traceback\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 8 12:16:53 2021

@author: M.Tarka
"""

from machinetranslation import translator
from flask import Flask, render_template, request
#import json

# Flask application serving the translator index page and the two
# English<->French translation endpoints below.
app = Flask("Web Translator")
@app.route("/englishToFrench")
def english_to_french():
    """Translate the ``textToTranslate`` query parameter from English to French."""
    source_text = request.args.get('textToTranslate')
    translated = translator.english_to_french(source_text)
    return translated
@app.route("/frenchToEnglish")
def french_to_english():
    """Translate the ``textToTranslate`` query parameter from French to English."""
    source_text = request.args.get('textToTranslate')
    translated = translator.french_to_english(source_text)
    return translated
@app.route("/")
def renderIndexPage():
    """Serve the main translator page."""
    return render_template('index.html')
if __name__ == "__main__":
    # Bind to all network interfaces on port 8080 when run directly.
    app.run(host="0.0.0.0", port=8080)
"flask.render_template",
"flask.request.args.get",
"flask.Flask",
"machinetranslation.translator.english_to_french",
"machinetranslation.translator.french_to_english"
] | [((212, 235), 'flask.Flask', 'Flask', (['"""Web Translator"""'], {}), "('Web Translator')\n", (217, 235), False, 'from flask import Flask, render_template, request\n'), ((319, 354), 'flask.request.args.get', 'request.args.get', (['"""textToTranslate"""'], {}), "('textToTranslate')\n", (335, 354), False, 'from flask import Flask, render_template, request\n'), ((443, 488), 'machinetranslation.translator.english_to_french', 'translator.english_to_french', (['textToTranslate'], {}), '(textToTranslate)\n', (471, 488), False, 'from machinetranslation import translator\n'), ((596, 631), 'flask.request.args.get', 'request.args.get', (['"""textToTranslate"""'], {}), "('textToTranslate')\n", (612, 631), False, 'from flask import Flask, render_template, request\n'), ((722, 767), 'machinetranslation.translator.french_to_english', 'translator.french_to_english', (['textToTranslate'], {}), '(textToTranslate)\n', (750, 767), False, 'from machinetranslation import translator\n'), ((889, 918), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (904, 918), False, 'from flask import Flask, render_template, request\n')] |
# -*- coding: utf-8 -*-
"""
SQLpie License (MIT License)
Copyright (c) 2011-2016 <NAME>, http://sqlpie.com
See LICENSE file.
"""
from flask import Response
import json
import sqlpie
class SearchController(sqlpie.BaseController):
    """Controllers for the document indexing and search services."""

    @staticmethod
    @sqlpie.BaseController.controller_wrapper
    def service_index(request=None):
        """
        Index pending documents, optionally rebuilding the index first.

        The JSON payload may carry an "options" object; when it contains
        the rebuild flag (sqlpie.Indexer.REBUILD_PARAM) and the flag is
        truthy, the index is rebuilt before documents are indexed.
        """
        rebuild = False
        json_data = request.get_json()
        if "options" in json_data:
            options = json_data["options"]
            if sqlpie.Indexer.REBUILD_PARAM in options:
                rebuild = options[sqlpie.Indexer.REBUILD_PARAM]
        if rebuild:
            sqlpie.Indexer.rebuild()
        # Index pending documents regardless of whether a rebuild happened.
        sqlpie.Indexer().index_documents()
        return {'success': True}

    @staticmethod
    @sqlpie.BaseController.controller_wrapper
    def service_search(request):
        """
        Run a search from the JSON payload.

        Supports three modes: a plain query search, a tag-cloud search
        (tagcloud operator must be "relevance" or "frequency"), and a geo
        search (requires a numeric radius, a "lat,lon" target, and a sort
        mode of relevance or distance). Raises
        sqlpie.CustomException(INVALID_ARGUMENTS) for malformed arguments.
        """
        json_data = request.get_json()
        # Defaults for all supported parameters.
        query, tagcloud_search, geo_radius_search, geo_target_search = "", "", "", ""
        geo_sort_by = sqlpie.Searcher.SORT_BY_DISTANCE
        is_tagcloud_search = False
        is_geo_search = False
        num_results = 10
        start_result = 0
        # Pull each recognized operator out of the payload when present.
        if sqlpie.Searcher.QUERY_OPERATOR in json_data:
            query = json_data[sqlpie.Searcher.QUERY_OPERATOR]
        if sqlpie.Searcher.TAGCLOUD_OPERATOR in json_data:
            tagcloud_search = json_data[sqlpie.Searcher.TAGCLOUD_OPERATOR].lower()
        if sqlpie.Searcher.GEO_RADIUS_OPERATOR in json_data:
            geo_radius_search = json_data[sqlpie.Searcher.GEO_RADIUS_OPERATOR]
        if sqlpie.Searcher.GEO_TARGET_OPERATOR in json_data:
            geo_target_search = json_data[sqlpie.Searcher.GEO_TARGET_OPERATOR].lower()
        if sqlpie.Searcher.GEO_SORT_BY in json_data:
            geo_sort_by = json_data[sqlpie.Searcher.GEO_SORT_BY].lower()
        if sqlpie.Searcher.NUM_RESULTS in json_data:
            num_results = int(json_data[sqlpie.Searcher.NUM_RESULTS])
        if sqlpie.Searcher.START_RESULT in json_data:
            start_result = int(json_data[sqlpie.Searcher.START_RESULT])
        # Validate the tag-cloud sort mode before enabling that mode.
        if tagcloud_search:
            if not tagcloud_search in [sqlpie.Searcher.SORT_TAGCLOUD_BY_RELEVANCE, \
                                        sqlpie.Searcher.SORT_TAGCLOUD_BY_FREQUENCY]:
                raise sqlpie.CustomException(sqlpie.CustomException.INVALID_ARGUMENTS)
            else:
                is_tagcloud_search = True
        # Validate radius (numeric), target ("lat,lon" with numeric parts)
        # and sort mode before enabling geo search.
        if geo_radius_search or geo_target_search:
            if not sqlpie.Util.is_number(geo_radius_search) or not geo_radius_search or \
                not geo_target_search or not len(geo_target_search.split(",")) == 2 or \
                not sqlpie.Util.is_number(geo_target_search.split(",")[0]) or \
                not sqlpie.Util.is_number(geo_target_search.split(",")[1]) or \
                geo_sort_by not in [sqlpie.Searcher.SORT_BY_RELEVANCE, sqlpie.Searcher.SORT_BY_DISTANCE]:
                raise sqlpie.CustomException(sqlpie.CustomException.INVALID_ARGUMENTS)
            else:
                is_geo_search = True
        # Dispatch: tag-cloud takes priority over geo, then plain search.
        engine = sqlpie.Searcher(query)
        if is_tagcloud_search:
            results = engine.run_tagcloud(tagcloud_search, num_results)
        elif is_geo_search:
            results = engine.run_geosearch(geo_radius_search, geo_target_search, num_results, start_result, geo_sort_by)
        else:
            results = engine.run_searcher(num_results, start_result)
        return {'success': True, 'results':results}
| [
"sqlpie.CustomException",
"sqlpie.Indexer.rebuild",
"sqlpie.Util.is_number",
"sqlpie.Searcher",
"sqlpie.Indexer"
] | [((3047, 3069), 'sqlpie.Searcher', 'sqlpie.Searcher', (['query'], {}), '(query)\n', (3062, 3069), False, 'import sqlpie\n'), ((629, 653), 'sqlpie.Indexer.rebuild', 'sqlpie.Indexer.rebuild', ([], {}), '()\n', (651, 653), False, 'import sqlpie\n'), ((662, 678), 'sqlpie.Indexer', 'sqlpie.Indexer', ([], {}), '()\n', (676, 678), False, 'import sqlpie\n'), ((2266, 2330), 'sqlpie.CustomException', 'sqlpie.CustomException', (['sqlpie.CustomException.INVALID_ARGUMENTS'], {}), '(sqlpie.CustomException.INVALID_ARGUMENTS)\n', (2288, 2330), False, 'import sqlpie\n'), ((2909, 2973), 'sqlpie.CustomException', 'sqlpie.CustomException', (['sqlpie.CustomException.INVALID_ARGUMENTS'], {}), '(sqlpie.CustomException.INVALID_ARGUMENTS)\n', (2931, 2973), False, 'import sqlpie\n'), ((2461, 2501), 'sqlpie.Util.is_number', 'sqlpie.Util.is_number', (['geo_radius_search'], {}), '(geo_radius_search)\n', (2482, 2501), False, 'import sqlpie\n')] |
import numpy as np
__copyright__ = 'Copyright (C) 2018 ICTP'
__author__ = '<NAME> <<EMAIL>>'
__credits__ = ["<NAME>", "<NAME>"]
def get_x(lon, clon, cone):
    """
    Return the cone-scaled angle (radians) from longitude ``lon`` to the
    projection's central longitude ``clon``. When the two longitudes have
    opposite signs, a full-circle (360 degree) shift is applied if it
    yields the shorter arc.
    """
    delta = clon - lon
    if (clon >= 0.0) == (lon >= 0.0):
        # Same sign (zero counts as positive): no wrap candidate needed.
        return np.radians(delta) * cone
    # Opposite signs: consider shifting by a full circle toward clon's side.
    shifted = delta + 360.0 if clon >= 0.0 else delta - 360.0
    if abs(shifted) < abs(delta):
        delta = shifted
    return np.radians(delta) * cone
def grid_to_earth_uvrotate(proj, lon, lat, clon, clat, cone=None, plon=None,
                           plat=None):
    """
    Return the (cos, sin) rotation coefficients used to turn grid-relative
    wind components into earth-relative ones for the given projection.
    """
    if proj == 'NORMER':
        # Normal Mercator: identity rotation.
        return 1, 0
    if proj == 'ROTMER':
        # Rotated Mercator: rotation derived from the rotated pole position.
        lat_rad = np.radians(lat)
        # Zero the longitude term at the poles to avoid a degenerate angle.
        lon_rad = np.where(abs(lat) > 89.99999, 0.0, np.radians(lon))
        if plat > 0.0:
            pole_lon = plon + 180.0
            pole_phi = 90.0 - plat
        else:
            pole_lon = plon
            pole_phi = 90.0 + plat
        if pole_lon > 180.0:
            pole_lon = pole_lon - 360.0
        cos_pole = np.cos(np.radians(pole_phi))
        sin_pole = np.sin(np.radians(pole_phi))
        dlam = np.radians(pole_lon) - lon_rad
        arg1 = cos_pole * np.sin(dlam)
        arg2 = sin_pole * np.cos(lat_rad) - cos_pole * np.sin(lat_rad) * np.cos(dlam)
        norm = 1.0 / np.sqrt(arg1 ** 2 + arg2 ** 2)
        return arg2 * norm, arg1 * norm
    # Other (conic) projections: rotate by the cone-scaled longitude offset.
    if np.isscalar(lon):
        x = get_x(lon, clon, cone)
    else:
        x = np.vectorize(get_x, excluded=['clon', 'cone'])(lon, clon, cone)
    cos_x = np.cos(x)
    sin_x = np.sin(x)
    if clat >= 0:
        sin_x = -sin_x
    return cos_x, sin_x
| [
"numpy.radians",
"numpy.sqrt",
"numpy.isscalar",
"numpy.cos",
"numpy.sin",
"numpy.vectorize"
] | [((238, 260), 'numpy.radians', 'np.radians', (['(clon - lon)'], {}), '(clon - lon)\n', (248, 260), True, 'import numpy as np\n'), ((829, 844), 'numpy.radians', 'np.radians', (['lat'], {}), '(lat)\n', (839, 844), True, 'import numpy as np\n'), ((860, 875), 'numpy.radians', 'np.radians', (['lon'], {}), '(lon)\n', (870, 875), True, 'import numpy as np\n'), ((1564, 1580), 'numpy.isscalar', 'np.isscalar', (['lon'], {}), '(lon)\n', (1575, 1580), True, 'import numpy as np\n'), ((1746, 1755), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (1752, 1755), True, 'import numpy as np\n'), ((1769, 1778), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1775, 1778), True, 'import numpy as np\n'), ((1185, 1203), 'numpy.radians', 'np.radians', (['polphi'], {}), '(polphi)\n', (1195, 1203), True, 'import numpy as np\n'), ((1230, 1248), 'numpy.radians', 'np.radians', (['polphi'], {}), '(polphi)\n', (1240, 1248), True, 'import numpy as np\n'), ((1266, 1284), 'numpy.radians', 'np.radians', (['pollam'], {}), '(pollam)\n', (1276, 1284), True, 'import numpy as np\n'), ((1318, 1331), 'numpy.sin', 'np.sin', (['zrlap'], {}), '(zrlap)\n', (1324, 1331), True, 'import numpy as np\n'), ((1426, 1458), 'numpy.sqrt', 'np.sqrt', (['(zarg1 ** 2 + zarg2 ** 2)'], {}), '(zarg1 ** 2 + zarg2 ** 2)\n', (1433, 1458), True, 'import numpy as np\n'), ((1651, 1697), 'numpy.vectorize', 'np.vectorize', (['get_x'], {'excluded': "['clon', 'cone']"}), "(get_x, excluded=['clon', 'cone'])\n", (1663, 1697), True, 'import numpy as np\n'), ((364, 392), 'numpy.radians', 'np.radians', (['(clon - lon + 360)'], {}), '(clon - lon + 360)\n', (374, 392), True, 'import numpy as np\n'), ((433, 455), 'numpy.radians', 'np.radians', (['(clon - lon)'], {}), '(clon - lon)\n', (443, 455), True, 'import numpy as np\n'), ((531, 559), 'numpy.radians', 'np.radians', (['(clon - lon - 360)'], {}), '(clon - lon - 360)\n', (541, 559), True, 'import numpy as np\n'), ((593, 615), 'numpy.radians', 'np.radians', (['(clon - lon)'], {}), '(clon - 
lon)\n', (603, 615), True, 'import numpy as np\n'), ((1356, 1368), 'numpy.cos', 'np.cos', (['zphi'], {}), '(zphi)\n', (1362, 1368), True, 'import numpy as np\n'), ((1392, 1405), 'numpy.cos', 'np.cos', (['zrlap'], {}), '(zrlap)\n', (1398, 1405), True, 'import numpy as np\n'), ((1379, 1391), 'numpy.sin', 'np.sin', (['zphi'], {}), '(zphi)\n', (1385, 1391), True, 'import numpy as np\n')] |
import os
import shutil
def move_files(files, src_prefix, dst_prefix, app_name):
    """
    Copy scaffold files from ``src_prefix`` into ``dst_prefix``.

    :param files: dict mapping file name -> attribute dict; when
        ``attributes["static"]`` is true the file is copied verbatim,
        otherwise every ``{{ app_name }}`` placeholder is replaced.
    :param src_prefix: directory containing the source files
    :param dst_prefix: directory the files are written into
    :param app_name: value substituted for the ``{{ app_name }}`` placeholder
    """
    for file_name, attributes in files.items():
        file_path = os.path.join(src_prefix, file_name)
        dest_path = os.path.join(dst_prefix, file_name)
        if attributes["static"]:
            shutil.copy(file_path, dest_path)
        else:
            # BUG FIX: read/write with an explicit encoding so templating
            # does not depend on the platform default (e.g. cp1252 on
            # Windows, which can corrupt UTF-8 template files).
            with open(file_path, "r", encoding="utf-8") as file:
                content = file.read()
            with open(dest_path, "w", encoding="utf-8") as file:
                file.write(content.replace("{{ app_name }}", app_name))
def create_project(name):
    """
    Scaffold a new dupgee project called *name* under the current directory.

    Creates ``<cwd>/<name>`` with a ``templates/`` folder and a ``dupgee/``
    package copied from the bundled framework sources, then writes a
    starter ``index.html``.
    """
    root = os.path.join(os.getcwd(), name)
    templates_dir = os.path.join(root, "templates")
    dupgee_dir = os.path.join(root, "dupgee")
    for directory in (root, templates_dir, dupgee_dir):
        os.mkdir(directory)

    # Source locations are resolved relative to this module's file.
    here = os.path.dirname(os.path.abspath(__file__))
    base_src = os.path.join(here, "base")
    dupgee_src = os.path.join(base_src, "dupgee")

    # "static": True means copy verbatim; False means render the
    # {{ app_name }} placeholder with the project name.
    base_files = {
        "runner.py": {"static": False},
        "urls.py": {"static": True},
        "pages.py": {"static": True},
        "__init__.py": {"static": True},
    }
    dupgee_files = {
        "__init__.py": {"static": True},
        "parsers.py": {"static": True},
        "render.py": {"static": False},
        "response.py": {"static": True},
        "request.py": {"static": True},
        "views.py": {"static": True},
        "server.py": {"static": True},
        "utils.py": {"static": True},
        "wifi.py": {"static": True},
        "matcher.py": {"static": False},
    }
    move_files(base_files, base_src, root, app_name=name)
    move_files(dupgee_files, dupgee_src, dupgee_dir, app_name=name)

    index_html = """<html lang="en">
    <head>
        <title>Dupgee Framework</title>
    </head>
    <body>
        <h2>{{ name }}</h2>
    </body>
</html>"""
    with open(os.path.join(templates_dir, "index.html"), "w") as f:
        f.write(index_html)

    print(f"{name} project created at {root}")
| [
"os.path.join",
"os.getcwd",
"os.mkdir",
"shutil.copy",
"os.path.abspath"
] | [((648, 687), 'os.path.join', 'os.path.join', (['project_path', '"""templates"""'], {}), "(project_path, 'templates')\n", (660, 687), False, 'import os\n'), ((714, 750), 'os.path.join', 'os.path.join', (['project_path', '"""dupgee"""'], {}), "(project_path, 'dupgee')\n", (726, 750), False, 'import os\n'), ((756, 778), 'os.mkdir', 'os.mkdir', (['project_path'], {}), '(project_path)\n', (764, 778), False, 'import os\n'), ((783, 815), 'os.mkdir', 'os.mkdir', (['project_templates_path'], {}), '(project_templates_path)\n', (791, 815), False, 'import os\n'), ((820, 849), 'os.mkdir', 'os.mkdir', (['project_dupgee_path'], {}), '(project_dupgee_path)\n', (828, 849), False, 'import os\n'), ((930, 965), 'os.path.join', 'os.path.join', (['absolute_path', '"""base"""'], {}), "(absolute_path, 'base')\n", (942, 965), False, 'import os\n'), ((984, 1017), 'os.path.join', 'os.path.join', (['base_path', '"""dupgee"""'], {}), "(base_path, 'dupgee')\n", (996, 1017), False, 'import os\n'), ((151, 186), 'os.path.join', 'os.path.join', (['src_prefix', 'file_name'], {}), '(src_prefix, file_name)\n', (163, 186), False, 'import os\n'), ((207, 242), 'os.path.join', 'os.path.join', (['dst_prefix', 'file_name'], {}), '(dst_prefix, file_name)\n', (219, 242), False, 'import os\n'), ((600, 611), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (609, 611), False, 'import os\n'), ((887, 912), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (902, 912), False, 'import os\n'), ((288, 321), 'shutil.copy', 'shutil.copy', (['file_path', 'dest_path'], {}), '(file_path, dest_path)\n', (299, 321), False, 'import shutil\n'), ((1784, 1834), 'os.path.join', 'os.path.join', (['project_templates_path', '"""index.html"""'], {}), "(project_templates_path, 'index.html')\n", (1796, 1834), False, 'import os\n')] |
from setuptools import setup, find_packages
import os

# Resolve all data files relative to this setup.py so builds work from any cwd.
setup_dir = os.path.dirname(__file__)
readme_path = os.path.join(setup_dir, 'README.rst')
version_path = os.path.join(setup_dir, 'keyfree/version.py')
requirements_path = os.path.join(setup_dir, "requirements.txt")
requirements_dev_path = os.path.join(setup_dir, "requirements-dev.txt")

# __version__ is populated by exec'ing keyfree/version.py below, so the
# version number lives in a single place inside the package.
__version__ = None
with open(version_path) as f:
    code = compile(f.read(), version_path, 'exec')
    exec(code)

# Long description for PyPI comes straight from the README.
with open(readme_path) as req_file:
    readme = req_file.read()

# Runtime and development dependencies, one requirement per line.
with open(requirements_path) as req_file:
    requirements = req_file.read().splitlines()

with open(requirements_dev_path) as req_file:
    requirements_dev = req_file.read().splitlines()

setup(
    name='keyfree',
    version=__version__,
    author='<NAME>',
    author_email='<EMAIL>',
    packages=find_packages(),
    url='https://github.com/nickrw/keyfree',
    description='An authentication proxy for Amazon Elasticsearch Service',
    long_description=readme,
    install_requires=requirements,
    tests_require=requirements_dev,
    package_data={'keyfree': ['requirements.txt', 'requirements-dev.txt']},
    entry_points={
        'console_scripts': ['keyfree-proxy-test=keyfree.proxy:main'],
    },
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Environment :: No Input/Output (Daemon)',
        'Topic :: Internet :: Proxy Servers',
        'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware',
    ],
)
| [
"os.path.dirname",
"setuptools.find_packages",
"os.path.join"
] | [((68, 93), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (83, 93), False, 'import os\n'), ((108, 145), 'os.path.join', 'os.path.join', (['setup_dir', '"""README.rst"""'], {}), "(setup_dir, 'README.rst')\n", (120, 145), False, 'import os\n'), ((161, 206), 'os.path.join', 'os.path.join', (['setup_dir', '"""keyfree/version.py"""'], {}), "(setup_dir, 'keyfree/version.py')\n", (173, 206), False, 'import os\n'), ((227, 270), 'os.path.join', 'os.path.join', (['setup_dir', '"""requirements.txt"""'], {}), "(setup_dir, 'requirements.txt')\n", (239, 270), False, 'import os\n'), ((295, 342), 'os.path.join', 'os.path.join', (['setup_dir', '"""requirements-dev.txt"""'], {}), "(setup_dir, 'requirements-dev.txt')\n", (307, 342), False, 'import os\n'), ((830, 845), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (843, 845), False, 'from setuptools import setup, find_packages\n')] |
import logging
import os
import shutil
import tempfile
from urllib import request as request
from urllib.error import HTTPError, URLError
from ase import Atoms
import numpy as np
from schnetpack.data import AtomsData
from schnetpack.environment import SimpleEnvironmentProvider
class MD17(AtomsData):
"""
MD17 benchmark data set for molecular dynamics of small molecules containing molecular forces.
Args:
path (str): path to database
dataset (str): Name of molecule to load into database. Allowed are:
aspirin
benzene
ethanol
malonaldehyde
naphthalene
salicylic_acid
toluene
uracil
subset (list): indices of subset. Set to None for entire dataset
(default: None)
download (bool): set true if dataset should be downloaded
(default: True)
calculate_triples (bool): set true if triples for angular functions
should be computed (default: False)
parse_all (bool): set true to generate the ase dbs of all molecules in
the beginning (default: False)
See: http://quantum-machine.org/datasets/
"""
energies = 'energy'
forces = 'forces'
datasets_dict = dict(aspirin='aspirin_dft.npz',
#aspirin_ccsd='aspirin_ccsd.zip',
azobenzene='azobenzene_dft.npz',
benzene='benzene_dft.npz',
ethanol='ethanol_dft.npz',
#ethanol_ccsdt='ethanol_ccsd_t.zip',
malonaldehyde='malonaldehyde_dft.npz',
#malonaldehyde_ccsdt='malonaldehyde_ccsd_t.zip',
naphthalene='naphthalene_dft.npz',
paracetamol='paracetamol_dft.npz',
salicylic_acid='salicylic_dft.npz',
toluene='toluene_dft.npz',
#toluene_ccsdt='toluene_ccsd_t.zip',
uracil='uracil_dft.npz'
)
existing_datasets = datasets_dict.keys()
def __init__(self, dbdir, dataset, subset=None, download=True, collect_triples=False, parse_all=False,
properties=None):
self.load_all = parse_all
if dataset not in self.datasets_dict.keys():
raise ValueError("Unknown dataset specification {:s}".format(dataset))
self.dbdir = dbdir
self.dataset = dataset
self.database = dataset + ".db"
dbpath = os.path.join(self.dbdir, self.database)
self.collect_triples = collect_triples
environment_provider = SimpleEnvironmentProvider()
if properties is None:
properties = ["energy", "forces"]
super(MD17, self).__init__(dbpath, subset, properties, environment_provider,
collect_triples)
if download:
self.download()
def create_subset(self, idx):
idx = np.array(idx)
subidx = idx if self.subset is None else np.array(self.subset)[idx]
return MD17(self.dbdir, self.dataset, subset=subidx, download=False, collect_triples=self.collect_triples)
def download(self):
"""
download data if not already on disk.
"""
success = True
if not os.path.exists(self.dbdir):
os.makedirs(self.dbdir)
if not os.path.exists(self.dbpath):
success = success and self._load_data()
return success
def _load_data(self):
for molecule in self.datasets_dict.keys():
# if requested, convert only the required molecule
if not self.load_all:
if molecule != self.dataset:
continue
logging.info("Downloading {} data".format(molecule))
tmpdir = tempfile.mkdtemp("MD")
rawpath = os.path.join(tmpdir, self.datasets_dict[molecule])
url = "http://www.quantum-machine.org/gdml/data/npz/" + self.datasets_dict[molecule]
try:
request.urlretrieve(url, rawpath)
except HTTPError as e:
logging.error("HTTP Error:", e.code, url)
return False
except URLError as e:
logging.error("URL Error:", e.reason, url)
return False
logging.info("Parsing molecule {:s}".format(molecule))
data = np.load(rawpath)
numbers = data['z']
atoms_list = []
properties_list = []
for positions, energies, forces in zip(data['R'], data['E'], data['F']):
properties_list.append(dict(energy=energies, forces=forces))
atoms_list.append(Atoms(positions=positions, numbers=numbers))
self.add_systems(atoms_list, properties_list)
logging.info("Cleanining up the mess...")
logging.info('{} molecule done'.format(molecule))
shutil.rmtree(tmpdir)
return True
| [
"os.path.exists",
"os.makedirs",
"urllib.request.urlretrieve",
"ase.Atoms",
"os.path.join",
"numpy.array",
"tempfile.mkdtemp",
"shutil.rmtree",
"numpy.load",
"logging.info",
"logging.error",
"schnetpack.environment.SimpleEnvironmentProvider"
] | [((2703, 2742), 'os.path.join', 'os.path.join', (['self.dbdir', 'self.database'], {}), '(self.dbdir, self.database)\n', (2715, 2742), False, 'import os\n'), ((2822, 2849), 'schnetpack.environment.SimpleEnvironmentProvider', 'SimpleEnvironmentProvider', ([], {}), '()\n', (2847, 2849), False, 'from schnetpack.environment import SimpleEnvironmentProvider\n'), ((3166, 3179), 'numpy.array', 'np.array', (['idx'], {}), '(idx)\n', (3174, 3179), True, 'import numpy as np\n'), ((5039, 5080), 'logging.info', 'logging.info', (['"""Cleanining up the mess..."""'], {}), "('Cleanining up the mess...')\n", (5051, 5080), False, 'import logging\n'), ((5147, 5168), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (5160, 5168), False, 'import shutil\n'), ((3504, 3530), 'os.path.exists', 'os.path.exists', (['self.dbdir'], {}), '(self.dbdir)\n', (3518, 3530), False, 'import os\n'), ((3544, 3567), 'os.makedirs', 'os.makedirs', (['self.dbdir'], {}), '(self.dbdir)\n', (3555, 3567), False, 'import os\n'), ((3584, 3611), 'os.path.exists', 'os.path.exists', (['self.dbpath'], {}), '(self.dbpath)\n', (3598, 3611), False, 'import os\n'), ((4026, 4048), 'tempfile.mkdtemp', 'tempfile.mkdtemp', (['"""MD"""'], {}), "('MD')\n", (4042, 4048), False, 'import tempfile\n'), ((4071, 4121), 'os.path.join', 'os.path.join', (['tmpdir', 'self.datasets_dict[molecule]'], {}), '(tmpdir, self.datasets_dict[molecule])\n', (4083, 4121), False, 'import os\n'), ((4619, 4635), 'numpy.load', 'np.load', (['rawpath'], {}), '(rawpath)\n', (4626, 4635), True, 'import numpy as np\n'), ((3229, 3250), 'numpy.array', 'np.array', (['self.subset'], {}), '(self.subset)\n', (3237, 3250), True, 'import numpy as np\n'), ((4253, 4286), 'urllib.request.urlretrieve', 'request.urlretrieve', (['url', 'rawpath'], {}), '(url, rawpath)\n', (4272, 4286), True, 'from urllib import request as request\n'), ((4338, 4379), 'logging.error', 'logging.error', (['"""HTTP Error:"""', 'e.code', 'url'], {}), "('HTTP Error:', e.code, 
url)\n", (4351, 4379), False, 'import logging\n'), ((4459, 4501), 'logging.error', 'logging.error', (['"""URL Error:"""', 'e.reason', 'url'], {}), "('URL Error:', e.reason, url)\n", (4472, 4501), False, 'import logging\n'), ((4926, 4969), 'ase.Atoms', 'Atoms', ([], {'positions': 'positions', 'numbers': 'numbers'}), '(positions=positions, numbers=numbers)\n', (4931, 4969), False, 'from ase import Atoms\n')] |
import sys
import os
sys.path.append( os.path.abspath("../") )
import myhdl
from myhdl import (Signal, intbv, instance, always_comb, delay, always,
StopSimulation, block)
from rhea.system import Global, Clock, Reset, FIFOBus, Signals
from rhea.cores.spi import SPIBus, spi_slave_fifo_async
from rhea.utils.test import run_testbench, tb_default_args, tb_args
from ser import ser
from ClkDriver import ClkDriver
from pulsegen import pulsegen
sck = Signal(False)
mosi = Signal(False)
miso = Signal(False)
cs = Signal(True)
leds = Signal(intbv(0)[8:])
out = Signal(True)
clock = Signal(False)
tx = Signal(intbv(0)[8:])
#rx = Signal(intbv(0)[8:])
enable = Signal (False)
#reset = ResetSignal(False)
@block
def divisor(
# ~~~[Ports]~~~
clk_in, # input : clock
clk_out, # output : one pulse will start every frequence clock cycles
# ~~~[Parameters]~~~
division = 100
):
div_mem = Signal(intbv(0)[1:0])
clk_cnt = Signal(intbv(0, min=0, max=division))
@always(clk_in.posedge)
def beh_strobe():
if clk_cnt >= division-1:
div_mem.next = not div_mem
clk_cnt.next = 0
else:
clk_cnt.next = clk_cnt + 1
@always_comb
def beh_map_output():
clk_out.next = div_mem
return beh_strobe, beh_map_output
@block
def recv_to_plsgen(clock_div, clock, fifobus, leds, out):
reading= Signal (False)
pulse_in = Signal(intbv(0)[8:])
plsgen = pulsegen(clock=clock, frequence=pulse_in, duration=10, out_pulse = out)
@always(clock_div.posedge)
def go_to_pulse():
if reading :
pulse_in.next = fifobus.read_data
leds.next = fifobus.read_data
fifobus.read.next = False
reading.next = False
else:
if not fifobus.empty :
fifobus.read.next = True
reading.next = True
return plsgen, go_to_pulse
@block
def spi_slave_pulsegen(clock, sck, mosi, miso, cs, leds, out):
clk_div = Signal(False)
clk_pulse = Signal(False)
glbl = Global(clock)
spibus = SPIBus(sck=sck, mosi=mosi, miso=miso, ss=cs)
fifobus = FIFOBus()
fifobus.write_clock=clock
fifobus.read_clock=clock
div = divisor (clock, clk_div, 1)
divp = divisor (clock, clk_pulse, 1)
rtl = recv_to_plsgen(clk_div, clk_pulse, fifobus,leds, out)
#rtl = recv_to_led(clk_div, fifobus, leds)
tbdut = spi_slave_fifo_async(glbl, spibus, fifobus)
@always_comb
def map():
spibus.csn.next = cs
return myhdl.instances()
@block
def test_spi_pulsegen(clock, sck, mosi, miso, cs, leds, out):
clkdrv = ClkDriver(clock,period=10)
ssled = spi_slave_pulsegen(clock, sck, mosi, miso, cs, leds, out)
ts = ser (clock, tx, mosi, enable)
@always_comb
def map():
sck.next = clock
@instance
def tbstim():
yield delay(15)
enable.next=1
tx.next=42
cs.next=0
yield delay(90)
enable.next=0
cs.next=1
#assert rx == 42
yield delay(15)
enable.next=1
tx.next=98
cs.next=0
yield delay(90)
cs.next=1
tx.next=23
yield delay(20)
#assert rx == 98
yield delay(90)
#assert rx == 23
enable.next=0
yield delay(100)
return myhdl.instances()
if "--test" in str(sys.argv):
do_test=True
else:
do_test=False
if do_test:
tr = test_spi_pulsegen(clock, sck, mosi,miso, cs, leds, out)
tr.config_sim(trace=True)
tr.run_sim(1000)
else:
tr = spi_slave_pulsegen(clock, sck, mosi,miso, cs, leds, out)
tr.convert('Verilog',initial_values=True)
| [
"myhdl.always",
"rhea.cores.spi.SPIBus",
"myhdl.instances",
"myhdl.Signal",
"ser.ser",
"rhea.system.Global",
"pulsegen.pulsegen",
"myhdl.intbv",
"os.path.abspath",
"myhdl.delay",
"ClkDriver.ClkDriver",
"rhea.system.FIFOBus",
"rhea.cores.spi.spi_slave_fifo_async"
] | [((469, 482), 'myhdl.Signal', 'Signal', (['(False)'], {}), '(False)\n', (475, 482), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((490, 503), 'myhdl.Signal', 'Signal', (['(False)'], {}), '(False)\n', (496, 503), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((512, 525), 'myhdl.Signal', 'Signal', (['(False)'], {}), '(False)\n', (518, 525), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((532, 544), 'myhdl.Signal', 'Signal', (['(True)'], {}), '(True)\n', (538, 544), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((580, 592), 'myhdl.Signal', 'Signal', (['(True)'], {}), '(True)\n', (586, 592), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((603, 616), 'myhdl.Signal', 'Signal', (['(False)'], {}), '(False)\n', (609, 616), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((681, 694), 'myhdl.Signal', 'Signal', (['(False)'], {}), '(False)\n', (687, 694), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((38, 60), 'os.path.abspath', 'os.path.abspath', (['"""../"""'], {}), "('../')\n", (53, 60), False, 'import os\n'), ((987, 1009), 'myhdl.always', 'always', (['clk_in.posedge'], {}), '(clk_in.posedge)\n', (993, 1009), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((1325, 1338), 'myhdl.Signal', 'Signal', (['(False)'], {}), '(False)\n', (1331, 1338), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((1389, 1458), 'pulsegen.pulsegen', 'pulsegen', ([], {'clock': 'clock', 'frequence': 'pulse_in', 'duration': '(10)', 'out_pulse': 'out'}), '(clock=clock, 
frequence=pulse_in, duration=10, out_pulse=out)\n', (1397, 1458), False, 'from pulsegen import pulsegen\n'), ((1467, 1492), 'myhdl.always', 'always', (['clock_div.posedge'], {}), '(clock_div.posedge)\n', (1473, 1492), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((1939, 1952), 'myhdl.Signal', 'Signal', (['(False)'], {}), '(False)\n', (1945, 1952), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((1969, 1982), 'myhdl.Signal', 'Signal', (['(False)'], {}), '(False)\n', (1975, 1982), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((1994, 2007), 'rhea.system.Global', 'Global', (['clock'], {}), '(clock)\n', (2000, 2007), False, 'from rhea.system import Global, Clock, Reset, FIFOBus, Signals\n'), ((2021, 2065), 'rhea.cores.spi.SPIBus', 'SPIBus', ([], {'sck': 'sck', 'mosi': 'mosi', 'miso': 'miso', 'ss': 'cs'}), '(sck=sck, mosi=mosi, miso=miso, ss=cs)\n', (2027, 2065), False, 'from rhea.cores.spi import SPIBus, spi_slave_fifo_async\n'), ((2080, 2089), 'rhea.system.FIFOBus', 'FIFOBus', ([], {}), '()\n', (2087, 2089), False, 'from rhea.system import Global, Clock, Reset, FIFOBus, Signals\n'), ((2353, 2396), 'rhea.cores.spi.spi_slave_fifo_async', 'spi_slave_fifo_async', (['glbl', 'spibus', 'fifobus'], {}), '(glbl, spibus, fifobus)\n', (2373, 2396), False, 'from rhea.cores.spi import SPIBus, spi_slave_fifo_async\n'), ((2471, 2488), 'myhdl.instances', 'myhdl.instances', ([], {}), '()\n', (2486, 2488), False, 'import myhdl\n'), ((2573, 2600), 'ClkDriver.ClkDriver', 'ClkDriver', (['clock'], {'period': '(10)'}), '(clock, period=10)\n', (2582, 2600), False, 'from ClkDriver import ClkDriver\n'), ((2679, 2707), 'ser.ser', 'ser', (['clock', 'tx', 'mosi', 'enable'], {}), '(clock, tx, mosi, enable)\n', (2682, 2707), False, 'from ser import ser\n'), ((3276, 3293), 'myhdl.instances', 'myhdl.instances', ([], {}), 
'()\n', (3291, 3293), False, 'import myhdl\n'), ((559, 567), 'myhdl.intbv', 'intbv', (['(0)'], {}), '(0)\n', (564, 567), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((631, 639), 'myhdl.intbv', 'intbv', (['(0)'], {}), '(0)\n', (636, 639), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((953, 982), 'myhdl.intbv', 'intbv', (['(0)'], {'min': '(0)', 'max': 'division'}), '(0, min=0, max=division)\n', (958, 982), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((920, 928), 'myhdl.intbv', 'intbv', (['(0)'], {}), '(0)\n', (925, 928), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((1362, 1370), 'myhdl.intbv', 'intbv', (['(0)'], {}), '(0)\n', (1367, 1370), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((2815, 2824), 'myhdl.delay', 'delay', (['(15)'], {}), '(15)\n', (2820, 2824), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((2898, 2907), 'myhdl.delay', 'delay', (['(90)'], {}), '(90)\n', (2903, 2907), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((2988, 2997), 'myhdl.delay', 'delay', (['(15)'], {}), '(15)\n', (2993, 2997), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((3071, 3080), 'myhdl.delay', 'delay', (['(90)'], {}), '(90)\n', (3076, 3080), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((3132, 3141), 'myhdl.delay', 'delay', (['(20)'], {}), '(20)\n', (3137, 3141), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((3181, 3190), 'myhdl.delay', 'delay', (['(90)'], {}), '(90)\n', (3186, 
3190), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n'), ((3252, 3262), 'myhdl.delay', 'delay', (['(100)'], {}), '(100)\n', (3257, 3262), False, 'from myhdl import Signal, intbv, instance, always_comb, delay, always, StopSimulation, block\n')] |
#Faça um programa que mostre na tela uma contagem regressiva para
# o estouro de fogos de artifício, indo de 10 até 0, com uma pausa
# de 1 segundo entre eles.
from time import sleep
for i in range(10, -1, -1):
print('{}'.format(i))
sleep(1)
print('Bum, BUM, POW')
| [
"time.sleep"
] | [((242, 250), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (247, 250), False, 'from time import sleep\n')] |
"""adding nodes / edges
Revision ID: eec5a7359447
Revises: <KEY>
Create Date: 2021-11-08 21:43:02.200062
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'eec5a7359447'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('project_nodes', sa.Column('keyword_id', sa.Integer(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('project_nodes', 'keyword_id')
# ### end Alembic commands ###
| [
"alembic.op.drop_column",
"sqlalchemy.Integer"
] | [((586, 631), 'alembic.op.drop_column', 'op.drop_column', (['"""project_nodes"""', '"""keyword_id"""'], {}), "('project_nodes', 'keyword_id')\n", (600, 631), False, 'from alembic import op\n'), ((432, 444), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (442, 444), True, 'import sqlalchemy as sa\n')] |
#!/usr/bin/env python3
from random import randint
class Caesar(object):
def shift(self, offset):
"""Shifts the alphabet using a random number.
Returns the value of the shift."""
self.alphabet = [
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z'
]
self.new_alphabet = list(self.alphabet)
for i in range(26 - int(offset)):
# Takes first index and appends it to the end.
self.new_alphabet.insert(0, self.new_alphabet.pop(-1))
return offset
def encrypt(self, text, key):
"""The function takes an input then
then returns the encrypted output and key."""
text = text.lower()
key = self.shift(key)
encrypted_text = []
for c in text:
# Takes letter input then appends the output to a list.
print(self.new_alphabet[self.alphabet.index(c)])
encrypted_text.append(self.new_alphabet[self.alphabet.index(c)])
return "".join(encrypted_text), key # Returns the encrypted text and the key.
def decrypt(self, cypher, key):
"""This function takes the encrypted text and key then returns
the original text."""
decrypted_text = []
self.shift(key) # Shift alphabet using value from key.
for i in range(len(cypher)):
# Takes encrypted letter and returns original letter.
decrypted_text.append(self.alphabet[self.new_alphabet.index(
cypher[i])])
return "".join(decrypted_text)
class Ncaesar(object):
"""This encryption method is like the Caesar Cypher however it does a
different alphabet shift for each letter. This results in a more
secure encryption method, however the key is longer."""
def shift(self, offset):
self.alphabet = [
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z'
]
self.new_alphabet = [
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z'
]
for i in range(int(offset)):
self.new_alphabet.insert(0, self.new_alphabet.pop(-1))
return offset
def encrypt(self, text):
"""Does exactly the same as the Ceaser method but uses a
different key for each letter."""
text = text.lower()
"""Does exactly the same as the Caeser encrypt method but
uses a different key for each letter."""
key = []
encrypted_text = []
for c in text:
# Shifts alphabet for each letter and generates key + text
key.append(self.shift(randint(0, 26)))
encrypted_text.append(self.new_alphabet[self.alphabet.index(c)])
return "".join(encrypted_text), key
def decrypt(self, cypher, key):
# Decrypted each letter in text.
decrypted_text = []
for i in range(len(key)):
self.shift(key[i])
decrypted_text.append(self.alphabet[self.new_alphabet.index(
cypher[i])])
print(i)
return "".join(decrypted_text)
| [
"random.randint"
] | [((3178, 3192), 'random.randint', 'randint', (['(0)', '(26)'], {}), '(0, 26)\n', (3185, 3192), False, 'from random import randint\n')] |
from basketapp.models import TrainerBasket
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.urls import reverse
from mainapp.models import Trainer
@login_required
def index(request):
items = TrainerBasket.objects.filter(user=request.user)
context = {
'object_list': items,
}
return render(request, 'basketapp/basket.html', context)
@login_required
def add(request, trainer_id):
trainer = Trainer.objects.get(pk=trainer_id)
TrainerBasket.objects.get_or_create(
user=request.user,
trainer=trainer
)
return HttpResponseRedirect(
reverse('mainapp:trainer_page',
kwargs={'pk': trainer.pk})
)
@login_required
def remove(request, tr):
if request.is_ajax():
item = TrainerBasket.objects.get(id=tr)
item.delete()
return JsonResponse({'status': 'ok',
'tr': tr})
| [
"django.shortcuts.render",
"basketapp.models.TrainerBasket.objects.get_or_create",
"basketapp.models.TrainerBasket.objects.filter",
"basketapp.models.TrainerBasket.objects.get",
"django.http.JsonResponse",
"mainapp.models.Trainer.objects.get",
"django.urls.reverse"
] | [((312, 359), 'basketapp.models.TrainerBasket.objects.filter', 'TrainerBasket.objects.filter', ([], {'user': 'request.user'}), '(user=request.user)\n', (340, 359), False, 'from basketapp.models import TrainerBasket\n'), ((424, 473), 'django.shortcuts.render', 'render', (['request', '"""basketapp/basket.html"""', 'context'], {}), "(request, 'basketapp/basket.html', context)\n", (430, 473), False, 'from django.shortcuts import render\n'), ((535, 569), 'mainapp.models.Trainer.objects.get', 'Trainer.objects.get', ([], {'pk': 'trainer_id'}), '(pk=trainer_id)\n', (554, 569), False, 'from mainapp.models import Trainer\n'), ((574, 645), 'basketapp.models.TrainerBasket.objects.get_or_create', 'TrainerBasket.objects.get_or_create', ([], {'user': 'request.user', 'trainer': 'trainer'}), '(user=request.user, trainer=trainer)\n', (609, 645), False, 'from basketapp.models import TrainerBasket\n'), ((709, 767), 'django.urls.reverse', 'reverse', (['"""mainapp:trainer_page"""'], {'kwargs': "{'pk': trainer.pk}"}), "('mainapp:trainer_page', kwargs={'pk': trainer.pk})\n", (716, 767), False, 'from django.urls import reverse\n'), ((873, 905), 'basketapp.models.TrainerBasket.objects.get', 'TrainerBasket.objects.get', ([], {'id': 'tr'}), '(id=tr)\n', (898, 905), False, 'from basketapp.models import TrainerBasket\n'), ((943, 983), 'django.http.JsonResponse', 'JsonResponse', (["{'status': 'ok', 'tr': tr}"], {}), "({'status': 'ok', 'tr': tr})\n", (955, 983), False, 'from django.http import HttpResponseRedirect, JsonResponse\n')] |
import uuid
import requests
from flask import Flask, request
from flask import jsonify
import configFileControl
from cipherOperations import genrateKeys, decryptData, saveKeyinFile
# =============================================================================================================================
NODE_URL = ""
BASE_API = "/api/v1"
nodePublicKey_path = 'nodePublicKey.pem'
# =============================================================================================================================
base_url = "/webhook"
private_key_path = 'private.pem'
public_key_path = 'public.pem'
webhook_functions = None
# app.config['TEMPLATES_AUTO_RELOAD'] = True
# =============================================================================================================================
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
# =============================================================================================================================
@app.route(base_url, methods = ["GET"])
def home():
return jsonify({"name" : "Home Test"})
@app.route(base_url + "/addNode", methods = ["POST"])
def check_node():
data = request.get_json()
print(data)
try:
dataToFunction = {}
params = ["name", "companyName", "companyID"]
for i in params:
dataToFunction[i] = decryptData(private_key_path, data[i])
print(dataToFunction)
NotifyNewNode = webhook_functions["NotifyNewNode"]
name = dataToFunction["name"]
companyName = dataToFunction["companyName"]
companyID = dataToFunction["companyID"]
status = NotifyNewNode(name, companyName, companyID)
print(status)
if status == True:
return jsonify({"code": 0, "status": "Node added"})
else:
return jsonify({"code": 1, "status": "Something Went Wrong"})
except Exception as e:
print(e)
return jsonify({"code": 1, "status": "Something Went Wrong"})
@app.route(base_url + "/addTransaction", methods = ["POST"])
def check_transaction():
data = request.get_json()
print(data)
try:
dataToFunction = {}
params = ["proposingNode", "transaction", "blockindex", "uid"]
for i in params:
dataToFunction[i] = decryptData(private_key_path, data[i])
print(dataToFunction)
NotifyNewTransaction = webhook_functions["NotifyNewTransaction"]
proposingNode = dataToFunction["proposingNode"]
transaction = dataToFunction["transaction"]
blockindex = dataToFunction["blockindex"]
uid = dataToFunction["uid"]
status = NotifyNewTransaction(proposingNode, transaction, blockindex, uid)
print(status)
if status == True:
return jsonify({"code": 0, "status": "Transaction added"})
else:
return jsonify({"code": 1, "status": "Something Went Wrong"})
except Exception as e:
print(e)
return jsonify({"code": 1, "status": "Something Went Wrong"})
# =============================================================================================================================
def register_client(uid, ip, port):
publicKey = None
with open('public.pem', 'r') as f:
publicKey = f.read()
payload = {
"uid" : uid,
"publicKey" : publicKey,
"ipaddr" : ip + ":" + port
}
response = requests.post("http://" + NODE_URL + "/registerClient", json=payload)
response = response.json()
return response["code"]
def save_node_publicKey():
response = requests.get("http://" + NODE_URL + BASE_API + "/check")
response = response.json()
print(response)
saveKeyinFile(nodePublicKey_path, response["publicKey"])
def check_client(uid, ip, port):
payload = {
"uid" : uid
}
response = requests.post("http://" + NODE_URL + "/checkClient", json=payload)
response = response.json()
# code 1 means client is not registered
if response["code"] == 1:
code = register_client(uid, ip, port)
if code == 0:
print("Client Registered")
# calling node api to retrieve node public key and save it
save_node_publicKey()
return True
else:
print("An error occured while registering client")
return False
else:
print(response["status"])
return True
def check_and_get_uid():
status, uid = configFileControl.getUid()
if status:
return uid
else:
uid = uuid.uuid4()
configFileControl.setUid(uid)
return uid
def start_server(ip, port, webhook_functions_arg, node_url):
genrateKeys(private_key_path, public_key_path)
global webhook_functions, NODE_URL
webhook_functions = webhook_functions_arg
NODE_URL = node_url
from ui import app as clienAPPT
global app
app.register_blueprint(clienAPPT)
uid = str(check_and_get_uid())
status = check_client(uid, ip, port)
if status:
app.run(host=ip, port=port, debug=True)
| [
"requests.post",
"configFileControl.setUid",
"flask.Flask",
"requests.get",
"uuid.uuid4",
"cipherOperations.genrateKeys",
"configFileControl.getUid",
"flask.request.get_json",
"cipherOperations.saveKeyinFile",
"cipherOperations.decryptData",
"flask.jsonify"
] | [((807, 822), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (812, 822), False, 'from flask import Flask, request\n'), ((1060, 1090), 'flask.jsonify', 'jsonify', (["{'name': 'Home Test'}"], {}), "({'name': 'Home Test'})\n", (1067, 1090), False, 'from flask import jsonify\n'), ((1176, 1194), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1192, 1194), False, 'from flask import Flask, request\n'), ((2101, 2119), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2117, 2119), False, 'from flask import Flask, request\n'), ((3424, 3493), 'requests.post', 'requests.post', (["('http://' + NODE_URL + '/registerClient')"], {'json': 'payload'}), "('http://' + NODE_URL + '/registerClient', json=payload)\n", (3437, 3493), False, 'import requests\n'), ((3596, 3652), 'requests.get', 'requests.get', (["('http://' + NODE_URL + BASE_API + '/check')"], {}), "('http://' + NODE_URL + BASE_API + '/check')\n", (3608, 3652), False, 'import requests\n'), ((3708, 3764), 'cipherOperations.saveKeyinFile', 'saveKeyinFile', (['nodePublicKey_path', "response['publicKey']"], {}), "(nodePublicKey_path, response['publicKey'])\n", (3721, 3764), False, 'from cipherOperations import genrateKeys, decryptData, saveKeyinFile\n'), ((3857, 3923), 'requests.post', 'requests.post', (["('http://' + NODE_URL + '/checkClient')"], {'json': 'payload'}), "('http://' + NODE_URL + '/checkClient', json=payload)\n", (3870, 3923), False, 'import requests\n'), ((4475, 4501), 'configFileControl.getUid', 'configFileControl.getUid', ([], {}), '()\n', (4499, 4501), False, 'import configFileControl\n'), ((4698, 4744), 'cipherOperations.genrateKeys', 'genrateKeys', (['private_key_path', 'public_key_path'], {}), '(private_key_path, public_key_path)\n', (4709, 4744), False, 'from cipherOperations import genrateKeys, decryptData, saveKeyinFile\n'), ((4560, 4572), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4570, 4572), False, 'import uuid\n'), ((4581, 4610), 
'configFileControl.setUid', 'configFileControl.setUid', (['uid'], {}), '(uid)\n', (4605, 4610), False, 'import configFileControl\n'), ((1359, 1397), 'cipherOperations.decryptData', 'decryptData', (['private_key_path', 'data[i]'], {}), '(private_key_path, data[i])\n', (1370, 1397), False, 'from cipherOperations import genrateKeys, decryptData, saveKeyinFile\n'), ((1756, 1800), 'flask.jsonify', 'jsonify', (["{'code': 0, 'status': 'Node added'}"], {}), "({'code': 0, 'status': 'Node added'})\n", (1763, 1800), False, 'from flask import jsonify\n'), ((1834, 1888), 'flask.jsonify', 'jsonify', (["{'code': 1, 'status': 'Something Went Wrong'}"], {}), "({'code': 1, 'status': 'Something Went Wrong'})\n", (1841, 1888), False, 'from flask import jsonify\n'), ((1948, 2002), 'flask.jsonify', 'jsonify', (["{'code': 1, 'status': 'Something Went Wrong'}"], {}), "({'code': 1, 'status': 'Something Went Wrong'})\n", (1955, 2002), False, 'from flask import jsonify\n'), ((2301, 2339), 'cipherOperations.decryptData', 'decryptData', (['private_key_path', 'data[i]'], {}), '(private_key_path, data[i])\n', (2312, 2339), False, 'from cipherOperations import genrateKeys, decryptData, saveKeyinFile\n'), ((2790, 2841), 'flask.jsonify', 'jsonify', (["{'code': 0, 'status': 'Transaction added'}"], {}), "({'code': 0, 'status': 'Transaction added'})\n", (2797, 2841), False, 'from flask import jsonify\n'), ((2875, 2929), 'flask.jsonify', 'jsonify', (["{'code': 1, 'status': 'Something Went Wrong'}"], {}), "({'code': 1, 'status': 'Something Went Wrong'})\n", (2882, 2929), False, 'from flask import jsonify\n'), ((2989, 3043), 'flask.jsonify', 'jsonify', (["{'code': 1, 'status': 'Something Went Wrong'}"], {}), "({'code': 1, 'status': 'Something Went Wrong'})\n", (2996, 3043), False, 'from flask import jsonify\n')] |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manage collections of props."""
import collections
from typing import Any, Callable, Union
class Singleton(type):
    """Metaclass that caches and reuses a single instance per class.

    The first ``Cls()`` call constructs the instance; every later call
    returns the same cached object (``__init__`` is not run again).
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        instance = cls._instances.get(cls)
        if instance is None:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
        return instance
# Immutable record pairing a version number with a sequence of prop ids.
VersionedSequence = collections.namedtuple(
    'VersionedSequence', ['version', 'ids'])
class PropSetDict(dict):
    """Dictionary that can evaluate a function on every key access.

    Values may be either :class:`VersionedSequence` instances or
    zero-argument callables producing one; callables are invoked on each
    access, which gives object sets dynamic behaviour.
    """

    def __getitem__(self, key: Any) -> VersionedSequence:
        # Invoked on [] access; resolves lazy values to a collection of
        # prop names.
        raw_value = dict.__getitem__(self, key)
        return self._evaluate(raw_value)

    def __repr__(self) -> str:
        return f'{type(self).__name__}({super().__repr__()})'

    def get(self, key) -> VersionedSequence:
        # Note: unlike dict.get, a missing key raises KeyError here.
        return self[key]

    def values(self):
        return [self._evaluate(value) for value in super().values()]

    def items(self):
        evaluated = {key: self._evaluate(value) for key, value in super().items()}
        return evaluated.items()

    def _evaluate(
        self, sequence_or_function: Union[VersionedSequence,
                                          Callable[[], VersionedSequence]]
    ) -> VersionedSequence:
        """Return the argument itself, or the result of calling it.

        Static :class:`VersionedSequence` containers are returned as is.
        Any other value is treated as a factory and called to create a
        fresh container.

        Args:
          sequence_or_function: A sequence or a function that creates a sequence.

        Returns:
          A versioned set of names.
        """
        if isinstance(sequence_or_function, VersionedSequence):
            return sequence_or_function
        return sequence_or_function()
| [
"collections.namedtuple"
] | [((934, 997), 'collections.namedtuple', 'collections.namedtuple', (['"""VersionedSequence"""', "['version', 'ids']"], {}), "('VersionedSequence', ['version', 'ids'])\n", (956, 997), False, 'import collections\n')] |
import os, shutil
import re
from django.conf import settings
from django.core.files.base import ContentFile
from filemanager import signals
from filemanager.settings import DIRECTORY, STORAGE
from filemanager.utils import sizeof_fmt
class Filemanager(object):
    """File manager rooted at ``DIRECTORY`` inside Django's media storage.

    ``path`` values are always relative to ``DIRECTORY``; the manager keeps
    four derived attributes in sync (see ``update_path``).

    NOTE(review): ``rename``/``remove``/``search`` use ``os``/``shutil`` on
    ``self.location`` directly, so they assume a local filesystem storage —
    confirm ``STORAGE`` is a ``FileSystemStorage``-style backend.
    """

    def __init__(self, path=None):
        """Initialise the manager at ``path`` (``None``/'' selects the root)."""
        self.update_path(path)

    def update_path(self, path):
        """Point the manager at ``path`` and recompute derived attributes.

        Sets ``self.path`` (normalised, relative to ``DIRECTORY``),
        ``self.abspath`` (relative to ``MEDIA_ROOT``), ``self.location``
        (absolute filesystem path) and ``self.url`` (public URL).
        """
        if path is None or len(path) == 0:
            self.path = ''
            self.abspath = DIRECTORY
        else:
            self.path = self.validate_path(path)
            self.abspath = os.path.join(DIRECTORY, self.path)
        self.location = os.path.join(settings.MEDIA_ROOT, self.abspath)
        self.url = os.path.join(settings.MEDIA_URL, self.abspath)

    def validate_path(self, path):
        """Normalise a path: forward slashes only, no empty segments."""
        # replace backslash with slash
        path = path.replace('\\', '/')
        # remove leading, trailing and duplicated slashes
        path = '/'.join([i for i in path.split('/') if i])
        return path

    def get_breadcrumbs(self):
        """Return breadcrumb dicts (label/path) from the root to ``self.path``."""
        breadcrumbs = [{
            'label': 'Home',
            'path': '',
        }]
        parts = [e for e in self.path.split('/') if e]
        path = ''
        for part in parts:
            path = os.path.join(path, part)
            breadcrumbs.append({
                'label': part,
                'path': path,
            })
        return breadcrumbs

    def patch_context_data(self, context):
        """Add the current path and breadcrumbs to a template context."""
        context.update({
            'path': self.path,
            'breadcrumbs': self.get_breadcrumbs(),
        })

    def file_details(self):
        """Return metadata (name, size, mtime, URL) for the current file path."""
        filename = self.path.rsplit('/', 1)[-1]
        return {
            'directory': os.path.dirname(self.path),
            'filepath': self.path,
            'filename': filename,
            'filesize': sizeof_fmt(STORAGE.size(self.location)),
            'filedate': STORAGE.get_modified_time(self.location),
            'fileurl': self.url,
        }

    def directory_list(self):
        """List the current directory: directory entries first, then files."""
        listing = []
        directories, files = STORAGE.listdir(self.location)

        def _helper(name, filetype):
            # Build one listing entry for a child of the current directory.
            return {
                'filepath': os.path.join(self.path, name),
                'filetype': filetype,
                'filename': name,
                'filedate': STORAGE.get_modified_time(os.path.join(self.path, name)),
                'filesize': sizeof_fmt(STORAGE.size(os.path.join(self.path, name))),
                'fileurl': os.path.join(settings.MEDIA_URL, self.abspath, name),
            }

        for directoryname in directories:
            listing.append(_helper(directoryname, 'Directory'))
        for filename in files:
            listing.append(_helper(filename, 'File'))
        return listing

    def upload_file(self, filedata):
        """Store an uploaded file in the current directory; return its name.

        Fires the pre/post upload signals around the storage write.
        """
        filename = STORAGE.get_valid_name(filedata.name)
        filepath = os.path.join(self.path, filename)
        signals.filemanager_pre_upload.send(sender=self.__class__, filename=filename, path=self.path, filepath=filepath)
        STORAGE.save(filepath, filedata)
        signals.filemanager_post_upload.send(sender=self.__class__, filename=filename, path=self.path, filepath=filepath)
        return filename

    def create_directory(self, name):
        """Create a subdirectory under the current path.

        Storage backends have no mkdir: writing a placeholder file forces
        the directory into existence, then the placeholder is removed.
        """
        name = STORAGE.get_valid_name(name)
        tmpfile = os.path.join(name, '.tmp')
        path = os.path.join(self.path, tmpfile)
        STORAGE.save(path, ContentFile(''))
        STORAGE.delete(path)

    def rename(self, src, dst):
        """Rename ``src`` to ``dst`` inside the current directory."""
        os.rename(os.path.join(self.location, src), os.path.join(self.location, dst))

    def remove(self, name):
        """Delete a file, or recursively delete a directory, under the current path."""
        if os.path.isdir(os.path.join(self.location, name)):
            shutil.rmtree(os.path.join(self.location, name))
        else:
            os.remove(os.path.join(self.location, name))

    def search(self, name):
        """Search below the current directory for ``name``.

        Matches entry names (``name`` is used as a case-insensitive regular
        expression) and, for files, their text content.

        NOTE(review): the walk repeatedly calls ``update_path``, so the
        manager is left pointing at the last visited directory.
        """
        startpath = os.path.join(settings.MEDIA_ROOT, self.abspath)
        q = []
        for root, dirs, files in os.walk(startpath):
            self.update_path(root.replace(startpath, ''))
            for file in self.directory_list():
                if re.search(name, file['filename'], re.I):
                    q.append(file)
                try:
                    if file['filetype'] == 'File':
                        # fix: derive the real location from settings instead of
                        # the previously hard-coded 'media/uploads/' prefix
                        with open(os.path.join(settings.MEDIA_ROOT, DIRECTORY, file['filepath'])) as f:
                            content = f.read()
                            if name in content:
                                q.append(file)
                except (OSError, UnicodeDecodeError):
                    # best-effort content search: unreadable or binary files
                    # are skipped (fix: was a bare ``except`` that swallowed
                    # every exception, including KeyboardInterrupt)
                    pass
        return q
| [
"django.core.files.base.ContentFile",
"filemanager.settings.STORAGE.size",
"filemanager.settings.STORAGE.save",
"os.path.join",
"filemanager.settings.STORAGE.get_valid_name",
"os.path.dirname",
"filemanager.settings.STORAGE.get_modified_time",
"filemanager.settings.STORAGE.delete",
"filemanager.sign... | [((620, 667), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', 'self.abspath'], {}), '(settings.MEDIA_ROOT, self.abspath)\n', (632, 667), False, 'import os, shutil\n'), ((687, 733), 'os.path.join', 'os.path.join', (['settings.MEDIA_URL', 'self.abspath'], {}), '(settings.MEDIA_URL, self.abspath)\n', (699, 733), False, 'import os, shutil\n'), ((2012, 2042), 'filemanager.settings.STORAGE.listdir', 'STORAGE.listdir', (['self.location'], {}), '(self.location)\n', (2027, 2042), False, 'from filemanager.settings import DIRECTORY, STORAGE\n'), ((2774, 2811), 'filemanager.settings.STORAGE.get_valid_name', 'STORAGE.get_valid_name', (['filedata.name'], {}), '(filedata.name)\n', (2796, 2811), False, 'from filemanager.settings import DIRECTORY, STORAGE\n'), ((2831, 2864), 'os.path.join', 'os.path.join', (['self.path', 'filename'], {}), '(self.path, filename)\n', (2843, 2864), False, 'import os, shutil\n'), ((2873, 2990), 'filemanager.signals.filemanager_pre_upload.send', 'signals.filemanager_pre_upload.send', ([], {'sender': 'self.__class__', 'filename': 'filename', 'path': 'self.path', 'filepath': 'filepath'}), '(sender=self.__class__, filename=\n filename, path=self.path, filepath=filepath)\n', (2908, 2990), False, 'from filemanager import signals\n'), ((2994, 3026), 'filemanager.settings.STORAGE.save', 'STORAGE.save', (['filepath', 'filedata'], {}), '(filepath, filedata)\n', (3006, 3026), False, 'from filemanager.settings import DIRECTORY, STORAGE\n'), ((3035, 3153), 'filemanager.signals.filemanager_post_upload.send', 'signals.filemanager_post_upload.send', ([], {'sender': 'self.__class__', 'filename': 'filename', 'path': 'self.path', 'filepath': 'filepath'}), '(sender=self.__class__, filename=\n filename, path=self.path, filepath=filepath)\n', (3071, 3153), False, 'from filemanager import signals\n'), ((3227, 3255), 'filemanager.settings.STORAGE.get_valid_name', 'STORAGE.get_valid_name', (['name'], {}), '(name)\n', (3249, 3255), False, 'from 
filemanager.settings import DIRECTORY, STORAGE\n'), ((3274, 3300), 'os.path.join', 'os.path.join', (['name', '""".tmp"""'], {}), "(name, '.tmp')\n", (3286, 3300), False, 'import os, shutil\n'), ((3317, 3349), 'os.path.join', 'os.path.join', (['self.path', 'tmpfile'], {}), '(self.path, tmpfile)\n', (3329, 3349), False, 'import os, shutil\n'), ((3402, 3422), 'filemanager.settings.STORAGE.delete', 'STORAGE.delete', (['path'], {}), '(path)\n', (3416, 3422), False, 'from filemanager.settings import DIRECTORY, STORAGE\n'), ((3813, 3860), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', 'self.abspath'], {}), '(settings.MEDIA_ROOT, self.abspath)\n', (3825, 3860), False, 'import os, shutil\n'), ((3909, 3927), 'os.walk', 'os.walk', (['startpath'], {}), '(startpath)\n', (3916, 3927), False, 'import os, shutil\n'), ((561, 595), 'os.path.join', 'os.path.join', (['DIRECTORY', 'self.path'], {}), '(DIRECTORY, self.path)\n', (573, 595), False, 'import os, shutil\n'), ((1216, 1240), 'os.path.join', 'os.path.join', (['path', 'part'], {}), '(path, part)\n', (1228, 1240), False, 'import os, shutil\n'), ((1659, 1685), 'os.path.dirname', 'os.path.dirname', (['self.path'], {}), '(self.path)\n', (1674, 1685), False, 'import os, shutil\n'), ((1845, 1885), 'filemanager.settings.STORAGE.get_modified_time', 'STORAGE.get_modified_time', (['self.location'], {}), '(self.location)\n', (1870, 1885), False, 'from filemanager.settings import DIRECTORY, STORAGE\n'), ((3377, 3392), 'django.core.files.base.ContentFile', 'ContentFile', (['""""""'], {}), "('')\n", (3388, 3392), False, 'from django.core.files.base import ContentFile\n'), ((3474, 3506), 'os.path.join', 'os.path.join', (['self.location', 'src'], {}), '(self.location, src)\n', (3486, 3506), False, 'import os, shutil\n'), ((3508, 3540), 'os.path.join', 'os.path.join', (['self.location', 'dst'], {}), '(self.location, dst)\n', (3520, 3540), False, 'import os, shutil\n'), ((3596, 3629), 'os.path.join', 'os.path.join', (['self.location', 
'name'], {}), '(self.location, name)\n', (3608, 3629), False, 'import os, shutil\n'), ((1791, 1818), 'filemanager.settings.STORAGE.size', 'STORAGE.size', (['self.location'], {}), '(self.location)\n', (1803, 1818), False, 'from filemanager.settings import DIRECTORY, STORAGE\n'), ((2130, 2159), 'os.path.join', 'os.path.join', (['self.path', 'name'], {}), '(self.path, name)\n', (2142, 2159), False, 'import os, shutil\n'), ((2432, 2484), 'os.path.join', 'os.path.join', (['settings.MEDIA_URL', 'self.abspath', 'name'], {}), '(settings.MEDIA_URL, self.abspath, name)\n', (2444, 2484), False, 'import os, shutil\n'), ((3658, 3691), 'os.path.join', 'os.path.join', (['self.location', 'name'], {}), '(self.location, name)\n', (3670, 3691), False, 'import os, shutil\n'), ((3729, 3762), 'os.path.join', 'os.path.join', (['self.location', 'name'], {}), '(self.location, name)\n', (3741, 3762), False, 'import os, shutil\n'), ((4054, 4093), 're.search', 're.search', (['name', "file['filename']", 're.I'], {}), "(name, file['filename'], re.I)\n", (4063, 4093), False, 'import re\n'), ((2287, 2316), 'os.path.join', 'os.path.join', (['self.path', 'name'], {}), '(self.path, name)\n', (2299, 2316), False, 'import os, shutil\n'), ((2371, 2400), 'os.path.join', 'os.path.join', (['self.path', 'name'], {}), '(self.path, name)\n', (2383, 2400), False, 'import os, shutil\n')] |
"""
Aggregator
====================================
*Aggregators* are used to combine multiple matrices to a single matrix.
This is used to combine similarity and dissimilarity matrices of multiple attributes to a single one.
Thus, an *Aggregator* :math:`\\mathcal{A}` is a mapping of the form
:math:`\\mathcal{A} : \\mathbb{R}^{n \\times n \\times k} \\rightarrow \\mathbb{R}^{n \\times n}`,
with :math:`n` being the amount of features and :math:`k` being the number of similarity or dissimilarity matrices
of type :math:`D \\in \\mathbb{R}^{n \\times n}`, i.e. the amount of attributes/columns of the dataset.
Currently, the following *Aggregators* are implement:
=========== ===========
Name Formula
----------- -----------
mean :math:`\\mathcal{A} (D^1, D^2, ..., D^k) = \\frac{1}{k} \\sum_{i=1}^{k} D^i`
median :math:`\\mathcal{A} (D^1, D^2, ..., D^k) = \\left\\{ \\begin{array}{ll} D^{\\frac{k}{2}} & \\mbox{, if } k \\mbox{ is even} \\\\ \\frac{1}{2} \\left( D^{\\frac{k-1}{2}} + D^{\\frac{k+1}{2}} \\right) & \\mbox{, if } k \\mbox{ is odd} \\end{array} \\right.`
max :math:`\\mathcal{A} (D^1, D^2, ..., D^k) = max_{ l} \\; D_{i,j}^l`
min :math:`\\mathcal{A} (D^1, D^2, ..., D^k) = min_{ l} \\; D_{i,j}^l`
=========== ===========
"""
import numpy as np
from abc import ABC, abstractmethod
class Aggregator(ABC):
    """Abstract base class for *Aggregators*.

    To create a custom *Aggregator*, simply derive from this class; the
    subclass can then be used wherever an *Aggregator* is expected.
    """

    @abstractmethod
    def aggregate(self, matrices):
        """Combine several similarity/dissimilarity matrices into one.

        :param matrices: a list of similarity or dissimilarity matrices as 2D numpy arrays.
        :return: a single 2D numpy array.
        """
class AggregatorFactory:
    """Factory creating concrete instances of the implemented *Aggregators*."""

    @staticmethod
    def create(aggregator):
        """Create an instance of the *Aggregator* with the given name.

        :param aggregator: Name of the *Aggregator*: ``mean``, ``median``, ``max`` or ``min``.
        :return: An instance of the requested *Aggregator*.
        :raise ValueError: The given *Aggregator* does not exist.
        """
        # Guard-clause dispatch; unknown names fall through to the error.
        if aggregator == "mean":
            return MeanAggregator()
        if aggregator == "median":
            return MedianAggregator()
        if aggregator == "max":
            return MaxAggregator()
        if aggregator == "min":
            return MinAggregator()
        raise ValueError(f"An aggregator of type {aggregator} does not exist.")
class MeanAggregator(Aggregator):
    """Combines similarity or dissimilarity matrices via the element-wise ``mean``.

    For :math:`k` matrices :math:`D^i \\in \\mathbb{R}^{n \\times n}` it computes

    .. centered::
        :math:`\\mathcal{A} (D^1, D^2, ..., D^k) = \\frac{1}{k} \\sum_{i=1}^{k} D^i`.
    """

    def aggregate(self, matrices):
        """Element-wise mean of the given matrices along the zero axis.

        :param matrices: A list of 2D numpy arrays.
        :return: A 2D numpy array.
        """
        stacked = np.asarray(matrices)
        return stacked.mean(axis=0)
class MedianAggregator(Aggregator):
    """Combines similarity or dissimilarity matrices via the element-wise ``median``.

    For :math:`k` matrices :math:`D^i \\in \\mathbb{R}^{n \\times n}` it computes

    .. centered::
        :math:`\\mathcal{A} (D^1, D^2, ..., D^k) = \\left\\{ \\begin{array}{ll} D^{\\frac{k}{2}} & \\mbox{, if } k \\mbox{ is even} \\\\ \\frac{1}{2} \\left( D^{\\frac{k-1}{2}} + D^{\\frac{k+1}{2}} \\right) & \\mbox{, if } k \\mbox{ is odd} \\end{array} \\right.`
    """

    def aggregate(self, matrices):
        """Element-wise median of the given matrices along the zero axis.

        :param matrices: A list of 2D numpy arrays.
        :return: A 2D numpy array.
        """
        stacked = np.asarray(matrices)
        return np.median(stacked, axis=0)
class MaxAggregator(Aggregator):
    """Combines similarity or dissimilarity matrices via the element-wise ``max``.

    For :math:`k` matrices :math:`D^i \\in \\mathbb{R}^{n \\times n}` it computes

    .. centered::
        :math:`\\mathcal{A} (D^1, D^2, ..., D^k) = max_{ l} \\; D_{i,j}^l`.
    """

    def aggregate(self, matrices):
        """Element-wise maximum of the given matrices along the zero axis.

        :param matrices: A list of 2D numpy arrays.
        :return: A 2D numpy array.
        """
        stacked = np.asarray(matrices)
        return stacked.max(axis=0)
class MinAggregator(Aggregator):
    """Combines similarity or dissimilarity matrices via the element-wise ``min``.

    For :math:`k` matrices :math:`D^i \\in \\mathbb{R}^{n \\times n}` it computes

    .. centered::
        :math:`\\mathcal{A} (D^1, D^2, ..., D^k) = min_{ l} \\; D_{i,j}^l`.
    """

    def aggregate(self, matrices):
        """Element-wise minimum of the given matrices along the zero axis.

        :param matrices: A list of 2D numpy arrays.
        :return: A 2D numpy array.
        """
        stacked = np.asarray(matrices)
        return stacked.min(axis=0)
| [
"numpy.mean",
"numpy.median",
"numpy.min",
"numpy.max"
] | [((3369, 3394), 'numpy.mean', 'np.mean', (['matrices'], {'axis': '(0)'}), '(matrices, axis=0)\n', (3376, 3394), True, 'import numpy as np\n'), ((4191, 4218), 'numpy.median', 'np.median', (['matrices'], {'axis': '(0)'}), '(matrices, axis=0)\n', (4200, 4218), True, 'import numpy as np\n'), ((4816, 4840), 'numpy.max', 'np.max', (['matrices'], {'axis': '(0)'}), '(matrices, axis=0)\n', (4822, 4840), True, 'import numpy as np\n'), ((5438, 5462), 'numpy.min', 'np.min', (['matrices'], {'axis': '(0)'}), '(matrices, axis=0)\n', (5444, 5462), True, 'import numpy as np\n')] |
"""
byceps.blueprints.site.page.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2022 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
from flask import abort, g
from ....services.page import service as page_service
from ....util.framework.blueprint import create_blueprint
from .templating import render_page, url_for_page
blueprint = create_blueprint('page', __name__)
# Expose url_for_page as a global helper in all templates of this app.
blueprint.add_app_template_global(url_for_page)
@blueprint.get('/<path:url_path>')
def view(url_path):
    """Render the current version of the page mounted for the current
    site at the given URL path, or respond 404 if none exists.
    """
    url_path = '/' + url_path
    current_version = page_service.find_current_version_for_url_path(
        g.site_id, url_path
    )

    if current_version is None:
        abort(404)

    page = page_service.get_page(current_version.page_id)
    return render_page(page, current_version)
| [
"flask.abort"
] | [((784, 794), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (789, 794), False, 'from flask import abort, g\n')] |
#!/usr/bin/env python
# coding: utf-8
"""script that generates source data csvs for searchstims experiment figures"""
from argparse import ArgumentParser
from collections import defaultdict
from pathlib import Path
import pandas as pd
import pyprojroot
import searchnets
def main(results_gz_root,
         source_data_root,
         all_csv_filename,
         acc_diff_csv_filename,
         stim_acc_diff_csv_filename,
         net_acc_diff_csv_filename,
         acc_diff_by_stim_csv_filename,
         net_names,
         methods,
         modes,
         alexnet_split_csv_path,
         VGG16_split_csv_path,
         learning_rate=1e-3,
         ):
    """generate .csv files used as source data for figures corresponding to experiments
    carried out with stimuli generated by searchstims library

    Parameters
    ----------
    results_gz_root : str, Path
        path to root of directory that has results.gz files created by `searchnets test` command
    source_data_root : str, path
        path to root of directory where csv files
        that are the source data for figures should be saved.
    all_csv_filename : str
        filename for .csv saved that contains results from **all** results.gz files.
        Saved in source_data_root.
    acc_diff_csv_filename : str
        filename for .csv should be saved that contains group analysis derived from all results,
        with difference in accuracy between set size 1 and 8.
        Saved in source_data_root.
    stim_acc_diff_csv_filename : str
        filename for .csv saved that contains group analysis derived from all results,
        with stimulus type column sorted by difference in accuracy between set size 1 and 8.
        Saved in source_data_root.
    net_acc_diff_csv_filename : str
        filename for .csv saved that contains group analysis derived from all results,
        with net name column sorted by mean accuracy across all stimulus types.
        Saved in source_data_root.
    acc_diff_by_stim_csv_filename : str
        filename for .csv saved that contains group analysis derived from all results,
        with difference in accuracy between set size 1 and 8,
        pivoted so that columns are visual search stimulus type.
        Saved in source_data_root.
    net_names : list
        of str, neural network architecture names
    methods : list
        of str, training "methods". Valid values are {"transfer", "initialize"}.
    modes : list
        of str, training "modes". Valid values are {"classify","detect"}.
    alexnet_split_csv_path : str, Path
        path to .csv that contains dataset splits for "alexnet-sized" searchstim images
    VGG16_split_csv_path : str, Path
        path to .csv that contains dataset splits for "VGG16-sized" searchstim images
    learning_rate
        float, learning rate value for all experiments. Default is 1e-3.
    """
    results_gz_root = Path(results_gz_root)
    source_data_root = Path(source_data_root)
    if not source_data_root.exists():
        raise NotADirectoryError(
            f'directory specified as source_data_root not found: {source_data_root}'
        )

    df_list = []
    for net_name in net_names:
        for method in methods:
            if method not in METHODS:
                raise ValueError(
                    f'invalid method: {method}, must be one of: {METHODS}'
                )
            for mode in modes:
                # each (net, method, mode) combination must map to exactly one results.gz
                results_gz_path = sorted(results_gz_root.glob(f'**/*{net_name}*{method}*gz'))
                if mode == 'classify':
                    results_gz_path = [results_gz for results_gz in results_gz_path if 'detect' not in str(results_gz)]
                elif mode == 'detect':
                    results_gz_path = [results_gz for results_gz in results_gz_path if 'detect' in str(results_gz)]
                else:
                    raise ValueError(
                        f'invalid mode: {mode}, must be one of: {MODES}'
                    )
                if len(results_gz_path) != 1:
                    # fix: the old message claimed "more than one" even when zero files matched
                    raise ValueError(
                        f'expected exactly one results.gz file, but found {len(results_gz_path)}: {results_gz_path}'
                    )
                results_gz_path = results_gz_path[0]

                # pick the dataset-split csv that matches this net's input image size
                if net_name == 'alexnet' or 'CORnet' in net_name:
                    csv_path = alexnet_split_csv_path
                elif net_name == 'VGG16':
                    csv_path = VGG16_split_csv_path
                else:
                    raise ValueError(f'no csv path defined for net_name: {net_name}')

                df = searchnets.analysis.searchstims.results_gz_to_df(results_gz_path,
                                                                      csv_path,
                                                                      net_name,
                                                                      method,
                                                                      mode,
                                                                      learning_rate)
                df_list.append(df)

    df_all = pd.concat(df_list)

    # Get just the transfer learning results,
    # then group by network, stimulus, and set size,
    # and compute the mean accuracy for each set size.
    df_transfer = df_all[df_all['method'] == 'transfer']
    df_transfer_acc_mn = df_transfer.groupby(['net_name', 'stimulus', 'set_size']).agg({'accuracy': 'mean'})
    df_transfer_acc_mn = df_transfer_acc_mn.reset_index()

    # Make one more `DataFrame`
    # where variable is difference of mean accuracies on set size 1 and set size 8.
    # We use this to organize the figure,
    # and to show a heatmap with a marginal distribution.
    records = defaultdict(list)
    for net_name in df_transfer_acc_mn['net_name'].unique():
        df_net = df_transfer_acc_mn[df_transfer_acc_mn['net_name'] == net_name]
        for stim in df_net['stimulus'].unique():
            df_stim = df_net[df_net['stimulus'] == stim]
            set_size_1_acc = df_stim[df_stim['set_size'] == 1]['accuracy'].values.item()
            set_size_8_acc = df_stim[df_stim['set_size'] == 8]['accuracy'].values.item()
            acc_diff = set_size_1_acc - set_size_8_acc
            records['net_name'].append(net_name)
            records['stimulus'].append(stim)
            records['set_size_1_acc'].append(set_size_1_acc)
            records['set_size_8_acc'].append(set_size_8_acc)
            records['acc_diff'].append(acc_diff)

    df_acc_diff = pd.DataFrame.from_records(records)
    df_acc_diff = df_acc_diff[['net_name', 'stimulus', 'set_size_1_acc', 'set_size_8_acc', 'acc_diff']]

    # columns will be stimuli, in increasing order of accuracy drop across models
    stim_acc_diff_df = df_acc_diff.groupby(['stimulus']).agg({'acc_diff': 'mean', 'set_size_1_acc': 'mean'})
    stim_acc_diff_df = stim_acc_diff_df.reset_index()
    stim_acc_diff_df = stim_acc_diff_df.sort_values(by=['set_size_1_acc', 'acc_diff'], ascending=False)

    # rows will be nets, in decreasing order of accuracy drops across stimuli
    net_acc_diff_df = df_acc_diff.groupby(['net_name']).agg({'acc_diff': 'mean'})
    net_acc_diff_df = net_acc_diff_df.reset_index()
    net_acc_diff_df = net_acc_diff_df.sort_values(by='acc_diff', ascending=False)

    # no idea how much I am abusing the Pandas API, just trying to make a pivot table into a data frame here
    # https://stackoverflow.com/a/42708606/4906855
    # want the columns to be (sorted) stimulus type,
    # and rows be (sorted) network names,
    # with values in cells being effect size
    df_acc_diff_only = df_acc_diff[['net_name', 'stimulus', 'acc_diff']]
    df_acc_diff_by_stim = df_acc_diff_only.pivot_table(index='net_name', columns='stimulus')
    df_acc_diff_by_stim.columns = df_acc_diff_by_stim.columns.get_level_values(1)
    df_acc_diff_by_stim = pd.DataFrame(df_acc_diff_by_stim.to_records())
    df_acc_diff_by_stim = df_acc_diff_by_stim.set_index('net_name')
    df_acc_diff_by_stim = df_acc_diff_by_stim.reindex(net_acc_diff_df['net_name'].values.tolist())
    df_acc_diff_by_stim = df_acc_diff_by_stim[stim_acc_diff_df['stimulus'].values.tolist()]

    # finally, save csvs
    df_all.to_csv(source_data_root.joinpath(all_csv_filename), index=False)
    df_acc_diff.to_csv(source_data_root.joinpath(acc_diff_csv_filename), index=False)
    stim_acc_diff_df.to_csv(source_data_root.joinpath(stim_acc_diff_csv_filename), index=False)
    net_acc_diff_df.to_csv(source_data_root.joinpath(net_acc_diff_csv_filename), index=False)
    # for this csv, the index is "net names" -- we want to keep it
    df_acc_diff_by_stim.to_csv(source_data_root.joinpath(acc_diff_by_stim_csv_filename))
# Anchor all paths at the project root so the script runs from any cwd.
ROOT = pyprojroot.here()
DATA_DIR = ROOT.joinpath('data')
RESULTS_ROOT = ROOT.joinpath('results')
SEARCHSTIMS_ROOT = RESULTS_ROOT.joinpath('searchstims')
# Directory holding the results.gz files produced by `searchnets test`.
RESULTS_GZ_ROOT = SEARCHSTIMS_ROOT.joinpath('results_gz')
# Default CLI values; used by get_parser below.
LEARNING_RATE = 1e-3
NET_NAMES = [
    'alexnet',
    'VGG16',
    'CORnet_Z',
    'CORnet_S',
]
METHODS = [
    'initialize',
    'transfer'
]
MODES = ['classify']
# Dataset-split csvs for the two input image sizes.
SEARCHSTIMS_OUTPUT_ROOT = ROOT.joinpath('../visual_search_stimuli')
alexnet_split_csv_path = SEARCHSTIMS_OUTPUT_ROOT.joinpath(
    'alexnet_multiple_stims/alexnet_multiple_stims_128000samples_balanced_split.csv')
VGG16_split_csv_path = SEARCHSTIMS_OUTPUT_ROOT.joinpath(
    'VGG16_multiple_stims/VGG16_multiple_stims_128000samples_balanced_split.csv'
)
def get_parser():
    """Build the :class:`argparse.ArgumentParser` for this script's CLI.

    Defaults come from the module-level constants defined above.
    """
    parser = ArgumentParser()
    parser.add_argument('--results_gz_root',
                        help='path to root of directory that has results.gz files created by searchstims test command')
    parser.add_argument('--source_data_root',
                        help=('path to root of directory where "source data" csv files '
                              'that are generated should be saved'))
    parser.add_argument('--all_csv_filename', default='all.csv',
                        help=('filename for .csv that should be saved '
                              'that contains results from **all** results.gz files. '
                              'Saved in source_data_root.'))
    parser.add_argument('--acc_diff_csv_filename', default='acc_diff.csv',
                        help=("filename for .csv should be saved "
                              "that contains group analysis derived from all results, "
                              "with difference in accuracy between set size 1 and 8. "
                              "Saved in source_data_root"))
    parser.add_argument('--stim_acc_diff_csv_filename', default='stim_acc_diff.csv',
                        help=("filename for .csv should be saved "
                              "that contains group analysis derived from all results, "
                              "with stimulus type column sorted by difference in accuracy between set size 1 and 8. "
                              "Saved in source_data_root"))
    parser.add_argument('--net_acc_diff_csv_filename', default='net_acc_diff.csv',
                        help=("filename for .csv should be saved "
                              "that contains group analysis derived from all results, "
                              "with net name column sorted by mean accuracy across all stimulus types."
                              "Saved in source_data_root."))
    parser.add_argument('--acc_diff_by_stim_csv_filename', default='acc_diff_by_stim.csv',
                        help=("filename for .csv should be saved "
                              "that contains group analysis derived from all results, "
                              "with difference in accuracy between set size 1 and 8, "
                              "pivoted so that columns are visual search stimulus type. "
                              "Saved in source_data_root"))
    parser.add_argument('--net_names', default=NET_NAMES,
                        help='comma-separated list of neural network architecture names',
                        type=lambda net_names: net_names.split(','))
    parser.add_argument('--methods', default=METHODS,
                        help='comma-separated list of training "methods", must be in {"transfer", "initialize"}',
                        type=lambda methods: methods.split(','))
    parser.add_argument('--modes', default=MODES,
                        help='comma-separate list of training "modes", must be in {"classify","detect"}',
                        type=lambda modes: modes.split(','))
    # fix: without type=float, a learning rate supplied on the CLI stayed a str
    parser.add_argument('--learning_rate', default=LEARNING_RATE, type=float,
                        help=f'float, learning rate value for all experiments. Default is {LEARNING_RATE}')
    parser.add_argument('--alexnet_split_csv_path', default=alexnet_split_csv_path,
                        help='path to .csv that contains dataset splits for "alexnet-sized" searchstim images')
    parser.add_argument('--VGG16_split_csv_path', default=VGG16_split_csv_path,
                        help='path to .csv that contains dataset splits for "VGG16-sized" searchstim images')
    return parser
if __name__ == '__main__':
    # Parse CLI arguments and forward them verbatim to main().
    parser = get_parser()
    args = parser.parse_args()
    main(results_gz_root=args.results_gz_root,
         source_data_root=args.source_data_root,
         all_csv_filename=args.all_csv_filename,
         acc_diff_csv_filename=args.acc_diff_csv_filename,
         stim_acc_diff_csv_filename=args.stim_acc_diff_csv_filename,
         net_acc_diff_csv_filename=args.net_acc_diff_csv_filename,
         acc_diff_by_stim_csv_filename=args.acc_diff_by_stim_csv_filename,
         net_names=args.net_names,
         methods=args.methods,
         modes=args.modes,
         alexnet_split_csv_path=args.alexnet_split_csv_path,
         VGG16_split_csv_path=args.VGG16_split_csv_path,
         learning_rate=args.learning_rate,
         )
| [
"pandas.DataFrame.from_records",
"argparse.ArgumentParser",
"pathlib.Path",
"searchnets.analysis.searchstims.results_gz_to_df",
"pyprojroot.here",
"collections.defaultdict",
"pandas.concat"
] | [((8669, 8686), 'pyprojroot.here', 'pyprojroot.here', ([], {}), '()\n', (8684, 8686), False, 'import pyprojroot\n'), ((2900, 2921), 'pathlib.Path', 'Path', (['results_gz_root'], {}), '(results_gz_root)\n', (2904, 2921), False, 'from pathlib import Path\n'), ((2946, 2968), 'pathlib.Path', 'Path', (['source_data_root'], {}), '(source_data_root)\n', (2950, 2968), False, 'from pathlib import Path\n'), ((5050, 5068), 'pandas.concat', 'pd.concat', (['df_list'], {}), '(df_list)\n', (5059, 5068), True, 'import pandas as pd\n'), ((5678, 5695), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5689, 5695), False, 'from collections import defaultdict\n'), ((6461, 6495), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['records'], {}), '(records)\n', (6486, 6495), True, 'import pandas as pd\n'), ((9431, 9447), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (9445, 9447), False, 'from argparse import ArgumentParser\n'), ((4534, 4652), 'searchnets.analysis.searchstims.results_gz_to_df', 'searchnets.analysis.searchstims.results_gz_to_df', (['results_gz_path', 'csv_path', 'net_name', 'method', 'mode', 'learning_rate'], {}), '(results_gz_path, csv_path,\n net_name, method, mode, learning_rate)\n', (4582, 4652), False, 'import searchnets\n')] |
""" suggest a sensible tolerance for a matrix and coverage-rate (default 0.6).
"""
from typing import Optional
import numpy as np
from tqdm import trange
from logzero import logger
from .coverage_rate import coverage_rate
# fmt: off
def suggest_tolerance(
    mat: np.ndarray,
    c_rate: float = 0.66,
    limit: Optional[int] = None,
) -> int:
    # fmt: on
    """Suggest a sensible tolerance for a matrix and coverage rate (default 0.66).

    Args:
        mat: score matrix (2-D array-like).
        c_rate: target coverage rate the tolerance must reach.
        limit: largest tolerance to try; defaults to max(cols // 2, 6).

    Returns:
        The smallest tolerance whose coverage rate reaches ``c_rate``, or
        ``max(cols // 2, 6)`` when no searched tolerance suffices.
    """
    mat = np.asarray(mat)
    try:
        _, col = mat.shape
    except Exception as exc:
        # fix: was ``logger.erorr`` (AttributeError at runtime)
        logger.error(exc)
        raise
    if limit is None:
        limit = max(col // 2, 6)
    tolerance = 3
    if coverage_rate(mat, tolerance) >= c_rate:
        return tolerance
    # may try binary search to speed this up
    for tol in trange(tolerance + 1, limit + 1):
        if coverage_rate(mat, tol) >= c_rate:
            logger.info(" search succeeded for mat of size %s", mat.size)
            return tol
    logger.warning(" mat of size %s most likely not a score matrix", mat.shape)
    # fix: was ``logger.waning`` (AttributeError every time this branch ran)
    logger.warning(
        " we searched hard but were unable to find a sensible tolerance, setting to max(half of %s, 6): %s",
        col,
        max(col // 2, 6),
    )
    return max(col // 2, 6)
| [
"numpy.asarray",
"logzero.logger.warning",
"logzero.logger.info",
"tqdm.trange",
"logzero.logger.erorr"
] | [((480, 495), 'numpy.asarray', 'np.asarray', (['mat'], {}), '(mat)\n', (490, 495), True, 'import numpy as np\n'), ((804, 836), 'tqdm.trange', 'trange', (['(tolerance + 1)', '(limit + 1)'], {}), '(tolerance + 1, limit + 1)\n', (810, 836), False, 'from tqdm import trange\n'), ((999, 1074), 'logzero.logger.warning', 'logger.warning', (['""" mat of size %s most likely not a score matrix"""', 'mat.shape'], {}), "(' mat of size %s most likely not a score matrix', mat.shape)\n", (1013, 1074), False, 'from logzero import logger\n'), ((570, 587), 'logzero.logger.erorr', 'logger.erorr', (['exc'], {}), '(exc)\n', (582, 587), False, 'from logzero import logger\n'), ((910, 971), 'logzero.logger.info', 'logger.info', (['""" search succeeded for mat of size %s"""', 'mat.size'], {}), "(' search succeeded for mat of size %s', mat.size)\n", (921, 971), False, 'from logzero import logger\n')] |
# Generated by Django 3.1.7 on 2021-03-20 12:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema migration: creates the NerSource and NerSample tables."""
    # First migration for this app, so there is nothing to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        # NerSource: a named source of NER data with free-form JSON metadata.
        migrations.CreateModel(
            name='NerSource',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250, unique=True)),
                ('info', models.JSONField()),
            ],
        ),
        # NerSample: one annotated text sample, linked to its NerSource.
        migrations.CreateModel(
            name='NerSample',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ner_text', models.TextField(blank=True, help_text='text', null=True, verbose_name='text')),
                ('ner_sample', models.JSONField(blank=True, help_text='text', null=True, verbose_name='text')),
                ('ner_ent_exist', models.BooleanField(default=False, verbose_name='Contains Entities')),
                # CASCADE: deleting a NerSource removes its samples too.
                ('ner_source', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='archiv.nersource')),
            ],
        ),
    ]
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.JSONField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.CharField"
] | [((338, 431), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (354, 431), False, 'from django.db import migrations, models\n'), ((456, 501), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(250)', 'unique': '(True)'}), '(max_length=250, unique=True)\n', (472, 501), False, 'from django.db import migrations, models\n'), ((529, 547), 'django.db.models.JSONField', 'models.JSONField', ([], {}), '()\n', (545, 547), False, 'from django.db import migrations, models\n'), ((682, 775), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (698, 775), False, 'from django.db import migrations, models\n'), ((803, 881), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""text"""', 'null': '(True)', 'verbose_name': '"""text"""'}), "(blank=True, help_text='text', null=True, verbose_name='text')\n", (819, 881), False, 'from django.db import migrations, models\n'), ((915, 993), 'django.db.models.JSONField', 'models.JSONField', ([], {'blank': '(True)', 'help_text': '"""text"""', 'null': '(True)', 'verbose_name': '"""text"""'}), "(blank=True, help_text='text', null=True, verbose_name='text')\n", (931, 993), False, 'from django.db import migrations, models\n'), ((1030, 1098), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Contains Entities"""'}), "(default=False, verbose_name='Contains Entities')\n", (1049, 1098), False, 'from django.db import migrations, models\n'), ((1132, 1222), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 
'to': '"""archiv.nersource"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'archiv.nersource')\n", (1149, 1222), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
One-off script: copies FLAIR/T1 images and lesion masks for patients marked
in the training CSV into per-patient folders, for validation by a
neuroradiologist.
"""
import os
import shutil
import pandas as pd
# Hard-coded local paths (one-off script, not meant to be portable).
master_folder = '/Users/manusdonahue/Documents/Sky/segmentations_sci/pt_data/'
to_folder = '/Users/manusdonahue/Documents/Sky/lesion_training_data/'
training_csv = '/Users/manusdonahue/Documents/Sky/segmentations_sci/pt_data/move_and_prepare_tabular_24-07-20-09_53.csv'
#####
table = pd.read_csv(training_csv)
# Keep only rows explicitly flagged 0 or 1 in the 'training' column
# (drops NaN / other values).
is_one = table['training'] == 1
is_nought = table['training'] == 0
trutru = [any([i,j]) for i,j in zip(is_one, is_nought)]
table = table[trutru]
for i, row in table.iterrows():
    pt_id = row['id']
    print(f'{i+1} of {len(table)}: {pt_id}')
    # One output folder per patient; os.mkdir raises if it already exists,
    # so a rerun on an existing output tree will fail fast.
    target_folder = os.path.join(to_folder, pt_id)
    os.mkdir(target_folder)
    pt_bin_folder = os.path.join(master_folder, pt_id, 'bin')
    pt_proc_folder = os.path.join(master_folder, pt_id, 'processed')
    # Raw acquisitions live under bin/, the derived mask under processed/.
    source_flair = os.path.join(pt_bin_folder, 'axFLAIR_raw.nii.gz')
    source_flair_cor = os.path.join(pt_bin_folder, 'corFLAIR_raw.nii.gz')
    source_t1 = os.path.join(pt_bin_folder, 'axT1_raw.nii.gz')
    source_mask = os.path.join(pt_proc_folder, 'axFLAIR_mask.nii.gz')
    # Destination names drop the '_raw' suffix.
    target_flair = os.path.join(target_folder, 'axFLAIR.nii.gz')
    target_flair_cor = os.path.join(target_folder, 'corFLAIR.nii.gz')
    target_t1 = os.path.join(target_folder, 'axT1.nii.gz')
    target_mask = os.path.join(target_folder, 'axFLAIR_mask.nii.gz')
    sources = [source_flair, source_flair_cor, source_t1, source_mask]
    targets = [target_flair, target_flair_cor, target_t1, target_mask]
    for s,t in zip(sources,targets):
        shutil.copy(s,t)
| [
"shutil.copy",
"os.path.join",
"os.mkdir",
"pandas.read_csv"
] | [((472, 497), 'pandas.read_csv', 'pd.read_csv', (['training_csv'], {}), '(training_csv)\n', (483, 497), True, 'import pandas as pd\n'), ((776, 806), 'os.path.join', 'os.path.join', (['to_folder', 'pt_id'], {}), '(to_folder, pt_id)\n', (788, 806), False, 'import os\n'), ((811, 834), 'os.mkdir', 'os.mkdir', (['target_folder'], {}), '(target_folder)\n', (819, 834), False, 'import os\n'), ((860, 901), 'os.path.join', 'os.path.join', (['master_folder', 'pt_id', '"""bin"""'], {}), "(master_folder, pt_id, 'bin')\n", (872, 901), False, 'import os\n'), ((923, 970), 'os.path.join', 'os.path.join', (['master_folder', 'pt_id', '"""processed"""'], {}), "(master_folder, pt_id, 'processed')\n", (935, 970), False, 'import os\n'), ((995, 1044), 'os.path.join', 'os.path.join', (['pt_bin_folder', '"""axFLAIR_raw.nii.gz"""'], {}), "(pt_bin_folder, 'axFLAIR_raw.nii.gz')\n", (1007, 1044), False, 'import os\n'), ((1068, 1118), 'os.path.join', 'os.path.join', (['pt_bin_folder', '"""corFLAIR_raw.nii.gz"""'], {}), "(pt_bin_folder, 'corFLAIR_raw.nii.gz')\n", (1080, 1118), False, 'import os\n'), ((1135, 1181), 'os.path.join', 'os.path.join', (['pt_bin_folder', '"""axT1_raw.nii.gz"""'], {}), "(pt_bin_folder, 'axT1_raw.nii.gz')\n", (1147, 1181), False, 'import os\n'), ((1200, 1251), 'os.path.join', 'os.path.join', (['pt_proc_folder', '"""axFLAIR_mask.nii.gz"""'], {}), "(pt_proc_folder, 'axFLAIR_mask.nii.gz')\n", (1212, 1251), False, 'import os\n'), ((1276, 1321), 'os.path.join', 'os.path.join', (['target_folder', '"""axFLAIR.nii.gz"""'], {}), "(target_folder, 'axFLAIR.nii.gz')\n", (1288, 1321), False, 'import os\n'), ((1345, 1391), 'os.path.join', 'os.path.join', (['target_folder', '"""corFLAIR.nii.gz"""'], {}), "(target_folder, 'corFLAIR.nii.gz')\n", (1357, 1391), False, 'import os\n'), ((1408, 1450), 'os.path.join', 'os.path.join', (['target_folder', '"""axT1.nii.gz"""'], {}), "(target_folder, 'axT1.nii.gz')\n", (1420, 1450), False, 'import os\n'), ((1469, 1519), 'os.path.join', 
'os.path.join', (['target_folder', '"""axFLAIR_mask.nii.gz"""'], {}), "(target_folder, 'axFLAIR_mask.nii.gz')\n", (1481, 1519), False, 'import os\n'), ((1717, 1734), 'shutil.copy', 'shutil.copy', (['s', 't'], {}), '(s, t)\n', (1728, 1734), False, 'import shutil\n')] |
import gym
from gym import spaces
import numpy as np
import os
import sys
from m_gym.envs.createsim import CreateSimulation
from m_gym.envs.meveahandle import MeveaHandle
from time import sleep
from math import exp
class ExcavatorDiggingSparseEnv(gym.Env):
  """Gym environment wrapping a Mevea excavator simulation (sparse reward).

  Launches an external Mevea simulator process via MeveaHandle and exposes
  its analog/digital IO as a continuous Box observation/action space.
  """
  def __init__(self):
    super(ExcavatorDiggingSparseEnv, self).__init__()
    # Static simulator configuration; episode_duration is in simulated
    # seconds and is compared against output index 11 in step().
    self.config= {
      "model_name": "Excavator",
      "model_file_location": "Excavator",
      "debug": False,
      "episode_duration": 45,
      "excluded": ["Input_Reset"],
      "render": True,
      "service_inputs": ["Input_Reset","Input_Ready"],
      "service_outputs": ["Output_Reset_Done"],
      "reset_input_block": 12,
      "reset_done_output_block": 1,
      }
    #"workers_directory":"..\\Workers"
    self.sim = CreateSimulation(self.config)
    self.new_folder = self.sim.new_folder
    self.model_file_path = self.sim.model_file_path
    # Get amount of the parameters in the observation vector
    self.obs_len = self.sim.observation_len
    # Get amount of the parameters in the action vector
    self.act_len = self.sim.action_len
    # Create observation and action numpy array
    self.observation = np.zeros(self.obs_len, dtype=np.float32)
    self.action = np.zeros(self.act_len, dtype=np.float32)
    # Actions are normalized to [-1, 1]; observations are unbounded.
    self.action_high = np.ones(self.act_len, dtype=np.float32)
    self.action_low = -self.action_high
    self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=self.observation.shape)
    self.action_space = spaces.Box(low=self.action_low, high=self.action_high, shape=self.action.shape)
    # MeveaHandle owns the simulator process and the shared IO blocks.
    self.mh = MeveaHandle(self.sim.worker_number, self.sim.analog_inputs_blocks , self.sim.digital_inputs_blocks, self.sim.analog_outputs_blocks, self.sim.digital_outputs_blocks)
    self.mh.start_process(os.path.abspath(self.model_file_path), self.config['render'])
    # NOTE(review): trigger masses are set but unused in this class —
    # presumably consumed by a subclass or reward shaping elsewhere; confirm.
    self.bucket_trigger_mass = 100
    self.dumpster_trigger_mass = 100
    del self.sim
  # Returns Box observation space
  def get_observation_space(self):
    return spaces.Box(low=-np.inf, high=np.inf, shape=self.observation.shape)
  # Returns Box action space
  def get_action_space(self):
    return spaces.Box(low=self.action_low, high=self.action_high, shape=self.action.shape)
  def step(self, action):
    """Apply one action, wait 1 s of wall time, and read the outputs.

    Reward is a constant 1 per step (sparse variant); the episode ends
    when the simulator clock (output index 11) reaches episode_duration.
    """
    self.mh.set_inputs(action)
    sleep(1)
    obs = self.mh.get_outputs()
    print(self.get_action_space())
    reward = 1
    done = False
    print(obs[11], obs[11] >= self.config['episode_duration'])
    if obs[11] >= self.config['episode_duration']:
      done = True
    return obs, reward, done, {}
  def reset(self):
    # The triple-quoted block below is the original (disabled) hardware
    # reset handshake; currently reset() only returns the latest outputs.
    '''
    print("restart")
    self.mh.set_single_digital_input(self.config['reset_input_block'], self.mh.worker_number, 1)
    sleep(1)
    self.mh.set_single_digital_input(self.config['reset_input_block'], self.mh.worker_number, 0)
    obs = self.mh.get_outputs()
    while round(obs[11]) != 0:
      self.mh.set_single_digital_input(self.config['reset_input_block'], self.mh.worker_number, 0)
      obs = self.mh.get_outputs()
      sleep(0.1)
    '''
    return self.mh.get_outputs()
  def render(self, mode='', close=False):
    # Rendering is controlled by the simulator itself (config['render']).
    pass
  def close(self):
    """Terminate the simulator process and remove its working folder."""
    self.mh.terminate()
    self.mh.delete_folder(self.new_folder)
    print('Simulation environment closed!')
| [
"numpy.ones",
"m_gym.envs.meveahandle.MeveaHandle",
"m_gym.envs.createsim.CreateSimulation",
"time.sleep",
"gym.spaces.Box",
"numpy.zeros",
"os.path.abspath"
] | [((754, 783), 'm_gym.envs.createsim.CreateSimulation', 'CreateSimulation', (['self.config'], {}), '(self.config)\n', (770, 783), False, 'from m_gym.envs.createsim import CreateSimulation\n'), ((1156, 1196), 'numpy.zeros', 'np.zeros', (['self.obs_len'], {'dtype': 'np.float32'}), '(self.obs_len, dtype=np.float32)\n', (1164, 1196), True, 'import numpy as np\n'), ((1215, 1255), 'numpy.zeros', 'np.zeros', (['self.act_len'], {'dtype': 'np.float32'}), '(self.act_len, dtype=np.float32)\n', (1223, 1255), True, 'import numpy as np\n'), ((1279, 1318), 'numpy.ones', 'np.ones', (['self.act_len'], {'dtype': 'np.float32'}), '(self.act_len, dtype=np.float32)\n', (1286, 1318), True, 'import numpy as np\n'), ((1394, 1460), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': 'self.observation.shape'}), '(low=-np.inf, high=np.inf, shape=self.observation.shape)\n', (1404, 1460), False, 'from gym import spaces\n'), ((1485, 1564), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 'self.action_low', 'high': 'self.action_high', 'shape': 'self.action.shape'}), '(low=self.action_low, high=self.action_high, shape=self.action.shape)\n', (1495, 1564), False, 'from gym import spaces\n'), ((1580, 1753), 'm_gym.envs.meveahandle.MeveaHandle', 'MeveaHandle', (['self.sim.worker_number', 'self.sim.analog_inputs_blocks', 'self.sim.digital_inputs_blocks', 'self.sim.analog_outputs_blocks', 'self.sim.digital_outputs_blocks'], {}), '(self.sim.worker_number, self.sim.analog_inputs_blocks, self.sim\n .digital_inputs_blocks, self.sim.analog_outputs_blocks, self.sim.\n digital_outputs_blocks)\n', (1591, 1753), False, 'from m_gym.envs.meveahandle import MeveaHandle\n'), ((2011, 2077), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': 'self.observation.shape'}), '(low=-np.inf, high=np.inf, shape=self.observation.shape)\n', (2021, 2077), False, 'from gym import spaces\n'), ((2149, 2228), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 
'self.action_low', 'high': 'self.action_high', 'shape': 'self.action.shape'}), '(low=self.action_low, high=self.action_high, shape=self.action.shape)\n', (2159, 2228), False, 'from gym import spaces\n'), ((2292, 2300), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (2297, 2300), False, 'from time import sleep\n'), ((1771, 1808), 'os.path.abspath', 'os.path.abspath', (['self.model_file_path'], {}), '(self.model_file_path)\n', (1786, 1808), False, 'import os\n')] |
#!/usr/bin/env python
"""PyDEC: Software and Algorithms for Discrete Exterior Calculus
"""
# First line of the module docstring is the short description; the rest is
# the long description (see setup_package below).
DOCLINES = __doc__.split("\n")
import os
import sys
# Trove classifiers advertised on PyPI.
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
Intended Audience :: Education
License :: OSI Approved :: BSD License
Programming Language :: Python
Topic :: Education
Topic :: Software Development
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Mathematics
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
# BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
# update it when the contents of directories change.
if os.path.exists('MANIFEST'): os.remove('MANIFEST')
def configuration(parent_package='',top_path=None):
    """Build the numpy.distutils Configuration for the pydec package."""
    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path)
    config.set_options(ignore_setup_xxx_py=True,
                       assume_default_configuration=True,
                       delegate_options_to_subpackages=True,
                       quiet=True)
    # The actual package plus its bundled text files.
    config.add_subpackage('pydec')
    config.add_data_files(('pydec','*.txt'))
    config.get_version(os.path.join('pydec','version.py')) # sets config.version
    return config
def setup_package():
    """Run numpy.distutils setup() from the package directory.

    Temporarily chdirs into the directory containing this script (and puts
    it plus pydec/ on sys.path so the version can be read), then restores
    the working directory and sys.path afterwards.
    """
    from numpy.distutils.core import setup
    from numpy.distutils.misc_util import Configuration
    old_path = os.getcwd()
    local_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    os.chdir(local_path)
    sys.path.insert(0,local_path)
    sys.path.insert(0,os.path.join(local_path,'pydec')) # to retrieve version
    try:
        setup(
            name = 'pydec',
            maintainer = "PyDEC Developers",
            maintainer_email = "<EMAIL>",
            description = DOCLINES[0],
            long_description = "\n".join(DOCLINES[2:]),
            url = "http://www.graphics.cs.uiuc.edu/~wnbell/",
            download_url = "http://code.google.com/p/pydec/downloads/list",
            license = 'BSD',
            classifiers=filter(None, CLASSIFIERS.split('\n')),
            platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
            configuration=configuration )
    finally:
        # Always restore interpreter state, even if setup() raises.
        del sys.path[0]
        os.chdir(old_path)
    return
if __name__ == '__main__':
    setup_package()
| [
"os.path.exists",
"sys.path.insert",
"os.path.join",
"numpy.distutils.misc_util.Configuration",
"os.getcwd",
"os.chdir",
"os.path.abspath",
"os.remove"
] | [((762, 788), 'os.path.exists', 'os.path.exists', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (776, 788), False, 'import os\n'), ((790, 811), 'os.remove', 'os.remove', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (799, 811), False, 'import os\n'), ((934, 979), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['None', 'parent_package', 'top_path'], {}), '(None, parent_package, top_path)\n', (947, 979), False, 'from numpy.distutils.misc_util import Configuration\n'), ((1481, 1492), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1490, 1492), False, 'import os\n'), ((1560, 1580), 'os.chdir', 'os.chdir', (['local_path'], {}), '(local_path)\n', (1568, 1580), False, 'import os\n'), ((1585, 1615), 'sys.path.insert', 'sys.path.insert', (['(0)', 'local_path'], {}), '(0, local_path)\n', (1600, 1615), False, 'import sys\n'), ((1263, 1298), 'os.path.join', 'os.path.join', (['"""pydec"""', '"""version.py"""'], {}), "('pydec', 'version.py')\n", (1275, 1298), False, 'import os\n'), ((1526, 1554), 'os.path.abspath', 'os.path.abspath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (1541, 1554), False, 'import os\n'), ((1637, 1670), 'os.path.join', 'os.path.join', (['local_path', '"""pydec"""'], {}), "(local_path, 'pydec')\n", (1649, 1670), False, 'import os\n'), ((2321, 2339), 'os.chdir', 'os.chdir', (['old_path'], {}), '(old_path)\n', (2329, 2339), False, 'import os\n')] |
from src import fastarg
import subprocess
def test_foo():
    """Smoke test: str.upper behaves as expected."""
    value = 'foo'
    assert value.upper() == 'FOO'
def test_fastarg_no_methods():
    """A freshly constructed app has no registered commands."""
    app = fastarg.Fastarg()
    assert not app.commands
def test_fastarg_one_method():
    """Registering one command grows app.commands to length one."""
    app = fastarg.Fastarg()

    @app.command()
    def foo():
        print("foo")

    assert len(app.commands) == 1
def test_command_get_name():
    """A registered command reports the wrapped function's name."""
    app = fastarg.Fastarg()

    @app.command()
    def foo():
        print("foo")

    first_command = app.commands[0]
    assert first_command.get_name() == "foo"
def test_hello_world():
    """The CLI greets the name passed on the command line."""
    proc = subprocess.run("python3 main.py hello_world foo", shell=True, capture_output=True)
    output = proc.stdout.decode("utf-8")
    assert output == "hello foo\n"
def test_create_todo():
    """create_todo echoes the title with completed defaulting to False."""
    cmd = 'python3 main.py todo create_todo "drink water"'
    proc = subprocess.run(cmd, shell=True, capture_output=True)
    assert proc.stdout.decode("utf-8") == "create todo: drink water - False\n"
def test_create_todo_completed():
    """The --completed flag flips the echoed completion state to True."""
    cmd = 'python3 main.py todo create_todo "drink water" --completed'
    proc = subprocess.run(cmd, shell=True, capture_output=True)
    assert proc.stdout.decode("utf-8") == "create todo: drink water - True\n"
def test_create_address():
    """Nested subcommand with options prints the assembled address."""
    cmd = 'python3 main.py user address create_address 123 "456 main st" --city bellevue --state wa --zip 98004'
    proc = subprocess.run(cmd, shell=True, capture_output=True)
    assert proc.stdout.decode("utf-8") == "creating address for user 123\n456 main st bellevue wa 98004\n"
"subprocess.run",
"src.fastarg.Fastarg"
] | [((135, 152), 'src.fastarg.Fastarg', 'fastarg.Fastarg', ([], {}), '()\n', (150, 152), False, 'from src import fastarg\n'), ((230, 247), 'src.fastarg.Fastarg', 'fastarg.Fastarg', ([], {}), '()\n', (245, 247), False, 'from src import fastarg\n'), ((379, 396), 'src.fastarg.Fastarg', 'fastarg.Fastarg', ([], {}), '()\n', (394, 396), False, 'from src import fastarg\n'), ((550, 636), 'subprocess.run', 'subprocess.run', (['"""python3 main.py hello_world foo"""'], {'shell': '(True)', 'capture_output': '(True)'}), "('python3 main.py hello_world foo', shell=True,\n capture_output=True)\n", (564, 636), False, 'import subprocess\n'), ((751, 852), 'subprocess.run', 'subprocess.run', (['"""python3 main.py todo create_todo "drink water\\""""'], {'shell': '(True)', 'capture_output': '(True)'}), '(\'python3 main.py todo create_todo "drink water"\', shell=True,\n capture_output=True)\n', (765, 852), False, 'import subprocess\n'), ((1002, 1115), 'subprocess.run', 'subprocess.run', (['"""python3 main.py todo create_todo "drink water" --completed"""'], {'shell': '(True)', 'capture_output': '(True)'}), '(\'python3 main.py todo create_todo "drink water" --completed\',\n shell=True, capture_output=True)\n', (1016, 1115), False, 'import subprocess\n'), ((1257, 1418), 'subprocess.run', 'subprocess.run', (['"""python3 main.py user address create_address 123 "456 main st" --city bellevue --state wa --zip 98004"""'], {'shell': '(True)', 'capture_output': '(True)'}), '(\n \'python3 main.py user address create_address 123 "456 main st" --city bellevue --state wa --zip 98004\'\n , shell=True, capture_output=True)\n', (1271, 1418), False, 'import subprocess\n')] |
from random import seed, shuffle
import re
import os
from shutil import rmtree
import unicodedata
# Fixed seed so the shuffled word order (and the numbered output files)
# is reproducible across runs.
seed("lol")
def strip_accents(s):
    """Return *s* with combining accent marks removed (e.g. 'café' -> 'cafe')."""
    decomposed = unicodedata.normalize("NFD", s)
    kept = [ch for ch in decomposed if unicodedata.category(ch) != "Mn"]
    return "".join(kept)
words = []
with open("./zone.txt") as f:
    for word in f.readlines():
        word = word.strip()
        # Keep only 6-9 letter single words.
        if len(word) > 9 or len(word) < 6:
            continue
        if " " in word:
            continue
        # Normalize: drop accents and punctuation commonly found in the list.
        word = strip_accents(word)
        word = word.replace("-", "")
        word = word.replace("'", "")
        word = word.replace("(", "")
        word = word.replace(")", "")
        # Skip (and log) anything that still isn't pure ASCII letters.
        if not re.match(r"^[a-zA-Z]+$", word):
            print(word)
            continue
        words.append(word)
shuffle(words)
# Rebuild the output directory from scratch, one uppercased word per file.
rmtree("./mots", ignore_errors=True)
os.makedirs("./mots", exist_ok=True)
for i, word in enumerate(words):
    with open(f"./mots/{i+1}.txt", "w") as f:
        f.write(word.upper())
| [
"random.shuffle",
"os.makedirs",
"re.match",
"random.seed",
"unicodedata.category",
"unicodedata.normalize",
"shutil.rmtree"
] | [((99, 110), 'random.seed', 'seed', (['"""lol"""'], {}), "('lol')\n", (103, 110), False, 'from random import seed, shuffle\n'), ((764, 778), 'random.shuffle', 'shuffle', (['words'], {}), '(words)\n', (771, 778), False, 'from random import seed, shuffle\n'), ((780, 816), 'shutil.rmtree', 'rmtree', (['"""./mots"""'], {'ignore_errors': '(True)'}), "('./mots', ignore_errors=True)\n", (786, 816), False, 'from shutil import rmtree\n'), ((817, 853), 'os.makedirs', 'os.makedirs', (['"""./mots"""'], {'exist_ok': '(True)'}), "('./mots', exist_ok=True)\n", (828, 853), False, 'import os\n'), ((659, 688), 're.match', 're.match', (['"""^[a-zA-Z]+$"""', 'word'], {}), "('^[a-zA-Z]+$', word)\n", (667, 688), False, 'import re\n'), ((174, 205), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFD"""', 's'], {}), "('NFD', s)\n", (195, 205), False, 'import unicodedata\n'), ((209, 232), 'unicodedata.category', 'unicodedata.category', (['c'], {}), '(c)\n', (229, 232), False, 'import unicodedata\n')] |
import argparse
from PIL import Image
import numpy as np
import onnxruntime as rt
if __name__ == '__main__':
    # CLI: run a style-transfer ONNX model on a single image and save the result.
    parser = argparse.ArgumentParser(description="StyleTransferONNX")
    parser.add_argument('--model', type=str, default=' ', help='ONNX model file', required=True)
    parser.add_argument('--input', type=str, default=' ', help='Input image', required=True)
    # fixed copy-paste bug: the --output help text previously said 'learning rate'
    parser.add_argument('--output', type=str, default=' ', help='Output image', required=True)
    args = parser.parse_args()
    session = rt.InferenceSession(args.model)
    inputH = session.get_inputs()
    outputH = session.get_outputs()
    img = Image.open(args.input)
    print('img dim: ',img.width,' ',img.height)
    # HWC uint8 -> CHW float32 clipped to [0, 255], with a leading batch dim.
    inputArray = np.asarray(img).astype(np.float32)
    inputArray = inputArray.transpose([2,0,1])
    np.clip(inputArray,0,255,out=inputArray)
    inputArray = inputArray.reshape((1,3,img.height,img.width))
    output_res = session.run(None,{inputH[0].name: inputArray})
    # Drop the batch dim, CHW -> HWC, and back to uint8 for saving.
    output_img = output_res[0].reshape(3,output_res[0].shape[2],output_res[0].shape[3])
    output_img = output_img.transpose([1,2,0])
    output_img = output_img.astype(np.uint8)
    output = Image.fromarray(output_img)
    output.save(args.output)
| [
"numpy.clip",
"PIL.Image.fromarray",
"PIL.Image.open",
"argparse.ArgumentParser",
"numpy.asarray",
"onnxruntime.InferenceSession"
] | [((122, 178), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""StyleTransferONNX"""'}), "(description='StyleTransferONNX')\n", (145, 178), False, 'import argparse\n'), ((504, 535), 'onnxruntime.InferenceSession', 'rt.InferenceSession', (['args.model'], {}), '(args.model)\n', (523, 535), True, 'import onnxruntime as rt\n'), ((613, 635), 'PIL.Image.open', 'Image.open', (['args.input'], {}), '(args.input)\n', (623, 635), False, 'from PIL import Image\n'), ((699, 714), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (709, 714), True, 'import numpy as np\n'), ((811, 854), 'numpy.clip', 'np.clip', (['inputArray', '(0)', '(255)'], {'out': 'inputArray'}), '(inputArray, 0, 255, out=inputArray)\n', (818, 854), True, 'import numpy as np\n'), ((1168, 1195), 'PIL.Image.fromarray', 'Image.fromarray', (['output_img'], {}), '(output_img)\n', (1183, 1195), False, 'from PIL import Image\n')] |
#!/usr/bin/env python
import subprocess
import sys
import os
from pbcommand.engine import run_cmd
def run(args):
    """Emit tool-contract JSON files for every pbcoretools task module.

    args: optional single-element list holding the output directory;
    defaults to the current working directory.
    """
    output_dir = os.getcwd()
    if len(args) == 1:
        output_dir = args[0]
    # NOTE(review): assert is stripped under `python -O`; a raise would be
    # more robust for input validation.
    assert os.path.isdir(output_dir), "Not a directory: %s"%output_dir
    module_dir = os.path.join(os.path.dirname(__file__), "pbcoretools", "tasks")
    for file_name in os.listdir(module_dir):
        if file_name.endswith(".py") and not file_name.startswith("_"):
            # converters/filters have their own multi-contract emit below.
            if file_name in ["converters.py", "filters.py"]:
                continue
            module_name = "pbcoretools.tasks.{m}".format(m=file_name[:-3])
            json_file = os.path.join(output_dir,
                "{m}_tool_contract.json".format(m=module_name))
            cmd = "python -m {m} --emit-tool-contract > {j}".format(
                m=module_name, j=json_file)
            run_cmd(cmd, sys.stdout, sys.stderr)
    cmd = "python -m pbcoretools.tasks.converters emit-tool-contracts -o {d}".format(d=output_dir)
    run_cmd(cmd, sys.stdout, sys.stderr)
    cmd = "python -m pbcoretools.tasks.filters emit-tool-contracts -o {d}".format(d=output_dir)
    run_cmd(cmd, sys.stdout, sys.stderr)
if __name__ == "__main__":
    run(sys.argv[1:])
| [
"os.listdir",
"os.getcwd",
"os.path.dirname",
"os.path.isdir",
"pbcommand.engine.run_cmd"
] | [((134, 145), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (143, 145), False, 'import os\n'), ((375, 397), 'os.listdir', 'os.listdir', (['module_dir'], {}), '(module_dir)\n', (385, 397), False, 'import os\n'), ((1010, 1046), 'pbcommand.engine.run_cmd', 'run_cmd', (['cmd', 'sys.stdout', 'sys.stderr'], {}), '(cmd, sys.stdout, sys.stderr)\n', (1017, 1046), False, 'from pbcommand.engine import run_cmd\n'), ((1147, 1183), 'pbcommand.engine.run_cmd', 'run_cmd', (['cmd', 'sys.stdout', 'sys.stderr'], {}), '(cmd, sys.stdout, sys.stderr)\n', (1154, 1183), False, 'from pbcommand.engine import run_cmd\n'), ((213, 238), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (226, 238), False, 'import os\n'), ((303, 328), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (318, 328), False, 'import os\n'), ((870, 906), 'pbcommand.engine.run_cmd', 'run_cmd', (['cmd', 'sys.stdout', 'sys.stderr'], {}), '(cmd, sys.stdout, sys.stderr)\n', (877, 906), False, 'from pbcommand.engine import run_cmd\n')] |
from N4Tools.Design import Text,Square,ThreadAnimation,Animation,AnimationTools
import requests as req
import socket,os,time,sys
from threading import Thread as u
A = Animation()
class MA:
    # Generator yielding one frame of the scan progress bar; min/max shadow
    # the builtins (kept for the ThreadAnimation API, which passes them as
    # keyword arguments via set_kwargs).
    def CustomAnimation(min=0,max=5639,**kwargs):
        yield A.Prograsse(min=min,max=max,prograsse=['│','\033[1;36m█','\033[1;34m▒','│'],text='Scanning',**kwargs)[0]+f'\033[1;37m({round(min*100/max,1)}/100.0) '
class Main(MA):
    """Port scanner: probes TCP ports on the external and internal IP and
    prints a per-address table of open ports with service names."""
    # Shared across instances: "<ip>+eip" (external) / "<ip>+iip" (internal).
    ips=[]
    def __init__(self):
        try:
            self.Sq = Square()
            self.Sq.color = '[$LCYAN]'
            self.T = Text()
            # External IP from a public echo service.
            eip = req.get('https://api.ipify.org').text
            self.ips.append(eip+'+eip')
            # Internal IP discovered by opening a UDP socket to 8.8.8.8.
            s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
            s.connect(("8.8.8.8", 80))
            iip = s.getsockname()[0]
            self.ips.append(iip+'+iip')
            self.open_ports1 = []
            self.open_ports1.sort()
            self.open_ports2 = []
            self.open_ports2.sort()
            # Ports 1-6534 plus a few common extras.
            self.ports=set(list(range(1,6535)) + [8080,7547,6666,8888,7777])
            self.mw=os.get_terminal_size().columns
        except socket.gaierror:
            exit()
        except socket.error:
            print('\033[1;31m[-] Check your internet connection..!\033[0m')
            exit()
        except KeyboardInterrupt:
            exit()
        b='''
         _____ _--_
        / ___/_________ _____ .' '.
        \__ \/ ___/ __ `/ __ \ |\033[1;30m((0)) \033[1;35m|
       ___/ / /__/ /_/ / / / / | |
      /____/\___/\__,_/_/ /_/ '. .'
                                |""|
    '''
        print('\033[0;32m',b,'\033[0m')
    def serv(self,p):
        # Best-effort service name lookup for a port number.
        try:
            x=socket.getservbyport(p)
        except socket.error:
            x='Unknown'
        return x
    def display(self,i):
        # i is "<ip>+<eip|iip>"; pick the matching open-port list.
        i,a=i.split('+')
        myl=self.open_ports1 if a=='eip' else self.open_ports2
        fu = '''
  port     Service     Status
[$LCYAN]═══════════════════════════════'''
        Ip = f'\n\n[$LYELLOW][{i}]'
        if not len(myl):
            fu+='\n[$LRED] No Service Is Runing\b'
        else:
            for p in myl:
                fu+=f'\n[$LBLUE] {str(p).ljust(4)}  {self.serv(p).rjust(8)}     {"Open".rjust(7)} '
        box_info=self.Sq.base(fu[1:-1])
        output = self.T.CentreAlignPro([Ip,box_info])
        for x in output.split('\n'):
            print("\t"+x)
    # Note the (Thread, self) order: ThreadAnimation injects its handle first.
    @ThreadAnimation(Animation=MA.CustomAnimation)
    def scan(Thread,self):
        p=0
        for ip in self.ips:
            i,a=ip.split('+')
            myl=self.open_ports1 if a=='eip' else self.open_ports2
            for port in self.ports:
                # connect_ex returns 0 when the TCP connect succeeds.
                s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                s.settimeout(0.1)
                if s.connect_ex((i,port))==0:
                    myl.append(port)
                Thread.set_kwargs(min=p+1, max=6539*2)
                p+=1
        Thread.kill()
    def d(self):
        # Print the result tables for every scanned address.
        for ip in self.ips:
            self.display(ip)
    def runs(self):
        # Scan synchronously, then display on a background thread.
        p1=u(target=self.scan,args=())
        p1.start()
        p1.join()
        s1=u(target=self.d,args=())
        s1.start()
if __name__ == '__main__':
    Main().runs()
| [
"os.get_terminal_size",
"socket.getservbyport",
"socket.socket",
"N4Tools.Design.Animation",
"N4Tools.Design.Text",
"requests.get",
"N4Tools.Design.Square",
"threading.Thread",
"N4Tools.Design.ThreadAnimation"
] | [((167, 178), 'N4Tools.Design.Animation', 'Animation', ([], {}), '()\n', (176, 178), False, 'from N4Tools.Design import Text, Square, ThreadAnimation, Animation, AnimationTools\n'), ((2071, 2116), 'N4Tools.Design.ThreadAnimation', 'ThreadAnimation', ([], {'Animation': 'MA.CustomAnimation'}), '(Animation=MA.CustomAnimation)\n', (2086, 2116), False, 'from N4Tools.Design import Text, Square, ThreadAnimation, Animation, AnimationTools\n'), ((2558, 2586), 'threading.Thread', 'u', ([], {'target': 'self.scan', 'args': '()'}), '(target=self.scan, args=())\n', (2559, 2586), True, 'from threading import Thread as u\n'), ((2616, 2641), 'threading.Thread', 'u', ([], {'target': 'self.d', 'args': '()'}), '(target=self.d, args=())\n', (2617, 2641), True, 'from threading import Thread as u\n'), ((474, 482), 'N4Tools.Design.Square', 'Square', ([], {}), '()\n', (480, 482), False, 'from N4Tools.Design import Text, Square, ThreadAnimation, Animation, AnimationTools\n'), ((525, 531), 'N4Tools.Design.Text', 'Text', ([], {}), '()\n', (529, 531), False, 'from N4Tools.Design import Text, Square, ThreadAnimation, Animation, AnimationTools\n'), ((617, 665), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (630, 665), False, 'import socket, os, time, sys\n'), ((1442, 1465), 'socket.getservbyport', 'socket.getservbyport', (['p'], {}), '(p)\n', (1462, 1465), False, 'import socket, os, time, sys\n'), ((541, 573), 'requests.get', 'req.get', (['"""https://api.ipify.org"""'], {}), "('https://api.ipify.org')\n", (548, 573), True, 'import requests as req\n'), ((937, 959), 'os.get_terminal_size', 'os.get_terminal_size', ([], {}), '()\n', (957, 959), False, 'import socket, os, time, sys\n'), ((2282, 2331), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (2295, 2331), False, 'import socket, os, time, sys\n')] |
import os
import datetime
import gym
import numpy as np
import matplotlib.pyplot as plt
from es import CMAES
import pandas as pd
import string
def sigmoid(x):
    """Element-wise logistic activation: 1 / (1 + e**(-x))."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
class Agent:
    """Three-layer feed-forward policy network with sigmoid activations.

    All weights start at zero; call assignWeights() with the triple
    produced by RL.weightsCalc() before using feedforward().
    """

    def __init__(self, x, y, layer1_nodes, layer2_nodes):
        """Allocate zeroed buffers for an x-input, y-output network."""
        self.input = np.zeros(x, dtype=np.float128)
        self.weights1 = np.zeros((x, layer1_nodes), dtype=np.float128)
        self.weights2 = np.zeros((layer1_nodes, layer2_nodes), dtype=np.float128)
        self.weights3 = np.zeros((layer2_nodes, y), dtype=np.float128)
        self.output = np.zeros(y, dtype=np.float128)

    def feedforward(self, x):
        """Propagate x through the network, caching each layer's activation."""
        self.input = x
        self.layer1 = sigmoid(np.dot(self.input, self.weights1))
        self.layer2 = sigmoid(np.dot(self.layer1, self.weights2))
        self.output = sigmoid(np.dot(self.layer2, self.weights3))

    def assignWeights(self, s):
        """Install the (w1, w2, w3) weight matrices from the triple s."""
        self.weights1 = s[0]
        self.weights2 = s[1]
        self.weights3 = s[2]
class RL:
    # initializes the CMA and RL algo
    __slots__ = ['HL1', "HL2", "NPOP", "MAX_ITER", "STEPS", "dir", "STATE_SIZE", "ACTION_SIZE", "env", "FINAL"]
    def __init__(self, D="DefaultDir", H1=64, H2=64, P=100, G=5000, S=50000, E="TimePilot-ram-v0", wd=0.01, #weight decay initialized to 0.01
                 si=0.5):
        """Set up the gym environment and CMA-ES optimizer, then run training.

        D: output directory name; H1/H2: hidden-layer sizes; P: population
        size; G: generations; S: max steps per episode; E: gym env id;
        wd: weight decay; si: initial sigma for CMA-ES.  NOTE: training
        runs to completion inside the constructor (self.Engine).
        """
        # HYPERPARAMETERS
        self.HL1 = H1
        self.HL2 = H2
        self.NPOP = P
        self.MAX_ITER = G
        self.STEPS = S
        self.dir = D
        # CONSTANTS
        self.STATE_SIZE = 128
        self.ACTION_SIZE = self.decisions_env(E)
        self.env = gym.make(E)
        self.env.reset()
        # CMA
        # Total parameter count = all three weight matrices flattened.
        NPARAMS = (self.STATE_SIZE * self.HL1) + (self.HL1 * self.HL2) + (self.HL2 * self.ACTION_SIZE)
        cma = CMAES(NPARAMS, popsize=self.NPOP, weight_decay=wd, sigma_init=si)
        self.FINAL = self.Engine(cma)
# Function to initialize
def decisions_env(self, name):
if name == "TimePilot-ram-v0":
return 10
elif name == "Breakout-ram-v0":
return 4
return 10 # just rerturn TimePilot command as default
def findHighest(self, NN_Output):
NN_Temp = NN_Output
NN_I = []
xF = 0
index = 0
foundI = 0
for xl in range(self.ACTION_SIZE + 1):
for NN_O in NN_Temp:
if xF < NN_O:
xF = NN_O
foundI = index
index = index + 1
NN_Temp[foundI] = -1
NN_I.append(foundI)
index = 0
xF = 0
return NN_I[0]
def weightsCalc(self, s):
x1 = np.asarray(s[:self.STATE_SIZE * self.HL1], dtype=np.float128)
x2 = np.asarray(s[self.STATE_SIZE * self.HL1:self.STATE_SIZE * self.HL1 + self.HL1 * self.HL2], dtype=np.float128)
x3 = np.asarray(s[self.STATE_SIZE * self.HL1 + self.HL1 * self.HL2:], dtype=np.float128)
x1 = np.reshape(x1, (self.STATE_SIZE, self.HL1))
x2 = np.reshape(x2, (self.HL1, self.HL2))
x3 = np.reshape(x3, (self.HL2, self.ACTION_SIZE))
return (x1, x2, x3)
# runs the sim and tallies reward
def Fitness(self, solution):
a = Agent(self.STATE_SIZE, self.ACTION_SIZE, self.HL1, self.HL2)
a.assignWeights(self.weightsCalc(solution))
fitness = 0
self.env.reset()
first = True
for i in range(self.STEPS):
if first:
obs = a.input
first = False
a.feedforward(obs)
choice = list(a.output)
action = self.findHighest(choice)
obs, reward, done, info = self.env.step(action)
# self.env.render()
fitness = fitness + reward
if done:
self.env.close()
break
self.env.close()
return fitness
# This function communicates with the es-tools CMA object "solver"
def Engine(self, solver):
history = []
dLogs = []
word = None
Word = "Start: {0}\n".format(str(self.MAX_ITER))
print(Word)
dLogs.append(Word)
for j in range(self.MAX_ITER):
solutions = solver.ask() # Generates parameters based on distribution mean and covariance matrix
fitness_list = np.zeros(solver.popsize)
for k in range(solver.popsize):
fitness_list[k] = self.Fitness(solutions[k])
solver.tell(fitness_list) # update distribution mean and covariance matrix
result = solver.result()
history.append(result[1])
if (j + 1) % 100 == 0:
Word = "Iteration {0}___{1}".format((j + 1), result[1])
print(Word)
dLogs.append(Word)
print("{0} {1}".format(str(j), self.dir), flush=True)
print("local optimum discovered: {0}\n".format(result[0]))
Word = "fitness score: {0}".format(result[1])
print(Word)
dLogs.append(Word)
self.env.close()
self.GRAPHDB(history, dLogs)
return result[1]
# Graphs, makes database, and saves to directory
def GRAPHDB(self, history, dLogs):
if not (os.path.exists(self.dir)):
os.mkdir(self.dir)
else:
print("{0} already exists as a directory".format(self.dir))
fig = plt.figure()
ax = plt.subplot()
plt.plot(np.arange(len(history)), history, 'r', label="Fitness")
ax.set_ylabel('Score')
ax.set_xlabel('Generation')
ax.set_title('Fitness')
plt.legend()
plt.show()
mainN = "_HL1_{0}_HL2_{1}_P_{2}_F_{3}".format(str(self.HL1), str(self.HL2), str(self.NPOP), str(self.MAX_ITER))
fig.savefig("{0}/Grid_Plot_{1}.png".format(self.dir, mainN))
DataR = pd.DataFrame()
DataR['BestV'] = history
print(DataR)
DataR.to_csv("{0}/R_data_{1}.csv".format(self.dir, mainN), index=None, header=True)
f = open("{0}/Logs_data_{1}.csv".format(self.dir, mainN), "w")
f.writelines(str(dLogs))
f.close()
def autoLabel(rects, I, ax):
cI = 0
for rect in rects:
height = rect.get_height()
information = "L1: {0} L2: {1}\nP: {2}\nG: {3}\nS: {4}".format(str(I[cI][1]), str(I[cI][2]), str(I[cI][3]), str(I[cI][4]), str(I[cI][5]))
ax.annotate('{}'.format(height), xy=(rect.get_x() + rect.get_width() / 2, height), xytext=(0, 3),
textcoords="offset points", ha='center', va="bottom")
ax.annotate('{}'.format(information), xy=(rect.get_x() + rect.get_width() / 2, height / 2), xytext=(0, 3),
textcoords="offset points", ha='center', va="bottom")
cI += 1
def createGraph(testCaseResults, t):
fig, ax = plt.subplots()
tests = []
results = []
for i in range(len(testCaseResults)):
tests.append("{0}_{1}".format(currentLabelE, str(i)))
results.append(testCaseResults[i][0])
r = ax.bar(np.arange(len(testCaseResults)), results)
ax.set_ylabel('Highest Score')
ax.set_title('Overall Performance test case set {0}_{1}'.format(currentLabelE, currentLabelTC))
ax.set_xticks(np.arange(len(testCaseResults)))
ax.set_xticklabels(tests)
plt.xticks(np.arange(int(len(testCaseResults))))
autoLabel(r, testCaseResults, ax)
fig.tight_layout()
plt.show()
if not (os.path.exists("TestOverall")):
os.mkdir("TestOverall")
else:
print("TestOverall already exists as a directory")
fig.savefig("TestOverall/NN_Plot_{0}_{1}_Overall.png".format(str(t), str(datetime.datetime.today())))
print("Main Started...")
#caculates n amount of test cases each with m amount of cases
# returns a bar graph of each test cases test in one graph, labeled
################# # testCases represents n sets of tests cases that you are going to test out
# TEST CASE # # each testCase set increases the Generation count linearly. This way we can compare the same pop
################# # size with different generation counts
TestRLA = [] #Each test case set houses a m amount of Cases
startP = 50 #Each iteration is set to linearly increase the population size
startG = 500
envAtari = ["TimePilot-ram-v0", "Breakout-ram-v0"]
environments = len(envAtari)
testCases = 1
Cases = 1
currentLabelE = None
currentLabelTC = None
#number of environments we are testing on
for currentE in range(environments):
currentLabelE = envAtari[currentE].split("-")[0]
if not (os.path.exists(currentLabelE)):
os.mkdir(currentLabelE)
else:
print("{0} already exists".format(currentLabelE))
print("TEST SET on {0} environment".format(envAtari[currentE]))
#(start, end). index at start = 1, index at the end = end - 1
for tC in range(1, (testCases + 1)): #tC is used as the generation max multiplier per {test case set}
currentLabelTC = string.ascii_uppercase[tC - 1]
print("CASE {0} Set Time G: {1}\n".format(currentLabelTC, str(startG)))
for i in range(1, (Cases + 1)): #i is used as the population multiplier per {test case}
caseRL = RL(D="{0}/_RL_{1}{2}".format(currentLabelE, currentLabelTC, str(i-1)), P=(startP * i), G=(startG * tC), E=envAtari[currentE])
TestRLA.append((int(caseRL.FINAL), caseRL.HL1, caseRL.HL2, caseRL.NPOP, caseRL.MAX_ITER, caseRL.STEPS))
createGraph(TestRLA, tC)
TestRLA = []
| [
"os.path.exists",
"numpy.reshape",
"es.CMAES",
"numpy.asarray",
"numpy.exp",
"datetime.datetime.today",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.dot",
"matplotlib.pyplot.subplots",
"os.mkdir",
"pandas.DataFrame",
"matplotlib.pyplot.subplot",
"gym.make",
"matplotlib.pyplot.legend... | [((6770, 6784), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6782, 6784), True, 'import matplotlib.pyplot as plt\n'), ((7360, 7370), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7368, 7370), True, 'import matplotlib.pyplot as plt\n'), ((286, 316), 'numpy.zeros', 'np.zeros', (['x'], {'dtype': 'np.float128'}), '(x, dtype=np.float128)\n', (294, 316), True, 'import numpy as np\n'), ((341, 387), 'numpy.zeros', 'np.zeros', (['(x, layer1_nodes)'], {'dtype': 'np.float128'}), '((x, layer1_nodes), dtype=np.float128)\n', (349, 387), True, 'import numpy as np\n'), ((412, 469), 'numpy.zeros', 'np.zeros', (['(layer1_nodes, layer2_nodes)'], {'dtype': 'np.float128'}), '((layer1_nodes, layer2_nodes), dtype=np.float128)\n', (420, 469), True, 'import numpy as np\n'), ((494, 540), 'numpy.zeros', 'np.zeros', (['(layer2_nodes, y)'], {'dtype': 'np.float128'}), '((layer2_nodes, y), dtype=np.float128)\n', (502, 540), True, 'import numpy as np\n'), ((563, 593), 'numpy.zeros', 'np.zeros', (['y'], {'dtype': 'np.float128'}), '(y, dtype=np.float128)\n', (571, 593), True, 'import numpy as np\n'), ((1577, 1588), 'gym.make', 'gym.make', (['E'], {}), '(E)\n', (1585, 1588), False, 'import gym\n'), ((1746, 1811), 'es.CMAES', 'CMAES', (['NPARAMS'], {'popsize': 'self.NPOP', 'weight_decay': 'wd', 'sigma_init': 'si'}), '(NPARAMS, popsize=self.NPOP, weight_decay=wd, sigma_init=si)\n', (1751, 1811), False, 'from es import CMAES\n'), ((2623, 2684), 'numpy.asarray', 'np.asarray', (['s[:self.STATE_SIZE * self.HL1]'], {'dtype': 'np.float128'}), '(s[:self.STATE_SIZE * self.HL1], dtype=np.float128)\n', (2633, 2684), True, 'import numpy as np\n'), ((2698, 2812), 'numpy.asarray', 'np.asarray', (['s[self.STATE_SIZE * self.HL1:self.STATE_SIZE * self.HL1 + self.HL1 * self.HL2]'], {'dtype': 'np.float128'}), '(s[self.STATE_SIZE * self.HL1:self.STATE_SIZE * self.HL1 + self.\n HL1 * self.HL2], dtype=np.float128)\n', (2708, 2812), True, 'import numpy as 
np\n'), ((2821, 2909), 'numpy.asarray', 'np.asarray', (['s[self.STATE_SIZE * self.HL1 + self.HL1 * self.HL2:]'], {'dtype': 'np.float128'}), '(s[self.STATE_SIZE * self.HL1 + self.HL1 * self.HL2:], dtype=np.\n float128)\n', (2831, 2909), True, 'import numpy as np\n'), ((2918, 2961), 'numpy.reshape', 'np.reshape', (['x1', '(self.STATE_SIZE, self.HL1)'], {}), '(x1, (self.STATE_SIZE, self.HL1))\n', (2928, 2961), True, 'import numpy as np\n'), ((2975, 3011), 'numpy.reshape', 'np.reshape', (['x2', '(self.HL1, self.HL2)'], {}), '(x2, (self.HL1, self.HL2))\n', (2985, 3011), True, 'import numpy as np\n'), ((3025, 3069), 'numpy.reshape', 'np.reshape', (['x3', '(self.HL2, self.ACTION_SIZE)'], {}), '(x3, (self.HL2, self.ACTION_SIZE))\n', (3035, 3069), True, 'import numpy as np\n'), ((5342, 5354), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5352, 5354), True, 'import matplotlib.pyplot as plt\n'), ((5368, 5381), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (5379, 5381), True, 'import matplotlib.pyplot as plt\n'), ((5562, 5574), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5572, 5574), True, 'import matplotlib.pyplot as plt\n'), ((5583, 5593), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5591, 5593), True, 'import matplotlib.pyplot as plt\n'), ((5801, 5815), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5813, 5815), True, 'import pandas as pd\n'), ((7383, 7412), 'os.path.exists', 'os.path.exists', (['"""TestOverall"""'], {}), "('TestOverall')\n", (7397, 7412), False, 'import os\n'), ((7423, 7446), 'os.mkdir', 'os.mkdir', (['"""TestOverall"""'], {}), "('TestOverall')\n", (7431, 7446), False, 'import os\n'), ((8501, 8530), 'os.path.exists', 'os.path.exists', (['currentLabelE'], {}), '(currentLabelE)\n', (8515, 8530), False, 'import os\n'), ((8541, 8564), 'os.mkdir', 'os.mkdir', (['currentLabelE'], {}), '(currentLabelE)\n', (8549, 8564), False, 'import os\n'), ((180, 190), 'numpy.exp', 'np.exp', (['(-x)'], {}), 
'(-x)\n', (186, 190), True, 'import numpy as np\n'), ((678, 711), 'numpy.dot', 'np.dot', (['self.input', 'self.weights1'], {}), '(self.input, self.weights1)\n', (684, 711), True, 'import numpy as np\n'), ((743, 777), 'numpy.dot', 'np.dot', (['self.layer1', 'self.weights2'], {}), '(self.layer1, self.weights2)\n', (749, 777), True, 'import numpy as np\n'), ((809, 843), 'numpy.dot', 'np.dot', (['self.layer2', 'self.weights3'], {}), '(self.layer2, self.weights3)\n', (815, 843), True, 'import numpy as np\n'), ((4289, 4313), 'numpy.zeros', 'np.zeros', (['solver.popsize'], {}), '(solver.popsize)\n', (4297, 4313), True, 'import numpy as np\n'), ((5183, 5207), 'os.path.exists', 'os.path.exists', (['self.dir'], {}), '(self.dir)\n', (5197, 5207), False, 'import os\n'), ((5222, 5240), 'os.mkdir', 'os.mkdir', (['self.dir'], {}), '(self.dir)\n', (5230, 5240), False, 'import os\n'), ((7593, 7618), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (7616, 7618), False, 'import datetime\n')] |
"""Tests for the Synology DSM config flow."""
import logging
from unittest.mock import MagicMock, Mock, patch
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.synology_dsm.const import (
CONF_VOLUMES,
DEFAULT_NAME,
DEFAULT_PORT,
DEFAULT_PORT_SSL,
DEFAULT_SSL,
DOMAIN,
)
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import (
CONF_DISKS,
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
)
from homeassistant.helpers.typing import HomeAssistantType
from tests.common import MockConfigEntry
_LOGGER = logging.getLogger(__name__)
NAME = "My Syno"
HOST = "nas.meontheinternet.com"
SERIAL = "mySerial"
HOST_2 = "nas.worldwide.me"
SERIAL_2 = "mySerial2"
PORT = 1234
SSL = True
USERNAME = "Home_Assistant"
PASSWORD = "password"
@pytest.fixture(name="service")
def mock_controller_service():
"""Mock a successful service."""
with patch(
"homeassistant.components.synology_dsm.config_flow.SynologyDSM"
) as service_mock:
service_mock.return_value.login = Mock(return_value=True)
service_mock.return_value.information = Mock(serial=SERIAL)
service_mock.return_value.utilisation = Mock(cpu_user_load=1)
service_mock.return_value.storage = Mock(disks_ids=[], volumes_ids=[])
yield service_mock
@pytest.fixture(name="service_login_failed")
def mock_controller_service_login_failed():
"""Mock a failed login."""
with patch(
"homeassistant.components.synology_dsm.config_flow.SynologyDSM"
) as service_mock:
service_mock.return_value.login = Mock(return_value=False)
yield service_mock
@pytest.fixture(name="service_failed")
def mock_controller_service_failed():
"""Mock a failed service."""
with patch(
"homeassistant.components.synology_dsm.config_flow.SynologyDSM"
) as service_mock:
service_mock.return_value.login = Mock(return_value=True)
service_mock.return_value.information = Mock(serial=None)
service_mock.return_value.utilisation = Mock(cpu_user_load=None)
service_mock.return_value.storage = Mock(disks_ids=None, volumes_ids=None)
yield service_mock
async def test_user(hass: HomeAssistantType, service: MagicMock):
"""Test user config."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
# test with all provided
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_PORT: PORT,
CONF_SSL: SSL,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL
assert result["title"] == HOST
assert result["data"][CONF_NAME] == NAME
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == PORT
assert result["data"][CONF_SSL] == SSL
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"].get(CONF_DISKS) is None
assert result["data"].get(CONF_VOLUMES) is None
service.return_value.information = Mock(serial=SERIAL_2)
# test without port + False SSL
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={
CONF_NAME: NAME,
CONF_HOST: HOST,
CONF_SSL: False,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL_2
assert result["title"] == HOST
assert result["data"][CONF_NAME] == NAME
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == DEFAULT_PORT
assert not result["data"][CONF_SSL]
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"].get(CONF_DISKS) is None
assert result["data"].get(CONF_VOLUMES) is None
async def test_import(hass: HomeAssistantType, service: MagicMock):
"""Test import step."""
# import with minimum setup
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL
assert result["title"] == HOST
assert result["data"][CONF_NAME] == DEFAULT_NAME
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_PORT] == DEFAULT_PORT_SSL
assert result["data"][CONF_SSL] == DEFAULT_SSL
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"].get(CONF_DISKS) is None
assert result["data"].get(CONF_VOLUMES) is None
service.return_value.information = Mock(serial=SERIAL_2)
# import with all
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_NAME: NAME,
CONF_HOST: HOST_2,
CONF_PORT: PORT,
CONF_SSL: SSL,
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_DISKS: ["sda", "sdb", "sdc"],
CONF_VOLUMES: ["volume_1"],
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == SERIAL_2
assert result["title"] == HOST_2
assert result["data"][CONF_NAME] == NAME
assert result["data"][CONF_HOST] == HOST_2
assert result["data"][CONF_PORT] == PORT
assert result["data"][CONF_SSL] == SSL
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_DISKS] == ["sda", "sdb", "sdc"]
assert result["data"][CONF_VOLUMES] == ["volume_1"]
async def test_abort_if_already_setup(hass: HomeAssistantType, service: MagicMock):
"""Test we abort if the account is already setup."""
MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
unique_id=SERIAL,
).add_to_hass(hass)
# Should fail, same HOST:PORT (import)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
# Should fail, same HOST:PORT (flow)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_login_failed(hass: HomeAssistantType, service_login_failed: MagicMock):
"""Test when we have errors during connection."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {CONF_USERNAME: "login"}
async def test_connection_failed(hass: HomeAssistantType, service_failed: MagicMock):
"""Test when we have errors during connection."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "unknown"}
| [
"logging.getLogger",
"unittest.mock.Mock",
"tests.common.MockConfigEntry",
"pytest.fixture",
"unittest.mock.patch"
] | [((662, 689), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (679, 689), False, 'import logging\n'), ((889, 919), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""service"""'}), "(name='service')\n", (903, 919), False, 'import pytest\n'), ((1412, 1455), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""service_login_failed"""'}), "(name='service_login_failed')\n", (1426, 1455), False, 'import pytest\n'), ((1739, 1776), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""service_failed"""'}), "(name='service_failed')\n", (1753, 1776), False, 'import pytest\n'), ((3534, 3555), 'unittest.mock.Mock', 'Mock', ([], {'serial': 'SERIAL_2'}), '(serial=SERIAL_2)\n', (3538, 3555), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((5384, 5405), 'unittest.mock.Mock', 'Mock', ([], {'serial': 'SERIAL_2'}), '(serial=SERIAL_2)\n', (5388, 5405), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((997, 1067), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.synology_dsm.config_flow.SynologyDSM"""'], {}), "('homeassistant.components.synology_dsm.config_flow.SynologyDSM')\n", (1002, 1067), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((1141, 1164), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (1145, 1164), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((1213, 1232), 'unittest.mock.Mock', 'Mock', ([], {'serial': 'SERIAL'}), '(serial=SERIAL)\n', (1217, 1232), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((1281, 1302), 'unittest.mock.Mock', 'Mock', ([], {'cpu_user_load': '(1)'}), '(cpu_user_load=1)\n', (1285, 1302), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((1347, 1381), 'unittest.mock.Mock', 'Mock', ([], {'disks_ids': '[]', 'volumes_ids': '[]'}), '(disks_ids=[], volumes_ids=[])\n', (1351, 1381), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((1540, 1610), 
'unittest.mock.patch', 'patch', (['"""homeassistant.components.synology_dsm.config_flow.SynologyDSM"""'], {}), "('homeassistant.components.synology_dsm.config_flow.SynologyDSM')\n", (1545, 1610), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((1684, 1708), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(False)'}), '(return_value=False)\n', (1688, 1708), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((1857, 1927), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.synology_dsm.config_flow.SynologyDSM"""'], {}), "('homeassistant.components.synology_dsm.config_flow.SynologyDSM')\n", (1862, 1927), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((2001, 2024), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (2005, 2024), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((2073, 2090), 'unittest.mock.Mock', 'Mock', ([], {'serial': 'None'}), '(serial=None)\n', (2077, 2090), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((2139, 2163), 'unittest.mock.Mock', 'Mock', ([], {'cpu_user_load': 'None'}), '(cpu_user_load=None)\n', (2143, 2163), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((2208, 2246), 'unittest.mock.Mock', 'Mock', ([], {'disks_ids': 'None', 'volumes_ids': 'None'}), '(disks_ids=None, volumes_ids=None)\n', (2212, 2246), False, 'from unittest.mock import MagicMock, Mock, patch\n'), ((6561, 6687), 'tests.common.MockConfigEntry', 'MockConfigEntry', ([], {'domain': 'DOMAIN', 'data': '{CONF_HOST: HOST, CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD}', 'unique_id': 'SERIAL'}), '(domain=DOMAIN, data={CONF_HOST: HOST, CONF_USERNAME:\n USERNAME, CONF_PASSWORD: PASSWORD}, unique_id=SERIAL)\n', (6576, 6687), False, 'from tests.common import MockConfigEntry\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..losses import build_loss
class ConvBNAct(nn.Sequential):
def __init__(self, in_channels: int, out_channels: int):
super().__init__(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
class Stage(nn.Sequential):
def __init__(self, in_channels: int, out_channels: int, n_layers: int = 2):
super().__init__()
self.add_module('0', ConvBNAct(in_channels, out_channels))
for i in range(1, n_layers):
self.add_module(str(i), ConvBNAct(out_channels, out_channels))
class UNet(nn.Module):
def __init__(self, stage_channels: list, n_classes: int, criterion: dict):
super().__init__()
# layers
assert len(stage_channels) == 5
c1, c2, c3, c4, c5 = stage_channels
self.stage1_down = Stage(3, c1)
self.stage2_down = Stage(c1, c2)
self.stage3_down = Stage(c2, c3)
self.stage4_down = Stage(c3, c4)
self.stage5 = Stage(c4, c5)
self.stage4_up = Stage(c5, c4)
self.stage3_up = Stage(c4, c3)
self.stage2_up = Stage(c3, c2)
self.stage1_up = Stage(c2, c1)
self.pool = nn.MaxPool2d(2, 2)
self.upconv_5to4 = nn.ConvTranspose2d(c5, c4, 2, 2)
self.upconv_4to3 = nn.ConvTranspose2d(c4, c3, 2, 2)
self.upconv_3to2 = nn.ConvTranspose2d(c3, c2, 2, 2)
self.upconv_2to1 = nn.ConvTranspose2d(c2, c1, 2, 2)
self.cls_top = nn.Conv2d(c1, n_classes, kernel_size=1)
# loss
self.cls_loss = build_loss(criterion['cls_loss'])
self._init_weights()
def forward(self, x):
x = x1 = self.stage1_down(x)
x = self.pool(x)
x = x2 = self.stage2_down(x)
x = self.pool(x)
x = x3 = self.stage3_down(x)
x = self.pool(x)
x = x4 = self.stage4_down(x)
x = self.pool(x)
x = self.upconv_5to4(self.stage5(x))
x = self.upconv_4to3(self.stage4_up(torch.cat([x4, x], dim=1)))
x = self.upconv_3to2(self.stage3_up(torch.cat([x3, x], dim=1)))
x = self.upconv_2to1(self.stage2_up(torch.cat([x2, x], dim=1)))
out = self.cls_top(self.stage1_up(torch.cat([x1, x], dim=1)))
return out
def _init_weights(self):
for name, m in self.named_modules():
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
if m.bias is not None:
if 'cls_top' in name:
nn.init.constant_(m.bias, np.log((1 - 0.01) / 0.01))
else:
nn.init.constant_(m.bias, 0.0)
def loss(self, outputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
loss = self.cls_loss(outputs, targets)
return loss
def predict(self, outputs: torch.Tensor) -> torch.Tensor:
preds = F.softmax(outputs, dim=1)
return preds
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.init.constant_",
"numpy.log",
"torch.nn.init.kaiming_normal_",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.ConvTranspose2d",
"torch.nn.functional.softmax",
"torch.cat"
] | [((1339, 1357), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (1351, 1357), True, 'import torch.nn as nn\n'), ((1385, 1417), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['c5', 'c4', '(2)', '(2)'], {}), '(c5, c4, 2, 2)\n', (1403, 1417), True, 'import torch.nn as nn\n'), ((1445, 1477), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['c4', 'c3', '(2)', '(2)'], {}), '(c4, c3, 2, 2)\n', (1463, 1477), True, 'import torch.nn as nn\n'), ((1505, 1537), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['c3', 'c2', '(2)', '(2)'], {}), '(c3, c2, 2, 2)\n', (1523, 1537), True, 'import torch.nn as nn\n'), ((1565, 1597), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['c2', 'c1', '(2)', '(2)'], {}), '(c2, c1, 2, 2)\n', (1583, 1597), True, 'import torch.nn as nn\n'), ((1622, 1661), 'torch.nn.Conv2d', 'nn.Conv2d', (['c1', 'n_classes'], {'kernel_size': '(1)'}), '(c1, n_classes, kernel_size=1)\n', (1631, 1661), True, 'import torch.nn as nn\n'), ((3083, 3108), 'torch.nn.functional.softmax', 'F.softmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (3092, 3108), True, 'import torch.nn.functional as F\n'), ((252, 326), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(False)'}), '(in_channels, out_channels, kernel_size=3, padding=1, bias=False)\n', (261, 326), True, 'import torch.nn as nn\n'), ((340, 368), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (354, 368), True, 'import torch.nn as nn\n'), ((382, 403), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (389, 403), True, 'import torch.nn as nn\n'), ((2130, 2155), 'torch.cat', 'torch.cat', (['[x4, x]'], {'dim': '(1)'}), '([x4, x], dim=1)\n', (2139, 2155), False, 'import torch\n'), ((2202, 2227), 'torch.cat', 'torch.cat', (['[x3, x]'], {'dim': '(1)'}), '([x3, x], dim=1)\n', (2211, 2227), False, 'import torch\n'), ((2274, 2299), 'torch.cat', 
'torch.cat', (['[x2, x]'], {'dim': '(1)'}), '([x2, x], dim=1)\n', (2283, 2299), False, 'import torch\n'), ((2344, 2369), 'torch.cat', 'torch.cat', (['[x1, x]'], {'dim': '(1)'}), '([x1, x], dim=1)\n', (2353, 2369), False, 'import torch\n'), ((2545, 2614), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_in"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_in', nonlinearity='relu')\n", (2568, 2614), True, 'import torch.nn as nn\n'), ((2823, 2853), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0.0)'], {}), '(m.bias, 0.0)\n', (2840, 2853), True, 'import torch.nn as nn\n'), ((2746, 2771), 'numpy.log', 'np.log', (['((1 - 0.01) / 0.01)'], {}), '((1 - 0.01) / 0.01)\n', (2752, 2771), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import vcr
from django.test import TestCase
from rest_framework import status # noqa: F401
from rest_framework.test import APITestCase
moonshot_vcr = vcr.VCR(
serializer="json",
cassette_library_dir="./fixtures",
record_mode="once",
match_on=["uri", "method"],
filter_headers=["authorization", "x-stripe-client-user-agent"],
)
class MoonshotTestCase:
def validate_close_date(self, date1, date2, tolerance=1.5):
"""
Returns a :bool of whether two dates are close to each other,
up to the tolerance (in seconds)
"""
if abs((date2 - date1).total_seconds()) < tolerance:
return True
return False
class MoonshotUnitTestCase(TestCase, MoonshotTestCase):
pass
class MoonshotFunctionalTestCase(APITestCase, MoonshotTestCase):
pass
| [
"vcr.VCR"
] | [((177, 359), 'vcr.VCR', 'vcr.VCR', ([], {'serializer': '"""json"""', 'cassette_library_dir': '"""./fixtures"""', 'record_mode': '"""once"""', 'match_on': "['uri', 'method']", 'filter_headers': "['authorization', 'x-stripe-client-user-agent']"}), "(serializer='json', cassette_library_dir='./fixtures', record_mode=\n 'once', match_on=['uri', 'method'], filter_headers=['authorization',\n 'x-stripe-client-user-agent'])\n", (184, 359), False, 'import vcr\n')] |
import numpy as np
import pandas as pd
import unittest
from bdranalytics.sklearn.model_selection import GrowingWindow, IntervalGrowingWindow
def create_time_series_data_set(start_date=pd.datetime(year=2000, month=1, day=1), n_rows=100):
end_date = start_date + pd.Timedelta(days=n_rows-1)
ds = np.random.rand(n_rows)
X = pd.DataFrame(ds,
columns=['variable'],
index=pd.date_range(start_date, end_date))
y = np.random.randint(2, size=(n_rows,))
return X, y
class TestGrowingWindow(unittest.TestCase):
def test_n_splits(self):
assert GrowingWindow(4).get_n_splits(np.arange(15).reshape(3, 5)) == 4
def test_n_splits_returned(self):
assert len(list(GrowingWindow(4).split(
np.arange(15).reshape(3, 5), np.arange(3)))) == 4
def test_n_splits_testsize(self):
for train, test in GrowingWindow(4).split(np.arange(15).reshape(5, 3), np.arange(5)):
assert len(test) == 1
def test_n_splits_testsize2(self):
for i, (train, test) in zip(range(4), GrowingWindow(4).split(np.arange(15).reshape(5, 3), np.arange(5))):
assert len(train) == i+1
class TestIntervalGrowingWindow(unittest.TestCase):
    """Tests for IntervalGrowingWindow splitting on calendar intervals.

    Fix: ``pd.datetime`` was deprecated in pandas 0.25 and removed in
    pandas 2.0.  ``pd.Timestamp`` is the supported replacement and is a
    subclass of ``datetime.datetime``, so comparisons behave identically.
    """

    def test_split_on_index(self):
        X, y = create_time_series_data_set()
        cv = IntervalGrowingWindow(
            test_start_date=pd.Timestamp(year=2000, month=2, day=1),
            test_end_date=pd.Timestamp(year=2000, month=3, day=1),
            test_size='7D')
        # 29 days in February 2000 -> four complete 7-day test windows.
        self.assertTrue(len(list(cv.split(X, y))) == 4)

    def test_split_on_array(self):
        X, y = create_time_series_data_set()
        test_size_in_days = 7
        cv = IntervalGrowingWindow(
            timestamps=X.index.values,
            test_start_date=pd.Timestamp(year=2000, month=2, day=1),
            test_end_date=pd.Timestamp(year=2000, month=3, day=1),
            test_size=pd.Timedelta(days=test_size_in_days))
        self.assertTrue(len(list(cv.split(X, y))) == 4)

    def test_split_test_size(self):
        X, y = create_time_series_data_set()
        test_size_in_days = 7
        cv = IntervalGrowingWindow(
            test_start_date=pd.Timestamp(year=2000, month=2, day=1),
            test_end_date=pd.Timestamp(year=2000, month=3, day=1),
            test_size=pd.Timedelta(days=test_size_in_days))
        for _, test in cv.split(X, y):
            self.assertTrue(len(test) == test_size_in_days)

    def test_split_with_train_size(self):
        X, y = create_time_series_data_set()
        train_size_in_days = 14
        cv = IntervalGrowingWindow(
            test_start_date=pd.Timestamp(year=2000, month=2, day=1),
            test_end_date=pd.Timestamp(year=2000, month=3, day=1),
            test_size=pd.Timedelta(days=7),
            train_size=pd.Timedelta(days=train_size_in_days))
        for train, _ in cv.split(X, y):
            self.assertTrue(len(train) == train_size_in_days)

    def test_n_splits(self):
        X, y = create_time_series_data_set()
        cv = IntervalGrowingWindow(
            test_start_date=pd.Timestamp(year=2000, month=2, day=1),
            test_end_date=pd.Timestamp(year=2000, month=3, day=1),
            test_size=pd.Timedelta(days=7))
        self.assertTrue(cv.get_n_splits(X) == 4)
| [
"numpy.random.rand",
"numpy.arange",
"pandas.Timedelta",
"bdranalytics.sklearn.model_selection.GrowingWindow",
"numpy.random.randint",
"pandas.datetime",
"pandas.date_range"
] | [((187, 225), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(1)', 'day': '(1)'}), '(year=2000, month=1, day=1)\n', (198, 225), True, 'import pandas as pd\n'), ((307, 329), 'numpy.random.rand', 'np.random.rand', (['n_rows'], {}), '(n_rows)\n', (321, 329), True, 'import numpy as np\n'), ((472, 508), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(n_rows,)'}), '(2, size=(n_rows,))\n', (489, 508), True, 'import numpy as np\n'), ((269, 298), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(n_rows - 1)'}), '(days=n_rows - 1)\n', (281, 298), True, 'import pandas as pd\n'), ((426, 461), 'pandas.date_range', 'pd.date_range', (['start_date', 'end_date'], {}), '(start_date, end_date)\n', (439, 461), True, 'import pandas as pd\n'), ((948, 960), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (957, 960), True, 'import numpy as np\n'), ((896, 912), 'bdranalytics.sklearn.model_selection.GrowingWindow', 'GrowingWindow', (['(4)'], {}), '(4)\n', (909, 912), False, 'from bdranalytics.sklearn.model_selection import GrowingWindow, IntervalGrowingWindow\n'), ((1135, 1147), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (1144, 1147), True, 'import numpy as np\n'), ((1389, 1427), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(2)', 'day': '(1)'}), '(year=2000, month=2, day=1)\n', (1400, 1427), True, 'import pandas as pd\n'), ((1455, 1493), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(3)', 'day': '(1)'}), '(year=2000, month=3, day=1)\n', (1466, 1493), True, 'import pandas as pd\n'), ((1797, 1835), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(2)', 'day': '(1)'}), '(year=2000, month=2, day=1)\n', (1808, 1835), True, 'import pandas as pd\n'), ((1863, 1901), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(3)', 'day': '(1)'}), '(year=2000, month=3, day=1)\n', (1874, 1901), True, 'import pandas as pd\n'), ((1925, 1961), 'pandas.Timedelta', 
'pd.Timedelta', ([], {'days': 'test_size_in_days'}), '(days=test_size_in_days)\n', (1937, 1961), True, 'import pandas as pd\n'), ((2199, 2237), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(2)', 'day': '(1)'}), '(year=2000, month=2, day=1)\n', (2210, 2237), True, 'import pandas as pd\n'), ((2265, 2303), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(3)', 'day': '(1)'}), '(year=2000, month=3, day=1)\n', (2276, 2303), True, 'import pandas as pd\n'), ((2327, 2363), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'test_size_in_days'}), '(days=test_size_in_days)\n', (2339, 2363), True, 'import pandas as pd\n'), ((2652, 2690), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(2)', 'day': '(1)'}), '(year=2000, month=2, day=1)\n', (2663, 2690), True, 'import pandas as pd\n'), ((2718, 2756), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(3)', 'day': '(1)'}), '(year=2000, month=3, day=1)\n', (2729, 2756), True, 'import pandas as pd\n'), ((2780, 2800), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(7)'}), '(days=7)\n', (2792, 2800), True, 'import pandas as pd\n'), ((2825, 2862), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'train_size_in_days'}), '(days=train_size_in_days)\n', (2837, 2862), True, 'import pandas as pd\n'), ((3108, 3146), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(2)', 'day': '(1)'}), '(year=2000, month=2, day=1)\n', (3119, 3146), True, 'import pandas as pd\n'), ((3174, 3212), 'pandas.datetime', 'pd.datetime', ([], {'year': '(2000)', 'month': '(3)', 'day': '(1)'}), '(year=2000, month=3, day=1)\n', (3185, 3212), True, 'import pandas as pd\n'), ((3236, 3256), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(7)'}), '(days=7)\n', (3248, 3256), True, 'import pandas as pd\n'), ((617, 633), 'bdranalytics.sklearn.model_selection.GrowingWindow', 'GrowingWindow', (['(4)'], {}), '(4)\n', (630, 633), False, 'from 
bdranalytics.sklearn.model_selection import GrowingWindow, IntervalGrowingWindow\n'), ((919, 932), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (928, 932), True, 'import numpy as np\n'), ((1083, 1099), 'bdranalytics.sklearn.model_selection.GrowingWindow', 'GrowingWindow', (['(4)'], {}), '(4)\n', (1096, 1099), False, 'from bdranalytics.sklearn.model_selection import GrowingWindow, IntervalGrowingWindow\n'), ((647, 660), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (656, 660), True, 'import numpy as np\n'), ((809, 821), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (818, 821), True, 'import numpy as np\n'), ((1106, 1119), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (1115, 1119), True, 'import numpy as np\n'), ((744, 760), 'bdranalytics.sklearn.model_selection.GrowingWindow', 'GrowingWindow', (['(4)'], {}), '(4)\n', (757, 760), False, 'from bdranalytics.sklearn.model_selection import GrowingWindow, IntervalGrowingWindow\n'), ((780, 793), 'numpy.arange', 'np.arange', (['(15)'], {}), '(15)\n', (789, 793), True, 'import numpy as np\n')] |
import base64
class Pdf:
    """In-memory or on-disk PDF with Jupyter display and SVG conversion.

    Exactly one of ``fname`` (path to an existing PDF) or ``data``
    (raw PDF bytes) is expected; ``width``/``height``/``border`` only
    affect the HTML iframe rendering.
    """
    def __init__(self, fname=None, data=None, width='100%',
                 height='300px', border=False, log=None):
        self.fname = fname
        self.data = data
        self.width = width
        self.height = height
        self.border = border
        self.log = log
    def save(self, fname):
        """Write the raw PDF bytes to *fname* (requires ``data`` set)."""
        with open(fname, 'wb') as f:
            f.write(self.data)
    def rasterize(self, to_file=None, scale=1):
        """Rasterize via the SVG representation; returns the raster image."""
        return self.as_svg().rasterize(to_file=to_file, scale=scale)
    def as_svg(self):
        """Convert this PDF to an Svg object (import deferred to avoid cycles)."""
        from .convert import pdf_to_svg
        return pdf_to_svg(self)
    def toDrawables(self, elements, **kwargs):
        '''Integration with drawSvg.
        Forwards its arguments to `latextools.convert.Svg.toDrawables`.
        '''
        svg = self.as_svg()
        return svg.toDrawables(elements, **kwargs)
    def _repr_html_(self):
        """Jupyter rich display: embed the PDF in an iframe.

        In-memory data is inlined as a base64 data URI; otherwise the
        file path is used directly as the iframe source.
        """
        if self.data is None and self.fname is None:
            return '<span color="red">No PDF.</span>'
        if self.data is None:
            path = self.fname
        else:
            path = (b'data:application/pdf;base64,'
                    + base64.b64encode(self.data)
                    ).decode()
        return (
            f'''<iframe src="{path}"{' style="border:0"'*(not self.border)}
    width="{self.width}" height="{self.height}">
No iframe support.
</iframe>''')
| [
"base64.b64encode"
] | [((1147, 1174), 'base64.b64encode', 'base64.b64encode', (['self.data'], {}), '(self.data)\n', (1163, 1174), False, 'import base64\n')] |
import os, sys
sys.path.insert(0, './../Error/')
from .Pattern.Controllers.alternate_page import (KeyError, NotFoundButtonAlternPg )
def ErrorIncorrectParseKey():
    """Raise a KeyError signalling that a parsed key was not recognised."""
    # Fix: repaired the garbled message text ("Not indentificaed key persed").
    raise KeyError("Unidentified parsed key")
def ErrorButtonAlternate():
    """Raise NotFoundButtonAlternPg when the page-alternation button is missing."""
    # Fix: repaired the garbled message text ("Not Found Button to Alternage Page").
    raise NotFoundButtonAlternPg("Button to alternate page not found")
"sys.path.insert"
] | [((16, 49), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./../Error/"""'], {}), "(0, './../Error/')\n", (31, 49), False, 'import os, sys\n')] |
"""
POST binaries to alternate storage, create a canonical uri tiddler
pointing to that storage.
"""
from httpexceptor import HTTP400
from uuid import uuid4
from mimetypes import guess_extension
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.util import pseudo_binary
from tiddlyweb.web.util import get_route_value, tiddler_url
from tiddlywebplugins.utils import require_role
@require_role('MEMBER')
def closet(environ, start_response):
    """
    Read file input and write it to special storage.

    WSGI endpoint: each uploaded file becomes a tiddler in the bag named
    in the route.  Pseudo-binary uploads are stored inline; true binaries
    go to external (S3) storage and are referenced by a canonical URI.
    Responds 303 when ``redir`` was requested, otherwise 204.
    """
    store = environ['tiddlyweb.store']
    usersign = environ['tiddlyweb.usersign']
    bag_name = get_route_value(environ, 'bag_name')
    # Query parameters arrive as lists; take the first value of each.
    redir = environ['tiddlyweb.query'].get('redir', [False])[0]
    target_name = environ['tiddlyweb.query'].get('name', [None])[0]
    # store.get raises if the bag is missing; then enforce permissions.
    bag = store.get(Bag(bag_name))
    bag.policy.allows(usersign, 'create')
    bag.policy.allows(usersign, 'write')
    files = environ['tiddlyweb.input_files']
    if not files:
        raise HTTP400('missing file input')
    tiddlers = []
    for input_file in files:
        # Fall back to the uploaded filename when no explicit name given.
        if target_name is None:
            target_name = input_file.filename
        if pseudo_binary(input_file.type):
            tiddler = _regular_tiddler(environ, bag_name, input_file,
                    target_name)
        else:
            tiddler = _binary_tiddler(environ, bag_name, input_file,
                    target_name)
        tiddlers.append(tiddler)
    # The Location header points at the last tiddler created.
    response_code = '303 See Other' if redir else '204 No Content'
    start_response(response_code, [
        ('Location', tiddler_url(environ, tiddlers[-1]))])
    return []
def closet_binary(environ, tiddler):
    """
    Turn a tiddler which is binary into a canonical and save
    the binary content with boto.

    The payload is written to the configured S3 bucket under a random
    key; the tiddler's text is cleared and replaced by a
    ``_canonical_uri`` field pointing at the public S3 URL.
    """
    # XXX: dupes with BinaryDisk
    config = environ['tiddlyweb.config']
    boto = S3Connection(config['closet.aws_access_key'],
            config['closet.aws_secret_key'])
    bucket = boto.create_bucket(config['closet.bucket'])
    extension = guess_extension(tiddler.type) or ''
    key = Key(bucket)
    # NOTE(review): uuid4().get_hex() is the Python 2 spelling of uuid4().hex.
    key.key = uuid4().get_hex() + extension
    key.set_metadata('Content-Type', tiddler.type)
    key.set_contents_from_string(tiddler.text)
    # Expiry 0 with query_auth=False yields a permanent public URL.
    url = key.generate_url(0, query_auth=False)
    tiddler.text = ''
    tiddler.fields['_canonical_uri'] = url
    return tiddler
def _regular_tiddler(environ, bag_name, input_file, target_name):
    """Store a pseudo-binary upload as a plain-text tiddler and return it."""
    raw_bytes = input_file.file.read()
    new_tiddler = Tiddler(target_name, bag_name)
    try:
        new_tiddler.text = raw_bytes.decode('utf-8')
    except UnicodeError as exc:
        raise HTTP400('tiddler content should be utf-8 encode: %s' % exc)
    new_tiddler.modifier = environ['tiddlyweb.usersign']['name']
    new_tiddler.type = input_file.type
    environ['tiddlyweb.store'].put(new_tiddler)
    return new_tiddler
def _binary_tiddler(environ, bag_name, input_file, target_name):
    """Persist a binary upload externally; return a canonical-URI tiddler."""
    external_url = BinaryDisk(environ, input_file).store()
    new_tiddler = Tiddler(target_name, bag_name)
    new_tiddler.fields['_canonical_uri'] = external_url
    new_tiddler.modifier = environ['tiddlyweb.usersign']['name']
    new_tiddler.type = input_file.type
    environ['tiddlyweb.store'].put(new_tiddler)
    return new_tiddler
class BinaryDisk(object):
    """Write one uploaded binary file to S3 and report its public URL."""

    def __init__(self, environ, filething):
        """Capture the upload's handle and metadata and open an S3 connection."""
        self.environ = environ
        self.filename = filething.name
        self.filehandle = filething.file
        self.type = filething.type
        self.extension = guess_extension(self.type) or ''
        # Random target key; get_hex() is the Python 2 spelling of uuid4().hex.
        self.targetname = uuid4().get_hex() + self.extension
        self.config = environ['tiddlyweb.config']
        self.boto = S3Connection(self.config['closet.aws_access_key'],
                self.config['closet.aws_secret_key'])

    def store(self):
        """Upload the file to the configured bucket; return its public URL."""
        bucket = self.boto.create_bucket(self.config['closet.bucket'])
        key = Key(bucket)
        key.key = self.targetname
        key.set_metadata('Content-Type', self.type)
        key.set_contents_from_file(self.filehandle)
        # Expiry 0 with query_auth=False yields a permanent public URL.
        url = key.generate_url(0, query_auth=False)
        return url
| [
"httpexceptor.HTTP400",
"tiddlyweb.util.pseudo_binary",
"boto.s3.connection.S3Connection",
"uuid.uuid4",
"boto.s3.key.Key",
"tiddlyweb.model.tiddler.Tiddler",
"tiddlyweb.web.util.tiddler_url",
"tiddlyweb.web.util.get_route_value",
"tiddlyweb.model.bag.Bag",
"mimetypes.guess_extension",
"tiddlywe... | [((502, 524), 'tiddlywebplugins.utils.require_role', 'require_role', (['"""MEMBER"""'], {}), "('MEMBER')\n", (514, 524), False, 'from tiddlywebplugins.utils import require_role\n'), ((730, 766), 'tiddlyweb.web.util.get_route_value', 'get_route_value', (['environ', '"""bag_name"""'], {}), "(environ, 'bag_name')\n", (745, 766), False, 'from tiddlyweb.web.util import get_route_value, tiddler_url\n'), ((1963, 2041), 'boto.s3.connection.S3Connection', 'S3Connection', (["config['closet.aws_access_key']", "config['closet.aws_secret_key']"], {}), "(config['closet.aws_access_key'], config['closet.aws_secret_key'])\n", (1975, 2041), False, 'from boto.s3.connection import S3Connection\n'), ((2170, 2181), 'boto.s3.key.Key', 'Key', (['bucket'], {}), '(bucket)\n', (2173, 2181), False, 'from boto.s3.key import Key\n'), ((2672, 2702), 'tiddlyweb.model.tiddler.Tiddler', 'Tiddler', (['target_name', 'bag_name'], {}), '(target_name, bag_name)\n', (2679, 2702), False, 'from tiddlyweb.model.tiddler import Tiddler\n'), ((3236, 3266), 'tiddlyweb.model.tiddler.Tiddler', 'Tiddler', (['target_name', 'bag_name'], {}), '(target_name, bag_name)\n', (3243, 3266), False, 'from tiddlyweb.model.tiddler import Tiddler\n'), ((920, 933), 'tiddlyweb.model.bag.Bag', 'Bag', (['bag_name'], {}), '(bag_name)\n', (923, 933), False, 'from tiddlyweb.model.bag import Bag\n'), ((1098, 1127), 'httpexceptor.HTTP400', 'HTTP400', (['"""missing file input"""'], {}), "('missing file input')\n", (1105, 1127), False, 'from httpexceptor import HTTP400\n'), ((1265, 1295), 'tiddlyweb.util.pseudo_binary', 'pseudo_binary', (['input_file.type'], {}), '(input_file.type)\n', (1278, 1295), False, 'from tiddlyweb.util import pseudo_binary\n'), ((2123, 2152), 'mimetypes.guess_extension', 'guess_extension', (['tiddler.type'], {}), '(tiddler.type)\n', (2138, 2152), False, 'from mimetypes import guess_extension\n'), ((3829, 3922), 'boto.s3.connection.S3Connection', 'S3Connection', 
(["self.config['closet.aws_access_key']", "self.config['closet.aws_secret_key']"], {}), "(self.config['closet.aws_access_key'], self.config[\n 'closet.aws_secret_key'])\n", (3841, 3922), False, 'from boto.s3.connection import S3Connection\n'), ((4041, 4052), 'boto.s3.key.Key', 'Key', (['bucket'], {}), '(bucket)\n', (4044, 4052), False, 'from boto.s3.key import Key\n'), ((2805, 2864), 'httpexceptor.HTTP400', 'HTTP400', (["('tiddler content should be utf-8 encode: %s' % exc)"], {}), "('tiddler content should be utf-8 encode: %s' % exc)\n", (2812, 2864), False, 'from httpexceptor import HTTP400\n'), ((3664, 3690), 'mimetypes.guess_extension', 'guess_extension', (['self.type'], {}), '(self.type)\n', (3679, 3690), False, 'from mimetypes import guess_extension\n'), ((1675, 1709), 'tiddlyweb.web.util.tiddler_url', 'tiddler_url', (['environ', 'tiddlers[-1]'], {}), '(environ, tiddlers[-1])\n', (1686, 1709), False, 'from tiddlyweb.web.util import get_route_value, tiddler_url\n'), ((2196, 2203), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2201, 2203), False, 'from uuid import uuid4\n'), ((3723, 3730), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (3728, 3730), False, 'from uuid import uuid4\n')] |
'''ORIGINAL file:https://github.com/neubig/util-scripts/blob/master/syntactic-complexity.py
'''
#!/usr/bin/env python
'''
A program to calculate syntactic complexity of parse trees. (Relies on NLTK)
This is an implementation of some of the methods in:
Syntactic complexity measures for detecting Mild Cognitive Impairment
<NAME>, <NAME> and <NAME>
Proc BioNLP 2007.
'''
try:
import sys
from nltk.tree import Tree
from nltk.draw.tree import TreeWidget
from nltk.draw.util import (CanvasFrame, CanvasWidget, BoxWidget,
TextWidget, ParenWidget, OvalWidget)
except ImportError:
print('Error: cannot import the NLTK!')
print('You need to install the NLTK. Please visit http://nltk.org/install.html for details.')
print("On Ubuntu, the installation can be done via 'sudo apt-get install python-nltk'")
sys.exit()
def calc_words(t):
    """Count the leaf tokens (words) of parse tree *t*."""
    # Leaves are plain strings; internal nodes are iterable over children.
    if type(t) == str:
        return 1
    return sum(calc_words(child) for child in t)
def calc_nodes(t):
    """Count the internal nodes of parse tree *t* (leaves contribute zero)."""
    if type(t) == str:
        return 0
    # Each child adds itself (1) plus its own subtree's node count.
    return sum(calc_nodes(child) + 1 for child in t)
def calc_yngve(t, par):
    """Yngve score of tree *t* given inherited depth *par*.

    Children are scanned right-to-left; each step further left adds one
    to the depth passed down, and leaves return their inherited depth.
    """
    if type(t) == str:
        return par
    return sum(calc_yngve(child, par + offset)
               for offset, child in enumerate(reversed(t)))
def is_sent(val):
    """Return True when constituent label *val* marks a sentence node."""
    return len(val) > 0 and val[0] == "S"
def calc_frazier(t, par, par_lab):
    """Frazier complexity score of parse tree *t*.

    *par* is the increment inherited from the parent and *par_lab* the
    parent's label (used to avoid double-counting stacked S nodes).
    Leaves return ``par - 1``; internal nodes sum the scores of all
    children, where only the leftmost child inherits a positive score.
    """
    if type(t) == str:
        return par - 1
    val = 0
    my_lab = ""
    for i, child in enumerate(t):
        # For all but the leftmost child the inherited increment is zero.
        score = 0
        if i == 0:
            my_lab = t.label()
            # A sentence node adds 1.5 unless it duplicates its parent.
            if is_sent(my_lab):
                score = (0 if is_sent(par_lab) else par + 1.5)
            # Any other labelled, non-root node adds one.
            elif my_lab != "" and my_lab != "ROOT" and my_lab != "TOP":
                score = par + 1
        # Bug fix: this accumulation was dedented out of the loop, so only
        # the last child was ever recursed into; it must run per child.
        val += calc_frazier(child, score, my_lab)
    return val
def main():
    """Read one parse tree per line from stdin and report complexity metrics.

    For every line (until a line equal to "end") the tree is parsed and
    the per-word Yngve, Frazier and node-count averages are printed.
    Fix: removed a leftover debug print of the raw input line, the dead
    commented-out totals block, and the accumulators that only fed it.
    """
    sents = 0
    for line in sys.stdin:
        if line == "end":
            break
        t = Tree.fromstring(line)
        words = calc_words(t)
        sents += 1
        yngve_avg = float(calc_yngve(t, 0)) / words
        nodes_avg = float(calc_nodes(t)) / words
        frazier_avg = float(calc_frazier(t, 0, "")) / words
        print("Sentence=%d\twords=%d\tyngve=%f\tfrazier=%f\tnodes=%f" %
              (sents, words, yngve_avg, frazier_avg, nodes_avg))
# Run as a filter: parse trees on stdin, per-sentence metrics on stdout.
if __name__ == '__main__':
    main()
| [
"nltk.tree.Tree.fromstring",
"sys.exit"
] | [((846, 856), 'sys.exit', 'sys.exit', ([], {}), '()\n', (854, 856), False, 'import sys\n'), ((2196, 2217), 'nltk.tree.Tree.fromstring', 'Tree.fromstring', (['line'], {}), '(line)\n', (2211, 2217), False, 'from nltk.tree import Tree\n')] |
"""Implement 3D image thresholding."""
from typing import List, Optional
import numpy.typing as npt
import numpy as np
from ..image_utils import get_xy_block_coords, get_xy_block
from ..gpu import get_image_method
def get_threshold_otsu(image: npt.ArrayLike, blur_sigma=5):
    """Compute an Otsu threshold on a Gaussian-blurred copy of ``image``."""
    # Resolve CPU/GPU-appropriate implementations for this array type.
    gaussian_fn = get_image_method(image, "skimage.filters.gaussian")
    otsu_fn = get_image_method(image, "skimage.filters.threshold_otsu")
    return otsu_fn(gaussian_fn(image, sigma=blur_sigma))
def select_nonempty_patches(
    image: npt.ArrayLike,
    patch_size: int = 512,
    min_nonzeros: float = 0.02,
    threshold: Optional[float] = None,
    verbose: bool = False,
) -> List[List[int]]:
    """Select XY patches from 3D image by percent of nonzero voxels.

    The image is binarised at ``threshold`` (Otsu by default) and split
    into ``patch_size`` x ``patch_size`` XY tiles; a tile's coordinates
    are kept when at least ``min_nonzeros`` of its voxels are nonzero.
    """
    # No-op printer unless verbose output was requested.
    verboseprint = print if verbose else lambda *a, **k: None
    selected_patches = []
    if threshold is None:
        threshold = get_threshold_otsu(image)
    img_as_float = get_image_method(image, "skimage.img_as_float")
    binary_image = (img_as_float(image) > threshold).astype(np.uint8)
    patch_coordinates = get_xy_block_coords(image.shape, patch_size)
    verboseprint(f"Nonzero pixels in the image: {np.count_nonzero(binary_image) / binary_image.size}")  # type: ignore[operator]
    for single_patch_coords in patch_coordinates:
        binary_tile = get_xy_block(binary_image, single_patch_coords)
        # Fraction of foreground voxels inside this tile.
        patch_nonzero = np.count_nonzero(binary_tile) / binary_tile.size
        if patch_nonzero >= min_nonzeros:
            selected_patches.append(single_patch_coords)
    return selected_patches
| [
"numpy.count_nonzero"
] | [((1495, 1524), 'numpy.count_nonzero', 'np.count_nonzero', (['binary_tile'], {}), '(binary_tile)\n', (1511, 1524), True, 'import numpy as np\n'), ((1270, 1300), 'numpy.count_nonzero', 'np.count_nonzero', (['binary_image'], {}), '(binary_image)\n', (1286, 1300), True, 'import numpy as np\n')] |
import vlc
import time
import io
import os
from tkinter import *
from tinytag import TinyTag, TinyTagException
from PIL import Image, ImageTk
class check:
    """Mutable player state shared between the UI and the playlist.

    Class attributes act as globals: ``i`` toggles play/pause, ``mname``
    holds the current track title, ``song`` is the 1-based track number,
    and ``goto`` signals whether the window loop should keep running.
    """
    i = 0
    mname = ''
    song = 1
    goto = 0

    def main(self):
        """Load tag metadata for the current track and dump its cover art."""
        # change to \ for windows
        temp_track = TinyTag.get("/home/lowkey/temp/medusa/Music" + "/" + str(c.song) + ".mp3", image=True)
        print("Now Playing:", temp_track.title)
        pic = temp_track.get_image()
        # Bug fix: the file handle was previously opened and never closed;
        # a context manager flushes and closes it deterministically.
        with open("/home/lowkey/temp/medusa/temp.jpg", "wb") as cover_file:
            cover_file.write(pic)
        c.mname = temp_track.title
class root:
    """One Tkinter window per track: builds the UI and runs mainloop.

    Reads/writes the shared ``check`` instance ``c``; track changes
    destroy the window so the outer loop can build a fresh one.
    """
    def __init__(self):
        # change to \ for windows
        sound_file = vlc.MediaPlayer("/home/lowkey/temp/medusa/Music" + "/" + str(c.song) + ".mp3")
        c.main()
        c.goto = 1
        self.root = Tk()
        self.root.title("Medusa")
        self.root.geometry("420x500")
        playimg = PhotoImage(file="/home/lowkey/temp/medusa/play.png")
        # Resize the cover art in place, then reload it for Tk display.
        img = Image.open("/home/lowkey/temp/medusa/temp.jpg")
        img = img.resize((300, 300), Image.ANTIALIAS)
        img.save("/home/lowkey/temp/medusa/temp.jpg")
        img = ImageTk.PhotoImage(Image.open("/home/lowkey/temp/medusa/temp.jpg"))
        # NOTE(review): Tk keeps no strong reference to PhotoImage objects;
        # this works because __init__ blocks in mainloop() below, keeping
        # the locals alive -- confirm before refactoring.
        lpic = Label(self.root, image=img)
        # Start playback immediately (c.i == 0 means "not playing yet").
        self.play(c.i, sound_file)
        BtPlay = Button(self.root, image=playimg, command=lambda: self.play(c.i,sound_file))
        BtNext = Button(self.root, text="Next", command=lambda: self.next(sound_file))
        BtPrev = Button(self.root, text="Prev", command=lambda: self.prev(sound_file))
        BtTrev = Button(self.root, text="Show PlayList", command= lambda: os.system('python3 /home/lowkey/temp/medusa/treverse_songs.py'))
        mname = Label(self.root, text=c.mname)
        space = Label(self.root, text=' ')
        space2 = Label(self.root, text=' ')
        space3 = Label(self.root, text=' ')
        lpic.grid(row=0, column=1)
        space.grid(row=1)
        mname.grid(row=2, column=1)
        space2.grid(row=3)
        BtPlay.grid(row=4, column=1)
        BtNext.grid(row=4, column=2)
        BtPrev.grid(row=4, column=0)
        space3.grid(row=5)
        BtTrev.grid(row=7, column=1)
        self.root.mainloop()
    def play(self, check, sound_file):
        """Start playback when paused/stopped; otherwise toggle to pause."""
        if check == 0:
            sound_file.play()
            c.i = 1
        else:
            self.pause(sound_file)
    def pause(self, sound_file):
        """Pause playback and record the paused state in ``c.i``."""
        sound_file.pause()
        print("Paused")
        c.i = 0
    def next(self, sound_file):
        """Advance to the next track and tear down this window."""
        self.pause(sound_file)
        c.song += 1
        c.goto = 0
        self.root.destroy()
    def prev(self, sound_file):
        """Go back one track (not before the first) and tear down the window."""
        self.pause(sound_file)
        if c.song != 1:
            c.song -= 1
        c.goto = 0
        self.root.destroy()
# Entry point: shared player state plus the window loop.  Next/Prev set
# c.goto back to 0 and destroy the window, so a fresh `root` is built
# for the new track; the loop ends once goto stays at 1.
c = check()
while c.goto < 1:
    r = root()
print("Finished Playing")
| [
"os.system",
"PIL.Image.open"
] | [((1020, 1067), 'PIL.Image.open', 'Image.open', (['"""/home/lowkey/temp/medusa/temp.jpg"""'], {}), "('/home/lowkey/temp/medusa/temp.jpg')\n", (1030, 1067), False, 'from PIL import Image, ImageTk\n'), ((1209, 1256), 'PIL.Image.open', 'Image.open', (['"""/home/lowkey/temp/medusa/temp.jpg"""'], {}), "('/home/lowkey/temp/medusa/temp.jpg')\n", (1219, 1256), False, 'from PIL import Image, ImageTk\n'), ((1677, 1740), 'os.system', 'os.system', (['"""python3 /home/lowkey/temp/medusa/treverse_songs.py"""'], {}), "('python3 /home/lowkey/temp/medusa/treverse_songs.py')\n", (1686, 1740), False, 'import os\n')] |
import cv2
import logging
import numpy as np
import nibabel as nib
from skimage.measure import label
from skimage.morphology import binary_closing, cube
from fetal_brain_mask.model import Unet
logger = logging.getLogger(__name__)
class MaskingTool:
    """Predict a brain mask for a 3D volume with a U-Net, with cleanup.

    Fix: the exception handler in ``mask_tensor`` referenced an undefined
    name ``input_filename``, so any smoothing failure raised NameError
    instead of being logged.
    """
    def __init__(self):
        self.model = Unet()

    def mask_tensor(self, data, smoothen=True):
        """Return a binary mask for ``data``; optionally smooth it.

        Expects a (H, W, slices) volume; axes are moved so the model sees
        (slices, H, W). Slices are normalised to 0-255 and resized to
        256x256 when needed, then restored to the original shape.
        """
        # axes have to be switched from (256,256,x) to (x,256,256)
        data = np.moveaxis(data, -1, 0)
        # normalize each image slice
        data = np.array([self.normalize_uint8(islice) for islice in data], dtype=np.uint16)
        data = data[..., np.newaxis]
        resize_needed = False
        original_shape = (data.shape[2], data.shape[1])
        if data.shape[1] != 256 or data.shape[2] != 256:
            data = self.resize_data(data)
            resize_needed = True
        # do prediction
        data = self.model.predict_mask(data)
        if smoothen:
            # it would be better for this to be put in its own plugin
            data = binary_closing(np.squeeze(data), cube(2))
            try:
                # keep only the largest connected component of the mask
                labels = label(data)
                data = (labels == np.argmax(np.bincount(labels.flat)[1:]) + 1).astype(np.uint16)
            except Exception as e:
                # Bug fix: previously logged 'input_filename', an undefined
                # name, which turned any failure here into a NameError.
                logger.error(e)
                logger.error('Failed to apply smoothing')
        if resize_needed:
            data = self.resize_data(data.astype(np.uint16), target=original_shape)
        # remove extra dimension
        data = np.squeeze(data)
        # return result into shape (256,256, X)
        data = np.moveaxis(data, 0, -1)
        return data

    @staticmethod
    def normalize_uint8(img_slice):
        """
        Normalizes the image to be in the range of 0-255
        it round up negative values to 0 and caps the top values at the
        97% value as to avoid outliers
        """
        img_slice[img_slice < 0] = 0
        flat_sorted = np.sort(img_slice.flatten())
        # dont consider values greater than 97% of the values
        # maybe we should use a statistical method here instead?
        top_3_limit = int(len(flat_sorted) * 0.97)
        limit = flat_sorted[top_3_limit]
        img_slice[img_slice > limit] = limit
        rows, cols = img_slice.shape
        # create new empty image
        new_img = np.zeros((rows, cols))
        max_val = np.max(img_slice)
        if max_val == 0:
            return new_img
        # normalize all values
        for i in range(rows):
            for j in range(cols):
                new_img[i, j] = int((float(img_slice[i, j]) / float(max_val)) * 255)
        return new_img

    @staticmethod
    def resize_data(image, target=(256, 256)):
        """Resize every slice of a (slices, H, W[, 1]) stack to ``target``."""
        # maybe use a library for this?
        image = np.squeeze(image)
        resized_img = []
        for i in range(image.shape[0]):
            img_slice = cv2.resize(image[i, :, :], target)
            resized_img.append(img_slice)
        image = np.array(resized_img, dtype=np.uint16)
        return image[..., np.newaxis]
| [
"logging.getLogger",
"skimage.morphology.cube",
"numpy.squeeze",
"numpy.max",
"numpy.array",
"numpy.zeros",
"fetal_brain_mask.model.Unet",
"numpy.moveaxis",
"cv2.resize",
"numpy.bincount",
"skimage.measure.label"
] | [((204, 231), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (221, 231), False, 'import logging\n'), ((298, 304), 'fetal_brain_mask.model.Unet', 'Unet', ([], {}), '()\n', (302, 304), False, 'from fetal_brain_mask.model import Unet\n'), ((436, 460), 'numpy.moveaxis', 'np.moveaxis', (['data', '(-1)', '(0)'], {}), '(data, -1, 0)\n', (447, 460), True, 'import numpy as np\n'), ((1526, 1542), 'numpy.squeeze', 'np.squeeze', (['data'], {}), '(data)\n', (1536, 1542), True, 'import numpy as np\n'), ((1606, 1630), 'numpy.moveaxis', 'np.moveaxis', (['data', '(0)', '(-1)'], {}), '(data, 0, -1)\n', (1617, 1630), True, 'import numpy as np\n'), ((2341, 2363), 'numpy.zeros', 'np.zeros', (['(rows, cols)'], {}), '((rows, cols))\n', (2349, 2363), True, 'import numpy as np\n'), ((2382, 2399), 'numpy.max', 'np.max', (['img_slice'], {}), '(img_slice)\n', (2388, 2399), True, 'import numpy as np\n'), ((2779, 2796), 'numpy.squeeze', 'np.squeeze', (['image'], {}), '(image)\n', (2789, 2796), True, 'import numpy as np\n'), ((2980, 3018), 'numpy.array', 'np.array', (['resized_img'], {'dtype': 'np.uint16'}), '(resized_img, dtype=np.uint16)\n', (2988, 3018), True, 'import numpy as np\n'), ((2886, 2920), 'cv2.resize', 'cv2.resize', (['image[i, :, :]', 'target'], {}), '(image[i, :, :], target)\n', (2896, 2920), False, 'import cv2\n'), ((1042, 1058), 'numpy.squeeze', 'np.squeeze', (['data'], {}), '(data)\n', (1052, 1058), True, 'import numpy as np\n'), ((1060, 1067), 'skimage.morphology.cube', 'cube', (['(2)'], {}), '(2)\n', (1064, 1067), False, 'from skimage.morphology import binary_closing, cube\n'), ((1111, 1122), 'skimage.measure.label', 'label', (['data'], {}), '(data)\n', (1116, 1122), False, 'from skimage.measure import label\n'), ((1167, 1191), 'numpy.bincount', 'np.bincount', (['labels.flat'], {}), '(labels.flat)\n', (1178, 1191), True, 'import numpy as np\n')] |
#pip install datetime
import time
import platform
import getpass
# Gather host details via the platform module and print a small report.
system_report = [
    ('Machine', platform.node()),
    ('Architecture', platform.architecture()),
    ('OS', platform.system()),
    ('OS Version', platform.release()),
    ('Processor', platform.processor()),
    ('Python Version', platform.python_version()),
]
report_text = ''.join('\n%s: %s' % (name, str(value)) for name, value in system_report)
print(report_text + '\n')
print('Time: ' + time.strftime("%Y-%m-%d %H:%M:%S"))
print('Time am/pm: ' + time.strftime("%Y-%m-%d %I:%M:%S"))
print('PC User: ' + getpass.getuser())
# NOTE(review): echoing the password back defeats getpass's purpose;
# kept as-is to preserve the script's original behaviour.
print('\nYour password is: ' + getpass.getpass('Insert your password and hit enter'))
"platform.node",
"time.strftime",
"getpass.getpass",
"platform.release",
"platform.architecture",
"platform.system",
"platform.processor",
"getpass.getuser",
"platform.python_version"
] | [((81, 96), 'platform.node', 'platform.node', ([], {}), '()\n', (94, 96), False, 'import platform\n'), ((116, 139), 'platform.architecture', 'platform.architecture', ([], {}), '()\n', (137, 139), False, 'import platform\n'), ((152, 169), 'platform.system', 'platform.system', ([], {}), '()\n', (167, 169), False, 'import platform\n'), ((184, 202), 'platform.release', 'platform.release', ([], {}), '()\n', (200, 202), False, 'import platform\n'), ((216, 236), 'platform.processor', 'platform.processor', ([], {}), '()\n', (234, 236), False, 'import platform\n'), ((251, 276), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (274, 276), False, 'import platform\n'), ((501, 535), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (514, 535), False, 'import time\n'), ((560, 594), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %I:%M:%S"""'], {}), "('%Y-%m-%d %I:%M:%S')\n", (573, 594), False, 'import time\n'), ((615, 632), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (630, 632), False, 'import getpass\n'), ((664, 717), 'getpass.getpass', 'getpass.getpass', (['"""Insert your password and hit enter"""'], {}), "('Insert your password and hit enter')\n", (679, 717), False, 'import getpass\n')] |
# -*- coding: utf8 -*-
import click
from fledgling.app.use_case.delete_task import DeleteTaskUseCase, IParams
from fledgling.cli.config import IniFileConfig
from fledgling.cli.repository_factory import RepositoryFactory
class Params(IParams):
    """Adapt the CLI ``--task-id`` option to the use case's IParams port."""

    def __init__(self, *, task_id):
        self._task_id = task_id

    def get_task_id(self) -> int:
        return self._task_id
@click.command()
@click.option('--task-id', required=True, type=click.INT)
def delete_task(*, task_id):
    """
    Delete the specified task and its plans.
    """
    # Wire the CLI option into the use case through its params port.
    params = Params(task_id=task_id)
    config = IniFileConfig()
    config.load()
    repository_factory = RepositoryFactory(config)
    use_case = DeleteTaskUseCase(
        params=params,
        task_repository=repository_factory.for_task(),
    )
    use_case.run()
| [
"click.option",
"click.command",
"fledgling.cli.config.IniFileConfig",
"fledgling.cli.repository_factory.RepositoryFactory"
] | [((379, 394), 'click.command', 'click.command', ([], {}), '()\n', (392, 394), False, 'import click\n'), ((396, 452), 'click.option', 'click.option', (['"""--task-id"""'], {'required': '(True)', 'type': 'click.INT'}), "('--task-id', required=True, type=click.INT)\n", (408, 452), False, 'import click\n'), ((564, 579), 'fledgling.cli.config.IniFileConfig', 'IniFileConfig', ([], {}), '()\n', (577, 579), False, 'from fledgling.cli.config import IniFileConfig\n'), ((623, 648), 'fledgling.cli.repository_factory.RepositoryFactory', 'RepositoryFactory', (['config'], {}), '(config)\n', (640, 648), False, 'from fledgling.cli.repository_factory import RepositoryFactory\n')] |
#!/usr/bin/env python3
import os
import time
import pytest
import radical.utils as ru
yaml = pytest.importorskip('yaml')
flux = pytest.importorskip('flux')
# Per-job event log filled in by the submission callbacks below.
events = dict()
# Minimal Flux jobspec (version 1): run /bin/date once in a single
# one-core slot, with a long duration so the job is not timed out.
spec = {
    "tasks": [{
        "slot": "task",
        "count": {
            "per_slot": 1
        },
        "command": [
            "/bin/date"
        ]
    }],
    "attributes": {
        "system": {
            "duration": 10000
        }
    },
    "version": 1,
    "resources": [{
        "count": 1,
        "type" : "slot",
        "label": "task",
        "with": [{
            "count": 1,
            "type": "core"
        }]
    }]
}
# ------------------------------------------------------------------------------
#
def test_flux_startup():
    """Start a fresh Flux instance, submit jobs, and check event delivery."""
    global events
    njobs  = 10
    events = dict()
    def cb1(job_id, state, ts, context):
        # Record every (timestamp, state) transition per job id.
        if job_id not in events:
            events[job_id] = [ts, state]
        else:
            events[job_id].append([ts, state])
    fh = ru.FluxHelper()
    fh.start_flux()
    assert(fh.uri)
    assert('FLUX_URI' in fh.env)
    specs = [spec] * njobs
    ids = fh.submit_jobs(specs, cb=cb1)
    assert(len(ids) == njobs), len(ids)
    # Give the short /bin/date jobs time to run to completion.
    time.sleep(5)
    assert(len(events) == njobs), len(events)
    for jid in events:
        # we expect at least 4 events per job:
        # 'submit', 'start', 'finish', 'clean',
        assert(len(events[jid]) >= 4), [jid, events[jid]]
    fh.reset()
    assert(fh.uri is None)
assert(fh.uri is None)
# ------------------------------------------------------------------------------
#
def test_flux_pickup():
    """Connect to an existing Flux instance via FLUX_URI and submit jobs.

    When no instance is advertised in the environment, one is started
    here first so the helper under test has something to pick up.
    """
    global events
    njobs    = 10
    events   = dict()
    outer_fh = None
    if 'FLUX_URI' not in os.environ:
        outer_fh = ru.FluxHelper()
        outer_fh.start_flux()
        # Export the instance's environment so the next helper finds it.
        for k,v in outer_fh.env.items():
            os.environ[k] = v
    def cb1(job_id, state, ts, context):
        # Record every (timestamp, state) transition per job id.
        if job_id not in events:
            events[job_id] = [ts, state]
        else:
            events[job_id].append([ts, state])
    fh = ru.FluxHelper()
    fh.start_flux()
    assert(fh.uri)
    assert('FLUX_URI' in fh.env)
    specs = [spec] * njobs
    ids = fh.submit_jobs(specs, cb=cb1)
    assert(len(ids) == njobs), len(ids)
    # Give the short /bin/date jobs time to run to completion.
    time.sleep(5)
    assert(len(events) == njobs), len(events)
    for jid in events:
        # we expect at least 4 events per job:
        # 'submit', 'start', 'finish', 'clean',
        assert(len(events[jid]) >= 4), [jid, events[jid]]
    fh.reset()
    assert(fh.uri is None)
    if outer_fh:
        outer_fh.reset()
# ------------------------------------------------------------------------------
#
if __name__ == '__main__':
    # allow running the checks directly, without the pytest runner
    test_flux_startup()
    test_flux_pickup()
# ------------------------------------------------------------------------------
| [
"radical.utils.FluxHelper",
"pytest.importorskip",
"time.sleep"
] | [((99, 126), 'pytest.importorskip', 'pytest.importorskip', (['"""yaml"""'], {}), "('yaml')\n", (118, 126), False, 'import pytest\n'), ((136, 163), 'pytest.importorskip', 'pytest.importorskip', (['"""flux"""'], {}), "('flux')\n", (155, 163), False, 'import pytest\n'), ((1263, 1278), 'radical.utils.FluxHelper', 'ru.FluxHelper', ([], {}), '()\n', (1276, 1278), True, 'import radical.utils as ru\n'), ((1467, 1480), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1477, 1480), False, 'import time\n'), ((2343, 2358), 'radical.utils.FluxHelper', 'ru.FluxHelper', ([], {}), '()\n', (2356, 2358), True, 'import radical.utils as ru\n'), ((2547, 2560), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2557, 2560), False, 'import time\n'), ((1993, 2008), 'radical.utils.FluxHelper', 'ru.FluxHelper', ([], {}), '()\n', (2006, 2008), True, 'import radical.utils as ru\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Data EDA
# Author - <NAME>
# Written - December 2017
# This script take cleansed data from a csv file and writes figures to a results folder
# Parameters required -
# 1. Cleansed csv file
# 2. Codebook csv file
# 3. Destination folder for figures
import pandas as pd
import numpy as np
import sys
import os
import matplotlib as mpl
if os.environ.get('DISPLAY','') == '':
    # headless environment: select the non-interactive Agg backend *before*
    # matplotlib.pyplot is imported, otherwise the choice has no effect
    print('no display found. Using non-interactive Agg backend')
    mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
#%matplotlib inline
sns.set_style("dark")
def plotBar(var, data, title, filename, directory):
    '''
    Plot the count of instances of a categorical variable as a bar chart
    and optionally save it as a png file.

    A fresh matplotlib figure is created on every call: previously the
    shared current axes were reused, so each saved image accumulated the
    bars and labels of all earlier calls.

    Args: var - variable (column name) to plot
          data - input data frame
          title - graph title
          filename - file name for the plot (None skips saving)
          directory - folder location for the plot
    '''
    bar_df = data[[var]].groupby([var]).size().reset_index(name='count')
    fig = plt.figure()  # new figure so plots do not pile up on shared axes
    sns.barplot(x=var, y="count", data=bar_df)
    plt.ylabel("count")
    plt.xlabel(" ")
    plt.title("{0}".format(title))
    if filename is not None:
        # exist_ok avoids the check-then-create race of the previous code
        os.makedirs(directory, exist_ok=True)
        fig.savefig(os.path.join(directory, filename + ".png"))
        plt.close(fig)  # release the figure once it is on disk
def plot_data(cleansed_file, codebook_src, results_dest):
    '''
    Read the cleansed survey data and the codebook, then save bar plots of
    selected survey questions to the specified folder.

    Args: cleansed_file - file with cleansed data
          codebook_src - variable codebook csv (maps the original question
                         text in 'Old_names' to the cleansed column name
                         in 'New_names')
          results_dest - destination folder for the figure images
    '''

    def _ensure_parent_dir(path):
        # NOTE(review): creating the parent directory of an *input* file
        # does not make the file readable; kept only for backward
        # compatibility with the original behaviour.
        os.makedirs(path.rsplit('/', 1)[0], exist_ok=True)

    _ensure_parent_dir(cleansed_file)
    df = pd.read_csv(cleansed_file, encoding='latin-1')

    _ensure_parent_dir(codebook_src)
    cb = pd.read_csv(codebook_src, encoding='latin-1')

    # (original survey question, plot title, output file name) - replaces
    # nine near-identical lookup/plot stanzas with one data-driven loop
    plots = [
        ('Have you been diagnosed with a mental health condition by a medical professional?',
         "Have you been diagnosed for mental disorder by a professional?",
         "diag_prof"),
        ('Do you currently have a mental health disorder?',
         "Do you have mental disorder?",
         "have_disorder"),
        ('What is your gender?',
         "Gender breakdown",
         "gender_breakdown"),
        ('Does your employer provide mental health benefits as part of healthcare coverage?',
         "Does your employee provide Mental health benefits?",
         "employee_benefits"),
        ('Would you feel comfortable discussing a mental health disorder with your coworkers?',
         "Comfortable with discussing about mental health condition with coworkers?",
         "coworker_comfort"),
        ('Do you feel that your employer takes mental health as seriously as physical health?',
         "Do you feel that your employer takes mental health as seriously as physical health?",
         "mental_physical"),
        ('Does your employer offer resources to learn more about mental health concerns and options for seeking help?',
         "Does employee offer resources to learn about mental health?",
         "emp_resources"),
        ('Do you think that discussing a mental health disorder with your employer would have negative consequences?',
         "Do you think discussing mental health would have a negative consequence?",
         "disc_negative"),
        ('Is your anonymity protected if you choose to take advantage of mental health or substance abuse treatment resources provided by your employer?',
         "Anonymity when one opens up about mental health?",
         "anonymity"),
    ]

    for question, title, fname in plots:
        column = cb.loc[cb['Old_names'] == question, 'New_names']
        plotBar(column.values[0], df, title, fname, results_dest)
if __name__ == '__main__':
    # optional CLI arguments: cleansed data csv, codebook csv, figure folder;
    # repo-relative defaults are used when an argument is omitted
    cleansed_file = sys.argv[1] if len(sys.argv) > 1 else 'data/cleansed_data.csv'
    codebook_src = sys.argv[2] if len(sys.argv) > 2 else 'docs/codebook.csv'
    results_dest = sys.argv[3] if len(sys.argv) > 3 else 'results/figures'
    plot_data(cleansed_file,codebook_src,results_dest)
    print("End of program")
| [
"os.path.exists",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"os.makedirs",
"matplotlib.pyplot.xlabel",
"os.environ.get",
"seaborn.set_style",
"seaborn.barplot"
] | [((573, 594), 'seaborn.set_style', 'sns.set_style', (['"""dark"""'], {}), "('dark')\n", (586, 594), True, 'import seaborn as sns\n'), ((380, 409), 'os.environ.get', 'os.environ.get', (['"""DISPLAY"""', '""""""'], {}), "('DISPLAY', '')\n", (394, 409), False, 'import os\n'), ((483, 497), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (490, 497), True, 'import matplotlib as mpl\n'), ((1106, 1148), 'seaborn.barplot', 'sns.barplot', ([], {'x': 'var', 'y': '"""count"""', 'data': 'bar_df'}), "(x=var, y='count', data=bar_df)\n", (1117, 1148), True, 'import seaborn as sns\n'), ((1157, 1176), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""count"""'], {}), "('count')\n", (1167, 1176), True, 'import matplotlib.pyplot as plt\n'), ((1181, 1196), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['""" """'], {}), "(' ')\n", (1191, 1196), True, 'import matplotlib.pyplot as plt\n'), ((2066, 2107), 'pandas.read_csv', 'pd.read_csv', (['clean_fn'], {'encoding': '"""latin-1"""'}), "(clean_fn, encoding='latin-1')\n", (2077, 2107), True, 'import pandas as pd\n'), ((2290, 2330), 'pandas.read_csv', 'pd.read_csv', (['code_fn'], {'encoding': '"""latin-1"""'}), "(code_fn, encoding='latin-1')\n", (2301, 2330), True, 'import pandas as pd\n'), ((1958, 1976), 'os.path.exists', 'os.path.exists', (['di'], {}), '(di)\n', (1972, 1976), False, 'import os\n'), ((2041, 2056), 'os.makedirs', 'os.makedirs', (['di'], {}), '(di)\n', (2052, 2056), False, 'import os\n'), ((2181, 2199), 'os.path.exists', 'os.path.exists', (['di'], {}), '(di)\n', (2195, 2199), False, 'import os\n'), ((2265, 2280), 'os.makedirs', 'os.makedirs', (['di'], {}), '(di)\n', (2276, 2280), False, 'import os\n'), ((1333, 1351), 'os.path.exists', 'os.path.exists', (['di'], {}), '(di)\n', (1347, 1351), False, 'import os\n'), ((1420, 1435), 'os.makedirs', 'os.makedirs', (['di'], {}), '(di)\n', (1431, 1435), False, 'import os\n')] |
# ##### BEGIN GPL LICENSE BLOCK #####
# KeenTools for blender is a blender addon for using KeenTools in Blender.
# Copyright (C) 2019 KeenTools
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# ##### END GPL LICENSE BLOCK #####
import logging
import numpy as np
import bpy
def find_bpy_image_by_name(image_name):
    """Return the Blender image datablock called *image_name*, or None."""
    index = bpy.data.images.find(image_name)
    return bpy.data.images[index] if index >= 0 else None
def remove_bpy_image(image):
    """Delete *image* from bpy.data.images when it is a live datablock."""
    if not image:
        return
    if image in bpy.data.images:
        bpy.data.images.remove(image)
def remove_bpy_image_by_name(image_name):
    """Delete the image called *image_name*; do nothing when it is absent."""
    found = find_bpy_image_by_name(image_name)
    if found is None:
        return
    bpy.data.images.remove(found)
def store_bpy_image_in_scene(image):
    # Pack the pixel data into the .blend file and mark the datablock with a
    # fake user so it survives save/reload even with zero real users.
    image.pack()
    image.use_fake_user = True
def add_alpha_channel(np_image_array):
    """Stack a fully opaque alpha plane onto an (H, W, C) image array."""
    height_width = np_image_array.shape[:2]
    alpha_plane = np.ones(height_width)
    return np.dstack((np_image_array, alpha_plane))
def check_bpy_image_size(image):
    """True when *image* exists and has a strictly positive width and height."""
    if image and image.size:
        width, height = image.size[:2]
        return width > 0 and height > 0
    return False
def check_bpy_image_has_same_size(image, size):
    """True when *image* exists and its (width, height) match *size*."""
    if not (image and image.size):
        return False
    width, height = image.size[:2]
    return (width, height) == (size[0], size[1])
def safe_bpy_image_loading(blender_name, path):
    """Load the texture at *path* as *blender_name*, reusing a valid cached one.

    Returns None when the file cannot be loaded or has an invalid size.
    """
    cached = find_bpy_image_by_name(blender_name)
    if cached is not None:
        if check_bpy_image_size(cached):
            return cached
        # stale or broken datablock: drop it and reload from disk
        remove_bpy_image_by_name(blender_name)

    try:
        image = bpy.data.images.load(path)
        image.name = blender_name
    except Exception:
        logging.getLogger(__name__).error(
            'Source texture for "{}" '
            'is not found on path: {}'.format(blender_name, path))
        return None

    return image if check_bpy_image_size(image) else None
def safe_bpy_image_in_scene_loading(blender_name, path):
    """Load the texture at *path* into a packed, fake-user image *blender_name*.

    Reuses a valid cached datablock when present; returns None when the file
    cannot be loaded or has an invalid size.
    """
    logger = logging.getLogger(__name__)
    tex = find_bpy_image_by_name(blender_name)
    if tex is not None:
        if check_bpy_image_size(tex):
            return tex
        else:
            # stale or broken datablock: drop it and reload from disk
            remove_bpy_image_by_name(blender_name)
    try:
        image = bpy.data.images.load(path)
    except Exception:
        logger.error('Source texture for "{}" '
                     'is not found on path: {}'.format(blender_name, path))
        return None
    if not check_bpy_image_size(image):
        # loaded but unusable (zero-sized): clean up and report
        bpy.data.images.remove(image)
        logger.error('Source texture "{}" '
                     'has wrong format on path: {}'.format(blender_name, path))
        return None
    # copy the pixels into a fresh datablock that is packed into the scene,
    # then discard the temporary image loaded from disk
    tex = bpy.data.images.new(blender_name,
                              width=image.size[0], height=image.size[1],
                              alpha=True, float_buffer=False)
    tex.pixels[:] = image.pixels[:]
    store_bpy_image_in_scene(tex)
    bpy.data.images.remove(image)
    return tex
| [
"logging.getLogger",
"numpy.ones",
"bpy.data.images.new",
"bpy.data.images.find",
"bpy.data.images.load",
"bpy.data.images.remove"
] | [((918, 950), 'bpy.data.images.find', 'bpy.data.images.find', (['image_name'], {}), '(image_name)\n', (938, 950), False, 'import bpy\n'), ((2513, 2540), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2530, 2540), False, 'import logging\n'), ((3189, 3301), 'bpy.data.images.new', 'bpy.data.images.new', (['blender_name'], {'width': 'image.size[0]', 'height': 'image.size[1]', 'alpha': '(True)', 'float_buffer': '(False)'}), '(blender_name, width=image.size[0], height=image.size[1],\n alpha=True, float_buffer=False)\n', (3208, 3301), False, 'import bpy\n'), ((3432, 3461), 'bpy.data.images.remove', 'bpy.data.images.remove', (['image'], {}), '(image)\n', (3454, 3461), False, 'import bpy\n'), ((1114, 1143), 'bpy.data.images.remove', 'bpy.data.images.remove', (['image'], {}), '(image)\n', (1136, 1143), False, 'import bpy\n'), ((1269, 1298), 'bpy.data.images.remove', 'bpy.data.images.remove', (['image'], {}), '(image)\n', (1291, 1298), False, 'import bpy\n'), ((2092, 2118), 'bpy.data.images.load', 'bpy.data.images.load', (['path'], {}), '(path)\n', (2112, 2118), False, 'import bpy\n'), ((2763, 2789), 'bpy.data.images.load', 'bpy.data.images.load', (['path'], {}), '(path)\n', (2783, 2789), False, 'import bpy\n'), ((3004, 3033), 'bpy.data.images.remove', 'bpy.data.images.remove', (['image'], {}), '(image)\n', (3026, 3033), False, 'import bpy\n'), ((1465, 1498), 'numpy.ones', 'np.ones', (['np_image_array.shape[:2]'], {}), '(np_image_array.shape[:2])\n', (1472, 1498), True, 'import numpy as np\n'), ((2192, 2219), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2209, 2219), False, 'import logging\n')] |
from img_parser import parse
import cv2
import numpy as np
import pandas as pd
import os
# OpenPose COCO body model: network definition and pretrained weights
protoFile = "models/pose_deploy_linevec.prototxt"
weightsFile = "models/pose_iter_440000.caffemodel"

# Read the network into memory
net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)

# number of keypoints produced by the model
nPoints = 18

# Column labels, one x/y pair per keypoint: '0x', '0y', ..., '17x', '17y'.
# Generated from nPoints instead of being spelled out by hand.
COLUMN_NAMES = ['{0}{1}'.format(idx, axis)
                for idx in range(nPoints) for axis in ('x', 'y')]


def _flatten_keypoints(keypoints):
    # Turn a list of (x, y) coordinates (None for missed detections) into a
    # flat [x0, y0, x1, y1, ...] row, padding misses with None/None.
    row = []
    for coord in keypoints:
        if coord:
            row.append(coord[0])
            row.append(coord[1])
        else:
            row.append(None)
            row.append(None)
    return row


# NOTE: the training clips in "clips/" were processed the same way and
# written to "data/clipTrainRaw.csv".

TestImgNum = len(os.listdir("clips_test"))
TestRawList = []
for i in range(TestImgNum):
    print("processing " + str(i) + "\n")
    image_path = "clips_test/" + str(i + 1) + ".png"
    TestRawList.append(_flatten_keypoints(parse(net, nPoints, image_path)))

TestRawDf = pd.DataFrame(TestRawList, columns=COLUMN_NAMES)
TestRawDf.to_csv("data/clipTestRaw.csv", encoding="utf-8")
| [
"pandas.DataFrame",
"img_parser.parse",
"os.listdir",
"cv2.dnn.readNetFromCaffe"
] | [((228, 276), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (['protoFile', 'weightsFile'], {}), '(protoFile, weightsFile)\n', (252, 276), False, 'import cv2\n'), ((1848, 2127), 'pandas.DataFrame', 'pd.DataFrame', (['TestRawList'], {'columns': "['0x', '0y', '1x', '1y', '2x', '2y', '3x', '3y', '4x', '4y', '5x', '5y',\n '6x', '6y', '7x', '7y', '8x', '8y', '9x', '9y', '10x', '10y', '11x',\n '11y', '12x', '12y', '13x', '13y', '14x', '14y', '15x', '15y', '16x',\n '16y', '17x', '17y']"}), "(TestRawList, columns=['0x', '0y', '1x', '1y', '2x', '2y', '3x',\n '3y', '4x', '4y', '5x', '5y', '6x', '6y', '7x', '7y', '8x', '8y', '9x',\n '9y', '10x', '10y', '11x', '11y', '12x', '12y', '13x', '13y', '14x',\n '14y', '15x', '15y', '16x', '16y', '17x', '17y'])\n", (1860, 2127), True, 'import pandas as pd\n'), ((1364, 1388), 'os.listdir', 'os.listdir', (['"""clips_test"""'], {}), "('clips_test')\n", (1374, 1388), False, 'import os\n'), ((1543, 1574), 'img_parser.parse', 'parse', (['net', 'nPoints', 'image_path'], {}), '(net, nPoints, image_path)\n', (1548, 1574), False, 'from img_parser import parse\n')] |
""" This file contains quantum code in support of Shor's Algorithm
"""
""" Imports from qiskit"""
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
import sys
import math
import numpy as np
""" ********* QFT Functions *** """
""" Function to create QFT """
def create_QFT(circuit, up_reg, n, with_swaps):
    """Append an n-qubit quantum Fourier transform acting on *up_reg*.

    Every controlled phase is emitted because the approximation threshold
    is set to 0; the > 0 test is the hook where a nonzero threshold would
    drop negligible phases.  When *with_swaps* is 1 the final qubit
    reversal swaps are appended as well.
    """
    for i in range(n - 1, -1, -1):
        circuit.h(up_reg[i])
        for j in range(i - 1, -1, -1):
            theta = np.pi / pow(2, i - j)
            if theta > 0:  # threshold hook for an approximate QFT
                circuit.cu1(theta, up_reg[i], up_reg[j])

    if with_swaps == 1:
        # reverse the qubit order
        for k in range(n // 2):
            circuit.swap(up_reg[k], up_reg[n - 1 - k])
""" Function to create inverse QFT """
def create_inverse_QFT(circuit, up_reg, n, with_swaps):
    """Append the inverse of the n-qubit QFT acting on *up_reg*.

    Mirrors create_QFT: optional qubit-reversal swaps first, then H gates
    interleaved with negated controlled phases; the > 0 test is the (zero)
    approximation threshold hook.
    """
    if with_swaps == 1:
        # undo the qubit reversal first
        for k in range(n // 2):
            circuit.swap(up_reg[k], up_reg[n - 1 - k])

    for i in range(n):
        circuit.h(up_reg[i])
        if i == n - 1:
            continue
        j = i + 1
        for y in range(i, -1, -1):
            theta = np.pi / pow(2, j - y)
            if theta > 0:  # threshold hook for an approximate transform
                circuit.cu1(-theta, up_reg[j], up_reg[y])
""" ********* Arithmetic Functions *** """
""" Helper Functions """
def egcd(a, b):
    """Extended Euclidean algorithm, iteratively.

    Return (g, x, y) with a*x + b*y == g == gcd(a, b); the coefficients
    are identical to the ones the classic recursive formulation yields.
    """
    # Bezout coefficient pairs for the rolling remainder pair (a, b)
    s0, s1 = 0, 1
    t0, t1 = 1, 0
    while a != 0:
        q = b // a
        a, b = b % a, a
        s0, s1 = s1 - q * s0, s0
        t0, t1 = t1 - q * t0, t0
    return (b, t1, s1)


def modinv(a, m):
    """Multiplicative inverse of a modulo m.

    Raises Exception when gcd(a, m) != 1, i.e. no inverse exists.
    """
    g, x, _ = egcd(a, m)
    if g != 1:
        raise Exception('modular inverse does not exist')
    return x % m
"""Function that calculates the angle of a phase shift in the sequential QFT based on the binary digits of a."""
"""a represents a possible value of the classical register"""
def getAngle(a, N):
    """Phase angle (radians) used by the sequential QFT for the value *a*.

    *a* is read as an N-bit binary number; every set bit i (counted from
    the least significant end) contributes pi * 2**-(N - i).
    """
    bits = bin(int(a))[2:].zfill(N)
    total = 0
    # reversed(bits) walks the string LSB-first, matching s[N-1-i]
    for i, bit in enumerate(reversed(bits)):
        if bit == '1':
            total += math.pow(2, -(N - i))
    return total * np.pi
"""Function that calculates the array of angles to be used in the addition in Fourier Space"""
def getAngles(a, N):
    """Vector of per-qubit phase angles used to add *a* in Fourier space.

    Entry N-1-i accumulates pi * sum(2**-(j-i)) over the set bits j >= i
    of the N-bit binary representation of *a*.
    """
    bits = bin(int(a))[2:].zfill(N)
    angles = np.zeros([N])
    for i in range(N):
        acc = 0.0
        for j in range(i, N):
            if bits[j] == '1':
                acc += math.pow(2, -(j - i))
        angles[N - i - 1] = acc * np.pi
    return angles


def ccphase(circuit, angle, ctl1, ctl2, tgt):
    """Doubly controlled phase gate, decomposed into cu1 and cx primitives."""
    half = angle / 2
    circuit.cu1(half, ctl1, tgt)
    circuit.cx(ctl2, ctl1)
    circuit.cu1(-half, ctl1, tgt)
    circuit.cx(ctl2, ctl1)
    circuit.cu1(half, ctl2, tgt)


def phiADD(circuit, q, a, N, inv):
    """Add (inv == 0) or subtract (inv != 0) the constant *a* in Fourier space."""
    sign = 1.0 if inv == 0 else -1.0
    angles = getAngles(a, N)
    for i in range(N):
        circuit.u1(sign * angles[i], q[i])


def cphiADD(circuit, q, ctl, a, n, inv):
    """Singly controlled version of phiADD."""
    sign = 1.0 if inv == 0 else -1.0
    angles = getAngles(a, n)
    for i in range(n):
        circuit.cu1(sign * angles[i], ctl, q[i])


def ccphiADD(circuit, q, ctl1, ctl2, a, n, inv):
    """Doubly controlled version of phiADD, built from ccphase gates."""
    sign = 1.0 if inv == 0 else -1.0
    angles = getAngles(a, n)
    for i in range(n):
        ccphase(circuit, sign * angles[i], ctl1, ctl2, q[i])
"""Circuit that implements doubly controlled modular addition by a"""
def ccphiADDmodN(circuit, q, ctl1, ctl2, aux, a, N, n):
    """Doubly controlled modular addition by *a* on the Fourier-space register *q*.

    ctl1/ctl2 are the control qubits; aux is an ancilla used to detect the
    underflow of the intermediate subtraction of N (it is uncomputed again
    by the flip/copy/flip sequence below, so it is presumably expected to
    start in |0> - confirm at the call site).
    """
    # add a (controlled), then unconditionally subtract N
    ccphiADD(circuit, q, ctl1, ctl2, a, n, 0)
    phiADD(circuit, q, N, n, 1)
    # leave Fourier space to copy the top (sign) qubit into aux
    create_inverse_QFT(circuit, q, n, 0)
    circuit.cx(q[n-1],aux)
    create_QFT(circuit,q,n,0)
    # re-add N only when the subtraction underflowed (aux set)
    cphiADD(circuit, q, aux, N, n, 0)
    # subtract a again so the top qubit can be used to uncompute aux
    ccphiADD(circuit, q, ctl1, ctl2, a, n, 1)
    create_inverse_QFT(circuit, q, n, 0)
    # flip / copy / flip: clears aux based on the inverted top qubit
    circuit.x(q[n-1])
    circuit.cx(q[n-1], aux)
    circuit.x(q[n-1])
    create_QFT(circuit,q,n,0)
    # restore the sum a + q (mod N)
    ccphiADD(circuit, q, ctl1, ctl2, a, n, 0)
"""Circuit that implements the inverse of doubly controlled modular addition by a"""
def ccphiADDmodN_inv(circuit, q, ctl1, ctl2, aux, a, N, n):
    """Inverse of ccphiADDmodN.

    Applies the same building blocks in reverse order with the add/subtract
    flags inverted, undoing the doubly controlled modular addition of *a*.
    """
    ccphiADD(circuit, q, ctl1, ctl2, a, n, 1)
    create_inverse_QFT(circuit, q, n, 0)
    # flip / copy / flip on the top qubit, mirroring the forward circuit
    circuit.x(q[n-1])
    circuit.cx(q[n-1],aux)
    circuit.x(q[n-1])
    create_QFT(circuit, q, n, 0)
    ccphiADD(circuit, q, ctl1, ctl2, a, n, 0)
    cphiADD(circuit, q, aux, N, n, 1)
    create_inverse_QFT(circuit, q, n, 0)
    circuit.cx(q[n-1], aux)
    create_QFT(circuit, q, n, 0)
    phiADD(circuit, q, N, n, 0)
    ccphiADD(circuit, q, ctl1, ctl2, a, n, 1)
"""Circuit that implements single controlled modular multiplication by a"""
def cMULTmodN(circuit, ctl, q, aux, a, N, n):
    """Singly controlled modular multiplication by *a*.

    When *ctl* is set, maps |q>|0> to |q>|a*q mod N> and swaps the
    registers, using the doubly controlled modular adders on *aux*.
    """
    # accumulate a*q mod N into aux via controlled additions of 2**i * a
    create_QFT(circuit, aux, n+1, 0)
    for i in range(0, n):
        ccphiADDmodN(circuit, aux, q[i], ctl, aux[n+1], (2**i)*a % N, N, n+1)
    create_inverse_QFT(circuit, aux, n+1, 0)

    # controlled swap of the q and aux registers
    for i in range(0, n):
        circuit.cswap(ctl, q[i], aux[i])

    # uncompute the old value with the inverse adders of a^-1 mod N.
    # 2**i is kept in exact integer arithmetic: the previous math.pow(2, i)
    # returned a float, which silently loses precision once 2**i exceeds
    # 2**53 and is inconsistent with the integer form used above.
    a_inv = modinv(a, N)
    create_QFT(circuit, aux, n+1, 0)
    i = n-1
    while i >= 0:
        ccphiADDmodN_inv(circuit, aux, q[i], ctl, aux[n+1],
                         (2**i) * a_inv % N, N, n+1)
        i -= 1
    create_inverse_QFT(circuit, aux, n+1, 0)
| [
"math.pow",
"numpy.zeros"
] | [((3006, 3019), 'numpy.zeros', 'np.zeros', (['[N]'], {}), '([N])\n', (3014, 3019), True, 'import numpy as np\n'), ((2791, 2812), 'math.pow', 'math.pow', (['(2)', '(-(N - i))'], {}), '(2, -(N - i))\n', (2799, 2812), False, 'import math\n'), ((3132, 3153), 'math.pow', 'math.pow', (['(2)', '(-(j - i))'], {}), '(2, -(j - i))\n', (3140, 3153), False, 'import math\n'), ((6151, 6165), 'math.pow', 'math.pow', (['(2)', 'i'], {}), '(2, i)\n', (6159, 6165), False, 'import math\n')] |
import sys
sys.path.append('.')
from sslplay.data.digits import DataDigits
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import numpy as np
obj_data = DataDigits()
obj_data.load()
X = obj_data.X
y = obj_data.y

target_names = np.array(["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"])
colors = np.array(['black', 'lime', 'darkorange',
    'darkred', 'chocolate', 'yellow', 'olive',
    'cyan', 'darkgrey', 'darkgreen'])

scaler = StandardScaler()
pca = PCA(n_components=6)
np.random.seed(1102)

# Standardise once and reuse: the original fitted the scaler twice on the
# same X, doing identical work two times for the same result.
X_std = scaler.fit_transform(X)
X_r = pca.fit(X_std).transform(X_std)

# random subsample of 500 digits keeps the scatter plot readable
array_subset = np.random.choice(range(X_r.shape[0]), size=500, replace=False)
X_r = X_r[array_subset, :]
y_r = y[array_subset]

array_classes_show = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
plt.figure()
for color, i, target_name in zip(colors[array_classes_show], \
    array_classes_show, target_names[array_classes_show]):
    plt.scatter(
        X_r[y_r == i, 0], X_r[y_r == i, 1],
        color=color, alpha=.8, lw=2,
        label=target_name
    )
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.suptitle('PCA - DIGITS')
plt.title("Explained variance ratio: %s"
    % str(pca.explained_variance_ratio_[[0, 1]]))
plt.xlabel("Dimension 1")
plt.ylabel("Dimension 2")
plt.legend(fancybox=True, framealpha=1)
plt.show()
# Dead code: alternative component-pair plots, kept as an inert module-level
# string literal (it is never executed or assigned).
"""
array_classes_show = [1,2,3,6,8,9]
plt.figure()
for color, i, target_name in zip(colors[array_classes_show], \
    array_classes_show, target_names[array_classes_show]):
    plt.scatter(
        X_r[y_r == i, 2], X_r[y_r == i, 3],
        color=color, alpha=.8, lw=2,
        label=target_name
    )
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.suptitle('PCA - DIGITS')
plt.title("Explained variance ratio: %s"
    % str(pca.explained_variance_ratio_[[2, 3]]))
plt.xlabel("Dimension 3")
plt.ylabel("Dimension 4")
plt.legend(fancybox=True, framealpha=1)
plt.show()

array_classes_show = [2,6,8,9]
plt.figure()
for color, i, target_name in zip(colors[array_classes_show], \
    array_classes_show, target_names[array_classes_show]):
    plt.scatter(
        X_r[y_r == i, 4], X_r[y_r == i, 5],
        color=color, alpha=.8, lw=2,
        label=target_name
    )
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.suptitle('PCA - DIGITS')
plt.title("Explained variance ratio: %s"
    % str(pca.explained_variance_ratio_[[4, 5]]))
plt.xlabel("Dimension 5")
plt.ylabel("Dimension 6")
plt.legend(fancybox=True, framealpha=1)
plt.show()
"""
| [
"matplotlib.pyplot.ylabel",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.xlabel",
"sslplay.data.digits.DataDigits",
"numpy.array",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.figure",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"sys.path.append",
"matplotlib.pyplot.suptit... | [((11, 31), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (26, 31), False, 'import sys\n'), ((227, 239), 'sslplay.data.digits.DataDigits', 'DataDigits', ([], {}), '()\n', (237, 239), False, 'from sslplay.data.digits import DataDigits\n'), ((302, 362), 'numpy.array', 'np.array', (["['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']"], {}), "(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])\n", (310, 362), True, 'import numpy as np\n'), ((372, 493), 'numpy.array', 'np.array', (["['black', 'lime', 'darkorange', 'darkred', 'chocolate', 'yellow', 'olive',\n 'cyan', 'darkgrey', 'darkgreen']"], {}), "(['black', 'lime', 'darkorange', 'darkred', 'chocolate', 'yellow',\n 'olive', 'cyan', 'darkgrey', 'darkgreen'])\n", (380, 493), True, 'import numpy as np\n'), ((502, 518), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (516, 518), False, 'from sklearn.preprocessing import StandardScaler\n'), ((525, 544), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(6)'}), '(n_components=6)\n', (528, 544), False, 'from sklearn.decomposition import PCA\n'), ((545, 565), 'numpy.random.seed', 'np.random.seed', (['(1102)'], {}), '(1102)\n', (559, 565), True, 'import numpy as np\n'), ((822, 834), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (832, 834), True, 'import matplotlib.pyplot as plt\n'), ((1087, 1140), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'shadow': '(False)', 'scatterpoints': '(1)'}), "(loc='best', shadow=False, scatterpoints=1)\n", (1097, 1140), True, 'import matplotlib.pyplot as plt\n'), ((1141, 1169), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""PCA - DIGITS"""'], {}), "('PCA - DIGITS')\n", (1153, 1169), True, 'import matplotlib.pyplot as plt\n'), ((1263, 1288), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dimension 1"""'], {}), "('Dimension 1')\n", (1273, 1288), True, 'import matplotlib.pyplot as plt\n'), ((1289, 1314), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Dimension 2"""'], {}), "('Dimension 2')\n", (1299, 1314), True, 'import matplotlib.pyplot as plt\n'), ((1315, 1354), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fancybox': '(True)', 'framealpha': '(1)'}), '(fancybox=True, framealpha=1)\n', (1325, 1354), True, 'import matplotlib.pyplot as plt\n'), ((1355, 1365), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1363, 1365), True, 'import matplotlib.pyplot as plt\n'), ((957, 1058), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_r[y_r == i, 0]', 'X_r[y_r == i, 1]'], {'color': 'color', 'alpha': '(0.8)', 'lw': '(2)', 'label': 'target_name'}), '(X_r[y_r == i, 0], X_r[y_r == i, 1], color=color, alpha=0.8, lw=\n 2, label=target_name)\n', (968, 1058), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
import logging
import click
@click.command(short_help='Fits and saves feature extractors.')
@click.argument('dataset_path', type=click.Path(exists=True))
@click.argument('feature_extractor_path', type=click.Path())
def build_features(dataset_path, feature_extractor_path):
    """Fits and saves feature extractors.

    Reads a dataset from DATASET_PATH, runs the feature extractor
    pipeline and saves it in FEATURE_EXTRACTOR_PATH for future use.
    """
    logger = logging.getLogger(__name__)
    # lazy %-style arguments: the message is only interpolated when the
    # record is actually emitted (same output text as before)
    logger.info('reading dataset from %s', dataset_path)
    logger.info('saving feature extractor pipeline in %s',
                feature_extractor_path)
| [
"logging.getLogger",
"click.command",
"click.Path"
] | [((56, 118), 'click.command', 'click.command', ([], {'short_help': '"""Fits and saves feature extractors."""'}), "(short_help='Fits and saves feature extractors.')\n", (69, 118), False, 'import click\n'), ((498, 525), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (515, 525), False, 'import logging\n'), ((156, 179), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (166, 179), False, 'import click\n'), ((228, 240), 'click.Path', 'click.Path', ([], {}), '()\n', (238, 240), False, 'import click\n')] |
from collider.data.sensor import Sensor
from collider.data.message_package import MessagePackage
from scipy.stats import spearmanr
import numpy as np
class FakeForwardReturn(Sensor):
    """Sensor emitting a noisy, exponentially smoothed proxy of the forward return."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.lastvalue = None  # smoothed signal carried over between calls

    @property
    def output_variables(self):
        return ["exposure", "factorName"]

    def do(self, date, mp: MessagePackage, **kwargs):
        scale_factor = kwargs.get("scaler", 0.5)
        noise_sigma = kwargs.get("sigma", 0.1)
        blend = kwargs.get("shrinkage", 0.2)

        true_forward = mp.exposure
        # scaled true signal plus gaussian noise.
        # NOTE(review): the noise size of 4000 is hard-coded - presumably the
        # universe size; confirm it matches mp.exposure's length.
        noisy = true_forward * scale_factor + np.random.normal(scale=noise_sigma, size=4000)

        if self.lastvalue is None:
            smoothed = noisy
        else:
            # exponential smoothing: weight `shrinkage` on the new sample
            smoothed = self.lastvalue * (1 - blend) + noisy * blend
        self.lastvalue = smoothed

        # rank correlation between true and faked signal, for debugging only
        self.logger.debug(spearmanr(true_forward, smoothed, nan_policy="omit")[0])
        return smoothed, np.array(["fakeForwardReturn"])
| [
"numpy.random.normal",
"numpy.array",
"scipy.stats.spearmanr"
] | [((660, 700), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'sigma', 'size': '(4000)'}), '(scale=sigma, size=4000)\n', (676, 700), True, 'import numpy as np\n'), ((1036, 1067), 'numpy.array', 'np.array', (["['fakeForwardReturn']"], {}), "(['fakeForwardReturn'])\n", (1044, 1067), True, 'import numpy as np\n'), ((946, 1004), 'scipy.stats.spearmanr', 'spearmanr', (['trueForwardReturn', 'thisvalue'], {'nan_policy': '"""omit"""'}), "(trueForwardReturn, thisvalue, nan_policy='omit')\n", (955, 1004), False, 'from scipy.stats import spearmanr\n')] |
#!/usr/bin/env python
import aptx
import argparse
import textwrap
import os.path
parser = argparse.ArgumentParser(
    description='Print schema version of .aptx files.',
    epilog='example: aptx_schemaver.py *.aptx')
parser.add_argument('aptxfiles', nargs='+', help='aptx file specification')
args = parser.parse_args()

# proposal root name -> schema version string ('dict' previously shadowed
# the builtin of the same name)
schema_by_root = {}
for aptxfile in args.aptxfiles:
    basename = os.path.basename(aptxfile)
    # os.path.splitext drops the extension correctly; the previous
    # str.rstrip('.aptx') stripped any run of trailing '.', 'a', 'p', 't',
    # 'x' characters, mangling names such as 'map.aptx' -> 'm'.
    rootname = os.path.splitext(basename)[0]
    try:
        schema_by_root[rootname] = aptx.Proposal(aptxfile).schemaversion
    except Exception:
        # report the offending file (the old message formatted the module
        # object 'aptx' instead of the filename)
        print('error reading schema version for {}'.format(aptxfile))

if schema_by_root:
    for version in sorted(set(schema_by_root.values())):
        rootnames = [k for k, v in schema_by_root.items() if v == version]
        out = 'schemaVersion=' + version + ': ' + ' '.join(rootnames)
        for line in textwrap.wrap(out, width=78):
            print(line)
| [
"aptx.Proposal",
"argparse.ArgumentParser",
"textwrap.wrap"
] | [((92, 215), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Print schema version of .aptx files."""', 'epilog': '"""example: aptx_schemaver.py *.aptx"""'}), "(description='Print schema version of .aptx files.',\n epilog='example: aptx_schemaver.py *.aptx')\n", (115, 215), False, 'import argparse\n'), ((869, 897), 'textwrap.wrap', 'textwrap.wrap', (['out'], {'width': '(78)'}), '(out, width=78)\n', (882, 897), False, 'import textwrap\n'), ((476, 499), 'aptx.Proposal', 'aptx.Proposal', (['aptxfile'], {}), '(aptxfile)\n', (489, 499), False, 'import aptx\n')] |