code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while left_idx <= mid_idx and right_idx <= n - 1:
if n_list[left_idx] < n_list[right_idx]:
nn_list.append(n_list[left_idx])
left_idx += 1
elif n_list[left_idx] > n_list[right_idx]:
nn_list.append(n_list[right_idx])
right_idx += 1
else:
break
print(nn_list, end='\n')
<|reserved_special_token_1|>
n = int(input())
n_list = [int(input()) for _ in range(n)]
nn_list = []
mid_idx = len(n_list) // 2
left_idx = 0
right_idx = mid_idx + 1
while left_idx <= mid_idx and right_idx <= n - 1:
if n_list[left_idx] < n_list[right_idx]:
nn_list.append(n_list[left_idx])
left_idx += 1
elif n_list[left_idx] > n_list[right_idx]:
nn_list.append(n_list[right_idx])
right_idx += 1
else:
break
print(nn_list, end='\n')
<|reserved_special_token_1|>
# https://www.acmicpc.net/problem/2751
# n 개 수가 주어짐
# 목표 오름차순정렬
# 첫 줄 n개
# 둘째줄부터 n개의 줄에 수가 주어짐 세로로
# 출력 오름차순 정렬한 결과를 한 줄에 하나씩 출력한다?
n=int(input())
n_list=[int(input()) for _ in range(n)]
# print(n_list)
nn_list = []
# 인덱스 2개 관리
mid_idx = len(n_list) //2
left_idx = 0
right_idx = mid_idx +1
while left_idx <= mid_idx and right_idx <= n-1:
# nn_list = []
if n_list[left_idx] < n_list[right_idx]:
nn_list.append(n_list[left_idx])
left_idx += 1
elif n_list[left_idx] > n_list[right_idx]:
nn_list.append(n_list[right_idx])
right_idx+=1
else:
break
print(nn_list, end='\n')
# 문제해결 정렬이 된다 = 값이 하나일때 = if start_idx == end_idx return 값이 하나짜리 리스트가 넘어간다는 것을 기억해라
# merge_sort과정에서
# combined_list=[]
# while f[fidx] <=len(f) or이냐 and냐 b[bidx]<=len(b) and or 모두 동작하지 않음
# f<b
# 작은값을 넣어지고
# 어펜드
# 작은값의 리스트 인덱스 +1
# 반대의 경우도 똑같음
# 프론트를 다 넣으면 백에서 못넣은 값들이 남아있을수도 있다
# 백이든 프론트든 하나는 끝났다 하나는 빈리스트가 나온다
# 나머지 하나의 남은 리스트를 붙여준다
# print(combined_list = combined_list +f[fidx:] +b[bidx:]) 동작을 볼 수 있음
# return combined_list = combined_list +f[fidx:] +b[bidx:]
# 탑 다운 방식
|
flexible
|
{
"blob_id": "fb5508b1b5aa36c4921358d6ca7f96fc7d565241",
"index": 5104,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile left_idx <= mid_idx and right_idx <= n - 1:\n if n_list[left_idx] < n_list[right_idx]:\n nn_list.append(n_list[left_idx])\n left_idx += 1\n elif n_list[left_idx] > n_list[right_idx]:\n nn_list.append(n_list[right_idx])\n right_idx += 1\n else:\n break\nprint(nn_list, end='\\n')\n",
"step-3": "n = int(input())\nn_list = [int(input()) for _ in range(n)]\nnn_list = []\nmid_idx = len(n_list) // 2\nleft_idx = 0\nright_idx = mid_idx + 1\nwhile left_idx <= mid_idx and right_idx <= n - 1:\n if n_list[left_idx] < n_list[right_idx]:\n nn_list.append(n_list[left_idx])\n left_idx += 1\n elif n_list[left_idx] > n_list[right_idx]:\n nn_list.append(n_list[right_idx])\n right_idx += 1\n else:\n break\nprint(nn_list, end='\\n')\n",
"step-4": "# https://www.acmicpc.net/problem/2751\n\n# n 개 수가 주어짐 \n\n# 목표 오름차순정렬\n\n# 첫 줄 n개\n# 둘째줄부터 n개의 줄에 수가 주어짐 세로로\n\n# 출력 오름차순 정렬한 결과를 한 줄에 하나씩 출력한다?\n\n\nn=int(input())\nn_list=[int(input()) for _ in range(n)]\n# print(n_list)\nnn_list = []\n# 인덱스 2개 관리\nmid_idx = len(n_list) //2\nleft_idx = 0 \nright_idx = mid_idx +1\n\nwhile left_idx <= mid_idx and right_idx <= n-1:\n # nn_list = []\n if n_list[left_idx] < n_list[right_idx]:\n nn_list.append(n_list[left_idx])\n left_idx += 1\n elif n_list[left_idx] > n_list[right_idx]:\n nn_list.append(n_list[right_idx])\n right_idx+=1\n else:\n break\nprint(nn_list, end='\\n')\n\n\n# 문제해결 정렬이 된다 = 값이 하나일때 = if start_idx == end_idx return 값이 하나짜리 리스트가 넘어간다는 것을 기억해라\n# merge_sort과정에서\n# combined_list=[]\n# while f[fidx] <=len(f) or이냐 and냐 b[bidx]<=len(b) and or 모두 동작하지 않음\n\n\n# f<b\n# 작은값을 넣어지고\n# 어펜드\n# 작은값의 리스트 인덱스 +1\n# 반대의 경우도 똑같음\n\n# 프론트를 다 넣으면 백에서 못넣은 값들이 남아있을수도 있다\n\n# 백이든 프론트든 하나는 끝났다 하나는 빈리스트가 나온다\n# 나머지 하나의 남은 리스트를 붙여준다\n# print(combined_list = combined_list +f[fidx:] +b[bidx:]) 동작을 볼 수 있음\n# return combined_list = combined_list +f[fidx:] +b[bidx:]\n# 탑 다운 방식",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# bot.py
import os
import sqlite3
import json
import datetime
from dotenv import load_dotenv
import discord
from discord.ext import commands
from discord.ext.commands import Bot
from cogs.utils import helper as h
intents = discord.Intents.default()
intents.members = True
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
PREFIX = os.getenv('BOT_PREFIX')
dir_path = os.path.dirname(os.path.realpath(__file__))
extensions = ['cogs.general', 'cogs.events', 'cogs.moderation']
class LLKEventsBot(Bot):
def __init__(self):
super().__init__(
description="Bot created by Oto#2494",
command_prefix=PREFIX,
owner_id=271992863175344130,
intents=intents,
help_command=None
)
print('\nLoading embed data...')
try:
with open(f'{dir_path}/db/embed_id.json', 'r+') as f:
try:
self.embed_data = json.load(f)
if self.embed_data:
self.embed_id = self.embed_data['eventEmbed']['id']
except:
self.embed_data = {"eventEmbed":{
"id": None }}
self.embed_id = self.embed_data['eventEmbed']['id']
json.dump(self.embed_data, f, indent=4)
except:
with open(f'{dir_path}/db/embed_id.json', 'w+'):
self.embed_data = {"eventEmbed":{
"id": self.bot.embed_id
}}
self.embed_id = self.embed_data['eventEmbed']['id']
json.dump(self.embed_data, f, indent=4)
print('Loading permissions data...')
try:
with open('db/roles.json', 'r+') as f:
try:
self.perms_data = json.load(f)
if self.perms_data:
self.perms = self.perms_data['permissions']
except Exception as e:
print(f'{e}')
except:
with open(f'{dir_path}/db/roles.json', 'w+') as f:
self.perms_data = {"permissions":{
"admins": [],
"mods": [],
"hosts": []
}}
self.perms = self.perms_data['permissions']
json.dump(self.perms_data, f, indent=4)
print('Loading roles DB...')
self.conn = sqlite3.connect(f'{dir_path}/db/events.db')
self.cursor = self.conn.cursor()
self.cursor.execute("""
CREATE TABLE IF NOT EXISTS events (
event_id STRING NOT NULL,
user_id STRING NOT NULL,
description STRING NOT NULL,
target STRING NOT NULL
)
""")
# print('Loading embed data...')
# try:
# with open('db/embed_id.json', 'r+') as f:
# try:
# self.embed_data = json.load(f)
# if self.embed_data:
# self.embed_id = self.embed_data['eventEmbed']['id']
# except Exception as e:
# print(f'{e}')
# except:
# open('db/embed_id.json', 'w+')
async def on_ready(self):
if not os.path.exists('db'):
os.makedirs('db')
if not os.path.exists('logs'):
os.makedirs('logs')
print('\nLoading extensions...')
for extension in extensions:
print(f'Loading {extension}')
bot.load_extension(extension)
await bot.change_presence(activity=discord.Game(f'{PREFIX}help'))
print(f'\nLogged in as: {bot.user.name} - {bot.user.id}\nVersion: {discord.__version__}\n')
# async def on_message(self, msg):
# if msg.author.bot:
# return
async def on_command_error(self, ctx, error):
if isinstance(error, commands.BotMissingPermissions):
await ctx.send(f'I have no permission to do that')
return
elif isinstance(error, commands.CheckFailure):
await ctx.send(f'You have no permission to use this command')
return
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.send(f'You forgot to inform the following parameter: {error.param}')
else:
d = datetime.datetime.now()
with open(f'logs/{d.year}-{d.month}-{d.day}.log', 'a', encoding='utf8') as f:
# f.write(f'''-------------\n{d.hour}:{d.minute}:{d.second}.{d.microsecond}\n{type(error)}\n{error}\n-------------\n\n'''')
f.write(
'-------------\n'
f'{d.hour}:{d.minute}:{d.second}.{d.microsecond}\n'
f'Command: {ctx.message.content}\n'
f'Author: {ctx.author}\n'
f'Exception: {type(error)}\n'
f'Description: {error}\n'
'-------------\n\n'
)
await ctx.send(f'It seems something went wrong:```{error}```')
return
bot = LLKEventsBot()
bot.run(TOKEN)
|
normal
|
{
"blob_id": "849343561dd9bdcfc1da66c604e1bfa4aa10ddf3",
"index": 5359,
"step-1": "<mask token>\n\n\nclass LLKEventsBot(Bot):\n <mask token>\n\n async def on_ready(self):\n if not os.path.exists('db'):\n os.makedirs('db')\n if not os.path.exists('logs'):\n os.makedirs('logs')\n print('\\nLoading extensions...')\n for extension in extensions:\n print(f'Loading {extension}')\n bot.load_extension(extension)\n await bot.change_presence(activity=discord.Game(f'{PREFIX}help'))\n print(\n f'\\nLogged in as: {bot.user.name} - {bot.user.id}\\nVersion: {discord.__version__}\\n'\n )\n\n async def on_command_error(self, ctx, error):\n if isinstance(error, commands.BotMissingPermissions):\n await ctx.send(f'I have no permission to do that')\n return\n elif isinstance(error, commands.CheckFailure):\n await ctx.send(f'You have no permission to use this command')\n return\n elif isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\n f'You forgot to inform the following parameter: {error.param}')\n else:\n d = datetime.datetime.now()\n with open(f'logs/{d.year}-{d.month}-{d.day}.log', 'a', encoding\n ='utf8') as f:\n f.write(\n f\"\"\"-------------\n{d.hour}:{d.minute}:{d.second}.{d.microsecond}\nCommand: {ctx.message.content}\nAuthor: {ctx.author}\nException: {type(error)}\nDescription: {error}\n-------------\n\n\"\"\"\n )\n await ctx.send(f'It seems something went wrong:```{error}```')\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LLKEventsBot(Bot):\n\n def __init__(self):\n super().__init__(description='Bot created by Oto#2494',\n command_prefix=PREFIX, owner_id=271992863175344130, intents=\n intents, help_command=None)\n print('\\nLoading embed data...')\n try:\n with open(f'{dir_path}/db/embed_id.json', 'r+') as f:\n try:\n self.embed_data = json.load(f)\n if self.embed_data:\n self.embed_id = self.embed_data['eventEmbed']['id']\n except:\n self.embed_data = {'eventEmbed': {'id': None}}\n self.embed_id = self.embed_data['eventEmbed']['id']\n json.dump(self.embed_data, f, indent=4)\n except:\n with open(f'{dir_path}/db/embed_id.json', 'w+'):\n self.embed_data = {'eventEmbed': {'id': self.bot.embed_id}}\n self.embed_id = self.embed_data['eventEmbed']['id']\n json.dump(self.embed_data, f, indent=4)\n print('Loading permissions data...')\n try:\n with open('db/roles.json', 'r+') as f:\n try:\n self.perms_data = json.load(f)\n if self.perms_data:\n self.perms = self.perms_data['permissions']\n except Exception as e:\n print(f'{e}')\n except:\n with open(f'{dir_path}/db/roles.json', 'w+') as f:\n self.perms_data = {'permissions': {'admins': [], 'mods': [],\n 'hosts': []}}\n self.perms = self.perms_data['permissions']\n json.dump(self.perms_data, f, indent=4)\n print('Loading roles DB...')\n self.conn = sqlite3.connect(f'{dir_path}/db/events.db')\n self.cursor = self.conn.cursor()\n self.cursor.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS events (\n event_id STRING NOT NULL,\n user_id STRING NOT NULL,\n description STRING NOT NULL,\n target STRING NOT NULL\n )\n \"\"\"\n )\n\n async def on_ready(self):\n if not os.path.exists('db'):\n os.makedirs('db')\n if not os.path.exists('logs'):\n os.makedirs('logs')\n print('\\nLoading extensions...')\n for extension in extensions:\n print(f'Loading {extension}')\n bot.load_extension(extension)\n await bot.change_presence(activity=discord.Game(f'{PREFIX}help'))\n print(\n f'\\nLogged in as: {bot.user.name} - {bot.user.id}\\nVersion: {discord.__version__}\\n'\n )\n\n async def on_command_error(self, ctx, error):\n if isinstance(error, commands.BotMissingPermissions):\n await ctx.send(f'I have no permission to do that')\n return\n elif isinstance(error, commands.CheckFailure):\n await ctx.send(f'You have no permission to use this command')\n return\n elif isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\n f'You forgot to inform the following parameter: {error.param}')\n else:\n d = datetime.datetime.now()\n with open(f'logs/{d.year}-{d.month}-{d.day}.log', 'a', encoding\n ='utf8') as f:\n f.write(\n f\"\"\"-------------\n{d.hour}:{d.minute}:{d.second}.{d.microsecond}\nCommand: {ctx.message.content}\nAuthor: {ctx.author}\nException: {type(error)}\nDescription: {error}\n-------------\n\n\"\"\"\n )\n await ctx.send(f'It seems something went wrong:```{error}```')\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\nintents = discord.Intents.default()\nintents.members = True\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nGUILD = os.getenv('DISCORD_GUILD')\nPREFIX = os.getenv('BOT_PREFIX')\ndir_path = os.path.dirname(os.path.realpath(__file__))\nextensions = ['cogs.general', 'cogs.events', 'cogs.moderation']\n\n\nclass LLKEventsBot(Bot):\n\n def __init__(self):\n super().__init__(description='Bot created by Oto#2494',\n command_prefix=PREFIX, owner_id=271992863175344130, intents=\n intents, help_command=None)\n print('\\nLoading embed data...')\n try:\n with open(f'{dir_path}/db/embed_id.json', 'r+') as f:\n try:\n self.embed_data = json.load(f)\n if self.embed_data:\n self.embed_id = self.embed_data['eventEmbed']['id']\n except:\n self.embed_data = {'eventEmbed': {'id': None}}\n self.embed_id = self.embed_data['eventEmbed']['id']\n json.dump(self.embed_data, f, indent=4)\n except:\n with open(f'{dir_path}/db/embed_id.json', 'w+'):\n self.embed_data = {'eventEmbed': {'id': self.bot.embed_id}}\n self.embed_id = self.embed_data['eventEmbed']['id']\n json.dump(self.embed_data, f, indent=4)\n print('Loading permissions data...')\n try:\n with open('db/roles.json', 'r+') as f:\n try:\n self.perms_data = json.load(f)\n if self.perms_data:\n self.perms = self.perms_data['permissions']\n except Exception as e:\n print(f'{e}')\n except:\n with open(f'{dir_path}/db/roles.json', 'w+') as f:\n self.perms_data = {'permissions': {'admins': [], 'mods': [],\n 'hosts': []}}\n self.perms = self.perms_data['permissions']\n json.dump(self.perms_data, f, indent=4)\n print('Loading roles DB...')\n self.conn = sqlite3.connect(f'{dir_path}/db/events.db')\n self.cursor = self.conn.cursor()\n self.cursor.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS events (\n event_id STRING NOT NULL,\n user_id STRING NOT NULL,\n description STRING NOT NULL,\n target STRING NOT NULL\n )\n \"\"\"\n )\n\n async def on_ready(self):\n if not os.path.exists('db'):\n os.makedirs('db')\n if not os.path.exists('logs'):\n os.makedirs('logs')\n print('\\nLoading extensions...')\n for extension in extensions:\n print(f'Loading {extension}')\n bot.load_extension(extension)\n await bot.change_presence(activity=discord.Game(f'{PREFIX}help'))\n print(\n f'\\nLogged in as: {bot.user.name} - {bot.user.id}\\nVersion: {discord.__version__}\\n'\n )\n\n async def on_command_error(self, ctx, error):\n if isinstance(error, commands.BotMissingPermissions):\n await ctx.send(f'I have no permission to do that')\n return\n elif isinstance(error, commands.CheckFailure):\n await ctx.send(f'You have no permission to use this command')\n return\n elif isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\n f'You forgot to inform the following parameter: {error.param}')\n else:\n d = datetime.datetime.now()\n with open(f'logs/{d.year}-{d.month}-{d.day}.log', 'a', encoding\n ='utf8') as f:\n f.write(\n f\"\"\"-------------\n{d.hour}:{d.minute}:{d.second}.{d.microsecond}\nCommand: {ctx.message.content}\nAuthor: {ctx.author}\nException: {type(error)}\nDescription: {error}\n-------------\n\n\"\"\"\n )\n await ctx.send(f'It seems something went wrong:```{error}```')\n return\n\n\nbot = LLKEventsBot()\nbot.run(TOKEN)\n",
"step-4": "import os\nimport sqlite3\nimport json\nimport datetime\nfrom dotenv import load_dotenv\nimport discord\nfrom discord.ext import commands\nfrom discord.ext.commands import Bot\nfrom cogs.utils import helper as h\nintents = discord.Intents.default()\nintents.members = True\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nGUILD = os.getenv('DISCORD_GUILD')\nPREFIX = os.getenv('BOT_PREFIX')\ndir_path = os.path.dirname(os.path.realpath(__file__))\nextensions = ['cogs.general', 'cogs.events', 'cogs.moderation']\n\n\nclass LLKEventsBot(Bot):\n\n def __init__(self):\n super().__init__(description='Bot created by Oto#2494',\n command_prefix=PREFIX, owner_id=271992863175344130, intents=\n intents, help_command=None)\n print('\\nLoading embed data...')\n try:\n with open(f'{dir_path}/db/embed_id.json', 'r+') as f:\n try:\n self.embed_data = json.load(f)\n if self.embed_data:\n self.embed_id = self.embed_data['eventEmbed']['id']\n except:\n self.embed_data = {'eventEmbed': {'id': None}}\n self.embed_id = self.embed_data['eventEmbed']['id']\n json.dump(self.embed_data, f, indent=4)\n except:\n with open(f'{dir_path}/db/embed_id.json', 'w+'):\n self.embed_data = {'eventEmbed': {'id': self.bot.embed_id}}\n self.embed_id = self.embed_data['eventEmbed']['id']\n json.dump(self.embed_data, f, indent=4)\n print('Loading permissions data...')\n try:\n with open('db/roles.json', 'r+') as f:\n try:\n self.perms_data = json.load(f)\n if self.perms_data:\n self.perms = self.perms_data['permissions']\n except Exception as e:\n print(f'{e}')\n except:\n with open(f'{dir_path}/db/roles.json', 'w+') as f:\n self.perms_data = {'permissions': {'admins': [], 'mods': [],\n 'hosts': []}}\n self.perms = self.perms_data['permissions']\n json.dump(self.perms_data, f, indent=4)\n print('Loading roles DB...')\n self.conn = sqlite3.connect(f'{dir_path}/db/events.db')\n self.cursor = self.conn.cursor()\n self.cursor.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS events (\n event_id STRING NOT NULL,\n user_id STRING NOT NULL,\n description STRING NOT NULL,\n target STRING NOT NULL\n )\n \"\"\"\n )\n\n async def on_ready(self):\n if not os.path.exists('db'):\n os.makedirs('db')\n if not os.path.exists('logs'):\n os.makedirs('logs')\n print('\\nLoading extensions...')\n for extension in extensions:\n print(f'Loading {extension}')\n bot.load_extension(extension)\n await bot.change_presence(activity=discord.Game(f'{PREFIX}help'))\n print(\n f'\\nLogged in as: {bot.user.name} - {bot.user.id}\\nVersion: {discord.__version__}\\n'\n )\n\n async def on_command_error(self, ctx, error):\n if isinstance(error, commands.BotMissingPermissions):\n await ctx.send(f'I have no permission to do that')\n return\n elif isinstance(error, commands.CheckFailure):\n await ctx.send(f'You have no permission to use this command')\n return\n elif isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\n f'You forgot to inform the following parameter: {error.param}')\n else:\n d = datetime.datetime.now()\n with open(f'logs/{d.year}-{d.month}-{d.day}.log', 'a', encoding\n ='utf8') as f:\n f.write(\n f\"\"\"-------------\n{d.hour}:{d.minute}:{d.second}.{d.microsecond}\nCommand: {ctx.message.content}\nAuthor: {ctx.author}\nException: {type(error)}\nDescription: {error}\n-------------\n\n\"\"\"\n )\n await ctx.send(f'It seems something went wrong:```{error}```')\n return\n\n\nbot = LLKEventsBot()\nbot.run(TOKEN)\n",
"step-5": "# bot.py\nimport os\nimport sqlite3\nimport json\n\nimport datetime\n\nfrom dotenv import load_dotenv\n\nimport discord\nfrom discord.ext import commands\nfrom discord.ext.commands import Bot\n\nfrom cogs.utils import helper as h\n\nintents = discord.Intents.default()\nintents.members = True\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nGUILD = os.getenv('DISCORD_GUILD')\nPREFIX = os.getenv('BOT_PREFIX')\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\nextensions = ['cogs.general', 'cogs.events', 'cogs.moderation']\n\nclass LLKEventsBot(Bot):\n\n def __init__(self):\n super().__init__(\n description=\"Bot created by Oto#2494\",\n command_prefix=PREFIX,\n owner_id=271992863175344130,\n intents=intents,\n help_command=None\n )\n print('\\nLoading embed data...')\n try:\n with open(f'{dir_path}/db/embed_id.json', 'r+') as f:\n try:\n self.embed_data = json.load(f)\n if self.embed_data:\n self.embed_id = self.embed_data['eventEmbed']['id']\n except:\n self.embed_data = {\"eventEmbed\":{\n \"id\": None }}\n self.embed_id = self.embed_data['eventEmbed']['id']\n json.dump(self.embed_data, f, indent=4)\n except:\n with open(f'{dir_path}/db/embed_id.json', 'w+'):\n self.embed_data = {\"eventEmbed\":{\n \"id\": self.bot.embed_id\n }}\n self.embed_id = self.embed_data['eventEmbed']['id']\n json.dump(self.embed_data, f, indent=4)\n\n print('Loading permissions data...')\n try:\n with open('db/roles.json', 'r+') as f:\n try:\n self.perms_data = json.load(f)\n if self.perms_data:\n self.perms = self.perms_data['permissions']\n except Exception as e:\n print(f'{e}')\n except:\n with open(f'{dir_path}/db/roles.json', 'w+') as f:\n self.perms_data = {\"permissions\":{\n \"admins\": [],\n \"mods\": [],\n \"hosts\": []\n }}\n self.perms = self.perms_data['permissions']\n json.dump(self.perms_data, f, indent=4)\n\n print('Loading roles DB...')\n self.conn = sqlite3.connect(f'{dir_path}/db/events.db')\n self.cursor = self.conn.cursor()\n self.cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS events (\n event_id STRING NOT NULL,\n user_id STRING NOT NULL,\n description STRING NOT NULL,\n target STRING NOT NULL\n )\n \"\"\")\n\n # print('Loading embed data...')\n # try:\n # with open('db/embed_id.json', 'r+') as f:\n # try:\n # self.embed_data = json.load(f)\n # if self.embed_data:\n # self.embed_id = self.embed_data['eventEmbed']['id']\n # except Exception as e:\n # print(f'{e}')\n # except:\n # open('db/embed_id.json', 'w+')\n\n async def on_ready(self):\n if not os.path.exists('db'):\n os.makedirs('db')\n if not os.path.exists('logs'):\n os.makedirs('logs')\n\n print('\\nLoading extensions...')\n for extension in extensions:\n print(f'Loading {extension}')\n bot.load_extension(extension)\n\n await bot.change_presence(activity=discord.Game(f'{PREFIX}help'))\n\n print(f'\\nLogged in as: {bot.user.name} - {bot.user.id}\\nVersion: {discord.__version__}\\n')\n\n # async def on_message(self, msg):\n # if msg.author.bot:\n # return\n\n async def on_command_error(self, ctx, error):\n if isinstance(error, commands.BotMissingPermissions):\n await ctx.send(f'I have no permission to do that')\n return\n elif isinstance(error, commands.CheckFailure):\n await ctx.send(f'You have no permission to use this command')\n return\n elif isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(f'You forgot to inform the following parameter: {error.param}')\n else:\n d = datetime.datetime.now()\n with open(f'logs/{d.year}-{d.month}-{d.day}.log', 'a', encoding='utf8') as f:\n # f.write(f'''-------------\\n{d.hour}:{d.minute}:{d.second}.{d.microsecond}\\n{type(error)}\\n{error}\\n-------------\\n\\n'''')\n f.write(\n '-------------\\n'\n f'{d.hour}:{d.minute}:{d.second}.{d.microsecond}\\n'\n f'Command: {ctx.message.content}\\n'\n f'Author: {ctx.author}\\n'\n f'Exception: {type(error)}\\n'\n f'Description: {error}\\n'\n '-------------\\n\\n'\n )\n await ctx.send(f'It seems something went wrong:```{error}```')\n return\n\n\nbot = LLKEventsBot()\nbot.run(TOKEN)\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
#coding=utf-8
import requests,sys
result_url=[]
def main():
counts=open(sys.argv[1]).readlines()
for line in open(sys.argv[1]):
line=line.strip("\n")
url=line
try:
#url="http://s6000.sgcc.com.cn/WebContent/s6000/main/index.jsp#no-back"
r=requests.get(url,verify=True,timeout=3)
print(url+" "+str(r.status_code))
print(str(r.text))
if r.status_code==200 and "MPEGVideo" in r.text:
result_url.append(url)
except Exception as e:
print(str(e))
for i in result_url:
print(i)
file_200.write(i+"\n")
if __name__ == '__main__':
file_200=open("result_uWSGI_file.txt","w")
main()
file_200.flush()
file_200.close()
|
normal
|
{
"blob_id": "96a4659f03879e051af95b5aa9c1e1364015fb86",
"index": 8723,
"step-1": "<mask token>\n\n\ndef main():\n counts = open(sys.argv[1]).readlines()\n for line in open(sys.argv[1]):\n line = line.strip('\\n')\n url = line\n try:\n r = requests.get(url, verify=True, timeout=3)\n print(url + ' ' + str(r.status_code))\n print(str(r.text))\n if r.status_code == 200 and 'MPEGVideo' in r.text:\n result_url.append(url)\n except Exception as e:\n print(str(e))\n for i in result_url:\n print(i)\n file_200.write(i + '\\n')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n counts = open(sys.argv[1]).readlines()\n for line in open(sys.argv[1]):\n line = line.strip('\\n')\n url = line\n try:\n r = requests.get(url, verify=True, timeout=3)\n print(url + ' ' + str(r.status_code))\n print(str(r.text))\n if r.status_code == 200 and 'MPEGVideo' in r.text:\n result_url.append(url)\n except Exception as e:\n print(str(e))\n for i in result_url:\n print(i)\n file_200.write(i + '\\n')\n\n\nif __name__ == '__main__':\n file_200 = open('result_uWSGI_file.txt', 'w')\n main()\n file_200.flush()\n file_200.close()\n",
"step-3": "<mask token>\nresult_url = []\n\n\ndef main():\n counts = open(sys.argv[1]).readlines()\n for line in open(sys.argv[1]):\n line = line.strip('\\n')\n url = line\n try:\n r = requests.get(url, verify=True, timeout=3)\n print(url + ' ' + str(r.status_code))\n print(str(r.text))\n if r.status_code == 200 and 'MPEGVideo' in r.text:\n result_url.append(url)\n except Exception as e:\n print(str(e))\n for i in result_url:\n print(i)\n file_200.write(i + '\\n')\n\n\nif __name__ == '__main__':\n file_200 = open('result_uWSGI_file.txt', 'w')\n main()\n file_200.flush()\n file_200.close()\n",
"step-4": "import requests, sys\nresult_url = []\n\n\ndef main():\n counts = open(sys.argv[1]).readlines()\n for line in open(sys.argv[1]):\n line = line.strip('\\n')\n url = line\n try:\n r = requests.get(url, verify=True, timeout=3)\n print(url + ' ' + str(r.status_code))\n print(str(r.text))\n if r.status_code == 200 and 'MPEGVideo' in r.text:\n result_url.append(url)\n except Exception as e:\n print(str(e))\n for i in result_url:\n print(i)\n file_200.write(i + '\\n')\n\n\nif __name__ == '__main__':\n file_200 = open('result_uWSGI_file.txt', 'w')\n main()\n file_200.flush()\n file_200.close()\n",
"step-5": "#coding=utf-8\r\nimport requests,sys\r\nresult_url=[]\r\n\r\ndef main():\r\n counts=open(sys.argv[1]).readlines()\r\n for line in open(sys.argv[1]):\r\n line=line.strip(\"\\n\")\r\n url=line\r\n try:\r\n #url=\"http://s6000.sgcc.com.cn/WebContent/s6000/main/index.jsp#no-back\"\r\n r=requests.get(url,verify=True,timeout=3)\r\n print(url+\" \"+str(r.status_code))\r\n print(str(r.text))\r\n if r.status_code==200 and \"MPEGVideo\" in r.text:\r\n result_url.append(url) \r\n except Exception as e:\r\n print(str(e))\r\n for i in result_url:\r\n print(i)\r\n file_200.write(i+\"\\n\")\r\n\r\nif __name__ == '__main__':\r\n file_200=open(\"result_uWSGI_file.txt\",\"w\") \r\n main()\r\n file_200.flush() \r\n file_200.close() \r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# ------------------------------------------------------------------------------------------------------
# Copyright (c) Leo Hanisch. All rights reserved.
# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.
# ------------------------------------------------------------------------------------------------------
from typing import Tuple
import numpy as np
class Nest:
def __init__(self, function, lower_boundary, upper_boundary):
self.__function = function
self.__lower_boundary = lower_boundary
self.__upper_boundary = upper_boundary
# Randomly create a new nest position
self.__position = np.random.uniform(self.__lower_boundary, self.__upper_boundary, 2)
self.__value = self.__function(self.__position)
@property
def position(self) -> Tuple[float, float]:
return self.__position
@property
def value(self) -> float:
return self.__value
def update_pos(self, new_position: Tuple[float, float]) -> None:
"""
If the new position's value is better than the old one, update the nests position and value.
Arguments:
new_position {Tuple[float, float]} -- The new position
"""
new_value = self.__function(new_position)
if new_value < self.__value:
self.__value = new_value
self.__position = new_position
|
normal
|
{
"blob_id": "917a291c7b62dee392d7411c3e039949d74d7af8",
"index": 1375,
"step-1": "<mask token>\n\n\nclass Nest:\n <mask token>\n <mask token>\n <mask token>\n\n def update_pos(self, new_position: Tuple[float, float]) ->None:\n \"\"\"\n If the new position's value is better than the old one, update the nests position and value.\n\n Arguments:\n new_position {Tuple[float, float]} -- The new position\n \"\"\"\n new_value = self.__function(new_position)\n if new_value < self.__value:\n self.__value = new_value\n self.__position = new_position\n",
"step-2": "<mask token>\n\n\nclass Nest:\n\n def __init__(self, function, lower_boundary, upper_boundary):\n self.__function = function\n self.__lower_boundary = lower_boundary\n self.__upper_boundary = upper_boundary\n self.__position = np.random.uniform(self.__lower_boundary, self.\n __upper_boundary, 2)\n self.__value = self.__function(self.__position)\n <mask token>\n <mask token>\n\n def update_pos(self, new_position: Tuple[float, float]) ->None:\n \"\"\"\n If the new position's value is better than the old one, update the nests position and value.\n\n Arguments:\n new_position {Tuple[float, float]} -- The new position\n \"\"\"\n new_value = self.__function(new_position)\n if new_value < self.__value:\n self.__value = new_value\n self.__position = new_position\n",
"step-3": "<mask token>\n\n\nclass Nest:\n\n def __init__(self, function, lower_boundary, upper_boundary):\n self.__function = function\n self.__lower_boundary = lower_boundary\n self.__upper_boundary = upper_boundary\n self.__position = np.random.uniform(self.__lower_boundary, self.\n __upper_boundary, 2)\n self.__value = self.__function(self.__position)\n <mask token>\n\n @property\n def value(self) ->float:\n return self.__value\n\n def update_pos(self, new_position: Tuple[float, float]) ->None:\n \"\"\"\n If the new position's value is better than the old one, update the nests position and value.\n\n Arguments:\n new_position {Tuple[float, float]} -- The new position\n \"\"\"\n new_value = self.__function(new_position)\n if new_value < self.__value:\n self.__value = new_value\n self.__position = new_position\n",
"step-4": "<mask token>\n\n\nclass Nest:\n\n def __init__(self, function, lower_boundary, upper_boundary):\n self.__function = function\n self.__lower_boundary = lower_boundary\n self.__upper_boundary = upper_boundary\n self.__position = np.random.uniform(self.__lower_boundary, self.\n __upper_boundary, 2)\n self.__value = self.__function(self.__position)\n\n @property\n def position(self) ->Tuple[float, float]:\n return self.__position\n\n @property\n def value(self) ->float:\n return self.__value\n\n def update_pos(self, new_position: Tuple[float, float]) ->None:\n \"\"\"\n If the new position's value is better than the old one, update the nests position and value.\n\n Arguments:\n new_position {Tuple[float, float]} -- The new position\n \"\"\"\n new_value = self.__function(new_position)\n if new_value < self.__value:\n self.__value = new_value\n self.__position = new_position\n",
"step-5": "# ------------------------------------------------------------------------------------------------------\n# Copyright (c) Leo Hanisch. All rights reserved.\n# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.\n# ------------------------------------------------------------------------------------------------------\n\nfrom typing import Tuple\nimport numpy as np\n\n\nclass Nest:\n def __init__(self, function, lower_boundary, upper_boundary):\n self.__function = function\n self.__lower_boundary = lower_boundary\n self.__upper_boundary = upper_boundary\n\n # Randomly create a new nest position\n self.__position = np.random.uniform(self.__lower_boundary, self.__upper_boundary, 2)\n self.__value = self.__function(self.__position)\n\n @property\n def position(self) -> Tuple[float, float]:\n return self.__position\n\n @property\n def value(self) -> float:\n return self.__value\n\n def update_pos(self, new_position: Tuple[float, float]) -> None:\n \"\"\"\n If the new position's value is better than the old one, update the nests position and value.\n\n Arguments:\n new_position {Tuple[float, float]} -- The new position\n \"\"\"\n\n new_value = self.__function(new_position)\n if new_value < self.__value:\n self.__value = new_value\n self.__position = new_position\n",
"step-ids": [
2,
3,
4,
5,
7
]
}
|
[
2,
3,
4,
5,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def trainW2v(args):
clazz = [['Accidents', 'Arts', 'Attacks', 'Economy', 'Miscellaneous',
'Politics', 'Science', 'Sports', 'undefined'], ['Accidents', 'Arts',
'Attacks', 'Economy', 'Miscellaneous', 'Politics', 'Science',
'Sports'], ['positive', 'negative']]
models = ['rbf', 'poly']
FileHelper.create('log')
C = 0.5
gamma = 0.5
degree = 6
types = ['generic', 'specific']
if args.ontology == 'dbpedia':
types.append('normal')
for classes in clazz:
task = 'pipeline2' if len(classes) == 9 else 'task2' if len(classes
) == 8 else 'task1'
train_instances, train_labels, train_texts = Word2VecHelper.loadData(
classes, args, 'train')
test_instances, test_labels, test_texts = Word2VecHelper.loadData(
classes, args, 'test')
sys.stdout = open('log/{}_{}.txt'.format(args.ontology, task), 'w')
for model in models:
args.classifier = model
for _type in types:
args.type = _type
for merge in range(2):
args.merge = merge
if args.force == 1 or not os.path.exists('{}_{}_{}.bin'
.format(args.ontology, args.type, 'merged' if args.
merge == 1 else 'simple')):
files = ['./train/{}/{}/positive.txt'.format(args.
ontology, args.type),
'./train/{}/{}/negative.txt'.format(args.
ontology, args.type)]
model = Word2VecHelper.createModel(files, name=
'{}_{}'.format(args.ontology, args.type), merge
=args.merge)
else:
model = Word2VecHelper.loadModel('{}_{}'.format(
args.ontology, args.type), merge=args.merge)
w2v = {w: vec for w, vec in zip(model.wv.index2word,
model.wv.syn0)}
print('========== Model', args.ontology, args.type,
args.merge, task, args.classifier, '==========')
if args.classifier == 'ben':
classifier = Pipeline([('w2v vect',
MeanEmbeddingVectorizer(w2v)), ('clf',
BernoulliNB())])
else:
classifier = Pipeline([('w2v vect',
MeanEmbeddingVectorizer(w2v)), ('clf', svm.SVC(
kernel=args.classifier, degree=degree, C=C,
gamma=gamma, probability=True))])
y_score = classifier.fit(train_texts, train_labels
).predict_proba(test_texts)
y_pred = classifier.predict(test_texts)
print('========= Classification Report ==========')
print(classification_report(test_labels, y_pred))
print('========= Confusion Matrix ==========')
print(confusion_matrix(test_labels, y_pred, labels=classes)
)
GraphHelper.savePrediction('{}_{}_{}_{}_{}'.format(args
.ontology, args.type, args.classifier, task, args.
merge), y_pred=y_pred, y_score=y_score, classes=
classes, y=test_labels)
GraphHelper.saveClassifier(classifier,
'{}_{}_{}_{}_{}.pkl'.format(args.ontology, args.
type, args.classifier, task, args.merge))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def trainW2v(args):
clazz = [['Accidents', 'Arts', 'Attacks', 'Economy', 'Miscellaneous',
'Politics', 'Science', 'Sports', 'undefined'], ['Accidents', 'Arts',
'Attacks', 'Economy', 'Miscellaneous', 'Politics', 'Science',
'Sports'], ['positive', 'negative']]
models = ['rbf', 'poly']
FileHelper.create('log')
C = 0.5
gamma = 0.5
degree = 6
types = ['generic', 'specific']
if args.ontology == 'dbpedia':
types.append('normal')
for classes in clazz:
task = 'pipeline2' if len(classes) == 9 else 'task2' if len(classes
) == 8 else 'task1'
train_instances, train_labels, train_texts = Word2VecHelper.loadData(
classes, args, 'train')
test_instances, test_labels, test_texts = Word2VecHelper.loadData(
classes, args, 'test')
sys.stdout = open('log/{}_{}.txt'.format(args.ontology, task), 'w')
for model in models:
args.classifier = model
for _type in types:
args.type = _type
for merge in range(2):
args.merge = merge
if args.force == 1 or not os.path.exists('{}_{}_{}.bin'
.format(args.ontology, args.type, 'merged' if args.
merge == 1 else 'simple')):
files = ['./train/{}/{}/positive.txt'.format(args.
ontology, args.type),
'./train/{}/{}/negative.txt'.format(args.
ontology, args.type)]
model = Word2VecHelper.createModel(files, name=
'{}_{}'.format(args.ontology, args.type), merge
=args.merge)
else:
model = Word2VecHelper.loadModel('{}_{}'.format(
args.ontology, args.type), merge=args.merge)
w2v = {w: vec for w, vec in zip(model.wv.index2word,
model.wv.syn0)}
print('========== Model', args.ontology, args.type,
args.merge, task, args.classifier, '==========')
if args.classifier == 'ben':
classifier = Pipeline([('w2v vect',
MeanEmbeddingVectorizer(w2v)), ('clf',
BernoulliNB())])
else:
classifier = Pipeline([('w2v vect',
MeanEmbeddingVectorizer(w2v)), ('clf', svm.SVC(
kernel=args.classifier, degree=degree, C=C,
gamma=gamma, probability=True))])
y_score = classifier.fit(train_texts, train_labels
).predict_proba(test_texts)
y_pred = classifier.predict(test_texts)
print('========= Classification Report ==========')
print(classification_report(test_labels, y_pred))
print('========= Confusion Matrix ==========')
print(confusion_matrix(test_labels, y_pred, labels=classes)
)
GraphHelper.savePrediction('{}_{}_{}_{}_{}'.format(args
.ontology, args.type, args.classifier, task, args.
merge), y_pred=y_pred, y_score=y_score, classes=
classes, y=test_labels)
GraphHelper.saveClassifier(classifier,
'{}_{}_{}_{}_{}.pkl'.format(args.ontology, args.
type, args.classifier, task, args.merge))
if __name__ == '__main__':
parser = OptionParser('%prog -o ontology -t type -f force ')
parser.add_option('-o', '--ontology', dest='ontology', default='dbpedia')
parser.add_option('-t', '--type', dest='type', default='generic')
parser.add_option('-f', '--force', dest='force', default=0, type=int)
parser.add_option('-c', '--classifier', dest='classifier', default='poly')
parser.add_option('-j', '--job', dest='job', type=int, default=10)
parser.add_option('-w', '--window', dest='window', type=int, default=2)
parser.add_option('-s', '--size', dest='size', type=int, default=300)
parser.add_option('-m', '--merge', dest='merge', type=int, default=0)
parser.add_option('-e', '--experiment', dest='experiment', type=int,
default=1)
opts, args = parser.parse_args()
trainW2v(opts)
<|reserved_special_token_1|>
from sklearn.naive_bayes import *
from sklearn import svm
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report, confusion_matrix
from optparse import OptionParser
from helper import FileHelper, Word2VecHelper, GraphHelper
import helper
from helper.VectorHelper import *
import os
import sys
def trainW2v(args):
clazz = [['Accidents', 'Arts', 'Attacks', 'Economy', 'Miscellaneous',
'Politics', 'Science', 'Sports', 'undefined'], ['Accidents', 'Arts',
'Attacks', 'Economy', 'Miscellaneous', 'Politics', 'Science',
'Sports'], ['positive', 'negative']]
models = ['rbf', 'poly']
FileHelper.create('log')
C = 0.5
gamma = 0.5
degree = 6
types = ['generic', 'specific']
if args.ontology == 'dbpedia':
types.append('normal')
for classes in clazz:
task = 'pipeline2' if len(classes) == 9 else 'task2' if len(classes
) == 8 else 'task1'
train_instances, train_labels, train_texts = Word2VecHelper.loadData(
classes, args, 'train')
test_instances, test_labels, test_texts = Word2VecHelper.loadData(
classes, args, 'test')
sys.stdout = open('log/{}_{}.txt'.format(args.ontology, task), 'w')
for model in models:
args.classifier = model
for _type in types:
args.type = _type
for merge in range(2):
args.merge = merge
if args.force == 1 or not os.path.exists('{}_{}_{}.bin'
.format(args.ontology, args.type, 'merged' if args.
merge == 1 else 'simple')):
files = ['./train/{}/{}/positive.txt'.format(args.
ontology, args.type),
'./train/{}/{}/negative.txt'.format(args.
ontology, args.type)]
model = Word2VecHelper.createModel(files, name=
'{}_{}'.format(args.ontology, args.type), merge
=args.merge)
else:
model = Word2VecHelper.loadModel('{}_{}'.format(
args.ontology, args.type), merge=args.merge)
w2v = {w: vec for w, vec in zip(model.wv.index2word,
model.wv.syn0)}
print('========== Model', args.ontology, args.type,
args.merge, task, args.classifier, '==========')
if args.classifier == 'ben':
classifier = Pipeline([('w2v vect',
MeanEmbeddingVectorizer(w2v)), ('clf',
BernoulliNB())])
else:
classifier = Pipeline([('w2v vect',
MeanEmbeddingVectorizer(w2v)), ('clf', svm.SVC(
kernel=args.classifier, degree=degree, C=C,
gamma=gamma, probability=True))])
y_score = classifier.fit(train_texts, train_labels
).predict_proba(test_texts)
y_pred = classifier.predict(test_texts)
print('========= Classification Report ==========')
print(classification_report(test_labels, y_pred))
print('========= Confusion Matrix ==========')
print(confusion_matrix(test_labels, y_pred, labels=classes)
)
GraphHelper.savePrediction('{}_{}_{}_{}_{}'.format(args
.ontology, args.type, args.classifier, task, args.
merge), y_pred=y_pred, y_score=y_score, classes=
classes, y=test_labels)
GraphHelper.saveClassifier(classifier,
'{}_{}_{}_{}_{}.pkl'.format(args.ontology, args.
type, args.classifier, task, args.merge))
if __name__ == '__main__':
parser = OptionParser('%prog -o ontology -t type -f force ')
parser.add_option('-o', '--ontology', dest='ontology', default='dbpedia')
parser.add_option('-t', '--type', dest='type', default='generic')
parser.add_option('-f', '--force', dest='force', default=0, type=int)
parser.add_option('-c', '--classifier', dest='classifier', default='poly')
parser.add_option('-j', '--job', dest='job', type=int, default=10)
parser.add_option('-w', '--window', dest='window', type=int, default=2)
parser.add_option('-s', '--size', dest='size', type=int, default=300)
parser.add_option('-m', '--merge', dest='merge', type=int, default=0)
parser.add_option('-e', '--experiment', dest='experiment', type=int,
default=1)
opts, args = parser.parse_args()
trainW2v(opts)
<|reserved_special_token_1|>
from sklearn.naive_bayes import *
from sklearn import svm
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report, confusion_matrix
from optparse import OptionParser
from helper import FileHelper, Word2VecHelper, GraphHelper
import helper
from helper.VectorHelper import *
import os
import sys
#log = helper.enableLog()
def trainW2v(args):
clazz = [["Accidents", "Arts", "Attacks", "Economy", "Miscellaneous", "Politics", "Science", "Sports","undefined"], ["Accidents", "Arts", "Attacks", "Economy", "Miscellaneous", "Politics", "Science", "Sports"], ['positive', 'negative']]
models = ['rbf', 'poly']
FileHelper.create("log")
C = 0.5 # SVM regularization parameter
gamma = 0.5
degree = 6
types = ['generic', 'specific']
if args.ontology =='dbpedia':
types.append('normal')
for classes in clazz:
task = 'pipeline2' if len(classes) == 9 else 'task2' if len(classes) == 8 else 'task1'
train_instances, train_labels, train_texts = Word2VecHelper.loadData(classes, args, 'train')
test_instances, test_labels, test_texts = Word2VecHelper.loadData(classes, args, 'test')
sys.stdout = open(
"log/{}_{}.txt".format(args.ontology, task), "w")
for model in models:
args.classifier = model
for _type in types:
args.type = _type
for merge in range(2):
args.merge = merge
if args.force == 1 or not os.path.exists("{}_{}_{}.bin".format(args.ontology, args.type, 'merged' if args.merge==1 else 'simple')):
files = ["./train/{}/{}/positive.txt".format(args.ontology, args.type),
"./train/{}/{}/negative.txt".format(args.ontology, args.type)]
model = Word2VecHelper.createModel(files, name="{}_{}".format(args.ontology, args.type),
merge=args.merge)
else:
model = Word2VecHelper.loadModel("{}_{}".format(args.ontology, args.type), merge=args.merge)
w2v = {w: vec for w, vec in zip(model.wv.index2word, model.wv.syn0)}
print("========== Model", args.ontology, args.type, args.merge, task, args.classifier, "==========")
if args.classifier == 'ben':
classifier = Pipeline([("w2v vect", MeanEmbeddingVectorizer(w2v)),
("clf", BernoulliNB())])
else:
classifier = Pipeline([("w2v vect", MeanEmbeddingVectorizer(w2v)),
("clf", svm.SVC(kernel=args.classifier, degree=degree, C=C, gamma=gamma,
probability=True))])
y_score = classifier.fit(train_texts, train_labels).predict_proba(test_texts)
y_pred = classifier.predict(test_texts)
#f.write("========= Classification Report ==========\n")
print("========= Classification Report ==========")
print(classification_report(test_labels, y_pred))
#f.write(classification_report(test_labels, y_pred)+"\n")
print("========= Confusion Matrix ==========")
#f.write("========= Confusion Matrix ==========\n")
print(confusion_matrix(test_labels,y_pred, labels=classes))
#f.write(confusion_matrix(test_labels,y_pred, labels=classes)+"\n")
GraphHelper.savePrediction("{}_{}_{}_{}_{}".format(args.ontology,args.type,args.classifier,task, args.merge), y_pred=y_pred,y_score=y_score,classes=classes,y=test_labels )
GraphHelper.saveClassifier(classifier, "{}_{}_{}_{}_{}.pkl".format(args.ontology,args.type,args.classifier,task, args.merge))
#f.close()
#trainW2v()
if __name__ == "__main__":
parser = OptionParser('''%prog -o ontology -t type -f force ''')
parser.add_option('-o', '--ontology', dest='ontology', default="dbpedia")
parser.add_option('-t', '--type', dest='type', default="generic")
parser.add_option('-f', '--force', dest='force', default=0, type=int)
parser.add_option('-c', '--classifier', dest='classifier', default='poly')
parser.add_option('-j', '--job', dest='job', type=int, default=10)
parser.add_option('-w', '--window', dest='window', type=int, default=2)
parser.add_option('-s', '--size', dest='size', type=int, default=300)
parser.add_option('-m', '--merge', dest='merge', type=int, default=0)
parser.add_option('-e', '--experiment', dest='experiment', type=int, default=1)
opts, args = parser.parse_args()
trainW2v(opts)
|
flexible
|
{
"blob_id": "3bc9c6a66f749858ea5801202b0ac80755c1b347",
"index": 6493,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef trainW2v(args):\n clazz = [['Accidents', 'Arts', 'Attacks', 'Economy', 'Miscellaneous',\n 'Politics', 'Science', 'Sports', 'undefined'], ['Accidents', 'Arts',\n 'Attacks', 'Economy', 'Miscellaneous', 'Politics', 'Science',\n 'Sports'], ['positive', 'negative']]\n models = ['rbf', 'poly']\n FileHelper.create('log')\n C = 0.5\n gamma = 0.5\n degree = 6\n types = ['generic', 'specific']\n if args.ontology == 'dbpedia':\n types.append('normal')\n for classes in clazz:\n task = 'pipeline2' if len(classes) == 9 else 'task2' if len(classes\n ) == 8 else 'task1'\n train_instances, train_labels, train_texts = Word2VecHelper.loadData(\n classes, args, 'train')\n test_instances, test_labels, test_texts = Word2VecHelper.loadData(\n classes, args, 'test')\n sys.stdout = open('log/{}_{}.txt'.format(args.ontology, task), 'w')\n for model in models:\n args.classifier = model\n for _type in types:\n args.type = _type\n for merge in range(2):\n args.merge = merge\n if args.force == 1 or not os.path.exists('{}_{}_{}.bin'\n .format(args.ontology, args.type, 'merged' if args.\n merge == 1 else 'simple')):\n files = ['./train/{}/{}/positive.txt'.format(args.\n ontology, args.type),\n './train/{}/{}/negative.txt'.format(args.\n ontology, args.type)]\n model = Word2VecHelper.createModel(files, name=\n '{}_{}'.format(args.ontology, args.type), merge\n =args.merge)\n else:\n model = Word2VecHelper.loadModel('{}_{}'.format(\n args.ontology, args.type), merge=args.merge)\n w2v = {w: vec for w, vec in zip(model.wv.index2word,\n model.wv.syn0)}\n print('========== Model', args.ontology, args.type,\n args.merge, task, args.classifier, '==========')\n if args.classifier == 'ben':\n classifier = Pipeline([('w2v vect',\n MeanEmbeddingVectorizer(w2v)), ('clf',\n BernoulliNB())])\n else:\n classifier = Pipeline([('w2v vect',\n MeanEmbeddingVectorizer(w2v)), ('clf', svm.SVC(\n kernel=args.classifier, degree=degree, C=C,\n gamma=gamma, probability=True))])\n y_score = classifier.fit(train_texts, train_labels\n ).predict_proba(test_texts)\n y_pred = classifier.predict(test_texts)\n print('========= Classification Report ==========')\n print(classification_report(test_labels, y_pred))\n print('========= Confusion Matrix ==========')\n print(confusion_matrix(test_labels, y_pred, labels=classes)\n )\n GraphHelper.savePrediction('{}_{}_{}_{}_{}'.format(args\n .ontology, args.type, args.classifier, task, args.\n merge), y_pred=y_pred, y_score=y_score, classes=\n classes, y=test_labels)\n GraphHelper.saveClassifier(classifier,\n '{}_{}_{}_{}_{}.pkl'.format(args.ontology, args.\n type, args.classifier, task, args.merge))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef trainW2v(args):\n clazz = [['Accidents', 'Arts', 'Attacks', 'Economy', 'Miscellaneous',\n 'Politics', 'Science', 'Sports', 'undefined'], ['Accidents', 'Arts',\n 'Attacks', 'Economy', 'Miscellaneous', 'Politics', 'Science',\n 'Sports'], ['positive', 'negative']]\n models = ['rbf', 'poly']\n FileHelper.create('log')\n C = 0.5\n gamma = 0.5\n degree = 6\n types = ['generic', 'specific']\n if args.ontology == 'dbpedia':\n types.append('normal')\n for classes in clazz:\n task = 'pipeline2' if len(classes) == 9 else 'task2' if len(classes\n ) == 8 else 'task1'\n train_instances, train_labels, train_texts = Word2VecHelper.loadData(\n classes, args, 'train')\n test_instances, test_labels, test_texts = Word2VecHelper.loadData(\n classes, args, 'test')\n sys.stdout = open('log/{}_{}.txt'.format(args.ontology, task), 'w')\n for model in models:\n args.classifier = model\n for _type in types:\n args.type = _type\n for merge in range(2):\n args.merge = merge\n if args.force == 1 or not os.path.exists('{}_{}_{}.bin'\n .format(args.ontology, args.type, 'merged' if args.\n merge == 1 else 'simple')):\n files = ['./train/{}/{}/positive.txt'.format(args.\n ontology, args.type),\n './train/{}/{}/negative.txt'.format(args.\n ontology, args.type)]\n model = Word2VecHelper.createModel(files, name=\n '{}_{}'.format(args.ontology, args.type), merge\n =args.merge)\n else:\n model = Word2VecHelper.loadModel('{}_{}'.format(\n args.ontology, args.type), merge=args.merge)\n w2v = {w: vec for w, vec in zip(model.wv.index2word,\n model.wv.syn0)}\n print('========== Model', args.ontology, args.type,\n args.merge, task, args.classifier, '==========')\n if args.classifier == 'ben':\n classifier = Pipeline([('w2v vect',\n MeanEmbeddingVectorizer(w2v)), ('clf',\n BernoulliNB())])\n else:\n classifier = Pipeline([('w2v vect',\n MeanEmbeddingVectorizer(w2v)), ('clf', svm.SVC(\n kernel=args.classifier, degree=degree, C=C,\n gamma=gamma, probability=True))])\n y_score = classifier.fit(train_texts, train_labels\n ).predict_proba(test_texts)\n y_pred = classifier.predict(test_texts)\n print('========= Classification Report ==========')\n print(classification_report(test_labels, y_pred))\n print('========= Confusion Matrix ==========')\n print(confusion_matrix(test_labels, y_pred, labels=classes)\n )\n GraphHelper.savePrediction('{}_{}_{}_{}_{}'.format(args\n .ontology, args.type, args.classifier, task, args.\n merge), y_pred=y_pred, y_score=y_score, classes=\n classes, y=test_labels)\n GraphHelper.saveClassifier(classifier,\n '{}_{}_{}_{}_{}.pkl'.format(args.ontology, args.\n type, args.classifier, task, args.merge))\n\n\nif __name__ == '__main__':\n parser = OptionParser('%prog -o ontology -t type -f force ')\n parser.add_option('-o', '--ontology', dest='ontology', default='dbpedia')\n parser.add_option('-t', '--type', dest='type', default='generic')\n parser.add_option('-f', '--force', dest='force', default=0, type=int)\n parser.add_option('-c', '--classifier', dest='classifier', default='poly')\n parser.add_option('-j', '--job', dest='job', type=int, default=10)\n parser.add_option('-w', '--window', dest='window', type=int, default=2)\n parser.add_option('-s', '--size', dest='size', type=int, default=300)\n parser.add_option('-m', '--merge', dest='merge', type=int, default=0)\n parser.add_option('-e', '--experiment', dest='experiment', type=int,\n default=1)\n opts, args = parser.parse_args()\n trainW2v(opts)\n",
"step-4": "from sklearn.naive_bayes import *\nfrom sklearn import svm\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom optparse import OptionParser\nfrom helper import FileHelper, Word2VecHelper, GraphHelper\nimport helper\nfrom helper.VectorHelper import *\nimport os\nimport sys\n\n\ndef trainW2v(args):\n clazz = [['Accidents', 'Arts', 'Attacks', 'Economy', 'Miscellaneous',\n 'Politics', 'Science', 'Sports', 'undefined'], ['Accidents', 'Arts',\n 'Attacks', 'Economy', 'Miscellaneous', 'Politics', 'Science',\n 'Sports'], ['positive', 'negative']]\n models = ['rbf', 'poly']\n FileHelper.create('log')\n C = 0.5\n gamma = 0.5\n degree = 6\n types = ['generic', 'specific']\n if args.ontology == 'dbpedia':\n types.append('normal')\n for classes in clazz:\n task = 'pipeline2' if len(classes) == 9 else 'task2' if len(classes\n ) == 8 else 'task1'\n train_instances, train_labels, train_texts = Word2VecHelper.loadData(\n classes, args, 'train')\n test_instances, test_labels, test_texts = Word2VecHelper.loadData(\n classes, args, 'test')\n sys.stdout = open('log/{}_{}.txt'.format(args.ontology, task), 'w')\n for model in models:\n args.classifier = model\n for _type in types:\n args.type = _type\n for merge in range(2):\n args.merge = merge\n if args.force == 1 or not os.path.exists('{}_{}_{}.bin'\n .format(args.ontology, args.type, 'merged' if args.\n merge == 1 else 'simple')):\n files = ['./train/{}/{}/positive.txt'.format(args.\n ontology, args.type),\n './train/{}/{}/negative.txt'.format(args.\n ontology, args.type)]\n model = Word2VecHelper.createModel(files, name=\n '{}_{}'.format(args.ontology, args.type), merge\n =args.merge)\n else:\n model = Word2VecHelper.loadModel('{}_{}'.format(\n args.ontology, args.type), merge=args.merge)\n w2v = {w: vec for w, vec in zip(model.wv.index2word,\n model.wv.syn0)}\n print('========== Model', args.ontology, args.type,\n args.merge, task, args.classifier, '==========')\n if args.classifier == 'ben':\n classifier = Pipeline([('w2v vect',\n MeanEmbeddingVectorizer(w2v)), ('clf',\n BernoulliNB())])\n else:\n classifier = Pipeline([('w2v vect',\n MeanEmbeddingVectorizer(w2v)), ('clf', svm.SVC(\n kernel=args.classifier, degree=degree, C=C,\n gamma=gamma, probability=True))])\n y_score = classifier.fit(train_texts, train_labels\n ).predict_proba(test_texts)\n y_pred = classifier.predict(test_texts)\n print('========= Classification Report ==========')\n print(classification_report(test_labels, y_pred))\n print('========= Confusion Matrix ==========')\n print(confusion_matrix(test_labels, y_pred, labels=classes)\n )\n GraphHelper.savePrediction('{}_{}_{}_{}_{}'.format(args\n .ontology, args.type, args.classifier, task, args.\n merge), y_pred=y_pred, y_score=y_score, classes=\n classes, y=test_labels)\n GraphHelper.saveClassifier(classifier,\n '{}_{}_{}_{}_{}.pkl'.format(args.ontology, args.\n type, args.classifier, task, args.merge))\n\n\nif __name__ == '__main__':\n parser = OptionParser('%prog -o ontology -t type -f force ')\n parser.add_option('-o', '--ontology', dest='ontology', default='dbpedia')\n parser.add_option('-t', '--type', dest='type', default='generic')\n parser.add_option('-f', '--force', dest='force', default=0, type=int)\n parser.add_option('-c', '--classifier', dest='classifier', default='poly')\n parser.add_option('-j', '--job', dest='job', type=int, default=10)\n parser.add_option('-w', '--window', dest='window', type=int, default=2)\n parser.add_option('-s', '--size', dest='size', type=int, default=300)\n parser.add_option('-m', '--merge', dest='merge', type=int, default=0)\n parser.add_option('-e', '--experiment', dest='experiment', type=int,\n default=1)\n opts, args = parser.parse_args()\n trainW2v(opts)\n",
"step-5": "from sklearn.naive_bayes import *\nfrom sklearn import svm\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom optparse import OptionParser\nfrom helper import FileHelper, Word2VecHelper, GraphHelper\nimport helper\nfrom helper.VectorHelper import *\n\nimport os\nimport sys\n\n\n#log = helper.enableLog()\n\ndef trainW2v(args):\n clazz = [[\"Accidents\", \"Arts\", \"Attacks\", \"Economy\", \"Miscellaneous\", \"Politics\", \"Science\", \"Sports\",\"undefined\"], [\"Accidents\", \"Arts\", \"Attacks\", \"Economy\", \"Miscellaneous\", \"Politics\", \"Science\", \"Sports\"], ['positive', 'negative']]\n models = ['rbf', 'poly']\n FileHelper.create(\"log\")\n\n C = 0.5 # SVM regularization parameter\n gamma = 0.5\n degree = 6\n types = ['generic', 'specific']\n if args.ontology =='dbpedia':\n types.append('normal')\n\n for classes in clazz:\n task = 'pipeline2' if len(classes) == 9 else 'task2' if len(classes) == 8 else 'task1'\n train_instances, train_labels, train_texts = Word2VecHelper.loadData(classes, args, 'train')\n test_instances, test_labels, test_texts = Word2VecHelper.loadData(classes, args, 'test')\n\n sys.stdout = open(\n \"log/{}_{}.txt\".format(args.ontology, task), \"w\")\n\n for model in models:\n args.classifier = model\n\n for _type in types:\n args.type = _type\n for merge in range(2):\n args.merge = merge\n if args.force == 1 or not os.path.exists(\"{}_{}_{}.bin\".format(args.ontology, args.type, 'merged' if args.merge==1 else 'simple')):\n files = [\"./train/{}/{}/positive.txt\".format(args.ontology, args.type),\n \"./train/{}/{}/negative.txt\".format(args.ontology, args.type)]\n model = Word2VecHelper.createModel(files, name=\"{}_{}\".format(args.ontology, args.type),\n merge=args.merge)\n else:\n model = Word2VecHelper.loadModel(\"{}_{}\".format(args.ontology, args.type), merge=args.merge)\n\n w2v = {w: vec for w, vec in zip(model.wv.index2word, model.wv.syn0)}\n\n print(\"========== Model\", args.ontology, args.type, args.merge, task, args.classifier, \"==========\")\n if args.classifier == 'ben':\n classifier = Pipeline([(\"w2v vect\", MeanEmbeddingVectorizer(w2v)),\n (\"clf\", BernoulliNB())])\n else:\n classifier = Pipeline([(\"w2v vect\", MeanEmbeddingVectorizer(w2v)),\n (\"clf\", svm.SVC(kernel=args.classifier, degree=degree, C=C, gamma=gamma,\n probability=True))])\n\n y_score = classifier.fit(train_texts, train_labels).predict_proba(test_texts)\n y_pred = classifier.predict(test_texts)\n #f.write(\"========= Classification Report ==========\\n\")\n print(\"========= Classification Report ==========\")\n print(classification_report(test_labels, y_pred))\n #f.write(classification_report(test_labels, y_pred)+\"\\n\")\n\n print(\"========= Confusion Matrix ==========\")\n #f.write(\"========= Confusion Matrix ==========\\n\")\n print(confusion_matrix(test_labels,y_pred, labels=classes))\n #f.write(confusion_matrix(test_labels,y_pred, labels=classes)+\"\\n\")\n\n GraphHelper.savePrediction(\"{}_{}_{}_{}_{}\".format(args.ontology,args.type,args.classifier,task, args.merge), y_pred=y_pred,y_score=y_score,classes=classes,y=test_labels )\n GraphHelper.saveClassifier(classifier, \"{}_{}_{}_{}_{}.pkl\".format(args.ontology,args.type,args.classifier,task, args.merge))\n\n #f.close()\n\n#trainW2v()\n\nif __name__ == \"__main__\":\n parser = OptionParser('''%prog -o ontology -t type -f force ''')\n parser.add_option('-o', '--ontology', dest='ontology', default=\"dbpedia\")\n parser.add_option('-t', '--type', dest='type', default=\"generic\")\n parser.add_option('-f', '--force', dest='force', default=0, type=int)\n parser.add_option('-c', '--classifier', dest='classifier', default='poly')\n parser.add_option('-j', '--job', dest='job', type=int, default=10)\n parser.add_option('-w', '--window', dest='window', type=int, default=2)\n parser.add_option('-s', '--size', dest='size', type=int, default=300)\n parser.add_option('-m', '--merge', dest='merge', type=int, default=0)\n parser.add_option('-e', '--experiment', dest='experiment', type=int, default=1)\n opts, args = parser.parse_args()\n\n trainW2v(opts)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProjectrolesConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProjectrolesConfig(AppConfig):
name = 'projectroles'
<|reserved_special_token_1|>
from django.apps import AppConfig
class ProjectrolesConfig(AppConfig):
name = 'projectroles'
|
flexible
|
{
"blob_id": "6a4585e0e2f5ebbd0f9a7fa203f76bb88ff9c2a0",
"index": 2920,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProjectrolesConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ProjectrolesConfig(AppConfig):\n name = 'projectroles'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass ProjectrolesConfig(AppConfig):\n name = 'projectroles'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests
import json
import pyttsx
engine = pyttsx.init()
engine.say('Hello from Eliq.')
engine.runAndWait()
power_value = 0
power_value_int = 0
prompt=0
Eliq_just_NOW ={}
accesstoken = "xxxxxxxxxxxxxxxxxxxxxx"
#Say warning for power use over this limmit in Watts
level_warning = 2000
Eliq_request_string = ('https://my.eliq.io/api/datanow?accesstoken={}&channelid=32217'.format(accesstoken))
response = requests.get (Eliq_request_string)
Eliq_just_NOW = (response.json())
power_value = Eliq_just_NOW['power']
power_value_int = int (float (power_value))
power_str = ('Power is {} Watts'.format(power_value_int))
print (power_str)
if power_value_int > level_warning:
engine.say(power_str)
engine.say('Warning.')
engine.runAndWait()
else:
engine.say(power_str)
engine.say('Good.')
engine.runAndWait()
|
normal
|
{
"blob_id": "72abba6fa40441ab172bccb9065aaa0af5fefd64",
"index": 7209,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nengine.say('Hello from Eliq.')\nengine.runAndWait()\n<mask token>\nprint(power_str)\nif power_value_int > level_warning:\n engine.say(power_str)\n engine.say('Warning.')\n engine.runAndWait()\nelse:\n engine.say(power_str)\n engine.say('Good.')\n engine.runAndWait()\n",
"step-3": "<mask token>\nengine = pyttsx.init()\nengine.say('Hello from Eliq.')\nengine.runAndWait()\npower_value = 0\npower_value_int = 0\nprompt = 0\nEliq_just_NOW = {}\naccesstoken = 'xxxxxxxxxxxxxxxxxxxxxx'\nlevel_warning = 2000\nEliq_request_string = (\n 'https://my.eliq.io/api/datanow?accesstoken={}&channelid=32217'.format(\n accesstoken))\nresponse = requests.get(Eliq_request_string)\nEliq_just_NOW = response.json()\npower_value = Eliq_just_NOW['power']\npower_value_int = int(float(power_value))\npower_str = 'Power is {} Watts'.format(power_value_int)\nprint(power_str)\nif power_value_int > level_warning:\n engine.say(power_str)\n engine.say('Warning.')\n engine.runAndWait()\nelse:\n engine.say(power_str)\n engine.say('Good.')\n engine.runAndWait()\n",
"step-4": "import requests\nimport json\nimport pyttsx\nengine = pyttsx.init()\nengine.say('Hello from Eliq.')\nengine.runAndWait()\npower_value = 0\npower_value_int = 0\nprompt = 0\nEliq_just_NOW = {}\naccesstoken = 'xxxxxxxxxxxxxxxxxxxxxx'\nlevel_warning = 2000\nEliq_request_string = (\n 'https://my.eliq.io/api/datanow?accesstoken={}&channelid=32217'.format(\n accesstoken))\nresponse = requests.get(Eliq_request_string)\nEliq_just_NOW = response.json()\npower_value = Eliq_just_NOW['power']\npower_value_int = int(float(power_value))\npower_str = 'Power is {} Watts'.format(power_value_int)\nprint(power_str)\nif power_value_int > level_warning:\n engine.say(power_str)\n engine.say('Warning.')\n engine.runAndWait()\nelse:\n engine.say(power_str)\n engine.say('Good.')\n engine.runAndWait()\n",
"step-5": "import requests\nimport json\n\n\nimport pyttsx\nengine = pyttsx.init()\nengine.say('Hello from Eliq.')\nengine.runAndWait()\n\npower_value = 0\npower_value_int = 0\nprompt=0\nEliq_just_NOW ={}\naccesstoken = \"xxxxxxxxxxxxxxxxxxxxxx\"\n\n#Say warning for power use over this limmit in Watts\nlevel_warning = 2000\n\nEliq_request_string = ('https://my.eliq.io/api/datanow?accesstoken={}&channelid=32217'.format(accesstoken))\n\nresponse = requests.get (Eliq_request_string)\n\nEliq_just_NOW = (response.json())\npower_value = Eliq_just_NOW['power']\npower_value_int = int (float (power_value))\npower_str = ('Power is {} Watts'.format(power_value_int)) \n\nprint (power_str)\n\nif power_value_int > level_warning:\n engine.say(power_str)\n engine.say('Warning.')\n engine.runAndWait() \nelse:\n engine.say(power_str)\n engine.say('Good.')\n engine.runAndWait() \n \n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Nest:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def update_pos(self, new_position: Tuple[float, float]) ->None:
"""
If the new position's value is better than the old one, update the nests position and value.
Arguments:
new_position {Tuple[float, float]} -- The new position
"""
new_value = self.__function(new_position)
if new_value < self.__value:
self.__value = new_value
self.__position = new_position
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Nest:
def __init__(self, function, lower_boundary, upper_boundary):
self.__function = function
self.__lower_boundary = lower_boundary
self.__upper_boundary = upper_boundary
self.__position = np.random.uniform(self.__lower_boundary, self.
__upper_boundary, 2)
self.__value = self.__function(self.__position)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def update_pos(self, new_position: Tuple[float, float]) ->None:
"""
If the new position's value is better than the old one, update the nests position and value.
Arguments:
new_position {Tuple[float, float]} -- The new position
"""
new_value = self.__function(new_position)
if new_value < self.__value:
self.__value = new_value
self.__position = new_position
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Nest:
def __init__(self, function, lower_boundary, upper_boundary):
self.__function = function
self.__lower_boundary = lower_boundary
self.__upper_boundary = upper_boundary
self.__position = np.random.uniform(self.__lower_boundary, self.
__upper_boundary, 2)
self.__value = self.__function(self.__position)
<|reserved_special_token_0|>
@property
def value(self) ->float:
return self.__value
def update_pos(self, new_position: Tuple[float, float]) ->None:
"""
If the new position's value is better than the old one, update the nests position and value.
Arguments:
new_position {Tuple[float, float]} -- The new position
"""
new_value = self.__function(new_position)
if new_value < self.__value:
self.__value = new_value
self.__position = new_position
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Nest:
def __init__(self, function, lower_boundary, upper_boundary):
self.__function = function
self.__lower_boundary = lower_boundary
self.__upper_boundary = upper_boundary
self.__position = np.random.uniform(self.__lower_boundary, self.
__upper_boundary, 2)
self.__value = self.__function(self.__position)
@property
def position(self) ->Tuple[float, float]:
return self.__position
@property
def value(self) ->float:
return self.__value
def update_pos(self, new_position: Tuple[float, float]) ->None:
"""
If the new position's value is better than the old one, update the nests position and value.
Arguments:
new_position {Tuple[float, float]} -- The new position
"""
new_value = self.__function(new_position)
if new_value < self.__value:
self.__value = new_value
self.__position = new_position
<|reserved_special_token_1|>
# ------------------------------------------------------------------------------------------------------
# Copyright (c) Leo Hanisch. All rights reserved.
# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.
# ------------------------------------------------------------------------------------------------------
from typing import Tuple
import numpy as np
class Nest:
def __init__(self, function, lower_boundary, upper_boundary):
self.__function = function
self.__lower_boundary = lower_boundary
self.__upper_boundary = upper_boundary
# Randomly create a new nest position
self.__position = np.random.uniform(self.__lower_boundary, self.__upper_boundary, 2)
self.__value = self.__function(self.__position)
@property
def position(self) -> Tuple[float, float]:
return self.__position
@property
def value(self) -> float:
return self.__value
def update_pos(self, new_position: Tuple[float, float]) -> None:
"""
If the new position's value is better than the old one, update the nests position and value.
Arguments:
new_position {Tuple[float, float]} -- The new position
"""
new_value = self.__function(new_position)
if new_value < self.__value:
self.__value = new_value
self.__position = new_position
|
flexible
|
{
"blob_id": "917a291c7b62dee392d7411c3e039949d74d7af8",
"index": 1375,
"step-1": "<mask token>\n\n\nclass Nest:\n <mask token>\n <mask token>\n <mask token>\n\n def update_pos(self, new_position: Tuple[float, float]) ->None:\n \"\"\"\n If the new position's value is better than the old one, update the nests position and value.\n\n Arguments:\n new_position {Tuple[float, float]} -- The new position\n \"\"\"\n new_value = self.__function(new_position)\n if new_value < self.__value:\n self.__value = new_value\n self.__position = new_position\n",
"step-2": "<mask token>\n\n\nclass Nest:\n\n def __init__(self, function, lower_boundary, upper_boundary):\n self.__function = function\n self.__lower_boundary = lower_boundary\n self.__upper_boundary = upper_boundary\n self.__position = np.random.uniform(self.__lower_boundary, self.\n __upper_boundary, 2)\n self.__value = self.__function(self.__position)\n <mask token>\n <mask token>\n\n def update_pos(self, new_position: Tuple[float, float]) ->None:\n \"\"\"\n If the new position's value is better than the old one, update the nests position and value.\n\n Arguments:\n new_position {Tuple[float, float]} -- The new position\n \"\"\"\n new_value = self.__function(new_position)\n if new_value < self.__value:\n self.__value = new_value\n self.__position = new_position\n",
"step-3": "<mask token>\n\n\nclass Nest:\n\n def __init__(self, function, lower_boundary, upper_boundary):\n self.__function = function\n self.__lower_boundary = lower_boundary\n self.__upper_boundary = upper_boundary\n self.__position = np.random.uniform(self.__lower_boundary, self.\n __upper_boundary, 2)\n self.__value = self.__function(self.__position)\n <mask token>\n\n @property\n def value(self) ->float:\n return self.__value\n\n def update_pos(self, new_position: Tuple[float, float]) ->None:\n \"\"\"\n If the new position's value is better than the old one, update the nests position and value.\n\n Arguments:\n new_position {Tuple[float, float]} -- The new position\n \"\"\"\n new_value = self.__function(new_position)\n if new_value < self.__value:\n self.__value = new_value\n self.__position = new_position\n",
"step-4": "<mask token>\n\n\nclass Nest:\n\n def __init__(self, function, lower_boundary, upper_boundary):\n self.__function = function\n self.__lower_boundary = lower_boundary\n self.__upper_boundary = upper_boundary\n self.__position = np.random.uniform(self.__lower_boundary, self.\n __upper_boundary, 2)\n self.__value = self.__function(self.__position)\n\n @property\n def position(self) ->Tuple[float, float]:\n return self.__position\n\n @property\n def value(self) ->float:\n return self.__value\n\n def update_pos(self, new_position: Tuple[float, float]) ->None:\n \"\"\"\n If the new position's value is better than the old one, update the nests position and value.\n\n Arguments:\n new_position {Tuple[float, float]} -- The new position\n \"\"\"\n new_value = self.__function(new_position)\n if new_value < self.__value:\n self.__value = new_value\n self.__position = new_position\n",
"step-5": "# ------------------------------------------------------------------------------------------------------\n# Copyright (c) Leo Hanisch. All rights reserved.\n# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.\n# ------------------------------------------------------------------------------------------------------\n\nfrom typing import Tuple\nimport numpy as np\n\n\nclass Nest:\n def __init__(self, function, lower_boundary, upper_boundary):\n self.__function = function\n self.__lower_boundary = lower_boundary\n self.__upper_boundary = upper_boundary\n\n # Randomly create a new nest position\n self.__position = np.random.uniform(self.__lower_boundary, self.__upper_boundary, 2)\n self.__value = self.__function(self.__position)\n\n @property\n def position(self) -> Tuple[float, float]:\n return self.__position\n\n @property\n def value(self) -> float:\n return self.__value\n\n def update_pos(self, new_position: Tuple[float, float]) -> None:\n \"\"\"\n If the new position's value is better than the old one, update the nests position and value.\n\n Arguments:\n new_position {Tuple[float, float]} -- The new position\n \"\"\"\n\n new_value = self.__function(new_position)\n if new_value < self.__value:\n self.__value = new_value\n self.__position = new_position\n",
"step-ids": [
2,
3,
4,
5,
7
]
}
|
[
2,
3,
4,
5,
7
] |
from mathgraph3D.core.plot import *
from mathgraph3D.core.functions import *
|
normal
|
{
"blob_id": "b58cc08f8f10220373fa78f5d7249bc883b447bf",
"index": 6991,
"step-1": "<mask token>\n",
"step-2": "from mathgraph3D.core.plot import *\nfrom mathgraph3D.core.functions import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from HurdleRace import hurdleRace
from ddt import ddt, data, unpack
import unittest
class test_AppendAndDelete3(unittest.TestCase):
def test_hurdleRace(self):
height = [1, 6, 3, 5, 2]
k = 4
sum_too_high = hurdleRace(k, height)
self.assertEqual(2, sum_too_high)
|
normal
|
{
"blob_id": "ea86a2a9068c316d3efcbcb165a8ef3d3516ba1b",
"index": 4763,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass test_AppendAndDelete3(unittest.TestCase):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass test_AppendAndDelete3(unittest.TestCase):\n\n def test_hurdleRace(self):\n height = [1, 6, 3, 5, 2]\n k = 4\n sum_too_high = hurdleRace(k, height)\n self.assertEqual(2, sum_too_high)\n",
"step-4": "from HurdleRace import hurdleRace\nfrom ddt import ddt, data, unpack\nimport unittest\n\n\nclass test_AppendAndDelete3(unittest.TestCase):\n\n def test_hurdleRace(self):\n height = [1, 6, 3, 5, 2]\n k = 4\n sum_too_high = hurdleRace(k, height)\n self.assertEqual(2, sum_too_high)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import cv2 as cv
from threading import Thread
class Reader(Thread):
def __init__(self, width, height, device=0):
super().__init__(daemon=True)
self._stream = cv.VideoCapture(device)
self._stream.set(cv.CAP_PROP_FRAME_WIDTH, width)
self._stream.set(cv.CAP_PROP_FRAME_HEIGHT, height)
self._frame = None
self.start()
def __del__(self):
self._frame = None
self._stream.release()
def run(self):
while True:
ret, frame = self._stream.read()
if not ret:
self._frame = None
break
self._frame = frame
def read(self):
return self._frame
|
normal
|
{
"blob_id": "73bf31e43394c3f922b00b2cfcd5d88cc0e01094",
"index": 2339,
"step-1": "<mask token>\n\n\nclass Reader(Thread):\n <mask token>\n\n def __del__(self):\n self._frame = None\n self._stream.release()\n <mask token>\n\n def read(self):\n return self._frame\n",
"step-2": "<mask token>\n\n\nclass Reader(Thread):\n <mask token>\n\n def __del__(self):\n self._frame = None\n self._stream.release()\n\n def run(self):\n while True:\n ret, frame = self._stream.read()\n if not ret:\n self._frame = None\n break\n self._frame = frame\n\n def read(self):\n return self._frame\n",
"step-3": "<mask token>\n\n\nclass Reader(Thread):\n\n def __init__(self, width, height, device=0):\n super().__init__(daemon=True)\n self._stream = cv.VideoCapture(device)\n self._stream.set(cv.CAP_PROP_FRAME_WIDTH, width)\n self._stream.set(cv.CAP_PROP_FRAME_HEIGHT, height)\n self._frame = None\n self.start()\n\n def __del__(self):\n self._frame = None\n self._stream.release()\n\n def run(self):\n while True:\n ret, frame = self._stream.read()\n if not ret:\n self._frame = None\n break\n self._frame = frame\n\n def read(self):\n return self._frame\n",
"step-4": "import cv2 as cv\nfrom threading import Thread\n\n\nclass Reader(Thread):\n\n def __init__(self, width, height, device=0):\n super().__init__(daemon=True)\n self._stream = cv.VideoCapture(device)\n self._stream.set(cv.CAP_PROP_FRAME_WIDTH, width)\n self._stream.set(cv.CAP_PROP_FRAME_HEIGHT, height)\n self._frame = None\n self.start()\n\n def __del__(self):\n self._frame = None\n self._stream.release()\n\n def run(self):\n while True:\n ret, frame = self._stream.read()\n if not ret:\n self._frame = None\n break\n self._frame = frame\n\n def read(self):\n return self._frame\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
import json
import os
import ipdb
from tqdm import tqdm
import argparse
from os import listdir
from os.path import isfile, join
import pickle
import joblib
from collections import Counter
from shutil import copyfile
import networkx as nx
import spacy
import nltk
import numpy as np
nltk.download('stopwords')
nltk_stopwords = nltk.corpus.stopwords.words('english')
data_path = '/home/joey.bose/dblp_papers_v11.txt'
save_path_base = '/home/joey.bose/aminer_data/'
load_path_rank_base = '/home/joey.bose/aminer_data_ranked/fos/'
save_path_graph_base = '/home/joey.bose/aminer_data_ranked/graphs/'
raw_save_path = '/home/joey.bose/aminer_data_ranked/aminer_raw.txt'
spacy_nlp = spacy.load('en_core_web_sm')
glove_path = '/home/joey.bose/docker_temp/meta-graph/meta-graph/glove.840B.300d.txt'
class Lang:
def __init__(self):
super(Lang, self).__init__()
self.word2index = {}
self.word2count = {}
self.index2word = {}
self.n_words = 0 # Count default tokens
def index_words(self, sentence):
for word in sentence:
self.index_word(word)
def index_word(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
def gen_embeddings(vocab, file, emb_size, emb_dim):
"""
Generate an initial embedding matrix for word_dict.
If an embedding file is not given or a word is not in the embedding file,
a randomly initialized vector will be used.
"""
# embeddings = np.random.randn(vocab.n_words, emb_size) * 0.01
embeddings = np.zeros((vocab.n_words, emb_size))
print('Embeddings: %d x %d' % (vocab.n_words, emb_size))
if file is not None:
print('Loading embedding file: %s' % file)
pre_trained = 0
for line in open(file).readlines():
sp = line.split()
if(len(sp) == emb_dim + 1):
if sp[0] in vocab.word2index:
pre_trained += 1
embeddings[vocab.word2index[sp[0]]] = [float(x) for x in sp[1:]]
else:
print(sp[0])
print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained * 100.0 / vocab.n_words))
return embeddings
def process_raw_abstracts(vocab):
with open(raw_save_path,"r",encoding="utf8") as f:
for line in tqdm(f,total=13304586):
tokens = nltk.tokenize.word_tokenize(line)
tokens = [token for token in tokens if not token in nltk_stopwords]
vocab.index_words(tokens)
def get_node_embed(text,vocab):
sum_embed = 0
for word in text:
embed = embeddings[vocab.word2index[word]]
sum_embed += embed
return sum_embed
def check_graph(G):
total_nodes = len(G.nodes)
no_emb_nodes = 0
nodes_to_delete = []
for node_str in G.nodes:
try:
emb = G.node[node_str]['emb']
except:
no_emb_nodes += 1
nodes_to_delete.append(node_str)
print("%d Nodes and %d missing nodes in G " %(total_nodes, no_emb_nodes))
G.remove_nodes_from(nodes_to_delete)
return G
def process_line(G, line, vocab=None):
try:
fos = data['fos']
abstract = data['indexed_abstract']
paper_id = data['id']
references_id = data['references']
text = list(abstract['InvertedIndex'].keys())
text =" ".join(text)
if args.process_raw:
with open(raw_save_path,"a+") as f:
f.write(text)
f.write('\n')
'''Create Node Embedding if Node doesn't exist '''
if vocab is not None:
tokens = nltk.tokenize.word_tokenize(text)
tokens = [token for token in tokens if not token in nltk_stopwords]
node_emb = get_node_embed(tokens,vocab)
for field in fos:
name = field['name']
for ref in references_id:
G.add_edge(paper_id, ref)
G.node[paper_id]['emb'] = node_emb
except:
return G
return G
if __name__ == '__main__':
"""
Create Aminer-Citation v-11 Graphs
"""
parser = argparse.ArgumentParser()
parser.add_argument('--topk', type=int, default='100')
parser.add_argument("--process_raw", action="store_true", default=False,
help='Process Raw Data')
parser.add_argument("--make_vocab", action="store_true", default=False,
help='Create Vocab from the raw abstract data')
args = parser.parse_args()
onlyfiles = [f for f in listdir(load_path_rank_base) if isfile(join(load_path_rank_base, f))]
vocab = Lang()
if args.make_vocab:
process_raw_abstracts(vocab)
joblib.dump(vocab, "aminer_100_vocab.pkl")
print("Done generating vocab")
embeddings = gen_embeddings(vocab,file=glove_path,emb_size=300,emb_dim=300)
joblib.dump(embeddings, "aminer_100_embed.pkl")
print("Done")
exit()
else:
vocab = joblib.load("aminer_100_vocab.pkl")
embeddings = joblib.load("aminer_100_embed.pkl")
for i, file_ in tqdm(enumerate(onlyfiles),total=len(onlyfiles)):
file_path = load_path_rank_base + file_
G = nx.Graph()
with open(file_path,'r', encoding="utf8") as f:
for line in f:
data = json.loads(line)
G = process_line(G,data,vocab)
G = check_graph(G)
print("%s has %d Nodes and %d edges" %(file_,len(G),len(G.edges)))
if not os.path.exists(save_path_graph_base):
os.mkdir(save_path_graph_base)
save_path_graph = save_path_graph_base + file_.split('.')[0] + '_graph.pkl'
nx.write_gpickle(G,save_path_graph)
|
normal
|
{
"blob_id": "2da7892722afde5a6f87e3bd6d5763c895ac96c9",
"index": 284,
"step-1": "<mask token>\n\n\nclass Lang:\n\n def __init__(self):\n super(Lang, self).__init__()\n self.word2index = {}\n self.word2count = {}\n self.index2word = {}\n self.n_words = 0\n\n def index_words(self, sentence):\n for word in sentence:\n self.index_word(word)\n\n def index_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\n<mask token>\n\n\ndef check_graph(G):\n total_nodes = len(G.nodes)\n no_emb_nodes = 0\n nodes_to_delete = []\n for node_str in G.nodes:\n try:\n emb = G.node[node_str]['emb']\n except:\n no_emb_nodes += 1\n nodes_to_delete.append(node_str)\n print('%d Nodes and %d missing nodes in G ' % (total_nodes, no_emb_nodes))\n G.remove_nodes_from(nodes_to_delete)\n return G\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Lang:\n\n def __init__(self):\n super(Lang, self).__init__()\n self.word2index = {}\n self.word2count = {}\n self.index2word = {}\n self.n_words = 0\n\n def index_words(self, sentence):\n for word in sentence:\n self.index_word(word)\n\n def index_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\ndef gen_embeddings(vocab, file, emb_size, emb_dim):\n \"\"\"\n Generate an initial embedding matrix for word_dict.\n If an embedding file is not given or a word is not in the embedding file,\n a randomly initialized vector will be used.\n \"\"\"\n embeddings = np.zeros((vocab.n_words, emb_size))\n print('Embeddings: %d x %d' % (vocab.n_words, emb_size))\n if file is not None:\n print('Loading embedding file: %s' % file)\n pre_trained = 0\n for line in open(file).readlines():\n sp = line.split()\n if len(sp) == emb_dim + 1:\n if sp[0] in vocab.word2index:\n pre_trained += 1\n embeddings[vocab.word2index[sp[0]]] = [float(x) for x in\n sp[1:]]\n else:\n print(sp[0])\n print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained * \n 100.0 / vocab.n_words))\n return embeddings\n\n\ndef process_raw_abstracts(vocab):\n with open(raw_save_path, 'r', encoding='utf8') as f:\n for line in tqdm(f, total=13304586):\n tokens = nltk.tokenize.word_tokenize(line)\n tokens = [token for token in tokens if not token in nltk_stopwords]\n vocab.index_words(tokens)\n\n\n<mask token>\n\n\ndef check_graph(G):\n total_nodes = len(G.nodes)\n no_emb_nodes = 0\n nodes_to_delete = []\n for node_str in G.nodes:\n try:\n emb = G.node[node_str]['emb']\n except:\n no_emb_nodes += 1\n nodes_to_delete.append(node_str)\n print('%d Nodes and %d missing nodes in G ' % (total_nodes, no_emb_nodes))\n G.remove_nodes_from(nodes_to_delete)\n return G\n\n\ndef process_line(G, line, vocab=None):\n try:\n fos = data['fos']\n abstract = data['indexed_abstract']\n paper_id = data['id']\n references_id = data['references']\n text = list(abstract['InvertedIndex'].keys())\n text = ' '.join(text)\n if args.process_raw:\n with open(raw_save_path, 'a+') as f:\n f.write(text)\n f.write('\\n')\n \"\"\"Create Node Embedding if Node doesn't exist \"\"\"\n if vocab is not None:\n tokens = nltk.tokenize.word_tokenize(text)\n tokens = [token for token in tokens if not token in nltk_stopwords]\n node_emb = get_node_embed(tokens, vocab)\n for field in fos:\n name = field['name']\n for ref in references_id:\n G.add_edge(paper_id, ref)\n G.node[paper_id]['emb'] = node_emb\n except:\n return G\n return G\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Lang:\n\n def __init__(self):\n super(Lang, self).__init__()\n self.word2index = {}\n self.word2count = {}\n self.index2word = {}\n self.n_words = 0\n\n def index_words(self, sentence):\n for word in sentence:\n self.index_word(word)\n\n def index_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\ndef gen_embeddings(vocab, file, emb_size, emb_dim):\n \"\"\"\n Generate an initial embedding matrix for word_dict.\n If an embedding file is not given or a word is not in the embedding file,\n a randomly initialized vector will be used.\n \"\"\"\n embeddings = np.zeros((vocab.n_words, emb_size))\n print('Embeddings: %d x %d' % (vocab.n_words, emb_size))\n if file is not None:\n print('Loading embedding file: %s' % file)\n pre_trained = 0\n for line in open(file).readlines():\n sp = line.split()\n if len(sp) == emb_dim + 1:\n if sp[0] in vocab.word2index:\n pre_trained += 1\n embeddings[vocab.word2index[sp[0]]] = [float(x) for x in\n sp[1:]]\n else:\n print(sp[0])\n print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained * \n 100.0 / vocab.n_words))\n return embeddings\n\n\ndef process_raw_abstracts(vocab):\n with open(raw_save_path, 'r', encoding='utf8') as f:\n for line in tqdm(f, total=13304586):\n tokens = nltk.tokenize.word_tokenize(line)\n tokens = [token for token in tokens if not token in nltk_stopwords]\n vocab.index_words(tokens)\n\n\ndef get_node_embed(text, vocab):\n sum_embed = 0\n for word in text:\n embed = embeddings[vocab.word2index[word]]\n sum_embed += embed\n return sum_embed\n\n\ndef check_graph(G):\n total_nodes = len(G.nodes)\n no_emb_nodes = 0\n nodes_to_delete = []\n for node_str in G.nodes:\n try:\n emb = G.node[node_str]['emb']\n except:\n no_emb_nodes += 1\n nodes_to_delete.append(node_str)\n print('%d Nodes and %d missing nodes in G ' % (total_nodes, no_emb_nodes))\n G.remove_nodes_from(nodes_to_delete)\n return G\n\n\ndef process_line(G, line, vocab=None):\n try:\n fos = data['fos']\n abstract = data['indexed_abstract']\n paper_id = data['id']\n references_id = data['references']\n text = list(abstract['InvertedIndex'].keys())\n text = ' '.join(text)\n if args.process_raw:\n with open(raw_save_path, 'a+') as f:\n f.write(text)\n f.write('\\n')\n \"\"\"Create Node Embedding if Node doesn't exist \"\"\"\n if vocab is not None:\n tokens = nltk.tokenize.word_tokenize(text)\n tokens = [token for token in tokens if not token in nltk_stopwords]\n node_emb = get_node_embed(tokens, vocab)\n for field in fos:\n name = field['name']\n for ref in references_id:\n G.add_edge(paper_id, ref)\n G.node[paper_id]['emb'] = node_emb\n except:\n return G\n return G\n\n\n<mask token>\n",
"step-4": "<mask token>\nnltk.download('stopwords')\nnltk_stopwords = nltk.corpus.stopwords.words('english')\ndata_path = '/home/joey.bose/dblp_papers_v11.txt'\nsave_path_base = '/home/joey.bose/aminer_data/'\nload_path_rank_base = '/home/joey.bose/aminer_data_ranked/fos/'\nsave_path_graph_base = '/home/joey.bose/aminer_data_ranked/graphs/'\nraw_save_path = '/home/joey.bose/aminer_data_ranked/aminer_raw.txt'\nspacy_nlp = spacy.load('en_core_web_sm')\nglove_path = (\n '/home/joey.bose/docker_temp/meta-graph/meta-graph/glove.840B.300d.txt')\n\n\nclass Lang:\n\n def __init__(self):\n super(Lang, self).__init__()\n self.word2index = {}\n self.word2count = {}\n self.index2word = {}\n self.n_words = 0\n\n def index_words(self, sentence):\n for word in sentence:\n self.index_word(word)\n\n def index_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\ndef gen_embeddings(vocab, file, emb_size, emb_dim):\n \"\"\"\n Generate an initial embedding matrix for word_dict.\n If an embedding file is not given or a word is not in the embedding file,\n a randomly initialized vector will be used.\n \"\"\"\n embeddings = np.zeros((vocab.n_words, emb_size))\n print('Embeddings: %d x %d' % (vocab.n_words, emb_size))\n if file is not None:\n print('Loading embedding file: %s' % file)\n pre_trained = 0\n for line in open(file).readlines():\n sp = line.split()\n if len(sp) == emb_dim + 1:\n if sp[0] in vocab.word2index:\n pre_trained += 1\n embeddings[vocab.word2index[sp[0]]] = [float(x) for x in\n sp[1:]]\n else:\n print(sp[0])\n print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained * \n 100.0 / vocab.n_words))\n return embeddings\n\n\ndef process_raw_abstracts(vocab):\n with open(raw_save_path, 'r', encoding='utf8') as f:\n for line in tqdm(f, total=13304586):\n tokens = nltk.tokenize.word_tokenize(line)\n tokens = [token for token in tokens if not token in nltk_stopwords]\n vocab.index_words(tokens)\n\n\ndef get_node_embed(text, vocab):\n sum_embed = 0\n for word in text:\n embed = embeddings[vocab.word2index[word]]\n sum_embed += embed\n return sum_embed\n\n\ndef check_graph(G):\n total_nodes = len(G.nodes)\n no_emb_nodes = 0\n nodes_to_delete = []\n for node_str in G.nodes:\n try:\n emb = G.node[node_str]['emb']\n except:\n no_emb_nodes += 1\n nodes_to_delete.append(node_str)\n print('%d Nodes and %d missing nodes in G ' % (total_nodes, no_emb_nodes))\n G.remove_nodes_from(nodes_to_delete)\n return G\n\n\ndef process_line(G, line, vocab=None):\n try:\n fos = data['fos']\n abstract = data['indexed_abstract']\n paper_id = data['id']\n references_id = data['references']\n text = list(abstract['InvertedIndex'].keys())\n text = ' '.join(text)\n if args.process_raw:\n with open(raw_save_path, 'a+') as f:\n f.write(text)\n f.write('\\n')\n \"\"\"Create Node Embedding if Node doesn't exist \"\"\"\n if vocab is not None:\n tokens = nltk.tokenize.word_tokenize(text)\n tokens = [token for token in tokens if not token in nltk_stopwords]\n node_emb = get_node_embed(tokens, vocab)\n for field in fos:\n name = field['name']\n for ref in references_id:\n G.add_edge(paper_id, ref)\n G.node[paper_id]['emb'] = node_emb\n except:\n return G\n return G\n\n\nif __name__ == '__main__':\n \"\"\"\n Create Aminer-Citation v-11 Graphs\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--topk', type=int, default='100')\n parser.add_argument('--process_raw', action='store_true', default=False,\n help='Process Raw Data')\n parser.add_argument('--make_vocab', action='store_true', default=False,\n help='Create Vocab from the raw abstract data')\n args = parser.parse_args()\n onlyfiles = [f for f in listdir(load_path_rank_base) if isfile(join(\n load_path_rank_base, f))]\n vocab = Lang()\n if args.make_vocab:\n process_raw_abstracts(vocab)\n joblib.dump(vocab, 'aminer_100_vocab.pkl')\n print('Done generating vocab')\n embeddings = gen_embeddings(vocab, file=glove_path, emb_size=300,\n emb_dim=300)\n joblib.dump(embeddings, 'aminer_100_embed.pkl')\n print('Done')\n exit()\n else:\n vocab = joblib.load('aminer_100_vocab.pkl')\n embeddings = joblib.load('aminer_100_embed.pkl')\n for i, file_ in tqdm(enumerate(onlyfiles), total=len(onlyfiles)):\n file_path = load_path_rank_base + file_\n G = nx.Graph()\n with open(file_path, 'r', encoding='utf8') as f:\n for line in f:\n data = json.loads(line)\n G = process_line(G, data, vocab)\n G = check_graph(G)\n print('%s has %d Nodes and %d edges' % (file_, len(G), len(G.edges)))\n if not os.path.exists(save_path_graph_base):\n os.mkdir(save_path_graph_base)\n save_path_graph = save_path_graph_base + file_.split('.')[0\n ] + '_graph.pkl'\n nx.write_gpickle(G, save_path_graph)\n",
"step-5": "import json\nimport os\nimport ipdb\nfrom tqdm import tqdm\nimport argparse\nfrom os import listdir\nfrom os.path import isfile, join\nimport pickle\nimport joblib\nfrom collections import Counter\nfrom shutil import copyfile\nimport networkx as nx\nimport spacy\nimport nltk\nimport numpy as np\n\nnltk.download('stopwords')\nnltk_stopwords = nltk.corpus.stopwords.words('english')\ndata_path = '/home/joey.bose/dblp_papers_v11.txt'\nsave_path_base = '/home/joey.bose/aminer_data/'\nload_path_rank_base = '/home/joey.bose/aminer_data_ranked/fos/'\nsave_path_graph_base = '/home/joey.bose/aminer_data_ranked/graphs/'\nraw_save_path = '/home/joey.bose/aminer_data_ranked/aminer_raw.txt'\nspacy_nlp = spacy.load('en_core_web_sm')\nglove_path = '/home/joey.bose/docker_temp/meta-graph/meta-graph/glove.840B.300d.txt'\n\nclass Lang:\n def __init__(self):\n super(Lang, self).__init__()\n self.word2index = {}\n self.word2count = {}\n self.index2word = {}\n self.n_words = 0 # Count default tokens\n\n def index_words(self, sentence):\n for word in sentence:\n self.index_word(word)\n\n def index_word(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\ndef gen_embeddings(vocab, file, emb_size, emb_dim):\n \"\"\"\n Generate an initial embedding matrix for word_dict.\n If an embedding file is not given or a word is not in the embedding file,\n a randomly initialized vector will be used.\n \"\"\"\n # embeddings = np.random.randn(vocab.n_words, emb_size) * 0.01\n embeddings = np.zeros((vocab.n_words, emb_size))\n print('Embeddings: %d x %d' % (vocab.n_words, emb_size))\n if file is not None:\n print('Loading embedding file: %s' % file)\n pre_trained = 0\n for line in open(file).readlines():\n sp = line.split()\n if(len(sp) == emb_dim + 1):\n if sp[0] in vocab.word2index:\n pre_trained += 1\n embeddings[vocab.word2index[sp[0]]] = [float(x) for x in sp[1:]]\n else:\n print(sp[0])\n print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained * 100.0 / vocab.n_words))\n return embeddings\n\ndef process_raw_abstracts(vocab):\n with open(raw_save_path,\"r\",encoding=\"utf8\") as f:\n for line in tqdm(f,total=13304586):\n tokens = nltk.tokenize.word_tokenize(line)\n tokens = [token for token in tokens if not token in nltk_stopwords]\n vocab.index_words(tokens)\n\ndef get_node_embed(text,vocab):\n sum_embed = 0\n for word in text:\n embed = embeddings[vocab.word2index[word]]\n sum_embed += embed\n return sum_embed\ndef check_graph(G):\n total_nodes = len(G.nodes)\n no_emb_nodes = 0\n nodes_to_delete = []\n for node_str in G.nodes:\n try:\n emb = G.node[node_str]['emb']\n except:\n no_emb_nodes += 1\n nodes_to_delete.append(node_str)\n print(\"%d Nodes and %d missing nodes in G \" %(total_nodes, no_emb_nodes))\n G.remove_nodes_from(nodes_to_delete)\n return G\n\ndef process_line(G, line, vocab=None):\n try:\n fos = data['fos']\n abstract = data['indexed_abstract']\n paper_id = data['id']\n references_id = data['references']\n text = list(abstract['InvertedIndex'].keys())\n text =\" \".join(text)\n if args.process_raw:\n with open(raw_save_path,\"a+\") as f:\n f.write(text)\n f.write('\\n')\n\n '''Create Node Embedding if Node doesn't exist '''\n if vocab is not None:\n tokens = nltk.tokenize.word_tokenize(text)\n tokens = [token for token in tokens if not token in nltk_stopwords]\n node_emb = get_node_embed(tokens,vocab)\n\n for field in fos:\n name = field['name']\n for ref in references_id:\n G.add_edge(paper_id, ref)\n G.node[paper_id]['emb'] = node_emb\n except:\n return G\n\n return G\n\nif __name__ == '__main__':\n \"\"\"\n Create Aminer-Citation v-11 Graphs\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--topk', type=int, default='100')\n parser.add_argument(\"--process_raw\", action=\"store_true\", default=False,\n\t\thelp='Process Raw Data')\n parser.add_argument(\"--make_vocab\", action=\"store_true\", default=False,\n\t\thelp='Create Vocab from the raw abstract data')\n args = parser.parse_args()\n onlyfiles = [f for f in listdir(load_path_rank_base) if isfile(join(load_path_rank_base, f))]\n vocab = Lang()\n if args.make_vocab:\n process_raw_abstracts(vocab)\n joblib.dump(vocab, \"aminer_100_vocab.pkl\")\n print(\"Done generating vocab\")\n embeddings = gen_embeddings(vocab,file=glove_path,emb_size=300,emb_dim=300)\n joblib.dump(embeddings, \"aminer_100_embed.pkl\")\n print(\"Done\")\n exit()\n else:\n vocab = joblib.load(\"aminer_100_vocab.pkl\")\n embeddings = joblib.load(\"aminer_100_embed.pkl\")\n\n for i, file_ in tqdm(enumerate(onlyfiles),total=len(onlyfiles)):\n file_path = load_path_rank_base + file_\n G = nx.Graph()\n with open(file_path,'r', encoding=\"utf8\") as f:\n for line in f:\n data = json.loads(line)\n G = process_line(G,data,vocab)\n G = check_graph(G)\n print(\"%s has %d Nodes and %d edges\" %(file_,len(G),len(G.edges)))\n if not os.path.exists(save_path_graph_base):\n os.mkdir(save_path_graph_base)\n save_path_graph = save_path_graph_base + file_.split('.')[0] + '_graph.pkl'\n nx.write_gpickle(G,save_path_graph)\n\n",
"step-ids": [
5,
8,
9,
11,
13
]
}
|
[
5,
8,
9,
11,
13
] |
o = input()
v = []
s = 0
for i in range(12):
col = []
for j in range(12):
col.append(float(input()))
v.append(col)
a = 1
for i in range(1, 12):
for j in range(a):
s += v[i][j]
a+=1
if o == 'S':
print("%.1f"%s)
if o == 'M':
print("%.1f"%(s/66))
|
normal
|
{
"blob_id": "0df20722fba6223c9d4fc9f72bfb399b479db6ac",
"index": 7917,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(12):\n col = []\n for j in range(12):\n col.append(float(input()))\n v.append(col)\n<mask token>\nfor i in range(1, 12):\n for j in range(a):\n s += v[i][j]\n a += 1\nif o == 'S':\n print('%.1f' % s)\nif o == 'M':\n print('%.1f' % (s / 66))\n",
"step-3": "o = input()\nv = []\ns = 0\nfor i in range(12):\n col = []\n for j in range(12):\n col.append(float(input()))\n v.append(col)\na = 1\nfor i in range(1, 12):\n for j in range(a):\n s += v[i][j]\n a += 1\nif o == 'S':\n print('%.1f' % s)\nif o == 'M':\n print('%.1f' % (s / 66))\n",
"step-4": "o = input()\nv = []\ns = 0\nfor i in range(12):\n col = []\n for j in range(12):\n col.append(float(input()))\n v.append(col)\na = 1\nfor i in range(1, 12):\n for j in range(a):\n s += v[i][j]\n a+=1\nif o == 'S':\n print(\"%.1f\"%s)\nif o == 'M':\n print(\"%.1f\"%(s/66))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Idea:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def cmd(self):
return 'intellij-idea-ultimate-edition %s' % self.folder
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Idea:
def __init__(self, folder):
self.folder = folder
<|reserved_special_token_0|>
def cmd(self):
return 'intellij-idea-ultimate-edition %s' % self.folder
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Idea:
def __init__(self, folder):
self.folder = folder
def name(self):
return 'jetbrains-idea'
def cmd(self):
return 'intellij-idea-ultimate-edition %s' % self.folder
<|reserved_special_token_1|>
import os
class Idea:
def __init__(self, folder):
self.folder = folder
def name(self):
return 'jetbrains-idea'
def cmd(self):
return 'intellij-idea-ultimate-edition %s' % self.folder
<|reserved_special_token_1|>
import os
class Idea:
def __init__(self, folder):
self.folder = folder
def name(self):
return "jetbrains-idea"
def cmd(self):
return "intellij-idea-ultimate-edition %s" % self.folder
|
flexible
|
{
"blob_id": "90fc6e37e3988a2014c66913db61749509db2d53",
"index": 1036,
"step-1": "<mask token>\n\n\nclass Idea:\n <mask token>\n <mask token>\n\n def cmd(self):\n return 'intellij-idea-ultimate-edition %s' % self.folder\n",
"step-2": "<mask token>\n\n\nclass Idea:\n\n def __init__(self, folder):\n self.folder = folder\n <mask token>\n\n def cmd(self):\n return 'intellij-idea-ultimate-edition %s' % self.folder\n",
"step-3": "<mask token>\n\n\nclass Idea:\n\n def __init__(self, folder):\n self.folder = folder\n\n def name(self):\n return 'jetbrains-idea'\n\n def cmd(self):\n return 'intellij-idea-ultimate-edition %s' % self.folder\n",
"step-4": "import os\n\n\nclass Idea:\n\n def __init__(self, folder):\n self.folder = folder\n\n def name(self):\n return 'jetbrains-idea'\n\n def cmd(self):\n return 'intellij-idea-ultimate-edition %s' % self.folder\n",
"step-5": "import os\n\nclass Idea:\n def __init__(self, folder):\n self.folder = folder\n\n def name(self):\n return \"jetbrains-idea\"\n\n def cmd(self):\n return \"intellij-idea-ultimate-edition %s\" % self.folder\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
files = ['arria2_ddr3.qip']
<|reserved_special_token_1|>
files = [
"arria2_ddr3.qip"
]
|
flexible
|
{
"blob_id": "cad881dd29be16de8375b3ce6e4a437562a05097",
"index": 5426,
"step-1": "<mask token>\n",
"step-2": "files = ['arria2_ddr3.qip']\n",
"step-3": "files = [\n \"arria2_ddr3.qip\"\n ]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import json
import logging
logger = logging.getLogger(__name__)
from django.db.models import Q
from channels_api.bindings import ResourceBinding
from .models import LetterTransaction, UserLetter, TeamWord, Dictionary
from .serializers import LetterTransactionSerializer, UserLetterSerializer, TeamWordSerializer
class TeamWordBinding(ResourceBinding):
model = TeamWord
stream = "teamwords"
serializer_class = TeamWordSerializer
def get_queryset(self):
return TeamWord.objects.filter(user__group__team=self.user.group.team)
@classmethod
def group_names(self, instance, action):
return [str(instance.user.group.team)]
def has_permission(self, user, action, pk):
logger.debug("TW has_permission {} {} {}".format(user, action, pk))
if action in ['update', 'delete']:
return False
if action == 'create':
payload = json.loads(self.message.content['text'])
if 'data' not in payload or 'word' not in payload['data']:
logger.debug("Possibly malicious malformed TeamWord from {}".format(self.user.username))
return False
word = payload['data']['word']
word_letters = set(word.lower())
if len(word_letters) == 0:
return False
user = self.user
user_letters = set()
for letter in UserLetter.objects.filter(user=user):
user_letters.add(letter.letter.lower())
for letter in LetterTransaction.objects.filter(borrower=user, approved=True):
user_letters.add(letter.letter.lower())
if not word_letters.issubset(user_letters):
return False
team_words = set()
for tword in self.get_queryset():
team_words.add(tword.word)
if word in team_words:
return False
try:
wordObj = Dictionary.objects.get(word=word)
except Exception as e:
return False
return True
# allow list, retrieve, subscribe
return True
class UserLetterBinding(ResourceBinding):
model = UserLetter
stream = "userletters"
serializer_class = UserLetterSerializer
def get_queryset(self):
queries = Q(user=self.user)
for profile in self.message.user.group.profile_set.all():
queries |= Q(user=profile.user)
return UserLetter.objects.filter(queries)
@classmethod
def group_names(self, instance, action):
logger.debug(str(instance))
return [instance.user.username + "solo"]
def has_permission(self, user, action, pk):
logger.debug("UL has_permission {} {} {}".format(user, action, pk))
if action in ['create', 'update', 'delete']:
return False
# allow list, retrieve, subscribe
return True
class LetterTransactionBinding(ResourceBinding):
model = LetterTransaction
stream = "lettertransactions"
serializer_class = LetterTransactionSerializer
def get_queryset(self):
return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(letter__user=self.user))
@classmethod
def group_names(self, instance, action):
# Send this to only the borrower and lender
return [instance.borrower.username + "solo", instance.letter.user.username + "solo"]
def has_permission(self, user, action, pk):
logger.debug("TR has_permission {} {} {}".format(user, action, self.message.content['text']))
if action == "delete":
return False
if action == "create" or action == "update":
payload = json.loads(self.message.content['text'])
if 'data' not in payload or 'letter' not in payload['data']:
logger.debug("Possibly malicious malformed LetterTransaction from {}".format(self.user.username))
return False
ul = UserLetter.objects.get(pk=payload['data']['letter'])
# If this UserLetter is not owned by a friend, permission denied
if ul.user.profile not in self.user.group.profile_set.all():
logger.debug("Malicious LetterTransaction creation suspected by {}".format(self.user.username))
return False
# allow list, retrieve, subscribe, and legitimate create
return True
|
normal
|
{
"blob_id": "c2e0f2eda6ef44a52ee4e192b8eb71bde0a69bff",
"index": 8954,
"step-1": "<mask token>\n\n\nclass TeamWordBinding(ResourceBinding):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def group_names(self, instance, action):\n return [str(instance.user.group.team)]\n\n def has_permission(self, user, action, pk):\n logger.debug('TW has_permission {} {} {}'.format(user, action, pk))\n if action in ['update', 'delete']:\n return False\n if action == 'create':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'word' not in payload['data']:\n logger.debug('Possibly malicious malformed TeamWord from {}'\n .format(self.user.username))\n return False\n word = payload['data']['word']\n word_letters = set(word.lower())\n if len(word_letters) == 0:\n return False\n user = self.user\n user_letters = set()\n for letter in UserLetter.objects.filter(user=user):\n user_letters.add(letter.letter.lower())\n for letter in LetterTransaction.objects.filter(borrower=user,\n approved=True):\n user_letters.add(letter.letter.lower())\n if not word_letters.issubset(user_letters):\n return False\n team_words = set()\n for tword in self.get_queryset():\n team_words.add(tword.word)\n if word in team_words:\n return False\n try:\n wordObj = Dictionary.objects.get(word=word)\n except Exception as e:\n return False\n return True\n return True\n\n\nclass UserLetterBinding(ResourceBinding):\n model = UserLetter\n stream = 'userletters'\n serializer_class = UserLetterSerializer\n\n def get_queryset(self):\n queries = Q(user=self.user)\n for profile in self.message.user.group.profile_set.all():\n queries |= Q(user=profile.user)\n return UserLetter.objects.filter(queries)\n\n @classmethod\n def group_names(self, instance, action):\n logger.debug(str(instance))\n return [instance.user.username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('UL has_permission {} {} {}'.format(user, action, pk))\n if action in ['create', 'update', 'delete']:\n return False\n return True\n\n\nclass LetterTransactionBinding(ResourceBinding):\n model = LetterTransaction\n stream = 'lettertransactions'\n serializer_class = LetterTransactionSerializer\n\n def get_queryset(self):\n return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(\n letter__user=self.user))\n\n @classmethod\n def group_names(self, instance, action):\n return [instance.borrower.username + 'solo', instance.letter.user.\n username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('TR has_permission {} {} {}'.format(user, action, self\n .message.content['text']))\n if action == 'delete':\n return False\n if action == 'create' or action == 'update':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'letter' not in payload['data']:\n logger.debug(\n 'Possibly malicious malformed LetterTransaction from {}'\n .format(self.user.username))\n return False\n ul = UserLetter.objects.get(pk=payload['data']['letter'])\n if ul.user.profile not in self.user.group.profile_set.all():\n logger.debug(\n 'Malicious LetterTransaction creation suspected by {}'.\n format(self.user.username))\n return False\n return True\n",
"step-2": "<mask token>\n\n\nclass TeamWordBinding(ResourceBinding):\n <mask token>\n <mask token>\n <mask token>\n\n def get_queryset(self):\n return TeamWord.objects.filter(user__group__team=self.user.group.team)\n\n @classmethod\n def group_names(self, instance, action):\n return [str(instance.user.group.team)]\n\n def has_permission(self, user, action, pk):\n logger.debug('TW has_permission {} {} {}'.format(user, action, pk))\n if action in ['update', 'delete']:\n return False\n if action == 'create':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'word' not in payload['data']:\n logger.debug('Possibly malicious malformed TeamWord from {}'\n .format(self.user.username))\n return False\n word = payload['data']['word']\n word_letters = set(word.lower())\n if len(word_letters) == 0:\n return False\n user = self.user\n user_letters = set()\n for letter in UserLetter.objects.filter(user=user):\n user_letters.add(letter.letter.lower())\n for letter in LetterTransaction.objects.filter(borrower=user,\n approved=True):\n user_letters.add(letter.letter.lower())\n if not word_letters.issubset(user_letters):\n return False\n team_words = set()\n for tword in self.get_queryset():\n team_words.add(tword.word)\n if word in team_words:\n return False\n try:\n wordObj = Dictionary.objects.get(word=word)\n except Exception as e:\n return False\n return True\n return True\n\n\nclass UserLetterBinding(ResourceBinding):\n model = UserLetter\n stream = 'userletters'\n serializer_class = UserLetterSerializer\n\n def get_queryset(self):\n queries = Q(user=self.user)\n for profile in self.message.user.group.profile_set.all():\n queries |= Q(user=profile.user)\n return UserLetter.objects.filter(queries)\n\n @classmethod\n def group_names(self, instance, action):\n logger.debug(str(instance))\n return [instance.user.username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('UL has_permission {} {} {}'.format(user, action, pk))\n if action in ['create', 'update', 'delete']:\n return False\n return True\n\n\nclass LetterTransactionBinding(ResourceBinding):\n model = LetterTransaction\n stream = 'lettertransactions'\n serializer_class = LetterTransactionSerializer\n\n def get_queryset(self):\n return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(\n letter__user=self.user))\n\n @classmethod\n def group_names(self, instance, action):\n return [instance.borrower.username + 'solo', instance.letter.user.\n username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('TR has_permission {} {} {}'.format(user, action, self\n .message.content['text']))\n if action == 'delete':\n return False\n if action == 'create' or action == 'update':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'letter' not in payload['data']:\n logger.debug(\n 'Possibly malicious malformed LetterTransaction from {}'\n .format(self.user.username))\n return False\n ul = UserLetter.objects.get(pk=payload['data']['letter'])\n if ul.user.profile not in self.user.group.profile_set.all():\n logger.debug(\n 'Malicious LetterTransaction creation suspected by {}'.\n format(self.user.username))\n return False\n return True\n",
"step-3": "<mask token>\n\n\nclass TeamWordBinding(ResourceBinding):\n model = TeamWord\n stream = 'teamwords'\n serializer_class = TeamWordSerializer\n\n def get_queryset(self):\n return TeamWord.objects.filter(user__group__team=self.user.group.team)\n\n @classmethod\n def group_names(self, instance, action):\n return [str(instance.user.group.team)]\n\n def has_permission(self, user, action, pk):\n logger.debug('TW has_permission {} {} {}'.format(user, action, pk))\n if action in ['update', 'delete']:\n return False\n if action == 'create':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'word' not in payload['data']:\n logger.debug('Possibly malicious malformed TeamWord from {}'\n .format(self.user.username))\n return False\n word = payload['data']['word']\n word_letters = set(word.lower())\n if len(word_letters) == 0:\n return False\n user = self.user\n user_letters = set()\n for letter in UserLetter.objects.filter(user=user):\n user_letters.add(letter.letter.lower())\n for letter in LetterTransaction.objects.filter(borrower=user,\n approved=True):\n user_letters.add(letter.letter.lower())\n if not word_letters.issubset(user_letters):\n return False\n team_words = set()\n for tword in self.get_queryset():\n team_words.add(tword.word)\n if word in team_words:\n return False\n try:\n wordObj = Dictionary.objects.get(word=word)\n except Exception as e:\n return False\n return True\n return True\n\n\nclass UserLetterBinding(ResourceBinding):\n model = UserLetter\n stream = 'userletters'\n serializer_class = UserLetterSerializer\n\n def get_queryset(self):\n queries = Q(user=self.user)\n for profile in self.message.user.group.profile_set.all():\n queries |= Q(user=profile.user)\n return UserLetter.objects.filter(queries)\n\n @classmethod\n def group_names(self, instance, action):\n logger.debug(str(instance))\n return [instance.user.username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('UL has_permission {} {} {}'.format(user, action, pk))\n if action in ['create', 'update', 'delete']:\n return False\n return True\n\n\nclass LetterTransactionBinding(ResourceBinding):\n model = LetterTransaction\n stream = 'lettertransactions'\n serializer_class = LetterTransactionSerializer\n\n def get_queryset(self):\n return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(\n letter__user=self.user))\n\n @classmethod\n def group_names(self, instance, action):\n return [instance.borrower.username + 'solo', instance.letter.user.\n username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('TR has_permission {} {} {}'.format(user, action, self\n .message.content['text']))\n if action == 'delete':\n return False\n if action == 'create' or action == 'update':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'letter' not in payload['data']:\n logger.debug(\n 'Possibly malicious malformed LetterTransaction from {}'\n .format(self.user.username))\n return False\n ul = UserLetter.objects.get(pk=payload['data']['letter'])\n if ul.user.profile not in self.user.group.profile_set.all():\n logger.debug(\n 'Malicious LetterTransaction creation suspected by {}'.\n format(self.user.username))\n return False\n return True\n",
"step-4": "import json\nimport logging\nlogger = logging.getLogger(__name__)\nfrom django.db.models import Q\nfrom channels_api.bindings import ResourceBinding\nfrom .models import LetterTransaction, UserLetter, TeamWord, Dictionary\nfrom .serializers import LetterTransactionSerializer, UserLetterSerializer, TeamWordSerializer\n\n\nclass TeamWordBinding(ResourceBinding):\n model = TeamWord\n stream = 'teamwords'\n serializer_class = TeamWordSerializer\n\n def get_queryset(self):\n return TeamWord.objects.filter(user__group__team=self.user.group.team)\n\n @classmethod\n def group_names(self, instance, action):\n return [str(instance.user.group.team)]\n\n def has_permission(self, user, action, pk):\n logger.debug('TW has_permission {} {} {}'.format(user, action, pk))\n if action in ['update', 'delete']:\n return False\n if action == 'create':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'word' not in payload['data']:\n logger.debug('Possibly malicious malformed TeamWord from {}'\n .format(self.user.username))\n return False\n word = payload['data']['word']\n word_letters = set(word.lower())\n if len(word_letters) == 0:\n return False\n user = self.user\n user_letters = set()\n for letter in UserLetter.objects.filter(user=user):\n user_letters.add(letter.letter.lower())\n for letter in LetterTransaction.objects.filter(borrower=user,\n approved=True):\n user_letters.add(letter.letter.lower())\n if not word_letters.issubset(user_letters):\n return False\n team_words = set()\n for tword in self.get_queryset():\n team_words.add(tword.word)\n if word in team_words:\n return False\n try:\n wordObj = Dictionary.objects.get(word=word)\n except Exception as e:\n return False\n return True\n return True\n\n\nclass UserLetterBinding(ResourceBinding):\n model = UserLetter\n stream = 'userletters'\n serializer_class = UserLetterSerializer\n\n def get_queryset(self):\n queries = Q(user=self.user)\n for profile in self.message.user.group.profile_set.all():\n queries |= Q(user=profile.user)\n return UserLetter.objects.filter(queries)\n\n @classmethod\n def group_names(self, instance, action):\n logger.debug(str(instance))\n return [instance.user.username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('UL has_permission {} {} {}'.format(user, action, pk))\n if action in ['create', 'update', 'delete']:\n return False\n return True\n\n\nclass LetterTransactionBinding(ResourceBinding):\n model = LetterTransaction\n stream = 'lettertransactions'\n serializer_class = LetterTransactionSerializer\n\n def get_queryset(self):\n return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(\n letter__user=self.user))\n\n @classmethod\n def group_names(self, instance, action):\n return [instance.borrower.username + 'solo', instance.letter.user.\n username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('TR has_permission {} {} {}'.format(user, action, self\n .message.content['text']))\n if action == 'delete':\n return False\n if action == 'create' or action == 'update':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'letter' not in payload['data']:\n logger.debug(\n 'Possibly malicious malformed LetterTransaction from {}'\n .format(self.user.username))\n return False\n ul = UserLetter.objects.get(pk=payload['data']['letter'])\n if ul.user.profile not in self.user.group.profile_set.all():\n logger.debug(\n 'Malicious LetterTransaction creation suspected by {}'.\n format(self.user.username))\n return False\n return True\n",
"step-5": "import json\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom django.db.models import Q\n\nfrom channels_api.bindings import ResourceBinding\n\nfrom .models import LetterTransaction, UserLetter, TeamWord, Dictionary\nfrom .serializers import LetterTransactionSerializer, UserLetterSerializer, TeamWordSerializer\n\n\nclass TeamWordBinding(ResourceBinding):\n\n model = TeamWord\n stream = \"teamwords\"\n serializer_class = TeamWordSerializer\n\n def get_queryset(self):\n return TeamWord.objects.filter(user__group__team=self.user.group.team)\n\n @classmethod\n def group_names(self, instance, action):\n return [str(instance.user.group.team)]\n\n def has_permission(self, user, action, pk):\n logger.debug(\"TW has_permission {} {} {}\".format(user, action, pk))\n\n if action in ['update', 'delete']:\n return False\n\n if action == 'create':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'word' not in payload['data']:\n logger.debug(\"Possibly malicious malformed TeamWord from {}\".format(self.user.username))\n return False\n\n word = payload['data']['word']\n word_letters = set(word.lower())\n if len(word_letters) == 0:\n return False\n\n user = self.user\n user_letters = set()\n for letter in UserLetter.objects.filter(user=user):\n user_letters.add(letter.letter.lower())\n for letter in LetterTransaction.objects.filter(borrower=user, approved=True):\n user_letters.add(letter.letter.lower())\n\n if not word_letters.issubset(user_letters):\n return False\n\n team_words = set()\n for tword in self.get_queryset():\n team_words.add(tword.word)\n\n if word in team_words:\n return False\n\n try:\n wordObj = Dictionary.objects.get(word=word)\n except Exception as e:\n return False\n\n return True\n\n # allow list, retrieve, subscribe\n return True\n\n \nclass UserLetterBinding(ResourceBinding):\n\n model = UserLetter\n stream = \"userletters\"\n serializer_class = UserLetterSerializer\n\n def get_queryset(self):\n queries = Q(user=self.user)\n for profile in self.message.user.group.profile_set.all():\n queries |= Q(user=profile.user)\n\n return UserLetter.objects.filter(queries)\n\n @classmethod\n def group_names(self, instance, action):\n logger.debug(str(instance))\n return [instance.user.username + \"solo\"]\n\n def has_permission(self, user, action, pk):\n logger.debug(\"UL has_permission {} {} {}\".format(user, action, pk))\n\n if action in ['create', 'update', 'delete']:\n return False\n\n # allow list, retrieve, subscribe\n return True\n\n\nclass LetterTransactionBinding(ResourceBinding):\n\n model = LetterTransaction\n stream = \"lettertransactions\"\n serializer_class = LetterTransactionSerializer\n\n def get_queryset(self):\n return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(letter__user=self.user))\n\n @classmethod\n def group_names(self, instance, action):\n # Send this to only the borrower and lender\n return [instance.borrower.username + \"solo\", instance.letter.user.username + \"solo\"]\n\n def has_permission(self, user, action, pk):\n logger.debug(\"TR has_permission {} {} {}\".format(user, action, self.message.content['text']))\n\n if action == \"delete\":\n return False\n\n if action == \"create\" or action == \"update\":\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'letter' not in payload['data']:\n logger.debug(\"Possibly malicious malformed LetterTransaction from {}\".format(self.user.username))\n return False\n\n ul = UserLetter.objects.get(pk=payload['data']['letter'])\n\n # If this UserLetter is not owned by a friend, permission denied\n if ul.user.profile not in self.user.group.profile_set.all():\n logger.debug(\"Malicious LetterTransaction creation suspected by {}\".format(self.user.username))\n return False\n\n # allow list, retrieve, subscribe, and legitimate create\n return True\n",
"step-ids": [
13,
14,
15,
17,
18
]
}
|
[
13,
14,
15,
17,
18
] |
import http.server
import socketserver
from http.server import BaseHTTPRequestHandler, HTTPServer
import time
import json
import io
import urllib
import requests
from lib.Emby_ws import xnoppo_ws
from lib.Emby_http import *
from lib.Xnoppo import *
from lib.Xnoppo_TV import *
import lib.Xnoppo_AVR
import shutil
import asyncio
import threading
import logging
import logging.handlers
import psutil
def get_version():
return("2.01")
def thread_function(ws_object):
print("Thread: starting")
ws_object.start()
print("Thread: finishing")
def restart():
print('restart')
try:
emby_wsocket.stop()
except:
sys.exit()
sys.exit()
print('fin restart')
def save_config(config_file, config):
with open(config_file, 'w') as fw:
json.dump(config, fw, indent=4)
fw.close
try:
emby_wsocket.ws_config=config
emby_wsocket.EmbySession.config=config
except:
emby_wsocket.ws_config=config
def get_state():
status={}
status["Version"]=get_version()
try:
status["Playstate"]=emby_wsocket.EmbySession.playstate
status["playedtitle"]=emby_wsocket.EmbySession.playedtitle
status["server"]=emby_wsocket.EmbySession.server
status["folder"]=emby_wsocket.EmbySession.folder
status["filename"]=emby_wsocket.EmbySession.filename
status["CurrentData"]=emby_wsocket.EmbySession.currentdata
# gives a single float value
except:
status["Playstate"]="Not_Connected"
status["playedtitle"]=""
status["server"]=""
status["folder"]=""
status["filename"]=""
status["CurrentData"]=""
status["cpu_perc"]=psutil.cpu_percent()
status["mem_perc"]=psutil.virtual_memory().percent
# you can have the percentage of used RAM
print(psutil.virtual_memory().percent)
print(status)
return(status)
def cargar_config(config_file,tv_path,av_path,lang_path):
with open(config_file, 'r') as f:
config = json.load(f)
#ver_configuracion(config)
f.close
## new options default config values
config["Version"]=get_version()
default = config.get("Autoscript", False)
config["Autoscript"]=default
default = config.get("enable_all_libraries", False)
config["enable_all_libraries"]=default
default = config.get("TV_model", "")
config["TV_model"]=default
default = config.get("TV_SOURCES", [])
config["TV_SOURCES"] = default
default = config.get("AV_model", "")
config["AV_model"]=default
default = config.get("AV_SOURCES", [])
config["AV_SOURCES"] = default
default = config.get("TV_script_init", "")
config["TV_script_init"]=default
default = config.get("TV_script_end", "")
config["TV_script_end"]=default
default = config.get("av_delay_hdmi", 0)
config["av_delay_hdmi"]=default
default = config.get("AV_Port", 23)
config["AV_Port"]=default
default = config.get("timeout_oppo_mount", 60)
config["timeout_oppo_mount"]=default
default = config.get("language","es-ES")
config["language"]=default
default = config.get("default_nfs",False)
config["default_nfs"]=default
default = config.get("wait_nfs",False)
config["wait_nfs"]=default
default = config.get("refresh_time",5)
config["refresh_time"]=default
default = config.get("check_beta",False)
config["check_beta"]=default
default = config.get("smbtrick",False)
config["smbtrick"]=default
default = config.get("BRDisc",False)
config["BRDisc"]=default
## testeado de rutas
edit_server=0
server_list = config["servers"]
for server in server_list:
default = server.get("Test_OK", False)
server_list[edit_server]["Test_OK"]=default
edit_server=edit_server+1
## Cambio de booleans de texto antiguos a boleans actuales.
if config["TV"]=='True':
config["TV"]=True;
if config["TV"]=='False':
config["TV"]=False;
if config["AV"]=='True':
config["AV"]=True;
if config["AV"]=='False':
config["AV"]=False;
config["servers"]=server_list
config["tv_dirs"]=get_dir_folders(tv_path);
config["av_dirs"]=get_dir_folders(av_path);
config["langs"]=get_dir_folders(lang_path);
return(config)
def check_version(config):
url = "https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js"
headers = {}
response = requests.get(url, headers=headers)
version = json.loads(response.text)
print(version)
print(config["check_beta"])
if config["check_beta"]==True:
last_version=version["beta_version"]
last_version_file=version["beta_version_file"]
else:
last_version=version["curr_version"]
last_version_file=version["curr_version_file"]
xno_version=get_version()
resp = {}
resp["version"]=last_version
resp["file"]=last_version_file
print(xno_version)
print(last_version)
if xno_version<last_version:
resp["new_version"]=True
else:
resp["new_version"]=False
print(resp)
return(resp)
def update_version(config,vers_path,cwd):
url = "https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js"
headers = {}
response = requests.get(url, headers=headers)
version = json.loads(response.text)
print(version)
if config["check_beta"]==True:
last_version=version["beta_version"]
last_version_file=version["beta_version_file"]
else:
last_version=version["curr_version"]
last_version_file=version["curr_version_file"]
url2 = "https://github.com/siberian-git/Xnoppo/raw/main/versions/" + last_version_file
headers = {}
response2 = requests.get(url2, headers=headers)
filename=vers_path + last_version_file
with open(filename, 'wb') as f:
f.write(response2.content)
f.close()
shutil.unpack_archive(filename, cwd)
if sys.platform.startswith('win'):
separador="\\"
else:
separador="/"
tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador
av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador
if config["TV"]==True and config["TV_model"]!="":
move_files(tv_path + config["TV_model"],lib_path)
if config["AV"]==True and config["AV_model"]!="":
move_files(av_path + config["AV_model"],lib_path)
resp = {}
resp["version"]=last_version
resp["file"]=last_version_file
resp["new_version"]=False
return(resp)
def cargar_lang(config_file):
with open(config_file.encode(sys.getfilesystemencoding()), 'r',encoding='latin-1') as f:
config = json.load(f)
#ver_configuracion(config)
f.close
## new options default config values
return(config)
def leer_file(web_file):
with open(web_file, 'r',encoding='utf8') as f:
num=f.read()
f.close
return(num)
def leer_img(web_file):
with open(web_file, 'rb') as f:
num=f.read()
f.close
return(num)
def test_path(config,server):
rutas = get_mount_path(server["Emby_Path"] + "/test.mkv",server)
result2 = test_mount_path(config,rutas["Servidor"],rutas["Carpeta"])
return(result2)
def get_mount_path(movie,server_data):
movie = movie.replace(server_data["Emby_Path"],server_data["Oppo_Path"])
movie = movie.replace('\\\\','\\')
movie = movie.replace('\\','/')
word = '/'
inicio = movie.find(word)
inicio = inicio +1
final = movie.find(word,inicio,len(movie))
servidor = movie[inicio:final]
ultimo=final+1
result=final+1
while result > 0:
ultimo=result+1
result=movie.find(word,ultimo,len(movie))
fichero=movie[ultimo:len(movie)]
final=final+1
ultimo=ultimo-1
carpeta=movie[final:ultimo]
resultado={}
resultado["Servidor"]=servidor
resultado["Carpeta"]=carpeta
resultado["Fichero"]=fichero
return(resultado)
def test_mount_path(config,servidor,carpeta):
sendnotifyremote(config["Oppo_IP"])
#print("Conectando con el OPPO")
result=check_socket(config)
if result==0:
response_data6a = getmainfirmwareversion(config)
response_data6c = getdevicelist(config)
response_data6b = getsetupmenu(config)
response_data6c = OppoSignin(config)
response_data6d = getdevicelist(config)
response_data6e = getglobalinfo(config)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey("EJT",config)
time.sleep(1)
#print("Solicitando montar ruta al OPPO")
response_data6b = getsetupmenu(config)
while response_data6f.find('devicelist":[]') > 0:
time.sleep(1)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey("QPW",config)
device_list=json.loads(response_data6f)
if config["DebugLevel"]>0: print(device_list)
nfs=config["default_nfs"]
for device in device_list["devicelist"]:
if device["name"].upper()==servidor.upper():
if device["sub_type"]=="nfs":
nfs=True
break
else:
nfs=False
break
if nfs:
response_login = LoginNFS(config,servidor)
else:
response_login = LoginSambaWithOutID(config,servidor)
if config["Always_ON"]==False:
time.sleep(5)
response_data6b = getsetupmenu(config)
if nfs:
response_mount = mountSharedNFSFolder(servidor,carpeta,'','',config)
else:
response_mount = mountSharedFolder(servidor,carpeta,'','',config)
response=json.loads(response_mount)
#print(response)
if config["Autoscript"]==True:
result=umountSharedFolder(config)
if response["success"]==True:
a = "OK"
else:
a = "FAILURE"
return(a)
else:
print("No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo")
def test_emby(config):
try:
EmbySession=EmbyHttp(config)
user_info = EmbySession.user_info
if user_info["SessionInfo"]["Id"]!="":
return("OK")
else:
return("FAILED")
except:
return("FAILED")
def test_oppo(config):
result=check_socket(config)
if result==0:
return("OK")
else:
return("FAILED")
def carga_libraries(config):
try:
EmbySession=EmbyHttp(config)
views_list=EmbySession.get_user_views(EmbySession.user_info["User"]["Id"])
libraries = []
for view in views_list:
library= {}
library["Name"]=view["Name"]
library["Id"]=view["Id"]
library["Active"]=False
try:
lib_list=config["Libraries"]
except:
lib_list={}
for lib in lib_list:
if lib["Id"]==view["Id"]:
library["Active"]=lib["Active"]
libraries.append(library)
config["Libraries"]=libraries
return(0)
except:
return(1)
def is_library_active(config,libraryname):
for library in config["Libraries"]:
if library["Name"]==libraryname:
return(library["Active"])
return(False)
def get_selectableFolders(config):
EmbySession=EmbyHttp(config)
MediaFolders = EmbySession.get_emby_selectablefolders()
servers=[]
for Folder in MediaFolders:
index=1
active=is_library_active(config,Folder["Name"])
if config["enable_all_libraries"]==True:
active=True;
if active==True:
for SubFolder in Folder["SubFolders"]:
server={}
server["Id"]=SubFolder["Id"]
if index>1:
server["name"]=Folder["Name"]+"("+str(index)+")"
else:
server["name"]=Folder["Name"]
server["Emby_Path"]=SubFolder["Path"]
server["Oppo_Path"]="/"
try:
serv_list=config["servers"]
except:
serv_list={}
for serv in serv_list:
if server["Emby_Path"]==serv["Emby_Path"]:
server["name"]=serv["name"];
server["Oppo_Path"]=serv["Oppo_Path"];
server["Test_OK"]=serv["Test_OK"];
servers.append(server)
index=index+1
config["servers"]=servers
def get_dir_folders(directory):
os.chdir(directory)
dirs = os.listdir(".")
encontrado=False
list_dir=[]
#a =""
#list_dir.append(a)
for x in dirs:
if os.path.isdir(x):
list_dir.append(x)
return(list_dir)
def move_files(src, dest):
os.chdir(src)
src_files = os.listdir('.')
for file_name in src_files:
full_file_name = os.path.join(src, file_name)
if os.path.isfile(full_file_name):
shutil.copy(full_file_name, dest)
return(0)
def get_devices(config):
try:
EmbySession=EmbyHttp(config)
devices = EmbySession.get_emby_devices()
index=0
dev_temp = []
for device in devices["Items"]:
try:
if device["Id"]!='Xnoppo':
device["Name"]=device["Name"] + " / " + device["AppName"]
device["Id"]=device["ReportedDeviceId"]
dev_temp.append(device)
except:
pass
config["devices"]=dev_temp
return('OK')
except:
return('FAILURE')
class MyServer(BaseHTTPRequestHandler):
def do_GET(self):
cwd = os.path.dirname(os.path.abspath(__file__))
if sys.platform.startswith('win'):
separador="\\"
else:
separador="/"
resource_path=cwd + separador + 'web' + separador + 'resources' + separador
html_path = cwd + separador + 'web' + separador
tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador
av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador
lang_path = cwd + separador + 'web' + separador + 'lang' + separador
vers_path = cwd + separador + 'versions' + separador
print(self.path)
if self.path == '/emby_conf.html':
i = leer_file(html_path + 'emby_conf.html')
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/oppo_conf.html':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_file(html_path + 'oppo_conf.html')
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/lib_conf.html':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_file(html_path + 'lib_conf.html')
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/path_conf.html':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_file(html_path + 'path_conf.html')
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/tv_conf.html':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_file(html_path + 'tv_conf.html')
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/av_conf.html':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_file(html_path + 'av_conf.html')
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/other_conf.html':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_file(html_path + 'other_conf.html')
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/status.html':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_file(html_path + 'status.html')
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/help.html':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_file(html_path + 'help.html')
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/remote.html':
i = leer_file(html_path + 'remote.html')
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/android-chrome-36x36.png':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_img(resource_path + 'android-chrome-36x36.png')
self.wfile.write(bytes(i))
return(0)
if self.path == '/av-receiver-icon-2.jpg':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_img(resource_path + 'av-receiver-icon-2.jpg')
self.wfile.write(bytes(i))
return(0)
if self.path == '/dragon.png':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_img(resource_path + 'dragon.png')
self.wfile.write(bytes(i))
return(0)
if self.path == '/xnoppo_config':
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path == '/xnoppo_config_lib':
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
carga_libraries(a)
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path == '/xnoppo_config_dev':
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
get_devices(a)
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path == '/check_version':
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
a = check_version(config)
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path == '/update_version':
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
a = update_version(config,vers_path,cwd)
restart()
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path == '/get_state':
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
a = get_state()
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path == '/restart':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
a = "Restarting"
self.wfile.write(bytes(a,"utf-8"))
restart()
if self.path == '/refresh_paths':
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
get_selectableFolders(a)
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path == '/lang':
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
a = cargar_lang(lang_path + config["language"] + separador +'lang.js')
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path.find("/send_key?")>=0:
get_data = self.path
print(get_data)
a = len('/send_key?sendkey=')
b=get_data[a:len(get_data)]
print(b)
config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
sendnotifyremote(config["Oppo_IP"])
result=check_socket(config)
if b=='PON':
if result==0:
response_data6a = getmainfirmwareversion(config)
response_data6c = getdevicelist(config)
response_data6b = getsetupmenu(config)
response_data6c = OppoSignin(config)
response_data6d = getdevicelist(config)
response_data6e = getglobalinfo(config)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey("EJT",config)
if config["BRDisc"]==True:
time.sleep(1)
response_data_on = sendremotekey("EJT",config)
time.sleep(1)
response_data6b = getsetupmenu(config)
else:
response_data_on = sendremotekey(b,config)
self.send_response(200)
self.send_header("Content-type", "text")
self.end_headers()
a = "ok"
self.wfile.write(bytes(a,"utf-8"))
return(0)
if self.path == '/log.txt':
self.send_response(200)
self.send_header("Content-type", "text")
self.end_headers()
config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')
self.wfile.write(bytes(a))
return(0)
else:
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes("<html><head><title>https://pythonbasics.org</title></head>", "utf-8"))
self.wfile.write(bytes("<p>Request: %s</p>" % self.path, "utf-8"))
self.wfile.write(bytes("<body>", "utf-8"))
self.wfile.write(bytes("<p>This is an example web server.</p>", "utf-8"))
self.wfile.write(bytes("</body></html>", "utf-8"))
def do_POST(self):
cwd = os.path.dirname(os.path.abspath(__file__))
if sys.platform.startswith('win'):
separador="\\"
else:
separador="/"
resource_path=cwd + separador + 'web' + separador + 'resources' + separador
html_path = cwd + separador + 'web' + separador
tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador
av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador
lib_path = cwd + separador + 'lib' + separador
lang_path = cwd + separador + 'web' + separador + 'lang' + separador
vers_path = cwd + separador + 'versions' + separador
print(self.path)
if self.path == '/save_config':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
save_config(cwd + separador + 'config.json',config)
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
if self.path == '/check_emby':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = test_emby(config)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
status = get_state()
if status["Playstate"]=="Not_Connected":
save_config(cwd + separador + 'config.json',config)
emby_wsocket.ws_config=config
restart()
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/check_oppo':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = test_oppo(config)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/test_path':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
server = json.loads(post_data.decode('utf-8'))
config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
a = test_path(config,server)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(server))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(server),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/navigate_path':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
path_obj = json.loads(post_data.decode('utf-8'))
path = path_obj["path"]
config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
a = navigate_folder(path,config)
a_json=json.dumps(a)
print(len(a_json))
self.send_response(200)
self.send_header("Content-Length", len(a_json))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path == '/move_tv':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
save_config(cwd + separador + 'config.json',config)
move_files(tv_path + config["TV_model"],lib_path)
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
restart()
return(0)
if self.path == '/move_av':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
save_config(cwd + separador + 'config.json',config)
move_files(av_path + config["AV_model"],lib_path)
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
restart()
return(0)
if self.path == '/get_tv_key':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = get_tv_key(config)
if a == 'OK':
save_config(cwd + separador + 'config.json',config)
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/tv_test_conn':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = tv_test_conn(config)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/get_tv_sources':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = get_tv_sources(config)
if a == 'OK':
save_config(cwd + separador + 'config.json',config)
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/get_av_sources':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = get_hdmi_list(config)
if a != None:
config["AV_SOURCES"]=a
save_config(cwd + separador + 'config.json',config)
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/tv_test_init':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = tv_change_hdmi(config)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/tv_test_end':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = tv_set_prev(config)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/av_test_on':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = av_check_power(config)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/av_test_off':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = av_power_off(config)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/av_test_hdmi':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = av_change_hdmi(config)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if __name__ == "__main__":
cwd = os.path.dirname(os.path.abspath(__file__))
if sys.platform.startswith('win'):
separador="\\"
else:
separador="/"
config_file = cwd + separador + "config.json"
resource_path=cwd + separador + 'web' + separador + 'resources' + separador
html_path = cwd + separador + 'web' + separador
tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador
av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador
lib_path = cwd + separador + 'lib' + separador
lang_path = cwd + separador + 'web' + separador + 'lang' + separador
vers_path = cwd + separador + 'versions' + separador
config = cargar_config(config_file,tv_path,av_path,lang_path)
logfile=cwd + separador + "emby_xnoppo_client_logging.log"
lang = cargar_lang(lang_path + config["language"] + separador +'lang.js')
if config["DebugLevel"]==0:
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',datefmt='%d/%m/%Y %I:%M:%S %p',level=logging.CRITICAL)
elif config["DebugLevel"]==1:
rfh = logging.handlers.RotatingFileHandler(
filename=logfile,
mode='a',
maxBytes=50*1024*1024,
backupCount=2,
encoding=None,
delay=0
)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',datefmt='%d/%m/%Y %I:%M:%S %p',level=logging.INFO,handlers=[rfh])
elif config["DebugLevel"]==2:
rfh = logging.handlers.RotatingFileHandler(
filename=logfile,
mode='a',
maxBytes=5*1024*1024,
backupCount=2,
encoding=None,
delay=0
)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',datefmt='%d/%m/%Y %I:%M:%S %p',level=logging.DEBUG,handlers=[rfh])
emby_wsocket = xnoppo_ws()
emby_wsocket.ws_config=config
emby_wsocket.config_file=config_file
emby_wsocket.ws_lang=lang
x = threading.Thread(target=thread_function, args=(emby_wsocket,))
x.start()
espera=0
estado_anterior=''
logging.debug('Arrancamos el Servidor Web\n')
serverPort = 8090
webServer = HTTPServer(("", serverPort), MyServer)
print("Server started http://%s:%s" % ("", serverPort))
try:
webServer.serve_forever()
except KeyboardInterrupt:
pass
webServer.server_close()
logging.info('Fin proceso')
logging.info('Finished')
print("Server stopped.")
|
normal
|
{
"blob_id": "2ff85ac059f160fcc6b39b4298e8216cbad77ab3",
"index": 504,
"step-1": "<mask token>\n\n\ndef get_version():\n return '2.01'\n\n\n<mask token>\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\n<mask token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\ndef leer_file(web_file):\n with open(web_file, 'r', encoding='utf8') as f:\n num = f.read()\n f.close\n return num\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\ndef get_mount_path(movie, server_data):\n movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])\n movie = movie.replace('\\\\\\\\', '\\\\')\n movie = movie.replace('\\\\', '/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio + 1\n final = movie.find(word, inicio, len(movie))\n servidor = movie[inicio:final]\n ultimo = final + 1\n result = final + 1\n while result > 0:\n ultimo = result + 1\n result = movie.find(word, ultimo, len(movie))\n fichero = movie[ultimo:len(movie)]\n final = final + 1\n ultimo = ultimo - 1\n carpeta = movie[final:ultimo]\n resultado = {}\n resultado['Servidor'] = servidor\n resultado['Carpeta'] = carpeta\n resultado['Fichero'] = fichero\n return resultado\n\n\ndef test_mount_path(config, servidor, carpeta):\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\n<mask token>\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\ndef get_dir_folders(directory):\n os.chdir(directory)\n dirs = os.listdir('.')\n encontrado = False\n list_dir = []\n for x in dirs:\n if os.path.isdir(x):\n list_dir.append(x)\n return list_dir\n\n\n<mask token>\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_version():\n return '2.01'\n\n\ndef thread_function(ws_object):\n print('Thread: starting')\n ws_object.start()\n print('Thread: finishing')\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\ndef get_state():\n status = {}\n status['Version'] = get_version()\n try:\n status['Playstate'] = emby_wsocket.EmbySession.playstate\n status['playedtitle'] = emby_wsocket.EmbySession.playedtitle\n status['server'] = emby_wsocket.EmbySession.server\n status['folder'] = emby_wsocket.EmbySession.folder\n status['filename'] = emby_wsocket.EmbySession.filename\n status['CurrentData'] = emby_wsocket.EmbySession.currentdata\n except:\n status['Playstate'] = 'Not_Connected'\n status['playedtitle'] = ''\n status['server'] = ''\n status['folder'] = ''\n status['filename'] = ''\n status['CurrentData'] = ''\n status['cpu_perc'] = psutil.cpu_percent()\n status['mem_perc'] = psutil.virtual_memory().percent\n print(psutil.virtual_memory().percent)\n print(status)\n return status\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\ndef leer_file(web_file):\n with open(web_file, 'r', encoding='utf8') as f:\n num = f.read()\n f.close\n return num\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\ndef get_mount_path(movie, server_data):\n movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])\n movie = movie.replace('\\\\\\\\', '\\\\')\n movie = movie.replace('\\\\', '/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio + 1\n final = movie.find(word, inicio, len(movie))\n servidor = movie[inicio:final]\n ultimo = final + 1\n result = final + 1\n while result > 0:\n ultimo = result + 1\n result = movie.find(word, ultimo, len(movie))\n fichero = movie[ultimo:len(movie)]\n final = final + 1\n ultimo = ultimo - 1\n carpeta = movie[final:ultimo]\n resultado = {}\n resultado['Servidor'] = servidor\n resultado['Carpeta'] = carpeta\n resultado['Fichero'] = fichero\n return resultado\n\n\ndef test_mount_path(config, servidor, carpeta):\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\ndef get_dir_folders(directory):\n os.chdir(directory)\n dirs = os.listdir('.')\n encontrado = False\n list_dir = []\n for x in dirs:\n if os.path.isdir(x):\n list_dir.append(x)\n return list_dir\n\n\n<mask token>\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_version():\n return '2.01'\n\n\ndef thread_function(ws_object):\n print('Thread: starting')\n ws_object.start()\n print('Thread: finishing')\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\ndef get_state():\n status = {}\n status['Version'] = get_version()\n try:\n status['Playstate'] = emby_wsocket.EmbySession.playstate\n status['playedtitle'] = emby_wsocket.EmbySession.playedtitle\n status['server'] = emby_wsocket.EmbySession.server\n status['folder'] = emby_wsocket.EmbySession.folder\n status['filename'] = emby_wsocket.EmbySession.filename\n status['CurrentData'] = emby_wsocket.EmbySession.currentdata\n except:\n status['Playstate'] = 'Not_Connected'\n status['playedtitle'] = ''\n status['server'] = ''\n status['folder'] = ''\n status['filename'] = ''\n status['CurrentData'] = ''\n status['cpu_perc'] = psutil.cpu_percent()\n status['mem_perc'] = psutil.virtual_memory().percent\n print(psutil.virtual_memory().percent)\n print(status)\n return status\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\ndef leer_file(web_file):\n with open(web_file, 'r', encoding='utf8') as f:\n num = f.read()\n f.close\n return num\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\ndef get_mount_path(movie, server_data):\n movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])\n movie = movie.replace('\\\\\\\\', '\\\\')\n movie = movie.replace('\\\\', '/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio + 1\n final = movie.find(word, inicio, len(movie))\n servidor = movie[inicio:final]\n ultimo = final + 1\n result = final + 1\n while result > 0:\n ultimo = result + 1\n result = movie.find(word, ultimo, len(movie))\n fichero = movie[ultimo:len(movie)]\n final = final + 1\n ultimo = ultimo - 1\n carpeta = movie[final:ultimo]\n resultado = {}\n resultado['Servidor'] = servidor\n resultado['Carpeta'] = carpeta\n resultado['Fichero'] = fichero\n return resultado\n\n\ndef test_mount_path(config, servidor, carpeta):\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\ndef get_dir_folders(directory):\n os.chdir(directory)\n dirs = os.listdir('.')\n encontrado = False\n list_dir = []\n for x in dirs:\n if os.path.isdir(x):\n list_dir.append(x)\n return list_dir\n\n\ndef move_files(src, dest):\n os.chdir(src)\n src_files = os.listdir('.')\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if os.path.isfile(full_file_name):\n shutil.copy(full_file_name, dest)\n return 0\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<mask token>\n",
"step-4": "import http.server\nimport socketserver\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport time\nimport json\nimport io\nimport urllib\nimport requests\nfrom lib.Emby_ws import xnoppo_ws\nfrom lib.Emby_http import *\nfrom lib.Xnoppo import *\nfrom lib.Xnoppo_TV import *\nimport lib.Xnoppo_AVR\nimport shutil\nimport asyncio\nimport threading\nimport logging\nimport logging.handlers\nimport psutil\n\n\ndef get_version():\n return '2.01'\n\n\ndef thread_function(ws_object):\n print('Thread: starting')\n ws_object.start()\n print('Thread: finishing')\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\ndef get_state():\n status = {}\n status['Version'] = get_version()\n try:\n status['Playstate'] = emby_wsocket.EmbySession.playstate\n status['playedtitle'] = emby_wsocket.EmbySession.playedtitle\n status['server'] = emby_wsocket.EmbySession.server\n status['folder'] = emby_wsocket.EmbySession.folder\n status['filename'] = emby_wsocket.EmbySession.filename\n status['CurrentData'] = emby_wsocket.EmbySession.currentdata\n except:\n status['Playstate'] = 'Not_Connected'\n status['playedtitle'] = ''\n status['server'] = ''\n status['folder'] = ''\n status['filename'] = ''\n status['CurrentData'] = ''\n status['cpu_perc'] = psutil.cpu_percent()\n status['mem_perc'] = psutil.virtual_memory().percent\n print(psutil.virtual_memory().percent)\n print(status)\n return status\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\ndef leer_file(web_file):\n with open(web_file, 'r', encoding='utf8') as f:\n num = f.read()\n f.close\n return num\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\ndef get_mount_path(movie, server_data):\n movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])\n movie = movie.replace('\\\\\\\\', '\\\\')\n movie = movie.replace('\\\\', '/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio + 1\n final = movie.find(word, inicio, len(movie))\n servidor = movie[inicio:final]\n ultimo = final + 1\n result = final + 1\n while result > 0:\n ultimo = result + 1\n result = movie.find(word, ultimo, len(movie))\n fichero = movie[ultimo:len(movie)]\n final = final + 1\n ultimo = ultimo - 1\n carpeta = movie[final:ultimo]\n resultado = {}\n resultado['Servidor'] = servidor\n resultado['Carpeta'] = carpeta\n resultado['Fichero'] = fichero\n return resultado\n\n\ndef test_mount_path(config, servidor, carpeta):\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\ndef get_dir_folders(directory):\n os.chdir(directory)\n dirs = os.listdir('.')\n encontrado = False\n list_dir = []\n for x in dirs:\n if os.path.isdir(x):\n list_dir.append(x)\n return list_dir\n\n\ndef move_files(src, dest):\n os.chdir(src)\n src_files = os.listdir('.')\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if os.path.isfile(full_file_name):\n shutil.copy(full_file_name, dest)\n return 0\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\nif __name__ == '__main__':\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n config_file = cwd + separador + 'config.json'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n config = cargar_config(config_file, tv_path, av_path, lang_path)\n logfile = cwd + separador + 'emby_xnoppo_client_logging.log'\n lang = cargar_lang(lang_path + config['language'] + separador + 'lang.js')\n if config['DebugLevel'] == 0:\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.CRITICAL)\n elif config['DebugLevel'] == 1:\n rfh = logging.handlers.RotatingFileHandler(filename=logfile, mode=\n 'a', maxBytes=50 * 1024 * 1024, backupCount=2, encoding=None,\n delay=0)\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.INFO, handlers=[rfh])\n elif config['DebugLevel'] == 2:\n rfh = logging.handlers.RotatingFileHandler(filename=logfile, mode=\n 'a', maxBytes=5 * 1024 * 1024, backupCount=2, encoding=None,\n delay=0)\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.DEBUG, handlers=[rfh]\n )\n emby_wsocket = xnoppo_ws()\n emby_wsocket.ws_config = config\n emby_wsocket.config_file = config_file\n emby_wsocket.ws_lang = lang\n x = threading.Thread(target=thread_function, args=(emby_wsocket,))\n x.start()\n espera = 0\n estado_anterior = ''\n logging.debug('Arrancamos el Servidor Web\\n')\n serverPort = 8090\n webServer = HTTPServer(('', serverPort), MyServer)\n print('Server started http://%s:%s' % ('', serverPort))\n try:\n webServer.serve_forever()\n except KeyboardInterrupt:\n pass\n webServer.server_close()\n logging.info('Fin proceso')\n logging.info('Finished')\n print('Server stopped.')\n",
"step-5": "import http.server\nimport socketserver\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport time\nimport json\nimport io\nimport urllib\nimport requests\nfrom lib.Emby_ws import xnoppo_ws\nfrom lib.Emby_http import *\nfrom lib.Xnoppo import *\nfrom lib.Xnoppo_TV import *\nimport lib.Xnoppo_AVR\nimport shutil\nimport asyncio\nimport threading\nimport logging\nimport logging.handlers\nimport psutil\n\ndef get_version():\n return(\"2.01\")\n\ndef thread_function(ws_object):\n print(\"Thread: starting\")\n ws_object.start()\n print(\"Thread: finishing\")\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n \ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config=config\n emby_wsocket.EmbySession.config=config\n except:\n emby_wsocket.ws_config=config\ndef get_state():\n status={}\n status[\"Version\"]=get_version()\n try:\n status[\"Playstate\"]=emby_wsocket.EmbySession.playstate\n status[\"playedtitle\"]=emby_wsocket.EmbySession.playedtitle\n status[\"server\"]=emby_wsocket.EmbySession.server\n status[\"folder\"]=emby_wsocket.EmbySession.folder\n status[\"filename\"]=emby_wsocket.EmbySession.filename\n status[\"CurrentData\"]=emby_wsocket.EmbySession.currentdata\n # gives a single float value\n except:\n status[\"Playstate\"]=\"Not_Connected\"\n status[\"playedtitle\"]=\"\"\n status[\"server\"]=\"\"\n status[\"folder\"]=\"\"\n status[\"filename\"]=\"\"\n status[\"CurrentData\"]=\"\"\n status[\"cpu_perc\"]=psutil.cpu_percent()\n status[\"mem_perc\"]=psutil.virtual_memory().percent\n \n # you can have the percentage of used RAM\n print(psutil.virtual_memory().percent)\n\n\n print(status)\n return(status)\n\ndef cargar_config(config_file,tv_path,av_path,lang_path):\n\n with open(config_file, 'r') as f: \n config = json.load(f)\n #ver_configuracion(config)\n f.close\n ## new options default config values\n config[\"Version\"]=get_version()\n default = config.get(\"Autoscript\", False)\n config[\"Autoscript\"]=default\n default = config.get(\"enable_all_libraries\", False)\n config[\"enable_all_libraries\"]=default\n default = config.get(\"TV_model\", \"\")\n config[\"TV_model\"]=default\n default = config.get(\"TV_SOURCES\", [])\n config[\"TV_SOURCES\"] = default\n default = config.get(\"AV_model\", \"\")\n config[\"AV_model\"]=default\n default = config.get(\"AV_SOURCES\", [])\n config[\"AV_SOURCES\"] = default\n default = config.get(\"TV_script_init\", \"\")\n config[\"TV_script_init\"]=default\n default = config.get(\"TV_script_end\", \"\")\n config[\"TV_script_end\"]=default\n default = config.get(\"av_delay_hdmi\", 0)\n config[\"av_delay_hdmi\"]=default\n default = config.get(\"AV_Port\", 23)\n config[\"AV_Port\"]=default\n default = config.get(\"timeout_oppo_mount\", 60)\n config[\"timeout_oppo_mount\"]=default\n default = config.get(\"language\",\"es-ES\")\n config[\"language\"]=default\n default = config.get(\"default_nfs\",False)\n config[\"default_nfs\"]=default\n default = config.get(\"wait_nfs\",False)\n config[\"wait_nfs\"]=default\n default = config.get(\"refresh_time\",5)\n config[\"refresh_time\"]=default\n default = config.get(\"check_beta\",False)\n config[\"check_beta\"]=default\n default = config.get(\"smbtrick\",False)\n config[\"smbtrick\"]=default\n default = config.get(\"BRDisc\",False)\n config[\"BRDisc\"]=default\n\n ## testeado de rutas\n edit_server=0\n server_list = config[\"servers\"]\n for server in server_list:\n default = server.get(\"Test_OK\", False)\n server_list[edit_server][\"Test_OK\"]=default\n edit_server=edit_server+1\n ## Cambio de booleans de texto antiguos a boleans actuales.\n if config[\"TV\"]=='True':\n config[\"TV\"]=True;\n if config[\"TV\"]=='False':\n config[\"TV\"]=False;\n if config[\"AV\"]=='True':\n config[\"AV\"]=True;\n if config[\"AV\"]=='False':\n config[\"AV\"]=False;\n config[\"servers\"]=server_list\n config[\"tv_dirs\"]=get_dir_folders(tv_path);\n config[\"av_dirs\"]=get_dir_folders(av_path);\n config[\"langs\"]=get_dir_folders(lang_path);\n\n return(config)\n\ndef check_version(config):\n\n url = \"https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js\"\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config[\"check_beta\"])\n if config[\"check_beta\"]==True:\n last_version=version[\"beta_version\"]\n last_version_file=version[\"beta_version_file\"]\n else:\n last_version=version[\"curr_version\"]\n last_version_file=version[\"curr_version_file\"]\n xno_version=get_version()\n resp = {}\n resp[\"version\"]=last_version\n resp[\"file\"]=last_version_file\n print(xno_version)\n print(last_version)\n if xno_version<last_version:\n resp[\"new_version\"]=True\n else:\n resp[\"new_version\"]=False\n print(resp)\n return(resp)\n\ndef update_version(config,vers_path,cwd):\n\n url = \"https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js\"\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config[\"check_beta\"]==True:\n last_version=version[\"beta_version\"]\n last_version_file=version[\"beta_version_file\"]\n else:\n last_version=version[\"curr_version\"]\n last_version_file=version[\"curr_version_file\"]\n url2 = \"https://github.com/siberian-git/Xnoppo/raw/main/versions/\" + last_version_file\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename=vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador=\"\\\\\"\n else:\n separador=\"/\"\n tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador\n av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador\n if config[\"TV\"]==True and config[\"TV_model\"]!=\"\":\n move_files(tv_path + config[\"TV_model\"],lib_path)\n if config[\"AV\"]==True and config[\"AV_model\"]!=\"\":\n move_files(av_path + config[\"AV_model\"],lib_path)\n resp = {}\n resp[\"version\"]=last_version\n resp[\"file\"]=last_version_file\n resp[\"new_version\"]=False\n return(resp)\n\ndef cargar_lang(config_file):\n\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',encoding='latin-1') as f: \n config = json.load(f)\n #ver_configuracion(config)\n f.close\n ## new options default config values\n return(config)\n\ndef leer_file(web_file):\n\n with open(web_file, 'r',encoding='utf8') as f:\n num=f.read()\n f.close\n return(num)\n\ndef leer_img(web_file):\n\n with open(web_file, 'rb') as f:\n num=f.read()\n f.close\n return(num)\n\n\ndef test_path(config,server):\n \n rutas = get_mount_path(server[\"Emby_Path\"] + \"/test.mkv\",server)\n result2 = test_mount_path(config,rutas[\"Servidor\"],rutas[\"Carpeta\"])\n return(result2)\n\ndef get_mount_path(movie,server_data):\n\n movie = movie.replace(server_data[\"Emby_Path\"],server_data[\"Oppo_Path\"])\n movie = movie.replace('\\\\\\\\','\\\\')\n movie = movie.replace('\\\\','/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio +1 \n final = movie.find(word,inicio,len(movie))\n servidor = movie[inicio:final]\n ultimo=final+1\n result=final+1\n while result > 0:\n ultimo=result+1\n result=movie.find(word,ultimo,len(movie))\n fichero=movie[ultimo:len(movie)]\n final=final+1\n ultimo=ultimo-1\n carpeta=movie[final:ultimo]\n resultado={}\n resultado[\"Servidor\"]=servidor\n resultado[\"Carpeta\"]=carpeta\n resultado[\"Fichero\"]=fichero\n return(resultado)\n\ndef test_mount_path(config,servidor,carpeta):\n sendnotifyremote(config[\"Oppo_IP\"])\n #print(\"Conectando con el OPPO\")\n result=check_socket(config)\n if result==0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey(\"EJT\",config)\n time.sleep(1)\n #print(\"Solicitando montar ruta al OPPO\")\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey(\"QPW\",config)\n device_list=json.loads(response_data6f)\n if config[\"DebugLevel\"]>0: print(device_list)\n nfs=config[\"default_nfs\"]\n for device in device_list[\"devicelist\"]:\n if device[\"name\"].upper()==servidor.upper():\n if device[\"sub_type\"]==\"nfs\":\n nfs=True\n break\n else:\n nfs=False\n break\n if nfs:\n response_login = LoginNFS(config,servidor)\n else:\n response_login = LoginSambaWithOutID(config,servidor)\n if config[\"Always_ON\"]==False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor,carpeta,'','',config)\n else:\n response_mount = mountSharedFolder(servidor,carpeta,'','',config)\n response=json.loads(response_mount)\n #print(response)\n if config[\"Autoscript\"]==True:\n result=umountSharedFolder(config)\n if response[\"success\"]==True:\n a = \"OK\"\n else:\n a = \"FAILURE\" \n return(a)\n else:\n print(\"No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo\")\n\ndef test_emby(config):\n try:\n EmbySession=EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info[\"SessionInfo\"][\"Id\"]!=\"\":\n return(\"OK\")\n else:\n return(\"FAILED\")\n except:\n return(\"FAILED\")\n\ndef test_oppo(config):\n result=check_socket(config)\n if result==0:\n return(\"OK\")\n else:\n return(\"FAILED\")\n\ndef carga_libraries(config):\n try:\n EmbySession=EmbyHttp(config)\n views_list=EmbySession.get_user_views(EmbySession.user_info[\"User\"][\"Id\"])\n libraries = []\n for view in views_list:\n library= {}\n library[\"Name\"]=view[\"Name\"]\n library[\"Id\"]=view[\"Id\"]\n library[\"Active\"]=False\n try:\n lib_list=config[\"Libraries\"]\n except:\n lib_list={}\n for lib in lib_list:\n if lib[\"Id\"]==view[\"Id\"]:\n library[\"Active\"]=lib[\"Active\"]\n libraries.append(library)\n config[\"Libraries\"]=libraries\n return(0)\n except:\n return(1)\ndef is_library_active(config,libraryname):\n for library in config[\"Libraries\"]:\n if library[\"Name\"]==libraryname:\n return(library[\"Active\"])\n return(False)\n\ndef get_selectableFolders(config):\n EmbySession=EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers=[]\n for Folder in MediaFolders:\n index=1\n active=is_library_active(config,Folder[\"Name\"])\n if config[\"enable_all_libraries\"]==True:\n active=True;\n if active==True:\n for SubFolder in Folder[\"SubFolders\"]: \n server={}\n server[\"Id\"]=SubFolder[\"Id\"]\n if index>1:\n server[\"name\"]=Folder[\"Name\"]+\"(\"+str(index)+\")\"\n else:\n server[\"name\"]=Folder[\"Name\"]\n server[\"Emby_Path\"]=SubFolder[\"Path\"]\n server[\"Oppo_Path\"]=\"/\"\n try:\n serv_list=config[\"servers\"]\n except:\n serv_list={}\n for serv in serv_list:\n if server[\"Emby_Path\"]==serv[\"Emby_Path\"]:\n server[\"name\"]=serv[\"name\"];\n server[\"Oppo_Path\"]=serv[\"Oppo_Path\"];\n server[\"Test_OK\"]=serv[\"Test_OK\"];\n servers.append(server)\n index=index+1\n config[\"servers\"]=servers\n\ndef get_dir_folders(directory):\n os.chdir(directory)\n dirs = os.listdir(\".\")\n encontrado=False\n list_dir=[]\n #a =\"\"\n #list_dir.append(a)\n for x in dirs:\n if os.path.isdir(x):\n list_dir.append(x)\n return(list_dir)\n\ndef move_files(src, dest):\n os.chdir(src)\n src_files = os.listdir('.')\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if os.path.isfile(full_file_name):\n shutil.copy(full_file_name, dest)\n return(0)\n\ndef get_devices(config):\n try:\n EmbySession=EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index=0\n dev_temp = []\n for device in devices[\"Items\"]:\n try:\n if device[\"Id\"]!='Xnoppo':\n device[\"Name\"]=device[\"Name\"] + \" / \" + device[\"AppName\"]\n device[\"Id\"]=device[\"ReportedDeviceId\"]\n dev_temp.append(device)\n except:\n pass\n config[\"devices\"]=dev_temp\n return('OK')\n except:\n return('FAILURE')\n\nclass MyServer(BaseHTTPRequestHandler):\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador=\"\\\\\"\n else:\n separador=\"/\"\n resource_path=cwd + separador + 'web' + separador + 'resources' + separador\n html_path = cwd + separador + 'web' + separador\n tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador\n av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n \n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return(0)\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return(0)\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return(0)\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n self.wfile.write(bytes(json.dumps(a),\"utf-8\")) \n return(0)\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = update_version(config,vers_path,cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/restart':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n a = \"Restarting\"\n self.wfile.write(bytes(a,\"utf-8\"))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/lang':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = cargar_lang(lang_path + config[\"language\"] + separador +'lang.js')\n self.wfile.write(bytes(json.dumps(a),\"utf-8\")) \n return(0)\n if self.path.find(\"/send_key?\")>=0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b=get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n sendnotifyremote(config[\"Oppo_IP\"])\n result=check_socket(config)\n if b=='PON':\n if result==0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey(\"EJT\",config)\n if config[\"BRDisc\"]==True:\n time.sleep(1)\n response_data_on = sendremotekey(\"EJT\",config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b,config)\n self.send_response(200)\n self.send_header(\"Content-type\", \"text\")\n self.end_headers()\n a = \"ok\"\n self.wfile.write(bytes(a,\"utf-8\")) \n return(0)\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text\")\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a)) \n return(0)\n else:\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(bytes(\"<html><head><title>https://pythonbasics.org</title></head>\", \"utf-8\"))\n self.wfile.write(bytes(\"<p>Request: %s</p>\" % self.path, \"utf-8\"))\n self.wfile.write(bytes(\"<body>\", \"utf-8\"))\n self.wfile.write(bytes(\"<p>This is an example web server.</p>\", \"utf-8\"))\n self.wfile.write(bytes(\"</body></html>\", \"utf-8\"))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador=\"\\\\\"\n else:\n separador=\"/\"\n resource_path=cwd + separador + 'web' + separador + 'resources' + separador\n html_path = cwd + separador + 'web' + separador\n tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador\n av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n \n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json',config)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n status = get_state()\n if status[\"Playstate\"]==\"Not_Connected\":\n save_config(cwd + separador + 'config.json',config)\n emby_wsocket.ws_config=config\n restart()\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = test_path(config,server)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(server))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj[\"path\"]\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = navigate_folder(path,config)\n a_json=json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header(\"Content-Length\", len(a_json))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json',config)\n move_files(tv_path + config[\"TV_model\"],lib_path)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n restart()\n return(0)\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json',config)\n move_files(av_path + config[\"AV_model\"],lib_path)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n restart()\n return(0)\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json',config)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json',config)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = get_hdmi_list(config)\n if a != None:\n config[\"AV_SOURCES\"]=a\n save_config(cwd + separador + 'config.json',config)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\nif __name__ == \"__main__\":\n\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador=\"\\\\\"\n else:\n separador=\"/\"\n config_file = cwd + separador + \"config.json\"\n resource_path=cwd + separador + 'web' + separador + 'resources' + separador\n html_path = cwd + separador + 'web' + separador\n tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador\n av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n config = cargar_config(config_file,tv_path,av_path,lang_path)\n logfile=cwd + separador + \"emby_xnoppo_client_logging.log\"\n lang = cargar_lang(lang_path + config[\"language\"] + separador +'lang.js')\n\n if config[\"DebugLevel\"]==0:\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',datefmt='%d/%m/%Y %I:%M:%S %p',level=logging.CRITICAL)\n elif config[\"DebugLevel\"]==1:\n rfh = logging.handlers.RotatingFileHandler(\n filename=logfile, \n mode='a',\n maxBytes=50*1024*1024,\n backupCount=2,\n encoding=None,\n delay=0\n )\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',datefmt='%d/%m/%Y %I:%M:%S %p',level=logging.INFO,handlers=[rfh])\n elif config[\"DebugLevel\"]==2:\n rfh = logging.handlers.RotatingFileHandler(\n filename=logfile, \n mode='a',\n maxBytes=5*1024*1024,\n backupCount=2,\n encoding=None,\n delay=0\n )\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',datefmt='%d/%m/%Y %I:%M:%S %p',level=logging.DEBUG,handlers=[rfh])\n emby_wsocket = xnoppo_ws()\n emby_wsocket.ws_config=config\n emby_wsocket.config_file=config_file\n emby_wsocket.ws_lang=lang\n x = threading.Thread(target=thread_function, args=(emby_wsocket,))\n x.start()\n espera=0\n estado_anterior=''\n\n logging.debug('Arrancamos el Servidor Web\\n')\n serverPort = 8090\n webServer = HTTPServer((\"\", serverPort), MyServer)\n print(\"Server started http://%s:%s\" % (\"\", serverPort))\n try:\n webServer.serve_forever()\n except KeyboardInterrupt:\n pass\n webServer.server_close()\n logging.info('Fin proceso')\n logging.info('Finished')\n print(\"Server stopped.\")\n",
"step-ids": [
21,
24,
25,
27,
28
]
}
|
[
21,
24,
25,
27,
28
] |
<|reserved_special_token_0|>
class Running(object):
<|reserved_special_token_0|>
def __init__(self, args, device_id):
"""
:param args: parser.parse_args()
:param device_id: 0 or -1
"""
self.args = args
self.device_id = device_id
self.model_flags = ['hidden_size', 'ff_size', 'heads',
'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']
self.device = 'cpu' if self.args.visible_gpus == '-1' else 'cuda'
logger.info('Device ID %d' % self.device_id)
logger.info(f'Device {self.device}')
torch.manual_seed(self.args.seed)
random.seed(self.args.seed)
if self.device_id >= 0:
torch.cuda.set_device(self.device_id)
init_logger(args.log_file)
def baseline(self, cal_lead=False, cal_oracle=False):
test_iter = data_loader.DataLoader(self.args, data_loader.
load_dataset(self.args, 'test', shuffle=False), self.args.
batch_size, self.device, shuffle=False, is_test=True)
trainer = build_trainer(self.args, self.device_id, None, None)
if cal_lead:
trainer.test(test_iter, 0, cal_lead=True)
elif cal_oracle:
trainer.test(test_iter, 0, cal_oracle=True)
<|reserved_special_token_0|>
def train(self):
model = model_builder.Summarizer(self.args, self.device,
load_pretrained_bert=True)
if self.args.train_from:
logger.info(f'Loading checkpoint from {self.args.train_from}')
checkpoint = torch.load(self.args.train_from, map_location=lambda
storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
model.load_cp(checkpoint)
optimizer = model_builder.build_optim(self.args, model, checkpoint)
else:
optimizer = model_builder.build_optim(self.args, model, None)
logger.info(model)
trainer = build_trainer(self.args, self.device_id, model, optimizer)
trainer.train(self.train_iter, self.args.train_steps)
def validate(self, step):
logger.info(f'Loading checkpoint from {self.args.validate_from}')
checkpoint = torch.load(self.args.validate_from, map_location=lambda
storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
print(self.args)
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device,
load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
valid_iter = data_loader.DataLoader(self.args, data_loader.
load_dataset(self.args, 'valid', shuffle=False), self.args.
batch_size, self.device, shuffle=False, is_test=False)
trainer = build_trainer(self.args, self.device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
<|reserved_special_token_0|>
def test(self, step=None):
if not step:
try:
step = int(self.args.test_from.split('.')[-2].split('_')[-1])
except IndexError:
step = 0
logger.info(f'Loading checkpoint from {self.args.test_from}')
checkpoint = torch.load(self.args.test_from, map_location=lambda
storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device,
load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
trainer = build_trainer(self.args, self.device_id, model, None)
test_iter = data_loader.DataLoader(self.args, data_loader.
load_dataset(self.args, 'test', shuffle=False), self.args.
batch_size, self.device, shuffle=False, is_test=True)
trainer.test(test_iter, step)
def gen_features_vector(self, step=None):
if not step:
try:
step = int(self.args.test_from.split('.')[-2].split('_')[-1])
except IndexError:
step = 0
logger.info(f'Loading checkpoint from {self.args.test_from}')
checkpoint = torch.load(self.args.test_from, map_location=lambda
storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device,
load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
trainer = build_trainer(self.args, self.device_id, model, None)
test_iter = data_loader.DataLoader(self.args, data_loader.
load_dataset(self.args, 'test', shuffle=False), self.args.
batch_size, self.device, shuffle=False, is_test=True)
trainer.gen_features_vector(test_iter, step)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(target=self.error_listener,
daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
rank, original_trace = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT)
rank, original_trace = self.error_queue.get()
msg = (
'\n\n-- Tracebacks above this line can probably\n be ignored --\n\n'
)
msg += original_trace
raise Exception(msg)
class Running(object):
"""Run Model"""
def __init__(self, args, device_id):
"""
:param args: parser.parse_args()
:param device_id: 0 or -1
"""
self.args = args
self.device_id = device_id
self.model_flags = ['hidden_size', 'ff_size', 'heads',
'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']
self.device = 'cpu' if self.args.visible_gpus == '-1' else 'cuda'
logger.info('Device ID %d' % self.device_id)
logger.info(f'Device {self.device}')
torch.manual_seed(self.args.seed)
random.seed(self.args.seed)
if self.device_id >= 0:
torch.cuda.set_device(self.device_id)
init_logger(args.log_file)
def baseline(self, cal_lead=False, cal_oracle=False):
test_iter = data_loader.DataLoader(self.args, data_loader.
load_dataset(self.args, 'test', shuffle=False), self.args.
batch_size, self.device, shuffle=False, is_test=True)
trainer = build_trainer(self.args, self.device_id, None, None)
if cal_lead:
trainer.test(test_iter, 0, cal_lead=True)
elif cal_oracle:
trainer.test(test_iter, 0, cal_oracle=True)
def train_iter(self):
return data_loader.DataLoader(self.args, data_loader.load_dataset(
self.args, 'train', shuffle=True), self.args.batch_size, self.
device, shuffle=True, is_test=False)
def train(self):
model = model_builder.Summarizer(self.args, self.device,
load_pretrained_bert=True)
if self.args.train_from:
logger.info(f'Loading checkpoint from {self.args.train_from}')
checkpoint = torch.load(self.args.train_from, map_location=lambda
storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
model.load_cp(checkpoint)
optimizer = model_builder.build_optim(self.args, model, checkpoint)
else:
optimizer = model_builder.build_optim(self.args, model, None)
logger.info(model)
trainer = build_trainer(self.args, self.device_id, model, optimizer)
trainer.train(self.train_iter, self.args.train_steps)
def validate(self, step):
logger.info(f'Loading checkpoint from {self.args.validate_from}')
checkpoint = torch.load(self.args.validate_from, map_location=lambda
storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
print(self.args)
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device,
load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
valid_iter = data_loader.DataLoader(self.args, data_loader.
load_dataset(self.args, 'valid', shuffle=False), self.args.
batch_size, self.device, shuffle=False, is_test=False)
trainer = build_trainer(self.args, self.device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def wait_and_validate(self):
time_step = 0
if self.args.test_all:
cp_files = sorted(glob.glob(os.path.join(self.args.model_path,
'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = self.validate(step=step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if i - max_step > 10:
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info(f'PPL {str(xent_lst)}')
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
self.test(step)
else:
while True:
cp_files = sorted(glob.glob(os.path.join(self.args.
model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if cp_files:
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if os.path.getsize(cp) <= 0:
time.sleep(60)
continue
if time_of_cp > time_step:
time_step = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
self.validate(step)
self.test(step)
cp_files = sorted(glob.glob(os.path.join(self.args.
model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if cp_files:
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if time_of_cp > time_step:
continue
else:
time.sleep(300)
def test(self, step=None):
if not step:
try:
step = int(self.args.test_from.split('.')[-2].split('_')[-1])
except IndexError:
step = 0
logger.info(f'Loading checkpoint from {self.args.test_from}')
checkpoint = torch.load(self.args.test_from, map_location=lambda
storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device,
load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
trainer = build_trainer(self.args, self.device_id, model, None)
test_iter = data_loader.DataLoader(self.args, data_loader.
load_dataset(self.args, 'test', shuffle=False), self.args.
batch_size, self.device, shuffle=False, is_test=True)
trainer.test(test_iter, step)
def gen_features_vector(self, step=None):
if not step:
try:
step = int(self.args.test_from.split('.')[-2].split('_')[-1])
except IndexError:
step = 0
logger.info(f'Loading checkpoint from {self.args.test_from}')
checkpoint = torch.load(self.args.test_from, map_location=lambda
storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device,
load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
trainer = build_trainer(self.args, self.device_id, model, None)
test_iter = data_loader.DataLoader(self.args, data_loader.
load_dataset(self.args, 'test', shuffle=False), self.args.
batch_size, self.device, shuffle=False, is_test=True)
trainer.gen_features_vector(test_iter, step)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MultiRunning(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(target=self.error_listener,
daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
rank, original_trace = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT)
rank, original_trace = self.error_queue.get()
msg = (
'\n\n-- Tracebacks above this line can probably\n be ignored --\n\n'
)
msg += original_trace
raise Exception(msg)
class Running(object):
"""Run Model"""
def __init__(self, args, device_id):
"""
:param args: parser.parse_args()
:param device_id: 0 or -1
"""
self.args = args
self.device_id = device_id
self.model_flags = ['hidden_size', 'ff_size', 'heads',
'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']
self.device = 'cpu' if self.args.visible_gpus == '-1' else 'cuda'
logger.info('Device ID %d' % self.device_id)
logger.info(f'Device {self.device}')
torch.manual_seed(self.args.seed)
random.seed(self.args.seed)
if self.device_id >= 0:
torch.cuda.set_device(self.device_id)
init_logger(args.log_file)
def baseline(self, cal_lead=False, cal_oracle=False):
test_iter = data_loader.DataLoader(self.args, data_loader.
load_dataset(self.args, 'test', shuffle=False), self.args.
batch_size, self.device, shuffle=False, is_test=True)
trainer = build_trainer(self.args, self.device_id, None, None)
if cal_lead:
trainer.test(test_iter, 0, cal_lead=True)
elif cal_oracle:
trainer.test(test_iter, 0, cal_oracle=True)
def train_iter(self):
return data_loader.DataLoader(self.args, data_loader.load_dataset(
self.args, 'train', shuffle=True), self.args.batch_size, self.
device, shuffle=True, is_test=False)
def train(self):
model = model_builder.Summarizer(self.args, self.device,
load_pretrained_bert=True)
if self.args.train_from:
logger.info(f'Loading checkpoint from {self.args.train_from}')
checkpoint = torch.load(self.args.train_from, map_location=lambda
storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
model.load_cp(checkpoint)
optimizer = model_builder.build_optim(self.args, model, checkpoint)
else:
optimizer = model_builder.build_optim(self.args, model, None)
logger.info(model)
trainer = build_trainer(self.args, self.device_id, model, optimizer)
trainer.train(self.train_iter, self.args.train_steps)
def validate(self, step):
logger.info(f'Loading checkpoint from {self.args.validate_from}')
checkpoint = torch.load(self.args.validate_from, map_location=lambda
storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
print(self.args)
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device,
load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
valid_iter = data_loader.DataLoader(self.args, data_loader.
load_dataset(self.args, 'valid', shuffle=False), self.args.
batch_size, self.device, shuffle=False, is_test=False)
trainer = build_trainer(self.args, self.device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def wait_and_validate(self):
time_step = 0
if self.args.test_all:
cp_files = sorted(glob.glob(os.path.join(self.args.model_path,
'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = self.validate(step=step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if i - max_step > 10:
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info(f'PPL {str(xent_lst)}')
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
self.test(step)
else:
while True:
cp_files = sorted(glob.glob(os.path.join(self.args.
model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if cp_files:
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if os.path.getsize(cp) <= 0:
time.sleep(60)
continue
if time_of_cp > time_step:
time_step = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
self.validate(step)
self.test(step)
cp_files = sorted(glob.glob(os.path.join(self.args.
model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if cp_files:
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if time_of_cp > time_step:
continue
else:
time.sleep(300)
def test(self, step=None):
if not step:
try:
step = int(self.args.test_from.split('.')[-2].split('_')[-1])
except IndexError:
step = 0
logger.info(f'Loading checkpoint from {self.args.test_from}')
checkpoint = torch.load(self.args.test_from, map_location=lambda
storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device,
load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
trainer = build_trainer(self.args, self.device_id, model, None)
test_iter = data_loader.DataLoader(self.args, data_loader.
load_dataset(self.args, 'test', shuffle=False), self.args.
batch_size, self.device, shuffle=False, is_test=True)
trainer.test(test_iter, step)
def gen_features_vector(self, step=None):
if not step:
try:
step = int(self.args.test_from.split('.')[-2].split('_')[-1])
except IndexError:
step = 0
logger.info(f'Loading checkpoint from {self.args.test_from}')
checkpoint = torch.load(self.args.test_from, map_location=lambda
storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device,
load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
trainer = build_trainer(self.args, self.device_id, model, None)
test_iter = data_loader.DataLoader(self.args, data_loader.
load_dataset(self.args, 'test', shuffle=False), self.args.
batch_size, self.device, shuffle=False, is_test=True)
trainer.gen_features_vector(test_iter, step)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MultiRunning(object):
def __init__(self, args, device_id):
self.args = args
self.device_id = device_id
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(target=self.error_listener,
daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
rank, original_trace = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT)
rank, original_trace = self.error_queue.get()
msg = (
'\n\n-- Tracebacks above this line can probably\n be ignored --\n\n'
)
msg += original_trace
raise Exception(msg)
class Running(object):
"""Run Model"""
def __init__(self, args, device_id):
"""
:param args: parser.parse_args()
:param device_id: 0 or -1
"""
self.args = args
self.device_id = device_id
self.model_flags = ['hidden_size', 'ff_size', 'heads',
'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']
self.device = 'cpu' if self.args.visible_gpus == '-1' else 'cuda'
logger.info('Device ID %d' % self.device_id)
logger.info(f'Device {self.device}')
torch.manual_seed(self.args.seed)
random.seed(self.args.seed)
if self.device_id >= 0:
torch.cuda.set_device(self.device_id)
init_logger(args.log_file)
def baseline(self, cal_lead=False, cal_oracle=False):
test_iter = data_loader.DataLoader(self.args, data_loader.
load_dataset(self.args, 'test', shuffle=False), self.args.
batch_size, self.device, shuffle=False, is_test=True)
trainer = build_trainer(self.args, self.device_id, None, None)
if cal_lead:
trainer.test(test_iter, 0, cal_lead=True)
elif cal_oracle:
trainer.test(test_iter, 0, cal_oracle=True)
def train_iter(self):
return data_loader.DataLoader(self.args, data_loader.load_dataset(
self.args, 'train', shuffle=True), self.args.batch_size, self.
device, shuffle=True, is_test=False)
def train(self):
model = model_builder.Summarizer(self.args, self.device,
load_pretrained_bert=True)
if self.args.train_from:
logger.info(f'Loading checkpoint from {self.args.train_from}')
checkpoint = torch.load(self.args.train_from, map_location=lambda
storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
model.load_cp(checkpoint)
optimizer = model_builder.build_optim(self.args, model, checkpoint)
else:
optimizer = model_builder.build_optim(self.args, model, None)
logger.info(model)
trainer = build_trainer(self.args, self.device_id, model, optimizer)
trainer.train(self.train_iter, self.args.train_steps)
def validate(self, step):
logger.info(f'Loading checkpoint from {self.args.validate_from}')
checkpoint = torch.load(self.args.validate_from, map_location=lambda
storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
print(self.args)
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device,
load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
valid_iter = data_loader.DataLoader(self.args, data_loader.
load_dataset(self.args, 'valid', shuffle=False), self.args.
batch_size, self.device, shuffle=False, is_test=False)
trainer = build_trainer(self.args, self.device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def wait_and_validate(self):
time_step = 0
if self.args.test_all:
cp_files = sorted(glob.glob(os.path.join(self.args.model_path,
'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = self.validate(step=step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if i - max_step > 10:
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info(f'PPL {str(xent_lst)}')
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
self.test(step)
else:
while True:
cp_files = sorted(glob.glob(os.path.join(self.args.
model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if cp_files:
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if os.path.getsize(cp) <= 0:
time.sleep(60)
continue
if time_of_cp > time_step:
time_step = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
self.validate(step)
self.test(step)
cp_files = sorted(glob.glob(os.path.join(self.args.
model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if cp_files:
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if time_of_cp > time_step:
continue
else:
time.sleep(300)
def test(self, step=None):
if not step:
try:
step = int(self.args.test_from.split('.')[-2].split('_')[-1])
except IndexError:
step = 0
logger.info(f'Loading checkpoint from {self.args.test_from}')
checkpoint = torch.load(self.args.test_from, map_location=lambda
storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device,
load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
trainer = build_trainer(self.args, self.device_id, model, None)
test_iter = data_loader.DataLoader(self.args, data_loader.
load_dataset(self.args, 'test', shuffle=False), self.args.
batch_size, self.device, shuffle=False, is_test=True)
trainer.test(test_iter, step)
def gen_features_vector(self, step=None):
if not step:
try:
step = int(self.args.test_from.split('.')[-2].split('_')[-1])
except IndexError:
step = 0
logger.info(f'Loading checkpoint from {self.args.test_from}')
checkpoint = torch.load(self.args.test_from, map_location=lambda
storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device,
load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
trainer = build_trainer(self.args, self.device_id, model, None)
test_iter = data_loader.DataLoader(self.args, data_loader.
load_dataset(self.args, 'test', shuffle=False), self.args.
batch_size, self.device, shuffle=False, is_test=True)
trainer.gen_features_vector(test_iter, step)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import os
import time
import glob
import torch
import random
import signal
import argparse
from models.trainer import build_trainer
from models import data_loader, model_builder
from models.pytorch_pretrained_bert.modeling import BertConfig
from utils import distributed
from utils.logging import logger, init_logger
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
class MultiRunning(object):
def __init__(self, args, device_id):
self.args = args
self.device_id = device_id
def multi_card_run(self):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = self.args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
process = []
for i in range(nb_gpu):
self.device_id = i
process.append(mp.Process(target=self.multi_card_train, args=(self.args, self.device_id, error_queue),
daemon=True))
process[i].start()
logger.info(" Starting process pid: %d " % process[i].pid)
error_handler.add_child(process[i].pid)
for p in process:
p.join()
def multi_card_train(self, error_queue):
""" run process """
setattr(self.args, 'gpu_ranks', [int(i) for i in self.args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(self.device_id, self.args.world_size, self.args.gpu_ranks)
print('gpu_rank %d' % gpu_rank)
if gpu_rank != self.args.gpu_ranks[self.device_id]:
raise AssertionError("An error occurred in Distributed initialization")
runner = Running(self.args, self.device_id)
runner.train()
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((self.args.gpu_ranks[self.device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
class Running(object):
"""Run Model"""
def __init__(self, args, device_id):
"""
:param args: parser.parse_args()
:param device_id: 0 or -1
"""
self.args = args
self.device_id = device_id
self.model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers', 'encoder', 'ff_actv', 'use_interval',
'rnn_size']
self.device = "cpu" if self.args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % self.device_id)
logger.info(f'Device {self.device}')
torch.manual_seed(self.args.seed)
random.seed(self.args.seed)
if self.device_id >= 0:
torch.cuda.set_device(self.device_id)
init_logger(args.log_file)
def baseline(self, cal_lead=False, cal_oracle=False):
test_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'test', shuffle=False),
self.args.batch_size, self.device, shuffle=False, is_test=True)
trainer = build_trainer(self.args, self.device_id, None, None)
if cal_lead:
trainer.test(test_iter, 0, cal_lead=True)
elif cal_oracle:
trainer.test(test_iter, 0, cal_oracle=True)
def train_iter(self):
return data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'train', shuffle=True),
self.args.batch_size, self.device, shuffle=True, is_test=False)
def train(self):
model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=True)
if self.args.train_from:
logger.info(f'Loading checkpoint from {self.args.train_from}')
checkpoint = torch.load(self.args.train_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
model.load_cp(checkpoint)
optimizer = model_builder.build_optim(self.args, model, checkpoint)
else:
optimizer = model_builder.build_optim(self.args, model, None)
logger.info(model)
trainer = build_trainer(self.args, self.device_id, model, optimizer)
trainer.train(self.train_iter, self.args.train_steps)
def validate(self, step):
logger.info(f'Loading checkpoint from {self.args.validate_from}')
checkpoint = torch.load(self.args.validate_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
print(self.args)
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
valid_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'valid', shuffle=False),
self.args.batch_size, self.device, shuffle=False, is_test=False)
trainer = build_trainer(self.args, self.device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def wait_and_validate(self):
time_step = 0
if self.args.test_all:
cp_files = sorted(glob.glob(os.path.join(self.args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = self.validate(step=step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if i - max_step > 10:
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info(f'PPL {str(xent_lst)}')
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
self.test(step)
else:
while True:
cp_files = sorted(glob.glob(os.path.join(self.args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if cp_files:
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if os.path.getsize(cp) <= 0:
time.sleep(60)
continue
if time_of_cp > time_step:
time_step = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
self.validate(step)
self.test(step)
cp_files = sorted(glob.glob(os.path.join(self.args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if cp_files:
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if time_of_cp > time_step:
continue
else:
time.sleep(300)
def test(self, step=None):
if not step:
try:
step = int(self.args.test_from.split('.')[-2].split('_')[-1])
except IndexError:
step = 0
logger.info(f'Loading checkpoint from {self.args.test_from}')
checkpoint = torch.load(self.args.test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
# logger.info(model)
trainer = build_trainer(self.args, self.device_id, model, None)
test_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'test', shuffle=False),
self.args.batch_size, self.device, shuffle=False, is_test=True)
trainer.test(test_iter, step)
def gen_features_vector(self, step=None):
if not step:
try:
step = int(self.args.test_from.split('.')[-2].split('_')[-1])
except IndexError:
step = 0
logger.info(f'Loading checkpoint from {self.args.test_from}')
checkpoint = torch.load(self.args.test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt:
if k in self.model_flags:
setattr(self.args, k, opt[k])
config = BertConfig.from_json_file(self.args.bert_config_path)
model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=False, bert_config=config)
model.load_cp(checkpoint)
model.eval()
# logger.info(model)
trainer = build_trainer(self.args, self.device_id, model, None)
test_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'test', shuffle=False),
self.args.batch_size, self.device, shuffle=False, is_test=True)
trainer.gen_features_vector(test_iter, step)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-encoder", default='transformer', type=str,
choices=['classifier', 'transformer', 'rnn', 'baseline'])
parser.add_argument("-mode", default='train', type=str, choices=['train', 'validate', 'test', 'vector'])
parser.add_argument("-data_name", default='chinese_summary', help='vy_text')
parser.add_argument("-bert_data_path", default='./data/bert_data/', help='./data/bert_data/')
parser.add_argument("-model_path", default='./models/models_check_points/')
parser.add_argument("-result_path", default='./results/')
parser.add_argument("-temp_dir", default='./temp/')
parser.add_argument("-bert_pretrained_model_path", default='./models/pytorch_pretrained_bert/bert_pretrain/')
parser.add_argument("-bert_config_path", default='./models/pytorch_pretrained_bert/bert_pretrain/bert_config.json')
parser.add_argument("-batch_size", default=1000, type=int)
parser.add_argument("-use_interval", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-hidden_size", default=128, type=int)
parser.add_argument("-ff_size", default=2048, type=int)
parser.add_argument("-heads", default=8, type=int)
parser.add_argument("-inter_layers", default=2, type=int)
parser.add_argument("-rnn_size", default=512, type=int)
parser.add_argument("-param_init", default=0, type=float)
parser.add_argument("-param_init_glorot", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-dropout", default=0.1, type=float)
parser.add_argument("-optimizer", default='adam', type=str)
parser.add_argument("-lr", default=2e-3, type=float, help='learning rate')
parser.add_argument("-beta1", default=0.9, type=float)
parser.add_argument("-beta2", default=0.999, type=float)
parser.add_argument("-decay_method", default='noam', type=str)
parser.add_argument("-warmup_steps", default=8000, type=int)
parser.add_argument("-max_grad_norm", default=0, type=float)
parser.add_argument("-save_checkpoint_steps", default=5000, type=int)
parser.add_argument("-accum_count", default=2, type=int)
parser.add_argument("-world_size", default=1, type=int)
parser.add_argument("-report_every", default=50, type=int)
parser.add_argument("-train_steps", default=50000, type=int)
parser.add_argument("-recall_eval", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument('-visible_gpus', default='0', type=str)
parser.add_argument('-gpu_ranks', default='0', type=str)
parser.add_argument('-log_file', default='./logs/project.log')
parser.add_argument('-dataset', default='')
parser.add_argument('-seed', default=666, type=int)
parser.add_argument("-test_all", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-test_from", default='./models/models_check_points/model_step_50000.pt')
parser.add_argument("-train_from", default='', help='./models/models_check_points/model_step_45000.pt')
parser.add_argument("-validate_from", default='../models/models_check_points/model_step_50000.pt')
parser.add_argument("-report_rouge", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-shuffle_data", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-vy_predict", type=str2bool, nargs='?', const=False, default=True)
_args = parser.parse_args()
gpu_ranks: str = str(_args.gpu_ranks)
_args.gpu_ranks = [int(i) for i in gpu_ranks.split(',')]
os.environ["CUDA_VISIBLE_DEVICES"] = _args.visible_gpus
init_logger(_args.log_file)
_device = "cpu" if _args.visible_gpus == '-1' else "cuda"
_device_id = 0 if _device == "cuda" else -1
runner = Running(args=_args, device_id=_device_id)
multi_runner = MultiRunning(args=_args, device_id=_device_id)
if _args.world_size > 1:
multi_runner.multi_card_run()
elif _args.mode == 'train':
runner.train()
elif _args.mode == 'validate':
runner.wait_and_validate()
elif _args.mode == 'test':
runner.test()
elif _args.mode == 'lead':
runner.baseline(cal_lead=True)
elif _args.mode == 'oracle':
runner.baseline(cal_oracle=True)
elif _args.mode == 'vector':
runner.gen_features_vector()
|
flexible
|
{
"blob_id": "3adb50a6375a73f786369dd22712a657b66f758e",
"index": 8432,
"step-1": "<mask token>\n\n\nclass Running(object):\n <mask token>\n\n def __init__(self, args, device_id):\n \"\"\"\n :param args: parser.parse_args()\n :param device_id: 0 or -1\n \"\"\"\n self.args = args\n self.device_id = device_id\n self.model_flags = ['hidden_size', 'ff_size', 'heads',\n 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']\n self.device = 'cpu' if self.args.visible_gpus == '-1' else 'cuda'\n logger.info('Device ID %d' % self.device_id)\n logger.info(f'Device {self.device}')\n torch.manual_seed(self.args.seed)\n random.seed(self.args.seed)\n if self.device_id >= 0:\n torch.cuda.set_device(self.device_id)\n init_logger(args.log_file)\n\n def baseline(self, cal_lead=False, cal_oracle=False):\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer = build_trainer(self.args, self.device_id, None, None)\n if cal_lead:\n trainer.test(test_iter, 0, cal_lead=True)\n elif cal_oracle:\n trainer.test(test_iter, 0, cal_oracle=True)\n <mask token>\n\n def train(self):\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=True)\n if self.args.train_from:\n logger.info(f'Loading checkpoint from {self.args.train_from}')\n checkpoint = torch.load(self.args.train_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n model.load_cp(checkpoint)\n optimizer = model_builder.build_optim(self.args, model, checkpoint)\n else:\n optimizer = model_builder.build_optim(self.args, model, None)\n logger.info(model)\n trainer = build_trainer(self.args, self.device_id, model, optimizer)\n trainer.train(self.train_iter, self.args.train_steps)\n\n def validate(self, step):\n logger.info(f'Loading checkpoint from {self.args.validate_from}')\n checkpoint = torch.load(self.args.validate_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n print(self.args)\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n valid_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'valid', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=False)\n trainer = build_trainer(self.args, self.device_id, model, None)\n stats = trainer.validate(valid_iter, step)\n return stats.xent()\n <mask token>\n\n def test(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer.test(test_iter, step)\n\n def gen_features_vector(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer.gen_features_vector(test_iter, step)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ErrorHandler(object):\n \"\"\"A class that listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(target=self.error_listener,\n daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n rank, original_trace = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT)\n rank, original_trace = self.error_queue.get()\n msg = (\n '\\n\\n-- Tracebacks above this line can probably\\n be ignored --\\n\\n'\n )\n msg += original_trace\n raise Exception(msg)\n\n\nclass Running(object):\n \"\"\"Run Model\"\"\"\n\n def __init__(self, args, device_id):\n \"\"\"\n :param args: parser.parse_args()\n :param device_id: 0 or -1\n \"\"\"\n self.args = args\n self.device_id = device_id\n self.model_flags = ['hidden_size', 'ff_size', 'heads',\n 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']\n self.device = 'cpu' if self.args.visible_gpus == '-1' else 'cuda'\n logger.info('Device ID %d' % self.device_id)\n logger.info(f'Device {self.device}')\n torch.manual_seed(self.args.seed)\n random.seed(self.args.seed)\n if self.device_id >= 0:\n torch.cuda.set_device(self.device_id)\n init_logger(args.log_file)\n\n def baseline(self, cal_lead=False, cal_oracle=False):\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer = build_trainer(self.args, self.device_id, None, None)\n if cal_lead:\n trainer.test(test_iter, 0, cal_lead=True)\n elif cal_oracle:\n trainer.test(test_iter, 0, cal_oracle=True)\n\n def train_iter(self):\n return data_loader.DataLoader(self.args, data_loader.load_dataset(\n self.args, 'train', shuffle=True), self.args.batch_size, self.\n device, shuffle=True, is_test=False)\n\n def train(self):\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=True)\n if self.args.train_from:\n logger.info(f'Loading checkpoint from {self.args.train_from}')\n checkpoint = torch.load(self.args.train_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n model.load_cp(checkpoint)\n optimizer = model_builder.build_optim(self.args, model, checkpoint)\n else:\n optimizer = model_builder.build_optim(self.args, model, None)\n logger.info(model)\n trainer = build_trainer(self.args, self.device_id, model, optimizer)\n trainer.train(self.train_iter, self.args.train_steps)\n\n def validate(self, step):\n logger.info(f'Loading checkpoint from {self.args.validate_from}')\n checkpoint = torch.load(self.args.validate_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n print(self.args)\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n valid_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'valid', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=False)\n trainer = build_trainer(self.args, self.device_id, model, None)\n stats = trainer.validate(valid_iter, step)\n return stats.xent()\n\n def wait_and_validate(self):\n time_step = 0\n if self.args.test_all:\n cp_files = sorted(glob.glob(os.path.join(self.args.model_path,\n 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n xent_lst = []\n for i, cp in enumerate(cp_files):\n step = int(cp.split('.')[-2].split('_')[-1])\n xent = self.validate(step=step)\n xent_lst.append((xent, cp))\n max_step = xent_lst.index(min(xent_lst))\n if i - max_step > 10:\n break\n xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]\n logger.info(f'PPL {str(xent_lst)}')\n for xent, cp in xent_lst:\n step = int(cp.split('.')[-2].split('_')[-1])\n self.test(step)\n else:\n while True:\n cp_files = sorted(glob.glob(os.path.join(self.args.\n model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if cp_files:\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if os.path.getsize(cp) <= 0:\n time.sleep(60)\n continue\n if time_of_cp > time_step:\n time_step = time_of_cp\n step = int(cp.split('.')[-2].split('_')[-1])\n self.validate(step)\n self.test(step)\n cp_files = sorted(glob.glob(os.path.join(self.args.\n model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if cp_files:\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if time_of_cp > time_step:\n continue\n else:\n time.sleep(300)\n\n def test(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer.test(test_iter, step)\n\n def gen_features_vector(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer.gen_features_vector(test_iter, step)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MultiRunning(object):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ErrorHandler(object):\n \"\"\"A class that listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(target=self.error_listener,\n daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n rank, original_trace = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT)\n rank, original_trace = self.error_queue.get()\n msg = (\n '\\n\\n-- Tracebacks above this line can probably\\n be ignored --\\n\\n'\n )\n msg += original_trace\n raise Exception(msg)\n\n\nclass Running(object):\n \"\"\"Run Model\"\"\"\n\n def __init__(self, args, device_id):\n \"\"\"\n :param args: parser.parse_args()\n :param device_id: 0 or -1\n \"\"\"\n self.args = args\n self.device_id = device_id\n self.model_flags = ['hidden_size', 'ff_size', 'heads',\n 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']\n self.device = 'cpu' if self.args.visible_gpus == '-1' else 'cuda'\n logger.info('Device ID %d' % self.device_id)\n logger.info(f'Device {self.device}')\n torch.manual_seed(self.args.seed)\n random.seed(self.args.seed)\n if self.device_id >= 0:\n torch.cuda.set_device(self.device_id)\n init_logger(args.log_file)\n\n def baseline(self, cal_lead=False, cal_oracle=False):\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer = build_trainer(self.args, self.device_id, None, None)\n if cal_lead:\n trainer.test(test_iter, 0, cal_lead=True)\n elif cal_oracle:\n trainer.test(test_iter, 0, cal_oracle=True)\n\n def train_iter(self):\n return data_loader.DataLoader(self.args, data_loader.load_dataset(\n self.args, 'train', shuffle=True), self.args.batch_size, self.\n device, shuffle=True, is_test=False)\n\n def train(self):\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=True)\n if self.args.train_from:\n logger.info(f'Loading checkpoint from {self.args.train_from}')\n checkpoint = torch.load(self.args.train_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n model.load_cp(checkpoint)\n optimizer = model_builder.build_optim(self.args, model, checkpoint)\n else:\n optimizer = model_builder.build_optim(self.args, model, None)\n logger.info(model)\n trainer = build_trainer(self.args, self.device_id, model, optimizer)\n trainer.train(self.train_iter, self.args.train_steps)\n\n def validate(self, step):\n logger.info(f'Loading checkpoint from {self.args.validate_from}')\n checkpoint = torch.load(self.args.validate_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n print(self.args)\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n valid_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'valid', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=False)\n trainer = build_trainer(self.args, self.device_id, model, None)\n stats = trainer.validate(valid_iter, step)\n return stats.xent()\n\n def wait_and_validate(self):\n time_step = 0\n if self.args.test_all:\n cp_files = sorted(glob.glob(os.path.join(self.args.model_path,\n 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n xent_lst = []\n for i, cp in enumerate(cp_files):\n step = int(cp.split('.')[-2].split('_')[-1])\n xent = self.validate(step=step)\n xent_lst.append((xent, cp))\n max_step = xent_lst.index(min(xent_lst))\n if i - max_step > 10:\n break\n xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]\n logger.info(f'PPL {str(xent_lst)}')\n for xent, cp in xent_lst:\n step = int(cp.split('.')[-2].split('_')[-1])\n self.test(step)\n else:\n while True:\n cp_files = sorted(glob.glob(os.path.join(self.args.\n model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if cp_files:\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if os.path.getsize(cp) <= 0:\n time.sleep(60)\n continue\n if time_of_cp > time_step:\n time_step = time_of_cp\n step = int(cp.split('.')[-2].split('_')[-1])\n self.validate(step)\n self.test(step)\n cp_files = sorted(glob.glob(os.path.join(self.args.\n model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if cp_files:\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if time_of_cp > time_step:\n continue\n else:\n time.sleep(300)\n\n def test(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer.test(test_iter, step)\n\n def gen_features_vector(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer.gen_features_vector(test_iter, step)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass MultiRunning(object):\n\n def __init__(self, args, device_id):\n self.args = args\n self.device_id = device_id\n <mask token>\n <mask token>\n\n\nclass ErrorHandler(object):\n \"\"\"A class that listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(target=self.error_listener,\n daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n rank, original_trace = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT)\n rank, original_trace = self.error_queue.get()\n msg = (\n '\\n\\n-- Tracebacks above this line can probably\\n be ignored --\\n\\n'\n )\n msg += original_trace\n raise Exception(msg)\n\n\nclass Running(object):\n \"\"\"Run Model\"\"\"\n\n def __init__(self, args, device_id):\n \"\"\"\n :param args: parser.parse_args()\n :param device_id: 0 or -1\n \"\"\"\n self.args = args\n self.device_id = device_id\n self.model_flags = ['hidden_size', 'ff_size', 'heads',\n 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']\n self.device = 'cpu' if self.args.visible_gpus == '-1' else 'cuda'\n logger.info('Device ID %d' % self.device_id)\n logger.info(f'Device {self.device}')\n torch.manual_seed(self.args.seed)\n random.seed(self.args.seed)\n if self.device_id >= 0:\n torch.cuda.set_device(self.device_id)\n init_logger(args.log_file)\n\n def baseline(self, cal_lead=False, cal_oracle=False):\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer = build_trainer(self.args, self.device_id, None, None)\n if cal_lead:\n trainer.test(test_iter, 0, cal_lead=True)\n elif cal_oracle:\n trainer.test(test_iter, 0, cal_oracle=True)\n\n def train_iter(self):\n return data_loader.DataLoader(self.args, data_loader.load_dataset(\n self.args, 'train', shuffle=True), self.args.batch_size, self.\n device, shuffle=True, is_test=False)\n\n def train(self):\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=True)\n if self.args.train_from:\n logger.info(f'Loading checkpoint from {self.args.train_from}')\n checkpoint = torch.load(self.args.train_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n model.load_cp(checkpoint)\n optimizer = model_builder.build_optim(self.args, model, checkpoint)\n else:\n optimizer = model_builder.build_optim(self.args, model, None)\n logger.info(model)\n trainer = build_trainer(self.args, self.device_id, model, optimizer)\n trainer.train(self.train_iter, self.args.train_steps)\n\n def validate(self, step):\n logger.info(f'Loading checkpoint from {self.args.validate_from}')\n checkpoint = torch.load(self.args.validate_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n print(self.args)\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n valid_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'valid', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=False)\n trainer = build_trainer(self.args, self.device_id, model, None)\n stats = trainer.validate(valid_iter, step)\n return stats.xent()\n\n def wait_and_validate(self):\n time_step = 0\n if self.args.test_all:\n cp_files = sorted(glob.glob(os.path.join(self.args.model_path,\n 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n xent_lst = []\n for i, cp in enumerate(cp_files):\n step = int(cp.split('.')[-2].split('_')[-1])\n xent = self.validate(step=step)\n xent_lst.append((xent, cp))\n max_step = xent_lst.index(min(xent_lst))\n if i - max_step > 10:\n break\n xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]\n logger.info(f'PPL {str(xent_lst)}')\n for xent, cp in xent_lst:\n step = int(cp.split('.')[-2].split('_')[-1])\n self.test(step)\n else:\n while True:\n cp_files = sorted(glob.glob(os.path.join(self.args.\n model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if cp_files:\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if os.path.getsize(cp) <= 0:\n time.sleep(60)\n continue\n if time_of_cp > time_step:\n time_step = time_of_cp\n step = int(cp.split('.')[-2].split('_')[-1])\n self.validate(step)\n self.test(step)\n cp_files = sorted(glob.glob(os.path.join(self.args.\n model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if cp_files:\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if time_of_cp > time_step:\n continue\n else:\n time.sleep(300)\n\n def test(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer.test(test_iter, step)\n\n def gen_features_vector(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda\n storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device,\n load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.\n load_dataset(self.args, 'test', shuffle=False), self.args.\n batch_size, self.device, shuffle=False, is_test=True)\n trainer.gen_features_vector(test_iter, step)\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python\n\"\"\"\n Main training workflow\n\"\"\"\nfrom __future__ import division\n\nimport os\nimport time\nimport glob\nimport torch\nimport random\nimport signal\nimport argparse\n\nfrom models.trainer import build_trainer\nfrom models import data_loader, model_builder\nfrom models.pytorch_pretrained_bert.modeling import BertConfig\n\nfrom utils import distributed\nfrom utils.logging import logger, init_logger\n\n\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\nclass MultiRunning(object):\n def __init__(self, args, device_id):\n self.args = args\n self.device_id = device_id\n\n def multi_card_run(self):\n \"\"\" Spawns 1 process per GPU \"\"\"\n init_logger()\n\n nb_gpu = self.args.world_size\n mp = torch.multiprocessing.get_context('spawn')\n\n # Create a thread to listen for errors in the child processes.\n error_queue = mp.SimpleQueue()\n error_handler = ErrorHandler(error_queue)\n\n # Train with multiprocessing.\n process = []\n for i in range(nb_gpu):\n self.device_id = i\n process.append(mp.Process(target=self.multi_card_train, args=(self.args, self.device_id, error_queue),\n daemon=True))\n process[i].start()\n logger.info(\" Starting process pid: %d \" % process[i].pid)\n error_handler.add_child(process[i].pid)\n for p in process:\n p.join()\n\n def multi_card_train(self, error_queue):\n \"\"\" run process \"\"\"\n setattr(self.args, 'gpu_ranks', [int(i) for i in self.args.gpu_ranks])\n\n try:\n gpu_rank = distributed.multi_init(self.device_id, self.args.world_size, self.args.gpu_ranks)\n print('gpu_rank %d' % gpu_rank)\n if gpu_rank != self.args.gpu_ranks[self.device_id]:\n raise AssertionError(\"An error occurred in Distributed initialization\")\n runner = Running(self.args, self.device_id)\n runner.train()\n except KeyboardInterrupt:\n pass # killed by parent, do nothing\n except Exception:\n # propagate exception to parent process, keeping original traceback\n import traceback\n error_queue.put((self.args.gpu_ranks[self.device_id], traceback.format_exc()))\n\n\nclass ErrorHandler(object):\n \"\"\"A class that listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(\n target=self.error_listener, daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n (rank, original_trace) = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT) # kill children processes\n (rank, original_trace) = self.error_queue.get()\n msg = \"\"\"\\n\\n-- Tracebacks above this line can probably\n be ignored --\\n\\n\"\"\"\n msg += original_trace\n raise Exception(msg)\n\n\nclass Running(object):\n \"\"\"Run Model\"\"\"\n\n def __init__(self, args, device_id):\n \"\"\"\n :param args: parser.parse_args()\n :param device_id: 0 or -1\n \"\"\"\n self.args = args\n self.device_id = device_id\n self.model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers', 'encoder', 'ff_actv', 'use_interval',\n 'rnn_size']\n\n self.device = \"cpu\" if self.args.visible_gpus == '-1' else \"cuda\"\n logger.info('Device ID %d' % self.device_id)\n logger.info(f'Device {self.device}')\n torch.manual_seed(self.args.seed)\n random.seed(self.args.seed)\n\n if self.device_id >= 0:\n torch.cuda.set_device(self.device_id)\n\n init_logger(args.log_file)\n\n def baseline(self, cal_lead=False, cal_oracle=False):\n test_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'test', shuffle=False),\n self.args.batch_size, self.device, shuffle=False, is_test=True)\n\n trainer = build_trainer(self.args, self.device_id, None, None)\n\n if cal_lead:\n trainer.test(test_iter, 0, cal_lead=True)\n elif cal_oracle:\n trainer.test(test_iter, 0, cal_oracle=True)\n\n def train_iter(self):\n return data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'train', shuffle=True),\n self.args.batch_size, self.device, shuffle=True, is_test=False)\n\n def train(self):\n model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=True)\n\n if self.args.train_from:\n logger.info(f'Loading checkpoint from {self.args.train_from}')\n checkpoint = torch.load(self.args.train_from, map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n model.load_cp(checkpoint)\n optimizer = model_builder.build_optim(self.args, model, checkpoint)\n else:\n optimizer = model_builder.build_optim(self.args, model, None)\n\n logger.info(model)\n trainer = build_trainer(self.args, self.device_id, model, optimizer)\n trainer.train(self.train_iter, self.args.train_steps)\n\n def validate(self, step):\n\n logger.info(f'Loading checkpoint from {self.args.validate_from}')\n checkpoint = torch.load(self.args.validate_from, map_location=lambda storage, loc: storage)\n\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n print(self.args)\n\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n\n valid_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'valid', shuffle=False),\n self.args.batch_size, self.device, shuffle=False, is_test=False)\n trainer = build_trainer(self.args, self.device_id, model, None)\n stats = trainer.validate(valid_iter, step)\n return stats.xent()\n\n def wait_and_validate(self):\n time_step = 0\n if self.args.test_all:\n cp_files = sorted(glob.glob(os.path.join(self.args.model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n xent_lst = []\n for i, cp in enumerate(cp_files):\n step = int(cp.split('.')[-2].split('_')[-1])\n xent = self.validate(step=step)\n xent_lst.append((xent, cp))\n max_step = xent_lst.index(min(xent_lst))\n if i - max_step > 10:\n break\n xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]\n logger.info(f'PPL {str(xent_lst)}')\n for xent, cp in xent_lst:\n step = int(cp.split('.')[-2].split('_')[-1])\n self.test(step)\n else:\n while True:\n cp_files = sorted(glob.glob(os.path.join(self.args.model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if cp_files:\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if os.path.getsize(cp) <= 0:\n time.sleep(60)\n continue\n if time_of_cp > time_step:\n time_step = time_of_cp\n step = int(cp.split('.')[-2].split('_')[-1])\n self.validate(step)\n self.test(step)\n\n cp_files = sorted(glob.glob(os.path.join(self.args.model_path, 'model_step_*.pt')))\n cp_files.sort(key=os.path.getmtime)\n if cp_files:\n cp = cp_files[-1]\n time_of_cp = os.path.getmtime(cp)\n if time_of_cp > time_step:\n continue\n else:\n time.sleep(300)\n\n def test(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n # logger.info(model)\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'test', shuffle=False),\n self.args.batch_size, self.device, shuffle=False, is_test=True)\n trainer.test(test_iter, step)\n\n def gen_features_vector(self, step=None):\n if not step:\n try:\n step = int(self.args.test_from.split('.')[-2].split('_')[-1])\n except IndexError:\n step = 0\n\n logger.info(f'Loading checkpoint from {self.args.test_from}')\n checkpoint = torch.load(self.args.test_from, map_location=lambda storage, loc: storage)\n opt = vars(checkpoint['opt'])\n for k in opt:\n if k in self.model_flags:\n setattr(self.args, k, opt[k])\n\n config = BertConfig.from_json_file(self.args.bert_config_path)\n model = model_builder.Summarizer(self.args, self.device, load_pretrained_bert=False, bert_config=config)\n model.load_cp(checkpoint)\n model.eval()\n # logger.info(model)\n trainer = build_trainer(self.args, self.device_id, model, None)\n test_iter = data_loader.DataLoader(self.args, data_loader.load_dataset(self.args, 'test', shuffle=False),\n self.args.batch_size, self.device, shuffle=False, is_test=True)\n trainer.gen_features_vector(test_iter, step)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-encoder\", default='transformer', type=str,\n choices=['classifier', 'transformer', 'rnn', 'baseline'])\n parser.add_argument(\"-mode\", default='train', type=str, choices=['train', 'validate', 'test', 'vector'])\n parser.add_argument(\"-data_name\", default='chinese_summary', help='vy_text')\n parser.add_argument(\"-bert_data_path\", default='./data/bert_data/', help='./data/bert_data/')\n parser.add_argument(\"-model_path\", default='./models/models_check_points/')\n parser.add_argument(\"-result_path\", default='./results/')\n parser.add_argument(\"-temp_dir\", default='./temp/')\n parser.add_argument(\"-bert_pretrained_model_path\", default='./models/pytorch_pretrained_bert/bert_pretrain/')\n parser.add_argument(\"-bert_config_path\", default='./models/pytorch_pretrained_bert/bert_pretrain/bert_config.json')\n\n parser.add_argument(\"-batch_size\", default=1000, type=int)\n\n parser.add_argument(\"-use_interval\", type=str2bool, nargs='?', const=True, default=True)\n parser.add_argument(\"-hidden_size\", default=128, type=int)\n parser.add_argument(\"-ff_size\", default=2048, type=int)\n parser.add_argument(\"-heads\", default=8, type=int)\n parser.add_argument(\"-inter_layers\", default=2, type=int)\n parser.add_argument(\"-rnn_size\", default=512, type=int)\n\n parser.add_argument(\"-param_init\", default=0, type=float)\n parser.add_argument(\"-param_init_glorot\", type=str2bool, nargs='?', const=True, default=True)\n parser.add_argument(\"-dropout\", default=0.1, type=float)\n parser.add_argument(\"-optimizer\", default='adam', type=str)\n parser.add_argument(\"-lr\", default=2e-3, type=float, help='learning rate')\n parser.add_argument(\"-beta1\", default=0.9, type=float)\n parser.add_argument(\"-beta2\", default=0.999, type=float)\n parser.add_argument(\"-decay_method\", default='noam', type=str)\n parser.add_argument(\"-warmup_steps\", default=8000, type=int)\n parser.add_argument(\"-max_grad_norm\", default=0, type=float)\n\n parser.add_argument(\"-save_checkpoint_steps\", default=5000, type=int)\n parser.add_argument(\"-accum_count\", default=2, type=int)\n parser.add_argument(\"-world_size\", default=1, type=int)\n parser.add_argument(\"-report_every\", default=50, type=int)\n parser.add_argument(\"-train_steps\", default=50000, type=int)\n parser.add_argument(\"-recall_eval\", type=str2bool, nargs='?', const=True, default=False)\n\n parser.add_argument('-visible_gpus', default='0', type=str)\n parser.add_argument('-gpu_ranks', default='0', type=str)\n parser.add_argument('-log_file', default='./logs/project.log')\n parser.add_argument('-dataset', default='')\n parser.add_argument('-seed', default=666, type=int)\n\n parser.add_argument(\"-test_all\", type=str2bool, nargs='?', const=True, default=False)\n parser.add_argument(\"-test_from\", default='./models/models_check_points/model_step_50000.pt')\n parser.add_argument(\"-train_from\", default='', help='./models/models_check_points/model_step_45000.pt')\n parser.add_argument(\"-validate_from\", default='../models/models_check_points/model_step_50000.pt')\n parser.add_argument(\"-report_rouge\", type=str2bool, nargs='?', const=True, default=False)\n parser.add_argument(\"-block_trigram\", type=str2bool, nargs='?', const=True, default=True)\n\n parser.add_argument(\"-shuffle_data\", type=str2bool, nargs='?', const=True, default=False)\n parser.add_argument(\"-vy_predict\", type=str2bool, nargs='?', const=False, default=True)\n\n _args = parser.parse_args()\n\n gpu_ranks: str = str(_args.gpu_ranks)\n _args.gpu_ranks = [int(i) for i in gpu_ranks.split(',')]\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = _args.visible_gpus\n\n init_logger(_args.log_file)\n _device = \"cpu\" if _args.visible_gpus == '-1' else \"cuda\"\n _device_id = 0 if _device == \"cuda\" else -1\n\n runner = Running(args=_args, device_id=_device_id)\n multi_runner = MultiRunning(args=_args, device_id=_device_id)\n if _args.world_size > 1:\n multi_runner.multi_card_run()\n elif _args.mode == 'train':\n runner.train()\n elif _args.mode == 'validate':\n runner.wait_and_validate()\n elif _args.mode == 'test':\n runner.test()\n elif _args.mode == 'lead':\n runner.baseline(cal_lead=True)\n elif _args.mode == 'oracle':\n runner.baseline(cal_oracle=True)\n elif _args.mode == 'vector':\n runner.gen_features_vector()\n",
"step-ids": [
7,
16,
17,
18,
24
]
}
|
[
7,
16,
17,
18,
24
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Beach',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=128)),
],
),
migrations.CreateModel(
name='SelectedBeach',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('json_beach', models.ForeignKey(related_name='json', blank=True, to='testapp.Beach', null=True)),
('rest_framework_beach', models.ForeignKey(related_name='rest', blank=True, to='testapp.Beach', null=True)),
('tastypie_beach_contains', models.ForeignKey(related_name='tp_contains', blank=True, to='testapp.Beach', null=True)),
('tastypie_beach_starts', models.ForeignKey(related_name='tp_starts', blank=True, to='testapp.Beach', null=True)),
],
),
]
|
normal
|
{
"blob_id": "9555e5f75e3045afff6da9228764fca542caf539",
"index": 2448,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = []\n operations = [migrations.CreateModel(name='Beach', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('name', models.CharField(max_length=128)\n )]), migrations.CreateModel(name='SelectedBeach', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('json_beach', models.ForeignKey(\n related_name='json', blank=True, to='testapp.Beach', null=True)), (\n 'rest_framework_beach', models.ForeignKey(related_name='rest',\n blank=True, to='testapp.Beach', null=True)), (\n 'tastypie_beach_contains', models.ForeignKey(related_name=\n 'tp_contains', blank=True, to='testapp.Beach', null=True)), (\n 'tastypie_beach_starts', models.ForeignKey(related_name='tp_starts',\n blank=True, to='testapp.Beach', null=True))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = []\n operations = [migrations.CreateModel(name='Beach', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('name', models.CharField(max_length=128)\n )]), migrations.CreateModel(name='SelectedBeach', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('json_beach', models.ForeignKey(\n related_name='json', blank=True, to='testapp.Beach', null=True)), (\n 'rest_framework_beach', models.ForeignKey(related_name='rest',\n blank=True, to='testapp.Beach', null=True)), (\n 'tastypie_beach_contains', models.ForeignKey(related_name=\n 'tp_contains', blank=True, to='testapp.Beach', null=True)), (\n 'tastypie_beach_starts', models.ForeignKey(related_name='tp_starts',\n blank=True, to='testapp.Beach', null=True))])]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Beach',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=128)),\n ],\n ),\n migrations.CreateModel(\n name='SelectedBeach',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('json_beach', models.ForeignKey(related_name='json', blank=True, to='testapp.Beach', null=True)),\n ('rest_framework_beach', models.ForeignKey(related_name='rest', blank=True, to='testapp.Beach', null=True)),\n ('tastypie_beach_contains', models.ForeignKey(related_name='tp_contains', blank=True, to='testapp.Beach', null=True)),\n ('tastypie_beach_starts', models.ForeignKey(related_name='tp_starts', blank=True, to='testapp.Beach', null=True)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
import numpy as np
import random
import copy
class Node(object):
'''
Defines a Node Class for storing characteristics and CPT of each node
'''
def __init__(self,name):
self.parents = []
self.children = []
self.name = name
self.cpt=[]
self.limit = 3
def addParent(self,x):
self.parents.append(x)
def addChild(self,x):
self.children.append(x)
def createCPT(self,data):
cpt = computeProb(data,self.limit,self.parents,self.name)
self.cpt = cpt
def computeProb(data,limit,cols,target):
numCol = len(cols)
if numCol==0:
return(cpt_0(data,limit,cols,target))
elif numCol ==1:
return(cpt_1(data,limit,cols,target))
elif numCol ==2:
return(cpt_2(data,limit,cols,target))
elif numCol ==3:
return(cpt_3(data,limit,cols,target))
else:
return(cpt_4(data,limit,cols,target))
#Functions for computing the Conditional Probability Tables (CPTs)
def cpt_2(data,limit,cols,target):
cpt = []
alpha = 0.001
for var1 in range(limit):
for var2 in range(limit):
totalN = len( data[ (data[cols[0]]==var1) & (data[cols[1]]==var2) ] )
for targetVar in range(limit):
count = len( data[ (data[cols[0]]==var1) & (data[cols[1]]==var2) & (data[target]==targetVar) ] )
if totalN ==0:
cpt.append([var1,var2,targetVar, float(totalN + 3*alpha)])
else:
cpt.append([var1,var2,targetVar, float(count)/float(totalN + 3*alpha)])
cpt = pd.DataFrame(cpt, columns=[cols[0],cols[1],target,'prob'])
return(cpt)
def cpt_1(data,limit,cols,target):
cpt = []
alpha = 0.001
for var1 in range(limit):
totalN = len( data[ (data[cols[0]]==var1)] )
for targetVar in range(limit):
count = len( data[ (data[cols[0]]==var1) & (data[target]==targetVar) ] )
if totalN ==0:
cpt.append([var1,targetVar, float(totalN + 3*alpha)])
else:
cpt.append([var1,targetVar, float(count)/float(totalN + 3*alpha)])
cpt = pd.DataFrame(cpt, columns=[cols[0],target,'prob'])
return(cpt)
def cpt_0(data,limit,cols,target):
alpha = 0.001
cpt = []
totalN = len( data )
for targetVar in range(limit):
count = len( data[ (data[target]==targetVar) ] )
if totalN ==0:
cpt.append([targetVar, alpha/float(totalN + 3*alpha)])
else:
cpt.append([targetVar, float(count)/(totalN + 3*alpha)])
cpt = pd.DataFrame(cpt, columns=[target,'prob'])
return(cpt)
def cpt_3(data,limit,cols,target):
cpt = []
alpha = 0.001
for var1 in range(limit):
for var2 in range(limit):
for var3 in range(limit):
totalN = len( data[ (data[cols[0]]==var1) & (data[cols[1]]==var2) & (data[cols[2]]==var3) ] )
for targetVar in range(limit):
count = len( data[ (data[cols[0]]==var1) & (data[cols[1]]==var2) & (data[cols[2]]==var3) & (data[target]==targetVar) ] )
if totalN ==0:
cpt.append([var1,var2,var3,targetVar, alpha/float(totalN + 3*alpha)])
else:
cpt.append([var1,var2,var3,targetVar, float(count)/float(totalN + 3*alpha)])
cpt = pd.DataFrame(cpt, columns=[cols[0],cols[1],cols[2],target,'prob'])
return(cpt)
def cpt_4(data,limit,cols,target):
cpt = []
alpha = 0.001
for var1 in range(limit):
for var2 in range(limit):
for var3 in range(limit):
for var4 in range(limit):
totalN = len( data[ (data[cols[0]]==var1) & (data[cols[1]]==var2) & (data[cols[2]]==var3) & (data[cols[3]]==var4) ] )
for targetVar in range(limit):
count = len( data[ (data[cols[0]]==var1) & (data[cols[1]]==var2) & (data[cols[2]]==var3) & (data[cols[3]]==var4) & (data[target]==targetVar) ] )
if totalN ==0:
cpt.append([var1,var2,var3,var4,targetVar, alpha/float(totalN + 3*alpha)])
else:
cpt.append([var1,var2,var3,var4,targetVar, float(count)/float(totalN + 3*alpha)])
cpt = pd.DataFrame(cpt, columns=[cols[0],cols[1],cols[2],cols[3],target,'prob'])
return(cpt)
structMap = {0:[1,2],1:[1,3],2:[1,4],3:[2,3],4:[2,4],5:[3,4]} # Mapping of the structure position and the nodes that it connects
class BayesNet(object):
def __init__(self,numNodes,structure):
self.structure = structure # Array that defines the structure of the Bayes Net
self.numNodes = numNodes
self.varNodes={}
self.classNode=0
def initGraph(self):
'''
Initializes components of the Bayes Net Graph
'''
self.classNode = Node('Class')
for i in range(self.numNodes):
self.varNodes['x'+str(i+1)]=Node('x'+str(i+1))
self.varNodes['x'+str(i+1)].parents.append('Class')
for i in range(len(self.structure)):
edgeNodes = structMap[i]
firstNode = 'x'+str(edgeNodes[0])
secondNode = 'x'+str(edgeNodes[1])
if self.structure[i]==1:
self.varNodes[firstNode].children.append(secondNode)
self.varNodes[secondNode].parents.append(firstNode)
elif self.structure[i]==-1:
self.varNodes[secondNode].children.append(firstNode)
self.varNodes[firstNode].parents.append(secondNode)
def compCPT(self,data):
'''
Computes Conditional Probability Table for all the nodes
'''
self.classNode.createCPT(data)
for i in range(len(self.varNodes)):
self.varNodes['x'+str(i+1)].createCPT(data)
def predict(self,data):
'''
Predicts most likely class given a single data sample
'''
maxProb = 0
maxProbClass = 0
for classVal in range(3):
dt = data.copy()
dt["Class"] = classVal
prob = 1.0
for i in range(self.numNodes):
#print('Node is x'+str(i+1))
pt=self.varNodes['x'+str(i+1)].cpt
mergeList = self.varNodes['x'+str(i+1)].parents + ['x'+str(i+1)]
cpt_prob = pd.merge(left=pt,right=dt,on=mergeList,how='inner')['prob'][0]
#print("cpt_prob is ",str(cpt_prob))
prob = cpt_prob*prob
#print("Class :%d Prob : %f"%(classVal,prob))
if prob>maxProb:
maxProb = prob
maxProbClass = classVal
return(maxProbClass)
|
normal
|
{
"blob_id": "eb4bc008b7e68f8a6e80e837fa970d77a5ed3547",
"index": 8218,
"step-1": "<mask token>\n\n\nclass Node(object):\n \"\"\"\n Defines a Node Class for storing characteristics and CPT of each node\n \"\"\"\n\n def __init__(self, name):\n self.parents = []\n self.children = []\n self.name = name\n self.cpt = []\n self.limit = 3\n\n def addParent(self, x):\n self.parents.append(x)\n\n def addChild(self, x):\n self.children.append(x)\n\n def createCPT(self, data):\n cpt = computeProb(data, self.limit, self.parents, self.name)\n self.cpt = cpt\n\n\n<mask token>\n\n\ndef cpt_0(data, limit, cols, target):\n alpha = 0.001\n cpt = []\n totalN = len(data)\n for targetVar in range(limit):\n count = len(data[data[target] == targetVar])\n if totalN == 0:\n cpt.append([targetVar, alpha / float(totalN + 3 * alpha)])\n else:\n cpt.append([targetVar, float(count) / (totalN + 3 * alpha)])\n cpt = pd.DataFrame(cpt, columns=[target, 'prob'])\n return cpt\n\n\n<mask token>\n\n\nclass BayesNet(object):\n\n def __init__(self, numNodes, structure):\n self.structure = structure\n self.numNodes = numNodes\n self.varNodes = {}\n self.classNode = 0\n\n def initGraph(self):\n \"\"\"\n Initializes components of the Bayes Net Graph\n \"\"\"\n self.classNode = Node('Class')\n for i in range(self.numNodes):\n self.varNodes['x' + str(i + 1)] = Node('x' + str(i + 1))\n self.varNodes['x' + str(i + 1)].parents.append('Class')\n for i in range(len(self.structure)):\n edgeNodes = structMap[i]\n firstNode = 'x' + str(edgeNodes[0])\n secondNode = 'x' + str(edgeNodes[1])\n if self.structure[i] == 1:\n self.varNodes[firstNode].children.append(secondNode)\n self.varNodes[secondNode].parents.append(firstNode)\n elif self.structure[i] == -1:\n self.varNodes[secondNode].children.append(firstNode)\n self.varNodes[firstNode].parents.append(secondNode)\n\n def compCPT(self, data):\n \"\"\"\n Computes Conditional Probability Table for all the nodes\n \"\"\"\n self.classNode.createCPT(data)\n for i in range(len(self.varNodes)):\n self.varNodes['x' + str(i + 1)].createCPT(data)\n\n def predict(self, data):\n \"\"\"\n Predicts most likely class given a single data sample\n \n \"\"\"\n maxProb = 0\n maxProbClass = 0\n for classVal in range(3):\n dt = data.copy()\n dt['Class'] = classVal\n prob = 1.0\n for i in range(self.numNodes):\n pt = self.varNodes['x' + str(i + 1)].cpt\n mergeList = self.varNodes['x' + str(i + 1)].parents + ['x' +\n str(i + 1)]\n cpt_prob = pd.merge(left=pt, right=dt, on=mergeList, how=\n 'inner')['prob'][0]\n prob = cpt_prob * prob\n if prob > maxProb:\n maxProb = prob\n maxProbClass = classVal\n return maxProbClass\n",
"step-2": "<mask token>\n\n\nclass Node(object):\n \"\"\"\n Defines a Node Class for storing characteristics and CPT of each node\n \"\"\"\n\n def __init__(self, name):\n self.parents = []\n self.children = []\n self.name = name\n self.cpt = []\n self.limit = 3\n\n def addParent(self, x):\n self.parents.append(x)\n\n def addChild(self, x):\n self.children.append(x)\n\n def createCPT(self, data):\n cpt = computeProb(data, self.limit, self.parents, self.name)\n self.cpt = cpt\n\n\n<mask token>\n\n\ndef cpt_1(data, limit, cols, target):\n cpt = []\n alpha = 0.001\n for var1 in range(limit):\n totalN = len(data[data[cols[0]] == var1])\n for targetVar in range(limit):\n count = len(data[(data[cols[0]] == var1) & (data[target] ==\n targetVar)])\n if totalN == 0:\n cpt.append([var1, targetVar, float(totalN + 3 * alpha)])\n else:\n cpt.append([var1, targetVar, float(count) / float(totalN + \n 3 * alpha)])\n cpt = pd.DataFrame(cpt, columns=[cols[0], target, 'prob'])\n return cpt\n\n\ndef cpt_0(data, limit, cols, target):\n alpha = 0.001\n cpt = []\n totalN = len(data)\n for targetVar in range(limit):\n count = len(data[data[target] == targetVar])\n if totalN == 0:\n cpt.append([targetVar, alpha / float(totalN + 3 * alpha)])\n else:\n cpt.append([targetVar, float(count) / (totalN + 3 * alpha)])\n cpt = pd.DataFrame(cpt, columns=[target, 'prob'])\n return cpt\n\n\n<mask token>\n\n\nclass BayesNet(object):\n\n def __init__(self, numNodes, structure):\n self.structure = structure\n self.numNodes = numNodes\n self.varNodes = {}\n self.classNode = 0\n\n def initGraph(self):\n \"\"\"\n Initializes components of the Bayes Net Graph\n \"\"\"\n self.classNode = Node('Class')\n for i in range(self.numNodes):\n self.varNodes['x' + str(i + 1)] = Node('x' + str(i + 1))\n self.varNodes['x' + str(i + 1)].parents.append('Class')\n for i in range(len(self.structure)):\n edgeNodes = structMap[i]\n firstNode = 'x' + str(edgeNodes[0])\n secondNode = 'x' + str(edgeNodes[1])\n if self.structure[i] == 1:\n self.varNodes[firstNode].children.append(secondNode)\n self.varNodes[secondNode].parents.append(firstNode)\n elif self.structure[i] == -1:\n self.varNodes[secondNode].children.append(firstNode)\n self.varNodes[firstNode].parents.append(secondNode)\n\n def compCPT(self, data):\n \"\"\"\n Computes Conditional Probability Table for all the nodes\n \"\"\"\n self.classNode.createCPT(data)\n for i in range(len(self.varNodes)):\n self.varNodes['x' + str(i + 1)].createCPT(data)\n\n def predict(self, data):\n \"\"\"\n Predicts most likely class given a single data sample\n \n \"\"\"\n maxProb = 0\n maxProbClass = 0\n for classVal in range(3):\n dt = data.copy()\n dt['Class'] = classVal\n prob = 1.0\n for i in range(self.numNodes):\n pt = self.varNodes['x' + str(i + 1)].cpt\n mergeList = self.varNodes['x' + str(i + 1)].parents + ['x' +\n str(i + 1)]\n cpt_prob = pd.merge(left=pt, right=dt, on=mergeList, how=\n 'inner')['prob'][0]\n prob = cpt_prob * prob\n if prob > maxProb:\n maxProb = prob\n maxProbClass = classVal\n return maxProbClass\n",
"step-3": "<mask token>\n\n\nclass Node(object):\n \"\"\"\n Defines a Node Class for storing characteristics and CPT of each node\n \"\"\"\n\n def __init__(self, name):\n self.parents = []\n self.children = []\n self.name = name\n self.cpt = []\n self.limit = 3\n\n def addParent(self, x):\n self.parents.append(x)\n\n def addChild(self, x):\n self.children.append(x)\n\n def createCPT(self, data):\n cpt = computeProb(data, self.limit, self.parents, self.name)\n self.cpt = cpt\n\n\n<mask token>\n\n\ndef cpt_2(data, limit, cols, target):\n cpt = []\n alpha = 0.001\n for var1 in range(limit):\n for var2 in range(limit):\n totalN = len(data[(data[cols[0]] == var1) & (data[cols[1]] ==\n var2)])\n for targetVar in range(limit):\n count = len(data[(data[cols[0]] == var1) & (data[cols[1]] ==\n var2) & (data[target] == targetVar)])\n if totalN == 0:\n cpt.append([var1, var2, targetVar, float(totalN + 3 *\n alpha)])\n else:\n cpt.append([var1, var2, targetVar, float(count) / float\n (totalN + 3 * alpha)])\n cpt = pd.DataFrame(cpt, columns=[cols[0], cols[1], target, 'prob'])\n return cpt\n\n\ndef cpt_1(data, limit, cols, target):\n cpt = []\n alpha = 0.001\n for var1 in range(limit):\n totalN = len(data[data[cols[0]] == var1])\n for targetVar in range(limit):\n count = len(data[(data[cols[0]] == var1) & (data[target] ==\n targetVar)])\n if totalN == 0:\n cpt.append([var1, targetVar, float(totalN + 3 * alpha)])\n else:\n cpt.append([var1, targetVar, float(count) / float(totalN + \n 3 * alpha)])\n cpt = pd.DataFrame(cpt, columns=[cols[0], target, 'prob'])\n return cpt\n\n\ndef cpt_0(data, limit, cols, target):\n alpha = 0.001\n cpt = []\n totalN = len(data)\n for targetVar in range(limit):\n count = len(data[data[target] == targetVar])\n if totalN == 0:\n cpt.append([targetVar, alpha / float(totalN + 3 * alpha)])\n else:\n cpt.append([targetVar, float(count) / (totalN + 3 * alpha)])\n cpt = pd.DataFrame(cpt, columns=[target, 'prob'])\n return cpt\n\n\n<mask token>\n\n\ndef cpt_4(data, limit, cols, target):\n cpt = []\n alpha = 0.001\n for var1 in range(limit):\n for var2 in range(limit):\n for var3 in range(limit):\n for var4 in range(limit):\n totalN = len(data[(data[cols[0]] == var1) & (data[cols[\n 1]] == var2) & (data[cols[2]] == var3) & (data[cols\n [3]] == var4)])\n for targetVar in range(limit):\n count = len(data[(data[cols[0]] == var1) & (data[\n cols[1]] == var2) & (data[cols[2]] == var3) & (\n data[cols[3]] == var4) & (data[target] ==\n targetVar)])\n if totalN == 0:\n cpt.append([var1, var2, var3, var4, targetVar, \n alpha / float(totalN + 3 * alpha)])\n else:\n cpt.append([var1, var2, var3, var4, targetVar, \n float(count) / float(totalN + 3 * alpha)])\n cpt = pd.DataFrame(cpt, columns=[cols[0], cols[1], cols[2], cols[3],\n target, 'prob'])\n return cpt\n\n\n<mask token>\n\n\nclass BayesNet(object):\n\n def __init__(self, numNodes, structure):\n self.structure = structure\n self.numNodes = numNodes\n self.varNodes = {}\n self.classNode = 0\n\n def initGraph(self):\n \"\"\"\n Initializes components of the Bayes Net Graph\n \"\"\"\n self.classNode = Node('Class')\n for i in range(self.numNodes):\n self.varNodes['x' + str(i + 1)] = Node('x' + str(i + 1))\n self.varNodes['x' + str(i + 1)].parents.append('Class')\n for i in range(len(self.structure)):\n edgeNodes = structMap[i]\n firstNode = 'x' + str(edgeNodes[0])\n secondNode = 'x' + str(edgeNodes[1])\n if self.structure[i] == 1:\n self.varNodes[firstNode].children.append(secondNode)\n self.varNodes[secondNode].parents.append(firstNode)\n elif self.structure[i] == -1:\n self.varNodes[secondNode].children.append(firstNode)\n self.varNodes[firstNode].parents.append(secondNode)\n\n def compCPT(self, data):\n \"\"\"\n Computes Conditional Probability Table for all the nodes\n \"\"\"\n self.classNode.createCPT(data)\n for i in range(len(self.varNodes)):\n self.varNodes['x' + str(i + 1)].createCPT(data)\n\n def predict(self, data):\n \"\"\"\n Predicts most likely class given a single data sample\n \n \"\"\"\n maxProb = 0\n maxProbClass = 0\n for classVal in range(3):\n dt = data.copy()\n dt['Class'] = classVal\n prob = 1.0\n for i in range(self.numNodes):\n pt = self.varNodes['x' + str(i + 1)].cpt\n mergeList = self.varNodes['x' + str(i + 1)].parents + ['x' +\n str(i + 1)]\n cpt_prob = pd.merge(left=pt, right=dt, on=mergeList, how=\n 'inner')['prob'][0]\n prob = cpt_prob * prob\n if prob > maxProb:\n maxProb = prob\n maxProbClass = classVal\n return maxProbClass\n",
"step-4": "<mask token>\n\n\nclass Node(object):\n \"\"\"\n Defines a Node Class for storing characteristics and CPT of each node\n \"\"\"\n\n def __init__(self, name):\n self.parents = []\n self.children = []\n self.name = name\n self.cpt = []\n self.limit = 3\n\n def addParent(self, x):\n self.parents.append(x)\n\n def addChild(self, x):\n self.children.append(x)\n\n def createCPT(self, data):\n cpt = computeProb(data, self.limit, self.parents, self.name)\n self.cpt = cpt\n\n\ndef computeProb(data, limit, cols, target):\n numCol = len(cols)\n if numCol == 0:\n return cpt_0(data, limit, cols, target)\n elif numCol == 1:\n return cpt_1(data, limit, cols, target)\n elif numCol == 2:\n return cpt_2(data, limit, cols, target)\n elif numCol == 3:\n return cpt_3(data, limit, cols, target)\n else:\n return cpt_4(data, limit, cols, target)\n\n\ndef cpt_2(data, limit, cols, target):\n cpt = []\n alpha = 0.001\n for var1 in range(limit):\n for var2 in range(limit):\n totalN = len(data[(data[cols[0]] == var1) & (data[cols[1]] ==\n var2)])\n for targetVar in range(limit):\n count = len(data[(data[cols[0]] == var1) & (data[cols[1]] ==\n var2) & (data[target] == targetVar)])\n if totalN == 0:\n cpt.append([var1, var2, targetVar, float(totalN + 3 *\n alpha)])\n else:\n cpt.append([var1, var2, targetVar, float(count) / float\n (totalN + 3 * alpha)])\n cpt = pd.DataFrame(cpt, columns=[cols[0], cols[1], target, 'prob'])\n return cpt\n\n\ndef cpt_1(data, limit, cols, target):\n cpt = []\n alpha = 0.001\n for var1 in range(limit):\n totalN = len(data[data[cols[0]] == var1])\n for targetVar in range(limit):\n count = len(data[(data[cols[0]] == var1) & (data[target] ==\n targetVar)])\n if totalN == 0:\n cpt.append([var1, targetVar, float(totalN + 3 * alpha)])\n else:\n cpt.append([var1, targetVar, float(count) / float(totalN + \n 3 * alpha)])\n cpt = pd.DataFrame(cpt, columns=[cols[0], target, 'prob'])\n return cpt\n\n\ndef cpt_0(data, limit, cols, target):\n alpha = 0.001\n cpt = []\n totalN = len(data)\n for targetVar in range(limit):\n count = len(data[data[target] == targetVar])\n if totalN == 0:\n cpt.append([targetVar, alpha / float(totalN + 3 * alpha)])\n else:\n cpt.append([targetVar, float(count) / (totalN + 3 * alpha)])\n cpt = pd.DataFrame(cpt, columns=[target, 'prob'])\n return cpt\n\n\ndef cpt_3(data, limit, cols, target):\n cpt = []\n alpha = 0.001\n for var1 in range(limit):\n for var2 in range(limit):\n for var3 in range(limit):\n totalN = len(data[(data[cols[0]] == var1) & (data[cols[1]] ==\n var2) & (data[cols[2]] == var3)])\n for targetVar in range(limit):\n count = len(data[(data[cols[0]] == var1) & (data[cols[1\n ]] == var2) & (data[cols[2]] == var3) & (data[\n target] == targetVar)])\n if totalN == 0:\n cpt.append([var1, var2, var3, targetVar, alpha /\n float(totalN + 3 * alpha)])\n else:\n cpt.append([var1, var2, var3, targetVar, float(\n count) / float(totalN + 3 * alpha)])\n cpt = pd.DataFrame(cpt, columns=[cols[0], cols[1], cols[2], target, 'prob']\n )\n return cpt\n\n\ndef cpt_4(data, limit, cols, target):\n cpt = []\n alpha = 0.001\n for var1 in range(limit):\n for var2 in range(limit):\n for var3 in range(limit):\n for var4 in range(limit):\n totalN = len(data[(data[cols[0]] == var1) & (data[cols[\n 1]] == var2) & (data[cols[2]] == var3) & (data[cols\n [3]] == var4)])\n for targetVar in range(limit):\n count = len(data[(data[cols[0]] == var1) & (data[\n cols[1]] == var2) & (data[cols[2]] == var3) & (\n data[cols[3]] == var4) & (data[target] ==\n targetVar)])\n if totalN == 0:\n cpt.append([var1, var2, var3, var4, targetVar, \n alpha / float(totalN + 3 * alpha)])\n else:\n cpt.append([var1, var2, var3, var4, targetVar, \n float(count) / float(totalN + 3 * alpha)])\n cpt = pd.DataFrame(cpt, columns=[cols[0], cols[1], cols[2], cols[3],\n target, 'prob'])\n return cpt\n\n\n<mask token>\n\n\nclass BayesNet(object):\n\n def __init__(self, numNodes, structure):\n self.structure = structure\n self.numNodes = numNodes\n self.varNodes = {}\n self.classNode = 0\n\n def initGraph(self):\n \"\"\"\n Initializes components of the Bayes Net Graph\n \"\"\"\n self.classNode = Node('Class')\n for i in range(self.numNodes):\n self.varNodes['x' + str(i + 1)] = Node('x' + str(i + 1))\n self.varNodes['x' + str(i + 1)].parents.append('Class')\n for i in range(len(self.structure)):\n edgeNodes = structMap[i]\n firstNode = 'x' + str(edgeNodes[0])\n secondNode = 'x' + str(edgeNodes[1])\n if self.structure[i] == 1:\n self.varNodes[firstNode].children.append(secondNode)\n self.varNodes[secondNode].parents.append(firstNode)\n elif self.structure[i] == -1:\n self.varNodes[secondNode].children.append(firstNode)\n self.varNodes[firstNode].parents.append(secondNode)\n\n def compCPT(self, data):\n \"\"\"\n Computes Conditional Probability Table for all the nodes\n \"\"\"\n self.classNode.createCPT(data)\n for i in range(len(self.varNodes)):\n self.varNodes['x' + str(i + 1)].createCPT(data)\n\n def predict(self, data):\n \"\"\"\n Predicts most likely class given a single data sample\n \n \"\"\"\n maxProb = 0\n maxProbClass = 0\n for classVal in range(3):\n dt = data.copy()\n dt['Class'] = classVal\n prob = 1.0\n for i in range(self.numNodes):\n pt = self.varNodes['x' + str(i + 1)].cpt\n mergeList = self.varNodes['x' + str(i + 1)].parents + ['x' +\n str(i + 1)]\n cpt_prob = pd.merge(left=pt, right=dt, on=mergeList, how=\n 'inner')['prob'][0]\n prob = cpt_prob * prob\n if prob > maxProb:\n maxProb = prob\n maxProbClass = classVal\n return maxProbClass\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport random\nimport copy\n\nclass Node(object):\n '''\n Defines a Node Class for storing characteristics and CPT of each node\n '''\n \n def __init__(self,name):\n self.parents = []\n self.children = []\n self.name = name\n self.cpt=[]\n self.limit = 3\n \n def addParent(self,x):\n self.parents.append(x)\n \n def addChild(self,x):\n self.children.append(x)\n \n def createCPT(self,data):\n cpt = computeProb(data,self.limit,self.parents,self.name)\n self.cpt = cpt\n\n\ndef computeProb(data,limit,cols,target):\n \n numCol = len(cols)\n \n if numCol==0:\n return(cpt_0(data,limit,cols,target))\n elif numCol ==1:\n return(cpt_1(data,limit,cols,target))\n elif numCol ==2:\n return(cpt_2(data,limit,cols,target))\n elif numCol ==3:\n return(cpt_3(data,limit,cols,target))\n else:\n return(cpt_4(data,limit,cols,target))\n \n\n#Functions for computing the Conditional Probability Tables (CPTs)\n\ndef cpt_2(data,limit,cols,target):\n \n cpt = []\n alpha = 0.001\n \n for var1 in range(limit):\n for var2 in range(limit):\n \n totalN = len( data[ (data[cols[0]]==var1) & (data[cols[1]]==var2) ] )\n \n for targetVar in range(limit):\n \n count = len( data[ (data[cols[0]]==var1) & (data[cols[1]]==var2) & (data[target]==targetVar) ] )\n if totalN ==0:\n cpt.append([var1,var2,targetVar, float(totalN + 3*alpha)])\n else:\n cpt.append([var1,var2,targetVar, float(count)/float(totalN + 3*alpha)])\n \n cpt = pd.DataFrame(cpt, columns=[cols[0],cols[1],target,'prob'])\n \n return(cpt)\n\ndef cpt_1(data,limit,cols,target):\n \n cpt = []\n alpha = 0.001\n \n for var1 in range(limit):\n \n \n totalN = len( data[ (data[cols[0]]==var1)] )\n \n \n for targetVar in range(limit):\n \n count = len( data[ (data[cols[0]]==var1) & (data[target]==targetVar) ] )\n \n if totalN ==0:\n cpt.append([var1,targetVar, float(totalN + 3*alpha)])\n else:\n cpt.append([var1,targetVar, float(count)/float(totalN + 3*alpha)])\n \n cpt = pd.DataFrame(cpt, columns=[cols[0],target,'prob'])\n \n return(cpt)\n\ndef cpt_0(data,limit,cols,target):\n \n alpha = 0.001\n cpt = []\n \n \n totalN = len( data )\n \n \n for targetVar in range(limit):\n \n count = len( data[ (data[target]==targetVar) ] )\n if totalN ==0:\n cpt.append([targetVar, alpha/float(totalN + 3*alpha)])\n else:\n cpt.append([targetVar, float(count)/(totalN + 3*alpha)])\n \n cpt = pd.DataFrame(cpt, columns=[target,'prob'])\n \n return(cpt)\n\n\ndef cpt_3(data,limit,cols,target):\n \n cpt = []\n alpha = 0.001\n \n for var1 in range(limit):\n for var2 in range(limit):\n for var3 in range(limit):\n \n totalN = len( data[ (data[cols[0]]==var1) & (data[cols[1]]==var2) & (data[cols[2]]==var3) ] )\n\n for targetVar in range(limit):\n\n count = len( data[ (data[cols[0]]==var1) & (data[cols[1]]==var2) & (data[cols[2]]==var3) & (data[target]==targetVar) ] )\n if totalN ==0:\n cpt.append([var1,var2,var3,targetVar, alpha/float(totalN + 3*alpha)])\n else:\n cpt.append([var1,var2,var3,targetVar, float(count)/float(totalN + 3*alpha)])\n \n cpt = pd.DataFrame(cpt, columns=[cols[0],cols[1],cols[2],target,'prob'])\n \n return(cpt)\n\ndef cpt_4(data,limit,cols,target):\n \n cpt = []\n alpha = 0.001\n \n for var1 in range(limit):\n for var2 in range(limit):\n for var3 in range(limit):\n for var4 in range(limit):\n \n totalN = len( data[ (data[cols[0]]==var1) & (data[cols[1]]==var2) & (data[cols[2]]==var3) & (data[cols[3]]==var4) ] )\n\n for targetVar in range(limit):\n\n count = len( data[ (data[cols[0]]==var1) & (data[cols[1]]==var2) & (data[cols[2]]==var3) & (data[cols[3]]==var4) & (data[target]==targetVar) ] )\n if totalN ==0:\n cpt.append([var1,var2,var3,var4,targetVar, alpha/float(totalN + 3*alpha)])\n else:\n cpt.append([var1,var2,var3,var4,targetVar, float(count)/float(totalN + 3*alpha)])\n\n cpt = pd.DataFrame(cpt, columns=[cols[0],cols[1],cols[2],cols[3],target,'prob'])\n \n return(cpt)\n\nstructMap = {0:[1,2],1:[1,3],2:[1,4],3:[2,3],4:[2,4],5:[3,4]} # Mapping of the structure position and the nodes that it connects\n\n\nclass BayesNet(object):\n \n def __init__(self,numNodes,structure):\n self.structure = structure # Array that defines the structure of the Bayes Net\n self.numNodes = numNodes\n self.varNodes={}\n self.classNode=0\n \n \n def initGraph(self):\n '''\n Initializes components of the Bayes Net Graph\n '''\n \n self.classNode = Node('Class')\n \n for i in range(self.numNodes):\n self.varNodes['x'+str(i+1)]=Node('x'+str(i+1))\n self.varNodes['x'+str(i+1)].parents.append('Class')\n \n for i in range(len(self.structure)):\n \n edgeNodes = structMap[i]\n firstNode = 'x'+str(edgeNodes[0])\n secondNode = 'x'+str(edgeNodes[1])\n \n if self.structure[i]==1:\n self.varNodes[firstNode].children.append(secondNode)\n self.varNodes[secondNode].parents.append(firstNode)\n elif self.structure[i]==-1:\n self.varNodes[secondNode].children.append(firstNode)\n self.varNodes[firstNode].parents.append(secondNode)\n \n def compCPT(self,data):\n '''\n Computes Conditional Probability Table for all the nodes\n '''\n \n self.classNode.createCPT(data)\n \n for i in range(len(self.varNodes)):\n self.varNodes['x'+str(i+1)].createCPT(data)\n \n \n def predict(self,data):\n '''\n Predicts most likely class given a single data sample\n \n '''\n maxProb = 0\n maxProbClass = 0\n\n for classVal in range(3):\n\n dt = data.copy()\n dt[\"Class\"] = classVal\n prob = 1.0\n\n for i in range(self.numNodes):\n #print('Node is x'+str(i+1))\n\n pt=self.varNodes['x'+str(i+1)].cpt\n\n mergeList = self.varNodes['x'+str(i+1)].parents + ['x'+str(i+1)]\n\n cpt_prob = pd.merge(left=pt,right=dt,on=mergeList,how='inner')['prob'][0]\n #print(\"cpt_prob is \",str(cpt_prob))\n\n prob = cpt_prob*prob\n\n #print(\"Class :%d Prob : %f\"%(classVal,prob))\n\n if prob>maxProb:\n maxProb = prob\n maxProbClass = classVal\n \n return(maxProbClass)\n\n\n ",
"step-ids": [
12,
13,
15,
17,
20
]
}
|
[
12,
13,
15,
17,
20
] |
<|reserved_special_token_0|>
def run_final_test_days():
sqs = [5]
cams = [1]
permutations = [(True, True, True)]
permutations_names = ['all data perez']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams
=c, clear_sky_label=True)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_TSET GRAD prz' + name_time + name_data + name_cam,
pred_csi=True)
lstm.set_days(data.get_thesis_test_days())
lstm.run_experiment()
def run_lstm_experiment(set='test'):
sqs = [5]
cams = [1]
permutations = [(True, True, False)]
permutations_names = ['pxl_onsite']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
if set == 'test':
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s,
cams=c, clear_sky_label=False)
else:
data.build_ts_df(start, end, [7, 8, 9, 10], s, cams=c,
clear_sky_label=False)
data.scale_mega('lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
if set == 'test':
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_TEST_PXL' + name_epoch + name_time +
name_data + name_cam)
lstm.set_days(data.get_thesis_test_days())
else:
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_PREM2_PXL' + name_epoch + name_time +
name_data + name_cam)
lstm.set_days(data.get_prem_days())
lstm.run_experiment()
<|reserved_special_token_0|>
def optimize():
seq_l = [5]
nodes = [(50, 25, 10)]
activations = ['relu']
opts = ['Adam']
learning_rate = [0.001]
data = DataFrameSequenceMulti(False, True, True, True)
lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')
num = 0
for s in seq_l:
data.build_ts_df(6, 19, [7, 8, 9, 10, 11, 12], s, 1)
data.normalize_mega_df()
data.split_data_set(10, 15)
data.flatten_data_set_to_3d()
for n in nodes:
for a in activations:
for o in opts:
for lr in learning_rate:
if o == 'Adam':
opt = optimizers.Adam(lr=lr)
else:
opt = optimizers.RMSprop(lr=lr)
lstm.set_model(n, a, opt)
out = lstm.train(100)
res.append(out)
settings = 'nodes: ' + str(n) + ' activation: ' + str(a
) + ' optimizer: ' + str(o) + ' lr: ' + str(lr
) + ' seq_l: ' + str(s)
sets.append(settings)
plot_history(settings, num, out)
min_loss.append(min(out.history['loss']))
min_vals.append(min(out.history['val_loss']))
num = num + 1
best_val_loss = min_vals.index(min(min_vals))
print('BEST VAL LOSS: ')
print(sets[best_val_loss])
print('val loss: ' + str(min(min_vals)))
print('epoch: ')
print(res[best_val_loss].history['val_loss'].index(min(res[
best_val_loss].history['val_loss'])))
best_loss = min_loss.index(min(min_loss))
print('BEST Train LOSS: ')
print(sets[best_loss])
print('train loss: ' + str(min(min_loss)))
print('epoch: ')
print(res[best_loss].history['loss'].index(min(res[best_loss].history[
'loss'])))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run_final_all_days():
data = DataFrameSequenceMulti(False, True, True, True)
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(5)
name_data = 'data_' + 'all'
name_epoch = 'epochs_' + str(epochs)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)
lstm.set_days(data.get_all_test_days())
lstm.run_experiment()
def run_final_test_days():
sqs = [5]
cams = [1]
permutations = [(True, True, True)]
permutations_names = ['all data perez']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams
=c, clear_sky_label=True)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_TSET GRAD prz' + name_time + name_data + name_cam,
pred_csi=True)
lstm.set_days(data.get_thesis_test_days())
lstm.run_experiment()
def run_lstm_experiment(set='test'):
sqs = [5]
cams = [1]
permutations = [(True, True, False)]
permutations_names = ['pxl_onsite']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
if set == 'test':
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s,
cams=c, clear_sky_label=False)
else:
data.build_ts_df(start, end, [7, 8, 9, 10], s, cams=c,
clear_sky_label=False)
data.scale_mega('lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
if set == 'test':
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_TEST_PXL' + name_epoch + name_time +
name_data + name_cam)
lstm.set_days(data.get_thesis_test_days())
else:
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_PREM2_PXL' + name_epoch + name_time +
name_data + name_cam)
lstm.set_days(data.get_prem_days())
lstm.run_experiment()
def LSTM_test():
data = DataFrameSequenceMulti(False, True, True, False)
data.load_prev_mega_df()
lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')
data.split_data_set_EXPRMTL(9, 15, 3)
data.scale_mega(model='lstm')
data.flatten_data_set_to_3d()
lstm.get_model()
lstm.train(100)
y_pred, rmse = lstm.predict()
print(rmse)
def optimize():
seq_l = [5]
nodes = [(50, 25, 10)]
activations = ['relu']
opts = ['Adam']
learning_rate = [0.001]
data = DataFrameSequenceMulti(False, True, True, True)
lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')
num = 0
for s in seq_l:
data.build_ts_df(6, 19, [7, 8, 9, 10, 11, 12], s, 1)
data.normalize_mega_df()
data.split_data_set(10, 15)
data.flatten_data_set_to_3d()
for n in nodes:
for a in activations:
for o in opts:
for lr in learning_rate:
if o == 'Adam':
opt = optimizers.Adam(lr=lr)
else:
opt = optimizers.RMSprop(lr=lr)
lstm.set_model(n, a, opt)
out = lstm.train(100)
res.append(out)
settings = 'nodes: ' + str(n) + ' activation: ' + str(a
) + ' optimizer: ' + str(o) + ' lr: ' + str(lr
) + ' seq_l: ' + str(s)
sets.append(settings)
plot_history(settings, num, out)
min_loss.append(min(out.history['loss']))
min_vals.append(min(out.history['val_loss']))
num = num + 1
best_val_loss = min_vals.index(min(min_vals))
print('BEST VAL LOSS: ')
print(sets[best_val_loss])
print('val loss: ' + str(min(min_vals)))
print('epoch: ')
print(res[best_val_loss].history['val_loss'].index(min(res[
best_val_loss].history['val_loss'])))
best_loss = min_loss.index(min(min_loss))
print('BEST Train LOSS: ')
print(sets[best_loss])
print('train loss: ' + str(min(min_loss)))
print('epoch: ')
print(res[best_loss].history['loss'].index(min(res[best_loss].history[
'loss'])))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
epochs = 100
start = 6
end = 18
res = []
sets = []
min_vals = []
min_loss = []
def run_final_all_days():
data = DataFrameSequenceMulti(False, True, True, True)
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(5)
name_data = 'data_' + 'all'
name_epoch = 'epochs_' + str(epochs)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)
lstm.set_days(data.get_all_test_days())
lstm.run_experiment()
def run_final_test_days():
sqs = [5]
cams = [1]
permutations = [(True, True, True)]
permutations_names = ['all data perez']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams
=c, clear_sky_label=True)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_TSET GRAD prz' + name_time + name_data + name_cam,
pred_csi=True)
lstm.set_days(data.get_thesis_test_days())
lstm.run_experiment()
def run_lstm_experiment(set='test'):
sqs = [5]
cams = [1]
permutations = [(True, True, False)]
permutations_names = ['pxl_onsite']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
if set == 'test':
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s,
cams=c, clear_sky_label=False)
else:
data.build_ts_df(start, end, [7, 8, 9, 10], s, cams=c,
clear_sky_label=False)
data.scale_mega('lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
if set == 'test':
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_TEST_PXL' + name_epoch + name_time +
name_data + name_cam)
lstm.set_days(data.get_thesis_test_days())
else:
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_PREM2_PXL' + name_epoch + name_time +
name_data + name_cam)
lstm.set_days(data.get_prem_days())
lstm.run_experiment()
def LSTM_test():
data = DataFrameSequenceMulti(False, True, True, False)
data.load_prev_mega_df()
lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')
data.split_data_set_EXPRMTL(9, 15, 3)
data.scale_mega(model='lstm')
data.flatten_data_set_to_3d()
lstm.get_model()
lstm.train(100)
y_pred, rmse = lstm.predict()
print(rmse)
def optimize():
seq_l = [5]
nodes = [(50, 25, 10)]
activations = ['relu']
opts = ['Adam']
learning_rate = [0.001]
data = DataFrameSequenceMulti(False, True, True, True)
lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')
num = 0
for s in seq_l:
data.build_ts_df(6, 19, [7, 8, 9, 10, 11, 12], s, 1)
data.normalize_mega_df()
data.split_data_set(10, 15)
data.flatten_data_set_to_3d()
for n in nodes:
for a in activations:
for o in opts:
for lr in learning_rate:
if o == 'Adam':
opt = optimizers.Adam(lr=lr)
else:
opt = optimizers.RMSprop(lr=lr)
lstm.set_model(n, a, opt)
out = lstm.train(100)
res.append(out)
settings = 'nodes: ' + str(n) + ' activation: ' + str(a
) + ' optimizer: ' + str(o) + ' lr: ' + str(lr
) + ' seq_l: ' + str(s)
sets.append(settings)
plot_history(settings, num, out)
min_loss.append(min(out.history['loss']))
min_vals.append(min(out.history['val_loss']))
num = num + 1
best_val_loss = min_vals.index(min(min_vals))
print('BEST VAL LOSS: ')
print(sets[best_val_loss])
print('val loss: ' + str(min(min_vals)))
print('epoch: ')
print(res[best_val_loss].history['val_loss'].index(min(res[
best_val_loss].history['val_loss'])))
best_loss = min_loss.index(min(min_loss))
print('BEST Train LOSS: ')
print(sets[best_loss])
print('train loss: ' + str(min(min_loss)))
print('epoch: ')
print(res[best_loss].history['loss'].index(min(res[best_loss].history[
'loss'])))
run_lstm_experiment(set='test')
<|reserved_special_token_1|>
from data.dataframe_sequence_multi import DataFrameSequenceMulti
from metrics import Metrics
from models.models_ts_multi import lstm_model_multi
import threading
import sys
from keras import optimizers
from data.data_helper import plot_history
epochs = 100
start = 6
end = 18
res = []
sets = []
min_vals = []
min_loss = []
def run_final_all_days():
data = DataFrameSequenceMulti(False, True, True, True)
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(5)
name_data = 'data_' + 'all'
name_epoch = 'epochs_' + str(epochs)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)
lstm.set_days(data.get_all_test_days())
lstm.run_experiment()
def run_final_test_days():
sqs = [5]
cams = [1]
permutations = [(True, True, True)]
permutations_names = ['all data perez']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams
=c, clear_sky_label=True)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_TSET GRAD prz' + name_time + name_data + name_cam,
pred_csi=True)
lstm.set_days(data.get_thesis_test_days())
lstm.run_experiment()
def run_lstm_experiment(set='test'):
sqs = [5]
cams = [1]
permutations = [(True, True, False)]
permutations_names = ['pxl_onsite']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
if set == 'test':
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s,
cams=c, clear_sky_label=False)
else:
data.build_ts_df(start, end, [7, 8, 9, 10], s, cams=c,
clear_sky_label=False)
data.scale_mega('lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
if set == 'test':
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_TEST_PXL' + name_epoch + name_time +
name_data + name_cam)
lstm.set_days(data.get_thesis_test_days())
else:
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_PREM2_PXL' + name_epoch + name_time +
name_data + name_cam)
lstm.set_days(data.get_prem_days())
lstm.run_experiment()
def LSTM_test():
data = DataFrameSequenceMulti(False, True, True, False)
data.load_prev_mega_df()
lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')
data.split_data_set_EXPRMTL(9, 15, 3)
data.scale_mega(model='lstm')
data.flatten_data_set_to_3d()
lstm.get_model()
lstm.train(100)
y_pred, rmse = lstm.predict()
print(rmse)
def optimize():
seq_l = [5]
nodes = [(50, 25, 10)]
activations = ['relu']
opts = ['Adam']
learning_rate = [0.001]
data = DataFrameSequenceMulti(False, True, True, True)
lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')
num = 0
for s in seq_l:
data.build_ts_df(6, 19, [7, 8, 9, 10, 11, 12], s, 1)
data.normalize_mega_df()
data.split_data_set(10, 15)
data.flatten_data_set_to_3d()
for n in nodes:
for a in activations:
for o in opts:
for lr in learning_rate:
if o == 'Adam':
opt = optimizers.Adam(lr=lr)
else:
opt = optimizers.RMSprop(lr=lr)
lstm.set_model(n, a, opt)
out = lstm.train(100)
res.append(out)
settings = 'nodes: ' + str(n) + ' activation: ' + str(a
) + ' optimizer: ' + str(o) + ' lr: ' + str(lr
) + ' seq_l: ' + str(s)
sets.append(settings)
plot_history(settings, num, out)
min_loss.append(min(out.history['loss']))
min_vals.append(min(out.history['val_loss']))
num = num + 1
best_val_loss = min_vals.index(min(min_vals))
print('BEST VAL LOSS: ')
print(sets[best_val_loss])
print('val loss: ' + str(min(min_vals)))
print('epoch: ')
print(res[best_val_loss].history['val_loss'].index(min(res[
best_val_loss].history['val_loss'])))
best_loss = min_loss.index(min(min_loss))
print('BEST Train LOSS: ')
print(sets[best_loss])
print('train loss: ' + str(min(min_loss)))
print('epoch: ')
print(res[best_loss].history['loss'].index(min(res[best_loss].history[
'loss'])))
run_lstm_experiment(set='test')
<|reserved_special_token_1|>
from data.dataframe_sequence_multi import DataFrameSequenceMulti
from metrics import Metrics
from models.models_ts_multi import lstm_model_multi
import threading
import sys
from keras import optimizers
from data.data_helper import plot_history
epochs = 100
start = 6
end = 18
res = []
sets = []
min_vals = []
min_loss = []
def run_final_all_days():
# onsite
# data = DataFrameSequenceMulti(False, True, False, False)
# onsite & img
# data = DataFrameSequenceMulti(False, True, True, False)
# all data
data = DataFrameSequenceMulti(False, True, True, True)
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(5)
name_data = 'data_' + 'all'
name_epoch = 'epochs_' + str(epochs)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)
lstm.set_days(data.get_all_test_days())
lstm.run_experiment()
def run_final_test_days():
# sqs = [5, 10]
sqs=[5]
cams = [1]
permutations = [(True,True,True)]
# permutations = [(True, True, True), (True, False, False), (False, True, False)]
# permutations_names = ['all data', 'onsite_only', 'img only']
permutations_names = ['all data perez']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams=c, clear_sky_label=True)
# data.normalize_mega_df()
data.scale_mega(model='lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
lstm = lstm_model_multi.LSTM_predictor(data, epochs,
'LSTM_TSET GRAD prz' + name_time + name_data + name_cam, pred_csi=True)
lstm.set_days(data.get_thesis_test_days())
lstm.run_experiment()
def run_lstm_experiment(set='test'):
sqs = [5]
cams = [1]
permutations = [(True, True, False)]
permutations_names = ['pxl_onsite']
for pidx, p in enumerate(permutations):
for s in sqs:
for c in cams:
data = DataFrameSequenceMulti(False, p[0], p[1], p[2])
if set == 'test':
# data.load_prev_mega_df()
data.build_ts_df(start, end, [7,8,9,10,11,12], s, cams=c, clear_sky_label=False)
# data.save_df()
else:
data.build_ts_df(start, end, [7,8,9,10], s, cams=c, clear_sky_label=False)
data.scale_mega('lstm')
name_time = '_sqnc_' + str(s)
name_data = 'data_' + permutations_names[pidx]
name_epoch = '_epochs_' + str(epochs)
name_cam = '_cams_' + str(c)
if set == 'test':
lstm = lstm_model_multi.LSTM_predictor(data, 100,
'LSTM_TEST_PXL' + name_epoch + name_time + name_data + name_cam)
lstm.set_days(data.get_thesis_test_days())
else:
lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_PREM2_PXL' + name_epoch + name_time + name_data + name_cam)
lstm.set_days(data.get_prem_days())
lstm.run_experiment()
def LSTM_test():
data = DataFrameSequenceMulti(False, True, True, False)
# data.build_ts_df(6, 19, [7,8,9,10], 5)
data.load_prev_mega_df()
lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')
data.split_data_set_EXPRMTL(9, 15, 3)
data.scale_mega(model='lstm')
data.flatten_data_set_to_3d()
lstm.get_model()
lstm.train(100)
y_pred, rmse = lstm.predict()
# plot_history('s1', 1, lstm.history)
# import matplotlib.pyplot as plt
# from matplotlib.lines import lineStyles
# plt.plot(lstm.history.history['loss'])
# plt.plot(lstm.history.history['val_loss'], linestyle=':')
# ymin = min(lstm.history.history['val_loss'])
# xpos = lstm.history.history['val_loss'].index(ymin)
# xmin = lstm.history.history['val_loss'][xpos]
# plt.annotate('Minimum validation loss', size=20, xy=(xpos, ymin), xytext=(xpos, ymin + 30000),
# arrowprops=dict(facecolor='black', shrink=0.05, width=5, headwidth=20),
# horizontalalignment='center', verticalalignment='top',
# )
# plt.ylim(0, 100000)
# plt.title('LSTM M 5 all data', size=20)
# plt.ylabel('Mean squared error', size=20)
# plt.xlabel('Epochs', size=20)
# plt.legend(['train', 'validation'], loc='upper left')
# plt.show()
#
# Metrics.write_results_multi('LSTM_TEST_MULTI', data.test_x_df.reshape(
# (data.test_x_df.shape[0],
# data.sequence_len_minutes,
# data.number_of_features)),
# data.test_y_df, y_pred)
print(rmse)
def optimize():
# data.build_ts_df(6, 19, [8, 9, 10,11,12], 10, cams=1, clear_sky_label=False)
# data.normalize_mega_df()
# data.split_data_set(10,15)
# data.flatten_data_set_to_3d()
#
# seq_l = [3,5,10]
# nodes = [(50,25,10),(60,30,15),(80,40,20)]
# activations = ['relu', 'sigmoid']
# opts = ['Adam', 'RMSprop']
# learning_rate = [0.001, 0.01, 0.1]
seq_l = [5]
nodes = [(50,25,10)]
activations = ['relu']
opts = ['Adam']
learning_rate = [0.001]
data = DataFrameSequenceMulti(False, True, True, True)
lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')
num = 0
for s in seq_l:
data.build_ts_df(6, 19, [7,8,9,10,11,12], s, 1)
data.normalize_mega_df()
data.split_data_set(10, 15)
data.flatten_data_set_to_3d()
for n in nodes:
for a in activations:
for o in opts:
for lr in learning_rate:
if o == 'Adam':
opt = optimizers.Adam(lr=lr)
else:
opt = optimizers.RMSprop(lr=lr)
lstm.set_model(n, a, opt)
out = lstm.train(100)
res.append(out)
settings = 'nodes: ' + str(n) + ' activation: ' + str(a) + ' optimizer: ' + str(o) + ' lr: ' + str(lr) + " seq_l: " + str(s)
sets.append(settings)
plot_history(settings, num, out)
min_loss.append(min(out.history['loss']))
min_vals.append(min(out.history['val_loss']))
num = num + 1
best_val_loss = min_vals.index(min(min_vals))
print('BEST VAL LOSS: ')
print(sets[best_val_loss])
print('val loss: ' + str(min(min_vals)))
print('epoch: ')
print(res[best_val_loss].history['val_loss'].index(min(res[best_val_loss].history['val_loss'])))
best_loss = min_loss.index(min(min_loss))
print('BEST Train LOSS: ')
print(sets[best_loss])
print('train loss: ' + str(min(min_loss)))
print('epoch: ')
print(res[best_loss].history['loss'].index(min(res[best_loss].history['loss'])))
run_lstm_experiment(set='test')
# run_final_test_days()
# run_final_all_days()
# LSTM_test()
|
flexible
|
{
"blob_id": "af903feda57e4ace0c7f909abbeb86bb9a7e4d8c",
"index": 1806,
"step-1": "<mask token>\n\n\ndef run_final_test_days():\n sqs = [5]\n cams = [1]\n permutations = [(True, True, True)]\n permutations_names = ['all data perez']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams\n =c, clear_sky_label=True)\n data.scale_mega(model='lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n lstm = lstm_model_multi.LSTM_predictor(data, epochs, \n 'LSTM_TSET GRAD prz' + name_time + name_data + name_cam,\n pred_csi=True)\n lstm.set_days(data.get_thesis_test_days())\n lstm.run_experiment()\n\n\ndef run_lstm_experiment(set='test'):\n sqs = [5]\n cams = [1]\n permutations = [(True, True, False)]\n permutations_names = ['pxl_onsite']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n if set == 'test':\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s,\n cams=c, clear_sky_label=False)\n else:\n data.build_ts_df(start, end, [7, 8, 9, 10], s, cams=c,\n clear_sky_label=False)\n data.scale_mega('lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n if set == 'test':\n lstm = lstm_model_multi.LSTM_predictor(data, 100, \n 'LSTM_TEST_PXL' + name_epoch + name_time +\n name_data + name_cam)\n lstm.set_days(data.get_thesis_test_days())\n else:\n lstm = lstm_model_multi.LSTM_predictor(data, 100, \n 'LSTM_PREM2_PXL' + name_epoch + name_time +\n name_data + name_cam)\n lstm.set_days(data.get_prem_days())\n lstm.run_experiment()\n\n\n<mask token>\n\n\ndef optimize():\n seq_l = [5]\n nodes = [(50, 25, 10)]\n activations = ['relu']\n opts = ['Adam']\n learning_rate = [0.001]\n data = DataFrameSequenceMulti(False, True, True, True)\n lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')\n num = 0\n for s in seq_l:\n data.build_ts_df(6, 19, [7, 8, 9, 10, 11, 12], s, 1)\n data.normalize_mega_df()\n data.split_data_set(10, 15)\n data.flatten_data_set_to_3d()\n for n in nodes:\n for a in activations:\n for o in opts:\n for lr in learning_rate:\n if o == 'Adam':\n opt = optimizers.Adam(lr=lr)\n else:\n opt = optimizers.RMSprop(lr=lr)\n lstm.set_model(n, a, opt)\n out = lstm.train(100)\n res.append(out)\n settings = 'nodes: ' + str(n) + ' activation: ' + str(a\n ) + ' optimizer: ' + str(o) + ' lr: ' + str(lr\n ) + ' seq_l: ' + str(s)\n sets.append(settings)\n plot_history(settings, num, out)\n min_loss.append(min(out.history['loss']))\n min_vals.append(min(out.history['val_loss']))\n num = num + 1\n best_val_loss = min_vals.index(min(min_vals))\n print('BEST VAL LOSS: ')\n print(sets[best_val_loss])\n print('val loss: ' + str(min(min_vals)))\n print('epoch: ')\n print(res[best_val_loss].history['val_loss'].index(min(res[\n best_val_loss].history['val_loss'])))\n best_loss = min_loss.index(min(min_loss))\n print('BEST Train LOSS: ')\n print(sets[best_loss])\n print('train loss: ' + str(min(min_loss)))\n print('epoch: ')\n print(res[best_loss].history['loss'].index(min(res[best_loss].history[\n 'loss'])))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_final_all_days():\n data = DataFrameSequenceMulti(False, True, True, True)\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)\n data.scale_mega(model='lstm')\n name_time = '_sqnc_' + str(5)\n name_data = 'data_' + 'all'\n name_epoch = 'epochs_' + str(epochs)\n lstm = lstm_model_multi.LSTM_predictor(data, epochs, \n 'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)\n lstm.set_days(data.get_all_test_days())\n lstm.run_experiment()\n\n\ndef run_final_test_days():\n sqs = [5]\n cams = [1]\n permutations = [(True, True, True)]\n permutations_names = ['all data perez']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams\n =c, clear_sky_label=True)\n data.scale_mega(model='lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n lstm = lstm_model_multi.LSTM_predictor(data, epochs, \n 'LSTM_TSET GRAD prz' + name_time + name_data + name_cam,\n pred_csi=True)\n lstm.set_days(data.get_thesis_test_days())\n lstm.run_experiment()\n\n\ndef run_lstm_experiment(set='test'):\n sqs = [5]\n cams = [1]\n permutations = [(True, True, False)]\n permutations_names = ['pxl_onsite']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n if set == 'test':\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s,\n cams=c, clear_sky_label=False)\n else:\n data.build_ts_df(start, end, [7, 8, 9, 10], s, cams=c,\n clear_sky_label=False)\n data.scale_mega('lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n if set == 'test':\n lstm = lstm_model_multi.LSTM_predictor(data, 100, \n 'LSTM_TEST_PXL' + name_epoch + name_time +\n name_data + name_cam)\n lstm.set_days(data.get_thesis_test_days())\n else:\n lstm = lstm_model_multi.LSTM_predictor(data, 100, \n 'LSTM_PREM2_PXL' + name_epoch + name_time +\n name_data + name_cam)\n lstm.set_days(data.get_prem_days())\n lstm.run_experiment()\n\n\ndef LSTM_test():\n data = DataFrameSequenceMulti(False, True, True, False)\n data.load_prev_mega_df()\n lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')\n data.split_data_set_EXPRMTL(9, 15, 3)\n data.scale_mega(model='lstm')\n data.flatten_data_set_to_3d()\n lstm.get_model()\n lstm.train(100)\n y_pred, rmse = lstm.predict()\n print(rmse)\n\n\ndef optimize():\n seq_l = [5]\n nodes = [(50, 25, 10)]\n activations = ['relu']\n opts = ['Adam']\n learning_rate = [0.001]\n data = DataFrameSequenceMulti(False, True, True, True)\n lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')\n num = 0\n for s in seq_l:\n data.build_ts_df(6, 19, [7, 8, 9, 10, 11, 12], s, 1)\n data.normalize_mega_df()\n data.split_data_set(10, 15)\n data.flatten_data_set_to_3d()\n for n in nodes:\n for a in activations:\n for o in opts:\n for lr in learning_rate:\n if o == 'Adam':\n opt = optimizers.Adam(lr=lr)\n else:\n opt = optimizers.RMSprop(lr=lr)\n lstm.set_model(n, a, opt)\n out = lstm.train(100)\n res.append(out)\n settings = 'nodes: ' + str(n) + ' activation: ' + str(a\n ) + ' optimizer: ' + str(o) + ' lr: ' + str(lr\n ) + ' seq_l: ' + str(s)\n sets.append(settings)\n plot_history(settings, num, out)\n min_loss.append(min(out.history['loss']))\n min_vals.append(min(out.history['val_loss']))\n num = num + 1\n best_val_loss = min_vals.index(min(min_vals))\n print('BEST VAL LOSS: ')\n print(sets[best_val_loss])\n print('val loss: ' + str(min(min_vals)))\n print('epoch: ')\n print(res[best_val_loss].history['val_loss'].index(min(res[\n best_val_loss].history['val_loss'])))\n best_loss = min_loss.index(min(min_loss))\n print('BEST Train LOSS: ')\n print(sets[best_loss])\n print('train loss: ' + str(min(min_loss)))\n print('epoch: ')\n print(res[best_loss].history['loss'].index(min(res[best_loss].history[\n 'loss'])))\n\n\n<mask token>\n",
"step-3": "<mask token>\nepochs = 100\nstart = 6\nend = 18\nres = []\nsets = []\nmin_vals = []\nmin_loss = []\n\n\ndef run_final_all_days():\n data = DataFrameSequenceMulti(False, True, True, True)\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)\n data.scale_mega(model='lstm')\n name_time = '_sqnc_' + str(5)\n name_data = 'data_' + 'all'\n name_epoch = 'epochs_' + str(epochs)\n lstm = lstm_model_multi.LSTM_predictor(data, epochs, \n 'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)\n lstm.set_days(data.get_all_test_days())\n lstm.run_experiment()\n\n\ndef run_final_test_days():\n sqs = [5]\n cams = [1]\n permutations = [(True, True, True)]\n permutations_names = ['all data perez']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams\n =c, clear_sky_label=True)\n data.scale_mega(model='lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n lstm = lstm_model_multi.LSTM_predictor(data, epochs, \n 'LSTM_TSET GRAD prz' + name_time + name_data + name_cam,\n pred_csi=True)\n lstm.set_days(data.get_thesis_test_days())\n lstm.run_experiment()\n\n\ndef run_lstm_experiment(set='test'):\n sqs = [5]\n cams = [1]\n permutations = [(True, True, False)]\n permutations_names = ['pxl_onsite']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n if set == 'test':\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s,\n cams=c, clear_sky_label=False)\n else:\n data.build_ts_df(start, end, [7, 8, 9, 10], s, cams=c,\n clear_sky_label=False)\n data.scale_mega('lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n if set == 'test':\n lstm = lstm_model_multi.LSTM_predictor(data, 100, \n 'LSTM_TEST_PXL' + name_epoch + name_time +\n name_data + name_cam)\n lstm.set_days(data.get_thesis_test_days())\n else:\n lstm = lstm_model_multi.LSTM_predictor(data, 100, \n 'LSTM_PREM2_PXL' + name_epoch + name_time +\n name_data + name_cam)\n lstm.set_days(data.get_prem_days())\n lstm.run_experiment()\n\n\ndef LSTM_test():\n data = DataFrameSequenceMulti(False, True, True, False)\n data.load_prev_mega_df()\n lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')\n data.split_data_set_EXPRMTL(9, 15, 3)\n data.scale_mega(model='lstm')\n data.flatten_data_set_to_3d()\n lstm.get_model()\n lstm.train(100)\n y_pred, rmse = lstm.predict()\n print(rmse)\n\n\ndef optimize():\n seq_l = [5]\n nodes = [(50, 25, 10)]\n activations = ['relu']\n opts = ['Adam']\n learning_rate = [0.001]\n data = DataFrameSequenceMulti(False, True, True, True)\n lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')\n num = 0\n for s in seq_l:\n data.build_ts_df(6, 19, [7, 8, 9, 10, 11, 12], s, 1)\n data.normalize_mega_df()\n data.split_data_set(10, 15)\n data.flatten_data_set_to_3d()\n for n in nodes:\n for a in activations:\n for o in opts:\n for lr in learning_rate:\n if o == 'Adam':\n opt = optimizers.Adam(lr=lr)\n else:\n opt = optimizers.RMSprop(lr=lr)\n lstm.set_model(n, a, opt)\n out = lstm.train(100)\n res.append(out)\n settings = 'nodes: ' + str(n) + ' activation: ' + str(a\n ) + ' optimizer: ' + str(o) + ' lr: ' + str(lr\n ) + ' seq_l: ' + str(s)\n sets.append(settings)\n plot_history(settings, num, out)\n min_loss.append(min(out.history['loss']))\n min_vals.append(min(out.history['val_loss']))\n num = num + 1\n best_val_loss = min_vals.index(min(min_vals))\n print('BEST VAL LOSS: ')\n print(sets[best_val_loss])\n print('val loss: ' + str(min(min_vals)))\n print('epoch: ')\n print(res[best_val_loss].history['val_loss'].index(min(res[\n best_val_loss].history['val_loss'])))\n best_loss = min_loss.index(min(min_loss))\n print('BEST Train LOSS: ')\n print(sets[best_loss])\n print('train loss: ' + str(min(min_loss)))\n print('epoch: ')\n print(res[best_loss].history['loss'].index(min(res[best_loss].history[\n 'loss'])))\n\n\nrun_lstm_experiment(set='test')\n",
"step-4": "from data.dataframe_sequence_multi import DataFrameSequenceMulti\nfrom metrics import Metrics\nfrom models.models_ts_multi import lstm_model_multi\nimport threading\nimport sys\nfrom keras import optimizers\nfrom data.data_helper import plot_history\nepochs = 100\nstart = 6\nend = 18\nres = []\nsets = []\nmin_vals = []\nmin_loss = []\n\n\ndef run_final_all_days():\n data = DataFrameSequenceMulti(False, True, True, True)\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)\n data.scale_mega(model='lstm')\n name_time = '_sqnc_' + str(5)\n name_data = 'data_' + 'all'\n name_epoch = 'epochs_' + str(epochs)\n lstm = lstm_model_multi.LSTM_predictor(data, epochs, \n 'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)\n lstm.set_days(data.get_all_test_days())\n lstm.run_experiment()\n\n\ndef run_final_test_days():\n sqs = [5]\n cams = [1]\n permutations = [(True, True, True)]\n permutations_names = ['all data perez']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams\n =c, clear_sky_label=True)\n data.scale_mega(model='lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n lstm = lstm_model_multi.LSTM_predictor(data, epochs, \n 'LSTM_TSET GRAD prz' + name_time + name_data + name_cam,\n pred_csi=True)\n lstm.set_days(data.get_thesis_test_days())\n lstm.run_experiment()\n\n\ndef run_lstm_experiment(set='test'):\n sqs = [5]\n cams = [1]\n permutations = [(True, True, False)]\n permutations_names = ['pxl_onsite']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n if set == 'test':\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s,\n cams=c, clear_sky_label=False)\n else:\n data.build_ts_df(start, end, [7, 8, 9, 10], s, cams=c,\n clear_sky_label=False)\n data.scale_mega('lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n if set == 'test':\n lstm = lstm_model_multi.LSTM_predictor(data, 100, \n 'LSTM_TEST_PXL' + name_epoch + name_time +\n name_data + name_cam)\n lstm.set_days(data.get_thesis_test_days())\n else:\n lstm = lstm_model_multi.LSTM_predictor(data, 100, \n 'LSTM_PREM2_PXL' + name_epoch + name_time +\n name_data + name_cam)\n lstm.set_days(data.get_prem_days())\n lstm.run_experiment()\n\n\ndef LSTM_test():\n data = DataFrameSequenceMulti(False, True, True, False)\n data.load_prev_mega_df()\n lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')\n data.split_data_set_EXPRMTL(9, 15, 3)\n data.scale_mega(model='lstm')\n data.flatten_data_set_to_3d()\n lstm.get_model()\n lstm.train(100)\n y_pred, rmse = lstm.predict()\n print(rmse)\n\n\ndef optimize():\n seq_l = [5]\n nodes = [(50, 25, 10)]\n activations = ['relu']\n opts = ['Adam']\n learning_rate = [0.001]\n data = DataFrameSequenceMulti(False, True, True, True)\n lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')\n num = 0\n for s in seq_l:\n data.build_ts_df(6, 19, [7, 8, 9, 10, 11, 12], s, 1)\n data.normalize_mega_df()\n data.split_data_set(10, 15)\n data.flatten_data_set_to_3d()\n for n in nodes:\n for a in activations:\n for o in opts:\n for lr in learning_rate:\n if o == 'Adam':\n opt = optimizers.Adam(lr=lr)\n else:\n opt = optimizers.RMSprop(lr=lr)\n lstm.set_model(n, a, opt)\n out = lstm.train(100)\n res.append(out)\n settings = 'nodes: ' + str(n) + ' activation: ' + str(a\n ) + ' optimizer: ' + str(o) + ' lr: ' + str(lr\n ) + ' seq_l: ' + str(s)\n sets.append(settings)\n plot_history(settings, num, out)\n min_loss.append(min(out.history['loss']))\n min_vals.append(min(out.history['val_loss']))\n num = num + 1\n best_val_loss = min_vals.index(min(min_vals))\n print('BEST VAL LOSS: ')\n print(sets[best_val_loss])\n print('val loss: ' + str(min(min_vals)))\n print('epoch: ')\n print(res[best_val_loss].history['val_loss'].index(min(res[\n best_val_loss].history['val_loss'])))\n best_loss = min_loss.index(min(min_loss))\n print('BEST Train LOSS: ')\n print(sets[best_loss])\n print('train loss: ' + str(min(min_loss)))\n print('epoch: ')\n print(res[best_loss].history['loss'].index(min(res[best_loss].history[\n 'loss'])))\n\n\nrun_lstm_experiment(set='test')\n",
"step-5": "from data.dataframe_sequence_multi import DataFrameSequenceMulti\nfrom metrics import Metrics\nfrom models.models_ts_multi import lstm_model_multi\nimport threading\nimport sys\nfrom keras import optimizers\nfrom data.data_helper import plot_history\n\nepochs = 100\nstart = 6\nend = 18\n\nres = []\nsets = []\nmin_vals = []\nmin_loss = []\n\ndef run_final_all_days():\n # onsite\n # data = DataFrameSequenceMulti(False, True, False, False)\n # onsite & img\n # data = DataFrameSequenceMulti(False, True, True, False)\n # all data\n data = DataFrameSequenceMulti(False, True, True, True)\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], 5)\n data.scale_mega(model='lstm')\n\n name_time = '_sqnc_' + str(5)\n name_data = 'data_' + 'all'\n name_epoch = 'epochs_' + str(epochs)\n\n lstm = lstm_model_multi.LSTM_predictor(data, epochs,\n 'LSTM_SEQUENCE_MULTI alldays' + name_epoch + name_time + name_data)\n lstm.set_days(data.get_all_test_days())\n lstm.run_experiment()\n\ndef run_final_test_days():\n # sqs = [5, 10]\n sqs=[5]\n cams = [1]\n permutations = [(True,True,True)]\n # permutations = [(True, True, True), (True, False, False), (False, True, False)]\n # permutations_names = ['all data', 'onsite_only', 'img only']\n permutations_names = ['all data perez']\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n data.build_ts_df(start, end, [7, 8, 9, 10, 11, 12], s, cams=c, clear_sky_label=True)\n # data.normalize_mega_df()\n data.scale_mega(model='lstm')\n\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n\n lstm = lstm_model_multi.LSTM_predictor(data, epochs,\n 'LSTM_TSET GRAD prz' + name_time + name_data + name_cam, pred_csi=True)\n lstm.set_days(data.get_thesis_test_days())\n lstm.run_experiment()\n\n\ndef run_lstm_experiment(set='test'):\n sqs = [5]\n cams = [1]\n permutations = [(True, True, False)]\n permutations_names = ['pxl_onsite']\n\n for pidx, p in enumerate(permutations):\n for s in sqs:\n for c in cams:\n data = DataFrameSequenceMulti(False, p[0], p[1], p[2])\n if set == 'test':\n # data.load_prev_mega_df()\n data.build_ts_df(start, end, [7,8,9,10,11,12], s, cams=c, clear_sky_label=False)\n # data.save_df()\n else:\n data.build_ts_df(start, end, [7,8,9,10], s, cams=c, clear_sky_label=False)\n data.scale_mega('lstm')\n name_time = '_sqnc_' + str(s)\n name_data = 'data_' + permutations_names[pidx]\n name_epoch = '_epochs_' + str(epochs)\n name_cam = '_cams_' + str(c)\n if set == 'test':\n lstm = lstm_model_multi.LSTM_predictor(data, 100,\n 'LSTM_TEST_PXL' + name_epoch + name_time + name_data + name_cam)\n lstm.set_days(data.get_thesis_test_days())\n else:\n lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_PREM2_PXL' + name_epoch + name_time + name_data + name_cam)\n lstm.set_days(data.get_prem_days())\n\n lstm.run_experiment()\n\n\ndef LSTM_test():\n data = DataFrameSequenceMulti(False, True, True, False)\n # data.build_ts_df(6, 19, [7,8,9,10], 5)\n data.load_prev_mega_df()\n lstm = lstm_model_multi.LSTM_predictor(data, 100, 'LSTM_TEST')\n data.split_data_set_EXPRMTL(9, 15, 3)\n data.scale_mega(model='lstm')\n data.flatten_data_set_to_3d()\n\n lstm.get_model()\n lstm.train(100)\n y_pred, rmse = lstm.predict()\n # plot_history('s1', 1, lstm.history)\n\n # import matplotlib.pyplot as plt\n # from matplotlib.lines import lineStyles\n # plt.plot(lstm.history.history['loss'])\n # plt.plot(lstm.history.history['val_loss'], linestyle=':')\n # ymin = min(lstm.history.history['val_loss'])\n # xpos = lstm.history.history['val_loss'].index(ymin)\n # xmin = lstm.history.history['val_loss'][xpos]\n # plt.annotate('Minimum validation loss', size=20, xy=(xpos, ymin), xytext=(xpos, ymin + 30000),\n # arrowprops=dict(facecolor='black', shrink=0.05, width=5, headwidth=20),\n # horizontalalignment='center', verticalalignment='top',\n # )\n # plt.ylim(0, 100000)\n # plt.title('LSTM M 5 all data', size=20)\n # plt.ylabel('Mean squared error', size=20)\n # plt.xlabel('Epochs', size=20)\n # plt.legend(['train', 'validation'], loc='upper left')\n # plt.show()\n #\n # Metrics.write_results_multi('LSTM_TEST_MULTI', data.test_x_df.reshape(\n # (data.test_x_df.shape[0],\n # data.sequence_len_minutes,\n # data.number_of_features)),\n # data.test_y_df, y_pred)\n\n print(rmse)\n\ndef optimize():\n # data.build_ts_df(6, 19, [8, 9, 10,11,12], 10, cams=1, clear_sky_label=False)\n # data.normalize_mega_df()\n # data.split_data_set(10,15)\n # data.flatten_data_set_to_3d()\n #\n # seq_l = [3,5,10]\n # nodes = [(50,25,10),(60,30,15),(80,40,20)]\n # activations = ['relu', 'sigmoid']\n # opts = ['Adam', 'RMSprop']\n # learning_rate = [0.001, 0.01, 0.1]\n\n\n seq_l = [5]\n nodes = [(50,25,10)]\n activations = ['relu']\n opts = ['Adam']\n learning_rate = [0.001]\n\n data = DataFrameSequenceMulti(False, True, True, True)\n lstm = lstm_model_multi.LSTM_predictor(data, 50, 'LSTM_TEST')\n num = 0\n for s in seq_l:\n data.build_ts_df(6, 19, [7,8,9,10,11,12], s, 1)\n data.normalize_mega_df()\n data.split_data_set(10, 15)\n data.flatten_data_set_to_3d()\n for n in nodes:\n for a in activations:\n for o in opts:\n for lr in learning_rate:\n\n if o == 'Adam':\n opt = optimizers.Adam(lr=lr)\n else:\n opt = optimizers.RMSprop(lr=lr)\n\n lstm.set_model(n, a, opt)\n out = lstm.train(100)\n res.append(out)\n settings = 'nodes: ' + str(n) + ' activation: ' + str(a) + ' optimizer: ' + str(o) + ' lr: ' + str(lr) + \" seq_l: \" + str(s)\n sets.append(settings)\n plot_history(settings, num, out)\n min_loss.append(min(out.history['loss']))\n min_vals.append(min(out.history['val_loss']))\n num = num + 1\n\n best_val_loss = min_vals.index(min(min_vals))\n print('BEST VAL LOSS: ')\n print(sets[best_val_loss])\n print('val loss: ' + str(min(min_vals)))\n print('epoch: ')\n print(res[best_val_loss].history['val_loss'].index(min(res[best_val_loss].history['val_loss'])))\n\n best_loss = min_loss.index(min(min_loss))\n print('BEST Train LOSS: ')\n print(sets[best_loss])\n print('train loss: ' + str(min(min_loss)))\n print('epoch: ')\n print(res[best_loss].history['loss'].index(min(res[best_loss].history['loss'])))\n\nrun_lstm_experiment(set='test')\n# run_final_test_days()\n# run_final_all_days()\n# LSTM_test()",
"step-ids": [
3,
5,
7,
8,
9
]
}
|
[
3,
5,
7,
8,
9
] |
import tensorflow as tf
from typing import Optional, Tuple, Union, Callable
_data_augmentation = tf.keras.Sequential(
[
tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal"),
tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
]
)
def _freeze_model(
model: tf.keras.Model,
freeze: Union[bool, int, float] = False,
):
# Obs:
# When you set layer.trainable = False, the BatchNormalization layer will
# run in inference mode, and will not update its mean and variance statistics
# https://www.tensorflow.org/tutorials/images/transfer_learning#important_note_about_batchnormalization_layers
if isinstance(freeze, int):
freeze_len = freeze
elif isinstance(freeze, float):
freeze_len = int(freeze * len(model.layers))
else: # isinstance(freeze, bool):
if freeze:
freeze_len = len(model.layers)
else:
freeze_len = 0
if freeze_len != len(model.layers):
model.trainable = True
for layer in model.layers[:freeze_len]:
layer.trainable = False
def generate_model(
base_model: tf.keras.Model,
img_shape: Tuple[Optional[int], Optional[int], Optional[int]],
freeze: Union[bool, int, float] = False,
preprocess_input: Optional[Callable] = None,
use_data_augmentation: bool = True,
):
inputs = tf.keras.layers.Input(shape=img_shape)
if use_data_augmentation:
x = _data_augmentation(inputs)
if preprocess_input is not None:
x = preprocess_input(inputs)
x = base_model(x, training=False)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = tf.keras.layers.Dense(1, activation="sigmoid")(x)
model = tf.keras.Model(inputs, outputs)
_freeze_model(base_model, freeze)
base_learning_rate = 0.0001
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=["accuracy"],
)
return model
|
normal
|
{
"blob_id": "86d42716e05155f9e659b22c42635a8f5b8c4a60",
"index": 753,
"step-1": "<mask token>\n\n\ndef generate_model(base_model: tf.keras.Model, img_shape: Tuple[Optional[\n int], Optional[int], Optional[int]], freeze: Union[bool, int, float]=\n False, preprocess_input: Optional[Callable]=None, use_data_augmentation:\n bool=True):\n inputs = tf.keras.layers.Input(shape=img_shape)\n if use_data_augmentation:\n x = _data_augmentation(inputs)\n if preprocess_input is not None:\n x = preprocess_input(inputs)\n x = base_model(x, training=False)\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\n x = tf.keras.layers.Dropout(0.2)(x)\n outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)\n model = tf.keras.Model(inputs, outputs)\n _freeze_model(base_model, freeze)\n base_learning_rate = 0.0001\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=\n base_learning_rate), loss=tf.keras.losses.BinaryCrossentropy(),\n metrics=['accuracy'])\n return model\n",
"step-2": "<mask token>\n\n\ndef _freeze_model(model: tf.keras.Model, freeze: Union[bool, int, float]=False\n ):\n if isinstance(freeze, int):\n freeze_len = freeze\n elif isinstance(freeze, float):\n freeze_len = int(freeze * len(model.layers))\n elif freeze:\n freeze_len = len(model.layers)\n else:\n freeze_len = 0\n if freeze_len != len(model.layers):\n model.trainable = True\n for layer in model.layers[:freeze_len]:\n layer.trainable = False\n\n\ndef generate_model(base_model: tf.keras.Model, img_shape: Tuple[Optional[\n int], Optional[int], Optional[int]], freeze: Union[bool, int, float]=\n False, preprocess_input: Optional[Callable]=None, use_data_augmentation:\n bool=True):\n inputs = tf.keras.layers.Input(shape=img_shape)\n if use_data_augmentation:\n x = _data_augmentation(inputs)\n if preprocess_input is not None:\n x = preprocess_input(inputs)\n x = base_model(x, training=False)\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\n x = tf.keras.layers.Dropout(0.2)(x)\n outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)\n model = tf.keras.Model(inputs, outputs)\n _freeze_model(base_model, freeze)\n base_learning_rate = 0.0001\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=\n base_learning_rate), loss=tf.keras.losses.BinaryCrossentropy(),\n metrics=['accuracy'])\n return model\n",
"step-3": "<mask token>\n_data_augmentation = tf.keras.Sequential([tf.keras.layers.experimental.\n preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.\n preprocessing.RandomRotation(0.2)])\n\n\ndef _freeze_model(model: tf.keras.Model, freeze: Union[bool, int, float]=False\n ):\n if isinstance(freeze, int):\n freeze_len = freeze\n elif isinstance(freeze, float):\n freeze_len = int(freeze * len(model.layers))\n elif freeze:\n freeze_len = len(model.layers)\n else:\n freeze_len = 0\n if freeze_len != len(model.layers):\n model.trainable = True\n for layer in model.layers[:freeze_len]:\n layer.trainable = False\n\n\ndef generate_model(base_model: tf.keras.Model, img_shape: Tuple[Optional[\n int], Optional[int], Optional[int]], freeze: Union[bool, int, float]=\n False, preprocess_input: Optional[Callable]=None, use_data_augmentation:\n bool=True):\n inputs = tf.keras.layers.Input(shape=img_shape)\n if use_data_augmentation:\n x = _data_augmentation(inputs)\n if preprocess_input is not None:\n x = preprocess_input(inputs)\n x = base_model(x, training=False)\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\n x = tf.keras.layers.Dropout(0.2)(x)\n outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)\n model = tf.keras.Model(inputs, outputs)\n _freeze_model(base_model, freeze)\n base_learning_rate = 0.0001\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=\n base_learning_rate), loss=tf.keras.losses.BinaryCrossentropy(),\n metrics=['accuracy'])\n return model\n",
"step-4": "import tensorflow as tf\nfrom typing import Optional, Tuple, Union, Callable\n_data_augmentation = tf.keras.Sequential([tf.keras.layers.experimental.\n preprocessing.RandomFlip('horizontal'), tf.keras.layers.experimental.\n preprocessing.RandomRotation(0.2)])\n\n\ndef _freeze_model(model: tf.keras.Model, freeze: Union[bool, int, float]=False\n ):\n if isinstance(freeze, int):\n freeze_len = freeze\n elif isinstance(freeze, float):\n freeze_len = int(freeze * len(model.layers))\n elif freeze:\n freeze_len = len(model.layers)\n else:\n freeze_len = 0\n if freeze_len != len(model.layers):\n model.trainable = True\n for layer in model.layers[:freeze_len]:\n layer.trainable = False\n\n\ndef generate_model(base_model: tf.keras.Model, img_shape: Tuple[Optional[\n int], Optional[int], Optional[int]], freeze: Union[bool, int, float]=\n False, preprocess_input: Optional[Callable]=None, use_data_augmentation:\n bool=True):\n inputs = tf.keras.layers.Input(shape=img_shape)\n if use_data_augmentation:\n x = _data_augmentation(inputs)\n if preprocess_input is not None:\n x = preprocess_input(inputs)\n x = base_model(x, training=False)\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\n x = tf.keras.layers.Dropout(0.2)(x)\n outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)\n model = tf.keras.Model(inputs, outputs)\n _freeze_model(base_model, freeze)\n base_learning_rate = 0.0001\n model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=\n base_learning_rate), loss=tf.keras.losses.BinaryCrossentropy(),\n metrics=['accuracy'])\n return model\n",
"step-5": "import tensorflow as tf\nfrom typing import Optional, Tuple, Union, Callable\n\n_data_augmentation = tf.keras.Sequential(\n [\n tf.keras.layers.experimental.preprocessing.RandomFlip(\"horizontal\"),\n tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),\n ]\n)\n\n\ndef _freeze_model(\n model: tf.keras.Model,\n freeze: Union[bool, int, float] = False,\n):\n # Obs:\n # When you set layer.trainable = False, the BatchNormalization layer will\n # run in inference mode, and will not update its mean and variance statistics\n # https://www.tensorflow.org/tutorials/images/transfer_learning#important_note_about_batchnormalization_layers\n\n if isinstance(freeze, int):\n freeze_len = freeze\n elif isinstance(freeze, float):\n freeze_len = int(freeze * len(model.layers))\n else: # isinstance(freeze, bool):\n if freeze:\n freeze_len = len(model.layers)\n else:\n freeze_len = 0\n\n if freeze_len != len(model.layers):\n model.trainable = True\n\n for layer in model.layers[:freeze_len]:\n layer.trainable = False\n\n\ndef generate_model(\n base_model: tf.keras.Model,\n img_shape: Tuple[Optional[int], Optional[int], Optional[int]],\n freeze: Union[bool, int, float] = False,\n preprocess_input: Optional[Callable] = None,\n use_data_augmentation: bool = True,\n):\n inputs = tf.keras.layers.Input(shape=img_shape)\n if use_data_augmentation:\n x = _data_augmentation(inputs)\n if preprocess_input is not None:\n x = preprocess_input(inputs)\n x = base_model(x, training=False)\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\n x = tf.keras.layers.Dropout(0.2)(x)\n outputs = tf.keras.layers.Dense(1, activation=\"sigmoid\")(x)\n\n model = tf.keras.Model(inputs, outputs)\n\n _freeze_model(base_model, freeze)\n\n base_learning_rate = 0.0001\n model.compile(\n optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),\n loss=tf.keras.losses.BinaryCrossentropy(),\n metrics=[\"accuracy\"],\n )\n\n return model\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#day11
n = int(input("Enter a number: "))
c = 0
a,b = 0, 1
list = [a, b]
for i in range(2,n+1):
c = a+b
list.append(c)
a,b = b, c
print(n,"th fibonacci number is ",list[n])
|
normal
|
{
"blob_id": "255cdbce1f9f7709165b1a29362026ad92ba4712",
"index": 2303,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(2, n + 1):\n c = a + b\n list.append(c)\n a, b = b, c\nprint(n, 'th fibonacci number is ', list[n])\n",
"step-3": "n = int(input('Enter a number: '))\nc = 0\na, b = 0, 1\nlist = [a, b]\nfor i in range(2, n + 1):\n c = a + b\n list.append(c)\n a, b = b, c\nprint(n, 'th fibonacci number is ', list[n])\n",
"step-4": "#day11\nn = int(input(\"Enter a number: \"))\nc = 0\na,b = 0, 1\nlist = [a, b]\nfor i in range(2,n+1):\n c = a+b\n list.append(c)\n a,b = b, c\nprint(n,\"th fibonacci number is \",list[n])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(accuracy_score(true_labels, guesses))
print(recall_score(true_labels, guesses))
print(precision_score(true_labels, guesses))
print(f1_score(true_labels, guesses))
<|reserved_special_token_0|>
print(confusion_matrix(true_labels, guesses))
<|reserved_special_token_1|>
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
print(accuracy_score(true_labels, guesses))
print(recall_score(true_labels, guesses))
print(precision_score(true_labels, guesses))
print(f1_score(true_labels, guesses))
from sklearn.metrics import confusion_matrix
print(confusion_matrix(true_labels, guesses))
|
flexible
|
{
"blob_id": "faa53db9dd581b6508fb9e4042ec86ebaf850e60",
"index": 5320,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(accuracy_score(true_labels, guesses))\nprint(recall_score(true_labels, guesses))\nprint(precision_score(true_labels, guesses))\nprint(f1_score(true_labels, guesses))\n<mask token>\nprint(confusion_matrix(true_labels, guesses))\n",
"step-3": "from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score\nprint(accuracy_score(true_labels, guesses))\nprint(recall_score(true_labels, guesses))\nprint(precision_score(true_labels, guesses))\nprint(f1_score(true_labels, guesses))\nfrom sklearn.metrics import confusion_matrix\nprint(confusion_matrix(true_labels, guesses))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def yolo():
root = 'Z:\\'
name = '23367640.png'
execution_path = os.getcwd()
yolo_path = 'Z:\\yolo.h5'
localdir = False
detector = ObjectDetection()
detector.setModelTypeAsYOLOv3()
if localdir:
detector.setModelPath(os.path.join(execution_path, yolo_path))
else:
detector.setModelPath(yolo_path)
detector.loadModel()
pathOut = 'yolo_out_2.jpg'
path = root + name
pathOut = root + name + 'yolo_out' + '.jpg'
detections = detector.detectObjectsFromImage(input_image=os.path.join(
execution_path, path), output_image_path=os.path.join(
execution_path, pathOut), minimum_percentage_probability=10)
for eachObject in detections:
print(eachObject['name'], ' : ', eachObject[
'percentage_probability'], ' : ', eachObject['box_points'])
print('--------------------------------')
return detections, path
<|reserved_special_token_0|>
def Detect(image):
frameHeight, frameWidth, ch = image.shape
inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight),
(0, 0, 0), swapRB=False, crop=False)
net.setInput(inpBlob)
print(inpBlob)
output = net.forward()
print(output)
print('========')
H = output.shape[2]
W = output.shape[3]
points = []
threshold = 0.1
maxKeypoints = 44
Keypoints = output.shape[1]
print('Keypoints from output?', Keypoints)
Keypoints = 15
labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow',
'Right Wrist', 'Left Shoulder', 'Left Elbow', 'Left Wrist',
'Right Hip', 'Right Knee', 'Right Ankle', 'Left Hip', 'Left Knee',
'Left Ankle', 'Chest', 'Background']
for i in range(Keypoints):
probMap = output[0, i, :, :]
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
x = frameWidth * point[0] / W
y = frameHeight * point[1] / H
if prob > threshold:
cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness
=-1, lineType=cv2.FILLED)
cv2.putText(image, '{}'.format(i), (int(x), int(y)), cv2.
FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
print(i, labels[i])
print(x, y)
points.append((int(x), int(y)))
else:
points.append(None)
print(points)
cv2.imshow('Output-Keypoints', image)
cv2.waitKey()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def yolo():
root = 'Z:\\'
name = '23367640.png'
execution_path = os.getcwd()
yolo_path = 'Z:\\yolo.h5'
localdir = False
detector = ObjectDetection()
detector.setModelTypeAsYOLOv3()
if localdir:
detector.setModelPath(os.path.join(execution_path, yolo_path))
else:
detector.setModelPath(yolo_path)
detector.loadModel()
pathOut = 'yolo_out_2.jpg'
path = root + name
pathOut = root + name + 'yolo_out' + '.jpg'
detections = detector.detectObjectsFromImage(input_image=os.path.join(
execution_path, path), output_image_path=os.path.join(
execution_path, pathOut), minimum_percentage_probability=10)
for eachObject in detections:
print(eachObject['name'], ' : ', eachObject[
'percentage_probability'], ' : ', eachObject['box_points'])
print('--------------------------------')
return detections, path
<|reserved_special_token_0|>
for i in det:
print(i)
<|reserved_special_token_0|>
for d in det:
if d['name'] == 'person' and d['percentage_probability'] > yolo_thr:
x1, y1, x2, y2 = d['box_points']
if bWiden:
x1 -= 20
x2 += 20
y1 -= 30
y2 += 30
cropped = yoloImage[y1:y2, x1:x2]
cv2.imshow(d['name'] + str(x1), cropped)
collected.append(cropped)
cv2.waitKey()
<|reserved_special_token_0|>
cv2.imshow('FRAME' + src, frame)
<|reserved_special_token_0|>
print('H, W, Ch', frameHeight, frameWidth, ch)
<|reserved_special_token_0|>
net.setInput(inpBlob)
print(inpBlob)
<|reserved_special_token_0|>
print(output)
print('========')
<|reserved_special_token_0|>
print('Keypoints from output?', Keypoints)
<|reserved_special_token_0|>
for i in range(Keypoints):
probMap = output[0, i, :, :]
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
x = frameWidth * point[0] / W
y = frameHeight * point[1] / H
if prob > threshold:
cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1,
lineType=cv2.FILLED)
cv2.putText(frame, '{}'.format(i), (int(x), int(y)), cv2.
FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
print(i, labels[i])
print(x, y)
points.append((int(x), int(y)))
else:
points.append(None)
print(points)
cv2.imshow('Output-Keypoints', frame)
def Detect(image):
frameHeight, frameWidth, ch = image.shape
inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight),
(0, 0, 0), swapRB=False, crop=False)
net.setInput(inpBlob)
print(inpBlob)
output = net.forward()
print(output)
print('========')
H = output.shape[2]
W = output.shape[3]
points = []
threshold = 0.1
maxKeypoints = 44
Keypoints = output.shape[1]
print('Keypoints from output?', Keypoints)
Keypoints = 15
labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow',
'Right Wrist', 'Left Shoulder', 'Left Elbow', 'Left Wrist',
'Right Hip', 'Right Knee', 'Right Ankle', 'Left Hip', 'Left Knee',
'Left Ankle', 'Chest', 'Background']
for i in range(Keypoints):
probMap = output[0, i, :, :]
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
x = frameWidth * point[0] / W
y = frameHeight * point[1] / H
if prob > threshold:
cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness
=-1, lineType=cv2.FILLED)
cv2.putText(image, '{}'.format(i), (int(x), int(y)), cv2.
FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
print(i, labels[i])
print(x, y)
points.append((int(x), int(y)))
else:
points.append(None)
print(points)
cv2.imshow('Output-Keypoints', image)
cv2.waitKey()
for i in collected:
Detect(i)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
boxes = []
def yolo():
root = 'Z:\\'
name = '23367640.png'
execution_path = os.getcwd()
yolo_path = 'Z:\\yolo.h5'
localdir = False
detector = ObjectDetection()
detector.setModelTypeAsYOLOv3()
if localdir:
detector.setModelPath(os.path.join(execution_path, yolo_path))
else:
detector.setModelPath(yolo_path)
detector.loadModel()
pathOut = 'yolo_out_2.jpg'
path = root + name
pathOut = root + name + 'yolo_out' + '.jpg'
detections = detector.detectObjectsFromImage(input_image=os.path.join(
execution_path, path), output_image_path=os.path.join(
execution_path, pathOut), minimum_percentage_probability=10)
for eachObject in detections:
print(eachObject['name'], ' : ', eachObject[
'percentage_probability'], ' : ', eachObject['box_points'])
print('--------------------------------')
return detections, path
det, path = yolo()
yoloImage = cv2.imread(path)
for i in det:
print(i)
protoFile = 'Z:\\pose\\mpi\\pose_deploy_linevec_faster_4_stages.prototxt'
weightsFile = 'Z:\\pose\\mpi\\pose_iter_160000.caffemodel'
net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
<|reserved_special_token_0|>
yolo_thr = 70
collected = []
bWiden = False
for d in det:
if d['name'] == 'person' and d['percentage_probability'] > yolo_thr:
x1, y1, x2, y2 = d['box_points']
if bWiden:
x1 -= 20
x2 += 20
y1 -= 30
y2 += 30
cropped = yoloImage[y1:y2, x1:x2]
cv2.imshow(d['name'] + str(x1), cropped)
collected.append(cropped)
cv2.waitKey()
srcs = ['z:\\pose1.webp', 'Z:\\2w.jpg', 'Z:\\grigor.jpg']
id = 2
src = path
frame = cv2.imread(src)
cv2.imshow('FRAME' + src, frame)
frameHeight, frameWidth, ch = frame.shape
print('H, W, Ch', frameHeight, frameWidth, ch)
inWidth = 368
inHeight = 368
inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0,
0, 0), swapRB=False, crop=False)
net.setInput(inpBlob)
print(inpBlob)
output = net.forward()
print(output)
print('========')
H = output.shape[2]
W = output.shape[3]
points = []
threshold = 0.3
maxKeypoints = 44
Keypoints = output.shape[1]
print('Keypoints from output?', Keypoints)
Keypoints = 15
labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow', 'Right Wrist',
'Left Shoulder', 'Left Elbow', 'Left Wrist', 'Right Hip', 'Right Knee',
'Right Ankle', 'Left Hip', 'Left Knee', 'Left Ankle', 'Chest', 'Background'
]
for i in range(Keypoints):
probMap = output[0, i, :, :]
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
x = frameWidth * point[0] / W
y = frameHeight * point[1] / H
if prob > threshold:
cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1,
lineType=cv2.FILLED)
cv2.putText(frame, '{}'.format(i), (int(x), int(y)), cv2.
FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
print(i, labels[i])
print(x, y)
points.append((int(x), int(y)))
else:
points.append(None)
print(points)
cv2.imshow('Output-Keypoints', frame)
def Detect(image):
frameHeight, frameWidth, ch = image.shape
inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight),
(0, 0, 0), swapRB=False, crop=False)
net.setInput(inpBlob)
print(inpBlob)
output = net.forward()
print(output)
print('========')
H = output.shape[2]
W = output.shape[3]
points = []
threshold = 0.1
maxKeypoints = 44
Keypoints = output.shape[1]
print('Keypoints from output?', Keypoints)
Keypoints = 15
labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow',
'Right Wrist', 'Left Shoulder', 'Left Elbow', 'Left Wrist',
'Right Hip', 'Right Knee', 'Right Ankle', 'Left Hip', 'Left Knee',
'Left Ankle', 'Chest', 'Background']
for i in range(Keypoints):
probMap = output[0, i, :, :]
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
x = frameWidth * point[0] / W
y = frameHeight * point[1] / H
if prob > threshold:
cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness
=-1, lineType=cv2.FILLED)
cv2.putText(image, '{}'.format(i), (int(x), int(y)), cv2.
FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
print(i, labels[i])
print(x, y)
points.append((int(x), int(y)))
else:
points.append(None)
print(points)
cv2.imshow('Output-Keypoints', image)
cv2.waitKey()
for i in collected:
Detect(i)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import tensorflow.compat.v1 as tf
from imageai.Detection import ObjectDetection
import os
boxes = []
def yolo():
root = 'Z:\\'
name = '23367640.png'
execution_path = os.getcwd()
yolo_path = 'Z:\\yolo.h5'
localdir = False
detector = ObjectDetection()
detector.setModelTypeAsYOLOv3()
if localdir:
detector.setModelPath(os.path.join(execution_path, yolo_path))
else:
detector.setModelPath(yolo_path)
detector.loadModel()
pathOut = 'yolo_out_2.jpg'
path = root + name
pathOut = root + name + 'yolo_out' + '.jpg'
detections = detector.detectObjectsFromImage(input_image=os.path.join(
execution_path, path), output_image_path=os.path.join(
execution_path, pathOut), minimum_percentage_probability=10)
for eachObject in detections:
print(eachObject['name'], ' : ', eachObject[
'percentage_probability'], ' : ', eachObject['box_points'])
print('--------------------------------')
return detections, path
det, path = yolo()
yoloImage = cv2.imread(path)
for i in det:
print(i)
protoFile = 'Z:\\pose\\mpi\\pose_deploy_linevec_faster_4_stages.prototxt'
weightsFile = 'Z:\\pose\\mpi\\pose_iter_160000.caffemodel'
net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
<|reserved_special_token_0|>
yolo_thr = 70
collected = []
bWiden = False
for d in det:
if d['name'] == 'person' and d['percentage_probability'] > yolo_thr:
x1, y1, x2, y2 = d['box_points']
if bWiden:
x1 -= 20
x2 += 20
y1 -= 30
y2 += 30
cropped = yoloImage[y1:y2, x1:x2]
cv2.imshow(d['name'] + str(x1), cropped)
collected.append(cropped)
cv2.waitKey()
srcs = ['z:\\pose1.webp', 'Z:\\2w.jpg', 'Z:\\grigor.jpg']
id = 2
src = path
frame = cv2.imread(src)
cv2.imshow('FRAME' + src, frame)
frameHeight, frameWidth, ch = frame.shape
print('H, W, Ch', frameHeight, frameWidth, ch)
inWidth = 368
inHeight = 368
inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0,
0, 0), swapRB=False, crop=False)
net.setInput(inpBlob)
print(inpBlob)
output = net.forward()
print(output)
print('========')
H = output.shape[2]
W = output.shape[3]
points = []
threshold = 0.3
maxKeypoints = 44
Keypoints = output.shape[1]
print('Keypoints from output?', Keypoints)
Keypoints = 15
labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow', 'Right Wrist',
'Left Shoulder', 'Left Elbow', 'Left Wrist', 'Right Hip', 'Right Knee',
'Right Ankle', 'Left Hip', 'Left Knee', 'Left Ankle', 'Chest', 'Background'
]
for i in range(Keypoints):
probMap = output[0, i, :, :]
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
x = frameWidth * point[0] / W
y = frameHeight * point[1] / H
if prob > threshold:
cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1,
lineType=cv2.FILLED)
cv2.putText(frame, '{}'.format(i), (int(x), int(y)), cv2.
FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
print(i, labels[i])
print(x, y)
points.append((int(x), int(y)))
else:
points.append(None)
print(points)
cv2.imshow('Output-Keypoints', frame)
def Detect(image):
frameHeight, frameWidth, ch = image.shape
inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight),
(0, 0, 0), swapRB=False, crop=False)
net.setInput(inpBlob)
print(inpBlob)
output = net.forward()
print(output)
print('========')
H = output.shape[2]
W = output.shape[3]
points = []
threshold = 0.1
maxKeypoints = 44
Keypoints = output.shape[1]
print('Keypoints from output?', Keypoints)
Keypoints = 15
labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow',
'Right Wrist', 'Left Shoulder', 'Left Elbow', 'Left Wrist',
'Right Hip', 'Right Knee', 'Right Ankle', 'Left Hip', 'Left Knee',
'Left Ankle', 'Chest', 'Background']
for i in range(Keypoints):
probMap = output[0, i, :, :]
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
x = frameWidth * point[0] / W
y = frameHeight * point[1] / H
if prob > threshold:
cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness
=-1, lineType=cv2.FILLED)
cv2.putText(image, '{}'.format(i), (int(x), int(y)), cv2.
FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
print(i, labels[i])
print(x, y)
points.append((int(x), int(y)))
else:
points.append(None)
print(points)
cv2.imshow('Output-Keypoints', image)
cv2.waitKey()
for i in collected:
Detect(i)
cv2.waitKey(0)
cv2.destroyAllWindows()
<|reserved_special_token_1|>
# Pose estimation and object detection: OpenCV DNN, ImageAI, YOLO, mpi, caffemodel, tensorflow
# Authors:
# Tutorial by: https://learnopencv.com/deep-learning-based-human-pose-estimation-using-opencv-cpp-python/
# Model file links collection (replace .sh script): Twenkid
# http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/mpi/pose_iter_160000.caffemodel
#https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/models/pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt
# ImageAI: https://github.com/OlafenwaMoses/ImageAI
# # YOLOv3:
# yolo.h5
# https://github-releases.githubusercontent.com/125932201/1b8496e8-86fc-11e8-895f-fefe61ebb499?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20210813%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210813T002422Z&X-Amz-Expires=300&X-Amz-Signature=02e6839be131d27b142baf50449d021339cbb334eed67a114ff9b960b8beb987&X-Amz-SignedHeaders=host&actor_id=23367640&key_id=0&repo_id=125932201&response-content-disposition=attachment%3B%20filename%3Dyolo.h5&response-content-type=application%2Foctet-stream
# yolo-tiny.h5
# https://github-releases.githubusercontent.com/125932201/7cf559e6-86fa-11e8-81e8-1e959be261a8?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20210812%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210812T232641Z&X-Amz-Expires=300&X-Amz-Signature=a5b91876c83b83a6aafba333c63c5f4a880bea9a937b30e52e92bbb0ac784018&X-Amz-SignedHeaders=host&actor_id=23367640&key_id=0&repo_id=125932201&response-content-disposition=attachment%3B%20filename%3Dyolo-tiny.h5&response-content-type=application%2Foctet-stream
# Todor Arnaudov - Twenkid: debug and merging, LearnOpenCV python code had a few misses, 13.8.2021
# It seems the pose model expects only one person so the image must be segmented first! pose1.jpg
# Detect with YOLO or ImageAI etc. then use DNN
# Specify the paths for the 2 files
# I tried with yolo-tiny, but the accuracy of the bounding boxes didn't seem acceptable.
#tf 1.15 for older versions of ImageAI - but tf doesn't support Py 3.8
#ImageAI: older versions require tf 1.x
#tf 2.4 - required by ImageAI 2.1.6 -- no GPU supported on Win 7, tf requires CUDA 11.0 (Win10). Win7: CUDA 10.x. CPU: works
# Set the paths to models, images etc.
# My experiments results: disappointingly bad pose estimation on the images I tested. Sometimes good, sometimes terrible.
import cv2
import tensorflow.compat.v1 as tf
from imageai.Detection import ObjectDetection
import os
boxes = []
def yolo():
#name = "k.jpg"
root = "Z:\\"
name = "23367640.png" #t.jpg" #"p1.jpg" #"2w.jpg" #"grigor.jpg" #"2w.jpg" #"pose1.webp" #1.jpg"
execution_path = os.getcwd()
yolo_path = "Z:\\yolo.h5"
#yolo_path = "Z:\\yolo-tiny.h5"
localdir = False
detector = ObjectDetection()
detector.setModelTypeAsYOLOv3()
#detector.setModelTypeAsTinyYOLOv3()
if localdir:
detector.setModelPath(os.path.join(execution_path , yolo_path))
else:
detector.setModelPath(yolo_path)
#dir(detector)
detector.loadModel()
#loaded_model = tf.keras.models.load_model("./src/mood-saved-models/"model + ".h5")
#loaded_model = tf.keras.models.load_model(detector.)
#path = "E:\capture_023_29092020_150305.jpg" #IMG_20200528_044908.jpg"
#pathOut = "E:\YOLO_capture_023_29092020_150305.jpg"
#path = "pose1.webp" #E:\\capture_046_29092020_150628.jpg"
pathOut = "yolo_out_2.jpg"
path = root + name
pathOut = root + name + "yolo_out" + ".jpg"
detections = detector.detectObjectsFromImage(input_image=os.path.join(execution_path , path), output_image_path=os.path.join(execution_path , pathOut), minimum_percentage_probability=10) #30)
for eachObject in detections:
print(eachObject["name"] , " : ", eachObject["percentage_probability"], " : ", eachObject["box_points"] )
print("--------------------------------")
return detections, path
det,path = yolo()
yoloImage = cv2.imread(path) #crop regions from it
for i in det:
print(i)
protoFile = "Z:\\pose\\mpi\\pose_deploy_linevec_faster_4_stages.prototxt"
#protoFile = "pose_deploy_linevec_faster_4_stages.prototxt"
#weightsFile = "Z:\\pose\\mpi\\pose_iter_440000.caffemodel"
weightsFile = "Z:\\pose\\mpi\\pose_iter_160000.caffemodel"
#weightsFile = "pose_iter_160000.caffemodel"
#weightsFile = "pose_iter_440000.caffemodel"
# Read the network into Memory
net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
"""
{'name': 'person', 'percentage_probability': 99.86668229103088, 'box_points': [1
8, 38, 153, 397]}
{'name': 'person', 'percentage_probability': 53.89136075973511, 'box_points': [3
86, 93, 428, 171]}
{'name': 'person', 'percentage_probability': 11.339860409498215, 'box_points': [
585, 99, 641, 180]}
{'name': 'person', 'percentage_probability': 10.276197642087936, 'box_points': [
126, 178, 164, 290]}
{'name': 'person', 'percentage_probability': 99.94878768920898, 'box_points': [2
93, 80, 394, 410]}
{'name': 'person', 'percentage_probability': 99.95986223220825, 'box_points': [4
78, 88, 589, 410]}
{'name': 'person', 'percentage_probability': 67.95878410339355, 'box_points': [1
, 212, 39, 300]}
{'name': 'person', 'percentage_probability': 63.609880208969116, 'box_points': [
153, 193, 192, 306]}
{'name': 'person', 'percentage_probability': 23.985233902931213, 'box_points': [
226, 198, 265, 308]}
{'name': 'sports ball', 'percentage_probability': 20.820775628089905, 'box_point
s': [229, 50, 269, 94]}
{'name': 'person', 'percentage_probability': 40.28712213039398, 'box_points': [4
23, 110, 457, 160]}
H, W, Ch 407 211 3
"""
yolo_thr = 70 #in percents, not 0.7
collected = []
bWiden = False
for d in det:
if (d['name'] == 'person') and d['percentage_probability'] > yolo_thr:
x1,y1,x2,y2 = d['box_points']
if bWiden:
x1-=20
x2+=20
y1-=30
y2+=30
cropped = yoloImage[y1:y2, x1:x2]
cv2.imshow(d['name']+str(x1), cropped)
collected.append(cropped) #or copy first?
cv2.waitKey()
#x1,y1, ...
# for i in collected: cv2.imshow("COLLECTED?", i); cv2.waitKey() #OK
# Read image
#frame = cv2.imread("Z:\\23367640.png") #1.jpg")
#src = "Z:\\2w.jpg" #z:\\pose1.webp" #nacep1.jpg"
#src = "z:\\pose1.webp"
srcs = ["z:\\pose1.webp","Z:\\2w.jpg", "Z:\\grigor.jpg"]
id = 2
#src = srcs[2]
src = path #from first yolo, in order to compare
frame = cv2.imread(src)
cv2.imshow("FRAME"+src, frame)
#frameWidth, frameHeight, _ = frame.shape
frameHeight, frameWidth, ch = frame.shape
print("H, W, Ch", frameHeight, frameWidth, ch)
# Specify the input image dimensions
inWidth = 368 #184 #368
inHeight = 368 #184 #368
# Prepare the frame to be fed to the network
inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)
#cv2.imshow("G", inpBlob) #unsupported
#cv2.waitKey(0)
# Set the prepared object as the input blob of the network
net.setInput(inpBlob)
print(inpBlob)
output = net.forward()
print(output)
print("========")
H = output.shape[2]
W = output.shape[3]
# Empty list to store the detected keypoints
points = []
threshold = 0.3
maxKeypoints = 44
Keypoints = output.shape[1]
print("Keypoints from output?", Keypoints)
Keypoints = 15 #MPI ... returns only 15
labels = ["Head", "Neck", "Right Shoulder", "Right Elbow", "Right Wrist", "Left Shoulder", "Left Elbow", "Left Wrist", "Right Hip", "Right Knee", "Right Ankle", "Left Hip", "Left Knee", "Left Ankle", "Chest", "Background"]
#for i in range(len()):
for i in range(Keypoints): #?
# confidence map of corresponding body's part.
probMap = output[0, i, :, :]
# Find global maxima of the probMap.
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
# Scale the point to fit on the original image
x = (frameWidth * point[0]) / W
y = (frameHeight * point[1]) / H
if prob > threshold :
cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
cv2.putText(frame, "{}".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
# Add the point to the list if the probability is greater than the threshold
print(i, labels[i])
print(x, y)
points.append((int(x), int(y)))
else :
points.append(None)
print(points)
cv2.imshow("Output-Keypoints",frame)
def Detect(image): #inWidth, Height ... - global, set as params later
frameHeight, frameWidth, ch = image.shape
# Prepare the image to be fed to the network
inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)
#cv2.imshow("G", inpBlob) #unsupported
#cv2.waitKey(0)
# Set the prepared object as the input blob of the network
net.setInput(inpBlob)
print(inpBlob)
output = net.forward()
print(output)
print("========")
H = output.shape[2]
W = output.shape[3]
# Empty list to store the detected keypoints
points = []
threshold = 0.1
maxKeypoints = 44
Keypoints = output.shape[1]
print("Keypoints from output?", Keypoints)
Keypoints = 15 #MPI ... returns only 15
labels = ["Head", "Neck", "Right Shoulder", "Right Elbow", "Right Wrist", "Left Shoulder", "Left Elbow", "Left Wrist", "Right Hip", "Right Knee", "Right Ankle", "Left Hip", "Left Knee", "Left Ankle", "Chest", "Background"]
#for i in range(len()):
for i in range(Keypoints): #?
# confidence map of corresponding body's part.
probMap = output[0, i, :, :]
# Find global maxima of the probMap.
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
# Scale the point to fit on the original image
x = (frameWidth * point[0]) / W
y = (frameHeight * point[1]) / H
if prob > threshold :
cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
cv2.putText(image, "{}".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
# Add the point to the list if the probability is greater than the threshold
print(i, labels[i])
print(x, y)
points.append((int(x), int(y)))
else :
points.append(None)
print(points)
cv2.imshow("Output-Keypoints",image)
cv2.waitKey()
for i in collected: Detect(i)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "c80ae9d2eb07fd716a80a5e2d7b5237925fda02c",
"index": 5861,
"step-1": "<mask token>\n\n\ndef yolo():\n root = 'Z:\\\\'\n name = '23367640.png'\n execution_path = os.getcwd()\n yolo_path = 'Z:\\\\yolo.h5'\n localdir = False\n detector = ObjectDetection()\n detector.setModelTypeAsYOLOv3()\n if localdir:\n detector.setModelPath(os.path.join(execution_path, yolo_path))\n else:\n detector.setModelPath(yolo_path)\n detector.loadModel()\n pathOut = 'yolo_out_2.jpg'\n path = root + name\n pathOut = root + name + 'yolo_out' + '.jpg'\n detections = detector.detectObjectsFromImage(input_image=os.path.join(\n execution_path, path), output_image_path=os.path.join(\n execution_path, pathOut), minimum_percentage_probability=10)\n for eachObject in detections:\n print(eachObject['name'], ' : ', eachObject[\n 'percentage_probability'], ' : ', eachObject['box_points'])\n print('--------------------------------')\n return detections, path\n\n\n<mask token>\n\n\ndef Detect(image):\n frameHeight, frameWidth, ch = image.shape\n inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight),\n (0, 0, 0), swapRB=False, crop=False)\n net.setInput(inpBlob)\n print(inpBlob)\n output = net.forward()\n print(output)\n print('========')\n H = output.shape[2]\n W = output.shape[3]\n points = []\n threshold = 0.1\n maxKeypoints = 44\n Keypoints = output.shape[1]\n print('Keypoints from output?', Keypoints)\n Keypoints = 15\n labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow',\n 'Right Wrist', 'Left Shoulder', 'Left Elbow', 'Left Wrist',\n 'Right Hip', 'Right Knee', 'Right Ankle', 'Left Hip', 'Left Knee',\n 'Left Ankle', 'Chest', 'Background']\n for i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness\n =-1, lineType=cv2.FILLED)\n cv2.putText(image, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\n print(points)\n cv2.imshow('Output-Keypoints', image)\n cv2.waitKey()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef yolo():\n root = 'Z:\\\\'\n name = '23367640.png'\n execution_path = os.getcwd()\n yolo_path = 'Z:\\\\yolo.h5'\n localdir = False\n detector = ObjectDetection()\n detector.setModelTypeAsYOLOv3()\n if localdir:\n detector.setModelPath(os.path.join(execution_path, yolo_path))\n else:\n detector.setModelPath(yolo_path)\n detector.loadModel()\n pathOut = 'yolo_out_2.jpg'\n path = root + name\n pathOut = root + name + 'yolo_out' + '.jpg'\n detections = detector.detectObjectsFromImage(input_image=os.path.join(\n execution_path, path), output_image_path=os.path.join(\n execution_path, pathOut), minimum_percentage_probability=10)\n for eachObject in detections:\n print(eachObject['name'], ' : ', eachObject[\n 'percentage_probability'], ' : ', eachObject['box_points'])\n print('--------------------------------')\n return detections, path\n\n\n<mask token>\nfor i in det:\n print(i)\n<mask token>\nfor d in det:\n if d['name'] == 'person' and d['percentage_probability'] > yolo_thr:\n x1, y1, x2, y2 = d['box_points']\n if bWiden:\n x1 -= 20\n x2 += 20\n y1 -= 30\n y2 += 30\n cropped = yoloImage[y1:y2, x1:x2]\n cv2.imshow(d['name'] + str(x1), cropped)\n collected.append(cropped)\n cv2.waitKey()\n<mask token>\ncv2.imshow('FRAME' + src, frame)\n<mask token>\nprint('H, W, Ch', frameHeight, frameWidth, ch)\n<mask token>\nnet.setInput(inpBlob)\nprint(inpBlob)\n<mask token>\nprint(output)\nprint('========')\n<mask token>\nprint('Keypoints from output?', Keypoints)\n<mask token>\nfor i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1,\n lineType=cv2.FILLED)\n cv2.putText(frame, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\nprint(points)\ncv2.imshow('Output-Keypoints', frame)\n\n\ndef Detect(image):\n frameHeight, frameWidth, ch = image.shape\n inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight),\n (0, 0, 0), swapRB=False, crop=False)\n net.setInput(inpBlob)\n print(inpBlob)\n output = net.forward()\n print(output)\n print('========')\n H = output.shape[2]\n W = output.shape[3]\n points = []\n threshold = 0.1\n maxKeypoints = 44\n Keypoints = output.shape[1]\n print('Keypoints from output?', Keypoints)\n Keypoints = 15\n labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow',\n 'Right Wrist', 'Left Shoulder', 'Left Elbow', 'Left Wrist',\n 'Right Hip', 'Right Knee', 'Right Ankle', 'Left Hip', 'Left Knee',\n 'Left Ankle', 'Chest', 'Background']\n for i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness\n =-1, lineType=cv2.FILLED)\n cv2.putText(image, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\n print(points)\n cv2.imshow('Output-Keypoints', image)\n cv2.waitKey()\n\n\nfor i in collected:\n Detect(i)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nboxes = []\n\n\ndef yolo():\n root = 'Z:\\\\'\n name = '23367640.png'\n execution_path = os.getcwd()\n yolo_path = 'Z:\\\\yolo.h5'\n localdir = False\n detector = ObjectDetection()\n detector.setModelTypeAsYOLOv3()\n if localdir:\n detector.setModelPath(os.path.join(execution_path, yolo_path))\n else:\n detector.setModelPath(yolo_path)\n detector.loadModel()\n pathOut = 'yolo_out_2.jpg'\n path = root + name\n pathOut = root + name + 'yolo_out' + '.jpg'\n detections = detector.detectObjectsFromImage(input_image=os.path.join(\n execution_path, path), output_image_path=os.path.join(\n execution_path, pathOut), minimum_percentage_probability=10)\n for eachObject in detections:\n print(eachObject['name'], ' : ', eachObject[\n 'percentage_probability'], ' : ', eachObject['box_points'])\n print('--------------------------------')\n return detections, path\n\n\ndet, path = yolo()\nyoloImage = cv2.imread(path)\nfor i in det:\n print(i)\nprotoFile = 'Z:\\\\pose\\\\mpi\\\\pose_deploy_linevec_faster_4_stages.prototxt'\nweightsFile = 'Z:\\\\pose\\\\mpi\\\\pose_iter_160000.caffemodel'\nnet = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)\n<mask token>\nyolo_thr = 70\ncollected = []\nbWiden = False\nfor d in det:\n if d['name'] == 'person' and d['percentage_probability'] > yolo_thr:\n x1, y1, x2, y2 = d['box_points']\n if bWiden:\n x1 -= 20\n x2 += 20\n y1 -= 30\n y2 += 30\n cropped = yoloImage[y1:y2, x1:x2]\n cv2.imshow(d['name'] + str(x1), cropped)\n collected.append(cropped)\n cv2.waitKey()\nsrcs = ['z:\\\\pose1.webp', 'Z:\\\\2w.jpg', 'Z:\\\\grigor.jpg']\nid = 2\nsrc = path\nframe = cv2.imread(src)\ncv2.imshow('FRAME' + src, frame)\nframeHeight, frameWidth, ch = frame.shape\nprint('H, W, Ch', frameHeight, frameWidth, ch)\ninWidth = 368\ninHeight = 368\ninpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, \n 0, 0), swapRB=False, crop=False)\nnet.setInput(inpBlob)\nprint(inpBlob)\noutput = net.forward()\nprint(output)\nprint('========')\nH = output.shape[2]\nW = output.shape[3]\npoints = []\nthreshold = 0.3\nmaxKeypoints = 44\nKeypoints = output.shape[1]\nprint('Keypoints from output?', Keypoints)\nKeypoints = 15\nlabels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow', 'Right Wrist',\n 'Left Shoulder', 'Left Elbow', 'Left Wrist', 'Right Hip', 'Right Knee',\n 'Right Ankle', 'Left Hip', 'Left Knee', 'Left Ankle', 'Chest', 'Background'\n ]\nfor i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1,\n lineType=cv2.FILLED)\n cv2.putText(frame, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\nprint(points)\ncv2.imshow('Output-Keypoints', frame)\n\n\ndef Detect(image):\n frameHeight, frameWidth, ch = image.shape\n inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight),\n (0, 0, 0), swapRB=False, crop=False)\n net.setInput(inpBlob)\n print(inpBlob)\n output = net.forward()\n print(output)\n print('========')\n H = output.shape[2]\n W = output.shape[3]\n points = []\n threshold = 0.1\n maxKeypoints = 44\n Keypoints = output.shape[1]\n print('Keypoints from output?', Keypoints)\n Keypoints = 15\n labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow',\n 'Right Wrist', 'Left Shoulder', 'Left Elbow', 'Left Wrist',\n 'Right Hip', 'Right Knee', 'Right Ankle', 'Left Hip', 'Left Knee',\n 'Left Ankle', 'Chest', 'Background']\n for i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness\n =-1, lineType=cv2.FILLED)\n cv2.putText(image, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\n print(points)\n cv2.imshow('Output-Keypoints', image)\n cv2.waitKey()\n\n\nfor i in collected:\n Detect(i)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport tensorflow.compat.v1 as tf\nfrom imageai.Detection import ObjectDetection\nimport os\nboxes = []\n\n\ndef yolo():\n root = 'Z:\\\\'\n name = '23367640.png'\n execution_path = os.getcwd()\n yolo_path = 'Z:\\\\yolo.h5'\n localdir = False\n detector = ObjectDetection()\n detector.setModelTypeAsYOLOv3()\n if localdir:\n detector.setModelPath(os.path.join(execution_path, yolo_path))\n else:\n detector.setModelPath(yolo_path)\n detector.loadModel()\n pathOut = 'yolo_out_2.jpg'\n path = root + name\n pathOut = root + name + 'yolo_out' + '.jpg'\n detections = detector.detectObjectsFromImage(input_image=os.path.join(\n execution_path, path), output_image_path=os.path.join(\n execution_path, pathOut), minimum_percentage_probability=10)\n for eachObject in detections:\n print(eachObject['name'], ' : ', eachObject[\n 'percentage_probability'], ' : ', eachObject['box_points'])\n print('--------------------------------')\n return detections, path\n\n\ndet, path = yolo()\nyoloImage = cv2.imread(path)\nfor i in det:\n print(i)\nprotoFile = 'Z:\\\\pose\\\\mpi\\\\pose_deploy_linevec_faster_4_stages.prototxt'\nweightsFile = 'Z:\\\\pose\\\\mpi\\\\pose_iter_160000.caffemodel'\nnet = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)\n<mask token>\nyolo_thr = 70\ncollected = []\nbWiden = False\nfor d in det:\n if d['name'] == 'person' and d['percentage_probability'] > yolo_thr:\n x1, y1, x2, y2 = d['box_points']\n if bWiden:\n x1 -= 20\n x2 += 20\n y1 -= 30\n y2 += 30\n cropped = yoloImage[y1:y2, x1:x2]\n cv2.imshow(d['name'] + str(x1), cropped)\n collected.append(cropped)\n cv2.waitKey()\nsrcs = ['z:\\\\pose1.webp', 'Z:\\\\2w.jpg', 'Z:\\\\grigor.jpg']\nid = 2\nsrc = path\nframe = cv2.imread(src)\ncv2.imshow('FRAME' + src, frame)\nframeHeight, frameWidth, ch = frame.shape\nprint('H, W, Ch', frameHeight, frameWidth, ch)\ninWidth = 368\ninHeight = 368\ninpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, \n 0, 0), swapRB=False, crop=False)\nnet.setInput(inpBlob)\nprint(inpBlob)\noutput = net.forward()\nprint(output)\nprint('========')\nH = output.shape[2]\nW = output.shape[3]\npoints = []\nthreshold = 0.3\nmaxKeypoints = 44\nKeypoints = output.shape[1]\nprint('Keypoints from output?', Keypoints)\nKeypoints = 15\nlabels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow', 'Right Wrist',\n 'Left Shoulder', 'Left Elbow', 'Left Wrist', 'Right Hip', 'Right Knee',\n 'Right Ankle', 'Left Hip', 'Left Knee', 'Left Ankle', 'Chest', 'Background'\n ]\nfor i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1,\n lineType=cv2.FILLED)\n cv2.putText(frame, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\nprint(points)\ncv2.imshow('Output-Keypoints', frame)\n\n\ndef Detect(image):\n frameHeight, frameWidth, ch = image.shape\n inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight),\n (0, 0, 0), swapRB=False, crop=False)\n net.setInput(inpBlob)\n print(inpBlob)\n output = net.forward()\n print(output)\n print('========')\n H = output.shape[2]\n W = output.shape[3]\n points = []\n threshold = 0.1\n maxKeypoints = 44\n Keypoints = output.shape[1]\n print('Keypoints from output?', Keypoints)\n Keypoints = 15\n labels = ['Head', 'Neck', 'Right Shoulder', 'Right Elbow',\n 'Right Wrist', 'Left Shoulder', 'Left Elbow', 'Left Wrist',\n 'Right Hip', 'Right Knee', 'Right Ankle', 'Left Hip', 'Left Knee',\n 'Left Ankle', 'Chest', 'Background']\n for i in range(Keypoints):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = frameWidth * point[0] / W\n y = frameHeight * point[1] / H\n if prob > threshold:\n cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness\n =-1, lineType=cv2.FILLED)\n cv2.putText(image, '{}'.format(i), (int(x), int(y)), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else:\n points.append(None)\n print(points)\n cv2.imshow('Output-Keypoints', image)\n cv2.waitKey()\n\n\nfor i in collected:\n Detect(i)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-5": "# Pose estimation and object detection: OpenCV DNN, ImageAI, YOLO, mpi, caffemodel, tensorflow\n# Authors:\n# Tutorial by: https://learnopencv.com/deep-learning-based-human-pose-estimation-using-opencv-cpp-python/\n# Model file links collection (replace .sh script): Twenkid\n# http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/mpi/pose_iter_160000.caffemodel\n#https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/models/pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt\n# ImageAI: https://github.com/OlafenwaMoses/ImageAI\n# # YOLOv3:\n# yolo.h5\n# https://github-releases.githubusercontent.com/125932201/1b8496e8-86fc-11e8-895f-fefe61ebb499?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20210813%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210813T002422Z&X-Amz-Expires=300&X-Amz-Signature=02e6839be131d27b142baf50449d021339cbb334eed67a114ff9b960b8beb987&X-Amz-SignedHeaders=host&actor_id=23367640&key_id=0&repo_id=125932201&response-content-disposition=attachment%3B%20filename%3Dyolo.h5&response-content-type=application%2Foctet-stream\n# yolo-tiny.h5\n# https://github-releases.githubusercontent.com/125932201/7cf559e6-86fa-11e8-81e8-1e959be261a8?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20210812%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20210812T232641Z&X-Amz-Expires=300&X-Amz-Signature=a5b91876c83b83a6aafba333c63c5f4a880bea9a937b30e52e92bbb0ac784018&X-Amz-SignedHeaders=host&actor_id=23367640&key_id=0&repo_id=125932201&response-content-disposition=attachment%3B%20filename%3Dyolo-tiny.h5&response-content-type=application%2Foctet-stream\n# Todor Arnaudov - Twenkid: debug and merging, LearnOpenCV python code had a few misses, 13.8.2021\n# It seems the pose model expects only one person so the image must be segmented first! pose1.jpg\n# Detect with YOLO or ImageAI etc. then use DNN\n# Specify the paths for the 2 files\n# I tried with yolo-tiny, but the accuracy of the bounding boxes didn't seem acceptable.\n#tf 1.15 for older versions of ImageAI - but tf doesn't support Py 3.8\n#ImageAI: older versions require tf 1.x\n#tf 2.4 - required by ImageAI 2.1.6 -- no GPU supported on Win 7, tf requires CUDA 11.0 (Win10). Win7: CUDA 10.x. CPU: works\n# Set the paths to models, images etc.\n# My experiments results: disappointingly bad pose estimation on the images I tested. Sometimes good, sometimes terrible. \n\nimport cv2\nimport tensorflow.compat.v1 as tf\nfrom imageai.Detection import ObjectDetection\nimport os\nboxes = []\n\ndef yolo():\n #name = \"k.jpg\"\n root = \"Z:\\\\\"\n name = \"23367640.png\" #t.jpg\" #\"p1.jpg\" #\"2w.jpg\" #\"grigor.jpg\" #\"2w.jpg\" #\"pose1.webp\" #1.jpg\"\n execution_path = os.getcwd()\n yolo_path = \"Z:\\\\yolo.h5\"\n #yolo_path = \"Z:\\\\yolo-tiny.h5\"\n localdir = False\n\n detector = ObjectDetection()\n detector.setModelTypeAsYOLOv3()\n #detector.setModelTypeAsTinyYOLOv3()\n \n if localdir:\n detector.setModelPath(os.path.join(execution_path , yolo_path))\n else: \n detector.setModelPath(yolo_path)\n\n #dir(detector)\n detector.loadModel()\n #loaded_model = tf.keras.models.load_model(\"./src/mood-saved-models/\"model + \".h5\")\n #loaded_model = tf.keras.models.load_model(detector.)\n\n #path = \"E:\\capture_023_29092020_150305.jpg\" #IMG_20200528_044908.jpg\"\n #pathOut = \"E:\\YOLO_capture_023_29092020_150305.jpg\"\n\n #path = \"pose1.webp\" #E:\\\\capture_046_29092020_150628.jpg\"\n pathOut = \"yolo_out_2.jpg\"\n\n\n \n path = root + name\n pathOut = root + name + \"yolo_out\" + \".jpg\"\n\n detections = detector.detectObjectsFromImage(input_image=os.path.join(execution_path , path), output_image_path=os.path.join(execution_path , pathOut), minimum_percentage_probability=10) #30)\n\n for eachObject in detections:\n print(eachObject[\"name\"] , \" : \", eachObject[\"percentage_probability\"], \" : \", eachObject[\"box_points\"] )\n print(\"--------------------------------\")\n return detections, path\n\ndet,path = yolo()\nyoloImage = cv2.imread(path) #crop regions from it \nfor i in det:\n print(i)\n \n\nprotoFile = \"Z:\\\\pose\\\\mpi\\\\pose_deploy_linevec_faster_4_stages.prototxt\"\n#protoFile = \"pose_deploy_linevec_faster_4_stages.prototxt\"\n#weightsFile = \"Z:\\\\pose\\\\mpi\\\\pose_iter_440000.caffemodel\"\nweightsFile = \"Z:\\\\pose\\\\mpi\\\\pose_iter_160000.caffemodel\"\n#weightsFile = \"pose_iter_160000.caffemodel\"\n#weightsFile = \"pose_iter_440000.caffemodel\"\n\n# Read the network into Memory\nnet = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)\n\n\"\"\"\n{'name': 'person', 'percentage_probability': 99.86668229103088, 'box_points': [1\n8, 38, 153, 397]}\n{'name': 'person', 'percentage_probability': 53.89136075973511, 'box_points': [3\n86, 93, 428, 171]}\n{'name': 'person', 'percentage_probability': 11.339860409498215, 'box_points': [\n585, 99, 641, 180]}\n{'name': 'person', 'percentage_probability': 10.276197642087936, 'box_points': [\n126, 178, 164, 290]}\n{'name': 'person', 'percentage_probability': 99.94878768920898, 'box_points': [2\n93, 80, 394, 410]}\n{'name': 'person', 'percentage_probability': 99.95986223220825, 'box_points': [4\n78, 88, 589, 410]}\n{'name': 'person', 'percentage_probability': 67.95878410339355, 'box_points': [1\n, 212, 39, 300]}\n{'name': 'person', 'percentage_probability': 63.609880208969116, 'box_points': [\n153, 193, 192, 306]}\n{'name': 'person', 'percentage_probability': 23.985233902931213, 'box_points': [\n226, 198, 265, 308]}\n{'name': 'sports ball', 'percentage_probability': 20.820775628089905, 'box_point\ns': [229, 50, 269, 94]}\n{'name': 'person', 'percentage_probability': 40.28712213039398, 'box_points': [4\n23, 110, 457, 160]}\nH, W, Ch 407 211 3\n\"\"\"\nyolo_thr = 70 #in percents, not 0.7\ncollected = []\nbWiden = False\nfor d in det:\n if (d['name'] == 'person') and d['percentage_probability'] > yolo_thr:\n x1,y1,x2,y2 = d['box_points']\n if bWiden:\n x1-=20\n x2+=20\n y1-=30\n y2+=30\n cropped = yoloImage[y1:y2, x1:x2] \n cv2.imshow(d['name']+str(x1), cropped)\n collected.append(cropped) #or copy first?\n cv2.waitKey()\n #x1,y1, ...\n\n# for i in collected: cv2.imshow(\"COLLECTED?\", i); cv2.waitKey() #OK\n \n# Read image\n#frame = cv2.imread(\"Z:\\\\23367640.png\") #1.jpg\")\n#src = \"Z:\\\\2w.jpg\" #z:\\\\pose1.webp\" #nacep1.jpg\"\n#src = \"z:\\\\pose1.webp\" \nsrcs = [\"z:\\\\pose1.webp\",\"Z:\\\\2w.jpg\", \"Z:\\\\grigor.jpg\"]\nid = 2\n#src = srcs[2] \nsrc = path #from first yolo, in order to compare\n\nframe = cv2.imread(src)\ncv2.imshow(\"FRAME\"+src, frame)\n#frameWidth, frameHeight, _ = frame.shape\nframeHeight, frameWidth, ch = frame.shape\nprint(\"H, W, Ch\", frameHeight, frameWidth, ch)\n \n# Specify the input image dimensions\ninWidth = 368 #184 #368\ninHeight = 368 #184 #368\n\n# Prepare the frame to be fed to the network\ninpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)\n\n#cv2.imshow(\"G\", inpBlob) #unsupported\n#cv2.waitKey(0)\n\n# Set the prepared object as the input blob of the network\nnet.setInput(inpBlob)\nprint(inpBlob)\noutput = net.forward()\n\nprint(output)\n\nprint(\"========\")\n\nH = output.shape[2]\nW = output.shape[3]\n# Empty list to store the detected keypoints\npoints = []\nthreshold = 0.3\nmaxKeypoints = 44\nKeypoints = output.shape[1]\nprint(\"Keypoints from output?\", Keypoints)\nKeypoints = 15 #MPI ... returns only 15\n\nlabels = [\"Head\", \"Neck\", \"Right Shoulder\", \"Right Elbow\", \"Right Wrist\", \"Left Shoulder\", \"Left Elbow\", \"Left Wrist\", \"Right Hip\", \"Right Knee\", \"Right Ankle\", \"Left Hip\", \"Left Knee\", \"Left Ankle\", \"Chest\", \"Background\"]\n\n#for i in range(len()):\nfor i in range(Keypoints): #?\n # confidence map of corresponding body's part.\n probMap = output[0, i, :, :]\n\n # Find global maxima of the probMap.\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n\n # Scale the point to fit on the original image\n x = (frameWidth * point[0]) / W\n y = (frameHeight * point[1]) / H\n\n if prob > threshold :\n cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)\n cv2.putText(frame, \"{}\".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n\n # Add the point to the list if the probability is greater than the threshold\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else :\n points.append(None)\n\nprint(points)\n\ncv2.imshow(\"Output-Keypoints\",frame)\n\ndef Detect(image): #inWidth, Height ... - global, set as params later \n frameHeight, frameWidth, ch = image.shape\n # Prepare the image to be fed to the network\n inpBlob = cv2.dnn.blobFromImage(image, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)\n\n #cv2.imshow(\"G\", inpBlob) #unsupported\n #cv2.waitKey(0)\n\n # Set the prepared object as the input blob of the network\n net.setInput(inpBlob)\n print(inpBlob)\n output = net.forward()\n\n print(output)\n\n print(\"========\")\n\n H = output.shape[2]\n W = output.shape[3]\n # Empty list to store the detected keypoints\n points = []\n threshold = 0.1\n maxKeypoints = 44\n Keypoints = output.shape[1]\n print(\"Keypoints from output?\", Keypoints)\n Keypoints = 15 #MPI ... returns only 15\n\n labels = [\"Head\", \"Neck\", \"Right Shoulder\", \"Right Elbow\", \"Right Wrist\", \"Left Shoulder\", \"Left Elbow\", \"Left Wrist\", \"Right Hip\", \"Right Knee\", \"Right Ankle\", \"Left Hip\", \"Left Knee\", \"Left Ankle\", \"Chest\", \"Background\"]\n\n #for i in range(len()):\n for i in range(Keypoints): #?\n # confidence map of corresponding body's part.\n probMap = output[0, i, :, :]\n\n # Find global maxima of the probMap.\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n\n # Scale the point to fit on the original image\n x = (frameWidth * point[0]) / W\n y = (frameHeight * point[1]) / H\n\n if prob > threshold :\n cv2.circle(image, (int(x), int(y)), 5, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)\n cv2.putText(image, \"{}\".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)\n\n # Add the point to the list if the probability is greater than the threshold\n print(i, labels[i])\n print(x, y)\n points.append((int(x), int(y)))\n else :\n points.append(None)\n\n print(points)\n cv2.imshow(\"Output-Keypoints\",image)\n cv2.waitKey()\n\nfor i in collected: Detect(i)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import abc
import numpy as np
import ray
from tqdm.autonotebook import tqdm
from src.algorithm.info_theory.it_estimator import (CachingEstimator,
MPCachingEstimator)
from src.algorithm.utils import differ, independent_roll, union
class FeatureSelector(metaclass=abc.ABCMeta):
def __init__(self, itEstimator, trajectories, discrete=False, nproc=None):
self.trajectories = trajectories
self.nproc = nproc
self.discrete = discrete
if nproc != 1:
self.itEstimator = MPCachingEstimator(
itEstimator, self._get_arrays, nproc=nproc)
self.two = ray.put(2)
else:
self.itEstimator = CachingEstimator(itEstimator, self._get_arrays)
self.seed()
self._setup()
def _setup(self):
self.n_features = self.trajectories[0].shape[1] - 1
self.id_reward = self.n_features
self.set_reward = frozenset({self.id_reward})
self.id_J_k = -1
self.set_J_k = frozenset({self.id_J_k})
self.idSet = frozenset(range(self.n_features))
self.idSelected = None
self.tot_t = min(len(tr) for tr in self.trajectories)
self.data_per_traj = np.dstack(
[tr[:self.tot_t, :] for tr in self.trajectories])
self.Rts = np.abs(self.data_per_traj[:, self.id_reward, :]).max(axis=1)
self.Rmax = self.Rts.max()
self.on_mu = None
self.trajectories = None
def _prep_data(self, max_t, on_mu):
if hasattr(self, 't_step_data') and max_t + 1 == self.t_step_data.shape[2] and on_mu == self.on_mu:
return
self.itEstimator.cache.clear()
assert max_t < self.tot_t, f"max timestep {max_t} is not less than the shortest trajectory (len {self.tot_t})"
self.on_mu = on_mu
if on_mu:
stop_len = 1
stop_ids = -1
else:
stop_len = self.tot_t - max_t
stop_ids = slice(None)
shift = np.zeros(self.n_features + 1, dtype=np.int)
shift[self.id_reward] = -1
self.t_step_data = []
for t in range(max_t + 1):
t_shift = t*shift
t_step_eps = []
for ep in self.data_per_traj.transpose(2, 0, 1):
t_step_eps.append(independent_roll(
ep, t_shift)[: stop_len, stop_ids])
self.t_step_data.append(np.vstack(t_step_eps))
if self.t_step_data:
self.t_step_data = np.dstack(self.t_step_data)
else:
self.t_step_data = np.empty((self.data_per_traj.shape[-1], 1, 1))
def _get_arrays(self, ids, t):
if not isinstance(ids, list):
ids = list(ids)
# memory efficiency
if self.on_mu:
if t == self.t_step_data.shape[2]:
ft, t = t, 0
else:
ft = 0
feats = self.data_per_traj[ft, :-1, :].T
rew = self.t_step_data[:, 0, t][:, None]
data = np.hstack([feats, rew, self.J_k])
return data[:, ids]
return self.t_step_data[:, ids, t]
def _generate_steplist(self, k, sampling, freq):
if sampling == "frequency":
max_t = (k-1) * freq
return np.arange(k*freq, step=freq), max_t
if sampling == "decaying":
p = np.exp(-np.arange(self.tot_t)/freq) / freq
p = p/p.sum()
steplist = np.sort(self.np_random.choice(
self.tot_t, size=k, replace=False, p=p))
return steplist, steplist[-1]
if sampling == "variance":
variances = np.var(
self.data_per_traj[:, self.id_reward, :], axis=1)
most_var = np.argsort(variances)[::-1][:k]
steplist = np.sort(most_var)
return steplist, steplist[-1]
raise NotImplemented
def _get_weights_by_steplist(self, steplist, gamma, use_Rt):
k = len(steplist)
gamma = gamma
weights = np.ones(k + 1)
weights[:-1] = gamma ** steplist
weights[k] = 1 - (1 - gamma) * weights[:-1].sum()
if use_Rt:
Rsteps = self.Rts[steplist]
weights[:-1] *= Rsteps
weights[k] *= self.max_J_k
return weights
def _prep_J_k(self, k, gamma):
self.J_k = np.polyval(
self.data_per_traj[k:, -1, :], gamma).reshape(-1, 1)
self.max_J_k = np.abs(self.J_k).max()
def _prep_all(self, k, gamma, sampling, freq, use_Rt, on_mu):
self.reset()
steplist, max_t = self._generate_steplist(k, sampling, freq)
self.steplist = steplist
self._prep_data(max_t, on_mu)
self._prep_J_k(k, gamma)
self.weights = self._get_weights_by_steplist(steplist, gamma, use_Rt)
return steplist
def scoreFeatures(self, *args, **kwargs):
if self.nproc != 1:
return self._scoreFeatureParallel(*args, **kwargs)
else:
return self._scoreFeatureSequential(*args, **kwargs)
def scoreSubset(self, *args, **kwargs):
if self.nproc != 1:
return self._scoreSubsetParallel(*args, **kwargs)
else:
return self._scoreSubsetSequential(*args, **kwargs)
def computeError(self, residual=None, correction=None, use_Rt=True):
if residual is None:
residual = self.residual_error
if correction is None:
correction = self.correction_term
if use_Rt:
Rmax = 1
else:
Rmax = self.Rmax
return 2**(1/2) * (Rmax * residual + correction)
def reset(self):
self.residual_error = 0
self.correction_term = 0
self.weights = None
self.steplist = None
self.idSelected = None
def seed(self, seed=None):
self.np_random = np.random.seed(seed)
return
def _scoreSubsetSequential(self, k, gamma, S, sampling="frequency", freq=1, use_Rt=True, on_mu=True, show_progress=True):
steplist = self._prep_all(k, gamma, sampling, freq, use_Rt, on_mu)
S = frozenset(S)
no_S = self.idSet.difference(S)
score = np.zeros(k+1)
for j, t in enumerate(steplist):
score[j] = self.itEstimator.estimateCMI(
self.set_reward, no_S, S, t=t)
score[k] = self.itEstimator.estimateCMI(self.set_J_k, no_S, S, t=k)
score = np.clip(score, 0, 2)
score = np.sqrt(score)
self.residual_error = score[:-1] @ self.weights[:-1]
self.correction_term = score[-1] * self.weights[-1]
return self.computeError(use_Rt=use_Rt)
def _scoreSubsetParallel(self, k, gamma, S, sampling="frequency", freq=1, use_Rt=True, on_mu=True, show_progress=True):
steplist = self._prep_all(k, gamma, sampling, freq, use_Rt, on_mu)
S = frozenset(S)
no_S = self.idSet.difference(S)
res = []
for t in steplist:
res.append(self.itEstimator.estimateCMI(
self.set_reward, no_S, S, t=t))
res.append(self.itEstimator.estimateCMI(self.set_J_k, no_S, S, t=k))
res = map(lambda x: ray.get(x), res)
score = np.fromiter(res, np.float64)
score = np.clip(score, 0, 2)
score = np.sqrt(score)
self.residual_error = score[:-1] @ self.weights[:-1]
self.correction_term = score[-1] * self.weights[-1]
return self.computeError(use_Rt=use_Rt)
def _scoreFeatureParallel(self, steplist, gamma, sum_cmi, show_progress):
k = len(steplist)
S = frozenset(self.idSelected)
no_S = self.idSet.difference(self.idSelected)
if self.forward:
shrink_S = no_S
op_S, op_noS = union, differ
else:
shrink_S = S
op_S, op_noS = differ, union
list_ids = np.fromiter(shrink_S, dtype=np.int)
res = []
for i, id in enumerate(list_ids):
id = frozenset({id})
S_next = op_S(S, id)
no_S_next = op_noS(no_S, id)
if sum_cmi:
target = id
else:
target = no_S_next
for j, t in enumerate(steplist):
res.append(self.itEstimator.estimateCMI(
self.set_reward, target, S_next, t=t))
res.append(self.itEstimator.estimateCMI(self.set_J_k, target,
S_next, t=k))
res = map(lambda x: ray.get(x), tqdm(
res, leave=False, disable=not show_progress))
score_mat = np.fromiter(res, np.float64).reshape(k + 1, -1, order='F')
score_mat = np.clip(score_mat, 0, 2)
scores = np.sqrt(score_mat)
cmi_wsum = np.einsum('a, ab->b', self.weights[:-1], scores[:-1, :])
new_cond_entropy = self.weights[-1] * scores[-1, :]
sorted_idx = np.argsort(cmi_wsum + new_cond_entropy)
return list_ids[sorted_idx], cmi_wsum[sorted_idx], new_cond_entropy[sorted_idx], score_mat[:, sorted_idx]
def _scoreFeatureSequential(self, steplist, gamma, sum_cmi, show_progress):
k = len(steplist)
S = frozenset(self.idSelected)
no_S = self.idSet.difference(self.idSelected)
if self.forward:
shrink_S = no_S
op_S, op_noS = union, differ
else:
shrink_S = S
op_S, op_noS = differ, union
list_ids = np.fromiter(shrink_S, dtype=np.int)
score_mat = np.zeros((k+1, len(list_ids)))
for i, id in enumerate(tqdm(list_ids, leave=False, disable=not show_progress)):
id = frozenset({id})
S_next = op_S(S, id)
no_S_next = op_noS(no_S, id)
if sum_cmi:
target = id
else:
target = no_S_next
for j, t in enumerate(steplist):
score_mat[j, i] = self.itEstimator.estimateCMI(
self.set_reward, target, S_next, t=t)
score_mat[k, i] = self.itEstimator.estimateCMI(
self.set_J_k, target, S_next, t=k)
score_mat = np.clip(score_mat, 0, 2)
scores = np.sqrt(score_mat)
cmi_wsum = np.einsum('a, ab->b', self.weights[:-1], scores[:-1, :])
new_cond_entropy = self.weights[-1] * scores[-1, :]
sorted_idx = np.argsort(cmi_wsum + new_cond_entropy)
return list_ids[sorted_idx], cmi_wsum[sorted_idx], new_cond_entropy[sorted_idx], score_mat[:, sorted_idx]
@abc.abstractmethod
def selectOnError(self, k, gamma, max_error, sampling="frequency", freq=1, use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):
pass
@abc.abstractmethod
def selectNfeatures(self, n, k, gamma, sampling="frequency", freq=1, use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):
pass
@abc.abstractmethod
def try_all(self, k, gamma, all_scores=False, max_n=None, sampling="frequency", freq=1, use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):
pass
|
normal
|
{
"blob_id": "983473129bfd56138a615e0f5bdb1353e9c6d8af",
"index": 6441,
"step-1": "<mask token>\n\n\nclass FeatureSelector(metaclass=abc.ABCMeta):\n <mask token>\n\n def _setup(self):\n self.n_features = self.trajectories[0].shape[1] - 1\n self.id_reward = self.n_features\n self.set_reward = frozenset({self.id_reward})\n self.id_J_k = -1\n self.set_J_k = frozenset({self.id_J_k})\n self.idSet = frozenset(range(self.n_features))\n self.idSelected = None\n self.tot_t = min(len(tr) for tr in self.trajectories)\n self.data_per_traj = np.dstack([tr[:self.tot_t, :] for tr in self.\n trajectories])\n self.Rts = np.abs(self.data_per_traj[:, self.id_reward, :]).max(axis=1)\n self.Rmax = self.Rts.max()\n self.on_mu = None\n self.trajectories = None\n\n def _prep_data(self, max_t, on_mu):\n if hasattr(self, 't_step_data'\n ) and max_t + 1 == self.t_step_data.shape[2\n ] and on_mu == self.on_mu:\n return\n self.itEstimator.cache.clear()\n assert max_t < self.tot_t, f'max timestep {max_t} is not less than the shortest trajectory (len {self.tot_t})'\n self.on_mu = on_mu\n if on_mu:\n stop_len = 1\n stop_ids = -1\n else:\n stop_len = self.tot_t - max_t\n stop_ids = slice(None)\n shift = np.zeros(self.n_features + 1, dtype=np.int)\n shift[self.id_reward] = -1\n self.t_step_data = []\n for t in range(max_t + 1):\n t_shift = t * shift\n t_step_eps = []\n for ep in self.data_per_traj.transpose(2, 0, 1):\n t_step_eps.append(independent_roll(ep, t_shift)[:stop_len,\n stop_ids])\n self.t_step_data.append(np.vstack(t_step_eps))\n if self.t_step_data:\n self.t_step_data = np.dstack(self.t_step_data)\n else:\n self.t_step_data = np.empty((self.data_per_traj.shape[-1], 1, 1))\n\n def _get_arrays(self, ids, t):\n if not isinstance(ids, list):\n ids = list(ids)\n if self.on_mu:\n if t == self.t_step_data.shape[2]:\n ft, t = t, 0\n else:\n ft = 0\n feats = self.data_per_traj[ft, :-1, :].T\n rew = self.t_step_data[:, 0, t][:, None]\n data = np.hstack([feats, rew, self.J_k])\n return data[:, ids]\n return self.t_step_data[:, ids, t]\n\n def _generate_steplist(self, k, sampling, freq):\n if sampling == 'frequency':\n max_t = (k - 1) * freq\n return np.arange(k * freq, step=freq), max_t\n if sampling == 'decaying':\n p = np.exp(-np.arange(self.tot_t) / freq) / freq\n p = p / p.sum()\n steplist = np.sort(self.np_random.choice(self.tot_t, size=k,\n replace=False, p=p))\n return steplist, steplist[-1]\n if sampling == 'variance':\n variances = np.var(self.data_per_traj[:, self.id_reward, :], axis=1\n )\n most_var = np.argsort(variances)[::-1][:k]\n steplist = np.sort(most_var)\n return steplist, steplist[-1]\n raise NotImplemented\n <mask token>\n\n def _prep_J_k(self, k, gamma):\n self.J_k = np.polyval(self.data_per_traj[k:, -1, :], gamma).reshape(\n -1, 1)\n self.max_J_k = np.abs(self.J_k).max()\n\n def _prep_all(self, k, gamma, sampling, freq, use_Rt, on_mu):\n self.reset()\n steplist, max_t = self._generate_steplist(k, sampling, freq)\n self.steplist = steplist\n self._prep_data(max_t, on_mu)\n self._prep_J_k(k, gamma)\n self.weights = self._get_weights_by_steplist(steplist, gamma, use_Rt)\n return steplist\n <mask token>\n\n def scoreSubset(self, *args, **kwargs):\n if self.nproc != 1:\n return self._scoreSubsetParallel(*args, **kwargs)\n else:\n return self._scoreSubsetSequential(*args, **kwargs)\n\n def computeError(self, residual=None, correction=None, use_Rt=True):\n if residual is None:\n residual = self.residual_error\n if correction is None:\n correction = self.correction_term\n if use_Rt:\n Rmax = 1\n else:\n Rmax = self.Rmax\n return 2 ** (1 / 2) * (Rmax * residual + correction)\n\n def reset(self):\n self.residual_error = 0\n self.correction_term = 0\n self.weights = None\n self.steplist = None\n self.idSelected = None\n <mask token>\n <mask token>\n\n def _scoreSubsetParallel(self, k, gamma, S, sampling='frequency', freq=\n 1, use_Rt=True, on_mu=True, show_progress=True):\n steplist = self._prep_all(k, gamma, sampling, freq, use_Rt, on_mu)\n S = frozenset(S)\n no_S = self.idSet.difference(S)\n res = []\n for t in steplist:\n res.append(self.itEstimator.estimateCMI(self.set_reward, no_S,\n S, t=t))\n res.append(self.itEstimator.estimateCMI(self.set_J_k, no_S, S, t=k))\n res = map(lambda x: ray.get(x), res)\n score = np.fromiter(res, np.float64)\n score = np.clip(score, 0, 2)\n score = np.sqrt(score)\n self.residual_error = score[:-1] @ self.weights[:-1]\n self.correction_term = score[-1] * self.weights[-1]\n return self.computeError(use_Rt=use_Rt)\n\n def _scoreFeatureParallel(self, steplist, gamma, sum_cmi, show_progress):\n k = len(steplist)\n S = frozenset(self.idSelected)\n no_S = self.idSet.difference(self.idSelected)\n if self.forward:\n shrink_S = no_S\n op_S, op_noS = union, differ\n else:\n shrink_S = S\n op_S, op_noS = differ, union\n list_ids = np.fromiter(shrink_S, dtype=np.int)\n res = []\n for i, id in enumerate(list_ids):\n id = frozenset({id})\n S_next = op_S(S, id)\n no_S_next = op_noS(no_S, id)\n if sum_cmi:\n target = id\n else:\n target = no_S_next\n for j, t in enumerate(steplist):\n res.append(self.itEstimator.estimateCMI(self.set_reward,\n target, S_next, t=t))\n res.append(self.itEstimator.estimateCMI(self.set_J_k, target,\n S_next, t=k))\n res = map(lambda x: ray.get(x), tqdm(res, leave=False, disable=not\n show_progress))\n score_mat = np.fromiter(res, np.float64).reshape(k + 1, -1, order='F')\n score_mat = np.clip(score_mat, 0, 2)\n scores = np.sqrt(score_mat)\n cmi_wsum = np.einsum('a, ab->b', self.weights[:-1], scores[:-1, :])\n new_cond_entropy = self.weights[-1] * scores[-1, :]\n sorted_idx = np.argsort(cmi_wsum + new_cond_entropy)\n return list_ids[sorted_idx], cmi_wsum[sorted_idx], new_cond_entropy[\n sorted_idx], score_mat[:, sorted_idx]\n\n def _scoreFeatureSequential(self, steplist, gamma, sum_cmi, show_progress):\n k = len(steplist)\n S = frozenset(self.idSelected)\n no_S = self.idSet.difference(self.idSelected)\n if self.forward:\n shrink_S = no_S\n op_S, op_noS = union, differ\n else:\n shrink_S = S\n op_S, op_noS = differ, union\n list_ids = np.fromiter(shrink_S, dtype=np.int)\n score_mat = np.zeros((k + 1, len(list_ids)))\n for i, id in enumerate(tqdm(list_ids, leave=False, disable=not\n show_progress)):\n id = frozenset({id})\n S_next = op_S(S, id)\n no_S_next = op_noS(no_S, id)\n if sum_cmi:\n target = id\n else:\n target = no_S_next\n for j, t in enumerate(steplist):\n score_mat[j, i] = self.itEstimator.estimateCMI(self.\n set_reward, target, S_next, t=t)\n score_mat[k, i] = self.itEstimator.estimateCMI(self.set_J_k,\n target, S_next, t=k)\n score_mat = np.clip(score_mat, 0, 2)\n scores = np.sqrt(score_mat)\n cmi_wsum = np.einsum('a, ab->b', self.weights[:-1], scores[:-1, :])\n new_cond_entropy = self.weights[-1] * scores[-1, :]\n sorted_idx = np.argsort(cmi_wsum + new_cond_entropy)\n return list_ids[sorted_idx], cmi_wsum[sorted_idx], new_cond_entropy[\n sorted_idx], score_mat[:, sorted_idx]\n\n @abc.abstractmethod\n def selectOnError(self, k, gamma, max_error, sampling='frequency', freq\n =1, use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):\n pass\n\n @abc.abstractmethod\n def selectNfeatures(self, n, k, gamma, sampling='frequency', freq=1,\n use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):\n pass\n\n @abc.abstractmethod\n def try_all(self, k, gamma, all_scores=False, max_n=None, sampling=\n 'frequency', freq=1, use_Rt=True, on_mu=True, sum_cmi=True,\n show_progress=True):\n pass\n",
"step-2": "<mask token>\n\n\nclass FeatureSelector(metaclass=abc.ABCMeta):\n\n def __init__(self, itEstimator, trajectories, discrete=False, nproc=None):\n self.trajectories = trajectories\n self.nproc = nproc\n self.discrete = discrete\n if nproc != 1:\n self.itEstimator = MPCachingEstimator(itEstimator, self.\n _get_arrays, nproc=nproc)\n self.two = ray.put(2)\n else:\n self.itEstimator = CachingEstimator(itEstimator, self._get_arrays)\n self.seed()\n self._setup()\n\n def _setup(self):\n self.n_features = self.trajectories[0].shape[1] - 1\n self.id_reward = self.n_features\n self.set_reward = frozenset({self.id_reward})\n self.id_J_k = -1\n self.set_J_k = frozenset({self.id_J_k})\n self.idSet = frozenset(range(self.n_features))\n self.idSelected = None\n self.tot_t = min(len(tr) for tr in self.trajectories)\n self.data_per_traj = np.dstack([tr[:self.tot_t, :] for tr in self.\n trajectories])\n self.Rts = np.abs(self.data_per_traj[:, self.id_reward, :]).max(axis=1)\n self.Rmax = self.Rts.max()\n self.on_mu = None\n self.trajectories = None\n\n def _prep_data(self, max_t, on_mu):\n if hasattr(self, 't_step_data'\n ) and max_t + 1 == self.t_step_data.shape[2\n ] and on_mu == self.on_mu:\n return\n self.itEstimator.cache.clear()\n assert max_t < self.tot_t, f'max timestep {max_t} is not less than the shortest trajectory (len {self.tot_t})'\n self.on_mu = on_mu\n if on_mu:\n stop_len = 1\n stop_ids = -1\n else:\n stop_len = self.tot_t - max_t\n stop_ids = slice(None)\n shift = np.zeros(self.n_features + 1, dtype=np.int)\n shift[self.id_reward] = -1\n self.t_step_data = []\n for t in range(max_t + 1):\n t_shift = t * shift\n t_step_eps = []\n for ep in self.data_per_traj.transpose(2, 0, 1):\n t_step_eps.append(independent_roll(ep, t_shift)[:stop_len,\n stop_ids])\n self.t_step_data.append(np.vstack(t_step_eps))\n if self.t_step_data:\n self.t_step_data = np.dstack(self.t_step_data)\n else:\n self.t_step_data = np.empty((self.data_per_traj.shape[-1], 1, 1))\n\n def _get_arrays(self, ids, t):\n if not isinstance(ids, list):\n ids = list(ids)\n if self.on_mu:\n if t == self.t_step_data.shape[2]:\n ft, t = t, 0\n else:\n ft = 0\n feats = self.data_per_traj[ft, :-1, :].T\n rew = self.t_step_data[:, 0, t][:, None]\n data = np.hstack([feats, rew, self.J_k])\n return data[:, ids]\n return self.t_step_data[:, ids, t]\n\n def _generate_steplist(self, k, sampling, freq):\n if sampling == 'frequency':\n max_t = (k - 1) * freq\n return np.arange(k * freq, step=freq), max_t\n if sampling == 'decaying':\n p = np.exp(-np.arange(self.tot_t) / freq) / freq\n p = p / p.sum()\n steplist = np.sort(self.np_random.choice(self.tot_t, size=k,\n replace=False, p=p))\n return steplist, steplist[-1]\n if sampling == 'variance':\n variances = np.var(self.data_per_traj[:, self.id_reward, :], axis=1\n )\n most_var = np.argsort(variances)[::-1][:k]\n steplist = np.sort(most_var)\n return steplist, steplist[-1]\n raise NotImplemented\n <mask token>\n\n def _prep_J_k(self, k, gamma):\n self.J_k = np.polyval(self.data_per_traj[k:, -1, :], gamma).reshape(\n -1, 1)\n self.max_J_k = np.abs(self.J_k).max()\n\n def _prep_all(self, k, gamma, sampling, freq, use_Rt, on_mu):\n self.reset()\n steplist, max_t = self._generate_steplist(k, sampling, freq)\n self.steplist = steplist\n self._prep_data(max_t, on_mu)\n self._prep_J_k(k, gamma)\n self.weights = self._get_weights_by_steplist(steplist, gamma, use_Rt)\n return steplist\n <mask token>\n\n def scoreSubset(self, *args, **kwargs):\n if self.nproc != 1:\n return self._scoreSubsetParallel(*args, **kwargs)\n else:\n return self._scoreSubsetSequential(*args, **kwargs)\n\n def computeError(self, residual=None, correction=None, use_Rt=True):\n if residual is None:\n residual = self.residual_error\n if correction is None:\n correction = self.correction_term\n if use_Rt:\n Rmax = 1\n else:\n Rmax = self.Rmax\n return 2 ** (1 / 2) * (Rmax * residual + correction)\n\n def reset(self):\n self.residual_error = 0\n self.correction_term = 0\n self.weights = None\n self.steplist = None\n self.idSelected = None\n\n def seed(self, seed=None):\n self.np_random = np.random.seed(seed)\n return\n <mask token>\n\n def _scoreSubsetParallel(self, k, gamma, S, sampling='frequency', freq=\n 1, use_Rt=True, on_mu=True, show_progress=True):\n steplist = self._prep_all(k, gamma, sampling, freq, use_Rt, on_mu)\n S = frozenset(S)\n no_S = self.idSet.difference(S)\n res = []\n for t in steplist:\n res.append(self.itEstimator.estimateCMI(self.set_reward, no_S,\n S, t=t))\n res.append(self.itEstimator.estimateCMI(self.set_J_k, no_S, S, t=k))\n res = map(lambda x: ray.get(x), res)\n score = np.fromiter(res, np.float64)\n score = np.clip(score, 0, 2)\n score = np.sqrt(score)\n self.residual_error = score[:-1] @ self.weights[:-1]\n self.correction_term = score[-1] * self.weights[-1]\n return self.computeError(use_Rt=use_Rt)\n\n def _scoreFeatureParallel(self, steplist, gamma, sum_cmi, show_progress):\n k = len(steplist)\n S = frozenset(self.idSelected)\n no_S = self.idSet.difference(self.idSelected)\n if self.forward:\n shrink_S = no_S\n op_S, op_noS = union, differ\n else:\n shrink_S = S\n op_S, op_noS = differ, union\n list_ids = np.fromiter(shrink_S, dtype=np.int)\n res = []\n for i, id in enumerate(list_ids):\n id = frozenset({id})\n S_next = op_S(S, id)\n no_S_next = op_noS(no_S, id)\n if sum_cmi:\n target = id\n else:\n target = no_S_next\n for j, t in enumerate(steplist):\n res.append(self.itEstimator.estimateCMI(self.set_reward,\n target, S_next, t=t))\n res.append(self.itEstimator.estimateCMI(self.set_J_k, target,\n S_next, t=k))\n res = map(lambda x: ray.get(x), tqdm(res, leave=False, disable=not\n show_progress))\n score_mat = np.fromiter(res, np.float64).reshape(k + 1, -1, order='F')\n score_mat = np.clip(score_mat, 0, 2)\n scores = np.sqrt(score_mat)\n cmi_wsum = np.einsum('a, ab->b', self.weights[:-1], scores[:-1, :])\n new_cond_entropy = self.weights[-1] * scores[-1, :]\n sorted_idx = np.argsort(cmi_wsum + new_cond_entropy)\n return list_ids[sorted_idx], cmi_wsum[sorted_idx], new_cond_entropy[\n sorted_idx], score_mat[:, sorted_idx]\n\n def _scoreFeatureSequential(self, steplist, gamma, sum_cmi, show_progress):\n k = len(steplist)\n S = frozenset(self.idSelected)\n no_S = self.idSet.difference(self.idSelected)\n if self.forward:\n shrink_S = no_S\n op_S, op_noS = union, differ\n else:\n shrink_S = S\n op_S, op_noS = differ, union\n list_ids = np.fromiter(shrink_S, dtype=np.int)\n score_mat = np.zeros((k + 1, len(list_ids)))\n for i, id in enumerate(tqdm(list_ids, leave=False, disable=not\n show_progress)):\n id = frozenset({id})\n S_next = op_S(S, id)\n no_S_next = op_noS(no_S, id)\n if sum_cmi:\n target = id\n else:\n target = no_S_next\n for j, t in enumerate(steplist):\n score_mat[j, i] = self.itEstimator.estimateCMI(self.\n set_reward, target, S_next, t=t)\n score_mat[k, i] = self.itEstimator.estimateCMI(self.set_J_k,\n target, S_next, t=k)\n score_mat = np.clip(score_mat, 0, 2)\n scores = np.sqrt(score_mat)\n cmi_wsum = np.einsum('a, ab->b', self.weights[:-1], scores[:-1, :])\n new_cond_entropy = self.weights[-1] * scores[-1, :]\n sorted_idx = np.argsort(cmi_wsum + new_cond_entropy)\n return list_ids[sorted_idx], cmi_wsum[sorted_idx], new_cond_entropy[\n sorted_idx], score_mat[:, sorted_idx]\n\n @abc.abstractmethod\n def selectOnError(self, k, gamma, max_error, sampling='frequency', freq\n =1, use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):\n pass\n\n @abc.abstractmethod\n def selectNfeatures(self, n, k, gamma, sampling='frequency', freq=1,\n use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):\n pass\n\n @abc.abstractmethod\n def try_all(self, k, gamma, all_scores=False, max_n=None, sampling=\n 'frequency', freq=1, use_Rt=True, on_mu=True, sum_cmi=True,\n show_progress=True):\n pass\n",
"step-3": "<mask token>\n\n\nclass FeatureSelector(metaclass=abc.ABCMeta):\n\n def __init__(self, itEstimator, trajectories, discrete=False, nproc=None):\n self.trajectories = trajectories\n self.nproc = nproc\n self.discrete = discrete\n if nproc != 1:\n self.itEstimator = MPCachingEstimator(itEstimator, self.\n _get_arrays, nproc=nproc)\n self.two = ray.put(2)\n else:\n self.itEstimator = CachingEstimator(itEstimator, self._get_arrays)\n self.seed()\n self._setup()\n\n def _setup(self):\n self.n_features = self.trajectories[0].shape[1] - 1\n self.id_reward = self.n_features\n self.set_reward = frozenset({self.id_reward})\n self.id_J_k = -1\n self.set_J_k = frozenset({self.id_J_k})\n self.idSet = frozenset(range(self.n_features))\n self.idSelected = None\n self.tot_t = min(len(tr) for tr in self.trajectories)\n self.data_per_traj = np.dstack([tr[:self.tot_t, :] for tr in self.\n trajectories])\n self.Rts = np.abs(self.data_per_traj[:, self.id_reward, :]).max(axis=1)\n self.Rmax = self.Rts.max()\n self.on_mu = None\n self.trajectories = None\n\n def _prep_data(self, max_t, on_mu):\n if hasattr(self, 't_step_data'\n ) and max_t + 1 == self.t_step_data.shape[2\n ] and on_mu == self.on_mu:\n return\n self.itEstimator.cache.clear()\n assert max_t < self.tot_t, f'max timestep {max_t} is not less than the shortest trajectory (len {self.tot_t})'\n self.on_mu = on_mu\n if on_mu:\n stop_len = 1\n stop_ids = -1\n else:\n stop_len = self.tot_t - max_t\n stop_ids = slice(None)\n shift = np.zeros(self.n_features + 1, dtype=np.int)\n shift[self.id_reward] = -1\n self.t_step_data = []\n for t in range(max_t + 1):\n t_shift = t * shift\n t_step_eps = []\n for ep in self.data_per_traj.transpose(2, 0, 1):\n t_step_eps.append(independent_roll(ep, t_shift)[:stop_len,\n stop_ids])\n self.t_step_data.append(np.vstack(t_step_eps))\n if self.t_step_data:\n self.t_step_data = np.dstack(self.t_step_data)\n else:\n self.t_step_data = np.empty((self.data_per_traj.shape[-1], 1, 1))\n\n def _get_arrays(self, ids, t):\n if not isinstance(ids, list):\n ids = list(ids)\n if self.on_mu:\n if t == self.t_step_data.shape[2]:\n ft, t = t, 0\n else:\n ft = 0\n feats = self.data_per_traj[ft, :-1, :].T\n rew = self.t_step_data[:, 0, t][:, None]\n data = np.hstack([feats, rew, self.J_k])\n return data[:, ids]\n return self.t_step_data[:, ids, t]\n\n def _generate_steplist(self, k, sampling, freq):\n if sampling == 'frequency':\n max_t = (k - 1) * freq\n return np.arange(k * freq, step=freq), max_t\n if sampling == 'decaying':\n p = np.exp(-np.arange(self.tot_t) / freq) / freq\n p = p / p.sum()\n steplist = np.sort(self.np_random.choice(self.tot_t, size=k,\n replace=False, p=p))\n return steplist, steplist[-1]\n if sampling == 'variance':\n variances = np.var(self.data_per_traj[:, self.id_reward, :], axis=1\n )\n most_var = np.argsort(variances)[::-1][:k]\n steplist = np.sort(most_var)\n return steplist, steplist[-1]\n raise NotImplemented\n\n def _get_weights_by_steplist(self, steplist, gamma, use_Rt):\n k = len(steplist)\n gamma = gamma\n weights = np.ones(k + 1)\n weights[:-1] = gamma ** steplist\n weights[k] = 1 - (1 - gamma) * weights[:-1].sum()\n if use_Rt:\n Rsteps = self.Rts[steplist]\n weights[:-1] *= Rsteps\n weights[k] *= self.max_J_k\n return weights\n\n def _prep_J_k(self, k, gamma):\n self.J_k = np.polyval(self.data_per_traj[k:, -1, :], gamma).reshape(\n -1, 1)\n self.max_J_k = np.abs(self.J_k).max()\n\n def _prep_all(self, k, gamma, sampling, freq, use_Rt, on_mu):\n self.reset()\n steplist, max_t = self._generate_steplist(k, sampling, freq)\n self.steplist = steplist\n self._prep_data(max_t, on_mu)\n self._prep_J_k(k, gamma)\n self.weights = self._get_weights_by_steplist(steplist, gamma, use_Rt)\n return steplist\n\n def scoreFeatures(self, *args, **kwargs):\n if self.nproc != 1:\n return self._scoreFeatureParallel(*args, **kwargs)\n else:\n return self._scoreFeatureSequential(*args, **kwargs)\n\n def scoreSubset(self, *args, **kwargs):\n if self.nproc != 1:\n return self._scoreSubsetParallel(*args, **kwargs)\n else:\n return self._scoreSubsetSequential(*args, **kwargs)\n\n def computeError(self, residual=None, correction=None, use_Rt=True):\n if residual is None:\n residual = self.residual_error\n if correction is None:\n correction = self.correction_term\n if use_Rt:\n Rmax = 1\n else:\n Rmax = self.Rmax\n return 2 ** (1 / 2) * (Rmax * residual + correction)\n\n def reset(self):\n self.residual_error = 0\n self.correction_term = 0\n self.weights = None\n self.steplist = None\n self.idSelected = None\n\n def seed(self, seed=None):\n self.np_random = np.random.seed(seed)\n return\n <mask token>\n\n def _scoreSubsetParallel(self, k, gamma, S, sampling='frequency', freq=\n 1, use_Rt=True, on_mu=True, show_progress=True):\n steplist = self._prep_all(k, gamma, sampling, freq, use_Rt, on_mu)\n S = frozenset(S)\n no_S = self.idSet.difference(S)\n res = []\n for t in steplist:\n res.append(self.itEstimator.estimateCMI(self.set_reward, no_S,\n S, t=t))\n res.append(self.itEstimator.estimateCMI(self.set_J_k, no_S, S, t=k))\n res = map(lambda x: ray.get(x), res)\n score = np.fromiter(res, np.float64)\n score = np.clip(score, 0, 2)\n score = np.sqrt(score)\n self.residual_error = score[:-1] @ self.weights[:-1]\n self.correction_term = score[-1] * self.weights[-1]\n return self.computeError(use_Rt=use_Rt)\n\n def _scoreFeatureParallel(self, steplist, gamma, sum_cmi, show_progress):\n k = len(steplist)\n S = frozenset(self.idSelected)\n no_S = self.idSet.difference(self.idSelected)\n if self.forward:\n shrink_S = no_S\n op_S, op_noS = union, differ\n else:\n shrink_S = S\n op_S, op_noS = differ, union\n list_ids = np.fromiter(shrink_S, dtype=np.int)\n res = []\n for i, id in enumerate(list_ids):\n id = frozenset({id})\n S_next = op_S(S, id)\n no_S_next = op_noS(no_S, id)\n if sum_cmi:\n target = id\n else:\n target = no_S_next\n for j, t in enumerate(steplist):\n res.append(self.itEstimator.estimateCMI(self.set_reward,\n target, S_next, t=t))\n res.append(self.itEstimator.estimateCMI(self.set_J_k, target,\n S_next, t=k))\n res = map(lambda x: ray.get(x), tqdm(res, leave=False, disable=not\n show_progress))\n score_mat = np.fromiter(res, np.float64).reshape(k + 1, -1, order='F')\n score_mat = np.clip(score_mat, 0, 2)\n scores = np.sqrt(score_mat)\n cmi_wsum = np.einsum('a, ab->b', self.weights[:-1], scores[:-1, :])\n new_cond_entropy = self.weights[-1] * scores[-1, :]\n sorted_idx = np.argsort(cmi_wsum + new_cond_entropy)\n return list_ids[sorted_idx], cmi_wsum[sorted_idx], new_cond_entropy[\n sorted_idx], score_mat[:, sorted_idx]\n\n def _scoreFeatureSequential(self, steplist, gamma, sum_cmi, show_progress):\n k = len(steplist)\n S = frozenset(self.idSelected)\n no_S = self.idSet.difference(self.idSelected)\n if self.forward:\n shrink_S = no_S\n op_S, op_noS = union, differ\n else:\n shrink_S = S\n op_S, op_noS = differ, union\n list_ids = np.fromiter(shrink_S, dtype=np.int)\n score_mat = np.zeros((k + 1, len(list_ids)))\n for i, id in enumerate(tqdm(list_ids, leave=False, disable=not\n show_progress)):\n id = frozenset({id})\n S_next = op_S(S, id)\n no_S_next = op_noS(no_S, id)\n if sum_cmi:\n target = id\n else:\n target = no_S_next\n for j, t in enumerate(steplist):\n score_mat[j, i] = self.itEstimator.estimateCMI(self.\n set_reward, target, S_next, t=t)\n score_mat[k, i] = self.itEstimator.estimateCMI(self.set_J_k,\n target, S_next, t=k)\n score_mat = np.clip(score_mat, 0, 2)\n scores = np.sqrt(score_mat)\n cmi_wsum = np.einsum('a, ab->b', self.weights[:-1], scores[:-1, :])\n new_cond_entropy = self.weights[-1] * scores[-1, :]\n sorted_idx = np.argsort(cmi_wsum + new_cond_entropy)\n return list_ids[sorted_idx], cmi_wsum[sorted_idx], new_cond_entropy[\n sorted_idx], score_mat[:, sorted_idx]\n\n @abc.abstractmethod\n def selectOnError(self, k, gamma, max_error, sampling='frequency', freq\n =1, use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):\n pass\n\n @abc.abstractmethod\n def selectNfeatures(self, n, k, gamma, sampling='frequency', freq=1,\n use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):\n pass\n\n @abc.abstractmethod\n def try_all(self, k, gamma, all_scores=False, max_n=None, sampling=\n 'frequency', freq=1, use_Rt=True, on_mu=True, sum_cmi=True,\n show_progress=True):\n pass\n",
"step-4": "import abc\nimport numpy as np\nimport ray\nfrom tqdm.autonotebook import tqdm\nfrom src.algorithm.info_theory.it_estimator import CachingEstimator, MPCachingEstimator\nfrom src.algorithm.utils import differ, independent_roll, union\n\n\nclass FeatureSelector(metaclass=abc.ABCMeta):\n\n def __init__(self, itEstimator, trajectories, discrete=False, nproc=None):\n self.trajectories = trajectories\n self.nproc = nproc\n self.discrete = discrete\n if nproc != 1:\n self.itEstimator = MPCachingEstimator(itEstimator, self.\n _get_arrays, nproc=nproc)\n self.two = ray.put(2)\n else:\n self.itEstimator = CachingEstimator(itEstimator, self._get_arrays)\n self.seed()\n self._setup()\n\n def _setup(self):\n self.n_features = self.trajectories[0].shape[1] - 1\n self.id_reward = self.n_features\n self.set_reward = frozenset({self.id_reward})\n self.id_J_k = -1\n self.set_J_k = frozenset({self.id_J_k})\n self.idSet = frozenset(range(self.n_features))\n self.idSelected = None\n self.tot_t = min(len(tr) for tr in self.trajectories)\n self.data_per_traj = np.dstack([tr[:self.tot_t, :] for tr in self.\n trajectories])\n self.Rts = np.abs(self.data_per_traj[:, self.id_reward, :]).max(axis=1)\n self.Rmax = self.Rts.max()\n self.on_mu = None\n self.trajectories = None\n\n def _prep_data(self, max_t, on_mu):\n if hasattr(self, 't_step_data'\n ) and max_t + 1 == self.t_step_data.shape[2\n ] and on_mu == self.on_mu:\n return\n self.itEstimator.cache.clear()\n assert max_t < self.tot_t, f'max timestep {max_t} is not less than the shortest trajectory (len {self.tot_t})'\n self.on_mu = on_mu\n if on_mu:\n stop_len = 1\n stop_ids = -1\n else:\n stop_len = self.tot_t - max_t\n stop_ids = slice(None)\n shift = np.zeros(self.n_features + 1, dtype=np.int)\n shift[self.id_reward] = -1\n self.t_step_data = []\n for t in range(max_t + 1):\n t_shift = t * shift\n t_step_eps = []\n for ep in self.data_per_traj.transpose(2, 0, 1):\n t_step_eps.append(independent_roll(ep, t_shift)[:stop_len,\n stop_ids])\n self.t_step_data.append(np.vstack(t_step_eps))\n if self.t_step_data:\n self.t_step_data = np.dstack(self.t_step_data)\n else:\n self.t_step_data = np.empty((self.data_per_traj.shape[-1], 1, 1))\n\n def _get_arrays(self, ids, t):\n if not isinstance(ids, list):\n ids = list(ids)\n if self.on_mu:\n if t == self.t_step_data.shape[2]:\n ft, t = t, 0\n else:\n ft = 0\n feats = self.data_per_traj[ft, :-1, :].T\n rew = self.t_step_data[:, 0, t][:, None]\n data = np.hstack([feats, rew, self.J_k])\n return data[:, ids]\n return self.t_step_data[:, ids, t]\n\n def _generate_steplist(self, k, sampling, freq):\n if sampling == 'frequency':\n max_t = (k - 1) * freq\n return np.arange(k * freq, step=freq), max_t\n if sampling == 'decaying':\n p = np.exp(-np.arange(self.tot_t) / freq) / freq\n p = p / p.sum()\n steplist = np.sort(self.np_random.choice(self.tot_t, size=k,\n replace=False, p=p))\n return steplist, steplist[-1]\n if sampling == 'variance':\n variances = np.var(self.data_per_traj[:, self.id_reward, :], axis=1\n )\n most_var = np.argsort(variances)[::-1][:k]\n steplist = np.sort(most_var)\n return steplist, steplist[-1]\n raise NotImplemented\n\n def _get_weights_by_steplist(self, steplist, gamma, use_Rt):\n k = len(steplist)\n gamma = gamma\n weights = np.ones(k + 1)\n weights[:-1] = gamma ** steplist\n weights[k] = 1 - (1 - gamma) * weights[:-1].sum()\n if use_Rt:\n Rsteps = self.Rts[steplist]\n weights[:-1] *= Rsteps\n weights[k] *= self.max_J_k\n return weights\n\n def _prep_J_k(self, k, gamma):\n self.J_k = np.polyval(self.data_per_traj[k:, -1, :], gamma).reshape(\n -1, 1)\n self.max_J_k = np.abs(self.J_k).max()\n\n def _prep_all(self, k, gamma, sampling, freq, use_Rt, on_mu):\n self.reset()\n steplist, max_t = self._generate_steplist(k, sampling, freq)\n self.steplist = steplist\n self._prep_data(max_t, on_mu)\n self._prep_J_k(k, gamma)\n self.weights = self._get_weights_by_steplist(steplist, gamma, use_Rt)\n return steplist\n\n def scoreFeatures(self, *args, **kwargs):\n if self.nproc != 1:\n return self._scoreFeatureParallel(*args, **kwargs)\n else:\n return self._scoreFeatureSequential(*args, **kwargs)\n\n def scoreSubset(self, *args, **kwargs):\n if self.nproc != 1:\n return self._scoreSubsetParallel(*args, **kwargs)\n else:\n return self._scoreSubsetSequential(*args, **kwargs)\n\n def computeError(self, residual=None, correction=None, use_Rt=True):\n if residual is None:\n residual = self.residual_error\n if correction is None:\n correction = self.correction_term\n if use_Rt:\n Rmax = 1\n else:\n Rmax = self.Rmax\n return 2 ** (1 / 2) * (Rmax * residual + correction)\n\n def reset(self):\n self.residual_error = 0\n self.correction_term = 0\n self.weights = None\n self.steplist = None\n self.idSelected = None\n\n def seed(self, seed=None):\n self.np_random = np.random.seed(seed)\n return\n\n def _scoreSubsetSequential(self, k, gamma, S, sampling='frequency',\n freq=1, use_Rt=True, on_mu=True, show_progress=True):\n steplist = self._prep_all(k, gamma, sampling, freq, use_Rt, on_mu)\n S = frozenset(S)\n no_S = self.idSet.difference(S)\n score = np.zeros(k + 1)\n for j, t in enumerate(steplist):\n score[j] = self.itEstimator.estimateCMI(self.set_reward, no_S,\n S, t=t)\n score[k] = self.itEstimator.estimateCMI(self.set_J_k, no_S, S, t=k)\n score = np.clip(score, 0, 2)\n score = np.sqrt(score)\n self.residual_error = score[:-1] @ self.weights[:-1]\n self.correction_term = score[-1] * self.weights[-1]\n return self.computeError(use_Rt=use_Rt)\n\n def _scoreSubsetParallel(self, k, gamma, S, sampling='frequency', freq=\n 1, use_Rt=True, on_mu=True, show_progress=True):\n steplist = self._prep_all(k, gamma, sampling, freq, use_Rt, on_mu)\n S = frozenset(S)\n no_S = self.idSet.difference(S)\n res = []\n for t in steplist:\n res.append(self.itEstimator.estimateCMI(self.set_reward, no_S,\n S, t=t))\n res.append(self.itEstimator.estimateCMI(self.set_J_k, no_S, S, t=k))\n res = map(lambda x: ray.get(x), res)\n score = np.fromiter(res, np.float64)\n score = np.clip(score, 0, 2)\n score = np.sqrt(score)\n self.residual_error = score[:-1] @ self.weights[:-1]\n self.correction_term = score[-1] * self.weights[-1]\n return self.computeError(use_Rt=use_Rt)\n\n def _scoreFeatureParallel(self, steplist, gamma, sum_cmi, show_progress):\n k = len(steplist)\n S = frozenset(self.idSelected)\n no_S = self.idSet.difference(self.idSelected)\n if self.forward:\n shrink_S = no_S\n op_S, op_noS = union, differ\n else:\n shrink_S = S\n op_S, op_noS = differ, union\n list_ids = np.fromiter(shrink_S, dtype=np.int)\n res = []\n for i, id in enumerate(list_ids):\n id = frozenset({id})\n S_next = op_S(S, id)\n no_S_next = op_noS(no_S, id)\n if sum_cmi:\n target = id\n else:\n target = no_S_next\n for j, t in enumerate(steplist):\n res.append(self.itEstimator.estimateCMI(self.set_reward,\n target, S_next, t=t))\n res.append(self.itEstimator.estimateCMI(self.set_J_k, target,\n S_next, t=k))\n res = map(lambda x: ray.get(x), tqdm(res, leave=False, disable=not\n show_progress))\n score_mat = np.fromiter(res, np.float64).reshape(k + 1, -1, order='F')\n score_mat = np.clip(score_mat, 0, 2)\n scores = np.sqrt(score_mat)\n cmi_wsum = np.einsum('a, ab->b', self.weights[:-1], scores[:-1, :])\n new_cond_entropy = self.weights[-1] * scores[-1, :]\n sorted_idx = np.argsort(cmi_wsum + new_cond_entropy)\n return list_ids[sorted_idx], cmi_wsum[sorted_idx], new_cond_entropy[\n sorted_idx], score_mat[:, sorted_idx]\n\n def _scoreFeatureSequential(self, steplist, gamma, sum_cmi, show_progress):\n k = len(steplist)\n S = frozenset(self.idSelected)\n no_S = self.idSet.difference(self.idSelected)\n if self.forward:\n shrink_S = no_S\n op_S, op_noS = union, differ\n else:\n shrink_S = S\n op_S, op_noS = differ, union\n list_ids = np.fromiter(shrink_S, dtype=np.int)\n score_mat = np.zeros((k + 1, len(list_ids)))\n for i, id in enumerate(tqdm(list_ids, leave=False, disable=not\n show_progress)):\n id = frozenset({id})\n S_next = op_S(S, id)\n no_S_next = op_noS(no_S, id)\n if sum_cmi:\n target = id\n else:\n target = no_S_next\n for j, t in enumerate(steplist):\n score_mat[j, i] = self.itEstimator.estimateCMI(self.\n set_reward, target, S_next, t=t)\n score_mat[k, i] = self.itEstimator.estimateCMI(self.set_J_k,\n target, S_next, t=k)\n score_mat = np.clip(score_mat, 0, 2)\n scores = np.sqrt(score_mat)\n cmi_wsum = np.einsum('a, ab->b', self.weights[:-1], scores[:-1, :])\n new_cond_entropy = self.weights[-1] * scores[-1, :]\n sorted_idx = np.argsort(cmi_wsum + new_cond_entropy)\n return list_ids[sorted_idx], cmi_wsum[sorted_idx], new_cond_entropy[\n sorted_idx], score_mat[:, sorted_idx]\n\n @abc.abstractmethod\n def selectOnError(self, k, gamma, max_error, sampling='frequency', freq\n =1, use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):\n pass\n\n @abc.abstractmethod\n def selectNfeatures(self, n, k, gamma, sampling='frequency', freq=1,\n use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):\n pass\n\n @abc.abstractmethod\n def try_all(self, k, gamma, all_scores=False, max_n=None, sampling=\n 'frequency', freq=1, use_Rt=True, on_mu=True, sum_cmi=True,\n show_progress=True):\n pass\n",
"step-5": "import abc\n\nimport numpy as np\nimport ray\nfrom tqdm.autonotebook import tqdm\n\nfrom src.algorithm.info_theory.it_estimator import (CachingEstimator,\n MPCachingEstimator)\nfrom src.algorithm.utils import differ, independent_roll, union\n\n\nclass FeatureSelector(metaclass=abc.ABCMeta):\n def __init__(self, itEstimator, trajectories, discrete=False, nproc=None):\n self.trajectories = trajectories\n self.nproc = nproc\n self.discrete = discrete\n\n if nproc != 1:\n self.itEstimator = MPCachingEstimator(\n itEstimator, self._get_arrays, nproc=nproc)\n self.two = ray.put(2)\n else:\n self.itEstimator = CachingEstimator(itEstimator, self._get_arrays)\n\n self.seed()\n self._setup()\n\n def _setup(self):\n self.n_features = self.trajectories[0].shape[1] - 1\n self.id_reward = self.n_features\n self.set_reward = frozenset({self.id_reward})\n self.id_J_k = -1\n self.set_J_k = frozenset({self.id_J_k})\n self.idSet = frozenset(range(self.n_features))\n self.idSelected = None\n\n self.tot_t = min(len(tr) for tr in self.trajectories)\n self.data_per_traj = np.dstack(\n [tr[:self.tot_t, :] for tr in self.trajectories])\n\n self.Rts = np.abs(self.data_per_traj[:, self.id_reward, :]).max(axis=1)\n self.Rmax = self.Rts.max()\n\n self.on_mu = None\n self.trajectories = None\n\n def _prep_data(self, max_t, on_mu):\n if hasattr(self, 't_step_data') and max_t + 1 == self.t_step_data.shape[2] and on_mu == self.on_mu:\n return\n\n self.itEstimator.cache.clear()\n\n assert max_t < self.tot_t, f\"max timestep {max_t} is not less than the shortest trajectory (len {self.tot_t})\"\n\n self.on_mu = on_mu\n if on_mu:\n stop_len = 1\n stop_ids = -1\n else:\n stop_len = self.tot_t - max_t\n stop_ids = slice(None)\n\n shift = np.zeros(self.n_features + 1, dtype=np.int)\n shift[self.id_reward] = -1\n\n self.t_step_data = []\n for t in range(max_t + 1):\n t_shift = t*shift\n t_step_eps = []\n for ep in self.data_per_traj.transpose(2, 0, 1):\n t_step_eps.append(independent_roll(\n ep, t_shift)[: stop_len, stop_ids])\n\n self.t_step_data.append(np.vstack(t_step_eps))\n\n if self.t_step_data:\n self.t_step_data = np.dstack(self.t_step_data)\n else:\n self.t_step_data = np.empty((self.data_per_traj.shape[-1], 1, 1))\n\n def _get_arrays(self, ids, t):\n if not isinstance(ids, list):\n ids = list(ids)\n\n # memory efficiency\n if self.on_mu:\n if t == self.t_step_data.shape[2]:\n ft, t = t, 0\n else:\n ft = 0\n feats = self.data_per_traj[ft, :-1, :].T\n rew = self.t_step_data[:, 0, t][:, None]\n data = np.hstack([feats, rew, self.J_k])\n return data[:, ids]\n\n return self.t_step_data[:, ids, t]\n\n def _generate_steplist(self, k, sampling, freq):\n if sampling == \"frequency\":\n max_t = (k-1) * freq\n return np.arange(k*freq, step=freq), max_t\n\n if sampling == \"decaying\":\n p = np.exp(-np.arange(self.tot_t)/freq) / freq\n p = p/p.sum()\n steplist = np.sort(self.np_random.choice(\n self.tot_t, size=k, replace=False, p=p))\n return steplist, steplist[-1]\n\n if sampling == \"variance\":\n variances = np.var(\n self.data_per_traj[:, self.id_reward, :], axis=1)\n most_var = np.argsort(variances)[::-1][:k]\n steplist = np.sort(most_var)\n return steplist, steplist[-1]\n\n raise NotImplemented\n\n def _get_weights_by_steplist(self, steplist, gamma, use_Rt):\n k = len(steplist)\n gamma = gamma\n\n weights = np.ones(k + 1)\n weights[:-1] = gamma ** steplist\n\n weights[k] = 1 - (1 - gamma) * weights[:-1].sum()\n\n if use_Rt:\n Rsteps = self.Rts[steplist]\n weights[:-1] *= Rsteps\n\n weights[k] *= self.max_J_k\n\n return weights\n\n def _prep_J_k(self, k, gamma):\n self.J_k = np.polyval(\n self.data_per_traj[k:, -1, :], gamma).reshape(-1, 1)\n self.max_J_k = np.abs(self.J_k).max()\n\n def _prep_all(self, k, gamma, sampling, freq, use_Rt, on_mu):\n self.reset()\n steplist, max_t = self._generate_steplist(k, sampling, freq)\n self.steplist = steplist\n self._prep_data(max_t, on_mu)\n self._prep_J_k(k, gamma)\n self.weights = self._get_weights_by_steplist(steplist, gamma, use_Rt)\n\n return steplist\n\n def scoreFeatures(self, *args, **kwargs):\n if self.nproc != 1:\n return self._scoreFeatureParallel(*args, **kwargs)\n else:\n return self._scoreFeatureSequential(*args, **kwargs)\n\n def scoreSubset(self, *args, **kwargs):\n if self.nproc != 1:\n return self._scoreSubsetParallel(*args, **kwargs)\n else:\n return self._scoreSubsetSequential(*args, **kwargs)\n\n def computeError(self, residual=None, correction=None, use_Rt=True):\n if residual is None:\n residual = self.residual_error\n if correction is None:\n correction = self.correction_term\n if use_Rt:\n Rmax = 1\n else:\n Rmax = self.Rmax\n\n return 2**(1/2) * (Rmax * residual + correction)\n\n def reset(self):\n self.residual_error = 0\n self.correction_term = 0\n self.weights = None\n self.steplist = None\n self.idSelected = None\n\n def seed(self, seed=None):\n self.np_random = np.random.seed(seed)\n return\n\n def _scoreSubsetSequential(self, k, gamma, S, sampling=\"frequency\", freq=1, use_Rt=True, on_mu=True, show_progress=True):\n steplist = self._prep_all(k, gamma, sampling, freq, use_Rt, on_mu)\n\n S = frozenset(S)\n no_S = self.idSet.difference(S)\n\n score = np.zeros(k+1)\n\n for j, t in enumerate(steplist):\n score[j] = self.itEstimator.estimateCMI(\n self.set_reward, no_S, S, t=t)\n\n score[k] = self.itEstimator.estimateCMI(self.set_J_k, no_S, S, t=k)\n\n score = np.clip(score, 0, 2)\n score = np.sqrt(score)\n\n self.residual_error = score[:-1] @ self.weights[:-1]\n self.correction_term = score[-1] * self.weights[-1]\n\n return self.computeError(use_Rt=use_Rt)\n\n def _scoreSubsetParallel(self, k, gamma, S, sampling=\"frequency\", freq=1, use_Rt=True, on_mu=True, show_progress=True):\n steplist = self._prep_all(k, gamma, sampling, freq, use_Rt, on_mu)\n\n S = frozenset(S)\n no_S = self.idSet.difference(S)\n\n res = []\n for t in steplist:\n res.append(self.itEstimator.estimateCMI(\n self.set_reward, no_S, S, t=t))\n\n res.append(self.itEstimator.estimateCMI(self.set_J_k, no_S, S, t=k))\n\n res = map(lambda x: ray.get(x), res)\n score = np.fromiter(res, np.float64)\n\n score = np.clip(score, 0, 2)\n score = np.sqrt(score)\n\n self.residual_error = score[:-1] @ self.weights[:-1]\n self.correction_term = score[-1] * self.weights[-1]\n\n return self.computeError(use_Rt=use_Rt)\n\n def _scoreFeatureParallel(self, steplist, gamma, sum_cmi, show_progress):\n k = len(steplist)\n\n S = frozenset(self.idSelected)\n no_S = self.idSet.difference(self.idSelected)\n\n if self.forward:\n shrink_S = no_S\n op_S, op_noS = union, differ\n else:\n shrink_S = S\n op_S, op_noS = differ, union\n\n list_ids = np.fromiter(shrink_S, dtype=np.int)\n\n res = []\n for i, id in enumerate(list_ids):\n id = frozenset({id})\n S_next = op_S(S, id)\n no_S_next = op_noS(no_S, id)\n\n if sum_cmi:\n target = id\n else:\n target = no_S_next\n\n for j, t in enumerate(steplist):\n res.append(self.itEstimator.estimateCMI(\n self.set_reward, target, S_next, t=t))\n\n res.append(self.itEstimator.estimateCMI(self.set_J_k, target,\n S_next, t=k))\n\n res = map(lambda x: ray.get(x), tqdm(\n res, leave=False, disable=not show_progress))\n score_mat = np.fromiter(res, np.float64).reshape(k + 1, -1, order='F')\n\n score_mat = np.clip(score_mat, 0, 2)\n scores = np.sqrt(score_mat)\n\n cmi_wsum = np.einsum('a, ab->b', self.weights[:-1], scores[:-1, :])\n new_cond_entropy = self.weights[-1] * scores[-1, :]\n\n sorted_idx = np.argsort(cmi_wsum + new_cond_entropy)\n\n return list_ids[sorted_idx], cmi_wsum[sorted_idx], new_cond_entropy[sorted_idx], score_mat[:, sorted_idx]\n\n def _scoreFeatureSequential(self, steplist, gamma, sum_cmi, show_progress):\n k = len(steplist)\n\n S = frozenset(self.idSelected)\n no_S = self.idSet.difference(self.idSelected)\n\n if self.forward:\n shrink_S = no_S\n op_S, op_noS = union, differ\n else:\n shrink_S = S\n op_S, op_noS = differ, union\n\n list_ids = np.fromiter(shrink_S, dtype=np.int)\n score_mat = np.zeros((k+1, len(list_ids)))\n\n for i, id in enumerate(tqdm(list_ids, leave=False, disable=not show_progress)):\n id = frozenset({id})\n S_next = op_S(S, id)\n no_S_next = op_noS(no_S, id)\n\n if sum_cmi:\n target = id\n else:\n target = no_S_next\n\n for j, t in enumerate(steplist):\n score_mat[j, i] = self.itEstimator.estimateCMI(\n self.set_reward, target, S_next, t=t)\n\n score_mat[k, i] = self.itEstimator.estimateCMI(\n self.set_J_k, target, S_next, t=k)\n\n score_mat = np.clip(score_mat, 0, 2)\n scores = np.sqrt(score_mat)\n\n cmi_wsum = np.einsum('a, ab->b', self.weights[:-1], scores[:-1, :])\n new_cond_entropy = self.weights[-1] * scores[-1, :]\n\n sorted_idx = np.argsort(cmi_wsum + new_cond_entropy)\n\n return list_ids[sorted_idx], cmi_wsum[sorted_idx], new_cond_entropy[sorted_idx], score_mat[:, sorted_idx]\n\n @abc.abstractmethod\n def selectOnError(self, k, gamma, max_error, sampling=\"frequency\", freq=1, use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):\n pass\n\n @abc.abstractmethod\n def selectNfeatures(self, n, k, gamma, sampling=\"frequency\", freq=1, use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):\n pass\n\n @abc.abstractmethod\n def try_all(self, k, gamma, all_scores=False, max_n=None, sampling=\"frequency\", freq=1, use_Rt=True, on_mu=True, sum_cmi=True, show_progress=True):\n pass\n",
"step-ids": [
16,
18,
20,
22,
23
]
}
|
[
16,
18,
20,
22,
23
] |
# apport hook for oem-config; adds log file
import os.path
def add_info(report):
if os.path.exists('/var/log/oem-config.log'):
report['OemConfigLog'] = ('/var/log/oem-config.log',)
|
normal
|
{
"blob_id": "74b1cdcb1aaf6cde7e8ce3eeb73cd82689719b00",
"index": 6404,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef add_info(report):\n if os.path.exists('/var/log/oem-config.log'):\n report['OemConfigLog'] = '/var/log/oem-config.log',\n",
"step-3": "import os.path\n\n\ndef add_info(report):\n if os.path.exists('/var/log/oem-config.log'):\n report['OemConfigLog'] = '/var/log/oem-config.log',\n",
"step-4": "# apport hook for oem-config; adds log file\n\nimport os.path\n\ndef add_info(report):\n if os.path.exists('/var/log/oem-config.log'):\n report['OemConfigLog'] = ('/var/log/oem-config.log',)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class API:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def characters(self):
req = self._make_request(self.CHARACTERS_RESOURCE)
if req.status_code != 200:
return None
result = req.json()
characters = []
for character in result['characters']:
characters.append(character['name'])
return characters
<|reserved_special_token_0|>
def _character_unauthed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (
character,), False)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class APIError(Exception):
pass
class Character:
def __init__(self, name, fullname, level, house, xp_rank, player_kills,
mob_kills, explorer_rank, current_class, messages_total=None,
messages_unread=None):
self.name = name
self.fullname = fullname
self.level = level
self.house = house
self.xp_rank = xp_rank
self.player_kills = player_kills
self.mob_kills = mob_kills
self.explorer_rank = explorer_rank
self.current_class = current_class
self.messages_total = messages_total
self.messages_unread = messages_unread
@staticmethod
def parse(json_data):
name = json_data['name']
fullname = json_data['fullname']
level = int(json_data['level'])
house = json_data['house']
xp_rank = json_data['xp_rank']
player_kills = int(json_data['player_kills'])
mob_kills = int(json_data['mob_kills'])
explorer_rank = int(json_data['explorer_rank'])
current_class = json_data['class']
messages_total = None
messages_unread = None
if 'messages_total' in json_data and 'messages_unread' in json_data:
messages_total = json_data['messages_total']
messages_unread = json_data['messages_unread']
return Character(name, fullname, level, house, xp_rank,
player_kills, mob_kills, explorer_rank, current_class,
messages_total, messages_unread)
def __repr__(self):
return '<Character: {} ({})>'.format(self.name, self.fullname)
class NewsSection:
def __init__(self, name, read, total, unread):
self.name = name
self.read = read
self.total = total
self.unread = unread
@staticmethod
def parse(json_data):
name = json_data['name']
read = int(json_data['read'])
total = int(json_data['total'])
unread = int(json_data['unread'])
return NewsSection(name, read, total, unread)
def __repr__(self):
return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.
read, self.total)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class API:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):
self.endpoint = endpoint
if username is not None and password is not None:
self.username = username
self.password = password
self.checkauth()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def characters(self):
req = self._make_request(self.CHARACTERS_RESOURCE)
if req.status_code != 200:
return None
result = req.json()
characters = []
for character in result['characters']:
characters.append(character['name'])
return characters
@_requires_auth
def _character_authed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (
character,), True)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def _character_unauthed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (
character,), False)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def character(self, character=None):
if self.auth is True and (self.username == character or character is
None):
return self._character_authed(character or self.username)
else:
return self._character_unauthed(character)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class APIError(Exception):
pass
class Character:
def __init__(self, name, fullname, level, house, xp_rank, player_kills,
mob_kills, explorer_rank, current_class, messages_total=None,
messages_unread=None):
self.name = name
self.fullname = fullname
self.level = level
self.house = house
self.xp_rank = xp_rank
self.player_kills = player_kills
self.mob_kills = mob_kills
self.explorer_rank = explorer_rank
self.current_class = current_class
self.messages_total = messages_total
self.messages_unread = messages_unread
@staticmethod
def parse(json_data):
name = json_data['name']
fullname = json_data['fullname']
level = int(json_data['level'])
house = json_data['house']
xp_rank = json_data['xp_rank']
player_kills = int(json_data['player_kills'])
mob_kills = int(json_data['mob_kills'])
explorer_rank = int(json_data['explorer_rank'])
current_class = json_data['class']
messages_total = None
messages_unread = None
if 'messages_total' in json_data and 'messages_unread' in json_data:
messages_total = json_data['messages_total']
messages_unread = json_data['messages_unread']
return Character(name, fullname, level, house, xp_rank,
player_kills, mob_kills, explorer_rank, current_class,
messages_total, messages_unread)
def __repr__(self):
return '<Character: {} ({})>'.format(self.name, self.fullname)
class NewsSection:
def __init__(self, name, read, total, unread):
self.name = name
self.read = read
self.total = total
self.unread = unread
@staticmethod
def parse(json_data):
name = json_data['name']
read = int(json_data['read'])
total = int(json_data['total'])
unread = int(json_data['unread'])
return NewsSection(name, read, total, unread)
def __repr__(self):
return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.
read, self.total)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class API:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):
self.endpoint = endpoint
if username is not None and password is not None:
self.username = username
self.password = password
self.checkauth()
def _get_endpoint(self, fmt_str, args):
return self.endpoint + fmt_str.format(*args)
def _make_request(self, resource, args=(), authed=False, params={}):
endpoint = self._get_endpoint(resource, args)
auth_params = {}
if authed:
if self.username is None or self.password is None:
raise APIError()
auth_params = {'character': self.username, 'password': self.
password}
params = params.copy()
params.update(auth_params)
req = requests.get(endpoint, params=params)
return req
def checkauth(self):
if self.auth is not None:
return self.auth
req = self._make_request(self.CHECKAUTH_RESOURCE, authed=True)
if req.status_code == 200:
self.auth = True
else:
self.auth = False
return self.auth
def characters(self):
req = self._make_request(self.CHARACTERS_RESOURCE)
if req.status_code != 200:
return None
result = req.json()
characters = []
for character in result['characters']:
characters.append(character['name'])
return characters
@_requires_auth
def _character_authed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (
character,), True)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def _character_unauthed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (
character,), False)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def character(self, character=None):
if self.auth is True and (self.username == character or character is
None):
return self._character_authed(character or self.username)
else:
return self._character_unauthed(character)
def sections(self):
req = self._make_request(self.NEWS_RESOURCE, authed=self.auth)
if req.status_code != 200:
return None
result = req.json()
sections_list = map(NewsSection.parse, result)
return sections_list
def posts(self, section, page=None):
params = {}
if page is not None:
params['page'] = page
req = self._make_request(self.SPECIFIC_NEWS_RESOURCE, (section,),
authed=self.auth, params=params)
if req.status_code != 200:
return None
result = req.json()
return result
<|reserved_special_token_0|>
class APIError(Exception):
pass
class Character:
def __init__(self, name, fullname, level, house, xp_rank, player_kills,
mob_kills, explorer_rank, current_class, messages_total=None,
messages_unread=None):
self.name = name
self.fullname = fullname
self.level = level
self.house = house
self.xp_rank = xp_rank
self.player_kills = player_kills
self.mob_kills = mob_kills
self.explorer_rank = explorer_rank
self.current_class = current_class
self.messages_total = messages_total
self.messages_unread = messages_unread
@staticmethod
def parse(json_data):
name = json_data['name']
fullname = json_data['fullname']
level = int(json_data['level'])
house = json_data['house']
xp_rank = json_data['xp_rank']
player_kills = int(json_data['player_kills'])
mob_kills = int(json_data['mob_kills'])
explorer_rank = int(json_data['explorer_rank'])
current_class = json_data['class']
messages_total = None
messages_unread = None
if 'messages_total' in json_data and 'messages_unread' in json_data:
messages_total = json_data['messages_total']
messages_unread = json_data['messages_unread']
return Character(name, fullname, level, house, xp_rank,
player_kills, mob_kills, explorer_rank, current_class,
messages_total, messages_unread)
def __repr__(self):
return '<Character: {} ({})>'.format(self.name, self.fullname)
class NewsSection:
def __init__(self, name, read, total, unread):
self.name = name
self.read = read
self.total = total
self.unread = unread
@staticmethod
def parse(json_data):
name = json_data['name']
read = int(json_data['read'])
total = int(json_data['total'])
unread = int(json_data['unread'])
return NewsSection(name, read, total, unread)
def __repr__(self):
return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.
read, self.total)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _requires_auth(func):
def wrapper(self, *args, **kwargs):
if self.auth is not True:
raise APIError()
return func(self, *args, **kwargs)
return wrapper
class API:
auth = None
CHECKAUTH_RESOURCE = '/checkauth.json'
CHARACTERS_RESOURCE = '/characters.json'
SPECIFIC_CHARACTER_RESOURCE = '/characters/{}.json'
NEWS_RESOURCE = '/news.json'
SPECIFIC_NEWS_RESOURCE = '/news/{}.json'
SPECIFIC_NEWS_POST_RESOURCE = '/news/{}/{}.json'
def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):
self.endpoint = endpoint
if username is not None and password is not None:
self.username = username
self.password = password
self.checkauth()
def _get_endpoint(self, fmt_str, args):
return self.endpoint + fmt_str.format(*args)
def _make_request(self, resource, args=(), authed=False, params={}):
endpoint = self._get_endpoint(resource, args)
auth_params = {}
if authed:
if self.username is None or self.password is None:
raise APIError()
auth_params = {'character': self.username, 'password': self.
password}
params = params.copy()
params.update(auth_params)
req = requests.get(endpoint, params=params)
return req
def checkauth(self):
if self.auth is not None:
return self.auth
req = self._make_request(self.CHECKAUTH_RESOURCE, authed=True)
if req.status_code == 200:
self.auth = True
else:
self.auth = False
return self.auth
def characters(self):
req = self._make_request(self.CHARACTERS_RESOURCE)
if req.status_code != 200:
return None
result = req.json()
characters = []
for character in result['characters']:
characters.append(character['name'])
return characters
@_requires_auth
def _character_authed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (
character,), True)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def _character_unauthed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (
character,), False)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def character(self, character=None):
if self.auth is True and (self.username == character or character is
None):
return self._character_authed(character or self.username)
else:
return self._character_unauthed(character)
def sections(self):
req = self._make_request(self.NEWS_RESOURCE, authed=self.auth)
if req.status_code != 200:
return None
result = req.json()
sections_list = map(NewsSection.parse, result)
return sections_list
def posts(self, section, page=None):
params = {}
if page is not None:
params['page'] = page
req = self._make_request(self.SPECIFIC_NEWS_RESOURCE, (section,),
authed=self.auth, params=params)
if req.status_code != 200:
return None
result = req.json()
return result
def post(self, section, number):
pass
class APIError(Exception):
pass
class Character:
def __init__(self, name, fullname, level, house, xp_rank, player_kills,
mob_kills, explorer_rank, current_class, messages_total=None,
messages_unread=None):
self.name = name
self.fullname = fullname
self.level = level
self.house = house
self.xp_rank = xp_rank
self.player_kills = player_kills
self.mob_kills = mob_kills
self.explorer_rank = explorer_rank
self.current_class = current_class
self.messages_total = messages_total
self.messages_unread = messages_unread
@staticmethod
def parse(json_data):
name = json_data['name']
fullname = json_data['fullname']
level = int(json_data['level'])
house = json_data['house']
xp_rank = json_data['xp_rank']
player_kills = int(json_data['player_kills'])
mob_kills = int(json_data['mob_kills'])
explorer_rank = int(json_data['explorer_rank'])
current_class = json_data['class']
messages_total = None
messages_unread = None
if 'messages_total' in json_data and 'messages_unread' in json_data:
messages_total = json_data['messages_total']
messages_unread = json_data['messages_unread']
return Character(name, fullname, level, house, xp_rank,
player_kills, mob_kills, explorer_rank, current_class,
messages_total, messages_unread)
def __repr__(self):
return '<Character: {} ({})>'.format(self.name, self.fullname)
class NewsSection:
def __init__(self, name, read, total, unread):
self.name = name
self.read = read
self.total = total
self.unread = unread
@staticmethod
def parse(json_data):
name = json_data['name']
read = int(json_data['read'])
total = int(json_data['total'])
unread = int(json_data['unread'])
return NewsSection(name, read, total, unread)
def __repr__(self):
return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.
read, self.total)
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
ACHAEA_ENDPOINT = 'https://api.achaea.com'
def _requires_auth(func):
def wrapper(self, *args, **kwargs):
if self.auth is not True:
raise APIError()
return func(self, *args, **kwargs)
return wrapper
class API:
auth = None
CHECKAUTH_RESOURCE = '/checkauth.json'
CHARACTERS_RESOURCE = '/characters.json'
SPECIFIC_CHARACTER_RESOURCE = '/characters/{}.json'
NEWS_RESOURCE = '/news.json'
SPECIFIC_NEWS_RESOURCE = '/news/{}.json'
SPECIFIC_NEWS_POST_RESOURCE = '/news/{}/{}.json'
def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):
self.endpoint = endpoint
if username is not None and password is not None:
self.username = username
self.password = password
self.checkauth()
def _get_endpoint(self, fmt_str, args):
return self.endpoint + fmt_str.format(*args)
def _make_request(self, resource, args=(), authed=False, params={}):
endpoint = self._get_endpoint(resource, args)
auth_params = {}
if authed:
if self.username is None or self.password is None:
raise APIError()
auth_params = {'character': self.username, 'password': self.password}
params = params.copy()
params.update(auth_params)
req = requests.get(endpoint, params=params)
return req
def checkauth(self):
if self.auth is not None:
return self.auth
req = self._make_request(self.CHECKAUTH_RESOURCE, authed=True)
if req.status_code == 200:
self.auth = True
else:
self.auth = False
return self.auth
def characters(self):
req = self._make_request(self.CHARACTERS_RESOURCE)
if req.status_code != 200:
return None
result = req.json()
characters = []
for character in result['characters']:
characters.append(character['name'])
return characters
@_requires_auth
def _character_authed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (character,), True)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def _character_unauthed(self, character):
req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (character,), False)
if req.status_code != 200:
return None
result = req.json()
return Character.parse(result)
def character(self, character=None):
if self.auth is True and (self.username == character or character is None):
return self._character_authed(character or self.username)
else:
return self._character_unauthed(character)
def sections(self):
req = self._make_request(self.NEWS_RESOURCE, authed=self.auth)
if req.status_code != 200:
return None
result = req.json()
sections_list = map(NewsSection.parse, result)
return sections_list
def posts(self, section, page=None):
params = {}
if page is not None:
params['page'] = page
req = self._make_request(self.SPECIFIC_NEWS_RESOURCE, (section,), authed=self.auth,
params=params)
if req.status_code != 200:
return None
result = req.json()
return result
def post(self, section, number):
pass
class APIError(Exception):
pass
class Character:
def __init__(self, name, fullname, level, house, xp_rank, player_kills, mob_kills,
explorer_rank, current_class, messages_total=None, messages_unread=None):
self.name = name
self.fullname = fullname
self.level = level
self.house = house
self.xp_rank = xp_rank
self.player_kills = player_kills
self.mob_kills = mob_kills
self.explorer_rank = explorer_rank
self.current_class = current_class
self.messages_total = messages_total
self.messages_unread = messages_unread
@staticmethod
def parse(json_data):
name = json_data['name']
fullname = json_data['fullname']
level = int(json_data['level'])
house = json_data['house']
xp_rank = json_data['xp_rank']
player_kills = int(json_data['player_kills'])
mob_kills = int(json_data['mob_kills'])
explorer_rank = int(json_data['explorer_rank'])
current_class = json_data['class']
messages_total = None
messages_unread = None
if 'messages_total' in json_data and 'messages_unread' in json_data:
messages_total = json_data['messages_total']
messages_unread = json_data['messages_unread']
return Character(name, fullname, level, house, xp_rank, player_kills, mob_kills,
explorer_rank, current_class, messages_total, messages_unread)
def __repr__(self):
return '<Character: {} ({})>'.format(self.name, self.fullname)
class NewsSection:
def __init__(self, name, read, total, unread):
self.name = name
self.read = read
self.total = total
self.unread = unread
@staticmethod
def parse(json_data):
name = json_data['name']
read = int(json_data['read'])
total = int(json_data['total'])
unread = int(json_data['unread'])
return NewsSection(name, read, total, unread)
def __repr__(self):
return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.read, self.total)
|
flexible
|
{
"blob_id": "da66b254afb3a8fcd3783a38d8624caa917e58c3",
"index": 652,
"step-1": "<mask token>\n\n\nclass API:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def characters(self):\n req = self._make_request(self.CHARACTERS_RESOURCE)\n if req.status_code != 200:\n return None\n result = req.json()\n characters = []\n for character in result['characters']:\n characters.append(character['name'])\n return characters\n <mask token>\n\n def _character_unauthed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (\n character,), False)\n if req.status_code != 200:\n return None\n result = req.json()\n return Character.parse(result)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass APIError(Exception):\n pass\n\n\nclass Character:\n\n def __init__(self, name, fullname, level, house, xp_rank, player_kills,\n mob_kills, explorer_rank, current_class, messages_total=None,\n messages_unread=None):\n self.name = name\n self.fullname = fullname\n self.level = level\n self.house = house\n self.xp_rank = xp_rank\n self.player_kills = player_kills\n self.mob_kills = mob_kills\n self.explorer_rank = explorer_rank\n self.current_class = current_class\n self.messages_total = messages_total\n self.messages_unread = messages_unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n fullname = json_data['fullname']\n level = int(json_data['level'])\n house = json_data['house']\n xp_rank = json_data['xp_rank']\n player_kills = int(json_data['player_kills'])\n mob_kills = int(json_data['mob_kills'])\n explorer_rank = int(json_data['explorer_rank'])\n current_class = json_data['class']\n messages_total = None\n messages_unread = None\n if 'messages_total' in json_data and 'messages_unread' in json_data:\n messages_total = json_data['messages_total']\n messages_unread = json_data['messages_unread']\n return Character(name, fullname, level, house, xp_rank,\n player_kills, mob_kills, explorer_rank, current_class,\n messages_total, messages_unread)\n\n def __repr__(self):\n return '<Character: {} ({})>'.format(self.name, self.fullname)\n\n\nclass NewsSection:\n\n def __init__(self, name, read, total, unread):\n self.name = name\n self.read = read\n self.total = total\n self.unread = unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n read = int(json_data['read'])\n total = int(json_data['total'])\n unread = int(json_data['unread'])\n return NewsSection(name, read, total, unread)\n\n def __repr__(self):\n return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.\n read, self.total)\n",
"step-2": "<mask token>\n\n\nclass API:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):\n self.endpoint = endpoint\n if username is not None and password is not None:\n self.username = username\n self.password = password\n self.checkauth()\n <mask token>\n <mask token>\n <mask token>\n\n def characters(self):\n req = self._make_request(self.CHARACTERS_RESOURCE)\n if req.status_code != 200:\n return None\n result = req.json()\n characters = []\n for character in result['characters']:\n characters.append(character['name'])\n return characters\n\n @_requires_auth\n def _character_authed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (\n character,), True)\n if req.status_code != 200:\n return None\n result = req.json()\n return Character.parse(result)\n\n def _character_unauthed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (\n character,), False)\n if req.status_code != 200:\n return None\n result = req.json()\n return Character.parse(result)\n\n def character(self, character=None):\n if self.auth is True and (self.username == character or character is\n None):\n return self._character_authed(character or self.username)\n else:\n return self._character_unauthed(character)\n <mask token>\n <mask token>\n <mask token>\n\n\nclass APIError(Exception):\n pass\n\n\nclass Character:\n\n def __init__(self, name, fullname, level, house, xp_rank, player_kills,\n mob_kills, explorer_rank, current_class, messages_total=None,\n messages_unread=None):\n self.name = name\n self.fullname = fullname\n self.level = level\n self.house = house\n self.xp_rank = xp_rank\n self.player_kills = player_kills\n self.mob_kills = mob_kills\n self.explorer_rank = explorer_rank\n self.current_class = current_class\n self.messages_total = messages_total\n self.messages_unread = messages_unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n fullname = json_data['fullname']\n level = int(json_data['level'])\n house = json_data['house']\n xp_rank = json_data['xp_rank']\n player_kills = int(json_data['player_kills'])\n mob_kills = int(json_data['mob_kills'])\n explorer_rank = int(json_data['explorer_rank'])\n current_class = json_data['class']\n messages_total = None\n messages_unread = None\n if 'messages_total' in json_data and 'messages_unread' in json_data:\n messages_total = json_data['messages_total']\n messages_unread = json_data['messages_unread']\n return Character(name, fullname, level, house, xp_rank,\n player_kills, mob_kills, explorer_rank, current_class,\n messages_total, messages_unread)\n\n def __repr__(self):\n return '<Character: {} ({})>'.format(self.name, self.fullname)\n\n\nclass NewsSection:\n\n def __init__(self, name, read, total, unread):\n self.name = name\n self.read = read\n self.total = total\n self.unread = unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n read = int(json_data['read'])\n total = int(json_data['total'])\n unread = int(json_data['unread'])\n return NewsSection(name, read, total, unread)\n\n def __repr__(self):\n return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.\n read, self.total)\n",
"step-3": "<mask token>\n\n\nclass API:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):\n self.endpoint = endpoint\n if username is not None and password is not None:\n self.username = username\n self.password = password\n self.checkauth()\n\n def _get_endpoint(self, fmt_str, args):\n return self.endpoint + fmt_str.format(*args)\n\n def _make_request(self, resource, args=(), authed=False, params={}):\n endpoint = self._get_endpoint(resource, args)\n auth_params = {}\n if authed:\n if self.username is None or self.password is None:\n raise APIError()\n auth_params = {'character': self.username, 'password': self.\n password}\n params = params.copy()\n params.update(auth_params)\n req = requests.get(endpoint, params=params)\n return req\n\n def checkauth(self):\n if self.auth is not None:\n return self.auth\n req = self._make_request(self.CHECKAUTH_RESOURCE, authed=True)\n if req.status_code == 200:\n self.auth = True\n else:\n self.auth = False\n return self.auth\n\n def characters(self):\n req = self._make_request(self.CHARACTERS_RESOURCE)\n if req.status_code != 200:\n return None\n result = req.json()\n characters = []\n for character in result['characters']:\n characters.append(character['name'])\n return characters\n\n @_requires_auth\n def _character_authed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (\n character,), True)\n if req.status_code != 200:\n return None\n result = req.json()\n return Character.parse(result)\n\n def _character_unauthed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (\n character,), False)\n if req.status_code != 200:\n return None\n result = req.json()\n return Character.parse(result)\n\n def character(self, character=None):\n if self.auth is True and (self.username == character or character is\n None):\n return self._character_authed(character or self.username)\n else:\n return self._character_unauthed(character)\n\n def sections(self):\n req = self._make_request(self.NEWS_RESOURCE, authed=self.auth)\n if req.status_code != 200:\n return None\n result = req.json()\n sections_list = map(NewsSection.parse, result)\n return sections_list\n\n def posts(self, section, page=None):\n params = {}\n if page is not None:\n params['page'] = page\n req = self._make_request(self.SPECIFIC_NEWS_RESOURCE, (section,),\n authed=self.auth, params=params)\n if req.status_code != 200:\n return None\n result = req.json()\n return result\n <mask token>\n\n\nclass APIError(Exception):\n pass\n\n\nclass Character:\n\n def __init__(self, name, fullname, level, house, xp_rank, player_kills,\n mob_kills, explorer_rank, current_class, messages_total=None,\n messages_unread=None):\n self.name = name\n self.fullname = fullname\n self.level = level\n self.house = house\n self.xp_rank = xp_rank\n self.player_kills = player_kills\n self.mob_kills = mob_kills\n self.explorer_rank = explorer_rank\n self.current_class = current_class\n self.messages_total = messages_total\n self.messages_unread = messages_unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n fullname = json_data['fullname']\n level = int(json_data['level'])\n house = json_data['house']\n xp_rank = json_data['xp_rank']\n player_kills = int(json_data['player_kills'])\n mob_kills = int(json_data['mob_kills'])\n explorer_rank = int(json_data['explorer_rank'])\n current_class = json_data['class']\n messages_total = None\n messages_unread = None\n if 'messages_total' in json_data and 'messages_unread' in json_data:\n messages_total = json_data['messages_total']\n messages_unread = json_data['messages_unread']\n return Character(name, fullname, level, house, xp_rank,\n player_kills, mob_kills, explorer_rank, current_class,\n messages_total, messages_unread)\n\n def __repr__(self):\n return '<Character: {} ({})>'.format(self.name, self.fullname)\n\n\nclass NewsSection:\n\n def __init__(self, name, read, total, unread):\n self.name = name\n self.read = read\n self.total = total\n self.unread = unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n read = int(json_data['read'])\n total = int(json_data['total'])\n unread = int(json_data['unread'])\n return NewsSection(name, read, total, unread)\n\n def __repr__(self):\n return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.\n read, self.total)\n",
"step-4": "<mask token>\n\n\ndef _requires_auth(func):\n\n def wrapper(self, *args, **kwargs):\n if self.auth is not True:\n raise APIError()\n return func(self, *args, **kwargs)\n return wrapper\n\n\nclass API:\n auth = None\n CHECKAUTH_RESOURCE = '/checkauth.json'\n CHARACTERS_RESOURCE = '/characters.json'\n SPECIFIC_CHARACTER_RESOURCE = '/characters/{}.json'\n NEWS_RESOURCE = '/news.json'\n SPECIFIC_NEWS_RESOURCE = '/news/{}.json'\n SPECIFIC_NEWS_POST_RESOURCE = '/news/{}/{}.json'\n\n def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):\n self.endpoint = endpoint\n if username is not None and password is not None:\n self.username = username\n self.password = password\n self.checkauth()\n\n def _get_endpoint(self, fmt_str, args):\n return self.endpoint + fmt_str.format(*args)\n\n def _make_request(self, resource, args=(), authed=False, params={}):\n endpoint = self._get_endpoint(resource, args)\n auth_params = {}\n if authed:\n if self.username is None or self.password is None:\n raise APIError()\n auth_params = {'character': self.username, 'password': self.\n password}\n params = params.copy()\n params.update(auth_params)\n req = requests.get(endpoint, params=params)\n return req\n\n def checkauth(self):\n if self.auth is not None:\n return self.auth\n req = self._make_request(self.CHECKAUTH_RESOURCE, authed=True)\n if req.status_code == 200:\n self.auth = True\n else:\n self.auth = False\n return self.auth\n\n def characters(self):\n req = self._make_request(self.CHARACTERS_RESOURCE)\n if req.status_code != 200:\n return None\n result = req.json()\n characters = []\n for character in result['characters']:\n characters.append(character['name'])\n return characters\n\n @_requires_auth\n def _character_authed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (\n character,), True)\n if req.status_code != 200:\n return None\n result = req.json()\n return Character.parse(result)\n\n def _character_unauthed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (\n character,), False)\n if req.status_code != 200:\n return None\n result = req.json()\n return Character.parse(result)\n\n def character(self, character=None):\n if self.auth is True and (self.username == character or character is\n None):\n return self._character_authed(character or self.username)\n else:\n return self._character_unauthed(character)\n\n def sections(self):\n req = self._make_request(self.NEWS_RESOURCE, authed=self.auth)\n if req.status_code != 200:\n return None\n result = req.json()\n sections_list = map(NewsSection.parse, result)\n return sections_list\n\n def posts(self, section, page=None):\n params = {}\n if page is not None:\n params['page'] = page\n req = self._make_request(self.SPECIFIC_NEWS_RESOURCE, (section,),\n authed=self.auth, params=params)\n if req.status_code != 200:\n return None\n result = req.json()\n return result\n\n def post(self, section, number):\n pass\n\n\nclass APIError(Exception):\n pass\n\n\nclass Character:\n\n def __init__(self, name, fullname, level, house, xp_rank, player_kills,\n mob_kills, explorer_rank, current_class, messages_total=None,\n messages_unread=None):\n self.name = name\n self.fullname = fullname\n self.level = level\n self.house = house\n self.xp_rank = xp_rank\n self.player_kills = player_kills\n self.mob_kills = mob_kills\n self.explorer_rank = explorer_rank\n self.current_class = current_class\n self.messages_total = messages_total\n self.messages_unread = messages_unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n fullname = json_data['fullname']\n level = int(json_data['level'])\n house = json_data['house']\n xp_rank = json_data['xp_rank']\n player_kills = int(json_data['player_kills'])\n mob_kills = int(json_data['mob_kills'])\n explorer_rank = int(json_data['explorer_rank'])\n current_class = json_data['class']\n messages_total = None\n messages_unread = None\n if 'messages_total' in json_data and 'messages_unread' in json_data:\n messages_total = json_data['messages_total']\n messages_unread = json_data['messages_unread']\n return Character(name, fullname, level, house, xp_rank,\n player_kills, mob_kills, explorer_rank, current_class,\n messages_total, messages_unread)\n\n def __repr__(self):\n return '<Character: {} ({})>'.format(self.name, self.fullname)\n\n\nclass NewsSection:\n\n def __init__(self, name, read, total, unread):\n self.name = name\n self.read = read\n self.total = total\n self.unread = unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n read = int(json_data['read'])\n total = int(json_data['total'])\n unread = int(json_data['unread'])\n return NewsSection(name, read, total, unread)\n\n def __repr__(self):\n return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.\n read, self.total)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport requests\n\nACHAEA_ENDPOINT = 'https://api.achaea.com'\n\n\ndef _requires_auth(func):\n def wrapper(self, *args, **kwargs):\n if self.auth is not True:\n raise APIError()\n return func(self, *args, **kwargs)\n return wrapper\n\n\nclass API:\n\n auth = None\n\n CHECKAUTH_RESOURCE = '/checkauth.json'\n CHARACTERS_RESOURCE = '/characters.json'\n SPECIFIC_CHARACTER_RESOURCE = '/characters/{}.json'\n NEWS_RESOURCE = '/news.json'\n SPECIFIC_NEWS_RESOURCE = '/news/{}.json'\n SPECIFIC_NEWS_POST_RESOURCE = '/news/{}/{}.json'\n\n def __init__(self, endpoint=ACHAEA_ENDPOINT, username=None, password=None):\n self.endpoint = endpoint\n if username is not None and password is not None:\n self.username = username\n self.password = password\n self.checkauth()\n\n def _get_endpoint(self, fmt_str, args):\n return self.endpoint + fmt_str.format(*args)\n\n def _make_request(self, resource, args=(), authed=False, params={}):\n endpoint = self._get_endpoint(resource, args)\n auth_params = {}\n if authed:\n if self.username is None or self.password is None:\n raise APIError()\n auth_params = {'character': self.username, 'password': self.password}\n params = params.copy()\n params.update(auth_params)\n req = requests.get(endpoint, params=params)\n return req\n\n def checkauth(self):\n if self.auth is not None:\n return self.auth\n\n req = self._make_request(self.CHECKAUTH_RESOURCE, authed=True)\n\n if req.status_code == 200:\n self.auth = True\n else:\n self.auth = False\n\n return self.auth\n\n def characters(self):\n req = self._make_request(self.CHARACTERS_RESOURCE)\n if req.status_code != 200:\n return None\n\n result = req.json()\n characters = []\n for character in result['characters']:\n characters.append(character['name'])\n return characters\n\n @_requires_auth\n def _character_authed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (character,), True)\n if req.status_code != 200:\n return None\n\n result = req.json()\n return Character.parse(result)\n\n def _character_unauthed(self, character):\n req = self._make_request(self.SPECIFIC_CHARACTER_RESOURCE, (character,), False)\n if req.status_code != 200:\n return None\n\n result = req.json()\n return Character.parse(result)\n\n def character(self, character=None):\n if self.auth is True and (self.username == character or character is None):\n return self._character_authed(character or self.username)\n else:\n return self._character_unauthed(character)\n\n def sections(self):\n req = self._make_request(self.NEWS_RESOURCE, authed=self.auth)\n if req.status_code != 200:\n return None\n\n result = req.json()\n sections_list = map(NewsSection.parse, result)\n return sections_list\n\n def posts(self, section, page=None):\n params = {}\n if page is not None:\n params['page'] = page\n req = self._make_request(self.SPECIFIC_NEWS_RESOURCE, (section,), authed=self.auth,\n params=params)\n if req.status_code != 200:\n return None\n\n result = req.json()\n return result\n\n def post(self, section, number):\n pass\n\n\nclass APIError(Exception):\n pass\n\n\nclass Character:\n\n def __init__(self, name, fullname, level, house, xp_rank, player_kills, mob_kills,\n explorer_rank, current_class, messages_total=None, messages_unread=None):\n self.name = name\n self.fullname = fullname\n self.level = level\n self.house = house\n self.xp_rank = xp_rank\n self.player_kills = player_kills\n self.mob_kills = mob_kills\n self.explorer_rank = explorer_rank\n self.current_class = current_class\n self.messages_total = messages_total\n self.messages_unread = messages_unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n fullname = json_data['fullname']\n level = int(json_data['level'])\n house = json_data['house']\n xp_rank = json_data['xp_rank']\n player_kills = int(json_data['player_kills'])\n mob_kills = int(json_data['mob_kills'])\n explorer_rank = int(json_data['explorer_rank'])\n current_class = json_data['class']\n messages_total = None\n messages_unread = None\n if 'messages_total' in json_data and 'messages_unread' in json_data:\n messages_total = json_data['messages_total']\n messages_unread = json_data['messages_unread']\n\n return Character(name, fullname, level, house, xp_rank, player_kills, mob_kills,\n explorer_rank, current_class, messages_total, messages_unread)\n\n def __repr__(self):\n return '<Character: {} ({})>'.format(self.name, self.fullname)\n\n\nclass NewsSection:\n\n def __init__(self, name, read, total, unread):\n self.name = name\n self.read = read\n self.total = total\n self.unread = unread\n\n @staticmethod\n def parse(json_data):\n name = json_data['name']\n read = int(json_data['read'])\n total = int(json_data['total'])\n unread = int(json_data['unread'])\n return NewsSection(name, read, total, unread)\n\n def __repr__(self):\n return '<NewsSection: {} ({}/{} unread)>'.format(self.name, self.read, self.total)\n",
"step-ids": [
12,
15,
20,
23,
26
]
}
|
[
12,
15,
20,
23,
26
] |
# pylint: disable=missing-docstring,function-redefined
import uuid
from behave import given, then, when
import requests
from features.steps import utils
from testsuite.oauth import authorize
from testsuite import fhir
ERROR_AUTHORIZATION_FAILED = 'Authorization failed.'
ERROR_BAD_CONFORMANCE = 'Could not parse conformance statement.'
ERROR_OAUTH_DISABLED = 'OAuth is not enabled on this server.'
ERROR_SELENIUM_SCREENSHOT = '''
An authorization error occurred: {0}
For more information, see:
{2}{1}
'''
@given('OAuth is enabled')
def step_impl(context):
assert context.vendor_config['auth']['strategy'] != 'none', \
ERROR_OAUTH_DISABLED
if context.conformance is None:
assert False, ERROR_BAD_CONFORMANCE
fhir.get_oauth_uris(context.conformance)
@given('I am logged in')
def step_impl(context):
assert context.oauth is not None, ERROR_AUTHORIZATION_FAILED
assert context.oauth.access_token is not None, \
ERROR_AUTHORIZATION_FAILED
@given('I am not logged in')
def step_impl(context):
context.oauth.access_token = None
@when('I log in')
def step_impl(context):
try:
context.oauth.authorize()
except authorize.AuthorizationException as err:
error = ERROR_SELENIUM_SCREENSHOT.format(
err.args[0],
err.args[1],
context.vendor_config['host'],
)
assert False, error
@when('I ask for authorization without the {field_name} field')
def step_impl(context, field_name):
""" A step 1 implementation with a named field missing.
"""
fields = {
'response_type': 'code',
'client_id': context.vendor_config['auth']['client_id'],
'redirect_uri': context.vendor_config['auth']['redirect_uri'],
'scope': context.vendor_config['auth']['scope'],
'state': uuid.uuid4(),
}
del fields[field_name]
uris = fhir.get_oauth_uris(context.conformance)
response = requests.get(uris['authorize'],
params=fields,
allow_redirects=False,
timeout=5)
context.response = response
@when('I ask for authorization with the following override')
def step_impl(context):
urls = fhir.get_oauth_uris(context.conformance)
authorizer = authorize.Authorizer(config=context.vendor_config['auth'],
authorize_url=urls['authorize'])
with authorizer:
parameters = authorizer.launch_params
parameters.update(dict(context.table))
try:
authorizer.ask_for_authorization(parameters)
response = authorizer.provide_user_input()
except authorize.AuthorizationException as err:
error = ERROR_SELENIUM_SCREENSHOT.format(
err.args[0],
err.args[1],
context.vendor_config['host'],
)
assert False, error
context.authorizer = authorizer
context.authorization_sent = parameters
context.authorization_received = response
@when('I ask for authorization')
def step_impl(context):
try:
context.code = context.oauth.request_authorization()
except authorize.AuthorizationException as err:
error = ERROR_SELENIUM_SCREENSHOT.format(
err.args[0],
err.args[1],
context.vendor_config['host'],
)
assert False, error
@when('I exchange my authorization code')
def step_impl(context):
""" A fully formed and correct step 3 implementation.
"""
fields = {
'grant_type': 'authorization_code',
'code': context.code,
'client_id': context.vendor_config['auth']['client_id'],
'redirect_uri': context.vendor_config['auth']['redirect_uri'],
}
context.response = token_request(fields,
context.vendor_config['auth'],
context.conformance)
@when('I exchange my authorization code without the {field_name} field')
def step_impl(context, field_name):
""" A step 3 implementation missing a named field.
"""
fields = {
'grant_type': 'authorization_code',
'code': context.code,
'client_id': context.vendor_config['auth']['client_id'],
'redirect_uri': context.vendor_config['auth']['redirect_uri'],
}
del fields[field_name]
context.response = token_request(fields,
context.vendor_config['auth'],
context.conformance)
@when('I exchange my authorization code with the following override')
def step_impl(context):
""" A step 3 implementation with a table specified override.
"""
fields = {
'grant_type': 'authorization_code',
'code': context.code,
'client_id': context.vendor_config['auth']['client_id'],
'redirect_uri': context.vendor_config['auth']['redirect_uri'],
}
fields.update(dict(context.table))
context.response = token_request(fields,
context.vendor_config['auth'],
context.conformance)
@then('the authorization response redirect should validate')
def step_impl(context):
try:
response = context.authorization_received
context.authorizer._validate_state(response) # pylint: disable=protected-access
context.authorizer._validate_code(response) # pylint: disable=protected-access
except AssertionError as err:
assert False, utils.bad_redirect_assert(err,
context.authorization_sent,
response)
@when('I ask for a new access token')
def step_impl(context):
""" A fully formed and correct implementation of step 5.
"""
fields = {
'grant_type': 'refresh_token',
'refresh_token': context.oauth.refresh_token,
'scope': context.vendor_config['auth']['scope'],
}
context.response = token_request(fields,
context.vendor_config['auth'],
context.conformance)
@when('I ask for a new access token without the {field_name} field')
def step_impl(context, field_name):
""" A step 5 implementation missing a named field.
"""
fields = {
'grant_type': 'refresh_token',
'refresh_token': context.oauth.refresh_token,
'scope': context.vendor_config['auth']['scope'],
}
del fields[field_name]
context.response = token_request(fields,
context.vendor_config['auth'],
context.conformance)
def token_request(post_data, auth_config, conformance):
""" Make a token request.
Should be modeled after `testsuite.oauth.authorization_code._token_request`.
Args:
post_data (dict): The parameters to send.
auth_config (dict): The vendor auth config.
conformance (dict): The server's conformance statement so that URIs can be determined.
Returns:
A requests Response object.
"""
auth = None
if auth_config.get('confidential_client'):
auth = requests.auth.HTTPBasicAuth(auth_config['client_id'],
auth_config['client_secret'])
uris = fhir.get_oauth_uris(conformance)
response = requests.post(uris['token'],
data=post_data,
allow_redirects=False,
auth=auth,
timeout=5)
return response
|
normal
|
{
"blob_id": "ef0c9f740f1ca0906aeb7a5c5e5d35baca189310",
"index": 6128,
"step-1": "<mask token>\n\n\n@given('I am logged in')\ndef step_impl(context):\n assert context.oauth is not None, ERROR_AUTHORIZATION_FAILED\n assert context.oauth.access_token is not None, ERROR_AUTHORIZATION_FAILED\n\n\n@given('I am not logged in')\ndef step_impl(context):\n context.oauth.access_token = None\n\n\n<mask token>\n\n\n@when('I ask for authorization without the {field_name} field')\ndef step_impl(context, field_name):\n \"\"\" A step 1 implementation with a named field missing.\n \"\"\"\n fields = {'response_type': 'code', 'client_id': context.vendor_config[\n 'auth']['client_id'], 'redirect_uri': context.vendor_config['auth']\n ['redirect_uri'], 'scope': context.vendor_config['auth']['scope'],\n 'state': uuid.uuid4()}\n del fields[field_name]\n uris = fhir.get_oauth_uris(context.conformance)\n response = requests.get(uris['authorize'], params=fields,\n allow_redirects=False, timeout=5)\n context.response = response\n\n\n@when('I ask for authorization with the following override')\ndef step_impl(context):\n urls = fhir.get_oauth_uris(context.conformance)\n authorizer = authorize.Authorizer(config=context.vendor_config['auth'],\n authorize_url=urls['authorize'])\n with authorizer:\n parameters = authorizer.launch_params\n parameters.update(dict(context.table))\n try:\n authorizer.ask_for_authorization(parameters)\n response = authorizer.provide_user_input()\n except authorize.AuthorizationException as err:\n error = ERROR_SELENIUM_SCREENSHOT.format(err.args[0], err.args[\n 1], context.vendor_config['host'])\n assert False, error\n context.authorizer = authorizer\n context.authorization_sent = parameters\n context.authorization_received = response\n\n\n@when('I ask for authorization')\ndef step_impl(context):\n try:\n context.code = context.oauth.request_authorization()\n except authorize.AuthorizationException as err:\n error = ERROR_SELENIUM_SCREENSHOT.format(err.args[0], err.args[1],\n context.vendor_config['host'])\n assert False, error\n\n\n<mask token>\n\n\n@when('I exchange my authorization code with the following override')\ndef step_impl(context):\n \"\"\" A step 3 implementation with a table specified override.\n \"\"\"\n fields = {'grant_type': 'authorization_code', 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri']}\n fields.update(dict(context.table))\n context.response = token_request(fields, context.vendor_config['auth'],\n context.conformance)\n\n\n@then('the authorization response redirect should validate')\ndef step_impl(context):\n try:\n response = context.authorization_received\n context.authorizer._validate_state(response)\n context.authorizer._validate_code(response)\n except AssertionError as err:\n assert False, utils.bad_redirect_assert(err, context.\n authorization_sent, response)\n\n\n<mask token>\n\n\n@when('I ask for a new access token without the {field_name} field')\ndef step_impl(context, field_name):\n \"\"\" A step 5 implementation missing a named field.\n \"\"\"\n fields = {'grant_type': 'refresh_token', 'refresh_token': context.oauth\n .refresh_token, 'scope': context.vendor_config['auth']['scope']}\n del fields[field_name]\n context.response = token_request(fields, context.vendor_config['auth'],\n context.conformance)\n\n\ndef token_request(post_data, auth_config, conformance):\n \"\"\" Make a token request.\n\n Should be modeled after `testsuite.oauth.authorization_code._token_request`.\n\n Args:\n post_data (dict): The parameters to send.\n auth_config (dict): The vendor auth config.\n conformance (dict): The server's conformance statement so that URIs can be determined.\n\n Returns:\n A requests Response object.\n \"\"\"\n auth = None\n if auth_config.get('confidential_client'):\n auth = requests.auth.HTTPBasicAuth(auth_config['client_id'],\n auth_config['client_secret'])\n uris = fhir.get_oauth_uris(conformance)\n response = requests.post(uris['token'], data=post_data, allow_redirects\n =False, auth=auth, timeout=5)\n return response\n",
"step-2": "<mask token>\n\n\n@given('I am logged in')\ndef step_impl(context):\n assert context.oauth is not None, ERROR_AUTHORIZATION_FAILED\n assert context.oauth.access_token is not None, ERROR_AUTHORIZATION_FAILED\n\n\n@given('I am not logged in')\ndef step_impl(context):\n context.oauth.access_token = None\n\n\n<mask token>\n\n\n@when('I ask for authorization without the {field_name} field')\ndef step_impl(context, field_name):\n \"\"\" A step 1 implementation with a named field missing.\n \"\"\"\n fields = {'response_type': 'code', 'client_id': context.vendor_config[\n 'auth']['client_id'], 'redirect_uri': context.vendor_config['auth']\n ['redirect_uri'], 'scope': context.vendor_config['auth']['scope'],\n 'state': uuid.uuid4()}\n del fields[field_name]\n uris = fhir.get_oauth_uris(context.conformance)\n response = requests.get(uris['authorize'], params=fields,\n allow_redirects=False, timeout=5)\n context.response = response\n\n\n@when('I ask for authorization with the following override')\ndef step_impl(context):\n urls = fhir.get_oauth_uris(context.conformance)\n authorizer = authorize.Authorizer(config=context.vendor_config['auth'],\n authorize_url=urls['authorize'])\n with authorizer:\n parameters = authorizer.launch_params\n parameters.update(dict(context.table))\n try:\n authorizer.ask_for_authorization(parameters)\n response = authorizer.provide_user_input()\n except authorize.AuthorizationException as err:\n error = ERROR_SELENIUM_SCREENSHOT.format(err.args[0], err.args[\n 1], context.vendor_config['host'])\n assert False, error\n context.authorizer = authorizer\n context.authorization_sent = parameters\n context.authorization_received = response\n\n\n@when('I ask for authorization')\ndef step_impl(context):\n try:\n context.code = context.oauth.request_authorization()\n except authorize.AuthorizationException as err:\n error = ERROR_SELENIUM_SCREENSHOT.format(err.args[0], err.args[1],\n context.vendor_config['host'])\n assert False, error\n\n\n<mask token>\n\n\n@when('I exchange my authorization code without the {field_name} field')\ndef step_impl(context, field_name):\n \"\"\" A step 3 implementation missing a named field.\n \"\"\"\n fields = {'grant_type': 'authorization_code', 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri']}\n del fields[field_name]\n context.response = token_request(fields, context.vendor_config['auth'],\n context.conformance)\n\n\n@when('I exchange my authorization code with the following override')\ndef step_impl(context):\n \"\"\" A step 3 implementation with a table specified override.\n \"\"\"\n fields = {'grant_type': 'authorization_code', 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri']}\n fields.update(dict(context.table))\n context.response = token_request(fields, context.vendor_config['auth'],\n context.conformance)\n\n\n@then('the authorization response redirect should validate')\ndef step_impl(context):\n try:\n response = context.authorization_received\n context.authorizer._validate_state(response)\n context.authorizer._validate_code(response)\n except AssertionError as err:\n assert False, utils.bad_redirect_assert(err, context.\n authorization_sent, response)\n\n\n<mask token>\n\n\n@when('I ask for a new access token without the {field_name} field')\ndef step_impl(context, field_name):\n \"\"\" A step 5 implementation missing a named field.\n \"\"\"\n fields = {'grant_type': 'refresh_token', 'refresh_token': context.oauth\n .refresh_token, 'scope': context.vendor_config['auth']['scope']}\n del fields[field_name]\n context.response = token_request(fields, context.vendor_config['auth'],\n context.conformance)\n\n\ndef token_request(post_data, auth_config, conformance):\n \"\"\" Make a token request.\n\n Should be modeled after `testsuite.oauth.authorization_code._token_request`.\n\n Args:\n post_data (dict): The parameters to send.\n auth_config (dict): The vendor auth config.\n conformance (dict): The server's conformance statement so that URIs can be determined.\n\n Returns:\n A requests Response object.\n \"\"\"\n auth = None\n if auth_config.get('confidential_client'):\n auth = requests.auth.HTTPBasicAuth(auth_config['client_id'],\n auth_config['client_secret'])\n uris = fhir.get_oauth_uris(conformance)\n response = requests.post(uris['token'], data=post_data, allow_redirects\n =False, auth=auth, timeout=5)\n return response\n",
"step-3": "<mask token>\n\n\n@given('OAuth is enabled')\ndef step_impl(context):\n assert context.vendor_config['auth']['strategy'\n ] != 'none', ERROR_OAUTH_DISABLED\n if context.conformance is None:\n assert False, ERROR_BAD_CONFORMANCE\n fhir.get_oauth_uris(context.conformance)\n\n\n@given('I am logged in')\ndef step_impl(context):\n assert context.oauth is not None, ERROR_AUTHORIZATION_FAILED\n assert context.oauth.access_token is not None, ERROR_AUTHORIZATION_FAILED\n\n\n@given('I am not logged in')\ndef step_impl(context):\n context.oauth.access_token = None\n\n\n@when('I log in')\ndef step_impl(context):\n try:\n context.oauth.authorize()\n except authorize.AuthorizationException as err:\n error = ERROR_SELENIUM_SCREENSHOT.format(err.args[0], err.args[1],\n context.vendor_config['host'])\n assert False, error\n\n\n@when('I ask for authorization without the {field_name} field')\ndef step_impl(context, field_name):\n \"\"\" A step 1 implementation with a named field missing.\n \"\"\"\n fields = {'response_type': 'code', 'client_id': context.vendor_config[\n 'auth']['client_id'], 'redirect_uri': context.vendor_config['auth']\n ['redirect_uri'], 'scope': context.vendor_config['auth']['scope'],\n 'state': uuid.uuid4()}\n del fields[field_name]\n uris = fhir.get_oauth_uris(context.conformance)\n response = requests.get(uris['authorize'], params=fields,\n allow_redirects=False, timeout=5)\n context.response = response\n\n\n@when('I ask for authorization with the following override')\ndef step_impl(context):\n urls = fhir.get_oauth_uris(context.conformance)\n authorizer = authorize.Authorizer(config=context.vendor_config['auth'],\n authorize_url=urls['authorize'])\n with authorizer:\n parameters = authorizer.launch_params\n parameters.update(dict(context.table))\n try:\n authorizer.ask_for_authorization(parameters)\n response = authorizer.provide_user_input()\n except authorize.AuthorizationException as err:\n error = ERROR_SELENIUM_SCREENSHOT.format(err.args[0], err.args[\n 1], context.vendor_config['host'])\n assert False, error\n context.authorizer = authorizer\n context.authorization_sent = parameters\n context.authorization_received = response\n\n\n@when('I ask for authorization')\ndef step_impl(context):\n try:\n context.code = context.oauth.request_authorization()\n except authorize.AuthorizationException as err:\n error = ERROR_SELENIUM_SCREENSHOT.format(err.args[0], err.args[1],\n context.vendor_config['host'])\n assert False, error\n\n\n<mask token>\n\n\n@when('I exchange my authorization code without the {field_name} field')\ndef step_impl(context, field_name):\n \"\"\" A step 3 implementation missing a named field.\n \"\"\"\n fields = {'grant_type': 'authorization_code', 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri']}\n del fields[field_name]\n context.response = token_request(fields, context.vendor_config['auth'],\n context.conformance)\n\n\n@when('I exchange my authorization code with the following override')\ndef step_impl(context):\n \"\"\" A step 3 implementation with a table specified override.\n \"\"\"\n fields = {'grant_type': 'authorization_code', 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri']}\n fields.update(dict(context.table))\n context.response = token_request(fields, context.vendor_config['auth'],\n context.conformance)\n\n\n@then('the authorization response redirect should validate')\ndef step_impl(context):\n try:\n response = context.authorization_received\n context.authorizer._validate_state(response)\n context.authorizer._validate_code(response)\n except AssertionError as err:\n assert False, utils.bad_redirect_assert(err, context.\n authorization_sent, response)\n\n\n<mask token>\n\n\n@when('I ask for a new access token without the {field_name} field')\ndef step_impl(context, field_name):\n \"\"\" A step 5 implementation missing a named field.\n \"\"\"\n fields = {'grant_type': 'refresh_token', 'refresh_token': context.oauth\n .refresh_token, 'scope': context.vendor_config['auth']['scope']}\n del fields[field_name]\n context.response = token_request(fields, context.vendor_config['auth'],\n context.conformance)\n\n\ndef token_request(post_data, auth_config, conformance):\n \"\"\" Make a token request.\n\n Should be modeled after `testsuite.oauth.authorization_code._token_request`.\n\n Args:\n post_data (dict): The parameters to send.\n auth_config (dict): The vendor auth config.\n conformance (dict): The server's conformance statement so that URIs can be determined.\n\n Returns:\n A requests Response object.\n \"\"\"\n auth = None\n if auth_config.get('confidential_client'):\n auth = requests.auth.HTTPBasicAuth(auth_config['client_id'],\n auth_config['client_secret'])\n uris = fhir.get_oauth_uris(conformance)\n response = requests.post(uris['token'], data=post_data, allow_redirects\n =False, auth=auth, timeout=5)\n return response\n",
"step-4": "<mask token>\nERROR_AUTHORIZATION_FAILED = 'Authorization failed.'\nERROR_BAD_CONFORMANCE = 'Could not parse conformance statement.'\nERROR_OAUTH_DISABLED = 'OAuth is not enabled on this server.'\nERROR_SELENIUM_SCREENSHOT = \"\"\"\nAn authorization error occurred: {0}\n\nFor more information, see:\n {2}{1}\n\"\"\"\n\n\n@given('OAuth is enabled')\ndef step_impl(context):\n assert context.vendor_config['auth']['strategy'\n ] != 'none', ERROR_OAUTH_DISABLED\n if context.conformance is None:\n assert False, ERROR_BAD_CONFORMANCE\n fhir.get_oauth_uris(context.conformance)\n\n\n@given('I am logged in')\ndef step_impl(context):\n assert context.oauth is not None, ERROR_AUTHORIZATION_FAILED\n assert context.oauth.access_token is not None, ERROR_AUTHORIZATION_FAILED\n\n\n@given('I am not logged in')\ndef step_impl(context):\n context.oauth.access_token = None\n\n\n@when('I log in')\ndef step_impl(context):\n try:\n context.oauth.authorize()\n except authorize.AuthorizationException as err:\n error = ERROR_SELENIUM_SCREENSHOT.format(err.args[0], err.args[1],\n context.vendor_config['host'])\n assert False, error\n\n\n@when('I ask for authorization without the {field_name} field')\ndef step_impl(context, field_name):\n \"\"\" A step 1 implementation with a named field missing.\n \"\"\"\n fields = {'response_type': 'code', 'client_id': context.vendor_config[\n 'auth']['client_id'], 'redirect_uri': context.vendor_config['auth']\n ['redirect_uri'], 'scope': context.vendor_config['auth']['scope'],\n 'state': uuid.uuid4()}\n del fields[field_name]\n uris = fhir.get_oauth_uris(context.conformance)\n response = requests.get(uris['authorize'], params=fields,\n allow_redirects=False, timeout=5)\n context.response = response\n\n\n@when('I ask for authorization with the following override')\ndef step_impl(context):\n urls = fhir.get_oauth_uris(context.conformance)\n authorizer = authorize.Authorizer(config=context.vendor_config['auth'],\n authorize_url=urls['authorize'])\n with authorizer:\n parameters = authorizer.launch_params\n parameters.update(dict(context.table))\n try:\n authorizer.ask_for_authorization(parameters)\n response = authorizer.provide_user_input()\n except authorize.AuthorizationException as err:\n error = ERROR_SELENIUM_SCREENSHOT.format(err.args[0], err.args[\n 1], context.vendor_config['host'])\n assert False, error\n context.authorizer = authorizer\n context.authorization_sent = parameters\n context.authorization_received = response\n\n\n@when('I ask for authorization')\ndef step_impl(context):\n try:\n context.code = context.oauth.request_authorization()\n except authorize.AuthorizationException as err:\n error = ERROR_SELENIUM_SCREENSHOT.format(err.args[0], err.args[1],\n context.vendor_config['host'])\n assert False, error\n\n\n@when('I exchange my authorization code')\ndef step_impl(context):\n \"\"\" A fully formed and correct step 3 implementation.\n \"\"\"\n fields = {'grant_type': 'authorization_code', 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri']}\n context.response = token_request(fields, context.vendor_config['auth'],\n context.conformance)\n\n\n@when('I exchange my authorization code without the {field_name} field')\ndef step_impl(context, field_name):\n \"\"\" A step 3 implementation missing a named field.\n \"\"\"\n fields = {'grant_type': 'authorization_code', 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri']}\n del fields[field_name]\n context.response = token_request(fields, context.vendor_config['auth'],\n context.conformance)\n\n\n@when('I exchange my authorization code with the following override')\ndef step_impl(context):\n \"\"\" A step 3 implementation with a table specified override.\n \"\"\"\n fields = {'grant_type': 'authorization_code', 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri']}\n fields.update(dict(context.table))\n context.response = token_request(fields, context.vendor_config['auth'],\n context.conformance)\n\n\n@then('the authorization response redirect should validate')\ndef step_impl(context):\n try:\n response = context.authorization_received\n context.authorizer._validate_state(response)\n context.authorizer._validate_code(response)\n except AssertionError as err:\n assert False, utils.bad_redirect_assert(err, context.\n authorization_sent, response)\n\n\n@when('I ask for a new access token')\ndef step_impl(context):\n \"\"\" A fully formed and correct implementation of step 5.\n \"\"\"\n fields = {'grant_type': 'refresh_token', 'refresh_token': context.oauth\n .refresh_token, 'scope': context.vendor_config['auth']['scope']}\n context.response = token_request(fields, context.vendor_config['auth'],\n context.conformance)\n\n\n@when('I ask for a new access token without the {field_name} field')\ndef step_impl(context, field_name):\n \"\"\" A step 5 implementation missing a named field.\n \"\"\"\n fields = {'grant_type': 'refresh_token', 'refresh_token': context.oauth\n .refresh_token, 'scope': context.vendor_config['auth']['scope']}\n del fields[field_name]\n context.response = token_request(fields, context.vendor_config['auth'],\n context.conformance)\n\n\ndef token_request(post_data, auth_config, conformance):\n \"\"\" Make a token request.\n\n Should be modeled after `testsuite.oauth.authorization_code._token_request`.\n\n Args:\n post_data (dict): The parameters to send.\n auth_config (dict): The vendor auth config.\n conformance (dict): The server's conformance statement so that URIs can be determined.\n\n Returns:\n A requests Response object.\n \"\"\"\n auth = None\n if auth_config.get('confidential_client'):\n auth = requests.auth.HTTPBasicAuth(auth_config['client_id'],\n auth_config['client_secret'])\n uris = fhir.get_oauth_uris(conformance)\n response = requests.post(uris['token'], data=post_data, allow_redirects\n =False, auth=auth, timeout=5)\n return response\n",
"step-5": "# pylint: disable=missing-docstring,function-redefined\nimport uuid\n\nfrom behave import given, then, when\nimport requests\n\nfrom features.steps import utils\nfrom testsuite.oauth import authorize\nfrom testsuite import fhir\n\n\nERROR_AUTHORIZATION_FAILED = 'Authorization failed.'\nERROR_BAD_CONFORMANCE = 'Could not parse conformance statement.'\nERROR_OAUTH_DISABLED = 'OAuth is not enabled on this server.'\nERROR_SELENIUM_SCREENSHOT = '''\nAn authorization error occurred: {0}\n\nFor more information, see:\n {2}{1}\n'''\n\n\n@given('OAuth is enabled')\ndef step_impl(context):\n assert context.vendor_config['auth']['strategy'] != 'none', \\\n ERROR_OAUTH_DISABLED\n\n if context.conformance is None:\n assert False, ERROR_BAD_CONFORMANCE\n\n fhir.get_oauth_uris(context.conformance)\n\n\n@given('I am logged in')\ndef step_impl(context):\n assert context.oauth is not None, ERROR_AUTHORIZATION_FAILED\n assert context.oauth.access_token is not None, \\\n ERROR_AUTHORIZATION_FAILED\n\n\n@given('I am not logged in')\ndef step_impl(context):\n context.oauth.access_token = None\n\n\n@when('I log in')\ndef step_impl(context):\n try:\n context.oauth.authorize()\n except authorize.AuthorizationException as err:\n error = ERROR_SELENIUM_SCREENSHOT.format(\n err.args[0],\n err.args[1],\n context.vendor_config['host'],\n )\n assert False, error\n\n\n@when('I ask for authorization without the {field_name} field')\ndef step_impl(context, field_name):\n \"\"\" A step 1 implementation with a named field missing.\n \"\"\"\n fields = {\n 'response_type': 'code',\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n 'scope': context.vendor_config['auth']['scope'],\n 'state': uuid.uuid4(),\n }\n\n del fields[field_name]\n\n uris = fhir.get_oauth_uris(context.conformance)\n\n response = requests.get(uris['authorize'],\n params=fields,\n allow_redirects=False,\n timeout=5)\n\n context.response = response\n\n\n@when('I ask for authorization with the following override')\ndef step_impl(context):\n urls = fhir.get_oauth_uris(context.conformance)\n authorizer = authorize.Authorizer(config=context.vendor_config['auth'],\n authorize_url=urls['authorize'])\n with authorizer:\n parameters = authorizer.launch_params\n parameters.update(dict(context.table))\n\n try:\n authorizer.ask_for_authorization(parameters)\n response = authorizer.provide_user_input()\n except authorize.AuthorizationException as err:\n error = ERROR_SELENIUM_SCREENSHOT.format(\n err.args[0],\n err.args[1],\n context.vendor_config['host'],\n )\n assert False, error\n\n context.authorizer = authorizer\n context.authorization_sent = parameters\n context.authorization_received = response\n\n\n@when('I ask for authorization')\ndef step_impl(context):\n try:\n context.code = context.oauth.request_authorization()\n except authorize.AuthorizationException as err:\n error = ERROR_SELENIUM_SCREENSHOT.format(\n err.args[0],\n err.args[1],\n context.vendor_config['host'],\n )\n assert False, error\n\n\n@when('I exchange my authorization code')\ndef step_impl(context):\n \"\"\" A fully formed and correct step 3 implementation.\n \"\"\"\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)\n\n\n@when('I exchange my authorization code without the {field_name} field')\ndef step_impl(context, field_name):\n \"\"\" A step 3 implementation missing a named field.\n \"\"\"\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n del fields[field_name]\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)\n\n\n@when('I exchange my authorization code with the following override')\ndef step_impl(context):\n \"\"\" A step 3 implementation with a table specified override.\n \"\"\"\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n fields.update(dict(context.table))\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)\n\n\n@then('the authorization response redirect should validate')\ndef step_impl(context):\n try:\n response = context.authorization_received\n context.authorizer._validate_state(response) # pylint: disable=protected-access\n context.authorizer._validate_code(response) # pylint: disable=protected-access\n except AssertionError as err:\n assert False, utils.bad_redirect_assert(err,\n context.authorization_sent,\n response)\n\n\n@when('I ask for a new access token')\ndef step_impl(context):\n \"\"\" A fully formed and correct implementation of step 5.\n \"\"\"\n fields = {\n 'grant_type': 'refresh_token',\n 'refresh_token': context.oauth.refresh_token,\n 'scope': context.vendor_config['auth']['scope'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)\n\n\n@when('I ask for a new access token without the {field_name} field')\ndef step_impl(context, field_name):\n \"\"\" A step 5 implementation missing a named field.\n \"\"\"\n fields = {\n 'grant_type': 'refresh_token',\n 'refresh_token': context.oauth.refresh_token,\n 'scope': context.vendor_config['auth']['scope'],\n }\n\n del fields[field_name]\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)\n\n\ndef token_request(post_data, auth_config, conformance):\n \"\"\" Make a token request.\n\n Should be modeled after `testsuite.oauth.authorization_code._token_request`.\n\n Args:\n post_data (dict): The parameters to send.\n auth_config (dict): The vendor auth config.\n conformance (dict): The server's conformance statement so that URIs can be determined.\n\n Returns:\n A requests Response object.\n \"\"\"\n auth = None\n if auth_config.get('confidential_client'):\n auth = requests.auth.HTTPBasicAuth(auth_config['client_id'],\n auth_config['client_secret'])\n\n uris = fhir.get_oauth_uris(conformance)\n\n response = requests.post(uris['token'],\n data=post_data,\n allow_redirects=False,\n auth=auth,\n timeout=5)\n\n return response\n",
"step-ids": [
9,
10,
12,
15,
17
]
}
|
[
9,
10,
12,
15,
17
] |
def has23(nums):
this = nums[0] == 2 or nums[0] == 3
that = nums[1] == 2 or nums[1] == 3
return this or that
|
normal
|
{
"blob_id": "174c4c1ed7f2197e012644999cf23f5e82f4b7c3",
"index": 3148,
"step-1": "<mask token>\n",
"step-2": "def has23(nums):\n this = nums[0] == 2 or nums[0] == 3\n that = nums[1] == 2 or nums[1] == 3\n return this or that\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from collections import Counter
import pandas as pd
import string
from collections import namedtuple, defaultdict
import csv
import sys
import torch
import numpy as np
from sklearn.preprocessing import LabelEncoder
from scipy.sparse import coo_matrix
from tqdm import tqdm
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = 'cpu'
def get_data():
df = pd.read_csv("./data/filteredCorpus.csv")
df_filt = df[df['outcome']==True] # use only successful games
df_filt = df_filt[df_filt['role']=='speaker'] # use speaker utterances
df_filt = df_filt[df_filt['source']=='human'] # use speaker utterances
# making a list of utterances that we want to use, so we can take these rows from df_filt
utt = df_filt['contents']
utt_filt = [u.lower() for u in utt if len(u.split()) == 1] # only use one word utterances
utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for u in utt_filt] # remove punctuation
utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys()) # use utterances that appear more than once
# df_filt = df_filt[df_filt['numCleanWords'] == 1]
df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())
df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(str.maketrans('', '', string.punctuation)))# filter to take out punctuation
df_final = df.loc[df['contents'].isin(utt_final)] # this is the dataset of all the games that we want to use
le = LabelEncoder()
df_final['contents'] = le.fit_transform(df_final['contents'])
return df_final, le
def get_meaning_matrix(df):
df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))
df['colors'] = df['colors'].apply(lambda x: str(x))
colors_le = LabelEncoder()
df['colors'] = colors_le.fit_transform(df['colors']) # 100 x 100 (test data)
print("length colors and contents", len(df['colors']), len(df['contents']))
print("set colors and contents", len(set(df['colors'])), len(set(df['contents'])))
meaning_mat = pd.crosstab(df['colors'], df['contents']) # rows are colors, columns are utterances
# row numbers and column numbers correspond to labels from colors_le and le (utterances) from get_data()
meaning_mat = np.array(meaning_mat) # a num_color x num_utterances matrix
for i in range(len(meaning_mat[:,0])):
if sum(meaning_mat[i,:]) == 0:
print("meaning mat is 0 for this row: ", i)
for j in range(len(meaning_mat[0,:])):
if meaning_mat[i,j] == 0:
print("meaning mat is 0 at: ", i,j," !!!")
return meaning_mat, colors_le
# Literal listener data function
def get_pragmatic_listener_testing_data(df):
output = []
all_utt = list(set(list(df['contents'])))
desc_to_idx = {u: i for i,u in enumerate(all_utt)}
for _, row in tqdm(df.iterrows(), total=len(df)):
utt = torch.tensor(row['contents']).to(device)
correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32)
alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']], dtype=torch.float32)
alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']], dtype=torch.float32)
colors = (correct, alt1, alt2)
# idxs = random.choice([0,1,2]) # randomly permute colors
idxs = np.arange(3)
np.random.shuffle(idxs)
colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]], colors[idxs[2]]]).to(device)
correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device) # index where correct color goes
output.append((correct_idx, colors_shuff, utt))
return output, all_utt, desc_to_idx # [correct_referent_idx, list_of_three_referents, descriptor_idx] desc_to_idx idx_to_desc
# return all_utt, idx_to_desc # [correct_referent_idx, list_of_three_referents, descriptor_idx] desc_to_idx idx_to_desc
def get_literal_listener_training_data(df):
output = []
all_utt = df['contents']
idx_to_desc = {i: u for i,u in enumerate(all_utt)}
for _, row in tqdm(df.iterrows(), total=len(df)):
utt = torch.tensor(row['contents']).to(device)
correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32)
alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']], dtype=torch.float32)
alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']], dtype=torch.float32)
colors = (correct, alt1, alt2)
# idxs = random.choice([0,1,2]) # randomly permute colors
idxs = np.arange(3)
np.random.shuffle(idxs)
colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]], colors[idxs[2]]]).to(device)
correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device) # index where correct color goes
output.append((correct_idx, colors_shuff, utt))
return output#, all_utt, idx_to_desc # [correct_referent_idx, list_of_three_referents, descriptor_idx] desc_to_idx idx_to_desc
# Literal Speaker data function - hi r u ok
def get_literal_speaker_training_data(df):
output = []
for _, row in tqdm(df.iterrows(), total=len(df)):
utt = torch.tensor(row['contents'], dtype=torch.long).to(device)
color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32).to(device)
output.append([color, utt])
return output # [referent, utterance_idx]
|
normal
|
{
"blob_id": "613b060ee50b49417342cfa70b36f77d112dcc58",
"index": 2951,
"step-1": "<mask token>\n\n\ndef get_data():\n df = pd.read_csv('./data/filteredCorpus.csv')\n df_filt = df[df['outcome'] == True]\n df_filt = df_filt[df_filt['role'] == 'speaker']\n df_filt = df_filt[df_filt['source'] == 'human']\n utt = df_filt['contents']\n utt_filt = [u.lower() for u in utt if len(u.split()) == 1]\n utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for\n u in utt_filt]\n utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(\n str.maketrans('', '', string.punctuation)))\n df_final = df.loc[df['contents'].isin(utt_final)]\n le = LabelEncoder()\n df_final['contents'] = le.fit_transform(df_final['contents'])\n return df_final, le\n\n\ndef get_meaning_matrix(df):\n df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))\n df['colors'] = df['colors'].apply(lambda x: str(x))\n colors_le = LabelEncoder()\n df['colors'] = colors_le.fit_transform(df['colors'])\n print('length colors and contents', len(df['colors']), len(df['contents']))\n print('set colors and contents', len(set(df['colors'])), len(set(df[\n 'contents'])))\n meaning_mat = pd.crosstab(df['colors'], df['contents'])\n meaning_mat = np.array(meaning_mat)\n for i in range(len(meaning_mat[:, 0])):\n if sum(meaning_mat[i, :]) == 0:\n print('meaning mat is 0 for this row: ', i)\n for j in range(len(meaning_mat[0, :])):\n if meaning_mat[i, j] == 0:\n print('meaning mat is 0 at: ', i, j, ' !!!')\n return meaning_mat, colors_le\n\n\n<mask token>\n\n\ndef get_literal_listener_training_data(df):\n output = []\n all_utt = df['contents']\n idx_to_desc = {i: u for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output\n\n\ndef get_literal_speaker_training_data(df):\n output = []\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents'], dtype=torch.long).to(device)\n color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32).to(device)\n output.append([color, utt])\n return output\n",
"step-2": "<mask token>\n\n\ndef get_data():\n df = pd.read_csv('./data/filteredCorpus.csv')\n df_filt = df[df['outcome'] == True]\n df_filt = df_filt[df_filt['role'] == 'speaker']\n df_filt = df_filt[df_filt['source'] == 'human']\n utt = df_filt['contents']\n utt_filt = [u.lower() for u in utt if len(u.split()) == 1]\n utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for\n u in utt_filt]\n utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(\n str.maketrans('', '', string.punctuation)))\n df_final = df.loc[df['contents'].isin(utt_final)]\n le = LabelEncoder()\n df_final['contents'] = le.fit_transform(df_final['contents'])\n return df_final, le\n\n\ndef get_meaning_matrix(df):\n df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))\n df['colors'] = df['colors'].apply(lambda x: str(x))\n colors_le = LabelEncoder()\n df['colors'] = colors_le.fit_transform(df['colors'])\n print('length colors and contents', len(df['colors']), len(df['contents']))\n print('set colors and contents', len(set(df['colors'])), len(set(df[\n 'contents'])))\n meaning_mat = pd.crosstab(df['colors'], df['contents'])\n meaning_mat = np.array(meaning_mat)\n for i in range(len(meaning_mat[:, 0])):\n if sum(meaning_mat[i, :]) == 0:\n print('meaning mat is 0 for this row: ', i)\n for j in range(len(meaning_mat[0, :])):\n if meaning_mat[i, j] == 0:\n print('meaning mat is 0 at: ', i, j, ' !!!')\n return meaning_mat, colors_le\n\n\ndef get_pragmatic_listener_testing_data(df):\n output = []\n all_utt = list(set(list(df['contents'])))\n desc_to_idx = {u: i for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output, all_utt, desc_to_idx\n\n\ndef get_literal_listener_training_data(df):\n output = []\n all_utt = df['contents']\n idx_to_desc = {i: u for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output\n\n\ndef get_literal_speaker_training_data(df):\n output = []\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents'], dtype=torch.long).to(device)\n color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32).to(device)\n output.append([color, utt])\n return output\n",
"step-3": "<mask token>\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice = 'cpu'\n\n\ndef get_data():\n df = pd.read_csv('./data/filteredCorpus.csv')\n df_filt = df[df['outcome'] == True]\n df_filt = df_filt[df_filt['role'] == 'speaker']\n df_filt = df_filt[df_filt['source'] == 'human']\n utt = df_filt['contents']\n utt_filt = [u.lower() for u in utt if len(u.split()) == 1]\n utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for\n u in utt_filt]\n utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(\n str.maketrans('', '', string.punctuation)))\n df_final = df.loc[df['contents'].isin(utt_final)]\n le = LabelEncoder()\n df_final['contents'] = le.fit_transform(df_final['contents'])\n return df_final, le\n\n\ndef get_meaning_matrix(df):\n df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))\n df['colors'] = df['colors'].apply(lambda x: str(x))\n colors_le = LabelEncoder()\n df['colors'] = colors_le.fit_transform(df['colors'])\n print('length colors and contents', len(df['colors']), len(df['contents']))\n print('set colors and contents', len(set(df['colors'])), len(set(df[\n 'contents'])))\n meaning_mat = pd.crosstab(df['colors'], df['contents'])\n meaning_mat = np.array(meaning_mat)\n for i in range(len(meaning_mat[:, 0])):\n if sum(meaning_mat[i, :]) == 0:\n print('meaning mat is 0 for this row: ', i)\n for j in range(len(meaning_mat[0, :])):\n if meaning_mat[i, j] == 0:\n print('meaning mat is 0 at: ', i, j, ' !!!')\n return meaning_mat, colors_le\n\n\ndef get_pragmatic_listener_testing_data(df):\n output = []\n all_utt = list(set(list(df['contents'])))\n desc_to_idx = {u: i for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output, all_utt, desc_to_idx\n\n\ndef get_literal_listener_training_data(df):\n output = []\n all_utt = df['contents']\n idx_to_desc = {i: u for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output\n\n\ndef get_literal_speaker_training_data(df):\n output = []\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents'], dtype=torch.long).to(device)\n color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32).to(device)\n output.append([color, utt])\n return output\n",
"step-4": "from collections import Counter\nimport pandas as pd\nimport string\nfrom collections import namedtuple, defaultdict\nimport csv\nimport sys\nimport torch\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom scipy.sparse import coo_matrix\nfrom tqdm import tqdm\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice = 'cpu'\n\n\ndef get_data():\n df = pd.read_csv('./data/filteredCorpus.csv')\n df_filt = df[df['outcome'] == True]\n df_filt = df_filt[df_filt['role'] == 'speaker']\n df_filt = df_filt[df_filt['source'] == 'human']\n utt = df_filt['contents']\n utt_filt = [u.lower() for u in utt if len(u.split()) == 1]\n utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for\n u in utt_filt]\n utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(\n str.maketrans('', '', string.punctuation)))\n df_final = df.loc[df['contents'].isin(utt_final)]\n le = LabelEncoder()\n df_final['contents'] = le.fit_transform(df_final['contents'])\n return df_final, le\n\n\ndef get_meaning_matrix(df):\n df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))\n df['colors'] = df['colors'].apply(lambda x: str(x))\n colors_le = LabelEncoder()\n df['colors'] = colors_le.fit_transform(df['colors'])\n print('length colors and contents', len(df['colors']), len(df['contents']))\n print('set colors and contents', len(set(df['colors'])), len(set(df[\n 'contents'])))\n meaning_mat = pd.crosstab(df['colors'], df['contents'])\n meaning_mat = np.array(meaning_mat)\n for i in range(len(meaning_mat[:, 0])):\n if sum(meaning_mat[i, :]) == 0:\n print('meaning mat is 0 for this row: ', i)\n for j in range(len(meaning_mat[0, :])):\n if meaning_mat[i, j] == 0:\n print('meaning mat is 0 at: ', i, j, ' !!!')\n return meaning_mat, colors_le\n\n\ndef get_pragmatic_listener_testing_data(df):\n output = []\n all_utt = list(set(list(df['contents'])))\n desc_to_idx = {u: i for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output, all_utt, desc_to_idx\n\n\ndef get_literal_listener_training_data(df):\n output = []\n all_utt = df['contents']\n idx_to_desc = {i: u for i, u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']],\n dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']],\n dtype=torch.float32)\n colors = correct, alt1, alt2\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]],\n colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device)\n output.append((correct_idx, colors_shuff, utt))\n return output\n\n\ndef get_literal_speaker_training_data(df):\n output = []\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents'], dtype=torch.long).to(device)\n color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']],\n dtype=torch.float32).to(device)\n output.append([color, utt])\n return output\n",
"step-5": "from collections import Counter\nimport pandas as pd\nimport string\nfrom collections import namedtuple, defaultdict\nimport csv\nimport sys\nimport torch\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom scipy.sparse import coo_matrix\nfrom tqdm import tqdm\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice = 'cpu'\n\ndef get_data():\n df = pd.read_csv(\"./data/filteredCorpus.csv\")\n df_filt = df[df['outcome']==True] # use only successful games\n df_filt = df_filt[df_filt['role']=='speaker'] # use speaker utterances\n df_filt = df_filt[df_filt['source']=='human'] # use speaker utterances\n\n # making a list of utterances that we want to use, so we can take these rows from df_filt\n utt = df_filt['contents']\n utt_filt = [u.lower() for u in utt if len(u.split()) == 1] # only use one word utterances\n utt_filt = [u.translate(str.maketrans('', '', string.punctuation)) for u in utt_filt] # remove punctuation\n utt_final = list((Counter(utt_filt) - Counter(set(utt_filt))).keys()) # use utterances that appear more than once\n\n # df_filt = df_filt[df_filt['numCleanWords'] == 1]\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.lower())\n df_filt['contents'] = df_filt['contents'].apply(lambda x: x.translate(str.maketrans('', '', string.punctuation)))# filter to take out punctuation\n df_final = df.loc[df['contents'].isin(utt_final)] # this is the dataset of all the games that we want to use\n\n le = LabelEncoder()\n df_final['contents'] = le.fit_transform(df_final['contents'])\n\n return df_final, le\n\n\n\ndef get_meaning_matrix(df):\n df['colors'] = list(zip(df['clickColH'], df['clickColS'], df['clickColL']))\n df['colors'] = df['colors'].apply(lambda x: str(x))\n colors_le = LabelEncoder()\n df['colors'] = colors_le.fit_transform(df['colors']) # 100 x 100 (test data)\n print(\"length colors and contents\", len(df['colors']), len(df['contents']))\n print(\"set colors and contents\", len(set(df['colors'])), len(set(df['contents'])))\n meaning_mat = pd.crosstab(df['colors'], df['contents']) # rows are colors, columns are utterances\n # row numbers and column numbers correspond to labels from colors_le and le (utterances) from get_data()\n meaning_mat = np.array(meaning_mat) # a num_color x num_utterances matrix\n\n for i in range(len(meaning_mat[:,0])):\n if sum(meaning_mat[i,:]) == 0:\n print(\"meaning mat is 0 for this row: \", i)\n for j in range(len(meaning_mat[0,:])):\n if meaning_mat[i,j] == 0:\n print(\"meaning mat is 0 at: \", i,j,\" !!!\")\n return meaning_mat, colors_le\n\n\n\n\n# Literal listener data function\n\n\n\n\ndef get_pragmatic_listener_testing_data(df):\n output = []\n all_utt = list(set(list(df['contents'])))\n desc_to_idx = {u: i for i,u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']], dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']], dtype=torch.float32)\n colors = (correct, alt1, alt2)\n # idxs = random.choice([0,1,2]) # randomly permute colors\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]], colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device) # index where correct color goes\n output.append((correct_idx, colors_shuff, utt))\n return output, all_utt, desc_to_idx # [correct_referent_idx, list_of_three_referents, descriptor_idx] desc_to_idx idx_to_desc\n\n # return all_utt, idx_to_desc # [correct_referent_idx, list_of_three_referents, descriptor_idx] desc_to_idx idx_to_desc\n\n\n\n\ndef get_literal_listener_training_data(df):\n output = []\n all_utt = df['contents']\n idx_to_desc = {i: u for i,u in enumerate(all_utt)}\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents']).to(device)\n correct = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32)\n alt1 = torch.tensor(row[['alt1ColH', 'alt1ColS', 'alt1ColL']], dtype=torch.float32)\n alt2 = torch.tensor(row[['alt2ColH', 'alt2ColS', 'alt2ColL']], dtype=torch.float32)\n colors = (correct, alt1, alt2)\n # idxs = random.choice([0,1,2]) # randomly permute colors\n idxs = np.arange(3)\n np.random.shuffle(idxs)\n colors_shuff = torch.stack([colors[idxs[0]], colors[idxs[1]], colors[idxs[2]]]).to(device)\n correct_idx = torch.tensor(idxs[0], dtype=torch.long).to(device) # index where correct color goes\n output.append((correct_idx, colors_shuff, utt))\n return output#, all_utt, idx_to_desc # [correct_referent_idx, list_of_three_referents, descriptor_idx] desc_to_idx idx_to_desc\n\n# Literal Speaker data function - hi r u ok\n\ndef get_literal_speaker_training_data(df):\n output = []\n for _, row in tqdm(df.iterrows(), total=len(df)):\n utt = torch.tensor(row['contents'], dtype=torch.long).to(device)\n color = torch.tensor(row[['clickColH', 'clickColS', 'clickColL']], dtype=torch.float32).to(device)\n output.append([color, utt])\n\n return output # [referent, utterance_idx]\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import io
import xlsxwriter
import zipfile
from django.conf import settings
from django.http import Http404, HttpResponse
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.views.generic.detail import DetailView
from django.shortcuts import render, get_object_or_404, redirect
from .viewsAlexis import *
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView
from carga_horaria.models import Periodo, Colegio, Plan
from carga_horaria.formsDani import PeriodoForm, ColegioForm, PlanForm
from django.core.urlresolvers import reverse_lazy, reverse
from guardian.shortcuts import get_objects_for_user
from guardian.shortcuts import assign_perm
from guardian.shortcuts import remove_perm
from wkhtmltopdf.views import PDFTemplateResponse, PDFTemplateView
from .models import Nivel
from .models import Profesor
from .models import Asistente
from .models import Periodo
from .models import Asignacion
from .models import AsignacionExtra
from .models import AsignacionNoAula
from .models import Colegio
from .forms import AsignacionForm
from .forms import AsignacionUpdateForm
from .forms import AsignacionFUAForm
from .forms import AsignacionNoAulaFUAForm
from .forms import AsignacionFUAUpdateForm
from .forms import AsignacionNoAulaFUAUpdateForm
from .forms import AsignacionExtraForm
from .forms import AsignacionExtraUpdateForm
from .forms import AsignacionNoAulaForm
from .forms import AsignacionNoAulaUpdateForm
from .models import AsignacionAsistente
from .forms import AsignacionAsistenteForm
from .forms import AssignPermForm
from .formsDani import PlantillaPlanForm
@login_required
def assign(request):
if not request.user.is_superuser:
raise Http404
year = request.session.get('periodo', 2020)
if request.method == 'POST':
form = AssignPermForm(request.POST, year=year)
if form.is_valid():
user = form.cleaned_data['usuario']
# clear perms first
remove_perm('carga_horaria.change_colegio', user, get_objects_for_user(user, 'carga_horaria.change_colegio').filter(periode=year))
for c in form.cleaned_data['colegios']:
assign_perm('change_colegio', user, c)
form = AssignPermForm(year=year)
return render(request, 'carga_horaria/assign.html', {'form': form})
@login_required
def switch_periodo(request, year=2021):
request.session['periodo'] = year
try:
del request.session['colegio__pk']
del request.session['colegio__nombre']
except KeyError:
pass
return redirect('carga-horaria:home')
@login_required
def switch(request, pk=None):
if pk:
colegio = get_object_or_404(Colegio, pk=pk)
request.session['colegio__pk'] = colegio.pk
request.session['colegio__nombre'] = colegio.nombre
return redirect('carga-horaria:home')
colegios = get_objects_for_user(request.user, "carga_horaria.change_colegio", Colegio.objects.filter(periode=request.session.get('periodo', 2020)))
return render(request, 'carga_horaria/switch.html', {'colegios': colegios})
@login_required
def clear(request):
del request.session['colegio__pk']
del request.session['colegio__nombre']
return redirect('carga-horaria:home')
@login_required
def home(request):
return render(request, 'carga_horaria/home.html')
@login_required
def anexo(request, pk):
p = get_object_or_404(Profesor, pk=pk)
colegio = Colegio.objects.get(pk=request.session['colegio__pk'])
response = PDFTemplateResponse(request=request,
template='carga_horaria/profesor/anexo_profesor.html',
filename='anexo1.pdf',
context={'profesor': p,
'colegio': colegio,
'periodo': request.session.get('periodo', 2020)},
show_content_in_browser=settings.DEBUG)
return response
@login_required
def anexos(request):
profesores = get_for_user(request, Profesor.objects.all(), 'colegio__pk', request.user)
mem_zip = io.BytesIO()
with zipfile.ZipFile(mem_zip, mode="w", compression=zipfile.ZIP_DEFLATED) as zf:
for pp in profesores:
zf.writestr(*pp.generar_anexo_1())
response = HttpResponse(mem_zip.getvalue(), content_type='applicaton/zip')
response['Content-Disposition'] = 'attachment; filename="anexos1.zip"'
return response
@login_required
def anexo_asistente(request, pk):
p = get_object_or_404(Asistente, pk=pk)
colegio = Colegio.objects.get(pk=request.session['colegio__pk'])
response = PDFTemplateResponse(request=request,
template='carga_horaria/asistente/anexo_asistente.html',
filename='anexo1.pdf',
context={'profesor': p,
'colegio': colegio,
'periodo': request.session.get('periodo', 2020)},
show_content_in_browser=settings.DEBUG)
return response
@login_required
def anexos_asistentes(request):
profesores = get_for_user(request, Asistente.objects.all(), 'colegio__pk', request.user)
mem_zip = io.BytesIO()
with zipfile.ZipFile(mem_zip, mode="w", compression=zipfile.ZIP_DEFLATED) as zf:
for pp in profesores:
zf.writestr(*pp.generar_anexo_1())
response = HttpResponse(mem_zip.getvalue(), content_type='applicaton/zip')
response['Content-Disposition'] = 'attachment; filename="anexos1.zip"'
return response
@login_required
def profesores_pdf(request):
profesores = get_for_user(request, Profesor.objects.all(), 'colegio__pk', request.user)
response = PDFTemplateResponse(request=request,
template='carga_horaria/profesor/listado_profesor_pdf.html',
filename='listado_profesores.pdf',
context={'profesores': profesores},
show_content_in_browser=settings.DEBUG)
return response
@login_required
def asistentes_pdf(request):
asistentes = get_for_user(request, Asistente.objects.all(), 'colegio__pk', request.user)
response = PDFTemplateResponse(request=request,
template='carga_horaria/asistente/listado_asistente_pdf.html',
filename='listado_asistentes.pdf',
context={'asistentes': asistentes},
show_content_in_browser=settings.DEBUG)
return response
@login_required
def periodo_pdf(request, pk):
periodo = get_object_or_404(Periodo, pk=pk)
response = PDFTemplateResponse(request=request,
template='carga_horaria/periodo/periodo_pdf.html',
filename='carga_horaria.pdf',
context={'object': periodo},
show_content_in_browser=settings.DEBUG)
return response
@login_required
def plan_refresh(request, pk):
plan = get_object_or_404(Plan, pk=pk)
plan.refresh_asignaturas()
messages.success(request, "Se han actualizado los cursos asociados al plan ID: {}".format(plan.pk))
return redirect('carga-horaria:planes')
# class AnexoView(PDFTemplateView):
# template_name = 'carga_horaria/profesor/anexo_profesor.html'
# filename = 'anexo1.pdf'
# def get(self, request, *args, **kwargs):
# pk = kwargs.pop('pk')
# self.p = get_object_or_404(Profesor, pk=pk)
# self.ax = [{'descripcion': 'Planificación', 'curso': '', 'horas': self.p.horas_planificacion},
# {'descripcion': 'Recreo', 'curso': '', 'horas': self.p.horas_recreo}] + list(self.p.asignacionextra_set.all())
# return super(AnexoView, self).get(request, *args, **kwargs)
# def get_context_data(self, *args, **kwargs):
# ctx = super(AnexoView, self).get_context_data(*args, **kwargs)
# ctx.update({'asignaciones': self.p.asignacion_set.all(),
# 'asignaciones_extra': self.ax,
# 'profesor': self.p})
# anexo = AnexoView.as_view()
"""
Comienzo Crud Periodos
"""
class PeriodoListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):
"""
Listado de periodos
"""
model = Periodo
lookup = 'colegio__pk'
template_name = 'carga_horaria/periodo/listado_periodos.html'
search_fields = ['nombre', 'colegio']
paginate_by = 10
def get_context_data(self, *args, **kwargs):
ctx = super(PeriodoListView, self).get_context_data(*args, **kwargs)
ox = ctx['object_list']
ordering = {str(value): index for index, value in enumerate(Nivel)}
ctx['object_list'] = sorted(ox, key=lambda x: ordering["Nivel."+x.plan.nivel])
# added for convenience, pasted from AsignaturaBaseListView
ctx['levels'] = [(tag.name, tag.value) for tag in Nivel]
ctx['nivel_actual'] = self.request.GET.get('nivel')
return ctx
def get_queryset(self):
qs = super().get_queryset()
nivel = self.request.GET.get('nivel')
if nivel:
qs = qs.filter(plan__nivel=nivel)
return qs
class PeriodoDetailView(LoginRequiredMixin, DetailView):
"""
Detalle de Periodo
"""
model = Periodo
template_name = 'carga_horaria/periodo/detalle_periodo.html'
class PeriodoCreateView(LoginRequiredMixin, CreateView):
model = Periodo
form_class = PeriodoForm
template_name = 'carga_horaria/periodo/nuevo_periodo.html'
success_url = reverse_lazy('carga-horaria:periodos')
def get_form_kwargs(self, *args, **kwargs):
kwargs = super(PeriodoCreateView, self).get_form_kwargs(*args, **kwargs)
kwargs.update({'user': self.request.user,
'colegio': self.request.session.get('colegio__pk', None)})
return kwargs
class PeriodoUpdateView(LoginRequiredMixin, UpdateView):
model = Periodo
form_class = PeriodoForm
template_name = 'carga_horaria/periodo/editar_periodo.html'
def get_form_kwargs(self, *args, **kwargs):
kwargs = super(PeriodoUpdateView, self).get_form_kwargs(*args, **kwargs)
kwargs.update({'user': self.request.user,
'colegio': self.request.session.get('colegio__pk', None)})
return kwargs
def get_success_url(self):
return reverse(
'carga-horaria:periodo',
kwargs={
'pk': self.object.pk,
}
)
class PeriodoDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Periodo
success_url = reverse_lazy('carga-horaria:periodos')
template_name = 'carga_horaria/periodo/eliminar_periodo.html'
def test_func(self):
return self.request.user.is_superuser
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
"""
Fin Crud Periodos
"""
"""
Comienzo Crud Colegios
"""
class ColegioListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):
"""
Listado de periodos
"""
model = Colegio
lookup = 'pk'
template_name = 'carga_horaria/colegio/listado_colegios.html'
search_fields = ['nombre', 'jec']
paginate_by = 6
class ColegioDetailView(LoginRequiredMixin, ObjPermissionRequiredMixin, DetailView):
"""
Detalle de Colegio
"""
model = Colegio
permission = 'carga_horaria.change_colegio'
template_name = 'carga_horaria/colegio/detalle_colegio.html'
class ColegioCreateView(LoginRequiredMixin, CreateView):
model = Colegio
form_class = ColegioForm
template_name = 'carga_horaria/colegio/nuevo_colegio.html'
success_url = reverse_lazy('carga-horaria:colegios')
# success_message = u"Nuevo periodo %(nombre)s creado satisfactoriamente."
# error_message = "Revise que todos los campos del formulario hayan sido validados correctamente."
def form_valid(self, form):
colegio = form.save(commit=False)
colegio.periode = self.request.session.get('periodo', 2020)
colegio.save()
return redirect(reverse('carga-horaria:colegios'))
class ColegioUpdateView(LoginRequiredMixin, UpdateView):
model = Colegio
form_class = ColegioForm
template_name = 'carga_horaria/colegio/editar_colegio.html'
def get_success_url(self):
return reverse(
'carga-horaria:colegio',
kwargs={
'pk': self.object.pk,
}
)
class ColegioDeleteView(LoginRequiredMixin, DeleteView):
model = Colegio
success_url = reverse_lazy('carga-horaria:colegios')
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
"""
Fin Crud Colegios
"""
"""
Comienzo Crud Planes
"""
class PlanListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):
"""
Listado de planes
"""
model = Plan
lookup = 'colegio__pk'
template_name = 'carga_horaria/plan/listado_planes.html'
search_fields = ['nombre', 'nivel']
paginate_by = 10
ordering = ['-pk']
class PlanDetailView(LoginRequiredMixin, DetailView):
"""
Detalle de Plan
"""
model = Plan
template_name = 'carga_horaria/plan/detalle_plan.html'
class PlanCreateView(LoginRequiredMixin, CreateView):
model = Plan
form_class = PlanForm
template_name = 'carga_horaria/plan/nuevo_plan.html'
success_url = reverse_lazy('carga-horaria:planes')
# success_message = u"Nuevo periodo %(nombre)s creado satisfactoriamente."
# error_message = "Revise que todos los campos del formulario hayan sido validados correctamente."
def get_form_kwargs(self, *args, **kwargs):
kwargs = super(PlanCreateView, self).get_form_kwargs(*args, **kwargs)
kwargs.update({'user': self.request.user,
'colegio': self.request.session.get('colegio__pk', None)})
return kwargs
@login_required
def crear_desde_plantilla(request):
if request.method == 'POST':
form = PlantillaPlanForm(request.POST)
if form.is_valid():
plantilla = form.cleaned_data['plantilla']
nivel = form.cleaned_data['nivel']
colegio_pk = request.session.get('colegio__pk', None)
if colegio_pk:
colegio = Colegio.objects.get(pk=colegio_pk)
nuevo = Plan.objects.create(nivel=nivel, colegio=colegio)
else:
nuevo = Plan.objects.create(nivel=nivel)
for ab in plantilla.asignaturabase_set.all():
AsignaturaBase.objects.create(nombre=ab.nombre,
plan=nuevo,
horas_jec=ab.horas_jec,
horas_nec=ab.horas_nec)
return redirect('carga-horaria:planes')
else:
form = PlantillaPlanForm()
return render(request, 'carga_horaria/plantilla.html', {'form': form})
class PlanUpdateView(LoginRequiredMixin, UpdateView):
model = Plan
form_class = PlanForm
template_name = 'carga_horaria/plan/editar_plan.html'
def get_success_url(self):
return reverse(
'carga-horaria:plan',
kwargs={
'pk': self.object.pk,
}
)
class PlanDeleteView(LoginRequiredMixin, DeleteView):
model = Plan
success_url = reverse_lazy('carga-horaria:planes')
template_name = 'carga_horaria/plan/eliminar_plan.html'
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
"""
Fin Crud Planes
"""
@login_required
def asignatura_limpiar(request, pk, periodo_pk):
aa = get_object_or_404(Asignatura, pk=pk)
aa.asignacion_set.all().delete()
return redirect(reverse('carga-horaria:periodo', kwargs={'pk': periodo_pk}))
@login_required
def asignatura_dif(request, pk):
pp = get_object_or_404(Periodo, pk=pk)
if request.method == 'POST':
# check first if there are any candidates for merging
nombre = request.POST['asignatura']
colegio_pk = request.session.get('colegio__pk', None)
can_confirm = request.POST.get('can_confirm', False)
if colegio_pk and Asignatura.objects.filter(periodos__colegio=colegio_pk, nombre=nombre) and not can_confirm:
ax = Asignatura.objects.filter(periodos__colegio=colegio_pk, nombre=nombre).distinct()
return render(request, 'carga_horaria/asignatura/asignatura_dif_confirm.html', {'object': pp,
'candidatas': ax})
else:
aa = Asignatura.objects.create(nombre=request.POST['asignatura'],
diferenciada=True,
horas=6)
aa.periodos.add(pp)
return redirect('carga-horaria:periodo', pp.pk)
return render(request, 'carga_horaria/asignatura/asignatura_dif.html', {'object': pp})
@login_required
def asignatura_merge(request, pk, asignatura_pk):
pp = get_object_or_404(Periodo, pk=pk)
aa = get_object_or_404(Asignatura, pk=asignatura_pk)
aa.periodos.add(pp)
return redirect('carga-horaria:periodo', pk)
@login_required
def asignatura_maybe(request, pk):
pp = get_object_or_404(Periodo, pk=pk)
candidatas = Asignatura.objects.filter(periodos__colegio=pp.colegio, combinable=True).exclude(periodos__pk__in=[pk]).distinct()
if candidatas:
return render(request, 'carga_horaria/asignatura/asignatura_maybe.html', {'object': pp, 'candidatas': candidatas})
else:
return redirect('carga-horaria:asignatura__nuevo', pk)
@login_required
def asignar(request, pk, periodo_pk):
aa = get_object_or_404(Asignatura, pk=pk)
if request.method == 'POST':
form = AsignacionForm(request.POST, asignatura=aa, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))
if form.is_valid():
asignacion = form.save(commit=False)
asignacion.asignatura = aa
asignacion.save()
return redirect('carga-horaria:periodo', periodo_pk)
else:
form = AsignacionForm(user=request.user, colegio=request.session.get('colegio__pk', None))
return render(request, 'carga_horaria/asignar.html', {'object': aa,
'form': form})
@login_required
def asignar_fua(request, pk, tipo):
pp = get_object_or_404(Profesor, pk=pk)
tipo_display = dict(Asignacion.TIPO_CHOICES)[int(tipo)]
if request.method == 'POST':
form = AsignacionFUAForm(request.POST, profesor=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))
if form.is_valid():
asignacion = form.save(commit=False)
asignacion.profesor = pp
asignacion.tipo = tipo
asignacion.save()
return redirect('carga-horaria:profesor', pp.pk)
else:
form = AsignacionFUAForm(user=request.user, colegio=request.session.get('colegio__pk', None))
return render(request, 'carga_horaria/asignar_fua.html', {'object': pp,
'tipo': tipo_display,
'form': form})
@login_required
def asignar_no_aula_fua(request, pk, tipo):
pp = get_object_or_404(Profesor, pk=pk)
tipo_display = dict(AsignacionNoAula.TIPO_CHOICES)[int(tipo)]
if request.method == 'POST':
form = AsignacionNoAulaFUAForm(request.POST, profesor=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))
if form.is_valid():
asignacion = form.save(commit=False)
asignacion.profesor = pp
asignacion.tipo = tipo
if asignacion.horas == 0:
asignacion.horas = pp.horas_no_aula_disponibles
asignacion.save()
return redirect('carga-horaria:profesor', pp.pk)
else:
form = AsignacionNoAulaFUAForm(user=request.user, colegio=request.session.get('colegio__pk', None))
return render(request, 'carga_horaria/asignar_no_aula_fua.html', {'profesor': pp,
'tipo': tipo_display,
'form': form})
@login_required
def asignar_extra(request, pk):
pp = get_object_or_404(Profesor, pk=pk)
if request.method == 'POST':
form = AsignacionExtraForm(request.POST, profesor=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))
if form.is_valid():
asignacion = form.save(commit=False)
asignacion.profesor = pp
if asignacion.horas == 0:
asignacion.horas = pp.horas_no_lectivas_disponibles
asignacion.save()
return redirect('carga-horaria:profesor', pp.pk)
else:
form = AsignacionExtraForm(user=request.user, colegio=request.session.get('colegio__pk', None))
return render(request, 'carga_horaria/asignar_extra.html', {'profesor': pp,
'form': form})
@login_required
def asignar_no_aula(request, pk):
pp = get_object_or_404(Profesor, pk=pk)
if request.method == 'POST':
form = AsignacionNoAulaForm(request.POST, profesor=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))
if form.is_valid():
asignacion = form.save(commit=False)
asignacion.profesor = pp
if asignacion.horas == 0:
asignacion.horas = pp.horas_no_aula_disponibles
asignacion.save()
return redirect('carga-horaria:profesor', pp.pk)
else:
form = AsignacionNoAulaForm(user=request.user, colegio=request.session.get('colegio__pk', None))
return render(request, 'carga_horaria/asignar_no_aula.html', {'profesor': pp,
'form': form})
class AsignacionDeleteView(LoginRequiredMixin, DeleteView):
model = Asignacion
template_name = 'carga_horaria/periodo/eliminar_periodo.html'
def get_success_url(self):
return reverse('carga-horaria:profesor', kwargs={'pk': self.kwargs['profesor_pk']})
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
class AsignacionUpdateView(LoginRequiredMixin, UpdateView):
model = Asignacion
form_class = AsignacionUpdateForm
template_name = 'carga_horaria/asignar_update.html'
def get_success_url(self):
return reverse(
'carga-horaria:profesor',
kwargs={
'pk': self.object.profesor.pk,
}
)
class AsignacionExtraUpdateView(LoginRequiredMixin, UpdateView):
model = AsignacionExtra
form_class = AsignacionExtraUpdateForm
template_name = 'carga_horaria/asignar_extra.html'
def get_context_data(self, *args, **kwargs):
ctx = super(AsignacionExtraUpdateView, self).get_context_data(*args, **kwargs)
ctx['profesor'] = self.object.profesor
return ctx
def get_form_kwargs(self, *args, **kwargs):
pp = get_object_or_404(Profesor, pk=self.kwargs.get('profesor_pk'))
kwargs = super(AsignacionExtraUpdateView, self).get_form_kwargs(*args, **kwargs)
kwargs.update({'profesor': pp,
'user': self.request.user,
'colegio': self.request.session.get('colegio__pk', None)})
return kwargs
def form_valid(self, form):
asignacion = form.save(commit=False)
if asignacion.horas == 0:
asignacion_old = Asignacion.objects.get(pk=asignacion.pk)
asignacion.horas = asignacion.profesor.horas_no_lectivas_disponibles + float(asignacion_old.horas)
asignacion.save()
return redirect(self.get_success_url())
def get_success_url(self):
return reverse(
'carga-horaria:profesor',
kwargs={
'pk': self.object.profesor.pk,
}
)
class AsignacionExtraDeleteView(LoginRequiredMixin, DeleteView):
model = AsignacionExtra
template_name = 'carga_horaria/periodo/eliminar_periodo.html'
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
def get_success_url(self):
return reverse(
'carga-horaria:profesor',
kwargs={
'pk': self.object.profesor.pk,
}
)
class AsignacionNoAulaUpdateView(LoginRequiredMixin, UpdateView):
model = AsignacionNoAula
form_class = AsignacionNoAulaUpdateForm
template_name = 'carga_horaria/asignar_no_aula.html'
def form_valid(self, form):
asignacion = form.save(commit=False)
if asignacion.horas == 0:
asignacion_old = AsignacionNoAula.objects.get(pk=asignacion.pk)
asignacion.horas = asignacion.profesor.horas_no_aula_disponibles + asignacion_old.horas
asignacion.save()
return redirect(self.get_success_url())
def get_context_data(self, *args, **kwargs):
ctx = super(AsignacionNoAulaUpdateView, self).get_context_data(*args, **kwargs)
ctx['profesor'] = self.object.profesor
return ctx
def get_form_kwargs(self, *args, **kwargs):
pp = get_object_or_404(Profesor, pk=self.kwargs.get('profesor_pk'))
kwargs = super(AsignacionNoAulaUpdateView, self).get_form_kwargs(*args, **kwargs)
kwargs.update({'profesor': pp,
'user': self.request.user,
'colegio': self.request.session.get('colegio__pk', None)})
return kwargs
def get_success_url(self):
return reverse(
'carga-horaria:profesor',
kwargs={
'pk': self.object.profesor.pk,
}
)
class AsignacionNoAulaDeleteView(LoginRequiredMixin, DeleteView):
model = AsignacionNoAula
template_name = 'carga_horaria/periodo/eliminar_periodo.html'
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
def get_success_url(self):
return reverse(
'carga-horaria:profesor',
kwargs={
'pk': self.object.profesor.pk,
}
)
@login_required
def asignar_asistente(request, pk, tipo):
pp = get_object_or_404(Asistente, pk=pk)
tipo_display = dict(AsignacionAsistente.TIPO_CHOICES)[int(tipo)]
if request.method == 'POST':
form = AsignacionAsistenteForm(request.POST, asistente=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))
if form.is_valid():
asignacion = form.save(commit=False)
asignacion.asistente = pp
asignacion.tipo = tipo
# if asignacion.horas == 0:
# asignacion.horas = pp.horas_no_lectivas_disponibles
asignacion.save()
return redirect('carga-horaria:asistente', pp.pk)
else:
form = AsignacionAsistenteForm(user=request.user, colegio=request.session.get('colegio__pk', None))
return render(request, 'carga_horaria/asignar_asistente.html', {'asistente': pp,
'form': form})
class AsignacionAsistenteDeleteView(LoginRequiredMixin, DeleteView):
model = AsignacionAsistente
template_name = 'carga_horaria/periodo/eliminar_periodo.html'
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
def get_success_url(self):
return reverse(
'carga-horaria:asistente',
kwargs={
'pk': self.object.asistente.pk,
}
)
@login_required
def profesores_info(request):
output = io.BytesIO()
# Create a workbook and add a worksheet.
workbook = xlsxwriter.Workbook(output)
worksheet = workbook.add_worksheet('Profesores')
# Some data we want to write to the worksheet.
qs = get_for_user(request, Profesor.objects.all(), 'colegio__pk', request.user)
# Start from the first cell. Rows and columns are zero indexed.
row = 0
col = 0
# Iterate over the data and write it out row by row.
worksheet.write(0, 0, 'RUT')
worksheet.write(0, 1, 'Nombre Docente')
worksheet.write(0, 2, 'Dirección Docente')
worksheet.write(0, 3, 'Comuna')
worksheet.write(0, 4, 'Nacionalidad')
worksheet.write(0, 5, 'Teléfono')
worksheet.write(0, 6, 'Email personal')
worksheet.write(0, 7, 'Email institucional')
worksheet.write(0, 8, 'Estado civil')
worksheet.write(0, 9, 'Discapacidad')
worksheet.write(0, 10, 'Recibe pensión')
worksheet.write(0, 11, 'Adventista')
worksheet.write(0, 12, 'Fecha de Nacimiento')
worksheet.write(0, 13, 'Tipo de Contrato')
worksheet.write(0, 14, 'Cargo')
worksheet.write(0, 15, 'Fecha de Inicio Contrato')
worksheet.write(0, 16, 'Horas Contrato Propuestas')
worksheet.write(0, 17, 'Horas SBVG')
worksheet.write(0, 18, 'Horas SEP')
worksheet.write(0, 19, 'Horas PIE')
worksheet.write(0, 20, 'Horas Indefinidas Actual')
worksheet.write(0, 21, 'Horas Plazo Fijo Actual')
worksheet.write(0, 22, 'Horas Jornada Semanal')
worksheet.write(0, 23, 'Asignaciones Aula Plan')
worksheet.write(0, 24, 'Horas Aula PIE')
worksheet.write(0, 25, 'Horas Aula SEP')
worksheet.write(0, 26, 'Horas Aula Sostenedor')
worksheet.write(0, 27, 'Horas disponibles')
worksheet.write(0, 28, 'Asignación No Lectiva')
worksheet.write(0, 29, 'Horas no lectivas disponibles')
worksheet.write(0, 30, 'Asignación No Aula Normal')
worksheet.write(0, 31, 'Asignación No Aula PIE')
worksheet.write(0, 32, 'Asignación No Aula SEP')
worksheet.write(0, 33, 'Especialidad')
worksheet.write(0, 34, 'Profesor Jefe')
worksheet.write(0, 35, 'Fundación que lo contrata')
worksheet.write(0, 36, 'Colegio')
row = 1
for pp in qs:
worksheet.write(row, 0, pp.rut)
worksheet.write(row, 1, pp.nombre)
worksheet.write(row, 2, pp.direccion)
worksheet.write(row, 3, pp.persona.comuna)
worksheet.write(row, 4, pp.persona.nacionalidad)
worksheet.write(row, 5, pp.persona.telefono)
worksheet.write(row, 6, pp.persona.email_personal)
worksheet.write(row, 7, pp.persona.email_institucional)
worksheet.write(row, 8, pp.persona.get_estado_civil_display())
worksheet.write(row, 9, 'Sí' if pp.persona.discapacidad else 'No')
worksheet.write(row, 10, 'Sí' if pp.persona.recibe_pension else 'No')
worksheet.write(row, 11, 'Sí' if pp.persona.adventista else 'No')
worksheet.write(row, 12, pp.persona.fecha_nacimiento)
worksheet.write(row, 13, pp.get_tipo_display())
worksheet.write(row, 14, pp.get_cargo_display())
worksheet.write(row, 15, pp.fecha_inicio)
worksheet.write(row, 16, pp.horas_semanales_total)
worksheet.write(row, 17, pp.horas_sbvg_total)
worksheet.write(row, 18, pp.total_sep)
worksheet.write(row, 19, pp.total_pie)
worksheet.write(row, 20, pp.horas_indefinidas)
worksheet.write(row, 21, pp.horas_plazo_fijo)
worksheet.write(row, 22, pp.horas_semanales)
worksheet.write(row, 23, pp.horas_asignadas_plan)
worksheet.write(row, 24, pp.horas_asignadas_pie)
worksheet.write(row, 25, pp.horas_asignadas_sep)
worksheet.write(row, 26, pp.horas_asignadas_sostenedor)
worksheet.write(row, 27, pp.horas_disponibles)
worksheet.write(row, 28, pp.horas_no_lectivas_asignadas_anexo)
worksheet.write(row, 29, pp.horas_no_lectivas_disponibles)
worksheet.write(row, 30, pp.horas_no_aula_asignadas_ordinaria)
worksheet.write(row, 31, pp.horas_no_aula_asignadas_pie)
worksheet.write(row, 32, pp.horas_no_aula_asignadas_sep)
worksheet.write(row, 33, str(pp.especialidad))
worksheet.write(row, 34, pp.jefatura if pp.es_profesor_jefe else 'No')
worksheet.write(row, 35, str(pp.fundacion))
worksheet.write(row, 36, str(pp.colegio))
row += 1
workbook.close()
output.seek(0)
# Set up the Http response.
filename = 'profesores-info.xlsx'
response = HttpResponse(
output,
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
)
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
@login_required
def asistentes_info(request):
output = io.BytesIO()
# Create a workbook and add a worksheet.
workbook = xlsxwriter.Workbook(output)
worksheet = workbook.add_worksheet('Asistentes')
# Some data we want to write to the worksheet.
qs = get_for_user(request, Asistente.objects.all(), 'colegio__pk', request.user)
# Start from the first cell. Rows and columns are zero indexed.
row = 0
col = 0
# Iterate over the data and write it out row by row.
worksheet.write(0, 0, 'RUT')
worksheet.write(0, 1, 'Nombre Asistente')
worksheet.write(0, 2, 'Fecha de Nacimiento')
worksheet.write(0, 3, 'Nacionalidad')
worksheet.write(0, 4, 'Dirección')
worksheet.write(0, 5, 'Comuna')
worksheet.write(0, 6, 'Teléfono')
worksheet.write(0, 7, 'Email personal')
worksheet.write(0, 8, 'Email institucional')
worksheet.write(0, 9, 'Estado civil')
worksheet.write(0, 10, 'Adventista')
worksheet.write(0, 11, 'Discapacidad')
worksheet.write(0, 12, 'Recibe pensión')
worksheet.write(0, 13, 'Fecha de Inicio Contrato')
worksheet.write(0, 14, 'Horas Contrato')
worksheet.write(0, 15, 'Función')
worksheet.write(0, 16, 'SEP')
worksheet.write(0, 17, 'PIE')
worksheet.write(0, 18, 'Sostenedor')
worksheet.write(0, 19, 'Fundación que lo contrata')
worksheet.write(0, 20, 'Colegio')
row = 1
for pp in qs:
worksheet.write(row, 0, pp.rut)
worksheet.write(row, 1, pp.nombre)
worksheet.write(row, 2, pp.persona.fecha_nacimiento)
worksheet.write(row, 3, pp.persona.nacionalidad)
worksheet.write(row, 4, pp.persona.direccion)
worksheet.write(row, 5, pp.persona.comuna)
worksheet.write(row, 6, pp.persona.telefono)
worksheet.write(row, 7, pp.persona.email_personal)
worksheet.write(row, 8, pp.persona.email_institucional)
worksheet.write(row, 9, pp.persona.get_estado_civil_display())
worksheet.write(row, 10, 'Sí' if pp.persona.adventista else 'No')
worksheet.write(row, 11, 'Sí' if pp.persona.discapacidad else 'No')
worksheet.write(row, 12, 'Sí' if pp.persona.recibe_pension else 'No')
worksheet.write(row, 13, pp.fecha_inicio)
worksheet.write(row, 14, pp.horas)
worksheet.write(row, 15, pp.funcion)
worksheet.write(row, 16, pp.horas_sep)
worksheet.write(row, 17, pp.horas_pie)
worksheet.write(row, 18, pp.horas_sostenedor)
worksheet.write(row, 19, str(pp.fundacion))
worksheet.write(row, 20, str(pp.colegio))
row += 1
workbook.close()
output.seek(0)
# Set up the Http response.
filename = 'asistentes-info.xlsx'
response = HttpResponse(
output,
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
)
response['Content-Disposition'] = 'attachment; filename=%s' % filename
return response
|
normal
|
{
"blob_id": "a9ebd323d4b91c7e6a7e7179329ae80e22774927",
"index": 4843,
"step-1": "<mask token>\n\n\nclass PeriodoUpdateView(LoginRequiredMixin, UpdateView):\n <mask token>\n <mask token>\n <mask token>\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PeriodoUpdateView, self).get_form_kwargs(*args, **kwargs\n )\n kwargs.update({'user': self.request.user, 'colegio': self.request.\n session.get('colegio__pk', None)})\n return kwargs\n <mask token>\n\n\nclass PeriodoDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n model = Periodo\n success_url = reverse_lazy('carga-horaria:periodos')\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def test_func(self):\n return self.request.user.is_superuser\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n<mask token>\n\n\nclass ColegioListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de periodos\n \"\"\"\n model = Colegio\n lookup = 'pk'\n template_name = 'carga_horaria/colegio/listado_colegios.html'\n search_fields = ['nombre', 'jec']\n paginate_by = 6\n\n\nclass ColegioDetailView(LoginRequiredMixin, ObjPermissionRequiredMixin,\n DetailView):\n \"\"\"\n Detalle de Colegio\n \"\"\"\n model = Colegio\n permission = 'carga_horaria.change_colegio'\n template_name = 'carga_horaria/colegio/detalle_colegio.html'\n\n\nclass ColegioCreateView(LoginRequiredMixin, CreateView):\n model = Colegio\n form_class = ColegioForm\n template_name = 'carga_horaria/colegio/nuevo_colegio.html'\n success_url = reverse_lazy('carga-horaria:colegios')\n\n def form_valid(self, form):\n colegio = form.save(commit=False)\n colegio.periode = self.request.session.get('periodo', 2020)\n colegio.save()\n return redirect(reverse('carga-horaria:colegios'))\n\n\nclass ColegioUpdateView(LoginRequiredMixin, UpdateView):\n model = Colegio\n form_class = ColegioForm\n template_name = 'carga_horaria/colegio/editar_colegio.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:colegio', kwargs={'pk': self.object.pk})\n\n\nclass ColegioDeleteView(LoginRequiredMixin, DeleteView):\n model = Colegio\n success_url = reverse_lazy('carga-horaria:colegios')\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n<mask token>\n\n\nclass PlanListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de planes\n \"\"\"\n model = Plan\n lookup = 'colegio__pk'\n template_name = 'carga_horaria/plan/listado_planes.html'\n search_fields = ['nombre', 'nivel']\n paginate_by = 10\n ordering = ['-pk']\n\n\nclass PlanDetailView(LoginRequiredMixin, DetailView):\n \"\"\"\n Detalle de Plan\n \"\"\"\n model = Plan\n template_name = 'carga_horaria/plan/detalle_plan.html'\n\n\nclass PlanCreateView(LoginRequiredMixin, CreateView):\n model = Plan\n form_class = PlanForm\n template_name = 'carga_horaria/plan/nuevo_plan.html'\n success_url = reverse_lazy('carga-horaria:planes')\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PlanCreateView, self).get_form_kwargs(*args, **kwargs)\n kwargs.update({'user': self.request.user, 'colegio': self.request.\n session.get('colegio__pk', None)})\n return kwargs\n\n\n<mask token>\n\n\nclass PlanUpdateView(LoginRequiredMixin, UpdateView):\n model = Plan\n form_class = PlanForm\n template_name = 'carga_horaria/plan/editar_plan.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:plan', kwargs={'pk': self.object.pk})\n\n\nclass PlanDeleteView(LoginRequiredMixin, DeleteView):\n model = Plan\n success_url = reverse_lazy('carga-horaria:planes')\n template_name = 'carga_horaria/plan/eliminar_plan.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n<mask token>\n\n\nclass AsignacionDeleteView(LoginRequiredMixin, DeleteView):\n model = Asignacion\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.kwargs[\n 'profesor_pk']})\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\nclass AsignacionUpdateView(LoginRequiredMixin, UpdateView):\n model = Asignacion\n form_class = AsignacionUpdateForm\n template_name = 'carga_horaria/asignar_update.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\nclass AsignacionExtraUpdateView(LoginRequiredMixin, UpdateView):\n model = AsignacionExtra\n form_class = AsignacionExtraUpdateForm\n template_name = 'carga_horaria/asignar_extra.html'\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(AsignacionExtraUpdateView, self).get_context_data(*args,\n **kwargs)\n ctx['profesor'] = self.object.profesor\n return ctx\n\n def get_form_kwargs(self, *args, **kwargs):\n pp = get_object_or_404(Profesor, pk=self.kwargs.get('profesor_pk'))\n kwargs = super(AsignacionExtraUpdateView, self).get_form_kwargs(*\n args, **kwargs)\n kwargs.update({'profesor': pp, 'user': self.request.user, 'colegio':\n self.request.session.get('colegio__pk', None)})\n return kwargs\n\n def form_valid(self, form):\n asignacion = form.save(commit=False)\n if asignacion.horas == 0:\n asignacion_old = Asignacion.objects.get(pk=asignacion.pk)\n asignacion.horas = (asignacion.profesor.\n horas_no_lectivas_disponibles + float(asignacion_old.horas))\n asignacion.save()\n return redirect(self.get_success_url())\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\nclass AsignacionExtraDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignacionExtra\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\nclass AsignacionNoAulaUpdateView(LoginRequiredMixin, UpdateView):\n model = AsignacionNoAula\n form_class = AsignacionNoAulaUpdateForm\n template_name = 'carga_horaria/asignar_no_aula.html'\n\n def form_valid(self, form):\n asignacion = form.save(commit=False)\n if asignacion.horas == 0:\n asignacion_old = AsignacionNoAula.objects.get(pk=asignacion.pk)\n asignacion.horas = (asignacion.profesor.\n horas_no_aula_disponibles + asignacion_old.horas)\n asignacion.save()\n return redirect(self.get_success_url())\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(AsignacionNoAulaUpdateView, self).get_context_data(*\n args, **kwargs)\n ctx['profesor'] = self.object.profesor\n return ctx\n\n def get_form_kwargs(self, *args, **kwargs):\n pp = get_object_or_404(Profesor, pk=self.kwargs.get('profesor_pk'))\n kwargs = super(AsignacionNoAulaUpdateView, self).get_form_kwargs(*\n args, **kwargs)\n kwargs.update({'profesor': pp, 'user': self.request.user, 'colegio':\n self.request.session.get('colegio__pk', None)})\n return kwargs\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\nclass AsignacionNoAulaDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignacionNoAula\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\n<mask token>\n\n\nclass AsignacionAsistenteDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignacionAsistente\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('carga-horaria:asistente', kwargs={'pk': self.object\n .asistente.pk})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PeriodoListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass PeriodoDetailView(LoginRequiredMixin, DetailView):\n \"\"\"\n Detalle de Periodo\n \"\"\"\n model = Periodo\n template_name = 'carga_horaria/periodo/detalle_periodo.html'\n\n\nclass PeriodoCreateView(LoginRequiredMixin, CreateView):\n model = Periodo\n form_class = PeriodoForm\n template_name = 'carga_horaria/periodo/nuevo_periodo.html'\n success_url = reverse_lazy('carga-horaria:periodos')\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PeriodoCreateView, self).get_form_kwargs(*args, **kwargs\n )\n kwargs.update({'user': self.request.user, 'colegio': self.request.\n session.get('colegio__pk', None)})\n return kwargs\n\n\nclass PeriodoUpdateView(LoginRequiredMixin, UpdateView):\n model = Periodo\n form_class = PeriodoForm\n template_name = 'carga_horaria/periodo/editar_periodo.html'\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PeriodoUpdateView, self).get_form_kwargs(*args, **kwargs\n )\n kwargs.update({'user': self.request.user, 'colegio': self.request.\n session.get('colegio__pk', None)})\n return kwargs\n\n def get_success_url(self):\n return reverse('carga-horaria:periodo', kwargs={'pk': self.object.pk})\n\n\nclass PeriodoDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n model = Periodo\n success_url = reverse_lazy('carga-horaria:periodos')\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def test_func(self):\n return self.request.user.is_superuser\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n<mask token>\n\n\nclass ColegioListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de periodos\n \"\"\"\n model = Colegio\n lookup = 'pk'\n template_name = 'carga_horaria/colegio/listado_colegios.html'\n search_fields = ['nombre', 'jec']\n paginate_by = 6\n\n\nclass ColegioDetailView(LoginRequiredMixin, ObjPermissionRequiredMixin,\n DetailView):\n \"\"\"\n Detalle de Colegio\n \"\"\"\n model = Colegio\n permission = 'carga_horaria.change_colegio'\n template_name = 'carga_horaria/colegio/detalle_colegio.html'\n\n\nclass ColegioCreateView(LoginRequiredMixin, CreateView):\n model = Colegio\n form_class = ColegioForm\n template_name = 'carga_horaria/colegio/nuevo_colegio.html'\n success_url = reverse_lazy('carga-horaria:colegios')\n\n def form_valid(self, form):\n colegio = form.save(commit=False)\n colegio.periode = self.request.session.get('periodo', 2020)\n colegio.save()\n return redirect(reverse('carga-horaria:colegios'))\n\n\nclass ColegioUpdateView(LoginRequiredMixin, UpdateView):\n model = Colegio\n form_class = ColegioForm\n template_name = 'carga_horaria/colegio/editar_colegio.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:colegio', kwargs={'pk': self.object.pk})\n\n\nclass ColegioDeleteView(LoginRequiredMixin, DeleteView):\n model = Colegio\n success_url = reverse_lazy('carga-horaria:colegios')\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n<mask token>\n\n\nclass PlanListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de planes\n \"\"\"\n model = Plan\n lookup = 'colegio__pk'\n template_name = 'carga_horaria/plan/listado_planes.html'\n search_fields = ['nombre', 'nivel']\n paginate_by = 10\n ordering = ['-pk']\n\n\nclass PlanDetailView(LoginRequiredMixin, DetailView):\n \"\"\"\n Detalle de Plan\n \"\"\"\n model = Plan\n template_name = 'carga_horaria/plan/detalle_plan.html'\n\n\nclass PlanCreateView(LoginRequiredMixin, CreateView):\n model = Plan\n form_class = PlanForm\n template_name = 'carga_horaria/plan/nuevo_plan.html'\n success_url = reverse_lazy('carga-horaria:planes')\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PlanCreateView, self).get_form_kwargs(*args, **kwargs)\n kwargs.update({'user': self.request.user, 'colegio': self.request.\n session.get('colegio__pk', None)})\n return kwargs\n\n\n<mask token>\n\n\nclass PlanUpdateView(LoginRequiredMixin, UpdateView):\n model = Plan\n form_class = PlanForm\n template_name = 'carga_horaria/plan/editar_plan.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:plan', kwargs={'pk': self.object.pk})\n\n\nclass PlanDeleteView(LoginRequiredMixin, DeleteView):\n model = Plan\n success_url = reverse_lazy('carga-horaria:planes')\n template_name = 'carga_horaria/plan/eliminar_plan.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n<mask token>\n\n\nclass AsignacionDeleteView(LoginRequiredMixin, DeleteView):\n model = Asignacion\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.kwargs[\n 'profesor_pk']})\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\nclass AsignacionUpdateView(LoginRequiredMixin, UpdateView):\n model = Asignacion\n form_class = AsignacionUpdateForm\n template_name = 'carga_horaria/asignar_update.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\nclass AsignacionExtraUpdateView(LoginRequiredMixin, UpdateView):\n model = AsignacionExtra\n form_class = AsignacionExtraUpdateForm\n template_name = 'carga_horaria/asignar_extra.html'\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(AsignacionExtraUpdateView, self).get_context_data(*args,\n **kwargs)\n ctx['profesor'] = self.object.profesor\n return ctx\n\n def get_form_kwargs(self, *args, **kwargs):\n pp = get_object_or_404(Profesor, pk=self.kwargs.get('profesor_pk'))\n kwargs = super(AsignacionExtraUpdateView, self).get_form_kwargs(*\n args, **kwargs)\n kwargs.update({'profesor': pp, 'user': self.request.user, 'colegio':\n self.request.session.get('colegio__pk', None)})\n return kwargs\n\n def form_valid(self, form):\n asignacion = form.save(commit=False)\n if asignacion.horas == 0:\n asignacion_old = Asignacion.objects.get(pk=asignacion.pk)\n asignacion.horas = (asignacion.profesor.\n horas_no_lectivas_disponibles + float(asignacion_old.horas))\n asignacion.save()\n return redirect(self.get_success_url())\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\nclass AsignacionExtraDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignacionExtra\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\nclass AsignacionNoAulaUpdateView(LoginRequiredMixin, UpdateView):\n model = AsignacionNoAula\n form_class = AsignacionNoAulaUpdateForm\n template_name = 'carga_horaria/asignar_no_aula.html'\n\n def form_valid(self, form):\n asignacion = form.save(commit=False)\n if asignacion.horas == 0:\n asignacion_old = AsignacionNoAula.objects.get(pk=asignacion.pk)\n asignacion.horas = (asignacion.profesor.\n horas_no_aula_disponibles + asignacion_old.horas)\n asignacion.save()\n return redirect(self.get_success_url())\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(AsignacionNoAulaUpdateView, self).get_context_data(*\n args, **kwargs)\n ctx['profesor'] = self.object.profesor\n return ctx\n\n def get_form_kwargs(self, *args, **kwargs):\n pp = get_object_or_404(Profesor, pk=self.kwargs.get('profesor_pk'))\n kwargs = super(AsignacionNoAulaUpdateView, self).get_form_kwargs(*\n args, **kwargs)\n kwargs.update({'profesor': pp, 'user': self.request.user, 'colegio':\n self.request.session.get('colegio__pk', None)})\n return kwargs\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\nclass AsignacionNoAulaDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignacionNoAula\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\n<mask token>\n\n\nclass AsignacionAsistenteDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignacionAsistente\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('carga-horaria:asistente', kwargs={'pk': self.object\n .asistente.pk})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@login_required\ndef assign(request):\n if not request.user.is_superuser:\n raise Http404\n year = request.session.get('periodo', 2020)\n if request.method == 'POST':\n form = AssignPermForm(request.POST, year=year)\n if form.is_valid():\n user = form.cleaned_data['usuario']\n remove_perm('carga_horaria.change_colegio', user,\n get_objects_for_user(user, 'carga_horaria.change_colegio').\n filter(periode=year))\n for c in form.cleaned_data['colegios']:\n assign_perm('change_colegio', user, c)\n form = AssignPermForm(year=year)\n return render(request, 'carga_horaria/assign.html', {'form': form})\n\n\n<mask token>\n\n\n@login_required\ndef switch(request, pk=None):\n if pk:\n colegio = get_object_or_404(Colegio, pk=pk)\n request.session['colegio__pk'] = colegio.pk\n request.session['colegio__nombre'] = colegio.nombre\n return redirect('carga-horaria:home')\n colegios = get_objects_for_user(request.user,\n 'carga_horaria.change_colegio', Colegio.objects.filter(periode=\n request.session.get('periodo', 2020)))\n return render(request, 'carga_horaria/switch.html', {'colegios': colegios})\n\n\n@login_required\ndef clear(request):\n del request.session['colegio__pk']\n del request.session['colegio__nombre']\n return redirect('carga-horaria:home')\n\n\n<mask token>\n\n\n@login_required\ndef anexo(request, pk):\n p = get_object_or_404(Profesor, pk=pk)\n colegio = Colegio.objects.get(pk=request.session['colegio__pk'])\n response = PDFTemplateResponse(request=request, template=\n 'carga_horaria/profesor/anexo_profesor.html', filename='anexo1.pdf',\n context={'profesor': p, 'colegio': colegio, 'periodo': request.\n session.get('periodo', 2020)}, show_content_in_browser=settings.DEBUG)\n return response\n\n\n<mask token>\n\n\n@login_required\ndef anexo_asistente(request, pk):\n p = get_object_or_404(Asistente, pk=pk)\n colegio = Colegio.objects.get(pk=request.session['colegio__pk'])\n response = PDFTemplateResponse(request=request, template=\n 'carga_horaria/asistente/anexo_asistente.html', filename=\n 'anexo1.pdf', context={'profesor': p, 'colegio': colegio, 'periodo':\n request.session.get('periodo', 2020)}, show_content_in_browser=\n settings.DEBUG)\n return response\n\n\n<mask token>\n\n\n@login_required\ndef periodo_pdf(request, pk):\n periodo = get_object_or_404(Periodo, pk=pk)\n response = PDFTemplateResponse(request=request, template=\n 'carga_horaria/periodo/periodo_pdf.html', filename=\n 'carga_horaria.pdf', context={'object': periodo},\n show_content_in_browser=settings.DEBUG)\n return response\n\n\n@login_required\ndef plan_refresh(request, pk):\n plan = get_object_or_404(Plan, pk=pk)\n plan.refresh_asignaturas()\n messages.success(request,\n 'Se han actualizado los cursos asociados al plan ID: {}'.format(\n plan.pk))\n return redirect('carga-horaria:planes')\n\n\n<mask token>\n\n\nclass PeriodoListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de periodos\n \"\"\"\n model = Periodo\n lookup = 'colegio__pk'\n template_name = 'carga_horaria/periodo/listado_periodos.html'\n search_fields = ['nombre', 'colegio']\n paginate_by = 10\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(PeriodoListView, self).get_context_data(*args, **kwargs)\n ox = ctx['object_list']\n ordering = {str(value): index for index, value in enumerate(Nivel)}\n ctx['object_list'] = sorted(ox, key=lambda x: ordering['Nivel.' + x\n .plan.nivel])\n ctx['levels'] = [(tag.name, tag.value) for tag in Nivel]\n ctx['nivel_actual'] = self.request.GET.get('nivel')\n return ctx\n\n def get_queryset(self):\n qs = super().get_queryset()\n nivel = self.request.GET.get('nivel')\n if nivel:\n qs = qs.filter(plan__nivel=nivel)\n return qs\n\n\nclass PeriodoDetailView(LoginRequiredMixin, DetailView):\n \"\"\"\n Detalle de Periodo\n \"\"\"\n model = Periodo\n template_name = 'carga_horaria/periodo/detalle_periodo.html'\n\n\nclass PeriodoCreateView(LoginRequiredMixin, CreateView):\n model = Periodo\n form_class = PeriodoForm\n template_name = 'carga_horaria/periodo/nuevo_periodo.html'\n success_url = reverse_lazy('carga-horaria:periodos')\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PeriodoCreateView, self).get_form_kwargs(*args, **kwargs\n )\n kwargs.update({'user': self.request.user, 'colegio': self.request.\n session.get('colegio__pk', None)})\n return kwargs\n\n\nclass PeriodoUpdateView(LoginRequiredMixin, UpdateView):\n model = Periodo\n form_class = PeriodoForm\n template_name = 'carga_horaria/periodo/editar_periodo.html'\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PeriodoUpdateView, self).get_form_kwargs(*args, **kwargs\n )\n kwargs.update({'user': self.request.user, 'colegio': self.request.\n session.get('colegio__pk', None)})\n return kwargs\n\n def get_success_url(self):\n return reverse('carga-horaria:periodo', kwargs={'pk': self.object.pk})\n\n\nclass PeriodoDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n model = Periodo\n success_url = reverse_lazy('carga-horaria:periodos')\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def test_func(self):\n return self.request.user.is_superuser\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n<mask token>\n\n\nclass ColegioListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de periodos\n \"\"\"\n model = Colegio\n lookup = 'pk'\n template_name = 'carga_horaria/colegio/listado_colegios.html'\n search_fields = ['nombre', 'jec']\n paginate_by = 6\n\n\nclass ColegioDetailView(LoginRequiredMixin, ObjPermissionRequiredMixin,\n DetailView):\n \"\"\"\n Detalle de Colegio\n \"\"\"\n model = Colegio\n permission = 'carga_horaria.change_colegio'\n template_name = 'carga_horaria/colegio/detalle_colegio.html'\n\n\nclass ColegioCreateView(LoginRequiredMixin, CreateView):\n model = Colegio\n form_class = ColegioForm\n template_name = 'carga_horaria/colegio/nuevo_colegio.html'\n success_url = reverse_lazy('carga-horaria:colegios')\n\n def form_valid(self, form):\n colegio = form.save(commit=False)\n colegio.periode = self.request.session.get('periodo', 2020)\n colegio.save()\n return redirect(reverse('carga-horaria:colegios'))\n\n\nclass ColegioUpdateView(LoginRequiredMixin, UpdateView):\n model = Colegio\n form_class = ColegioForm\n template_name = 'carga_horaria/colegio/editar_colegio.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:colegio', kwargs={'pk': self.object.pk})\n\n\nclass ColegioDeleteView(LoginRequiredMixin, DeleteView):\n model = Colegio\n success_url = reverse_lazy('carga-horaria:colegios')\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n<mask token>\n\n\nclass PlanListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de planes\n \"\"\"\n model = Plan\n lookup = 'colegio__pk'\n template_name = 'carga_horaria/plan/listado_planes.html'\n search_fields = ['nombre', 'nivel']\n paginate_by = 10\n ordering = ['-pk']\n\n\nclass PlanDetailView(LoginRequiredMixin, DetailView):\n \"\"\"\n Detalle de Plan\n \"\"\"\n model = Plan\n template_name = 'carga_horaria/plan/detalle_plan.html'\n\n\nclass PlanCreateView(LoginRequiredMixin, CreateView):\n model = Plan\n form_class = PlanForm\n template_name = 'carga_horaria/plan/nuevo_plan.html'\n success_url = reverse_lazy('carga-horaria:planes')\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PlanCreateView, self).get_form_kwargs(*args, **kwargs)\n kwargs.update({'user': self.request.user, 'colegio': self.request.\n session.get('colegio__pk', None)})\n return kwargs\n\n\n@login_required\ndef crear_desde_plantilla(request):\n if request.method == 'POST':\n form = PlantillaPlanForm(request.POST)\n if form.is_valid():\n plantilla = form.cleaned_data['plantilla']\n nivel = form.cleaned_data['nivel']\n colegio_pk = request.session.get('colegio__pk', None)\n if colegio_pk:\n colegio = Colegio.objects.get(pk=colegio_pk)\n nuevo = Plan.objects.create(nivel=nivel, colegio=colegio)\n else:\n nuevo = Plan.objects.create(nivel=nivel)\n for ab in plantilla.asignaturabase_set.all():\n AsignaturaBase.objects.create(nombre=ab.nombre, plan=nuevo,\n horas_jec=ab.horas_jec, horas_nec=ab.horas_nec)\n return redirect('carga-horaria:planes')\n else:\n form = PlantillaPlanForm()\n return render(request, 'carga_horaria/plantilla.html', {'form': form})\n\n\nclass PlanUpdateView(LoginRequiredMixin, UpdateView):\n model = Plan\n form_class = PlanForm\n template_name = 'carga_horaria/plan/editar_plan.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:plan', kwargs={'pk': self.object.pk})\n\n\nclass PlanDeleteView(LoginRequiredMixin, DeleteView):\n model = Plan\n success_url = reverse_lazy('carga-horaria:planes')\n template_name = 'carga_horaria/plan/eliminar_plan.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n<mask token>\n\n\n@login_required\ndef asignatura_limpiar(request, pk, periodo_pk):\n aa = get_object_or_404(Asignatura, pk=pk)\n aa.asignacion_set.all().delete()\n return redirect(reverse('carga-horaria:periodo', kwargs={'pk': periodo_pk})\n )\n\n\n<mask token>\n\n\n@login_required\ndef asignatura_maybe(request, pk):\n pp = get_object_or_404(Periodo, pk=pk)\n candidatas = Asignatura.objects.filter(periodos__colegio=pp.colegio,\n combinable=True).exclude(periodos__pk__in=[pk]).distinct()\n if candidatas:\n return render(request,\n 'carga_horaria/asignatura/asignatura_maybe.html', {'object': pp,\n 'candidatas': candidatas})\n else:\n return redirect('carga-horaria:asignatura__nuevo', pk)\n\n\n<mask token>\n\n\n@login_required\ndef asignar_fua(request, pk, tipo):\n pp = get_object_or_404(Profesor, pk=pk)\n tipo_display = dict(Asignacion.TIPO_CHOICES)[int(tipo)]\n if request.method == 'POST':\n form = AsignacionFUAForm(request.POST, profesor=pp, user=request.\n user, colegio=request.session.get('colegio__pk', None), periodo\n =request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.profesor = pp\n asignacion.tipo = tipo\n asignacion.save()\n return redirect('carga-horaria:profesor', pp.pk)\n else:\n form = AsignacionFUAForm(user=request.user, colegio=request.session\n .get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_fua.html', {'object': pp,\n 'tipo': tipo_display, 'form': form})\n\n\n@login_required\ndef asignar_no_aula_fua(request, pk, tipo):\n pp = get_object_or_404(Profesor, pk=pk)\n tipo_display = dict(AsignacionNoAula.TIPO_CHOICES)[int(tipo)]\n if request.method == 'POST':\n form = AsignacionNoAulaFUAForm(request.POST, profesor=pp, user=\n request.user, colegio=request.session.get('colegio__pk', None),\n periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.profesor = pp\n asignacion.tipo = tipo\n if asignacion.horas == 0:\n asignacion.horas = pp.horas_no_aula_disponibles\n asignacion.save()\n return redirect('carga-horaria:profesor', pp.pk)\n else:\n form = AsignacionNoAulaFUAForm(user=request.user, colegio=request.\n session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_no_aula_fua.html', {\n 'profesor': pp, 'tipo': tipo_display, 'form': form})\n\n\n<mask token>\n\n\n@login_required\ndef asignar_no_aula(request, pk):\n pp = get_object_or_404(Profesor, pk=pk)\n if request.method == 'POST':\n form = AsignacionNoAulaForm(request.POST, profesor=pp, user=request\n .user, colegio=request.session.get('colegio__pk', None),\n periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.profesor = pp\n if asignacion.horas == 0:\n asignacion.horas = pp.horas_no_aula_disponibles\n asignacion.save()\n return redirect('carga-horaria:profesor', pp.pk)\n else:\n form = AsignacionNoAulaForm(user=request.user, colegio=request.\n session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_no_aula.html', {\n 'profesor': pp, 'form': form})\n\n\nclass AsignacionDeleteView(LoginRequiredMixin, DeleteView):\n model = Asignacion\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.kwargs[\n 'profesor_pk']})\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\nclass AsignacionUpdateView(LoginRequiredMixin, UpdateView):\n model = Asignacion\n form_class = AsignacionUpdateForm\n template_name = 'carga_horaria/asignar_update.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\nclass AsignacionExtraUpdateView(LoginRequiredMixin, UpdateView):\n model = AsignacionExtra\n form_class = AsignacionExtraUpdateForm\n template_name = 'carga_horaria/asignar_extra.html'\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(AsignacionExtraUpdateView, self).get_context_data(*args,\n **kwargs)\n ctx['profesor'] = self.object.profesor\n return ctx\n\n def get_form_kwargs(self, *args, **kwargs):\n pp = get_object_or_404(Profesor, pk=self.kwargs.get('profesor_pk'))\n kwargs = super(AsignacionExtraUpdateView, self).get_form_kwargs(*\n args, **kwargs)\n kwargs.update({'profesor': pp, 'user': self.request.user, 'colegio':\n self.request.session.get('colegio__pk', None)})\n return kwargs\n\n def form_valid(self, form):\n asignacion = form.save(commit=False)\n if asignacion.horas == 0:\n asignacion_old = Asignacion.objects.get(pk=asignacion.pk)\n asignacion.horas = (asignacion.profesor.\n horas_no_lectivas_disponibles + float(asignacion_old.horas))\n asignacion.save()\n return redirect(self.get_success_url())\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\nclass AsignacionExtraDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignacionExtra\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\nclass AsignacionNoAulaUpdateView(LoginRequiredMixin, UpdateView):\n model = AsignacionNoAula\n form_class = AsignacionNoAulaUpdateForm\n template_name = 'carga_horaria/asignar_no_aula.html'\n\n def form_valid(self, form):\n asignacion = form.save(commit=False)\n if asignacion.horas == 0:\n asignacion_old = AsignacionNoAula.objects.get(pk=asignacion.pk)\n asignacion.horas = (asignacion.profesor.\n horas_no_aula_disponibles + asignacion_old.horas)\n asignacion.save()\n return redirect(self.get_success_url())\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(AsignacionNoAulaUpdateView, self).get_context_data(*\n args, **kwargs)\n ctx['profesor'] = self.object.profesor\n return ctx\n\n def get_form_kwargs(self, *args, **kwargs):\n pp = get_object_or_404(Profesor, pk=self.kwargs.get('profesor_pk'))\n kwargs = super(AsignacionNoAulaUpdateView, self).get_form_kwargs(*\n args, **kwargs)\n kwargs.update({'profesor': pp, 'user': self.request.user, 'colegio':\n self.request.session.get('colegio__pk', None)})\n return kwargs\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\nclass AsignacionNoAulaDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignacionNoAula\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\n@login_required\ndef asignar_asistente(request, pk, tipo):\n pp = get_object_or_404(Asistente, pk=pk)\n tipo_display = dict(AsignacionAsistente.TIPO_CHOICES)[int(tipo)]\n if request.method == 'POST':\n form = AsignacionAsistenteForm(request.POST, asistente=pp, user=\n request.user, colegio=request.session.get('colegio__pk', None),\n periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.asistente = pp\n asignacion.tipo = tipo\n asignacion.save()\n return redirect('carga-horaria:asistente', pp.pk)\n else:\n form = AsignacionAsistenteForm(user=request.user, colegio=request.\n session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_asistente.html', {\n 'asistente': pp, 'form': form})\n\n\nclass AsignacionAsistenteDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignacionAsistente\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('carga-horaria:asistente', kwargs={'pk': self.object\n .asistente.pk})\n\n\n<mask token>\n\n\n@login_required\ndef asistentes_info(request):\n output = io.BytesIO()\n workbook = xlsxwriter.Workbook(output)\n worksheet = workbook.add_worksheet('Asistentes')\n qs = get_for_user(request, Asistente.objects.all(), 'colegio__pk',\n request.user)\n row = 0\n col = 0\n worksheet.write(0, 0, 'RUT')\n worksheet.write(0, 1, 'Nombre Asistente')\n worksheet.write(0, 2, 'Fecha de Nacimiento')\n worksheet.write(0, 3, 'Nacionalidad')\n worksheet.write(0, 4, 'Dirección')\n worksheet.write(0, 5, 'Comuna')\n worksheet.write(0, 6, 'Teléfono')\n worksheet.write(0, 7, 'Email personal')\n worksheet.write(0, 8, 'Email institucional')\n worksheet.write(0, 9, 'Estado civil')\n worksheet.write(0, 10, 'Adventista')\n worksheet.write(0, 11, 'Discapacidad')\n worksheet.write(0, 12, 'Recibe pensión')\n worksheet.write(0, 13, 'Fecha de Inicio Contrato')\n worksheet.write(0, 14, 'Horas Contrato')\n worksheet.write(0, 15, 'Función')\n worksheet.write(0, 16, 'SEP')\n worksheet.write(0, 17, 'PIE')\n worksheet.write(0, 18, 'Sostenedor')\n worksheet.write(0, 19, 'Fundación que lo contrata')\n worksheet.write(0, 20, 'Colegio')\n row = 1\n for pp in qs:\n worksheet.write(row, 0, pp.rut)\n worksheet.write(row, 1, pp.nombre)\n worksheet.write(row, 2, pp.persona.fecha_nacimiento)\n worksheet.write(row, 3, pp.persona.nacionalidad)\n worksheet.write(row, 4, pp.persona.direccion)\n worksheet.write(row, 5, pp.persona.comuna)\n worksheet.write(row, 6, pp.persona.telefono)\n worksheet.write(row, 7, pp.persona.email_personal)\n worksheet.write(row, 8, pp.persona.email_institucional)\n worksheet.write(row, 9, pp.persona.get_estado_civil_display())\n worksheet.write(row, 10, 'Sí' if pp.persona.adventista else 'No')\n worksheet.write(row, 11, 'Sí' if pp.persona.discapacidad else 'No')\n worksheet.write(row, 12, 'Sí' if pp.persona.recibe_pension else 'No')\n worksheet.write(row, 13, pp.fecha_inicio)\n worksheet.write(row, 14, pp.horas)\n worksheet.write(row, 15, pp.funcion)\n worksheet.write(row, 16, pp.horas_sep)\n worksheet.write(row, 17, pp.horas_pie)\n worksheet.write(row, 18, pp.horas_sostenedor)\n worksheet.write(row, 19, str(pp.fundacion))\n worksheet.write(row, 20, str(pp.colegio))\n row += 1\n workbook.close()\n output.seek(0)\n filename = 'asistentes-info.xlsx'\n response = HttpResponse(output, content_type=\n 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n return response\n",
"step-4": "<mask token>\n\n\n@login_required\ndef assign(request):\n if not request.user.is_superuser:\n raise Http404\n year = request.session.get('periodo', 2020)\n if request.method == 'POST':\n form = AssignPermForm(request.POST, year=year)\n if form.is_valid():\n user = form.cleaned_data['usuario']\n remove_perm('carga_horaria.change_colegio', user,\n get_objects_for_user(user, 'carga_horaria.change_colegio').\n filter(periode=year))\n for c in form.cleaned_data['colegios']:\n assign_perm('change_colegio', user, c)\n form = AssignPermForm(year=year)\n return render(request, 'carga_horaria/assign.html', {'form': form})\n\n\n@login_required\ndef switch_periodo(request, year=2021):\n request.session['periodo'] = year\n try:\n del request.session['colegio__pk']\n del request.session['colegio__nombre']\n except KeyError:\n pass\n return redirect('carga-horaria:home')\n\n\n@login_required\ndef switch(request, pk=None):\n if pk:\n colegio = get_object_or_404(Colegio, pk=pk)\n request.session['colegio__pk'] = colegio.pk\n request.session['colegio__nombre'] = colegio.nombre\n return redirect('carga-horaria:home')\n colegios = get_objects_for_user(request.user,\n 'carga_horaria.change_colegio', Colegio.objects.filter(periode=\n request.session.get('periodo', 2020)))\n return render(request, 'carga_horaria/switch.html', {'colegios': colegios})\n\n\n@login_required\ndef clear(request):\n del request.session['colegio__pk']\n del request.session['colegio__nombre']\n return redirect('carga-horaria:home')\n\n\n@login_required\ndef home(request):\n return render(request, 'carga_horaria/home.html')\n\n\n@login_required\ndef anexo(request, pk):\n p = get_object_or_404(Profesor, pk=pk)\n colegio = Colegio.objects.get(pk=request.session['colegio__pk'])\n response = PDFTemplateResponse(request=request, template=\n 'carga_horaria/profesor/anexo_profesor.html', filename='anexo1.pdf',\n context={'profesor': p, 'colegio': colegio, 'periodo': request.\n session.get('periodo', 2020)}, show_content_in_browser=settings.DEBUG)\n return response\n\n\n<mask token>\n\n\n@login_required\ndef anexo_asistente(request, pk):\n p = get_object_or_404(Asistente, pk=pk)\n colegio = Colegio.objects.get(pk=request.session['colegio__pk'])\n response = PDFTemplateResponse(request=request, template=\n 'carga_horaria/asistente/anexo_asistente.html', filename=\n 'anexo1.pdf', context={'profesor': p, 'colegio': colegio, 'periodo':\n request.session.get('periodo', 2020)}, show_content_in_browser=\n settings.DEBUG)\n return response\n\n\n@login_required\ndef anexos_asistentes(request):\n profesores = get_for_user(request, Asistente.objects.all(),\n 'colegio__pk', request.user)\n mem_zip = io.BytesIO()\n with zipfile.ZipFile(mem_zip, mode='w', compression=zipfile.ZIP_DEFLATED\n ) as zf:\n for pp in profesores:\n zf.writestr(*pp.generar_anexo_1())\n response = HttpResponse(mem_zip.getvalue(), content_type='applicaton/zip')\n response['Content-Disposition'] = 'attachment; filename=\"anexos1.zip\"'\n return response\n\n\n<mask token>\n\n\n@login_required\ndef periodo_pdf(request, pk):\n periodo = get_object_or_404(Periodo, pk=pk)\n response = PDFTemplateResponse(request=request, template=\n 'carga_horaria/periodo/periodo_pdf.html', filename=\n 'carga_horaria.pdf', context={'object': periodo},\n show_content_in_browser=settings.DEBUG)\n return response\n\n\n@login_required\ndef plan_refresh(request, pk):\n plan = get_object_or_404(Plan, pk=pk)\n plan.refresh_asignaturas()\n messages.success(request,\n 'Se han actualizado los cursos asociados al plan ID: {}'.format(\n plan.pk))\n return redirect('carga-horaria:planes')\n\n\n<mask token>\n\n\nclass PeriodoListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de periodos\n \"\"\"\n model = Periodo\n lookup = 'colegio__pk'\n template_name = 'carga_horaria/periodo/listado_periodos.html'\n search_fields = ['nombre', 'colegio']\n paginate_by = 10\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(PeriodoListView, self).get_context_data(*args, **kwargs)\n ox = ctx['object_list']\n ordering = {str(value): index for index, value in enumerate(Nivel)}\n ctx['object_list'] = sorted(ox, key=lambda x: ordering['Nivel.' + x\n .plan.nivel])\n ctx['levels'] = [(tag.name, tag.value) for tag in Nivel]\n ctx['nivel_actual'] = self.request.GET.get('nivel')\n return ctx\n\n def get_queryset(self):\n qs = super().get_queryset()\n nivel = self.request.GET.get('nivel')\n if nivel:\n qs = qs.filter(plan__nivel=nivel)\n return qs\n\n\nclass PeriodoDetailView(LoginRequiredMixin, DetailView):\n \"\"\"\n Detalle de Periodo\n \"\"\"\n model = Periodo\n template_name = 'carga_horaria/periodo/detalle_periodo.html'\n\n\nclass PeriodoCreateView(LoginRequiredMixin, CreateView):\n model = Periodo\n form_class = PeriodoForm\n template_name = 'carga_horaria/periodo/nuevo_periodo.html'\n success_url = reverse_lazy('carga-horaria:periodos')\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PeriodoCreateView, self).get_form_kwargs(*args, **kwargs\n )\n kwargs.update({'user': self.request.user, 'colegio': self.request.\n session.get('colegio__pk', None)})\n return kwargs\n\n\nclass PeriodoUpdateView(LoginRequiredMixin, UpdateView):\n model = Periodo\n form_class = PeriodoForm\n template_name = 'carga_horaria/periodo/editar_periodo.html'\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PeriodoUpdateView, self).get_form_kwargs(*args, **kwargs\n )\n kwargs.update({'user': self.request.user, 'colegio': self.request.\n session.get('colegio__pk', None)})\n return kwargs\n\n def get_success_url(self):\n return reverse('carga-horaria:periodo', kwargs={'pk': self.object.pk})\n\n\nclass PeriodoDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n model = Periodo\n success_url = reverse_lazy('carga-horaria:periodos')\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def test_func(self):\n return self.request.user.is_superuser\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n<mask token>\n\n\nclass ColegioListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de periodos\n \"\"\"\n model = Colegio\n lookup = 'pk'\n template_name = 'carga_horaria/colegio/listado_colegios.html'\n search_fields = ['nombre', 'jec']\n paginate_by = 6\n\n\nclass ColegioDetailView(LoginRequiredMixin, ObjPermissionRequiredMixin,\n DetailView):\n \"\"\"\n Detalle de Colegio\n \"\"\"\n model = Colegio\n permission = 'carga_horaria.change_colegio'\n template_name = 'carga_horaria/colegio/detalle_colegio.html'\n\n\nclass ColegioCreateView(LoginRequiredMixin, CreateView):\n model = Colegio\n form_class = ColegioForm\n template_name = 'carga_horaria/colegio/nuevo_colegio.html'\n success_url = reverse_lazy('carga-horaria:colegios')\n\n def form_valid(self, form):\n colegio = form.save(commit=False)\n colegio.periode = self.request.session.get('periodo', 2020)\n colegio.save()\n return redirect(reverse('carga-horaria:colegios'))\n\n\nclass ColegioUpdateView(LoginRequiredMixin, UpdateView):\n model = Colegio\n form_class = ColegioForm\n template_name = 'carga_horaria/colegio/editar_colegio.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:colegio', kwargs={'pk': self.object.pk})\n\n\nclass ColegioDeleteView(LoginRequiredMixin, DeleteView):\n model = Colegio\n success_url = reverse_lazy('carga-horaria:colegios')\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n<mask token>\n\n\nclass PlanListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de planes\n \"\"\"\n model = Plan\n lookup = 'colegio__pk'\n template_name = 'carga_horaria/plan/listado_planes.html'\n search_fields = ['nombre', 'nivel']\n paginate_by = 10\n ordering = ['-pk']\n\n\nclass PlanDetailView(LoginRequiredMixin, DetailView):\n \"\"\"\n Detalle de Plan\n \"\"\"\n model = Plan\n template_name = 'carga_horaria/plan/detalle_plan.html'\n\n\nclass PlanCreateView(LoginRequiredMixin, CreateView):\n model = Plan\n form_class = PlanForm\n template_name = 'carga_horaria/plan/nuevo_plan.html'\n success_url = reverse_lazy('carga-horaria:planes')\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PlanCreateView, self).get_form_kwargs(*args, **kwargs)\n kwargs.update({'user': self.request.user, 'colegio': self.request.\n session.get('colegio__pk', None)})\n return kwargs\n\n\n@login_required\ndef crear_desde_plantilla(request):\n if request.method == 'POST':\n form = PlantillaPlanForm(request.POST)\n if form.is_valid():\n plantilla = form.cleaned_data['plantilla']\n nivel = form.cleaned_data['nivel']\n colegio_pk = request.session.get('colegio__pk', None)\n if colegio_pk:\n colegio = Colegio.objects.get(pk=colegio_pk)\n nuevo = Plan.objects.create(nivel=nivel, colegio=colegio)\n else:\n nuevo = Plan.objects.create(nivel=nivel)\n for ab in plantilla.asignaturabase_set.all():\n AsignaturaBase.objects.create(nombre=ab.nombre, plan=nuevo,\n horas_jec=ab.horas_jec, horas_nec=ab.horas_nec)\n return redirect('carga-horaria:planes')\n else:\n form = PlantillaPlanForm()\n return render(request, 'carga_horaria/plantilla.html', {'form': form})\n\n\nclass PlanUpdateView(LoginRequiredMixin, UpdateView):\n model = Plan\n form_class = PlanForm\n template_name = 'carga_horaria/plan/editar_plan.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:plan', kwargs={'pk': self.object.pk})\n\n\nclass PlanDeleteView(LoginRequiredMixin, DeleteView):\n model = Plan\n success_url = reverse_lazy('carga-horaria:planes')\n template_name = 'carga_horaria/plan/eliminar_plan.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n<mask token>\n\n\n@login_required\ndef asignatura_limpiar(request, pk, periodo_pk):\n aa = get_object_or_404(Asignatura, pk=pk)\n aa.asignacion_set.all().delete()\n return redirect(reverse('carga-horaria:periodo', kwargs={'pk': periodo_pk})\n )\n\n\n@login_required\ndef asignatura_dif(request, pk):\n pp = get_object_or_404(Periodo, pk=pk)\n if request.method == 'POST':\n nombre = request.POST['asignatura']\n colegio_pk = request.session.get('colegio__pk', None)\n can_confirm = request.POST.get('can_confirm', False)\n if colegio_pk and Asignatura.objects.filter(periodos__colegio=\n colegio_pk, nombre=nombre) and not can_confirm:\n ax = Asignatura.objects.filter(periodos__colegio=colegio_pk,\n nombre=nombre).distinct()\n return render(request,\n 'carga_horaria/asignatura/asignatura_dif_confirm.html', {\n 'object': pp, 'candidatas': ax})\n else:\n aa = Asignatura.objects.create(nombre=request.POST['asignatura'\n ], diferenciada=True, horas=6)\n aa.periodos.add(pp)\n return redirect('carga-horaria:periodo', pp.pk)\n return render(request, 'carga_horaria/asignatura/asignatura_dif.html',\n {'object': pp})\n\n\n@login_required\ndef asignatura_merge(request, pk, asignatura_pk):\n pp = get_object_or_404(Periodo, pk=pk)\n aa = get_object_or_404(Asignatura, pk=asignatura_pk)\n aa.periodos.add(pp)\n return redirect('carga-horaria:periodo', pk)\n\n\n@login_required\ndef asignatura_maybe(request, pk):\n pp = get_object_or_404(Periodo, pk=pk)\n candidatas = Asignatura.objects.filter(periodos__colegio=pp.colegio,\n combinable=True).exclude(periodos__pk__in=[pk]).distinct()\n if candidatas:\n return render(request,\n 'carga_horaria/asignatura/asignatura_maybe.html', {'object': pp,\n 'candidatas': candidatas})\n else:\n return redirect('carga-horaria:asignatura__nuevo', pk)\n\n\n<mask token>\n\n\n@login_required\ndef asignar_fua(request, pk, tipo):\n pp = get_object_or_404(Profesor, pk=pk)\n tipo_display = dict(Asignacion.TIPO_CHOICES)[int(tipo)]\n if request.method == 'POST':\n form = AsignacionFUAForm(request.POST, profesor=pp, user=request.\n user, colegio=request.session.get('colegio__pk', None), periodo\n =request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.profesor = pp\n asignacion.tipo = tipo\n asignacion.save()\n return redirect('carga-horaria:profesor', pp.pk)\n else:\n form = AsignacionFUAForm(user=request.user, colegio=request.session\n .get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_fua.html', {'object': pp,\n 'tipo': tipo_display, 'form': form})\n\n\n@login_required\ndef asignar_no_aula_fua(request, pk, tipo):\n pp = get_object_or_404(Profesor, pk=pk)\n tipo_display = dict(AsignacionNoAula.TIPO_CHOICES)[int(tipo)]\n if request.method == 'POST':\n form = AsignacionNoAulaFUAForm(request.POST, profesor=pp, user=\n request.user, colegio=request.session.get('colegio__pk', None),\n periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.profesor = pp\n asignacion.tipo = tipo\n if asignacion.horas == 0:\n asignacion.horas = pp.horas_no_aula_disponibles\n asignacion.save()\n return redirect('carga-horaria:profesor', pp.pk)\n else:\n form = AsignacionNoAulaFUAForm(user=request.user, colegio=request.\n session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_no_aula_fua.html', {\n 'profesor': pp, 'tipo': tipo_display, 'form': form})\n\n\n@login_required\ndef asignar_extra(request, pk):\n pp = get_object_or_404(Profesor, pk=pk)\n if request.method == 'POST':\n form = AsignacionExtraForm(request.POST, profesor=pp, user=request.\n user, colegio=request.session.get('colegio__pk', None), periodo\n =request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.profesor = pp\n if asignacion.horas == 0:\n asignacion.horas = pp.horas_no_lectivas_disponibles\n asignacion.save()\n return redirect('carga-horaria:profesor', pp.pk)\n else:\n form = AsignacionExtraForm(user=request.user, colegio=request.\n session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_extra.html', {'profesor':\n pp, 'form': form})\n\n\n@login_required\ndef asignar_no_aula(request, pk):\n pp = get_object_or_404(Profesor, pk=pk)\n if request.method == 'POST':\n form = AsignacionNoAulaForm(request.POST, profesor=pp, user=request\n .user, colegio=request.session.get('colegio__pk', None),\n periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.profesor = pp\n if asignacion.horas == 0:\n asignacion.horas = pp.horas_no_aula_disponibles\n asignacion.save()\n return redirect('carga-horaria:profesor', pp.pk)\n else:\n form = AsignacionNoAulaForm(user=request.user, colegio=request.\n session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_no_aula.html', {\n 'profesor': pp, 'form': form})\n\n\nclass AsignacionDeleteView(LoginRequiredMixin, DeleteView):\n model = Asignacion\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.kwargs[\n 'profesor_pk']})\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\nclass AsignacionUpdateView(LoginRequiredMixin, UpdateView):\n model = Asignacion\n form_class = AsignacionUpdateForm\n template_name = 'carga_horaria/asignar_update.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\nclass AsignacionExtraUpdateView(LoginRequiredMixin, UpdateView):\n model = AsignacionExtra\n form_class = AsignacionExtraUpdateForm\n template_name = 'carga_horaria/asignar_extra.html'\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(AsignacionExtraUpdateView, self).get_context_data(*args,\n **kwargs)\n ctx['profesor'] = self.object.profesor\n return ctx\n\n def get_form_kwargs(self, *args, **kwargs):\n pp = get_object_or_404(Profesor, pk=self.kwargs.get('profesor_pk'))\n kwargs = super(AsignacionExtraUpdateView, self).get_form_kwargs(*\n args, **kwargs)\n kwargs.update({'profesor': pp, 'user': self.request.user, 'colegio':\n self.request.session.get('colegio__pk', None)})\n return kwargs\n\n def form_valid(self, form):\n asignacion = form.save(commit=False)\n if asignacion.horas == 0:\n asignacion_old = Asignacion.objects.get(pk=asignacion.pk)\n asignacion.horas = (asignacion.profesor.\n horas_no_lectivas_disponibles + float(asignacion_old.horas))\n asignacion.save()\n return redirect(self.get_success_url())\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\nclass AsignacionExtraDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignacionExtra\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\nclass AsignacionNoAulaUpdateView(LoginRequiredMixin, UpdateView):\n model = AsignacionNoAula\n form_class = AsignacionNoAulaUpdateForm\n template_name = 'carga_horaria/asignar_no_aula.html'\n\n def form_valid(self, form):\n asignacion = form.save(commit=False)\n if asignacion.horas == 0:\n asignacion_old = AsignacionNoAula.objects.get(pk=asignacion.pk)\n asignacion.horas = (asignacion.profesor.\n horas_no_aula_disponibles + asignacion_old.horas)\n asignacion.save()\n return redirect(self.get_success_url())\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(AsignacionNoAulaUpdateView, self).get_context_data(*\n args, **kwargs)\n ctx['profesor'] = self.object.profesor\n return ctx\n\n def get_form_kwargs(self, *args, **kwargs):\n pp = get_object_or_404(Profesor, pk=self.kwargs.get('profesor_pk'))\n kwargs = super(AsignacionNoAulaUpdateView, self).get_form_kwargs(*\n args, **kwargs)\n kwargs.update({'profesor': pp, 'user': self.request.user, 'colegio':\n self.request.session.get('colegio__pk', None)})\n return kwargs\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\nclass AsignacionNoAulaDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignacionNoAula\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.object.\n profesor.pk})\n\n\n@login_required\ndef asignar_asistente(request, pk, tipo):\n pp = get_object_or_404(Asistente, pk=pk)\n tipo_display = dict(AsignacionAsistente.TIPO_CHOICES)[int(tipo)]\n if request.method == 'POST':\n form = AsignacionAsistenteForm(request.POST, asistente=pp, user=\n request.user, colegio=request.session.get('colegio__pk', None),\n periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.asistente = pp\n asignacion.tipo = tipo\n asignacion.save()\n return redirect('carga-horaria:asistente', pp.pk)\n else:\n form = AsignacionAsistenteForm(user=request.user, colegio=request.\n session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_asistente.html', {\n 'asistente': pp, 'form': form})\n\n\nclass AsignacionAsistenteDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignacionAsistente\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse('carga-horaria:asistente', kwargs={'pk': self.object\n .asistente.pk})\n\n\n<mask token>\n\n\n@login_required\ndef asistentes_info(request):\n output = io.BytesIO()\n workbook = xlsxwriter.Workbook(output)\n worksheet = workbook.add_worksheet('Asistentes')\n qs = get_for_user(request, Asistente.objects.all(), 'colegio__pk',\n request.user)\n row = 0\n col = 0\n worksheet.write(0, 0, 'RUT')\n worksheet.write(0, 1, 'Nombre Asistente')\n worksheet.write(0, 2, 'Fecha de Nacimiento')\n worksheet.write(0, 3, 'Nacionalidad')\n worksheet.write(0, 4, 'Dirección')\n worksheet.write(0, 5, 'Comuna')\n worksheet.write(0, 6, 'Teléfono')\n worksheet.write(0, 7, 'Email personal')\n worksheet.write(0, 8, 'Email institucional')\n worksheet.write(0, 9, 'Estado civil')\n worksheet.write(0, 10, 'Adventista')\n worksheet.write(0, 11, 'Discapacidad')\n worksheet.write(0, 12, 'Recibe pensión')\n worksheet.write(0, 13, 'Fecha de Inicio Contrato')\n worksheet.write(0, 14, 'Horas Contrato')\n worksheet.write(0, 15, 'Función')\n worksheet.write(0, 16, 'SEP')\n worksheet.write(0, 17, 'PIE')\n worksheet.write(0, 18, 'Sostenedor')\n worksheet.write(0, 19, 'Fundación que lo contrata')\n worksheet.write(0, 20, 'Colegio')\n row = 1\n for pp in qs:\n worksheet.write(row, 0, pp.rut)\n worksheet.write(row, 1, pp.nombre)\n worksheet.write(row, 2, pp.persona.fecha_nacimiento)\n worksheet.write(row, 3, pp.persona.nacionalidad)\n worksheet.write(row, 4, pp.persona.direccion)\n worksheet.write(row, 5, pp.persona.comuna)\n worksheet.write(row, 6, pp.persona.telefono)\n worksheet.write(row, 7, pp.persona.email_personal)\n worksheet.write(row, 8, pp.persona.email_institucional)\n worksheet.write(row, 9, pp.persona.get_estado_civil_display())\n worksheet.write(row, 10, 'Sí' if pp.persona.adventista else 'No')\n worksheet.write(row, 11, 'Sí' if pp.persona.discapacidad else 'No')\n worksheet.write(row, 12, 'Sí' if pp.persona.recibe_pension else 'No')\n worksheet.write(row, 13, pp.fecha_inicio)\n worksheet.write(row, 14, pp.horas)\n worksheet.write(row, 15, pp.funcion)\n worksheet.write(row, 16, pp.horas_sep)\n worksheet.write(row, 17, pp.horas_pie)\n worksheet.write(row, 18, pp.horas_sostenedor)\n worksheet.write(row, 19, str(pp.fundacion))\n worksheet.write(row, 20, str(pp.colegio))\n row += 1\n workbook.close()\n output.seek(0)\n filename = 'asistentes-info.xlsx'\n response = HttpResponse(output, content_type=\n 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n return response\n",
"step-5": "import io\nimport xlsxwriter\nimport zipfile\nfrom django.conf import settings\nfrom django.http import Http404, HttpResponse\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.views.generic.detail import DetailView\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom .viewsAlexis import *\nfrom django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView\nfrom carga_horaria.models import Periodo, Colegio, Plan\nfrom carga_horaria.formsDani import PeriodoForm, ColegioForm, PlanForm\nfrom django.core.urlresolvers import reverse_lazy, reverse\nfrom guardian.shortcuts import get_objects_for_user\nfrom guardian.shortcuts import assign_perm\nfrom guardian.shortcuts import remove_perm\nfrom wkhtmltopdf.views import PDFTemplateResponse, PDFTemplateView\nfrom .models import Nivel\nfrom .models import Profesor\nfrom .models import Asistente\nfrom .models import Periodo\nfrom .models import Asignacion\nfrom .models import AsignacionExtra\nfrom .models import AsignacionNoAula\nfrom .models import Colegio\nfrom .forms import AsignacionForm\nfrom .forms import AsignacionUpdateForm\nfrom .forms import AsignacionFUAForm\nfrom .forms import AsignacionNoAulaFUAForm\nfrom .forms import AsignacionFUAUpdateForm\nfrom .forms import AsignacionNoAulaFUAUpdateForm\nfrom .forms import AsignacionExtraForm\nfrom .forms import AsignacionExtraUpdateForm\nfrom .forms import AsignacionNoAulaForm\nfrom .forms import AsignacionNoAulaUpdateForm\nfrom .models import AsignacionAsistente\nfrom .forms import AsignacionAsistenteForm\nfrom .forms import AssignPermForm\nfrom .formsDani import PlantillaPlanForm\n\n\n@login_required\ndef assign(request):\n if not request.user.is_superuser:\n raise Http404\n\n year = request.session.get('periodo', 2020)\n if request.method == 'POST':\n form = AssignPermForm(request.POST, year=year)\n if form.is_valid():\n user = form.cleaned_data['usuario']\n\n # clear perms first\n remove_perm('carga_horaria.change_colegio', user, get_objects_for_user(user, 'carga_horaria.change_colegio').filter(periode=year))\n\n for c in form.cleaned_data['colegios']:\n assign_perm('change_colegio', user, c)\n \n form = AssignPermForm(year=year)\n return render(request, 'carga_horaria/assign.html', {'form': form})\n\n\n\n@login_required\ndef switch_periodo(request, year=2021):\n request.session['periodo'] = year\n try:\n del request.session['colegio__pk']\n del request.session['colegio__nombre']\n except KeyError:\n pass\n return redirect('carga-horaria:home')\n\n@login_required\ndef switch(request, pk=None):\n if pk:\n colegio = get_object_or_404(Colegio, pk=pk)\n request.session['colegio__pk'] = colegio.pk\n request.session['colegio__nombre'] = colegio.nombre\n return redirect('carga-horaria:home')\n colegios = get_objects_for_user(request.user, \"carga_horaria.change_colegio\", Colegio.objects.filter(periode=request.session.get('periodo', 2020)))\n return render(request, 'carga_horaria/switch.html', {'colegios': colegios})\n\n@login_required\ndef clear(request):\n del request.session['colegio__pk']\n del request.session['colegio__nombre']\n return redirect('carga-horaria:home')\n\n@login_required\ndef home(request):\n return render(request, 'carga_horaria/home.html')\n\n\n\n@login_required\ndef anexo(request, pk):\n p = get_object_or_404(Profesor, pk=pk)\n colegio = Colegio.objects.get(pk=request.session['colegio__pk'])\n response = PDFTemplateResponse(request=request,\n template='carga_horaria/profesor/anexo_profesor.html',\n filename='anexo1.pdf',\n context={'profesor': p,\n 'colegio': colegio,\n 'periodo': request.session.get('periodo', 2020)},\n show_content_in_browser=settings.DEBUG)\n return response\n\n@login_required\ndef anexos(request):\n profesores = get_for_user(request, Profesor.objects.all(), 'colegio__pk', request.user)\n mem_zip = io.BytesIO()\n with zipfile.ZipFile(mem_zip, mode=\"w\", compression=zipfile.ZIP_DEFLATED) as zf:\n for pp in profesores:\n zf.writestr(*pp.generar_anexo_1())\n\n response = HttpResponse(mem_zip.getvalue(), content_type='applicaton/zip')\n response['Content-Disposition'] = 'attachment; filename=\"anexos1.zip\"'\n return response\n\n\n@login_required\ndef anexo_asistente(request, pk):\n p = get_object_or_404(Asistente, pk=pk)\n colegio = Colegio.objects.get(pk=request.session['colegio__pk'])\n response = PDFTemplateResponse(request=request,\n template='carga_horaria/asistente/anexo_asistente.html',\n filename='anexo1.pdf',\n context={'profesor': p,\n 'colegio': colegio,\n 'periodo': request.session.get('periodo', 2020)},\n show_content_in_browser=settings.DEBUG)\n return response\n\n@login_required\ndef anexos_asistentes(request):\n profesores = get_for_user(request, Asistente.objects.all(), 'colegio__pk', request.user)\n mem_zip = io.BytesIO()\n with zipfile.ZipFile(mem_zip, mode=\"w\", compression=zipfile.ZIP_DEFLATED) as zf:\n for pp in profesores:\n zf.writestr(*pp.generar_anexo_1())\n\n response = HttpResponse(mem_zip.getvalue(), content_type='applicaton/zip')\n response['Content-Disposition'] = 'attachment; filename=\"anexos1.zip\"'\n return response\n\n\n@login_required\ndef profesores_pdf(request):\n profesores = get_for_user(request, Profesor.objects.all(), 'colegio__pk', request.user)\n response = PDFTemplateResponse(request=request,\n template='carga_horaria/profesor/listado_profesor_pdf.html',\n filename='listado_profesores.pdf',\n context={'profesores': profesores},\n show_content_in_browser=settings.DEBUG)\n return response\n\n\n@login_required\ndef asistentes_pdf(request):\n asistentes = get_for_user(request, Asistente.objects.all(), 'colegio__pk', request.user)\n response = PDFTemplateResponse(request=request,\n template='carga_horaria/asistente/listado_asistente_pdf.html',\n filename='listado_asistentes.pdf',\n context={'asistentes': asistentes},\n show_content_in_browser=settings.DEBUG)\n return response\n\n@login_required\ndef periodo_pdf(request, pk):\n periodo = get_object_or_404(Periodo, pk=pk)\n response = PDFTemplateResponse(request=request,\n template='carga_horaria/periodo/periodo_pdf.html',\n filename='carga_horaria.pdf',\n context={'object': periodo},\n show_content_in_browser=settings.DEBUG)\n return response\n\n@login_required\ndef plan_refresh(request, pk):\n plan = get_object_or_404(Plan, pk=pk)\n plan.refresh_asignaturas()\n messages.success(request, \"Se han actualizado los cursos asociados al plan ID: {}\".format(plan.pk))\n return redirect('carga-horaria:planes')\n\n# class AnexoView(PDFTemplateView):\n# template_name = 'carga_horaria/profesor/anexo_profesor.html'\n# filename = 'anexo1.pdf'\n\n# def get(self, request, *args, **kwargs):\n# pk = kwargs.pop('pk')\n# self.p = get_object_or_404(Profesor, pk=pk)\n# self.ax = [{'descripcion': 'Planificación', 'curso': '', 'horas': self.p.horas_planificacion},\n# {'descripcion': 'Recreo', 'curso': '', 'horas': self.p.horas_recreo}] + list(self.p.asignacionextra_set.all())\n# return super(AnexoView, self).get(request, *args, **kwargs)\n\n# def get_context_data(self, *args, **kwargs):\n# ctx = super(AnexoView, self).get_context_data(*args, **kwargs)\n# ctx.update({'asignaciones': self.p.asignacion_set.all(),\n# 'asignaciones_extra': self.ax,\n# 'profesor': self.p})\n\n# anexo = AnexoView.as_view()\n\n\n\"\"\"\n Comienzo Crud Periodos\n\"\"\"\nclass PeriodoListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de periodos\n \"\"\"\n model = Periodo\n lookup = 'colegio__pk'\n template_name = 'carga_horaria/periodo/listado_periodos.html'\n search_fields = ['nombre', 'colegio']\n paginate_by = 10\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(PeriodoListView, self).get_context_data(*args, **kwargs)\n ox = ctx['object_list']\n ordering = {str(value): index for index, value in enumerate(Nivel)}\n ctx['object_list'] = sorted(ox, key=lambda x: ordering[\"Nivel.\"+x.plan.nivel])\n # added for convenience, pasted from AsignaturaBaseListView\n ctx['levels'] = [(tag.name, tag.value) for tag in Nivel]\n ctx['nivel_actual'] = self.request.GET.get('nivel')\n return ctx\n\n def get_queryset(self):\n qs = super().get_queryset()\n\n nivel = self.request.GET.get('nivel')\n if nivel:\n qs = qs.filter(plan__nivel=nivel)\n\n return qs\n\n\n\n\nclass PeriodoDetailView(LoginRequiredMixin, DetailView):\n \"\"\"\n Detalle de Periodo\n \"\"\"\n model = Periodo\n template_name = 'carga_horaria/periodo/detalle_periodo.html'\n\n\nclass PeriodoCreateView(LoginRequiredMixin, CreateView):\n model = Periodo\n form_class = PeriodoForm\n template_name = 'carga_horaria/periodo/nuevo_periodo.html'\n success_url = reverse_lazy('carga-horaria:periodos')\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PeriodoCreateView, self).get_form_kwargs(*args, **kwargs)\n kwargs.update({'user': self.request.user,\n 'colegio': self.request.session.get('colegio__pk', None)})\n return kwargs\n\n\nclass PeriodoUpdateView(LoginRequiredMixin, UpdateView):\n model = Periodo\n form_class = PeriodoForm\n template_name = 'carga_horaria/periodo/editar_periodo.html'\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PeriodoUpdateView, self).get_form_kwargs(*args, **kwargs)\n kwargs.update({'user': self.request.user,\n 'colegio': self.request.session.get('colegio__pk', None)})\n return kwargs\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:periodo',\n kwargs={\n 'pk': self.object.pk,\n }\n )\n\n\nclass PeriodoDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n model = Periodo\n success_url = reverse_lazy('carga-horaria:periodos')\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def test_func(self):\n return self.request.user.is_superuser\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\"\"\"\n Fin Crud Periodos\n\"\"\"\n\n\"\"\"\n Comienzo Crud Colegios\n\"\"\"\nclass ColegioListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de periodos\n \"\"\"\n model = Colegio\n lookup = 'pk'\n template_name = 'carga_horaria/colegio/listado_colegios.html'\n search_fields = ['nombre', 'jec']\n paginate_by = 6\n\n\nclass ColegioDetailView(LoginRequiredMixin, ObjPermissionRequiredMixin, DetailView):\n \"\"\"\n Detalle de Colegio\n \"\"\"\n model = Colegio\n permission = 'carga_horaria.change_colegio'\n template_name = 'carga_horaria/colegio/detalle_colegio.html'\n\n\nclass ColegioCreateView(LoginRequiredMixin, CreateView):\n model = Colegio\n form_class = ColegioForm\n template_name = 'carga_horaria/colegio/nuevo_colegio.html'\n success_url = reverse_lazy('carga-horaria:colegios')\n# success_message = u\"Nuevo periodo %(nombre)s creado satisfactoriamente.\"\n# error_message = \"Revise que todos los campos del formulario hayan sido validados correctamente.\"\n\n def form_valid(self, form):\n colegio = form.save(commit=False)\n colegio.periode = self.request.session.get('periodo', 2020)\n colegio.save()\n return redirect(reverse('carga-horaria:colegios'))\n\n\nclass ColegioUpdateView(LoginRequiredMixin, UpdateView):\n model = Colegio\n form_class = ColegioForm\n template_name = 'carga_horaria/colegio/editar_colegio.html'\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:colegio',\n kwargs={\n 'pk': self.object.pk,\n }\n )\n\n\n\nclass ColegioDeleteView(LoginRequiredMixin, DeleteView):\n model = Colegio\n success_url = reverse_lazy('carga-horaria:colegios')\n \n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\n\"\"\"\n Fin Crud Colegios\n\"\"\"\n\n\"\"\"\n Comienzo Crud Planes\n\"\"\"\nclass PlanListView(LoginRequiredMixin, GetObjectsForUserMixin, ListView):\n \"\"\"\n Listado de planes\n \"\"\"\n model = Plan\n lookup = 'colegio__pk'\n template_name = 'carga_horaria/plan/listado_planes.html'\n search_fields = ['nombre', 'nivel']\n paginate_by = 10\n ordering = ['-pk']\n\n\nclass PlanDetailView(LoginRequiredMixin, DetailView):\n \"\"\"\n Detalle de Plan\n \"\"\"\n model = Plan\n template_name = 'carga_horaria/plan/detalle_plan.html'\n\n\nclass PlanCreateView(LoginRequiredMixin, CreateView):\n model = Plan\n form_class = PlanForm\n template_name = 'carga_horaria/plan/nuevo_plan.html'\n success_url = reverse_lazy('carga-horaria:planes')\n# success_message = u\"Nuevo periodo %(nombre)s creado satisfactoriamente.\"\n# error_message = \"Revise que todos los campos del formulario hayan sido validados correctamente.\"\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super(PlanCreateView, self).get_form_kwargs(*args, **kwargs)\n kwargs.update({'user': self.request.user,\n 'colegio': self.request.session.get('colegio__pk', None)})\n return kwargs\n\n\n@login_required\ndef crear_desde_plantilla(request):\n if request.method == 'POST':\n form = PlantillaPlanForm(request.POST)\n if form.is_valid():\n plantilla = form.cleaned_data['plantilla']\n nivel = form.cleaned_data['nivel']\n\n colegio_pk = request.session.get('colegio__pk', None)\n if colegio_pk:\n colegio = Colegio.objects.get(pk=colegio_pk)\n nuevo = Plan.objects.create(nivel=nivel, colegio=colegio)\n else:\n nuevo = Plan.objects.create(nivel=nivel)\n for ab in plantilla.asignaturabase_set.all():\n AsignaturaBase.objects.create(nombre=ab.nombre,\n plan=nuevo,\n horas_jec=ab.horas_jec,\n horas_nec=ab.horas_nec)\n return redirect('carga-horaria:planes')\n else:\n form = PlantillaPlanForm()\n return render(request, 'carga_horaria/plantilla.html', {'form': form})\n\n\nclass PlanUpdateView(LoginRequiredMixin, UpdateView):\n model = Plan\n form_class = PlanForm\n template_name = 'carga_horaria/plan/editar_plan.html'\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:plan',\n kwargs={\n 'pk': self.object.pk,\n }\n )\n\n\nclass PlanDeleteView(LoginRequiredMixin, DeleteView):\n model = Plan\n success_url = reverse_lazy('carga-horaria:planes')\n template_name = 'carga_horaria/plan/eliminar_plan.html'\n \n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\"\"\"\n Fin Crud Planes\n\"\"\"\n\n@login_required\ndef asignatura_limpiar(request, pk, periodo_pk):\n aa = get_object_or_404(Asignatura, pk=pk)\n aa.asignacion_set.all().delete()\n return redirect(reverse('carga-horaria:periodo', kwargs={'pk': periodo_pk}))\n\n\n@login_required\ndef asignatura_dif(request, pk):\n pp = get_object_or_404(Periodo, pk=pk)\n\n if request.method == 'POST':\n # check first if there are any candidates for merging\n nombre = request.POST['asignatura']\n colegio_pk = request.session.get('colegio__pk', None)\n can_confirm = request.POST.get('can_confirm', False)\n if colegio_pk and Asignatura.objects.filter(periodos__colegio=colegio_pk, nombre=nombre) and not can_confirm:\n ax = Asignatura.objects.filter(periodos__colegio=colegio_pk, nombre=nombre).distinct()\n return render(request, 'carga_horaria/asignatura/asignatura_dif_confirm.html', {'object': pp,\n 'candidatas': ax})\n else:\n aa = Asignatura.objects.create(nombre=request.POST['asignatura'],\n diferenciada=True,\n horas=6)\n aa.periodos.add(pp)\n return redirect('carga-horaria:periodo', pp.pk)\n return render(request, 'carga_horaria/asignatura/asignatura_dif.html', {'object': pp})\n\n\n@login_required\ndef asignatura_merge(request, pk, asignatura_pk):\n pp = get_object_or_404(Periodo, pk=pk)\n aa = get_object_or_404(Asignatura, pk=asignatura_pk)\n aa.periodos.add(pp)\n return redirect('carga-horaria:periodo', pk)\n\n\n@login_required\ndef asignatura_maybe(request, pk):\n pp = get_object_or_404(Periodo, pk=pk)\n candidatas = Asignatura.objects.filter(periodos__colegio=pp.colegio, combinable=True).exclude(periodos__pk__in=[pk]).distinct()\n if candidatas:\n return render(request, 'carga_horaria/asignatura/asignatura_maybe.html', {'object': pp, 'candidatas': candidatas})\n else:\n return redirect('carga-horaria:asignatura__nuevo', pk)\n\n\n@login_required\ndef asignar(request, pk, periodo_pk):\n aa = get_object_or_404(Asignatura, pk=pk)\n\n if request.method == 'POST':\n form = AsignacionForm(request.POST, asignatura=aa, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.asignatura = aa\n asignacion.save()\n return redirect('carga-horaria:periodo', periodo_pk)\n else:\n form = AsignacionForm(user=request.user, colegio=request.session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar.html', {'object': aa,\n 'form': form})\n\n\n@login_required\ndef asignar_fua(request, pk, tipo):\n pp = get_object_or_404(Profesor, pk=pk)\n tipo_display = dict(Asignacion.TIPO_CHOICES)[int(tipo)]\n\n if request.method == 'POST':\n form = AsignacionFUAForm(request.POST, profesor=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.profesor = pp\n asignacion.tipo = tipo\n asignacion.save()\n return redirect('carga-horaria:profesor', pp.pk)\n else:\n form = AsignacionFUAForm(user=request.user, colegio=request.session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_fua.html', {'object': pp,\n 'tipo': tipo_display,\n 'form': form})\n\n@login_required\ndef asignar_no_aula_fua(request, pk, tipo):\n pp = get_object_or_404(Profesor, pk=pk)\n tipo_display = dict(AsignacionNoAula.TIPO_CHOICES)[int(tipo)]\n\n if request.method == 'POST':\n form = AsignacionNoAulaFUAForm(request.POST, profesor=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.profesor = pp\n asignacion.tipo = tipo\n if asignacion.horas == 0:\n asignacion.horas = pp.horas_no_aula_disponibles\n asignacion.save()\n return redirect('carga-horaria:profesor', pp.pk)\n else:\n form = AsignacionNoAulaFUAForm(user=request.user, colegio=request.session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_no_aula_fua.html', {'profesor': pp,\n 'tipo': tipo_display,\n 'form': form})\n\n\n\n@login_required\ndef asignar_extra(request, pk):\n pp = get_object_or_404(Profesor, pk=pk)\n\n if request.method == 'POST':\n form = AsignacionExtraForm(request.POST, profesor=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.profesor = pp\n if asignacion.horas == 0:\n asignacion.horas = pp.horas_no_lectivas_disponibles\n asignacion.save()\n return redirect('carga-horaria:profesor', pp.pk)\n else:\n form = AsignacionExtraForm(user=request.user, colegio=request.session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_extra.html', {'profesor': pp,\n 'form': form})\n\n\n@login_required\ndef asignar_no_aula(request, pk):\n pp = get_object_or_404(Profesor, pk=pk)\n\n if request.method == 'POST':\n form = AsignacionNoAulaForm(request.POST, profesor=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.profesor = pp\n if asignacion.horas == 0:\n asignacion.horas = pp.horas_no_aula_disponibles\n asignacion.save()\n return redirect('carga-horaria:profesor', pp.pk)\n else:\n form = AsignacionNoAulaForm(user=request.user, colegio=request.session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_no_aula.html', {'profesor': pp,\n 'form': form})\n\nclass AsignacionDeleteView(LoginRequiredMixin, DeleteView):\n model = Asignacion\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get_success_url(self):\n return reverse('carga-horaria:profesor', kwargs={'pk': self.kwargs['profesor_pk']})\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n\nclass AsignacionUpdateView(LoginRequiredMixin, UpdateView):\n model = Asignacion\n form_class = AsignacionUpdateForm\n template_name = 'carga_horaria/asignar_update.html'\n\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:profesor',\n kwargs={\n 'pk': self.object.profesor.pk,\n }\n )\n\n\nclass AsignacionExtraUpdateView(LoginRequiredMixin, UpdateView):\n model = AsignacionExtra\n form_class = AsignacionExtraUpdateForm\n template_name = 'carga_horaria/asignar_extra.html'\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(AsignacionExtraUpdateView, self).get_context_data(*args, **kwargs)\n ctx['profesor'] = self.object.profesor\n return ctx\n\n def get_form_kwargs(self, *args, **kwargs):\n pp = get_object_or_404(Profesor, pk=self.kwargs.get('profesor_pk'))\n\n kwargs = super(AsignacionExtraUpdateView, self).get_form_kwargs(*args, **kwargs)\n kwargs.update({'profesor': pp,\n 'user': self.request.user,\n 'colegio': self.request.session.get('colegio__pk', None)})\n return kwargs\n\n def form_valid(self, form):\n asignacion = form.save(commit=False)\n if asignacion.horas == 0:\n asignacion_old = Asignacion.objects.get(pk=asignacion.pk)\n asignacion.horas = asignacion.profesor.horas_no_lectivas_disponibles + float(asignacion_old.horas)\n asignacion.save()\n return redirect(self.get_success_url())\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:profesor',\n kwargs={\n 'pk': self.object.profesor.pk,\n }\n )\n\n\nclass AsignacionExtraDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignacionExtra\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:profesor',\n kwargs={\n 'pk': self.object.profesor.pk,\n }\n )\n\n\nclass AsignacionNoAulaUpdateView(LoginRequiredMixin, UpdateView):\n model = AsignacionNoAula\n form_class = AsignacionNoAulaUpdateForm\n template_name = 'carga_horaria/asignar_no_aula.html'\n\n def form_valid(self, form):\n asignacion = form.save(commit=False)\n if asignacion.horas == 0:\n asignacion_old = AsignacionNoAula.objects.get(pk=asignacion.pk)\n asignacion.horas = asignacion.profesor.horas_no_aula_disponibles + asignacion_old.horas\n asignacion.save()\n return redirect(self.get_success_url())\n\n def get_context_data(self, *args, **kwargs):\n ctx = super(AsignacionNoAulaUpdateView, self).get_context_data(*args, **kwargs)\n ctx['profesor'] = self.object.profesor\n return ctx\n\n def get_form_kwargs(self, *args, **kwargs):\n pp = get_object_or_404(Profesor, pk=self.kwargs.get('profesor_pk'))\n\n kwargs = super(AsignacionNoAulaUpdateView, self).get_form_kwargs(*args, **kwargs)\n kwargs.update({'profesor': pp,\n 'user': self.request.user,\n 'colegio': self.request.session.get('colegio__pk', None)})\n return kwargs\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:profesor',\n kwargs={\n 'pk': self.object.profesor.pk,\n }\n )\n\nclass AsignacionNoAulaDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignacionNoAula\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:profesor',\n kwargs={\n 'pk': self.object.profesor.pk,\n }\n )\n\n\n@login_required\ndef asignar_asistente(request, pk, tipo):\n pp = get_object_or_404(Asistente, pk=pk)\n tipo_display = dict(AsignacionAsistente.TIPO_CHOICES)[int(tipo)]\n\n if request.method == 'POST':\n form = AsignacionAsistenteForm(request.POST, asistente=pp, user=request.user, colegio=request.session.get('colegio__pk', None), periodo=request.session.get('periodo', 2020))\n if form.is_valid():\n asignacion = form.save(commit=False)\n asignacion.asistente = pp\n asignacion.tipo = tipo\n # if asignacion.horas == 0:\n # asignacion.horas = pp.horas_no_lectivas_disponibles\n asignacion.save()\n return redirect('carga-horaria:asistente', pp.pk)\n else:\n form = AsignacionAsistenteForm(user=request.user, colegio=request.session.get('colegio__pk', None))\n return render(request, 'carga_horaria/asignar_asistente.html', {'asistente': pp,\n 'form': form})\n\nclass AsignacionAsistenteDeleteView(LoginRequiredMixin, DeleteView):\n model = AsignacionAsistente\n template_name = 'carga_horaria/periodo/eliminar_periodo.html'\n\n def get(self, request, *args, **kwargs):\n return self.post(request, *args, **kwargs)\n\n def get_success_url(self):\n return reverse(\n 'carga-horaria:asistente',\n kwargs={\n 'pk': self.object.asistente.pk,\n }\n )\n\n@login_required\ndef profesores_info(request):\n output = io.BytesIO()\n\n # Create a workbook and add a worksheet.\n workbook = xlsxwriter.Workbook(output)\n worksheet = workbook.add_worksheet('Profesores')\n \n # Some data we want to write to the worksheet.\n qs = get_for_user(request, Profesor.objects.all(), 'colegio__pk', request.user)\n\n # Start from the first cell. Rows and columns are zero indexed.\n row = 0\n col = 0\n\n # Iterate over the data and write it out row by row.\n worksheet.write(0, 0, 'RUT')\n worksheet.write(0, 1, 'Nombre Docente')\n worksheet.write(0, 2, 'Dirección Docente')\n worksheet.write(0, 3, 'Comuna')\n worksheet.write(0, 4, 'Nacionalidad')\n worksheet.write(0, 5, 'Teléfono')\n worksheet.write(0, 6, 'Email personal')\n worksheet.write(0, 7, 'Email institucional')\n worksheet.write(0, 8, 'Estado civil')\n worksheet.write(0, 9, 'Discapacidad')\n worksheet.write(0, 10, 'Recibe pensión')\n worksheet.write(0, 11, 'Adventista')\n worksheet.write(0, 12, 'Fecha de Nacimiento')\n worksheet.write(0, 13, 'Tipo de Contrato')\n worksheet.write(0, 14, 'Cargo')\n worksheet.write(0, 15, 'Fecha de Inicio Contrato')\n worksheet.write(0, 16, 'Horas Contrato Propuestas')\n worksheet.write(0, 17, 'Horas SBVG')\n worksheet.write(0, 18, 'Horas SEP')\n worksheet.write(0, 19, 'Horas PIE')\n worksheet.write(0, 20, 'Horas Indefinidas Actual')\n worksheet.write(0, 21, 'Horas Plazo Fijo Actual')\n worksheet.write(0, 22, 'Horas Jornada Semanal')\n worksheet.write(0, 23, 'Asignaciones Aula Plan')\n worksheet.write(0, 24, 'Horas Aula PIE')\n worksheet.write(0, 25, 'Horas Aula SEP')\n worksheet.write(0, 26, 'Horas Aula Sostenedor')\n worksheet.write(0, 27, 'Horas disponibles')\n worksheet.write(0, 28, 'Asignación No Lectiva')\n worksheet.write(0, 29, 'Horas no lectivas disponibles')\n worksheet.write(0, 30, 'Asignación No Aula Normal')\n worksheet.write(0, 31, 'Asignación No Aula PIE')\n worksheet.write(0, 32, 'Asignación No Aula SEP')\n worksheet.write(0, 33, 'Especialidad')\n worksheet.write(0, 34, 'Profesor Jefe')\n worksheet.write(0, 35, 'Fundación que lo contrata')\n worksheet.write(0, 36, 'Colegio')\n \n\n\n \n row = 1\n for pp in qs:\n worksheet.write(row, 0, pp.rut)\n worksheet.write(row, 1, pp.nombre)\n worksheet.write(row, 2, pp.direccion)\n worksheet.write(row, 3, pp.persona.comuna)\n worksheet.write(row, 4, pp.persona.nacionalidad)\n worksheet.write(row, 5, pp.persona.telefono)\n worksheet.write(row, 6, pp.persona.email_personal)\n worksheet.write(row, 7, pp.persona.email_institucional)\n worksheet.write(row, 8, pp.persona.get_estado_civil_display())\n worksheet.write(row, 9, 'Sí' if pp.persona.discapacidad else 'No')\n worksheet.write(row, 10, 'Sí' if pp.persona.recibe_pension else 'No')\n worksheet.write(row, 11, 'Sí' if pp.persona.adventista else 'No')\n worksheet.write(row, 12, pp.persona.fecha_nacimiento)\n worksheet.write(row, 13, pp.get_tipo_display())\n worksheet.write(row, 14, pp.get_cargo_display())\n worksheet.write(row, 15, pp.fecha_inicio)\n worksheet.write(row, 16, pp.horas_semanales_total)\n worksheet.write(row, 17, pp.horas_sbvg_total)\n worksheet.write(row, 18, pp.total_sep)\n worksheet.write(row, 19, pp.total_pie)\n worksheet.write(row, 20, pp.horas_indefinidas)\n worksheet.write(row, 21, pp.horas_plazo_fijo)\n worksheet.write(row, 22, pp.horas_semanales)\n worksheet.write(row, 23, pp.horas_asignadas_plan)\n worksheet.write(row, 24, pp.horas_asignadas_pie)\n worksheet.write(row, 25, pp.horas_asignadas_sep)\n worksheet.write(row, 26, pp.horas_asignadas_sostenedor)\n worksheet.write(row, 27, pp.horas_disponibles)\n worksheet.write(row, 28, pp.horas_no_lectivas_asignadas_anexo)\n worksheet.write(row, 29, pp.horas_no_lectivas_disponibles)\n worksheet.write(row, 30, pp.horas_no_aula_asignadas_ordinaria)\n worksheet.write(row, 31, pp.horas_no_aula_asignadas_pie)\n worksheet.write(row, 32, pp.horas_no_aula_asignadas_sep)\n worksheet.write(row, 33, str(pp.especialidad))\n worksheet.write(row, 34, pp.jefatura if pp.es_profesor_jefe else 'No')\n worksheet.write(row, 35, str(pp.fundacion))\n worksheet.write(row, 36, str(pp.colegio))\n\n row += 1\n\n workbook.close()\n output.seek(0)\n\n # Set up the Http response.\n filename = 'profesores-info.xlsx'\n response = HttpResponse(\n output,\n content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n )\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n\n return response\n\n\n@login_required\ndef asistentes_info(request):\n output = io.BytesIO()\n\n # Create a workbook and add a worksheet.\n workbook = xlsxwriter.Workbook(output)\n worksheet = workbook.add_worksheet('Asistentes')\n \n # Some data we want to write to the worksheet.\n qs = get_for_user(request, Asistente.objects.all(), 'colegio__pk', request.user)\n\n # Start from the first cell. Rows and columns are zero indexed.\n row = 0\n col = 0\n\n # Iterate over the data and write it out row by row.\n worksheet.write(0, 0, 'RUT')\n worksheet.write(0, 1, 'Nombre Asistente')\n worksheet.write(0, 2, 'Fecha de Nacimiento')\n worksheet.write(0, 3, 'Nacionalidad')\n worksheet.write(0, 4, 'Dirección')\n worksheet.write(0, 5, 'Comuna')\n worksheet.write(0, 6, 'Teléfono')\n worksheet.write(0, 7, 'Email personal')\n worksheet.write(0, 8, 'Email institucional')\n worksheet.write(0, 9, 'Estado civil')\n worksheet.write(0, 10, 'Adventista')\n worksheet.write(0, 11, 'Discapacidad')\n worksheet.write(0, 12, 'Recibe pensión')\n worksheet.write(0, 13, 'Fecha de Inicio Contrato')\n worksheet.write(0, 14, 'Horas Contrato')\n worksheet.write(0, 15, 'Función')\n worksheet.write(0, 16, 'SEP')\n worksheet.write(0, 17, 'PIE')\n worksheet.write(0, 18, 'Sostenedor')\n worksheet.write(0, 19, 'Fundación que lo contrata')\n worksheet.write(0, 20, 'Colegio')\n\n \n row = 1\n for pp in qs:\n worksheet.write(row, 0, pp.rut)\n worksheet.write(row, 1, pp.nombre)\n worksheet.write(row, 2, pp.persona.fecha_nacimiento)\n worksheet.write(row, 3, pp.persona.nacionalidad)\n worksheet.write(row, 4, pp.persona.direccion)\n worksheet.write(row, 5, pp.persona.comuna)\n worksheet.write(row, 6, pp.persona.telefono)\n worksheet.write(row, 7, pp.persona.email_personal)\n worksheet.write(row, 8, pp.persona.email_institucional)\n worksheet.write(row, 9, pp.persona.get_estado_civil_display())\n worksheet.write(row, 10, 'Sí' if pp.persona.adventista else 'No')\n worksheet.write(row, 11, 'Sí' if pp.persona.discapacidad else 'No')\n worksheet.write(row, 12, 'Sí' if pp.persona.recibe_pension else 'No')\n worksheet.write(row, 13, pp.fecha_inicio)\n worksheet.write(row, 14, pp.horas)\n worksheet.write(row, 15, pp.funcion)\n worksheet.write(row, 16, pp.horas_sep)\n worksheet.write(row, 17, pp.horas_pie)\n worksheet.write(row, 18, pp.horas_sostenedor)\n worksheet.write(row, 19, str(pp.fundacion))\n worksheet.write(row, 20, str(pp.colegio))\n row += 1\n\n workbook.close()\n output.seek(0)\n\n # Set up the Http response.\n filename = 'asistentes-info.xlsx'\n response = HttpResponse(\n output,\n content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n )\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n\n return response\n",
"step-ids": [
67,
76,
95,
101,
108
]
}
|
[
67,
76,
95,
101,
108
] |
from django.contrib import admin
from django.urls import path, include
from .views import hindex,galeria,mision_vision,direccion,registro,login,logout_vista,registro_insumo,admin_insumos
urlpatterns = [
path('',hindex,name='HINDEX'),
path('galeria/',galeria,name='GALE'),
path('mision/',mision_vision,name='MISION'),
path('direccion/',direccion,name='UBICACION'),
path('registro/',registro,name='FORMU'),
path('login/',login,name='LOGIN'),
path('logout_vista/',logout_vista,name='LOGOUT'),
path('registro_insumo/',registro_insumo,name='INSUMOS'),
path('admin_insumos/',admin_insumos,name='ADMIN'),
]
admin.site.site_header="Administración Lavado de Autos"
|
normal
|
{
"blob_id": "dff5a46c6f1eb715fe5e1eec87e42ceb295b0eae",
"index": 4650,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', hindex, name='HINDEX'), path('galeria/', galeria,\n name='GALE'), path('mision/', mision_vision, name='MISION'), path(\n 'direccion/', direccion, name='UBICACION'), path('registro/', registro,\n name='FORMU'), path('login/', login, name='LOGIN'), path(\n 'logout_vista/', logout_vista, name='LOGOUT'), path('registro_insumo/',\n registro_insumo, name='INSUMOS'), path('admin_insumos/', admin_insumos,\n name='ADMIN')]\nadmin.site.site_header = 'Administración Lavado de Autos'\n",
"step-3": "from django.contrib import admin\nfrom django.urls import path, include\nfrom .views import hindex, galeria, mision_vision, direccion, registro, login, logout_vista, registro_insumo, admin_insumos\nurlpatterns = [path('', hindex, name='HINDEX'), path('galeria/', galeria,\n name='GALE'), path('mision/', mision_vision, name='MISION'), path(\n 'direccion/', direccion, name='UBICACION'), path('registro/', registro,\n name='FORMU'), path('login/', login, name='LOGIN'), path(\n 'logout_vista/', logout_vista, name='LOGOUT'), path('registro_insumo/',\n registro_insumo, name='INSUMOS'), path('admin_insumos/', admin_insumos,\n name='ADMIN')]\nadmin.site.site_header = 'Administración Lavado de Autos'\n",
"step-4": "from django.contrib import admin\nfrom django.urls import path, include\nfrom .views import hindex,galeria,mision_vision,direccion,registro,login,logout_vista,registro_insumo,admin_insumos\n\n\nurlpatterns = [\n path('',hindex,name='HINDEX'),\n path('galeria/',galeria,name='GALE'),\n path('mision/',mision_vision,name='MISION'),\n path('direccion/',direccion,name='UBICACION'),\n path('registro/',registro,name='FORMU'),\n path('login/',login,name='LOGIN'),\n path('logout_vista/',logout_vista,name='LOGOUT'),\n path('registro_insumo/',registro_insumo,name='INSUMOS'),\n path('admin_insumos/',admin_insumos,name='ADMIN'),\n]\n\n\nadmin.site.site_header=\"Administración Lavado de Autos\"",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
"""
Plot EEG data.
Usage:
plotting.py [options] [<file>]
Options:
-h --help Show this screen.
--version Show version.
--center Center the data before plotting
--sample-index=N Row index (indexed from one).
--transpose Transpose data.
--xlim=lim X-axis limits.
Data
----
ELECTRODES : dict
Dictionary indexed by electrode name with 2D positions as values
References
----------
The five percent electrode system for high-resolution EEG and ERP
measurement, Robert Oostenveld, Peter Praamstra.
"""
from __future__ import absolute_import, division, print_function
from math import cos, pi, sin
import matplotlib.lines as lines
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
from scipy.interpolate import griddata
__all__ = ('ELECTRODES', 'MultiPlot', 'TopoPlot', 'topoplot')
ELECTRODES = {
'AF3': (-0.25, 0.62),
'AF4': (0.25, 0.62),
'AF7': (0.8 * cos(0.7 * pi), 0.8 * sin(0.7 * pi)),
'AF8': (0.8 * cos(0.3 * pi), 0.8 * sin(0.3 * pi)),
'AFz': (0, 0.6),
'C1': (-0.2, 0),
'C2': (0.2, 0),
'C3': (-0.4, 0),
'C4': (0.4, 0),
'C5': (-0.6, 0),
'C6': (0.6, 0),
'CP1': (-0.18, -0.2),
'CP2': (0.18, -0.2),
'CP3': (-0.36, 0.4 * sin(1.17 * pi)),
'CP4': (0.36, 0.4 * sin(1.83 * pi)),
'CP5': (0.6 * cos(1.12 * pi), 0.6 * sin(1.12 * pi)),
'CP6': (0.6 * cos(1.88 * pi), 0.6 * sin(1.88 * pi)),
'CPz': (0, -0.2),
'Cz': (0, 0),
'F1': (-0.18, 0.4),
'F2': (0.18, 0.4),
'F3': (-0.35, 0.41),
'F4': (0.35, 0.41),
'F5': (-0.5, 0.43),
'F6': (0.5, 0.43),
'F7': (0.8 * cos(0.8 * pi), 0.8 * sin(0.8 * pi)),
'F8': (0.8 * cos(0.2 * pi), 0.8 * sin(0.2 * pi)),
'FC1': (-0.2, 0.21),
'FC2': (0.2, 0.21),
'FC3': (-0.39, 0.22),
'FC4': (0.39, 0.22),
'FC5': (-0.57, 0.23),
'FC6': (0.57, 0.23),
'FCz': (0, 0.2),
'FP1': (0.8 * cos(0.6 * pi), 0.8 * sin(0.6 * pi)),
'FP2': (0.8 * cos(0.4 * pi), 0.8 * sin(0.4 * pi)),
'Fpz': (0, 0.8),
'FT7': (0.8 * cos(0.9 * pi), 0.8 * sin(0.9 * pi)),
'FT8': (0.8 * cos(0.1 * pi), 0.8 * sin(0.1 * pi)),
'Fz': (0, 0.4),
'Iz': (0, -1),
'Nz': (0, 1),
'P1': (-0.18, -0.41),
'P2': (0.18, -0.41),
'P3': (-0.35, -0.42),
'P4': (0.35, -0.42),
'P5': (-0.5, -0.44),
'P6': (0.5, -0.44),
'P7': (0.8 * cos(1.2 * pi), 0.8 * sin(1.2 * pi)),
'P8': (0.8 * cos(1.8 * pi), 0.8 * sin(1.8 * pi)),
'PO3': (-0.24, -0.62),
'PO4': (0.24, -0.62),
'PO7': (0.8 * cos(1.3 * pi), 0.8 * sin(1.3 * pi)),
'PO8': (0.8 * cos(1.7 * pi), 0.8 * sin(1.7 * pi)),
'POz': (0, -0.6),
'Pz': (0, -0.4),
'O1': (0.8 * cos(1.4 * pi), 0.8 * sin(1.4 * pi)),
'O2': (0.8 * cos(1.6 * pi), 0.8 * sin(1.6 * pi)),
'Oz': (0, -0.8),
'T7': (-0.8, 0),
'T8': (0.8, 0),
'T9': (-1, 0),
'T10': (1, 0),
'TP7': (0.8 * cos(1.1 * pi), 0.8 * sin(1.1 * pi)),
'TP8': (0.8 * cos(1.9 * pi), 0.8 * sin(1.9 * pi)),
'TP9': (cos(1.1 * pi), sin(1.1 * pi)),
'TP10': (cos(1.9 * pi), sin(1.9 * pi)),
}
class TopoPlot(object):
"""Topographic plot."""
def __init__(self, data=None, axes=None):
"""Setup defaults.
Parameters
----------
data : Pandas.Series or dict
Pandas Series with values indexed by electrodes.
axes : matplotlib.axes.AxesSubplot object
Axis object to render on.
"""
if axes is None:
self.figure = plt.figure()
axes = self.figure.gca()
else:
self.figure = axes.get_figure()
self.axes = axes
self.center = np.array((0, 0))
if isinstance(data, dict):
self.data = pd.Series(data)
elif isinstance(data, pd.Series):
self.data = data
elif data is None:
self.data = None
else:
raise ValueError("Wrong type of value for 'data': {}".format(
type(data)))
@staticmethod
def normalize_electrode_name(name):
"""Normalize electrode name.
Parameters
----------
name : str
Name of electrode to be normalized
Examples
--------
>>> TopoPlot.normalize_electrode_name('fpz')
'Fpz'
>>> TopoPlot.normalize_electrode_name('AFZ')
'AFz'
"""
return name.upper().replace('FPZ', 'Fpz').replace('Z', 'z')
def draw_electrodes(self):
"""Draw electrodes."""
for electrode, position in ELECTRODES.items():
circle = plt.Circle(self.center + position,
radius=0.04, fill=True,
facecolor=(1, 1, 1))
self.axes.add_patch(circle)
position = self.center + position
self.axes.text(position[0], position[1], electrode,
verticalalignment='center',
horizontalalignment='center',
size=6)
def draw_head(self):
"""Draw outer head."""
circle = plt.Circle(self.center, radius=1, fill=False)
self.axes.add_patch(circle)
def draw_inner_head(self):
"""Draw inner head."""
circle = plt.Circle(self.center, radius=0.8, fill=False)
self.axes.add_patch(circle)
def draw_nose(self):
"""Draw nose."""
nose = plt.Line2D([sin(-0.1), 0, sin(0.1)],
[cos(-0.1), 1.1, cos(0.1)],
color=(0, 0, 0))
self.axes.add_line(nose)
def draw_data(self, method='linear', number_of_contours=10):
"""Draw countours from provided data."""
if self.data is not None:
# Coordinates for points to interpolate to
xi, yi = np.mgrid[-1:1:100j, -1:1:100j]
# Electrode positions for data to interpolate from
points = []
for electrode in self.data.index:
name = TopoPlot.normalize_electrode_name(electrode)
points.append(ELECTRODES[name])
# Interpolate
# TODO: Will not work with 2 electrodes.
zi = griddata(points, self.data.values, (xi, yi), method=method)
# Defaults
if number_of_contours is None:
number_of_contours = 10
# Draw
plt.contourf(xi, yi, zi, number_of_contours)
# TODO: center
def draw(self, title=None, method='linear', number_of_contours=None):
"""Draw all components in topoplot including the data.
Parameters
----------
title : str, optional
Title to put on the plot
methods : str, optional
Interpolation method
number_of_contours : int
Number of contours in the colored plot.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}
>>> plt.ion()
>>> topo_plot = TopoPlot(data)
>>> topo_plot.draw()
"""
self.draw_head()
self.draw_inner_head()
self.draw_electrodes()
self.draw_nose()
self.draw_data(method=method, number_of_contours=number_of_contours)
self.axes.axis((-1.2, 1.2, -1.2, 1.2))
self.axes.axis('equal')
if title is not None:
self.axes.set_title(title)
class MultiPlot(TopoPlot):
"""Multiple plots organized topographically.
References
----------
http://www.fieldtriptoolbox.org/reference/ft_multiploter
"""
def __init__(self, data=None, axes=None, xlim=None, ylim=None):
"""Setup defaults.
Parameters
----------
data : Pandas.DataFrame
Pandas DataFrame with values indexed by electrodes.
axes : matplotlib.axes.AxesSubplot object
Axis object to render on.
"""
if axes is None:
self.figure = plt.figure()
axes = self.figure.gca()
else:
self.figure = axes.get_figure()
self.axes = axes
# Contains a list of axes used to plot data data from individual
# electrodes
self._subaxes = []
self.xlim = xlim
self.ylim = ylim
self.center = np.array((0, 0))
if isinstance(data, pd.DataFrame):
self.data = data
elif data is None:
self.data = None
else:
raise ValueError("Wrong type of value for 'data': {}".format(
type(data)))
def add_subplot_axes(self, ax, rect, axis_bgcolor=None):
"""Add subaxes to currect specified axes.
References
----------
Pablo https://stackoverflow.com/users/2309442/pablo
Pablo's answer to "Embedding small plots inside subplots in matplotlib"
https://stackoverflow.com/questions/17458580/
"""
# Modified from
# https://stackoverflow.com/questions/17458580/
box = ax.get_position()
width, height = box.width, box.height
subaxes_box = [(rect[0], rect[1]),
(rect[0] + rect[2], rect[1] + rect[3])]
subaxes_display_coords = ax.transData.transform(subaxes_box)
trans_figure = self.figure.transFigure.inverted()
subaxes_figure_coords = trans_figure.transform(subaxes_display_coords)
x, y = subaxes_figure_coords[0, :]
width, height = (subaxes_figure_coords[1, :] -
subaxes_figure_coords[0, :])
subaxes = self.figure.add_axes(
[x, y, width, height], axis_bgcolor=axis_bgcolor)
x_labelsize = subaxes.get_xticklabels()[0].get_size()
y_labelsize = subaxes.get_yticklabels()[0].get_size()
x_labelsize *= rect[2] ** 0.5
y_labelsize *= rect[3] ** 0.5
subaxes.xaxis.set_tick_params(labelsize=x_labelsize)
subaxes.yaxis.set_tick_params(labelsize=y_labelsize)
return subaxes
def draw_data(self, type='plot', width=None, height=None,
xlim=None, ylim=None,
vmin=None, vmax=None,
axis=False, yscale='linear'):
"""Draw data.
Parameters
----------
type : 'plot', 'spectrogram', optional
Type of plot
xlim : 2-tuple of floats, optional
X-axis limits
ylim : 2-tuple of floats, optional
Y-axis limits
vmin : float, optional
Minimum value for spectrogram colormap
vmax : float, optional
Maximum value for spectrogram colormap
axis : bool, optional
Determine whether the axis should be shown
"""
if self.data is not None:
if ylim is None:
if self.ylim is None and type != 'spectrogram':
ylim = self.auto_ylim(xlim, yscale=yscale)
else:
ylim = self.ylim
if xlim is None:
xlim = self.xlim
if vmin is None:
vmin = 0
# Determine a suitable width for subaxes
number_of_electrodes = len([
electrode
for electrode in self.data.columns
if electrode in ELECTRODES])
if width is None:
if number_of_electrodes > 32:
width = 0.15
else:
width = 0.25
if height is None:
height = 0.25
for electrode in self.data.columns:
if electrode in ELECTRODES:
# Axes and position
x, y = ELECTRODES[electrode]
subaxes = self.add_subplot_axes(
self.axes,
[x - width / 2, y - height / 2, width, height],
axis_bgcolor='w')
# Actual data plot
if type == 'plot':
self.data.ix[:, electrode].plot(
ax=subaxes, xlim=xlim, ylim=ylim)
if not axis:
# x-axis
trans = transforms.blended_transform_factory(
subaxes.transAxes, subaxes.transData)
line = lines.Line2D(
(0, 1), (0, 0),
transform=trans, color=(0, 0, 0))
subaxes.add_line(line)
trans = transforms.blended_transform_factory(
subaxes.transAxes, subaxes.transAxes)
line = lines.Line2D(
(0, 0), (0, 1),
transform=trans, color=(0, 0, 0))
subaxes.add_line(line)
elif type == 'spectrogram':
spectrum, frequencies, midpoints, axes = plt.specgram(
self.data.ix[:, electrode],
Fs=self.data.sampling_rate,
vmin=vmin,
vmax=vmax,
axes=subaxes)
# Adjust axis around spectrogram image.
if xlim is None:
xlim = midpoints[0], midpoints[-1]
subaxes.set_xlim(xlim)
if ylim is None:
ylim = frequencies[0], frequencies[-1]
subaxes.set_ylim(ylim)
else:
raise ValueError("Wrong value for 'type' argument")
if not axis:
subaxes.set_axis_off()
# Annotation
# http://matplotlib.org/users/transforms_tutorial.html
subaxes.text(0.5, 0.95, electrode,
transform=subaxes.transAxes,
fontweight='bold', va='top', ha='center')
subaxes.set_yticklabels([])
subaxes.set_xticklabels([])
self._subaxes.append(subaxes)
@property
def xlim(self):
"""Return xlim for subplots."""
lim = [ax.get_xlim() for ax in self._subaxes]
if lim == []:
lim = None
return lim
@xlim.setter
def xlim(self, left=None, right=None):
"""Set x-axis limits on all subplots."""
for ax in self._subaxes:
ax.set_xlim(left, right)
self.figure.canvas.draw()
@property
def ylim(self):
"""Return ylim for subplots."""
lim = [ax.get_ylim() for ax in self._subaxes]
if lim == []:
lim = None
return lim
@ylim.setter
def ylim(self, bottom=None, top=None):
"""Set y-axis limits on all subplots."""
for ax in self._subaxes:
ax.set_ylim(bottom, top)
self.figure.canvas.draw()
@property
def yscale(self):
"""Return yscale for subplots."""
yscales = [ax.get_yscale() for ax in self._subaxes]
return yscales
@yscale.setter
def yscale(self, value='linear'):
"""Set y-axis limits on all subplots."""
for ax in self._subaxes:
ax.set_yscale(value)
self.figure.canvas.draw()
def auto_ylim(self, xlim=None, yscale='linear'):
"""Return an estimate for a good ylim.
Parameters
----------
xlim : 2-tuple, optional
Limits in (the index of) the data from where the scaling should be
computed.
yscale : linear or log, optional
Scaling of y-axis.
"""
electrodes = [col for col in self.data.columns
if col in ELECTRODES]
if xlim is None:
data = self.data.ix[:, electrodes]
else:
indices = ((self.data.index >= xlim[0]) &
(self.data.index <= xlim[1]))
data = self.data.ix[indices, electrodes]
min_data = data.min().min()
max_data = data.max().max()
abs_max = max(abs(min_data), max_data)
if yscale == 'linear' or yscale == 'symlog':
if min_data >= 0:
ylim = 0, max_data
else:
ylim = -abs_max, abs_max
elif yscale == 'log':
if min_data > 0:
ylim = min_data, max_data
else:
pseudo_zero = abs_max * 10 ** -5
ylim = pseudo_zero, abs_max
else:
raise ValueError('Wrong value to yscale: {}'.format(yscale))
return ylim
def draw(self, type='plot', title=None, xlim=None, ylim=None,
vmin=None, vmax=None,
axis=False, yscale='linear'):
"""Draw all components in multiplot including the data.
Parameters
----------
title : str, optional
Title to put on the plot
xlim : tuple of floats, optional
X-axis limits used for each individual plots
ylim : tuple of floats, optional
Y-axis limits used for each individual plots
"""
self.axes.axis((-1.2, 1.2, -1.2, 1.2))
self.draw_head()
self.draw_inner_head()
self.draw_nose()
self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin,
vmax=vmax, axis=axis, yscale=yscale)
if title is not None:
self.axes.set_title(title)
self.yscale = yscale
def topoplot(data=None, axes=None, method='linear', number_of_contours=10,
title=None, xlim=None, ylim=None):
"""Plot topographic map of the scalp in 2-D circular view.
Draw the colored scalp map based on data in a Pandas Series where
the values are indexed according to electrode name.
Parameters
----------
data : pandas.Series or pandas.DataFrame, optional
Series with values and indexed by electrode names.
methods : str, optional
Interpolation method
number_of_contours : int
Number of contours in the colored plot.
xlim : 2-tuple of floats, optional
Limits of x-axis in multiplot
ylim : 2-tuple of floats, optional
Limits of y-axis in multiplot
References
----------
https://github.com/compmem/ptsa/blob/master/ptsa/plotting/topo.py
http://sccn.ucsd.edu/~jung/tutorial/topoplot.htm
Examples
--------
>>> import matplotlib.pyplot as plt
>>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}
>>> plt.ion()
>>> topo_plot = topoplot(data)
"""
if isinstance(data, pd.Series) or isinstance(data, dict) or data is None:
topo_plot = TopoPlot(data=data, axes=axes)
topo_plot.draw(title=title, method=method,
number_of_contours=number_of_contours)
return topo_plot
elif isinstance(data, pd.DataFrame):
multi_plot = MultiPlot(data=data, axes=axes)
multi_plot.draw(title=title, xlim=xlim, ylim=ylim)
return multi_plot
def show():
"""Show plot."""
plt.show()
def main(args):
"""Handle command-line interface to topographic plot."""
xlim = args['--xlim']
if args['--xlim'] is not None:
xlim = [float(lim) for lim in xlim.split(',')]
if args['<file>'] is None:
topoplot()
else:
filename = args['<file>']
if filename.lower().endswith('.csv'):
from .core import read_csv
df = read_csv(filename, index_col=0)
if args['--transpose']:
df = df.T
if args['--sample-index'] is None:
if args['--center'] is not None:
df = df.center()
topoplot(df, xlim=xlim)
else:
sample_index = int(args['--sample-index'])
series = df.iloc[sample_index - 1, :]
topoplot(series)
else:
exit('Only csv files handled')
plt.show()
if __name__ == '__main__':
from docopt import docopt
main(docopt(__doc__))
|
normal
|
{
"blob_id": "5bd7160b6b2e283e221aeb0a6913e6d13511c1db",
"index": 7073,
"step-1": "<mask token>\n\n\nclass TopoPlot(object):\n <mask token>\n\n def __init__(self, data=None, axes=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.Series or dict\n Pandas Series with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self.center = np.array((0, 0))\n if isinstance(data, dict):\n self.data = pd.Series(data)\n elif isinstance(data, pd.Series):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n <mask token>\n\n def draw_electrodes(self):\n \"\"\"Draw electrodes.\"\"\"\n for electrode, position in ELECTRODES.items():\n circle = plt.Circle(self.center + position, radius=0.04, fill=\n True, facecolor=(1, 1, 1))\n self.axes.add_patch(circle)\n position = self.center + position\n self.axes.text(position[0], position[1], electrode,\n verticalalignment='center', horizontalalignment='center',\n size=6)\n\n def draw_head(self):\n \"\"\"Draw outer head.\"\"\"\n circle = plt.Circle(self.center, radius=1, fill=False)\n self.axes.add_patch(circle)\n <mask token>\n <mask token>\n\n def draw_data(self, method='linear', number_of_contours=10):\n \"\"\"Draw countours from provided data.\"\"\"\n if self.data is not None:\n xi, yi = np.mgrid[-1:1:100.0j, -1:1:100.0j]\n points = []\n for electrode in self.data.index:\n name = TopoPlot.normalize_electrode_name(electrode)\n points.append(ELECTRODES[name])\n zi = griddata(points, self.data.values, (xi, yi), method=method)\n if number_of_contours is None:\n number_of_contours = 10\n plt.contourf(xi, yi, zi, number_of_contours)\n\n def draw(self, title=None, method='linear', number_of_contours=None):\n \"\"\"Draw all components in topoplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n methods : str, optional\n Interpolation method\n number_of_contours : int\n Number of contours in the colored plot.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\n >>> plt.ion()\n >>> topo_plot = TopoPlot(data)\n >>> topo_plot.draw()\n\n \"\"\"\n self.draw_head()\n self.draw_inner_head()\n self.draw_electrodes()\n self.draw_nose()\n self.draw_data(method=method, number_of_contours=number_of_contours)\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.axes.axis('equal')\n if title is not None:\n self.axes.set_title(title)\n\n\nclass MultiPlot(TopoPlot):\n \"\"\"Multiple plots organized topographically.\n\n References\n ----------\n http://www.fieldtriptoolbox.org/reference/ft_multiploter\n\n \"\"\"\n\n def __init__(self, data=None, axes=None, xlim=None, ylim=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.DataFrame\n Pandas DataFrame with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self._subaxes = []\n self.xlim = xlim\n self.ylim = ylim\n self.center = np.array((0, 0))\n if isinstance(data, pd.DataFrame):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n def add_subplot_axes(self, ax, rect, axis_bgcolor=None):\n \"\"\"Add subaxes to currect specified axes.\n\n References\n ----------\n Pablo https://stackoverflow.com/users/2309442/pablo\n\n Pablo's answer to \"Embedding small plots inside subplots in matplotlib\"\n https://stackoverflow.com/questions/17458580/\n\n \"\"\"\n box = ax.get_position()\n width, height = box.width, box.height\n subaxes_box = [(rect[0], rect[1]), (rect[0] + rect[2], rect[1] +\n rect[3])]\n subaxes_display_coords = ax.transData.transform(subaxes_box)\n trans_figure = self.figure.transFigure.inverted()\n subaxes_figure_coords = trans_figure.transform(subaxes_display_coords)\n x, y = subaxes_figure_coords[0, :]\n width, height = subaxes_figure_coords[1, :] - subaxes_figure_coords[\n 0, :]\n subaxes = self.figure.add_axes([x, y, width, height], axis_bgcolor=\n axis_bgcolor)\n x_labelsize = subaxes.get_xticklabels()[0].get_size()\n y_labelsize = subaxes.get_yticklabels()[0].get_size()\n x_labelsize *= rect[2] ** 0.5\n y_labelsize *= rect[3] ** 0.5\n subaxes.xaxis.set_tick_params(labelsize=x_labelsize)\n subaxes.yaxis.set_tick_params(labelsize=y_labelsize)\n return subaxes\n\n def draw_data(self, type='plot', width=None, height=None, xlim=None,\n ylim=None, vmin=None, vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw data.\n\n Parameters\n ----------\n type : 'plot', 'spectrogram', optional\n Type of plot\n xlim : 2-tuple of floats, optional\n X-axis limits\n ylim : 2-tuple of floats, optional\n Y-axis limits\n vmin : float, optional\n Minimum value for spectrogram colormap\n vmax : float, optional\n Maximum value for spectrogram colormap\n axis : bool, optional\n Determine whether the axis should be shown\n\n \"\"\"\n if self.data is not None:\n if ylim is None:\n if self.ylim is None and type != 'spectrogram':\n ylim = self.auto_ylim(xlim, yscale=yscale)\n else:\n ylim = self.ylim\n if xlim is None:\n xlim = self.xlim\n if vmin is None:\n vmin = 0\n number_of_electrodes = len([electrode for electrode in self.\n data.columns if electrode in ELECTRODES])\n if width is None:\n if number_of_electrodes > 32:\n width = 0.15\n else:\n width = 0.25\n if height is None:\n height = 0.25\n for electrode in self.data.columns:\n if electrode in ELECTRODES:\n x, y = ELECTRODES[electrode]\n subaxes = self.add_subplot_axes(self.axes, [x - width /\n 2, y - height / 2, width, height], axis_bgcolor='w')\n if type == 'plot':\n self.data.ix[:, electrode].plot(ax=subaxes, xlim=\n xlim, ylim=ylim)\n if not axis:\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transData)\n line = lines.Line2D((0, 1), (0, 0), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transAxes)\n line = lines.Line2D((0, 0), (0, 1), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n elif type == 'spectrogram':\n spectrum, frequencies, midpoints, axes = plt.specgram(\n self.data.ix[:, electrode], Fs=self.data.\n sampling_rate, vmin=vmin, vmax=vmax, axes=subaxes)\n if xlim is None:\n xlim = midpoints[0], midpoints[-1]\n subaxes.set_xlim(xlim)\n if ylim is None:\n ylim = frequencies[0], frequencies[-1]\n subaxes.set_ylim(ylim)\n else:\n raise ValueError(\"Wrong value for 'type' argument\")\n if not axis:\n subaxes.set_axis_off()\n subaxes.text(0.5, 0.95, electrode, transform=subaxes.\n transAxes, fontweight='bold', va='top', ha='center')\n subaxes.set_yticklabels([])\n subaxes.set_xticklabels([])\n self._subaxes.append(subaxes)\n\n @property\n def xlim(self):\n \"\"\"Return xlim for subplots.\"\"\"\n lim = [ax.get_xlim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @xlim.setter\n def xlim(self, left=None, right=None):\n \"\"\"Set x-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_xlim(left, right)\n self.figure.canvas.draw()\n\n @property\n def ylim(self):\n \"\"\"Return ylim for subplots.\"\"\"\n lim = [ax.get_ylim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @ylim.setter\n def ylim(self, bottom=None, top=None):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_ylim(bottom, top)\n self.figure.canvas.draw()\n\n @property\n def yscale(self):\n \"\"\"Return yscale for subplots.\"\"\"\n yscales = [ax.get_yscale() for ax in self._subaxes]\n return yscales\n\n @yscale.setter\n def yscale(self, value='linear'):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_yscale(value)\n self.figure.canvas.draw()\n\n def auto_ylim(self, xlim=None, yscale='linear'):\n \"\"\"Return an estimate for a good ylim.\n\n Parameters\n ----------\n xlim : 2-tuple, optional\n Limits in (the index of) the data from where the scaling should be\n computed.\n yscale : linear or log, optional\n Scaling of y-axis.\n\n \"\"\"\n electrodes = [col for col in self.data.columns if col in ELECTRODES]\n if xlim is None:\n data = self.data.ix[:, electrodes]\n else:\n indices = (self.data.index >= xlim[0]) & (self.data.index <=\n xlim[1])\n data = self.data.ix[indices, electrodes]\n min_data = data.min().min()\n max_data = data.max().max()\n abs_max = max(abs(min_data), max_data)\n if yscale == 'linear' or yscale == 'symlog':\n if min_data >= 0:\n ylim = 0, max_data\n else:\n ylim = -abs_max, abs_max\n elif yscale == 'log':\n if min_data > 0:\n ylim = min_data, max_data\n else:\n pseudo_zero = abs_max * 10 ** -5\n ylim = pseudo_zero, abs_max\n else:\n raise ValueError('Wrong value to yscale: {}'.format(yscale))\n return ylim\n\n def draw(self, type='plot', title=None, xlim=None, ylim=None, vmin=None,\n vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw all components in multiplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n xlim : tuple of floats, optional\n X-axis limits used for each individual plots\n ylim : tuple of floats, optional\n Y-axis limits used for each individual plots\n\n \"\"\"\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.draw_head()\n self.draw_inner_head()\n self.draw_nose()\n self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin, vmax=\n vmax, axis=axis, yscale=yscale)\n if title is not None:\n self.axes.set_title(title)\n self.yscale = yscale\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TopoPlot(object):\n <mask token>\n\n def __init__(self, data=None, axes=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.Series or dict\n Pandas Series with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self.center = np.array((0, 0))\n if isinstance(data, dict):\n self.data = pd.Series(data)\n elif isinstance(data, pd.Series):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n @staticmethod\n def normalize_electrode_name(name):\n \"\"\"Normalize electrode name.\n\n Parameters\n ----------\n name : str\n Name of electrode to be normalized\n\n Examples\n --------\n >>> TopoPlot.normalize_electrode_name('fpz')\n 'Fpz'\n\n >>> TopoPlot.normalize_electrode_name('AFZ')\n 'AFz'\n\n \"\"\"\n return name.upper().replace('FPZ', 'Fpz').replace('Z', 'z')\n\n def draw_electrodes(self):\n \"\"\"Draw electrodes.\"\"\"\n for electrode, position in ELECTRODES.items():\n circle = plt.Circle(self.center + position, radius=0.04, fill=\n True, facecolor=(1, 1, 1))\n self.axes.add_patch(circle)\n position = self.center + position\n self.axes.text(position[0], position[1], electrode,\n verticalalignment='center', horizontalalignment='center',\n size=6)\n\n def draw_head(self):\n \"\"\"Draw outer head.\"\"\"\n circle = plt.Circle(self.center, radius=1, fill=False)\n self.axes.add_patch(circle)\n\n def draw_inner_head(self):\n \"\"\"Draw inner head.\"\"\"\n circle = plt.Circle(self.center, radius=0.8, fill=False)\n self.axes.add_patch(circle)\n\n def draw_nose(self):\n \"\"\"Draw nose.\"\"\"\n nose = plt.Line2D([sin(-0.1), 0, sin(0.1)], [cos(-0.1), 1.1, cos(\n 0.1)], color=(0, 0, 0))\n self.axes.add_line(nose)\n\n def draw_data(self, method='linear', number_of_contours=10):\n \"\"\"Draw countours from provided data.\"\"\"\n if self.data is not None:\n xi, yi = np.mgrid[-1:1:100.0j, -1:1:100.0j]\n points = []\n for electrode in self.data.index:\n name = TopoPlot.normalize_electrode_name(electrode)\n points.append(ELECTRODES[name])\n zi = griddata(points, self.data.values, (xi, yi), method=method)\n if number_of_contours is None:\n number_of_contours = 10\n plt.contourf(xi, yi, zi, number_of_contours)\n\n def draw(self, title=None, method='linear', number_of_contours=None):\n \"\"\"Draw all components in topoplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n methods : str, optional\n Interpolation method\n number_of_contours : int\n Number of contours in the colored plot.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\n >>> plt.ion()\n >>> topo_plot = TopoPlot(data)\n >>> topo_plot.draw()\n\n \"\"\"\n self.draw_head()\n self.draw_inner_head()\n self.draw_electrodes()\n self.draw_nose()\n self.draw_data(method=method, number_of_contours=number_of_contours)\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.axes.axis('equal')\n if title is not None:\n self.axes.set_title(title)\n\n\nclass MultiPlot(TopoPlot):\n \"\"\"Multiple plots organized topographically.\n\n References\n ----------\n http://www.fieldtriptoolbox.org/reference/ft_multiploter\n\n \"\"\"\n\n def __init__(self, data=None, axes=None, xlim=None, ylim=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.DataFrame\n Pandas DataFrame with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self._subaxes = []\n self.xlim = xlim\n self.ylim = ylim\n self.center = np.array((0, 0))\n if isinstance(data, pd.DataFrame):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n def add_subplot_axes(self, ax, rect, axis_bgcolor=None):\n \"\"\"Add subaxes to currect specified axes.\n\n References\n ----------\n Pablo https://stackoverflow.com/users/2309442/pablo\n\n Pablo's answer to \"Embedding small plots inside subplots in matplotlib\"\n https://stackoverflow.com/questions/17458580/\n\n \"\"\"\n box = ax.get_position()\n width, height = box.width, box.height\n subaxes_box = [(rect[0], rect[1]), (rect[0] + rect[2], rect[1] +\n rect[3])]\n subaxes_display_coords = ax.transData.transform(subaxes_box)\n trans_figure = self.figure.transFigure.inverted()\n subaxes_figure_coords = trans_figure.transform(subaxes_display_coords)\n x, y = subaxes_figure_coords[0, :]\n width, height = subaxes_figure_coords[1, :] - subaxes_figure_coords[\n 0, :]\n subaxes = self.figure.add_axes([x, y, width, height], axis_bgcolor=\n axis_bgcolor)\n x_labelsize = subaxes.get_xticklabels()[0].get_size()\n y_labelsize = subaxes.get_yticklabels()[0].get_size()\n x_labelsize *= rect[2] ** 0.5\n y_labelsize *= rect[3] ** 0.5\n subaxes.xaxis.set_tick_params(labelsize=x_labelsize)\n subaxes.yaxis.set_tick_params(labelsize=y_labelsize)\n return subaxes\n\n def draw_data(self, type='plot', width=None, height=None, xlim=None,\n ylim=None, vmin=None, vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw data.\n\n Parameters\n ----------\n type : 'plot', 'spectrogram', optional\n Type of plot\n xlim : 2-tuple of floats, optional\n X-axis limits\n ylim : 2-tuple of floats, optional\n Y-axis limits\n vmin : float, optional\n Minimum value for spectrogram colormap\n vmax : float, optional\n Maximum value for spectrogram colormap\n axis : bool, optional\n Determine whether the axis should be shown\n\n \"\"\"\n if self.data is not None:\n if ylim is None:\n if self.ylim is None and type != 'spectrogram':\n ylim = self.auto_ylim(xlim, yscale=yscale)\n else:\n ylim = self.ylim\n if xlim is None:\n xlim = self.xlim\n if vmin is None:\n vmin = 0\n number_of_electrodes = len([electrode for electrode in self.\n data.columns if electrode in ELECTRODES])\n if width is None:\n if number_of_electrodes > 32:\n width = 0.15\n else:\n width = 0.25\n if height is None:\n height = 0.25\n for electrode in self.data.columns:\n if electrode in ELECTRODES:\n x, y = ELECTRODES[electrode]\n subaxes = self.add_subplot_axes(self.axes, [x - width /\n 2, y - height / 2, width, height], axis_bgcolor='w')\n if type == 'plot':\n self.data.ix[:, electrode].plot(ax=subaxes, xlim=\n xlim, ylim=ylim)\n if not axis:\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transData)\n line = lines.Line2D((0, 1), (0, 0), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transAxes)\n line = lines.Line2D((0, 0), (0, 1), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n elif type == 'spectrogram':\n spectrum, frequencies, midpoints, axes = plt.specgram(\n self.data.ix[:, electrode], Fs=self.data.\n sampling_rate, vmin=vmin, vmax=vmax, axes=subaxes)\n if xlim is None:\n xlim = midpoints[0], midpoints[-1]\n subaxes.set_xlim(xlim)\n if ylim is None:\n ylim = frequencies[0], frequencies[-1]\n subaxes.set_ylim(ylim)\n else:\n raise ValueError(\"Wrong value for 'type' argument\")\n if not axis:\n subaxes.set_axis_off()\n subaxes.text(0.5, 0.95, electrode, transform=subaxes.\n transAxes, fontweight='bold', va='top', ha='center')\n subaxes.set_yticklabels([])\n subaxes.set_xticklabels([])\n self._subaxes.append(subaxes)\n\n @property\n def xlim(self):\n \"\"\"Return xlim for subplots.\"\"\"\n lim = [ax.get_xlim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @xlim.setter\n def xlim(self, left=None, right=None):\n \"\"\"Set x-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_xlim(left, right)\n self.figure.canvas.draw()\n\n @property\n def ylim(self):\n \"\"\"Return ylim for subplots.\"\"\"\n lim = [ax.get_ylim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @ylim.setter\n def ylim(self, bottom=None, top=None):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_ylim(bottom, top)\n self.figure.canvas.draw()\n\n @property\n def yscale(self):\n \"\"\"Return yscale for subplots.\"\"\"\n yscales = [ax.get_yscale() for ax in self._subaxes]\n return yscales\n\n @yscale.setter\n def yscale(self, value='linear'):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_yscale(value)\n self.figure.canvas.draw()\n\n def auto_ylim(self, xlim=None, yscale='linear'):\n \"\"\"Return an estimate for a good ylim.\n\n Parameters\n ----------\n xlim : 2-tuple, optional\n Limits in (the index of) the data from where the scaling should be\n computed.\n yscale : linear or log, optional\n Scaling of y-axis.\n\n \"\"\"\n electrodes = [col for col in self.data.columns if col in ELECTRODES]\n if xlim is None:\n data = self.data.ix[:, electrodes]\n else:\n indices = (self.data.index >= xlim[0]) & (self.data.index <=\n xlim[1])\n data = self.data.ix[indices, electrodes]\n min_data = data.min().min()\n max_data = data.max().max()\n abs_max = max(abs(min_data), max_data)\n if yscale == 'linear' or yscale == 'symlog':\n if min_data >= 0:\n ylim = 0, max_data\n else:\n ylim = -abs_max, abs_max\n elif yscale == 'log':\n if min_data > 0:\n ylim = min_data, max_data\n else:\n pseudo_zero = abs_max * 10 ** -5\n ylim = pseudo_zero, abs_max\n else:\n raise ValueError('Wrong value to yscale: {}'.format(yscale))\n return ylim\n\n def draw(self, type='plot', title=None, xlim=None, ylim=None, vmin=None,\n vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw all components in multiplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n xlim : tuple of floats, optional\n X-axis limits used for each individual plots\n ylim : tuple of floats, optional\n Y-axis limits used for each individual plots\n\n \"\"\"\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.draw_head()\n self.draw_inner_head()\n self.draw_nose()\n self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin, vmax=\n vmax, axis=axis, yscale=yscale)\n if title is not None:\n self.axes.set_title(title)\n self.yscale = yscale\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TopoPlot(object):\n \"\"\"Topographic plot.\"\"\"\n\n def __init__(self, data=None, axes=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.Series or dict\n Pandas Series with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self.center = np.array((0, 0))\n if isinstance(data, dict):\n self.data = pd.Series(data)\n elif isinstance(data, pd.Series):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n @staticmethod\n def normalize_electrode_name(name):\n \"\"\"Normalize electrode name.\n\n Parameters\n ----------\n name : str\n Name of electrode to be normalized\n\n Examples\n --------\n >>> TopoPlot.normalize_electrode_name('fpz')\n 'Fpz'\n\n >>> TopoPlot.normalize_electrode_name('AFZ')\n 'AFz'\n\n \"\"\"\n return name.upper().replace('FPZ', 'Fpz').replace('Z', 'z')\n\n def draw_electrodes(self):\n \"\"\"Draw electrodes.\"\"\"\n for electrode, position in ELECTRODES.items():\n circle = plt.Circle(self.center + position, radius=0.04, fill=\n True, facecolor=(1, 1, 1))\n self.axes.add_patch(circle)\n position = self.center + position\n self.axes.text(position[0], position[1], electrode,\n verticalalignment='center', horizontalalignment='center',\n size=6)\n\n def draw_head(self):\n \"\"\"Draw outer head.\"\"\"\n circle = plt.Circle(self.center, radius=1, fill=False)\n self.axes.add_patch(circle)\n\n def draw_inner_head(self):\n \"\"\"Draw inner head.\"\"\"\n circle = plt.Circle(self.center, radius=0.8, fill=False)\n self.axes.add_patch(circle)\n\n def draw_nose(self):\n \"\"\"Draw nose.\"\"\"\n nose = plt.Line2D([sin(-0.1), 0, sin(0.1)], [cos(-0.1), 1.1, cos(\n 0.1)], color=(0, 0, 0))\n self.axes.add_line(nose)\n\n def draw_data(self, method='linear', number_of_contours=10):\n \"\"\"Draw countours from provided data.\"\"\"\n if self.data is not None:\n xi, yi = np.mgrid[-1:1:100.0j, -1:1:100.0j]\n points = []\n for electrode in self.data.index:\n name = TopoPlot.normalize_electrode_name(electrode)\n points.append(ELECTRODES[name])\n zi = griddata(points, self.data.values, (xi, yi), method=method)\n if number_of_contours is None:\n number_of_contours = 10\n plt.contourf(xi, yi, zi, number_of_contours)\n\n def draw(self, title=None, method='linear', number_of_contours=None):\n \"\"\"Draw all components in topoplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n methods : str, optional\n Interpolation method\n number_of_contours : int\n Number of contours in the colored plot.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\n >>> plt.ion()\n >>> topo_plot = TopoPlot(data)\n >>> topo_plot.draw()\n\n \"\"\"\n self.draw_head()\n self.draw_inner_head()\n self.draw_electrodes()\n self.draw_nose()\n self.draw_data(method=method, number_of_contours=number_of_contours)\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.axes.axis('equal')\n if title is not None:\n self.axes.set_title(title)\n\n\nclass MultiPlot(TopoPlot):\n \"\"\"Multiple plots organized topographically.\n\n References\n ----------\n http://www.fieldtriptoolbox.org/reference/ft_multiploter\n\n \"\"\"\n\n def __init__(self, data=None, axes=None, xlim=None, ylim=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.DataFrame\n Pandas DataFrame with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self._subaxes = []\n self.xlim = xlim\n self.ylim = ylim\n self.center = np.array((0, 0))\n if isinstance(data, pd.DataFrame):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n def add_subplot_axes(self, ax, rect, axis_bgcolor=None):\n \"\"\"Add subaxes to currect specified axes.\n\n References\n ----------\n Pablo https://stackoverflow.com/users/2309442/pablo\n\n Pablo's answer to \"Embedding small plots inside subplots in matplotlib\"\n https://stackoverflow.com/questions/17458580/\n\n \"\"\"\n box = ax.get_position()\n width, height = box.width, box.height\n subaxes_box = [(rect[0], rect[1]), (rect[0] + rect[2], rect[1] +\n rect[3])]\n subaxes_display_coords = ax.transData.transform(subaxes_box)\n trans_figure = self.figure.transFigure.inverted()\n subaxes_figure_coords = trans_figure.transform(subaxes_display_coords)\n x, y = subaxes_figure_coords[0, :]\n width, height = subaxes_figure_coords[1, :] - subaxes_figure_coords[\n 0, :]\n subaxes = self.figure.add_axes([x, y, width, height], axis_bgcolor=\n axis_bgcolor)\n x_labelsize = subaxes.get_xticklabels()[0].get_size()\n y_labelsize = subaxes.get_yticklabels()[0].get_size()\n x_labelsize *= rect[2] ** 0.5\n y_labelsize *= rect[3] ** 0.5\n subaxes.xaxis.set_tick_params(labelsize=x_labelsize)\n subaxes.yaxis.set_tick_params(labelsize=y_labelsize)\n return subaxes\n\n def draw_data(self, type='plot', width=None, height=None, xlim=None,\n ylim=None, vmin=None, vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw data.\n\n Parameters\n ----------\n type : 'plot', 'spectrogram', optional\n Type of plot\n xlim : 2-tuple of floats, optional\n X-axis limits\n ylim : 2-tuple of floats, optional\n Y-axis limits\n vmin : float, optional\n Minimum value for spectrogram colormap\n vmax : float, optional\n Maximum value for spectrogram colormap\n axis : bool, optional\n Determine whether the axis should be shown\n\n \"\"\"\n if self.data is not None:\n if ylim is None:\n if self.ylim is None and type != 'spectrogram':\n ylim = self.auto_ylim(xlim, yscale=yscale)\n else:\n ylim = self.ylim\n if xlim is None:\n xlim = self.xlim\n if vmin is None:\n vmin = 0\n number_of_electrodes = len([electrode for electrode in self.\n data.columns if electrode in ELECTRODES])\n if width is None:\n if number_of_electrodes > 32:\n width = 0.15\n else:\n width = 0.25\n if height is None:\n height = 0.25\n for electrode in self.data.columns:\n if electrode in ELECTRODES:\n x, y = ELECTRODES[electrode]\n subaxes = self.add_subplot_axes(self.axes, [x - width /\n 2, y - height / 2, width, height], axis_bgcolor='w')\n if type == 'plot':\n self.data.ix[:, electrode].plot(ax=subaxes, xlim=\n xlim, ylim=ylim)\n if not axis:\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transData)\n line = lines.Line2D((0, 1), (0, 0), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transAxes)\n line = lines.Line2D((0, 0), (0, 1), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n elif type == 'spectrogram':\n spectrum, frequencies, midpoints, axes = plt.specgram(\n self.data.ix[:, electrode], Fs=self.data.\n sampling_rate, vmin=vmin, vmax=vmax, axes=subaxes)\n if xlim is None:\n xlim = midpoints[0], midpoints[-1]\n subaxes.set_xlim(xlim)\n if ylim is None:\n ylim = frequencies[0], frequencies[-1]\n subaxes.set_ylim(ylim)\n else:\n raise ValueError(\"Wrong value for 'type' argument\")\n if not axis:\n subaxes.set_axis_off()\n subaxes.text(0.5, 0.95, electrode, transform=subaxes.\n transAxes, fontweight='bold', va='top', ha='center')\n subaxes.set_yticklabels([])\n subaxes.set_xticklabels([])\n self._subaxes.append(subaxes)\n\n @property\n def xlim(self):\n \"\"\"Return xlim for subplots.\"\"\"\n lim = [ax.get_xlim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @xlim.setter\n def xlim(self, left=None, right=None):\n \"\"\"Set x-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_xlim(left, right)\n self.figure.canvas.draw()\n\n @property\n def ylim(self):\n \"\"\"Return ylim for subplots.\"\"\"\n lim = [ax.get_ylim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @ylim.setter\n def ylim(self, bottom=None, top=None):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_ylim(bottom, top)\n self.figure.canvas.draw()\n\n @property\n def yscale(self):\n \"\"\"Return yscale for subplots.\"\"\"\n yscales = [ax.get_yscale() for ax in self._subaxes]\n return yscales\n\n @yscale.setter\n def yscale(self, value='linear'):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_yscale(value)\n self.figure.canvas.draw()\n\n def auto_ylim(self, xlim=None, yscale='linear'):\n \"\"\"Return an estimate for a good ylim.\n\n Parameters\n ----------\n xlim : 2-tuple, optional\n Limits in (the index of) the data from where the scaling should be\n computed.\n yscale : linear or log, optional\n Scaling of y-axis.\n\n \"\"\"\n electrodes = [col for col in self.data.columns if col in ELECTRODES]\n if xlim is None:\n data = self.data.ix[:, electrodes]\n else:\n indices = (self.data.index >= xlim[0]) & (self.data.index <=\n xlim[1])\n data = self.data.ix[indices, electrodes]\n min_data = data.min().min()\n max_data = data.max().max()\n abs_max = max(abs(min_data), max_data)\n if yscale == 'linear' or yscale == 'symlog':\n if min_data >= 0:\n ylim = 0, max_data\n else:\n ylim = -abs_max, abs_max\n elif yscale == 'log':\n if min_data > 0:\n ylim = min_data, max_data\n else:\n pseudo_zero = abs_max * 10 ** -5\n ylim = pseudo_zero, abs_max\n else:\n raise ValueError('Wrong value to yscale: {}'.format(yscale))\n return ylim\n\n def draw(self, type='plot', title=None, xlim=None, ylim=None, vmin=None,\n vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw all components in multiplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n xlim : tuple of floats, optional\n X-axis limits used for each individual plots\n ylim : tuple of floats, optional\n Y-axis limits used for each individual plots\n\n \"\"\"\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.draw_head()\n self.draw_inner_head()\n self.draw_nose()\n self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin, vmax=\n vmax, axis=axis, yscale=yscale)\n if title is not None:\n self.axes.set_title(title)\n self.yscale = yscale\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TopoPlot(object):\n \"\"\"Topographic plot.\"\"\"\n\n def __init__(self, data=None, axes=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.Series or dict\n Pandas Series with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self.center = np.array((0, 0))\n if isinstance(data, dict):\n self.data = pd.Series(data)\n elif isinstance(data, pd.Series):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n @staticmethod\n def normalize_electrode_name(name):\n \"\"\"Normalize electrode name.\n\n Parameters\n ----------\n name : str\n Name of electrode to be normalized\n\n Examples\n --------\n >>> TopoPlot.normalize_electrode_name('fpz')\n 'Fpz'\n\n >>> TopoPlot.normalize_electrode_name('AFZ')\n 'AFz'\n\n \"\"\"\n return name.upper().replace('FPZ', 'Fpz').replace('Z', 'z')\n\n def draw_electrodes(self):\n \"\"\"Draw electrodes.\"\"\"\n for electrode, position in ELECTRODES.items():\n circle = plt.Circle(self.center + position, radius=0.04, fill=\n True, facecolor=(1, 1, 1))\n self.axes.add_patch(circle)\n position = self.center + position\n self.axes.text(position[0], position[1], electrode,\n verticalalignment='center', horizontalalignment='center',\n size=6)\n\n def draw_head(self):\n \"\"\"Draw outer head.\"\"\"\n circle = plt.Circle(self.center, radius=1, fill=False)\n self.axes.add_patch(circle)\n\n def draw_inner_head(self):\n \"\"\"Draw inner head.\"\"\"\n circle = plt.Circle(self.center, radius=0.8, fill=False)\n self.axes.add_patch(circle)\n\n def draw_nose(self):\n \"\"\"Draw nose.\"\"\"\n nose = plt.Line2D([sin(-0.1), 0, sin(0.1)], [cos(-0.1), 1.1, cos(\n 0.1)], color=(0, 0, 0))\n self.axes.add_line(nose)\n\n def draw_data(self, method='linear', number_of_contours=10):\n \"\"\"Draw countours from provided data.\"\"\"\n if self.data is not None:\n xi, yi = np.mgrid[-1:1:100.0j, -1:1:100.0j]\n points = []\n for electrode in self.data.index:\n name = TopoPlot.normalize_electrode_name(electrode)\n points.append(ELECTRODES[name])\n zi = griddata(points, self.data.values, (xi, yi), method=method)\n if number_of_contours is None:\n number_of_contours = 10\n plt.contourf(xi, yi, zi, number_of_contours)\n\n def draw(self, title=None, method='linear', number_of_contours=None):\n \"\"\"Draw all components in topoplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n methods : str, optional\n Interpolation method\n number_of_contours : int\n Number of contours in the colored plot.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\n >>> plt.ion()\n >>> topo_plot = TopoPlot(data)\n >>> topo_plot.draw()\n\n \"\"\"\n self.draw_head()\n self.draw_inner_head()\n self.draw_electrodes()\n self.draw_nose()\n self.draw_data(method=method, number_of_contours=number_of_contours)\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.axes.axis('equal')\n if title is not None:\n self.axes.set_title(title)\n\n\nclass MultiPlot(TopoPlot):\n \"\"\"Multiple plots organized topographically.\n\n References\n ----------\n http://www.fieldtriptoolbox.org/reference/ft_multiploter\n\n \"\"\"\n\n def __init__(self, data=None, axes=None, xlim=None, ylim=None):\n \"\"\"Setup defaults.\n\n Parameters\n ----------\n data : Pandas.DataFrame\n Pandas DataFrame with values indexed by electrodes.\n axes : matplotlib.axes.AxesSubplot object\n Axis object to render on.\n\n \"\"\"\n if axes is None:\n self.figure = plt.figure()\n axes = self.figure.gca()\n else:\n self.figure = axes.get_figure()\n self.axes = axes\n self._subaxes = []\n self.xlim = xlim\n self.ylim = ylim\n self.center = np.array((0, 0))\n if isinstance(data, pd.DataFrame):\n self.data = data\n elif data is None:\n self.data = None\n else:\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\n type(data)))\n\n def add_subplot_axes(self, ax, rect, axis_bgcolor=None):\n \"\"\"Add subaxes to currect specified axes.\n\n References\n ----------\n Pablo https://stackoverflow.com/users/2309442/pablo\n\n Pablo's answer to \"Embedding small plots inside subplots in matplotlib\"\n https://stackoverflow.com/questions/17458580/\n\n \"\"\"\n box = ax.get_position()\n width, height = box.width, box.height\n subaxes_box = [(rect[0], rect[1]), (rect[0] + rect[2], rect[1] +\n rect[3])]\n subaxes_display_coords = ax.transData.transform(subaxes_box)\n trans_figure = self.figure.transFigure.inverted()\n subaxes_figure_coords = trans_figure.transform(subaxes_display_coords)\n x, y = subaxes_figure_coords[0, :]\n width, height = subaxes_figure_coords[1, :] - subaxes_figure_coords[\n 0, :]\n subaxes = self.figure.add_axes([x, y, width, height], axis_bgcolor=\n axis_bgcolor)\n x_labelsize = subaxes.get_xticklabels()[0].get_size()\n y_labelsize = subaxes.get_yticklabels()[0].get_size()\n x_labelsize *= rect[2] ** 0.5\n y_labelsize *= rect[3] ** 0.5\n subaxes.xaxis.set_tick_params(labelsize=x_labelsize)\n subaxes.yaxis.set_tick_params(labelsize=y_labelsize)\n return subaxes\n\n def draw_data(self, type='plot', width=None, height=None, xlim=None,\n ylim=None, vmin=None, vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw data.\n\n Parameters\n ----------\n type : 'plot', 'spectrogram', optional\n Type of plot\n xlim : 2-tuple of floats, optional\n X-axis limits\n ylim : 2-tuple of floats, optional\n Y-axis limits\n vmin : float, optional\n Minimum value for spectrogram colormap\n vmax : float, optional\n Maximum value for spectrogram colormap\n axis : bool, optional\n Determine whether the axis should be shown\n\n \"\"\"\n if self.data is not None:\n if ylim is None:\n if self.ylim is None and type != 'spectrogram':\n ylim = self.auto_ylim(xlim, yscale=yscale)\n else:\n ylim = self.ylim\n if xlim is None:\n xlim = self.xlim\n if vmin is None:\n vmin = 0\n number_of_electrodes = len([electrode for electrode in self.\n data.columns if electrode in ELECTRODES])\n if width is None:\n if number_of_electrodes > 32:\n width = 0.15\n else:\n width = 0.25\n if height is None:\n height = 0.25\n for electrode in self.data.columns:\n if electrode in ELECTRODES:\n x, y = ELECTRODES[electrode]\n subaxes = self.add_subplot_axes(self.axes, [x - width /\n 2, y - height / 2, width, height], axis_bgcolor='w')\n if type == 'plot':\n self.data.ix[:, electrode].plot(ax=subaxes, xlim=\n xlim, ylim=ylim)\n if not axis:\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transData)\n line = lines.Line2D((0, 1), (0, 0), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n trans = transforms.blended_transform_factory(\n subaxes.transAxes, subaxes.transAxes)\n line = lines.Line2D((0, 0), (0, 1), transform=\n trans, color=(0, 0, 0))\n subaxes.add_line(line)\n elif type == 'spectrogram':\n spectrum, frequencies, midpoints, axes = plt.specgram(\n self.data.ix[:, electrode], Fs=self.data.\n sampling_rate, vmin=vmin, vmax=vmax, axes=subaxes)\n if xlim is None:\n xlim = midpoints[0], midpoints[-1]\n subaxes.set_xlim(xlim)\n if ylim is None:\n ylim = frequencies[0], frequencies[-1]\n subaxes.set_ylim(ylim)\n else:\n raise ValueError(\"Wrong value for 'type' argument\")\n if not axis:\n subaxes.set_axis_off()\n subaxes.text(0.5, 0.95, electrode, transform=subaxes.\n transAxes, fontweight='bold', va='top', ha='center')\n subaxes.set_yticklabels([])\n subaxes.set_xticklabels([])\n self._subaxes.append(subaxes)\n\n @property\n def xlim(self):\n \"\"\"Return xlim for subplots.\"\"\"\n lim = [ax.get_xlim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @xlim.setter\n def xlim(self, left=None, right=None):\n \"\"\"Set x-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_xlim(left, right)\n self.figure.canvas.draw()\n\n @property\n def ylim(self):\n \"\"\"Return ylim for subplots.\"\"\"\n lim = [ax.get_ylim() for ax in self._subaxes]\n if lim == []:\n lim = None\n return lim\n\n @ylim.setter\n def ylim(self, bottom=None, top=None):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_ylim(bottom, top)\n self.figure.canvas.draw()\n\n @property\n def yscale(self):\n \"\"\"Return yscale for subplots.\"\"\"\n yscales = [ax.get_yscale() for ax in self._subaxes]\n return yscales\n\n @yscale.setter\n def yscale(self, value='linear'):\n \"\"\"Set y-axis limits on all subplots.\"\"\"\n for ax in self._subaxes:\n ax.set_yscale(value)\n self.figure.canvas.draw()\n\n def auto_ylim(self, xlim=None, yscale='linear'):\n \"\"\"Return an estimate for a good ylim.\n\n Parameters\n ----------\n xlim : 2-tuple, optional\n Limits in (the index of) the data from where the scaling should be\n computed.\n yscale : linear or log, optional\n Scaling of y-axis.\n\n \"\"\"\n electrodes = [col for col in self.data.columns if col in ELECTRODES]\n if xlim is None:\n data = self.data.ix[:, electrodes]\n else:\n indices = (self.data.index >= xlim[0]) & (self.data.index <=\n xlim[1])\n data = self.data.ix[indices, electrodes]\n min_data = data.min().min()\n max_data = data.max().max()\n abs_max = max(abs(min_data), max_data)\n if yscale == 'linear' or yscale == 'symlog':\n if min_data >= 0:\n ylim = 0, max_data\n else:\n ylim = -abs_max, abs_max\n elif yscale == 'log':\n if min_data > 0:\n ylim = min_data, max_data\n else:\n pseudo_zero = abs_max * 10 ** -5\n ylim = pseudo_zero, abs_max\n else:\n raise ValueError('Wrong value to yscale: {}'.format(yscale))\n return ylim\n\n def draw(self, type='plot', title=None, xlim=None, ylim=None, vmin=None,\n vmax=None, axis=False, yscale='linear'):\n \"\"\"Draw all components in multiplot including the data.\n\n Parameters\n ----------\n title : str, optional\n Title to put on the plot\n xlim : tuple of floats, optional\n X-axis limits used for each individual plots\n ylim : tuple of floats, optional\n Y-axis limits used for each individual plots\n\n \"\"\"\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\n self.draw_head()\n self.draw_inner_head()\n self.draw_nose()\n self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin, vmax=\n vmax, axis=axis, yscale=yscale)\n if title is not None:\n self.axes.set_title(title)\n self.yscale = yscale\n\n\ndef topoplot(data=None, axes=None, method='linear', number_of_contours=10,\n title=None, xlim=None, ylim=None):\n \"\"\"Plot topographic map of the scalp in 2-D circular view.\n\n Draw the colored scalp map based on data in a Pandas Series where\n the values are indexed according to electrode name.\n\n Parameters\n ----------\n data : pandas.Series or pandas.DataFrame, optional\n Series with values and indexed by electrode names.\n methods : str, optional\n Interpolation method\n number_of_contours : int\n Number of contours in the colored plot.\n xlim : 2-tuple of floats, optional\n Limits of x-axis in multiplot\n ylim : 2-tuple of floats, optional\n Limits of y-axis in multiplot\n\n References\n ----------\n https://github.com/compmem/ptsa/blob/master/ptsa/plotting/topo.py\n\n http://sccn.ucsd.edu/~jung/tutorial/topoplot.htm\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\n >>> plt.ion()\n >>> topo_plot = topoplot(data)\n\n \"\"\"\n if isinstance(data, pd.Series) or isinstance(data, dict) or data is None:\n topo_plot = TopoPlot(data=data, axes=axes)\n topo_plot.draw(title=title, method=method, number_of_contours=\n number_of_contours)\n return topo_plot\n elif isinstance(data, pd.DataFrame):\n multi_plot = MultiPlot(data=data, axes=axes)\n multi_plot.draw(title=title, xlim=xlim, ylim=ylim)\n return multi_plot\n\n\ndef show():\n \"\"\"Show plot.\"\"\"\n plt.show()\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python\r\n\"\"\"\r\nPlot EEG data.\r\n\r\nUsage:\r\n plotting.py [options] [<file>]\r\n\r\nOptions:\r\n -h --help Show this screen.\r\n --version Show version.\r\n --center Center the data before plotting\r\n --sample-index=N Row index (indexed from one).\r\n --transpose Transpose data.\r\n --xlim=lim X-axis limits.\r\n\r\nData\r\n----\r\nELECTRODES : dict\r\n Dictionary indexed by electrode name with 2D positions as values\r\n\r\nReferences\r\n----------\r\nThe five percent electrode system for high-resolution EEG and ERP\r\nmeasurement, Robert Oostenveld, Peter Praamstra.\r\n\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import, division, print_function\r\n\r\nfrom math import cos, pi, sin\r\n\r\nimport matplotlib.lines as lines\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.transforms as transforms\r\n\r\nimport numpy as np\r\n\r\nimport pandas as pd\r\n\r\nfrom scipy.interpolate import griddata\r\n\r\n\r\n__all__ = ('ELECTRODES', 'MultiPlot', 'TopoPlot', 'topoplot')\r\n\r\n\r\nELECTRODES = {\r\n 'AF3': (-0.25, 0.62),\r\n 'AF4': (0.25, 0.62),\r\n 'AF7': (0.8 * cos(0.7 * pi), 0.8 * sin(0.7 * pi)),\r\n 'AF8': (0.8 * cos(0.3 * pi), 0.8 * sin(0.3 * pi)),\r\n 'AFz': (0, 0.6),\r\n 'C1': (-0.2, 0),\r\n 'C2': (0.2, 0),\r\n 'C3': (-0.4, 0),\r\n 'C4': (0.4, 0),\r\n 'C5': (-0.6, 0),\r\n 'C6': (0.6, 0),\r\n 'CP1': (-0.18, -0.2),\r\n 'CP2': (0.18, -0.2),\r\n 'CP3': (-0.36, 0.4 * sin(1.17 * pi)),\r\n 'CP4': (0.36, 0.4 * sin(1.83 * pi)),\r\n 'CP5': (0.6 * cos(1.12 * pi), 0.6 * sin(1.12 * pi)),\r\n 'CP6': (0.6 * cos(1.88 * pi), 0.6 * sin(1.88 * pi)),\r\n 'CPz': (0, -0.2),\r\n 'Cz': (0, 0),\r\n 'F1': (-0.18, 0.4),\r\n 'F2': (0.18, 0.4),\r\n 'F3': (-0.35, 0.41),\r\n 'F4': (0.35, 0.41),\r\n 'F5': (-0.5, 0.43),\r\n 'F6': (0.5, 0.43),\r\n 'F7': (0.8 * cos(0.8 * pi), 0.8 * sin(0.8 * pi)),\r\n 'F8': (0.8 * cos(0.2 * pi), 0.8 * sin(0.2 * pi)),\r\n 'FC1': (-0.2, 0.21),\r\n 'FC2': (0.2, 0.21),\r\n 'FC3': (-0.39, 0.22),\r\n 'FC4': (0.39, 0.22),\r\n 'FC5': (-0.57, 0.23),\r\n 'FC6': (0.57, 0.23),\r\n 'FCz': (0, 0.2),\r\n 'FP1': (0.8 * cos(0.6 * pi), 0.8 * sin(0.6 * pi)),\r\n 'FP2': (0.8 * cos(0.4 * pi), 0.8 * sin(0.4 * pi)),\r\n 'Fpz': (0, 0.8),\r\n 'FT7': (0.8 * cos(0.9 * pi), 0.8 * sin(0.9 * pi)),\r\n 'FT8': (0.8 * cos(0.1 * pi), 0.8 * sin(0.1 * pi)),\r\n 'Fz': (0, 0.4),\r\n 'Iz': (0, -1),\r\n 'Nz': (0, 1),\r\n 'P1': (-0.18, -0.41),\r\n 'P2': (0.18, -0.41),\r\n 'P3': (-0.35, -0.42),\r\n 'P4': (0.35, -0.42),\r\n 'P5': (-0.5, -0.44),\r\n 'P6': (0.5, -0.44),\r\n 'P7': (0.8 * cos(1.2 * pi), 0.8 * sin(1.2 * pi)),\r\n 'P8': (0.8 * cos(1.8 * pi), 0.8 * sin(1.8 * pi)),\r\n 'PO3': (-0.24, -0.62),\r\n 'PO4': (0.24, -0.62),\r\n 'PO7': (0.8 * cos(1.3 * pi), 0.8 * sin(1.3 * pi)),\r\n 'PO8': (0.8 * cos(1.7 * pi), 0.8 * sin(1.7 * pi)),\r\n 'POz': (0, -0.6),\r\n 'Pz': (0, -0.4),\r\n 'O1': (0.8 * cos(1.4 * pi), 0.8 * sin(1.4 * pi)),\r\n 'O2': (0.8 * cos(1.6 * pi), 0.8 * sin(1.6 * pi)),\r\n 'Oz': (0, -0.8),\r\n 'T7': (-0.8, 0),\r\n 'T8': (0.8, 0),\r\n 'T9': (-1, 0),\r\n 'T10': (1, 0),\r\n 'TP7': (0.8 * cos(1.1 * pi), 0.8 * sin(1.1 * pi)),\r\n 'TP8': (0.8 * cos(1.9 * pi), 0.8 * sin(1.9 * pi)),\r\n 'TP9': (cos(1.1 * pi), sin(1.1 * pi)),\r\n 'TP10': (cos(1.9 * pi), sin(1.9 * pi)),\r\n}\r\n\r\n\r\nclass TopoPlot(object):\r\n \"\"\"Topographic plot.\"\"\"\r\n\r\n def __init__(self, data=None, axes=None):\r\n \"\"\"Setup defaults.\r\n\r\n Parameters\r\n ----------\r\n data : Pandas.Series or dict\r\n Pandas Series with values indexed by electrodes.\r\n axes : matplotlib.axes.AxesSubplot object\r\n Axis object to render on.\r\n\r\n \"\"\"\r\n if axes is None:\r\n self.figure = plt.figure()\r\n axes = self.figure.gca()\r\n else:\r\n self.figure = axes.get_figure()\r\n self.axes = axes\r\n self.center = np.array((0, 0))\r\n if isinstance(data, dict):\r\n self.data = pd.Series(data)\r\n elif isinstance(data, pd.Series):\r\n self.data = data\r\n elif data is None:\r\n self.data = None\r\n else:\r\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\r\n type(data)))\r\n\r\n @staticmethod\r\n def normalize_electrode_name(name):\r\n \"\"\"Normalize electrode name.\r\n\r\n Parameters\r\n ----------\r\n name : str\r\n Name of electrode to be normalized\r\n\r\n Examples\r\n --------\r\n >>> TopoPlot.normalize_electrode_name('fpz')\r\n 'Fpz'\r\n\r\n >>> TopoPlot.normalize_electrode_name('AFZ')\r\n 'AFz'\r\n\r\n \"\"\"\r\n return name.upper().replace('FPZ', 'Fpz').replace('Z', 'z')\r\n\r\n def draw_electrodes(self):\r\n \"\"\"Draw electrodes.\"\"\"\r\n for electrode, position in ELECTRODES.items():\r\n circle = plt.Circle(self.center + position,\r\n radius=0.04, fill=True,\r\n facecolor=(1, 1, 1))\r\n self.axes.add_patch(circle)\r\n position = self.center + position\r\n self.axes.text(position[0], position[1], electrode,\r\n verticalalignment='center',\r\n horizontalalignment='center',\r\n size=6)\r\n\r\n def draw_head(self):\r\n \"\"\"Draw outer head.\"\"\"\r\n circle = plt.Circle(self.center, radius=1, fill=False)\r\n self.axes.add_patch(circle)\r\n\r\n def draw_inner_head(self):\r\n \"\"\"Draw inner head.\"\"\"\r\n circle = plt.Circle(self.center, radius=0.8, fill=False)\r\n self.axes.add_patch(circle)\r\n\r\n def draw_nose(self):\r\n \"\"\"Draw nose.\"\"\"\r\n nose = plt.Line2D([sin(-0.1), 0, sin(0.1)],\r\n [cos(-0.1), 1.1, cos(0.1)],\r\n color=(0, 0, 0))\r\n self.axes.add_line(nose)\r\n\r\n def draw_data(self, method='linear', number_of_contours=10):\r\n \"\"\"Draw countours from provided data.\"\"\"\r\n if self.data is not None:\r\n # Coordinates for points to interpolate to\r\n xi, yi = np.mgrid[-1:1:100j, -1:1:100j]\r\n\r\n # Electrode positions for data to interpolate from\r\n points = []\r\n for electrode in self.data.index:\r\n name = TopoPlot.normalize_electrode_name(electrode)\r\n points.append(ELECTRODES[name])\r\n\r\n # Interpolate\r\n # TODO: Will not work with 2 electrodes.\r\n zi = griddata(points, self.data.values, (xi, yi), method=method)\r\n\r\n # Defaults\r\n if number_of_contours is None:\r\n number_of_contours = 10\r\n\r\n # Draw\r\n plt.contourf(xi, yi, zi, number_of_contours)\r\n\r\n # TODO: center\r\n\r\n def draw(self, title=None, method='linear', number_of_contours=None):\r\n \"\"\"Draw all components in topoplot including the data.\r\n\r\n Parameters\r\n ----------\r\n title : str, optional\r\n Title to put on the plot\r\n methods : str, optional\r\n Interpolation method\r\n number_of_contours : int\r\n Number of contours in the colored plot.\r\n\r\n Examples\r\n --------\r\n >>> import matplotlib.pyplot as plt\r\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\r\n >>> plt.ion()\r\n >>> topo_plot = TopoPlot(data)\r\n >>> topo_plot.draw()\r\n\r\n \"\"\"\r\n self.draw_head()\r\n self.draw_inner_head()\r\n self.draw_electrodes()\r\n self.draw_nose()\r\n self.draw_data(method=method, number_of_contours=number_of_contours)\r\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\r\n self.axes.axis('equal')\r\n if title is not None:\r\n self.axes.set_title(title)\r\n\r\n\r\nclass MultiPlot(TopoPlot):\r\n \"\"\"Multiple plots organized topographically.\r\n\r\n References\r\n ----------\r\n http://www.fieldtriptoolbox.org/reference/ft_multiploter\r\n\r\n \"\"\"\r\n\r\n def __init__(self, data=None, axes=None, xlim=None, ylim=None):\r\n \"\"\"Setup defaults.\r\n\r\n Parameters\r\n ----------\r\n data : Pandas.DataFrame\r\n Pandas DataFrame with values indexed by electrodes.\r\n axes : matplotlib.axes.AxesSubplot object\r\n Axis object to render on.\r\n\r\n \"\"\"\r\n if axes is None:\r\n self.figure = plt.figure()\r\n axes = self.figure.gca()\r\n else:\r\n self.figure = axes.get_figure()\r\n self.axes = axes\r\n\r\n # Contains a list of axes used to plot data data from individual\r\n # electrodes\r\n self._subaxes = []\r\n\r\n self.xlim = xlim\r\n self.ylim = ylim\r\n\r\n self.center = np.array((0, 0))\r\n\r\n if isinstance(data, pd.DataFrame):\r\n self.data = data\r\n elif data is None:\r\n self.data = None\r\n else:\r\n raise ValueError(\"Wrong type of value for 'data': {}\".format(\r\n type(data)))\r\n\r\n def add_subplot_axes(self, ax, rect, axis_bgcolor=None):\r\n \"\"\"Add subaxes to currect specified axes.\r\n\r\n References\r\n ----------\r\n Pablo https://stackoverflow.com/users/2309442/pablo\r\n\r\n Pablo's answer to \"Embedding small plots inside subplots in matplotlib\"\r\n https://stackoverflow.com/questions/17458580/\r\n\r\n \"\"\"\r\n # Modified from\r\n # https://stackoverflow.com/questions/17458580/\r\n box = ax.get_position()\r\n width, height = box.width, box.height\r\n subaxes_box = [(rect[0], rect[1]),\r\n (rect[0] + rect[2], rect[1] + rect[3])]\r\n subaxes_display_coords = ax.transData.transform(subaxes_box)\r\n trans_figure = self.figure.transFigure.inverted()\r\n subaxes_figure_coords = trans_figure.transform(subaxes_display_coords)\r\n x, y = subaxes_figure_coords[0, :]\r\n width, height = (subaxes_figure_coords[1, :] -\r\n subaxes_figure_coords[0, :])\r\n subaxes = self.figure.add_axes(\r\n [x, y, width, height], axis_bgcolor=axis_bgcolor)\r\n x_labelsize = subaxes.get_xticklabels()[0].get_size()\r\n y_labelsize = subaxes.get_yticklabels()[0].get_size()\r\n x_labelsize *= rect[2] ** 0.5\r\n y_labelsize *= rect[3] ** 0.5\r\n subaxes.xaxis.set_tick_params(labelsize=x_labelsize)\r\n subaxes.yaxis.set_tick_params(labelsize=y_labelsize)\r\n return subaxes\r\n\r\n def draw_data(self, type='plot', width=None, height=None,\r\n xlim=None, ylim=None,\r\n vmin=None, vmax=None,\r\n axis=False, yscale='linear'):\r\n \"\"\"Draw data.\r\n\r\n Parameters\r\n ----------\r\n type : 'plot', 'spectrogram', optional\r\n Type of plot\r\n xlim : 2-tuple of floats, optional\r\n X-axis limits\r\n ylim : 2-tuple of floats, optional\r\n Y-axis limits\r\n vmin : float, optional\r\n Minimum value for spectrogram colormap\r\n vmax : float, optional\r\n Maximum value for spectrogram colormap\r\n axis : bool, optional\r\n Determine whether the axis should be shown\r\n\r\n \"\"\"\r\n if self.data is not None:\r\n\r\n if ylim is None:\r\n if self.ylim is None and type != 'spectrogram':\r\n ylim = self.auto_ylim(xlim, yscale=yscale)\r\n else:\r\n ylim = self.ylim\r\n\r\n if xlim is None:\r\n xlim = self.xlim\r\n\r\n if vmin is None:\r\n vmin = 0\r\n\r\n # Determine a suitable width for subaxes\r\n number_of_electrodes = len([\r\n electrode\r\n for electrode in self.data.columns\r\n if electrode in ELECTRODES])\r\n if width is None:\r\n if number_of_electrodes > 32:\r\n width = 0.15\r\n else:\r\n width = 0.25\r\n if height is None:\r\n height = 0.25\r\n\r\n for electrode in self.data.columns:\r\n if electrode in ELECTRODES:\r\n\r\n # Axes and position\r\n x, y = ELECTRODES[electrode]\r\n subaxes = self.add_subplot_axes(\r\n self.axes,\r\n [x - width / 2, y - height / 2, width, height],\r\n axis_bgcolor='w')\r\n\r\n # Actual data plot\r\n if type == 'plot':\r\n self.data.ix[:, electrode].plot(\r\n ax=subaxes, xlim=xlim, ylim=ylim)\r\n\r\n if not axis:\r\n # x-axis\r\n trans = transforms.blended_transform_factory(\r\n subaxes.transAxes, subaxes.transData)\r\n line = lines.Line2D(\r\n (0, 1), (0, 0),\r\n transform=trans, color=(0, 0, 0))\r\n subaxes.add_line(line)\r\n\r\n trans = transforms.blended_transform_factory(\r\n subaxes.transAxes, subaxes.transAxes)\r\n line = lines.Line2D(\r\n (0, 0), (0, 1),\r\n transform=trans, color=(0, 0, 0))\r\n subaxes.add_line(line)\r\n\r\n elif type == 'spectrogram':\r\n spectrum, frequencies, midpoints, axes = plt.specgram(\r\n self.data.ix[:, electrode],\r\n Fs=self.data.sampling_rate,\r\n vmin=vmin,\r\n vmax=vmax,\r\n axes=subaxes)\r\n\r\n # Adjust axis around spectrogram image.\r\n if xlim is None:\r\n xlim = midpoints[0], midpoints[-1]\r\n subaxes.set_xlim(xlim)\r\n if ylim is None:\r\n ylim = frequencies[0], frequencies[-1]\r\n subaxes.set_ylim(ylim)\r\n\r\n else:\r\n raise ValueError(\"Wrong value for 'type' argument\")\r\n\r\n if not axis:\r\n subaxes.set_axis_off()\r\n\r\n # Annotation\r\n # http://matplotlib.org/users/transforms_tutorial.html\r\n subaxes.text(0.5, 0.95, electrode,\r\n transform=subaxes.transAxes,\r\n fontweight='bold', va='top', ha='center')\r\n subaxes.set_yticklabels([])\r\n subaxes.set_xticklabels([])\r\n\r\n self._subaxes.append(subaxes)\r\n\r\n @property\r\n def xlim(self):\r\n \"\"\"Return xlim for subplots.\"\"\"\r\n lim = [ax.get_xlim() for ax in self._subaxes]\r\n if lim == []:\r\n lim = None\r\n return lim\r\n\r\n @xlim.setter\r\n def xlim(self, left=None, right=None):\r\n \"\"\"Set x-axis limits on all subplots.\"\"\"\r\n for ax in self._subaxes:\r\n ax.set_xlim(left, right)\r\n self.figure.canvas.draw()\r\n\r\n @property\r\n def ylim(self):\r\n \"\"\"Return ylim for subplots.\"\"\"\r\n lim = [ax.get_ylim() for ax in self._subaxes]\r\n if lim == []:\r\n lim = None\r\n return lim\r\n\r\n @ylim.setter\r\n def ylim(self, bottom=None, top=None):\r\n \"\"\"Set y-axis limits on all subplots.\"\"\"\r\n for ax in self._subaxes:\r\n ax.set_ylim(bottom, top)\r\n self.figure.canvas.draw()\r\n\r\n @property\r\n def yscale(self):\r\n \"\"\"Return yscale for subplots.\"\"\"\r\n yscales = [ax.get_yscale() for ax in self._subaxes]\r\n return yscales\r\n\r\n @yscale.setter\r\n def yscale(self, value='linear'):\r\n \"\"\"Set y-axis limits on all subplots.\"\"\"\r\n for ax in self._subaxes:\r\n ax.set_yscale(value)\r\n self.figure.canvas.draw()\r\n\r\n def auto_ylim(self, xlim=None, yscale='linear'):\r\n \"\"\"Return an estimate for a good ylim.\r\n\r\n Parameters\r\n ----------\r\n xlim : 2-tuple, optional\r\n Limits in (the index of) the data from where the scaling should be\r\n computed.\r\n yscale : linear or log, optional\r\n Scaling of y-axis.\r\n\r\n \"\"\"\r\n electrodes = [col for col in self.data.columns\r\n if col in ELECTRODES]\r\n if xlim is None:\r\n data = self.data.ix[:, electrodes]\r\n else:\r\n indices = ((self.data.index >= xlim[0]) &\r\n (self.data.index <= xlim[1]))\r\n data = self.data.ix[indices, electrodes]\r\n min_data = data.min().min()\r\n max_data = data.max().max()\r\n abs_max = max(abs(min_data), max_data)\r\n if yscale == 'linear' or yscale == 'symlog':\r\n if min_data >= 0:\r\n ylim = 0, max_data\r\n else:\r\n ylim = -abs_max, abs_max\r\n elif yscale == 'log':\r\n if min_data > 0:\r\n ylim = min_data, max_data\r\n else:\r\n pseudo_zero = abs_max * 10 ** -5\r\n ylim = pseudo_zero, abs_max\r\n else:\r\n raise ValueError('Wrong value to yscale: {}'.format(yscale))\r\n return ylim\r\n\r\n def draw(self, type='plot', title=None, xlim=None, ylim=None,\r\n vmin=None, vmax=None,\r\n axis=False, yscale='linear'):\r\n \"\"\"Draw all components in multiplot including the data.\r\n\r\n Parameters\r\n ----------\r\n title : str, optional\r\n Title to put on the plot\r\n xlim : tuple of floats, optional\r\n X-axis limits used for each individual plots\r\n ylim : tuple of floats, optional\r\n Y-axis limits used for each individual plots\r\n\r\n \"\"\"\r\n self.axes.axis((-1.2, 1.2, -1.2, 1.2))\r\n self.draw_head()\r\n self.draw_inner_head()\r\n self.draw_nose()\r\n self.draw_data(type=type, xlim=xlim, ylim=ylim, vmin=vmin,\r\n vmax=vmax, axis=axis, yscale=yscale)\r\n if title is not None:\r\n self.axes.set_title(title)\r\n self.yscale = yscale\r\n\r\n\r\ndef topoplot(data=None, axes=None, method='linear', number_of_contours=10,\r\n title=None, xlim=None, ylim=None):\r\n \"\"\"Plot topographic map of the scalp in 2-D circular view.\r\n\r\n Draw the colored scalp map based on data in a Pandas Series where\r\n the values are indexed according to electrode name.\r\n\r\n Parameters\r\n ----------\r\n data : pandas.Series or pandas.DataFrame, optional\r\n Series with values and indexed by electrode names.\r\n methods : str, optional\r\n Interpolation method\r\n number_of_contours : int\r\n Number of contours in the colored plot.\r\n xlim : 2-tuple of floats, optional\r\n Limits of x-axis in multiplot\r\n ylim : 2-tuple of floats, optional\r\n Limits of y-axis in multiplot\r\n\r\n References\r\n ----------\r\n https://github.com/compmem/ptsa/blob/master/ptsa/plotting/topo.py\r\n\r\n http://sccn.ucsd.edu/~jung/tutorial/topoplot.htm\r\n\r\n Examples\r\n --------\r\n >>> import matplotlib.pyplot as plt\r\n >>> data = {'O1': 1, 'O2': 2, 'P3': -2, 'P4': -4}\r\n >>> plt.ion()\r\n >>> topo_plot = topoplot(data)\r\n\r\n \"\"\"\r\n if isinstance(data, pd.Series) or isinstance(data, dict) or data is None:\r\n topo_plot = TopoPlot(data=data, axes=axes)\r\n topo_plot.draw(title=title, method=method,\r\n number_of_contours=number_of_contours)\r\n return topo_plot\r\n elif isinstance(data, pd.DataFrame):\r\n multi_plot = MultiPlot(data=data, axes=axes)\r\n multi_plot.draw(title=title, xlim=xlim, ylim=ylim)\r\n return multi_plot\r\n\r\n\r\ndef show():\r\n \"\"\"Show plot.\"\"\"\r\n plt.show()\r\n\r\n\r\ndef main(args):\r\n \"\"\"Handle command-line interface to topographic plot.\"\"\"\r\n xlim = args['--xlim']\r\n if args['--xlim'] is not None:\r\n xlim = [float(lim) for lim in xlim.split(',')]\r\n\r\n if args['<file>'] is None:\r\n topoplot()\r\n else:\r\n filename = args['<file>']\r\n if filename.lower().endswith('.csv'):\r\n from .core import read_csv\r\n\r\n df = read_csv(filename, index_col=0)\r\n if args['--transpose']:\r\n df = df.T\r\n if args['--sample-index'] is None:\r\n if args['--center'] is not None:\r\n df = df.center()\r\n topoplot(df, xlim=xlim)\r\n else:\r\n sample_index = int(args['--sample-index'])\r\n series = df.iloc[sample_index - 1, :]\r\n topoplot(series)\r\n else:\r\n exit('Only csv files handled')\r\n plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n from docopt import docopt\r\n\r\n main(docopt(__doc__))\r\n",
"step-ids": [
19,
22,
23,
25,
30
]
}
|
[
19,
22,
23,
25,
30
] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is the ES library for Apache Kibble.
It stores the elasticsearch handler and config options.
"""
import elasticsearch
from kibble.configuration import KibbleConfigParser
class KibbleESWrapper(object):
"""
Class for rewriting old-style queries to the new ones,
where doc_type is an integral part of the DB name
"""
def __init__(self, ES):
self.ES = ES
def get(self, index, doc_type, id):
return self.ES.get(index=index + "_" + doc_type, doc_type="_doc", id=id)
def exists(self, index, doc_type, id):
return self.ES.exists(index=index + "_" + doc_type, doc_type="_doc", id=id)
def delete(self, index, doc_type, id):
return self.ES.delete(index=index + "_" + doc_type, doc_type="_doc", id=id)
def index(self, index, doc_type, id, body):
return self.ES.index(
index=index + "_" + doc_type, doc_type="_doc", id=id, body=body
)
def update(self, index, doc_type, id, body):
return self.ES.update(
index=index + "_" + doc_type, doc_type="_doc", id=id, body=body
)
def scroll(self, scroll_id, scroll):
return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)
def delete_by_query(self, **kwargs):
return self.ES.delete_by_query(**kwargs)
def search(
self, index, doc_type, size=100, scroll=None, _source_include=None, body=None
):
return self.ES.search(
index=index + "_" + doc_type,
doc_type="_doc",
size=size,
scroll=scroll,
_source_include=_source_include,
body=body,
)
def count(self, index, doc_type="*", body=None):
return self.ES.count(index=index + "_" + doc_type, doc_type="_doc", body=body)
class KibbleESWrapperSeven(object):
"""
Class for rewriting old-style queries to the >= 7.x ones,
where doc_type is an integral part of the DB name and NO DOC_TYPE!
"""
def __init__(self, ES):
self.ES = ES
def get(self, index, doc_type, id):
return self.ES.get(index=index + "_" + doc_type, id=id)
def exists(self, index, doc_type, id):
return self.ES.exists(index=index + "_" + doc_type, id=id)
def delete(self, index, doc_type, id):
return self.ES.delete(index=index + "_" + doc_type, id=id)
def index(self, index, doc_type, id, body):
return self.ES.index(index=index + "_" + doc_type, id=id, body=body)
def update(self, index, doc_type, id, body):
return self.ES.update(index=index + "_" + doc_type, id=id, body=body)
def scroll(self, scroll_id, scroll):
return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)
def delete_by_query(self, **kwargs):
return self.ES.delete_by_query(**kwargs)
def search(
self, index, doc_type, size=100, scroll=None, _source_include=None, body=None
):
return self.ES.search(
index=index + "_" + doc_type,
size=size,
scroll=scroll,
_source_includes=_source_include,
body=body,
)
def count(self, index, doc_type="*", body=None):
return self.ES.count(index=index + "_" + doc_type, body=body)
class KibbleDatabase(object):
def __init__(self, config: KibbleConfigParser):
self.config = config
self.dbname = config.get("elasticsearch", "dbname")
self.ES = elasticsearch.Elasticsearch(
[config.get("elasticsearch", "conn_uri")],
use_ssl=config.getboolean("elasticsearch", "ssl"),
verify_certs=False,
max_retries=5,
retry_on_timeout=True,
)
# IMPORTANT BIT: Figure out if this is ES < 6.x, 6.x or >= 7.x.
# If so, we're using the new ES DB mappings, and need to adjust ALL
# ES calls to match this.
self.ESversion = int(self.ES.info()["version"]["number"].split(".")[0])
if self.ESversion >= 7:
self.ES = KibbleESWrapperSeven(self.ES)
elif self.ESversion >= 6:
self.ES = KibbleESWrapper(self.ES)
|
normal
|
{
"blob_id": "f4b704a1416bfd6524340a68a20981957abf4340",
"index": 9850,
"step-1": "<mask token>\n\n\nclass KibbleESWrapper(object):\n <mask token>\n\n def __init__(self, ES):\n self.ES = ES\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n <mask token>\n <mask token>\n <mask token>\n\n\nclass KibbleESWrapperSeven(object):\n \"\"\"\n Class for rewriting old-style queries to the >= 7.x ones,\n where doc_type is an integral part of the DB name and NO DOC_TYPE!\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + '_' + doc_type, id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, size=size,\n scroll=scroll, _source_includes=_source_include, body=body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, body=body)\n\n\nclass KibbleDatabase(object):\n\n def __init__(self, config: KibbleConfigParser):\n self.config = config\n self.dbname = config.get('elasticsearch', 'dbname')\n self.ES = elasticsearch.Elasticsearch([config.get('elasticsearch',\n 'conn_uri')], use_ssl=config.getboolean('elasticsearch', 'ssl'),\n verify_certs=False, max_retries=5, retry_on_timeout=True)\n self.ESversion = int(self.ES.info()['version']['number'].split('.')[0])\n if self.ESversion >= 7:\n self.ES = KibbleESWrapperSeven(self.ES)\n elif self.ESversion >= 6:\n self.ES = KibbleESWrapper(self.ES)\n",
"step-2": "<mask token>\n\n\nclass KibbleESWrapper(object):\n <mask token>\n\n def __init__(self, ES):\n self.ES = ES\n <mask token>\n <mask token>\n <mask token>\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, doc_type='_doc',\n id=id, body=body)\n <mask token>\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n <mask token>\n <mask token>\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, doc_type='_doc',\n body=body)\n\n\nclass KibbleESWrapperSeven(object):\n \"\"\"\n Class for rewriting old-style queries to the >= 7.x ones,\n where doc_type is an integral part of the DB name and NO DOC_TYPE!\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + '_' + doc_type, id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, size=size,\n scroll=scroll, _source_includes=_source_include, body=body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, body=body)\n\n\nclass KibbleDatabase(object):\n\n def __init__(self, config: KibbleConfigParser):\n self.config = config\n self.dbname = config.get('elasticsearch', 'dbname')\n self.ES = elasticsearch.Elasticsearch([config.get('elasticsearch',\n 'conn_uri')], use_ssl=config.getboolean('elasticsearch', 'ssl'),\n verify_certs=False, max_retries=5, retry_on_timeout=True)\n self.ESversion = int(self.ES.info()['version']['number'].split('.')[0])\n if self.ESversion >= 7:\n self.ES = KibbleESWrapperSeven(self.ES)\n elif self.ESversion >= 6:\n self.ES = KibbleESWrapper(self.ES)\n",
"step-3": "<mask token>\n\n\nclass KibbleESWrapper(object):\n <mask token>\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, doc_type='_doc', id=id\n )\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, doc_type='_doc',\n id=id)\n <mask token>\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, doc_type='_doc',\n id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, doc_type='_doc',\n id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n <mask token>\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, doc_type='_doc',\n size=size, scroll=scroll, _source_include=_source_include, body\n =body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, doc_type='_doc',\n body=body)\n\n\nclass KibbleESWrapperSeven(object):\n \"\"\"\n Class for rewriting old-style queries to the >= 7.x ones,\n where doc_type is an integral part of the DB name and NO DOC_TYPE!\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + '_' + doc_type, id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, size=size,\n scroll=scroll, _source_includes=_source_include, body=body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, body=body)\n\n\nclass KibbleDatabase(object):\n\n def __init__(self, config: KibbleConfigParser):\n self.config = config\n self.dbname = config.get('elasticsearch', 'dbname')\n self.ES = elasticsearch.Elasticsearch([config.get('elasticsearch',\n 'conn_uri')], use_ssl=config.getboolean('elasticsearch', 'ssl'),\n verify_certs=False, max_retries=5, retry_on_timeout=True)\n self.ESversion = int(self.ES.info()['version']['number'].split('.')[0])\n if self.ESversion >= 7:\n self.ES = KibbleESWrapperSeven(self.ES)\n elif self.ESversion >= 6:\n self.ES = KibbleESWrapper(self.ES)\n",
"step-4": "<mask token>\n\n\nclass KibbleESWrapper(object):\n <mask token>\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, doc_type='_doc', id=id\n )\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, doc_type='_doc',\n id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + '_' + doc_type, doc_type='_doc',\n id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, doc_type='_doc',\n id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, doc_type='_doc',\n id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, doc_type='_doc',\n size=size, scroll=scroll, _source_include=_source_include, body\n =body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, doc_type='_doc',\n body=body)\n\n\nclass KibbleESWrapperSeven(object):\n \"\"\"\n Class for rewriting old-style queries to the >= 7.x ones,\n where doc_type is an integral part of the DB name and NO DOC_TYPE!\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + '_' + doc_type, id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + '_' + doc_type, id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + '_' + doc_type, id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + '_' + doc_type, id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + '_' + doc_type, id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(self, index, doc_type, size=100, scroll=None,\n _source_include=None, body=None):\n return self.ES.search(index=index + '_' + doc_type, size=size,\n scroll=scroll, _source_includes=_source_include, body=body)\n\n def count(self, index, doc_type='*', body=None):\n return self.ES.count(index=index + '_' + doc_type, body=body)\n\n\nclass KibbleDatabase(object):\n\n def __init__(self, config: KibbleConfigParser):\n self.config = config\n self.dbname = config.get('elasticsearch', 'dbname')\n self.ES = elasticsearch.Elasticsearch([config.get('elasticsearch',\n 'conn_uri')], use_ssl=config.getboolean('elasticsearch', 'ssl'),\n verify_certs=False, max_retries=5, retry_on_timeout=True)\n self.ESversion = int(self.ES.info()['version']['number'].split('.')[0])\n if self.ESversion >= 7:\n self.ES = KibbleESWrapperSeven(self.ES)\n elif self.ESversion >= 6:\n self.ES = KibbleESWrapper(self.ES)\n",
"step-5": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nThis is the ES library for Apache Kibble.\nIt stores the elasticsearch handler and config options.\n\"\"\"\n\nimport elasticsearch\n\nfrom kibble.configuration import KibbleConfigParser\n\n\nclass KibbleESWrapper(object):\n \"\"\"\n Class for rewriting old-style queries to the new ones,\n where doc_type is an integral part of the DB name\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + \"_\" + doc_type, doc_type=\"_doc\", id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + \"_\" + doc_type, doc_type=\"_doc\", id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + \"_\" + doc_type, doc_type=\"_doc\", id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(\n index=index + \"_\" + doc_type, doc_type=\"_doc\", id=id, body=body\n )\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(\n index=index + \"_\" + doc_type, doc_type=\"_doc\", id=id, body=body\n )\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(\n self, index, doc_type, size=100, scroll=None, _source_include=None, body=None\n ):\n return self.ES.search(\n index=index + \"_\" + doc_type,\n doc_type=\"_doc\",\n size=size,\n scroll=scroll,\n _source_include=_source_include,\n body=body,\n )\n\n def count(self, index, doc_type=\"*\", body=None):\n return self.ES.count(index=index + \"_\" + doc_type, doc_type=\"_doc\", body=body)\n\n\nclass KibbleESWrapperSeven(object):\n \"\"\"\n Class for rewriting old-style queries to the >= 7.x ones,\n where doc_type is an integral part of the DB name and NO DOC_TYPE!\n \"\"\"\n\n def __init__(self, ES):\n self.ES = ES\n\n def get(self, index, doc_type, id):\n return self.ES.get(index=index + \"_\" + doc_type, id=id)\n\n def exists(self, index, doc_type, id):\n return self.ES.exists(index=index + \"_\" + doc_type, id=id)\n\n def delete(self, index, doc_type, id):\n return self.ES.delete(index=index + \"_\" + doc_type, id=id)\n\n def index(self, index, doc_type, id, body):\n return self.ES.index(index=index + \"_\" + doc_type, id=id, body=body)\n\n def update(self, index, doc_type, id, body):\n return self.ES.update(index=index + \"_\" + doc_type, id=id, body=body)\n\n def scroll(self, scroll_id, scroll):\n return self.ES.scroll(scroll_id=scroll_id, scroll=scroll)\n\n def delete_by_query(self, **kwargs):\n return self.ES.delete_by_query(**kwargs)\n\n def search(\n self, index, doc_type, size=100, scroll=None, _source_include=None, body=None\n ):\n return self.ES.search(\n index=index + \"_\" + doc_type,\n size=size,\n scroll=scroll,\n _source_includes=_source_include,\n body=body,\n )\n\n def count(self, index, doc_type=\"*\", body=None):\n return self.ES.count(index=index + \"_\" + doc_type, body=body)\n\n\nclass KibbleDatabase(object):\n def __init__(self, config: KibbleConfigParser):\n self.config = config\n self.dbname = config.get(\"elasticsearch\", \"dbname\")\n self.ES = elasticsearch.Elasticsearch(\n [config.get(\"elasticsearch\", \"conn_uri\")],\n use_ssl=config.getboolean(\"elasticsearch\", \"ssl\"),\n verify_certs=False,\n max_retries=5,\n retry_on_timeout=True,\n )\n\n # IMPORTANT BIT: Figure out if this is ES < 6.x, 6.x or >= 7.x.\n # If so, we're using the new ES DB mappings, and need to adjust ALL\n # ES calls to match this.\n self.ESversion = int(self.ES.info()[\"version\"][\"number\"].split(\".\")[0])\n if self.ESversion >= 7:\n self.ES = KibbleESWrapperSeven(self.ES)\n elif self.ESversion >= 6:\n self.ES = KibbleESWrapper(self.ES)\n",
"step-ids": [
17,
19,
23,
25,
28
]
}
|
[
17,
19,
23,
25,
28
] |
<|reserved_special_token_0|>
@app.route('/')
def hello():
return 'Flask setup'
def sheets_row_writer(data_list):
print('sheets method invoked')
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(1)
worksheet.append_row(data_list)
print('Write complete')
def sheets_row_writer_donor(data_list_donor):
print('donor sheets method invoked')
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(2)
worksheet.append_row(data_list_donor)
print('Write complete')
<|reserved_special_token_0|>
def death_global_api():
data = request.get_json(silent=True)
page = requests.get('https://www.worldometers.info/coronavirus/')
response = death_global()
reply = {'fulfillmentText': response}
return jsonify(reply)
<|reserved_special_token_0|>
@app.route('/get_country_detail', methods=['POST'])
def get_country_detail():
data = request.get_json(silent=True)
intent = data['queryResult']['intent']['displayName']
print(intent)
def news_nepal_int():
url = 'https://nepalcorona.info/api/v1/news'
response = requests.get(url)
news = json.loads(response.text)
data = news['data']
data1 = data[0]
data2 = data[1]
data3 = data[2]
response2 = [{'card': {'title': data1['title'], 'subtitle':
'Source: ' + data1['source'] + ' >>', 'imageUri': data1[
'image_url'], 'buttons': [{'text': 'Read Full Story',
'postback': data1['url']}, {'text': 'Corona Symptoms',
'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {
'title': data2['title'], 'subtitle': 'Source ' + data2['source'
] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':
'Read Full Story', 'postback': data2['url']}, {'text':
'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':
'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle':
'Source ' + data3['source'] + ' >>', 'imageUri': data3[
'image_url'], 'buttons': [{'text': 'Read Full Story',
'postback': data3['url']}, {'text': 'Self Isolation',
'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def i_need_help_yes():
name = data['queryResult']['parameters']['name-people']
place = data['queryResult']['parameters']['name-place']
item_required = data['queryResult']['parameters']['help-ent']
phone = data['queryResult']['parameters']['phone-number']
ilist = [item_required[0], name[0], phone[0], place[0]]
sheets_row_writer(ilist)
response2 = 'Hello ' + name[0
] + ' so you are looking for ' + item_required[0
] + ' Your location is ' + place[0
] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'
response = [{'quickReplies': {'title': response2, 'quickReplies': [
'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'
}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def faq_ques_ans():
ff = data['originalDetectIntentRequest']['payload']['data']['message'][
'text']
url = 'https://nepalcorona.info/api/v1/faqs'
response = requests.get(url)
todos = json.loads(response.text)
rand = random.randrange(0, 45, 1)
opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',
'Preventions', 'Self Isolation', 'Play Corona Quiz']
faqs = todos['data']
faq = faqs[rand]
if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==
'भाषा परिवर्तन'):
randq = faq['question']
randa = faq['answer']
opt1 = 'More Quizzles'
opt2 = 'Switch Language'
else:
randq = faq['question_np']
randa = faq['answer_np']
opt1 = 'अरु देखाउनुहोस >>'
opt2 = 'भाषा परिवर्तन'
response2 = 'Q. ' + randq + '\n A. ' + randa + '\n'
response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':
randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},
'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def blood_pal_yes():
print(intent)
print(data)
blood_group = data['queryResult']['parameters']['blood-group']
blood_amount = data['queryResult']['parameters']['blood-pint']
location = data['queryResult']['parameters']['blood-location']
case = data['queryResult']['parameters']['blood-case']
date = data['queryResult']['parameters']['blood-date']
phone = data['queryResult']['parameters']['blood-number']
ilist = [blood_group, blood_amount, location, case, date, phone]
sheets_row_writer(ilist)
response3 = """For critical case, please contact
Kathmandu 9880998523
Bhaktapur 9880998525
Kavre 9869294490
Purwanchal 9862176689
Chitwan 9801070746
Butwal 9807522664
Dang 9801920169
Stay connected with BloodPal!"""
response = (
'The following request has been sent. We will contact you shortly. '
+ blood_group + ' blood (' + str(blood_amount) +
' ) required for ' + case + ' at ' + location + ' On ' + date +
' - ' + phone + ' Thank you .')
response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['Dummy text']}}, {'text': {'text': [
response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [
'Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def blood_pal_donor_yes():
print(intent)
print(data)
permananet_address = data['queryResult']['parameters'][
'permananet-address']
height = data['queryResult']['parameters']['height']
gender = data['queryResult']['parameters']['gender']
age = data['queryResult']['parameters']['age']
blood = data['queryResult']['parameters']['blood']
current_address = data['queryResult']['parameters']['current-address']
email = data['queryResult']['parameters']['email']
name = data['queryResult']['parameters']['name']
last_donation = data['queryResult']['parameters']['last-donation']
weight = data['queryResult']['parameters']['weight']
number = data['queryResult']['parameters']['number']
ilist = [name, number, email, current_address, permananet_address,
age, height, weight, gender, blood, last_donation]
sheets_row_writer_donor(ilist)
response3 = """For critical case, please contact
Kathmandu 9880998523
Bhaktapur 9880998525
Kavre 9869294490
Purwanchal 9862176689
Chitwan 9801070746
Butwal 9807522664
Dang 9801920169
Stay connected with BloodPal!"""
response = ('Thank you ' + name +
' for registration as a blood donor We will contact you at the time of urgency in your area.'
)
response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['Dummy text']}}, {'text': {'text': [
response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [
'Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def world_data_live():
text = death_global()
response = [{'quickReplies': {'title': text, 'quickReplies': [
'Provience Data', 'Nepali News', 'World Data', 'Symptoms',
"Corona FAQ's", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def district_all_summary():
text = dss.district_all_summary()
response = [{'quickReplies': {'title': text, 'quickReplies': [
'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',
"Corona FAQ's", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def province_all_summary():
text = dss.provience_all_summary()
print(text)
response = [{'quickReplies': {'title': text, 'quickReplies': [
'District-Summary', 'Province-Data', 'World Data',
'Preventions', "Corona FAQ's", 'Corona Quiz']}, 'platform':
'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def proviencewise_detail():
pcode = data['queryResult']['parameters']['custom-province-ent']
province = int(pcode)
print(type(province))
response_summary = dss.ardp(province)
print(response_summary)
response = [{'card': {'title': 'Covid-19 Provience: ' + str(
province) + ' | Details', 'subtitle': response_summary,
'imageUri':
'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',
'buttons': [{'text': 'Prov ' + str(province) + ' District Data',
'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(
province) + ' Vdc-Mun Data', 'postback':
'dis-vdc data detail int'}, {'text': 'Latest Nepali News',
'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def dis_vdc_detail():
cod = data['queryResult']['parameters']['custom-province-ent']
dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']
print(type(dvdc))
print(dvdc)
code = int(cod)
print(type(code))
if dvdc == 'vdc':
print('inside vdc')
typ = 'vdc'
else:
print('inside district')
typ = 'district'
data_return = dss.ard(code, typ)
response = [{'quickReplies': {'title': data_return, 'quickReplies':
['District Summary', 'Province Summary', 'Nepali News',
'World Data', 'Preventions', "Corona FAQ's", 'Corona Quiz']},
'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def nepal_data_new_main_int():
url = 'https://nepalcorona.info/api/v1/data/nepal'
response = requests.get(url)
todos = json.loads(response.text)
covid_df = dss.create_covid_df()
response2 = 'Nepal Cases \n Positive :' + str(todos['tested_positive']
) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(
todos['deaths']) + ' ' + '\n'
print(response2)
response_summary = dss.affected_summary()
response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['']}}, {'card': {'title':
'Covid-19 Nepal | Stats', 'subtitle': response_summary,
'imageUri':
'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'
, 'buttons': [{'text': 'Province Summary', 'postback':
'province data int'}, {'text': 'District-Summary', 'postback':
'district data int'}, {'text': 'Latest Nepali News', 'postback':
'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':
['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def batti_update():
url = (
'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'
)
response = requests.get(url)
todos = json.loads(response.text)
feeds = todos['feeds'][0]
response2 = 'Batti Status Now :' + str(feeds['field1'] +
'\n Last Updated: ' + str(feeds['created_at']))
print(response2)
reply = {'fulfillmentText': response2}
return reply
def default():
return 'Incorrect Data'
switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':
news_nepal_int, 'i need help main int - yes': i_need_help_yes,
'faq-que-ans-int': faq_ques_ans,
'bloodpal-need-blood-main-int - yes': blood_pal_yes,
'data world int': world_data_live, 'district data int':
district_all_summary, 'province data int': province_all_summary,
'province-wise-data': proviencewise_detail,
'dis-vdc data detail int': dis_vdc_detail,
'bloodpal-become-donor-main-int': blood_pal_donor_yes,
'batti-update-intent': batti_update}
def switch(intentname):
return switcher.get(intentname, default)()
reply = switch(intent)
return jsonify(reply)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/')
def hello():
return 'Flask setup'
def sheets_row_writer(data_list):
print('sheets method invoked')
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(1)
worksheet.append_row(data_list)
print('Write complete')
def sheets_row_writer_donor(data_list_donor):
print('donor sheets method invoked')
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(2)
worksheet.append_row(data_list_donor)
print('Write complete')
<|reserved_special_token_0|>
def death_global_api():
data = request.get_json(silent=True)
page = requests.get('https://www.worldometers.info/coronavirus/')
response = death_global()
reply = {'fulfillmentText': response}
return jsonify(reply)
def death_country(id):
idu = id.upper()
page = requests.get(
'https://www.worldometers.info/coronavirus/country/' + id + '/')
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all('div', {'class': 'maincounter-number'})
active = soup.find('div', {'class': 'number-table-main'})
active_cases = active.text
cases_list = []
for res in result:
cases_list.append(res.text)
return ('In ' + idu + ' There are' + cases_list[0] +
'Total cases out of which' + cases_list[1] + 'are dead and' +
cases_list[2] + 'have already recovered . There are still ' +
active_cases + ' active cases .')
@app.route('/get_country_detail', methods=['POST'])
def get_country_detail():
data = request.get_json(silent=True)
intent = data['queryResult']['intent']['displayName']
print(intent)
def news_nepal_int():
url = 'https://nepalcorona.info/api/v1/news'
response = requests.get(url)
news = json.loads(response.text)
data = news['data']
data1 = data[0]
data2 = data[1]
data3 = data[2]
response2 = [{'card': {'title': data1['title'], 'subtitle':
'Source: ' + data1['source'] + ' >>', 'imageUri': data1[
'image_url'], 'buttons': [{'text': 'Read Full Story',
'postback': data1['url']}, {'text': 'Corona Symptoms',
'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {
'title': data2['title'], 'subtitle': 'Source ' + data2['source'
] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':
'Read Full Story', 'postback': data2['url']}, {'text':
'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':
'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle':
'Source ' + data3['source'] + ' >>', 'imageUri': data3[
'image_url'], 'buttons': [{'text': 'Read Full Story',
'postback': data3['url']}, {'text': 'Self Isolation',
'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def i_need_help_yes():
name = data['queryResult']['parameters']['name-people']
place = data['queryResult']['parameters']['name-place']
item_required = data['queryResult']['parameters']['help-ent']
phone = data['queryResult']['parameters']['phone-number']
ilist = [item_required[0], name[0], phone[0], place[0]]
sheets_row_writer(ilist)
response2 = 'Hello ' + name[0
] + ' so you are looking for ' + item_required[0
] + ' Your location is ' + place[0
] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'
response = [{'quickReplies': {'title': response2, 'quickReplies': [
'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'
}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def faq_ques_ans():
ff = data['originalDetectIntentRequest']['payload']['data']['message'][
'text']
url = 'https://nepalcorona.info/api/v1/faqs'
response = requests.get(url)
todos = json.loads(response.text)
rand = random.randrange(0, 45, 1)
opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',
'Preventions', 'Self Isolation', 'Play Corona Quiz']
faqs = todos['data']
faq = faqs[rand]
if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==
'भाषा परिवर्तन'):
randq = faq['question']
randa = faq['answer']
opt1 = 'More Quizzles'
opt2 = 'Switch Language'
else:
randq = faq['question_np']
randa = faq['answer_np']
opt1 = 'अरु देखाउनुहोस >>'
opt2 = 'भाषा परिवर्तन'
response2 = 'Q. ' + randq + '\n A. ' + randa + '\n'
response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':
randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},
'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def blood_pal_yes():
print(intent)
print(data)
blood_group = data['queryResult']['parameters']['blood-group']
blood_amount = data['queryResult']['parameters']['blood-pint']
location = data['queryResult']['parameters']['blood-location']
case = data['queryResult']['parameters']['blood-case']
date = data['queryResult']['parameters']['blood-date']
phone = data['queryResult']['parameters']['blood-number']
ilist = [blood_group, blood_amount, location, case, date, phone]
sheets_row_writer(ilist)
response3 = """For critical case, please contact
Kathmandu 9880998523
Bhaktapur 9880998525
Kavre 9869294490
Purwanchal 9862176689
Chitwan 9801070746
Butwal 9807522664
Dang 9801920169
Stay connected with BloodPal!"""
response = (
'The following request has been sent. We will contact you shortly. '
+ blood_group + ' blood (' + str(blood_amount) +
' ) required for ' + case + ' at ' + location + ' On ' + date +
' - ' + phone + ' Thank you .')
response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['Dummy text']}}, {'text': {'text': [
response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [
'Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def blood_pal_donor_yes():
print(intent)
print(data)
permananet_address = data['queryResult']['parameters'][
'permananet-address']
height = data['queryResult']['parameters']['height']
gender = data['queryResult']['parameters']['gender']
age = data['queryResult']['parameters']['age']
blood = data['queryResult']['parameters']['blood']
current_address = data['queryResult']['parameters']['current-address']
email = data['queryResult']['parameters']['email']
name = data['queryResult']['parameters']['name']
last_donation = data['queryResult']['parameters']['last-donation']
weight = data['queryResult']['parameters']['weight']
number = data['queryResult']['parameters']['number']
ilist = [name, number, email, current_address, permananet_address,
age, height, weight, gender, blood, last_donation]
sheets_row_writer_donor(ilist)
response3 = """For critical case, please contact
Kathmandu 9880998523
Bhaktapur 9880998525
Kavre 9869294490
Purwanchal 9862176689
Chitwan 9801070746
Butwal 9807522664
Dang 9801920169
Stay connected with BloodPal!"""
response = ('Thank you ' + name +
' for registration as a blood donor We will contact you at the time of urgency in your area.'
)
response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['Dummy text']}}, {'text': {'text': [
response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [
'Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def world_data_live():
text = death_global()
response = [{'quickReplies': {'title': text, 'quickReplies': [
'Provience Data', 'Nepali News', 'World Data', 'Symptoms',
"Corona FAQ's", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def district_all_summary():
text = dss.district_all_summary()
response = [{'quickReplies': {'title': text, 'quickReplies': [
'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',
"Corona FAQ's", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def province_all_summary():
text = dss.provience_all_summary()
print(text)
response = [{'quickReplies': {'title': text, 'quickReplies': [
'District-Summary', 'Province-Data', 'World Data',
'Preventions', "Corona FAQ's", 'Corona Quiz']}, 'platform':
'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def proviencewise_detail():
pcode = data['queryResult']['parameters']['custom-province-ent']
province = int(pcode)
print(type(province))
response_summary = dss.ardp(province)
print(response_summary)
response = [{'card': {'title': 'Covid-19 Provience: ' + str(
province) + ' | Details', 'subtitle': response_summary,
'imageUri':
'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',
'buttons': [{'text': 'Prov ' + str(province) + ' District Data',
'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(
province) + ' Vdc-Mun Data', 'postback':
'dis-vdc data detail int'}, {'text': 'Latest Nepali News',
'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def dis_vdc_detail():
cod = data['queryResult']['parameters']['custom-province-ent']
dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']
print(type(dvdc))
print(dvdc)
code = int(cod)
print(type(code))
if dvdc == 'vdc':
print('inside vdc')
typ = 'vdc'
else:
print('inside district')
typ = 'district'
data_return = dss.ard(code, typ)
response = [{'quickReplies': {'title': data_return, 'quickReplies':
['District Summary', 'Province Summary', 'Nepali News',
'World Data', 'Preventions', "Corona FAQ's", 'Corona Quiz']},
'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def nepal_data_new_main_int():
url = 'https://nepalcorona.info/api/v1/data/nepal'
response = requests.get(url)
todos = json.loads(response.text)
covid_df = dss.create_covid_df()
response2 = 'Nepal Cases \n Positive :' + str(todos['tested_positive']
) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(
todos['deaths']) + ' ' + '\n'
print(response2)
response_summary = dss.affected_summary()
response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['']}}, {'card': {'title':
'Covid-19 Nepal | Stats', 'subtitle': response_summary,
'imageUri':
'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'
, 'buttons': [{'text': 'Province Summary', 'postback':
'province data int'}, {'text': 'District-Summary', 'postback':
'district data int'}, {'text': 'Latest Nepali News', 'postback':
'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':
['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def batti_update():
url = (
'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'
)
response = requests.get(url)
todos = json.loads(response.text)
feeds = todos['feeds'][0]
response2 = 'Batti Status Now :' + str(feeds['field1'] +
'\n Last Updated: ' + str(feeds['created_at']))
print(response2)
reply = {'fulfillmentText': response2}
return reply
def default():
return 'Incorrect Data'
switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':
news_nepal_int, 'i need help main int - yes': i_need_help_yes,
'faq-que-ans-int': faq_ques_ans,
'bloodpal-need-blood-main-int - yes': blood_pal_yes,
'data world int': world_data_live, 'district data int':
district_all_summary, 'province data int': province_all_summary,
'province-wise-data': proviencewise_detail,
'dis-vdc data detail int': dis_vdc_detail,
'bloodpal-become-donor-main-int': blood_pal_donor_yes,
'batti-update-intent': batti_update}
def switch(intentname):
return switcher.get(intentname, default)()
reply = switch(intent)
return jsonify(reply)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
app = Flask(__name__)
@app.route('/')
def hello():
return 'Flask setup'
def sheets_row_writer(data_list):
print('sheets method invoked')
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(1)
worksheet.append_row(data_list)
print('Write complete')
def sheets_row_writer_donor(data_list_donor):
print('donor sheets method invoked')
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(2)
worksheet.append_row(data_list_donor)
print('Write complete')
def death_global():
page = requests.get('https://www.worldometers.info/coronavirus/')
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all('div', {'class': 'maincounter-number'})
cases_list = []
active = soup.find('div', {'class': 'number-table-main'})
active_cases = active.text
for res in result:
cases_list.append(res.text)
return 'There are' + cases_list[0
] + ' Total cases out of which' + cases_list[1
] + ' have died and' + cases_list[2
] + ' have recovered . There are still ' + active_cases + ' active cases.'
app.route('/death/global', methods=['POST'])
def death_global_api():
data = request.get_json(silent=True)
page = requests.get('https://www.worldometers.info/coronavirus/')
response = death_global()
reply = {'fulfillmentText': response}
return jsonify(reply)
def death_country(id):
idu = id.upper()
page = requests.get(
'https://www.worldometers.info/coronavirus/country/' + id + '/')
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all('div', {'class': 'maincounter-number'})
active = soup.find('div', {'class': 'number-table-main'})
active_cases = active.text
cases_list = []
for res in result:
cases_list.append(res.text)
return ('In ' + idu + ' There are' + cases_list[0] +
'Total cases out of which' + cases_list[1] + 'are dead and' +
cases_list[2] + 'have already recovered . There are still ' +
active_cases + ' active cases .')
@app.route('/get_country_detail', methods=['POST'])
def get_country_detail():
data = request.get_json(silent=True)
intent = data['queryResult']['intent']['displayName']
print(intent)
def news_nepal_int():
url = 'https://nepalcorona.info/api/v1/news'
response = requests.get(url)
news = json.loads(response.text)
data = news['data']
data1 = data[0]
data2 = data[1]
data3 = data[2]
response2 = [{'card': {'title': data1['title'], 'subtitle':
'Source: ' + data1['source'] + ' >>', 'imageUri': data1[
'image_url'], 'buttons': [{'text': 'Read Full Story',
'postback': data1['url']}, {'text': 'Corona Symptoms',
'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {
'title': data2['title'], 'subtitle': 'Source ' + data2['source'
] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':
'Read Full Story', 'postback': data2['url']}, {'text':
'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':
'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle':
'Source ' + data3['source'] + ' >>', 'imageUri': data3[
'image_url'], 'buttons': [{'text': 'Read Full Story',
'postback': data3['url']}, {'text': 'Self Isolation',
'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def i_need_help_yes():
name = data['queryResult']['parameters']['name-people']
place = data['queryResult']['parameters']['name-place']
item_required = data['queryResult']['parameters']['help-ent']
phone = data['queryResult']['parameters']['phone-number']
ilist = [item_required[0], name[0], phone[0], place[0]]
sheets_row_writer(ilist)
response2 = 'Hello ' + name[0
] + ' so you are looking for ' + item_required[0
] + ' Your location is ' + place[0
] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'
response = [{'quickReplies': {'title': response2, 'quickReplies': [
'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'
}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def faq_ques_ans():
ff = data['originalDetectIntentRequest']['payload']['data']['message'][
'text']
url = 'https://nepalcorona.info/api/v1/faqs'
response = requests.get(url)
todos = json.loads(response.text)
rand = random.randrange(0, 45, 1)
opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',
'Preventions', 'Self Isolation', 'Play Corona Quiz']
faqs = todos['data']
faq = faqs[rand]
if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==
'भाषा परिवर्तन'):
randq = faq['question']
randa = faq['answer']
opt1 = 'More Quizzles'
opt2 = 'Switch Language'
else:
randq = faq['question_np']
randa = faq['answer_np']
opt1 = 'अरु देखाउनुहोस >>'
opt2 = 'भाषा परिवर्तन'
response2 = 'Q. ' + randq + '\n A. ' + randa + '\n'
response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':
randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},
'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def blood_pal_yes():
print(intent)
print(data)
blood_group = data['queryResult']['parameters']['blood-group']
blood_amount = data['queryResult']['parameters']['blood-pint']
location = data['queryResult']['parameters']['blood-location']
case = data['queryResult']['parameters']['blood-case']
date = data['queryResult']['parameters']['blood-date']
phone = data['queryResult']['parameters']['blood-number']
ilist = [blood_group, blood_amount, location, case, date, phone]
sheets_row_writer(ilist)
response3 = """For critical case, please contact
Kathmandu 9880998523
Bhaktapur 9880998525
Kavre 9869294490
Purwanchal 9862176689
Chitwan 9801070746
Butwal 9807522664
Dang 9801920169
Stay connected with BloodPal!"""
response = (
'The following request has been sent. We will contact you shortly. '
+ blood_group + ' blood (' + str(blood_amount) +
' ) required for ' + case + ' at ' + location + ' On ' + date +
' - ' + phone + ' Thank you .')
response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['Dummy text']}}, {'text': {'text': [
response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [
'Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def blood_pal_donor_yes():
print(intent)
print(data)
permananet_address = data['queryResult']['parameters'][
'permananet-address']
height = data['queryResult']['parameters']['height']
gender = data['queryResult']['parameters']['gender']
age = data['queryResult']['parameters']['age']
blood = data['queryResult']['parameters']['blood']
current_address = data['queryResult']['parameters']['current-address']
email = data['queryResult']['parameters']['email']
name = data['queryResult']['parameters']['name']
last_donation = data['queryResult']['parameters']['last-donation']
weight = data['queryResult']['parameters']['weight']
number = data['queryResult']['parameters']['number']
ilist = [name, number, email, current_address, permananet_address,
age, height, weight, gender, blood, last_donation]
sheets_row_writer_donor(ilist)
response3 = """For critical case, please contact
Kathmandu 9880998523
Bhaktapur 9880998525
Kavre 9869294490
Purwanchal 9862176689
Chitwan 9801070746
Butwal 9807522664
Dang 9801920169
Stay connected with BloodPal!"""
response = ('Thank you ' + name +
' for registration as a blood donor We will contact you at the time of urgency in your area.'
)
response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['Dummy text']}}, {'text': {'text': [
response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [
'Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def world_data_live():
text = death_global()
response = [{'quickReplies': {'title': text, 'quickReplies': [
'Provience Data', 'Nepali News', 'World Data', 'Symptoms',
"Corona FAQ's", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def district_all_summary():
text = dss.district_all_summary()
response = [{'quickReplies': {'title': text, 'quickReplies': [
'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',
"Corona FAQ's", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def province_all_summary():
text = dss.provience_all_summary()
print(text)
response = [{'quickReplies': {'title': text, 'quickReplies': [
'District-Summary', 'Province-Data', 'World Data',
'Preventions', "Corona FAQ's", 'Corona Quiz']}, 'platform':
'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def proviencewise_detail():
pcode = data['queryResult']['parameters']['custom-province-ent']
province = int(pcode)
print(type(province))
response_summary = dss.ardp(province)
print(response_summary)
response = [{'card': {'title': 'Covid-19 Provience: ' + str(
province) + ' | Details', 'subtitle': response_summary,
'imageUri':
'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',
'buttons': [{'text': 'Prov ' + str(province) + ' District Data',
'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(
province) + ' Vdc-Mun Data', 'postback':
'dis-vdc data detail int'}, {'text': 'Latest Nepali News',
'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def dis_vdc_detail():
cod = data['queryResult']['parameters']['custom-province-ent']
dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']
print(type(dvdc))
print(dvdc)
code = int(cod)
print(type(code))
if dvdc == 'vdc':
print('inside vdc')
typ = 'vdc'
else:
print('inside district')
typ = 'district'
data_return = dss.ard(code, typ)
response = [{'quickReplies': {'title': data_return, 'quickReplies':
['District Summary', 'Province Summary', 'Nepali News',
'World Data', 'Preventions', "Corona FAQ's", 'Corona Quiz']},
'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def nepal_data_new_main_int():
url = 'https://nepalcorona.info/api/v1/data/nepal'
response = requests.get(url)
todos = json.loads(response.text)
covid_df = dss.create_covid_df()
response2 = 'Nepal Cases \n Positive :' + str(todos['tested_positive']
) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(
todos['deaths']) + ' ' + '\n'
print(response2)
response_summary = dss.affected_summary()
response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['']}}, {'card': {'title':
'Covid-19 Nepal | Stats', 'subtitle': response_summary,
'imageUri':
'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'
, 'buttons': [{'text': 'Province Summary', 'postback':
'province data int'}, {'text': 'District-Summary', 'postback':
'district data int'}, {'text': 'Latest Nepali News', 'postback':
'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':
['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def batti_update():
url = (
'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'
)
response = requests.get(url)
todos = json.loads(response.text)
feeds = todos['feeds'][0]
response2 = 'Batti Status Now :' + str(feeds['field1'] +
'\n Last Updated: ' + str(feeds['created_at']))
print(response2)
reply = {'fulfillmentText': response2}
return reply
def default():
return 'Incorrect Data'
switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':
news_nepal_int, 'i need help main int - yes': i_need_help_yes,
'faq-que-ans-int': faq_ques_ans,
'bloodpal-need-blood-main-int - yes': blood_pal_yes,
'data world int': world_data_live, 'district data int':
district_all_summary, 'province data int': province_all_summary,
'province-wise-data': proviencewise_detail,
'dis-vdc data detail int': dis_vdc_detail,
'bloodpal-become-donor-main-int': blood_pal_donor_yes,
'batti-update-intent': batti_update}
def switch(intentname):
return switcher.get(intentname, default)()
reply = switch(intent)
return jsonify(reply)
if __name__ == '__main__':
app.run()
<|reserved_special_token_1|>
from flask import Flask, jsonify, request
import requests, json, random
from bs4 import BeautifulSoup
import gspread
import pandas as pd
import dataservices as dss
from oauth2client.service_account import ServiceAccountCredentials
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
app = Flask(__name__)
@app.route('/')
def hello():
return 'Flask setup'
def sheets_row_writer(data_list):
print('sheets method invoked')
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(1)
worksheet.append_row(data_list)
print('Write complete')
def sheets_row_writer_donor(data_list_donor):
print('donor sheets method invoked')
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(2)
worksheet.append_row(data_list_donor)
print('Write complete')
def death_global():
page = requests.get('https://www.worldometers.info/coronavirus/')
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all('div', {'class': 'maincounter-number'})
cases_list = []
active = soup.find('div', {'class': 'number-table-main'})
active_cases = active.text
for res in result:
cases_list.append(res.text)
return 'There are' + cases_list[0
] + ' Total cases out of which' + cases_list[1
] + ' have died and' + cases_list[2
] + ' have recovered . There are still ' + active_cases + ' active cases.'
app.route('/death/global', methods=['POST'])
def death_global_api():
data = request.get_json(silent=True)
page = requests.get('https://www.worldometers.info/coronavirus/')
response = death_global()
reply = {'fulfillmentText': response}
return jsonify(reply)
def death_country(id):
idu = id.upper()
page = requests.get(
'https://www.worldometers.info/coronavirus/country/' + id + '/')
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all('div', {'class': 'maincounter-number'})
active = soup.find('div', {'class': 'number-table-main'})
active_cases = active.text
cases_list = []
for res in result:
cases_list.append(res.text)
return ('In ' + idu + ' There are' + cases_list[0] +
'Total cases out of which' + cases_list[1] + 'are dead and' +
cases_list[2] + 'have already recovered . There are still ' +
active_cases + ' active cases .')
@app.route('/get_country_detail', methods=['POST'])
def get_country_detail():
data = request.get_json(silent=True)
intent = data['queryResult']['intent']['displayName']
print(intent)
def news_nepal_int():
url = 'https://nepalcorona.info/api/v1/news'
response = requests.get(url)
news = json.loads(response.text)
data = news['data']
data1 = data[0]
data2 = data[1]
data3 = data[2]
response2 = [{'card': {'title': data1['title'], 'subtitle':
'Source: ' + data1['source'] + ' >>', 'imageUri': data1[
'image_url'], 'buttons': [{'text': 'Read Full Story',
'postback': data1['url']}, {'text': 'Corona Symptoms',
'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {
'title': data2['title'], 'subtitle': 'Source ' + data2['source'
] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':
'Read Full Story', 'postback': data2['url']}, {'text':
'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':
'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle':
'Source ' + data3['source'] + ' >>', 'imageUri': data3[
'image_url'], 'buttons': [{'text': 'Read Full Story',
'postback': data3['url']}, {'text': 'Self Isolation',
'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def i_need_help_yes():
name = data['queryResult']['parameters']['name-people']
place = data['queryResult']['parameters']['name-place']
item_required = data['queryResult']['parameters']['help-ent']
phone = data['queryResult']['parameters']['phone-number']
ilist = [item_required[0], name[0], phone[0], place[0]]
sheets_row_writer(ilist)
response2 = 'Hello ' + name[0
] + ' so you are looking for ' + item_required[0
] + ' Your location is ' + place[0
] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'
response = [{'quickReplies': {'title': response2, 'quickReplies': [
'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'
}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def faq_ques_ans():
ff = data['originalDetectIntentRequest']['payload']['data']['message'][
'text']
url = 'https://nepalcorona.info/api/v1/faqs'
response = requests.get(url)
todos = json.loads(response.text)
rand = random.randrange(0, 45, 1)
opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',
'Preventions', 'Self Isolation', 'Play Corona Quiz']
faqs = todos['data']
faq = faqs[rand]
if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==
'भाषा परिवर्तन'):
randq = faq['question']
randa = faq['answer']
opt1 = 'More Quizzles'
opt2 = 'Switch Language'
else:
randq = faq['question_np']
randa = faq['answer_np']
opt1 = 'अरु देखाउनुहोस >>'
opt2 = 'भाषा परिवर्तन'
response2 = 'Q. ' + randq + '\n A. ' + randa + '\n'
response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':
randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},
'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def blood_pal_yes():
print(intent)
print(data)
blood_group = data['queryResult']['parameters']['blood-group']
blood_amount = data['queryResult']['parameters']['blood-pint']
location = data['queryResult']['parameters']['blood-location']
case = data['queryResult']['parameters']['blood-case']
date = data['queryResult']['parameters']['blood-date']
phone = data['queryResult']['parameters']['blood-number']
ilist = [blood_group, blood_amount, location, case, date, phone]
sheets_row_writer(ilist)
response3 = """For critical case, please contact
Kathmandu 9880998523
Bhaktapur 9880998525
Kavre 9869294490
Purwanchal 9862176689
Chitwan 9801070746
Butwal 9807522664
Dang 9801920169
Stay connected with BloodPal!"""
response = (
'The following request has been sent. We will contact you shortly. '
+ blood_group + ' blood (' + str(blood_amount) +
' ) required for ' + case + ' at ' + location + ' On ' + date +
' - ' + phone + ' Thank you .')
response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['Dummy text']}}, {'text': {'text': [
response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [
'Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def blood_pal_donor_yes():
print(intent)
print(data)
permananet_address = data['queryResult']['parameters'][
'permananet-address']
height = data['queryResult']['parameters']['height']
gender = data['queryResult']['parameters']['gender']
age = data['queryResult']['parameters']['age']
blood = data['queryResult']['parameters']['blood']
current_address = data['queryResult']['parameters']['current-address']
email = data['queryResult']['parameters']['email']
name = data['queryResult']['parameters']['name']
last_donation = data['queryResult']['parameters']['last-donation']
weight = data['queryResult']['parameters']['weight']
number = data['queryResult']['parameters']['number']
ilist = [name, number, email, current_address, permananet_address,
age, height, weight, gender, blood, last_donation]
sheets_row_writer_donor(ilist)
response3 = """For critical case, please contact
Kathmandu 9880998523
Bhaktapur 9880998525
Kavre 9869294490
Purwanchal 9862176689
Chitwan 9801070746
Butwal 9807522664
Dang 9801920169
Stay connected with BloodPal!"""
response = ('Thank you ' + name +
' for registration as a blood donor We will contact you at the time of urgency in your area.'
)
response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['Dummy text']}}, {'text': {'text': [
response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [
'Dummy text']}}]
reply = {'fulfillmentMessages': response2}
return reply
def world_data_live():
text = death_global()
response = [{'quickReplies': {'title': text, 'quickReplies': [
'Provience Data', 'Nepali News', 'World Data', 'Symptoms',
"Corona FAQ's", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def district_all_summary():
text = dss.district_all_summary()
response = [{'quickReplies': {'title': text, 'quickReplies': [
'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',
"Corona FAQ's", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def province_all_summary():
text = dss.provience_all_summary()
print(text)
response = [{'quickReplies': {'title': text, 'quickReplies': [
'District-Summary', 'Province-Data', 'World Data',
'Preventions', "Corona FAQ's", 'Corona Quiz']}, 'platform':
'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def proviencewise_detail():
pcode = data['queryResult']['parameters']['custom-province-ent']
province = int(pcode)
print(type(province))
response_summary = dss.ardp(province)
print(response_summary)
response = [{'card': {'title': 'Covid-19 Provience: ' + str(
province) + ' | Details', 'subtitle': response_summary,
'imageUri':
'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',
'buttons': [{'text': 'Prov ' + str(province) + ' District Data',
'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(
province) + ' Vdc-Mun Data', 'postback':
'dis-vdc data detail int'}, {'text': 'Latest Nepali News',
'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {
'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def dis_vdc_detail():
cod = data['queryResult']['parameters']['custom-province-ent']
dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']
print(type(dvdc))
print(dvdc)
code = int(cod)
print(type(code))
if dvdc == 'vdc':
print('inside vdc')
typ = 'vdc'
else:
print('inside district')
typ = 'district'
data_return = dss.ard(code, typ)
response = [{'quickReplies': {'title': data_return, 'quickReplies':
['District Summary', 'Province Summary', 'Nepali News',
'World Data', 'Preventions', "Corona FAQ's", 'Corona Quiz']},
'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def nepal_data_new_main_int():
url = 'https://nepalcorona.info/api/v1/data/nepal'
response = requests.get(url)
todos = json.loads(response.text)
covid_df = dss.create_covid_df()
response2 = 'Nepal Cases \n Positive :' + str(todos['tested_positive']
) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(
todos['deaths']) + ' ' + '\n'
print(response2)
response_summary = dss.affected_summary()
response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},
{'text': {'text': ['']}}, {'card': {'title':
'Covid-19 Nepal | Stats', 'subtitle': response_summary,
'imageUri':
'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'
, 'buttons': [{'text': 'Province Summary', 'postback':
'province data int'}, {'text': 'District-Summary', 'postback':
'district data int'}, {'text': 'Latest Nepali News', 'postback':
'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':
['Dummy text']}}]
reply = {'fulfillmentMessages': response}
return reply
def batti_update():
url = (
'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'
)
response = requests.get(url)
todos = json.loads(response.text)
feeds = todos['feeds'][0]
response2 = 'Batti Status Now :' + str(feeds['field1'] +
'\n Last Updated: ' + str(feeds['created_at']))
print(response2)
reply = {'fulfillmentText': response2}
return reply
def default():
return 'Incorrect Data'
switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':
news_nepal_int, 'i need help main int - yes': i_need_help_yes,
'faq-que-ans-int': faq_ques_ans,
'bloodpal-need-blood-main-int - yes': blood_pal_yes,
'data world int': world_data_live, 'district data int':
district_all_summary, 'province data int': province_all_summary,
'province-wise-data': proviencewise_detail,
'dis-vdc data detail int': dis_vdc_detail,
'bloodpal-become-donor-main-int': blood_pal_donor_yes,
'batti-update-intent': batti_update}
def switch(intentname):
return switcher.get(intentname, default)()
reply = switch(intent)
return jsonify(reply)
if __name__ == '__main__':
app.run()
<|reserved_special_token_1|>
from flask import Flask, jsonify, request
import requests, json, random
from bs4 import BeautifulSoup
import gspread
import pandas as pd
import dataservices as dss
from oauth2client.service_account import ServiceAccountCredentials
# page = requests.get("https://www.worldometers.info/coronavirus/")
# soup = BeautifulSoup(page.content, 'html.parser')
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
# Initialize application
app = Flask(__name__)
@app.route("/")
def hello():
return "Flask setup"
def sheets_row_writer(data_list):
print("sheets method invoked")
credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(1)
# worksheet = client.open('corona-help-resource-management').BloodPal
worksheet.append_row(data_list)
print("Write complete")
def sheets_row_writer_donor(data_list_donor):
print("donor sheets method invoked")
credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)
client = gspread.authorize(credentials)
sh = client.open('corona-help-resource-management')
worksheet = sh.get_worksheet(2)
# worksheet = client.open('corona-help-resource-management').BloodPal
worksheet.append_row(data_list_donor)
print("Write complete")
def death_global():
page = requests.get("https://www.worldometers.info/coronavirus/")
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all("div", {"class":"maincounter-number"})
cases_list = []
active = soup.find("div", {"class":"number-table-main"})
active_cases = active.text
for res in result:
cases_list.append(res.text)
return "There are"+cases_list[0]+" Total cases out of which"+cases_list[1]+" have died and"+cases_list[2]+" have recovered . There are still "+active_cases+" active cases."
app.route("/death/global", methods=['POST'])
def death_global_api():
data = request.get_json(silent=True)
page = requests.get("https://www.worldometers.info/coronavirus/")
response = death_global()
reply = { "fulfillmentText": response }
return jsonify(reply)
def death_country(id):
idu = id.upper()
page = requests.get("https://www.worldometers.info/coronavirus/country/"+id+"/")
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find_all("div", {"class":"maincounter-number"})
active = soup.find("div", {"class":"number-table-main"})
active_cases = active.text
cases_list = []
for res in result:
cases_list.append(res.text)
return "In " +idu+" There are"+cases_list[0]+"Total cases out of which"+cases_list[1]+"are dead and"+cases_list[2]+"have already recovered . There are still "+active_cases+ " active cases ."
@app.route('/get_country_detail', methods=['POST'])
def get_country_detail():
data = request.get_json(silent=True)
intent = data['queryResult']['intent']['displayName']
print (intent)
def news_nepal_int():
url = "https://nepalcorona.info/api/v1/news"
response = requests.get(url)
news = json.loads(response.text)
data = news['data']
data1 = data[0]
data2 = data[1]
data3 = data[2]
response2 = [{
"card":{
"title":data1['title'],
"subtitle":"Source: "+data1['source']+" >>",
"imageUri":data1['image_url'],
"buttons":[
{
"text":"Read Full Story",
"postback":data1['url']
},
{
"text":"Corona Symptoms",
"postback":"symptoms"
}
]
},
"platform":"FACEBOOK"
},
{
"card":{
"title":data2['title'],
"subtitle":"Source "+data2['source']+" >>",
"imageUri":data2['image_url'],
"buttons":[
{
"text":"Read Full Story",
"postback":data2['url']
},
{
"text":"Live Nepal Data",
"postback":"live-nepal-data"
}
]
},
"platform":"FACEBOOK"
},
{
"card":{
"title":data3['title'],
"subtitle":"Source "+data3['source']+" >>",
"imageUri":data3['image_url'],
"buttons":[
{
"text":"Read Full Story",
"postback":data3['url']
},
{
"text":"Self Isolation",
"postback":"self isolation"
}
]
},
"platform":"FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
},
]
reply = { "fulfillmentMessages": response2 }
return reply
def i_need_help_yes():
name = data['queryResult']['parameters']['name-people']
place = data['queryResult']['parameters']['name-place']
item_required = data['queryResult']['parameters']['help-ent']
phone = data['queryResult']['parameters']['phone-number']
ilist = [item_required[0],name[0],phone[0],place[0]]
sheets_row_writer(ilist)
response2 = "Hello "+name[0]+" so you are looking for "+item_required[0]+" Your location is "+place[0]+" One of our Team will contact you @ " +phone[0]+" soon !"
response = [
{
"quickReplies": {
"title": response2,
"quickReplies": [
"Call a Doctor",
"Get Online Support"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def faq_ques_ans():
ff = data['originalDetectIntentRequest']['payload']['data']['message']['text']
url = "https://nepalcorona.info/api/v1/faqs"
response = requests.get(url)
todos = json.loads(response.text)
rand = random.randrange(0, 45, 1)
opt3 = ["Live Nepali Data","Latest Nepali News","Symptoms","Preventions","Self Isolation","Play Corona Quiz"]
faqs = todos['data']
faq = faqs[rand]
if(ff=="English FAQ" or ff =="More Quizzles" or ff =="भाषा परिवर्तन"):
randq= faq['question']
randa = faq['answer']
opt1 = "More Quizzles"
opt2 = "Switch Language"
else:
randq = faq['question_np']
randa = faq['answer_np']
opt1 = "अरु देखाउनुहोस >>"
opt2 = "भाषा परिवर्तन"
response2 = "Q. "+randq+"\n A. "+randa+"\n"
response = [{
"text": {
"text": [
randq
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
},
{
"quickReplies": {
"title": randa,
"quickReplies": [
opt1,
opt2,
random.choice(opt3)
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def blood_pal_yes():
print (intent)
print (data)
blood_group = data['queryResult']['parameters']['blood-group']
blood_amount = data['queryResult']['parameters']['blood-pint']
location = data['queryResult']['parameters']['blood-location']
case = data['queryResult']['parameters']['blood-case']
date = data['queryResult']['parameters']['blood-date']
phone = data['queryResult']['parameters']['blood-number']
ilist = [blood_group,blood_amount,location,case,date,phone]
sheets_row_writer(ilist)
response3 = "For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!"
response = "The following request has been sent. We will contact you shortly. "+blood_group+" blood ("+str(blood_amount)+" ) required for "+case+" at "+location+" On "+date+" - "+phone+" Thank you ."
response2 = [{
"text": {
"text": [
response
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
},
{
"text": {
"text": [
response3
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response2 }
return reply
def blood_pal_donor_yes():
print (intent)
print (data)
permananet_address = data['queryResult']['parameters']['permananet-address']
height = data['queryResult']['parameters']['height']
gender = data['queryResult']['parameters']['gender']
age = data['queryResult']['parameters']['age']
blood = data['queryResult']['parameters']['blood']
current_address = data['queryResult']['parameters']['current-address']
email = data['queryResult']['parameters']['email']
name = data['queryResult']['parameters']['name']
last_donation= data['queryResult']['parameters']['last-donation']
weight = data['queryResult']['parameters']['weight']
number = data['queryResult']['parameters']['number']
ilist = [name,number,email,current_address,permananet_address,age,height,weight,gender,blood,last_donation]
sheets_row_writer_donor(ilist)
response3 = "For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!"
response = "Thank you "+name+" for registration as a blood donor We will contact you at the time of urgency in your area."
response2 = [{
"text": {
"text": [
response
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
},
{
"text": {
"text": [
response3
]
},
"platform": "FACEBOOK"
},{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response2 }
return reply
def world_data_live():
text = death_global()
response = [
{
"quickReplies": {
"title": text,
"quickReplies": [
"Provience Data",
"Nepali News",
"World Data",
"Symptoms",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
#district summary all
def district_all_summary():
text = dss.district_all_summary()
response = [
{
"quickReplies": {
"title": text,
"quickReplies": [
"Provience Summary",
"Nepali News",
"World Data",
"Symptoms",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
#provience summary all should remove
def province_all_summary():
text = dss.provience_all_summary()
print(text)
response = [
{
"quickReplies": {
"title": text,
"quickReplies": [
"District-Summary",
"Province-Data",
"World Data",
"Preventions",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def proviencewise_detail():
#get provience name
#return dss.ard(provience)
#card
pcode = data['queryResult']['parameters']['custom-province-ent']
province = int(pcode)
print(type(province))
response_summary = dss.ardp(province)
print(response_summary)
response = [
{
"card":{
"title": "Covid-19 Provience: "+str(province)+" | Details",
"subtitle":response_summary,
"imageUri": "https://setopati.net/wp-content/uploads/2018/02/province6.jpg",
"buttons":[
{
"text":"Prov "+str(province)+" District Data",
"postback":"dis-vdc data detail int"
},
{
"text":"Prov "+str(province)+" Vdc-Mun Data",
"postback":"dis-vdc data detail int"
},
{
"text":"Latest Nepali News",
"postback":"news-nepal-int"
}
]
},
"platform":"FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
},
]
reply = { "fulfillmentMessages": response }
return reply
def dis_vdc_detail():
cod = data['queryResult']['parameters']['custom-province-ent']
dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']
print(type(dvdc))
print(dvdc)
code = int(cod)
print(type(code))
# provincecode = pcode
if(dvdc=="vdc"):
print('inside vdc')
typ = "vdc"
else:
print('inside district')
typ = "district"
data_return = dss.ard(code,typ)
response = [
{
"quickReplies": {
"title": data_return,
"quickReplies": [
"District Summary",
"Province Summary",
"Nepali News",
"World Data",
"Preventions",
"Corona FAQ's",
"Corona Quiz"
]
},
"platform": "FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
}
]
reply = { "fulfillmentMessages": response }
return reply
def nepal_data_new_main_int():
url = "https://nepalcorona.info/api/v1/data/nepal"
response = requests.get(url)
todos = json.loads(response.text)
covid_df = dss.create_covid_df()
response2 = "Nepal Cases \n Positive :"+str(todos["tested_positive"])+" | Recovered: "+str(todos["recovered"])+"| Deaths:"+str(todos["deaths"])+" "+"\n"
print(response2)
response_summary = dss.affected_summary()
response = [
{
"text": {
"text": [
response2
]
},
"platform": "FACEBOOK"
},
{
"text": {
"text": [
""
]
}
},
{
"card":{
"title": "Covid-19 Nepal | Stats",
"subtitle":response_summary,
# "subtitle": "Find details by Province, Municipals and Districts for Nepal",
"imageUri": "https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png",
"buttons":[
{
"text":"Province Summary",
"postback":"province data int"
},
{
"text":"District-Summary",
"postback":"district data int"
},
{
"text":"Latest Nepali News",
"postback":"news-nepal-int"
}
]
},
"platform":"FACEBOOK"
},
{
"text":{"text":["Dummy text"]}
},
]
reply = { "fulfillmentMessages": response }
return reply
def batti_update():
url = "https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM"
response = requests.get(url)
todos = json.loads(response.text)
feeds = todos["feeds"][0]
response2 = "Batti Status Now :"+str(feeds["field1"]+"\n Last Updated: "+str(feeds["created_at"]))
print(response2)
reply = { "fulfillmentText": response2 }
return reply
def default():
return "Incorrect Data"
switcher = {
"nepal data int": nepal_data_new_main_int,
"news-nepal-int": news_nepal_int,
"i need help main int - yes": i_need_help_yes,
"faq-que-ans-int": faq_ques_ans,
"bloodpal-need-blood-main-int - yes": blood_pal_yes,
"data world int": world_data_live,
"district data int": district_all_summary,
"province data int": province_all_summary,
"province-wise-data": proviencewise_detail,
"dis-vdc data detail int": dis_vdc_detail,
"bloodpal-become-donor-main-int":blood_pal_donor_yes,
"batti-update-intent":batti_update
}
def switch(intentname):
return switcher.get(intentname, default)()
reply = switch(intent)
return jsonify(reply)
if __name__ == '__main__':
app.run()
|
flexible
|
{
"blob_id": "267cb37f2ccad5b02a809d9b85327eacd9a49515",
"index": 1061,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\n<mask token>\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\n<mask token>\n\n\n@app.route('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. ' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. '\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\n<mask token>\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\n 'https://www.worldometers.info/coronavirus/country/' + id + '/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n return ('In ' + idu + ' There are' + cases_list[0] +\n 'Total cases out of which' + cases_list[1] + 'are dead and' +\n cases_list[2] + 'have already recovered . There are still ' +\n active_cases + ' active cases .')\n\n\n@app.route('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. ' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. '\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\n<mask token>\n",
"step-3": "<mask token>\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\ndef death_global():\n page = requests.get('https://www.worldometers.info/coronavirus/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n cases_list = []\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n for res in result:\n cases_list.append(res.text)\n return 'There are' + cases_list[0\n ] + ' Total cases out of which' + cases_list[1\n ] + ' have died and' + cases_list[2\n ] + ' have recovered . There are still ' + active_cases + ' active cases.'\n\n\napp.route('/death/global', methods=['POST'])\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\n 'https://www.worldometers.info/coronavirus/country/' + id + '/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n return ('In ' + idu + ' There are' + cases_list[0] +\n 'Total cases out of which' + cases_list[1] + 'are dead and' +\n cases_list[2] + 'have already recovered . There are still ' +\n active_cases + ' active cases .')\n\n\n@app.route('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. ' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. '\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-4": "from flask import Flask, jsonify, request\nimport requests, json, random\nfrom bs4 import BeautifulSoup\nimport gspread\nimport pandas as pd\nimport dataservices as dss\nfrom oauth2client.service_account import ServiceAccountCredentials\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello():\n return 'Flask setup'\n\n\ndef sheets_row_writer(data_list):\n print('sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n worksheet.append_row(data_list)\n print('Write complete')\n\n\ndef sheets_row_writer_donor(data_list_donor):\n print('donor sheets method invoked')\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n worksheet.append_row(data_list_donor)\n print('Write complete')\n\n\ndef death_global():\n page = requests.get('https://www.worldometers.info/coronavirus/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n cases_list = []\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n for res in result:\n cases_list.append(res.text)\n return 'There are' + cases_list[0\n ] + ' Total cases out of which' + cases_list[1\n ] + ' have died and' + cases_list[2\n ] + ' have recovered . There are still ' + active_cases + ' active cases.'\n\n\napp.route('/death/global', methods=['POST'])\n\n\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get('https://www.worldometers.info/coronavirus/')\n response = death_global()\n reply = {'fulfillmentText': response}\n return jsonify(reply)\n\n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\n 'https://www.worldometers.info/coronavirus/country/' + id + '/')\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all('div', {'class': 'maincounter-number'})\n active = soup.find('div', {'class': 'number-table-main'})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n return ('In ' + idu + ' There are' + cases_list[0] +\n 'Total cases out of which' + cases_list[1] + 'are dead and' +\n cases_list[2] + 'have already recovered . There are still ' +\n active_cases + ' active cases .')\n\n\n@app.route('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print(intent)\n\n def news_nepal_int():\n url = 'https://nepalcorona.info/api/v1/news'\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n response2 = [{'card': {'title': data1['title'], 'subtitle': \n 'Source: ' + data1['source'] + ' >>', 'imageUri': data1[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data1['url']}, {'text': 'Corona Symptoms',\n 'postback': 'symptoms'}]}, 'platform': 'FACEBOOK'}, {'card': {\n 'title': data2['title'], 'subtitle': 'Source ' + data2['source'\n ] + ' >>', 'imageUri': data2['image_url'], 'buttons': [{'text':\n 'Read Full Story', 'postback': data2['url']}, {'text':\n 'Live Nepal Data', 'postback': 'live-nepal-data'}]}, 'platform':\n 'FACEBOOK'}, {'card': {'title': data3['title'], 'subtitle': \n 'Source ' + data3['source'] + ' >>', 'imageUri': data3[\n 'image_url'], 'buttons': [{'text': 'Read Full Story',\n 'postback': data3['url']}, {'text': 'Self Isolation',\n 'postback': 'self isolation'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0], name[0], phone[0], place[0]]\n sheets_row_writer(ilist)\n response2 = 'Hello ' + name[0\n ] + ' so you are looking for ' + item_required[0\n ] + ' Your location is ' + place[0\n ] + ' One of our Team will contact you @ ' + phone[0] + ' soon !'\n response = [{'quickReplies': {'title': response2, 'quickReplies': [\n 'Call a Doctor', 'Get Online Support']}, 'platform': 'FACEBOOK'\n }, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message'][\n 'text']\n url = 'https://nepalcorona.info/api/v1/faqs'\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = ['Live Nepali Data', 'Latest Nepali News', 'Symptoms',\n 'Preventions', 'Self Isolation', 'Play Corona Quiz']\n faqs = todos['data']\n faq = faqs[rand]\n if (ff == 'English FAQ' or ff == 'More Quizzles' or ff ==\n 'भाषा परिवर्तन'):\n randq = faq['question']\n randa = faq['answer']\n opt1 = 'More Quizzles'\n opt2 = 'Switch Language'\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = 'अरु देखाउनुहोस >>'\n opt2 = 'भाषा परिवर्तन'\n response2 = 'Q. ' + randq + '\\n A. ' + randa + '\\n'\n response = [{'text': {'text': [randq]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}, {'quickReplies': {'title':\n randa, 'quickReplies': [opt1, opt2, random.choice(opt3)]},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def blood_pal_yes():\n print(intent)\n print(data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group, blood_amount, location, case, date, phone]\n sheets_row_writer(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = (\n 'The following request has been sent. We will contact you shortly. '\n + blood_group + ' blood (' + str(blood_amount) +\n ' ) required for ' + case + ' at ' + location + ' On ' + date +\n ' - ' + phone + ' Thank you .')\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def blood_pal_donor_yes():\n print(intent)\n print(data)\n permananet_address = data['queryResult']['parameters'][\n 'permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation = data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name, number, email, current_address, permananet_address,\n age, height, weight, gender, blood, last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"\"\"For critical case, please contact \n Kathmandu 9880998523 \n Bhaktapur 9880998525 \n Kavre 9869294490 \n Purwanchal 9862176689 \n Chitwan 9801070746 \n Butwal 9807522664 \n Dang 9801920169 \n Stay connected with BloodPal!\"\"\"\n response = ('Thank you ' + name +\n ' for registration as a blood donor We will contact you at the time of urgency in your area.'\n )\n response2 = [{'text': {'text': [response]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['Dummy text']}}, {'text': {'text': [\n response3]}, 'platform': 'FACEBOOK'}, {'text': {'text': [\n 'Dummy text']}}]\n reply = {'fulfillmentMessages': response2}\n return reply\n\n def world_data_live():\n text = death_global()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Data', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def district_all_summary():\n text = dss.district_all_summary()\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'Provience Summary', 'Nepali News', 'World Data', 'Symptoms',\n \"Corona FAQ's\", 'Corona Quiz']}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [{'quickReplies': {'title': text, 'quickReplies': [\n 'District-Summary', 'Province-Data', 'World Data',\n 'Preventions', \"Corona FAQ's\", 'Corona Quiz']}, 'platform':\n 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def proviencewise_detail():\n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n response = [{'card': {'title': 'Covid-19 Provience: ' + str(\n province) + ' | Details', 'subtitle': response_summary,\n 'imageUri':\n 'https://setopati.net/wp-content/uploads/2018/02/province6.jpg',\n 'buttons': [{'text': 'Prov ' + str(province) + ' District Data',\n 'postback': 'dis-vdc data detail int'}, {'text': 'Prov ' + str(\n province) + ' Vdc-Mun Data', 'postback':\n 'dis-vdc data detail int'}, {'text': 'Latest Nepali News',\n 'postback': 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {\n 'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n if dvdc == 'vdc':\n print('inside vdc')\n typ = 'vdc'\n else:\n print('inside district')\n typ = 'district'\n data_return = dss.ard(code, typ)\n response = [{'quickReplies': {'title': data_return, 'quickReplies':\n ['District Summary', 'Province Summary', 'Nepali News',\n 'World Data', 'Preventions', \"Corona FAQ's\", 'Corona Quiz']},\n 'platform': 'FACEBOOK'}, {'text': {'text': ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def nepal_data_new_main_int():\n url = 'https://nepalcorona.info/api/v1/data/nepal'\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n response2 = 'Nepal Cases \\n Positive :' + str(todos['tested_positive']\n ) + ' | Recovered: ' + str(todos['recovered']) + '| Deaths:' + str(\n todos['deaths']) + ' ' + '\\n'\n print(response2)\n response_summary = dss.affected_summary()\n response = [{'text': {'text': [response2]}, 'platform': 'FACEBOOK'},\n {'text': {'text': ['']}}, {'card': {'title':\n 'Covid-19 Nepal | Stats', 'subtitle': response_summary,\n 'imageUri':\n 'https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png'\n , 'buttons': [{'text': 'Province Summary', 'postback':\n 'province data int'}, {'text': 'District-Summary', 'postback':\n 'district data int'}, {'text': 'Latest Nepali News', 'postback':\n 'news-nepal-int'}]}, 'platform': 'FACEBOOK'}, {'text': {'text':\n ['Dummy text']}}]\n reply = {'fulfillmentMessages': response}\n return reply\n\n def batti_update():\n url = (\n 'https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM'\n )\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos['feeds'][0]\n response2 = 'Batti Status Now :' + str(feeds['field1'] +\n '\\n Last Updated: ' + str(feeds['created_at']))\n print(response2)\n reply = {'fulfillmentText': response2}\n return reply\n\n def default():\n return 'Incorrect Data'\n switcher = {'nepal data int': nepal_data_new_main_int, 'news-nepal-int':\n news_nepal_int, 'i need help main int - yes': i_need_help_yes,\n 'faq-que-ans-int': faq_ques_ans,\n 'bloodpal-need-blood-main-int - yes': blood_pal_yes,\n 'data world int': world_data_live, 'district data int':\n district_all_summary, 'province data int': province_all_summary,\n 'province-wise-data': proviencewise_detail,\n 'dis-vdc data detail int': dis_vdc_detail,\n 'bloodpal-become-donor-main-int': blood_pal_donor_yes,\n 'batti-update-intent': batti_update}\n\n def switch(intentname):\n return switcher.get(intentname, default)()\n reply = switch(intent)\n return jsonify(reply)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-5": "from flask import Flask, jsonify, request\nimport requests, json, random\nfrom bs4 import BeautifulSoup\nimport gspread\nimport pandas as pd\nimport dataservices as dss\nfrom oauth2client.service_account import ServiceAccountCredentials\n# page = requests.get(\"https://www.worldometers.info/coronavirus/\")\n# soup = BeautifulSoup(page.content, 'html.parser')\nscope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\n\n# Initialize application\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef hello():\n return \"Flask setup\"\n\ndef sheets_row_writer(data_list):\n print(\"sheets method invoked\")\n credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(1)\n # worksheet = client.open('corona-help-resource-management').BloodPal\n worksheet.append_row(data_list) \n print(\"Write complete\")\n\ndef sheets_row_writer_donor(data_list_donor):\n print(\"donor sheets method invoked\")\n credentials = ServiceAccountCredentials.from_json_keyfile_name('mechnepal-test-54c4387178d9.json', scope)\n client = gspread.authorize(credentials)\n sh = client.open('corona-help-resource-management')\n worksheet = sh.get_worksheet(2)\n # worksheet = client.open('corona-help-resource-management').BloodPal\n worksheet.append_row(data_list_donor) \n print(\"Write complete\")\n\ndef death_global():\n page = requests.get(\"https://www.worldometers.info/coronavirus/\")\n soup = BeautifulSoup(page.content, 'html.parser')\n \n result = soup.find_all(\"div\", {\"class\":\"maincounter-number\"})\n cases_list = []\n\n active = soup.find(\"div\", {\"class\":\"number-table-main\"})\n active_cases = active.text\n\n for res in result:\n cases_list.append(res.text)\n\n return \"There are\"+cases_list[0]+\" Total cases out of which\"+cases_list[1]+\" have died and\"+cases_list[2]+\" have recovered . There are still \"+active_cases+\" active cases.\"\n\napp.route(\"/death/global\", methods=['POST'])\ndef death_global_api():\n data = request.get_json(silent=True)\n page = requests.get(\"https://www.worldometers.info/coronavirus/\")\n response = death_global()\n reply = { \"fulfillmentText\": response } \n return jsonify(reply)\n \n\ndef death_country(id):\n idu = id.upper()\n page = requests.get(\"https://www.worldometers.info/coronavirus/country/\"+id+\"/\")\n soup = BeautifulSoup(page.content, 'html.parser')\n result = soup.find_all(\"div\", {\"class\":\"maincounter-number\"})\n \n active = soup.find(\"div\", {\"class\":\"number-table-main\"})\n active_cases = active.text\n cases_list = []\n for res in result:\n cases_list.append(res.text)\n\n return \"In \" +idu+\" There are\"+cases_list[0]+\"Total cases out of which\"+cases_list[1]+\"are dead and\"+cases_list[2]+\"have already recovered . There are still \"+active_cases+ \" active cases .\"\n\n@app.route('/get_country_detail', methods=['POST'])\ndef get_country_detail():\n data = request.get_json(silent=True)\n intent = data['queryResult']['intent']['displayName']\n print (intent)\n \n def news_nepal_int():\n url = \"https://nepalcorona.info/api/v1/news\"\n response = requests.get(url)\n news = json.loads(response.text)\n data = news['data']\n data1 = data[0]\n data2 = data[1]\n data3 = data[2]\n \n response2 = [{\n \"card\":{\n \"title\":data1['title'],\n \"subtitle\":\"Source: \"+data1['source']+\" >>\",\n \"imageUri\":data1['image_url'],\n \"buttons\":[\n {\n \"text\":\"Read Full Story\",\n \"postback\":data1['url']\n },\n {\n \"text\":\"Corona Symptoms\",\n \"postback\":\"symptoms\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"card\":{\n \"title\":data2['title'],\n \"subtitle\":\"Source \"+data2['source']+\" >>\",\n \"imageUri\":data2['image_url'],\n \"buttons\":[\n {\n \"text\":\"Read Full Story\",\n \"postback\":data2['url']\n },\n {\n \"text\":\"Live Nepal Data\",\n \"postback\":\"live-nepal-data\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"card\":{\n \"title\":data3['title'],\n \"subtitle\":\"Source \"+data3['source']+\" >>\",\n \"imageUri\":data3['image_url'],\n \"buttons\":[\n {\n \"text\":\"Read Full Story\",\n \"postback\":data3['url']\n },\n {\n \"text\":\"Self Isolation\",\n \"postback\":\"self isolation\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n\n ]\n\n reply = { \"fulfillmentMessages\": response2 }\n return reply\n \n def i_need_help_yes():\n name = data['queryResult']['parameters']['name-people']\n place = data['queryResult']['parameters']['name-place']\n item_required = data['queryResult']['parameters']['help-ent']\n phone = data['queryResult']['parameters']['phone-number']\n ilist = [item_required[0],name[0],phone[0],place[0]]\n sheets_row_writer(ilist)\n response2 = \"Hello \"+name[0]+\" so you are looking for \"+item_required[0]+\" Your location is \"+place[0]+\" One of our Team will contact you @ \" +phone[0]+\" soon !\"\n response = [\n\n {\n \"quickReplies\": {\n \"title\": response2,\n \"quickReplies\": [\n \"Call a Doctor\",\n \"Get Online Support\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n }\n \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n\n def faq_ques_ans():\n ff = data['originalDetectIntentRequest']['payload']['data']['message']['text']\n url = \"https://nepalcorona.info/api/v1/faqs\"\n response = requests.get(url)\n todos = json.loads(response.text)\n rand = random.randrange(0, 45, 1)\n opt3 = [\"Live Nepali Data\",\"Latest Nepali News\",\"Symptoms\",\"Preventions\",\"Self Isolation\",\"Play Corona Quiz\"]\n faqs = todos['data']\n faq = faqs[rand]\n if(ff==\"English FAQ\" or ff ==\"More Quizzles\" or ff ==\"भाषा परिवर्तन\"):\n randq= faq['question']\n randa = faq['answer']\n opt1 = \"More Quizzles\"\n opt2 = \"Switch Language\"\n else:\n randq = faq['question_np']\n randa = faq['answer_np']\n opt1 = \"अरु देखाउनुहोस >>\"\n opt2 = \"भाषा परिवर्तन\"\n\n response2 = \"Q. \"+randq+\"\\n A. \"+randa+\"\\n\"\n response = [{\n \"text\": {\n \"text\": [\n randq\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n\n {\n \"quickReplies\": {\n \"title\": randa,\n \"quickReplies\": [\n opt1,\n opt2,\n random.choice(opt3)\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n }\n \n ]\n reply = { \"fulfillmentMessages\": response }\n\n return reply\n \n def blood_pal_yes():\n print (intent)\n print (data)\n blood_group = data['queryResult']['parameters']['blood-group']\n blood_amount = data['queryResult']['parameters']['blood-pint']\n location = data['queryResult']['parameters']['blood-location']\n case = data['queryResult']['parameters']['blood-case']\n date = data['queryResult']['parameters']['blood-date']\n phone = data['queryResult']['parameters']['blood-number']\n ilist = [blood_group,blood_amount,location,case,date,phone]\n sheets_row_writer(ilist)\n response3 = \"For critical case, please contact \\n Kathmandu 9880998523 \\n Bhaktapur 9880998525 \\n Kavre 9869294490 \\n Purwanchal 9862176689 \\n Chitwan 9801070746 \\n Butwal 9807522664 \\n Dang 9801920169 \\n Stay connected with BloodPal!\"\n response = \"The following request has been sent. We will contact you shortly. \"+blood_group+\" blood (\"+str(blood_amount)+\" ) required for \"+case+\" at \"+location+\" On \"+date+\" - \"+phone+\" Thank you .\"\n response2 = [{\n \"text\": {\n \"text\": [\n response\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n {\n \"text\": {\n \"text\": [\n response3\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n }\n \n ]\n reply = { \"fulfillmentMessages\": response2 }\n return reply\n \n def blood_pal_donor_yes():\n print (intent)\n print (data)\n permananet_address = data['queryResult']['parameters']['permananet-address']\n height = data['queryResult']['parameters']['height']\n gender = data['queryResult']['parameters']['gender']\n age = data['queryResult']['parameters']['age']\n blood = data['queryResult']['parameters']['blood']\n current_address = data['queryResult']['parameters']['current-address']\n email = data['queryResult']['parameters']['email']\n name = data['queryResult']['parameters']['name']\n last_donation= data['queryResult']['parameters']['last-donation']\n weight = data['queryResult']['parameters']['weight']\n number = data['queryResult']['parameters']['number']\n ilist = [name,number,email,current_address,permananet_address,age,height,weight,gender,blood,last_donation]\n sheets_row_writer_donor(ilist)\n response3 = \"For critical case, please contact \\n Kathmandu 9880998523 \\n Bhaktapur 9880998525 \\n Kavre 9869294490 \\n Purwanchal 9862176689 \\n Chitwan 9801070746 \\n Butwal 9807522664 \\n Dang 9801920169 \\n Stay connected with BloodPal!\"\n response = \"Thank you \"+name+\" for registration as a blood donor We will contact you at the time of urgency in your area.\"\n response2 = [{\n \"text\": {\n \"text\": [\n response\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n {\n \"text\": {\n \"text\": [\n response3\n ]\n },\n \"platform\": \"FACEBOOK\"\n },{\n \"text\":{\"text\":[\"Dummy text\"]}\n }\n \n ]\n reply = { \"fulfillmentMessages\": response2 }\n return reply\n\n def world_data_live():\n text = death_global()\n response = [\n {\n \"quickReplies\": {\n \"title\": text,\n \"quickReplies\": [\n \"Provience Data\",\n \"Nepali News\",\n \"World Data\",\n \"Symptoms\",\n \"Corona FAQ's\",\n \"Corona Quiz\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n } \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n \n #district summary all\n def district_all_summary():\n text = dss.district_all_summary()\n response = [\n {\n \"quickReplies\": {\n \"title\": text,\n \"quickReplies\": [\n \"Provience Summary\",\n \"Nepali News\",\n \"World Data\",\n \"Symptoms\",\n \"Corona FAQ's\",\n \"Corona Quiz\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n } \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n \n #provience summary all should remove \n def province_all_summary():\n text = dss.provience_all_summary()\n print(text)\n response = [\n {\n \"quickReplies\": {\n \"title\": text,\n \"quickReplies\": [\n \"District-Summary\",\n \"Province-Data\",\n \"World Data\",\n \"Preventions\",\n \"Corona FAQ's\",\n \"Corona Quiz\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n } \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n\n def proviencewise_detail():\n #get provience name\n #return dss.ard(provience)\n #card \n pcode = data['queryResult']['parameters']['custom-province-ent']\n province = int(pcode)\n print(type(province))\n response_summary = dss.ardp(province)\n print(response_summary)\n\n response = [\n {\n \"card\":{\n \"title\": \"Covid-19 Provience: \"+str(province)+\" | Details\",\n \"subtitle\":response_summary,\n \"imageUri\": \"https://setopati.net/wp-content/uploads/2018/02/province6.jpg\",\n \"buttons\":[\n {\n \"text\":\"Prov \"+str(province)+\" District Data\",\n \"postback\":\"dis-vdc data detail int\"\n },\n {\n \"text\":\"Prov \"+str(province)+\" Vdc-Mun Data\",\n \"postback\":\"dis-vdc data detail int\"\n },\n {\n \"text\":\"Latest Nepali News\",\n \"postback\":\"news-nepal-int\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n ]\n\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n \n def dis_vdc_detail():\n cod = data['queryResult']['parameters']['custom-province-ent']\n dvdc = data['queryResult']['parameters']['custom-dis-vdc-mun-entity']\n \n print(type(dvdc))\n print(dvdc)\n code = int(cod)\n print(type(code))\n\n\n # provincecode = pcode\n if(dvdc==\"vdc\"):\n print('inside vdc')\n typ = \"vdc\" \n else:\n print('inside district')\n typ = \"district\"\n\n data_return = dss.ard(code,typ)\n response = [\n {\n \"quickReplies\": {\n \"title\": data_return,\n \"quickReplies\": [\n \"District Summary\",\n \"Province Summary\",\n \"Nepali News\",\n \"World Data\",\n \"Preventions\",\n \"Corona FAQ's\",\n \"Corona Quiz\"\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n } \n ]\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n\n def nepal_data_new_main_int():\n url = \"https://nepalcorona.info/api/v1/data/nepal\"\n response = requests.get(url)\n todos = json.loads(response.text)\n covid_df = dss.create_covid_df()\n\n \n response2 = \"Nepal Cases \\n Positive :\"+str(todos[\"tested_positive\"])+\" | Recovered: \"+str(todos[\"recovered\"])+\"| Deaths:\"+str(todos[\"deaths\"])+\" \"+\"\\n\"\n print(response2)\n response_summary = dss.affected_summary()\n\n response = [\n {\n \"text\": {\n \"text\": [\n response2\n ]\n },\n \"platform\": \"FACEBOOK\"\n },\n {\n \"text\": {\n \"text\": [\n \"\"\n ]\n }\n },\n {\n \"card\":{\n \"title\": \"Covid-19 Nepal | Stats\",\n \"subtitle\":response_summary,\n # \"subtitle\": \"Find details by Province, Municipals and Districts for Nepal\",\n \"imageUri\": \"https://stock.rtl.lu/rtl/800/rtl2008.lu/nt/p/2020/04/09/16/fdfbf19dc86cb2ef05908e9e83885f97.png\",\n \"buttons\":[\n {\n \"text\":\"Province Summary\",\n \"postback\":\"province data int\"\n },\n {\n \"text\":\"District-Summary\",\n \"postback\":\"district data int\"\n },\n {\n \"text\":\"Latest Nepali News\",\n \"postback\":\"news-nepal-int\"\n }\n ]\n },\n \"platform\":\"FACEBOOK\"\n },\n {\n \"text\":{\"text\":[\"Dummy text\"]}\n },\n ]\n\n\n reply = { \"fulfillmentMessages\": response }\n return reply\n\n def batti_update():\n url = \"https://api.thingspeak.com/channels/1095294/fields/1.json?api_key=U0AR6L9OIISHK7RZ&results=1&fbclid=IwAR1vlCZe6tEMvkEUYcTdUPw3F8OUM6P4RRSZScAyni1u_pDGi6KxHvURawM\"\n response = requests.get(url)\n todos = json.loads(response.text)\n feeds = todos[\"feeds\"][0]\n \n response2 = \"Batti Status Now :\"+str(feeds[\"field1\"]+\"\\n Last Updated: \"+str(feeds[\"created_at\"]))\n print(response2)\n reply = { \"fulfillmentText\": response2 }\n return reply\n\n\n def default():\n return \"Incorrect Data\"\n\n switcher = {\n \"nepal data int\": nepal_data_new_main_int,\n \"news-nepal-int\": news_nepal_int,\n \"i need help main int - yes\": i_need_help_yes,\n \"faq-que-ans-int\": faq_ques_ans,\n \"bloodpal-need-blood-main-int - yes\": blood_pal_yes,\n \"data world int\": world_data_live,\n \"district data int\": district_all_summary,\n \"province data int\": province_all_summary,\n \"province-wise-data\": proviencewise_detail,\n \"dis-vdc data detail int\": dis_vdc_detail,\n \"bloodpal-become-donor-main-int\":blood_pal_donor_yes,\n \"batti-update-intent\":batti_update\n }\n \n def switch(intentname):\n return switcher.get(intentname, default)()\n\n reply = switch(intent)\n return jsonify(reply)\n \n\nif __name__ == '__main__':\n \n app.run()\n",
"step-ids": [
5,
6,
9,
10,
11
]
}
|
[
5,
6,
9,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def solution(record):
answer = []
db = {}
chatting = []
for log in record:
log_list = log.split()
if log_list[0] == 'Enter':
db[log_list[1]] = log_list[2]
chatting.append([True, log_list[1]])
elif log_list[0] == 'Leave':
chatting.append([False, log_list[1]])
elif log_list[0] == 'Change':
db[log_list[1]] = log_list[2]
for chat in chatting:
if chat[0]:
answer.append(db[chat[1]] + '님이 들어왔습니다.')
else:
answer.append(db[chat[1]] + '님이 나갔습니다.')
return answer
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def solution(record):
answer = []
db = {}
chatting = []
for log in record:
log_list = log.split()
if log_list[0] == 'Enter':
db[log_list[1]] = log_list[2]
chatting.append([True, log_list[1]])
elif log_list[0] == 'Leave':
chatting.append([False, log_list[1]])
elif log_list[0] == 'Change':
db[log_list[1]] = log_list[2]
for chat in chatting:
if chat[0]:
answer.append(db[chat[1]] + '님이 들어왔습니다.')
else:
answer.append(db[chat[1]] + '님이 나갔습니다.')
return answer
print(solution(['Enter uid1234 Muzi', 'Enter uid4567 Prodo',
'Leave uid1234', 'Enter uid1234 Prodo', 'Change uid4567 Ryan']))
<|reserved_special_token_1|>
def solution(record):
answer = []
db = {}
chatting = []
for log in record:
log_list = log.split()
if log_list[0] == 'Enter':
db[log_list[1]] = log_list[2]
chatting.append([True, log_list[1]])
elif log_list[0] == 'Leave':
chatting.append([False, log_list[1]])
elif log_list[0] == 'Change':
db[log_list[1]] = log_list[2]
for chat in chatting:
if chat[0]:
answer.append(db[chat[1]] + '님이 들어왔습니다.')
else:
answer.append(db[chat[1]] + '님이 나갔습니다.')
return answer
print(solution(["Enter uid1234 Muzi", "Enter uid4567 Prodo","Leave uid1234","Enter uid1234 Prodo","Change uid4567 Ryan"]))
|
flexible
|
{
"blob_id": "3ffe16494eb45896563a2952f3bcf80fc19b2750",
"index": 1226,
"step-1": "<mask token>\n",
"step-2": "def solution(record):\n answer = []\n db = {}\n chatting = []\n for log in record:\n log_list = log.split()\n if log_list[0] == 'Enter':\n db[log_list[1]] = log_list[2]\n chatting.append([True, log_list[1]])\n elif log_list[0] == 'Leave':\n chatting.append([False, log_list[1]])\n elif log_list[0] == 'Change':\n db[log_list[1]] = log_list[2]\n for chat in chatting:\n if chat[0]:\n answer.append(db[chat[1]] + '님이 들어왔습니다.')\n else:\n answer.append(db[chat[1]] + '님이 나갔습니다.')\n return answer\n\n\n<mask token>\n",
"step-3": "def solution(record):\n answer = []\n db = {}\n chatting = []\n for log in record:\n log_list = log.split()\n if log_list[0] == 'Enter':\n db[log_list[1]] = log_list[2]\n chatting.append([True, log_list[1]])\n elif log_list[0] == 'Leave':\n chatting.append([False, log_list[1]])\n elif log_list[0] == 'Change':\n db[log_list[1]] = log_list[2]\n for chat in chatting:\n if chat[0]:\n answer.append(db[chat[1]] + '님이 들어왔습니다.')\n else:\n answer.append(db[chat[1]] + '님이 나갔습니다.')\n return answer\n\n\nprint(solution(['Enter uid1234 Muzi', 'Enter uid4567 Prodo',\n 'Leave uid1234', 'Enter uid1234 Prodo', 'Change uid4567 Ryan']))\n",
"step-4": "def solution(record):\n answer = []\n db = {}\n chatting = []\n\n for log in record:\n log_list = log.split()\n\n if log_list[0] == 'Enter':\n db[log_list[1]] = log_list[2]\n chatting.append([True, log_list[1]])\n elif log_list[0] == 'Leave':\n chatting.append([False, log_list[1]])\n elif log_list[0] == 'Change':\n db[log_list[1]] = log_list[2]\n\n for chat in chatting:\n if chat[0]:\n answer.append(db[chat[1]] + '님이 들어왔습니다.')\n else:\n answer.append(db[chat[1]] + '님이 나갔습니다.')\n \n return answer\n\n\nprint(solution([\"Enter uid1234 Muzi\", \"Enter uid4567 Prodo\",\"Leave uid1234\",\"Enter uid1234 Prodo\",\"Change uid4567 Ryan\"]))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# 튜플(tuple) - 리스트와 구조가 비슷함
#변경, 삭제 할 수 없다.
t = ('코스모스', '민들레', '국화')
print(t)
print(t[:2])
print(t[1:])
#del t[0] - 삭제 안됨
#t[2] ="매화" - 수정 안됨
t2 = (1, 2, 3)
t3 = (4,) # 1개 추가하기 (쉼표를 붙임)
print(t2)
print(t3)
print(t2 + t3) # 요소 더하기
|
normal
|
{
"blob_id": "45fcafdd30f890ddf5eaa090152fde2e2da4dbef",
"index": 732,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(t)\nprint(t[:2])\nprint(t[1:])\n<mask token>\nprint(t2)\nprint(t3)\nprint(t2 + t3)\n",
"step-3": "t = '코스모스', '민들레', '국화'\nprint(t)\nprint(t[:2])\nprint(t[1:])\nt2 = 1, 2, 3\nt3 = 4,\nprint(t2)\nprint(t3)\nprint(t2 + t3)\n",
"step-4": "# 튜플(tuple) - 리스트와 구조가 비슷함\n#변경, 삭제 할 수 없다.\n\nt = ('코스모스', '민들레', '국화')\nprint(t)\nprint(t[:2])\nprint(t[1:])\n#del t[0] - 삭제 안됨\n#t[2] =\"매화\" - 수정 안됨\n\nt2 = (1, 2, 3)\nt3 = (4,) # 1개 추가하기 (쉼표를 붙임)\nprint(t2)\nprint(t3)\nprint(t2 + t3) # 요소 더하기\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
from lemonpie import lemonpie
from flask_debugtoolbar import DebugToolbarExtension
def main():
lemonpie.debug = True
lemonpie.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
toolbar = DebugToolbarExtension(lemonpie)
lemonpie.run('0.0.0.0')
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "328c483bf59c6b84090e6bef8814e829398c5a56",
"index": 6954,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n lemonpie.debug = True\n lemonpie.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n toolbar = DebugToolbarExtension(lemonpie)\n lemonpie.run('0.0.0.0')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n lemonpie.debug = True\n lemonpie.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n toolbar = DebugToolbarExtension(lemonpie)\n lemonpie.run('0.0.0.0')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from lemonpie import lemonpie\nfrom flask_debugtoolbar import DebugToolbarExtension\n\n\ndef main():\n lemonpie.debug = True\n lemonpie.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n toolbar = DebugToolbarExtension(lemonpie)\n lemonpie.run('0.0.0.0')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\nfrom lemonpie import lemonpie\nfrom flask_debugtoolbar import DebugToolbarExtension\n\ndef main():\n lemonpie.debug = True\n lemonpie.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\n toolbar = DebugToolbarExtension(lemonpie)\n lemonpie.run('0.0.0.0')\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def dict_from_dataflow_generator(df):
for sample in df.get_data():
yield sample[0]
def split_lmdb_dataset(lmdb_input_path, lmdb_output_path1,
lmdb_output_path2, split_ratio1, batch_size, shuffle,
serialization_name, compression, compression_arg, max_num_samples=None):
data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=
shuffle)
data_dict_df.reset_state()
assert split_ratio1 > 0
assert split_ratio1 < 1
num_samples = data_dict_df.size()
if max_num_samples is not None and max_num_samples > 0:
num_samples = min(num_samples, max_num_samples)
num_batches = num_samples // batch_size
num_batches1 = round(split_ratio1 * num_samples) // batch_size
num_samples1 = num_batches1 * batch_size
num_batches2 = num_batches - num_batches1
num_samples2 = num_batches2 * batch_size
if num_samples1 <= 0 or num_samples2 <= 0:
import sys
sys.stderr.write('Data split will result in empty data set\n')
sys.exit(1)
logger.info('Splitting {} samples into {} train and {} test samples'.
format(num_samples, num_samples1, num_samples2))
if num_samples > num_samples1 + num_samples2:
logger.warn('Dropping {} samples from input dataset'.format(
num_samples - num_samples1 - num_samples2))
fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=
num_samples1, keep_state=True)
with logged_time_measurement(logger, 'Writing train dataset to {} ...'.
format(lmdb_output_path1), log_start=True):
tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,
lmdb_output_path1, batch_size, write_frequency=10,
serialization_name=serialization_name, compression=compression,
compression_arg=compression_arg)
fixed_size_df.set_size(num_samples2)
with logged_time_measurement(logger, 'Writing test dataset to {} ...'.
format(lmdb_output_path2), log_start=True):
tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,
lmdb_output_path2, batch_size, write_frequency=10,
serialization_name=serialization_name, compression=compression,
compression_arg=compression_arg, reset_df_state=False)
logger.info('Tagging as train and test')
with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:
lmdb_db.put_item('__train__', msgpack_utils.dumps(True))
with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:
lmdb_db.put_item('__test__', msgpack_utils.dumps(True))
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)
assert lmdb_df.size() == num_samples1
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)
assert lmdb_df.size() == num_samples2
def compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):
with logged_time_measurement(logger, 'Computing data statistics for {}'
.format(lmdb_path), log_start=True):
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)
lmdb_df.reset_state()
data_stats_dict = data_record.compute_dataset_stats_from_dicts(
dict_from_dataflow_generator(lmdb_df))
for key in data_stats_dict:
for key2 in data_stats_dict[key]:
if data_stats_dict[key][key2] is not None:
data_stats_dict[key][key2] = np.asarray(data_stats_dict[key
][key2], dtype=np.float32)
serializer = serialization.get_serializer_by_name(serialization_name)
logger.info('Writing data statistics to {}'.format(lmdb_path))
with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:
data_stats_dump = serializer.dumps(data_stats_dict)
lmdb_db.put_item('__stats__', data_stats_dump)
def run(args):
split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.
lmdb_output_path2, args.split_ratio1, args.batch_size, args.shuffle,
args.serialization, args.compression, args.compression_arg, args.
max_num_samples)
if args.compute_stats:
compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.
serialization)
compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.
serialization)
def main():
np.set_printoptions(threshold=5)
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-v', '--verbose', action='count', default=0, help=
'Set verbosity level.')
parser.add_argument('--lmdb-input-path', required=True, help=
'Path to input LMDB database.')
parser.add_argument('--lmdb-output-path1', required=True, help=
'Path to store train LMDB database.')
parser.add_argument('--lmdb-output-path2', required=True, help=
'Path to store test LMDB database.')
parser.add_argument('--shuffle', type=argparse_bool, default=True)
parser.add_argument('--serialization', type=str, default='pickle')
parser.add_argument('--compression', type=str, default='lz4')
parser.add_argument('--compression-arg', type=str)
parser.add_argument('--split-ratio1', default=0.8, type=float, help=
'Ratio of data to write to output path 1')
parser.add_argument('--batch-size', type=int, default=512)
parser.add_argument('--compute-stats', type=argparse_bool, default=True)
parser.add_argument('--max-num-samples', type=int)
args = parser.parse_args()
run(args)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def dict_from_dataflow_generator(df):
for sample in df.get_data():
yield sample[0]
def split_lmdb_dataset(lmdb_input_path, lmdb_output_path1,
lmdb_output_path2, split_ratio1, batch_size, shuffle,
serialization_name, compression, compression_arg, max_num_samples=None):
data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=
shuffle)
data_dict_df.reset_state()
assert split_ratio1 > 0
assert split_ratio1 < 1
num_samples = data_dict_df.size()
if max_num_samples is not None and max_num_samples > 0:
num_samples = min(num_samples, max_num_samples)
num_batches = num_samples // batch_size
num_batches1 = round(split_ratio1 * num_samples) // batch_size
num_samples1 = num_batches1 * batch_size
num_batches2 = num_batches - num_batches1
num_samples2 = num_batches2 * batch_size
if num_samples1 <= 0 or num_samples2 <= 0:
import sys
sys.stderr.write('Data split will result in empty data set\n')
sys.exit(1)
logger.info('Splitting {} samples into {} train and {} test samples'.
format(num_samples, num_samples1, num_samples2))
if num_samples > num_samples1 + num_samples2:
logger.warn('Dropping {} samples from input dataset'.format(
num_samples - num_samples1 - num_samples2))
fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=
num_samples1, keep_state=True)
with logged_time_measurement(logger, 'Writing train dataset to {} ...'.
format(lmdb_output_path1), log_start=True):
tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,
lmdb_output_path1, batch_size, write_frequency=10,
serialization_name=serialization_name, compression=compression,
compression_arg=compression_arg)
fixed_size_df.set_size(num_samples2)
with logged_time_measurement(logger, 'Writing test dataset to {} ...'.
format(lmdb_output_path2), log_start=True):
tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,
lmdb_output_path2, batch_size, write_frequency=10,
serialization_name=serialization_name, compression=compression,
compression_arg=compression_arg, reset_df_state=False)
logger.info('Tagging as train and test')
with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:
lmdb_db.put_item('__train__', msgpack_utils.dumps(True))
with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:
lmdb_db.put_item('__test__', msgpack_utils.dumps(True))
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)
assert lmdb_df.size() == num_samples1
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)
assert lmdb_df.size() == num_samples2
def compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):
with logged_time_measurement(logger, 'Computing data statistics for {}'
.format(lmdb_path), log_start=True):
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)
lmdb_df.reset_state()
data_stats_dict = data_record.compute_dataset_stats_from_dicts(
dict_from_dataflow_generator(lmdb_df))
for key in data_stats_dict:
for key2 in data_stats_dict[key]:
if data_stats_dict[key][key2] is not None:
data_stats_dict[key][key2] = np.asarray(data_stats_dict[key
][key2], dtype=np.float32)
serializer = serialization.get_serializer_by_name(serialization_name)
logger.info('Writing data statistics to {}'.format(lmdb_path))
with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:
data_stats_dump = serializer.dumps(data_stats_dict)
lmdb_db.put_item('__stats__', data_stats_dump)
def run(args):
split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.
lmdb_output_path2, args.split_ratio1, args.batch_size, args.shuffle,
args.serialization, args.compression, args.compression_arg, args.
max_num_samples)
if args.compute_stats:
compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.
serialization)
compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.
serialization)
def main():
np.set_printoptions(threshold=5)
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-v', '--verbose', action='count', default=0, help=
'Set verbosity level.')
parser.add_argument('--lmdb-input-path', required=True, help=
'Path to input LMDB database.')
parser.add_argument('--lmdb-output-path1', required=True, help=
'Path to store train LMDB database.')
parser.add_argument('--lmdb-output-path2', required=True, help=
'Path to store test LMDB database.')
parser.add_argument('--shuffle', type=argparse_bool, default=True)
parser.add_argument('--serialization', type=str, default='pickle')
parser.add_argument('--compression', type=str, default='lz4')
parser.add_argument('--compression-arg', type=str)
parser.add_argument('--split-ratio1', default=0.8, type=float, help=
'Ratio of data to write to output path 1')
parser.add_argument('--batch-size', type=int, default=512)
parser.add_argument('--compute-stats', type=argparse_bool, default=True)
parser.add_argument('--max-num-samples', type=int)
args = parser.parse_args()
run(args)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = log_utils.get_logger('reward_learning/split_data_lmdb')
def dict_from_dataflow_generator(df):
for sample in df.get_data():
yield sample[0]
def split_lmdb_dataset(lmdb_input_path, lmdb_output_path1,
lmdb_output_path2, split_ratio1, batch_size, shuffle,
serialization_name, compression, compression_arg, max_num_samples=None):
data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=
shuffle)
data_dict_df.reset_state()
assert split_ratio1 > 0
assert split_ratio1 < 1
num_samples = data_dict_df.size()
if max_num_samples is not None and max_num_samples > 0:
num_samples = min(num_samples, max_num_samples)
num_batches = num_samples // batch_size
num_batches1 = round(split_ratio1 * num_samples) // batch_size
num_samples1 = num_batches1 * batch_size
num_batches2 = num_batches - num_batches1
num_samples2 = num_batches2 * batch_size
if num_samples1 <= 0 or num_samples2 <= 0:
import sys
sys.stderr.write('Data split will result in empty data set\n')
sys.exit(1)
logger.info('Splitting {} samples into {} train and {} test samples'.
format(num_samples, num_samples1, num_samples2))
if num_samples > num_samples1 + num_samples2:
logger.warn('Dropping {} samples from input dataset'.format(
num_samples - num_samples1 - num_samples2))
fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=
num_samples1, keep_state=True)
with logged_time_measurement(logger, 'Writing train dataset to {} ...'.
format(lmdb_output_path1), log_start=True):
tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,
lmdb_output_path1, batch_size, write_frequency=10,
serialization_name=serialization_name, compression=compression,
compression_arg=compression_arg)
fixed_size_df.set_size(num_samples2)
with logged_time_measurement(logger, 'Writing test dataset to {} ...'.
format(lmdb_output_path2), log_start=True):
tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,
lmdb_output_path2, batch_size, write_frequency=10,
serialization_name=serialization_name, compression=compression,
compression_arg=compression_arg, reset_df_state=False)
logger.info('Tagging as train and test')
with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:
lmdb_db.put_item('__train__', msgpack_utils.dumps(True))
with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:
lmdb_db.put_item('__test__', msgpack_utils.dumps(True))
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)
assert lmdb_df.size() == num_samples1
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)
assert lmdb_df.size() == num_samples2
def compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):
with logged_time_measurement(logger, 'Computing data statistics for {}'
.format(lmdb_path), log_start=True):
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)
lmdb_df.reset_state()
data_stats_dict = data_record.compute_dataset_stats_from_dicts(
dict_from_dataflow_generator(lmdb_df))
for key in data_stats_dict:
for key2 in data_stats_dict[key]:
if data_stats_dict[key][key2] is not None:
data_stats_dict[key][key2] = np.asarray(data_stats_dict[key
][key2], dtype=np.float32)
serializer = serialization.get_serializer_by_name(serialization_name)
logger.info('Writing data statistics to {}'.format(lmdb_path))
with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:
data_stats_dump = serializer.dumps(data_stats_dict)
lmdb_db.put_item('__stats__', data_stats_dump)
def run(args):
split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.
lmdb_output_path2, args.split_ratio1, args.batch_size, args.shuffle,
args.serialization, args.compression, args.compression_arg, args.
max_num_samples)
if args.compute_stats:
compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.
serialization)
compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.
serialization)
def main():
np.set_printoptions(threshold=5)
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-v', '--verbose', action='count', default=0, help=
'Set verbosity level.')
parser.add_argument('--lmdb-input-path', required=True, help=
'Path to input LMDB database.')
parser.add_argument('--lmdb-output-path1', required=True, help=
'Path to store train LMDB database.')
parser.add_argument('--lmdb-output-path2', required=True, help=
'Path to store test LMDB database.')
parser.add_argument('--shuffle', type=argparse_bool, default=True)
parser.add_argument('--serialization', type=str, default='pickle')
parser.add_argument('--compression', type=str, default='lz4')
parser.add_argument('--compression-arg', type=str)
parser.add_argument('--split-ratio1', default=0.8, type=float, help=
'Ratio of data to write to output path 1')
parser.add_argument('--batch-size', type=int, default=512)
parser.add_argument('--compute-stats', type=argparse_bool, default=True)
parser.add_argument('--max-num-samples', type=int)
args = parser.parse_args()
run(args)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import scipy.ndimage as nd
import argparse
import numpy as np
from pybh import tensorpack_utils
import data_record
from pybh import serialization
from pybh import msgpack_utils
from pybh import lmdb_utils
from pybh.utils import argparse_bool, logged_time_measurement
from pybh import log_utils
logger = log_utils.get_logger('reward_learning/split_data_lmdb')
def dict_from_dataflow_generator(df):
for sample in df.get_data():
yield sample[0]
def split_lmdb_dataset(lmdb_input_path, lmdb_output_path1,
lmdb_output_path2, split_ratio1, batch_size, shuffle,
serialization_name, compression, compression_arg, max_num_samples=None):
data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=
shuffle)
data_dict_df.reset_state()
assert split_ratio1 > 0
assert split_ratio1 < 1
num_samples = data_dict_df.size()
if max_num_samples is not None and max_num_samples > 0:
num_samples = min(num_samples, max_num_samples)
num_batches = num_samples // batch_size
num_batches1 = round(split_ratio1 * num_samples) // batch_size
num_samples1 = num_batches1 * batch_size
num_batches2 = num_batches - num_batches1
num_samples2 = num_batches2 * batch_size
if num_samples1 <= 0 or num_samples2 <= 0:
import sys
sys.stderr.write('Data split will result in empty data set\n')
sys.exit(1)
logger.info('Splitting {} samples into {} train and {} test samples'.
format(num_samples, num_samples1, num_samples2))
if num_samples > num_samples1 + num_samples2:
logger.warn('Dropping {} samples from input dataset'.format(
num_samples - num_samples1 - num_samples2))
fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=
num_samples1, keep_state=True)
with logged_time_measurement(logger, 'Writing train dataset to {} ...'.
format(lmdb_output_path1), log_start=True):
tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,
lmdb_output_path1, batch_size, write_frequency=10,
serialization_name=serialization_name, compression=compression,
compression_arg=compression_arg)
fixed_size_df.set_size(num_samples2)
with logged_time_measurement(logger, 'Writing test dataset to {} ...'.
format(lmdb_output_path2), log_start=True):
tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,
lmdb_output_path2, batch_size, write_frequency=10,
serialization_name=serialization_name, compression=compression,
compression_arg=compression_arg, reset_df_state=False)
logger.info('Tagging as train and test')
with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:
lmdb_db.put_item('__train__', msgpack_utils.dumps(True))
with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:
lmdb_db.put_item('__test__', msgpack_utils.dumps(True))
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)
assert lmdb_df.size() == num_samples1
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)
assert lmdb_df.size() == num_samples2
def compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):
with logged_time_measurement(logger, 'Computing data statistics for {}'
.format(lmdb_path), log_start=True):
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)
lmdb_df.reset_state()
data_stats_dict = data_record.compute_dataset_stats_from_dicts(
dict_from_dataflow_generator(lmdb_df))
for key in data_stats_dict:
for key2 in data_stats_dict[key]:
if data_stats_dict[key][key2] is not None:
data_stats_dict[key][key2] = np.asarray(data_stats_dict[key
][key2], dtype=np.float32)
serializer = serialization.get_serializer_by_name(serialization_name)
logger.info('Writing data statistics to {}'.format(lmdb_path))
with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:
data_stats_dump = serializer.dumps(data_stats_dict)
lmdb_db.put_item('__stats__', data_stats_dump)
def run(args):
split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.
lmdb_output_path2, args.split_ratio1, args.batch_size, args.shuffle,
args.serialization, args.compression, args.compression_arg, args.
max_num_samples)
if args.compute_stats:
compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.
serialization)
compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.
serialization)
def main():
np.set_printoptions(threshold=5)
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-v', '--verbose', action='count', default=0, help=
'Set verbosity level.')
parser.add_argument('--lmdb-input-path', required=True, help=
'Path to input LMDB database.')
parser.add_argument('--lmdb-output-path1', required=True, help=
'Path to store train LMDB database.')
parser.add_argument('--lmdb-output-path2', required=True, help=
'Path to store test LMDB database.')
parser.add_argument('--shuffle', type=argparse_bool, default=True)
parser.add_argument('--serialization', type=str, default='pickle')
parser.add_argument('--compression', type=str, default='lz4')
parser.add_argument('--compression-arg', type=str)
parser.add_argument('--split-ratio1', default=0.8, type=float, help=
'Ratio of data to write to output path 1')
parser.add_argument('--batch-size', type=int, default=512)
parser.add_argument('--compute-stats', type=argparse_bool, default=True)
parser.add_argument('--max-num-samples', type=int)
args = parser.parse_args()
run(args)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Workaround for segmentation fault for some versions when ndimage is imported after tensorflow.
import scipy.ndimage as nd
import argparse
import numpy as np
from pybh import tensorpack_utils
import data_record
from pybh import serialization
from pybh import msgpack_utils
from pybh import lmdb_utils
from pybh.utils import argparse_bool, logged_time_measurement
from pybh import log_utils
logger = log_utils.get_logger("reward_learning/split_data_lmdb")
def dict_from_dataflow_generator(df):
for sample in df.get_data():
yield sample[0]
def split_lmdb_dataset(lmdb_input_path, lmdb_output_path1, lmdb_output_path2, split_ratio1,
batch_size, shuffle, serialization_name, compression, compression_arg, max_num_samples=None):
data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=shuffle)
data_dict_df.reset_state()
assert(split_ratio1 > 0)
assert(split_ratio1 < 1)
num_samples = data_dict_df.size()
if max_num_samples is not None and max_num_samples > 0:
num_samples = min(num_samples, max_num_samples)
num_batches = num_samples // batch_size
num_batches1 = round(split_ratio1 * num_samples) // batch_size
num_samples1 = num_batches1 * batch_size
num_batches2 = num_batches - num_batches1
num_samples2 = num_batches2 * batch_size
if num_samples1 <= 0 or num_samples2 <= 0:
import sys
sys.stderr.write("Data split will result in empty data set\n")
sys.exit(1)
logger.info("Splitting {} samples into {} train and {} test samples".format(num_samples, num_samples1, num_samples2))
if num_samples > num_samples1 + num_samples2:
logger.warn("Dropping {} samples from input dataset".format(num_samples - num_samples1 - num_samples2))
fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=num_samples1, keep_state=True)
with logged_time_measurement(logger, "Writing train dataset to {} ...".format(lmdb_output_path1), log_start=True):
tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df, lmdb_output_path1, batch_size,
write_frequency=10,
serialization_name=serialization_name,
compression=compression,
compression_arg=compression_arg)
fixed_size_df.set_size(num_samples2)
with logged_time_measurement(logger, "Writing test dataset to {} ...".format(lmdb_output_path2), log_start=True):
tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df, lmdb_output_path2, batch_size,
write_frequency=10,
serialization_name=serialization_name,
compression=compression,
compression_arg=compression_arg,
reset_df_state=False)
logger.info("Tagging as train and test")
with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:
lmdb_db.put_item("__train__", msgpack_utils.dumps(True))
with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:
lmdb_db.put_item("__test__", msgpack_utils.dumps(True))
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)
assert(lmdb_df.size() == num_samples1)
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)
assert(lmdb_df.size() == num_samples2)
def compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):
with logged_time_measurement(logger, "Computing data statistics for {}".format(lmdb_path), log_start=True):
lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)
lmdb_df.reset_state()
data_stats_dict = data_record.compute_dataset_stats_from_dicts(dict_from_dataflow_generator(lmdb_df))
# TODO: Hack to get rid of float64 in HDF5 dataset
for key in data_stats_dict:
for key2 in data_stats_dict[key]:
if data_stats_dict[key][key2] is not None:
data_stats_dict[key][key2] = np.asarray(data_stats_dict[key][key2], dtype=np.float32)
serializer = serialization.get_serializer_by_name(serialization_name)
logger.info("Writing data statistics to {}".format(lmdb_path))
with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:
data_stats_dump = serializer.dumps(data_stats_dict)
lmdb_db.put_item("__stats__", data_stats_dump)
def run(args):
split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.lmdb_output_path2,
args.split_ratio1, args.batch_size,
args.shuffle, args.serialization,
args.compression, args.compression_arg,
args.max_num_samples)
if args.compute_stats:
compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.serialization)
compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.serialization)
def main():
np.set_printoptions(threshold=5)
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-v', '--verbose', action='count',
default=0, help='Set verbosity level.')
parser.add_argument('--lmdb-input-path', required=True, help='Path to input LMDB database.')
parser.add_argument('--lmdb-output-path1', required=True, help='Path to store train LMDB database.')
parser.add_argument('--lmdb-output-path2', required=True, help='Path to store test LMDB database.')
parser.add_argument('--shuffle', type=argparse_bool, default=True)
parser.add_argument('--serialization', type=str, default="pickle")
parser.add_argument('--compression', type=str, default="lz4")
parser.add_argument('--compression-arg', type=str)
parser.add_argument('--split-ratio1', default=0.8, type=float, help="Ratio of data to write to output path 1")
parser.add_argument('--batch-size', type=int, default=512)
parser.add_argument('--compute-stats', type=argparse_bool, default=True)
parser.add_argument('--max-num-samples', type=int)
args = parser.parse_args()
run(args)
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "a283fd1e4098ea8bb3cc3580438c90e5932ba22f",
"index": 5852,
"step-1": "<mask token>\n\n\ndef dict_from_dataflow_generator(df):\n for sample in df.get_data():\n yield sample[0]\n\n\ndef split_lmdb_dataset(lmdb_input_path, lmdb_output_path1,\n lmdb_output_path2, split_ratio1, batch_size, shuffle,\n serialization_name, compression, compression_arg, max_num_samples=None):\n data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=\n shuffle)\n data_dict_df.reset_state()\n assert split_ratio1 > 0\n assert split_ratio1 < 1\n num_samples = data_dict_df.size()\n if max_num_samples is not None and max_num_samples > 0:\n num_samples = min(num_samples, max_num_samples)\n num_batches = num_samples // batch_size\n num_batches1 = round(split_ratio1 * num_samples) // batch_size\n num_samples1 = num_batches1 * batch_size\n num_batches2 = num_batches - num_batches1\n num_samples2 = num_batches2 * batch_size\n if num_samples1 <= 0 or num_samples2 <= 0:\n import sys\n sys.stderr.write('Data split will result in empty data set\\n')\n sys.exit(1)\n logger.info('Splitting {} samples into {} train and {} test samples'.\n format(num_samples, num_samples1, num_samples2))\n if num_samples > num_samples1 + num_samples2:\n logger.warn('Dropping {} samples from input dataset'.format(\n num_samples - num_samples1 - num_samples2))\n fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=\n num_samples1, keep_state=True)\n with logged_time_measurement(logger, 'Writing train dataset to {} ...'.\n format(lmdb_output_path1), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path1, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg)\n fixed_size_df.set_size(num_samples2)\n with logged_time_measurement(logger, 'Writing test dataset to {} ...'.\n format(lmdb_output_path2), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path2, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg, reset_df_state=False)\n logger.info('Tagging as train and test')\n with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:\n lmdb_db.put_item('__train__', msgpack_utils.dumps(True))\n with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:\n lmdb_db.put_item('__test__', msgpack_utils.dumps(True))\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)\n assert lmdb_df.size() == num_samples1\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)\n assert lmdb_df.size() == num_samples2\n\n\ndef compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):\n with logged_time_measurement(logger, 'Computing data statistics for {}'\n .format(lmdb_path), log_start=True):\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)\n lmdb_df.reset_state()\n data_stats_dict = data_record.compute_dataset_stats_from_dicts(\n dict_from_dataflow_generator(lmdb_df))\n for key in data_stats_dict:\n for key2 in data_stats_dict[key]:\n if data_stats_dict[key][key2] is not None:\n data_stats_dict[key][key2] = np.asarray(data_stats_dict[key\n ][key2], dtype=np.float32)\n serializer = serialization.get_serializer_by_name(serialization_name)\n logger.info('Writing data statistics to {}'.format(lmdb_path))\n with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:\n data_stats_dump = serializer.dumps(data_stats_dict)\n lmdb_db.put_item('__stats__', data_stats_dump)\n\n\ndef run(args):\n split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.\n lmdb_output_path2, args.split_ratio1, args.batch_size, args.shuffle,\n args.serialization, args.compression, args.compression_arg, args.\n max_num_samples)\n if args.compute_stats:\n compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.\n serialization)\n compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.\n serialization)\n\n\ndef main():\n np.set_printoptions(threshold=5)\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('-v', '--verbose', action='count', default=0, help=\n 'Set verbosity level.')\n parser.add_argument('--lmdb-input-path', required=True, help=\n 'Path to input LMDB database.')\n parser.add_argument('--lmdb-output-path1', required=True, help=\n 'Path to store train LMDB database.')\n parser.add_argument('--lmdb-output-path2', required=True, help=\n 'Path to store test LMDB database.')\n parser.add_argument('--shuffle', type=argparse_bool, default=True)\n parser.add_argument('--serialization', type=str, default='pickle')\n parser.add_argument('--compression', type=str, default='lz4')\n parser.add_argument('--compression-arg', type=str)\n parser.add_argument('--split-ratio1', default=0.8, type=float, help=\n 'Ratio of data to write to output path 1')\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--compute-stats', type=argparse_bool, default=True)\n parser.add_argument('--max-num-samples', type=int)\n args = parser.parse_args()\n run(args)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef dict_from_dataflow_generator(df):\n for sample in df.get_data():\n yield sample[0]\n\n\ndef split_lmdb_dataset(lmdb_input_path, lmdb_output_path1,\n lmdb_output_path2, split_ratio1, batch_size, shuffle,\n serialization_name, compression, compression_arg, max_num_samples=None):\n data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=\n shuffle)\n data_dict_df.reset_state()\n assert split_ratio1 > 0\n assert split_ratio1 < 1\n num_samples = data_dict_df.size()\n if max_num_samples is not None and max_num_samples > 0:\n num_samples = min(num_samples, max_num_samples)\n num_batches = num_samples // batch_size\n num_batches1 = round(split_ratio1 * num_samples) // batch_size\n num_samples1 = num_batches1 * batch_size\n num_batches2 = num_batches - num_batches1\n num_samples2 = num_batches2 * batch_size\n if num_samples1 <= 0 or num_samples2 <= 0:\n import sys\n sys.stderr.write('Data split will result in empty data set\\n')\n sys.exit(1)\n logger.info('Splitting {} samples into {} train and {} test samples'.\n format(num_samples, num_samples1, num_samples2))\n if num_samples > num_samples1 + num_samples2:\n logger.warn('Dropping {} samples from input dataset'.format(\n num_samples - num_samples1 - num_samples2))\n fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=\n num_samples1, keep_state=True)\n with logged_time_measurement(logger, 'Writing train dataset to {} ...'.\n format(lmdb_output_path1), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path1, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg)\n fixed_size_df.set_size(num_samples2)\n with logged_time_measurement(logger, 'Writing test dataset to {} ...'.\n format(lmdb_output_path2), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path2, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg, reset_df_state=False)\n logger.info('Tagging as train and test')\n with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:\n lmdb_db.put_item('__train__', msgpack_utils.dumps(True))\n with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:\n lmdb_db.put_item('__test__', msgpack_utils.dumps(True))\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)\n assert lmdb_df.size() == num_samples1\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)\n assert lmdb_df.size() == num_samples2\n\n\ndef compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):\n with logged_time_measurement(logger, 'Computing data statistics for {}'\n .format(lmdb_path), log_start=True):\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)\n lmdb_df.reset_state()\n data_stats_dict = data_record.compute_dataset_stats_from_dicts(\n dict_from_dataflow_generator(lmdb_df))\n for key in data_stats_dict:\n for key2 in data_stats_dict[key]:\n if data_stats_dict[key][key2] is not None:\n data_stats_dict[key][key2] = np.asarray(data_stats_dict[key\n ][key2], dtype=np.float32)\n serializer = serialization.get_serializer_by_name(serialization_name)\n logger.info('Writing data statistics to {}'.format(lmdb_path))\n with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:\n data_stats_dump = serializer.dumps(data_stats_dict)\n lmdb_db.put_item('__stats__', data_stats_dump)\n\n\ndef run(args):\n split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.\n lmdb_output_path2, args.split_ratio1, args.batch_size, args.shuffle,\n args.serialization, args.compression, args.compression_arg, args.\n max_num_samples)\n if args.compute_stats:\n compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.\n serialization)\n compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.\n serialization)\n\n\ndef main():\n np.set_printoptions(threshold=5)\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('-v', '--verbose', action='count', default=0, help=\n 'Set verbosity level.')\n parser.add_argument('--lmdb-input-path', required=True, help=\n 'Path to input LMDB database.')\n parser.add_argument('--lmdb-output-path1', required=True, help=\n 'Path to store train LMDB database.')\n parser.add_argument('--lmdb-output-path2', required=True, help=\n 'Path to store test LMDB database.')\n parser.add_argument('--shuffle', type=argparse_bool, default=True)\n parser.add_argument('--serialization', type=str, default='pickle')\n parser.add_argument('--compression', type=str, default='lz4')\n parser.add_argument('--compression-arg', type=str)\n parser.add_argument('--split-ratio1', default=0.8, type=float, help=\n 'Ratio of data to write to output path 1')\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--compute-stats', type=argparse_bool, default=True)\n parser.add_argument('--max-num-samples', type=int)\n args = parser.parse_args()\n run(args)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nlogger = log_utils.get_logger('reward_learning/split_data_lmdb')\n\n\ndef dict_from_dataflow_generator(df):\n for sample in df.get_data():\n yield sample[0]\n\n\ndef split_lmdb_dataset(lmdb_input_path, lmdb_output_path1,\n lmdb_output_path2, split_ratio1, batch_size, shuffle,\n serialization_name, compression, compression_arg, max_num_samples=None):\n data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=\n shuffle)\n data_dict_df.reset_state()\n assert split_ratio1 > 0\n assert split_ratio1 < 1\n num_samples = data_dict_df.size()\n if max_num_samples is not None and max_num_samples > 0:\n num_samples = min(num_samples, max_num_samples)\n num_batches = num_samples // batch_size\n num_batches1 = round(split_ratio1 * num_samples) // batch_size\n num_samples1 = num_batches1 * batch_size\n num_batches2 = num_batches - num_batches1\n num_samples2 = num_batches2 * batch_size\n if num_samples1 <= 0 or num_samples2 <= 0:\n import sys\n sys.stderr.write('Data split will result in empty data set\\n')\n sys.exit(1)\n logger.info('Splitting {} samples into {} train and {} test samples'.\n format(num_samples, num_samples1, num_samples2))\n if num_samples > num_samples1 + num_samples2:\n logger.warn('Dropping {} samples from input dataset'.format(\n num_samples - num_samples1 - num_samples2))\n fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=\n num_samples1, keep_state=True)\n with logged_time_measurement(logger, 'Writing train dataset to {} ...'.\n format(lmdb_output_path1), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path1, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg)\n fixed_size_df.set_size(num_samples2)\n with logged_time_measurement(logger, 'Writing test dataset to {} ...'.\n format(lmdb_output_path2), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path2, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg, reset_df_state=False)\n logger.info('Tagging as train and test')\n with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:\n lmdb_db.put_item('__train__', msgpack_utils.dumps(True))\n with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:\n lmdb_db.put_item('__test__', msgpack_utils.dumps(True))\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)\n assert lmdb_df.size() == num_samples1\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)\n assert lmdb_df.size() == num_samples2\n\n\ndef compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):\n with logged_time_measurement(logger, 'Computing data statistics for {}'\n .format(lmdb_path), log_start=True):\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)\n lmdb_df.reset_state()\n data_stats_dict = data_record.compute_dataset_stats_from_dicts(\n dict_from_dataflow_generator(lmdb_df))\n for key in data_stats_dict:\n for key2 in data_stats_dict[key]:\n if data_stats_dict[key][key2] is not None:\n data_stats_dict[key][key2] = np.asarray(data_stats_dict[key\n ][key2], dtype=np.float32)\n serializer = serialization.get_serializer_by_name(serialization_name)\n logger.info('Writing data statistics to {}'.format(lmdb_path))\n with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:\n data_stats_dump = serializer.dumps(data_stats_dict)\n lmdb_db.put_item('__stats__', data_stats_dump)\n\n\ndef run(args):\n split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.\n lmdb_output_path2, args.split_ratio1, args.batch_size, args.shuffle,\n args.serialization, args.compression, args.compression_arg, args.\n max_num_samples)\n if args.compute_stats:\n compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.\n serialization)\n compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.\n serialization)\n\n\ndef main():\n np.set_printoptions(threshold=5)\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('-v', '--verbose', action='count', default=0, help=\n 'Set verbosity level.')\n parser.add_argument('--lmdb-input-path', required=True, help=\n 'Path to input LMDB database.')\n parser.add_argument('--lmdb-output-path1', required=True, help=\n 'Path to store train LMDB database.')\n parser.add_argument('--lmdb-output-path2', required=True, help=\n 'Path to store test LMDB database.')\n parser.add_argument('--shuffle', type=argparse_bool, default=True)\n parser.add_argument('--serialization', type=str, default='pickle')\n parser.add_argument('--compression', type=str, default='lz4')\n parser.add_argument('--compression-arg', type=str)\n parser.add_argument('--split-ratio1', default=0.8, type=float, help=\n 'Ratio of data to write to output path 1')\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--compute-stats', type=argparse_bool, default=True)\n parser.add_argument('--max-num-samples', type=int)\n args = parser.parse_args()\n run(args)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nimport scipy.ndimage as nd\nimport argparse\nimport numpy as np\nfrom pybh import tensorpack_utils\nimport data_record\nfrom pybh import serialization\nfrom pybh import msgpack_utils\nfrom pybh import lmdb_utils\nfrom pybh.utils import argparse_bool, logged_time_measurement\nfrom pybh import log_utils\nlogger = log_utils.get_logger('reward_learning/split_data_lmdb')\n\n\ndef dict_from_dataflow_generator(df):\n for sample in df.get_data():\n yield sample[0]\n\n\ndef split_lmdb_dataset(lmdb_input_path, lmdb_output_path1,\n lmdb_output_path2, split_ratio1, batch_size, shuffle,\n serialization_name, compression, compression_arg, max_num_samples=None):\n data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=\n shuffle)\n data_dict_df.reset_state()\n assert split_ratio1 > 0\n assert split_ratio1 < 1\n num_samples = data_dict_df.size()\n if max_num_samples is not None and max_num_samples > 0:\n num_samples = min(num_samples, max_num_samples)\n num_batches = num_samples // batch_size\n num_batches1 = round(split_ratio1 * num_samples) // batch_size\n num_samples1 = num_batches1 * batch_size\n num_batches2 = num_batches - num_batches1\n num_samples2 = num_batches2 * batch_size\n if num_samples1 <= 0 or num_samples2 <= 0:\n import sys\n sys.stderr.write('Data split will result in empty data set\\n')\n sys.exit(1)\n logger.info('Splitting {} samples into {} train and {} test samples'.\n format(num_samples, num_samples1, num_samples2))\n if num_samples > num_samples1 + num_samples2:\n logger.warn('Dropping {} samples from input dataset'.format(\n num_samples - num_samples1 - num_samples2))\n fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=\n num_samples1, keep_state=True)\n with logged_time_measurement(logger, 'Writing train dataset to {} ...'.\n format(lmdb_output_path1), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path1, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg)\n fixed_size_df.set_size(num_samples2)\n with logged_time_measurement(logger, 'Writing test dataset to {} ...'.\n format(lmdb_output_path2), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df,\n lmdb_output_path2, batch_size, write_frequency=10,\n serialization_name=serialization_name, compression=compression,\n compression_arg=compression_arg, reset_df_state=False)\n logger.info('Tagging as train and test')\n with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:\n lmdb_db.put_item('__train__', msgpack_utils.dumps(True))\n with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:\n lmdb_db.put_item('__test__', msgpack_utils.dumps(True))\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)\n assert lmdb_df.size() == num_samples1\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)\n assert lmdb_df.size() == num_samples2\n\n\ndef compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):\n with logged_time_measurement(logger, 'Computing data statistics for {}'\n .format(lmdb_path), log_start=True):\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)\n lmdb_df.reset_state()\n data_stats_dict = data_record.compute_dataset_stats_from_dicts(\n dict_from_dataflow_generator(lmdb_df))\n for key in data_stats_dict:\n for key2 in data_stats_dict[key]:\n if data_stats_dict[key][key2] is not None:\n data_stats_dict[key][key2] = np.asarray(data_stats_dict[key\n ][key2], dtype=np.float32)\n serializer = serialization.get_serializer_by_name(serialization_name)\n logger.info('Writing data statistics to {}'.format(lmdb_path))\n with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:\n data_stats_dump = serializer.dumps(data_stats_dict)\n lmdb_db.put_item('__stats__', data_stats_dump)\n\n\ndef run(args):\n split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.\n lmdb_output_path2, args.split_ratio1, args.batch_size, args.shuffle,\n args.serialization, args.compression, args.compression_arg, args.\n max_num_samples)\n if args.compute_stats:\n compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.\n serialization)\n compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.\n serialization)\n\n\ndef main():\n np.set_printoptions(threshold=5)\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('-v', '--verbose', action='count', default=0, help=\n 'Set verbosity level.')\n parser.add_argument('--lmdb-input-path', required=True, help=\n 'Path to input LMDB database.')\n parser.add_argument('--lmdb-output-path1', required=True, help=\n 'Path to store train LMDB database.')\n parser.add_argument('--lmdb-output-path2', required=True, help=\n 'Path to store test LMDB database.')\n parser.add_argument('--shuffle', type=argparse_bool, default=True)\n parser.add_argument('--serialization', type=str, default='pickle')\n parser.add_argument('--compression', type=str, default='lz4')\n parser.add_argument('--compression-arg', type=str)\n parser.add_argument('--split-ratio1', default=0.8, type=float, help=\n 'Ratio of data to write to output path 1')\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--compute-stats', type=argparse_bool, default=True)\n parser.add_argument('--max-num-samples', type=int)\n args = parser.parse_args()\n run(args)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\n# Workaround for segmentation fault for some versions when ndimage is imported after tensorflow.\nimport scipy.ndimage as nd\n\nimport argparse\nimport numpy as np\nfrom pybh import tensorpack_utils\nimport data_record\nfrom pybh import serialization\nfrom pybh import msgpack_utils\nfrom pybh import lmdb_utils\nfrom pybh.utils import argparse_bool, logged_time_measurement\nfrom pybh import log_utils\n\n\nlogger = log_utils.get_logger(\"reward_learning/split_data_lmdb\")\n\n\ndef dict_from_dataflow_generator(df):\n for sample in df.get_data():\n yield sample[0]\n\n\ndef split_lmdb_dataset(lmdb_input_path, lmdb_output_path1, lmdb_output_path2, split_ratio1,\n batch_size, shuffle, serialization_name, compression, compression_arg, max_num_samples=None):\n data_dict_df = tensorpack_utils.AutoLMDBData(lmdb_input_path, shuffle=shuffle)\n data_dict_df.reset_state()\n\n assert(split_ratio1 > 0)\n assert(split_ratio1 < 1)\n\n num_samples = data_dict_df.size()\n if max_num_samples is not None and max_num_samples > 0:\n num_samples = min(num_samples, max_num_samples)\n num_batches = num_samples // batch_size\n num_batches1 = round(split_ratio1 * num_samples) // batch_size\n num_samples1 = num_batches1 * batch_size\n num_batches2 = num_batches - num_batches1\n num_samples2 = num_batches2 * batch_size\n if num_samples1 <= 0 or num_samples2 <= 0:\n import sys\n sys.stderr.write(\"Data split will result in empty data set\\n\")\n sys.exit(1)\n\n logger.info(\"Splitting {} samples into {} train and {} test samples\".format(num_samples, num_samples1, num_samples2))\n if num_samples > num_samples1 + num_samples2:\n logger.warn(\"Dropping {} samples from input dataset\".format(num_samples - num_samples1 - num_samples2))\n\n fixed_size_df = tensorpack_utils.FixedSizeData(data_dict_df, size=num_samples1, keep_state=True)\n with logged_time_measurement(logger, \"Writing train dataset to {} ...\".format(lmdb_output_path1), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df, lmdb_output_path1, batch_size,\n write_frequency=10,\n serialization_name=serialization_name,\n compression=compression,\n compression_arg=compression_arg)\n\n fixed_size_df.set_size(num_samples2)\n with logged_time_measurement(logger, \"Writing test dataset to {} ...\".format(lmdb_output_path2), log_start=True):\n tensorpack_utils.dump_compressed_dataflow_to_lmdb(fixed_size_df, lmdb_output_path2, batch_size,\n write_frequency=10,\n serialization_name=serialization_name,\n compression=compression,\n compression_arg=compression_arg,\n reset_df_state=False)\n\n logger.info(\"Tagging as train and test\")\n with lmdb_utils.LMDB(lmdb_output_path1, readonly=False) as lmdb_db:\n lmdb_db.put_item(\"__train__\", msgpack_utils.dumps(True))\n with lmdb_utils.LMDB(lmdb_output_path2, readonly=False) as lmdb_db:\n lmdb_db.put_item(\"__test__\", msgpack_utils.dumps(True))\n\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path1)\n assert(lmdb_df.size() == num_samples1)\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_output_path2)\n assert(lmdb_df.size() == num_samples2)\n\n\ndef compute_and_update_stats_in_lmdb(lmdb_path, serialization_name):\n with logged_time_measurement(logger, \"Computing data statistics for {}\".format(lmdb_path), log_start=True):\n lmdb_df = tensorpack_utils.AutoLMDBData(lmdb_path)\n lmdb_df.reset_state()\n data_stats_dict = data_record.compute_dataset_stats_from_dicts(dict_from_dataflow_generator(lmdb_df))\n\n # TODO: Hack to get rid of float64 in HDF5 dataset\n for key in data_stats_dict:\n for key2 in data_stats_dict[key]:\n if data_stats_dict[key][key2] is not None:\n data_stats_dict[key][key2] = np.asarray(data_stats_dict[key][key2], dtype=np.float32)\n\n serializer = serialization.get_serializer_by_name(serialization_name)\n logger.info(\"Writing data statistics to {}\".format(lmdb_path))\n with lmdb_utils.LMDB(lmdb_path, readonly=False) as lmdb_db:\n data_stats_dump = serializer.dumps(data_stats_dict)\n lmdb_db.put_item(\"__stats__\", data_stats_dump)\n\n\ndef run(args):\n split_lmdb_dataset(args.lmdb_input_path, args.lmdb_output_path1, args.lmdb_output_path2,\n args.split_ratio1, args.batch_size,\n args.shuffle, args.serialization,\n args.compression, args.compression_arg,\n args.max_num_samples)\n\n if args.compute_stats:\n compute_and_update_stats_in_lmdb(args.lmdb_output_path1, args.serialization)\n compute_and_update_stats_in_lmdb(args.lmdb_output_path2, args.serialization)\n\n\ndef main():\n np.set_printoptions(threshold=5)\n\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('-v', '--verbose', action='count',\n default=0, help='Set verbosity level.')\n parser.add_argument('--lmdb-input-path', required=True, help='Path to input LMDB database.')\n parser.add_argument('--lmdb-output-path1', required=True, help='Path to store train LMDB database.')\n parser.add_argument('--lmdb-output-path2', required=True, help='Path to store test LMDB database.')\n parser.add_argument('--shuffle', type=argparse_bool, default=True)\n parser.add_argument('--serialization', type=str, default=\"pickle\")\n parser.add_argument('--compression', type=str, default=\"lz4\")\n parser.add_argument('--compression-arg', type=str)\n parser.add_argument('--split-ratio1', default=0.8, type=float, help=\"Ratio of data to write to output path 1\")\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--compute-stats', type=argparse_bool, default=True)\n parser.add_argument('--max-num-samples', type=int)\n\n args = parser.parse_args()\n\n run(args)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from queue import Queue
class Node():
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
def array_to_tree_dfs(array):
n = len(array)
if n>0:
root = Node(array[0])
def dfs(node, index):
# if index >= n:
# return
# else:
if 2*(index+1) -1 < n and array[2*(index+1) -1] is not None:
node.left = Node(array[2*(index+1) -1])
dfs(node.left, 2*(index+1) -1)
if 2*(index+1) < n and array[2*(index+1)] is not None:
node.right = Node(array[2*(index+1)])
dfs(node.right, 2*(index+1))
return
dfs(root, 0)
return root
def tree_to_array_bfs(root):
q = Queue(maxsize = 0) # queue with infinity size
q.put(root)
array = []
def bfs():
if q.empty():
return
else:
node = q.get()
array.append(node.value)
if node.left != None:
q.put(node.left)
if node.right != None:
q.put(node.right)
bfs()
return
bfs()
return array
def findClosestValueInBst(tree, target):
distance = abs(tree.value - target)
value = tree.value
def dfs(node):
nonlocal distance, value
# stop condition
if(node is None):
return value
if(node.value == target):
return target
if abs(node.value - target) < distance:
value = node.value
distance = abs(value - target)
# recursion part
if(node.value > target):
return dfs(node.left)
elif(node.value < target):
return dfs(node.right)
return dfs(tree)
if __name__ == '__main__':
array = [5,3,10,2,4,8] + [None]*6 + [9] + [None]*2
root = array_to_tree_dfs(array)
new_array = tree_to_array_bfs(root)
print(new_array)
print(findClosestValueInBst(root, 6))
|
normal
|
{
"blob_id": "a52762fb13c04ced07a41a752578c4173d1eac42",
"index": 8350,
"step-1": "<mask token>\n\n\nclass Node:\n\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\n<mask token>\n\n\ndef tree_to_array_bfs(root):\n q = Queue(maxsize=0)\n q.put(root)\n array = []\n\n def bfs():\n if q.empty():\n return\n else:\n node = q.get()\n array.append(node.value)\n if node.left != None:\n q.put(node.left)\n if node.right != None:\n q.put(node.right)\n bfs()\n return\n bfs()\n return array\n\n\ndef findClosestValueInBst(tree, target):\n distance = abs(tree.value - target)\n value = tree.value\n\n def dfs(node):\n nonlocal distance, value\n if node is None:\n return value\n if node.value == target:\n return target\n if abs(node.value - target) < distance:\n value = node.value\n distance = abs(value - target)\n if node.value > target:\n return dfs(node.left)\n elif node.value < target:\n return dfs(node.right)\n return dfs(tree)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Node:\n\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\ndef array_to_tree_dfs(array):\n n = len(array)\n if n > 0:\n root = Node(array[0])\n\n def dfs(node, index):\n if 2 * (index + 1) - 1 < n and array[2 * (index + 1) - 1] is not None:\n node.left = Node(array[2 * (index + 1) - 1])\n dfs(node.left, 2 * (index + 1) - 1)\n if 2 * (index + 1) < n and array[2 * (index + 1)] is not None:\n node.right = Node(array[2 * (index + 1)])\n dfs(node.right, 2 * (index + 1))\n return\n dfs(root, 0)\n return root\n\n\ndef tree_to_array_bfs(root):\n q = Queue(maxsize=0)\n q.put(root)\n array = []\n\n def bfs():\n if q.empty():\n return\n else:\n node = q.get()\n array.append(node.value)\n if node.left != None:\n q.put(node.left)\n if node.right != None:\n q.put(node.right)\n bfs()\n return\n bfs()\n return array\n\n\ndef findClosestValueInBst(tree, target):\n distance = abs(tree.value - target)\n value = tree.value\n\n def dfs(node):\n nonlocal distance, value\n if node is None:\n return value\n if node.value == target:\n return target\n if abs(node.value - target) < distance:\n value = node.value\n distance = abs(value - target)\n if node.value > target:\n return dfs(node.left)\n elif node.value < target:\n return dfs(node.right)\n return dfs(tree)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Node:\n\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\ndef array_to_tree_dfs(array):\n n = len(array)\n if n > 0:\n root = Node(array[0])\n\n def dfs(node, index):\n if 2 * (index + 1) - 1 < n and array[2 * (index + 1) - 1] is not None:\n node.left = Node(array[2 * (index + 1) - 1])\n dfs(node.left, 2 * (index + 1) - 1)\n if 2 * (index + 1) < n and array[2 * (index + 1)] is not None:\n node.right = Node(array[2 * (index + 1)])\n dfs(node.right, 2 * (index + 1))\n return\n dfs(root, 0)\n return root\n\n\ndef tree_to_array_bfs(root):\n q = Queue(maxsize=0)\n q.put(root)\n array = []\n\n def bfs():\n if q.empty():\n return\n else:\n node = q.get()\n array.append(node.value)\n if node.left != None:\n q.put(node.left)\n if node.right != None:\n q.put(node.right)\n bfs()\n return\n bfs()\n return array\n\n\ndef findClosestValueInBst(tree, target):\n distance = abs(tree.value - target)\n value = tree.value\n\n def dfs(node):\n nonlocal distance, value\n if node is None:\n return value\n if node.value == target:\n return target\n if abs(node.value - target) < distance:\n value = node.value\n distance = abs(value - target)\n if node.value > target:\n return dfs(node.left)\n elif node.value < target:\n return dfs(node.right)\n return dfs(tree)\n\n\nif __name__ == '__main__':\n array = [5, 3, 10, 2, 4, 8] + [None] * 6 + [9] + [None] * 2\n root = array_to_tree_dfs(array)\n new_array = tree_to_array_bfs(root)\n print(new_array)\n print(findClosestValueInBst(root, 6))\n",
"step-4": "from queue import Queue\n\n\nclass Node:\n\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\ndef array_to_tree_dfs(array):\n n = len(array)\n if n > 0:\n root = Node(array[0])\n\n def dfs(node, index):\n if 2 * (index + 1) - 1 < n and array[2 * (index + 1) - 1] is not None:\n node.left = Node(array[2 * (index + 1) - 1])\n dfs(node.left, 2 * (index + 1) - 1)\n if 2 * (index + 1) < n and array[2 * (index + 1)] is not None:\n node.right = Node(array[2 * (index + 1)])\n dfs(node.right, 2 * (index + 1))\n return\n dfs(root, 0)\n return root\n\n\ndef tree_to_array_bfs(root):\n q = Queue(maxsize=0)\n q.put(root)\n array = []\n\n def bfs():\n if q.empty():\n return\n else:\n node = q.get()\n array.append(node.value)\n if node.left != None:\n q.put(node.left)\n if node.right != None:\n q.put(node.right)\n bfs()\n return\n bfs()\n return array\n\n\ndef findClosestValueInBst(tree, target):\n distance = abs(tree.value - target)\n value = tree.value\n\n def dfs(node):\n nonlocal distance, value\n if node is None:\n return value\n if node.value == target:\n return target\n if abs(node.value - target) < distance:\n value = node.value\n distance = abs(value - target)\n if node.value > target:\n return dfs(node.left)\n elif node.value < target:\n return dfs(node.right)\n return dfs(tree)\n\n\nif __name__ == '__main__':\n array = [5, 3, 10, 2, 4, 8] + [None] * 6 + [9] + [None] * 2\n root = array_to_tree_dfs(array)\n new_array = tree_to_array_bfs(root)\n print(new_array)\n print(findClosestValueInBst(root, 6))\n",
"step-5": "from queue import Queue\n\nclass Node():\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n\ndef array_to_tree_dfs(array):\n n = len(array)\n if n>0:\n root = Node(array[0])\n\n def dfs(node, index):\n # if index >= n:\n # return\n # else:\n if 2*(index+1) -1 < n and array[2*(index+1) -1] is not None:\n node.left = Node(array[2*(index+1) -1])\n dfs(node.left, 2*(index+1) -1)\n if 2*(index+1) < n and array[2*(index+1)] is not None:\n node.right = Node(array[2*(index+1)])\n dfs(node.right, 2*(index+1))\n return\n dfs(root, 0)\n return root\n\n\ndef tree_to_array_bfs(root):\n q = Queue(maxsize = 0) # queue with infinity size\n q.put(root)\n array = []\n def bfs():\n if q.empty():\n return\n else:\n node = q.get()\n array.append(node.value)\n if node.left != None:\n q.put(node.left)\n if node.right != None:\n q.put(node.right)\n bfs()\n return\n bfs()\n return array\n\n \n\n\ndef findClosestValueInBst(tree, target):\n distance = abs(tree.value - target)\n value = tree.value\n \n def dfs(node):\n nonlocal distance, value\n # stop condition\n if(node is None):\n return value\n if(node.value == target):\n return target\n\n if abs(node.value - target) < distance:\n value = node.value\n distance = abs(value - target)\n\n # recursion part\n if(node.value > target):\n return dfs(node.left)\n elif(node.value < target):\n return dfs(node.right)\n\n return dfs(tree)\n\n\nif __name__ == '__main__':\n array = [5,3,10,2,4,8] + [None]*6 + [9] + [None]*2\n root = array_to_tree_dfs(array)\n new_array = tree_to_array_bfs(root) \n print(new_array)\n print(findClosestValueInBst(root, 6))\n \n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Print classes, functions and modules which contain static data."""
from __future__ import print_function
from __future__ import unicode_literals
import collections
from . import ast
from . import metrics
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
def _find_warnings(filename, lines, ast_list, static_is_optional):
def print_warning(node, name):
print("{}:{}: static data '{}'".format(
filename,
lines.get_line_number(node.start),
name))
def find_static(function_node):
tokens = []
static_found = False
for node in function_node.body:
if node.name == 'static':
static_found = True
if static_found:
tokens.append(node)
if node.name == ';':
body = list(
ast.ASTBuilder(iter(tokens), filename).generate())
_find_warnings(filename, lines, body, False)
tokens = []
static_found = False
for node in ast_list:
if isinstance(node, ast.VariableDeclaration):
# Ignore 'static' at module scope so we can find globals too.
is_static = 'static' in node.type.modifiers
is_not_const = 'const' not in node.type.modifiers
if is_not_const and (static_is_optional or is_static):
print_warning(node, node.name)
elif isinstance(node, ast.Function) and node.body:
find_static(node)
elif isinstance(node, ast.Class) and node.body:
_find_warnings(filename, lines, node.body, False)
def _find_unused_static_warnings(filename, lines, ast_list):
"""Warn about unused static variables."""
static_declarations = {
node.name: node
for node in ast_list
if (isinstance(node, ast.VariableDeclaration) and
'static' in node.type.modifiers)
}
def find_variables_use(body):
for child in body:
if child.name in static_declarations:
static_use_counts[child.name] += 1
static_use_counts = collections.Counter()
for node in ast_list:
if isinstance(node, ast.Function) and node.body:
find_variables_use(node.body)
elif isinstance(node, ast.Class) and node.body:
for child in node.body:
if isinstance(child, ast.Function) and child.body:
find_variables_use(child.body)
for name in sorted(static_declarations):
if not static_use_counts[name]:
print("{}:{}: unused variable '{}'".format(
filename,
lines.get_line_number(static_declarations[name].start),
name))
def run(filename, source, entire_ast, include_paths, quiet):
lines = metrics.Metrics(source)
_find_warnings(filename, lines, entire_ast, True)
_find_unused_static_warnings(filename, lines, entire_ast)
|
normal
|
{
"blob_id": "57d1fb805fce2ba75ea2962598e809ba35fd7eb6",
"index": 3490,
"step-1": "<mask token>\n\n\ndef _find_warnings(filename, lines, ast_list, static_is_optional):\n\n def print_warning(node, name):\n print(\"{}:{}: static data '{}'\".format(filename, lines.\n get_line_number(node.start), name))\n\n def find_static(function_node):\n tokens = []\n static_found = False\n for node in function_node.body:\n if node.name == 'static':\n static_found = True\n if static_found:\n tokens.append(node)\n if node.name == ';':\n body = list(ast.ASTBuilder(iter(tokens), filename).\n generate())\n _find_warnings(filename, lines, body, False)\n tokens = []\n static_found = False\n for node in ast_list:\n if isinstance(node, ast.VariableDeclaration):\n is_static = 'static' in node.type.modifiers\n is_not_const = 'const' not in node.type.modifiers\n if is_not_const and (static_is_optional or is_static):\n print_warning(node, node.name)\n elif isinstance(node, ast.Function) and node.body:\n find_static(node)\n elif isinstance(node, ast.Class) and node.body:\n _find_warnings(filename, lines, node.body, False)\n\n\ndef _find_unused_static_warnings(filename, lines, ast_list):\n \"\"\"Warn about unused static variables.\"\"\"\n static_declarations = {node.name: node for node in ast_list if \n isinstance(node, ast.VariableDeclaration) and 'static' in node.type\n .modifiers}\n\n def find_variables_use(body):\n for child in body:\n if child.name in static_declarations:\n static_use_counts[child.name] += 1\n static_use_counts = collections.Counter()\n for node in ast_list:\n if isinstance(node, ast.Function) and node.body:\n find_variables_use(node.body)\n elif isinstance(node, ast.Class) and node.body:\n for child in node.body:\n if isinstance(child, ast.Function) and child.body:\n find_variables_use(child.body)\n for name in sorted(static_declarations):\n if not static_use_counts[name]:\n print(\"{}:{}: unused variable '{}'\".format(filename, lines.\n get_line_number(static_declarations[name].start), name))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _find_warnings(filename, lines, ast_list, static_is_optional):\n\n def print_warning(node, name):\n print(\"{}:{}: static data '{}'\".format(filename, lines.\n get_line_number(node.start), name))\n\n def find_static(function_node):\n tokens = []\n static_found = False\n for node in function_node.body:\n if node.name == 'static':\n static_found = True\n if static_found:\n tokens.append(node)\n if node.name == ';':\n body = list(ast.ASTBuilder(iter(tokens), filename).\n generate())\n _find_warnings(filename, lines, body, False)\n tokens = []\n static_found = False\n for node in ast_list:\n if isinstance(node, ast.VariableDeclaration):\n is_static = 'static' in node.type.modifiers\n is_not_const = 'const' not in node.type.modifiers\n if is_not_const and (static_is_optional or is_static):\n print_warning(node, node.name)\n elif isinstance(node, ast.Function) and node.body:\n find_static(node)\n elif isinstance(node, ast.Class) and node.body:\n _find_warnings(filename, lines, node.body, False)\n\n\ndef _find_unused_static_warnings(filename, lines, ast_list):\n \"\"\"Warn about unused static variables.\"\"\"\n static_declarations = {node.name: node for node in ast_list if \n isinstance(node, ast.VariableDeclaration) and 'static' in node.type\n .modifiers}\n\n def find_variables_use(body):\n for child in body:\n if child.name in static_declarations:\n static_use_counts[child.name] += 1\n static_use_counts = collections.Counter()\n for node in ast_list:\n if isinstance(node, ast.Function) and node.body:\n find_variables_use(node.body)\n elif isinstance(node, ast.Class) and node.body:\n for child in node.body:\n if isinstance(child, ast.Function) and child.body:\n find_variables_use(child.body)\n for name in sorted(static_declarations):\n if not static_use_counts[name]:\n print(\"{}:{}: unused variable '{}'\".format(filename, lines.\n get_line_number(static_declarations[name].start), name))\n\n\ndef run(filename, source, entire_ast, include_paths, quiet):\n lines = metrics.Metrics(source)\n _find_warnings(filename, lines, entire_ast, True)\n _find_unused_static_warnings(filename, lines, entire_ast)\n",
"step-3": "<mask token>\n__author__ = 'nnorwitz@google.com (Neal Norwitz)'\n\n\ndef _find_warnings(filename, lines, ast_list, static_is_optional):\n\n def print_warning(node, name):\n print(\"{}:{}: static data '{}'\".format(filename, lines.\n get_line_number(node.start), name))\n\n def find_static(function_node):\n tokens = []\n static_found = False\n for node in function_node.body:\n if node.name == 'static':\n static_found = True\n if static_found:\n tokens.append(node)\n if node.name == ';':\n body = list(ast.ASTBuilder(iter(tokens), filename).\n generate())\n _find_warnings(filename, lines, body, False)\n tokens = []\n static_found = False\n for node in ast_list:\n if isinstance(node, ast.VariableDeclaration):\n is_static = 'static' in node.type.modifiers\n is_not_const = 'const' not in node.type.modifiers\n if is_not_const and (static_is_optional or is_static):\n print_warning(node, node.name)\n elif isinstance(node, ast.Function) and node.body:\n find_static(node)\n elif isinstance(node, ast.Class) and node.body:\n _find_warnings(filename, lines, node.body, False)\n\n\ndef _find_unused_static_warnings(filename, lines, ast_list):\n \"\"\"Warn about unused static variables.\"\"\"\n static_declarations = {node.name: node for node in ast_list if \n isinstance(node, ast.VariableDeclaration) and 'static' in node.type\n .modifiers}\n\n def find_variables_use(body):\n for child in body:\n if child.name in static_declarations:\n static_use_counts[child.name] += 1\n static_use_counts = collections.Counter()\n for node in ast_list:\n if isinstance(node, ast.Function) and node.body:\n find_variables_use(node.body)\n elif isinstance(node, ast.Class) and node.body:\n for child in node.body:\n if isinstance(child, ast.Function) and child.body:\n find_variables_use(child.body)\n for name in sorted(static_declarations):\n if not static_use_counts[name]:\n print(\"{}:{}: unused variable '{}'\".format(filename, lines.\n get_line_number(static_declarations[name].start), name))\n\n\ndef run(filename, source, entire_ast, include_paths, quiet):\n lines = metrics.Metrics(source)\n _find_warnings(filename, lines, entire_ast, True)\n _find_unused_static_warnings(filename, lines, entire_ast)\n",
"step-4": "<mask token>\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport collections\nfrom . import ast\nfrom . import metrics\n__author__ = 'nnorwitz@google.com (Neal Norwitz)'\n\n\ndef _find_warnings(filename, lines, ast_list, static_is_optional):\n\n def print_warning(node, name):\n print(\"{}:{}: static data '{}'\".format(filename, lines.\n get_line_number(node.start), name))\n\n def find_static(function_node):\n tokens = []\n static_found = False\n for node in function_node.body:\n if node.name == 'static':\n static_found = True\n if static_found:\n tokens.append(node)\n if node.name == ';':\n body = list(ast.ASTBuilder(iter(tokens), filename).\n generate())\n _find_warnings(filename, lines, body, False)\n tokens = []\n static_found = False\n for node in ast_list:\n if isinstance(node, ast.VariableDeclaration):\n is_static = 'static' in node.type.modifiers\n is_not_const = 'const' not in node.type.modifiers\n if is_not_const and (static_is_optional or is_static):\n print_warning(node, node.name)\n elif isinstance(node, ast.Function) and node.body:\n find_static(node)\n elif isinstance(node, ast.Class) and node.body:\n _find_warnings(filename, lines, node.body, False)\n\n\ndef _find_unused_static_warnings(filename, lines, ast_list):\n \"\"\"Warn about unused static variables.\"\"\"\n static_declarations = {node.name: node for node in ast_list if \n isinstance(node, ast.VariableDeclaration) and 'static' in node.type\n .modifiers}\n\n def find_variables_use(body):\n for child in body:\n if child.name in static_declarations:\n static_use_counts[child.name] += 1\n static_use_counts = collections.Counter()\n for node in ast_list:\n if isinstance(node, ast.Function) and node.body:\n find_variables_use(node.body)\n elif isinstance(node, ast.Class) and node.body:\n for child in node.body:\n if isinstance(child, ast.Function) and child.body:\n find_variables_use(child.body)\n for name in sorted(static_declarations):\n if not static_use_counts[name]:\n print(\"{}:{}: unused variable '{}'\".format(filename, lines.\n get_line_number(static_declarations[name].start), name))\n\n\ndef run(filename, source, entire_ast, include_paths, quiet):\n lines = metrics.Metrics(source)\n _find_warnings(filename, lines, entire_ast, True)\n _find_unused_static_warnings(filename, lines, entire_ast)\n",
"step-5": "# Copyright 2008 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Print classes, functions and modules which contain static data.\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport collections\n\nfrom . import ast\nfrom . import metrics\n\n\n__author__ = 'nnorwitz@google.com (Neal Norwitz)'\n\n\ndef _find_warnings(filename, lines, ast_list, static_is_optional):\n def print_warning(node, name):\n print(\"{}:{}: static data '{}'\".format(\n filename,\n lines.get_line_number(node.start),\n name))\n\n def find_static(function_node):\n tokens = []\n static_found = False\n for node in function_node.body:\n if node.name == 'static':\n static_found = True\n\n if static_found:\n tokens.append(node)\n if node.name == ';':\n body = list(\n ast.ASTBuilder(iter(tokens), filename).generate())\n _find_warnings(filename, lines, body, False)\n tokens = []\n static_found = False\n\n for node in ast_list:\n if isinstance(node, ast.VariableDeclaration):\n # Ignore 'static' at module scope so we can find globals too.\n is_static = 'static' in node.type.modifiers\n is_not_const = 'const' not in node.type.modifiers\n\n if is_not_const and (static_is_optional or is_static):\n print_warning(node, node.name)\n elif isinstance(node, ast.Function) and node.body:\n find_static(node)\n elif isinstance(node, ast.Class) and node.body:\n _find_warnings(filename, lines, node.body, False)\n\n\ndef _find_unused_static_warnings(filename, lines, ast_list):\n \"\"\"Warn about unused static variables.\"\"\"\n static_declarations = {\n node.name: node\n for node in ast_list\n if (isinstance(node, ast.VariableDeclaration) and\n 'static' in node.type.modifiers)\n }\n\n def find_variables_use(body):\n for child in body:\n if child.name in static_declarations:\n static_use_counts[child.name] += 1\n\n static_use_counts = collections.Counter()\n for node in ast_list:\n if isinstance(node, ast.Function) and node.body:\n find_variables_use(node.body)\n elif isinstance(node, ast.Class) and node.body:\n for child in node.body:\n if isinstance(child, ast.Function) and child.body:\n find_variables_use(child.body)\n\n for name in sorted(static_declarations):\n if not static_use_counts[name]:\n print(\"{}:{}: unused variable '{}'\".format(\n filename,\n lines.get_line_number(static_declarations[name].start),\n name))\n\n\ndef run(filename, source, entire_ast, include_paths, quiet):\n lines = metrics.Metrics(source)\n\n _find_warnings(filename, lines, entire_ast, True)\n _find_unused_static_warnings(filename, lines, entire_ast)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import json
from flask import current_app, request, jsonify, make_response
from flask_cors import cross_origin
from alerta.auth.utils import is_authorized, create_token, get_customer
from alerta.utils.api import absolute_url, deepmerge
from . import auth
try:
import saml2
import saml2.entity
import saml2.metadata
import saml2.config
import saml2.client
import saml2.saml
except ImportError:
pass # saml2 authentication will not work
def spConfig():
return saml2.config.Config()
def saml_client():
saml2_config_default = {
'entityid': absolute_url(),
'service': {
'sp': {
'endpoints': {
'assertion_consumer_service': [
(absolute_url('/auth/saml'), saml2.BINDING_HTTP_POST)
]
}
}
}
}
spConfig().load(deepmerge(saml2_config_default, current_app.config['SAML2_CONFIG']))
return saml2.client.Saml2Client(config=spConfig())
@auth.route('/auth/saml', methods=['GET'])
def saml_redirect_to_idp():
relay_state = None if request.args.get('usePostMessage') is None else 'usePostMessage'
(session_id, result) = saml_client().prepare_for_authenticate(relay_state=relay_state)
return make_response('', 302, result['headers'])
@auth.route('/auth/saml', methods=['OPTIONS', 'POST'])
@cross_origin(supports_credentials=True)
def saml_response_from_idp():
def _make_response(resp_obj, resp_code):
if 'usePostMessage' in request.form.get('RelayState', '') and 'text/html' in request.headers.get('Accept', ''):
origins = current_app.config.get('CORS_ORIGINS', [])
response = make_response(
'''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Authenticating...</title>
<script type="application/javascript">
var origins = {origins};
// in case when API and WebUI are on the same origin
if (origins.indexOf(window.location.origin) < 0)
origins.push(window.location.origin);
// only one will succeed
origins.forEach(origin => window.opener.postMessage({msg_data}, origin));
window.close();
</script>
</head>
<body></body>
</html>'''.format(msg_data=json.dumps(resp_obj), origins=json.dumps(origins)),
resp_code
)
response.headers['Content-Type'] = 'text/html'
return response
else:
return jsonify(**resp_obj), resp_code
authn_response = saml_client().parse_authn_request_response(
request.form['SAMLResponse'],
saml2.entity.BINDING_HTTP_POST
)
identity = authn_response.get_identity()
email = identity['emailAddress'][0]
domain = email.split('@')[1]
name = (current_app.config.get('SAML2_USER_NAME_FORMAT', '{givenName} {surname}')).format(**dict(map(lambda x: (x[0], x[1][0]), identity.items())))
groups = identity.get('groups', [])
if is_authorized('ALLOWED_SAML2_GROUPS', groups):
return _make_response({'status': 'error', 'message': 'User {} is not authorized'.format(email)}, 403)
customer = get_customer(email, groups=[domain])
token = create_token(email, name, email, provider='saml2', customer=customer, groups=groups)
return _make_response({'status': 'ok', 'token': token.tokenize}, 200)
@auth.route('/auth/saml/metadata.xml', methods=['GET'])
def saml_metadata():
edesc = saml2.metadata.entity_descriptor(spConfig())
response = make_response(str(edesc))
response.headers['Content-Type'] = 'text/xml; charset=utf-8'
return response
|
normal
|
{
"blob_id": "b233d212f3a6c453786dc54b2d43578e1faae417",
"index": 7292,
"step-1": "<mask token>\n\n\ndef spConfig():\n return saml2.config.Config()\n\n\ndef saml_client():\n saml2_config_default = {'entityid': absolute_url(), 'service': {'sp': {\n 'endpoints': {'assertion_consumer_service': [(absolute_url(\n '/auth/saml'), saml2.BINDING_HTTP_POST)]}}}}\n spConfig().load(deepmerge(saml2_config_default, current_app.config[\n 'SAML2_CONFIG']))\n return saml2.client.Saml2Client(config=spConfig())\n\n\n@auth.route('/auth/saml', methods=['GET'])\ndef saml_redirect_to_idp():\n relay_state = None if request.args.get('usePostMessage'\n ) is None else 'usePostMessage'\n session_id, result = saml_client().prepare_for_authenticate(relay_state\n =relay_state)\n return make_response('', 302, result['headers'])\n\n\n<mask token>\n\n\n@auth.route('/auth/saml/metadata.xml', methods=['GET'])\ndef saml_metadata():\n edesc = saml2.metadata.entity_descriptor(spConfig())\n response = make_response(str(edesc))\n response.headers['Content-Type'] = 'text/xml; charset=utf-8'\n return response\n",
"step-2": "<mask token>\n\n\ndef spConfig():\n return saml2.config.Config()\n\n\ndef saml_client():\n saml2_config_default = {'entityid': absolute_url(), 'service': {'sp': {\n 'endpoints': {'assertion_consumer_service': [(absolute_url(\n '/auth/saml'), saml2.BINDING_HTTP_POST)]}}}}\n spConfig().load(deepmerge(saml2_config_default, current_app.config[\n 'SAML2_CONFIG']))\n return saml2.client.Saml2Client(config=spConfig())\n\n\n@auth.route('/auth/saml', methods=['GET'])\ndef saml_redirect_to_idp():\n relay_state = None if request.args.get('usePostMessage'\n ) is None else 'usePostMessage'\n session_id, result = saml_client().prepare_for_authenticate(relay_state\n =relay_state)\n return make_response('', 302, result['headers'])\n\n\n@auth.route('/auth/saml', methods=['OPTIONS', 'POST'])\n@cross_origin(supports_credentials=True)\ndef saml_response_from_idp():\n\n def _make_response(resp_obj, resp_code):\n if 'usePostMessage' in request.form.get('RelayState', ''\n ) and 'text/html' in request.headers.get('Accept', ''):\n origins = current_app.config.get('CORS_ORIGINS', [])\n response = make_response(\n \"\"\"<!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"UTF-8\">\n <title>Authenticating...</title>\n <script type=\"application/javascript\">\n var origins = {origins};\n // in case when API and WebUI are on the same origin\n if (origins.indexOf(window.location.origin) < 0)\n origins.push(window.location.origin);\n // only one will succeed\n origins.forEach(origin => window.opener.postMessage({msg_data}, origin));\n window.close();\n </script>\n </head>\n <body></body>\n </html>\"\"\"\n .format(msg_data=json.dumps(resp_obj), origins=json.dumps(\n origins)), resp_code)\n response.headers['Content-Type'] = 'text/html'\n return response\n else:\n return jsonify(**resp_obj), resp_code\n authn_response = saml_client().parse_authn_request_response(request.\n form['SAMLResponse'], saml2.entity.BINDING_HTTP_POST)\n identity = authn_response.get_identity()\n email = identity['emailAddress'][0]\n domain = email.split('@')[1]\n name = current_app.config.get('SAML2_USER_NAME_FORMAT',\n '{givenName} {surname}').format(**dict(map(lambda x: (x[0], x[1][0]\n ), identity.items())))\n groups = identity.get('groups', [])\n if is_authorized('ALLOWED_SAML2_GROUPS', groups):\n return _make_response({'status': 'error', 'message':\n 'User {} is not authorized'.format(email)}, 403)\n customer = get_customer(email, groups=[domain])\n token = create_token(email, name, email, provider='saml2', customer=\n customer, groups=groups)\n return _make_response({'status': 'ok', 'token': token.tokenize}, 200)\n\n\n@auth.route('/auth/saml/metadata.xml', methods=['GET'])\ndef saml_metadata():\n edesc = saml2.metadata.entity_descriptor(spConfig())\n response = make_response(str(edesc))\n response.headers['Content-Type'] = 'text/xml; charset=utf-8'\n return response\n",
"step-3": "<mask token>\ntry:\n import saml2\n import saml2.entity\n import saml2.metadata\n import saml2.config\n import saml2.client\n import saml2.saml\nexcept ImportError:\n pass\n\n\ndef spConfig():\n return saml2.config.Config()\n\n\ndef saml_client():\n saml2_config_default = {'entityid': absolute_url(), 'service': {'sp': {\n 'endpoints': {'assertion_consumer_service': [(absolute_url(\n '/auth/saml'), saml2.BINDING_HTTP_POST)]}}}}\n spConfig().load(deepmerge(saml2_config_default, current_app.config[\n 'SAML2_CONFIG']))\n return saml2.client.Saml2Client(config=spConfig())\n\n\n@auth.route('/auth/saml', methods=['GET'])\ndef saml_redirect_to_idp():\n relay_state = None if request.args.get('usePostMessage'\n ) is None else 'usePostMessage'\n session_id, result = saml_client().prepare_for_authenticate(relay_state\n =relay_state)\n return make_response('', 302, result['headers'])\n\n\n@auth.route('/auth/saml', methods=['OPTIONS', 'POST'])\n@cross_origin(supports_credentials=True)\ndef saml_response_from_idp():\n\n def _make_response(resp_obj, resp_code):\n if 'usePostMessage' in request.form.get('RelayState', ''\n ) and 'text/html' in request.headers.get('Accept', ''):\n origins = current_app.config.get('CORS_ORIGINS', [])\n response = make_response(\n \"\"\"<!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"UTF-8\">\n <title>Authenticating...</title>\n <script type=\"application/javascript\">\n var origins = {origins};\n // in case when API and WebUI are on the same origin\n if (origins.indexOf(window.location.origin) < 0)\n origins.push(window.location.origin);\n // only one will succeed\n origins.forEach(origin => window.opener.postMessage({msg_data}, origin));\n window.close();\n </script>\n </head>\n <body></body>\n </html>\"\"\"\n .format(msg_data=json.dumps(resp_obj), origins=json.dumps(\n origins)), resp_code)\n response.headers['Content-Type'] = 'text/html'\n return response\n else:\n return jsonify(**resp_obj), resp_code\n authn_response = saml_client().parse_authn_request_response(request.\n form['SAMLResponse'], saml2.entity.BINDING_HTTP_POST)\n identity = authn_response.get_identity()\n email = identity['emailAddress'][0]\n domain = email.split('@')[1]\n name = current_app.config.get('SAML2_USER_NAME_FORMAT',\n '{givenName} {surname}').format(**dict(map(lambda x: (x[0], x[1][0]\n ), identity.items())))\n groups = identity.get('groups', [])\n if is_authorized('ALLOWED_SAML2_GROUPS', groups):\n return _make_response({'status': 'error', 'message':\n 'User {} is not authorized'.format(email)}, 403)\n customer = get_customer(email, groups=[domain])\n token = create_token(email, name, email, provider='saml2', customer=\n customer, groups=groups)\n return _make_response({'status': 'ok', 'token': token.tokenize}, 200)\n\n\n@auth.route('/auth/saml/metadata.xml', methods=['GET'])\ndef saml_metadata():\n edesc = saml2.metadata.entity_descriptor(spConfig())\n response = make_response(str(edesc))\n response.headers['Content-Type'] = 'text/xml; charset=utf-8'\n return response\n",
"step-4": "import json\nfrom flask import current_app, request, jsonify, make_response\nfrom flask_cors import cross_origin\nfrom alerta.auth.utils import is_authorized, create_token, get_customer\nfrom alerta.utils.api import absolute_url, deepmerge\nfrom . import auth\ntry:\n import saml2\n import saml2.entity\n import saml2.metadata\n import saml2.config\n import saml2.client\n import saml2.saml\nexcept ImportError:\n pass\n\n\ndef spConfig():\n return saml2.config.Config()\n\n\ndef saml_client():\n saml2_config_default = {'entityid': absolute_url(), 'service': {'sp': {\n 'endpoints': {'assertion_consumer_service': [(absolute_url(\n '/auth/saml'), saml2.BINDING_HTTP_POST)]}}}}\n spConfig().load(deepmerge(saml2_config_default, current_app.config[\n 'SAML2_CONFIG']))\n return saml2.client.Saml2Client(config=spConfig())\n\n\n@auth.route('/auth/saml', methods=['GET'])\ndef saml_redirect_to_idp():\n relay_state = None if request.args.get('usePostMessage'\n ) is None else 'usePostMessage'\n session_id, result = saml_client().prepare_for_authenticate(relay_state\n =relay_state)\n return make_response('', 302, result['headers'])\n\n\n@auth.route('/auth/saml', methods=['OPTIONS', 'POST'])\n@cross_origin(supports_credentials=True)\ndef saml_response_from_idp():\n\n def _make_response(resp_obj, resp_code):\n if 'usePostMessage' in request.form.get('RelayState', ''\n ) and 'text/html' in request.headers.get('Accept', ''):\n origins = current_app.config.get('CORS_ORIGINS', [])\n response = make_response(\n \"\"\"<!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"UTF-8\">\n <title>Authenticating...</title>\n <script type=\"application/javascript\">\n var origins = {origins};\n // in case when API and WebUI are on the same origin\n if (origins.indexOf(window.location.origin) < 0)\n origins.push(window.location.origin);\n // only one will succeed\n origins.forEach(origin => window.opener.postMessage({msg_data}, origin));\n window.close();\n </script>\n </head>\n <body></body>\n </html>\"\"\"\n .format(msg_data=json.dumps(resp_obj), origins=json.dumps(\n origins)), resp_code)\n response.headers['Content-Type'] = 'text/html'\n return response\n else:\n return jsonify(**resp_obj), resp_code\n authn_response = saml_client().parse_authn_request_response(request.\n form['SAMLResponse'], saml2.entity.BINDING_HTTP_POST)\n identity = authn_response.get_identity()\n email = identity['emailAddress'][0]\n domain = email.split('@')[1]\n name = current_app.config.get('SAML2_USER_NAME_FORMAT',\n '{givenName} {surname}').format(**dict(map(lambda x: (x[0], x[1][0]\n ), identity.items())))\n groups = identity.get('groups', [])\n if is_authorized('ALLOWED_SAML2_GROUPS', groups):\n return _make_response({'status': 'error', 'message':\n 'User {} is not authorized'.format(email)}, 403)\n customer = get_customer(email, groups=[domain])\n token = create_token(email, name, email, provider='saml2', customer=\n customer, groups=groups)\n return _make_response({'status': 'ok', 'token': token.tokenize}, 200)\n\n\n@auth.route('/auth/saml/metadata.xml', methods=['GET'])\ndef saml_metadata():\n edesc = saml2.metadata.entity_descriptor(spConfig())\n response = make_response(str(edesc))\n response.headers['Content-Type'] = 'text/xml; charset=utf-8'\n return response\n",
"step-5": "\nimport json\n\nfrom flask import current_app, request, jsonify, make_response\nfrom flask_cors import cross_origin\n\nfrom alerta.auth.utils import is_authorized, create_token, get_customer\nfrom alerta.utils.api import absolute_url, deepmerge\nfrom . import auth\n\ntry:\n import saml2\n import saml2.entity\n import saml2.metadata\n import saml2.config\n import saml2.client\n import saml2.saml\nexcept ImportError:\n pass # saml2 authentication will not work\n\n\ndef spConfig():\n return saml2.config.Config()\n\n\ndef saml_client():\n\n saml2_config_default = {\n 'entityid': absolute_url(),\n 'service': {\n 'sp': {\n 'endpoints': {\n 'assertion_consumer_service': [\n (absolute_url('/auth/saml'), saml2.BINDING_HTTP_POST)\n ]\n }\n }\n }\n }\n spConfig().load(deepmerge(saml2_config_default, current_app.config['SAML2_CONFIG']))\n return saml2.client.Saml2Client(config=spConfig())\n\n\n@auth.route('/auth/saml', methods=['GET'])\ndef saml_redirect_to_idp():\n relay_state = None if request.args.get('usePostMessage') is None else 'usePostMessage'\n (session_id, result) = saml_client().prepare_for_authenticate(relay_state=relay_state)\n return make_response('', 302, result['headers'])\n\n\n@auth.route('/auth/saml', methods=['OPTIONS', 'POST'])\n@cross_origin(supports_credentials=True)\ndef saml_response_from_idp():\n def _make_response(resp_obj, resp_code):\n if 'usePostMessage' in request.form.get('RelayState', '') and 'text/html' in request.headers.get('Accept', ''):\n origins = current_app.config.get('CORS_ORIGINS', [])\n response = make_response(\n '''<!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"UTF-8\">\n <title>Authenticating...</title>\n <script type=\"application/javascript\">\n var origins = {origins};\n // in case when API and WebUI are on the same origin\n if (origins.indexOf(window.location.origin) < 0)\n origins.push(window.location.origin);\n // only one will succeed\n origins.forEach(origin => window.opener.postMessage({msg_data}, origin));\n window.close();\n </script>\n </head>\n <body></body>\n </html>'''.format(msg_data=json.dumps(resp_obj), origins=json.dumps(origins)),\n resp_code\n )\n response.headers['Content-Type'] = 'text/html'\n return response\n else:\n return jsonify(**resp_obj), resp_code\n\n authn_response = saml_client().parse_authn_request_response(\n request.form['SAMLResponse'],\n saml2.entity.BINDING_HTTP_POST\n )\n identity = authn_response.get_identity()\n email = identity['emailAddress'][0]\n domain = email.split('@')[1]\n name = (current_app.config.get('SAML2_USER_NAME_FORMAT', '{givenName} {surname}')).format(**dict(map(lambda x: (x[0], x[1][0]), identity.items())))\n\n groups = identity.get('groups', [])\n if is_authorized('ALLOWED_SAML2_GROUPS', groups):\n return _make_response({'status': 'error', 'message': 'User {} is not authorized'.format(email)}, 403)\n\n customer = get_customer(email, groups=[domain])\n\n token = create_token(email, name, email, provider='saml2', customer=customer, groups=groups)\n return _make_response({'status': 'ok', 'token': token.tokenize}, 200)\n\n\n@auth.route('/auth/saml/metadata.xml', methods=['GET'])\ndef saml_metadata():\n edesc = saml2.metadata.entity_descriptor(spConfig())\n response = make_response(str(edesc))\n response.headers['Content-Type'] = 'text/xml; charset=utf-8'\n return response\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class PurchaseDetail(models.Model):
PRODUCT_CHOICES = ('WOOD', 'Wood'), ('GLASS', 'Glass'), ('PLASTIC',
'Plastic'), ('LEATHER', 'Leather'), ('FABRIC', 'Fabric'), ('STEEL',
'Steel')
purchase = models.ForeignKey(Purchase, on_delete=models.CASCADE)
product_name = models.CharField(max_length=30, choices=PRODUCT_CHOICES,
default='WOOD')
quantity = models.PositiveSmallIntegerField(blank=False)
rate = models.IntegerField(blank=False)
total = models.IntegerField(blank=False)
remarks = models.CharField(max_length=250)
def _get_total(self):
return self.quantity * self.rate
labor_total = property(_get_total)
def __str__(self):
return self.product_name
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Purchase(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_absolute_url(self):
return reverse('entry:purchase_detail', kwargs={'pk': self.pk})
class PurchaseDetail(models.Model):
PRODUCT_CHOICES = ('WOOD', 'Wood'), ('GLASS', 'Glass'), ('PLASTIC',
'Plastic'), ('LEATHER', 'Leather'), ('FABRIC', 'Fabric'), ('STEEL',
'Steel')
purchase = models.ForeignKey(Purchase, on_delete=models.CASCADE)
product_name = models.CharField(max_length=30, choices=PRODUCT_CHOICES,
default='WOOD')
quantity = models.PositiveSmallIntegerField(blank=False)
rate = models.IntegerField(blank=False)
total = models.IntegerField(blank=False)
remarks = models.CharField(max_length=250)
def _get_total(self):
return self.quantity * self.rate
labor_total = property(_get_total)
def __str__(self):
return self.product_name
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Purchase(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.vendor
def get_absolute_url(self):
return reverse('entry:purchase_detail', kwargs={'pk': self.pk})
class PurchaseDetail(models.Model):
PRODUCT_CHOICES = ('WOOD', 'Wood'), ('GLASS', 'Glass'), ('PLASTIC',
'Plastic'), ('LEATHER', 'Leather'), ('FABRIC', 'Fabric'), ('STEEL',
'Steel')
purchase = models.ForeignKey(Purchase, on_delete=models.CASCADE)
product_name = models.CharField(max_length=30, choices=PRODUCT_CHOICES,
default='WOOD')
quantity = models.PositiveSmallIntegerField(blank=False)
rate = models.IntegerField(blank=False)
total = models.IntegerField(blank=False)
remarks = models.CharField(max_length=250)
def _get_total(self):
return self.quantity * self.rate
labor_total = property(_get_total)
def __str__(self):
return self.product_name
<|reserved_special_token_1|>
from django.db import models
from django.utils import timezone
from django.db.models.signals import post_save
from django.urls import reverse
class Purchase(models.Model):
invoice = models.SmallIntegerField(primary_key=True, blank=False)
ch_no = models.SmallIntegerField(blank=True, null=True)
vendor = models.CharField(max_length=128, blank=False)
date = models.DateTimeField(default=timezone.now, blank=False)
description = models.TextField(max_length=4096, blank=True, null=True)
def __str__(self):
return self.vendor
def get_absolute_url(self):
return reverse('entry:purchase_detail', kwargs={'pk': self.pk})
class PurchaseDetail(models.Model):
PRODUCT_CHOICES = ('WOOD', 'Wood'), ('GLASS', 'Glass'), ('PLASTIC',
'Plastic'), ('LEATHER', 'Leather'), ('FABRIC', 'Fabric'), ('STEEL',
'Steel')
purchase = models.ForeignKey(Purchase, on_delete=models.CASCADE)
product_name = models.CharField(max_length=30, choices=PRODUCT_CHOICES,
default='WOOD')
quantity = models.PositiveSmallIntegerField(blank=False)
rate = models.IntegerField(blank=False)
total = models.IntegerField(blank=False)
remarks = models.CharField(max_length=250)
def _get_total(self):
return self.quantity * self.rate
labor_total = property(_get_total)
def __str__(self):
return self.product_name
<|reserved_special_token_1|>
from django.db import models
from django.utils import timezone
from django.db.models.signals import post_save
from django.urls import reverse
# Create your models here.
class Purchase(models.Model):
invoice = models.SmallIntegerField(primary_key=True,blank=False)
ch_no = models.SmallIntegerField(blank=True,null=True)
vendor = models.CharField(max_length=128, blank=False)
date = models.DateTimeField(default=timezone.now, blank=False)
description = models.TextField(max_length=4096, blank=True, null=True)
def __str__(self):
return self.vendor
def get_absolute_url(self):
return reverse('entry:purchase_detail', kwargs={'pk': self.pk})
class PurchaseDetail(models.Model):
PRODUCT_CHOICES = (
('WOOD', 'Wood'),
('GLASS', 'Glass'),
('PLASTIC', 'Plastic'),
('LEATHER', 'Leather'),
('FABRIC','Fabric'),
('STEEL', 'Steel'),
)
purchase= models.ForeignKey(Purchase,on_delete=models.CASCADE)
product_name = models.CharField(max_length=30,
choices=PRODUCT_CHOICES,
default='WOOD')
quantity = models.PositiveSmallIntegerField(blank=False)
rate = models.IntegerField(blank=False)
total = models.IntegerField(blank=False)
remarks = models.CharField(max_length=250)
def _get_total(self):
return self.quantity * self.rate
labor_total = property(_get_total)
def __str__(self):
return (self.product_name)
|
flexible
|
{
"blob_id": "bb3c42c9f87a463b9f18601c9e3897b6d21351d5",
"index": 7356,
"step-1": "<mask token>\n\n\nclass PurchaseDetail(models.Model):\n PRODUCT_CHOICES = ('WOOD', 'Wood'), ('GLASS', 'Glass'), ('PLASTIC',\n 'Plastic'), ('LEATHER', 'Leather'), ('FABRIC', 'Fabric'), ('STEEL',\n 'Steel')\n purchase = models.ForeignKey(Purchase, on_delete=models.CASCADE)\n product_name = models.CharField(max_length=30, choices=PRODUCT_CHOICES,\n default='WOOD')\n quantity = models.PositiveSmallIntegerField(blank=False)\n rate = models.IntegerField(blank=False)\n total = models.IntegerField(blank=False)\n remarks = models.CharField(max_length=250)\n\n def _get_total(self):\n return self.quantity * self.rate\n labor_total = property(_get_total)\n\n def __str__(self):\n return self.product_name\n",
"step-2": "<mask token>\n\n\nclass Purchase(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_absolute_url(self):\n return reverse('entry:purchase_detail', kwargs={'pk': self.pk})\n\n\nclass PurchaseDetail(models.Model):\n PRODUCT_CHOICES = ('WOOD', 'Wood'), ('GLASS', 'Glass'), ('PLASTIC',\n 'Plastic'), ('LEATHER', 'Leather'), ('FABRIC', 'Fabric'), ('STEEL',\n 'Steel')\n purchase = models.ForeignKey(Purchase, on_delete=models.CASCADE)\n product_name = models.CharField(max_length=30, choices=PRODUCT_CHOICES,\n default='WOOD')\n quantity = models.PositiveSmallIntegerField(blank=False)\n rate = models.IntegerField(blank=False)\n total = models.IntegerField(blank=False)\n remarks = models.CharField(max_length=250)\n\n def _get_total(self):\n return self.quantity * self.rate\n labor_total = property(_get_total)\n\n def __str__(self):\n return self.product_name\n",
"step-3": "<mask token>\n\n\nclass Purchase(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.vendor\n\n def get_absolute_url(self):\n return reverse('entry:purchase_detail', kwargs={'pk': self.pk})\n\n\nclass PurchaseDetail(models.Model):\n PRODUCT_CHOICES = ('WOOD', 'Wood'), ('GLASS', 'Glass'), ('PLASTIC',\n 'Plastic'), ('LEATHER', 'Leather'), ('FABRIC', 'Fabric'), ('STEEL',\n 'Steel')\n purchase = models.ForeignKey(Purchase, on_delete=models.CASCADE)\n product_name = models.CharField(max_length=30, choices=PRODUCT_CHOICES,\n default='WOOD')\n quantity = models.PositiveSmallIntegerField(blank=False)\n rate = models.IntegerField(blank=False)\n total = models.IntegerField(blank=False)\n remarks = models.CharField(max_length=250)\n\n def _get_total(self):\n return self.quantity * self.rate\n labor_total = property(_get_total)\n\n def __str__(self):\n return self.product_name\n",
"step-4": "from django.db import models\nfrom django.utils import timezone\nfrom django.db.models.signals import post_save\nfrom django.urls import reverse\n\n\nclass Purchase(models.Model):\n invoice = models.SmallIntegerField(primary_key=True, blank=False)\n ch_no = models.SmallIntegerField(blank=True, null=True)\n vendor = models.CharField(max_length=128, blank=False)\n date = models.DateTimeField(default=timezone.now, blank=False)\n description = models.TextField(max_length=4096, blank=True, null=True)\n\n def __str__(self):\n return self.vendor\n\n def get_absolute_url(self):\n return reverse('entry:purchase_detail', kwargs={'pk': self.pk})\n\n\nclass PurchaseDetail(models.Model):\n PRODUCT_CHOICES = ('WOOD', 'Wood'), ('GLASS', 'Glass'), ('PLASTIC',\n 'Plastic'), ('LEATHER', 'Leather'), ('FABRIC', 'Fabric'), ('STEEL',\n 'Steel')\n purchase = models.ForeignKey(Purchase, on_delete=models.CASCADE)\n product_name = models.CharField(max_length=30, choices=PRODUCT_CHOICES,\n default='WOOD')\n quantity = models.PositiveSmallIntegerField(blank=False)\n rate = models.IntegerField(blank=False)\n total = models.IntegerField(blank=False)\n remarks = models.CharField(max_length=250)\n\n def _get_total(self):\n return self.quantity * self.rate\n labor_total = property(_get_total)\n\n def __str__(self):\n return self.product_name\n",
"step-5": "from django.db import models\nfrom django.utils import timezone\nfrom django.db.models.signals import post_save\nfrom django.urls import reverse\n# Create your models here.\n\nclass Purchase(models.Model):\n invoice = models.SmallIntegerField(primary_key=True,blank=False)\n ch_no = models.SmallIntegerField(blank=True,null=True)\n vendor = models.CharField(max_length=128, blank=False)\n date = models.DateTimeField(default=timezone.now, blank=False)\n description = models.TextField(max_length=4096, blank=True, null=True)\n\n def __str__(self):\n return self.vendor\n\n def get_absolute_url(self):\n return reverse('entry:purchase_detail', kwargs={'pk': self.pk})\n\n\n\nclass PurchaseDetail(models.Model):\n \n PRODUCT_CHOICES = (\n ('WOOD', 'Wood'),\n ('GLASS', 'Glass'),\n ('PLASTIC', 'Plastic'),\n ('LEATHER', 'Leather'),\n ('FABRIC','Fabric'),\n ('STEEL', 'Steel'),\n )\n purchase= models.ForeignKey(Purchase,on_delete=models.CASCADE)\n product_name = models.CharField(max_length=30,\n choices=PRODUCT_CHOICES,\n default='WOOD')\n quantity = models.PositiveSmallIntegerField(blank=False)\n rate = models.IntegerField(blank=False)\n total = models.IntegerField(blank=False)\n remarks = models.CharField(max_length=250)\n\n def _get_total(self):\n return self.quantity * self.rate\n labor_total = property(_get_total)\n def __str__(self):\n return (self.product_name)\n ",
"step-ids": [
4,
6,
7,
9,
10
]
}
|
[
4,
6,
7,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MonitorLocation(Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
app_label = 'sentry'
db_table = 'sentry_monitorlocation'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MonitorLocation(Model):
__core__ = True
guid = UUIDField(unique=True, auto_add=True)
name = models.CharField(max_length=128)
date_added = models.DateTimeField(default=timezone.now)
objects = BaseManager(cache_fields=('guid',))
class Meta:
app_label = 'sentry'
db_table = 'sentry_monitorlocation'
__repr__ = sane_repr('guid', 'name')
<|reserved_special_token_1|>
from __future__ import absolute_import, print_function
from django.db import models
from django.utils import timezone
from sentry.db.models import Model, BaseManager, UUIDField, sane_repr
class MonitorLocation(Model):
__core__ = True
guid = UUIDField(unique=True, auto_add=True)
name = models.CharField(max_length=128)
date_added = models.DateTimeField(default=timezone.now)
objects = BaseManager(cache_fields=('guid',))
class Meta:
app_label = 'sentry'
db_table = 'sentry_monitorlocation'
__repr__ = sane_repr('guid', 'name')
<|reserved_special_token_1|>
from __future__ import absolute_import, print_function
from django.db import models
from django.utils import timezone
from sentry.db.models import (
Model,
BaseManager,
UUIDField,
sane_repr,
)
class MonitorLocation(Model):
__core__ = True
guid = UUIDField(unique=True, auto_add=True)
name = models.CharField(max_length=128)
date_added = models.DateTimeField(default=timezone.now)
objects = BaseManager(cache_fields=('guid', ))
class Meta:
app_label = 'sentry'
db_table = 'sentry_monitorlocation'
__repr__ = sane_repr('guid', 'name')
|
flexible
|
{
"blob_id": "1a4132358fa9bd4cd74970286ec8bb212b1857cd",
"index": 5247,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass MonitorLocation(Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n app_label = 'sentry'\n db_table = 'sentry_monitorlocation'\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass MonitorLocation(Model):\n __core__ = True\n guid = UUIDField(unique=True, auto_add=True)\n name = models.CharField(max_length=128)\n date_added = models.DateTimeField(default=timezone.now)\n objects = BaseManager(cache_fields=('guid',))\n\n\n class Meta:\n app_label = 'sentry'\n db_table = 'sentry_monitorlocation'\n __repr__ = sane_repr('guid', 'name')\n",
"step-4": "from __future__ import absolute_import, print_function\nfrom django.db import models\nfrom django.utils import timezone\nfrom sentry.db.models import Model, BaseManager, UUIDField, sane_repr\n\n\nclass MonitorLocation(Model):\n __core__ = True\n guid = UUIDField(unique=True, auto_add=True)\n name = models.CharField(max_length=128)\n date_added = models.DateTimeField(default=timezone.now)\n objects = BaseManager(cache_fields=('guid',))\n\n\n class Meta:\n app_label = 'sentry'\n db_table = 'sentry_monitorlocation'\n __repr__ = sane_repr('guid', 'name')\n",
"step-5": "from __future__ import absolute_import, print_function\n\nfrom django.db import models\nfrom django.utils import timezone\n\nfrom sentry.db.models import (\n Model,\n BaseManager,\n UUIDField,\n sane_repr,\n)\n\n\nclass MonitorLocation(Model):\n __core__ = True\n\n guid = UUIDField(unique=True, auto_add=True)\n name = models.CharField(max_length=128)\n date_added = models.DateTimeField(default=timezone.now)\n objects = BaseManager(cache_fields=('guid', ))\n\n class Meta:\n app_label = 'sentry'\n db_table = 'sentry_monitorlocation'\n\n __repr__ = sane_repr('guid', 'name')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from .line_detection_research import score_pixel_v3p2
|
flexible
|
{
"blob_id": "305554fc86ddc116677b6d95db7d94d9f2213c41",
"index": 5088,
"step-1": "<mask token>\n",
"step-2": "from .line_detection_research import score_pixel_v3p2\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def book(request):
Book.objects.create(title=request.POST['b_title'], desc=request.POST[
'b_desc'])
return redirect('/')
def author(request):
context = {'the_auths': Author.objects.all()}
return render(request, 'author.html', context)
def auth(request):
Author.objects.create(first_name=request.POST['a_first'], last_name=
request.POST['a_last'], notes=request.POST['a_notes'])
return redirect('/author')
def authInfo(request, authorid):
context = {'selectedAuthor': Author.objects.get(id=authorid)}
return render(request, 'author_info.html', context)
<|reserved_special_token_0|>
def authUpdate(request, bookid):
this_book = Book.objects.get(id=bookid)
this_auth = Author.objects.get(id=request.POST['chosenAuth'])
this_book.authors.add(this_auth)
return redirect(f'/bookinfo/{bookid}')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(request):
context = {'the_books': Book.objects.all()}
return render(request, 'index.html', context)
def book(request):
Book.objects.create(title=request.POST['b_title'], desc=request.POST[
'b_desc'])
return redirect('/')
def author(request):
context = {'the_auths': Author.objects.all()}
return render(request, 'author.html', context)
def auth(request):
Author.objects.create(first_name=request.POST['a_first'], last_name=
request.POST['a_last'], notes=request.POST['a_notes'])
return redirect('/author')
def authInfo(request, authorid):
context = {'selectedAuthor': Author.objects.get(id=authorid)}
return render(request, 'author_info.html', context)
<|reserved_special_token_0|>
def authUpdate(request, bookid):
this_book = Book.objects.get(id=bookid)
this_auth = Author.objects.get(id=request.POST['chosenAuth'])
this_book.authors.add(this_auth)
return redirect(f'/bookinfo/{bookid}')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(request):
context = {'the_books': Book.objects.all()}
return render(request, 'index.html', context)
def book(request):
Book.objects.create(title=request.POST['b_title'], desc=request.POST[
'b_desc'])
return redirect('/')
def author(request):
context = {'the_auths': Author.objects.all()}
return render(request, 'author.html', context)
def auth(request):
Author.objects.create(first_name=request.POST['a_first'], last_name=
request.POST['a_last'], notes=request.POST['a_notes'])
return redirect('/author')
def authInfo(request, authorid):
context = {'selectedAuthor': Author.objects.get(id=authorid)}
return render(request, 'author_info.html', context)
def bookInfo(request, bookid):
context = {'selectedBook': Book.objects.get(id=bookid), 'allAuthors':
Author.objects.all()}
return render(request, 'book_info.html', context)
def authUpdate(request, bookid):
this_book = Book.objects.get(id=bookid)
this_auth = Author.objects.get(id=request.POST['chosenAuth'])
this_book.authors.add(this_auth)
return redirect(f'/bookinfo/{bookid}')
<|reserved_special_token_1|>
from django.shortcuts import render, HttpResponse, redirect
from .models import Book, Author
def main(request):
context = {'the_books': Book.objects.all()}
return render(request, 'index.html', context)
def book(request):
Book.objects.create(title=request.POST['b_title'], desc=request.POST[
'b_desc'])
return redirect('/')
def author(request):
context = {'the_auths': Author.objects.all()}
return render(request, 'author.html', context)
def auth(request):
Author.objects.create(first_name=request.POST['a_first'], last_name=
request.POST['a_last'], notes=request.POST['a_notes'])
return redirect('/author')
def authInfo(request, authorid):
context = {'selectedAuthor': Author.objects.get(id=authorid)}
return render(request, 'author_info.html', context)
def bookInfo(request, bookid):
context = {'selectedBook': Book.objects.get(id=bookid), 'allAuthors':
Author.objects.all()}
return render(request, 'book_info.html', context)
def authUpdate(request, bookid):
this_book = Book.objects.get(id=bookid)
this_auth = Author.objects.get(id=request.POST['chosenAuth'])
this_book.authors.add(this_auth)
return redirect(f'/bookinfo/{bookid}')
<|reserved_special_token_1|>
from django.shortcuts import render, HttpResponse, redirect
from .models import Book, Author # This is the models.py Database
# Create your views here.
def main(request):
context = {
"the_books" : Book.objects.all(), #Book Class model.py
}
return render(request, "index.html", context)
def book(request):
Book.objects.create(title = request.POST['b_title'], desc = request.POST['b_desc'])
return redirect('/')
def author(request):
context = {
"the_auths" : Author.objects.all(), #Author Class model.py
}
return render(request, "author.html", context)
def auth(request):
Author.objects.create(first_name = request.POST['a_first'], last_name = request.POST['a_last'], notes = request.POST['a_notes'])
# newA = Author(first_name= "jlkj")
# newA.save()
return redirect('/author')
def authInfo(request, authorid):
context = {
'selectedAuthor' : Author.objects.get(id=authorid)
}
return render(request, "author_info.html", context)
def bookInfo(request, bookid):
context = {
'selectedBook' : Book.objects.get(id=bookid),
'allAuthors' : Author.objects.all()
}
return render(request, "book_info.html", context)
def authUpdate(request, bookid):
this_book = Book.objects.get(id=bookid)
this_auth = Author.objects.get(id = request.POST['chosenAuth'])
this_book.authors.add(this_auth)
return redirect(f"/bookinfo/{bookid}")
|
flexible
|
{
"blob_id": "02bec34b138d53235dc944adeae8ccb8d6b3d340",
"index": 4424,
"step-1": "<mask token>\n\n\ndef book(request):\n Book.objects.create(title=request.POST['b_title'], desc=request.POST[\n 'b_desc'])\n return redirect('/')\n\n\ndef author(request):\n context = {'the_auths': Author.objects.all()}\n return render(request, 'author.html', context)\n\n\ndef auth(request):\n Author.objects.create(first_name=request.POST['a_first'], last_name=\n request.POST['a_last'], notes=request.POST['a_notes'])\n return redirect('/author')\n\n\ndef authInfo(request, authorid):\n context = {'selectedAuthor': Author.objects.get(id=authorid)}\n return render(request, 'author_info.html', context)\n\n\n<mask token>\n\n\ndef authUpdate(request, bookid):\n this_book = Book.objects.get(id=bookid)\n this_auth = Author.objects.get(id=request.POST['chosenAuth'])\n this_book.authors.add(this_auth)\n return redirect(f'/bookinfo/{bookid}')\n",
"step-2": "<mask token>\n\n\ndef main(request):\n context = {'the_books': Book.objects.all()}\n return render(request, 'index.html', context)\n\n\ndef book(request):\n Book.objects.create(title=request.POST['b_title'], desc=request.POST[\n 'b_desc'])\n return redirect('/')\n\n\ndef author(request):\n context = {'the_auths': Author.objects.all()}\n return render(request, 'author.html', context)\n\n\ndef auth(request):\n Author.objects.create(first_name=request.POST['a_first'], last_name=\n request.POST['a_last'], notes=request.POST['a_notes'])\n return redirect('/author')\n\n\ndef authInfo(request, authorid):\n context = {'selectedAuthor': Author.objects.get(id=authorid)}\n return render(request, 'author_info.html', context)\n\n\n<mask token>\n\n\ndef authUpdate(request, bookid):\n this_book = Book.objects.get(id=bookid)\n this_auth = Author.objects.get(id=request.POST['chosenAuth'])\n this_book.authors.add(this_auth)\n return redirect(f'/bookinfo/{bookid}')\n",
"step-3": "<mask token>\n\n\ndef main(request):\n context = {'the_books': Book.objects.all()}\n return render(request, 'index.html', context)\n\n\ndef book(request):\n Book.objects.create(title=request.POST['b_title'], desc=request.POST[\n 'b_desc'])\n return redirect('/')\n\n\ndef author(request):\n context = {'the_auths': Author.objects.all()}\n return render(request, 'author.html', context)\n\n\ndef auth(request):\n Author.objects.create(first_name=request.POST['a_first'], last_name=\n request.POST['a_last'], notes=request.POST['a_notes'])\n return redirect('/author')\n\n\ndef authInfo(request, authorid):\n context = {'selectedAuthor': Author.objects.get(id=authorid)}\n return render(request, 'author_info.html', context)\n\n\ndef bookInfo(request, bookid):\n context = {'selectedBook': Book.objects.get(id=bookid), 'allAuthors':\n Author.objects.all()}\n return render(request, 'book_info.html', context)\n\n\ndef authUpdate(request, bookid):\n this_book = Book.objects.get(id=bookid)\n this_auth = Author.objects.get(id=request.POST['chosenAuth'])\n this_book.authors.add(this_auth)\n return redirect(f'/bookinfo/{bookid}')\n",
"step-4": "from django.shortcuts import render, HttpResponse, redirect\nfrom .models import Book, Author\n\n\ndef main(request):\n context = {'the_books': Book.objects.all()}\n return render(request, 'index.html', context)\n\n\ndef book(request):\n Book.objects.create(title=request.POST['b_title'], desc=request.POST[\n 'b_desc'])\n return redirect('/')\n\n\ndef author(request):\n context = {'the_auths': Author.objects.all()}\n return render(request, 'author.html', context)\n\n\ndef auth(request):\n Author.objects.create(first_name=request.POST['a_first'], last_name=\n request.POST['a_last'], notes=request.POST['a_notes'])\n return redirect('/author')\n\n\ndef authInfo(request, authorid):\n context = {'selectedAuthor': Author.objects.get(id=authorid)}\n return render(request, 'author_info.html', context)\n\n\ndef bookInfo(request, bookid):\n context = {'selectedBook': Book.objects.get(id=bookid), 'allAuthors':\n Author.objects.all()}\n return render(request, 'book_info.html', context)\n\n\ndef authUpdate(request, bookid):\n this_book = Book.objects.get(id=bookid)\n this_auth = Author.objects.get(id=request.POST['chosenAuth'])\n this_book.authors.add(this_auth)\n return redirect(f'/bookinfo/{bookid}')\n",
"step-5": "from django.shortcuts import render, HttpResponse, redirect\nfrom .models import Book, Author # This is the models.py Database\n\n# Create your views here.\n\ndef main(request):\n context = {\n \"the_books\" : Book.objects.all(), #Book Class model.py\n }\n return render(request, \"index.html\", context)\n\ndef book(request):\n Book.objects.create(title = request.POST['b_title'], desc = request.POST['b_desc'])\n return redirect('/')\n\ndef author(request):\n context = {\n \"the_auths\" : Author.objects.all(), #Author Class model.py\n }\n return render(request, \"author.html\", context)\n\ndef auth(request):\n Author.objects.create(first_name = request.POST['a_first'], last_name = request.POST['a_last'], notes = request.POST['a_notes'])\n # newA = Author(first_name= \"jlkj\")\n # newA.save()\n return redirect('/author')\n\ndef authInfo(request, authorid):\n context = {\n 'selectedAuthor' : Author.objects.get(id=authorid)\n }\n return render(request, \"author_info.html\", context)\n\ndef bookInfo(request, bookid):\n context = {\n 'selectedBook' : Book.objects.get(id=bookid),\n 'allAuthors' : Author.objects.all()\n }\n return render(request, \"book_info.html\", context)\n\ndef authUpdate(request, bookid):\n this_book = Book.objects.get(id=bookid)\n this_auth = Author.objects.get(id = request.POST['chosenAuth'])\n this_book.authors.add(this_auth)\n return redirect(f\"/bookinfo/{bookid}\")",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class ChangeEmail(forms.Form):
<|reserved_special_token_0|>
class ChangePassword(forms.Form):
oldPassword = forms.CharField(required=True, min_length=8, max_length=
80, widget=forms.PasswordInput(attrs={'name': 'oldPassword'}))
password1 = forms.CharField(required=True, min_length=8, max_length=80,
widget=forms.PasswordInput(attrs={'name': 'password1'}), label=
'Password')
password2 = forms.CharField(required=True, min_length=8, max_length=80,
widget=forms.PasswordInput(attrs={'name': 'password2'}), label=
'Confirm password')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RegisterForm(UserCreationForm):
<|reserved_special_token_0|>
class Meta:
model = User
fields = 'username', 'email', 'password1', 'password2'
class ChangeEmail(forms.Form):
email = forms.CharField(required=True, max_length=120, widget=forms.
TextInput(attrs={'name': 'emailInput'}))
class ChangePassword(forms.Form):
oldPassword = forms.CharField(required=True, min_length=8, max_length=
80, widget=forms.PasswordInput(attrs={'name': 'oldPassword'}))
password1 = forms.CharField(required=True, min_length=8, max_length=80,
widget=forms.PasswordInput(attrs={'name': 'password1'}), label=
'Password')
password2 = forms.CharField(required=True, min_length=8, max_length=80,
widget=forms.PasswordInput(attrs={'name': 'password2'}), label=
'Confirm password')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RegisterForm(UserCreationForm):
email = forms.EmailField(required=True)
class Meta:
model = User
fields = 'username', 'email', 'password1', 'password2'
class ChangeEmail(forms.Form):
email = forms.CharField(required=True, max_length=120, widget=forms.
TextInput(attrs={'name': 'emailInput'}))
class ChangePassword(forms.Form):
oldPassword = forms.CharField(required=True, min_length=8, max_length=
80, widget=forms.PasswordInput(attrs={'name': 'oldPassword'}))
password1 = forms.CharField(required=True, min_length=8, max_length=80,
widget=forms.PasswordInput(attrs={'name': 'password1'}), label=
'Password')
password2 = forms.CharField(required=True, min_length=8, max_length=80,
widget=forms.PasswordInput(attrs={'name': 'password2'}), label=
'Confirm password')
<|reserved_special_token_1|>
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from . import models
class RegisterForm(UserCreationForm):
email = forms.EmailField(required=True)
class Meta:
model = User
fields = 'username', 'email', 'password1', 'password2'
class ChangeEmail(forms.Form):
email = forms.CharField(required=True, max_length=120, widget=forms.
TextInput(attrs={'name': 'emailInput'}))
class ChangePassword(forms.Form):
oldPassword = forms.CharField(required=True, min_length=8, max_length=
80, widget=forms.PasswordInput(attrs={'name': 'oldPassword'}))
password1 = forms.CharField(required=True, min_length=8, max_length=80,
widget=forms.PasswordInput(attrs={'name': 'password1'}), label=
'Password')
password2 = forms.CharField(required=True, min_length=8, max_length=80,
widget=forms.PasswordInput(attrs={'name': 'password2'}), label=
'Confirm password')
<|reserved_special_token_1|>
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from . import models
class RegisterForm(UserCreationForm):
email = forms.EmailField(required=True)
class Meta:
model = User
fields = ("username", "email", "password1", "password2")
class ChangeEmail(forms.Form):
email = forms.CharField(required=True, max_length=120, widget=forms.TextInput(attrs={'name': 'emailInput'}))
class ChangePassword(forms.Form):
oldPassword = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name':"oldPassword"}))
password1 = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name': 'password1'}), label="Password")
password2 = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name': 'password2'}), label='Confirm password')
|
flexible
|
{
"blob_id": "503726cd2d70286189f4b8e02acaa3d5f6e29e12",
"index": 8538,
"step-1": "<mask token>\n\n\nclass ChangeEmail(forms.Form):\n <mask token>\n\n\nclass ChangePassword(forms.Form):\n oldPassword = forms.CharField(required=True, min_length=8, max_length=\n 80, widget=forms.PasswordInput(attrs={'name': 'oldPassword'}))\n password1 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password1'}), label=\n 'Password')\n password2 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password2'}), label=\n 'Confirm password')\n",
"step-2": "<mask token>\n\n\nclass RegisterForm(UserCreationForm):\n <mask token>\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password1', 'password2'\n\n\nclass ChangeEmail(forms.Form):\n email = forms.CharField(required=True, max_length=120, widget=forms.\n TextInput(attrs={'name': 'emailInput'}))\n\n\nclass ChangePassword(forms.Form):\n oldPassword = forms.CharField(required=True, min_length=8, max_length=\n 80, widget=forms.PasswordInput(attrs={'name': 'oldPassword'}))\n password1 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password1'}), label=\n 'Password')\n password2 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password2'}), label=\n 'Confirm password')\n",
"step-3": "<mask token>\n\n\nclass RegisterForm(UserCreationForm):\n email = forms.EmailField(required=True)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password1', 'password2'\n\n\nclass ChangeEmail(forms.Form):\n email = forms.CharField(required=True, max_length=120, widget=forms.\n TextInput(attrs={'name': 'emailInput'}))\n\n\nclass ChangePassword(forms.Form):\n oldPassword = forms.CharField(required=True, min_length=8, max_length=\n 80, widget=forms.PasswordInput(attrs={'name': 'oldPassword'}))\n password1 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password1'}), label=\n 'Password')\n password2 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password2'}), label=\n 'Confirm password')\n",
"step-4": "from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom . import models\n\n\nclass RegisterForm(UserCreationForm):\n email = forms.EmailField(required=True)\n\n\n class Meta:\n model = User\n fields = 'username', 'email', 'password1', 'password2'\n\n\nclass ChangeEmail(forms.Form):\n email = forms.CharField(required=True, max_length=120, widget=forms.\n TextInput(attrs={'name': 'emailInput'}))\n\n\nclass ChangePassword(forms.Form):\n oldPassword = forms.CharField(required=True, min_length=8, max_length=\n 80, widget=forms.PasswordInput(attrs={'name': 'oldPassword'}))\n password1 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password1'}), label=\n 'Password')\n password2 = forms.CharField(required=True, min_length=8, max_length=80,\n widget=forms.PasswordInput(attrs={'name': 'password2'}), label=\n 'Confirm password')\n",
"step-5": "from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom . import models\n\n\nclass RegisterForm(UserCreationForm):\n email = forms.EmailField(required=True)\n\n class Meta:\n model = User\n fields = (\"username\", \"email\", \"password1\", \"password2\")\n\n\nclass ChangeEmail(forms.Form):\n email = forms.CharField(required=True, max_length=120, widget=forms.TextInput(attrs={'name': 'emailInput'}))\n\n\nclass ChangePassword(forms.Form):\n oldPassword = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name':\"oldPassword\"}))\n password1 = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name': 'password1'}), label=\"Password\")\n password2 = forms.CharField(required=True, min_length=8, max_length=80, widget=forms.PasswordInput(attrs={'name': 'password2'}), label='Confirm password')\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_parse_header():
print()
for case_id, case in CASES:
ss = SchemaSheet.from_dictreader(case)
tc = ss.table_config
info_cc = tc.columns[INFO]
assert info_cc.name == INFO
assert info_cc.maps_to == 'description'
assert info_cc.metaslot is not None
assert info_cc.metaslot.name == 'description'
if case_id == 1 or case_id == 2:
assert tc.metatype_column is None
record_cc = tc.columns[RECORD]
assert record_cc.name == RECORD
assert record_cc.maps_to == 'class'
assert record_cc.metaslot is None
if case_id == 2:
field_cc = tc.columns[FIELD]
assert field_cc.name == FIELD
assert field_cc.maps_to == 'slot'
assert field_cc.metaslot is None
if case_id == 1:
sdo_cc = tc.columns[SDO_MAPPINGS]
assert sdo_cc.name == SDO_MAPPINGS
assert sdo_cc.maps_to == 'exact_mappings'
assert sdo_cc.metaslot is not None
assert sdo_cc.metaslot.name == 'exact mappings' or sdo_cc.metaslot.name == 'exact_mappings'
assert sdo_cc.settings.curie_prefix == 'sdo'
wd_cc = tc.columns[WD_MAPPINGS]
assert wd_cc.name == WD_MAPPINGS
assert wd_cc.maps_to == 'exact_mappings'
assert wd_cc.metaslot is not None
assert wd_cc.metaslot.name == 'exact mappings' or wd_cc.metaslot.name == 'exact_mappings'
assert wd_cc.settings.curie_prefix == 'wd'
if case_id == 3:
assert tc.metatype_column == METATYPE
record_cc = tc.columns[METATYPE]
assert record_cc.name == METATYPE
assert record_cc.maps_to == 'metatype'
assert record_cc.metaslot is None
if case_id == 4:
cv_cc = tc.columns[CV]
assert cv_cc.name == CV
assert cv_cc.maps_to == 'enum'
assert cv_cc.metaslot is None
pv_cc = tc.columns[PV]
assert pv_cc.name == PV
assert pv_cc.maps_to == 'permissible_value'
assert pv_cc.metaslot is None
if case_id == 5:
dt_cc = tc.columns[DATATYPE]
assert dt_cc.name == DATATYPE
assert dt_cc.maps_to == 'type'
assert dt_cc.metaslot is None
if case_id == 6:
dt_cc = tc.columns[DATATYPE]
assert dt_cc.name == DATATYPE
assert dt_cc.maps_to == 'type'
assert dt_cc.metaslot is not None
assert dt_cc.metaslot.name == 'type'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
RECORD = 'Record'
FIELD = 'Field'
METATYPE = 'MetaType'
INFO = 'Info'
CV = 'CV'
PV = 'PV'
SDO_MAPPINGS = 'schema.org'
WD_MAPPINGS = 'wikidata'
DATATYPE = 'Datatype'
CASES = [(1, [{RECORD: '> class', INFO: ' description', SDO_MAPPINGS:
'exact_mappings: {curie_prefix: sdo}', WD_MAPPINGS: 'exact_mappings'},
{RECORD: '>', WD_MAPPINGS: 'curie_prefix: wd'}]), (2, [{RECORD:
'> class', FIELD: ' slot', INFO: ' description'}]), (3, [{METATYPE:
'> metatype', INFO: ' description'}]), (4, [{CV: '> enum', PV:
'permissible_value', INFO: ' description'}]), (5, [{DATATYPE: '> type',
INFO: ' description'}])]
def test_parse_header():
print()
for case_id, case in CASES:
ss = SchemaSheet.from_dictreader(case)
tc = ss.table_config
info_cc = tc.columns[INFO]
assert info_cc.name == INFO
assert info_cc.maps_to == 'description'
assert info_cc.metaslot is not None
assert info_cc.metaslot.name == 'description'
if case_id == 1 or case_id == 2:
assert tc.metatype_column is None
record_cc = tc.columns[RECORD]
assert record_cc.name == RECORD
assert record_cc.maps_to == 'class'
assert record_cc.metaslot is None
if case_id == 2:
field_cc = tc.columns[FIELD]
assert field_cc.name == FIELD
assert field_cc.maps_to == 'slot'
assert field_cc.metaslot is None
if case_id == 1:
sdo_cc = tc.columns[SDO_MAPPINGS]
assert sdo_cc.name == SDO_MAPPINGS
assert sdo_cc.maps_to == 'exact_mappings'
assert sdo_cc.metaslot is not None
assert sdo_cc.metaslot.name == 'exact mappings' or sdo_cc.metaslot.name == 'exact_mappings'
assert sdo_cc.settings.curie_prefix == 'sdo'
wd_cc = tc.columns[WD_MAPPINGS]
assert wd_cc.name == WD_MAPPINGS
assert wd_cc.maps_to == 'exact_mappings'
assert wd_cc.metaslot is not None
assert wd_cc.metaslot.name == 'exact mappings' or wd_cc.metaslot.name == 'exact_mappings'
assert wd_cc.settings.curie_prefix == 'wd'
if case_id == 3:
assert tc.metatype_column == METATYPE
record_cc = tc.columns[METATYPE]
assert record_cc.name == METATYPE
assert record_cc.maps_to == 'metatype'
assert record_cc.metaslot is None
if case_id == 4:
cv_cc = tc.columns[CV]
assert cv_cc.name == CV
assert cv_cc.maps_to == 'enum'
assert cv_cc.metaslot is None
pv_cc = tc.columns[PV]
assert pv_cc.name == PV
assert pv_cc.maps_to == 'permissible_value'
assert pv_cc.metaslot is None
if case_id == 5:
dt_cc = tc.columns[DATATYPE]
assert dt_cc.name == DATATYPE
assert dt_cc.maps_to == 'type'
assert dt_cc.metaslot is None
if case_id == 6:
dt_cc = tc.columns[DATATYPE]
assert dt_cc.name == DATATYPE
assert dt_cc.maps_to == 'type'
assert dt_cc.metaslot is not None
assert dt_cc.metaslot.name == 'type'
<|reserved_special_token_1|>
from schemasheets.schemasheet_datamodel import SchemaSheet
RECORD = 'Record'
FIELD = 'Field'
METATYPE = 'MetaType'
INFO = 'Info'
CV = 'CV'
PV = 'PV'
SDO_MAPPINGS = 'schema.org'
WD_MAPPINGS = 'wikidata'
DATATYPE = 'Datatype'
CASES = [(1, [{RECORD: '> class', INFO: ' description', SDO_MAPPINGS:
'exact_mappings: {curie_prefix: sdo}', WD_MAPPINGS: 'exact_mappings'},
{RECORD: '>', WD_MAPPINGS: 'curie_prefix: wd'}]), (2, [{RECORD:
'> class', FIELD: ' slot', INFO: ' description'}]), (3, [{METATYPE:
'> metatype', INFO: ' description'}]), (4, [{CV: '> enum', PV:
'permissible_value', INFO: ' description'}]), (5, [{DATATYPE: '> type',
INFO: ' description'}])]
def test_parse_header():
print()
for case_id, case in CASES:
ss = SchemaSheet.from_dictreader(case)
tc = ss.table_config
info_cc = tc.columns[INFO]
assert info_cc.name == INFO
assert info_cc.maps_to == 'description'
assert info_cc.metaslot is not None
assert info_cc.metaslot.name == 'description'
if case_id == 1 or case_id == 2:
assert tc.metatype_column is None
record_cc = tc.columns[RECORD]
assert record_cc.name == RECORD
assert record_cc.maps_to == 'class'
assert record_cc.metaslot is None
if case_id == 2:
field_cc = tc.columns[FIELD]
assert field_cc.name == FIELD
assert field_cc.maps_to == 'slot'
assert field_cc.metaslot is None
if case_id == 1:
sdo_cc = tc.columns[SDO_MAPPINGS]
assert sdo_cc.name == SDO_MAPPINGS
assert sdo_cc.maps_to == 'exact_mappings'
assert sdo_cc.metaslot is not None
assert sdo_cc.metaslot.name == 'exact mappings' or sdo_cc.metaslot.name == 'exact_mappings'
assert sdo_cc.settings.curie_prefix == 'sdo'
wd_cc = tc.columns[WD_MAPPINGS]
assert wd_cc.name == WD_MAPPINGS
assert wd_cc.maps_to == 'exact_mappings'
assert wd_cc.metaslot is not None
assert wd_cc.metaslot.name == 'exact mappings' or wd_cc.metaslot.name == 'exact_mappings'
assert wd_cc.settings.curie_prefix == 'wd'
if case_id == 3:
assert tc.metatype_column == METATYPE
record_cc = tc.columns[METATYPE]
assert record_cc.name == METATYPE
assert record_cc.maps_to == 'metatype'
assert record_cc.metaslot is None
if case_id == 4:
cv_cc = tc.columns[CV]
assert cv_cc.name == CV
assert cv_cc.maps_to == 'enum'
assert cv_cc.metaslot is None
pv_cc = tc.columns[PV]
assert pv_cc.name == PV
assert pv_cc.maps_to == 'permissible_value'
assert pv_cc.metaslot is None
if case_id == 5:
dt_cc = tc.columns[DATATYPE]
assert dt_cc.name == DATATYPE
assert dt_cc.maps_to == 'type'
assert dt_cc.metaslot is None
if case_id == 6:
dt_cc = tc.columns[DATATYPE]
assert dt_cc.name == DATATYPE
assert dt_cc.maps_to == 'type'
assert dt_cc.metaslot is not None
assert dt_cc.metaslot.name == 'type'
<|reserved_special_token_1|>
from schemasheets.schemasheet_datamodel import SchemaSheet
RECORD = "Record"
FIELD = "Field"
METATYPE = "MetaType"
INFO = "Info"
CV = "CV"
PV = "PV"
SDO_MAPPINGS = "schema.org"
WD_MAPPINGS = "wikidata"
DATATYPE = "Datatype"
CASES = [
(1,
[
{
RECORD: "> class",
INFO: " description",
SDO_MAPPINGS: "exact_mappings: {curie_prefix: sdo}",
WD_MAPPINGS: "exact_mappings"
},
{
RECORD: ">",
WD_MAPPINGS: "curie_prefix: wd"
},
]
),
(2,
[
{RECORD: "> class", FIELD: " slot", INFO: " description"},
]
),
(3,
[
{METATYPE: "> metatype", INFO: " description"},
]
),
(4,
[
{CV: "> enum", PV: "permissible_value", INFO: " description"},
]
),
(5,
[
{DATATYPE: "> type", INFO: " description"},
]
),
# unnecessary/incompatible with the latest meta-model
# (6,
# [
# {DATATYPE: "> metaslot.type", INFO: " description"},
# ]
# ),
]
def test_parse_header():
print()
for case_id, case in CASES:
ss = SchemaSheet.from_dictreader(case)
tc = ss.table_config
info_cc = tc.columns[INFO]
assert info_cc.name == INFO
assert info_cc.maps_to == "description"
assert info_cc.metaslot is not None
assert info_cc.metaslot.name == "description"
if case_id == 1 or case_id == 2:
assert tc.metatype_column is None
record_cc = tc.columns[RECORD]
assert record_cc.name == RECORD
assert record_cc.maps_to == "class"
assert record_cc.metaslot is None
if case_id == 2:
field_cc = tc.columns[FIELD]
assert field_cc.name == FIELD
assert field_cc.maps_to == "slot"
assert field_cc.metaslot is None
if case_id == 1:
sdo_cc = tc.columns[SDO_MAPPINGS]
assert sdo_cc.name == SDO_MAPPINGS
assert sdo_cc.maps_to == "exact_mappings"
assert sdo_cc.metaslot is not None
assert sdo_cc.metaslot.name == "exact mappings" or\
sdo_cc.metaslot.name == "exact_mappings"
assert sdo_cc.settings.curie_prefix == "sdo"
wd_cc = tc.columns[WD_MAPPINGS]
assert wd_cc.name == WD_MAPPINGS
assert wd_cc.maps_to == "exact_mappings"
assert wd_cc.metaslot is not None
assert wd_cc.metaslot.name == "exact mappings" or \
wd_cc.metaslot.name == "exact_mappings"
assert wd_cc.settings.curie_prefix == "wd"
if case_id == 3:
assert tc.metatype_column == METATYPE
record_cc = tc.columns[METATYPE]
assert record_cc.name == METATYPE
assert record_cc.maps_to == "metatype"
assert record_cc.metaslot is None
if case_id == 4:
cv_cc = tc.columns[CV]
assert cv_cc.name == CV
assert cv_cc.maps_to == "enum"
assert cv_cc.metaslot is None
pv_cc = tc.columns[PV]
assert pv_cc.name == PV
assert pv_cc.maps_to == "permissible_value"
assert pv_cc.metaslot is None
if case_id == 5:
dt_cc = tc.columns[DATATYPE]
#print(dt_cc)
assert dt_cc.name == DATATYPE
assert dt_cc.maps_to == "type"
assert dt_cc.metaslot is None
if case_id == 6:
# See https://github.com/linkml/schemasheets/issues/75
dt_cc = tc.columns[DATATYPE]
assert dt_cc.name == DATATYPE
assert dt_cc.maps_to == "type"
assert dt_cc.metaslot is not None
assert dt_cc.metaslot.name == "type"
|
flexible
|
{
"blob_id": "25dc0395da1f1ac2ccd990151c3e5b250802b402",
"index": 2749,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_parse_header():\n print()\n for case_id, case in CASES:\n ss = SchemaSheet.from_dictreader(case)\n tc = ss.table_config\n info_cc = tc.columns[INFO]\n assert info_cc.name == INFO\n assert info_cc.maps_to == 'description'\n assert info_cc.metaslot is not None\n assert info_cc.metaslot.name == 'description'\n if case_id == 1 or case_id == 2:\n assert tc.metatype_column is None\n record_cc = tc.columns[RECORD]\n assert record_cc.name == RECORD\n assert record_cc.maps_to == 'class'\n assert record_cc.metaslot is None\n if case_id == 2:\n field_cc = tc.columns[FIELD]\n assert field_cc.name == FIELD\n assert field_cc.maps_to == 'slot'\n assert field_cc.metaslot is None\n if case_id == 1:\n sdo_cc = tc.columns[SDO_MAPPINGS]\n assert sdo_cc.name == SDO_MAPPINGS\n assert sdo_cc.maps_to == 'exact_mappings'\n assert sdo_cc.metaslot is not None\n assert sdo_cc.metaslot.name == 'exact mappings' or sdo_cc.metaslot.name == 'exact_mappings'\n assert sdo_cc.settings.curie_prefix == 'sdo'\n wd_cc = tc.columns[WD_MAPPINGS]\n assert wd_cc.name == WD_MAPPINGS\n assert wd_cc.maps_to == 'exact_mappings'\n assert wd_cc.metaslot is not None\n assert wd_cc.metaslot.name == 'exact mappings' or wd_cc.metaslot.name == 'exact_mappings'\n assert wd_cc.settings.curie_prefix == 'wd'\n if case_id == 3:\n assert tc.metatype_column == METATYPE\n record_cc = tc.columns[METATYPE]\n assert record_cc.name == METATYPE\n assert record_cc.maps_to == 'metatype'\n assert record_cc.metaslot is None\n if case_id == 4:\n cv_cc = tc.columns[CV]\n assert cv_cc.name == CV\n assert cv_cc.maps_to == 'enum'\n assert cv_cc.metaslot is None\n pv_cc = tc.columns[PV]\n assert pv_cc.name == PV\n assert pv_cc.maps_to == 'permissible_value'\n assert pv_cc.metaslot is None\n if case_id == 5:\n dt_cc = tc.columns[DATATYPE]\n assert dt_cc.name == DATATYPE\n assert dt_cc.maps_to == 'type'\n assert dt_cc.metaslot is None\n if case_id == 6:\n dt_cc = tc.columns[DATATYPE]\n assert dt_cc.name == DATATYPE\n assert dt_cc.maps_to == 'type'\n assert dt_cc.metaslot is not None\n assert dt_cc.metaslot.name == 'type'\n",
"step-3": "<mask token>\nRECORD = 'Record'\nFIELD = 'Field'\nMETATYPE = 'MetaType'\nINFO = 'Info'\nCV = 'CV'\nPV = 'PV'\nSDO_MAPPINGS = 'schema.org'\nWD_MAPPINGS = 'wikidata'\nDATATYPE = 'Datatype'\nCASES = [(1, [{RECORD: '> class', INFO: ' description', SDO_MAPPINGS:\n 'exact_mappings: {curie_prefix: sdo}', WD_MAPPINGS: 'exact_mappings'},\n {RECORD: '>', WD_MAPPINGS: 'curie_prefix: wd'}]), (2, [{RECORD:\n '> class', FIELD: ' slot', INFO: ' description'}]), (3, [{METATYPE:\n '> metatype', INFO: ' description'}]), (4, [{CV: '> enum', PV:\n 'permissible_value', INFO: ' description'}]), (5, [{DATATYPE: '> type',\n INFO: ' description'}])]\n\n\ndef test_parse_header():\n print()\n for case_id, case in CASES:\n ss = SchemaSheet.from_dictreader(case)\n tc = ss.table_config\n info_cc = tc.columns[INFO]\n assert info_cc.name == INFO\n assert info_cc.maps_to == 'description'\n assert info_cc.metaslot is not None\n assert info_cc.metaslot.name == 'description'\n if case_id == 1 or case_id == 2:\n assert tc.metatype_column is None\n record_cc = tc.columns[RECORD]\n assert record_cc.name == RECORD\n assert record_cc.maps_to == 'class'\n assert record_cc.metaslot is None\n if case_id == 2:\n field_cc = tc.columns[FIELD]\n assert field_cc.name == FIELD\n assert field_cc.maps_to == 'slot'\n assert field_cc.metaslot is None\n if case_id == 1:\n sdo_cc = tc.columns[SDO_MAPPINGS]\n assert sdo_cc.name == SDO_MAPPINGS\n assert sdo_cc.maps_to == 'exact_mappings'\n assert sdo_cc.metaslot is not None\n assert sdo_cc.metaslot.name == 'exact mappings' or sdo_cc.metaslot.name == 'exact_mappings'\n assert sdo_cc.settings.curie_prefix == 'sdo'\n wd_cc = tc.columns[WD_MAPPINGS]\n assert wd_cc.name == WD_MAPPINGS\n assert wd_cc.maps_to == 'exact_mappings'\n assert wd_cc.metaslot is not None\n assert wd_cc.metaslot.name == 'exact mappings' or wd_cc.metaslot.name == 'exact_mappings'\n assert wd_cc.settings.curie_prefix == 'wd'\n if case_id == 3:\n assert tc.metatype_column == METATYPE\n record_cc = tc.columns[METATYPE]\n assert record_cc.name == METATYPE\n assert record_cc.maps_to == 'metatype'\n assert record_cc.metaslot is None\n if case_id == 4:\n cv_cc = tc.columns[CV]\n assert cv_cc.name == CV\n assert cv_cc.maps_to == 'enum'\n assert cv_cc.metaslot is None\n pv_cc = tc.columns[PV]\n assert pv_cc.name == PV\n assert pv_cc.maps_to == 'permissible_value'\n assert pv_cc.metaslot is None\n if case_id == 5:\n dt_cc = tc.columns[DATATYPE]\n assert dt_cc.name == DATATYPE\n assert dt_cc.maps_to == 'type'\n assert dt_cc.metaslot is None\n if case_id == 6:\n dt_cc = tc.columns[DATATYPE]\n assert dt_cc.name == DATATYPE\n assert dt_cc.maps_to == 'type'\n assert dt_cc.metaslot is not None\n assert dt_cc.metaslot.name == 'type'\n",
"step-4": "from schemasheets.schemasheet_datamodel import SchemaSheet\nRECORD = 'Record'\nFIELD = 'Field'\nMETATYPE = 'MetaType'\nINFO = 'Info'\nCV = 'CV'\nPV = 'PV'\nSDO_MAPPINGS = 'schema.org'\nWD_MAPPINGS = 'wikidata'\nDATATYPE = 'Datatype'\nCASES = [(1, [{RECORD: '> class', INFO: ' description', SDO_MAPPINGS:\n 'exact_mappings: {curie_prefix: sdo}', WD_MAPPINGS: 'exact_mappings'},\n {RECORD: '>', WD_MAPPINGS: 'curie_prefix: wd'}]), (2, [{RECORD:\n '> class', FIELD: ' slot', INFO: ' description'}]), (3, [{METATYPE:\n '> metatype', INFO: ' description'}]), (4, [{CV: '> enum', PV:\n 'permissible_value', INFO: ' description'}]), (5, [{DATATYPE: '> type',\n INFO: ' description'}])]\n\n\ndef test_parse_header():\n print()\n for case_id, case in CASES:\n ss = SchemaSheet.from_dictreader(case)\n tc = ss.table_config\n info_cc = tc.columns[INFO]\n assert info_cc.name == INFO\n assert info_cc.maps_to == 'description'\n assert info_cc.metaslot is not None\n assert info_cc.metaslot.name == 'description'\n if case_id == 1 or case_id == 2:\n assert tc.metatype_column is None\n record_cc = tc.columns[RECORD]\n assert record_cc.name == RECORD\n assert record_cc.maps_to == 'class'\n assert record_cc.metaslot is None\n if case_id == 2:\n field_cc = tc.columns[FIELD]\n assert field_cc.name == FIELD\n assert field_cc.maps_to == 'slot'\n assert field_cc.metaslot is None\n if case_id == 1:\n sdo_cc = tc.columns[SDO_MAPPINGS]\n assert sdo_cc.name == SDO_MAPPINGS\n assert sdo_cc.maps_to == 'exact_mappings'\n assert sdo_cc.metaslot is not None\n assert sdo_cc.metaslot.name == 'exact mappings' or sdo_cc.metaslot.name == 'exact_mappings'\n assert sdo_cc.settings.curie_prefix == 'sdo'\n wd_cc = tc.columns[WD_MAPPINGS]\n assert wd_cc.name == WD_MAPPINGS\n assert wd_cc.maps_to == 'exact_mappings'\n assert wd_cc.metaslot is not None\n assert wd_cc.metaslot.name == 'exact mappings' or wd_cc.metaslot.name == 'exact_mappings'\n assert wd_cc.settings.curie_prefix == 'wd'\n if case_id == 3:\n assert tc.metatype_column == METATYPE\n record_cc = tc.columns[METATYPE]\n assert record_cc.name == METATYPE\n assert record_cc.maps_to == 'metatype'\n assert record_cc.metaslot is None\n if case_id == 4:\n cv_cc = tc.columns[CV]\n assert cv_cc.name == CV\n assert cv_cc.maps_to == 'enum'\n assert cv_cc.metaslot is None\n pv_cc = tc.columns[PV]\n assert pv_cc.name == PV\n assert pv_cc.maps_to == 'permissible_value'\n assert pv_cc.metaslot is None\n if case_id == 5:\n dt_cc = tc.columns[DATATYPE]\n assert dt_cc.name == DATATYPE\n assert dt_cc.maps_to == 'type'\n assert dt_cc.metaslot is None\n if case_id == 6:\n dt_cc = tc.columns[DATATYPE]\n assert dt_cc.name == DATATYPE\n assert dt_cc.maps_to == 'type'\n assert dt_cc.metaslot is not None\n assert dt_cc.metaslot.name == 'type'\n",
"step-5": "from schemasheets.schemasheet_datamodel import SchemaSheet\n\nRECORD = \"Record\"\nFIELD = \"Field\"\nMETATYPE = \"MetaType\"\nINFO = \"Info\"\nCV = \"CV\"\nPV = \"PV\"\nSDO_MAPPINGS = \"schema.org\"\nWD_MAPPINGS = \"wikidata\"\nDATATYPE = \"Datatype\"\n\nCASES = [\n (1,\n [\n {\n RECORD: \"> class\",\n INFO: \" description\",\n SDO_MAPPINGS: \"exact_mappings: {curie_prefix: sdo}\",\n WD_MAPPINGS: \"exact_mappings\"\n },\n {\n RECORD: \">\",\n WD_MAPPINGS: \"curie_prefix: wd\"\n },\n ]\n ),\n (2,\n [\n {RECORD: \"> class\", FIELD: \" slot\", INFO: \" description\"},\n ]\n ),\n (3,\n [\n {METATYPE: \"> metatype\", INFO: \" description\"},\n ]\n ),\n (4,\n [\n {CV: \"> enum\", PV: \"permissible_value\", INFO: \" description\"},\n ]\n ),\n (5,\n [\n {DATATYPE: \"> type\", INFO: \" description\"},\n ]\n ),\n # unnecessary/incompatible with the latest meta-model\n # (6,\n # [\n # {DATATYPE: \"> metaslot.type\", INFO: \" description\"},\n # ]\n # ),\n]\n\ndef test_parse_header():\n print()\n for case_id, case in CASES:\n ss = SchemaSheet.from_dictreader(case)\n tc = ss.table_config\n info_cc = tc.columns[INFO]\n assert info_cc.name == INFO\n assert info_cc.maps_to == \"description\"\n assert info_cc.metaslot is not None\n assert info_cc.metaslot.name == \"description\"\n if case_id == 1 or case_id == 2:\n assert tc.metatype_column is None\n record_cc = tc.columns[RECORD]\n assert record_cc.name == RECORD\n assert record_cc.maps_to == \"class\"\n assert record_cc.metaslot is None\n if case_id == 2:\n field_cc = tc.columns[FIELD]\n assert field_cc.name == FIELD\n assert field_cc.maps_to == \"slot\"\n assert field_cc.metaslot is None\n if case_id == 1:\n sdo_cc = tc.columns[SDO_MAPPINGS]\n assert sdo_cc.name == SDO_MAPPINGS\n assert sdo_cc.maps_to == \"exact_mappings\"\n assert sdo_cc.metaslot is not None\n assert sdo_cc.metaslot.name == \"exact mappings\" or\\\n sdo_cc.metaslot.name == \"exact_mappings\"\n assert sdo_cc.settings.curie_prefix == \"sdo\"\n wd_cc = tc.columns[WD_MAPPINGS]\n assert wd_cc.name == WD_MAPPINGS\n assert wd_cc.maps_to == \"exact_mappings\"\n assert wd_cc.metaslot is not None\n assert wd_cc.metaslot.name == \"exact mappings\" or \\\n wd_cc.metaslot.name == \"exact_mappings\"\n assert wd_cc.settings.curie_prefix == \"wd\"\n if case_id == 3:\n assert tc.metatype_column == METATYPE\n record_cc = tc.columns[METATYPE]\n assert record_cc.name == METATYPE\n assert record_cc.maps_to == \"metatype\"\n assert record_cc.metaslot is None\n if case_id == 4:\n cv_cc = tc.columns[CV]\n assert cv_cc.name == CV\n assert cv_cc.maps_to == \"enum\"\n assert cv_cc.metaslot is None\n pv_cc = tc.columns[PV]\n assert pv_cc.name == PV\n assert pv_cc.maps_to == \"permissible_value\"\n assert pv_cc.metaslot is None\n if case_id == 5:\n dt_cc = tc.columns[DATATYPE]\n #print(dt_cc)\n assert dt_cc.name == DATATYPE\n assert dt_cc.maps_to == \"type\"\n assert dt_cc.metaslot is None\n if case_id == 6:\n # See https://github.com/linkml/schemasheets/issues/75\n dt_cc = tc.columns[DATATYPE]\n assert dt_cc.name == DATATYPE\n assert dt_cc.maps_to == \"type\"\n assert dt_cc.metaslot is not None\n assert dt_cc.metaslot.name == \"type\"\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
import folium
ctx = '../data/'
json = ctx + 'us-states.json'
csv = ctx + 'US_Unemployment_Oct2012.csv'
data = pd.read_csv(csv)
m = folium.Map(location=[37, -102], zoom_start=5)
m.choropleth(geo_data=json, name='choropleth', data=data, columns=['State',
'Unemployment'], Key_on='feature.id', fill_color='YlGn', fill_opacity=
0.7, line_opacity=0.2, legend_name='Unemployment Rate (%)')
folium.LayerControl().add_to(m)
m.save(ctx + 'result.html')
|
normal
|
{
"blob_id": "382cb55a6b849f0240276d8f45746e995b16d714",
"index": 4455,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nm.choropleth(geo_data=json, name='choropleth', data=data, columns=['State',\n 'Unemployment'], Key_on='feature.id', fill_color='YlGn', fill_opacity=\n 0.7, line_opacity=0.2, legend_name='Unemployment Rate (%)')\nfolium.LayerControl().add_to(m)\nm.save(ctx + 'result.html')\n",
"step-3": "<mask token>\nctx = '../data/'\njson = ctx + 'us-states.json'\ncsv = ctx + 'US_Unemployment_Oct2012.csv'\ndata = pd.read_csv(csv)\nm = folium.Map(location=[37, -102], zoom_start=5)\nm.choropleth(geo_data=json, name='choropleth', data=data, columns=['State',\n 'Unemployment'], Key_on='feature.id', fill_color='YlGn', fill_opacity=\n 0.7, line_opacity=0.2, legend_name='Unemployment Rate (%)')\nfolium.LayerControl().add_to(m)\nm.save(ctx + 'result.html')\n",
"step-4": "import pandas as pd\nimport folium\nctx = '../data/'\njson = ctx + 'us-states.json'\ncsv = ctx + 'US_Unemployment_Oct2012.csv'\ndata = pd.read_csv(csv)\nm = folium.Map(location=[37, -102], zoom_start=5)\nm.choropleth(geo_data=json, name='choropleth', data=data, columns=['State',\n 'Unemployment'], Key_on='feature.id', fill_color='YlGn', fill_opacity=\n 0.7, line_opacity=0.2, legend_name='Unemployment Rate (%)')\nfolium.LayerControl().add_to(m)\nm.save(ctx + 'result.html')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# cases where DictAchievement should unlock
# >> CASE
{'name': 'John Doe', 'age': 24}
# >> CASE
{
'name': 'John Doe',
'age': 24
}
# >> CASE
func({'name': 'John Doe', 'age': 24})
|
normal
|
{
"blob_id": "874fa2a6afdd04f3f2232a86f56d220447160ede",
"index": 5167,
"step-1": "<mask token>\n",
"step-2": "{'name': 'John Doe', 'age': 24}\n{'name': 'John Doe', 'age': 24}\nfunc({'name': 'John Doe', 'age': 24})\n",
"step-3": "# cases where DictAchievement should unlock\n\n# >> CASE\n{'name': 'John Doe', 'age': 24}\n\n# >> CASE\n{\n 'name': 'John Doe',\n 'age': 24\n}\n\n# >> CASE\nfunc({'name': 'John Doe', 'age': 24})\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import json
import sys
from os import listdir
from os.path import isfile, join
import params
def encodeText(tweet_text):
tweet_text = tweet_text.replace('\n',' ')
return str(tweet_text)
def parse_file(file_in, file_out):
ptrFile_in = open(file_in, "r")
ptrFile_out = open(file_out, "w", encoding="utf-8")
cleanLines = []
for line in ptrFile_in:
cleanLine = {}
line = line.rstrip()
if line != "":
try:
decoded = json.loads(line)
cleanLine.update({"id" : decoded['id']})
cleanLine.update({"date" : decoded['created_at']})
if decoded.get('extended_tweet') is not None:
cleanLine.update({"text": encodeText(decoded['extended_tweet']['full_text'])})
else:
cleanLine.update({"text": encodeText(decoded['text'])})
cleanLine.update({"user_id" : decoded['user']['id']})
cleanLine.update({"user_name" : '@' + decoded['user']['screen_name']})
if decoded.get('place') is not None:
cleanLine.update({"location" : {"country": decoded['place']['country'], "city": decoded['place']['name']} })
else:
cleanLine.update({"location" : {} })
if decoded.get('retweeted_status') is not None:
cleanLine.update({"retweeted" : True })
if decoded.get('retweeted_status').get('extended_tweet') is not None:
cleanLine.update({"RT_text" : encodeText(decoded['retweeted_status']['extended_tweet']['full_text']) })
else:
cleanLine.update({"RT_text" : encodeText(decoded['retweeted_status']['text']) })
cleanLine.update({"RT_user_id" : decoded['retweeted_status']['user']['id'] })
cleanLine.update({"RT_user_name" : '@' + decoded['retweeted_status']['user']['screen_name'] })
else:
cleanLine.update({"retweeted" : False})
cleanLines.append(cleanLine)
except Exception as e:
print(e, " :: ", line)
ptrFile_out.write(json.dumps(cleanLines, ensure_ascii=False))
ptrFile_out.close()
if __name__ == '__main__':
path_in = params.folder_path
path_out = params.clean_path
for f in listdir(path_in):
file_in = join(path_in, f)
file_out = join(path_out, f)
if isfile(file_in):
parse_file(file_in, file_out)
|
normal
|
{
"blob_id": "e3afaabc1f7f64b9189fc88dd478ed75e81f35e1",
"index": 4564,
"step-1": "<mask token>\n\n\ndef parse_file(file_in, file_out):\n ptrFile_in = open(file_in, 'r')\n ptrFile_out = open(file_out, 'w', encoding='utf-8')\n cleanLines = []\n for line in ptrFile_in:\n cleanLine = {}\n line = line.rstrip()\n if line != '':\n try:\n decoded = json.loads(line)\n cleanLine.update({'id': decoded['id']})\n cleanLine.update({'date': decoded['created_at']})\n if decoded.get('extended_tweet') is not None:\n cleanLine.update({'text': encodeText(decoded[\n 'extended_tweet']['full_text'])})\n else:\n cleanLine.update({'text': encodeText(decoded['text'])})\n cleanLine.update({'user_id': decoded['user']['id']})\n cleanLine.update({'user_name': '@' + decoded['user'][\n 'screen_name']})\n if decoded.get('place') is not None:\n cleanLine.update({'location': {'country': decoded[\n 'place']['country'], 'city': decoded['place']['name']}}\n )\n else:\n cleanLine.update({'location': {}})\n if decoded.get('retweeted_status') is not None:\n cleanLine.update({'retweeted': True})\n if decoded.get('retweeted_status').get('extended_tweet'\n ) is not None:\n cleanLine.update({'RT_text': encodeText(decoded[\n 'retweeted_status']['extended_tweet'][\n 'full_text'])})\n else:\n cleanLine.update({'RT_text': encodeText(decoded[\n 'retweeted_status']['text'])})\n cleanLine.update({'RT_user_id': decoded[\n 'retweeted_status']['user']['id']})\n cleanLine.update({'RT_user_name': '@' + decoded[\n 'retweeted_status']['user']['screen_name']})\n else:\n cleanLine.update({'retweeted': False})\n cleanLines.append(cleanLine)\n except Exception as e:\n print(e, ' :: ', line)\n ptrFile_out.write(json.dumps(cleanLines, ensure_ascii=False))\n ptrFile_out.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef encodeText(tweet_text):\n tweet_text = tweet_text.replace('\\n', ' ')\n return str(tweet_text)\n\n\ndef parse_file(file_in, file_out):\n ptrFile_in = open(file_in, 'r')\n ptrFile_out = open(file_out, 'w', encoding='utf-8')\n cleanLines = []\n for line in ptrFile_in:\n cleanLine = {}\n line = line.rstrip()\n if line != '':\n try:\n decoded = json.loads(line)\n cleanLine.update({'id': decoded['id']})\n cleanLine.update({'date': decoded['created_at']})\n if decoded.get('extended_tweet') is not None:\n cleanLine.update({'text': encodeText(decoded[\n 'extended_tweet']['full_text'])})\n else:\n cleanLine.update({'text': encodeText(decoded['text'])})\n cleanLine.update({'user_id': decoded['user']['id']})\n cleanLine.update({'user_name': '@' + decoded['user'][\n 'screen_name']})\n if decoded.get('place') is not None:\n cleanLine.update({'location': {'country': decoded[\n 'place']['country'], 'city': decoded['place']['name']}}\n )\n else:\n cleanLine.update({'location': {}})\n if decoded.get('retweeted_status') is not None:\n cleanLine.update({'retweeted': True})\n if decoded.get('retweeted_status').get('extended_tweet'\n ) is not None:\n cleanLine.update({'RT_text': encodeText(decoded[\n 'retweeted_status']['extended_tweet'][\n 'full_text'])})\n else:\n cleanLine.update({'RT_text': encodeText(decoded[\n 'retweeted_status']['text'])})\n cleanLine.update({'RT_user_id': decoded[\n 'retweeted_status']['user']['id']})\n cleanLine.update({'RT_user_name': '@' + decoded[\n 'retweeted_status']['user']['screen_name']})\n else:\n cleanLine.update({'retweeted': False})\n cleanLines.append(cleanLine)\n except Exception as e:\n print(e, ' :: ', line)\n ptrFile_out.write(json.dumps(cleanLines, ensure_ascii=False))\n ptrFile_out.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef encodeText(tweet_text):\n tweet_text = tweet_text.replace('\\n', ' ')\n return str(tweet_text)\n\n\ndef parse_file(file_in, file_out):\n ptrFile_in = open(file_in, 'r')\n ptrFile_out = open(file_out, 'w', encoding='utf-8')\n cleanLines = []\n for line in ptrFile_in:\n cleanLine = {}\n line = line.rstrip()\n if line != '':\n try:\n decoded = json.loads(line)\n cleanLine.update({'id': decoded['id']})\n cleanLine.update({'date': decoded['created_at']})\n if decoded.get('extended_tweet') is not None:\n cleanLine.update({'text': encodeText(decoded[\n 'extended_tweet']['full_text'])})\n else:\n cleanLine.update({'text': encodeText(decoded['text'])})\n cleanLine.update({'user_id': decoded['user']['id']})\n cleanLine.update({'user_name': '@' + decoded['user'][\n 'screen_name']})\n if decoded.get('place') is not None:\n cleanLine.update({'location': {'country': decoded[\n 'place']['country'], 'city': decoded['place']['name']}}\n )\n else:\n cleanLine.update({'location': {}})\n if decoded.get('retweeted_status') is not None:\n cleanLine.update({'retweeted': True})\n if decoded.get('retweeted_status').get('extended_tweet'\n ) is not None:\n cleanLine.update({'RT_text': encodeText(decoded[\n 'retweeted_status']['extended_tweet'][\n 'full_text'])})\n else:\n cleanLine.update({'RT_text': encodeText(decoded[\n 'retweeted_status']['text'])})\n cleanLine.update({'RT_user_id': decoded[\n 'retweeted_status']['user']['id']})\n cleanLine.update({'RT_user_name': '@' + decoded[\n 'retweeted_status']['user']['screen_name']})\n else:\n cleanLine.update({'retweeted': False})\n cleanLines.append(cleanLine)\n except Exception as e:\n print(e, ' :: ', line)\n ptrFile_out.write(json.dumps(cleanLines, ensure_ascii=False))\n ptrFile_out.close()\n\n\nif __name__ == '__main__':\n path_in = params.folder_path\n path_out = params.clean_path\n for f in listdir(path_in):\n file_in = join(path_in, f)\n file_out = join(path_out, f)\n if isfile(file_in):\n parse_file(file_in, file_out)\n",
"step-4": "import json\nimport sys\nfrom os import listdir\nfrom os.path import isfile, join\nimport params\n\n\ndef encodeText(tweet_text):\n tweet_text = tweet_text.replace('\\n', ' ')\n return str(tweet_text)\n\n\ndef parse_file(file_in, file_out):\n ptrFile_in = open(file_in, 'r')\n ptrFile_out = open(file_out, 'w', encoding='utf-8')\n cleanLines = []\n for line in ptrFile_in:\n cleanLine = {}\n line = line.rstrip()\n if line != '':\n try:\n decoded = json.loads(line)\n cleanLine.update({'id': decoded['id']})\n cleanLine.update({'date': decoded['created_at']})\n if decoded.get('extended_tweet') is not None:\n cleanLine.update({'text': encodeText(decoded[\n 'extended_tweet']['full_text'])})\n else:\n cleanLine.update({'text': encodeText(decoded['text'])})\n cleanLine.update({'user_id': decoded['user']['id']})\n cleanLine.update({'user_name': '@' + decoded['user'][\n 'screen_name']})\n if decoded.get('place') is not None:\n cleanLine.update({'location': {'country': decoded[\n 'place']['country'], 'city': decoded['place']['name']}}\n )\n else:\n cleanLine.update({'location': {}})\n if decoded.get('retweeted_status') is not None:\n cleanLine.update({'retweeted': True})\n if decoded.get('retweeted_status').get('extended_tweet'\n ) is not None:\n cleanLine.update({'RT_text': encodeText(decoded[\n 'retweeted_status']['extended_tweet'][\n 'full_text'])})\n else:\n cleanLine.update({'RT_text': encodeText(decoded[\n 'retweeted_status']['text'])})\n cleanLine.update({'RT_user_id': decoded[\n 'retweeted_status']['user']['id']})\n cleanLine.update({'RT_user_name': '@' + decoded[\n 'retweeted_status']['user']['screen_name']})\n else:\n cleanLine.update({'retweeted': False})\n cleanLines.append(cleanLine)\n except Exception as e:\n print(e, ' :: ', line)\n ptrFile_out.write(json.dumps(cleanLines, ensure_ascii=False))\n ptrFile_out.close()\n\n\nif __name__ == '__main__':\n path_in = params.folder_path\n path_out = params.clean_path\n for f in listdir(path_in):\n file_in = join(path_in, f)\n file_out = join(path_out, f)\n if isfile(file_in):\n parse_file(file_in, file_out)\n",
"step-5": "import json\nimport sys\nfrom os import listdir\nfrom os.path import isfile, join\nimport params\n\ndef encodeText(tweet_text): \n tweet_text = tweet_text.replace('\\n',' ') \n return str(tweet_text)\n\ndef parse_file(file_in, file_out):\n ptrFile_in = open(file_in, \"r\")\n ptrFile_out = open(file_out, \"w\", encoding=\"utf-8\")\n\n cleanLines = []\n for line in ptrFile_in:\n cleanLine = {}\n line = line.rstrip()\n if line != \"\":\n try:\n decoded = json.loads(line)\n cleanLine.update({\"id\" : decoded['id']})\n cleanLine.update({\"date\" : decoded['created_at']})\n if decoded.get('extended_tweet') is not None: \n cleanLine.update({\"text\": encodeText(decoded['extended_tweet']['full_text'])})\n else:\n cleanLine.update({\"text\": encodeText(decoded['text'])}) \n cleanLine.update({\"user_id\" : decoded['user']['id']}) \n cleanLine.update({\"user_name\" : '@' + decoded['user']['screen_name']}) \n\n if decoded.get('place') is not None: \n cleanLine.update({\"location\" : {\"country\": decoded['place']['country'], \"city\": decoded['place']['name']} }) \n else:\n cleanLine.update({\"location\" : {} })\n\n if decoded.get('retweeted_status') is not None: \n cleanLine.update({\"retweeted\" : True })\n if decoded.get('retweeted_status').get('extended_tweet') is not None:\n cleanLine.update({\"RT_text\" : encodeText(decoded['retweeted_status']['extended_tweet']['full_text']) })\n else:\n cleanLine.update({\"RT_text\" : encodeText(decoded['retweeted_status']['text']) })\n cleanLine.update({\"RT_user_id\" : decoded['retweeted_status']['user']['id'] })\n cleanLine.update({\"RT_user_name\" : '@' + decoded['retweeted_status']['user']['screen_name'] }) \n else:\n cleanLine.update({\"retweeted\" : False}) \n \n cleanLines.append(cleanLine)\n except Exception as e:\n print(e, \" :: \", line)\n\n ptrFile_out.write(json.dumps(cleanLines, ensure_ascii=False))\n ptrFile_out.close() \n\nif __name__ == '__main__':\n path_in = params.folder_path \n path_out = params.clean_path\n\n for f in listdir(path_in):\n file_in = join(path_in, f) \n file_out = join(path_out, f) \n if isfile(file_in):\n parse_file(file_in, file_out)\n\n\n \n\n \n \n \n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
result_dir = 'results'
data_dir = 'datasets'
cache_dir = f'{ROOT_PATH}/data/cache'
run_dir_ignore = ['results', 'datasets', 'cache']
use_treeconnect = False
treeconnect_threshold = 1024
vgg16 = 'vgg16_zhang_perceptual.pkl'
model = 'stylegan2-ffhq-config-f.pkl'
networks_urls = {'european': [
'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',
'generator_model-stylegan2-config-f.pkl'], 'asian': [
'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',
'generator_yellow-stylegan2-config-f.pkl'], 'asian beauty': [
'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',
'generator_star-stylegan2-config-f.pkl'], 'baby': [
'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',
'generator_baby-stylegan2-config-f.pkl']}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from facegan import ROOT_PATH
result_dir = 'results'
data_dir = 'datasets'
cache_dir = f'{ROOT_PATH}/data/cache'
run_dir_ignore = ['results', 'datasets', 'cache']
use_treeconnect = False
treeconnect_threshold = 1024
vgg16 = 'vgg16_zhang_perceptual.pkl'
model = 'stylegan2-ffhq-config-f.pkl'
networks_urls = {'european': [
'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',
'generator_model-stylegan2-config-f.pkl'], 'asian': [
'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',
'generator_yellow-stylegan2-config-f.pkl'], 'asian beauty': [
'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',
'generator_star-stylegan2-config-f.pkl'], 'baby': [
'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',
'generator_baby-stylegan2-config-f.pkl']}
<|reserved_special_token_1|>
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Global configuration."""
# ----------------------------------------------------------------------------
# Paths.
from facegan import ROOT_PATH
result_dir = 'results'
data_dir = 'datasets'
cache_dir = f'{ROOT_PATH}/data/cache'
run_dir_ignore = ['results', 'datasets', 'cache']
# experimental - replace Dense layers with TreeConnect
use_treeconnect = False
treeconnect_threshold = 1024
# ----------------------------------------------------------------------------
vgg16 = 'vgg16_zhang_perceptual.pkl'
model = 'stylegan2-ffhq-config-f.pkl'
networks_urls = {
'european': [
'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',
'generator_model-stylegan2-config-f.pkl'
],
'asian': [
'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',
'generator_yellow-stylegan2-config-f.pkl'
],
'asian beauty': [
'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',
'generator_star-stylegan2-config-f.pkl'
],
'baby': [
'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',
'generator_baby-stylegan2-config-f.pkl'
],
}
|
flexible
|
{
"blob_id": "cb904408486ad9ea8cc0c8ff2ec393e480309a57",
"index": 2403,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nresult_dir = 'results'\ndata_dir = 'datasets'\ncache_dir = f'{ROOT_PATH}/data/cache'\nrun_dir_ignore = ['results', 'datasets', 'cache']\nuse_treeconnect = False\ntreeconnect_threshold = 1024\nvgg16 = 'vgg16_zhang_perceptual.pkl'\nmodel = 'stylegan2-ffhq-config-f.pkl'\nnetworks_urls = {'european': [\n 'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',\n 'generator_model-stylegan2-config-f.pkl'], 'asian': [\n 'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',\n 'generator_yellow-stylegan2-config-f.pkl'], 'asian beauty': [\n 'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',\n 'generator_star-stylegan2-config-f.pkl'], 'baby': [\n 'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',\n 'generator_baby-stylegan2-config-f.pkl']}\n",
"step-3": "<mask token>\nfrom facegan import ROOT_PATH\nresult_dir = 'results'\ndata_dir = 'datasets'\ncache_dir = f'{ROOT_PATH}/data/cache'\nrun_dir_ignore = ['results', 'datasets', 'cache']\nuse_treeconnect = False\ntreeconnect_threshold = 1024\nvgg16 = 'vgg16_zhang_perceptual.pkl'\nmodel = 'stylegan2-ffhq-config-f.pkl'\nnetworks_urls = {'european': [\n 'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',\n 'generator_model-stylegan2-config-f.pkl'], 'asian': [\n 'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',\n 'generator_yellow-stylegan2-config-f.pkl'], 'asian beauty': [\n 'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',\n 'generator_star-stylegan2-config-f.pkl'], 'baby': [\n 'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',\n 'generator_baby-stylegan2-config-f.pkl']}\n",
"step-4": "# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# This work is licensed under the Creative Commons Attribution-NonCommercial\n# 4.0 International License. To view a copy of this license, visit\n# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to\n# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.\n\"\"\"Global configuration.\"\"\"\n\n# ----------------------------------------------------------------------------\n# Paths.\nfrom facegan import ROOT_PATH\n\nresult_dir = 'results'\ndata_dir = 'datasets'\ncache_dir = f'{ROOT_PATH}/data/cache'\nrun_dir_ignore = ['results', 'datasets', 'cache']\n\n# experimental - replace Dense layers with TreeConnect\nuse_treeconnect = False\ntreeconnect_threshold = 1024\n\n# ----------------------------------------------------------------------------\n\nvgg16 = 'vgg16_zhang_perceptual.pkl'\nmodel = 'stylegan2-ffhq-config-f.pkl'\n\nnetworks_urls = {\n 'european': [\n 'https://drive.google.com/uc?id=1--kh2Em5U1qh-H7Lin9FzppkZCQ18c4W',\n 'generator_model-stylegan2-config-f.pkl'\n ],\n 'asian': [\n 'https://drive.google.com/uc?id=1-3XU6KzIVywFoKXx2zG1hW8mH4OYpyO9',\n 'generator_yellow-stylegan2-config-f.pkl'\n ],\n 'asian beauty': [\n 'https://drive.google.com/uc?id=1-04v78_pI59M0IvhcKxsm3YhK2-plnbj',\n 'generator_star-stylegan2-config-f.pkl'\n ],\n 'baby': [\n 'https://drive.google.com/uc?id=1--684mANXSgC3aDhLc7lPM7OBHWuVRXa',\n 'generator_baby-stylegan2-config-f.pkl'\n ],\n}\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('test 123123')
|
flexible
|
{
"blob_id": "c6d8b9faa610e817c449eee94d73c61cb62fa272",
"index": 8878,
"step-1": "<mask token>\n",
"step-2": "print('test 123123')\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def forc(X):
Xi = X['Xi']
Yi = X['Yi']
Zi = X['Zi']
SEi = X['SEi']
Pi = X['Pi']
Hc1 = X['Hc1']
Hc2 = X['Hc2']
Hb1 = X['Hb1']
Hb2 = X['Hb2']
style = {'description_width': 'initial'}
colorbar_widge = widgets.Checkbox(value=False, description=
'Show final FORC plot', style=style)
pval_widge = widgets.Checkbox(value=False, description=
'Show 0.05 significance contour', style=style)
colormin_widge = widgets.FloatSlider(value=0.0, min=0.0, max=0.999,
step=0.001, description='Rescale colormap minimum', disabled=False,
continuous_update=False, orientation='horizontal', readout=False,
readout_format='.2f', style=style)
colormax_widge = widgets.FloatSlider(value=1.0, min=0.001, max=1, step=
0.001, description='Rescale colormap maximum', disabled=False,
continuous_update=False, orientation='horizontal', readout=False,
readout_format='.2f', style=style)
contour_widge = widgets.Select(options=[['Select contour frequency', -1
], ['Every level', 1], ['Every 2nd level', 2], ['Every 3rd level',
3], ['Every 4th level', 4], ['Every 5th level', 5], [
'Every 10th level', 10], ['Every 20th level', 20], [
'Every 50th level', 50]], value=-1, rows=1, description=
'Plot contours', style=style)
contourpts_widge = widgets.FloatSlider(value=1.0, min=0.5, max=3.0,
step=0.5, description='Contour line width [pts]', style=style)
download_widge = widgets.Checkbox(value=False, description=
'Download plot', style=style)
level_widge = widgets.Select(options=[['20', 20], ['30', 30], ['50', 50
], ['75', 75], ['100', 100], ['200', 200], ['500', 500]], value=100,
rows=1, description='Number of color levels', style=style)
if X['unit'] == 'SI':
xmin_widge = widgets.FloatText(value=0, description=
'Minimum B$_\\mathrm{c}$ [Oe]', style=style, step=10)
xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,
description='Maximum B$_\\mathrm{c}$ [Oe]', style=style, step=10)
ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /
1000, description='Minimum B$_\\mathrm{u}$ [Oe]', style=style,
step=10)
ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,
description='Maximum B$_\\mathrm{u}$ [Oe]', style=style, step=10)
elif X['unit'] == 'cgs':
xmin_widge = widgets.FloatText(value=0, description=
'Minimum H$_\\mathrm{c}$ [Oe]', style=style, step=10)
xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,
description='Maximum H$_\\mathrm{c}$ [Oe]', style=style, step=10)
ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /
1000, description='Minimum H$_\\mathrm{u}$ [Oe]', style=style,
step=10)
ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,
description='Maximum H$_\\mathrm{u}$ [Oe]', style=style, step=10)
x = interactive(forcplot, Xi=fixed(Xi), Yi=fixed(Yi), Zi=fixed(Zi), SEi
=fixed(SEi), Pi=fixed(Pi), fn=fixed(X['sample']), mass=fixed(X[
'mass']), unit=fixed(X['unit']), colorbar=colorbar_widge, level=
level_widge, contour=contour_widge, contourpts=contourpts_widge,
xmin=xmin_widge, xmax=xmax_widge, ymin=ymin_widge, ymax=ymax_widge,
colormin=colormin_widge, colormax=colormax_widge, download=
download_widge)
tab_nest = widgets.Tab()
tab_nest.set_title(0, 'FORC PLOTTING')
tab_nest.children = [VBox(children=x.children)]
display(tab_nest)
def forcplot(Xi, Yi, Zi, SEi, Pi, fn, mass, unit, colorbar, level, contour,
contourpts, xmin, xmax, ymin, ymax, colormin, colormax, download):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(1, 1, 1)
if mass.value < 0.0:
Xi_new = Xi
Yi_new = Yi
Zi_new = Zi
SEi_new = SEi
Pi_new = Pi
SEi_new[Zi_new == 0.0] = 0.0
SEi_new[np.isnan(SEi_new)] = 0.0
if unit == 'SI':
xlabel_text = 'B$_\\mathrm{c}$ [T]'
xlabel_csv = 'Bc [T]'
ylabel_text = 'B$_\\mathrm{u}$ [T]'
ylabel_csv = 'Bu [T]'
cbar_text = 'Am$^2$ T$^{-2}$'
se_csv = 'rho [Am**2 / T**2]'
elif unit == 'cgs':
xlabel_text = 'H$_\\mathrm{c}$ [Oe]'
xlabel_csv = 'Hc [Oe]'
ylabel_text = 'H$_\\mathrm{u}$ [Oe]'
ylabel_csv = 'Hu [Oe]'
cbar_text = 'emu$ Oe$^{-2}$'
se_csv = 'rho [emu / Oe**2]'
else:
Xi_new = Xi
Yi_new = Yi
Zi_new = Zi / (mass.value / 1000.0)
SEi_new = SEi / (mass.value / 1000.0)
SEi_new[Zi_new == 0.0] = 0.0
SEi_new[np.isnan(SEi_new)] = 0.0
Pi_new = Pi
if unit == 'SI':
Zi_new = Zi / (mass.value / 1000.0)
SEi_new = SEi / (mass.value / 1000.0)
xlabel_text = 'B$_\\mathrm{c}$ [T]'
xlabel_csv = 'Bc [T]'
ylabel_text = 'B$_\\mathrm{u}$ [T]'
ylabel_csv = 'Bu [T]'
cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$'
se_csv = 'se [Am**2 / T**2 / kg]'
elif unit == 'cgs':
Zi_new = Zi / mass.value
SEi_new = SEi / mass.value
xlabel_text = 'H$_\\mathrm{c}$ [Oe]'
xlabel_csv = 'Hc [Oe]'
ylabel_text = 'H$_\\mathrm{u}$ [Oe]'
ylabel_csv = 'Hu [Oe]'
cbar_text = 'emu Oe$^{-2}$ g$^{-1}$'
se_csv = 'se [emu/ Oe**2 / g]'
SEi_new[Zi_new == 0.0] = 0.0
SEi_new[np.isnan(SEi_new)] = 0.0
idx = (Xi_new >= xmin) & (Xi_new <= xmax) & (Yi_new >= ymin) & (Yi_new <=
ymax)
cmap, vmin, vmax = FORCinel_colormap(Zi_new[idx])
Zi_trunc = np.copy(Zi_new)
Zi_trunc[np.isnan(Zi_trunc)] = 0.0
Zi_trunc[Zi_trunc < vmin] = vmin
vmini = vmin * (1 - colormin)
vmaxi = vmax * colormax
idx = (Zi_trunc >= vmini) & (Zi_trunc <= vmaxi)
cmap, vmin, vmax = FORCinel_colormap(Zi_trunc[idx])
CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap=cmap, vmin=vmin,
vmax=vmax)
if (contour > 0) & (contour < level):
CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',
linewidths=contourpts)
ax.set_xlabel(xlabel_text, fontsize=14)
ax.set_ylabel(ylabel_text, fontsize=14)
xlimits = np.sort((xmin, xmax))
ax.set_xlim(xlimits)
ylimits = np.sort((ymin, ymax))
ax.set_ylim(ylimits)
ax.tick_params(labelsize=14)
ax.set_aspect('equal')
ax.minorticks_on()
if colorbar == True:
cbar = fig.colorbar(CS, fraction=0.04, pad=0.08, format='%.2e')
cbar.ax.tick_params(labelsize=14)
cbar.set_label(cbar_text, fontsize=14)
if download == True:
outputfile = fn.value + '_FORC.pdf'
plt.savefig(outputfile, dpi=300, bbox_inches='tight')
ar = np.column_stack((np.reshape(Xi_new, (-1, 1)), np.reshape(
Yi_new, (-1, 1)), np.reshape(Zi_trunc, (-1, 1)), np.reshape(SEi,
(-1, 1))))
outputfile = fn.value + '_XYZ.csv'
with open(outputfile, 'w') as fp:
fp.write(xlabel_csv + ',' + ylabel_csv + ',' + se_csv + ',' +
se_csv + '\n')
np.savetxt(fp, ar, '%s', ',')
plt.show()
def FORCinel_colormap(Z):
cdict = {'red': ((0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 /
255), (0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255
), (0.3193, 102 / 255, 102 / 255), (0.563, 204 / 255, 204 / 255), (
0.6975, 204 / 255, 204 / 255), (0.8319, 153 / 255, 153 / 255), (
0.9748, 76 / 255, 76 / 255), (1.0, 76 / 255, 76 / 255)), 'green': (
(0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 / 255), (
0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (
0.3193, 178 / 255, 178 / 255), (0.563, 204 / 255, 204 / 255), (
0.6975, 76 / 255, 76 / 255), (0.8319, 102 / 255, 102 / 255), (
0.9748, 25 / 255, 25 / 255), (1.0, 25 / 255, 25 / 255)), 'blue': ((
0.0, 255 / 255, 255 / 255), (0.1387, 255 / 255, 255 / 255), (0.1597,
255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (0.3193, 102 /
255, 102 / 255), (0.563, 76 / 255, 76 / 255), (0.6975, 76 / 255, 76 /
255), (0.8319, 153 / 255, 153 / 255), (0.9748, 76 / 255, 76 / 255),
(1.0, 76 / 255, 76 / 255))}
if np.abs(np.min(Z)) <= np.max(Z) * 0.19:
vmin = -np.max(Z) * 0.19
vmax = np.max(Z)
else:
vmin = np.min(Z)
vmax = np.max(Z)
anchors = np.zeros(10)
anchors[1] = (-0.025 * vmax - vmin) / (vmax - vmin)
anchors[2] = (-0.005 * vmax - vmin) / (vmax - vmin)
anchors[3] = (0.025 * vmax - vmin) / (vmax - vmin)
anchors[4] = (0.19 * vmax - vmin) / (vmax - vmin)
anchors[5] = (0.48 * vmax - vmin) / (vmax - vmin)
anchors[6] = (0.64 * vmax - vmin) / (vmax - vmin)
anchors[7] = (0.8 * vmax - vmin) / (vmax - vmin)
anchors[8] = (0.97 * vmax - vmin) / (vmax - vmin)
anchors[9] = 1.0
Rlst = list(cdict['red'])
Glst = list(cdict['green'])
Blst = list(cdict['blue'])
for i in range(9):
Rlst[i] = tuple((anchors[i], Rlst[i][1], Rlst[i][2]))
Glst[i] = tuple((anchors[i], Glst[i][1], Glst[i][2]))
Blst[i] = tuple((anchors[i], Blst[i][1], Blst[i][2]))
cdict['red'] = tuple(Rlst)
cdict['green'] = tuple(Glst)
cdict['blue'] = tuple(Blst)
cmap = LinearSegmentedColormap('forc_cmap', cdict)
return cmap, vmin, vmax
def profile_options(X):
Hb1 = X['Hb1'] - X['Hc2']
Hb2 = X['Hb2']
Hc1 = np.maximum(X['Hc1'], 0)
Hc2 = X['Hc2']
style = {'description_width': 'initial'}
HL = widgets.HTML(value=
'<hr style="height:3px;border:none;color:#333;background-color:#333;" />'
)
P_title = widgets.HTML(value='<h3>Select profile type:</h3>')
P_widge = widgets.RadioButtons(options=[('Horizontal profile', 0), (
'Vertical profile', 1)], value=0, style=style)
H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>')
if X['unit'] == 'SI':
x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=
0.001, description='B$_u$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
elif X['unit'] == 'cgs':
x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=
10, description='H$_u$ [Oe]', disabled=False, continuous_update
=False, orientation='horizontal', readout=True, readout_format=
'.3f', layout={'width': '350px'}, style=style)
if X['unit'] == 'SI':
x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,
max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
elif X['unit'] == 'cgs':
x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,
max=Hc2, step=10, description='H$_c$ [Oe]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>')
if X['unit'] == 'SI':
y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,
max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
elif X['unit'] == 'cgs':
y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,
max=Hc2, step=10, description='H$_c$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
if X['unit'] == 'SI':
y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,
max=Hb2, step=0.001, description='B$_u$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
elif X['unit'] == 'cgs':
y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,
max=Hb2, step=10, description='H$_u$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
profile_widge = VBox([P_title, P_widge, HL, H_title, x_Hb_widge,
x_Hc_widge, HL, V_title, y_Hc_widge, y_Hb_widge])
profile_nest = widgets.Tab()
profile_nest.children = [profile_widge]
profile_nest.set_title(0, 'PLOT PROFILES')
display(profile_nest)
X['P_widge'] = P_widge
X['x_Hb_widge'] = x_Hb_widge
X['x_Hc_widge'] = x_Hc_widge
X['y_Hc_widge'] = y_Hc_widge
X['y_Hb_widge'] = y_Hb_widge
return X
<|reserved_special_token_0|>
def x_profile(X, Hc, Hb):
Hc1, Hc2 = Hc[0], Hc[1]
dH = X['dH']
NH = int(np.sqrt((Hc2 - Hc1) ** 2) / dH)
Hc0 = np.linspace(Hc1, Hc2, NH)
Hb0 = np.linspace(Hb, Hb, NH)
rho_int = X['Zint'](Hc0, Hb0)
coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))
CI_int = X['SEint'](Hc0, Hb0) * coef
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 1, 1)
if X['mass'].value > 0.0:
if X['unit'] == 'SI':
ax1.plot(Hc0, rho_int / (X['mass'].value / 1000.0), color='k')
ax1.fill_between(Hc0, (rho_int - CI_int) / (X['mass'].value /
1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),
color='lightgrey')
ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)
elif X['unit'] == 'cgs':
ax1.plot(Hc0, rho_int / X['mass'].value, color='k')
ax1.fill_between(Hc0, (rho_int - CI_int) / X['mass'].value, (
rho_int + CI_int) / X['mass'].value, color='lightgrey')
ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)
else:
ax1.plot(Hc0, rho_int, color='k')
ax1.fill_between(Hc0, rho_int - CI_int, rho_int + CI_int, color=
'lightgrey')
if X['unit'] == 'SI':
ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)
ax1.tick_params(axis='both', which='major', direction='out', length=5,
width=1, color='k', labelsize='14')
ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,
width=1, color='k')
if X['unit'] == 'SI':
ax1.set_xlabel('B$_\\mathrm{c}$ [T]', fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_xlabel('H$_\\mathrm{c}$ [Oe]', fontsize=14)
ax1.minorticks_on()
ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
outputfile = X['sample'].value + '_Hc_PROFILE.pdf'
plt.savefig(outputfile, dpi=300, bbox_inches='tight')
plt.show
return X
def y_profile(X, Hc, Hb):
Hb1, Hb2 = Hb[0], Hb[1]
dH = X['dH']
NH = int(np.sqrt((Hb2 - Hb1) ** 2) / dH)
Hc0 = np.linspace(Hc, Hc, NH)
Hb0 = np.linspace(Hb1, Hb2, NH)
rho_int = X['Zint'](Hc0, Hb0)
coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))
CI_int = X['SEint'](Hc0, Hb0) * coef
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 1, 1)
if X['mass'].value > 0.0:
if X['unit'] == 'SI':
ax1.plot(Hb0, rho_int / (X['mass'].value / 1000.0), color='k')
ax1.fill_between(Hb0, (rho_int - CI_int) / (X['mass'].value /
1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),
color='lightgrey')
ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)
elif X['unit'] == 'cgs':
ax1.plot(Hb0, rho_int / X['mass'].value, color='k')
ax1.fill_between(Hb0, (rho_int - CI_int) / X['mass'].value, (
rho_int + CI_int) / X['mass'].value, color='lightgrey')
ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)
else:
ax1.plot(Hb0, rho_int, color='k')
ax1.fill_between(Hb0, rho_int - CI_int, rho_int + CI_int, color=
'lightgrey')
if X['unit'] == 'SI':
ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)
ax1.tick_params(axis='both', which='major', direction='out', length=5,
width=1, color='k', labelsize='14')
ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,
width=1, color='k')
if X['unit'] == 'SI':
ax1.set_xlabel('B$_\\mathrm{u}$ [T]', fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_xlabel('H$_\\mathrm{u}$ [Oe]', fontsize=14)
ax1.minorticks_on()
ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
outputfile = X['sample'].value + '_Hu_PROFILE.pdf'
plt.savefig(outputfile, dpi=300, bbox_inches='tight')
plt.show
return X
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def forc(X):
Xi = X['Xi']
Yi = X['Yi']
Zi = X['Zi']
SEi = X['SEi']
Pi = X['Pi']
Hc1 = X['Hc1']
Hc2 = X['Hc2']
Hb1 = X['Hb1']
Hb2 = X['Hb2']
style = {'description_width': 'initial'}
colorbar_widge = widgets.Checkbox(value=False, description=
'Show final FORC plot', style=style)
pval_widge = widgets.Checkbox(value=False, description=
'Show 0.05 significance contour', style=style)
colormin_widge = widgets.FloatSlider(value=0.0, min=0.0, max=0.999,
step=0.001, description='Rescale colormap minimum', disabled=False,
continuous_update=False, orientation='horizontal', readout=False,
readout_format='.2f', style=style)
colormax_widge = widgets.FloatSlider(value=1.0, min=0.001, max=1, step=
0.001, description='Rescale colormap maximum', disabled=False,
continuous_update=False, orientation='horizontal', readout=False,
readout_format='.2f', style=style)
contour_widge = widgets.Select(options=[['Select contour frequency', -1
], ['Every level', 1], ['Every 2nd level', 2], ['Every 3rd level',
3], ['Every 4th level', 4], ['Every 5th level', 5], [
'Every 10th level', 10], ['Every 20th level', 20], [
'Every 50th level', 50]], value=-1, rows=1, description=
'Plot contours', style=style)
contourpts_widge = widgets.FloatSlider(value=1.0, min=0.5, max=3.0,
step=0.5, description='Contour line width [pts]', style=style)
download_widge = widgets.Checkbox(value=False, description=
'Download plot', style=style)
level_widge = widgets.Select(options=[['20', 20], ['30', 30], ['50', 50
], ['75', 75], ['100', 100], ['200', 200], ['500', 500]], value=100,
rows=1, description='Number of color levels', style=style)
if X['unit'] == 'SI':
xmin_widge = widgets.FloatText(value=0, description=
'Minimum B$_\\mathrm{c}$ [Oe]', style=style, step=10)
xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,
description='Maximum B$_\\mathrm{c}$ [Oe]', style=style, step=10)
ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /
1000, description='Minimum B$_\\mathrm{u}$ [Oe]', style=style,
step=10)
ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,
description='Maximum B$_\\mathrm{u}$ [Oe]', style=style, step=10)
elif X['unit'] == 'cgs':
xmin_widge = widgets.FloatText(value=0, description=
'Minimum H$_\\mathrm{c}$ [Oe]', style=style, step=10)
xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,
description='Maximum H$_\\mathrm{c}$ [Oe]', style=style, step=10)
ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /
1000, description='Minimum H$_\\mathrm{u}$ [Oe]', style=style,
step=10)
ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,
description='Maximum H$_\\mathrm{u}$ [Oe]', style=style, step=10)
x = interactive(forcplot, Xi=fixed(Xi), Yi=fixed(Yi), Zi=fixed(Zi), SEi
=fixed(SEi), Pi=fixed(Pi), fn=fixed(X['sample']), mass=fixed(X[
'mass']), unit=fixed(X['unit']), colorbar=colorbar_widge, level=
level_widge, contour=contour_widge, contourpts=contourpts_widge,
xmin=xmin_widge, xmax=xmax_widge, ymin=ymin_widge, ymax=ymax_widge,
colormin=colormin_widge, colormax=colormax_widge, download=
download_widge)
tab_nest = widgets.Tab()
tab_nest.set_title(0, 'FORC PLOTTING')
tab_nest.children = [VBox(children=x.children)]
display(tab_nest)
def forcplot(Xi, Yi, Zi, SEi, Pi, fn, mass, unit, colorbar, level, contour,
contourpts, xmin, xmax, ymin, ymax, colormin, colormax, download):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(1, 1, 1)
if mass.value < 0.0:
Xi_new = Xi
Yi_new = Yi
Zi_new = Zi
SEi_new = SEi
Pi_new = Pi
SEi_new[Zi_new == 0.0] = 0.0
SEi_new[np.isnan(SEi_new)] = 0.0
if unit == 'SI':
xlabel_text = 'B$_\\mathrm{c}$ [T]'
xlabel_csv = 'Bc [T]'
ylabel_text = 'B$_\\mathrm{u}$ [T]'
ylabel_csv = 'Bu [T]'
cbar_text = 'Am$^2$ T$^{-2}$'
se_csv = 'rho [Am**2 / T**2]'
elif unit == 'cgs':
xlabel_text = 'H$_\\mathrm{c}$ [Oe]'
xlabel_csv = 'Hc [Oe]'
ylabel_text = 'H$_\\mathrm{u}$ [Oe]'
ylabel_csv = 'Hu [Oe]'
cbar_text = 'emu$ Oe$^{-2}$'
se_csv = 'rho [emu / Oe**2]'
else:
Xi_new = Xi
Yi_new = Yi
Zi_new = Zi / (mass.value / 1000.0)
SEi_new = SEi / (mass.value / 1000.0)
SEi_new[Zi_new == 0.0] = 0.0
SEi_new[np.isnan(SEi_new)] = 0.0
Pi_new = Pi
if unit == 'SI':
Zi_new = Zi / (mass.value / 1000.0)
SEi_new = SEi / (mass.value / 1000.0)
xlabel_text = 'B$_\\mathrm{c}$ [T]'
xlabel_csv = 'Bc [T]'
ylabel_text = 'B$_\\mathrm{u}$ [T]'
ylabel_csv = 'Bu [T]'
cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$'
se_csv = 'se [Am**2 / T**2 / kg]'
elif unit == 'cgs':
Zi_new = Zi / mass.value
SEi_new = SEi / mass.value
xlabel_text = 'H$_\\mathrm{c}$ [Oe]'
xlabel_csv = 'Hc [Oe]'
ylabel_text = 'H$_\\mathrm{u}$ [Oe]'
ylabel_csv = 'Hu [Oe]'
cbar_text = 'emu Oe$^{-2}$ g$^{-1}$'
se_csv = 'se [emu/ Oe**2 / g]'
SEi_new[Zi_new == 0.0] = 0.0
SEi_new[np.isnan(SEi_new)] = 0.0
idx = (Xi_new >= xmin) & (Xi_new <= xmax) & (Yi_new >= ymin) & (Yi_new <=
ymax)
cmap, vmin, vmax = FORCinel_colormap(Zi_new[idx])
Zi_trunc = np.copy(Zi_new)
Zi_trunc[np.isnan(Zi_trunc)] = 0.0
Zi_trunc[Zi_trunc < vmin] = vmin
vmini = vmin * (1 - colormin)
vmaxi = vmax * colormax
idx = (Zi_trunc >= vmini) & (Zi_trunc <= vmaxi)
cmap, vmin, vmax = FORCinel_colormap(Zi_trunc[idx])
CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap=cmap, vmin=vmin,
vmax=vmax)
if (contour > 0) & (contour < level):
CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',
linewidths=contourpts)
ax.set_xlabel(xlabel_text, fontsize=14)
ax.set_ylabel(ylabel_text, fontsize=14)
xlimits = np.sort((xmin, xmax))
ax.set_xlim(xlimits)
ylimits = np.sort((ymin, ymax))
ax.set_ylim(ylimits)
ax.tick_params(labelsize=14)
ax.set_aspect('equal')
ax.minorticks_on()
if colorbar == True:
cbar = fig.colorbar(CS, fraction=0.04, pad=0.08, format='%.2e')
cbar.ax.tick_params(labelsize=14)
cbar.set_label(cbar_text, fontsize=14)
if download == True:
outputfile = fn.value + '_FORC.pdf'
plt.savefig(outputfile, dpi=300, bbox_inches='tight')
ar = np.column_stack((np.reshape(Xi_new, (-1, 1)), np.reshape(
Yi_new, (-1, 1)), np.reshape(Zi_trunc, (-1, 1)), np.reshape(SEi,
(-1, 1))))
outputfile = fn.value + '_XYZ.csv'
with open(outputfile, 'w') as fp:
fp.write(xlabel_csv + ',' + ylabel_csv + ',' + se_csv + ',' +
se_csv + '\n')
np.savetxt(fp, ar, '%s', ',')
plt.show()
def FORCinel_colormap(Z):
cdict = {'red': ((0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 /
255), (0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255
), (0.3193, 102 / 255, 102 / 255), (0.563, 204 / 255, 204 / 255), (
0.6975, 204 / 255, 204 / 255), (0.8319, 153 / 255, 153 / 255), (
0.9748, 76 / 255, 76 / 255), (1.0, 76 / 255, 76 / 255)), 'green': (
(0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 / 255), (
0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (
0.3193, 178 / 255, 178 / 255), (0.563, 204 / 255, 204 / 255), (
0.6975, 76 / 255, 76 / 255), (0.8319, 102 / 255, 102 / 255), (
0.9748, 25 / 255, 25 / 255), (1.0, 25 / 255, 25 / 255)), 'blue': ((
0.0, 255 / 255, 255 / 255), (0.1387, 255 / 255, 255 / 255), (0.1597,
255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (0.3193, 102 /
255, 102 / 255), (0.563, 76 / 255, 76 / 255), (0.6975, 76 / 255, 76 /
255), (0.8319, 153 / 255, 153 / 255), (0.9748, 76 / 255, 76 / 255),
(1.0, 76 / 255, 76 / 255))}
if np.abs(np.min(Z)) <= np.max(Z) * 0.19:
vmin = -np.max(Z) * 0.19
vmax = np.max(Z)
else:
vmin = np.min(Z)
vmax = np.max(Z)
anchors = np.zeros(10)
anchors[1] = (-0.025 * vmax - vmin) / (vmax - vmin)
anchors[2] = (-0.005 * vmax - vmin) / (vmax - vmin)
anchors[3] = (0.025 * vmax - vmin) / (vmax - vmin)
anchors[4] = (0.19 * vmax - vmin) / (vmax - vmin)
anchors[5] = (0.48 * vmax - vmin) / (vmax - vmin)
anchors[6] = (0.64 * vmax - vmin) / (vmax - vmin)
anchors[7] = (0.8 * vmax - vmin) / (vmax - vmin)
anchors[8] = (0.97 * vmax - vmin) / (vmax - vmin)
anchors[9] = 1.0
Rlst = list(cdict['red'])
Glst = list(cdict['green'])
Blst = list(cdict['blue'])
for i in range(9):
Rlst[i] = tuple((anchors[i], Rlst[i][1], Rlst[i][2]))
Glst[i] = tuple((anchors[i], Glst[i][1], Glst[i][2]))
Blst[i] = tuple((anchors[i], Blst[i][1], Blst[i][2]))
cdict['red'] = tuple(Rlst)
cdict['green'] = tuple(Glst)
cdict['blue'] = tuple(Blst)
cmap = LinearSegmentedColormap('forc_cmap', cdict)
return cmap, vmin, vmax
def profile_options(X):
Hb1 = X['Hb1'] - X['Hc2']
Hb2 = X['Hb2']
Hc1 = np.maximum(X['Hc1'], 0)
Hc2 = X['Hc2']
style = {'description_width': 'initial'}
HL = widgets.HTML(value=
'<hr style="height:3px;border:none;color:#333;background-color:#333;" />'
)
P_title = widgets.HTML(value='<h3>Select profile type:</h3>')
P_widge = widgets.RadioButtons(options=[('Horizontal profile', 0), (
'Vertical profile', 1)], value=0, style=style)
H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>')
if X['unit'] == 'SI':
x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=
0.001, description='B$_u$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
elif X['unit'] == 'cgs':
x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=
10, description='H$_u$ [Oe]', disabled=False, continuous_update
=False, orientation='horizontal', readout=True, readout_format=
'.3f', layout={'width': '350px'}, style=style)
if X['unit'] == 'SI':
x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,
max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
elif X['unit'] == 'cgs':
x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,
max=Hc2, step=10, description='H$_c$ [Oe]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>')
if X['unit'] == 'SI':
y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,
max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
elif X['unit'] == 'cgs':
y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,
max=Hc2, step=10, description='H$_c$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
if X['unit'] == 'SI':
y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,
max=Hb2, step=0.001, description='B$_u$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
elif X['unit'] == 'cgs':
y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,
max=Hb2, step=10, description='H$_u$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
profile_widge = VBox([P_title, P_widge, HL, H_title, x_Hb_widge,
x_Hc_widge, HL, V_title, y_Hc_widge, y_Hb_widge])
profile_nest = widgets.Tab()
profile_nest.children = [profile_widge]
profile_nest.set_title(0, 'PLOT PROFILES')
display(profile_nest)
X['P_widge'] = P_widge
X['x_Hb_widge'] = x_Hb_widge
X['x_Hc_widge'] = x_Hc_widge
X['y_Hc_widge'] = y_Hc_widge
X['y_Hb_widge'] = y_Hb_widge
return X
def profile_plot(X):
if X['P_widge'].value == 0:
X = x_profile(X, X['x_Hc_widge'].value, X['x_Hb_widge'].value)
else:
X = y_profile(X, X['y_Hc_widge'].value, X['y_Hb_widge'].value)
return X
def x_profile(X, Hc, Hb):
Hc1, Hc2 = Hc[0], Hc[1]
dH = X['dH']
NH = int(np.sqrt((Hc2 - Hc1) ** 2) / dH)
Hc0 = np.linspace(Hc1, Hc2, NH)
Hb0 = np.linspace(Hb, Hb, NH)
rho_int = X['Zint'](Hc0, Hb0)
coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))
CI_int = X['SEint'](Hc0, Hb0) * coef
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 1, 1)
if X['mass'].value > 0.0:
if X['unit'] == 'SI':
ax1.plot(Hc0, rho_int / (X['mass'].value / 1000.0), color='k')
ax1.fill_between(Hc0, (rho_int - CI_int) / (X['mass'].value /
1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),
color='lightgrey')
ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)
elif X['unit'] == 'cgs':
ax1.plot(Hc0, rho_int / X['mass'].value, color='k')
ax1.fill_between(Hc0, (rho_int - CI_int) / X['mass'].value, (
rho_int + CI_int) / X['mass'].value, color='lightgrey')
ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)
else:
ax1.plot(Hc0, rho_int, color='k')
ax1.fill_between(Hc0, rho_int - CI_int, rho_int + CI_int, color=
'lightgrey')
if X['unit'] == 'SI':
ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)
ax1.tick_params(axis='both', which='major', direction='out', length=5,
width=1, color='k', labelsize='14')
ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,
width=1, color='k')
if X['unit'] == 'SI':
ax1.set_xlabel('B$_\\mathrm{c}$ [T]', fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_xlabel('H$_\\mathrm{c}$ [Oe]', fontsize=14)
ax1.minorticks_on()
ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
outputfile = X['sample'].value + '_Hc_PROFILE.pdf'
plt.savefig(outputfile, dpi=300, bbox_inches='tight')
plt.show
return X
def y_profile(X, Hc, Hb):
Hb1, Hb2 = Hb[0], Hb[1]
dH = X['dH']
NH = int(np.sqrt((Hb2 - Hb1) ** 2) / dH)
Hc0 = np.linspace(Hc, Hc, NH)
Hb0 = np.linspace(Hb1, Hb2, NH)
rho_int = X['Zint'](Hc0, Hb0)
coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))
CI_int = X['SEint'](Hc0, Hb0) * coef
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 1, 1)
if X['mass'].value > 0.0:
if X['unit'] == 'SI':
ax1.plot(Hb0, rho_int / (X['mass'].value / 1000.0), color='k')
ax1.fill_between(Hb0, (rho_int - CI_int) / (X['mass'].value /
1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),
color='lightgrey')
ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)
elif X['unit'] == 'cgs':
ax1.plot(Hb0, rho_int / X['mass'].value, color='k')
ax1.fill_between(Hb0, (rho_int - CI_int) / X['mass'].value, (
rho_int + CI_int) / X['mass'].value, color='lightgrey')
ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)
else:
ax1.plot(Hb0, rho_int, color='k')
ax1.fill_between(Hb0, rho_int - CI_int, rho_int + CI_int, color=
'lightgrey')
if X['unit'] == 'SI':
ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)
ax1.tick_params(axis='both', which='major', direction='out', length=5,
width=1, color='k', labelsize='14')
ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,
width=1, color='k')
if X['unit'] == 'SI':
ax1.set_xlabel('B$_\\mathrm{u}$ [T]', fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_xlabel('H$_\\mathrm{u}$ [Oe]', fontsize=14)
ax1.minorticks_on()
ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
outputfile = X['sample'].value + '_Hu_PROFILE.pdf'
plt.savefig(outputfile, dpi=300, bbox_inches='tight')
plt.show
return X
<|reserved_special_token_1|>
<|reserved_special_token_0|>
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
def forc(X):
Xi = X['Xi']
Yi = X['Yi']
Zi = X['Zi']
SEi = X['SEi']
Pi = X['Pi']
Hc1 = X['Hc1']
Hc2 = X['Hc2']
Hb1 = X['Hb1']
Hb2 = X['Hb2']
style = {'description_width': 'initial'}
colorbar_widge = widgets.Checkbox(value=False, description=
'Show final FORC plot', style=style)
pval_widge = widgets.Checkbox(value=False, description=
'Show 0.05 significance contour', style=style)
colormin_widge = widgets.FloatSlider(value=0.0, min=0.0, max=0.999,
step=0.001, description='Rescale colormap minimum', disabled=False,
continuous_update=False, orientation='horizontal', readout=False,
readout_format='.2f', style=style)
colormax_widge = widgets.FloatSlider(value=1.0, min=0.001, max=1, step=
0.001, description='Rescale colormap maximum', disabled=False,
continuous_update=False, orientation='horizontal', readout=False,
readout_format='.2f', style=style)
contour_widge = widgets.Select(options=[['Select contour frequency', -1
], ['Every level', 1], ['Every 2nd level', 2], ['Every 3rd level',
3], ['Every 4th level', 4], ['Every 5th level', 5], [
'Every 10th level', 10], ['Every 20th level', 20], [
'Every 50th level', 50]], value=-1, rows=1, description=
'Plot contours', style=style)
contourpts_widge = widgets.FloatSlider(value=1.0, min=0.5, max=3.0,
step=0.5, description='Contour line width [pts]', style=style)
download_widge = widgets.Checkbox(value=False, description=
'Download plot', style=style)
level_widge = widgets.Select(options=[['20', 20], ['30', 30], ['50', 50
], ['75', 75], ['100', 100], ['200', 200], ['500', 500]], value=100,
rows=1, description='Number of color levels', style=style)
if X['unit'] == 'SI':
xmin_widge = widgets.FloatText(value=0, description=
'Minimum B$_\\mathrm{c}$ [Oe]', style=style, step=10)
xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,
description='Maximum B$_\\mathrm{c}$ [Oe]', style=style, step=10)
ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /
1000, description='Minimum B$_\\mathrm{u}$ [Oe]', style=style,
step=10)
ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,
description='Maximum B$_\\mathrm{u}$ [Oe]', style=style, step=10)
elif X['unit'] == 'cgs':
xmin_widge = widgets.FloatText(value=0, description=
'Minimum H$_\\mathrm{c}$ [Oe]', style=style, step=10)
xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,
description='Maximum H$_\\mathrm{c}$ [Oe]', style=style, step=10)
ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /
1000, description='Minimum H$_\\mathrm{u}$ [Oe]', style=style,
step=10)
ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,
description='Maximum H$_\\mathrm{u}$ [Oe]', style=style, step=10)
x = interactive(forcplot, Xi=fixed(Xi), Yi=fixed(Yi), Zi=fixed(Zi), SEi
=fixed(SEi), Pi=fixed(Pi), fn=fixed(X['sample']), mass=fixed(X[
'mass']), unit=fixed(X['unit']), colorbar=colorbar_widge, level=
level_widge, contour=contour_widge, contourpts=contourpts_widge,
xmin=xmin_widge, xmax=xmax_widge, ymin=ymin_widge, ymax=ymax_widge,
colormin=colormin_widge, colormax=colormax_widge, download=
download_widge)
tab_nest = widgets.Tab()
tab_nest.set_title(0, 'FORC PLOTTING')
tab_nest.children = [VBox(children=x.children)]
display(tab_nest)
def forcplot(Xi, Yi, Zi, SEi, Pi, fn, mass, unit, colorbar, level, contour,
contourpts, xmin, xmax, ymin, ymax, colormin, colormax, download):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(1, 1, 1)
if mass.value < 0.0:
Xi_new = Xi
Yi_new = Yi
Zi_new = Zi
SEi_new = SEi
Pi_new = Pi
SEi_new[Zi_new == 0.0] = 0.0
SEi_new[np.isnan(SEi_new)] = 0.0
if unit == 'SI':
xlabel_text = 'B$_\\mathrm{c}$ [T]'
xlabel_csv = 'Bc [T]'
ylabel_text = 'B$_\\mathrm{u}$ [T]'
ylabel_csv = 'Bu [T]'
cbar_text = 'Am$^2$ T$^{-2}$'
se_csv = 'rho [Am**2 / T**2]'
elif unit == 'cgs':
xlabel_text = 'H$_\\mathrm{c}$ [Oe]'
xlabel_csv = 'Hc [Oe]'
ylabel_text = 'H$_\\mathrm{u}$ [Oe]'
ylabel_csv = 'Hu [Oe]'
cbar_text = 'emu$ Oe$^{-2}$'
se_csv = 'rho [emu / Oe**2]'
else:
Xi_new = Xi
Yi_new = Yi
Zi_new = Zi / (mass.value / 1000.0)
SEi_new = SEi / (mass.value / 1000.0)
SEi_new[Zi_new == 0.0] = 0.0
SEi_new[np.isnan(SEi_new)] = 0.0
Pi_new = Pi
if unit == 'SI':
Zi_new = Zi / (mass.value / 1000.0)
SEi_new = SEi / (mass.value / 1000.0)
xlabel_text = 'B$_\\mathrm{c}$ [T]'
xlabel_csv = 'Bc [T]'
ylabel_text = 'B$_\\mathrm{u}$ [T]'
ylabel_csv = 'Bu [T]'
cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$'
se_csv = 'se [Am**2 / T**2 / kg]'
elif unit == 'cgs':
Zi_new = Zi / mass.value
SEi_new = SEi / mass.value
xlabel_text = 'H$_\\mathrm{c}$ [Oe]'
xlabel_csv = 'Hc [Oe]'
ylabel_text = 'H$_\\mathrm{u}$ [Oe]'
ylabel_csv = 'Hu [Oe]'
cbar_text = 'emu Oe$^{-2}$ g$^{-1}$'
se_csv = 'se [emu/ Oe**2 / g]'
SEi_new[Zi_new == 0.0] = 0.0
SEi_new[np.isnan(SEi_new)] = 0.0
idx = (Xi_new >= xmin) & (Xi_new <= xmax) & (Yi_new >= ymin) & (Yi_new <=
ymax)
cmap, vmin, vmax = FORCinel_colormap(Zi_new[idx])
Zi_trunc = np.copy(Zi_new)
Zi_trunc[np.isnan(Zi_trunc)] = 0.0
Zi_trunc[Zi_trunc < vmin] = vmin
vmini = vmin * (1 - colormin)
vmaxi = vmax * colormax
idx = (Zi_trunc >= vmini) & (Zi_trunc <= vmaxi)
cmap, vmin, vmax = FORCinel_colormap(Zi_trunc[idx])
CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap=cmap, vmin=vmin,
vmax=vmax)
if (contour > 0) & (contour < level):
CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',
linewidths=contourpts)
ax.set_xlabel(xlabel_text, fontsize=14)
ax.set_ylabel(ylabel_text, fontsize=14)
xlimits = np.sort((xmin, xmax))
ax.set_xlim(xlimits)
ylimits = np.sort((ymin, ymax))
ax.set_ylim(ylimits)
ax.tick_params(labelsize=14)
ax.set_aspect('equal')
ax.minorticks_on()
if colorbar == True:
cbar = fig.colorbar(CS, fraction=0.04, pad=0.08, format='%.2e')
cbar.ax.tick_params(labelsize=14)
cbar.set_label(cbar_text, fontsize=14)
if download == True:
outputfile = fn.value + '_FORC.pdf'
plt.savefig(outputfile, dpi=300, bbox_inches='tight')
ar = np.column_stack((np.reshape(Xi_new, (-1, 1)), np.reshape(
Yi_new, (-1, 1)), np.reshape(Zi_trunc, (-1, 1)), np.reshape(SEi,
(-1, 1))))
outputfile = fn.value + '_XYZ.csv'
with open(outputfile, 'w') as fp:
fp.write(xlabel_csv + ',' + ylabel_csv + ',' + se_csv + ',' +
se_csv + '\n')
np.savetxt(fp, ar, '%s', ',')
plt.show()
def FORCinel_colormap(Z):
cdict = {'red': ((0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 /
255), (0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255
), (0.3193, 102 / 255, 102 / 255), (0.563, 204 / 255, 204 / 255), (
0.6975, 204 / 255, 204 / 255), (0.8319, 153 / 255, 153 / 255), (
0.9748, 76 / 255, 76 / 255), (1.0, 76 / 255, 76 / 255)), 'green': (
(0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 / 255), (
0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (
0.3193, 178 / 255, 178 / 255), (0.563, 204 / 255, 204 / 255), (
0.6975, 76 / 255, 76 / 255), (0.8319, 102 / 255, 102 / 255), (
0.9748, 25 / 255, 25 / 255), (1.0, 25 / 255, 25 / 255)), 'blue': ((
0.0, 255 / 255, 255 / 255), (0.1387, 255 / 255, 255 / 255), (0.1597,
255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (0.3193, 102 /
255, 102 / 255), (0.563, 76 / 255, 76 / 255), (0.6975, 76 / 255, 76 /
255), (0.8319, 153 / 255, 153 / 255), (0.9748, 76 / 255, 76 / 255),
(1.0, 76 / 255, 76 / 255))}
if np.abs(np.min(Z)) <= np.max(Z) * 0.19:
vmin = -np.max(Z) * 0.19
vmax = np.max(Z)
else:
vmin = np.min(Z)
vmax = np.max(Z)
anchors = np.zeros(10)
anchors[1] = (-0.025 * vmax - vmin) / (vmax - vmin)
anchors[2] = (-0.005 * vmax - vmin) / (vmax - vmin)
anchors[3] = (0.025 * vmax - vmin) / (vmax - vmin)
anchors[4] = (0.19 * vmax - vmin) / (vmax - vmin)
anchors[5] = (0.48 * vmax - vmin) / (vmax - vmin)
anchors[6] = (0.64 * vmax - vmin) / (vmax - vmin)
anchors[7] = (0.8 * vmax - vmin) / (vmax - vmin)
anchors[8] = (0.97 * vmax - vmin) / (vmax - vmin)
anchors[9] = 1.0
Rlst = list(cdict['red'])
Glst = list(cdict['green'])
Blst = list(cdict['blue'])
for i in range(9):
Rlst[i] = tuple((anchors[i], Rlst[i][1], Rlst[i][2]))
Glst[i] = tuple((anchors[i], Glst[i][1], Glst[i][2]))
Blst[i] = tuple((anchors[i], Blst[i][1], Blst[i][2]))
cdict['red'] = tuple(Rlst)
cdict['green'] = tuple(Glst)
cdict['blue'] = tuple(Blst)
cmap = LinearSegmentedColormap('forc_cmap', cdict)
return cmap, vmin, vmax
def profile_options(X):
Hb1 = X['Hb1'] - X['Hc2']
Hb2 = X['Hb2']
Hc1 = np.maximum(X['Hc1'], 0)
Hc2 = X['Hc2']
style = {'description_width': 'initial'}
HL = widgets.HTML(value=
'<hr style="height:3px;border:none;color:#333;background-color:#333;" />'
)
P_title = widgets.HTML(value='<h3>Select profile type:</h3>')
P_widge = widgets.RadioButtons(options=[('Horizontal profile', 0), (
'Vertical profile', 1)], value=0, style=style)
H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>')
if X['unit'] == 'SI':
x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=
0.001, description='B$_u$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
elif X['unit'] == 'cgs':
x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=
10, description='H$_u$ [Oe]', disabled=False, continuous_update
=False, orientation='horizontal', readout=True, readout_format=
'.3f', layout={'width': '350px'}, style=style)
if X['unit'] == 'SI':
x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,
max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
elif X['unit'] == 'cgs':
x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,
max=Hc2, step=10, description='H$_c$ [Oe]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>')
if X['unit'] == 'SI':
y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,
max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
elif X['unit'] == 'cgs':
y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,
max=Hc2, step=10, description='H$_c$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
if X['unit'] == 'SI':
y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,
max=Hb2, step=0.001, description='B$_u$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
elif X['unit'] == 'cgs':
y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,
max=Hb2, step=10, description='H$_u$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
profile_widge = VBox([P_title, P_widge, HL, H_title, x_Hb_widge,
x_Hc_widge, HL, V_title, y_Hc_widge, y_Hb_widge])
profile_nest = widgets.Tab()
profile_nest.children = [profile_widge]
profile_nest.set_title(0, 'PLOT PROFILES')
display(profile_nest)
X['P_widge'] = P_widge
X['x_Hb_widge'] = x_Hb_widge
X['x_Hc_widge'] = x_Hc_widge
X['y_Hc_widge'] = y_Hc_widge
X['y_Hb_widge'] = y_Hb_widge
return X
def profile_plot(X):
if X['P_widge'].value == 0:
X = x_profile(X, X['x_Hc_widge'].value, X['x_Hb_widge'].value)
else:
X = y_profile(X, X['y_Hc_widge'].value, X['y_Hb_widge'].value)
return X
def x_profile(X, Hc, Hb):
Hc1, Hc2 = Hc[0], Hc[1]
dH = X['dH']
NH = int(np.sqrt((Hc2 - Hc1) ** 2) / dH)
Hc0 = np.linspace(Hc1, Hc2, NH)
Hb0 = np.linspace(Hb, Hb, NH)
rho_int = X['Zint'](Hc0, Hb0)
coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))
CI_int = X['SEint'](Hc0, Hb0) * coef
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 1, 1)
if X['mass'].value > 0.0:
if X['unit'] == 'SI':
ax1.plot(Hc0, rho_int / (X['mass'].value / 1000.0), color='k')
ax1.fill_between(Hc0, (rho_int - CI_int) / (X['mass'].value /
1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),
color='lightgrey')
ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)
elif X['unit'] == 'cgs':
ax1.plot(Hc0, rho_int / X['mass'].value, color='k')
ax1.fill_between(Hc0, (rho_int - CI_int) / X['mass'].value, (
rho_int + CI_int) / X['mass'].value, color='lightgrey')
ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)
else:
ax1.plot(Hc0, rho_int, color='k')
ax1.fill_between(Hc0, rho_int - CI_int, rho_int + CI_int, color=
'lightgrey')
if X['unit'] == 'SI':
ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)
ax1.tick_params(axis='both', which='major', direction='out', length=5,
width=1, color='k', labelsize='14')
ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,
width=1, color='k')
if X['unit'] == 'SI':
ax1.set_xlabel('B$_\\mathrm{c}$ [T]', fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_xlabel('H$_\\mathrm{c}$ [Oe]', fontsize=14)
ax1.minorticks_on()
ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
outputfile = X['sample'].value + '_Hc_PROFILE.pdf'
plt.savefig(outputfile, dpi=300, bbox_inches='tight')
plt.show
return X
def y_profile(X, Hc, Hb):
Hb1, Hb2 = Hb[0], Hb[1]
dH = X['dH']
NH = int(np.sqrt((Hb2 - Hb1) ** 2) / dH)
Hc0 = np.linspace(Hc, Hc, NH)
Hb0 = np.linspace(Hb1, Hb2, NH)
rho_int = X['Zint'](Hc0, Hb0)
coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))
CI_int = X['SEint'](Hc0, Hb0) * coef
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 1, 1)
if X['mass'].value > 0.0:
if X['unit'] == 'SI':
ax1.plot(Hb0, rho_int / (X['mass'].value / 1000.0), color='k')
ax1.fill_between(Hb0, (rho_int - CI_int) / (X['mass'].value /
1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),
color='lightgrey')
ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)
elif X['unit'] == 'cgs':
ax1.plot(Hb0, rho_int / X['mass'].value, color='k')
ax1.fill_between(Hb0, (rho_int - CI_int) / X['mass'].value, (
rho_int + CI_int) / X['mass'].value, color='lightgrey')
ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)
else:
ax1.plot(Hb0, rho_int, color='k')
ax1.fill_between(Hb0, rho_int - CI_int, rho_int + CI_int, color=
'lightgrey')
if X['unit'] == 'SI':
ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)
ax1.tick_params(axis='both', which='major', direction='out', length=5,
width=1, color='k', labelsize='14')
ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,
width=1, color='k')
if X['unit'] == 'SI':
ax1.set_xlabel('B$_\\mathrm{u}$ [T]', fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_xlabel('H$_\\mathrm{u}$ [Oe]', fontsize=14)
ax1.minorticks_on()
ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
outputfile = X['sample'].value + '_Hu_PROFILE.pdf'
plt.savefig(outputfile, dpi=300, bbox_inches='tight')
plt.show
return X
<|reserved_special_token_1|>
import numpy as np
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, Layout, VBox, HBox
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.tri as tri
import matplotlib.colors as colors
from matplotlib.colors import LinearSegmentedColormap
import scipy.stats as sps
import matplotlib.ticker as mtick
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
def forc(X):
Xi = X['Xi']
Yi = X['Yi']
Zi = X['Zi']
SEi = X['SEi']
Pi = X['Pi']
Hc1 = X['Hc1']
Hc2 = X['Hc2']
Hb1 = X['Hb1']
Hb2 = X['Hb2']
style = {'description_width': 'initial'}
colorbar_widge = widgets.Checkbox(value=False, description=
'Show final FORC plot', style=style)
pval_widge = widgets.Checkbox(value=False, description=
'Show 0.05 significance contour', style=style)
colormin_widge = widgets.FloatSlider(value=0.0, min=0.0, max=0.999,
step=0.001, description='Rescale colormap minimum', disabled=False,
continuous_update=False, orientation='horizontal', readout=False,
readout_format='.2f', style=style)
colormax_widge = widgets.FloatSlider(value=1.0, min=0.001, max=1, step=
0.001, description='Rescale colormap maximum', disabled=False,
continuous_update=False, orientation='horizontal', readout=False,
readout_format='.2f', style=style)
contour_widge = widgets.Select(options=[['Select contour frequency', -1
], ['Every level', 1], ['Every 2nd level', 2], ['Every 3rd level',
3], ['Every 4th level', 4], ['Every 5th level', 5], [
'Every 10th level', 10], ['Every 20th level', 20], [
'Every 50th level', 50]], value=-1, rows=1, description=
'Plot contours', style=style)
contourpts_widge = widgets.FloatSlider(value=1.0, min=0.5, max=3.0,
step=0.5, description='Contour line width [pts]', style=style)
download_widge = widgets.Checkbox(value=False, description=
'Download plot', style=style)
level_widge = widgets.Select(options=[['20', 20], ['30', 30], ['50', 50
], ['75', 75], ['100', 100], ['200', 200], ['500', 500]], value=100,
rows=1, description='Number of color levels', style=style)
if X['unit'] == 'SI':
xmin_widge = widgets.FloatText(value=0, description=
'Minimum B$_\\mathrm{c}$ [Oe]', style=style, step=10)
xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,
description='Maximum B$_\\mathrm{c}$ [Oe]', style=style, step=10)
ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /
1000, description='Minimum B$_\\mathrm{u}$ [Oe]', style=style,
step=10)
ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,
description='Maximum B$_\\mathrm{u}$ [Oe]', style=style, step=10)
elif X['unit'] == 'cgs':
xmin_widge = widgets.FloatText(value=0, description=
'Minimum H$_\\mathrm{c}$ [Oe]', style=style, step=10)
xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,
description='Maximum H$_\\mathrm{c}$ [Oe]', style=style, step=10)
ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /
1000, description='Minimum H$_\\mathrm{u}$ [Oe]', style=style,
step=10)
ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,
description='Maximum H$_\\mathrm{u}$ [Oe]', style=style, step=10)
x = interactive(forcplot, Xi=fixed(Xi), Yi=fixed(Yi), Zi=fixed(Zi), SEi
=fixed(SEi), Pi=fixed(Pi), fn=fixed(X['sample']), mass=fixed(X[
'mass']), unit=fixed(X['unit']), colorbar=colorbar_widge, level=
level_widge, contour=contour_widge, contourpts=contourpts_widge,
xmin=xmin_widge, xmax=xmax_widge, ymin=ymin_widge, ymax=ymax_widge,
colormin=colormin_widge, colormax=colormax_widge, download=
download_widge)
tab_nest = widgets.Tab()
tab_nest.set_title(0, 'FORC PLOTTING')
tab_nest.children = [VBox(children=x.children)]
display(tab_nest)
def forcplot(Xi, Yi, Zi, SEi, Pi, fn, mass, unit, colorbar, level, contour,
contourpts, xmin, xmax, ymin, ymax, colormin, colormax, download):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(1, 1, 1)
if mass.value < 0.0:
Xi_new = Xi
Yi_new = Yi
Zi_new = Zi
SEi_new = SEi
Pi_new = Pi
SEi_new[Zi_new == 0.0] = 0.0
SEi_new[np.isnan(SEi_new)] = 0.0
if unit == 'SI':
xlabel_text = 'B$_\\mathrm{c}$ [T]'
xlabel_csv = 'Bc [T]'
ylabel_text = 'B$_\\mathrm{u}$ [T]'
ylabel_csv = 'Bu [T]'
cbar_text = 'Am$^2$ T$^{-2}$'
se_csv = 'rho [Am**2 / T**2]'
elif unit == 'cgs':
xlabel_text = 'H$_\\mathrm{c}$ [Oe]'
xlabel_csv = 'Hc [Oe]'
ylabel_text = 'H$_\\mathrm{u}$ [Oe]'
ylabel_csv = 'Hu [Oe]'
cbar_text = 'emu$ Oe$^{-2}$'
se_csv = 'rho [emu / Oe**2]'
else:
Xi_new = Xi
Yi_new = Yi
Zi_new = Zi / (mass.value / 1000.0)
SEi_new = SEi / (mass.value / 1000.0)
SEi_new[Zi_new == 0.0] = 0.0
SEi_new[np.isnan(SEi_new)] = 0.0
Pi_new = Pi
if unit == 'SI':
Zi_new = Zi / (mass.value / 1000.0)
SEi_new = SEi / (mass.value / 1000.0)
xlabel_text = 'B$_\\mathrm{c}$ [T]'
xlabel_csv = 'Bc [T]'
ylabel_text = 'B$_\\mathrm{u}$ [T]'
ylabel_csv = 'Bu [T]'
cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$'
se_csv = 'se [Am**2 / T**2 / kg]'
elif unit == 'cgs':
Zi_new = Zi / mass.value
SEi_new = SEi / mass.value
xlabel_text = 'H$_\\mathrm{c}$ [Oe]'
xlabel_csv = 'Hc [Oe]'
ylabel_text = 'H$_\\mathrm{u}$ [Oe]'
ylabel_csv = 'Hu [Oe]'
cbar_text = 'emu Oe$^{-2}$ g$^{-1}$'
se_csv = 'se [emu/ Oe**2 / g]'
SEi_new[Zi_new == 0.0] = 0.0
SEi_new[np.isnan(SEi_new)] = 0.0
idx = (Xi_new >= xmin) & (Xi_new <= xmax) & (Yi_new >= ymin) & (Yi_new <=
ymax)
cmap, vmin, vmax = FORCinel_colormap(Zi_new[idx])
Zi_trunc = np.copy(Zi_new)
Zi_trunc[np.isnan(Zi_trunc)] = 0.0
Zi_trunc[Zi_trunc < vmin] = vmin
vmini = vmin * (1 - colormin)
vmaxi = vmax * colormax
idx = (Zi_trunc >= vmini) & (Zi_trunc <= vmaxi)
cmap, vmin, vmax = FORCinel_colormap(Zi_trunc[idx])
CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap=cmap, vmin=vmin,
vmax=vmax)
if (contour > 0) & (contour < level):
CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',
linewidths=contourpts)
ax.set_xlabel(xlabel_text, fontsize=14)
ax.set_ylabel(ylabel_text, fontsize=14)
xlimits = np.sort((xmin, xmax))
ax.set_xlim(xlimits)
ylimits = np.sort((ymin, ymax))
ax.set_ylim(ylimits)
ax.tick_params(labelsize=14)
ax.set_aspect('equal')
ax.minorticks_on()
if colorbar == True:
cbar = fig.colorbar(CS, fraction=0.04, pad=0.08, format='%.2e')
cbar.ax.tick_params(labelsize=14)
cbar.set_label(cbar_text, fontsize=14)
if download == True:
outputfile = fn.value + '_FORC.pdf'
plt.savefig(outputfile, dpi=300, bbox_inches='tight')
ar = np.column_stack((np.reshape(Xi_new, (-1, 1)), np.reshape(
Yi_new, (-1, 1)), np.reshape(Zi_trunc, (-1, 1)), np.reshape(SEi,
(-1, 1))))
outputfile = fn.value + '_XYZ.csv'
with open(outputfile, 'w') as fp:
fp.write(xlabel_csv + ',' + ylabel_csv + ',' + se_csv + ',' +
se_csv + '\n')
np.savetxt(fp, ar, '%s', ',')
plt.show()
def FORCinel_colormap(Z):
cdict = {'red': ((0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 /
255), (0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255
), (0.3193, 102 / 255, 102 / 255), (0.563, 204 / 255, 204 / 255), (
0.6975, 204 / 255, 204 / 255), (0.8319, 153 / 255, 153 / 255), (
0.9748, 76 / 255, 76 / 255), (1.0, 76 / 255, 76 / 255)), 'green': (
(0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 / 255), (
0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (
0.3193, 178 / 255, 178 / 255), (0.563, 204 / 255, 204 / 255), (
0.6975, 76 / 255, 76 / 255), (0.8319, 102 / 255, 102 / 255), (
0.9748, 25 / 255, 25 / 255), (1.0, 25 / 255, 25 / 255)), 'blue': ((
0.0, 255 / 255, 255 / 255), (0.1387, 255 / 255, 255 / 255), (0.1597,
255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (0.3193, 102 /
255, 102 / 255), (0.563, 76 / 255, 76 / 255), (0.6975, 76 / 255, 76 /
255), (0.8319, 153 / 255, 153 / 255), (0.9748, 76 / 255, 76 / 255),
(1.0, 76 / 255, 76 / 255))}
if np.abs(np.min(Z)) <= np.max(Z) * 0.19:
vmin = -np.max(Z) * 0.19
vmax = np.max(Z)
else:
vmin = np.min(Z)
vmax = np.max(Z)
anchors = np.zeros(10)
anchors[1] = (-0.025 * vmax - vmin) / (vmax - vmin)
anchors[2] = (-0.005 * vmax - vmin) / (vmax - vmin)
anchors[3] = (0.025 * vmax - vmin) / (vmax - vmin)
anchors[4] = (0.19 * vmax - vmin) / (vmax - vmin)
anchors[5] = (0.48 * vmax - vmin) / (vmax - vmin)
anchors[6] = (0.64 * vmax - vmin) / (vmax - vmin)
anchors[7] = (0.8 * vmax - vmin) / (vmax - vmin)
anchors[8] = (0.97 * vmax - vmin) / (vmax - vmin)
anchors[9] = 1.0
Rlst = list(cdict['red'])
Glst = list(cdict['green'])
Blst = list(cdict['blue'])
for i in range(9):
Rlst[i] = tuple((anchors[i], Rlst[i][1], Rlst[i][2]))
Glst[i] = tuple((anchors[i], Glst[i][1], Glst[i][2]))
Blst[i] = tuple((anchors[i], Blst[i][1], Blst[i][2]))
cdict['red'] = tuple(Rlst)
cdict['green'] = tuple(Glst)
cdict['blue'] = tuple(Blst)
cmap = LinearSegmentedColormap('forc_cmap', cdict)
return cmap, vmin, vmax
def profile_options(X):
Hb1 = X['Hb1'] - X['Hc2']
Hb2 = X['Hb2']
Hc1 = np.maximum(X['Hc1'], 0)
Hc2 = X['Hc2']
style = {'description_width': 'initial'}
HL = widgets.HTML(value=
'<hr style="height:3px;border:none;color:#333;background-color:#333;" />'
)
P_title = widgets.HTML(value='<h3>Select profile type:</h3>')
P_widge = widgets.RadioButtons(options=[('Horizontal profile', 0), (
'Vertical profile', 1)], value=0, style=style)
H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>')
if X['unit'] == 'SI':
x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=
0.001, description='B$_u$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
elif X['unit'] == 'cgs':
x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=
10, description='H$_u$ [Oe]', disabled=False, continuous_update
=False, orientation='horizontal', readout=True, readout_format=
'.3f', layout={'width': '350px'}, style=style)
if X['unit'] == 'SI':
x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,
max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
elif X['unit'] == 'cgs':
x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,
max=Hc2, step=10, description='H$_c$ [Oe]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>')
if X['unit'] == 'SI':
y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,
max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
elif X['unit'] == 'cgs':
y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,
max=Hc2, step=10, description='H$_c$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
if X['unit'] == 'SI':
y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,
max=Hb2, step=0.001, description='B$_u$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
elif X['unit'] == 'cgs':
y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,
max=Hb2, step=10, description='H$_u$ [T]', disabled=False,
continuous_update=False, orientation='horizontal', readout=True,
readout_format='.3f', layout={'width': '350px'}, style=style)
profile_widge = VBox([P_title, P_widge, HL, H_title, x_Hb_widge,
x_Hc_widge, HL, V_title, y_Hc_widge, y_Hb_widge])
profile_nest = widgets.Tab()
profile_nest.children = [profile_widge]
profile_nest.set_title(0, 'PLOT PROFILES')
display(profile_nest)
X['P_widge'] = P_widge
X['x_Hb_widge'] = x_Hb_widge
X['x_Hc_widge'] = x_Hc_widge
X['y_Hc_widge'] = y_Hc_widge
X['y_Hb_widge'] = y_Hb_widge
return X
def profile_plot(X):
if X['P_widge'].value == 0:
X = x_profile(X, X['x_Hc_widge'].value, X['x_Hb_widge'].value)
else:
X = y_profile(X, X['y_Hc_widge'].value, X['y_Hb_widge'].value)
return X
def x_profile(X, Hc, Hb):
Hc1, Hc2 = Hc[0], Hc[1]
dH = X['dH']
NH = int(np.sqrt((Hc2 - Hc1) ** 2) / dH)
Hc0 = np.linspace(Hc1, Hc2, NH)
Hb0 = np.linspace(Hb, Hb, NH)
rho_int = X['Zint'](Hc0, Hb0)
coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))
CI_int = X['SEint'](Hc0, Hb0) * coef
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 1, 1)
if X['mass'].value > 0.0:
if X['unit'] == 'SI':
ax1.plot(Hc0, rho_int / (X['mass'].value / 1000.0), color='k')
ax1.fill_between(Hc0, (rho_int - CI_int) / (X['mass'].value /
1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),
color='lightgrey')
ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)
elif X['unit'] == 'cgs':
ax1.plot(Hc0, rho_int / X['mass'].value, color='k')
ax1.fill_between(Hc0, (rho_int - CI_int) / X['mass'].value, (
rho_int + CI_int) / X['mass'].value, color='lightgrey')
ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)
else:
ax1.plot(Hc0, rho_int, color='k')
ax1.fill_between(Hc0, rho_int - CI_int, rho_int + CI_int, color=
'lightgrey')
if X['unit'] == 'SI':
ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)
ax1.tick_params(axis='both', which='major', direction='out', length=5,
width=1, color='k', labelsize='14')
ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,
width=1, color='k')
if X['unit'] == 'SI':
ax1.set_xlabel('B$_\\mathrm{c}$ [T]', fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_xlabel('H$_\\mathrm{c}$ [Oe]', fontsize=14)
ax1.minorticks_on()
ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
outputfile = X['sample'].value + '_Hc_PROFILE.pdf'
plt.savefig(outputfile, dpi=300, bbox_inches='tight')
plt.show
return X
def y_profile(X, Hc, Hb):
Hb1, Hb2 = Hb[0], Hb[1]
dH = X['dH']
NH = int(np.sqrt((Hb2 - Hb1) ** 2) / dH)
Hc0 = np.linspace(Hc, Hc, NH)
Hb0 = np.linspace(Hb1, Hb2, NH)
rho_int = X['Zint'](Hc0, Hb0)
coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))
CI_int = X['SEint'](Hc0, Hb0) * coef
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_subplot(1, 1, 1)
if X['mass'].value > 0.0:
if X['unit'] == 'SI':
ax1.plot(Hb0, rho_int / (X['mass'].value / 1000.0), color='k')
ax1.fill_between(Hb0, (rho_int - CI_int) / (X['mass'].value /
1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),
color='lightgrey')
ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)
elif X['unit'] == 'cgs':
ax1.plot(Hb0, rho_int / X['mass'].value, color='k')
ax1.fill_between(Hb0, (rho_int - CI_int) / X['mass'].value, (
rho_int + CI_int) / X['mass'].value, color='lightgrey')
ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)
else:
ax1.plot(Hb0, rho_int, color='k')
ax1.fill_between(Hb0, rho_int - CI_int, rho_int + CI_int, color=
'lightgrey')
if X['unit'] == 'SI':
ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)
ax1.tick_params(axis='both', which='major', direction='out', length=5,
width=1, color='k', labelsize='14')
ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,
width=1, color='k')
if X['unit'] == 'SI':
ax1.set_xlabel('B$_\\mathrm{u}$ [T]', fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_xlabel('H$_\\mathrm{u}$ [Oe]', fontsize=14)
ax1.minorticks_on()
ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
outputfile = X['sample'].value + '_Hu_PROFILE.pdf'
plt.savefig(outputfile, dpi=300, bbox_inches='tight')
plt.show
return X
<|reserved_special_token_1|>
import numpy as np
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, Layout, VBox, HBox
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.tri as tri
import matplotlib.colors as colors
from matplotlib.colors import LinearSegmentedColormap
import scipy.stats as sps
import matplotlib.ticker as mtick
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
#### FORC plotting ####
def forc(X):
#unpack data
Xi = X['Xi']
Yi = X['Yi']
Zi = X['Zi']
SEi = X['SEi']
Pi = X['Pi']
Hc1 = X['Hc1']
Hc2 = X['Hc2']
Hb1 = X['Hb1']
Hb2 = X['Hb2']
#Set up widgets for interactive plot
style = {'description_width': 'initial'} #general style settings
#DEFINE INTERACTIVE WIDGETS
#should a colorbar be included
colorbar_widge = widgets.Checkbox(value=False, description = 'Show final FORC plot',style=style)
pval_widge = widgets.Checkbox(value=False, description = 'Show 0.05 significance contour',style=style)
colormin_widge = widgets.FloatSlider(
value=0.0,
min=0.00,
max=0.999,
step=0.001,
description='Rescale colormap minimum',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=False,
readout_format='.2f',
style=style
)
colormax_widge = widgets.FloatSlider(
value=1.0,
min=0.001,
max=1,
step=0.001,
description='Rescale colormap maximum',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=False,
readout_format='.2f',
style=style
)
#Frequency for contour lines to be included in plot
contour_widge = widgets.Select(
options=[['Select contour frequency',-1],
['Every level',1],
['Every 2nd level',2],
['Every 3rd level',3],
['Every 4th level',4],
['Every 5th level',5],
['Every 10th level',10],
['Every 20th level',20],
['Every 50th level',50],
],
value=-1,
rows=1,
description='Plot contours',style=style)
contourpts_widge = widgets.FloatSlider(value=1.0,min=0.5,max=3.0,step=0.5, description = 'Contour line width [pts]',style=style)
#check box for plot download
download_widge = widgets.Checkbox(value=False, description = 'Download plot',style=style)
#How many contour levels should be included
level_widge = widgets.Select(
options=[['20',20],['30',30],['50',50],['75',75],['100',100],['200',200],['500',500]],
value=100,
rows=1,
description='Number of color levels',style=style)
#plot limit widgets
if X['unit']=='SI':
xmin_widge = widgets.FloatText(value=0,description='Minimum B$_\mathrm{c}$ [Oe]',style=style,step=10)
xmax_widge = widgets.FloatText(value=np.round(Hc2*1000)/1000,description='Maximum B$_\mathrm{c}$ [Oe]',style=style,step=10)
ymin_widge = widgets.FloatText(value=np.round((Hb1-Hc2)*1000)/1000,description='Minimum B$_\mathrm{u}$ [Oe]',style=style,step=10)
ymax_widge = widgets.FloatText(value=np.round(Hb2*1000)/1000,description='Maximum B$_\mathrm{u}$ [Oe]',style=style,step=10)
elif X['unit']=='cgs':
xmin_widge = widgets.FloatText(value=0,description='Minimum H$_\mathrm{c}$ [Oe]',style=style,step=10)
xmax_widge = widgets.FloatText(value=np.round(Hc2*1000)/1000,description='Maximum H$_\mathrm{c}$ [Oe]',style=style,step=10)
ymin_widge = widgets.FloatText(value=np.round((Hb1-Hc2)*1000)/1000,description='Minimum H$_\mathrm{u}$ [Oe]',style=style,step=10)
ymax_widge = widgets.FloatText(value=np.round(Hb2*1000)/1000,description='Maximum H$_\mathrm{u}$ [Oe]',style=style,step=10)
#launch the interactive FORC plot
x = interactive(forcplot,
Xi=fixed(Xi), #X point grid
Yi=fixed(Yi), #Y point grid
Zi=fixed(Zi), #interpolated Z values
SEi = fixed(SEi), #interpolated standard errors
Pi = fixed(Pi), #P values
fn=fixed(X['sample']), #File information
mass=fixed(X['mass']), #Preprocessing information
unit=fixed(X['unit']),
colorbar=colorbar_widge, #Include colorbar
level=level_widge, #Number of levels to plot
contour=contour_widge, #Contour levels to plot
contourpts=contourpts_widge, #Contour line width
xmin=xmin_widge, #X-minimum
xmax=xmax_widge, #X-maximum
ymin=ymin_widge, #Y-minimum
ymax=ymax_widge, #Y-maximum
colormin = colormin_widge, #adjust colormap minimum
colormax = colormax_widge, #adjust colormap minimum
download = download_widge #download plot
)
#create tabs
tab_nest = widgets.Tab()
# tab_nest.children = [tab_visualise]
tab_nest.set_title(0, 'FORC PLOTTING')
#interact function in isolation
tab_nest.children = [VBox(children = x.children)]
display(tab_nest)
#display(x) #display the interactive plot
def forcplot(Xi,Yi,Zi,SEi,Pi,fn,mass,unit,colorbar,level,contour,contourpts,xmin,xmax,ymin,ymax,colormin,colormax,download):
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(1,1,1)
if mass.value<0.0:
Xi_new = Xi
Yi_new = Yi
Zi_new = Zi
SEi_new = SEi
Pi_new = Pi
SEi_new[Zi_new==0.0]=0.0
SEi_new[np.isnan(SEi_new)]=0.0
if unit=='SI':
xlabel_text = 'B$_\mathrm{c}$ [T]' #label Hc axis [SI units]
xlabel_csv = 'Bc [T]'
ylabel_text = 'B$_\mathrm{u}$ [T]' #label Hu axis [SI units]
ylabel_csv = 'Bu [T]'
cbar_text = 'Am$^2$ T$^{-2}$'
se_csv = 'rho [Am**2 / T**2]'
elif unit=='cgs':
xlabel_text = 'H$_\mathrm{c}$ [Oe]' #label Hc axis [SI units]
xlabel_csv = 'Hc [Oe]'
ylabel_text = 'H$_\mathrm{u}$ [Oe]' #label Hu axis [SI units]
ylabel_csv = 'Hu [Oe]'
cbar_text = 'emu$ Oe$^{-2}$'
se_csv = 'rho [emu / Oe**2]'
else:
Xi_new = Xi
Yi_new = Yi
Zi_new = Zi / (mass.value/1000.0)
SEi_new = SEi / (mass.value/1000.0)
SEi_new[Zi_new==0.0]=0.0
SEi_new[np.isnan(SEi_new)]=0.0
Pi_new = Pi
if unit=='SI':
Zi_new = Zi / (mass.value/1000.0)
SEi_new = SEi / (mass.value/1000.0)
xlabel_text = 'B$_\mathrm{c}$ [T]' #label Hc axis [SI units]
xlabel_csv = 'Bc [T]'
ylabel_text = 'B$_\mathrm{u}$ [T]' #label Hu axis [SI units]
ylabel_csv = 'Bu [T]'
cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$'
se_csv = 'se [Am**2 / T**2 / kg]'
elif unit=='cgs':
Zi_new = Zi / (mass.value)
SEi_new = SEi / (mass.value)
xlabel_text = 'H$_\mathrm{c}$ [Oe]' #label Hc axis [SI units]
xlabel_csv = 'Hc [Oe]'
ylabel_text = 'H$_\mathrm{u}$ [Oe]' #label Hu axis [SI units]
ylabel_csv = 'Hu [Oe]'
cbar_text = 'emu Oe$^{-2}$ g$^{-1}$'
se_csv = 'se [emu/ Oe**2 / g]'
SEi_new[Zi_new==0.0]=0.0
SEi_new[np.isnan(SEi_new)]=0.0
#define colormaps
idx=(Xi_new>=xmin) & (Xi_new<=xmax) & (Yi_new>=ymin) & (Yi_new<=ymax) #find points currently in view
cmap,vmin,vmax = FORCinel_colormap(Zi_new[idx])
#cmap, norm = FORCinel_colormap(Zi_new[idx])
Zi_trunc = np.copy(Zi_new)
Zi_trunc[np.isnan(Zi_trunc)] = 0.0
Zi_trunc[Zi_trunc<vmin]=vmin
vmini = vmin*(1-colormin)
vmaxi = vmax*colormax
idx = (Zi_trunc>=vmini) & (Zi_trunc<=vmaxi)
cmap,vmin,vmax = FORCinel_colormap(Zi_trunc[idx])
CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap = cmap, vmin=vmin, vmax=vmax)
if (contour>0) & (contour<level):
CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',linewidths=contourpts)
#if pval==True:
# CS3 = ax.contour(Xi_new, Yi_new, Pi_new, levels=[0.05], colors=['r'])
ax.set_xlabel(xlabel_text,fontsize=14) #label Hc axis [SI units]
ax.set_ylabel(ylabel_text,fontsize=14) #label Hu axis [SI units]
# Set plot Xlimits
xlimits = np.sort((xmin,xmax))
ax.set_xlim(xlimits)
#Set plot Ylimits
ylimits = np.sort((ymin,ymax))
ax.set_ylim(ylimits)
#Set ticks and plot aspect ratio
ax.tick_params(labelsize=14)
ax.set_aspect('equal') #set 1:1 aspect ratio
ax.minorticks_on() #add minor ticks
#Add colorbar
if colorbar == True:
cbar = fig.colorbar(CS,fraction=0.04, pad=0.08,format='%.2e')
cbar.ax.tick_params(labelsize=14)
#cbar.ax.set_title(cbar_text,fontsize=14)
cbar.set_label(cbar_text,fontsize=14)
#cbar.ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
#Activate download to same folder as data file
if download==True:
outputfile = fn.value+'_FORC.pdf'
plt.savefig(outputfile, dpi=300, bbox_inches="tight")
ar = np.column_stack((np.reshape(Xi_new,(-1,1)),np.reshape(Yi_new,(-1,1)),np.reshape(Zi_trunc,(-1,1)),np.reshape(SEi,(-1,1))))
outputfile = fn.value+'_XYZ.csv'
with open(outputfile, 'w') as fp:
fp.write(xlabel_csv+','+ylabel_csv+','+se_csv+','+se_csv + '\n')
np.savetxt(fp, ar, '%s', ',')
#show the final plot
plt.show()
def FORCinel_colormap(Z):
#setup initial colormap assuming that negative range does not require extension
cdict = {'red': ((0.0, 127/255, 127/255),
(0.1387, 255/255, 255/255),
(0.1597, 255/255, 255/255),
(0.1807, 255/255, 255/255),
(0.3193, 102/255, 102/255),
(0.563, 204/255, 204/255),
(0.6975, 204/255, 204/255),
(0.8319, 153/255, 153/255),
(0.9748, 76/255, 76/255),
(1.0, 76/255, 76/255)),
'green': ((0.0, 127/255, 127/255),
(0.1387, 255/255, 255/255),
(0.1597, 255/255, 255/255),
(0.1807, 255/255, 255/255),
(0.3193, 178/255, 178/255),
(0.563, 204/255, 204/255),
(0.6975, 76/255, 76/255),
(0.8319, 102/255, 102/255),
(0.9748, 25/255, 25/255),
(1.0, 25/255, 25/255)),
'blue': ((0.0, 255/255, 255/255),
(0.1387, 255/255, 255/255),
(0.1597, 255/255, 255/255),
(0.1807, 255/255, 255/255),
(0.3193, 102/255, 102/255),
(0.563, 76/255, 76/255),
(0.6975, 76/255, 76/255),
(0.8319, 153/255, 153/255),
(0.9748, 76/255, 76/255),
(1.0, 76/255, 76/255))}
if np.abs(np.min(Z))<=np.max(Z)*0.19: #negative extension is not required
#cmap = LinearSegmentedColormap('forc_cmap', cdict)
vmin = -np.max(Z)*0.19
vmax = np.max(Z)
else: #negative extension is required
vmin=np.min(Z)
vmax=np.max(Z)
anchors = np.zeros(10)
anchors[1]=(-0.025*vmax-vmin)/(vmax-vmin)
anchors[2]=(-0.005*vmax-vmin)/(vmax-vmin)
anchors[3]=(0.025*vmax-vmin)/(vmax-vmin)
anchors[4]=(0.19*vmax-vmin)/(vmax-vmin)
anchors[5]=(0.48*vmax-vmin)/(vmax-vmin)
anchors[6]=(0.64*vmax-vmin)/(vmax-vmin)
anchors[7]=(0.80*vmax-vmin)/(vmax-vmin)
anchors[8]=(0.97*vmax-vmin)/(vmax-vmin)
anchors[9]=1.0
Rlst = list(cdict['red'])
Glst = list(cdict['green'])
Blst = list(cdict['blue'])
for i in range(9):
Rlst[i] = tuple((anchors[i],Rlst[i][1],Rlst[i][2]))
Glst[i] = tuple((anchors[i],Glst[i][1],Glst[i][2]))
Blst[i] = tuple((anchors[i],Blst[i][1],Blst[i][2]))
cdict['red'] = tuple(Rlst)
cdict['green'] = tuple(Glst)
cdict['blue'] = tuple(Blst)
cmap = LinearSegmentedColormap('forc_cmap', cdict)
return cmap, vmin, vmax
#### Profile Plotting ####
#### Profile plotting ####
def profile_options(X):
Hb1 = X['Hb1']-X['Hc2']
Hb2 = X['Hb2']
Hc1 = np.maximum(X['Hc1'],0)
Hc2 = X['Hc2']
style = {'description_width': 'initial'} #general style settings
HL = widgets.HTML(value='<hr style="height:3px;border:none;color:#333;background-color:#333;" />')
P_title = widgets.HTML(value='<h3>Select profile type:</h3>')
P_widge = widgets.RadioButtons(options=[('Horizontal profile',0), ('Vertical profile',1)],
value=0,
style=style)
H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>')
if X['unit'] == 'SI':
x_Hb_widge = widgets.FloatSlider(
value=0.0,
min=Hb1,
max=Hb2,
step=0.001,
description='B$_u$ [T]',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.3f',
layout={'width': '350px'},
style = style
)
elif X['unit'] == 'cgs':
x_Hb_widge = widgets.FloatSlider(
value=0.0,
min=Hb1,
max=Hb2,
step=10,
description='H$_u$ [Oe]',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.3f',
layout={'width': '350px'},
style = style
)
if X['unit'] == 'SI':
x_Hc_widge = widgets.FloatRangeSlider(
value=[Hc1,Hc2],
min=Hc1,
max=Hc2,
step=0.001,
description='B$_c$ [T]',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.3f',
layout={'width': '350px'},
style = style
)
elif X['unit'] == 'cgs':
x_Hc_widge = widgets.FloatRangeSlider(
value=[Hc1,Hc2],
min=Hc1,
max=Hc2,
step=10,
description='H$_c$ [Oe]',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.3f',
layout={'width': '350px'},
style = style
)
V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>')
if X['unit'] == 'SI':
y_Hc_widge = widgets.FloatSlider(
value=(Hc1+Hc2)/2.0,
min=Hc1,
max=Hc2,
step=0.001,
description='B$_c$ [T]',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.3f',
layout={'width': '350px'},
style = style
)
elif X['unit'] == 'cgs':
y_Hc_widge = widgets.FloatSlider(
value=(Hc1+Hc2)/2.0,
min=Hc1,
max=Hc2,
step=10,
description='H$_c$ [T]',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.3f',
layout={'width': '350px'},
style = style
)
if X['unit'] == 'SI':
y_Hb_widge = widgets.FloatRangeSlider(
value=[Hb1,Hb2],
min=Hb1,
max=Hb2,
step=0.001,
description='B$_u$ [T]',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.3f',
layout={'width': '350px'},
style = style
)
elif X['unit'] == 'cgs':
y_Hb_widge = widgets.FloatRangeSlider(
value=[Hb1,Hb2],
min=Hb1,
max=Hb2,
step=10,
description='H$_u$ [T]',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.3f',
layout={'width': '350px'},
style = style
)
profile_widge = VBox([P_title,P_widge,HL,H_title,x_Hb_widge,x_Hc_widge, \
HL,V_title,y_Hc_widge,y_Hb_widge])
profile_nest = widgets.Tab()
profile_nest.children = [profile_widge]
profile_nest.set_title(0, 'PLOT PROFILES')
display(profile_nest)
X['P_widge'] = P_widge
X['x_Hb_widge'] = x_Hb_widge
X['x_Hc_widge'] = x_Hc_widge
X['y_Hc_widge'] = y_Hc_widge
X['y_Hb_widge'] = y_Hb_widge
return X
def profile_plot(X):
if X['P_widge'].value==0:
X = x_profile(X,X['x_Hc_widge'].value,X['x_Hb_widge'].value)
else:
X = y_profile(X,X['y_Hc_widge'].value,X['y_Hb_widge'].value)
return X
def x_profile(X,Hc,Hb):
Hc1, Hc2 = Hc[0], Hc[1]
dH = X['dH']
NH = int(np.sqrt((Hc2-Hc1)**2)/dH)
Hc0 = np.linspace(Hc1,Hc2,NH)
Hb0 = np.linspace(Hb,Hb,NH)
rho_int = X['Zint'](Hc0,Hb0)
coef = sps.norm.ppf(0.025/np.sum(rho_int.mask==False))
CI_int = X['SEint'](Hc0,Hb0)*coef
fig = plt.figure(figsize=(5,5))
ax1 = fig.add_subplot(1,1,1)
if X['mass'].value>0.0:
if X['unit'] == 'SI':
ax1.plot(Hc0,rho_int/(X['mass'].value/1000.0),color='k')
ax1.fill_between(Hc0, (rho_int-CI_int)/(X['mass'].value/1000.0), (rho_int+CI_int)/(X['mass'].value/1000.0),color='lightgrey')
ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$',fontsize=14)
elif X['unit'] == 'cgs':
ax1.plot(Hc0,rho_int/(X['mass'].value),color='k')
ax1.fill_between(Hc0, (rho_int-CI_int)/(X['mass'].value), (rho_int+CI_int)/(X['mass'].value),color='lightgrey')
ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$',fontsize=14)
else:
ax1.plot(Hc0,rho_int,color='k')
ax1.fill_between(Hc0, (rho_int-CI_int), (rho_int+CI_int),color='lightgrey')
if X['unit'] == 'SI':
ax1.set_ylabel('Am$^2$ T$^{-2}$',fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_ylabel('emu Oe$^{-2}$',fontsize=14)
ax1.tick_params(axis='both',which='major',direction='out',length=5,width=1,color='k',labelsize='14')
ax1.tick_params(axis='both',which='minor',direction='out',length=3.5,width=1,color='k')
if X['unit'] == 'SI':
ax1.set_xlabel('B$_\mathrm{c}$ [T]',fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_xlabel('H$_\mathrm{c}$ [Oe]',fontsize=14)
ax1.minorticks_on()
ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
outputfile = X['sample'].value+'_Hc_PROFILE.pdf'
plt.savefig(outputfile, dpi=300, bbox_inches="tight")
plt.show
return X
def y_profile(X,Hc,Hb):
Hb1, Hb2 = Hb[0], Hb[1]
dH = X['dH']
NH = int(np.sqrt((Hb2-Hb1)**2)/dH)
Hc0 = np.linspace(Hc,Hc,NH)
Hb0 = np.linspace(Hb1,Hb2,NH)
rho_int = X['Zint'](Hc0,Hb0)
coef = sps.norm.ppf(0.025/np.sum(rho_int.mask==False))
CI_int = X['SEint'](Hc0,Hb0)*coef
fig = plt.figure(figsize=(5,5))
ax1 = fig.add_subplot(1,1,1)
if X['mass'].value>0.0:
if X['unit'] == 'SI':
ax1.plot(Hb0,rho_int/(X['mass'].value/1000.0),color='k')
ax1.fill_between(Hb0, (rho_int-CI_int)/(X['mass'].value/1000.0), (rho_int+CI_int)/(X['mass'].value/1000.0),color='lightgrey')
ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$',fontsize=14)
elif X['unit'] == 'cgs':
ax1.plot(Hb0,rho_int/(X['mass'].value),color='k')
ax1.fill_between(Hb0, (rho_int-CI_int)/(X['mass'].value), (rho_int+CI_int)/(X['mass'].value),color='lightgrey')
ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$',fontsize=14)
else:
ax1.plot(Hb0,rho_int,color='k')
ax1.fill_between(Hb0, (rho_int-CI_int), (rho_int+CI_int),color='lightgrey')
if X['unit'] == 'SI':
ax1.set_ylabel('Am$^2$ T$^{-2}$',fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_ylabel('emu Oe$^{-2}$',fontsize=14)
ax1.tick_params(axis='both',which='major',direction='out',length=5,width=1,color='k',labelsize='14')
ax1.tick_params(axis='both',which='minor',direction='out',length=3.5,width=1,color='k')
if X['unit'] == 'SI':
ax1.set_xlabel('B$_\mathrm{u}$ [T]',fontsize=14)
elif X['unit'] == 'cgs':
ax1.set_xlabel('H$_\mathrm{u}$ [Oe]',fontsize=14)
ax1.minorticks_on()
ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))
outputfile = X['sample'].value+'_Hu_PROFILE.pdf'
plt.savefig(outputfile, dpi=300, bbox_inches="tight")
plt.show
return X
|
flexible
|
{
"blob_id": "e5a4ae2ec0fab1ca8cdce229c69725ece2dcc476",
"index": 8272,
"step-1": "<mask token>\n\n\ndef forc(X):\n Xi = X['Xi']\n Yi = X['Yi']\n Zi = X['Zi']\n SEi = X['SEi']\n Pi = X['Pi']\n Hc1 = X['Hc1']\n Hc2 = X['Hc2']\n Hb1 = X['Hb1']\n Hb2 = X['Hb2']\n style = {'description_width': 'initial'}\n colorbar_widge = widgets.Checkbox(value=False, description=\n 'Show final FORC plot', style=style)\n pval_widge = widgets.Checkbox(value=False, description=\n 'Show 0.05 significance contour', style=style)\n colormin_widge = widgets.FloatSlider(value=0.0, min=0.0, max=0.999,\n step=0.001, description='Rescale colormap minimum', disabled=False,\n continuous_update=False, orientation='horizontal', readout=False,\n readout_format='.2f', style=style)\n colormax_widge = widgets.FloatSlider(value=1.0, min=0.001, max=1, step=\n 0.001, description='Rescale colormap maximum', disabled=False,\n continuous_update=False, orientation='horizontal', readout=False,\n readout_format='.2f', style=style)\n contour_widge = widgets.Select(options=[['Select contour frequency', -1\n ], ['Every level', 1], ['Every 2nd level', 2], ['Every 3rd level', \n 3], ['Every 4th level', 4], ['Every 5th level', 5], [\n 'Every 10th level', 10], ['Every 20th level', 20], [\n 'Every 50th level', 50]], value=-1, rows=1, description=\n 'Plot contours', style=style)\n contourpts_widge = widgets.FloatSlider(value=1.0, min=0.5, max=3.0,\n step=0.5, description='Contour line width [pts]', style=style)\n download_widge = widgets.Checkbox(value=False, description=\n 'Download plot', style=style)\n level_widge = widgets.Select(options=[['20', 20], ['30', 30], ['50', 50\n ], ['75', 75], ['100', 100], ['200', 200], ['500', 500]], value=100,\n rows=1, description='Number of color levels', style=style)\n if X['unit'] == 'SI':\n xmin_widge = widgets.FloatText(value=0, description=\n 'Minimum B$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,\n description='Maximum B$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /\n 1000, description='Minimum B$_\\\\mathrm{u}$ [Oe]', style=style,\n step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,\n description='Maximum B$_\\\\mathrm{u}$ [Oe]', style=style, step=10)\n elif X['unit'] == 'cgs':\n xmin_widge = widgets.FloatText(value=0, description=\n 'Minimum H$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,\n description='Maximum H$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /\n 1000, description='Minimum H$_\\\\mathrm{u}$ [Oe]', style=style,\n step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,\n description='Maximum H$_\\\\mathrm{u}$ [Oe]', style=style, step=10)\n x = interactive(forcplot, Xi=fixed(Xi), Yi=fixed(Yi), Zi=fixed(Zi), SEi\n =fixed(SEi), Pi=fixed(Pi), fn=fixed(X['sample']), mass=fixed(X[\n 'mass']), unit=fixed(X['unit']), colorbar=colorbar_widge, level=\n level_widge, contour=contour_widge, contourpts=contourpts_widge,\n xmin=xmin_widge, xmax=xmax_widge, ymin=ymin_widge, ymax=ymax_widge,\n colormin=colormin_widge, colormax=colormax_widge, download=\n download_widge)\n tab_nest = widgets.Tab()\n tab_nest.set_title(0, 'FORC PLOTTING')\n tab_nest.children = [VBox(children=x.children)]\n display(tab_nest)\n\n\ndef forcplot(Xi, Yi, Zi, SEi, Pi, fn, mass, unit, colorbar, level, contour,\n contourpts, xmin, xmax, ymin, ymax, colormin, colormax, download):\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(1, 1, 1)\n if mass.value < 0.0:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi\n SEi_new = SEi\n Pi_new = Pi\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n if unit == 'SI':\n xlabel_text = 'B$_\\\\mathrm{c}$ [T]'\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\\\mathrm{u}$ [T]'\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$'\n se_csv = 'rho [Am**2 / T**2]'\n elif unit == 'cgs':\n xlabel_text = 'H$_\\\\mathrm{c}$ [Oe]'\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\\\mathrm{u}$ [Oe]'\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu$ Oe$^{-2}$'\n se_csv = 'rho [emu / Oe**2]'\n else:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi / (mass.value / 1000.0)\n SEi_new = SEi / (mass.value / 1000.0)\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n Pi_new = Pi\n if unit == 'SI':\n Zi_new = Zi / (mass.value / 1000.0)\n SEi_new = SEi / (mass.value / 1000.0)\n xlabel_text = 'B$_\\\\mathrm{c}$ [T]'\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\\\mathrm{u}$ [T]'\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$'\n se_csv = 'se [Am**2 / T**2 / kg]'\n elif unit == 'cgs':\n Zi_new = Zi / mass.value\n SEi_new = SEi / mass.value\n xlabel_text = 'H$_\\\\mathrm{c}$ [Oe]'\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\\\mathrm{u}$ [Oe]'\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu Oe$^{-2}$ g$^{-1}$'\n se_csv = 'se [emu/ Oe**2 / g]'\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n idx = (Xi_new >= xmin) & (Xi_new <= xmax) & (Yi_new >= ymin) & (Yi_new <=\n ymax)\n cmap, vmin, vmax = FORCinel_colormap(Zi_new[idx])\n Zi_trunc = np.copy(Zi_new)\n Zi_trunc[np.isnan(Zi_trunc)] = 0.0\n Zi_trunc[Zi_trunc < vmin] = vmin\n vmini = vmin * (1 - colormin)\n vmaxi = vmax * colormax\n idx = (Zi_trunc >= vmini) & (Zi_trunc <= vmaxi)\n cmap, vmin, vmax = FORCinel_colormap(Zi_trunc[idx])\n CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap=cmap, vmin=vmin,\n vmax=vmax)\n if (contour > 0) & (contour < level):\n CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',\n linewidths=contourpts)\n ax.set_xlabel(xlabel_text, fontsize=14)\n ax.set_ylabel(ylabel_text, fontsize=14)\n xlimits = np.sort((xmin, xmax))\n ax.set_xlim(xlimits)\n ylimits = np.sort((ymin, ymax))\n ax.set_ylim(ylimits)\n ax.tick_params(labelsize=14)\n ax.set_aspect('equal')\n ax.minorticks_on()\n if colorbar == True:\n cbar = fig.colorbar(CS, fraction=0.04, pad=0.08, format='%.2e')\n cbar.ax.tick_params(labelsize=14)\n cbar.set_label(cbar_text, fontsize=14)\n if download == True:\n outputfile = fn.value + '_FORC.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n ar = np.column_stack((np.reshape(Xi_new, (-1, 1)), np.reshape(\n Yi_new, (-1, 1)), np.reshape(Zi_trunc, (-1, 1)), np.reshape(SEi,\n (-1, 1))))\n outputfile = fn.value + '_XYZ.csv'\n with open(outputfile, 'w') as fp:\n fp.write(xlabel_csv + ',' + ylabel_csv + ',' + se_csv + ',' +\n se_csv + '\\n')\n np.savetxt(fp, ar, '%s', ',')\n plt.show()\n\n\ndef FORCinel_colormap(Z):\n cdict = {'red': ((0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 /\n 255), (0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255\n ), (0.3193, 102 / 255, 102 / 255), (0.563, 204 / 255, 204 / 255), (\n 0.6975, 204 / 255, 204 / 255), (0.8319, 153 / 255, 153 / 255), (\n 0.9748, 76 / 255, 76 / 255), (1.0, 76 / 255, 76 / 255)), 'green': (\n (0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 / 255), (\n 0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (\n 0.3193, 178 / 255, 178 / 255), (0.563, 204 / 255, 204 / 255), (\n 0.6975, 76 / 255, 76 / 255), (0.8319, 102 / 255, 102 / 255), (\n 0.9748, 25 / 255, 25 / 255), (1.0, 25 / 255, 25 / 255)), 'blue': ((\n 0.0, 255 / 255, 255 / 255), (0.1387, 255 / 255, 255 / 255), (0.1597,\n 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (0.3193, 102 /\n 255, 102 / 255), (0.563, 76 / 255, 76 / 255), (0.6975, 76 / 255, 76 /\n 255), (0.8319, 153 / 255, 153 / 255), (0.9748, 76 / 255, 76 / 255),\n (1.0, 76 / 255, 76 / 255))}\n if np.abs(np.min(Z)) <= np.max(Z) * 0.19:\n vmin = -np.max(Z) * 0.19\n vmax = np.max(Z)\n else:\n vmin = np.min(Z)\n vmax = np.max(Z)\n anchors = np.zeros(10)\n anchors[1] = (-0.025 * vmax - vmin) / (vmax - vmin)\n anchors[2] = (-0.005 * vmax - vmin) / (vmax - vmin)\n anchors[3] = (0.025 * vmax - vmin) / (vmax - vmin)\n anchors[4] = (0.19 * vmax - vmin) / (vmax - vmin)\n anchors[5] = (0.48 * vmax - vmin) / (vmax - vmin)\n anchors[6] = (0.64 * vmax - vmin) / (vmax - vmin)\n anchors[7] = (0.8 * vmax - vmin) / (vmax - vmin)\n anchors[8] = (0.97 * vmax - vmin) / (vmax - vmin)\n anchors[9] = 1.0\n Rlst = list(cdict['red'])\n Glst = list(cdict['green'])\n Blst = list(cdict['blue'])\n for i in range(9):\n Rlst[i] = tuple((anchors[i], Rlst[i][1], Rlst[i][2]))\n Glst[i] = tuple((anchors[i], Glst[i][1], Glst[i][2]))\n Blst[i] = tuple((anchors[i], Blst[i][1], Blst[i][2]))\n cdict['red'] = tuple(Rlst)\n cdict['green'] = tuple(Glst)\n cdict['blue'] = tuple(Blst)\n cmap = LinearSegmentedColormap('forc_cmap', cdict)\n return cmap, vmin, vmax\n\n\ndef profile_options(X):\n Hb1 = X['Hb1'] - X['Hc2']\n Hb2 = X['Hb2']\n Hc1 = np.maximum(X['Hc1'], 0)\n Hc2 = X['Hc2']\n style = {'description_width': 'initial'}\n HL = widgets.HTML(value=\n '<hr style=\"height:3px;border:none;color:#333;background-color:#333;\" />'\n )\n P_title = widgets.HTML(value='<h3>Select profile type:</h3>')\n P_widge = widgets.RadioButtons(options=[('Horizontal profile', 0), (\n 'Vertical profile', 1)], value=0, style=style)\n H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>')\n if X['unit'] == 'SI':\n x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=\n 0.001, description='B$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=\n 10, description='H$_u$ [Oe]', disabled=False, continuous_update\n =False, orientation='horizontal', readout=True, readout_format=\n '.3f', layout={'width': '350px'}, style=style)\n if X['unit'] == 'SI':\n x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,\n max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,\n max=Hc2, step=10, description='H$_c$ [Oe]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>')\n if X['unit'] == 'SI':\n y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,\n max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,\n max=Hc2, step=10, description='H$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n if X['unit'] == 'SI':\n y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,\n max=Hb2, step=0.001, description='B$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,\n max=Hb2, step=10, description='H$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n profile_widge = VBox([P_title, P_widge, HL, H_title, x_Hb_widge,\n x_Hc_widge, HL, V_title, y_Hc_widge, y_Hb_widge])\n profile_nest = widgets.Tab()\n profile_nest.children = [profile_widge]\n profile_nest.set_title(0, 'PLOT PROFILES')\n display(profile_nest)\n X['P_widge'] = P_widge\n X['x_Hb_widge'] = x_Hb_widge\n X['x_Hc_widge'] = x_Hc_widge\n X['y_Hc_widge'] = y_Hc_widge\n X['y_Hb_widge'] = y_Hb_widge\n return X\n\n\n<mask token>\n\n\ndef x_profile(X, Hc, Hb):\n Hc1, Hc2 = Hc[0], Hc[1]\n dH = X['dH']\n NH = int(np.sqrt((Hc2 - Hc1) ** 2) / dH)\n Hc0 = np.linspace(Hc1, Hc2, NH)\n Hb0 = np.linspace(Hb, Hb, NH)\n rho_int = X['Zint'](Hc0, Hb0)\n coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))\n CI_int = X['SEint'](Hc0, Hb0) * coef\n fig = plt.figure(figsize=(5, 5))\n ax1 = fig.add_subplot(1, 1, 1)\n if X['mass'].value > 0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hc0, rho_int / (X['mass'].value / 1000.0), color='k')\n ax1.fill_between(Hc0, (rho_int - CI_int) / (X['mass'].value / \n 1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),\n color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hc0, rho_int / X['mass'].value, color='k')\n ax1.fill_between(Hc0, (rho_int - CI_int) / X['mass'].value, (\n rho_int + CI_int) / X['mass'].value, color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)\n else:\n ax1.plot(Hc0, rho_int, color='k')\n ax1.fill_between(Hc0, rho_int - CI_int, rho_int + CI_int, color=\n 'lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)\n ax1.tick_params(axis='both', which='major', direction='out', length=5,\n width=1, color='k', labelsize='14')\n ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,\n width=1, color='k')\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\\\mathrm{c}$ [T]', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\\\mathrm{c}$ [Oe]', fontsize=14)\n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n outputfile = X['sample'].value + '_Hc_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n plt.show\n return X\n\n\ndef y_profile(X, Hc, Hb):\n Hb1, Hb2 = Hb[0], Hb[1]\n dH = X['dH']\n NH = int(np.sqrt((Hb2 - Hb1) ** 2) / dH)\n Hc0 = np.linspace(Hc, Hc, NH)\n Hb0 = np.linspace(Hb1, Hb2, NH)\n rho_int = X['Zint'](Hc0, Hb0)\n coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))\n CI_int = X['SEint'](Hc0, Hb0) * coef\n fig = plt.figure(figsize=(5, 5))\n ax1 = fig.add_subplot(1, 1, 1)\n if X['mass'].value > 0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hb0, rho_int / (X['mass'].value / 1000.0), color='k')\n ax1.fill_between(Hb0, (rho_int - CI_int) / (X['mass'].value / \n 1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),\n color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hb0, rho_int / X['mass'].value, color='k')\n ax1.fill_between(Hb0, (rho_int - CI_int) / X['mass'].value, (\n rho_int + CI_int) / X['mass'].value, color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)\n else:\n ax1.plot(Hb0, rho_int, color='k')\n ax1.fill_between(Hb0, rho_int - CI_int, rho_int + CI_int, color=\n 'lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)\n ax1.tick_params(axis='both', which='major', direction='out', length=5,\n width=1, color='k', labelsize='14')\n ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,\n width=1, color='k')\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\\\mathrm{u}$ [T]', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\\\mathrm{u}$ [Oe]', fontsize=14)\n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n outputfile = X['sample'].value + '_Hu_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n plt.show\n return X\n",
"step-2": "<mask token>\n\n\ndef forc(X):\n Xi = X['Xi']\n Yi = X['Yi']\n Zi = X['Zi']\n SEi = X['SEi']\n Pi = X['Pi']\n Hc1 = X['Hc1']\n Hc2 = X['Hc2']\n Hb1 = X['Hb1']\n Hb2 = X['Hb2']\n style = {'description_width': 'initial'}\n colorbar_widge = widgets.Checkbox(value=False, description=\n 'Show final FORC plot', style=style)\n pval_widge = widgets.Checkbox(value=False, description=\n 'Show 0.05 significance contour', style=style)\n colormin_widge = widgets.FloatSlider(value=0.0, min=0.0, max=0.999,\n step=0.001, description='Rescale colormap minimum', disabled=False,\n continuous_update=False, orientation='horizontal', readout=False,\n readout_format='.2f', style=style)\n colormax_widge = widgets.FloatSlider(value=1.0, min=0.001, max=1, step=\n 0.001, description='Rescale colormap maximum', disabled=False,\n continuous_update=False, orientation='horizontal', readout=False,\n readout_format='.2f', style=style)\n contour_widge = widgets.Select(options=[['Select contour frequency', -1\n ], ['Every level', 1], ['Every 2nd level', 2], ['Every 3rd level', \n 3], ['Every 4th level', 4], ['Every 5th level', 5], [\n 'Every 10th level', 10], ['Every 20th level', 20], [\n 'Every 50th level', 50]], value=-1, rows=1, description=\n 'Plot contours', style=style)\n contourpts_widge = widgets.FloatSlider(value=1.0, min=0.5, max=3.0,\n step=0.5, description='Contour line width [pts]', style=style)\n download_widge = widgets.Checkbox(value=False, description=\n 'Download plot', style=style)\n level_widge = widgets.Select(options=[['20', 20], ['30', 30], ['50', 50\n ], ['75', 75], ['100', 100], ['200', 200], ['500', 500]], value=100,\n rows=1, description='Number of color levels', style=style)\n if X['unit'] == 'SI':\n xmin_widge = widgets.FloatText(value=0, description=\n 'Minimum B$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,\n description='Maximum B$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /\n 1000, description='Minimum B$_\\\\mathrm{u}$ [Oe]', style=style,\n step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,\n description='Maximum B$_\\\\mathrm{u}$ [Oe]', style=style, step=10)\n elif X['unit'] == 'cgs':\n xmin_widge = widgets.FloatText(value=0, description=\n 'Minimum H$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,\n description='Maximum H$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /\n 1000, description='Minimum H$_\\\\mathrm{u}$ [Oe]', style=style,\n step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,\n description='Maximum H$_\\\\mathrm{u}$ [Oe]', style=style, step=10)\n x = interactive(forcplot, Xi=fixed(Xi), Yi=fixed(Yi), Zi=fixed(Zi), SEi\n =fixed(SEi), Pi=fixed(Pi), fn=fixed(X['sample']), mass=fixed(X[\n 'mass']), unit=fixed(X['unit']), colorbar=colorbar_widge, level=\n level_widge, contour=contour_widge, contourpts=contourpts_widge,\n xmin=xmin_widge, xmax=xmax_widge, ymin=ymin_widge, ymax=ymax_widge,\n colormin=colormin_widge, colormax=colormax_widge, download=\n download_widge)\n tab_nest = widgets.Tab()\n tab_nest.set_title(0, 'FORC PLOTTING')\n tab_nest.children = [VBox(children=x.children)]\n display(tab_nest)\n\n\ndef forcplot(Xi, Yi, Zi, SEi, Pi, fn, mass, unit, colorbar, level, contour,\n contourpts, xmin, xmax, ymin, ymax, colormin, colormax, download):\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(1, 1, 1)\n if mass.value < 0.0:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi\n SEi_new = SEi\n Pi_new = Pi\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n if unit == 'SI':\n xlabel_text = 'B$_\\\\mathrm{c}$ [T]'\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\\\mathrm{u}$ [T]'\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$'\n se_csv = 'rho [Am**2 / T**2]'\n elif unit == 'cgs':\n xlabel_text = 'H$_\\\\mathrm{c}$ [Oe]'\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\\\mathrm{u}$ [Oe]'\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu$ Oe$^{-2}$'\n se_csv = 'rho [emu / Oe**2]'\n else:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi / (mass.value / 1000.0)\n SEi_new = SEi / (mass.value / 1000.0)\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n Pi_new = Pi\n if unit == 'SI':\n Zi_new = Zi / (mass.value / 1000.0)\n SEi_new = SEi / (mass.value / 1000.0)\n xlabel_text = 'B$_\\\\mathrm{c}$ [T]'\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\\\mathrm{u}$ [T]'\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$'\n se_csv = 'se [Am**2 / T**2 / kg]'\n elif unit == 'cgs':\n Zi_new = Zi / mass.value\n SEi_new = SEi / mass.value\n xlabel_text = 'H$_\\\\mathrm{c}$ [Oe]'\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\\\mathrm{u}$ [Oe]'\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu Oe$^{-2}$ g$^{-1}$'\n se_csv = 'se [emu/ Oe**2 / g]'\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n idx = (Xi_new >= xmin) & (Xi_new <= xmax) & (Yi_new >= ymin) & (Yi_new <=\n ymax)\n cmap, vmin, vmax = FORCinel_colormap(Zi_new[idx])\n Zi_trunc = np.copy(Zi_new)\n Zi_trunc[np.isnan(Zi_trunc)] = 0.0\n Zi_trunc[Zi_trunc < vmin] = vmin\n vmini = vmin * (1 - colormin)\n vmaxi = vmax * colormax\n idx = (Zi_trunc >= vmini) & (Zi_trunc <= vmaxi)\n cmap, vmin, vmax = FORCinel_colormap(Zi_trunc[idx])\n CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap=cmap, vmin=vmin,\n vmax=vmax)\n if (contour > 0) & (contour < level):\n CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',\n linewidths=contourpts)\n ax.set_xlabel(xlabel_text, fontsize=14)\n ax.set_ylabel(ylabel_text, fontsize=14)\n xlimits = np.sort((xmin, xmax))\n ax.set_xlim(xlimits)\n ylimits = np.sort((ymin, ymax))\n ax.set_ylim(ylimits)\n ax.tick_params(labelsize=14)\n ax.set_aspect('equal')\n ax.minorticks_on()\n if colorbar == True:\n cbar = fig.colorbar(CS, fraction=0.04, pad=0.08, format='%.2e')\n cbar.ax.tick_params(labelsize=14)\n cbar.set_label(cbar_text, fontsize=14)\n if download == True:\n outputfile = fn.value + '_FORC.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n ar = np.column_stack((np.reshape(Xi_new, (-1, 1)), np.reshape(\n Yi_new, (-1, 1)), np.reshape(Zi_trunc, (-1, 1)), np.reshape(SEi,\n (-1, 1))))\n outputfile = fn.value + '_XYZ.csv'\n with open(outputfile, 'w') as fp:\n fp.write(xlabel_csv + ',' + ylabel_csv + ',' + se_csv + ',' +\n se_csv + '\\n')\n np.savetxt(fp, ar, '%s', ',')\n plt.show()\n\n\ndef FORCinel_colormap(Z):\n cdict = {'red': ((0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 /\n 255), (0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255\n ), (0.3193, 102 / 255, 102 / 255), (0.563, 204 / 255, 204 / 255), (\n 0.6975, 204 / 255, 204 / 255), (0.8319, 153 / 255, 153 / 255), (\n 0.9748, 76 / 255, 76 / 255), (1.0, 76 / 255, 76 / 255)), 'green': (\n (0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 / 255), (\n 0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (\n 0.3193, 178 / 255, 178 / 255), (0.563, 204 / 255, 204 / 255), (\n 0.6975, 76 / 255, 76 / 255), (0.8319, 102 / 255, 102 / 255), (\n 0.9748, 25 / 255, 25 / 255), (1.0, 25 / 255, 25 / 255)), 'blue': ((\n 0.0, 255 / 255, 255 / 255), (0.1387, 255 / 255, 255 / 255), (0.1597,\n 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (0.3193, 102 /\n 255, 102 / 255), (0.563, 76 / 255, 76 / 255), (0.6975, 76 / 255, 76 /\n 255), (0.8319, 153 / 255, 153 / 255), (0.9748, 76 / 255, 76 / 255),\n (1.0, 76 / 255, 76 / 255))}\n if np.abs(np.min(Z)) <= np.max(Z) * 0.19:\n vmin = -np.max(Z) * 0.19\n vmax = np.max(Z)\n else:\n vmin = np.min(Z)\n vmax = np.max(Z)\n anchors = np.zeros(10)\n anchors[1] = (-0.025 * vmax - vmin) / (vmax - vmin)\n anchors[2] = (-0.005 * vmax - vmin) / (vmax - vmin)\n anchors[3] = (0.025 * vmax - vmin) / (vmax - vmin)\n anchors[4] = (0.19 * vmax - vmin) / (vmax - vmin)\n anchors[5] = (0.48 * vmax - vmin) / (vmax - vmin)\n anchors[6] = (0.64 * vmax - vmin) / (vmax - vmin)\n anchors[7] = (0.8 * vmax - vmin) / (vmax - vmin)\n anchors[8] = (0.97 * vmax - vmin) / (vmax - vmin)\n anchors[9] = 1.0\n Rlst = list(cdict['red'])\n Glst = list(cdict['green'])\n Blst = list(cdict['blue'])\n for i in range(9):\n Rlst[i] = tuple((anchors[i], Rlst[i][1], Rlst[i][2]))\n Glst[i] = tuple((anchors[i], Glst[i][1], Glst[i][2]))\n Blst[i] = tuple((anchors[i], Blst[i][1], Blst[i][2]))\n cdict['red'] = tuple(Rlst)\n cdict['green'] = tuple(Glst)\n cdict['blue'] = tuple(Blst)\n cmap = LinearSegmentedColormap('forc_cmap', cdict)\n return cmap, vmin, vmax\n\n\ndef profile_options(X):\n Hb1 = X['Hb1'] - X['Hc2']\n Hb2 = X['Hb2']\n Hc1 = np.maximum(X['Hc1'], 0)\n Hc2 = X['Hc2']\n style = {'description_width': 'initial'}\n HL = widgets.HTML(value=\n '<hr style=\"height:3px;border:none;color:#333;background-color:#333;\" />'\n )\n P_title = widgets.HTML(value='<h3>Select profile type:</h3>')\n P_widge = widgets.RadioButtons(options=[('Horizontal profile', 0), (\n 'Vertical profile', 1)], value=0, style=style)\n H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>')\n if X['unit'] == 'SI':\n x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=\n 0.001, description='B$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=\n 10, description='H$_u$ [Oe]', disabled=False, continuous_update\n =False, orientation='horizontal', readout=True, readout_format=\n '.3f', layout={'width': '350px'}, style=style)\n if X['unit'] == 'SI':\n x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,\n max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,\n max=Hc2, step=10, description='H$_c$ [Oe]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>')\n if X['unit'] == 'SI':\n y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,\n max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,\n max=Hc2, step=10, description='H$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n if X['unit'] == 'SI':\n y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,\n max=Hb2, step=0.001, description='B$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,\n max=Hb2, step=10, description='H$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n profile_widge = VBox([P_title, P_widge, HL, H_title, x_Hb_widge,\n x_Hc_widge, HL, V_title, y_Hc_widge, y_Hb_widge])\n profile_nest = widgets.Tab()\n profile_nest.children = [profile_widge]\n profile_nest.set_title(0, 'PLOT PROFILES')\n display(profile_nest)\n X['P_widge'] = P_widge\n X['x_Hb_widge'] = x_Hb_widge\n X['x_Hc_widge'] = x_Hc_widge\n X['y_Hc_widge'] = y_Hc_widge\n X['y_Hb_widge'] = y_Hb_widge\n return X\n\n\ndef profile_plot(X):\n if X['P_widge'].value == 0:\n X = x_profile(X, X['x_Hc_widge'].value, X['x_Hb_widge'].value)\n else:\n X = y_profile(X, X['y_Hc_widge'].value, X['y_Hb_widge'].value)\n return X\n\n\ndef x_profile(X, Hc, Hb):\n Hc1, Hc2 = Hc[0], Hc[1]\n dH = X['dH']\n NH = int(np.sqrt((Hc2 - Hc1) ** 2) / dH)\n Hc0 = np.linspace(Hc1, Hc2, NH)\n Hb0 = np.linspace(Hb, Hb, NH)\n rho_int = X['Zint'](Hc0, Hb0)\n coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))\n CI_int = X['SEint'](Hc0, Hb0) * coef\n fig = plt.figure(figsize=(5, 5))\n ax1 = fig.add_subplot(1, 1, 1)\n if X['mass'].value > 0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hc0, rho_int / (X['mass'].value / 1000.0), color='k')\n ax1.fill_between(Hc0, (rho_int - CI_int) / (X['mass'].value / \n 1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),\n color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hc0, rho_int / X['mass'].value, color='k')\n ax1.fill_between(Hc0, (rho_int - CI_int) / X['mass'].value, (\n rho_int + CI_int) / X['mass'].value, color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)\n else:\n ax1.plot(Hc0, rho_int, color='k')\n ax1.fill_between(Hc0, rho_int - CI_int, rho_int + CI_int, color=\n 'lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)\n ax1.tick_params(axis='both', which='major', direction='out', length=5,\n width=1, color='k', labelsize='14')\n ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,\n width=1, color='k')\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\\\mathrm{c}$ [T]', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\\\mathrm{c}$ [Oe]', fontsize=14)\n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n outputfile = X['sample'].value + '_Hc_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n plt.show\n return X\n\n\ndef y_profile(X, Hc, Hb):\n Hb1, Hb2 = Hb[0], Hb[1]\n dH = X['dH']\n NH = int(np.sqrt((Hb2 - Hb1) ** 2) / dH)\n Hc0 = np.linspace(Hc, Hc, NH)\n Hb0 = np.linspace(Hb1, Hb2, NH)\n rho_int = X['Zint'](Hc0, Hb0)\n coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))\n CI_int = X['SEint'](Hc0, Hb0) * coef\n fig = plt.figure(figsize=(5, 5))\n ax1 = fig.add_subplot(1, 1, 1)\n if X['mass'].value > 0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hb0, rho_int / (X['mass'].value / 1000.0), color='k')\n ax1.fill_between(Hb0, (rho_int - CI_int) / (X['mass'].value / \n 1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),\n color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hb0, rho_int / X['mass'].value, color='k')\n ax1.fill_between(Hb0, (rho_int - CI_int) / X['mass'].value, (\n rho_int + CI_int) / X['mass'].value, color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)\n else:\n ax1.plot(Hb0, rho_int, color='k')\n ax1.fill_between(Hb0, rho_int - CI_int, rho_int + CI_int, color=\n 'lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)\n ax1.tick_params(axis='both', which='major', direction='out', length=5,\n width=1, color='k', labelsize='14')\n ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,\n width=1, color='k')\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\\\mathrm{u}$ [T]', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\\\mathrm{u}$ [Oe]', fontsize=14)\n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n outputfile = X['sample'].value + '_Hu_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n plt.show\n return X\n",
"step-3": "<mask token>\nmpl.rcParams['pdf.fonttype'] = 42\nmpl.rcParams['ps.fonttype'] = 42\n\n\ndef forc(X):\n Xi = X['Xi']\n Yi = X['Yi']\n Zi = X['Zi']\n SEi = X['SEi']\n Pi = X['Pi']\n Hc1 = X['Hc1']\n Hc2 = X['Hc2']\n Hb1 = X['Hb1']\n Hb2 = X['Hb2']\n style = {'description_width': 'initial'}\n colorbar_widge = widgets.Checkbox(value=False, description=\n 'Show final FORC plot', style=style)\n pval_widge = widgets.Checkbox(value=False, description=\n 'Show 0.05 significance contour', style=style)\n colormin_widge = widgets.FloatSlider(value=0.0, min=0.0, max=0.999,\n step=0.001, description='Rescale colormap minimum', disabled=False,\n continuous_update=False, orientation='horizontal', readout=False,\n readout_format='.2f', style=style)\n colormax_widge = widgets.FloatSlider(value=1.0, min=0.001, max=1, step=\n 0.001, description='Rescale colormap maximum', disabled=False,\n continuous_update=False, orientation='horizontal', readout=False,\n readout_format='.2f', style=style)\n contour_widge = widgets.Select(options=[['Select contour frequency', -1\n ], ['Every level', 1], ['Every 2nd level', 2], ['Every 3rd level', \n 3], ['Every 4th level', 4], ['Every 5th level', 5], [\n 'Every 10th level', 10], ['Every 20th level', 20], [\n 'Every 50th level', 50]], value=-1, rows=1, description=\n 'Plot contours', style=style)\n contourpts_widge = widgets.FloatSlider(value=1.0, min=0.5, max=3.0,\n step=0.5, description='Contour line width [pts]', style=style)\n download_widge = widgets.Checkbox(value=False, description=\n 'Download plot', style=style)\n level_widge = widgets.Select(options=[['20', 20], ['30', 30], ['50', 50\n ], ['75', 75], ['100', 100], ['200', 200], ['500', 500]], value=100,\n rows=1, description='Number of color levels', style=style)\n if X['unit'] == 'SI':\n xmin_widge = widgets.FloatText(value=0, description=\n 'Minimum B$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,\n description='Maximum B$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /\n 1000, description='Minimum B$_\\\\mathrm{u}$ [Oe]', style=style,\n step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,\n description='Maximum B$_\\\\mathrm{u}$ [Oe]', style=style, step=10)\n elif X['unit'] == 'cgs':\n xmin_widge = widgets.FloatText(value=0, description=\n 'Minimum H$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,\n description='Maximum H$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /\n 1000, description='Minimum H$_\\\\mathrm{u}$ [Oe]', style=style,\n step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,\n description='Maximum H$_\\\\mathrm{u}$ [Oe]', style=style, step=10)\n x = interactive(forcplot, Xi=fixed(Xi), Yi=fixed(Yi), Zi=fixed(Zi), SEi\n =fixed(SEi), Pi=fixed(Pi), fn=fixed(X['sample']), mass=fixed(X[\n 'mass']), unit=fixed(X['unit']), colorbar=colorbar_widge, level=\n level_widge, contour=contour_widge, contourpts=contourpts_widge,\n xmin=xmin_widge, xmax=xmax_widge, ymin=ymin_widge, ymax=ymax_widge,\n colormin=colormin_widge, colormax=colormax_widge, download=\n download_widge)\n tab_nest = widgets.Tab()\n tab_nest.set_title(0, 'FORC PLOTTING')\n tab_nest.children = [VBox(children=x.children)]\n display(tab_nest)\n\n\ndef forcplot(Xi, Yi, Zi, SEi, Pi, fn, mass, unit, colorbar, level, contour,\n contourpts, xmin, xmax, ymin, ymax, colormin, colormax, download):\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(1, 1, 1)\n if mass.value < 0.0:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi\n SEi_new = SEi\n Pi_new = Pi\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n if unit == 'SI':\n xlabel_text = 'B$_\\\\mathrm{c}$ [T]'\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\\\mathrm{u}$ [T]'\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$'\n se_csv = 'rho [Am**2 / T**2]'\n elif unit == 'cgs':\n xlabel_text = 'H$_\\\\mathrm{c}$ [Oe]'\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\\\mathrm{u}$ [Oe]'\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu$ Oe$^{-2}$'\n se_csv = 'rho [emu / Oe**2]'\n else:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi / (mass.value / 1000.0)\n SEi_new = SEi / (mass.value / 1000.0)\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n Pi_new = Pi\n if unit == 'SI':\n Zi_new = Zi / (mass.value / 1000.0)\n SEi_new = SEi / (mass.value / 1000.0)\n xlabel_text = 'B$_\\\\mathrm{c}$ [T]'\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\\\mathrm{u}$ [T]'\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$'\n se_csv = 'se [Am**2 / T**2 / kg]'\n elif unit == 'cgs':\n Zi_new = Zi / mass.value\n SEi_new = SEi / mass.value\n xlabel_text = 'H$_\\\\mathrm{c}$ [Oe]'\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\\\mathrm{u}$ [Oe]'\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu Oe$^{-2}$ g$^{-1}$'\n se_csv = 'se [emu/ Oe**2 / g]'\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n idx = (Xi_new >= xmin) & (Xi_new <= xmax) & (Yi_new >= ymin) & (Yi_new <=\n ymax)\n cmap, vmin, vmax = FORCinel_colormap(Zi_new[idx])\n Zi_trunc = np.copy(Zi_new)\n Zi_trunc[np.isnan(Zi_trunc)] = 0.0\n Zi_trunc[Zi_trunc < vmin] = vmin\n vmini = vmin * (1 - colormin)\n vmaxi = vmax * colormax\n idx = (Zi_trunc >= vmini) & (Zi_trunc <= vmaxi)\n cmap, vmin, vmax = FORCinel_colormap(Zi_trunc[idx])\n CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap=cmap, vmin=vmin,\n vmax=vmax)\n if (contour > 0) & (contour < level):\n CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',\n linewidths=contourpts)\n ax.set_xlabel(xlabel_text, fontsize=14)\n ax.set_ylabel(ylabel_text, fontsize=14)\n xlimits = np.sort((xmin, xmax))\n ax.set_xlim(xlimits)\n ylimits = np.sort((ymin, ymax))\n ax.set_ylim(ylimits)\n ax.tick_params(labelsize=14)\n ax.set_aspect('equal')\n ax.minorticks_on()\n if colorbar == True:\n cbar = fig.colorbar(CS, fraction=0.04, pad=0.08, format='%.2e')\n cbar.ax.tick_params(labelsize=14)\n cbar.set_label(cbar_text, fontsize=14)\n if download == True:\n outputfile = fn.value + '_FORC.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n ar = np.column_stack((np.reshape(Xi_new, (-1, 1)), np.reshape(\n Yi_new, (-1, 1)), np.reshape(Zi_trunc, (-1, 1)), np.reshape(SEi,\n (-1, 1))))\n outputfile = fn.value + '_XYZ.csv'\n with open(outputfile, 'w') as fp:\n fp.write(xlabel_csv + ',' + ylabel_csv + ',' + se_csv + ',' +\n se_csv + '\\n')\n np.savetxt(fp, ar, '%s', ',')\n plt.show()\n\n\ndef FORCinel_colormap(Z):\n cdict = {'red': ((0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 /\n 255), (0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255\n ), (0.3193, 102 / 255, 102 / 255), (0.563, 204 / 255, 204 / 255), (\n 0.6975, 204 / 255, 204 / 255), (0.8319, 153 / 255, 153 / 255), (\n 0.9748, 76 / 255, 76 / 255), (1.0, 76 / 255, 76 / 255)), 'green': (\n (0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 / 255), (\n 0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (\n 0.3193, 178 / 255, 178 / 255), (0.563, 204 / 255, 204 / 255), (\n 0.6975, 76 / 255, 76 / 255), (0.8319, 102 / 255, 102 / 255), (\n 0.9748, 25 / 255, 25 / 255), (1.0, 25 / 255, 25 / 255)), 'blue': ((\n 0.0, 255 / 255, 255 / 255), (0.1387, 255 / 255, 255 / 255), (0.1597,\n 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (0.3193, 102 /\n 255, 102 / 255), (0.563, 76 / 255, 76 / 255), (0.6975, 76 / 255, 76 /\n 255), (0.8319, 153 / 255, 153 / 255), (0.9748, 76 / 255, 76 / 255),\n (1.0, 76 / 255, 76 / 255))}\n if np.abs(np.min(Z)) <= np.max(Z) * 0.19:\n vmin = -np.max(Z) * 0.19\n vmax = np.max(Z)\n else:\n vmin = np.min(Z)\n vmax = np.max(Z)\n anchors = np.zeros(10)\n anchors[1] = (-0.025 * vmax - vmin) / (vmax - vmin)\n anchors[2] = (-0.005 * vmax - vmin) / (vmax - vmin)\n anchors[3] = (0.025 * vmax - vmin) / (vmax - vmin)\n anchors[4] = (0.19 * vmax - vmin) / (vmax - vmin)\n anchors[5] = (0.48 * vmax - vmin) / (vmax - vmin)\n anchors[6] = (0.64 * vmax - vmin) / (vmax - vmin)\n anchors[7] = (0.8 * vmax - vmin) / (vmax - vmin)\n anchors[8] = (0.97 * vmax - vmin) / (vmax - vmin)\n anchors[9] = 1.0\n Rlst = list(cdict['red'])\n Glst = list(cdict['green'])\n Blst = list(cdict['blue'])\n for i in range(9):\n Rlst[i] = tuple((anchors[i], Rlst[i][1], Rlst[i][2]))\n Glst[i] = tuple((anchors[i], Glst[i][1], Glst[i][2]))\n Blst[i] = tuple((anchors[i], Blst[i][1], Blst[i][2]))\n cdict['red'] = tuple(Rlst)\n cdict['green'] = tuple(Glst)\n cdict['blue'] = tuple(Blst)\n cmap = LinearSegmentedColormap('forc_cmap', cdict)\n return cmap, vmin, vmax\n\n\ndef profile_options(X):\n Hb1 = X['Hb1'] - X['Hc2']\n Hb2 = X['Hb2']\n Hc1 = np.maximum(X['Hc1'], 0)\n Hc2 = X['Hc2']\n style = {'description_width': 'initial'}\n HL = widgets.HTML(value=\n '<hr style=\"height:3px;border:none;color:#333;background-color:#333;\" />'\n )\n P_title = widgets.HTML(value='<h3>Select profile type:</h3>')\n P_widge = widgets.RadioButtons(options=[('Horizontal profile', 0), (\n 'Vertical profile', 1)], value=0, style=style)\n H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>')\n if X['unit'] == 'SI':\n x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=\n 0.001, description='B$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=\n 10, description='H$_u$ [Oe]', disabled=False, continuous_update\n =False, orientation='horizontal', readout=True, readout_format=\n '.3f', layout={'width': '350px'}, style=style)\n if X['unit'] == 'SI':\n x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,\n max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,\n max=Hc2, step=10, description='H$_c$ [Oe]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>')\n if X['unit'] == 'SI':\n y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,\n max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,\n max=Hc2, step=10, description='H$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n if X['unit'] == 'SI':\n y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,\n max=Hb2, step=0.001, description='B$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,\n max=Hb2, step=10, description='H$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n profile_widge = VBox([P_title, P_widge, HL, H_title, x_Hb_widge,\n x_Hc_widge, HL, V_title, y_Hc_widge, y_Hb_widge])\n profile_nest = widgets.Tab()\n profile_nest.children = [profile_widge]\n profile_nest.set_title(0, 'PLOT PROFILES')\n display(profile_nest)\n X['P_widge'] = P_widge\n X['x_Hb_widge'] = x_Hb_widge\n X['x_Hc_widge'] = x_Hc_widge\n X['y_Hc_widge'] = y_Hc_widge\n X['y_Hb_widge'] = y_Hb_widge\n return X\n\n\ndef profile_plot(X):\n if X['P_widge'].value == 0:\n X = x_profile(X, X['x_Hc_widge'].value, X['x_Hb_widge'].value)\n else:\n X = y_profile(X, X['y_Hc_widge'].value, X['y_Hb_widge'].value)\n return X\n\n\ndef x_profile(X, Hc, Hb):\n Hc1, Hc2 = Hc[0], Hc[1]\n dH = X['dH']\n NH = int(np.sqrt((Hc2 - Hc1) ** 2) / dH)\n Hc0 = np.linspace(Hc1, Hc2, NH)\n Hb0 = np.linspace(Hb, Hb, NH)\n rho_int = X['Zint'](Hc0, Hb0)\n coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))\n CI_int = X['SEint'](Hc0, Hb0) * coef\n fig = plt.figure(figsize=(5, 5))\n ax1 = fig.add_subplot(1, 1, 1)\n if X['mass'].value > 0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hc0, rho_int / (X['mass'].value / 1000.0), color='k')\n ax1.fill_between(Hc0, (rho_int - CI_int) / (X['mass'].value / \n 1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),\n color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hc0, rho_int / X['mass'].value, color='k')\n ax1.fill_between(Hc0, (rho_int - CI_int) / X['mass'].value, (\n rho_int + CI_int) / X['mass'].value, color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)\n else:\n ax1.plot(Hc0, rho_int, color='k')\n ax1.fill_between(Hc0, rho_int - CI_int, rho_int + CI_int, color=\n 'lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)\n ax1.tick_params(axis='both', which='major', direction='out', length=5,\n width=1, color='k', labelsize='14')\n ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,\n width=1, color='k')\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\\\mathrm{c}$ [T]', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\\\mathrm{c}$ [Oe]', fontsize=14)\n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n outputfile = X['sample'].value + '_Hc_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n plt.show\n return X\n\n\ndef y_profile(X, Hc, Hb):\n Hb1, Hb2 = Hb[0], Hb[1]\n dH = X['dH']\n NH = int(np.sqrt((Hb2 - Hb1) ** 2) / dH)\n Hc0 = np.linspace(Hc, Hc, NH)\n Hb0 = np.linspace(Hb1, Hb2, NH)\n rho_int = X['Zint'](Hc0, Hb0)\n coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))\n CI_int = X['SEint'](Hc0, Hb0) * coef\n fig = plt.figure(figsize=(5, 5))\n ax1 = fig.add_subplot(1, 1, 1)\n if X['mass'].value > 0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hb0, rho_int / (X['mass'].value / 1000.0), color='k')\n ax1.fill_between(Hb0, (rho_int - CI_int) / (X['mass'].value / \n 1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),\n color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hb0, rho_int / X['mass'].value, color='k')\n ax1.fill_between(Hb0, (rho_int - CI_int) / X['mass'].value, (\n rho_int + CI_int) / X['mass'].value, color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)\n else:\n ax1.plot(Hb0, rho_int, color='k')\n ax1.fill_between(Hb0, rho_int - CI_int, rho_int + CI_int, color=\n 'lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)\n ax1.tick_params(axis='both', which='major', direction='out', length=5,\n width=1, color='k', labelsize='14')\n ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,\n width=1, color='k')\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\\\mathrm{u}$ [T]', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\\\mathrm{u}$ [Oe]', fontsize=14)\n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n outputfile = X['sample'].value + '_Hu_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n plt.show\n return X\n",
"step-4": "import numpy as np\nimport ipywidgets as widgets\nfrom ipywidgets import interact, interactive, fixed, Layout, VBox, HBox\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.tri as tri\nimport matplotlib.colors as colors\nfrom matplotlib.colors import LinearSegmentedColormap\nimport scipy.stats as sps\nimport matplotlib.ticker as mtick\nmpl.rcParams['pdf.fonttype'] = 42\nmpl.rcParams['ps.fonttype'] = 42\n\n\ndef forc(X):\n Xi = X['Xi']\n Yi = X['Yi']\n Zi = X['Zi']\n SEi = X['SEi']\n Pi = X['Pi']\n Hc1 = X['Hc1']\n Hc2 = X['Hc2']\n Hb1 = X['Hb1']\n Hb2 = X['Hb2']\n style = {'description_width': 'initial'}\n colorbar_widge = widgets.Checkbox(value=False, description=\n 'Show final FORC plot', style=style)\n pval_widge = widgets.Checkbox(value=False, description=\n 'Show 0.05 significance contour', style=style)\n colormin_widge = widgets.FloatSlider(value=0.0, min=0.0, max=0.999,\n step=0.001, description='Rescale colormap minimum', disabled=False,\n continuous_update=False, orientation='horizontal', readout=False,\n readout_format='.2f', style=style)\n colormax_widge = widgets.FloatSlider(value=1.0, min=0.001, max=1, step=\n 0.001, description='Rescale colormap maximum', disabled=False,\n continuous_update=False, orientation='horizontal', readout=False,\n readout_format='.2f', style=style)\n contour_widge = widgets.Select(options=[['Select contour frequency', -1\n ], ['Every level', 1], ['Every 2nd level', 2], ['Every 3rd level', \n 3], ['Every 4th level', 4], ['Every 5th level', 5], [\n 'Every 10th level', 10], ['Every 20th level', 20], [\n 'Every 50th level', 50]], value=-1, rows=1, description=\n 'Plot contours', style=style)\n contourpts_widge = widgets.FloatSlider(value=1.0, min=0.5, max=3.0,\n step=0.5, description='Contour line width [pts]', style=style)\n download_widge = widgets.Checkbox(value=False, description=\n 'Download plot', style=style)\n level_widge = widgets.Select(options=[['20', 20], ['30', 30], ['50', 50\n ], ['75', 75], ['100', 100], ['200', 200], ['500', 500]], value=100,\n rows=1, description='Number of color levels', style=style)\n if X['unit'] == 'SI':\n xmin_widge = widgets.FloatText(value=0, description=\n 'Minimum B$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,\n description='Maximum B$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /\n 1000, description='Minimum B$_\\\\mathrm{u}$ [Oe]', style=style,\n step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,\n description='Maximum B$_\\\\mathrm{u}$ [Oe]', style=style, step=10)\n elif X['unit'] == 'cgs':\n xmin_widge = widgets.FloatText(value=0, description=\n 'Minimum H$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,\n description='Maximum H$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /\n 1000, description='Minimum H$_\\\\mathrm{u}$ [Oe]', style=style,\n step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,\n description='Maximum H$_\\\\mathrm{u}$ [Oe]', style=style, step=10)\n x = interactive(forcplot, Xi=fixed(Xi), Yi=fixed(Yi), Zi=fixed(Zi), SEi\n =fixed(SEi), Pi=fixed(Pi), fn=fixed(X['sample']), mass=fixed(X[\n 'mass']), unit=fixed(X['unit']), colorbar=colorbar_widge, level=\n level_widge, contour=contour_widge, contourpts=contourpts_widge,\n xmin=xmin_widge, xmax=xmax_widge, ymin=ymin_widge, ymax=ymax_widge,\n colormin=colormin_widge, colormax=colormax_widge, download=\n download_widge)\n tab_nest = widgets.Tab()\n tab_nest.set_title(0, 'FORC PLOTTING')\n tab_nest.children = [VBox(children=x.children)]\n display(tab_nest)\n\n\ndef forcplot(Xi, Yi, Zi, SEi, Pi, fn, mass, unit, colorbar, level, contour,\n contourpts, xmin, xmax, ymin, ymax, colormin, colormax, download):\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(1, 1, 1)\n if mass.value < 0.0:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi\n SEi_new = SEi\n Pi_new = Pi\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n if unit == 'SI':\n xlabel_text = 'B$_\\\\mathrm{c}$ [T]'\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\\\mathrm{u}$ [T]'\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$'\n se_csv = 'rho [Am**2 / T**2]'\n elif unit == 'cgs':\n xlabel_text = 'H$_\\\\mathrm{c}$ [Oe]'\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\\\mathrm{u}$ [Oe]'\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu$ Oe$^{-2}$'\n se_csv = 'rho [emu / Oe**2]'\n else:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi / (mass.value / 1000.0)\n SEi_new = SEi / (mass.value / 1000.0)\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n Pi_new = Pi\n if unit == 'SI':\n Zi_new = Zi / (mass.value / 1000.0)\n SEi_new = SEi / (mass.value / 1000.0)\n xlabel_text = 'B$_\\\\mathrm{c}$ [T]'\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\\\mathrm{u}$ [T]'\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$'\n se_csv = 'se [Am**2 / T**2 / kg]'\n elif unit == 'cgs':\n Zi_new = Zi / mass.value\n SEi_new = SEi / mass.value\n xlabel_text = 'H$_\\\\mathrm{c}$ [Oe]'\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\\\mathrm{u}$ [Oe]'\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu Oe$^{-2}$ g$^{-1}$'\n se_csv = 'se [emu/ Oe**2 / g]'\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n idx = (Xi_new >= xmin) & (Xi_new <= xmax) & (Yi_new >= ymin) & (Yi_new <=\n ymax)\n cmap, vmin, vmax = FORCinel_colormap(Zi_new[idx])\n Zi_trunc = np.copy(Zi_new)\n Zi_trunc[np.isnan(Zi_trunc)] = 0.0\n Zi_trunc[Zi_trunc < vmin] = vmin\n vmini = vmin * (1 - colormin)\n vmaxi = vmax * colormax\n idx = (Zi_trunc >= vmini) & (Zi_trunc <= vmaxi)\n cmap, vmin, vmax = FORCinel_colormap(Zi_trunc[idx])\n CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap=cmap, vmin=vmin,\n vmax=vmax)\n if (contour > 0) & (contour < level):\n CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',\n linewidths=contourpts)\n ax.set_xlabel(xlabel_text, fontsize=14)\n ax.set_ylabel(ylabel_text, fontsize=14)\n xlimits = np.sort((xmin, xmax))\n ax.set_xlim(xlimits)\n ylimits = np.sort((ymin, ymax))\n ax.set_ylim(ylimits)\n ax.tick_params(labelsize=14)\n ax.set_aspect('equal')\n ax.minorticks_on()\n if colorbar == True:\n cbar = fig.colorbar(CS, fraction=0.04, pad=0.08, format='%.2e')\n cbar.ax.tick_params(labelsize=14)\n cbar.set_label(cbar_text, fontsize=14)\n if download == True:\n outputfile = fn.value + '_FORC.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n ar = np.column_stack((np.reshape(Xi_new, (-1, 1)), np.reshape(\n Yi_new, (-1, 1)), np.reshape(Zi_trunc, (-1, 1)), np.reshape(SEi,\n (-1, 1))))\n outputfile = fn.value + '_XYZ.csv'\n with open(outputfile, 'w') as fp:\n fp.write(xlabel_csv + ',' + ylabel_csv + ',' + se_csv + ',' +\n se_csv + '\\n')\n np.savetxt(fp, ar, '%s', ',')\n plt.show()\n\n\ndef FORCinel_colormap(Z):\n cdict = {'red': ((0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 /\n 255), (0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255\n ), (0.3193, 102 / 255, 102 / 255), (0.563, 204 / 255, 204 / 255), (\n 0.6975, 204 / 255, 204 / 255), (0.8319, 153 / 255, 153 / 255), (\n 0.9748, 76 / 255, 76 / 255), (1.0, 76 / 255, 76 / 255)), 'green': (\n (0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 / 255), (\n 0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (\n 0.3193, 178 / 255, 178 / 255), (0.563, 204 / 255, 204 / 255), (\n 0.6975, 76 / 255, 76 / 255), (0.8319, 102 / 255, 102 / 255), (\n 0.9748, 25 / 255, 25 / 255), (1.0, 25 / 255, 25 / 255)), 'blue': ((\n 0.0, 255 / 255, 255 / 255), (0.1387, 255 / 255, 255 / 255), (0.1597,\n 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (0.3193, 102 /\n 255, 102 / 255), (0.563, 76 / 255, 76 / 255), (0.6975, 76 / 255, 76 /\n 255), (0.8319, 153 / 255, 153 / 255), (0.9748, 76 / 255, 76 / 255),\n (1.0, 76 / 255, 76 / 255))}\n if np.abs(np.min(Z)) <= np.max(Z) * 0.19:\n vmin = -np.max(Z) * 0.19\n vmax = np.max(Z)\n else:\n vmin = np.min(Z)\n vmax = np.max(Z)\n anchors = np.zeros(10)\n anchors[1] = (-0.025 * vmax - vmin) / (vmax - vmin)\n anchors[2] = (-0.005 * vmax - vmin) / (vmax - vmin)\n anchors[3] = (0.025 * vmax - vmin) / (vmax - vmin)\n anchors[4] = (0.19 * vmax - vmin) / (vmax - vmin)\n anchors[5] = (0.48 * vmax - vmin) / (vmax - vmin)\n anchors[6] = (0.64 * vmax - vmin) / (vmax - vmin)\n anchors[7] = (0.8 * vmax - vmin) / (vmax - vmin)\n anchors[8] = (0.97 * vmax - vmin) / (vmax - vmin)\n anchors[9] = 1.0\n Rlst = list(cdict['red'])\n Glst = list(cdict['green'])\n Blst = list(cdict['blue'])\n for i in range(9):\n Rlst[i] = tuple((anchors[i], Rlst[i][1], Rlst[i][2]))\n Glst[i] = tuple((anchors[i], Glst[i][1], Glst[i][2]))\n Blst[i] = tuple((anchors[i], Blst[i][1], Blst[i][2]))\n cdict['red'] = tuple(Rlst)\n cdict['green'] = tuple(Glst)\n cdict['blue'] = tuple(Blst)\n cmap = LinearSegmentedColormap('forc_cmap', cdict)\n return cmap, vmin, vmax\n\n\ndef profile_options(X):\n Hb1 = X['Hb1'] - X['Hc2']\n Hb2 = X['Hb2']\n Hc1 = np.maximum(X['Hc1'], 0)\n Hc2 = X['Hc2']\n style = {'description_width': 'initial'}\n HL = widgets.HTML(value=\n '<hr style=\"height:3px;border:none;color:#333;background-color:#333;\" />'\n )\n P_title = widgets.HTML(value='<h3>Select profile type:</h3>')\n P_widge = widgets.RadioButtons(options=[('Horizontal profile', 0), (\n 'Vertical profile', 1)], value=0, style=style)\n H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>')\n if X['unit'] == 'SI':\n x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=\n 0.001, description='B$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=\n 10, description='H$_u$ [Oe]', disabled=False, continuous_update\n =False, orientation='horizontal', readout=True, readout_format=\n '.3f', layout={'width': '350px'}, style=style)\n if X['unit'] == 'SI':\n x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,\n max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,\n max=Hc2, step=10, description='H$_c$ [Oe]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>')\n if X['unit'] == 'SI':\n y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,\n max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,\n max=Hc2, step=10, description='H$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n if X['unit'] == 'SI':\n y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,\n max=Hb2, step=0.001, description='B$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,\n max=Hb2, step=10, description='H$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n profile_widge = VBox([P_title, P_widge, HL, H_title, x_Hb_widge,\n x_Hc_widge, HL, V_title, y_Hc_widge, y_Hb_widge])\n profile_nest = widgets.Tab()\n profile_nest.children = [profile_widge]\n profile_nest.set_title(0, 'PLOT PROFILES')\n display(profile_nest)\n X['P_widge'] = P_widge\n X['x_Hb_widge'] = x_Hb_widge\n X['x_Hc_widge'] = x_Hc_widge\n X['y_Hc_widge'] = y_Hc_widge\n X['y_Hb_widge'] = y_Hb_widge\n return X\n\n\ndef profile_plot(X):\n if X['P_widge'].value == 0:\n X = x_profile(X, X['x_Hc_widge'].value, X['x_Hb_widge'].value)\n else:\n X = y_profile(X, X['y_Hc_widge'].value, X['y_Hb_widge'].value)\n return X\n\n\ndef x_profile(X, Hc, Hb):\n Hc1, Hc2 = Hc[0], Hc[1]\n dH = X['dH']\n NH = int(np.sqrt((Hc2 - Hc1) ** 2) / dH)\n Hc0 = np.linspace(Hc1, Hc2, NH)\n Hb0 = np.linspace(Hb, Hb, NH)\n rho_int = X['Zint'](Hc0, Hb0)\n coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))\n CI_int = X['SEint'](Hc0, Hb0) * coef\n fig = plt.figure(figsize=(5, 5))\n ax1 = fig.add_subplot(1, 1, 1)\n if X['mass'].value > 0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hc0, rho_int / (X['mass'].value / 1000.0), color='k')\n ax1.fill_between(Hc0, (rho_int - CI_int) / (X['mass'].value / \n 1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),\n color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hc0, rho_int / X['mass'].value, color='k')\n ax1.fill_between(Hc0, (rho_int - CI_int) / X['mass'].value, (\n rho_int + CI_int) / X['mass'].value, color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)\n else:\n ax1.plot(Hc0, rho_int, color='k')\n ax1.fill_between(Hc0, rho_int - CI_int, rho_int + CI_int, color=\n 'lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)\n ax1.tick_params(axis='both', which='major', direction='out', length=5,\n width=1, color='k', labelsize='14')\n ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,\n width=1, color='k')\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\\\mathrm{c}$ [T]', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\\\mathrm{c}$ [Oe]', fontsize=14)\n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n outputfile = X['sample'].value + '_Hc_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n plt.show\n return X\n\n\ndef y_profile(X, Hc, Hb):\n Hb1, Hb2 = Hb[0], Hb[1]\n dH = X['dH']\n NH = int(np.sqrt((Hb2 - Hb1) ** 2) / dH)\n Hc0 = np.linspace(Hc, Hc, NH)\n Hb0 = np.linspace(Hb1, Hb2, NH)\n rho_int = X['Zint'](Hc0, Hb0)\n coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))\n CI_int = X['SEint'](Hc0, Hb0) * coef\n fig = plt.figure(figsize=(5, 5))\n ax1 = fig.add_subplot(1, 1, 1)\n if X['mass'].value > 0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hb0, rho_int / (X['mass'].value / 1000.0), color='k')\n ax1.fill_between(Hb0, (rho_int - CI_int) / (X['mass'].value / \n 1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),\n color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hb0, rho_int / X['mass'].value, color='k')\n ax1.fill_between(Hb0, (rho_int - CI_int) / X['mass'].value, (\n rho_int + CI_int) / X['mass'].value, color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)\n else:\n ax1.plot(Hb0, rho_int, color='k')\n ax1.fill_between(Hb0, rho_int - CI_int, rho_int + CI_int, color=\n 'lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)\n ax1.tick_params(axis='both', which='major', direction='out', length=5,\n width=1, color='k', labelsize='14')\n ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,\n width=1, color='k')\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\\\mathrm{u}$ [T]', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\\\mathrm{u}$ [Oe]', fontsize=14)\n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n outputfile = X['sample'].value + '_Hu_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n plt.show\n return X\n",
"step-5": "import numpy as np\nimport ipywidgets as widgets\nfrom ipywidgets import interact, interactive, fixed, Layout, VBox, HBox\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.tri as tri\nimport matplotlib.colors as colors\nfrom matplotlib.colors import LinearSegmentedColormap\nimport scipy.stats as sps\nimport matplotlib.ticker as mtick\nmpl.rcParams['pdf.fonttype'] = 42\nmpl.rcParams['ps.fonttype'] = 42\n\n\n#### FORC plotting ####\ndef forc(X):\n\n #unpack data\n Xi = X['Xi']\n Yi = X['Yi']\n Zi = X['Zi']\n SEi = X['SEi']\n Pi = X['Pi']\n Hc1 = X['Hc1']\n Hc2 = X['Hc2']\n Hb1 = X['Hb1']\n Hb2 = X['Hb2']\n\n #Set up widgets for interactive plot\n style = {'description_width': 'initial'} #general style settings\n \n #DEFINE INTERACTIVE WIDGETS\n \n #should a colorbar be included\n colorbar_widge = widgets.Checkbox(value=False, description = 'Show final FORC plot',style=style) \n pval_widge = widgets.Checkbox(value=False, description = 'Show 0.05 significance contour',style=style) \n\n colormin_widge = widgets.FloatSlider(\n value=0.0,\n min=0.00,\n max=0.999,\n step=0.001,\n description='Rescale colormap minimum',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=False,\n readout_format='.2f',\n style=style\n )\n\n colormax_widge = widgets.FloatSlider(\n value=1.0,\n min=0.001,\n max=1,\n step=0.001,\n description='Rescale colormap maximum',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=False,\n readout_format='.2f',\n style=style\n )\n\n #Frequency for contour lines to be included in plot\n contour_widge = widgets.Select(\n options=[['Select contour frequency',-1],\n ['Every level',1],\n ['Every 2nd level',2],\n ['Every 3rd level',3],\n ['Every 4th level',4],\n ['Every 5th level',5],\n ['Every 10th level',10],\n ['Every 20th level',20],\n ['Every 50th level',50],\n ],\n value=-1,\n rows=1,\n description='Plot contours',style=style)\n \n contourpts_widge = widgets.FloatSlider(value=1.0,min=0.5,max=3.0,step=0.5, description = 'Contour line width [pts]',style=style)\n\n #check box for plot download\n download_widge = widgets.Checkbox(value=False, description = 'Download plot',style=style) \n \n #How many contour levels should be included\n level_widge = widgets.Select(\n options=[['20',20],['30',30],['50',50],['75',75],['100',100],['200',200],['500',500]],\n value=100,\n rows=1,\n description='Number of color levels',style=style)\n\n #plot limit widgets\n if X['unit']=='SI': \n xmin_widge = widgets.FloatText(value=0,description='Minimum B$_\\mathrm{c}$ [Oe]',style=style,step=10) \n xmax_widge = widgets.FloatText(value=np.round(Hc2*1000)/1000,description='Maximum B$_\\mathrm{c}$ [Oe]',style=style,step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1-Hc2)*1000)/1000,description='Minimum B$_\\mathrm{u}$ [Oe]',style=style,step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2*1000)/1000,description='Maximum B$_\\mathrm{u}$ [Oe]',style=style,step=10)\n elif X['unit']=='cgs':\n xmin_widge = widgets.FloatText(value=0,description='Minimum H$_\\mathrm{c}$ [Oe]',style=style,step=10) \n xmax_widge = widgets.FloatText(value=np.round(Hc2*1000)/1000,description='Maximum H$_\\mathrm{c}$ [Oe]',style=style,step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1-Hc2)*1000)/1000,description='Minimum H$_\\mathrm{u}$ [Oe]',style=style,step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2*1000)/1000,description='Maximum H$_\\mathrm{u}$ [Oe]',style=style,step=10) \n\n #launch the interactive FORC plot\n x = interactive(forcplot,\n Xi=fixed(Xi), #X point grid\n Yi=fixed(Yi), #Y point grid\n Zi=fixed(Zi), #interpolated Z values\n SEi = fixed(SEi), #interpolated standard errors\n Pi = fixed(Pi), #P values\n fn=fixed(X['sample']), #File information\n mass=fixed(X['mass']), #Preprocessing information\n unit=fixed(X['unit']),\n colorbar=colorbar_widge, #Include colorbar \n level=level_widge, #Number of levels to plot \n contour=contour_widge, #Contour levels to plot\n contourpts=contourpts_widge, #Contour line width\n xmin=xmin_widge, #X-minimum\n xmax=xmax_widge, #X-maximum\n ymin=ymin_widge, #Y-minimum\n ymax=ymax_widge, #Y-maximum\n colormin = colormin_widge, #adjust colormap minimum\n colormax = colormax_widge, #adjust colormap minimum\n download = download_widge #download plot\n )\n \n #create tabs\n tab_nest = widgets.Tab()\n # tab_nest.children = [tab_visualise]\n tab_nest.set_title(0, 'FORC PLOTTING')\n\n #interact function in isolation\n tab_nest.children = [VBox(children = x.children)]\n display(tab_nest)\n \n #display(x) #display the interactive plot\n\ndef forcplot(Xi,Yi,Zi,SEi,Pi,fn,mass,unit,colorbar,level,contour,contourpts,xmin,xmax,ymin,ymax,colormin,colormax,download):\n \n\n fig = plt.figure(figsize=(6,6))\n ax = fig.add_subplot(1,1,1)\n \n if mass.value<0.0: \n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi\n SEi_new = SEi\n Pi_new = Pi\n SEi_new[Zi_new==0.0]=0.0\n SEi_new[np.isnan(SEi_new)]=0.0\n if unit=='SI':\n xlabel_text = 'B$_\\mathrm{c}$ [T]' #label Hc axis [SI units]\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\mathrm{u}$ [T]' #label Hu axis [SI units]\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$'\n se_csv = 'rho [Am**2 / T**2]'\n elif unit=='cgs':\n xlabel_text = 'H$_\\mathrm{c}$ [Oe]' #label Hc axis [SI units]\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\mathrm{u}$ [Oe]' #label Hu axis [SI units]\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu$ Oe$^{-2}$'\n se_csv = 'rho [emu / Oe**2]' \n else:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi / (mass.value/1000.0)\n SEi_new = SEi / (mass.value/1000.0)\n SEi_new[Zi_new==0.0]=0.0\n SEi_new[np.isnan(SEi_new)]=0.0\n Pi_new = Pi\n if unit=='SI':\n Zi_new = Zi / (mass.value/1000.0)\n SEi_new = SEi / (mass.value/1000.0) \n xlabel_text = 'B$_\\mathrm{c}$ [T]' #label Hc axis [SI units]\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\mathrm{u}$ [T]' #label Hu axis [SI units]\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$'\n se_csv = 'se [Am**2 / T**2 / kg]'\n elif unit=='cgs':\n Zi_new = Zi / (mass.value)\n SEi_new = SEi / (mass.value) \n xlabel_text = 'H$_\\mathrm{c}$ [Oe]' #label Hc axis [SI units]\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\mathrm{u}$ [Oe]' #label Hu axis [SI units]\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu Oe$^{-2}$ g$^{-1}$'\n se_csv = 'se [emu/ Oe**2 / g]' \n \n SEi_new[Zi_new==0.0]=0.0\n SEi_new[np.isnan(SEi_new)]=0.0 \n\n #define colormaps\n idx=(Xi_new>=xmin) & (Xi_new<=xmax) & (Yi_new>=ymin) & (Yi_new<=ymax) #find points currently in view\n cmap,vmin,vmax = FORCinel_colormap(Zi_new[idx])\n #cmap, norm = FORCinel_colormap(Zi_new[idx])\n\n Zi_trunc = np.copy(Zi_new)\n Zi_trunc[np.isnan(Zi_trunc)] = 0.0\n Zi_trunc[Zi_trunc<vmin]=vmin\n \n vmini = vmin*(1-colormin)\n vmaxi = vmax*colormax\n\n idx = (Zi_trunc>=vmini) & (Zi_trunc<=vmaxi)\n cmap,vmin,vmax = FORCinel_colormap(Zi_trunc[idx])\n\n CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap = cmap, vmin=vmin, vmax=vmax)\n \n if (contour>0) & (contour<level):\n CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',linewidths=contourpts)\n\n #if pval==True:\n # CS3 = ax.contour(Xi_new, Yi_new, Pi_new, levels=[0.05], colors=['r'])\n\n ax.set_xlabel(xlabel_text,fontsize=14) #label Hc axis [SI units]\n ax.set_ylabel(ylabel_text,fontsize=14) #label Hu axis [SI units] \n\n # Set plot Xlimits\n xlimits = np.sort((xmin,xmax))\n ax.set_xlim(xlimits)\n \n #Set plot Ylimits\n ylimits = np.sort((ymin,ymax))\n ax.set_ylim(ylimits)\n \n #Set ticks and plot aspect ratio\n ax.tick_params(labelsize=14)\n ax.set_aspect('equal') #set 1:1 aspect ratio\n ax.minorticks_on() #add minor ticks\n \n #Add colorbar\n if colorbar == True: \n cbar = fig.colorbar(CS,fraction=0.04, pad=0.08,format='%.2e')\n cbar.ax.tick_params(labelsize=14)\n #cbar.ax.set_title(cbar_text,fontsize=14)\n cbar.set_label(cbar_text,fontsize=14)\n #cbar.ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n\n \n #Activate download to same folder as data file\n if download==True:\n outputfile = fn.value+'_FORC.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches=\"tight\")\n \n ar = np.column_stack((np.reshape(Xi_new,(-1,1)),np.reshape(Yi_new,(-1,1)),np.reshape(Zi_trunc,(-1,1)),np.reshape(SEi,(-1,1))))\n outputfile = fn.value+'_XYZ.csv'\n with open(outputfile, 'w') as fp:\n fp.write(xlabel_csv+','+ylabel_csv+','+se_csv+','+se_csv + '\\n')\n np.savetxt(fp, ar, '%s', ',')\n \n #show the final plot\n plt.show()\n\ndef FORCinel_colormap(Z):\n\n #setup initial colormap assuming that negative range does not require extension\n cdict = {'red': ((0.0, 127/255, 127/255),\n (0.1387, 255/255, 255/255),\n (0.1597, 255/255, 255/255),\n (0.1807, 255/255, 255/255),\n (0.3193, 102/255, 102/255),\n (0.563, 204/255, 204/255),\n (0.6975, 204/255, 204/255),\n (0.8319, 153/255, 153/255),\n (0.9748, 76/255, 76/255),\n (1.0, 76/255, 76/255)),\n\n 'green': ((0.0, 127/255, 127/255),\n (0.1387, 255/255, 255/255),\n (0.1597, 255/255, 255/255),\n (0.1807, 255/255, 255/255),\n (0.3193, 178/255, 178/255),\n (0.563, 204/255, 204/255),\n (0.6975, 76/255, 76/255),\n (0.8319, 102/255, 102/255),\n (0.9748, 25/255, 25/255),\n (1.0, 25/255, 25/255)),\n\n 'blue': ((0.0, 255/255, 255/255),\n (0.1387, 255/255, 255/255),\n (0.1597, 255/255, 255/255),\n (0.1807, 255/255, 255/255),\n (0.3193, 102/255, 102/255),\n (0.563, 76/255, 76/255),\n (0.6975, 76/255, 76/255),\n (0.8319, 153/255, 153/255),\n (0.9748, 76/255, 76/255),\n (1.0, 76/255, 76/255))}\n\n if np.abs(np.min(Z))<=np.max(Z)*0.19: #negative extension is not required\n #cmap = LinearSegmentedColormap('forc_cmap', cdict)\n vmin = -np.max(Z)*0.19\n vmax = np.max(Z)\n else: #negative extension is required\n vmin=np.min(Z)\n vmax=np.max(Z) \n \n anchors = np.zeros(10)\n anchors[1]=(-0.025*vmax-vmin)/(vmax-vmin)\n anchors[2]=(-0.005*vmax-vmin)/(vmax-vmin)\n anchors[3]=(0.025*vmax-vmin)/(vmax-vmin)\n anchors[4]=(0.19*vmax-vmin)/(vmax-vmin)\n anchors[5]=(0.48*vmax-vmin)/(vmax-vmin)\n anchors[6]=(0.64*vmax-vmin)/(vmax-vmin)\n anchors[7]=(0.80*vmax-vmin)/(vmax-vmin)\n anchors[8]=(0.97*vmax-vmin)/(vmax-vmin)\n anchors[9]=1.0\n\n Rlst = list(cdict['red'])\n Glst = list(cdict['green'])\n Blst = list(cdict['blue'])\n\n for i in range(9):\n Rlst[i] = tuple((anchors[i],Rlst[i][1],Rlst[i][2]))\n Glst[i] = tuple((anchors[i],Glst[i][1],Glst[i][2]))\n Blst[i] = tuple((anchors[i],Blst[i][1],Blst[i][2]))\n \n cdict['red'] = tuple(Rlst)\n cdict['green'] = tuple(Glst)\n cdict['blue'] = tuple(Blst)\n\n cmap = LinearSegmentedColormap('forc_cmap', cdict)\n\n return cmap, vmin, vmax\n\n #### Profile Plotting ####\n\n#### Profile plotting ####\n\ndef profile_options(X):\n Hb1 = X['Hb1']-X['Hc2']\n Hb2 = X['Hb2']\n Hc1 = np.maximum(X['Hc1'],0)\n Hc2 = X['Hc2']\n style = {'description_width': 'initial'} #general style settings\n \n HL = widgets.HTML(value='<hr style=\"height:3px;border:none;color:#333;background-color:#333;\" />')\n \n P_title = widgets.HTML(value='<h3>Select profile type:</h3>')\n P_widge = widgets.RadioButtons(options=[('Horizontal profile',0), ('Vertical profile',1)],\n value=0,\n style=style)\n \n H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>')\n\n if X['unit'] == 'SI':\n x_Hb_widge = widgets.FloatSlider(\n value=0.0,\n min=Hb1,\n max=Hb2,\n step=0.001,\n description='B$_u$ [T]',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.3f',\n layout={'width': '350px'},\n style = style\n )\n elif X['unit'] == 'cgs':\n x_Hb_widge = widgets.FloatSlider(\n value=0.0,\n min=Hb1,\n max=Hb2,\n step=10,\n description='H$_u$ [Oe]',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.3f',\n layout={'width': '350px'},\n style = style\n )\n \n if X['unit'] == 'SI':\n x_Hc_widge = widgets.FloatRangeSlider(\n value=[Hc1,Hc2],\n min=Hc1,\n max=Hc2,\n step=0.001,\n description='B$_c$ [T]',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.3f',\n layout={'width': '350px'},\n style = style\n )\n elif X['unit'] == 'cgs':\n x_Hc_widge = widgets.FloatRangeSlider(\n value=[Hc1,Hc2],\n min=Hc1,\n max=Hc2,\n step=10,\n description='H$_c$ [Oe]',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.3f',\n layout={'width': '350px'},\n style = style\n )\n \n V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>')\n \n if X['unit'] == 'SI':\n y_Hc_widge = widgets.FloatSlider(\n value=(Hc1+Hc2)/2.0,\n min=Hc1,\n max=Hc2,\n step=0.001,\n description='B$_c$ [T]',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.3f',\n layout={'width': '350px'},\n style = style\n )\n elif X['unit'] == 'cgs':\n y_Hc_widge = widgets.FloatSlider(\n value=(Hc1+Hc2)/2.0,\n min=Hc1,\n max=Hc2,\n step=10,\n description='H$_c$ [T]',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.3f',\n layout={'width': '350px'},\n style = style\n )\n\n if X['unit'] == 'SI':\n y_Hb_widge = widgets.FloatRangeSlider(\n value=[Hb1,Hb2],\n min=Hb1,\n max=Hb2,\n step=0.001,\n description='B$_u$ [T]',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.3f',\n layout={'width': '350px'},\n style = style\n )\n\n elif X['unit'] == 'cgs':\n y_Hb_widge = widgets.FloatRangeSlider(\n value=[Hb1,Hb2],\n min=Hb1,\n max=Hb2,\n step=10,\n description='H$_u$ [T]',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.3f',\n layout={'width': '350px'},\n style = style\n )\n \n profile_widge = VBox([P_title,P_widge,HL,H_title,x_Hb_widge,x_Hc_widge, \\\n HL,V_title,y_Hc_widge,y_Hb_widge])\n \n profile_nest = widgets.Tab()\n profile_nest.children = [profile_widge]\n profile_nest.set_title(0, 'PLOT PROFILES')\n display(profile_nest) \n \n X['P_widge'] = P_widge\n X['x_Hb_widge'] = x_Hb_widge\n X['x_Hc_widge'] = x_Hc_widge\n X['y_Hc_widge'] = y_Hc_widge\n X['y_Hb_widge'] = y_Hb_widge\n\n return X\n\ndef profile_plot(X):\n\n if X['P_widge'].value==0:\n X = x_profile(X,X['x_Hc_widge'].value,X['x_Hb_widge'].value)\n else:\n X = y_profile(X,X['y_Hc_widge'].value,X['y_Hb_widge'].value)\n \n return X\n\ndef x_profile(X,Hc,Hb):\n\n Hc1, Hc2 = Hc[0], Hc[1]\n\n dH = X['dH']\n NH = int(np.sqrt((Hc2-Hc1)**2)/dH)\n Hc0 = np.linspace(Hc1,Hc2,NH)\n Hb0 = np.linspace(Hb,Hb,NH)\n \n rho_int = X['Zint'](Hc0,Hb0)\n coef = sps.norm.ppf(0.025/np.sum(rho_int.mask==False))\n CI_int = X['SEint'](Hc0,Hb0)*coef\n\n fig = plt.figure(figsize=(5,5))\n ax1 = fig.add_subplot(1,1,1)\n \n if X['mass'].value>0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hc0,rho_int/(X['mass'].value/1000.0),color='k')\n ax1.fill_between(Hc0, (rho_int-CI_int)/(X['mass'].value/1000.0), (rho_int+CI_int)/(X['mass'].value/1000.0),color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$',fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hc0,rho_int/(X['mass'].value),color='k')\n ax1.fill_between(Hc0, (rho_int-CI_int)/(X['mass'].value), (rho_int+CI_int)/(X['mass'].value),color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$',fontsize=14)\n else:\n ax1.plot(Hc0,rho_int,color='k')\n ax1.fill_between(Hc0, (rho_int-CI_int), (rho_int+CI_int),color='lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$',fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$',fontsize=14)\n\n\n ax1.tick_params(axis='both',which='major',direction='out',length=5,width=1,color='k',labelsize='14')\n ax1.tick_params(axis='both',which='minor',direction='out',length=3.5,width=1,color='k')\n \n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\mathrm{c}$ [T]',fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\mathrm{c}$ [Oe]',fontsize=14) \n \n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n\n\n outputfile = X['sample'].value+'_Hc_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches=\"tight\")\n plt.show\n \n return X\n\ndef y_profile(X,Hc,Hb):\n\n Hb1, Hb2 = Hb[0], Hb[1]\n\n dH = X['dH']\n NH = int(np.sqrt((Hb2-Hb1)**2)/dH)\n Hc0 = np.linspace(Hc,Hc,NH)\n Hb0 = np.linspace(Hb1,Hb2,NH)\n \n rho_int = X['Zint'](Hc0,Hb0)\n coef = sps.norm.ppf(0.025/np.sum(rho_int.mask==False))\n CI_int = X['SEint'](Hc0,Hb0)*coef\n\n fig = plt.figure(figsize=(5,5))\n ax1 = fig.add_subplot(1,1,1)\n\n if X['mass'].value>0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hb0,rho_int/(X['mass'].value/1000.0),color='k')\n ax1.fill_between(Hb0, (rho_int-CI_int)/(X['mass'].value/1000.0), (rho_int+CI_int)/(X['mass'].value/1000.0),color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$',fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hb0,rho_int/(X['mass'].value),color='k')\n ax1.fill_between(Hb0, (rho_int-CI_int)/(X['mass'].value), (rho_int+CI_int)/(X['mass'].value),color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$',fontsize=14)\n else:\n ax1.plot(Hb0,rho_int,color='k')\n ax1.fill_between(Hb0, (rho_int-CI_int), (rho_int+CI_int),color='lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$',fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$',fontsize=14) \n \n ax1.tick_params(axis='both',which='major',direction='out',length=5,width=1,color='k',labelsize='14')\n ax1.tick_params(axis='both',which='minor',direction='out',length=3.5,width=1,color='k')\n\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\mathrm{u}$ [T]',fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\mathrm{u}$ [Oe]',fontsize=14) \n \n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n\n\n outputfile = X['sample'].value+'_Hu_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches=\"tight\")\n plt.show\n \n return X",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Vocabulary(db.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Vocabulary(db.Model):
_id = db.Column(db.Integer, primary_key=True)
language = db.Column(db.String(64), index=True)
word = db.Column(db.String(64), index=True, unique=True)
date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
<|reserved_special_token_1|>
from datetime import datetime
from app import db
class Vocabulary(db.Model):
_id = db.Column(db.Integer, primary_key=True)
language = db.Column(db.String(64), index=True)
word = db.Column(db.String(64), index=True, unique=True)
date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
|
flexible
|
{
"blob_id": "834469f9c6e065fb29dfe1fd3e421fbb752f5094",
"index": 7708,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Vocabulary(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Vocabulary(db.Model):\n _id = db.Column(db.Integer, primary_key=True)\n language = db.Column(db.String(64), index=True)\n word = db.Column(db.String(64), index=True, unique=True)\n date = db.Column(db.DateTime, index=True, default=datetime.utcnow)\n",
"step-4": "from datetime import datetime\nfrom app import db\n\n\nclass Vocabulary(db.Model):\n _id = db.Column(db.Integer, primary_key=True)\n language = db.Column(db.String(64), index=True)\n word = db.Column(db.String(64), index=True, unique=True)\n date = db.Column(db.DateTime, index=True, default=datetime.utcnow)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import cv2
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
img = cv2.imread('modi.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
#Write the for loop code here
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "759ff4cc123e85bdc8c1457bb521cd35841956cd",
"index": 482,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.imshow('img', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\nimg = cv2.imread('modi.jpg')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\ncv2.imshow('img', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-4": "import numpy as np\nimport cv2\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\nimg = cv2.imread('modi.jpg')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\ncv2.imshow('img', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-5": "import numpy as np\nimport cv2\n\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\n\nimg = cv2.imread('modi.jpg')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\n#Write the for loop code here\n\ncv2.imshow('img',img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy.random as rnd
import numpy as np
B=100000
N1=50
N2=50
p1mle=0.3
p2mle=0.4
taumle=p2mle-p1mle
estimate=[]
for i in range(B):
p1=0.0
for j in range(N1):
if(rnd.uniform(0,1)<p1mle):
p1+=1
p1/=N1
p2=0.0
for j in range(N2):
if(rnd.uniform(0,1)<p2mle):
p2+=1
p2/=N2
estimate.append(p2-p1)
t=-10
estimate=np.array(estimate)
allt=[0.01*t for t in xrange(-5000,5000)]
target=0.95
tol=0.01
for t in allt:
cur=np.mean(np.sqrt(N1+N2)*(estimate-taumle)<t)
if(np.abs(target-cur)<tol):
print(t)
print(cur)
break
|
normal
|
{
"blob_id": "0db0daf9bea254cffaec1280cd13b2d70368cd94",
"index": 289,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(B):\n p1 = 0.0\n for j in range(N1):\n if rnd.uniform(0, 1) < p1mle:\n p1 += 1\n p1 /= N1\n p2 = 0.0\n for j in range(N2):\n if rnd.uniform(0, 1) < p2mle:\n p2 += 1\n p2 /= N2\n estimate.append(p2 - p1)\n<mask token>\nfor t in allt:\n cur = np.mean(np.sqrt(N1 + N2) * (estimate - taumle) < t)\n if np.abs(target - cur) < tol:\n print(t)\n print(cur)\n break\n",
"step-3": "<mask token>\nB = 100000\nN1 = 50\nN2 = 50\np1mle = 0.3\np2mle = 0.4\ntaumle = p2mle - p1mle\nestimate = []\nfor i in range(B):\n p1 = 0.0\n for j in range(N1):\n if rnd.uniform(0, 1) < p1mle:\n p1 += 1\n p1 /= N1\n p2 = 0.0\n for j in range(N2):\n if rnd.uniform(0, 1) < p2mle:\n p2 += 1\n p2 /= N2\n estimate.append(p2 - p1)\nt = -10\nestimate = np.array(estimate)\nallt = [(0.01 * t) for t in xrange(-5000, 5000)]\ntarget = 0.95\ntol = 0.01\nfor t in allt:\n cur = np.mean(np.sqrt(N1 + N2) * (estimate - taumle) < t)\n if np.abs(target - cur) < tol:\n print(t)\n print(cur)\n break\n",
"step-4": "import numpy.random as rnd\nimport numpy as np\nB = 100000\nN1 = 50\nN2 = 50\np1mle = 0.3\np2mle = 0.4\ntaumle = p2mle - p1mle\nestimate = []\nfor i in range(B):\n p1 = 0.0\n for j in range(N1):\n if rnd.uniform(0, 1) < p1mle:\n p1 += 1\n p1 /= N1\n p2 = 0.0\n for j in range(N2):\n if rnd.uniform(0, 1) < p2mle:\n p2 += 1\n p2 /= N2\n estimate.append(p2 - p1)\nt = -10\nestimate = np.array(estimate)\nallt = [(0.01 * t) for t in xrange(-5000, 5000)]\ntarget = 0.95\ntol = 0.01\nfor t in allt:\n cur = np.mean(np.sqrt(N1 + N2) * (estimate - taumle) < t)\n if np.abs(target - cur) < tol:\n print(t)\n print(cur)\n break\n",
"step-5": "import numpy.random as rnd\nimport numpy as np\n\nB=100000\nN1=50\nN2=50\n\np1mle=0.3\t\np2mle=0.4\ntaumle=p2mle-p1mle\n\nestimate=[]\n\nfor i in range(B):\n\n\tp1=0.0\n\tfor j in range(N1):\n\t\tif(rnd.uniform(0,1)<p1mle):\n\t\t\tp1+=1\n\n\tp1/=N1\n\n\tp2=0.0\n\tfor j in range(N2):\n\t\tif(rnd.uniform(0,1)<p2mle):\n\t\t\tp2+=1\n\n\tp2/=N2\n\n\testimate.append(p2-p1)\n\nt=-10\n\nestimate=np.array(estimate)\n\nallt=[0.01*t for t in xrange(-5000,5000)]\n\ntarget=0.95\ntol=0.01\n\nfor t in allt:\n\tcur=np.mean(np.sqrt(N1+N2)*(estimate-taumle)<t)\n\tif(np.abs(target-cur)<tol):\n\t\tprint(t)\n\t\tprint(cur)\n\t\tbreak",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""This program displays a customizable list of items by priority value,
with priority 1 being the highest. Allows the user to add, edit,
mark complete, show completed (hidden), and remove items. Stores the list of
items in a .txt file located where this program's main.py file is. All
changes are automatically saved to the .txt file. Also includes a fun
technical knowledge demonstration using numbers and text responses. The
program will create a new save file if none exists, and prompts for save
file overwrite if data cannot be read successfully. Menu navigation is
accomplished through numeric inputs due to the text-only interface and
tedium of typing out each word accurately and repeatedly."""
__author__ = 'Jordan Kooyman'
# 1/26/21 - 4/15/2021 To-Do List Program - Integration Project for COP 1500
# Spring 2021
# Configurable settings saved to a separate file (?)
# Ability to load a different data or config file (?)
# Color code items by group (?)
# Add a basic calculator to meet math (and string?) command requirements (?)
# TODO: Implement a group system that shows all groups combined, just one
# group, or all categorized by group, and group names - be able to change
# group names (new function) - all functions support groups (individual or
# combined)
import random
# Random number generation used as random verification number when
# overwriting the save file in the event of a failure to load from the save
# file
class ListItem: # Create a class object that will store the data for each
# entry in the list (custom variable)
"""A custom object that stores four pieces of data representing each
entry in the todo list. Contains the text of the todo list entry,
the priority of the entry, the group code (NYI), and the visibility of
the entry"""
def __init__(self, text, priority, group, visible): # From w3schools.com
self.text = text
self.priority = priority
self.group = group
self.visible = visible
def concept_demonstration():
"""The purpose of this function is to prompt the user for numbers and
strings and manipulate them to demonstrate programming fluency with
string and integer operations.
:returns nothing"""
number = clean_input("Please enter a positive number")
number2 = clean_input("Please enter a number")
while number2 == 0: # Rejects a 0 if it is input as the second number
print("Error: Cannot Divide by 0")
number2 = clean_input("Please enter a different number")
color = input("Please enter a color\n")
thing = input("Please enter a thing\n")
thing2 = thing + ' ' # Adding space so that when thing is repeated, it
# has a space in between
# Raise the first number to the second number
location = input("Please enter a location\n")
print(str(number) + " raised to the power of " + str(number2) + " is " +
str(number ** number2))
# Multiply the two numbers
print("{0} multiplied by {1} is {2}".format(str(number), str(number2),
str(number * number2)))
# Divide the first number by the second number
print("{0} divided by {1} is {2}".format(str(number), str(number2),
str(number / number2)))
# Find the modulus of the two numbers
print("The remainder from dividing {0} by {1} is {2}".format(str(number),
str(number2),
str(number %
number2))
)
# Divide the first number by the second and round it down (floor it)
print("{0} divided by {1} rounded down is {2}".format(str(number),
str(number2),
str(number // number2
)))
# Add the two numbers
print("{0} plus {1} is {2}".format(str(number), str(number2),
str(number + number2)))
# Subtract the second number from the first number
print("{0} minus {1} is {2}".format(str(number), str(number2),
str(number - number2)))
if number > 1: # if the first number entered is greater than 1
print("The {0} at {1} yelled '{2}'".format(color + ' ' + thing,
location, thing2 *
int(number - 1) + thing))
# Combine two strings with + (no added space), repeat a string x
# number of times with * (must use an integer) (I have the minus 1
# and + thing to get the spacing to look proper and still repeat
# number amount of times) -if a negative number is used when
# multiplying a string, it does nothing (but does not crash) - but
# it is still handled in the other statement with some added user
# shaming
elif number < 0: # if the first number entered is negative
print("The {0} at {1} yelled '{2}'\nYou entered a negative number "
"when a positive number was requested, so you made the {3} "
"mute. Good Job.".format(color + ' ' + thing, location, thing2 *
int(number), thing))
# Same as above, expect that it will print nothing in the yelled
# section if the first number entered is negative
else: # if the first number entered is 0 or 1 (because of the int()
# function removing a decimal)
print("The {0} at {1} yelled '{2}'".format(color + ' ' + thing,
location, thing *
int(number)))
# this is to prevent errant spaces or showing the phrase too many times
return
def cascade_list(priority_to_cascade_from, todo_list):
"""The purpose of this function is to decrement the priority number of
every item in the provided todo list greater than the priority number
provided.
:param priority_to_cascade_from: the number that is inserted by moving
everything equal to or greater than up by one
:param todo_list: the list of ListItem objects to check in"""
for item in todo_list:
if item.priority >= priority_to_cascade_from:
item.priority += 1
return
def check_priority_overlap(priority_to_check, todo_list):
"""The purpose of this function is to check if the user's priority
number input overlaps with a priority number already in the list,
and if it does, prompts the user whether they want to keep it, change
it, or move everything in the list that has a larger priority value up
by one.
:param priority_to_check: the number to check for overlap with
:param todo_list: the list of ListItem objects to check in
:returns the priority value, either changed or the original input"""
overlap = False
for item in todo_list:
if item.priority == priority_to_check:
overlap = True
if overlap:
answer = 0
while answer > 3 or answer < 1:
answer = clean_input("The priority number you entered overlaps "
"with another entry's priority. Enter:\n1 to "
"change priority number\n2 to leave as is "
"with overlap\n3 to push all priority numbers"
" below this entry down by 1")
if answer > 3 or answer < 1:
print("Invalid Option Selected\nPlease Try Again")
if answer == 1:
priority_to_check = check_priority_overlap(
int(clean_input("New Priority:")), todo_list)
# change the priority value input
elif answer == 3:
cascade_list(priority_to_check, todo_list)
return priority_to_check
def sorting(list_object): # Takes in a ListItem object and returns the
# priority value - from w3schools.com
"""The purpose of this function is to take in a ListItem custom object
and return the priority value stored in it to be used in sorting.
:param list_object: one ListItem object
:returns the priority value stored in the ListItem object"""
return list_object.priority
def print_list(save_file_location, my_list, to_save=False, show_hidden=False):
# Prints out the To-Do list from the common list variable and saves list
# to the .txt file
"""The purpose of this function is to take in the location of the save
file, the todo list variable, whether or not to save, and whether or not
to show hidden and print out the todo list variable, skipping items
marked as hidden unless it is told to show hidden, and saving the todo
list to the file in the save file location if it is told to save.
:param save_file_location: the file path to get to the .txt save file
:param my_list: the list of ListItem objects to check in
:param to_save: whether or not to save the list of items to the file,
default
is false
:param show_hidden: whether or not to display the hidden list items,
default
it false
:returns nothing"""
my_list.sort(key=sorting) # Uses a custom function to be able to get the
# right value to sort by
print("To-Do:")
for item_index in my_list: # The range needs to be the length of the list
# being printed
if item_index.visible and not show_hidden: # Only print visible items
# if show hidden is false
print(item_index.priority, item_index.text, sep='.\t')
elif show_hidden: # Print everything is show hidden is trues
if item_index.visible:
print(item_index.priority, item_index.text, sep='.\t')
else:
print("{0}.~\t{1}".format(item_index.priority, item_index.text)
)
# Indicate hidden items
# Printing the item priority with a dot, then the item, with a tab
# separating them
if to_save:
save_list(my_list, save_file_location)
return
def divider(size=100): # Draws a dividing line to go between sections
# (default 100 characters long)
"""The purpose of this function is to print a dashed line across the
screen with a specified length.
:param size: how many characters long the line should be, default is 100
:returns nothing"""
for i in range(size):
print('-', end='') # Prints out a single dash, no newline afterwards
# (the end= sets the last character to blank
print('') # Print out a newline (using the default ending of a print
# statement being a newline
return
def clean_input(prompt='Error'): # A special input function that will reject a
# user's input of text when a number is requested -- if no prompt is
# specified in the program, it will display "Error"
"""The purpose of this function is to prompt the user for a numerical
input and only accept a numerical input, rejects no input and text input.
:param prompt: the prompt the user sees, default is Error
:returns the user input as a float"""
text = True
phrase = '0'
while text:
phrase = input(prompt + '\n')
try: # Adapted from an example in the ThinkPython textbook (15.7) -
# Checks whether the input is a number, positive or negative. If
# not, rejects the input and user gets to try again
float(phrase)
text = False
except ValueError:
print("Error: Non-Numeric Entry Detected")
# if phrase.isnumeric(): # Checks for a positive number (negative
# rejected as well as text) - replaced with superior form from textbook
# example
# return float(phrase) # Return the number the user entered
# else:
# print("Error: Non-Numeric Entry Detected")
return float(phrase) # Return the number the user entered
def load_from_file(save_location): # This is a function for readability -
# opens txt file in read mode and loads it
"""The purpose of this function is to open the .txt save file and read
the contents into memory in the form of a list of custom ListItem
objects.
:param save_location: the location the save file is stored in
:returns a list of ListItem objects that is populated with the data from
the save file"""
# into an array (list) of ListItem variables
data_file_r = open(save_location, "r") # Open txt file in read mode
list_item = ["Text", -1, 2, True] # Item, Item Priority, group, is visible
todo = [] # make a list of lists
temp = 1 # Temporary counter variable to reconstruct lists from .txt file
line_counter = 1
try:
for item in data_file_r: # loop through each line in the file, one at
# a time - from w3schools.com
if (line_counter - 1) % 5 != 0 and line_counter > 0:
cleaned_item = ""
for character_index in range(len(
item)): # Loop through each character in the extracted
# string
if character_index != len(
item) - 1: # if it is not the last character, add
# it to the cleaned string
cleaned_item += item[character_index]
# Add every character to a
# but \n
if temp == 1: # Item Text
list_item[0] = cleaned_item
temp = 2
elif temp == 2: # Item Priority
list_item[1] = int(cleaned_item)
temp = 3
elif temp == 3: # Item Group
list_item[2] = int(cleaned_item)
temp = 4
elif temp == 4: # Is Visible
if cleaned_item == "False":
list_item[3] = False
else: # Assume the item is visible if the text is not
# False
list_item[3] = True
todo.insert(0, ListItem(list_item[0], list_item[1],
list_item[2], list_item[3]))
temp = 1
else: # If some error occurred and a condition outside of the
# possible four is met, restart
temp = 1
line_counter += 1
except ValueError:
print("An error has occurred trying to load the file")
result = int(clean_input(
"Please enter a 2 to overwrite the current save file and start "
"over or any other number to exit the program"))
if result == 2:
key = random.randint(2, 9) # Generate a random integer between 2
# and 9 to be used as a second dynamic check
if key == 2:
key = 1 # If the random number is 2, set it to one so that
# the same number (2) cannot be used as the verification number
result2 = int(clean_input("Are you sure you want to delete all "
"of your saved data\nEnter {0} to "
"proceed, or anything else to "
"cancel".format(str(key))))
if result2 == key:
data_file_w = open("C:Item_List.txt", "w")
data_file_w.close()
todo = []
print("Save Data Erased")
return todo # Return an empty list if file load failed
else:
print("Program Exiting")
quit(1)
else:
print("Program Exiting")
quit(1) # Exit the program with the exit code of 1
data_file_r.close()
# All the list functions above referenced from w3schools.com What is
# happening above: Opening the file, initializing a list to hold all
# four pieces of data, then after pulling the data from the file and
# storing in the list, it is copied (not referenced) into my main list
# of ListItem objects
return todo
def save_list(todo_list, save_location):
"""The purpose of this function is to save a list of ListItem objects to a
specified location in a .txt file with the first line of the document
being an explanation of the file format being used.
:param todo_list: the list of ListItem objects to save to the save file
:param save_location: the location to create or overwrite the save file
:returns nothing"""
data_file_w = open(save_location,
"w") # open the save file and clear the data from it
data_file_w.write("Warning: The Todo-List Program will not be able to "
"load this save file if it is incorrectly modified. "
"Modify at your own risk. The structure is Entry "
"Text, Entry Priority as a number, Entry Group as a "
"number (Not Yet Utilized, but necessary), and Entry "
"Visibility as a boolean, each on a separate line, a "
"single line gap in between, and the "
"very first line is skipped\n")
for item in todo_list:
data_file_w.write("{0}\n{1}\n{2}\n{3}\n\n".format(item.text,
str(item.priority),
str(item.group),
str(item.visible)))
data_file_w.close()
return
def add_item(todo_list):
"""The purpose of this function is to prompt the user for the two
fields of necessary information to make a new entry in the todo list,
the item name and priority, checking if the priority overlaps with an
existing entry in the todo list.
:param todo_list: the list of ListItem objects to add a new ListItem
object to
:returns nothing"""
text = input("Please enter the name of the new item\n")
priority = check_priority_overlap(
int(clean_input("Please enter the priority of this item")), todo_list)
# group = int(clean_input("Please enter the group number of this item"))
group = 0 # Set the group value to zero, group system NYI
visible = True
todo_list.insert(0, ListItem(text, priority, group, visible)) # Join
# the inputs to be added to the overall list
return
def select_item(todo_list, prompt='Error'): # Ask the user
# which item from the list is to be modified
"""The purpose of this function is to display a list of all items in the
todo list and number each individually to allow the user to select an
item to modify or delete. The available numbers may
skip some if some items are hidden
:param todo_list: the list of ListItem objects to display
:param prompt: the prompt to display to the user, default is Error
:returns the user selected item's index in a computer friendly form (
starting at 0 instead of 1)"""
valid = False
index = 0
while not valid:
counter = 1 # counter for index printing
for item in todo_list: # The range needs to be the length of the list
# being printed
if item.visible:
print(counter, item.text, sep='\t')
else:
print(counter, "~ {0} ~".format(item.text), sep='\t')
counter += 1
# Printing the item number, then the item, with a tab separating
# them
index = int(clean_input(prompt))
if index < counter:
valid = True
else:
print("Invalid Input: Number is too big")
return index - 1
def remove_item(todo_list):
"""The purpose of this function is to delete a ListItem object from a
list of ListItem objects by prompting the user for the index and
verifying they want to delete the item.
:param todo_list: the list of ListItem objects from which to remove
one object
:returns nothing"""
item = select_item(todo_list, "Please enter the item number you wish to "
"remove\nEnter a negative number or zero "
"to cancel")
if item >= 0: # 0, not 1 because the index returned is shifted to be
# computer friendly
todo_list.pop(item)
return
def mark_complete(todo_list):
"""The purpose of this function is to mark a selectedListItem object as
hidden and not to be printed unless specified, apart from selecting items.
:param todo_list: the list of ListItem objects to modify
:returns nothing"""
item = select_item(todo_list, "Please enter the item number you wish to "
"Mark Completed and hide from the "
"list\nEnter a negative number or zero to "
"cancel")
if item >= 0:
todo_list[item].visible = False
return
def edit_item(todo_list):
"""The purpose of this function is to edit a ListItem object in the
list of ListItem objects, changing either the name or priority
:param todo_list: the list of ListItem objects that gets one object
modified
:returns nothing"""
item = select_item(todo_list, "Please enter the item number you wish to "
"edit\nEnter a negative number or zero to "
"cancel")
if item >= 0:
while True:
value = clean_input("Which value would you like to edit? Enter:\n1"
" for the Item Text (Currently: {0})\n2 for "
"the Item Priority (Currently: {1})\n3 to "
"Cancel and Exit".format(todo_list[item].text,
str(todo_list[item].
priority)))
if value == 1: # Item Text Change
print("The Current Text is: {0}".format(todo_list[item].text))
todo_list[item].text = input("New Text:\n")
elif value == 2: # Item Priority Change
print("The Current Priority is: {0}".format(str(todo_list[item]
.priority)))
todo_list[item].priority = check_priority_overlap(
int(clean_input("New Priority:")), todo_list)
# elif value == 3: # Item Group Change
# print(f"The Current Group is: {todo_list[item].group}")
# todo_list[item].group = int(clean_input("New Group Number:"))
elif value == 3: # Exit Changing Menu
break
else:
print("Invalid Input - Please Try Again")
return
def check_list_status(todo_list): # Checks if the list is completely hidden
# (2), completely empty (1), or neither (0)
"""The purpose of this function is to check whether there are visible
items in the list, the entire list is hidden, or the list contains no
more ListItem objects
:param todo_list: the list of ListItem objects to check
:returns which condition using integer codes"""
if len(todo_list) == 0:
state = 1 # Empty List
else:
state = 2 # Entirely Hidden List
for item_index in range(len(todo_list)):
if todo_list[item_index].visible: # If an item is visible, then
# they are not all hidden
state = 0 # Neither
return state
def menu_loop(todo_list, save_file_location):
"""The purpose of this function is to repeatedly display the todo list
and user prompts menu until the program is closed
:param todo_list: the list of ListItem objects to display or modify
:param save_file_location: where the .txt save file is located for saving
:returns nothing"""
show_hidden = False
selection = 0
invalid_input = False
while selection != 6:
if invalid_input:
invalid_input = False
else:
print_list(save_file_location, todo_list, True, show_hidden)
divider(137 + 17) # Length of prompt statement below
list_status = check_list_status(todo_list)
if list_status == 0: # No Issues
selection = int(clean_input("Please enter: 1 for Add Item, 2 for "
"Remove Item, 3 for Edit Item, "
"4 for Mark Item Complete, "
"5 for Toggle Hidden, and 6 for "
"Exit, 7 for Concept "
"Demonstration\n"))
elif list_status == 1: # Empty List - No Remove, Edit, Mark, or Toggle
selection = int(clean_input("Please enter: 1 for Add Item, and 6 "
"for Exit, 7 for Concept "
"Demonstration\n"))
else: # Entirely Hidden List
selection = int(clean_input("Please enter: 1 for Add Item, 5 for "
"Toggle Hidden, and 6 for Exit, "
"7 for Concept Demonstration\n"))
# Uses the clean_input function above to get a number from the
# user, converting it to an int so a decimal won't return an
# invalid input in the following steps
print("") # Blank Print statement to add an extra blank line after
# user input before displaying response
if selection == 1: # Add Item - modify the list variable, then save
# to file
add_item(todo_list)
elif selection == 2: # Remove Item - modify the list variable, then
# save to file
if list_status == 0:
remove_item(todo_list)
elif list_status == 2:
print("Invalid Command: The Todo List has no visible items "
"to remove")
else:
print("Invalid Command: The Todo List has no items to remove")
elif selection == 3: # Edit Item - modify the list variable, then save
# to file
if list_status == 0:
edit_item(todo_list)
elif list_status == 2:
print("Invalid Command: The Todo List has no visible items "
"to edit")
else:
print("Invalid Command: The Todo List has no items to edit")
elif selection == 4: # Mark Item Complete - modify the list variable,
# then save to file
if list_status == 0:
mark_complete(todo_list)
elif list_status == 2:
print("Invalid Command: The Todo List has no visible items "
"to mark complete")
else:
print("Invalid Command: The Todo List has no items to mark "
"complete")
elif selection == 5: # Show Hidden - modify the list variable, then
# save to file
if list_status == 0 or list_status == 2:
if show_hidden:
print("No longer showing hidden items")
show_hidden = False
else:
print("Now showing hidden items")
show_hidden = True
else:
print("Invalid Command: The Todo List has no items to show or "
"hide")
elif selection == 6: # Exit Program
print("Now Closing")
elif selection == 7: # Extra section to demonstrate proficiency with
# topics covered in class - Sprint 1
concept_demonstration()
else:
invalid_input = True
print("Invalid Input\nPlease Try Again")
def main():
"""The purpose of this function is to ensure the save file exists at the
specified save file location, load the save file into memory, display a
welcome message with a divider, then start the menu loop until the
program is closed
:returns nothing"""
save_file_location = "Item_List.txt"
data_file_a = open(save_file_location, "a") # Opens ItemList.txt which
# is accessible in the file variable, in append mode (using this so that
# if the file exists, nothing happens, but if it does not exist, it gets
# created from w3schools.com
data_file_a.close() # Close the file, I now know it exists
loaded_list = load_from_file(save_file_location)
print("Welcome to the To-Do List - Version: 0.1.2")
divider(42) # Length of welcome statement above
menu_loop(loaded_list, save_file_location)
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "168a12e6653a0526f29c163913def50147481154",
"index": 632,
"step-1": "<mask token>\n\n\nclass ListItem:\n \"\"\"A custom object that stores four pieces of data representing each\n entry in the todo list. Contains the text of the todo list entry,\n the priority of the entry, the group code (NYI), and the visibility of\n the entry\"\"\"\n\n def __init__(self, text, priority, group, visible):\n self.text = text\n self.priority = priority\n self.group = group\n self.visible = visible\n\n\n<mask token>\n\n\ndef check_priority_overlap(priority_to_check, todo_list):\n \"\"\"The purpose of this function is to check if the user's priority\n number input overlaps with a priority number already in the list,\n and if it does, prompts the user whether they want to keep it, change\n it, or move everything in the list that has a larger priority value up\n by one.\n :param priority_to_check: the number to check for overlap with\n :param todo_list: the list of ListItem objects to check in\n :returns the priority value, either changed or the original input\"\"\"\n overlap = False\n for item in todo_list:\n if item.priority == priority_to_check:\n overlap = True\n if overlap:\n answer = 0\n while answer > 3 or answer < 1:\n answer = clean_input(\n \"\"\"The priority number you entered overlaps with another entry's priority. Enter:\n1 to change priority number\n2 to leave as is with overlap\n3 to push all priority numbers below this entry down by 1\"\"\"\n )\n if answer > 3 or answer < 1:\n print('Invalid Option Selected\\nPlease Try Again')\n if answer == 1:\n priority_to_check = check_priority_overlap(int(clean_input(\n 'New Priority:')), todo_list)\n elif answer == 3:\n cascade_list(priority_to_check, todo_list)\n return priority_to_check\n\n\n<mask token>\n\n\ndef clean_input(prompt='Error'):\n \"\"\"The purpose of this function is to prompt the user for a numerical\n input and only accept a numerical input, rejects no input and text input.\n :param prompt: the prompt the user sees, default is Error\n :returns the user input as a float\"\"\"\n text = True\n phrase = '0'\n while text:\n phrase = input(prompt + '\\n')\n try:\n float(phrase)\n text = False\n except ValueError:\n print('Error: Non-Numeric Entry Detected')\n return float(phrase)\n\n\n<mask token>\n\n\ndef add_item(todo_list):\n \"\"\"The purpose of this function is to prompt the user for the two\n fields of necessary information to make a new entry in the todo list,\n the item name and priority, checking if the priority overlaps with an\n existing entry in the todo list.\n :param todo_list: the list of ListItem objects to add a new ListItem\n object to\n :returns nothing\"\"\"\n text = input('Please enter the name of the new item\\n')\n priority = check_priority_overlap(int(clean_input(\n 'Please enter the priority of this item')), todo_list)\n group = 0\n visible = True\n todo_list.insert(0, ListItem(text, priority, group, visible))\n return\n\n\n<mask token>\n\n\ndef mark_complete(todo_list):\n \"\"\"The purpose of this function is to mark a selectedListItem object as\n hidden and not to be printed unless specified, apart from selecting items.\n :param todo_list: the list of ListItem objects to modify\n :returns nothing\"\"\"\n item = select_item(todo_list,\n \"\"\"Please enter the item number you wish to Mark Completed and hide from the list\nEnter a negative number or zero to cancel\"\"\"\n )\n if item >= 0:\n todo_list[item].visible = False\n return\n\n\n<mask token>\n\n\ndef check_list_status(todo_list):\n \"\"\"The purpose of this function is to check whether there are visible\n items in the list, the entire list is hidden, or the list contains no\n more ListItem objects\n :param todo_list: the list of ListItem objects to check\n :returns which condition using integer codes\"\"\"\n if len(todo_list) == 0:\n state = 1\n else:\n state = 2\n for item_index in range(len(todo_list)):\n if todo_list[item_index].visible:\n state = 0\n return state\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ListItem:\n \"\"\"A custom object that stores four pieces of data representing each\n entry in the todo list. Contains the text of the todo list entry,\n the priority of the entry, the group code (NYI), and the visibility of\n the entry\"\"\"\n\n def __init__(self, text, priority, group, visible):\n self.text = text\n self.priority = priority\n self.group = group\n self.visible = visible\n\n\n<mask token>\n\n\ndef check_priority_overlap(priority_to_check, todo_list):\n \"\"\"The purpose of this function is to check if the user's priority\n number input overlaps with a priority number already in the list,\n and if it does, prompts the user whether they want to keep it, change\n it, or move everything in the list that has a larger priority value up\n by one.\n :param priority_to_check: the number to check for overlap with\n :param todo_list: the list of ListItem objects to check in\n :returns the priority value, either changed or the original input\"\"\"\n overlap = False\n for item in todo_list:\n if item.priority == priority_to_check:\n overlap = True\n if overlap:\n answer = 0\n while answer > 3 or answer < 1:\n answer = clean_input(\n \"\"\"The priority number you entered overlaps with another entry's priority. Enter:\n1 to change priority number\n2 to leave as is with overlap\n3 to push all priority numbers below this entry down by 1\"\"\"\n )\n if answer > 3 or answer < 1:\n print('Invalid Option Selected\\nPlease Try Again')\n if answer == 1:\n priority_to_check = check_priority_overlap(int(clean_input(\n 'New Priority:')), todo_list)\n elif answer == 3:\n cascade_list(priority_to_check, todo_list)\n return priority_to_check\n\n\n<mask token>\n\n\ndef clean_input(prompt='Error'):\n \"\"\"The purpose of this function is to prompt the user for a numerical\n input and only accept a numerical input, rejects no input and text input.\n :param prompt: the prompt the user sees, default is Error\n :returns the user input as a float\"\"\"\n text = True\n phrase = '0'\n while text:\n phrase = input(prompt + '\\n')\n try:\n float(phrase)\n text = False\n except ValueError:\n print('Error: Non-Numeric Entry Detected')\n return float(phrase)\n\n\n<mask token>\n\n\ndef save_list(todo_list, save_location):\n \"\"\"The purpose of this function is to save a list of ListItem objects to a\n specified location in a .txt file with the first line of the document\n being an explanation of the file format being used.\n :param todo_list: the list of ListItem objects to save to the save file\n :param save_location: the location to create or overwrite the save file\n :returns nothing\"\"\"\n data_file_w = open(save_location, 'w')\n data_file_w.write(\n \"\"\"Warning: The Todo-List Program will not be able to load this save file if it is incorrectly modified. Modify at your own risk. The structure is Entry Text, Entry Priority as a number, Entry Group as a number (Not Yet Utilized, but necessary), and Entry Visibility as a boolean, each on a separate line, a single line gap in between, and the very first line is skipped\n\"\"\"\n )\n for item in todo_list:\n data_file_w.write('{0}\\n{1}\\n{2}\\n{3}\\n\\n'.format(item.text, str(\n item.priority), str(item.group), str(item.visible)))\n data_file_w.close()\n return\n\n\ndef add_item(todo_list):\n \"\"\"The purpose of this function is to prompt the user for the two\n fields of necessary information to make a new entry in the todo list,\n the item name and priority, checking if the priority overlaps with an\n existing entry in the todo list.\n :param todo_list: the list of ListItem objects to add a new ListItem\n object to\n :returns nothing\"\"\"\n text = input('Please enter the name of the new item\\n')\n priority = check_priority_overlap(int(clean_input(\n 'Please enter the priority of this item')), todo_list)\n group = 0\n visible = True\n todo_list.insert(0, ListItem(text, priority, group, visible))\n return\n\n\n<mask token>\n\n\ndef remove_item(todo_list):\n \"\"\"The purpose of this function is to delete a ListItem object from a\n list of ListItem objects by prompting the user for the index and\n verifying they want to delete the item.\n :param todo_list: the list of ListItem objects from which to remove\n one object\n :returns nothing\"\"\"\n item = select_item(todo_list,\n \"\"\"Please enter the item number you wish to remove\nEnter a negative number or zero to cancel\"\"\"\n )\n if item >= 0:\n todo_list.pop(item)\n return\n\n\ndef mark_complete(todo_list):\n \"\"\"The purpose of this function is to mark a selectedListItem object as\n hidden and not to be printed unless specified, apart from selecting items.\n :param todo_list: the list of ListItem objects to modify\n :returns nothing\"\"\"\n item = select_item(todo_list,\n \"\"\"Please enter the item number you wish to Mark Completed and hide from the list\nEnter a negative number or zero to cancel\"\"\"\n )\n if item >= 0:\n todo_list[item].visible = False\n return\n\n\ndef edit_item(todo_list):\n \"\"\"The purpose of this function is to edit a ListItem object in the\n list of ListItem objects, changing either the name or priority\n :param todo_list: the list of ListItem objects that gets one object\n modified\n :returns nothing\"\"\"\n item = select_item(todo_list,\n \"\"\"Please enter the item number you wish to edit\nEnter a negative number or zero to cancel\"\"\"\n )\n if item >= 0:\n while True:\n value = clean_input(\n \"\"\"Which value would you like to edit? Enter:\n1 for the Item Text (Currently: {0})\n2 for the Item Priority (Currently: {1})\n3 to Cancel and Exit\"\"\"\n .format(todo_list[item].text, str(todo_list[item].priority)))\n if value == 1:\n print('The Current Text is: {0}'.format(todo_list[item].text))\n todo_list[item].text = input('New Text:\\n')\n elif value == 2:\n print('The Current Priority is: {0}'.format(str(todo_list[\n item].priority)))\n todo_list[item].priority = check_priority_overlap(int(\n clean_input('New Priority:')), todo_list)\n elif value == 3:\n break\n else:\n print('Invalid Input - Please Try Again')\n return\n\n\ndef check_list_status(todo_list):\n \"\"\"The purpose of this function is to check whether there are visible\n items in the list, the entire list is hidden, or the list contains no\n more ListItem objects\n :param todo_list: the list of ListItem objects to check\n :returns which condition using integer codes\"\"\"\n if len(todo_list) == 0:\n state = 1\n else:\n state = 2\n for item_index in range(len(todo_list)):\n if todo_list[item_index].visible:\n state = 0\n return state\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ListItem:\n \"\"\"A custom object that stores four pieces of data representing each\n entry in the todo list. Contains the text of the todo list entry,\n the priority of the entry, the group code (NYI), and the visibility of\n the entry\"\"\"\n\n def __init__(self, text, priority, group, visible):\n self.text = text\n self.priority = priority\n self.group = group\n self.visible = visible\n\n\ndef concept_demonstration():\n \"\"\"The purpose of this function is to prompt the user for numbers and\n strings and manipulate them to demonstrate programming fluency with\n string and integer operations.\n :returns nothing\"\"\"\n number = clean_input('Please enter a positive number')\n number2 = clean_input('Please enter a number')\n while number2 == 0:\n print('Error: Cannot Divide by 0')\n number2 = clean_input('Please enter a different number')\n color = input('Please enter a color\\n')\n thing = input('Please enter a thing\\n')\n thing2 = thing + ' '\n location = input('Please enter a location\\n')\n print(str(number) + ' raised to the power of ' + str(number2) + ' is ' +\n str(number ** number2))\n print('{0} multiplied by {1} is {2}'.format(str(number), str(number2),\n str(number * number2)))\n print('{0} divided by {1} is {2}'.format(str(number), str(number2), str\n (number / number2)))\n print('The remainder from dividing {0} by {1} is {2}'.format(str(\n number), str(number2), str(number % number2)))\n print('{0} divided by {1} rounded down is {2}'.format(str(number), str(\n number2), str(number // number2)))\n print('{0} plus {1} is {2}'.format(str(number), str(number2), str(\n number + number2)))\n print('{0} minus {1} is {2}'.format(str(number), str(number2), str(\n number - number2)))\n if number > 1:\n print(\"The {0} at {1} yelled '{2}'\".format(color + ' ' + thing,\n location, thing2 * int(number - 1) + thing))\n elif number < 0:\n print(\n \"\"\"The {0} at {1} yelled '{2}'\nYou entered a negative number when a positive number was requested, so you made the {3} mute. Good Job.\"\"\"\n .format(color + ' ' + thing, location, thing2 * int(number), thing)\n )\n else:\n print(\"The {0} at {1} yelled '{2}'\".format(color + ' ' + thing,\n location, thing * int(number)))\n return\n\n\n<mask token>\n\n\ndef check_priority_overlap(priority_to_check, todo_list):\n \"\"\"The purpose of this function is to check if the user's priority\n number input overlaps with a priority number already in the list,\n and if it does, prompts the user whether they want to keep it, change\n it, or move everything in the list that has a larger priority value up\n by one.\n :param priority_to_check: the number to check for overlap with\n :param todo_list: the list of ListItem objects to check in\n :returns the priority value, either changed or the original input\"\"\"\n overlap = False\n for item in todo_list:\n if item.priority == priority_to_check:\n overlap = True\n if overlap:\n answer = 0\n while answer > 3 or answer < 1:\n answer = clean_input(\n \"\"\"The priority number you entered overlaps with another entry's priority. Enter:\n1 to change priority number\n2 to leave as is with overlap\n3 to push all priority numbers below this entry down by 1\"\"\"\n )\n if answer > 3 or answer < 1:\n print('Invalid Option Selected\\nPlease Try Again')\n if answer == 1:\n priority_to_check = check_priority_overlap(int(clean_input(\n 'New Priority:')), todo_list)\n elif answer == 3:\n cascade_list(priority_to_check, todo_list)\n return priority_to_check\n\n\n<mask token>\n\n\ndef clean_input(prompt='Error'):\n \"\"\"The purpose of this function is to prompt the user for a numerical\n input and only accept a numerical input, rejects no input and text input.\n :param prompt: the prompt the user sees, default is Error\n :returns the user input as a float\"\"\"\n text = True\n phrase = '0'\n while text:\n phrase = input(prompt + '\\n')\n try:\n float(phrase)\n text = False\n except ValueError:\n print('Error: Non-Numeric Entry Detected')\n return float(phrase)\n\n\n<mask token>\n\n\ndef save_list(todo_list, save_location):\n \"\"\"The purpose of this function is to save a list of ListItem objects to a\n specified location in a .txt file with the first line of the document\n being an explanation of the file format being used.\n :param todo_list: the list of ListItem objects to save to the save file\n :param save_location: the location to create or overwrite the save file\n :returns nothing\"\"\"\n data_file_w = open(save_location, 'w')\n data_file_w.write(\n \"\"\"Warning: The Todo-List Program will not be able to load this save file if it is incorrectly modified. Modify at your own risk. The structure is Entry Text, Entry Priority as a number, Entry Group as a number (Not Yet Utilized, but necessary), and Entry Visibility as a boolean, each on a separate line, a single line gap in between, and the very first line is skipped\n\"\"\"\n )\n for item in todo_list:\n data_file_w.write('{0}\\n{1}\\n{2}\\n{3}\\n\\n'.format(item.text, str(\n item.priority), str(item.group), str(item.visible)))\n data_file_w.close()\n return\n\n\ndef add_item(todo_list):\n \"\"\"The purpose of this function is to prompt the user for the two\n fields of necessary information to make a new entry in the todo list,\n the item name and priority, checking if the priority overlaps with an\n existing entry in the todo list.\n :param todo_list: the list of ListItem objects to add a new ListItem\n object to\n :returns nothing\"\"\"\n text = input('Please enter the name of the new item\\n')\n priority = check_priority_overlap(int(clean_input(\n 'Please enter the priority of this item')), todo_list)\n group = 0\n visible = True\n todo_list.insert(0, ListItem(text, priority, group, visible))\n return\n\n\n<mask token>\n\n\ndef remove_item(todo_list):\n \"\"\"The purpose of this function is to delete a ListItem object from a\n list of ListItem objects by prompting the user for the index and\n verifying they want to delete the item.\n :param todo_list: the list of ListItem objects from which to remove\n one object\n :returns nothing\"\"\"\n item = select_item(todo_list,\n \"\"\"Please enter the item number you wish to remove\nEnter a negative number or zero to cancel\"\"\"\n )\n if item >= 0:\n todo_list.pop(item)\n return\n\n\ndef mark_complete(todo_list):\n \"\"\"The purpose of this function is to mark a selectedListItem object as\n hidden and not to be printed unless specified, apart from selecting items.\n :param todo_list: the list of ListItem objects to modify\n :returns nothing\"\"\"\n item = select_item(todo_list,\n \"\"\"Please enter the item number you wish to Mark Completed and hide from the list\nEnter a negative number or zero to cancel\"\"\"\n )\n if item >= 0:\n todo_list[item].visible = False\n return\n\n\ndef edit_item(todo_list):\n \"\"\"The purpose of this function is to edit a ListItem object in the\n list of ListItem objects, changing either the name or priority\n :param todo_list: the list of ListItem objects that gets one object\n modified\n :returns nothing\"\"\"\n item = select_item(todo_list,\n \"\"\"Please enter the item number you wish to edit\nEnter a negative number or zero to cancel\"\"\"\n )\n if item >= 0:\n while True:\n value = clean_input(\n \"\"\"Which value would you like to edit? Enter:\n1 for the Item Text (Currently: {0})\n2 for the Item Priority (Currently: {1})\n3 to Cancel and Exit\"\"\"\n .format(todo_list[item].text, str(todo_list[item].priority)))\n if value == 1:\n print('The Current Text is: {0}'.format(todo_list[item].text))\n todo_list[item].text = input('New Text:\\n')\n elif value == 2:\n print('The Current Priority is: {0}'.format(str(todo_list[\n item].priority)))\n todo_list[item].priority = check_priority_overlap(int(\n clean_input('New Priority:')), todo_list)\n elif value == 3:\n break\n else:\n print('Invalid Input - Please Try Again')\n return\n\n\ndef check_list_status(todo_list):\n \"\"\"The purpose of this function is to check whether there are visible\n items in the list, the entire list is hidden, or the list contains no\n more ListItem objects\n :param todo_list: the list of ListItem objects to check\n :returns which condition using integer codes\"\"\"\n if len(todo_list) == 0:\n state = 1\n else:\n state = 2\n for item_index in range(len(todo_list)):\n if todo_list[item_index].visible:\n state = 0\n return state\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ListItem:\n \"\"\"A custom object that stores four pieces of data representing each\n entry in the todo list. Contains the text of the todo list entry,\n the priority of the entry, the group code (NYI), and the visibility of\n the entry\"\"\"\n\n def __init__(self, text, priority, group, visible):\n self.text = text\n self.priority = priority\n self.group = group\n self.visible = visible\n\n\ndef concept_demonstration():\n \"\"\"The purpose of this function is to prompt the user for numbers and\n strings and manipulate them to demonstrate programming fluency with\n string and integer operations.\n :returns nothing\"\"\"\n number = clean_input('Please enter a positive number')\n number2 = clean_input('Please enter a number')\n while number2 == 0:\n print('Error: Cannot Divide by 0')\n number2 = clean_input('Please enter a different number')\n color = input('Please enter a color\\n')\n thing = input('Please enter a thing\\n')\n thing2 = thing + ' '\n location = input('Please enter a location\\n')\n print(str(number) + ' raised to the power of ' + str(number2) + ' is ' +\n str(number ** number2))\n print('{0} multiplied by {1} is {2}'.format(str(number), str(number2),\n str(number * number2)))\n print('{0} divided by {1} is {2}'.format(str(number), str(number2), str\n (number / number2)))\n print('The remainder from dividing {0} by {1} is {2}'.format(str(\n number), str(number2), str(number % number2)))\n print('{0} divided by {1} rounded down is {2}'.format(str(number), str(\n number2), str(number // number2)))\n print('{0} plus {1} is {2}'.format(str(number), str(number2), str(\n number + number2)))\n print('{0} minus {1} is {2}'.format(str(number), str(number2), str(\n number - number2)))\n if number > 1:\n print(\"The {0} at {1} yelled '{2}'\".format(color + ' ' + thing,\n location, thing2 * int(number - 1) + thing))\n elif number < 0:\n print(\n \"\"\"The {0} at {1} yelled '{2}'\nYou entered a negative number when a positive number was requested, so you made the {3} mute. Good Job.\"\"\"\n .format(color + ' ' + thing, location, thing2 * int(number), thing)\n )\n else:\n print(\"The {0} at {1} yelled '{2}'\".format(color + ' ' + thing,\n location, thing * int(number)))\n return\n\n\ndef cascade_list(priority_to_cascade_from, todo_list):\n \"\"\"The purpose of this function is to decrement the priority number of\n every item in the provided todo list greater than the priority number\n provided.\n :param priority_to_cascade_from: the number that is inserted by moving\n everything equal to or greater than up by one\n :param todo_list: the list of ListItem objects to check in\"\"\"\n for item in todo_list:\n if item.priority >= priority_to_cascade_from:\n item.priority += 1\n return\n\n\ndef check_priority_overlap(priority_to_check, todo_list):\n \"\"\"The purpose of this function is to check if the user's priority\n number input overlaps with a priority number already in the list,\n and if it does, prompts the user whether they want to keep it, change\n it, or move everything in the list that has a larger priority value up\n by one.\n :param priority_to_check: the number to check for overlap with\n :param todo_list: the list of ListItem objects to check in\n :returns the priority value, either changed or the original input\"\"\"\n overlap = False\n for item in todo_list:\n if item.priority == priority_to_check:\n overlap = True\n if overlap:\n answer = 0\n while answer > 3 or answer < 1:\n answer = clean_input(\n \"\"\"The priority number you entered overlaps with another entry's priority. Enter:\n1 to change priority number\n2 to leave as is with overlap\n3 to push all priority numbers below this entry down by 1\"\"\"\n )\n if answer > 3 or answer < 1:\n print('Invalid Option Selected\\nPlease Try Again')\n if answer == 1:\n priority_to_check = check_priority_overlap(int(clean_input(\n 'New Priority:')), todo_list)\n elif answer == 3:\n cascade_list(priority_to_check, todo_list)\n return priority_to_check\n\n\ndef sorting(list_object):\n \"\"\"The purpose of this function is to take in a ListItem custom object\n and return the priority value stored in it to be used in sorting.\n :param list_object: one ListItem object\n :returns the priority value stored in the ListItem object\"\"\"\n return list_object.priority\n\n\n<mask token>\n\n\ndef clean_input(prompt='Error'):\n \"\"\"The purpose of this function is to prompt the user for a numerical\n input and only accept a numerical input, rejects no input and text input.\n :param prompt: the prompt the user sees, default is Error\n :returns the user input as a float\"\"\"\n text = True\n phrase = '0'\n while text:\n phrase = input(prompt + '\\n')\n try:\n float(phrase)\n text = False\n except ValueError:\n print('Error: Non-Numeric Entry Detected')\n return float(phrase)\n\n\ndef load_from_file(save_location):\n \"\"\"The purpose of this function is to open the .txt save file and read\n the contents into memory in the form of a list of custom ListItem\n objects.\n :param save_location: the location the save file is stored in\n :returns a list of ListItem objects that is populated with the data from\n the save file\"\"\"\n data_file_r = open(save_location, 'r')\n list_item = ['Text', -1, 2, True]\n todo = []\n temp = 1\n line_counter = 1\n try:\n for item in data_file_r:\n if (line_counter - 1) % 5 != 0 and line_counter > 0:\n cleaned_item = ''\n for character_index in range(len(item)):\n if character_index != len(item) - 1:\n cleaned_item += item[character_index]\n if temp == 1:\n list_item[0] = cleaned_item\n temp = 2\n elif temp == 2:\n list_item[1] = int(cleaned_item)\n temp = 3\n elif temp == 3:\n list_item[2] = int(cleaned_item)\n temp = 4\n elif temp == 4:\n if cleaned_item == 'False':\n list_item[3] = False\n else:\n list_item[3] = True\n todo.insert(0, ListItem(list_item[0], list_item[1],\n list_item[2], list_item[3]))\n temp = 1\n else:\n temp = 1\n line_counter += 1\n except ValueError:\n print('An error has occurred trying to load the file')\n result = int(clean_input(\n 'Please enter a 2 to overwrite the current save file and start over or any other number to exit the program'\n ))\n if result == 2:\n key = random.randint(2, 9)\n if key == 2:\n key = 1\n result2 = int(clean_input(\n \"\"\"Are you sure you want to delete all of your saved data\nEnter {0} to proceed, or anything else to cancel\"\"\"\n .format(str(key))))\n if result2 == key:\n data_file_w = open('C:Item_List.txt', 'w')\n data_file_w.close()\n todo = []\n print('Save Data Erased')\n return todo\n else:\n print('Program Exiting')\n quit(1)\n else:\n print('Program Exiting')\n quit(1)\n data_file_r.close()\n return todo\n\n\ndef save_list(todo_list, save_location):\n \"\"\"The purpose of this function is to save a list of ListItem objects to a\n specified location in a .txt file with the first line of the document\n being an explanation of the file format being used.\n :param todo_list: the list of ListItem objects to save to the save file\n :param save_location: the location to create or overwrite the save file\n :returns nothing\"\"\"\n data_file_w = open(save_location, 'w')\n data_file_w.write(\n \"\"\"Warning: The Todo-List Program will not be able to load this save file if it is incorrectly modified. Modify at your own risk. The structure is Entry Text, Entry Priority as a number, Entry Group as a number (Not Yet Utilized, but necessary), and Entry Visibility as a boolean, each on a separate line, a single line gap in between, and the very first line is skipped\n\"\"\"\n )\n for item in todo_list:\n data_file_w.write('{0}\\n{1}\\n{2}\\n{3}\\n\\n'.format(item.text, str(\n item.priority), str(item.group), str(item.visible)))\n data_file_w.close()\n return\n\n\ndef add_item(todo_list):\n \"\"\"The purpose of this function is to prompt the user for the two\n fields of necessary information to make a new entry in the todo list,\n the item name and priority, checking if the priority overlaps with an\n existing entry in the todo list.\n :param todo_list: the list of ListItem objects to add a new ListItem\n object to\n :returns nothing\"\"\"\n text = input('Please enter the name of the new item\\n')\n priority = check_priority_overlap(int(clean_input(\n 'Please enter the priority of this item')), todo_list)\n group = 0\n visible = True\n todo_list.insert(0, ListItem(text, priority, group, visible))\n return\n\n\ndef select_item(todo_list, prompt='Error'):\n \"\"\"The purpose of this function is to display a list of all items in the\n todo list and number each individually to allow the user to select an\n item to modify or delete. The available numbers may\n skip some if some items are hidden\n :param todo_list: the list of ListItem objects to display\n :param prompt: the prompt to display to the user, default is Error\n :returns the user selected item's index in a computer friendly form (\n starting at 0 instead of 1)\"\"\"\n valid = False\n index = 0\n while not valid:\n counter = 1\n for item in todo_list:\n if item.visible:\n print(counter, item.text, sep='\\t')\n else:\n print(counter, '~ {0} ~'.format(item.text), sep='\\t')\n counter += 1\n index = int(clean_input(prompt))\n if index < counter:\n valid = True\n else:\n print('Invalid Input: Number is too big')\n return index - 1\n\n\ndef remove_item(todo_list):\n \"\"\"The purpose of this function is to delete a ListItem object from a\n list of ListItem objects by prompting the user for the index and\n verifying they want to delete the item.\n :param todo_list: the list of ListItem objects from which to remove\n one object\n :returns nothing\"\"\"\n item = select_item(todo_list,\n \"\"\"Please enter the item number you wish to remove\nEnter a negative number or zero to cancel\"\"\"\n )\n if item >= 0:\n todo_list.pop(item)\n return\n\n\ndef mark_complete(todo_list):\n \"\"\"The purpose of this function is to mark a selectedListItem object as\n hidden and not to be printed unless specified, apart from selecting items.\n :param todo_list: the list of ListItem objects to modify\n :returns nothing\"\"\"\n item = select_item(todo_list,\n \"\"\"Please enter the item number you wish to Mark Completed and hide from the list\nEnter a negative number or zero to cancel\"\"\"\n )\n if item >= 0:\n todo_list[item].visible = False\n return\n\n\ndef edit_item(todo_list):\n \"\"\"The purpose of this function is to edit a ListItem object in the\n list of ListItem objects, changing either the name or priority\n :param todo_list: the list of ListItem objects that gets one object\n modified\n :returns nothing\"\"\"\n item = select_item(todo_list,\n \"\"\"Please enter the item number you wish to edit\nEnter a negative number or zero to cancel\"\"\"\n )\n if item >= 0:\n while True:\n value = clean_input(\n \"\"\"Which value would you like to edit? Enter:\n1 for the Item Text (Currently: {0})\n2 for the Item Priority (Currently: {1})\n3 to Cancel and Exit\"\"\"\n .format(todo_list[item].text, str(todo_list[item].priority)))\n if value == 1:\n print('The Current Text is: {0}'.format(todo_list[item].text))\n todo_list[item].text = input('New Text:\\n')\n elif value == 2:\n print('The Current Priority is: {0}'.format(str(todo_list[\n item].priority)))\n todo_list[item].priority = check_priority_overlap(int(\n clean_input('New Priority:')), todo_list)\n elif value == 3:\n break\n else:\n print('Invalid Input - Please Try Again')\n return\n\n\ndef check_list_status(todo_list):\n \"\"\"The purpose of this function is to check whether there are visible\n items in the list, the entire list is hidden, or the list contains no\n more ListItem objects\n :param todo_list: the list of ListItem objects to check\n :returns which condition using integer codes\"\"\"\n if len(todo_list) == 0:\n state = 1\n else:\n state = 2\n for item_index in range(len(todo_list)):\n if todo_list[item_index].visible:\n state = 0\n return state\n\n\ndef menu_loop(todo_list, save_file_location):\n \"\"\"The purpose of this function is to repeatedly display the todo list\n and user prompts menu until the program is closed\n :param todo_list: the list of ListItem objects to display or modify\n :param save_file_location: where the .txt save file is located for saving\n :returns nothing\"\"\"\n show_hidden = False\n selection = 0\n invalid_input = False\n while selection != 6:\n if invalid_input:\n invalid_input = False\n else:\n print_list(save_file_location, todo_list, True, show_hidden)\n divider(137 + 17)\n list_status = check_list_status(todo_list)\n if list_status == 0:\n selection = int(clean_input(\n \"\"\"Please enter: 1 for Add Item, 2 for Remove Item, 3 for Edit Item, 4 for Mark Item Complete, 5 for Toggle Hidden, and 6 for Exit, 7 for Concept Demonstration\n\"\"\"\n ))\n elif list_status == 1:\n selection = int(clean_input(\n \"\"\"Please enter: 1 for Add Item, and 6 for Exit, 7 for Concept Demonstration\n\"\"\"\n ))\n else:\n selection = int(clean_input(\n \"\"\"Please enter: 1 for Add Item, 5 for Toggle Hidden, and 6 for Exit, 7 for Concept Demonstration\n\"\"\"\n ))\n print('')\n if selection == 1:\n add_item(todo_list)\n elif selection == 2:\n if list_status == 0:\n remove_item(todo_list)\n elif list_status == 2:\n print(\n 'Invalid Command: The Todo List has no visible items to remove'\n )\n else:\n print('Invalid Command: The Todo List has no items to remove')\n elif selection == 3:\n if list_status == 0:\n edit_item(todo_list)\n elif list_status == 2:\n print(\n 'Invalid Command: The Todo List has no visible items to edit'\n )\n else:\n print('Invalid Command: The Todo List has no items to edit')\n elif selection == 4:\n if list_status == 0:\n mark_complete(todo_list)\n elif list_status == 2:\n print(\n 'Invalid Command: The Todo List has no visible items to mark complete'\n )\n else:\n print(\n 'Invalid Command: The Todo List has no items to mark complete'\n )\n elif selection == 5:\n if list_status == 0 or list_status == 2:\n if show_hidden:\n print('No longer showing hidden items')\n show_hidden = False\n else:\n print('Now showing hidden items')\n show_hidden = True\n else:\n print(\n 'Invalid Command: The Todo List has no items to show or hide'\n )\n elif selection == 6:\n print('Now Closing')\n elif selection == 7:\n concept_demonstration()\n else:\n invalid_input = True\n print('Invalid Input\\nPlease Try Again')\n\n\ndef main():\n \"\"\"The purpose of this function is to ensure the save file exists at the\n specified save file location, load the save file into memory, display a\n welcome message with a divider, then start the menu loop until the\n program is closed\n :returns nothing\"\"\"\n save_file_location = 'Item_List.txt'\n data_file_a = open(save_file_location, 'a')\n data_file_a.close()\n loaded_list = load_from_file(save_file_location)\n print('Welcome to the To-Do List - Version: 0.1.2')\n divider(42)\n menu_loop(loaded_list, save_file_location)\n\n\n<mask token>\n",
"step-5": "\"\"\"This program displays a customizable list of items by priority value,\r\nwith priority 1 being the highest. Allows the user to add, edit,\r\nmark complete, show completed (hidden), and remove items. Stores the list of\r\nitems in a .txt file located where this program's main.py file is. All\r\nchanges are automatically saved to the .txt file. Also includes a fun\r\ntechnical knowledge demonstration using numbers and text responses. The\r\nprogram will create a new save file if none exists, and prompts for save\r\nfile overwrite if data cannot be read successfully. Menu navigation is\r\naccomplished through numeric inputs due to the text-only interface and\r\ntedium of typing out each word accurately and repeatedly.\"\"\"\r\n__author__ = 'Jordan Kooyman'\r\n\r\n# 1/26/21 - 4/15/2021 To-Do List Program - Integration Project for COP 1500\r\n# Spring 2021\r\n# Configurable settings saved to a separate file (?)\r\n# Ability to load a different data or config file (?)\r\n# Color code items by group (?)\r\n# Add a basic calculator to meet math (and string?) command requirements (?)\r\n\r\n# TODO: Implement a group system that shows all groups combined, just one\r\n# group, or all categorized by group, and group names - be able to change\r\n# group names (new function) - all functions support groups (individual or\r\n# combined)\r\n\r\nimport random\r\n\r\n\r\n# Random number generation used as random verification number when\r\n# overwriting the save file in the event of a failure to load from the save\r\n# file\r\n\r\n\r\nclass ListItem: # Create a class object that will store the data for each\r\n # entry in the list (custom variable)\r\n \"\"\"A custom object that stores four pieces of data representing each\r\n entry in the todo list. Contains the text of the todo list entry,\r\n the priority of the entry, the group code (NYI), and the visibility of\r\n the entry\"\"\"\r\n\r\n def __init__(self, text, priority, group, visible): # From w3schools.com\r\n self.text = text\r\n self.priority = priority\r\n self.group = group\r\n self.visible = visible\r\n\r\n\r\ndef concept_demonstration():\r\n \"\"\"The purpose of this function is to prompt the user for numbers and\r\n strings and manipulate them to demonstrate programming fluency with\r\n string and integer operations.\r\n :returns nothing\"\"\"\r\n number = clean_input(\"Please enter a positive number\")\r\n number2 = clean_input(\"Please enter a number\")\r\n while number2 == 0: # Rejects a 0 if it is input as the second number\r\n print(\"Error: Cannot Divide by 0\")\r\n number2 = clean_input(\"Please enter a different number\")\r\n color = input(\"Please enter a color\\n\")\r\n thing = input(\"Please enter a thing\\n\")\r\n thing2 = thing + ' ' # Adding space so that when thing is repeated, it\r\n # has a space in between\r\n # Raise the first number to the second number\r\n location = input(\"Please enter a location\\n\")\r\n print(str(number) + \" raised to the power of \" + str(number2) + \" is \" +\r\n str(number ** number2))\r\n # Multiply the two numbers\r\n print(\"{0} multiplied by {1} is {2}\".format(str(number), str(number2),\r\n str(number * number2)))\r\n # Divide the first number by the second number\r\n print(\"{0} divided by {1} is {2}\".format(str(number), str(number2),\r\n str(number / number2)))\r\n # Find the modulus of the two numbers\r\n print(\"The remainder from dividing {0} by {1} is {2}\".format(str(number),\r\n str(number2),\r\n str(number %\r\n number2))\r\n )\r\n # Divide the first number by the second and round it down (floor it)\r\n print(\"{0} divided by {1} rounded down is {2}\".format(str(number),\r\n str(number2),\r\n str(number // number2\r\n )))\r\n # Add the two numbers\r\n print(\"{0} plus {1} is {2}\".format(str(number), str(number2),\r\n str(number + number2)))\r\n # Subtract the second number from the first number\r\n print(\"{0} minus {1} is {2}\".format(str(number), str(number2),\r\n str(number - number2)))\r\n if number > 1: # if the first number entered is greater than 1\r\n print(\"The {0} at {1} yelled '{2}'\".format(color + ' ' + thing,\r\n location, thing2 *\r\n int(number - 1) + thing))\r\n # Combine two strings with + (no added space), repeat a string x\r\n # number of times with * (must use an integer) (I have the minus 1\r\n # and + thing to get the spacing to look proper and still repeat\r\n # number amount of times) -if a negative number is used when\r\n # multiplying a string, it does nothing (but does not crash) - but\r\n # it is still handled in the other statement with some added user\r\n # shaming\r\n elif number < 0: # if the first number entered is negative\r\n print(\"The {0} at {1} yelled '{2}'\\nYou entered a negative number \"\r\n \"when a positive number was requested, so you made the {3} \"\r\n \"mute. Good Job.\".format(color + ' ' + thing, location, thing2 *\r\n int(number), thing))\r\n # Same as above, expect that it will print nothing in the yelled\r\n # section if the first number entered is negative\r\n else: # if the first number entered is 0 or 1 (because of the int()\r\n # function removing a decimal)\r\n print(\"The {0} at {1} yelled '{2}'\".format(color + ' ' + thing,\r\n location, thing *\r\n int(number)))\r\n # this is to prevent errant spaces or showing the phrase too many times\r\n return\r\n\r\n\r\ndef cascade_list(priority_to_cascade_from, todo_list):\r\n \"\"\"The purpose of this function is to decrement the priority number of\r\n every item in the provided todo list greater than the priority number\r\n provided.\r\n :param priority_to_cascade_from: the number that is inserted by moving\r\n everything equal to or greater than up by one\r\n :param todo_list: the list of ListItem objects to check in\"\"\"\r\n for item in todo_list:\r\n if item.priority >= priority_to_cascade_from:\r\n item.priority += 1\r\n return\r\n\r\n\r\ndef check_priority_overlap(priority_to_check, todo_list):\r\n \"\"\"The purpose of this function is to check if the user's priority\r\n number input overlaps with a priority number already in the list,\r\n and if it does, prompts the user whether they want to keep it, change\r\n it, or move everything in the list that has a larger priority value up\r\n by one.\r\n :param priority_to_check: the number to check for overlap with\r\n :param todo_list: the list of ListItem objects to check in\r\n :returns the priority value, either changed or the original input\"\"\"\r\n overlap = False\r\n for item in todo_list:\r\n if item.priority == priority_to_check:\r\n overlap = True\r\n if overlap:\r\n answer = 0\r\n while answer > 3 or answer < 1:\r\n answer = clean_input(\"The priority number you entered overlaps \"\r\n \"with another entry's priority. Enter:\\n1 to \"\r\n \"change priority number\\n2 to leave as is \"\r\n \"with overlap\\n3 to push all priority numbers\"\r\n \" below this entry down by 1\")\r\n if answer > 3 or answer < 1:\r\n print(\"Invalid Option Selected\\nPlease Try Again\")\r\n if answer == 1:\r\n priority_to_check = check_priority_overlap(\r\n int(clean_input(\"New Priority:\")), todo_list)\r\n # change the priority value input\r\n elif answer == 3:\r\n cascade_list(priority_to_check, todo_list)\r\n return priority_to_check\r\n\r\n\r\ndef sorting(list_object): # Takes in a ListItem object and returns the\r\n # priority value - from w3schools.com\r\n \"\"\"The purpose of this function is to take in a ListItem custom object\r\n and return the priority value stored in it to be used in sorting.\r\n :param list_object: one ListItem object\r\n :returns the priority value stored in the ListItem object\"\"\"\r\n return list_object.priority\r\n\r\n\r\ndef print_list(save_file_location, my_list, to_save=False, show_hidden=False):\r\n # Prints out the To-Do list from the common list variable and saves list\r\n # to the .txt file\r\n \"\"\"The purpose of this function is to take in the location of the save\r\n file, the todo list variable, whether or not to save, and whether or not\r\n to show hidden and print out the todo list variable, skipping items\r\n marked as hidden unless it is told to show hidden, and saving the todo\r\n list to the file in the save file location if it is told to save.\r\n :param save_file_location: the file path to get to the .txt save file\r\n :param my_list: the list of ListItem objects to check in\r\n :param to_save: whether or not to save the list of items to the file,\r\n default\r\n is false\r\n :param show_hidden: whether or not to display the hidden list items,\r\n default\r\n it false\r\n :returns nothing\"\"\"\r\n my_list.sort(key=sorting) # Uses a custom function to be able to get the\r\n # right value to sort by\r\n print(\"To-Do:\")\r\n for item_index in my_list: # The range needs to be the length of the list\r\n # being printed\r\n if item_index.visible and not show_hidden: # Only print visible items\r\n # if show hidden is false\r\n print(item_index.priority, item_index.text, sep='.\\t')\r\n elif show_hidden: # Print everything is show hidden is trues\r\n if item_index.visible:\r\n print(item_index.priority, item_index.text, sep='.\\t')\r\n else:\r\n print(\"{0}.~\\t{1}\".format(item_index.priority, item_index.text)\r\n )\r\n # Indicate hidden items\r\n # Printing the item priority with a dot, then the item, with a tab\r\n # separating them\r\n if to_save:\r\n save_list(my_list, save_file_location)\r\n return\r\n\r\n\r\ndef divider(size=100): # Draws a dividing line to go between sections\r\n # (default 100 characters long)\r\n \"\"\"The purpose of this function is to print a dashed line across the\r\n screen with a specified length.\r\n :param size: how many characters long the line should be, default is 100\r\n :returns nothing\"\"\"\r\n for i in range(size):\r\n print('-', end='') # Prints out a single dash, no newline afterwards\r\n # (the end= sets the last character to blank\r\n print('') # Print out a newline (using the default ending of a print\r\n # statement being a newline\r\n return\r\n\r\n\r\ndef clean_input(prompt='Error'): # A special input function that will reject a\r\n # user's input of text when a number is requested -- if no prompt is\r\n # specified in the program, it will display \"Error\"\r\n \"\"\"The purpose of this function is to prompt the user for a numerical\r\n input and only accept a numerical input, rejects no input and text input.\r\n :param prompt: the prompt the user sees, default is Error\r\n :returns the user input as a float\"\"\"\r\n text = True\r\n phrase = '0'\r\n while text:\r\n phrase = input(prompt + '\\n')\r\n try: # Adapted from an example in the ThinkPython textbook (15.7) -\r\n # Checks whether the input is a number, positive or negative. If\r\n # not, rejects the input and user gets to try again\r\n float(phrase)\r\n text = False\r\n except ValueError:\r\n print(\"Error: Non-Numeric Entry Detected\")\r\n # if phrase.isnumeric(): # Checks for a positive number (negative\r\n # rejected as well as text) - replaced with superior form from textbook\r\n # example\r\n # return float(phrase) # Return the number the user entered\r\n # else:\r\n # print(\"Error: Non-Numeric Entry Detected\")\r\n return float(phrase) # Return the number the user entered\r\n\r\n\r\ndef load_from_file(save_location): # This is a function for readability -\r\n # opens txt file in read mode and loads it\r\n \"\"\"The purpose of this function is to open the .txt save file and read\r\n the contents into memory in the form of a list of custom ListItem\r\n objects.\r\n :param save_location: the location the save file is stored in\r\n :returns a list of ListItem objects that is populated with the data from\r\n the save file\"\"\"\r\n # into an array (list) of ListItem variables\r\n data_file_r = open(save_location, \"r\") # Open txt file in read mode\r\n list_item = [\"Text\", -1, 2, True] # Item, Item Priority, group, is visible\r\n todo = [] # make a list of lists\r\n temp = 1 # Temporary counter variable to reconstruct lists from .txt file\r\n line_counter = 1\r\n try:\r\n for item in data_file_r: # loop through each line in the file, one at\r\n # a time - from w3schools.com\r\n if (line_counter - 1) % 5 != 0 and line_counter > 0:\r\n cleaned_item = \"\"\r\n for character_index in range(len(\r\n item)): # Loop through each character in the extracted\r\n # string\r\n if character_index != len(\r\n item) - 1: # if it is not the last character, add\r\n # it to the cleaned string\r\n cleaned_item += item[character_index]\r\n # Add every character to a\r\n # but \\n\r\n if temp == 1: # Item Text\r\n list_item[0] = cleaned_item\r\n temp = 2\r\n elif temp == 2: # Item Priority\r\n list_item[1] = int(cleaned_item)\r\n temp = 3\r\n elif temp == 3: # Item Group\r\n list_item[2] = int(cleaned_item)\r\n temp = 4\r\n elif temp == 4: # Is Visible\r\n if cleaned_item == \"False\":\r\n list_item[3] = False\r\n else: # Assume the item is visible if the text is not\r\n # False\r\n list_item[3] = True\r\n todo.insert(0, ListItem(list_item[0], list_item[1],\r\n list_item[2], list_item[3]))\r\n temp = 1\r\n else: # If some error occurred and a condition outside of the\r\n # possible four is met, restart\r\n temp = 1\r\n line_counter += 1\r\n except ValueError:\r\n print(\"An error has occurred trying to load the file\")\r\n result = int(clean_input(\r\n \"Please enter a 2 to overwrite the current save file and start \"\r\n \"over or any other number to exit the program\"))\r\n if result == 2:\r\n key = random.randint(2, 9) # Generate a random integer between 2\r\n # and 9 to be used as a second dynamic check\r\n if key == 2:\r\n key = 1 # If the random number is 2, set it to one so that\r\n # the same number (2) cannot be used as the verification number\r\n result2 = int(clean_input(\"Are you sure you want to delete all \"\r\n \"of your saved data\\nEnter {0} to \"\r\n \"proceed, or anything else to \"\r\n \"cancel\".format(str(key))))\r\n if result2 == key:\r\n data_file_w = open(\"C:Item_List.txt\", \"w\")\r\n data_file_w.close()\r\n todo = []\r\n print(\"Save Data Erased\")\r\n return todo # Return an empty list if file load failed\r\n else:\r\n print(\"Program Exiting\")\r\n quit(1)\r\n else:\r\n print(\"Program Exiting\")\r\n quit(1) # Exit the program with the exit code of 1\r\n data_file_r.close()\r\n # All the list functions above referenced from w3schools.com What is\r\n # happening above: Opening the file, initializing a list to hold all\r\n # four pieces of data, then after pulling the data from the file and\r\n # storing in the list, it is copied (not referenced) into my main list\r\n # of ListItem objects\r\n return todo\r\n\r\n\r\ndef save_list(todo_list, save_location):\r\n \"\"\"The purpose of this function is to save a list of ListItem objects to a\r\n specified location in a .txt file with the first line of the document\r\n being an explanation of the file format being used.\r\n :param todo_list: the list of ListItem objects to save to the save file\r\n :param save_location: the location to create or overwrite the save file\r\n :returns nothing\"\"\"\r\n data_file_w = open(save_location,\r\n \"w\") # open the save file and clear the data from it\r\n data_file_w.write(\"Warning: The Todo-List Program will not be able to \"\r\n \"load this save file if it is incorrectly modified. \"\r\n \"Modify at your own risk. The structure is Entry \"\r\n \"Text, Entry Priority as a number, Entry Group as a \"\r\n \"number (Not Yet Utilized, but necessary), and Entry \"\r\n \"Visibility as a boolean, each on a separate line, a \"\r\n \"single line gap in between, and the \"\r\n \"very first line is skipped\\n\")\r\n for item in todo_list:\r\n data_file_w.write(\"{0}\\n{1}\\n{2}\\n{3}\\n\\n\".format(item.text,\r\n str(item.priority),\r\n str(item.group),\r\n str(item.visible)))\r\n data_file_w.close()\r\n return\r\n\r\n\r\ndef add_item(todo_list):\r\n \"\"\"The purpose of this function is to prompt the user for the two\r\n fields of necessary information to make a new entry in the todo list,\r\n the item name and priority, checking if the priority overlaps with an\r\n existing entry in the todo list.\r\n :param todo_list: the list of ListItem objects to add a new ListItem\r\n object to\r\n :returns nothing\"\"\"\r\n text = input(\"Please enter the name of the new item\\n\")\r\n priority = check_priority_overlap(\r\n int(clean_input(\"Please enter the priority of this item\")), todo_list)\r\n # group = int(clean_input(\"Please enter the group number of this item\"))\r\n group = 0 # Set the group value to zero, group system NYI\r\n visible = True\r\n todo_list.insert(0, ListItem(text, priority, group, visible)) # Join\r\n # the inputs to be added to the overall list\r\n return\r\n\r\n\r\ndef select_item(todo_list, prompt='Error'): # Ask the user\r\n # which item from the list is to be modified\r\n \"\"\"The purpose of this function is to display a list of all items in the\r\n todo list and number each individually to allow the user to select an\r\n item to modify or delete. The available numbers may\r\n skip some if some items are hidden\r\n :param todo_list: the list of ListItem objects to display\r\n :param prompt: the prompt to display to the user, default is Error\r\n :returns the user selected item's index in a computer friendly form (\r\n starting at 0 instead of 1)\"\"\"\r\n valid = False\r\n index = 0\r\n while not valid:\r\n counter = 1 # counter for index printing\r\n for item in todo_list: # The range needs to be the length of the list\r\n # being printed\r\n if item.visible:\r\n print(counter, item.text, sep='\\t')\r\n else:\r\n print(counter, \"~ {0} ~\".format(item.text), sep='\\t')\r\n counter += 1\r\n # Printing the item number, then the item, with a tab separating\r\n # them\r\n index = int(clean_input(prompt))\r\n if index < counter:\r\n valid = True\r\n else:\r\n print(\"Invalid Input: Number is too big\")\r\n return index - 1\r\n\r\n\r\ndef remove_item(todo_list):\r\n \"\"\"The purpose of this function is to delete a ListItem object from a\r\n list of ListItem objects by prompting the user for the index and\r\n verifying they want to delete the item.\r\n :param todo_list: the list of ListItem objects from which to remove\r\n one object\r\n :returns nothing\"\"\"\r\n item = select_item(todo_list, \"Please enter the item number you wish to \"\r\n \"remove\\nEnter a negative number or zero \"\r\n \"to cancel\")\r\n if item >= 0: # 0, not 1 because the index returned is shifted to be\r\n # computer friendly\r\n todo_list.pop(item)\r\n return\r\n\r\n\r\ndef mark_complete(todo_list):\r\n \"\"\"The purpose of this function is to mark a selectedListItem object as\r\n hidden and not to be printed unless specified, apart from selecting items.\r\n :param todo_list: the list of ListItem objects to modify\r\n :returns nothing\"\"\"\r\n item = select_item(todo_list, \"Please enter the item number you wish to \"\r\n \"Mark Completed and hide from the \"\r\n \"list\\nEnter a negative number or zero to \"\r\n \"cancel\")\r\n if item >= 0:\r\n todo_list[item].visible = False\r\n return\r\n\r\n\r\ndef edit_item(todo_list):\r\n \"\"\"The purpose of this function is to edit a ListItem object in the\r\n list of ListItem objects, changing either the name or priority\r\n :param todo_list: the list of ListItem objects that gets one object\r\n modified\r\n :returns nothing\"\"\"\r\n item = select_item(todo_list, \"Please enter the item number you wish to \"\r\n \"edit\\nEnter a negative number or zero to \"\r\n \"cancel\")\r\n if item >= 0:\r\n while True:\r\n value = clean_input(\"Which value would you like to edit? Enter:\\n1\"\r\n \" for the Item Text (Currently: {0})\\n2 for \"\r\n \"the Item Priority (Currently: {1})\\n3 to \"\r\n \"Cancel and Exit\".format(todo_list[item].text,\r\n str(todo_list[item].\r\n priority)))\r\n if value == 1: # Item Text Change\r\n print(\"The Current Text is: {0}\".format(todo_list[item].text))\r\n todo_list[item].text = input(\"New Text:\\n\")\r\n elif value == 2: # Item Priority Change\r\n print(\"The Current Priority is: {0}\".format(str(todo_list[item]\r\n .priority)))\r\n todo_list[item].priority = check_priority_overlap(\r\n int(clean_input(\"New Priority:\")), todo_list)\r\n # elif value == 3: # Item Group Change\r\n # print(f\"The Current Group is: {todo_list[item].group}\")\r\n # todo_list[item].group = int(clean_input(\"New Group Number:\"))\r\n elif value == 3: # Exit Changing Menu\r\n break\r\n else:\r\n print(\"Invalid Input - Please Try Again\")\r\n return\r\n\r\n\r\ndef check_list_status(todo_list): # Checks if the list is completely hidden\r\n # (2), completely empty (1), or neither (0)\r\n \"\"\"The purpose of this function is to check whether there are visible\r\n items in the list, the entire list is hidden, or the list contains no\r\n more ListItem objects\r\n :param todo_list: the list of ListItem objects to check\r\n :returns which condition using integer codes\"\"\"\r\n if len(todo_list) == 0:\r\n state = 1 # Empty List\r\n else:\r\n state = 2 # Entirely Hidden List\r\n for item_index in range(len(todo_list)):\r\n if todo_list[item_index].visible: # If an item is visible, then\r\n # they are not all hidden\r\n state = 0 # Neither\r\n return state\r\n\r\n\r\ndef menu_loop(todo_list, save_file_location):\r\n \"\"\"The purpose of this function is to repeatedly display the todo list\r\n and user prompts menu until the program is closed\r\n :param todo_list: the list of ListItem objects to display or modify\r\n :param save_file_location: where the .txt save file is located for saving\r\n :returns nothing\"\"\"\r\n show_hidden = False\r\n selection = 0\r\n invalid_input = False\r\n while selection != 6:\r\n if invalid_input:\r\n invalid_input = False\r\n else:\r\n print_list(save_file_location, todo_list, True, show_hidden)\r\n divider(137 + 17) # Length of prompt statement below\r\n list_status = check_list_status(todo_list)\r\n if list_status == 0: # No Issues\r\n selection = int(clean_input(\"Please enter: 1 for Add Item, 2 for \"\r\n \"Remove Item, 3 for Edit Item, \"\r\n \"4 for Mark Item Complete, \"\r\n \"5 for Toggle Hidden, and 6 for \"\r\n \"Exit, 7 for Concept \"\r\n \"Demonstration\\n\"))\r\n elif list_status == 1: # Empty List - No Remove, Edit, Mark, or Toggle\r\n selection = int(clean_input(\"Please enter: 1 for Add Item, and 6 \"\r\n \"for Exit, 7 for Concept \"\r\n \"Demonstration\\n\"))\r\n else: # Entirely Hidden List\r\n selection = int(clean_input(\"Please enter: 1 for Add Item, 5 for \"\r\n \"Toggle Hidden, and 6 for Exit, \"\r\n \"7 for Concept Demonstration\\n\"))\r\n # Uses the clean_input function above to get a number from the\r\n # user, converting it to an int so a decimal won't return an\r\n # invalid input in the following steps\r\n print(\"\") # Blank Print statement to add an extra blank line after\r\n # user input before displaying response\r\n if selection == 1: # Add Item - modify the list variable, then save\r\n # to file\r\n add_item(todo_list)\r\n elif selection == 2: # Remove Item - modify the list variable, then\r\n # save to file\r\n if list_status == 0:\r\n remove_item(todo_list)\r\n elif list_status == 2:\r\n print(\"Invalid Command: The Todo List has no visible items \"\r\n \"to remove\")\r\n else:\r\n print(\"Invalid Command: The Todo List has no items to remove\")\r\n elif selection == 3: # Edit Item - modify the list variable, then save\r\n # to file\r\n if list_status == 0:\r\n edit_item(todo_list)\r\n elif list_status == 2:\r\n print(\"Invalid Command: The Todo List has no visible items \"\r\n \"to edit\")\r\n else:\r\n print(\"Invalid Command: The Todo List has no items to edit\")\r\n elif selection == 4: # Mark Item Complete - modify the list variable,\r\n # then save to file\r\n if list_status == 0:\r\n mark_complete(todo_list)\r\n elif list_status == 2:\r\n print(\"Invalid Command: The Todo List has no visible items \"\r\n \"to mark complete\")\r\n else:\r\n print(\"Invalid Command: The Todo List has no items to mark \"\r\n \"complete\")\r\n elif selection == 5: # Show Hidden - modify the list variable, then\r\n # save to file\r\n if list_status == 0 or list_status == 2:\r\n if show_hidden:\r\n print(\"No longer showing hidden items\")\r\n show_hidden = False\r\n else:\r\n print(\"Now showing hidden items\")\r\n show_hidden = True\r\n else:\r\n print(\"Invalid Command: The Todo List has no items to show or \"\r\n \"hide\")\r\n elif selection == 6: # Exit Program\r\n print(\"Now Closing\")\r\n elif selection == 7: # Extra section to demonstrate proficiency with\r\n # topics covered in class - Sprint 1\r\n concept_demonstration()\r\n else:\r\n invalid_input = True\r\n print(\"Invalid Input\\nPlease Try Again\")\r\n\r\n\r\ndef main():\r\n \"\"\"The purpose of this function is to ensure the save file exists at the\r\n specified save file location, load the save file into memory, display a\r\n welcome message with a divider, then start the menu loop until the\r\n program is closed\r\n :returns nothing\"\"\"\r\n save_file_location = \"Item_List.txt\"\r\n data_file_a = open(save_file_location, \"a\") # Opens ItemList.txt which\r\n # is accessible in the file variable, in append mode (using this so that\r\n # if the file exists, nothing happens, but if it does not exist, it gets\r\n # created from w3schools.com\r\n data_file_a.close() # Close the file, I now know it exists\r\n loaded_list = load_from_file(save_file_location)\r\n print(\"Welcome to the To-Do List - Version: 0.1.2\")\r\n divider(42) # Length of welcome statement above\r\n menu_loop(loaded_list, save_file_location)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n",
"step-ids": [
8,
11,
12,
18,
24
]
}
|
[
8,
11,
12,
18,
24
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('bookstore', '0003_auto_20201118_1325')]
operations = [migrations.AddField(model_name='book', name='seller',
field=models.ForeignKey(default=1, on_delete=django.db.models.
deletion.CASCADE, to='auth.user'), preserve_default=False)]
<|reserved_special_token_1|>
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('bookstore', '0003_auto_20201118_1325')]
operations = [migrations.AddField(model_name='book', name='seller',
field=models.ForeignKey(default=1, on_delete=django.db.models.
deletion.CASCADE, to='auth.user'), preserve_default=False)]
<|reserved_special_token_1|>
# Generated by Django 3.1.3 on 2020-11-18 13:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('bookstore', '0003_auto_20201118_1325'),
]
operations = [
migrations.AddField(
model_name='book',
name='seller',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='auth.user'),
preserve_default=False,
),
]
|
flexible
|
{
"blob_id": "fa09937ce64952795ae27cb91bf2c52dfb3ef4da",
"index": 4532,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('bookstore', '0003_auto_20201118_1325')]\n operations = [migrations.AddField(model_name='book', name='seller',\n field=models.ForeignKey(default=1, on_delete=django.db.models.\n deletion.CASCADE, to='auth.user'), preserve_default=False)]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('bookstore', '0003_auto_20201118_1325')]\n operations = [migrations.AddField(model_name='book', name='seller',\n field=models.ForeignKey(default=1, on_delete=django.db.models.\n deletion.CASCADE, to='auth.user'), preserve_default=False)]\n",
"step-5": "# Generated by Django 3.1.3 on 2020-11-18 13:26\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('bookstore', '0003_auto_20201118_1325'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='book',\n name='seller',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='auth.user'),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import Blueprint, request, make_response
from flask_expects_json import expects_json
from server.validation.schemas import guest_calendar_schema
from tools.for_db.work_with_booking_info import add_booking_info_and_get_uuid
from tools.for_db.work_with_links import get_link
from tools.build_response import build_response
guest_calendar_post = Blueprint('guest_calendar_post', __name__)
@guest_calendar_post.route('/calendars/<link_id>/bookings/', methods=['POST'])
@expects_json(guest_calendar_schema)
def booking(link_id):
request_body = request.get_json()
link = get_link(link_id)
if link is None:
return build_response('link id is invalid', 401)
admin_id = link.admin_id
try:
uuid = add_booking_info_and_get_uuid(request_body['start'],
request_body['end'], admin_id, request_body['guest_name'],
request_body['guest_email'], request_body['topic'] if 'topic' in
request_body else None)
request_body['uuid'] = uuid
except Exception:
return build_response('already booked or deleted', 409)
return make_response(request_body, 200)
|
normal
|
{
"blob_id": "75ef5dd2b82cf79819f18045559f9850c74bb55a",
"index": 5565,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@guest_calendar_post.route('/calendars/<link_id>/bookings/', methods=['POST'])\n@expects_json(guest_calendar_schema)\ndef booking(link_id):\n request_body = request.get_json()\n link = get_link(link_id)\n if link is None:\n return build_response('link id is invalid', 401)\n admin_id = link.admin_id\n try:\n uuid = add_booking_info_and_get_uuid(request_body['start'],\n request_body['end'], admin_id, request_body['guest_name'],\n request_body['guest_email'], request_body['topic'] if 'topic' in\n request_body else None)\n request_body['uuid'] = uuid\n except Exception:\n return build_response('already booked or deleted', 409)\n return make_response(request_body, 200)\n",
"step-3": "<mask token>\nguest_calendar_post = Blueprint('guest_calendar_post', __name__)\n\n\n@guest_calendar_post.route('/calendars/<link_id>/bookings/', methods=['POST'])\n@expects_json(guest_calendar_schema)\ndef booking(link_id):\n request_body = request.get_json()\n link = get_link(link_id)\n if link is None:\n return build_response('link id is invalid', 401)\n admin_id = link.admin_id\n try:\n uuid = add_booking_info_and_get_uuid(request_body['start'],\n request_body['end'], admin_id, request_body['guest_name'],\n request_body['guest_email'], request_body['topic'] if 'topic' in\n request_body else None)\n request_body['uuid'] = uuid\n except Exception:\n return build_response('already booked or deleted', 409)\n return make_response(request_body, 200)\n",
"step-4": "from flask import Blueprint, request, make_response\nfrom flask_expects_json import expects_json\nfrom server.validation.schemas import guest_calendar_schema\nfrom tools.for_db.work_with_booking_info import add_booking_info_and_get_uuid\nfrom tools.for_db.work_with_links import get_link\nfrom tools.build_response import build_response\nguest_calendar_post = Blueprint('guest_calendar_post', __name__)\n\n\n@guest_calendar_post.route('/calendars/<link_id>/bookings/', methods=['POST'])\n@expects_json(guest_calendar_schema)\ndef booking(link_id):\n request_body = request.get_json()\n link = get_link(link_id)\n if link is None:\n return build_response('link id is invalid', 401)\n admin_id = link.admin_id\n try:\n uuid = add_booking_info_and_get_uuid(request_body['start'],\n request_body['end'], admin_id, request_body['guest_name'],\n request_body['guest_email'], request_body['topic'] if 'topic' in\n request_body else None)\n request_body['uuid'] = uuid\n except Exception:\n return build_response('already booked or deleted', 409)\n return make_response(request_body, 200)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests, vars
def Cardid(name):
query = {"key":vars.Key, "token":vars.Token, "cards":"visible"}
execute = requests.request("GET", vars.BoardGetUrl, params=query).json()
for row in execute['cards']:
if row['name'] == name:
cardID = 1
break
else:
cardID = 0
return cardID
|
normal
|
{
"blob_id": "68493acce71060799da8c6cb03f2ddffce64aa92",
"index": 8970,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Cardid(name):\n query = {'key': vars.Key, 'token': vars.Token, 'cards': 'visible'}\n execute = requests.request('GET', vars.BoardGetUrl, params=query).json()\n for row in execute['cards']:\n if row['name'] == name:\n cardID = 1\n break\n else:\n cardID = 0\n return cardID\n",
"step-3": "import requests, vars\n\n\ndef Cardid(name):\n query = {'key': vars.Key, 'token': vars.Token, 'cards': 'visible'}\n execute = requests.request('GET', vars.BoardGetUrl, params=query).json()\n for row in execute['cards']:\n if row['name'] == name:\n cardID = 1\n break\n else:\n cardID = 0\n return cardID\n",
"step-4": "import requests, vars\n\ndef Cardid(name):\n query = {\"key\":vars.Key, \"token\":vars.Token, \"cards\":\"visible\"}\n execute = requests.request(\"GET\", vars.BoardGetUrl, params=query).json()\n for row in execute['cards']:\n if row['name'] == name:\n cardID = 1\n break\n else:\n cardID = 0\n return cardID\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import shell
def executeUpgrade():
shell.executeCommand('pkg upgrade')
def executeInstall(pkg_name):
shell.executeCommand('pkg install ' + pkg_name)
def executeRemove(pkg_name):
shell.executeCommand('pkg remove ' + pkg_name)
shell.executeCommand('pkg autoremove')
def executeFindByName(name):
shell.executeCommand('pkg search ' + name)
|
normal
|
{
"blob_id": "db55a603615c7d896569ada84f3110dd6c0ce45f",
"index": 1250,
"step-1": "<mask token>\n\n\ndef executeUpgrade():\n shell.executeCommand('pkg upgrade')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef executeUpgrade():\n shell.executeCommand('pkg upgrade')\n\n\n<mask token>\n\n\ndef executeFindByName(name):\n shell.executeCommand('pkg search ' + name)\n",
"step-3": "<mask token>\n\n\ndef executeUpgrade():\n shell.executeCommand('pkg upgrade')\n\n\n<mask token>\n\n\ndef executeRemove(pkg_name):\n shell.executeCommand('pkg remove ' + pkg_name)\n shell.executeCommand('pkg autoremove')\n\n\ndef executeFindByName(name):\n shell.executeCommand('pkg search ' + name)\n",
"step-4": "<mask token>\n\n\ndef executeUpgrade():\n shell.executeCommand('pkg upgrade')\n\n\ndef executeInstall(pkg_name):\n shell.executeCommand('pkg install ' + pkg_name)\n\n\ndef executeRemove(pkg_name):\n shell.executeCommand('pkg remove ' + pkg_name)\n shell.executeCommand('pkg autoremove')\n\n\ndef executeFindByName(name):\n shell.executeCommand('pkg search ' + name)\n",
"step-5": "import shell\n\n\ndef executeUpgrade():\n shell.executeCommand('pkg upgrade')\n\n\ndef executeInstall(pkg_name):\n shell.executeCommand('pkg install ' + pkg_name)\n\n\ndef executeRemove(pkg_name):\n shell.executeCommand('pkg remove ' + pkg_name)\n shell.executeCommand('pkg autoremove')\n\n\ndef executeFindByName(name):\n shell.executeCommand('pkg search ' + name)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
# coding: utf-8
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import tensorflow as tf
print(tf.__version__)
print(tf.keras.__version__)
print(tf.__path__)
import numpy as np
from tqdm import tqdm, tqdm_notebook
from utils import emphasis
import tensorflow.keras.backend as K
from tensorflow.keras.utils import Sequence
import librosa
import librosa.display
print(tf.test.is_gpu_available())
# ## SRCNN
class SubPixel1D(tf.keras.layers.Layer):
def __init__(self, r=2):
super(SubPixel1D, self).__init__()
self.r = r
def call(self, I):
"""One-dimensional subpixel upsampling layer
Calls a tensorflow function that directly implements this functionality.
We assume input has dim (batch, width, r)
"""
X = tf.transpose(I, [2,1,0]) # (r, w, b)
X = tf.batch_to_space_nd(X, [self.r], [[0,0]]) # (1, r*w, b)
X = tf.transpose(X, [2,1,0])
return X
noisy = tf.keras.layers.Input(shape=(None, 1))
x_input = noisy
x = x_input
# B = 8
# n_filters = [128, 256, 512, 512, 512, 512, 512, 512]
# kernel_sizes = [65, 33, 17, 9, 9, 9, 9, 9]
B = 4
n_filters = [128, 256, 512, 512]
kernel_sizes = [65, 33, 17, 9]
# B = 3
# n_filters = [128, 256, 512]
# kernel_sizes = [65, 33, 17]
# B = 3
# n_filters = [64, 128, 256]
# kernel_sizes = [65, 33, 17]
# Downsampling Layers
encoder_features = []
for k, n_filter, kernel_size in zip(range(B), n_filters, kernel_sizes):
x = tf.keras.layers.Conv1D(filters = n_filter,
kernel_size = kernel_size,
strides = 2,
padding = 'same',
kernel_initializer = 'Orthogonal')(x)
# x = tf.keras.layers.PReLU()(x)
x = tf.keras.layers.LeakyReLU(0.2)(x)
encoder_features.append(x)
# Bottleneck Layer
x = tf.keras.layers.Conv1D(filters = 512,
kernel_size = 9,
strides = 2,
padding = 'same',
kernel_initializer = 'Orthogonal')(x)
x = tf.keras.layers.Dropout(rate=0.5)(x)
# x = tf.keras.layers.PReLU()(x)
x = tf.keras.layers.LeakyReLU(0.2)(x)
# Upsampling Layer
for k, n_filter, kernel_size, enc in reversed(list(zip(range(B),
n_filters,
kernel_sizes,
encoder_features))):
x = tf.keras.layers.Conv1D(filters = 2 * n_filter,
kernel_size = kernel_size,
strides = 1,
padding = 'same',
kernel_initializer = 'Orthogonal')(x)
x = tf.keras.layers.Dropout(rate=0.5)(x)
# x = tf.keras.layers.PReLU()(x)
x = tf.keras.layers.ReLU()(x)
x = SubPixel1D()(x)
x = tf.keras.layers.Concatenate(axis=2)([x, enc])
# Final Conv Layer
x = tf.keras.layers.Conv1D(filters = 2,
kernel_size = 9,
strides = 1,
padding = 'same')(x)
x = SubPixel1D()(x)
x_final = tf.keras.layers.Add()([x, x_input])
G = tf.keras.models.Model(inputs = [noisy], outputs = [x_final])
# Train Model
# Initialize Model
optim = tf.keras.optimizers.Adam(lr=1e-4)
def G_loss(true, fake):
return K.mean(K.sqrt(K.mean((fake - true) ** 2 + 1e-6, axis=[1, 2])), axis=0)
def G_LSD_loss(y_clean, y_noisy):
y_clean = tf.squeeze(y_clean)
y_noisy = tf.squeeze(y_noisy)
D_clean = tf.signal.stft(signals = y_clean,
frame_length = 2048,
frame_step = 1024)
D_noisy = tf.signal.stft(signals = y_noisy,
frame_length = 2048,
frame_step = 1024)
D_clean_log = K.log(K.abs(D_clean) ** 2 + 1e-6)
D_noisy_log = K.log(K.abs(D_noisy) ** 2 + 1e-6)
return K.mean(K.sqrt(K.mean((D_clean_log - D_noisy_log) ** 2, axis = [2])), axis = [0, 1])
G.compile(loss = G_LSD_loss,
optimizer = optim)
G.summary()
# tf.keras.utils.plot_model(G, to_file='./generator.png', show_shapes=True)
# Training
class data_sequence(Sequence):
def __init__(self, data_path, batch_size = 64):
self.filenames = [os.path.join(data_path, filename) for filename in os.listdir(data_path)]
self.batch_size = batch_size
def __len__(self):
return int(np.ceil(len(self.filenames) / float(self.batch_size)))
def on_epoch_end(self):
np.random.shuffle(self.filenames)
def __getitem__(self, idx):
noisy_batch = []
clean_batch = []
for i in range(idx * self.batch_size, min(len(self.filenames), (idx + 1) * self.batch_size)):
pair = np.load(self.filenames[i])
# pair = emphasis(pair[np.newaxis, :, :], emph_coeff=0.95).reshape(2, -1)
clean = pair[0].reshape(-1, 1).astype('float32')
noisy = pair[1].reshape(-1, 1).astype('float32')
noisy_batch.append(noisy)
clean_batch.append(clean)
return np.array(noisy_batch), np.array(clean_batch)
train_data_path = '../dataset/serialized_train_data'
val_data_path = '../dataset/serialized_val_data'
callbacks = [
tf.keras.callbacks.ModelCheckpoint(filepath='./model/weights_LSD.hdf5',
verbose=1,
save_best_only=True,
save_weights_only=True),
tf.keras.callbacks.TensorBoard(log_dir='./logs/LSD', update_freq='batch'),
# tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=1e-8),
]
G.fit_generator(generator = data_sequence(train_data_path, 64),
validation_data = data_sequence(val_data_path, 2),
steps_per_epoch = 3325 // 64,
verbose = 1,
epochs = 400,
callbacks = callbacks,
max_queue_size = 10,
use_multiprocessing = True,
workers = 6,
initial_epoch = 0)
|
normal
|
{
"blob_id": "08a0ab888886184f7447465508b6494b502821ea",
"index": 8903,
"step-1": "#!/usr/bin/env python\n# coding: utf-8\n\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nimport tensorflow as tf\nprint(tf.__version__)\nprint(tf.keras.__version__)\nprint(tf.__path__)\nimport numpy as np\n\nfrom tqdm import tqdm, tqdm_notebook\nfrom utils import emphasis\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.utils import Sequence\nimport librosa\nimport librosa.display\n\nprint(tf.test.is_gpu_available())\n\n\n# ## SRCNN\nclass SubPixel1D(tf.keras.layers.Layer):\n def __init__(self, r=2):\n super(SubPixel1D, self).__init__()\n self.r = r\n def call(self, I):\n \"\"\"One-dimensional subpixel upsampling layer\n Calls a tensorflow function that directly implements this functionality.\n We assume input has dim (batch, width, r)\n \"\"\"\n\n X = tf.transpose(I, [2,1,0]) # (r, w, b)\n X = tf.batch_to_space_nd(X, [self.r], [[0,0]]) # (1, r*w, b)\n X = tf.transpose(X, [2,1,0])\n return X\n\nnoisy = tf.keras.layers.Input(shape=(None, 1))\nx_input = noisy\nx = x_input\n\n# B = 8\n# n_filters = [128, 256, 512, 512, 512, 512, 512, 512]\n# kernel_sizes = [65, 33, 17, 9, 9, 9, 9, 9]\n\nB = 4\nn_filters = [128, 256, 512, 512]\nkernel_sizes = [65, 33, 17, 9]\n\n# B = 3\n# n_filters = [128, 256, 512]\n# kernel_sizes = [65, 33, 17]\n\n# B = 3\n# n_filters = [64, 128, 256]\n# kernel_sizes = [65, 33, 17]\n\n\n# Downsampling Layers\nencoder_features = []\nfor k, n_filter, kernel_size in zip(range(B), n_filters, kernel_sizes):\n x = tf.keras.layers.Conv1D(filters = n_filter,\n kernel_size = kernel_size,\n strides = 2,\n padding = 'same',\n kernel_initializer = 'Orthogonal')(x)\n # x = tf.keras.layers.PReLU()(x)\n x = tf.keras.layers.LeakyReLU(0.2)(x)\n encoder_features.append(x)\n \n# Bottleneck Layer\nx = tf.keras.layers.Conv1D(filters = 512,\n kernel_size = 9,\n strides = 2,\n padding = 'same',\n kernel_initializer = 'Orthogonal')(x)\nx = tf.keras.layers.Dropout(rate=0.5)(x)\n# x = tf.keras.layers.PReLU()(x)\nx = tf.keras.layers.LeakyReLU(0.2)(x)\n\n# Upsampling Layer\nfor k, n_filter, kernel_size, enc in reversed(list(zip(range(B), \n n_filters, \n kernel_sizes, \n encoder_features))):\n x = tf.keras.layers.Conv1D(filters = 2 * n_filter,\n kernel_size = kernel_size,\n strides = 1,\n padding = 'same',\n kernel_initializer = 'Orthogonal')(x)\n x = tf.keras.layers.Dropout(rate=0.5)(x)\n # x = tf.keras.layers.PReLU()(x)\n x = tf.keras.layers.ReLU()(x)\n x = SubPixel1D()(x)\n x = tf.keras.layers.Concatenate(axis=2)([x, enc])\n\n# Final Conv Layer\nx = tf.keras.layers.Conv1D(filters = 2,\n kernel_size = 9,\n strides = 1,\n padding = 'same')(x)\nx = SubPixel1D()(x)\nx_final = tf.keras.layers.Add()([x, x_input]) \nG = tf.keras.models.Model(inputs = [noisy], outputs = [x_final]) \n\n\n# Train Model\n# Initialize Model\n\noptim = tf.keras.optimizers.Adam(lr=1e-4)\ndef G_loss(true, fake):\n return K.mean(K.sqrt(K.mean((fake - true) ** 2 + 1e-6, axis=[1, 2])), axis=0)\n\ndef G_LSD_loss(y_clean, y_noisy):\n y_clean = tf.squeeze(y_clean)\n y_noisy = tf.squeeze(y_noisy)\n \n D_clean = tf.signal.stft(signals = y_clean,\n frame_length = 2048,\n frame_step = 1024)\n D_noisy = tf.signal.stft(signals = y_noisy,\n frame_length = 2048,\n frame_step = 1024)\n \n D_clean_log = K.log(K.abs(D_clean) ** 2 + 1e-6)\n D_noisy_log = K.log(K.abs(D_noisy) ** 2 + 1e-6)\n\n\treturn K.mean(K.sqrt(K.mean((D_clean_log - D_noisy_log) ** 2, axis = [2])), axis = [0, 1])\n\nG.compile(loss = G_LSD_loss,\n optimizer = optim)\nG.summary()\n# tf.keras.utils.plot_model(G, to_file='./generator.png', show_shapes=True)\n\n\n# Training\n\nclass data_sequence(Sequence):\n def __init__(self, data_path, batch_size = 64):\n self.filenames = [os.path.join(data_path, filename) for filename in os.listdir(data_path)]\n self.batch_size = batch_size\n \n def __len__(self):\n return int(np.ceil(len(self.filenames) / float(self.batch_size)))\n \n def on_epoch_end(self):\n np.random.shuffle(self.filenames)\n\n def __getitem__(self, idx):\n noisy_batch = []\n clean_batch = []\n \n for i in range(idx * self.batch_size, min(len(self.filenames), (idx + 1) * self.batch_size)):\n pair = np.load(self.filenames[i])\n # pair = emphasis(pair[np.newaxis, :, :], emph_coeff=0.95).reshape(2, -1)\n clean = pair[0].reshape(-1, 1).astype('float32')\n noisy = pair[1].reshape(-1, 1).astype('float32')\n \n noisy_batch.append(noisy)\n clean_batch.append(clean)\n\n return np.array(noisy_batch), np.array(clean_batch)\n \n \ntrain_data_path = '../dataset/serialized_train_data'\nval_data_path = '../dataset/serialized_val_data' \n \ncallbacks = [\n tf.keras.callbacks.ModelCheckpoint(filepath='./model/weights_LSD.hdf5', \n verbose=1,\n save_best_only=True,\n save_weights_only=True),\n tf.keras.callbacks.TensorBoard(log_dir='./logs/LSD', update_freq='batch'),\n # tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=1e-8),\n ]\n \nG.fit_generator(generator = data_sequence(train_data_path, 64),\n validation_data = data_sequence(val_data_path, 2),\n steps_per_epoch = 3325 // 64, \n verbose = 1,\n epochs = 400,\n callbacks = callbacks,\n max_queue_size = 10,\n use_multiprocessing = True,\n workers = 6,\n initial_epoch = 0)\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from odoo import models, fields, api
class Aceptar_letras_wizard(models.TransientModel):
_name = 'aceptar_letras_wizard'
_description = "Aceptar letras"
def _get_letras(self):
if self.env.context and self.env.context.get('active_ids'):
return self.env.context.get('active_ids')
return []
letra_ids = fields.Many2many('letra_cambio.letra', default=_get_letras, string='Letras')
@api.multi
def aceptar_letras(self):
active_ids = self.env.context.get('active_ids', []) or []
records = self.env['letra_cambio.letra'].browse(active_ids)
self.env['letra_cambio.letra'].cambiar_estado_all(records, "ACE")
|
normal
|
{
"blob_id": "4ad3390f8f2c92f35acde507be7a7b713af997f2",
"index": 5092,
"step-1": "<mask token>\n\n\nclass Aceptar_letras_wizard(models.TransientModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @api.multi\n def aceptar_letras(self):\n active_ids = self.env.context.get('active_ids', []) or []\n records = self.env['letra_cambio.letra'].browse(active_ids)\n self.env['letra_cambio.letra'].cambiar_estado_all(records, 'ACE')\n",
"step-2": "<mask token>\n\n\nclass Aceptar_letras_wizard(models.TransientModel):\n <mask token>\n <mask token>\n\n def _get_letras(self):\n if self.env.context and self.env.context.get('active_ids'):\n return self.env.context.get('active_ids')\n return []\n <mask token>\n\n @api.multi\n def aceptar_letras(self):\n active_ids = self.env.context.get('active_ids', []) or []\n records = self.env['letra_cambio.letra'].browse(active_ids)\n self.env['letra_cambio.letra'].cambiar_estado_all(records, 'ACE')\n",
"step-3": "<mask token>\n\n\nclass Aceptar_letras_wizard(models.TransientModel):\n _name = 'aceptar_letras_wizard'\n _description = 'Aceptar letras'\n\n def _get_letras(self):\n if self.env.context and self.env.context.get('active_ids'):\n return self.env.context.get('active_ids')\n return []\n letra_ids = fields.Many2many('letra_cambio.letra', default=_get_letras,\n string='Letras')\n\n @api.multi\n def aceptar_letras(self):\n active_ids = self.env.context.get('active_ids', []) or []\n records = self.env['letra_cambio.letra'].browse(active_ids)\n self.env['letra_cambio.letra'].cambiar_estado_all(records, 'ACE')\n",
"step-4": "from odoo import models, fields, api\n\n\nclass Aceptar_letras_wizard(models.TransientModel):\n _name = 'aceptar_letras_wizard'\n _description = 'Aceptar letras'\n\n def _get_letras(self):\n if self.env.context and self.env.context.get('active_ids'):\n return self.env.context.get('active_ids')\n return []\n letra_ids = fields.Many2many('letra_cambio.letra', default=_get_letras,\n string='Letras')\n\n @api.multi\n def aceptar_letras(self):\n active_ids = self.env.context.get('active_ids', []) or []\n records = self.env['letra_cambio.letra'].browse(active_ids)\n self.env['letra_cambio.letra'].cambiar_estado_all(records, 'ACE')\n",
"step-5": "from odoo import models, fields, api\n\n\nclass Aceptar_letras_wizard(models.TransientModel):\n _name = 'aceptar_letras_wizard'\n _description = \"Aceptar letras\"\n\n def _get_letras(self):\n if self.env.context and self.env.context.get('active_ids'):\n return self.env.context.get('active_ids')\n return []\n\n\n letra_ids = fields.Many2many('letra_cambio.letra', default=_get_letras, string='Letras')\n\n @api.multi\n def aceptar_letras(self):\n active_ids = self.env.context.get('active_ids', []) or []\n records = self.env['letra_cambio.letra'].browse(active_ids)\n self.env['letra_cambio.letra'].cambiar_estado_all(records, \"ACE\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""PriceTrail URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from .views import validate_product, display_product
#user related views
from .views import index_view, login_view, register_view, profile_view
#products related views
from .views import my_products_view, delete_product, add_new_product, dashboard_view, test_email_notifications, edit_profile_view, product_details_view, \
test_update_prices, test_update_all_prices
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),
# implemented views
url(r'^$', index_view, name='index'),#this will became index
url(r'^login/$', login_view, name='login'),
url(r'^register/$', register_view, name='register'),
url(r'^profile/$', profile_view, name='profile'),
url(r'^my-products/$', my_products_view, name='my-products'),
url(r'^my-products/(?P<filter>[\w-]+)', my_products_view, name='my-products'),
url(r'^delete-product/(?P<id>\d+)/', delete_product, name='delete-product'),
url(r'^add-new-product/$', add_new_product, name='add-new-product'),
url(r'^validate-product/$', validate_product, name='validate-product'),
url(r'^dashboard/$', dashboard_view, name='dashboard'),
url(r'^edit-profile/$', edit_profile_view, name='edit-profile'),
#modal window
url(r'^display-product/(?P<id>\d+)/', display_product, name='display-product'),
url(r'^product-details/(?P<id>\d+)/', product_details_view, name='product-details'),
#superuser endpoints
url(r'^test_notifications/$', test_email_notifications, name='test-view'),
url(r'^test_update_prices/(?P<id>\w+)/', test_update_prices, name='update-prices'),
url(r'^test_update_all_prices/$', test_update_all_prices, name='update-all-prices'),
]
|
normal
|
{
"blob_id": "06627821c09d02543974a3c90664e84e11c980ed",
"index": 7631,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^admin/', admin.site.urls), url('^logout/$', auth_views\n .logout, {'next_page': '/'}, name='logout'), url('^$', index_view, name\n ='index'), url('^login/$', login_view, name='login'), url('^register/$',\n register_view, name='register'), url('^profile/$', profile_view, name=\n 'profile'), url('^my-products/$', my_products_view, name='my-products'),\n url('^my-products/(?P<filter>[\\\\w-]+)', my_products_view, name=\n 'my-products'), url('^delete-product/(?P<id>\\\\d+)/', delete_product,\n name='delete-product'), url('^add-new-product/$', add_new_product, name\n ='add-new-product'), url('^validate-product/$', validate_product, name=\n 'validate-product'), url('^dashboard/$', dashboard_view, name=\n 'dashboard'), url('^edit-profile/$', edit_profile_view, name=\n 'edit-profile'), url('^display-product/(?P<id>\\\\d+)/', display_product,\n name='display-product'), url('^product-details/(?P<id>\\\\d+)/',\n product_details_view, name='product-details'), url(\n '^test_notifications/$', test_email_notifications, name='test-view'),\n url('^test_update_prices/(?P<id>\\\\w+)/', test_update_prices, name=\n 'update-prices'), url('^test_update_all_prices/$',\n test_update_all_prices, name='update-all-prices')]\n",
"step-3": "<mask token>\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\nfrom .views import validate_product, display_product\nfrom .views import index_view, login_view, register_view, profile_view\nfrom .views import my_products_view, delete_product, add_new_product, dashboard_view, test_email_notifications, edit_profile_view, product_details_view, test_update_prices, test_update_all_prices\nurlpatterns = [url('^admin/', admin.site.urls), url('^logout/$', auth_views\n .logout, {'next_page': '/'}, name='logout'), url('^$', index_view, name\n ='index'), url('^login/$', login_view, name='login'), url('^register/$',\n register_view, name='register'), url('^profile/$', profile_view, name=\n 'profile'), url('^my-products/$', my_products_view, name='my-products'),\n url('^my-products/(?P<filter>[\\\\w-]+)', my_products_view, name=\n 'my-products'), url('^delete-product/(?P<id>\\\\d+)/', delete_product,\n name='delete-product'), url('^add-new-product/$', add_new_product, name\n ='add-new-product'), url('^validate-product/$', validate_product, name=\n 'validate-product'), url('^dashboard/$', dashboard_view, name=\n 'dashboard'), url('^edit-profile/$', edit_profile_view, name=\n 'edit-profile'), url('^display-product/(?P<id>\\\\d+)/', display_product,\n name='display-product'), url('^product-details/(?P<id>\\\\d+)/',\n product_details_view, name='product-details'), url(\n '^test_notifications/$', test_email_notifications, name='test-view'),\n url('^test_update_prices/(?P<id>\\\\w+)/', test_update_prices, name=\n 'update-prices'), url('^test_update_all_prices/$',\n test_update_all_prices, name='update-all-prices')]\n",
"step-4": "\"\"\"PriceTrail URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\nfrom .views import validate_product, display_product\n\n#user related views\nfrom .views import index_view, login_view, register_view, profile_view\n#products related views\nfrom .views import my_products_view, delete_product, add_new_product, dashboard_view, test_email_notifications, edit_profile_view, product_details_view, \\\n test_update_prices, test_update_all_prices\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),\n\n # implemented views\n url(r'^$', index_view, name='index'),#this will became index\n url(r'^login/$', login_view, name='login'),\n url(r'^register/$', register_view, name='register'),\n url(r'^profile/$', profile_view, name='profile'),\n url(r'^my-products/$', my_products_view, name='my-products'),\n url(r'^my-products/(?P<filter>[\\w-]+)', my_products_view, name='my-products'),\n url(r'^delete-product/(?P<id>\\d+)/', delete_product, name='delete-product'),\n url(r'^add-new-product/$', add_new_product, name='add-new-product'),\n url(r'^validate-product/$', validate_product, name='validate-product'),\n url(r'^dashboard/$', dashboard_view, name='dashboard'),\n url(r'^edit-profile/$', edit_profile_view, name='edit-profile'),\n\n #modal window\n url(r'^display-product/(?P<id>\\d+)/', display_product, name='display-product'),\n url(r'^product-details/(?P<id>\\d+)/', product_details_view, name='product-details'),\n\n #superuser endpoints\n url(r'^test_notifications/$', test_email_notifications, name='test-view'),\n url(r'^test_update_prices/(?P<id>\\w+)/', test_update_prices, name='update-prices'),\n url(r'^test_update_all_prices/$', test_update_all_prices, name='update-all-prices'),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class ReloadModelHandler(BaseHandler):
def __init__(self, application, request, **kwargs):
super(ReloadModelHandler, self).__init__(application, request, **kwargs
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ReloadModelHandler(BaseHandler):
def __init__(self, application, request, **kwargs):
super(ReloadModelHandler, self).__init__(application, request, **kwargs
)
def do_action(self):
model_name = self.get_argument('modelname', None)
if model_name is None:
for model_name in os.listdir(model_path):
if model_name.find('.model') == -1:
continue
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={'message': 'server has reload all models'})
else:
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={'message': 'server has reload {model}'.
format(model=model_name)})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
module_path = os.path.abspath(os.path.join(os.curdir))
model_path = os.path.join(module_path, 'model')
class ReloadModelHandler(BaseHandler):
def __init__(self, application, request, **kwargs):
super(ReloadModelHandler, self).__init__(application, request, **kwargs
)
def do_action(self):
model_name = self.get_argument('modelname', None)
if model_name is None:
for model_name in os.listdir(model_path):
if model_name.find('.model') == -1:
continue
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={'message': 'server has reload all models'})
else:
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={'message': 'server has reload {model}'.
format(model=model_name)})
<|reserved_special_token_1|>
from src.handler.base.base_handler import BaseHandler
from src.utils.tools import read_model
from tornado.options import options
import os
module_path = os.path.abspath(os.path.join(os.curdir))
model_path = os.path.join(module_path, 'model')
class ReloadModelHandler(BaseHandler):
def __init__(self, application, request, **kwargs):
super(ReloadModelHandler, self).__init__(application, request, **kwargs
)
def do_action(self):
model_name = self.get_argument('modelname', None)
if model_name is None:
for model_name in os.listdir(model_path):
if model_name.find('.model') == -1:
continue
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={'message': 'server has reload all models'})
else:
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={'message': 'server has reload {model}'.
format(model=model_name)})
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# @Time : 2019/3/5 上午9:55
# @Author : yidxue
from src.handler.base.base_handler import BaseHandler
from src.utils.tools import read_model
from tornado.options import options
import os
module_path = os.path.abspath(os.path.join(os.curdir))
model_path = os.path.join(module_path, 'model')
class ReloadModelHandler(BaseHandler):
def __init__(self, application, request, **kwargs):
super(ReloadModelHandler, self).__init__(application, request, **kwargs)
def do_action(self):
model_name = self.get_argument('modelname', None)
if model_name is None:
for model_name in os.listdir(model_path):
if model_name.find(".model") == -1:
continue
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={"message": "server has reload all models"})
else:
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={"message": "server has reload {model}".format(model=model_name)})
|
flexible
|
{
"blob_id": "a8ae59bb525c52ef852655f0ef1e32d96c8914d6",
"index": 1356,
"step-1": "<mask token>\n\n\nclass ReloadModelHandler(BaseHandler):\n\n def __init__(self, application, request, **kwargs):\n super(ReloadModelHandler, self).__init__(application, request, **kwargs\n )\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ReloadModelHandler(BaseHandler):\n\n def __init__(self, application, request, **kwargs):\n super(ReloadModelHandler, self).__init__(application, request, **kwargs\n )\n\n def do_action(self):\n model_name = self.get_argument('modelname', None)\n if model_name is None:\n for model_name in os.listdir(model_path):\n if model_name.find('.model') == -1:\n continue\n model = read_model(os.path.join(model_path, model_name))\n options.models[model_name] = model\n self.set_result(result={'message': 'server has reload all models'})\n else:\n model = read_model(os.path.join(model_path, model_name))\n options.models[model_name] = model\n self.set_result(result={'message': 'server has reload {model}'.\n format(model=model_name)})\n",
"step-3": "<mask token>\nmodule_path = os.path.abspath(os.path.join(os.curdir))\nmodel_path = os.path.join(module_path, 'model')\n\n\nclass ReloadModelHandler(BaseHandler):\n\n def __init__(self, application, request, **kwargs):\n super(ReloadModelHandler, self).__init__(application, request, **kwargs\n )\n\n def do_action(self):\n model_name = self.get_argument('modelname', None)\n if model_name is None:\n for model_name in os.listdir(model_path):\n if model_name.find('.model') == -1:\n continue\n model = read_model(os.path.join(model_path, model_name))\n options.models[model_name] = model\n self.set_result(result={'message': 'server has reload all models'})\n else:\n model = read_model(os.path.join(model_path, model_name))\n options.models[model_name] = model\n self.set_result(result={'message': 'server has reload {model}'.\n format(model=model_name)})\n",
"step-4": "from src.handler.base.base_handler import BaseHandler\nfrom src.utils.tools import read_model\nfrom tornado.options import options\nimport os\nmodule_path = os.path.abspath(os.path.join(os.curdir))\nmodel_path = os.path.join(module_path, 'model')\n\n\nclass ReloadModelHandler(BaseHandler):\n\n def __init__(self, application, request, **kwargs):\n super(ReloadModelHandler, self).__init__(application, request, **kwargs\n )\n\n def do_action(self):\n model_name = self.get_argument('modelname', None)\n if model_name is None:\n for model_name in os.listdir(model_path):\n if model_name.find('.model') == -1:\n continue\n model = read_model(os.path.join(model_path, model_name))\n options.models[model_name] = model\n self.set_result(result={'message': 'server has reload all models'})\n else:\n model = read_model(os.path.join(model_path, model_name))\n options.models[model_name] = model\n self.set_result(result={'message': 'server has reload {model}'.\n format(model=model_name)})\n",
"step-5": "# -*- coding: utf-8 -*-\n# @Time : 2019/3/5 上午9:55\n# @Author : yidxue\nfrom src.handler.base.base_handler import BaseHandler\nfrom src.utils.tools import read_model\nfrom tornado.options import options\nimport os\n\nmodule_path = os.path.abspath(os.path.join(os.curdir))\nmodel_path = os.path.join(module_path, 'model')\n\n\nclass ReloadModelHandler(BaseHandler):\n\n def __init__(self, application, request, **kwargs):\n super(ReloadModelHandler, self).__init__(application, request, **kwargs)\n\n def do_action(self):\n model_name = self.get_argument('modelname', None)\n if model_name is None:\n for model_name in os.listdir(model_path):\n if model_name.find(\".model\") == -1:\n continue\n model = read_model(os.path.join(model_path, model_name))\n options.models[model_name] = model\n self.set_result(result={\"message\": \"server has reload all models\"})\n else:\n model = read_model(os.path.join(model_path, model_name))\n options.models[model_name] = model\n self.set_result(result={\"message\": \"server has reload {model}\".format(model=model_name)})\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# CS 5010 Project
# Team Metro
# Test the data cleaning
import unittest
from cleaning_data import dfClean # import the dataframe we created after cleaning the data
class DataTypesTestCase(unittest.TestCase):
# we will test that each column has the correct data type
# note that there is a strange occurence seen below when converting to a pandas dataframe
def test_is_holiday_a_string(self):
holiday = dfClean.iloc[4908,0]
self.assertTrue(isinstance(holiday, str))
def test_is_temperature_a_float(self):
temp = dfClean.iloc[4908,1]
self.assertTrue(isinstance(temp, float))
def test_is_rain_a_float(self):
rain = dfClean.iloc[4908,2]
self.assertTrue(isinstance(rain, float))
def test_is_snow_a_float(self):
snow = dfClean.iloc[4908,3]
self.assertTrue(isinstance(snow, float))
def test_is_clouds_an_int(self):
clouds = dfClean.iloc[4908,4]
self.assertEqual(str(type(clouds)), "<class 'numpy.int64'>")
# pandas converts all of the ints in the list to numpy.int64
# could not figure out how to avoid this
def test_is_weather_main_a_string(self):
weather = dfClean.iloc[4908,5]
self.assertTrue(isinstance(weather, str))
def test_is_weather_descrip_a_string(self):
weather = dfClean.iloc[4908,6]
self.assertTrue(isinstance(weather, str))
def test_is_date_time_a_string(self):
dateTime = dfClean.iloc[4908,7]
self.assertTrue(isinstance(dateTime, str))
def test_is_traffic_an_int(self):
traffic = dfClean.iloc[4908,8]
self.assertEqual(str(type(traffic)), "<class 'numpy.int64'>")
def test_is_month_an_int(self):
month = dfClean.iloc[4908,9]
self.assertEqual(str(type(month)), "<class 'numpy.int64'>")
def test_is_day_an_int(self):
day = dfClean.iloc[4908,10]
self.assertEqual(str(type(day)), "<class 'numpy.int64'>")
def test_is_year_an_int(self):
year = dfClean.iloc[4908,11]
self.assertEqual(str(type(year)), "<class 'numpy.int64'>")
def test_is_hour_an_int(self):
hour = dfClean.iloc[4908,12]
self.assertEqual(str(type(hour)), "<class 'numpy.int64'>")
class DateTimeFormatTestCase(unittest.TestCase):
def test_does_month_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i,7][2] != "/":
booln = False
i += 1
self.assertTrue(booln)
# make sure that every data point has a two digit month
# in cleaning, 0 should have been added to make it two digits
def test_does_day_have_two_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i,7][5] != "/":
booln = False
i += 1
self.assertTrue(booln)
# all months in the date/time string should have two digits after cleaning
def test_does_year_have_four_digits(self):
i = 0
booln = True
while i < len(dfClean):
if dfClean.iloc[i,7][6:8] != "20":
booln = False
i += 1
self.assertTrue(booln)
# all years should be in the form 20xx in the date/time string
def test_does_hour_have_two_digits(self):
i = 0
booln = True # since we already tested all of the other cleaning items on the date/time string
while i < len(dfClean): # we can check the hour by checking the length of the whole string
if len(dfClean.iloc[i,7]) != 16: # all in column should have the form "mm/dd/yyyy hh:00"
booln = False
i += 1
self.assertTrue(booln)
# in cleaning, 0 should have been added to make a one digit hour (0-9) two digits (00-09)
# without the other tests this would be a way to check all in one test but would not
# tell us what part of the cleaning on the date/time string did not work correctly
class AppendColumnsTestCase(unittest.TestCase):
# we will check that each of the four new columns (month, day, year, and hour)
# appended correctly to the dataset
def test_is_month_column_appending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i,9]) != int(dfClean.iloc[i,7][:2]):
booln = False
i += 1
self.assertTrue(booln)
# we check that the month in the month column matches that in the original date/time column
def test_is_day_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i,10]) != int(dfClean.iloc[i,7][3:5]):
booln = False
i += 1
self.assertTrue(booln)
# we check that the day in the day column matches that in the original date/time column
def test_is_year_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i,11]) != int(dfClean.iloc[i,7][6:10]):
booln = False
i += 1
self.assertTrue(booln)
# we check that the year in the year column matches that in the original date/time column
def test_is_hour_column_apending_correctly(self):
i = 0
booln = True
while i < len(dfClean):
if int(dfClean.iloc[i,12]) != int(dfClean.iloc[i,7][11:13]):
booln = False
i += 1
self.assertTrue(booln)
# we check that the hour in the hour column matches that in the original date/time column
class HolidayTestCase(unittest.TestCase):
# we test that every hour of the same day has a consistent holiday
def test_are_all_hours_correct_holiday(self):
i = 0
booln = True
hol = "None"
while i < len(dfClean):
if dfClean.iloc[i,12] == 0:
hol = dfClean.iloc[i,0]
else:
if dfClean.iloc[i,0] != hol:
booln = False
i += 1
self.assertTrue(booln)
class UniqueDataPointsTestCase(unittest.TestCase):
# this test ensures that no two data points have the exact same date and hour
def test_are_all_datetimes_unique(self):
i = 1
booln = True
while i < len(dfClean):
if dfClean.iloc[i,7] == dfClean.iloc[i-1,7]:
booln = False
i += 1
self.assertTrue(booln)
class TemperatureConversionTestCase(unittest.TestCase):
# we test that the temperature was converted to Fahrenheit
# note that since we overrode the original temperature, we simply check for
# outlier that would make sense as Kelvin values but not Fahrenheit values
# This how we discovered there were some missing temperatures input as 0 Kelvin
# because they converted to -450 Fahrenheit
def test_is_temp_converting_from_kelvin_to_F(self):
i = 1
booln = True
while i < len(dfClean):
if (dfClean.iloc[i,1] > 120) | (dfClean.iloc[i,1] < -50):
booln = False
i += 1
self.assertTrue(booln)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "9d0727970c760a9a8123c5c07359ba5c538cea3c",
"index": 5926,
"step-1": "<mask token>\n\n\nclass DataTypesTestCase(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_is_rain_a_float(self):\n rain = dfClean.iloc[4908, 2]\n self.assertTrue(isinstance(rain, float))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass DateTimeFormatTestCase(unittest.TestCase):\n\n def test_does_month_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][2] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_day_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][5] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_year_have_four_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][6:8] != '20':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_hour_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if len(dfClean.iloc[i, 7]) != 16:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass AppendColumnsTestCase(unittest.TestCase):\n\n def test_is_month_column_appending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 9]) != int(dfClean.iloc[i, 7][:2]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_day_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 10]) != int(dfClean.iloc[i, 7][3:5]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_year_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 11]) != int(dfClean.iloc[i, 7][6:10]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_hour_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 12]) != int(dfClean.iloc[i, 7][11:13]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass HolidayTestCase(unittest.TestCase):\n\n def test_are_all_hours_correct_holiday(self):\n i = 0\n booln = True\n hol = 'None'\n while i < len(dfClean):\n if dfClean.iloc[i, 12] == 0:\n hol = dfClean.iloc[i, 0]\n elif dfClean.iloc[i, 0] != hol:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass UniqueDataPointsTestCase(unittest.TestCase):\n\n def test_are_all_datetimes_unique(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7] == dfClean.iloc[i - 1, 7]:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass TemperatureConversionTestCase(unittest.TestCase):\n\n def test_is_temp_converting_from_kelvin_to_F(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if (dfClean.iloc[i, 1] > 120) | (dfClean.iloc[i, 1] < -50):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DataTypesTestCase(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_is_rain_a_float(self):\n rain = dfClean.iloc[4908, 2]\n self.assertTrue(isinstance(rain, float))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_is_hour_an_int(self):\n hour = dfClean.iloc[4908, 12]\n self.assertEqual(str(type(hour)), \"<class 'numpy.int64'>\")\n\n\nclass DateTimeFormatTestCase(unittest.TestCase):\n\n def test_does_month_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][2] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_day_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][5] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_year_have_four_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][6:8] != '20':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_hour_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if len(dfClean.iloc[i, 7]) != 16:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass AppendColumnsTestCase(unittest.TestCase):\n\n def test_is_month_column_appending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 9]) != int(dfClean.iloc[i, 7][:2]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_day_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 10]) != int(dfClean.iloc[i, 7][3:5]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_year_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 11]) != int(dfClean.iloc[i, 7][6:10]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_hour_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 12]) != int(dfClean.iloc[i, 7][11:13]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass HolidayTestCase(unittest.TestCase):\n\n def test_are_all_hours_correct_holiday(self):\n i = 0\n booln = True\n hol = 'None'\n while i < len(dfClean):\n if dfClean.iloc[i, 12] == 0:\n hol = dfClean.iloc[i, 0]\n elif dfClean.iloc[i, 0] != hol:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass UniqueDataPointsTestCase(unittest.TestCase):\n\n def test_are_all_datetimes_unique(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7] == dfClean.iloc[i - 1, 7]:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass TemperatureConversionTestCase(unittest.TestCase):\n\n def test_is_temp_converting_from_kelvin_to_F(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if (dfClean.iloc[i, 1] > 120) | (dfClean.iloc[i, 1] < -50):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DataTypesTestCase(unittest.TestCase):\n <mask token>\n\n def test_is_temperature_a_float(self):\n temp = dfClean.iloc[4908, 1]\n self.assertTrue(isinstance(temp, float))\n\n def test_is_rain_a_float(self):\n rain = dfClean.iloc[4908, 2]\n self.assertTrue(isinstance(rain, float))\n\n def test_is_snow_a_float(self):\n snow = dfClean.iloc[4908, 3]\n self.assertTrue(isinstance(snow, float))\n\n def test_is_clouds_an_int(self):\n clouds = dfClean.iloc[4908, 4]\n self.assertEqual(str(type(clouds)), \"<class 'numpy.int64'>\")\n\n def test_is_weather_main_a_string(self):\n weather = dfClean.iloc[4908, 5]\n self.assertTrue(isinstance(weather, str))\n\n def test_is_weather_descrip_a_string(self):\n weather = dfClean.iloc[4908, 6]\n self.assertTrue(isinstance(weather, str))\n\n def test_is_date_time_a_string(self):\n dateTime = dfClean.iloc[4908, 7]\n self.assertTrue(isinstance(dateTime, str))\n\n def test_is_traffic_an_int(self):\n traffic = dfClean.iloc[4908, 8]\n self.assertEqual(str(type(traffic)), \"<class 'numpy.int64'>\")\n\n def test_is_month_an_int(self):\n month = dfClean.iloc[4908, 9]\n self.assertEqual(str(type(month)), \"<class 'numpy.int64'>\")\n\n def test_is_day_an_int(self):\n day = dfClean.iloc[4908, 10]\n self.assertEqual(str(type(day)), \"<class 'numpy.int64'>\")\n\n def test_is_year_an_int(self):\n year = dfClean.iloc[4908, 11]\n self.assertEqual(str(type(year)), \"<class 'numpy.int64'>\")\n\n def test_is_hour_an_int(self):\n hour = dfClean.iloc[4908, 12]\n self.assertEqual(str(type(hour)), \"<class 'numpy.int64'>\")\n\n\nclass DateTimeFormatTestCase(unittest.TestCase):\n\n def test_does_month_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][2] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_day_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][5] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_year_have_four_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][6:8] != '20':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_hour_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if len(dfClean.iloc[i, 7]) != 16:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass AppendColumnsTestCase(unittest.TestCase):\n\n def test_is_month_column_appending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 9]) != int(dfClean.iloc[i, 7][:2]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_day_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 10]) != int(dfClean.iloc[i, 7][3:5]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_year_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 11]) != int(dfClean.iloc[i, 7][6:10]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_hour_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 12]) != int(dfClean.iloc[i, 7][11:13]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass HolidayTestCase(unittest.TestCase):\n\n def test_are_all_hours_correct_holiday(self):\n i = 0\n booln = True\n hol = 'None'\n while i < len(dfClean):\n if dfClean.iloc[i, 12] == 0:\n hol = dfClean.iloc[i, 0]\n elif dfClean.iloc[i, 0] != hol:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass UniqueDataPointsTestCase(unittest.TestCase):\n\n def test_are_all_datetimes_unique(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7] == dfClean.iloc[i - 1, 7]:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass TemperatureConversionTestCase(unittest.TestCase):\n\n def test_is_temp_converting_from_kelvin_to_F(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if (dfClean.iloc[i, 1] > 120) | (dfClean.iloc[i, 1] < -50):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass DataTypesTestCase(unittest.TestCase):\n\n def test_is_holiday_a_string(self):\n holiday = dfClean.iloc[4908, 0]\n self.assertTrue(isinstance(holiday, str))\n\n def test_is_temperature_a_float(self):\n temp = dfClean.iloc[4908, 1]\n self.assertTrue(isinstance(temp, float))\n\n def test_is_rain_a_float(self):\n rain = dfClean.iloc[4908, 2]\n self.assertTrue(isinstance(rain, float))\n\n def test_is_snow_a_float(self):\n snow = dfClean.iloc[4908, 3]\n self.assertTrue(isinstance(snow, float))\n\n def test_is_clouds_an_int(self):\n clouds = dfClean.iloc[4908, 4]\n self.assertEqual(str(type(clouds)), \"<class 'numpy.int64'>\")\n\n def test_is_weather_main_a_string(self):\n weather = dfClean.iloc[4908, 5]\n self.assertTrue(isinstance(weather, str))\n\n def test_is_weather_descrip_a_string(self):\n weather = dfClean.iloc[4908, 6]\n self.assertTrue(isinstance(weather, str))\n\n def test_is_date_time_a_string(self):\n dateTime = dfClean.iloc[4908, 7]\n self.assertTrue(isinstance(dateTime, str))\n\n def test_is_traffic_an_int(self):\n traffic = dfClean.iloc[4908, 8]\n self.assertEqual(str(type(traffic)), \"<class 'numpy.int64'>\")\n\n def test_is_month_an_int(self):\n month = dfClean.iloc[4908, 9]\n self.assertEqual(str(type(month)), \"<class 'numpy.int64'>\")\n\n def test_is_day_an_int(self):\n day = dfClean.iloc[4908, 10]\n self.assertEqual(str(type(day)), \"<class 'numpy.int64'>\")\n\n def test_is_year_an_int(self):\n year = dfClean.iloc[4908, 11]\n self.assertEqual(str(type(year)), \"<class 'numpy.int64'>\")\n\n def test_is_hour_an_int(self):\n hour = dfClean.iloc[4908, 12]\n self.assertEqual(str(type(hour)), \"<class 'numpy.int64'>\")\n\n\nclass DateTimeFormatTestCase(unittest.TestCase):\n\n def test_does_month_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][2] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_day_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][5] != '/':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_year_have_four_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7][6:8] != '20':\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_does_hour_have_two_digits(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if len(dfClean.iloc[i, 7]) != 16:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass AppendColumnsTestCase(unittest.TestCase):\n\n def test_is_month_column_appending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 9]) != int(dfClean.iloc[i, 7][:2]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_day_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 10]) != int(dfClean.iloc[i, 7][3:5]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_year_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 11]) != int(dfClean.iloc[i, 7][6:10]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n def test_is_hour_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i, 12]) != int(dfClean.iloc[i, 7][11:13]):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass HolidayTestCase(unittest.TestCase):\n\n def test_are_all_hours_correct_holiday(self):\n i = 0\n booln = True\n hol = 'None'\n while i < len(dfClean):\n if dfClean.iloc[i, 12] == 0:\n hol = dfClean.iloc[i, 0]\n elif dfClean.iloc[i, 0] != hol:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass UniqueDataPointsTestCase(unittest.TestCase):\n\n def test_are_all_datetimes_unique(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i, 7] == dfClean.iloc[i - 1, 7]:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass TemperatureConversionTestCase(unittest.TestCase):\n\n def test_is_temp_converting_from_kelvin_to_F(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if (dfClean.iloc[i, 1] > 120) | (dfClean.iloc[i, 1] < -50):\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "# CS 5010 Project \n\n# Team Metro\n\n# Test the data cleaning\n\nimport unittest\nfrom cleaning_data import dfClean # import the dataframe we created after cleaning the data\n\n\nclass DataTypesTestCase(unittest.TestCase):\n\n # we will test that each column has the correct data type\n # note that there is a strange occurence seen below when converting to a pandas dataframe\n\n def test_is_holiday_a_string(self):\n holiday = dfClean.iloc[4908,0]\n self.assertTrue(isinstance(holiday, str))\n \n def test_is_temperature_a_float(self):\n temp = dfClean.iloc[4908,1]\n self.assertTrue(isinstance(temp, float))\n \n def test_is_rain_a_float(self):\n rain = dfClean.iloc[4908,2]\n self.assertTrue(isinstance(rain, float))\n\n def test_is_snow_a_float(self):\n snow = dfClean.iloc[4908,3]\n self.assertTrue(isinstance(snow, float))\n\n def test_is_clouds_an_int(self):\n clouds = dfClean.iloc[4908,4]\n self.assertEqual(str(type(clouds)), \"<class 'numpy.int64'>\")\n # pandas converts all of the ints in the list to numpy.int64 \n # could not figure out how to avoid this\n\n def test_is_weather_main_a_string(self):\n weather = dfClean.iloc[4908,5]\n self.assertTrue(isinstance(weather, str))\n \n def test_is_weather_descrip_a_string(self):\n weather = dfClean.iloc[4908,6]\n self.assertTrue(isinstance(weather, str))\n\n def test_is_date_time_a_string(self):\n dateTime = dfClean.iloc[4908,7]\n self.assertTrue(isinstance(dateTime, str))\n\n def test_is_traffic_an_int(self):\n traffic = dfClean.iloc[4908,8]\n self.assertEqual(str(type(traffic)), \"<class 'numpy.int64'>\")\n\n def test_is_month_an_int(self):\n month = dfClean.iloc[4908,9]\n self.assertEqual(str(type(month)), \"<class 'numpy.int64'>\")\n\n def test_is_day_an_int(self):\n day = dfClean.iloc[4908,10]\n self.assertEqual(str(type(day)), \"<class 'numpy.int64'>\")\n\n def test_is_year_an_int(self):\n year = dfClean.iloc[4908,11]\n self.assertEqual(str(type(year)), \"<class 'numpy.int64'>\")\n \n def test_is_hour_an_int(self):\n hour = dfClean.iloc[4908,12]\n self.assertEqual(str(type(hour)), \"<class 'numpy.int64'>\")\n\n \n\n\nclass DateTimeFormatTestCase(unittest.TestCase):\n def test_does_month_have_two_digits(self):\n i = 0 \n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i,7][2] != \"/\":\n booln = False\n i += 1\n self.assertTrue(booln)\n # make sure that every data point has a two digit month\n # in cleaning, 0 should have been added to make it two digits\n \n def test_does_day_have_two_digits(self):\n i = 0 \n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i,7][5] != \"/\":\n booln = False\n i += 1\n self.assertTrue(booln)\n # all months in the date/time string should have two digits after cleaning\n\n def test_does_year_have_four_digits(self):\n i = 0 \n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i,7][6:8] != \"20\":\n booln = False\n i += 1\n self.assertTrue(booln)\n # all years should be in the form 20xx in the date/time string\n \n def test_does_hour_have_two_digits(self):\n i = 0\n booln = True # since we already tested all of the other cleaning items on the date/time string\n while i < len(dfClean): # we can check the hour by checking the length of the whole string\n if len(dfClean.iloc[i,7]) != 16: # all in column should have the form \"mm/dd/yyyy hh:00\"\n booln = False\n i += 1\n self.assertTrue(booln) \n # in cleaning, 0 should have been added to make a one digit hour (0-9) two digits (00-09)\n # without the other tests this would be a way to check all in one test but would not\n # tell us what part of the cleaning on the date/time string did not work correctly\n\n\nclass AppendColumnsTestCase(unittest.TestCase):\n # we will check that each of the four new columns (month, day, year, and hour)\n # appended correctly to the dataset\n def test_is_month_column_appending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i,9]) != int(dfClean.iloc[i,7][:2]):\n booln = False\n i += 1\n self.assertTrue(booln)\n # we check that the month in the month column matches that in the original date/time column\n \n def test_is_day_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i,10]) != int(dfClean.iloc[i,7][3:5]):\n booln = False\n i += 1\n self.assertTrue(booln)\n # we check that the day in the day column matches that in the original date/time column\n\n def test_is_year_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i,11]) != int(dfClean.iloc[i,7][6:10]):\n booln = False\n i += 1\n self.assertTrue(booln)\n # we check that the year in the year column matches that in the original date/time column\n\n\n def test_is_hour_column_apending_correctly(self):\n i = 0\n booln = True\n while i < len(dfClean):\n if int(dfClean.iloc[i,12]) != int(dfClean.iloc[i,7][11:13]):\n booln = False\n i += 1\n self.assertTrue(booln)\n # we check that the hour in the hour column matches that in the original date/time column\n \n\nclass HolidayTestCase(unittest.TestCase):\n # we test that every hour of the same day has a consistent holiday\n def test_are_all_hours_correct_holiday(self):\n i = 0\n booln = True\n hol = \"None\"\n while i < len(dfClean):\n if dfClean.iloc[i,12] == 0:\n hol = dfClean.iloc[i,0]\n else:\n if dfClean.iloc[i,0] != hol:\n booln = False\n i += 1\n self.assertTrue(booln)\n\n\nclass UniqueDataPointsTestCase(unittest.TestCase):\n # this test ensures that no two data points have the exact same date and hour\n def test_are_all_datetimes_unique(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if dfClean.iloc[i,7] == dfClean.iloc[i-1,7]:\n booln = False\n i += 1\n self.assertTrue(booln)\n \n\nclass TemperatureConversionTestCase(unittest.TestCase):\n # we test that the temperature was converted to Fahrenheit\n # note that since we overrode the original temperature, we simply check for \n # outlier that would make sense as Kelvin values but not Fahrenheit values\n # This how we discovered there were some missing temperatures input as 0 Kelvin\n # because they converted to -450 Fahrenheit\n def test_is_temp_converting_from_kelvin_to_F(self):\n i = 1\n booln = True\n while i < len(dfClean):\n if (dfClean.iloc[i,1] > 120) | (dfClean.iloc[i,1] < -50):\n booln = False\n i += 1\n self.assertTrue(booln)\n\nif __name__ == '__main__': \n unittest.main() ",
"step-ids": [
18,
19,
29,
31,
33
]
}
|
[
18,
19,
29,
31,
33
] |
def test(name,message):
print("用户是:" , name)
print("欢迎消息是:",message)
my_list = ['孙悟空','欢迎来疯狂软件']
test(*my_list)
print('*****')
# ###########################
def foo(name,*nums):
print("name参数:",name)
print("nums参数:",nums)
my_tuple = (1,2,3)
foo('fkit',*my_tuple)
print('********')
foo(*my_tuple)
print('*******')
foo(my_tuple)
#############################
def bar(book,price,desc):
print(book,'这本书的价格是:',price)
print('描述信息是:',desc)
print('********')
my_dict = {'price':89,'book':'疯狂python讲义','desc':'这是一本系统全面的python学习图书'}
bar(**my_dict)
print('*******')
#如果是下面的调用形式,不采用逆向参数收集将报错
# TypeError: bar() missing 2 required positional arguments: 'price' and 'desc'
bar(my_dict)
|
normal
|
{
"blob_id": "64fb006ea5ff0d101000dd4329b3d957a326ed1a",
"index": 2387,
"step-1": "def test(name, message):\n print('用户是:', name)\n print('欢迎消息是:', message)\n\n\n<mask token>\n",
"step-2": "def test(name, message):\n print('用户是:', name)\n print('欢迎消息是:', message)\n\n\n<mask token>\n\n\ndef foo(name, *nums):\n print('name参数:', name)\n print('nums参数:', nums)\n\n\n<mask token>\n\n\ndef bar(book, price, desc):\n print(book, '这本书的价格是:', price)\n print('描述信息是:', desc)\n\n\n<mask token>\n",
"step-3": "def test(name, message):\n print('用户是:', name)\n print('欢迎消息是:', message)\n\n\n<mask token>\ntest(*my_list)\nprint('*****')\n\n\ndef foo(name, *nums):\n print('name参数:', name)\n print('nums参数:', nums)\n\n\n<mask token>\nfoo('fkit', *my_tuple)\nprint('********')\nfoo(*my_tuple)\nprint('*******')\nfoo(my_tuple)\n\n\ndef bar(book, price, desc):\n print(book, '这本书的价格是:', price)\n print('描述信息是:', desc)\n\n\nprint('********')\n<mask token>\nbar(**my_dict)\nprint('*******')\nbar(my_dict)\n",
"step-4": "def test(name, message):\n print('用户是:', name)\n print('欢迎消息是:', message)\n\n\nmy_list = ['孙悟空', '欢迎来疯狂软件']\ntest(*my_list)\nprint('*****')\n\n\ndef foo(name, *nums):\n print('name参数:', name)\n print('nums参数:', nums)\n\n\nmy_tuple = 1, 2, 3\nfoo('fkit', *my_tuple)\nprint('********')\nfoo(*my_tuple)\nprint('*******')\nfoo(my_tuple)\n\n\ndef bar(book, price, desc):\n print(book, '这本书的价格是:', price)\n print('描述信息是:', desc)\n\n\nprint('********')\nmy_dict = {'price': 89, 'book': '疯狂python讲义', 'desc': '这是一本系统全面的python学习图书'}\nbar(**my_dict)\nprint('*******')\nbar(my_dict)\n",
"step-5": "def test(name,message):\n print(\"用户是:\" , name)\n print(\"欢迎消息是:\",message)\n\nmy_list = ['孙悟空','欢迎来疯狂软件']\ntest(*my_list)\nprint('*****')\n# ###########################\ndef foo(name,*nums):\n print(\"name参数:\",name)\n print(\"nums参数:\",nums)\nmy_tuple = (1,2,3)\n\nfoo('fkit',*my_tuple)\nprint('********')\nfoo(*my_tuple)\nprint('*******')\nfoo(my_tuple)\n#############################\n\ndef bar(book,price,desc):\n print(book,'这本书的价格是:',price)\n print('描述信息是:',desc)\n\nprint('********')\nmy_dict = {'price':89,'book':'疯狂python讲义','desc':'这是一本系统全面的python学习图书'}\nbar(**my_dict)\nprint('*******')\n#如果是下面的调用形式,不采用逆向参数收集将报错\n# TypeError: bar() missing 2 required positional arguments: 'price' and 'desc'\nbar(my_dict)",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def containsDuplicate(self, nums) ->bool:
d = {}
for elem in nums:
if elem in d:
return True
else:
d[elem] = 1
return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def containsDuplicate(self, nums) ->bool:
d = {}
for elem in nums:
if elem in d:
return True
else:
d[elem] = 1
return False
print(Solution().containsDuplicate([0]))
<|reserved_special_token_1|>
class Solution:
def containsDuplicate(self, nums) -> bool:
d = {} # store the elements which already exist
for elem in nums:
if elem in d:
return True
else:
d[elem] = 1
return False
print(Solution().containsDuplicate([0]))
|
flexible
|
{
"blob_id": "89256a38208be92f87115b110edc986cebc95306",
"index": 8440,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def containsDuplicate(self, nums) ->bool:\n d = {}\n for elem in nums:\n if elem in d:\n return True\n else:\n d[elem] = 1\n return False\n\n\n<mask token>\n",
"step-4": "class Solution:\n\n def containsDuplicate(self, nums) ->bool:\n d = {}\n for elem in nums:\n if elem in d:\n return True\n else:\n d[elem] = 1\n return False\n\n\nprint(Solution().containsDuplicate([0]))\n",
"step-5": "class Solution:\n def containsDuplicate(self, nums) -> bool:\n d = {} # store the elements which already exist\n\n for elem in nums:\n if elem in d:\n return True\n else:\n d[elem] = 1\n\n return False\n\nprint(Solution().containsDuplicate([0]))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#encoding=utf-8
import pytest
from frame_project.实战2.main_page import MainPage
class TestMian:
def test_mian(self):
MainPage().goto_marketpage().goto_search().search()
if __name__ == '__main__':
pytest.main(['test_case.py','-s','-v'])
|
normal
|
{
"blob_id": "e1751cc6f76f56e62cd02d61db65f1c27a4ff1b9",
"index": 7351,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestMian:\n\n def test_mian(self):\n MainPage().goto_marketpage().goto_search().search()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestMian:\n\n def test_mian(self):\n MainPage().goto_marketpage().goto_search().search()\n\n\nif __name__ == '__main__':\n pytest.main(['test_case.py', '-s', '-v'])\n",
"step-4": "import pytest\nfrom frame_project.实战2.main_page import MainPage\n\n\nclass TestMian:\n\n def test_mian(self):\n MainPage().goto_marketpage().goto_search().search()\n\n\nif __name__ == '__main__':\n pytest.main(['test_case.py', '-s', '-v'])\n",
"step-5": "#encoding=utf-8\nimport pytest\n\nfrom frame_project.实战2.main_page import MainPage\n\n\nclass TestMian:\n def test_mian(self):\n MainPage().goto_marketpage().goto_search().search()\n\nif __name__ == '__main__':\n pytest.main(['test_case.py','-s','-v'])\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LabeledArray:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LabeledArray:
@staticmethod
def get_label_for_indexes_upto(input_data, input_label, input_index):
df_input_data = pd.DataFrame(input_data)
df_labels = pd.DataFrame(input_label)
df_data_labels = pd.concat([df_input_data, df_labels], axis=1)
df_data_labels.columns = ['input_data', 'input_label']
df_data_labels.sort_values(by=['input_data'], ascending=True,
inplace=True)
return np.array(df_data_labels.iloc[:, 1].head(input_index))
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
class LabeledArray:
@staticmethod
def get_label_for_indexes_upto(input_data, input_label, input_index):
df_input_data = pd.DataFrame(input_data)
df_labels = pd.DataFrame(input_label)
df_data_labels = pd.concat([df_input_data, df_labels], axis=1)
df_data_labels.columns = ['input_data', 'input_label']
df_data_labels.sort_values(by=['input_data'], ascending=True,
inplace=True)
return np.array(df_data_labels.iloc[:, 1].head(input_index))
|
flexible
|
{
"blob_id": "0dea8675d8050a91c284a13bcbce6fd0943b604e",
"index": 5135,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass LabeledArray:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass LabeledArray:\n\n @staticmethod\n def get_label_for_indexes_upto(input_data, input_label, input_index):\n df_input_data = pd.DataFrame(input_data)\n df_labels = pd.DataFrame(input_label)\n df_data_labels = pd.concat([df_input_data, df_labels], axis=1)\n df_data_labels.columns = ['input_data', 'input_label']\n df_data_labels.sort_values(by=['input_data'], ascending=True,\n inplace=True)\n return np.array(df_data_labels.iloc[:, 1].head(input_index))\n",
"step-4": "import pandas as pd\nimport numpy as np\n\n\nclass LabeledArray:\n\n @staticmethod\n def get_label_for_indexes_upto(input_data, input_label, input_index):\n df_input_data = pd.DataFrame(input_data)\n df_labels = pd.DataFrame(input_label)\n df_data_labels = pd.concat([df_input_data, df_labels], axis=1)\n df_data_labels.columns = ['input_data', 'input_label']\n df_data_labels.sort_values(by=['input_data'], ascending=True,\n inplace=True)\n return np.array(df_data_labels.iloc[:, 1].head(input_index))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class DongPhamTestCli(CLI):
<|reserved_special_token_0|>
def __init__(self, _mininet, _env):
self.env = _env
self.net = _mininet
self._testCLI = {}
CLI.__init__(self, _mininet)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def do_all(self, _):
__wait__(self.do_ips, self.do_weights, self.do_costs, self.
do_routes, self.do_paths, self.do_flows, self.do_stats)
def do_info(self, line):
locals = self.getLocals()
_nodes = line.split()
display.section('All functions')
if not _nodes:
_nodes = self.mn.keys()
for n in _nodes:
if not locals.__contains__(n):
break
obj = locals[n]
display.subsection('%s (%s)' % (n, obj.IP()))
print(dir(obj))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def do_costs(self, _):
switches = self.mn.topo.switches()
weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.
mn.topo._slinks]
display.section('Total path costs')
print('From\\To'), '\t'.join(switches)
for start in switches:
print(start + '\t'),
for end in switches:
if start == end:
print('--\t'),
continue
route = get_routing_decision(start, weights, end)
cost = get_route_cost([route])
if isDirect(route):
print(cost),
else:
print(brightLabel(cost)),
print('\t'),
print('')
def do_routes(self, _):
switches = self.mn.topo.switches()
weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.
mn.topo._slinks]
display.section('First-Hop with lowest cost')
print('From\\To\t'), '\t'.join(switches)
for start in switches:
print(start + '\t'),
for end in switches:
if start == end:
print('--\t'),
continue
route = get_routing_decision(start, weights, end)
if isDirect(route):
print(end),
else:
print(brightLabel(route[1])),
print('\t'),
print('')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def do_arps(self, _line):
display.section('ARP caches of all hosts')
sh = 'arp -a'
for h in self.mn.hosts:
h.cmdPrint(sh)
def do_netstats(self, _line):
display.section('Routing Tables')
sh = 'netstat -rn'
display.subsection('Hosts')
for h in self.mn.hosts:
h.cmdPrint(sh)
display.subsection('Controller')
self.doPrint(sh)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def do_xxx_xterms(self, _line):
locals = self.getLocals()
terms = makeTerms([locals[name] for name in ['h1', 'h2', 's1', 's2']])
self.mn.terms += terms
def do_xxx_sharks(self, line):
display.section('Launching Wireshark')
sh = 'sudo wireshark &'
locals = self.getLocals()
_nodes = line.split()
if not _nodes:
_nodes = self.mn.keys()
for n in _nodes:
if not locals.__contains__(n):
break
obj = locals[n]
obj.cmdPrint(sh)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DongPhamTestCli(CLI):
<|reserved_special_token_0|>
def __init__(self, _mininet, _env):
self.env = _env
self.net = _mininet
self._testCLI = {}
CLI.__init__(self, _mininet)
<|reserved_special_token_0|>
def doPrint(self, shell):
display.cmdHighlight(True)
self.mn.controller.cmdPrint(shell)
display.cmdHighlight(False)
def do_all(self, _):
__wait__(self.do_ips, self.do_weights, self.do_costs, self.
do_routes, self.do_paths, self.do_flows, self.do_stats)
def do_info(self, line):
locals = self.getLocals()
_nodes = line.split()
display.section('All functions')
if not _nodes:
_nodes = self.mn.keys()
for n in _nodes:
if not locals.__contains__(n):
break
obj = locals[n]
display.subsection('%s (%s)' % (n, obj.IP()))
print(dir(obj))
def do_ips(self, _):
display.section('IP Addresses')
locals = self.getLocals()
def showIP(*keys):
for key in keys:
display.message('%s\t%s' % (key.name, key.IP()))
def showAll(*keys):
for key in keys:
display.message('%s\t%s\t%s' % (key.name, key.IP(), key.MAC()))
display.subsection('Controllers')
for c in self.mn.controllers:
showIP(locals[c.name])
display.subsection('Switches')
for s in self.mn.switches:
showIP(locals[s.name])
display.subsection('Hosts')
for h in self.mn.hosts:
showAll(locals[h.name])
def do_weights(self, _):
display.section('Weights')
log.infoln('Link\t\tWeight')
log.infoln('--------------------')
for i, j, w in self.mn.topo._slinks:
log.infoln('{s%s, s%s}\t%s' % (i, j, w))
def do_costs(self, _):
switches = self.mn.topo.switches()
weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.
mn.topo._slinks]
display.section('Total path costs')
print('From\\To'), '\t'.join(switches)
for start in switches:
print(start + '\t'),
for end in switches:
if start == end:
print('--\t'),
continue
route = get_routing_decision(start, weights, end)
cost = get_route_cost([route])
if isDirect(route):
print(cost),
else:
print(brightLabel(cost)),
print('\t'),
print('')
def do_routes(self, _):
switches = self.mn.topo.switches()
weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.
mn.topo._slinks]
display.section('First-Hop with lowest cost')
print('From\\To\t'), '\t'.join(switches)
for start in switches:
print(start + '\t'),
for end in switches:
if start == end:
print('--\t'),
continue
route = get_routing_decision(start, weights, end)
if isDirect(route):
print(end),
else:
print(brightLabel(route[1])),
print('\t'),
print('')
def do_paths(self, line):
switches = self.mn.topo.switches()
weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.
mn.topo._slinks]
display.section('Least-cost paths to other nodes')
display.message('From -> To\tCost\t\tFull Shortest Path')
for start in switches:
display.subsection('%s' % start)
for end in switches:
if start == end:
continue
route = get_routing_decision(start, weights, end)
cost = get_route_cost([route])
display.message('%s -> %s\t%s\t\t%s' % (start, end, cost,
route))
def do_flows(self, _line):
display.section('Showing all flows of all OVSSwitches')
for s in self.mn.switches:
self.doPrint('sudo ovs-ofctl dump-flows %s' % s)
def do_deleteFlows(self, _line):
display.section('Deleting all flows of all OVSSwitches')
for s in self.mn.switches:
self.doPrint('sudo ovs-ofctl del-flows %s' % s)
def do_stats(self, _):
display.section('OpenFlow: Sent/Received Packets')
display.message(
'Packets passing through a switch on the way host with IP address = "nw_dst"'
)
for s in self.mn.switches:
display.subsection('%s - Traffic' % s.name)
self.doPrint(
'sudo ovs-ofctl dump-flows %s | grep -e "n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]" -To'
% s.name)
def do_arps(self, _line):
display.section('ARP caches of all hosts')
sh = 'arp -a'
for h in self.mn.hosts:
h.cmdPrint(sh)
def do_netstats(self, _line):
display.section('Routing Tables')
sh = 'netstat -rn'
display.subsection('Hosts')
for h in self.mn.hosts:
h.cmdPrint(sh)
display.subsection('Controller')
self.doPrint(sh)
def do_ifconfigs(self, _line):
display.section('Showing Interface Configuration')
sh = 'ifconfig -a'
display.subsection('Hosts')
for h in self.mn.hosts:
h.cmdPrint(sh)
display.subsection('Controller')
self.doPrint(sh)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def do_xxx_xterms(self, _line):
locals = self.getLocals()
terms = makeTerms([locals[name] for name in ['h1', 'h2', 's1', 's2']])
self.mn.terms += terms
def do_xxx_sharks(self, line):
display.section('Launching Wireshark')
sh = 'sudo wireshark &'
locals = self.getLocals()
_nodes = line.split()
if not _nodes:
_nodes = self.mn.keys()
for n in _nodes:
if not locals.__contains__(n):
break
obj = locals[n]
obj.cmdPrint(sh)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DongPhamTestCli(CLI):
<|reserved_special_token_0|>
def __init__(self, _mininet, _env):
self.env = _env
self.net = _mininet
self._testCLI = {}
CLI.__init__(self, _mininet)
def do(self, shell, quiet=False):
if quiet:
return self.mn.controller.cmd(shell)
return self.mn.controller.cmdPrint(shell)
def doPrint(self, shell):
display.cmdHighlight(True)
self.mn.controller.cmdPrint(shell)
display.cmdHighlight(False)
def do_all(self, _):
__wait__(self.do_ips, self.do_weights, self.do_costs, self.
do_routes, self.do_paths, self.do_flows, self.do_stats)
def do_info(self, line):
locals = self.getLocals()
_nodes = line.split()
display.section('All functions')
if not _nodes:
_nodes = self.mn.keys()
for n in _nodes:
if not locals.__contains__(n):
break
obj = locals[n]
display.subsection('%s (%s)' % (n, obj.IP()))
print(dir(obj))
def do_ips(self, _):
display.section('IP Addresses')
locals = self.getLocals()
def showIP(*keys):
for key in keys:
display.message('%s\t%s' % (key.name, key.IP()))
def showAll(*keys):
for key in keys:
display.message('%s\t%s\t%s' % (key.name, key.IP(), key.MAC()))
display.subsection('Controllers')
for c in self.mn.controllers:
showIP(locals[c.name])
display.subsection('Switches')
for s in self.mn.switches:
showIP(locals[s.name])
display.subsection('Hosts')
for h in self.mn.hosts:
showAll(locals[h.name])
def do_weights(self, _):
display.section('Weights')
log.infoln('Link\t\tWeight')
log.infoln('--------------------')
for i, j, w in self.mn.topo._slinks:
log.infoln('{s%s, s%s}\t%s' % (i, j, w))
def do_costs(self, _):
switches = self.mn.topo.switches()
weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.
mn.topo._slinks]
display.section('Total path costs')
print('From\\To'), '\t'.join(switches)
for start in switches:
print(start + '\t'),
for end in switches:
if start == end:
print('--\t'),
continue
route = get_routing_decision(start, weights, end)
cost = get_route_cost([route])
if isDirect(route):
print(cost),
else:
print(brightLabel(cost)),
print('\t'),
print('')
def do_routes(self, _):
switches = self.mn.topo.switches()
weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.
mn.topo._slinks]
display.section('First-Hop with lowest cost')
print('From\\To\t'), '\t'.join(switches)
for start in switches:
print(start + '\t'),
for end in switches:
if start == end:
print('--\t'),
continue
route = get_routing_decision(start, weights, end)
if isDirect(route):
print(end),
else:
print(brightLabel(route[1])),
print('\t'),
print('')
def do_paths(self, line):
switches = self.mn.topo.switches()
weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.
mn.topo._slinks]
display.section('Least-cost paths to other nodes')
display.message('From -> To\tCost\t\tFull Shortest Path')
for start in switches:
display.subsection('%s' % start)
for end in switches:
if start == end:
continue
route = get_routing_decision(start, weights, end)
cost = get_route_cost([route])
display.message('%s -> %s\t%s\t\t%s' % (start, end, cost,
route))
def do_flows(self, _line):
display.section('Showing all flows of all OVSSwitches')
for s in self.mn.switches:
self.doPrint('sudo ovs-ofctl dump-flows %s' % s)
def do_deleteFlows(self, _line):
display.section('Deleting all flows of all OVSSwitches')
for s in self.mn.switches:
self.doPrint('sudo ovs-ofctl del-flows %s' % s)
def do_stats(self, _):
display.section('OpenFlow: Sent/Received Packets')
display.message(
'Packets passing through a switch on the way host with IP address = "nw_dst"'
)
for s in self.mn.switches:
display.subsection('%s - Traffic' % s.name)
self.doPrint(
'sudo ovs-ofctl dump-flows %s | grep -e "n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]" -To'
% s.name)
def do_arps(self, _line):
display.section('ARP caches of all hosts')
sh = 'arp -a'
for h in self.mn.hosts:
h.cmdPrint(sh)
def do_netstats(self, _line):
display.section('Routing Tables')
sh = 'netstat -rn'
display.subsection('Hosts')
for h in self.mn.hosts:
h.cmdPrint(sh)
display.subsection('Controller')
self.doPrint(sh)
def do_ifconfigs(self, _line):
display.section('Showing Interface Configuration')
sh = 'ifconfig -a'
display.subsection('Hosts')
for h in self.mn.hosts:
h.cmdPrint(sh)
display.subsection('Controller')
self.doPrint(sh)
def do_xxx_testFlows1(self, _line):
display.section('Adding test flows to Tiny Network')
self.do(
'sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.2,actions=output:1')
self.do(
'sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.1,actions=output:4')
self.do(
'sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.1,actions=output:1')
self.do(
'sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.2,actions=output:3')
self.do(
'sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.2,actions=output:1')
self.do(
'sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.1,actions=output:4')
self.do(
'sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.1,actions=output:1')
self.do(
'sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.2,actions=output:3')
def do_xxx_traffic(self, _line):
for h in self.mn.hosts:
h.cmdPrint('tcpdump -i %s' % h.defaultIntf().name)
def do_xxx_xterms(self, _line):
locals = self.getLocals()
terms = makeTerms([locals[name] for name in ['h1', 'h2', 's1', 's2']])
self.mn.terms += terms
def do_xxx_sharks(self, line):
display.section('Launching Wireshark')
sh = 'sudo wireshark &'
locals = self.getLocals()
_nodes = line.split()
if not _nodes:
_nodes = self.mn.keys()
for n in _nodes:
if not locals.__contains__(n):
break
obj = locals[n]
obj.cmdPrint(sh)
<|reserved_special_token_1|>
from mininet.cli import CLI
from mininet.term import makeTerms
from mininet.util import irange
from log import log
from utils import UITextStyle, display
from dijkstra import get_routing_decision, get_route_cost
def isDirect(route):
return len(route) == 2
def brightLabel(text):
return UITextStyle.BackgroundColor.purple + str(text
) + UITextStyle.Format.reset
def __wait__(*commandList):
steps = len(commandList)
for i in range(steps):
commandList[i]('')
display.prompt('\n\nPress <Return> to continue (%s/%s)' % (i + 1,
steps))
try:
x = input('')
except:
x = ''
class DongPhamTestCli(CLI):
prompt = 'dongpham> '
def __init__(self, _mininet, _env):
self.env = _env
self.net = _mininet
self._testCLI = {}
CLI.__init__(self, _mininet)
def do(self, shell, quiet=False):
if quiet:
return self.mn.controller.cmd(shell)
return self.mn.controller.cmdPrint(shell)
def doPrint(self, shell):
display.cmdHighlight(True)
self.mn.controller.cmdPrint(shell)
display.cmdHighlight(False)
def do_all(self, _):
__wait__(self.do_ips, self.do_weights, self.do_costs, self.
do_routes, self.do_paths, self.do_flows, self.do_stats)
def do_info(self, line):
locals = self.getLocals()
_nodes = line.split()
display.section('All functions')
if not _nodes:
_nodes = self.mn.keys()
for n in _nodes:
if not locals.__contains__(n):
break
obj = locals[n]
display.subsection('%s (%s)' % (n, obj.IP()))
print(dir(obj))
def do_ips(self, _):
display.section('IP Addresses')
locals = self.getLocals()
def showIP(*keys):
for key in keys:
display.message('%s\t%s' % (key.name, key.IP()))
def showAll(*keys):
for key in keys:
display.message('%s\t%s\t%s' % (key.name, key.IP(), key.MAC()))
display.subsection('Controllers')
for c in self.mn.controllers:
showIP(locals[c.name])
display.subsection('Switches')
for s in self.mn.switches:
showIP(locals[s.name])
display.subsection('Hosts')
for h in self.mn.hosts:
showAll(locals[h.name])
def do_weights(self, _):
display.section('Weights')
log.infoln('Link\t\tWeight')
log.infoln('--------------------')
for i, j, w in self.mn.topo._slinks:
log.infoln('{s%s, s%s}\t%s' % (i, j, w))
def do_costs(self, _):
switches = self.mn.topo.switches()
weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.
mn.topo._slinks]
display.section('Total path costs')
print('From\\To'), '\t'.join(switches)
for start in switches:
print(start + '\t'),
for end in switches:
if start == end:
print('--\t'),
continue
route = get_routing_decision(start, weights, end)
cost = get_route_cost([route])
if isDirect(route):
print(cost),
else:
print(brightLabel(cost)),
print('\t'),
print('')
def do_routes(self, _):
switches = self.mn.topo.switches()
weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.
mn.topo._slinks]
display.section('First-Hop with lowest cost')
print('From\\To\t'), '\t'.join(switches)
for start in switches:
print(start + '\t'),
for end in switches:
if start == end:
print('--\t'),
continue
route = get_routing_decision(start, weights, end)
if isDirect(route):
print(end),
else:
print(brightLabel(route[1])),
print('\t'),
print('')
def do_paths(self, line):
switches = self.mn.topo.switches()
weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.
mn.topo._slinks]
display.section('Least-cost paths to other nodes')
display.message('From -> To\tCost\t\tFull Shortest Path')
for start in switches:
display.subsection('%s' % start)
for end in switches:
if start == end:
continue
route = get_routing_decision(start, weights, end)
cost = get_route_cost([route])
display.message('%s -> %s\t%s\t\t%s' % (start, end, cost,
route))
def do_flows(self, _line):
display.section('Showing all flows of all OVSSwitches')
for s in self.mn.switches:
self.doPrint('sudo ovs-ofctl dump-flows %s' % s)
def do_deleteFlows(self, _line):
display.section('Deleting all flows of all OVSSwitches')
for s in self.mn.switches:
self.doPrint('sudo ovs-ofctl del-flows %s' % s)
def do_stats(self, _):
display.section('OpenFlow: Sent/Received Packets')
display.message(
'Packets passing through a switch on the way host with IP address = "nw_dst"'
)
for s in self.mn.switches:
display.subsection('%s - Traffic' % s.name)
self.doPrint(
'sudo ovs-ofctl dump-flows %s | grep -e "n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]" -To'
% s.name)
def do_arps(self, _line):
display.section('ARP caches of all hosts')
sh = 'arp -a'
for h in self.mn.hosts:
h.cmdPrint(sh)
def do_netstats(self, _line):
display.section('Routing Tables')
sh = 'netstat -rn'
display.subsection('Hosts')
for h in self.mn.hosts:
h.cmdPrint(sh)
display.subsection('Controller')
self.doPrint(sh)
def do_ifconfigs(self, _line):
display.section('Showing Interface Configuration')
sh = 'ifconfig -a'
display.subsection('Hosts')
for h in self.mn.hosts:
h.cmdPrint(sh)
display.subsection('Controller')
self.doPrint(sh)
def do_xxx_testFlows1(self, _line):
display.section('Adding test flows to Tiny Network')
self.do(
'sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.2,actions=output:1')
self.do(
'sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.1,actions=output:4')
self.do(
'sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.1,actions=output:1')
self.do(
'sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.2,actions=output:3')
self.do(
'sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.2,actions=output:1')
self.do(
'sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.1,actions=output:4')
self.do(
'sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.1,actions=output:1')
self.do(
'sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.2,actions=output:3')
def do_xxx_traffic(self, _line):
for h in self.mn.hosts:
h.cmdPrint('tcpdump -i %s' % h.defaultIntf().name)
def do_xxx_xterms(self, _line):
locals = self.getLocals()
terms = makeTerms([locals[name] for name in ['h1', 'h2', 's1', 's2']])
self.mn.terms += terms
def do_xxx_sharks(self, line):
display.section('Launching Wireshark')
sh = 'sudo wireshark &'
locals = self.getLocals()
_nodes = line.split()
if not _nodes:
_nodes = self.mn.keys()
for n in _nodes:
if not locals.__contains__(n):
break
obj = locals[n]
obj.cmdPrint(sh)
<|reserved_special_token_1|>
from mininet.cli import CLI
from mininet.term import makeTerms
from mininet.util import irange
from log import log
from utils import (UITextStyle, display)
from dijkstra import (get_routing_decision, get_route_cost)
# Check if route directly connects two switches
def isDirect(route):
return (len(route) == 2)
# Add purple background for indirect routes
def brightLabel(text):
return (UITextStyle.BackgroundColor.purple + str(text) + UITextStyle.Format.reset)
# Execute commands one by one
def __wait__(*commandList):
steps = len(commandList)
for i in range(steps):
commandList[i]('')
display.prompt('\n\nPress <Return> to continue (%s/%s)' %
(i + 1, steps))
try:
x = input('')
except:
x = ''
# Mininet Command Line Interface extension
class DongPhamTestCli(CLI):
prompt = 'dongpham> '
def __init__(self, _mininet, _env):
self.env = _env
self.net = _mininet
self._testCLI = {}
CLI.__init__(self, _mininet)
# Tell the controller to do a command
def do(self, shell, quiet=False):
if (quiet):
return self.mn.controller.cmd(shell)
return self.mn.controller.cmdPrint(shell)
def doPrint(self, shell):
display.cmdHighlight(True)
self.mn.controller.cmdPrint(shell)
display.cmdHighlight(False)
# Run all commands in the wait list
def do_all(self, _):
__wait__(
# Show ip
self.do_ips,
# Routing commands
self.do_weights, self.do_costs, self.do_routes, self.do_paths,
# Flow commands
self.do_flows, self.do_stats
)
# Show object info
# info [node1, node2, ...]
def do_info(self, line):
locals = self.getLocals()
_nodes = line.split()
display.section("All functions")
if not (_nodes):
_nodes = self.mn.keys()
for n in _nodes:
if not (locals.__contains__(n)):
break
obj = locals[n]
display.subsection('%s (%s)' % (n, obj.IP()))
print(dir(obj))
# Show IP addresses
# ips
def do_ips(self, _):
display.section("IP Addresses")
locals = self.getLocals()
def showIP(*keys):
for key in keys:
display.message('%s\t%s' % (key.name, key.IP()))
def showAll(*keys):
for key in keys:
display.message('%s\t%s\t%s' % (key.name, key.IP(), key.MAC()))
# For each node
display.subsection('Controllers')
for c in self.mn.controllers:
showIP(locals[c.name])
display.subsection('Switches')
for s in self.mn.switches:
showIP(locals[s.name])
display.subsection('Hosts')
for h in self.mn.hosts:
showAll(locals[h.name])
#MARK: - Routing
# Show link weights
# weights
def do_weights(self, _):
display.section("Weights")
log.infoln('Link\t\tWeight')
log.infoln('--------------------')
for (i, j, w) in self.mn.topo._slinks:
log.infoln('{s%s, s%s}\t%s' % (i, j, w))
# Show costs of reaching every other switch
# costs
def do_costs(self, _):
# Algorithm input
switches = self.mn.topo.switches()
weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])
for i in self.mn.topo._slinks]
# Print cost of reaching 'end' switch from 'start' switch
display.section("Total path costs")
print('From\\To'), ('\t'.join(switches))
for start in switches:
print(start + '\t'),
for end in switches:
if (start == end):
print('--\t'),
continue
route = get_routing_decision(start, weights, end)
cost = get_route_cost([route])
if (isDirect(route)):
# Print result for directly connected switches
print(cost),
else:
# Print and highlight routes with intermediate switches
print(brightLabel(cost)),
print('\t'),
print('')
# Show least-cost paths from every switch to every other switch
# routes
def do_routes(self, _):
# Algorithm input
switches = self.mn.topo.switches()
weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])
for i in self.mn.topo._slinks]
# Print next hop switch
display.section("First-Hop with lowest cost")
print('From\\To\t'), ('\t'.join(switches))
for start in switches:
print(start + '\t'),
for end in switches:
if (start == end):
print('--\t'),
continue
route = get_routing_decision(start, weights, end)
if (isDirect(route)):
# Print result for directly connected switches
print(end),
else:
# Print and highlight routes with intermediate switches
print(brightLabel(route[1])),
print('\t'),
print('')
# Show the complete shortest path from one switch to every other switch
# paths
def do_paths(self, line):
# Algorithm input
switches = self.mn.topo.switches()
weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])
for i in self.mn.topo._slinks]
# Least cost paths to every node
display.section("Least-cost paths to other nodes")
display.message('From -> To\tCost\t\tFull Shortest Path')
for start in switches:
display.subsection('%s' % start)
for end in switches:
if (start == end):
continue
route = get_routing_decision(start, weights, end)
cost = get_route_cost([route])
display.message('%s -> %s\t%s\t\t%s' %
(start, end, cost, route))
#MARK: - OpenFlow
# Display flows
# flows
def do_flows(self, _line):
display.section("Showing all flows of all OVSSwitches")
for s in self.mn.switches:
self.doPrint('sudo ovs-ofctl dump-flows %s' % s)
# Delete flows
# deleteFlows
def do_deleteFlows(self, _line):
display.section("Deleting all flows of all OVSSwitches")
for s in self.mn.switches:
self.doPrint('sudo ovs-ofctl del-flows %s' % s)
# Display flow statistics
# stats
def do_stats(self, _):
display.section("OpenFlow: Sent/Received Packets")
display.message(
'Packets passing through a switch on the way host with IP address = "nw_dst"')
for s in self.mn.switches:
display.subsection('%s - Traffic' % s.name)
self.doPrint(
'sudo ovs-ofctl dump-flows %s | grep -e "n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]" -To' % (s.name))
# MARK: - Run on every node
# arps
def do_arps(self, _line):
display.section("ARP caches of all hosts")
sh = 'arp -a'
for h in self.mn.hosts:
h.cmdPrint(sh)
# netstats
def do_netstats(self, _line):
display.section("Routing Tables")
sh = 'netstat -rn'
display.subsection('Hosts')
for h in self.mn.hosts:
h.cmdPrint(sh)
display.subsection('Controller')
self.doPrint(sh)
# ifconfigs
def do_ifconfigs(self, _line):
display.section("Showing Interface Configuration")
sh = 'ifconfig -a'
display.subsection('Hosts')
for h in self.mn.hosts:
h.cmdPrint(sh)
display.subsection('Controller')
self.doPrint(sh)
#MARK: - Other
def do_xxx_testFlows1(self, _line):
display.section("Adding test flows to Tiny Network")
self.do('sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.2,actions=output:1')
self.do('sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.1,actions=output:4')
self.do('sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.1,actions=output:1')
self.do('sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.2,actions=output:3')
self.do('sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.2,actions=output:1')
self.do('sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.1,actions=output:4')
self.do('sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.1,actions=output:1')
self.do('sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.2,actions=output:3')
def do_xxx_traffic(self, _line):
# display.section("Monitoring sent and received packets of all hosts")
for h in self.mn.hosts:
h.cmdPrint('tcpdump -i %s' % h.defaultIntf().name)
def do_xxx_xterms(self, _line):
locals = self.getLocals()
terms = makeTerms([locals[name]
for name in ['h1', 'h2', 's1', 's2']])
self.mn.terms += terms
def do_xxx_sharks(self, line):
display.section("Launching Wireshark")
sh = 'sudo wireshark &'
locals = self.getLocals()
_nodes = line.split()
if not (_nodes):
_nodes = self.mn.keys()
for n in _nodes:
if not (locals.__contains__(n)):
break
obj = locals[n]
obj.cmdPrint(sh)
|
flexible
|
{
"blob_id": "7636925982434b12307383ba7b01f931f7ea6e24",
"index": 5927,
"step-1": "<mask token>\n\n\nclass DongPhamTestCli(CLI):\n <mask token>\n\n def __init__(self, _mininet, _env):\n self.env = _env\n self.net = _mininet\n self._testCLI = {}\n CLI.__init__(self, _mininet)\n <mask token>\n <mask token>\n\n def do_all(self, _):\n __wait__(self.do_ips, self.do_weights, self.do_costs, self.\n do_routes, self.do_paths, self.do_flows, self.do_stats)\n\n def do_info(self, line):\n locals = self.getLocals()\n _nodes = line.split()\n display.section('All functions')\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n display.subsection('%s (%s)' % (n, obj.IP()))\n print(dir(obj))\n <mask token>\n <mask token>\n\n def do_costs(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Total path costs')\n print('From\\\\To'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n if isDirect(route):\n print(cost),\n else:\n print(brightLabel(cost)),\n print('\\t'),\n print('')\n\n def do_routes(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('First-Hop with lowest cost')\n print('From\\\\To\\t'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n if isDirect(route):\n print(end),\n else:\n print(brightLabel(route[1])),\n print('\\t'),\n print('')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def do_arps(self, _line):\n display.section('ARP caches of all hosts')\n sh = 'arp -a'\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n\n def do_netstats(self, _line):\n display.section('Routing Tables')\n sh = 'netstat -rn'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n <mask token>\n <mask token>\n <mask token>\n\n def do_xxx_xterms(self, _line):\n locals = self.getLocals()\n terms = makeTerms([locals[name] for name in ['h1', 'h2', 's1', 's2']])\n self.mn.terms += terms\n\n def do_xxx_sharks(self, line):\n display.section('Launching Wireshark')\n sh = 'sudo wireshark &'\n locals = self.getLocals()\n _nodes = line.split()\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n obj.cmdPrint(sh)\n",
"step-2": "<mask token>\n\n\nclass DongPhamTestCli(CLI):\n <mask token>\n\n def __init__(self, _mininet, _env):\n self.env = _env\n self.net = _mininet\n self._testCLI = {}\n CLI.__init__(self, _mininet)\n <mask token>\n\n def doPrint(self, shell):\n display.cmdHighlight(True)\n self.mn.controller.cmdPrint(shell)\n display.cmdHighlight(False)\n\n def do_all(self, _):\n __wait__(self.do_ips, self.do_weights, self.do_costs, self.\n do_routes, self.do_paths, self.do_flows, self.do_stats)\n\n def do_info(self, line):\n locals = self.getLocals()\n _nodes = line.split()\n display.section('All functions')\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n display.subsection('%s (%s)' % (n, obj.IP()))\n print(dir(obj))\n\n def do_ips(self, _):\n display.section('IP Addresses')\n locals = self.getLocals()\n\n def showIP(*keys):\n for key in keys:\n display.message('%s\\t%s' % (key.name, key.IP()))\n\n def showAll(*keys):\n for key in keys:\n display.message('%s\\t%s\\t%s' % (key.name, key.IP(), key.MAC()))\n display.subsection('Controllers')\n for c in self.mn.controllers:\n showIP(locals[c.name])\n display.subsection('Switches')\n for s in self.mn.switches:\n showIP(locals[s.name])\n display.subsection('Hosts')\n for h in self.mn.hosts:\n showAll(locals[h.name])\n\n def do_weights(self, _):\n display.section('Weights')\n log.infoln('Link\\t\\tWeight')\n log.infoln('--------------------')\n for i, j, w in self.mn.topo._slinks:\n log.infoln('{s%s, s%s}\\t%s' % (i, j, w))\n\n def do_costs(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Total path costs')\n print('From\\\\To'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n if isDirect(route):\n print(cost),\n else:\n print(brightLabel(cost)),\n print('\\t'),\n print('')\n\n def do_routes(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('First-Hop with lowest cost')\n print('From\\\\To\\t'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n if isDirect(route):\n print(end),\n else:\n print(brightLabel(route[1])),\n print('\\t'),\n print('')\n\n def do_paths(self, line):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Least-cost paths to other nodes')\n display.message('From -> To\\tCost\\t\\tFull Shortest Path')\n for start in switches:\n display.subsection('%s' % start)\n for end in switches:\n if start == end:\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n display.message('%s -> %s\\t%s\\t\\t%s' % (start, end, cost,\n route))\n\n def do_flows(self, _line):\n display.section('Showing all flows of all OVSSwitches')\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl dump-flows %s' % s)\n\n def do_deleteFlows(self, _line):\n display.section('Deleting all flows of all OVSSwitches')\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl del-flows %s' % s)\n\n def do_stats(self, _):\n display.section('OpenFlow: Sent/Received Packets')\n display.message(\n 'Packets passing through a switch on the way host with IP address = \"nw_dst\"'\n )\n for s in self.mn.switches:\n display.subsection('%s - Traffic' % s.name)\n self.doPrint(\n 'sudo ovs-ofctl dump-flows %s | grep -e \"n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]\" -To'\n % s.name)\n\n def do_arps(self, _line):\n display.section('ARP caches of all hosts')\n sh = 'arp -a'\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n\n def do_netstats(self, _line):\n display.section('Routing Tables')\n sh = 'netstat -rn'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n def do_ifconfigs(self, _line):\n display.section('Showing Interface Configuration')\n sh = 'ifconfig -a'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n <mask token>\n <mask token>\n\n def do_xxx_xterms(self, _line):\n locals = self.getLocals()\n terms = makeTerms([locals[name] for name in ['h1', 'h2', 's1', 's2']])\n self.mn.terms += terms\n\n def do_xxx_sharks(self, line):\n display.section('Launching Wireshark')\n sh = 'sudo wireshark &'\n locals = self.getLocals()\n _nodes = line.split()\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n obj.cmdPrint(sh)\n",
"step-3": "<mask token>\n\n\nclass DongPhamTestCli(CLI):\n <mask token>\n\n def __init__(self, _mininet, _env):\n self.env = _env\n self.net = _mininet\n self._testCLI = {}\n CLI.__init__(self, _mininet)\n\n def do(self, shell, quiet=False):\n if quiet:\n return self.mn.controller.cmd(shell)\n return self.mn.controller.cmdPrint(shell)\n\n def doPrint(self, shell):\n display.cmdHighlight(True)\n self.mn.controller.cmdPrint(shell)\n display.cmdHighlight(False)\n\n def do_all(self, _):\n __wait__(self.do_ips, self.do_weights, self.do_costs, self.\n do_routes, self.do_paths, self.do_flows, self.do_stats)\n\n def do_info(self, line):\n locals = self.getLocals()\n _nodes = line.split()\n display.section('All functions')\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n display.subsection('%s (%s)' % (n, obj.IP()))\n print(dir(obj))\n\n def do_ips(self, _):\n display.section('IP Addresses')\n locals = self.getLocals()\n\n def showIP(*keys):\n for key in keys:\n display.message('%s\\t%s' % (key.name, key.IP()))\n\n def showAll(*keys):\n for key in keys:\n display.message('%s\\t%s\\t%s' % (key.name, key.IP(), key.MAC()))\n display.subsection('Controllers')\n for c in self.mn.controllers:\n showIP(locals[c.name])\n display.subsection('Switches')\n for s in self.mn.switches:\n showIP(locals[s.name])\n display.subsection('Hosts')\n for h in self.mn.hosts:\n showAll(locals[h.name])\n\n def do_weights(self, _):\n display.section('Weights')\n log.infoln('Link\\t\\tWeight')\n log.infoln('--------------------')\n for i, j, w in self.mn.topo._slinks:\n log.infoln('{s%s, s%s}\\t%s' % (i, j, w))\n\n def do_costs(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Total path costs')\n print('From\\\\To'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n if isDirect(route):\n print(cost),\n else:\n print(brightLabel(cost)),\n print('\\t'),\n print('')\n\n def do_routes(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('First-Hop with lowest cost')\n print('From\\\\To\\t'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n if isDirect(route):\n print(end),\n else:\n print(brightLabel(route[1])),\n print('\\t'),\n print('')\n\n def do_paths(self, line):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Least-cost paths to other nodes')\n display.message('From -> To\\tCost\\t\\tFull Shortest Path')\n for start in switches:\n display.subsection('%s' % start)\n for end in switches:\n if start == end:\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n display.message('%s -> %s\\t%s\\t\\t%s' % (start, end, cost,\n route))\n\n def do_flows(self, _line):\n display.section('Showing all flows of all OVSSwitches')\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl dump-flows %s' % s)\n\n def do_deleteFlows(self, _line):\n display.section('Deleting all flows of all OVSSwitches')\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl del-flows %s' % s)\n\n def do_stats(self, _):\n display.section('OpenFlow: Sent/Received Packets')\n display.message(\n 'Packets passing through a switch on the way host with IP address = \"nw_dst\"'\n )\n for s in self.mn.switches:\n display.subsection('%s - Traffic' % s.name)\n self.doPrint(\n 'sudo ovs-ofctl dump-flows %s | grep -e \"n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]\" -To'\n % s.name)\n\n def do_arps(self, _line):\n display.section('ARP caches of all hosts')\n sh = 'arp -a'\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n\n def do_netstats(self, _line):\n display.section('Routing Tables')\n sh = 'netstat -rn'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n def do_ifconfigs(self, _line):\n display.section('Showing Interface Configuration')\n sh = 'ifconfig -a'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n def do_xxx_testFlows1(self, _line):\n display.section('Adding test flows to Tiny Network')\n self.do(\n 'sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.2,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.1,actions=output:4')\n self.do(\n 'sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.1,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.2,actions=output:3')\n self.do(\n 'sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.2,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.1,actions=output:4')\n self.do(\n 'sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.1,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.2,actions=output:3')\n\n def do_xxx_traffic(self, _line):\n for h in self.mn.hosts:\n h.cmdPrint('tcpdump -i %s' % h.defaultIntf().name)\n\n def do_xxx_xterms(self, _line):\n locals = self.getLocals()\n terms = makeTerms([locals[name] for name in ['h1', 'h2', 's1', 's2']])\n self.mn.terms += terms\n\n def do_xxx_sharks(self, line):\n display.section('Launching Wireshark')\n sh = 'sudo wireshark &'\n locals = self.getLocals()\n _nodes = line.split()\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n obj.cmdPrint(sh)\n",
"step-4": "from mininet.cli import CLI\nfrom mininet.term import makeTerms\nfrom mininet.util import irange\nfrom log import log\nfrom utils import UITextStyle, display\nfrom dijkstra import get_routing_decision, get_route_cost\n\n\ndef isDirect(route):\n return len(route) == 2\n\n\ndef brightLabel(text):\n return UITextStyle.BackgroundColor.purple + str(text\n ) + UITextStyle.Format.reset\n\n\ndef __wait__(*commandList):\n steps = len(commandList)\n for i in range(steps):\n commandList[i]('')\n display.prompt('\\n\\nPress <Return> to continue (%s/%s)' % (i + 1,\n steps))\n try:\n x = input('')\n except:\n x = ''\n\n\nclass DongPhamTestCli(CLI):\n prompt = 'dongpham> '\n\n def __init__(self, _mininet, _env):\n self.env = _env\n self.net = _mininet\n self._testCLI = {}\n CLI.__init__(self, _mininet)\n\n def do(self, shell, quiet=False):\n if quiet:\n return self.mn.controller.cmd(shell)\n return self.mn.controller.cmdPrint(shell)\n\n def doPrint(self, shell):\n display.cmdHighlight(True)\n self.mn.controller.cmdPrint(shell)\n display.cmdHighlight(False)\n\n def do_all(self, _):\n __wait__(self.do_ips, self.do_weights, self.do_costs, self.\n do_routes, self.do_paths, self.do_flows, self.do_stats)\n\n def do_info(self, line):\n locals = self.getLocals()\n _nodes = line.split()\n display.section('All functions')\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n display.subsection('%s (%s)' % (n, obj.IP()))\n print(dir(obj))\n\n def do_ips(self, _):\n display.section('IP Addresses')\n locals = self.getLocals()\n\n def showIP(*keys):\n for key in keys:\n display.message('%s\\t%s' % (key.name, key.IP()))\n\n def showAll(*keys):\n for key in keys:\n display.message('%s\\t%s\\t%s' % (key.name, key.IP(), key.MAC()))\n display.subsection('Controllers')\n for c in self.mn.controllers:\n showIP(locals[c.name])\n display.subsection('Switches')\n for s in self.mn.switches:\n showIP(locals[s.name])\n display.subsection('Hosts')\n for h in self.mn.hosts:\n showAll(locals[h.name])\n\n def do_weights(self, _):\n display.section('Weights')\n log.infoln('Link\\t\\tWeight')\n log.infoln('--------------------')\n for i, j, w in self.mn.topo._slinks:\n log.infoln('{s%s, s%s}\\t%s' % (i, j, w))\n\n def do_costs(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Total path costs')\n print('From\\\\To'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n if isDirect(route):\n print(cost),\n else:\n print(brightLabel(cost)),\n print('\\t'),\n print('')\n\n def do_routes(self, _):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('First-Hop with lowest cost')\n print('From\\\\To\\t'), '\\t'.join(switches)\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if start == end:\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n if isDirect(route):\n print(end),\n else:\n print(brightLabel(route[1])),\n print('\\t'),\n print('')\n\n def do_paths(self, line):\n switches = self.mn.topo.switches()\n weights = [('s' + str(i[0]), 's' + str(i[1]), i[2]) for i in self.\n mn.topo._slinks]\n display.section('Least-cost paths to other nodes')\n display.message('From -> To\\tCost\\t\\tFull Shortest Path')\n for start in switches:\n display.subsection('%s' % start)\n for end in switches:\n if start == end:\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n display.message('%s -> %s\\t%s\\t\\t%s' % (start, end, cost,\n route))\n\n def do_flows(self, _line):\n display.section('Showing all flows of all OVSSwitches')\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl dump-flows %s' % s)\n\n def do_deleteFlows(self, _line):\n display.section('Deleting all flows of all OVSSwitches')\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl del-flows %s' % s)\n\n def do_stats(self, _):\n display.section('OpenFlow: Sent/Received Packets')\n display.message(\n 'Packets passing through a switch on the way host with IP address = \"nw_dst\"'\n )\n for s in self.mn.switches:\n display.subsection('%s - Traffic' % s.name)\n self.doPrint(\n 'sudo ovs-ofctl dump-flows %s | grep -e \"n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]\" -To'\n % s.name)\n\n def do_arps(self, _line):\n display.section('ARP caches of all hosts')\n sh = 'arp -a'\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n\n def do_netstats(self, _line):\n display.section('Routing Tables')\n sh = 'netstat -rn'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n def do_ifconfigs(self, _line):\n display.section('Showing Interface Configuration')\n sh = 'ifconfig -a'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n def do_xxx_testFlows1(self, _line):\n display.section('Adding test flows to Tiny Network')\n self.do(\n 'sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.2,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.1,actions=output:4')\n self.do(\n 'sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.1,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.2,actions=output:3')\n self.do(\n 'sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.2,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.1,actions=output:4')\n self.do(\n 'sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.1,actions=output:1')\n self.do(\n 'sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.2,actions=output:3')\n\n def do_xxx_traffic(self, _line):\n for h in self.mn.hosts:\n h.cmdPrint('tcpdump -i %s' % h.defaultIntf().name)\n\n def do_xxx_xterms(self, _line):\n locals = self.getLocals()\n terms = makeTerms([locals[name] for name in ['h1', 'h2', 's1', 's2']])\n self.mn.terms += terms\n\n def do_xxx_sharks(self, line):\n display.section('Launching Wireshark')\n sh = 'sudo wireshark &'\n locals = self.getLocals()\n _nodes = line.split()\n if not _nodes:\n _nodes = self.mn.keys()\n for n in _nodes:\n if not locals.__contains__(n):\n break\n obj = locals[n]\n obj.cmdPrint(sh)\n",
"step-5": "\nfrom mininet.cli import CLI\nfrom mininet.term import makeTerms\nfrom mininet.util import irange\n\nfrom log import log\nfrom utils import (UITextStyle, display)\n\nfrom dijkstra import (get_routing_decision, get_route_cost)\n\n# Check if route directly connects two switches\ndef isDirect(route):\n return (len(route) == 2)\n# Add purple background for indirect routes\n\n\ndef brightLabel(text):\n return (UITextStyle.BackgroundColor.purple + str(text) + UITextStyle.Format.reset)\n# Execute commands one by one\n\n\ndef __wait__(*commandList):\n steps = len(commandList)\n for i in range(steps):\n commandList[i]('')\n display.prompt('\\n\\nPress <Return> to continue (%s/%s)' %\n (i + 1, steps))\n try:\n x = input('')\n except:\n x = ''\n\n\n# Mininet Command Line Interface extension\nclass DongPhamTestCli(CLI):\n prompt = 'dongpham> '\n\n def __init__(self, _mininet, _env):\n self.env = _env\n self.net = _mininet\n self._testCLI = {}\n CLI.__init__(self, _mininet)\n\n # Tell the controller to do a command\n def do(self, shell, quiet=False):\n if (quiet):\n return self.mn.controller.cmd(shell)\n return self.mn.controller.cmdPrint(shell)\n\n def doPrint(self, shell):\n display.cmdHighlight(True)\n self.mn.controller.cmdPrint(shell)\n display.cmdHighlight(False)\n\n # Run all commands in the wait list\n def do_all(self, _):\n __wait__(\n # Show ip\n self.do_ips,\n # Routing commands\n self.do_weights, self.do_costs, self.do_routes, self.do_paths,\n # Flow commands\n self.do_flows, self.do_stats\n )\n\n # Show object info\n # info [node1, node2, ...]\n def do_info(self, line):\n locals = self.getLocals()\n _nodes = line.split()\n display.section(\"All functions\")\n if not (_nodes):\n _nodes = self.mn.keys()\n for n in _nodes:\n if not (locals.__contains__(n)):\n break\n obj = locals[n]\n display.subsection('%s (%s)' % (n, obj.IP()))\n print(dir(obj))\n\n # Show IP addresses\n # ips\n def do_ips(self, _):\n display.section(\"IP Addresses\")\n locals = self.getLocals()\n\n def showIP(*keys):\n for key in keys:\n display.message('%s\\t%s' % (key.name, key.IP()))\n\n def showAll(*keys):\n for key in keys:\n display.message('%s\\t%s\\t%s' % (key.name, key.IP(), key.MAC()))\n # For each node\n display.subsection('Controllers')\n for c in self.mn.controllers:\n showIP(locals[c.name])\n display.subsection('Switches')\n for s in self.mn.switches:\n showIP(locals[s.name])\n display.subsection('Hosts')\n for h in self.mn.hosts:\n showAll(locals[h.name])\n\n #MARK: - Routing\n # Show link weights\n # weights\n def do_weights(self, _):\n display.section(\"Weights\")\n log.infoln('Link\\t\\tWeight')\n log.infoln('--------------------')\n for (i, j, w) in self.mn.topo._slinks:\n log.infoln('{s%s, s%s}\\t%s' % (i, j, w))\n\n # Show costs of reaching every other switch\n # costs\n def do_costs(self, _):\n # Algorithm input\n switches = self.mn.topo.switches()\n weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])\n for i in self.mn.topo._slinks]\n # Print cost of reaching 'end' switch from 'start' switch\n display.section(\"Total path costs\")\n print('From\\\\To'), ('\\t'.join(switches))\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if (start == end):\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n if (isDirect(route)):\n # Print result for directly connected switches\n print(cost),\n else:\n # Print and highlight routes with intermediate switches\n print(brightLabel(cost)),\n print('\\t'),\n print('')\n\n # Show least-cost paths from every switch to every other switch\n # routes\n def do_routes(self, _):\n # Algorithm input\n switches = self.mn.topo.switches()\n weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])\n for i in self.mn.topo._slinks]\n # Print next hop switch\n display.section(\"First-Hop with lowest cost\")\n print('From\\\\To\\t'), ('\\t'.join(switches))\n for start in switches:\n print(start + '\\t'),\n for end in switches:\n if (start == end):\n print('--\\t'),\n continue\n route = get_routing_decision(start, weights, end)\n if (isDirect(route)):\n # Print result for directly connected switches\n print(end),\n else:\n # Print and highlight routes with intermediate switches\n print(brightLabel(route[1])),\n print('\\t'),\n print('')\n\n # Show the complete shortest path from one switch to every other switch\n # paths\n def do_paths(self, line):\n # Algorithm input\n switches = self.mn.topo.switches()\n weights = [('s'+str(i[0]), 's'+str(i[1]), i[2])\n for i in self.mn.topo._slinks]\n # Least cost paths to every node\n display.section(\"Least-cost paths to other nodes\")\n display.message('From -> To\\tCost\\t\\tFull Shortest Path')\n for start in switches:\n display.subsection('%s' % start)\n for end in switches:\n if (start == end):\n continue\n route = get_routing_decision(start, weights, end)\n cost = get_route_cost([route])\n display.message('%s -> %s\\t%s\\t\\t%s' %\n (start, end, cost, route))\n\n #MARK: - OpenFlow\n # Display flows\n # flows\n def do_flows(self, _line):\n display.section(\"Showing all flows of all OVSSwitches\")\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl dump-flows %s' % s)\n # Delete flows\n # deleteFlows\n\n def do_deleteFlows(self, _line):\n display.section(\"Deleting all flows of all OVSSwitches\")\n for s in self.mn.switches:\n self.doPrint('sudo ovs-ofctl del-flows %s' % s)\n # Display flow statistics\n # stats\n\n def do_stats(self, _):\n display.section(\"OpenFlow: Sent/Received Packets\")\n display.message(\n 'Packets passing through a switch on the way host with IP address = \"nw_dst\"')\n for s in self.mn.switches:\n display.subsection('%s - Traffic' % s.name)\n self.doPrint(\n 'sudo ovs-ofctl dump-flows %s | grep -e \"n_packets=[1-9]*.*n_bytes=[1-9]*.*nw_dst=10.10.[1-9].[1-9]\" -To' % (s.name))\n\n # MARK: - Run on every node\n # arps\n def do_arps(self, _line):\n display.section(\"ARP caches of all hosts\")\n sh = 'arp -a'\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n # netstats\n\n def do_netstats(self, _line):\n display.section(\"Routing Tables\")\n sh = 'netstat -rn'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n # ifconfigs\n\n def do_ifconfigs(self, _line):\n display.section(\"Showing Interface Configuration\")\n sh = 'ifconfig -a'\n display.subsection('Hosts')\n for h in self.mn.hosts:\n h.cmdPrint(sh)\n display.subsection('Controller')\n self.doPrint(sh)\n\n #MARK: - Other\n def do_xxx_testFlows1(self, _line):\n display.section(\"Adding test flows to Tiny Network\")\n self.do('sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.2,actions=output:1')\n self.do('sudo ovs-ofctl add-flow s1 ip,ip_dst=10.0.0.1,actions=output:4')\n self.do('sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.1,actions=output:1')\n self.do('sudo ovs-ofctl add-flow s2 ip,ip_dst=10.0.0.2,actions=output:3')\n\n self.do('sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.2,actions=output:1')\n self.do('sudo ovs-ofctl add-flow s1 arp,arp_tpa=10.0.0.1,actions=output:4')\n self.do('sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.1,actions=output:1')\n self.do('sudo ovs-ofctl add-flow s2 arp,arp_tpa=10.0.0.2,actions=output:3')\n\n def do_xxx_traffic(self, _line):\n #\t\tdisplay.section(\"Monitoring sent and received packets of all hosts\")\n for h in self.mn.hosts:\n h.cmdPrint('tcpdump -i %s' % h.defaultIntf().name)\n\n def do_xxx_xterms(self, _line):\n locals = self.getLocals()\n terms = makeTerms([locals[name]\n for name in ['h1', 'h2', 's1', 's2']])\n self.mn.terms += terms\n\n def do_xxx_sharks(self, line):\n display.section(\"Launching Wireshark\")\n sh = 'sudo wireshark &'\n\n locals = self.getLocals()\n _nodes = line.split()\n if not (_nodes):\n _nodes = self.mn.keys()\n for n in _nodes:\n if not (locals.__contains__(n)):\n break\n obj = locals[n]\n obj.cmdPrint(sh)\n",
"step-ids": [
10,
18,
21,
26,
27
]
}
|
[
10,
18,
21,
26,
27
] |
#!/usr/bin/env python
# USAGE: day_22_01.py
# Michael Chambers, 2017
class Grid:
def __init__(self, startFile):
# Load initial infected sites
# Origin is top-left of input file
self.infected = set()
posx = 0
with open(startFile, 'r') as fo:
for i, line in enumerate(fo):
line = line.rstrip()
posx = int((len(line) -1) / 2)
for j, char in enumerate(line):
if char == "#":
self.infected.add((i, j))
# Set initial position to middle of start grid
posy = int((sum(1 for line in open(startFile)) - 1) / 2)
self.pos = (posx, posy)
self.vec = (-1,0)
self.infectionEvents = 0
def update(self):
if self.pos in self.infected:
self.infected.remove(self.pos)
self.turnRight()
else:
self.infectionEvents += 1
self.infected.add(self.pos)
self.turnLeft()
self.pos = (self.pos[0] + self.vec[0], self.pos[1] + self.vec[1])
def turnLeft(self):
if self.vec == (-1, 0):
self.vec = (0, -1)
elif self.vec == (0, -1):
self.vec = (1,0)
elif self.vec == (1, 0):
self.vec = (0, 1)
else:
self.vec = (-1, 0)
def turnRight(self):
if self.vec == (-1, 0):
self.vec = (0, 1)
elif self.vec == (0, 1):
self.vec = (1, 0)
elif self.vec == (1, 0):
self.vec = (0, -1)
else:
self.vec = (-1, 0)
class ComplexGrid:
# clean : 0
# weakened : 1
# infected : 2
# flagged : 3
def __init__(self, startFile):
# Load initial infected sites
# Origin is top-left of input file
self.weakened = set()
self.infected = set()
self.flagged = set()
posx = 0
with open(startFile, 'r') as fo:
for i, line in enumerate(fo):
line = line.rstrip()
posx = int((len(line) -1) / 2)
for j, char in enumerate(line):
if char == "#":
self.infected.add((i, j))
# Set initial position to middle of start grid
posy = int((sum(1 for line in open(startFile)) - 1) / 2)
self.pos = (posx, posy)
self.vec = (-1,0)
self.infectionEvents = 0
def update(self):
if self.pos in self.weakened:
self.weakened.remove(self.pos)
self.infected.add(self.pos)
self.infectionEvents += 1
elif self.pos in self.infected:
self.infected.remove(self.pos)
self.flagged.add(self.pos)
self.turnRight()
elif self.pos in self.flagged:
self.flagged.remove(self.pos)
self.reverse()
else:
self.weakened.add(self.pos)
self.turnLeft()
self.pos = (self.pos[0] + self.vec[0], self.pos[1] + self.vec[1])
def turnLeft(self):
if self.vec == (-1, 0):
self.vec = (0, -1)
elif self.vec == (0, -1):
self.vec = (1,0)
elif self.vec == (1, 0):
self.vec = (0, 1)
else:
self.vec = (-1, 0)
def turnRight(self):
if self.vec == (-1, 0):
self.vec = (0, 1)
elif self.vec == (0, 1):
self.vec = (1, 0)
elif self.vec == (1, 0):
self.vec = (0, -1)
else:
self.vec = (-1, 0)
def reverse(self):
self.vec = tuple(-x for x in self.vec)
def main():
file = "day_22_input.txt"
# file = "day_22_test.txt"
g = Grid(file)
# print(g.infected)
# print("Pos {} Vec {}".format(g.pos, g.vec))
for i in range(10000):
g.update()
# print(g.infected)
# print("Pos {} Vec {}".format(g.pos, g.vec))
print("Part 1: {}".format(g.infectionEvents))
cg = ComplexGrid(file)
for i in range(10000000):
if i % 500000 == 0:
print(i)
cg.update()
print("Part 2: {}".format(cg.infectionEvents))
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "f840624ec11679d576fbb80f8e753c59663a7ee2",
"index": 9168,
"step-1": "<mask token>\n\n\nclass ComplexGrid:\n\n def __init__(self, startFile):\n self.weakened = set()\n self.infected = set()\n self.flagged = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.weakened:\n self.weakened.remove(self.pos)\n self.infected.add(self.pos)\n self.infectionEvents += 1\n elif self.pos in self.infected:\n self.infected.remove(self.pos)\n self.flagged.add(self.pos)\n self.turnRight()\n elif self.pos in self.flagged:\n self.flagged.remove(self.pos)\n self.reverse()\n else:\n self.weakened.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n def reverse(self):\n self.vec = tuple(-x for x in self.vec)\n\n\n<mask token>\n",
"step-2": "class Grid:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ComplexGrid:\n\n def __init__(self, startFile):\n self.weakened = set()\n self.infected = set()\n self.flagged = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.weakened:\n self.weakened.remove(self.pos)\n self.infected.add(self.pos)\n self.infectionEvents += 1\n elif self.pos in self.infected:\n self.infected.remove(self.pos)\n self.flagged.add(self.pos)\n self.turnRight()\n elif self.pos in self.flagged:\n self.flagged.remove(self.pos)\n self.reverse()\n else:\n self.weakened.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n def reverse(self):\n self.vec = tuple(-x for x in self.vec)\n\n\n<mask token>\n",
"step-3": "class Grid:\n\n def __init__(self, startFile):\n self.infected = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.infected:\n self.infected.remove(self.pos)\n self.turnRight()\n else:\n self.infectionEvents += 1\n self.infected.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n\nclass ComplexGrid:\n\n def __init__(self, startFile):\n self.weakened = set()\n self.infected = set()\n self.flagged = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.weakened:\n self.weakened.remove(self.pos)\n self.infected.add(self.pos)\n self.infectionEvents += 1\n elif self.pos in self.infected:\n self.infected.remove(self.pos)\n self.flagged.add(self.pos)\n self.turnRight()\n elif self.pos in self.flagged:\n self.flagged.remove(self.pos)\n self.reverse()\n else:\n self.weakened.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n def reverse(self):\n self.vec = tuple(-x for x in self.vec)\n\n\n<mask token>\n",
"step-4": "class Grid:\n\n def __init__(self, startFile):\n self.infected = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.infected:\n self.infected.remove(self.pos)\n self.turnRight()\n else:\n self.infectionEvents += 1\n self.infected.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n\nclass ComplexGrid:\n\n def __init__(self, startFile):\n self.weakened = set()\n self.infected = set()\n self.flagged = set()\n posx = 0\n with open(startFile, 'r') as fo:\n for i, line in enumerate(fo):\n line = line.rstrip()\n posx = int((len(line) - 1) / 2)\n for j, char in enumerate(line):\n if char == '#':\n self.infected.add((i, j))\n posy = int((sum(1 for line in open(startFile)) - 1) / 2)\n self.pos = posx, posy\n self.vec = -1, 0\n self.infectionEvents = 0\n\n def update(self):\n if self.pos in self.weakened:\n self.weakened.remove(self.pos)\n self.infected.add(self.pos)\n self.infectionEvents += 1\n elif self.pos in self.infected:\n self.infected.remove(self.pos)\n self.flagged.add(self.pos)\n self.turnRight()\n elif self.pos in self.flagged:\n self.flagged.remove(self.pos)\n self.reverse()\n else:\n self.weakened.add(self.pos)\n self.turnLeft()\n self.pos = self.pos[0] + self.vec[0], self.pos[1] + self.vec[1]\n\n def turnLeft(self):\n if self.vec == (-1, 0):\n self.vec = 0, -1\n elif self.vec == (0, -1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, 1\n else:\n self.vec = -1, 0\n\n def turnRight(self):\n if self.vec == (-1, 0):\n self.vec = 0, 1\n elif self.vec == (0, 1):\n self.vec = 1, 0\n elif self.vec == (1, 0):\n self.vec = 0, -1\n else:\n self.vec = -1, 0\n\n def reverse(self):\n self.vec = tuple(-x for x in self.vec)\n\n\ndef main():\n file = 'day_22_input.txt'\n g = Grid(file)\n for i in range(10000):\n g.update()\n print('Part 1: {}'.format(g.infectionEvents))\n cg = ComplexGrid(file)\n for i in range(10000000):\n if i % 500000 == 0:\n print(i)\n cg.update()\n print('Part 2: {}'.format(cg.infectionEvents))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\n# USAGE: day_22_01.py\n# Michael Chambers, 2017\n\nclass Grid:\n\tdef __init__(self, startFile):\n\t\t# Load initial infected sites\n\t\t# Origin is top-left of input file\n\t\tself.infected = set()\n\t\tposx = 0\n\t\twith open(startFile, 'r') as fo:\n\t\t\tfor i, line in enumerate(fo):\n\t\t\t\tline = line.rstrip()\n\t\t\t\tposx = int((len(line) -1) / 2)\n\t\t\t\tfor j, char in enumerate(line):\n\t\t\t\t\tif char == \"#\":\n\t\t\t\t\t\tself.infected.add((i, j))\n\n\t\t# Set initial position to middle of start grid\n\t\tposy = int((sum(1 for line in open(startFile)) - 1) / 2)\n\t\tself.pos = (posx, posy)\n\t\tself.vec = (-1,0)\n\t\tself.infectionEvents = 0\n\n\tdef update(self):\n\t\tif self.pos in self.infected:\n\t\t\tself.infected.remove(self.pos)\n\t\t\tself.turnRight()\n\t\telse:\n\t\t\tself.infectionEvents += 1\n\t\t\tself.infected.add(self.pos)\n\t\t\tself.turnLeft()\n\t\tself.pos = (self.pos[0] + self.vec[0], self.pos[1] + self.vec[1])\n\n\tdef turnLeft(self):\n\t\tif self.vec == (-1, 0):\n\t\t\tself.vec = (0, -1)\n\t\telif self.vec == (0, -1):\n\t\t\tself.vec = (1,0)\n\t\telif self.vec == (1, 0):\n\t\t\tself.vec = (0, 1)\n\t\telse:\n\t\t\tself.vec = (-1, 0)\n\n\tdef turnRight(self):\n\t\tif self.vec == (-1, 0):\n\t\t\tself.vec = (0, 1)\n\t\telif self.vec == (0, 1):\n\t\t\tself.vec = (1, 0)\n\t\telif self.vec == (1, 0):\n\t\t\tself.vec = (0, -1)\n\t\telse:\n\t\t\tself.vec = (-1, 0)\n\n\nclass ComplexGrid:\n\t# clean : 0\n\t# weakened : 1\n\t# infected : 2\n\t# flagged : 3\n\n\tdef __init__(self, startFile):\n\t\t# Load initial infected sites\n\t\t# Origin is top-left of input file\n\t\tself.weakened = set()\n\t\tself.infected = set()\n\t\tself.flagged = set()\n\t\tposx = 0\n\t\twith open(startFile, 'r') as fo:\n\t\t\tfor i, line in enumerate(fo):\n\t\t\t\tline = line.rstrip()\n\t\t\t\tposx = int((len(line) -1) / 2)\n\t\t\t\tfor j, char in enumerate(line):\n\t\t\t\t\tif char == \"#\":\n\t\t\t\t\t\tself.infected.add((i, j))\n\n\t\t# Set initial position to middle of start grid\n\t\tposy = int((sum(1 for line in open(startFile)) - 1) / 2)\n\t\tself.pos = (posx, posy)\n\t\tself.vec = (-1,0)\n\t\tself.infectionEvents = 0\n\n\tdef update(self):\n\t\tif self.pos in self.weakened:\n\t\t\tself.weakened.remove(self.pos)\n\t\t\tself.infected.add(self.pos)\n\t\t\tself.infectionEvents += 1\n\t\telif self.pos in self.infected:\n\t\t\tself.infected.remove(self.pos)\n\t\t\tself.flagged.add(self.pos)\n\t\t\tself.turnRight()\n\t\telif self.pos in self.flagged:\n\t\t\tself.flagged.remove(self.pos)\n\t\t\tself.reverse()\n\t\telse:\n\t\t\tself.weakened.add(self.pos)\n\t\t\tself.turnLeft()\n\t\tself.pos = (self.pos[0] + self.vec[0], self.pos[1] + self.vec[1])\n\n\tdef turnLeft(self):\n\t\tif self.vec == (-1, 0):\n\t\t\tself.vec = (0, -1)\n\t\telif self.vec == (0, -1):\n\t\t\tself.vec = (1,0)\n\t\telif self.vec == (1, 0):\n\t\t\tself.vec = (0, 1)\n\t\telse:\n\t\t\tself.vec = (-1, 0)\n\n\tdef turnRight(self):\n\t\tif self.vec == (-1, 0):\n\t\t\tself.vec = (0, 1)\n\t\telif self.vec == (0, 1):\n\t\t\tself.vec = (1, 0)\n\t\telif self.vec == (1, 0):\n\t\t\tself.vec = (0, -1)\n\t\telse:\n\t\t\tself.vec = (-1, 0)\t\n\n\tdef reverse(self):\n\t\tself.vec = tuple(-x for x in self.vec)\t\n\ndef main():\n\tfile = \"day_22_input.txt\"\n\t# file = \"day_22_test.txt\"\n\tg = Grid(file)\n\t# print(g.infected)\n\t# print(\"Pos {} Vec {}\".format(g.pos, g.vec))\n\tfor i in range(10000):\n\t\tg.update()\n\t\t# print(g.infected)\n\t\t# print(\"Pos {} Vec {}\".format(g.pos, g.vec))\n\tprint(\"Part 1: {}\".format(g.infectionEvents))\n\n\tcg = ComplexGrid(file)\n\tfor i in range(10000000):\n\t\tif i % 500000 == 0:\n\t\t\tprint(i)\n\t\tcg.update()\n\tprint(\"Part 2: {}\".format(cg.infectionEvents))\n\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n",
"step-ids": [
6,
7,
11,
13,
14
]
}
|
[
6,
7,
11,
13,
14
] |
from django.shortcuts import render
from rest_framework import status, viewsets , response
from . import models
from . import serializers
# Create your views here.
class TodoViewset(viewsets.ModelViewSet):
queryset = models.Todo.objects.all()
serializer_class = serializers.TodoSerializer
|
normal
|
{
"blob_id": "1c668cf6f145b85a09b248fefda46e928de64e41",
"index": 5041,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TodoViewset(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TodoViewset(viewsets.ModelViewSet):\n queryset = models.Todo.objects.all()\n serializer_class = serializers.TodoSerializer\n",
"step-4": "from django.shortcuts import render\nfrom rest_framework import status, viewsets, response\nfrom . import models\nfrom . import serializers\n\n\nclass TodoViewset(viewsets.ModelViewSet):\n queryset = models.Todo.objects.all()\n serializer_class = serializers.TodoSerializer\n",
"step-5": "from django.shortcuts import render\nfrom rest_framework import status, viewsets , response\n\nfrom . import models\nfrom . import serializers\n\n# Create your views here.\n\nclass TodoViewset(viewsets.ModelViewSet):\n queryset = models.Todo.objects.all()\n serializer_class = serializers.TodoSerializer\n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# coding: utf-8
from __future__ import division, unicode_literals
import unittest
from monty.inspect import *
class LittleCatA(object):
pass
class LittleCatB(LittleCatA):
pass
class LittleCatC(object):
pass
class LittleCatD(LittleCatB):
pass
class InspectTest(unittest.TestCase):
def test_func(self):
# Not a real test. Need something better.
self.assertTrue(find_top_pyfile())
self.assertTrue(caller_name())
def test_all_subclasses(self):
self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])
if __name__ == "__main__":
unittest.main()
|
normal
|
{
"blob_id": "89605ff723d2f78e85cae458d576494718b5d456",
"index": 1193,
"step-1": "<mask token>\n\n\nclass InspectTest(unittest.TestCase):\n\n def test_func(self):\n self.assertTrue(find_top_pyfile())\n self.assertTrue(caller_name())\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LittleCatC(object):\n pass\n\n\nclass LittleCatD(LittleCatB):\n pass\n\n\nclass InspectTest(unittest.TestCase):\n\n def test_func(self):\n self.assertTrue(find_top_pyfile())\n self.assertTrue(caller_name())\n\n def test_all_subclasses(self):\n self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass LittleCatB(LittleCatA):\n pass\n\n\nclass LittleCatC(object):\n pass\n\n\nclass LittleCatD(LittleCatB):\n pass\n\n\nclass InspectTest(unittest.TestCase):\n\n def test_func(self):\n self.assertTrue(find_top_pyfile())\n self.assertTrue(caller_name())\n\n def test_all_subclasses(self):\n self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])\n\n\n<mask token>\n",
"step-4": "from __future__ import division, unicode_literals\nimport unittest\nfrom monty.inspect import *\n\n\nclass LittleCatA(object):\n pass\n\n\nclass LittleCatB(LittleCatA):\n pass\n\n\nclass LittleCatC(object):\n pass\n\n\nclass LittleCatD(LittleCatB):\n pass\n\n\nclass InspectTest(unittest.TestCase):\n\n def test_func(self):\n self.assertTrue(find_top_pyfile())\n self.assertTrue(caller_name())\n\n def test_all_subclasses(self):\n self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "# coding: utf-8\nfrom __future__ import division, unicode_literals\n\nimport unittest\n\nfrom monty.inspect import *\n\nclass LittleCatA(object):\n pass\n\nclass LittleCatB(LittleCatA):\n pass\n\nclass LittleCatC(object):\n pass\n\nclass LittleCatD(LittleCatB):\n pass\n\n\nclass InspectTest(unittest.TestCase):\n\n def test_func(self):\n # Not a real test. Need something better.\n self.assertTrue(find_top_pyfile())\n self.assertTrue(caller_name())\n\n def test_all_subclasses(self):\n self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"step-ids": [
2,
5,
6,
9,
10
]
}
|
[
2,
5,
6,
9,
10
] |
def towers_of_hanoi(n, src, dest, temp,res):
if n==1:
s = 'disk 1 from ',src,'->',dest
res.append(s)
return
towers_of_hanoi(n-1, src, temp, dest, res)
s = 'disk ',n, ' from ',src,'->',dest
res.append(s)
towers_of_hanoi(n-1, temp, dest, src, res)
return res
def steps_in_tower_of_hanoi(no_of_disks):
res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B',[])
return res
if __name__ == "__main__":
no_of_disks = int(input())
res = steps_in_tower_of_hanoi(no_of_disks)
print('\n'.join([' '.join(map(str, x)) for x in res]))
print('\n')
|
normal
|
{
"blob_id": "f23bfef2daf8fda4249435821dbc2e0b1846e3d6",
"index": 9842,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef steps_in_tower_of_hanoi(no_of_disks):\n res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B', [])\n return res\n\n\n<mask token>\n",
"step-3": "def towers_of_hanoi(n, src, dest, temp, res):\n if n == 1:\n s = 'disk 1 from ', src, '->', dest\n res.append(s)\n return\n towers_of_hanoi(n - 1, src, temp, dest, res)\n s = 'disk ', n, ' from ', src, '->', dest\n res.append(s)\n towers_of_hanoi(n - 1, temp, dest, src, res)\n return res\n\n\ndef steps_in_tower_of_hanoi(no_of_disks):\n res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B', [])\n return res\n\n\n<mask token>\n",
"step-4": "def towers_of_hanoi(n, src, dest, temp, res):\n if n == 1:\n s = 'disk 1 from ', src, '->', dest\n res.append(s)\n return\n towers_of_hanoi(n - 1, src, temp, dest, res)\n s = 'disk ', n, ' from ', src, '->', dest\n res.append(s)\n towers_of_hanoi(n - 1, temp, dest, src, res)\n return res\n\n\ndef steps_in_tower_of_hanoi(no_of_disks):\n res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B', [])\n return res\n\n\nif __name__ == '__main__':\n no_of_disks = int(input())\n res = steps_in_tower_of_hanoi(no_of_disks)\n print('\\n'.join([' '.join(map(str, x)) for x in res]))\n print('\\n')\n",
"step-5": "\ndef towers_of_hanoi(n, src, dest, temp,res):\n if n==1:\n s = 'disk 1 from ',src,'->',dest\n res.append(s)\n return\n towers_of_hanoi(n-1, src, temp, dest, res)\n s = 'disk ',n, ' from ',src,'->',dest\n res.append(s)\n towers_of_hanoi(n-1, temp, dest, src, res)\n return res\n \ndef steps_in_tower_of_hanoi(no_of_disks):\n res = towers_of_hanoi(no_of_disks, 'A', 'C', 'B',[])\n return res\n\nif __name__ == \"__main__\":\n\n no_of_disks = int(input())\n\n res = steps_in_tower_of_hanoi(no_of_disks)\n\n print('\\n'.join([' '.join(map(str, x)) for x in res]))\n print('\\n')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def test_board_can_be_instatiated_with_any_set_of_pieces():
board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')})
assert board.pieces_quantity() == 2
def test_piece_cant_capture_an_ally():
board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')})
with pytest.raises(ImpossibleMove):
board.move('f3', 'e5')
<|reserved_special_token_0|>
def test_pieces_can_capture_opponent_pieces():
board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'),
'f3': Knight('white')})
assert board.pieces_quantity() == 3
knight = board.get_piece('f3')
board.move('f3', 'e5')
assert board.get_piece('e5') is knight
assert board.pieces_quantity() == 2
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_board_has_32_pieces():
board = Board()
assert board.pieces_quantity() == 32
def test_board_can_be_instatiated_with_any_set_of_pieces():
board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')})
assert board.pieces_quantity() == 2
def test_piece_cant_capture_an_ally():
board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')})
with pytest.raises(ImpossibleMove):
board.move('f3', 'e5')
<|reserved_special_token_0|>
def test_players_can_put_opponent_in_check():
board = Board({'e1': King('black'), 'f8': Rook('white')})
assert board.check is None
board.move('f8', 'e8')
assert board.check == 'black'
def test_players_can_get_out_of_check():
board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(
'white')})
assert board.check is None
board.move('f8', 'e8')
assert board.check == 'black'
board.move('e1', 'f1')
assert board.check is None
def test_player_should_to_get_out_of_check():
board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(
'white')})
assert board.check is None
board.move('f8', 'e8')
assert board.check == 'black'
with pytest.raises(ImpossibleMove):
board.move('e1', 'e2')
def test_pieces_can_capture_opponent_pieces():
board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'),
'f3': Knight('white')})
assert board.pieces_quantity() == 3
knight = board.get_piece('f3')
board.move('f3', 'e5')
assert board.get_piece('e5') is knight
assert board.pieces_quantity() == 2
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_board_has_32_pieces():
board = Board()
assert board.pieces_quantity() == 32
def test_board_can_be_instatiated_with_any_set_of_pieces():
board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')})
assert board.pieces_quantity() == 2
def test_piece_cant_capture_an_ally():
board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')})
with pytest.raises(ImpossibleMove):
board.move('f3', 'e5')
def test_alternating_between_players():
board = Board()
assert board.turn == 'white'
board.move('g2', 'g3')
assert board.turn == 'black'
board.move('b7', 'b6')
assert board.turn == 'white'
board.move('f1', 'g2')
assert board.turn == 'black'
def test_only_white_pieces_can_start():
board = Board()
assert board.turn == 'white'
with pytest.raises(ImpossibleMove):
board.move('b7', 'b6')
def test_players_can_put_opponent_in_check():
board = Board({'e1': King('black'), 'f8': Rook('white')})
assert board.check is None
board.move('f8', 'e8')
assert board.check == 'black'
def test_players_can_get_out_of_check():
board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(
'white')})
assert board.check is None
board.move('f8', 'e8')
assert board.check == 'black'
board.move('e1', 'f1')
assert board.check is None
def test_player_should_to_get_out_of_check():
board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(
'white')})
assert board.check is None
board.move('f8', 'e8')
assert board.check == 'black'
with pytest.raises(ImpossibleMove):
board.move('e1', 'e2')
def test_pieces_can_capture_opponent_pieces():
board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'),
'f3': Knight('white')})
assert board.pieces_quantity() == 3
knight = board.get_piece('f3')
board.move('f3', 'e5')
assert board.get_piece('e5') is knight
assert board.pieces_quantity() == 2
<|reserved_special_token_1|>
import pytest
from chess.board import Board, ImpossibleMove
from chess.pieces import King, Rook, Pawn, Knight
def test_board_has_32_pieces():
board = Board()
assert board.pieces_quantity() == 32
def test_board_can_be_instatiated_with_any_set_of_pieces():
board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')})
assert board.pieces_quantity() == 2
def test_piece_cant_capture_an_ally():
board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')})
with pytest.raises(ImpossibleMove):
board.move('f3', 'e5')
def test_alternating_between_players():
board = Board()
assert board.turn == 'white'
board.move('g2', 'g3')
assert board.turn == 'black'
board.move('b7', 'b6')
assert board.turn == 'white'
board.move('f1', 'g2')
assert board.turn == 'black'
def test_only_white_pieces_can_start():
board = Board()
assert board.turn == 'white'
with pytest.raises(ImpossibleMove):
board.move('b7', 'b6')
def test_players_can_put_opponent_in_check():
board = Board({'e1': King('black'), 'f8': Rook('white')})
assert board.check is None
board.move('f8', 'e8')
assert board.check == 'black'
def test_players_can_get_out_of_check():
board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(
'white')})
assert board.check is None
board.move('f8', 'e8')
assert board.check == 'black'
board.move('e1', 'f1')
assert board.check is None
def test_player_should_to_get_out_of_check():
board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(
'white')})
assert board.check is None
board.move('f8', 'e8')
assert board.check == 'black'
with pytest.raises(ImpossibleMove):
board.move('e1', 'e2')
def test_pieces_can_capture_opponent_pieces():
board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'),
'f3': Knight('white')})
assert board.pieces_quantity() == 3
knight = board.get_piece('f3')
board.move('f3', 'e5')
assert board.get_piece('e5') is knight
assert board.pieces_quantity() == 2
<|reserved_special_token_1|>
import pytest
from chess.board import Board, ImpossibleMove
from chess.pieces import King, Rook, Pawn, Knight
def test_board_has_32_pieces():
board = Board()
assert board.pieces_quantity() == 32
def test_board_can_be_instatiated_with_any_set_of_pieces():
board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')})
assert board.pieces_quantity() == 2
def test_piece_cant_capture_an_ally():
board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')})
with pytest.raises(ImpossibleMove):
board.move('f3', 'e5')
def test_alternating_between_players():
board = Board()
assert board.turn == 'white'
board.move('g2', 'g3') # white pawn moves
assert board.turn == 'black'
board.move('b7', 'b6') # black pawn moves
assert board.turn == 'white'
board.move('f1', 'g2') # white bishop moves
assert board.turn == 'black'
def test_only_white_pieces_can_start():
board = Board()
assert board.turn == 'white'
with pytest.raises(ImpossibleMove):
board.move('b7', 'b6')
def test_players_can_put_opponent_in_check():
board = Board({'e1': King('black'), 'f8': Rook('white')})
assert board.check is None
board.move('f8', 'e8')
assert board.check == 'black'
def test_players_can_get_out_of_check():
board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King('white')})
assert board.check is None
board.move('f8', 'e8')
assert board.check == 'black'
board.move('e1', 'f1')
assert board.check is None
def test_player_should_to_get_out_of_check():
board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King('white')})
assert board.check is None
board.move('f8', 'e8')
assert board.check == 'black'
with pytest.raises(ImpossibleMove):
board.move('e1', 'e2')
def test_pieces_can_capture_opponent_pieces():
board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'), 'f3': Knight('white')})
assert board.pieces_quantity() == 3
knight = board.get_piece('f3')
board.move('f3', 'e5')
assert board.get_piece('e5') is knight
assert board.pieces_quantity() == 2
|
flexible
|
{
"blob_id": "5f471fb75b1c4f6fc7aa4cb4f99f9c1a1a9f0ea1",
"index": 8595,
"step-1": "<mask token>\n\n\ndef test_board_can_be_instatiated_with_any_set_of_pieces():\n board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')})\n assert board.pieces_quantity() == 2\n\n\ndef test_piece_cant_capture_an_ally():\n board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')})\n with pytest.raises(ImpossibleMove):\n board.move('f3', 'e5')\n\n\n<mask token>\n\n\ndef test_pieces_can_capture_opponent_pieces():\n board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'),\n 'f3': Knight('white')})\n assert board.pieces_quantity() == 3\n knight = board.get_piece('f3')\n board.move('f3', 'e5')\n assert board.get_piece('e5') is knight\n assert board.pieces_quantity() == 2\n",
"step-2": "<mask token>\n\n\ndef test_board_has_32_pieces():\n board = Board()\n assert board.pieces_quantity() == 32\n\n\ndef test_board_can_be_instatiated_with_any_set_of_pieces():\n board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')})\n assert board.pieces_quantity() == 2\n\n\ndef test_piece_cant_capture_an_ally():\n board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')})\n with pytest.raises(ImpossibleMove):\n board.move('f3', 'e5')\n\n\n<mask token>\n\n\ndef test_players_can_put_opponent_in_check():\n board = Board({'e1': King('black'), 'f8': Rook('white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n\n\ndef test_players_can_get_out_of_check():\n board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(\n 'white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n board.move('e1', 'f1')\n assert board.check is None\n\n\ndef test_player_should_to_get_out_of_check():\n board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(\n 'white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n with pytest.raises(ImpossibleMove):\n board.move('e1', 'e2')\n\n\ndef test_pieces_can_capture_opponent_pieces():\n board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'),\n 'f3': Knight('white')})\n assert board.pieces_quantity() == 3\n knight = board.get_piece('f3')\n board.move('f3', 'e5')\n assert board.get_piece('e5') is knight\n assert board.pieces_quantity() == 2\n",
"step-3": "<mask token>\n\n\ndef test_board_has_32_pieces():\n board = Board()\n assert board.pieces_quantity() == 32\n\n\ndef test_board_can_be_instatiated_with_any_set_of_pieces():\n board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')})\n assert board.pieces_quantity() == 2\n\n\ndef test_piece_cant_capture_an_ally():\n board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')})\n with pytest.raises(ImpossibleMove):\n board.move('f3', 'e5')\n\n\ndef test_alternating_between_players():\n board = Board()\n assert board.turn == 'white'\n board.move('g2', 'g3')\n assert board.turn == 'black'\n board.move('b7', 'b6')\n assert board.turn == 'white'\n board.move('f1', 'g2')\n assert board.turn == 'black'\n\n\ndef test_only_white_pieces_can_start():\n board = Board()\n assert board.turn == 'white'\n with pytest.raises(ImpossibleMove):\n board.move('b7', 'b6')\n\n\ndef test_players_can_put_opponent_in_check():\n board = Board({'e1': King('black'), 'f8': Rook('white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n\n\ndef test_players_can_get_out_of_check():\n board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(\n 'white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n board.move('e1', 'f1')\n assert board.check is None\n\n\ndef test_player_should_to_get_out_of_check():\n board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(\n 'white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n with pytest.raises(ImpossibleMove):\n board.move('e1', 'e2')\n\n\ndef test_pieces_can_capture_opponent_pieces():\n board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'),\n 'f3': Knight('white')})\n assert board.pieces_quantity() == 3\n knight = board.get_piece('f3')\n board.move('f3', 'e5')\n assert board.get_piece('e5') is knight\n assert board.pieces_quantity() == 2\n",
"step-4": "import pytest\nfrom chess.board import Board, ImpossibleMove\nfrom chess.pieces import King, Rook, Pawn, Knight\n\n\ndef test_board_has_32_pieces():\n board = Board()\n assert board.pieces_quantity() == 32\n\n\ndef test_board_can_be_instatiated_with_any_set_of_pieces():\n board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')})\n assert board.pieces_quantity() == 2\n\n\ndef test_piece_cant_capture_an_ally():\n board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')})\n with pytest.raises(ImpossibleMove):\n board.move('f3', 'e5')\n\n\ndef test_alternating_between_players():\n board = Board()\n assert board.turn == 'white'\n board.move('g2', 'g3')\n assert board.turn == 'black'\n board.move('b7', 'b6')\n assert board.turn == 'white'\n board.move('f1', 'g2')\n assert board.turn == 'black'\n\n\ndef test_only_white_pieces_can_start():\n board = Board()\n assert board.turn == 'white'\n with pytest.raises(ImpossibleMove):\n board.move('b7', 'b6')\n\n\ndef test_players_can_put_opponent_in_check():\n board = Board({'e1': King('black'), 'f8': Rook('white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n\n\ndef test_players_can_get_out_of_check():\n board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(\n 'white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n board.move('e1', 'f1')\n assert board.check is None\n\n\ndef test_player_should_to_get_out_of_check():\n board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(\n 'white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n with pytest.raises(ImpossibleMove):\n board.move('e1', 'e2')\n\n\ndef test_pieces_can_capture_opponent_pieces():\n board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'),\n 'f3': Knight('white')})\n assert board.pieces_quantity() == 3\n knight = board.get_piece('f3')\n board.move('f3', 'e5')\n assert board.get_piece('e5') is knight\n assert board.pieces_quantity() == 2\n",
"step-5": "import pytest\n\nfrom chess.board import Board, ImpossibleMove\nfrom chess.pieces import King, Rook, Pawn, Knight\n\n\ndef test_board_has_32_pieces():\n board = Board()\n assert board.pieces_quantity() == 32\n\n\ndef test_board_can_be_instatiated_with_any_set_of_pieces():\n board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')})\n assert board.pieces_quantity() == 2\n\n\ndef test_piece_cant_capture_an_ally():\n board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')})\n with pytest.raises(ImpossibleMove):\n board.move('f3', 'e5')\n\n\ndef test_alternating_between_players():\n board = Board()\n assert board.turn == 'white'\n board.move('g2', 'g3') # white pawn moves\n assert board.turn == 'black'\n board.move('b7', 'b6') # black pawn moves\n assert board.turn == 'white'\n board.move('f1', 'g2') # white bishop moves\n assert board.turn == 'black'\n\n\ndef test_only_white_pieces_can_start():\n board = Board()\n assert board.turn == 'white'\n with pytest.raises(ImpossibleMove):\n board.move('b7', 'b6')\n\n\ndef test_players_can_put_opponent_in_check():\n board = Board({'e1': King('black'), 'f8': Rook('white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n\n\ndef test_players_can_get_out_of_check():\n board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King('white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n board.move('e1', 'f1')\n assert board.check is None\n\n\ndef test_player_should_to_get_out_of_check():\n board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King('white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n with pytest.raises(ImpossibleMove):\n board.move('e1', 'e2')\n\n\ndef test_pieces_can_capture_opponent_pieces():\n board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'), 'f3': Knight('white')})\n assert board.pieces_quantity() == 3\n\n knight = board.get_piece('f3')\n board.move('f3', 'e5')\n assert board.get_piece('e5') is knight\n assert board.pieces_quantity() == 2\n",
"step-ids": [
3,
7,
9,
10,
11
]
}
|
[
3,
7,
9,
10,
11
] |
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
l, res, freq = 0, 0, [(False) for _ in range(256)]
for idx, char in enumerate(s):
if freq[ord(char)]:
while s[l] != char:
freq[ord(s[l])] = False
l += 1
l += 1
else:
freq[ord(char)] = True
res = max(idx - l + 1, res)
return res
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
"""
Time: O(n)
Space:O(1)
"""
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
l, res, freq = 0, 0, [(False) for _ in range(256)]
for idx, char in enumerate(s):
if freq[ord(char)]:
while s[l] != char:
freq[ord(s[l])] = False
l += 1
l += 1
else:
freq[ord(char)] = True
res = max(idx - l + 1, res)
return res
<|reserved_special_token_1|>
__author__ = 'yyp'
__date__ = '2018-5-26 3:42'
<|reserved_special_token_0|>
class Solution:
"""
Time: O(n)
Space:O(1)
"""
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
l, res, freq = 0, 0, [(False) for _ in range(256)]
for idx, char in enumerate(s):
if freq[ord(char)]:
while s[l] != char:
freq[ord(s[l])] = False
l += 1
l += 1
else:
freq[ord(char)] = True
res = max(idx - l + 1, res)
return res
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
__author__ = 'yyp'
__date__ = '2018-5-26 3:42'
'''
Given a string, find the length of the longest substring without repeating characters.
Examples:
Given "abcabcbb", the answer is "abc", which the length is 3.
Given "bbbbb", the answer is "b", with the length of 1.
Given "pwwkew", the answer is "wke", with the length of 3. Note that the answer must be a substring, "pwke" is a subsequence and not a substring.
'''
class Solution:
"""
Time: O(n)
Space:O(1)
"""
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
l, res, freq = 0, 0, [False for _ in range(256)]
for idx, char in enumerate(s):
if freq[ord(char)]:
while s[l] != char:
freq[ord(s[l])] = False
l += 1
l += 1
else:
freq[ord(char)] = True
res = max(idx - l + 1, res)
return res
|
flexible
|
{
"blob_id": "b7c43f4242e38318c9e5423ea73e9d9d86759a53",
"index": 4663,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n l, res, freq = 0, 0, [(False) for _ in range(256)]\n for idx, char in enumerate(s):\n if freq[ord(char)]:\n while s[l] != char:\n freq[ord(s[l])] = False\n l += 1\n l += 1\n else:\n freq[ord(char)] = True\n res = max(idx - l + 1, res)\n return res\n",
"step-3": "<mask token>\n\n\nclass Solution:\n \"\"\"\n Time: O(n)\n Space:O(1)\n \"\"\"\n\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n l, res, freq = 0, 0, [(False) for _ in range(256)]\n for idx, char in enumerate(s):\n if freq[ord(char)]:\n while s[l] != char:\n freq[ord(s[l])] = False\n l += 1\n l += 1\n else:\n freq[ord(char)] = True\n res = max(idx - l + 1, res)\n return res\n",
"step-4": "__author__ = 'yyp'\n__date__ = '2018-5-26 3:42'\n<mask token>\n\n\nclass Solution:\n \"\"\"\n Time: O(n)\n Space:O(1)\n \"\"\"\n\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n l, res, freq = 0, 0, [(False) for _ in range(256)]\n for idx, char in enumerate(s):\n if freq[ord(char)]:\n while s[l] != char:\n freq[ord(s[l])] = False\n l += 1\n l += 1\n else:\n freq[ord(char)] = True\n res = max(idx - l + 1, res)\n return res\n",
"step-5": "# -*- coding:utf-8 -*-\n__author__ = 'yyp'\n__date__ = '2018-5-26 3:42'\n\n'''\nGiven a string, find the length of the longest substring without repeating characters.\nExamples:\nGiven \"abcabcbb\", the answer is \"abc\", which the length is 3.\nGiven \"bbbbb\", the answer is \"b\", with the length of 1.\nGiven \"pwwkew\", the answer is \"wke\", with the length of 3. Note that the answer must be a substring, \"pwke\" is a subsequence and not a substring.\n'''\n\n\nclass Solution:\n \"\"\"\n Time: O(n)\n Space:O(1)\n \"\"\"\n\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n l, res, freq = 0, 0, [False for _ in range(256)]\n for idx, char in enumerate(s):\n if freq[ord(char)]:\n while s[l] != char:\n freq[ord(s[l])] = False\n l += 1\n l += 1\n else:\n freq[ord(char)] = True\n res = max(idx - l + 1, res)\n return res\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import random
def take_second(element):
return element[1]
import string
def get_random_name():
name = ""
for i in range(random.randint(5, 15)):
name += random.choice(string.ascii_letters)
return name
imenik = [(777, "zejneba"), (324, "fahro"), (23, "fatih"), (2334, "muamer"), (435, "kerim"),(4568,"zzzzzzz")]
print(sorted(imenik,key=take_second))
for i in range(100000):
novi_element = (random.randint(1, 10000), get_random_name())
imenik.append(novi_element)
imenik.sort(key=take_second)
print(imenik)
name = input('enter a name: ')
min_index = 0
max_index = len(imenik)
previous_guess_name = ""
counter = 0
while True:
mid_index = (max_index + min_index) // 2
guess_score = imenik[mid_index][0]
guess_name = imenik[mid_index][1]
if guess_name == previous_guess_name:
print("Not found")
break
if guess_name == name:
print("your score is", guess_score)
break
elif name > guess_name:
min_index = mid_index
else:
max_index = mid_index
previous_guess_name = guess_name
counter += 1
print("Number of comparisons", counter)
print("after")
found = False
counter = 0
for i in range(len(imenik)):
counter += 1
if imenik[i][1] == name:
print("your score is", guess_score)
found = True
break
if not found:
print("Not found")
print("Number of comparisons after", counter)
|
normal
|
{
"blob_id": "21ef8103a5880a07d8c681b2367c2beef727260f",
"index": 6536,
"step-1": "<mask token>\n\n\ndef take_second(element):\n return element[1]\n\n\n<mask token>\n\n\ndef get_random_name():\n name = ''\n for i in range(random.randint(5, 15)):\n name += random.choice(string.ascii_letters)\n return name\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef take_second(element):\n return element[1]\n\n\n<mask token>\n\n\ndef get_random_name():\n name = ''\n for i in range(random.randint(5, 15)):\n name += random.choice(string.ascii_letters)\n return name\n\n\n<mask token>\nprint(sorted(imenik, key=take_second))\nfor i in range(100000):\n novi_element = random.randint(1, 10000), get_random_name()\n imenik.append(novi_element)\nimenik.sort(key=take_second)\nprint(imenik)\n<mask token>\nwhile True:\n mid_index = (max_index + min_index) // 2\n guess_score = imenik[mid_index][0]\n guess_name = imenik[mid_index][1]\n if guess_name == previous_guess_name:\n print('Not found')\n break\n if guess_name == name:\n print('your score is', guess_score)\n break\n elif name > guess_name:\n min_index = mid_index\n else:\n max_index = mid_index\n previous_guess_name = guess_name\n counter += 1\nprint('Number of comparisons', counter)\nprint('after')\n<mask token>\nfor i in range(len(imenik)):\n counter += 1\n if imenik[i][1] == name:\n print('your score is', guess_score)\n found = True\n break\nif not found:\n print('Not found')\nprint('Number of comparisons after', counter)\n",
"step-3": "<mask token>\n\n\ndef take_second(element):\n return element[1]\n\n\n<mask token>\n\n\ndef get_random_name():\n name = ''\n for i in range(random.randint(5, 15)):\n name += random.choice(string.ascii_letters)\n return name\n\n\nimenik = [(777, 'zejneba'), (324, 'fahro'), (23, 'fatih'), (2334, 'muamer'),\n (435, 'kerim'), (4568, 'zzzzzzz')]\nprint(sorted(imenik, key=take_second))\nfor i in range(100000):\n novi_element = random.randint(1, 10000), get_random_name()\n imenik.append(novi_element)\nimenik.sort(key=take_second)\nprint(imenik)\nname = input('enter a name: ')\nmin_index = 0\nmax_index = len(imenik)\nprevious_guess_name = ''\ncounter = 0\nwhile True:\n mid_index = (max_index + min_index) // 2\n guess_score = imenik[mid_index][0]\n guess_name = imenik[mid_index][1]\n if guess_name == previous_guess_name:\n print('Not found')\n break\n if guess_name == name:\n print('your score is', guess_score)\n break\n elif name > guess_name:\n min_index = mid_index\n else:\n max_index = mid_index\n previous_guess_name = guess_name\n counter += 1\nprint('Number of comparisons', counter)\nprint('after')\nfound = False\ncounter = 0\nfor i in range(len(imenik)):\n counter += 1\n if imenik[i][1] == name:\n print('your score is', guess_score)\n found = True\n break\nif not found:\n print('Not found')\nprint('Number of comparisons after', counter)\n",
"step-4": "import random\n\n\ndef take_second(element):\n return element[1]\n\n\nimport string\n\n\ndef get_random_name():\n name = ''\n for i in range(random.randint(5, 15)):\n name += random.choice(string.ascii_letters)\n return name\n\n\nimenik = [(777, 'zejneba'), (324, 'fahro'), (23, 'fatih'), (2334, 'muamer'),\n (435, 'kerim'), (4568, 'zzzzzzz')]\nprint(sorted(imenik, key=take_second))\nfor i in range(100000):\n novi_element = random.randint(1, 10000), get_random_name()\n imenik.append(novi_element)\nimenik.sort(key=take_second)\nprint(imenik)\nname = input('enter a name: ')\nmin_index = 0\nmax_index = len(imenik)\nprevious_guess_name = ''\ncounter = 0\nwhile True:\n mid_index = (max_index + min_index) // 2\n guess_score = imenik[mid_index][0]\n guess_name = imenik[mid_index][1]\n if guess_name == previous_guess_name:\n print('Not found')\n break\n if guess_name == name:\n print('your score is', guess_score)\n break\n elif name > guess_name:\n min_index = mid_index\n else:\n max_index = mid_index\n previous_guess_name = guess_name\n counter += 1\nprint('Number of comparisons', counter)\nprint('after')\nfound = False\ncounter = 0\nfor i in range(len(imenik)):\n counter += 1\n if imenik[i][1] == name:\n print('your score is', guess_score)\n found = True\n break\nif not found:\n print('Not found')\nprint('Number of comparisons after', counter)\n",
"step-5": "import random\n\n\ndef take_second(element):\n return element[1]\n\n\nimport string\n\n\ndef get_random_name():\n name = \"\"\n for i in range(random.randint(5, 15)):\n name += random.choice(string.ascii_letters)\n return name\n\n\nimenik = [(777, \"zejneba\"), (324, \"fahro\"), (23, \"fatih\"), (2334, \"muamer\"), (435, \"kerim\"),(4568,\"zzzzzzz\")]\n\nprint(sorted(imenik,key=take_second))\nfor i in range(100000):\n novi_element = (random.randint(1, 10000), get_random_name())\n imenik.append(novi_element)\n\nimenik.sort(key=take_second)\nprint(imenik)\n\nname = input('enter a name: ')\n\nmin_index = 0\nmax_index = len(imenik)\n\nprevious_guess_name = \"\"\ncounter = 0\nwhile True:\n\n mid_index = (max_index + min_index) // 2\n guess_score = imenik[mid_index][0]\n guess_name = imenik[mid_index][1]\n\n if guess_name == previous_guess_name:\n print(\"Not found\")\n break\n\n if guess_name == name:\n print(\"your score is\", guess_score)\n break\n elif name > guess_name:\n min_index = mid_index\n else:\n max_index = mid_index\n\n previous_guess_name = guess_name\n counter += 1\n\nprint(\"Number of comparisons\", counter)\n\nprint(\"after\")\nfound = False\ncounter = 0\nfor i in range(len(imenik)):\n counter += 1\n if imenik[i][1] == name:\n print(\"your score is\", guess_score)\n found = True\n break\n\nif not found:\n print(\"Not found\")\n\nprint(\"Number of comparisons after\", counter)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python3
import argparse
import boutvecma
import easyvvuq as uq
import chaospy
import os
import numpy as np
import time
from dask.distributed import Client
from dask_jobqueue import SLURMCluster
import matplotlib.pyplot as plt
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="EasyVVUQ applied to BOUT++")
parser.add_argument(
"--batch",
"-b",
help="Run on a batch (SLURM) system",
action="store_true",
default=False,
)
args = parser.parse_args()
campaign = uq.CampaignDask(name="Conduction.")
print(f"Running in {campaign.campaign_dir}")
encoder = boutvecma.BOUTEncoder(template_input="models/conduction/data/BOUT.inp")
decoder = boutvecma.BOUTDecoder(variables=["T"])
params = {
"conduction:chi": {"type": "float", "min": 0.0, "max": 1e3, "default": 1.0},
"T:scale": {"type": "float", "min": 0.0, "max": 1e3, "default": 1.0},
"T:gauss_width": {"type": "float", "min": 0.0, "max": 1e3, "default": 0.2},
"T:gauss_centre": {
"type": "float",
"min": 0.0,
"max": 2 * np.pi,
"default": np.pi,
},
}
campaign.add_app("1D_conduction", params=params, encoder=encoder, decoder=decoder)
vary = {
"conduction:chi": chaospy.Uniform(0.2, 4.0),
"T:scale": chaospy.Uniform(0.5, 1.5),
"T:gauss_width": chaospy.Uniform(0.01, 0.4),
"T:gauss_centre": chaospy.Uniform(0.0, 2 * np.pi),
}
sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)
campaign.set_sampler(sampler)
campaign.draw_samples()
run_dirs = campaign.populate_runs_dir()
print(f"Created run directories: {run_dirs}")
if args.batch:
# Example of use on Viking
cluster = SLURMCluster(
job_extra=[
"--job-name=VVUQ",
"--account=PHYS-YPIRSE-2019",
],
cores=1,
memory="1 GB",
processes=1,
walltime="00:10:00",
interface="ib0",
)
cluster.scale(16)
print(f"Job script:\n{cluster.job_script()}")
client = Client(cluster)
else:
client = Client(processes=True, threads_per_worker=1)
print(client)
time_start = time.time()
campaign.apply_for_each_run_dir(
uq.actions.ExecuteLocal(
os.path.abspath("build/models/conduction/conduction -q -q -q -d .")
),
client,
)
client.close()
time_end = time.time()
print(f"Finished, took {time_end - time_start}")
campaign.collate()
campaign.apply_analysis(uq.analysis.PCEAnalysis(sampler=sampler, qoi_cols=["T"]))
results = campaign.get_last_analysis()
state_filename = os.path.join(campaign.campaign_dir, "campaign_state.json")
campaign.save_state(state_filename)
plt.figure()
results.plot_moments(
"T", xlabel=r"$\rho$", filename=f"{campaign.campaign_dir}/moments.png"
)
plt.figure()
results.plot_sobols_first(
"T", xlabel=r"$\rho$", filename=f"{campaign.campaign_dir}/sobols_first.png"
)
|
normal
|
{
"blob_id": "416f4c6bbd2f2b9562ab2d1477df4ebc45070d8d",
"index": 5060,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='EasyVVUQ applied to BOUT++')\n parser.add_argument('--batch', '-b', help=\n 'Run on a batch (SLURM) system', action='store_true', default=False)\n args = parser.parse_args()\n campaign = uq.CampaignDask(name='Conduction.')\n print(f'Running in {campaign.campaign_dir}')\n encoder = boutvecma.BOUTEncoder(template_input=\n 'models/conduction/data/BOUT.inp')\n decoder = boutvecma.BOUTDecoder(variables=['T'])\n params = {'conduction:chi': {'type': 'float', 'min': 0.0, 'max': 1000.0,\n 'default': 1.0}, 'T:scale': {'type': 'float', 'min': 0.0, 'max': \n 1000.0, 'default': 1.0}, 'T:gauss_width': {'type': 'float', 'min': \n 0.0, 'max': 1000.0, 'default': 0.2}, 'T:gauss_centre': {'type':\n 'float', 'min': 0.0, 'max': 2 * np.pi, 'default': np.pi}}\n campaign.add_app('1D_conduction', params=params, encoder=encoder,\n decoder=decoder)\n vary = {'conduction:chi': chaospy.Uniform(0.2, 4.0), 'T:scale': chaospy\n .Uniform(0.5, 1.5), 'T:gauss_width': chaospy.Uniform(0.01, 0.4),\n 'T:gauss_centre': chaospy.Uniform(0.0, 2 * np.pi)}\n sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)\n campaign.set_sampler(sampler)\n campaign.draw_samples()\n run_dirs = campaign.populate_runs_dir()\n print(f'Created run directories: {run_dirs}')\n if args.batch:\n cluster = SLURMCluster(job_extra=['--job-name=VVUQ',\n '--account=PHYS-YPIRSE-2019'], cores=1, memory='1 GB',\n processes=1, walltime='00:10:00', interface='ib0')\n cluster.scale(16)\n print(f'Job script:\\n{cluster.job_script()}')\n client = Client(cluster)\n else:\n client = Client(processes=True, threads_per_worker=1)\n print(client)\n time_start = time.time()\n campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(os.path.abspath\n ('build/models/conduction/conduction -q -q -q -d .')), client)\n client.close()\n time_end = time.time()\n print(f'Finished, took {time_end - time_start}')\n campaign.collate()\n campaign.apply_analysis(uq.analysis.PCEAnalysis(sampler=sampler,\n qoi_cols=['T']))\n results = campaign.get_last_analysis()\n state_filename = os.path.join(campaign.campaign_dir, 'campaign_state.json')\n campaign.save_state(state_filename)\n plt.figure()\n results.plot_moments('T', xlabel='$\\\\rho$', filename=\n f'{campaign.campaign_dir}/moments.png')\n plt.figure()\n results.plot_sobols_first('T', xlabel='$\\\\rho$', filename=\n f'{campaign.campaign_dir}/sobols_first.png')\n",
"step-3": "import argparse\nimport boutvecma\nimport easyvvuq as uq\nimport chaospy\nimport os\nimport numpy as np\nimport time\nfrom dask.distributed import Client\nfrom dask_jobqueue import SLURMCluster\nimport matplotlib.pyplot as plt\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='EasyVVUQ applied to BOUT++')\n parser.add_argument('--batch', '-b', help=\n 'Run on a batch (SLURM) system', action='store_true', default=False)\n args = parser.parse_args()\n campaign = uq.CampaignDask(name='Conduction.')\n print(f'Running in {campaign.campaign_dir}')\n encoder = boutvecma.BOUTEncoder(template_input=\n 'models/conduction/data/BOUT.inp')\n decoder = boutvecma.BOUTDecoder(variables=['T'])\n params = {'conduction:chi': {'type': 'float', 'min': 0.0, 'max': 1000.0,\n 'default': 1.0}, 'T:scale': {'type': 'float', 'min': 0.0, 'max': \n 1000.0, 'default': 1.0}, 'T:gauss_width': {'type': 'float', 'min': \n 0.0, 'max': 1000.0, 'default': 0.2}, 'T:gauss_centre': {'type':\n 'float', 'min': 0.0, 'max': 2 * np.pi, 'default': np.pi}}\n campaign.add_app('1D_conduction', params=params, encoder=encoder,\n decoder=decoder)\n vary = {'conduction:chi': chaospy.Uniform(0.2, 4.0), 'T:scale': chaospy\n .Uniform(0.5, 1.5), 'T:gauss_width': chaospy.Uniform(0.01, 0.4),\n 'T:gauss_centre': chaospy.Uniform(0.0, 2 * np.pi)}\n sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)\n campaign.set_sampler(sampler)\n campaign.draw_samples()\n run_dirs = campaign.populate_runs_dir()\n print(f'Created run directories: {run_dirs}')\n if args.batch:\n cluster = SLURMCluster(job_extra=['--job-name=VVUQ',\n '--account=PHYS-YPIRSE-2019'], cores=1, memory='1 GB',\n processes=1, walltime='00:10:00', interface='ib0')\n cluster.scale(16)\n print(f'Job script:\\n{cluster.job_script()}')\n client = Client(cluster)\n else:\n client = Client(processes=True, threads_per_worker=1)\n print(client)\n time_start = time.time()\n campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(os.path.abspath\n ('build/models/conduction/conduction -q -q -q -d .')), client)\n client.close()\n time_end = time.time()\n print(f'Finished, took {time_end - time_start}')\n campaign.collate()\n campaign.apply_analysis(uq.analysis.PCEAnalysis(sampler=sampler,\n qoi_cols=['T']))\n results = campaign.get_last_analysis()\n state_filename = os.path.join(campaign.campaign_dir, 'campaign_state.json')\n campaign.save_state(state_filename)\n plt.figure()\n results.plot_moments('T', xlabel='$\\\\rho$', filename=\n f'{campaign.campaign_dir}/moments.png')\n plt.figure()\n results.plot_sobols_first('T', xlabel='$\\\\rho$', filename=\n f'{campaign.campaign_dir}/sobols_first.png')\n",
"step-4": "#!/usr/bin/env python3\n\nimport argparse\nimport boutvecma\nimport easyvvuq as uq\nimport chaospy\nimport os\nimport numpy as np\nimport time\nfrom dask.distributed import Client\nfrom dask_jobqueue import SLURMCluster\nimport matplotlib.pyplot as plt\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"EasyVVUQ applied to BOUT++\")\n parser.add_argument(\n \"--batch\",\n \"-b\",\n help=\"Run on a batch (SLURM) system\",\n action=\"store_true\",\n default=False,\n )\n args = parser.parse_args()\n\n campaign = uq.CampaignDask(name=\"Conduction.\")\n print(f\"Running in {campaign.campaign_dir}\")\n encoder = boutvecma.BOUTEncoder(template_input=\"models/conduction/data/BOUT.inp\")\n decoder = boutvecma.BOUTDecoder(variables=[\"T\"])\n params = {\n \"conduction:chi\": {\"type\": \"float\", \"min\": 0.0, \"max\": 1e3, \"default\": 1.0},\n \"T:scale\": {\"type\": \"float\", \"min\": 0.0, \"max\": 1e3, \"default\": 1.0},\n \"T:gauss_width\": {\"type\": \"float\", \"min\": 0.0, \"max\": 1e3, \"default\": 0.2},\n \"T:gauss_centre\": {\n \"type\": \"float\",\n \"min\": 0.0,\n \"max\": 2 * np.pi,\n \"default\": np.pi,\n },\n }\n\n campaign.add_app(\"1D_conduction\", params=params, encoder=encoder, decoder=decoder)\n\n vary = {\n \"conduction:chi\": chaospy.Uniform(0.2, 4.0),\n \"T:scale\": chaospy.Uniform(0.5, 1.5),\n \"T:gauss_width\": chaospy.Uniform(0.01, 0.4),\n \"T:gauss_centre\": chaospy.Uniform(0.0, 2 * np.pi),\n }\n\n sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)\n campaign.set_sampler(sampler)\n\n campaign.draw_samples()\n\n run_dirs = campaign.populate_runs_dir()\n\n print(f\"Created run directories: {run_dirs}\")\n\n if args.batch:\n # Example of use on Viking\n cluster = SLURMCluster(\n job_extra=[\n \"--job-name=VVUQ\",\n \"--account=PHYS-YPIRSE-2019\",\n ],\n cores=1,\n memory=\"1 GB\",\n processes=1,\n walltime=\"00:10:00\",\n interface=\"ib0\",\n )\n cluster.scale(16)\n print(f\"Job script:\\n{cluster.job_script()}\")\n client = Client(cluster)\n else:\n client = Client(processes=True, threads_per_worker=1)\n\n print(client)\n\n time_start = time.time()\n campaign.apply_for_each_run_dir(\n uq.actions.ExecuteLocal(\n os.path.abspath(\"build/models/conduction/conduction -q -q -q -d .\")\n ),\n client,\n )\n client.close()\n\n time_end = time.time()\n\n print(f\"Finished, took {time_end - time_start}\")\n\n campaign.collate()\n\n campaign.apply_analysis(uq.analysis.PCEAnalysis(sampler=sampler, qoi_cols=[\"T\"]))\n\n results = campaign.get_last_analysis()\n\n state_filename = os.path.join(campaign.campaign_dir, \"campaign_state.json\")\n campaign.save_state(state_filename)\n\n plt.figure()\n results.plot_moments(\n \"T\", xlabel=r\"$\\rho$\", filename=f\"{campaign.campaign_dir}/moments.png\"\n )\n plt.figure()\n results.plot_sobols_first(\n \"T\", xlabel=r\"$\\rho$\", filename=f\"{campaign.campaign_dir}/sobols_first.png\"\n )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
from sklearn.svm import SVR
# In[2]:
from sklearn.preprocessing import StandardScaler
# In[3]:
#import matplotlib.pyplot as plt
# %matplotlib inline
# In[90]:
aapl = pd.read_csv('return_fcast.csv')
# In[79]:
y = aapl['return']
# In[80]:
X = aapl[['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI']]
# In[14]:
X = X.reshape((2475,8))
# In[21]:
y = np.array(y).reshape((2475,1))
# In[22]:
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y)
# In[25]:
regressor = SVR(kernel='rbf')
regressor.fit(X,y)
# In[27]:
testing_df = pd.read_csv('testing.csv')
# In[28]:
X_test = testing_df[['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI']]
# In[29]:
X_test.shape
# In[33]:
X_test = sc_X.fit_transform(X_test)
# In[34]:
# In[35]:
y_pred
# In[36]:
y_pred = regressor.predict(X_test)
y_pred = sc_y.inverse_transform(y_pred)
# In[37]:
y_pred
# In[38]:
for i in range(len(y_pred)):
print(y_pred[i])
# In[3]:
# run SVR for the AXP-...DD stocks
axp = pd.DataFrame(columns=aapl.columns)
# In[3]:
#stocks = ['AAPL','AXP','BA','CAT','CSCO','CVX','DIS','DD','GS']
stocks = ['MCD']
# In[4]:
# read indicators 09-18
ADXR = pd.read_csv('data/djADXR.csv')
ATR = pd.read_csv('data/djATR.csv')
SMA = pd.read_csv('data/sma.csv')
Hurst = pd.read_csv('data/hurst.csv')
EMA = pd.read_csv('data/ema.csv')
MACD = pd.read_csv('data/macd.csv')
VIX = pd.read_csv('data/vix.csv')
RSI = pd.read_csv('data/rsi.csv')
# In[5]:
VIX.iloc[40:2476]
# In[121]:
# read stock prices 09-18
# In[3]:
dj_df = pd.read_csv('data/djindex.csv')
# In[7]:
dj_df = pd.read_csv('data/djindex.csv')
#dj_df = dj_df[['Date','AAPL','AXP','BA','CAT','CSCO','CVX','DIS','DD','GS']].iloc[39:2516]
#dj_df = dj_df[['Date','MCD']].iloc[39:2516]
return_df = pd.DataFrame(columns=dj_df.columns[1:],index=dj_df['Date'])
for i in dj_df.columns[1:]:
return_df[i] = list(np.log(dj_df[i]/dj_df[i].shift(1)))
# In[9]:
return_df = return_df.dropna()
# In[10]:
cov = return_df.cov()
cov.to_csv('cov0918.csv')
# In[41]:
list(dj_df.columns[1:])
# In[39]:
dj_df.index = dj_df
# In[65]:
stock18 = dj_df[dj_df.columns[1:]].iloc[-252:]
# In[67]:
stock18
# In[44]:
dj_df
# In[68]:
dj_df = pd.read_csv('data/djindex.csv')
dj_df.index = dj_df['Date']
return_df = pd.DataFrame(columns=list(dj_df.columns[1:]))
for i in dj_df.columns[1:]:
return_df[i] = list(np.log(stock18[i]/stock18[i].shift(1)))
# In[69]:
return_df = return_df.dropna()
# In[70]:
return_df
# In[71]:
cov = return_df.cov()
# In[72]:
cov
# In[73]:
cov.to_csv('cov.csv')
# In[10]:
# store return prediction
result = pd.DataFrame(columns=stocks)
# In[21]:
# indicators forecast
ADXR_f = pd.read_csv('data/tesingadxr740.csv')
ATR_f = pd.read_csv('data/tesingatr740.csv')
SMA_f = pd.read_csv('data/sma_forecast.csv')
Hurst_f = pd.read_csv('data/hurst_forecast.csv')
EMA_f = pd.read_csv('data/ema_fcast.csv')
MACD_f = pd.read_csv('data/macd_fcast.csv')
VIX_f = pd.read_csv('data/vix_fcast.csv')
RSI_f = pd.read_csv('data/rsi_fcast.csv')
# In[22]:
# Initialized scaler in order to transform variables into (-1,1)
sc_X = StandardScaler()
sc_y = StandardScaler()
regressor = SVR(kernel='rbf')
temp = pd.DataFrame(columns=['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI'])
temp['VIX'] = list(VIX['VIX'].iloc[40:2516]) # all stocks share the same vix
temp_f = pd.DataFrame(columns=['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI'])
temp_f['VIX'] = list(VIX_f['VIX Forecast'].iloc[0:250])
for i in ['MCD']: # iterate each stock
# First, extract training data set, including indicators(X) and return(y)
temp['ADXR'] = list(ADXR[i].iloc[40:2516])
temp['ATR'] = list(ATR[i].iloc[40:2516])
temp['SMA'] = list(SMA['SMA_'+i].iloc[40:2516])
temp['Hurst'] = list(Hurst['Hurst'+i].iloc[40:2516])
temp['EMA'] = list(EMA[i].iloc[40:2516])
temp['MACD'] = list(MACD[i].iloc[40:2516])
temp['RSI'] = list(RSI[i].iloc[40:2516])
# transformation
X = sc_X.fit_transform(temp[['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI']])
#print(X.shape)
y = sc_y.fit_transform(np.array(return_df[i].dropna()).reshape(2476,1))
#print(y.shape)
# training
regressor.fit(X,y)
# predicting
temp_f['ADXR'] = list(ADXR_f[i+'.1976.10.11.20.00.00'].iloc[0:250])
temp_f['ATR'] = list(ATR_f[i+'.1976.11.07.19.00.00'].iloc[0:250])
temp_f['SMA'] = list(SMA_f[i].iloc[0:250])
temp_f['Hurst'] = list(Hurst_f[i].iloc[0:250])
temp_f['EMA'] = list(EMA_f[i].iloc[0:250])
temp_f['MACD'] = list(MACD_f[i].iloc[0:250])
temp_f['RSI'] = list(RSI_f[i].iloc[0:250])
X_test = temp_f[['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI']]
X_test = sc_X.fit_transform(X_test)
y_pred = regressor.predict(X_test)
y_pred = sc_y.inverse_transform(y_pred)
# write predicted returns into result
result[i] = y_pred
print(i)
# In[23]:
result.to_csv('mac_fcast.csv')
# In[161]:
ADXR['AAPL'].iloc[40:2516]
# In[ ]:
|
normal
|
{
"blob_id": "4a8d203872a1e86c54142dea6cd04c1cac6bcfb2",
"index": 5067,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nregressor.fit(X, y)\n<mask token>\nX_test.shape\n<mask token>\ny_pred\n<mask token>\ny_pred\nfor i in range(len(y_pred)):\n print(y_pred[i])\n<mask token>\nVIX.iloc[40:2476]\n<mask token>\nfor i in dj_df.columns[1:]:\n return_df[i] = list(np.log(dj_df[i] / dj_df[i].shift(1)))\n<mask token>\ncov.to_csv('cov0918.csv')\nlist(dj_df.columns[1:])\n<mask token>\nstock18\ndj_df\n<mask token>\nfor i in dj_df.columns[1:]:\n return_df[i] = list(np.log(stock18[i] / stock18[i].shift(1)))\n<mask token>\nreturn_df\n<mask token>\ncov\ncov.to_csv('cov.csv')\n<mask token>\nfor i in ['MCD']:\n temp['ADXR'] = list(ADXR[i].iloc[40:2516])\n temp['ATR'] = list(ATR[i].iloc[40:2516])\n temp['SMA'] = list(SMA['SMA_' + i].iloc[40:2516])\n temp['Hurst'] = list(Hurst['Hurst' + i].iloc[40:2516])\n temp['EMA'] = list(EMA[i].iloc[40:2516])\n temp['MACD'] = list(MACD[i].iloc[40:2516])\n temp['RSI'] = list(RSI[i].iloc[40:2516])\n X = sc_X.fit_transform(temp[['ADXR', 'ATR', 'SMA', 'Hurst', 'EMA',\n 'MACD', 'VIX', 'RSI']])\n y = sc_y.fit_transform(np.array(return_df[i].dropna()).reshape(2476, 1))\n regressor.fit(X, y)\n temp_f['ADXR'] = list(ADXR_f[i + '.1976.10.11.20.00.00'].iloc[0:250])\n temp_f['ATR'] = list(ATR_f[i + '.1976.11.07.19.00.00'].iloc[0:250])\n temp_f['SMA'] = list(SMA_f[i].iloc[0:250])\n temp_f['Hurst'] = list(Hurst_f[i].iloc[0:250])\n temp_f['EMA'] = list(EMA_f[i].iloc[0:250])\n temp_f['MACD'] = list(MACD_f[i].iloc[0:250])\n temp_f['RSI'] = list(RSI_f[i].iloc[0:250])\n X_test = temp_f[['ADXR', 'ATR', 'SMA', 'Hurst', 'EMA', 'MACD', 'VIX',\n 'RSI']]\n X_test = sc_X.fit_transform(X_test)\n y_pred = regressor.predict(X_test)\n y_pred = sc_y.inverse_transform(y_pred)\n result[i] = y_pred\n print(i)\nresult.to_csv('mac_fcast.csv')\nADXR['AAPL'].iloc[40:2516]\n",
"step-3": "<mask token>\naapl = pd.read_csv('return_fcast.csv')\ny = aapl['return']\nX = aapl[['ADXR', 'ATR', 'SMA', 'Hurst', 'EMA', 'MACD', 'VIX', 'RSI']]\nX = X.reshape((2475, 8))\ny = np.array(y).reshape((2475, 1))\nsc_X = StandardScaler()\nsc_y = StandardScaler()\nX = sc_X.fit_transform(X)\ny = sc_y.fit_transform(y)\nregressor = SVR(kernel='rbf')\nregressor.fit(X, y)\ntesting_df = pd.read_csv('testing.csv')\nX_test = testing_df[['ADXR', 'ATR', 'SMA', 'Hurst', 'EMA', 'MACD', 'VIX',\n 'RSI']]\nX_test.shape\nX_test = sc_X.fit_transform(X_test)\ny_pred\ny_pred = regressor.predict(X_test)\ny_pred = sc_y.inverse_transform(y_pred)\ny_pred\nfor i in range(len(y_pred)):\n print(y_pred[i])\naxp = pd.DataFrame(columns=aapl.columns)\nstocks = ['MCD']\nADXR = pd.read_csv('data/djADXR.csv')\nATR = pd.read_csv('data/djATR.csv')\nSMA = pd.read_csv('data/sma.csv')\nHurst = pd.read_csv('data/hurst.csv')\nEMA = pd.read_csv('data/ema.csv')\nMACD = pd.read_csv('data/macd.csv')\nVIX = pd.read_csv('data/vix.csv')\nRSI = pd.read_csv('data/rsi.csv')\nVIX.iloc[40:2476]\ndj_df = pd.read_csv('data/djindex.csv')\ndj_df = pd.read_csv('data/djindex.csv')\nreturn_df = pd.DataFrame(columns=dj_df.columns[1:], index=dj_df['Date'])\nfor i in dj_df.columns[1:]:\n return_df[i] = list(np.log(dj_df[i] / dj_df[i].shift(1)))\nreturn_df = return_df.dropna()\ncov = return_df.cov()\ncov.to_csv('cov0918.csv')\nlist(dj_df.columns[1:])\ndj_df.index = dj_df\nstock18 = dj_df[dj_df.columns[1:]].iloc[-252:]\nstock18\ndj_df\ndj_df = pd.read_csv('data/djindex.csv')\ndj_df.index = dj_df['Date']\nreturn_df = pd.DataFrame(columns=list(dj_df.columns[1:]))\nfor i in dj_df.columns[1:]:\n return_df[i] = list(np.log(stock18[i] / stock18[i].shift(1)))\nreturn_df = return_df.dropna()\nreturn_df\ncov = return_df.cov()\ncov\ncov.to_csv('cov.csv')\nresult = pd.DataFrame(columns=stocks)\nADXR_f = pd.read_csv('data/tesingadxr740.csv')\nATR_f = pd.read_csv('data/tesingatr740.csv')\nSMA_f = pd.read_csv('data/sma_forecast.csv')\nHurst_f = pd.read_csv('data/hurst_forecast.csv')\nEMA_f = pd.read_csv('data/ema_fcast.csv')\nMACD_f = pd.read_csv('data/macd_fcast.csv')\nVIX_f = pd.read_csv('data/vix_fcast.csv')\nRSI_f = pd.read_csv('data/rsi_fcast.csv')\nsc_X = StandardScaler()\nsc_y = StandardScaler()\nregressor = SVR(kernel='rbf')\ntemp = pd.DataFrame(columns=['ADXR', 'ATR', 'SMA', 'Hurst', 'EMA', 'MACD',\n 'VIX', 'RSI'])\ntemp['VIX'] = list(VIX['VIX'].iloc[40:2516])\ntemp_f = pd.DataFrame(columns=['ADXR', 'ATR', 'SMA', 'Hurst', 'EMA', 'MACD',\n 'VIX', 'RSI'])\ntemp_f['VIX'] = list(VIX_f['VIX Forecast'].iloc[0:250])\nfor i in ['MCD']:\n temp['ADXR'] = list(ADXR[i].iloc[40:2516])\n temp['ATR'] = list(ATR[i].iloc[40:2516])\n temp['SMA'] = list(SMA['SMA_' + i].iloc[40:2516])\n temp['Hurst'] = list(Hurst['Hurst' + i].iloc[40:2516])\n temp['EMA'] = list(EMA[i].iloc[40:2516])\n temp['MACD'] = list(MACD[i].iloc[40:2516])\n temp['RSI'] = list(RSI[i].iloc[40:2516])\n X = sc_X.fit_transform(temp[['ADXR', 'ATR', 'SMA', 'Hurst', 'EMA',\n 'MACD', 'VIX', 'RSI']])\n y = sc_y.fit_transform(np.array(return_df[i].dropna()).reshape(2476, 1))\n regressor.fit(X, y)\n temp_f['ADXR'] = list(ADXR_f[i + '.1976.10.11.20.00.00'].iloc[0:250])\n temp_f['ATR'] = list(ATR_f[i + '.1976.11.07.19.00.00'].iloc[0:250])\n temp_f['SMA'] = list(SMA_f[i].iloc[0:250])\n temp_f['Hurst'] = list(Hurst_f[i].iloc[0:250])\n temp_f['EMA'] = list(EMA_f[i].iloc[0:250])\n temp_f['MACD'] = list(MACD_f[i].iloc[0:250])\n temp_f['RSI'] = list(RSI_f[i].iloc[0:250])\n X_test = temp_f[['ADXR', 'ATR', 'SMA', 'Hurst', 'EMA', 'MACD', 'VIX',\n 'RSI']]\n X_test = sc_X.fit_transform(X_test)\n y_pred = regressor.predict(X_test)\n y_pred = sc_y.inverse_transform(y_pred)\n result[i] = y_pred\n print(i)\nresult.to_csv('mac_fcast.csv')\nADXR['AAPL'].iloc[40:2516]\n",
"step-4": "import numpy as np\nimport pandas as pd\nfrom sklearn.svm import SVR\nfrom sklearn.preprocessing import StandardScaler\naapl = pd.read_csv('return_fcast.csv')\ny = aapl['return']\nX = aapl[['ADXR', 'ATR', 'SMA', 'Hurst', 'EMA', 'MACD', 'VIX', 'RSI']]\nX = X.reshape((2475, 8))\ny = np.array(y).reshape((2475, 1))\nsc_X = StandardScaler()\nsc_y = StandardScaler()\nX = sc_X.fit_transform(X)\ny = sc_y.fit_transform(y)\nregressor = SVR(kernel='rbf')\nregressor.fit(X, y)\ntesting_df = pd.read_csv('testing.csv')\nX_test = testing_df[['ADXR', 'ATR', 'SMA', 'Hurst', 'EMA', 'MACD', 'VIX',\n 'RSI']]\nX_test.shape\nX_test = sc_X.fit_transform(X_test)\ny_pred\ny_pred = regressor.predict(X_test)\ny_pred = sc_y.inverse_transform(y_pred)\ny_pred\nfor i in range(len(y_pred)):\n print(y_pred[i])\naxp = pd.DataFrame(columns=aapl.columns)\nstocks = ['MCD']\nADXR = pd.read_csv('data/djADXR.csv')\nATR = pd.read_csv('data/djATR.csv')\nSMA = pd.read_csv('data/sma.csv')\nHurst = pd.read_csv('data/hurst.csv')\nEMA = pd.read_csv('data/ema.csv')\nMACD = pd.read_csv('data/macd.csv')\nVIX = pd.read_csv('data/vix.csv')\nRSI = pd.read_csv('data/rsi.csv')\nVIX.iloc[40:2476]\ndj_df = pd.read_csv('data/djindex.csv')\ndj_df = pd.read_csv('data/djindex.csv')\nreturn_df = pd.DataFrame(columns=dj_df.columns[1:], index=dj_df['Date'])\nfor i in dj_df.columns[1:]:\n return_df[i] = list(np.log(dj_df[i] / dj_df[i].shift(1)))\nreturn_df = return_df.dropna()\ncov = return_df.cov()\ncov.to_csv('cov0918.csv')\nlist(dj_df.columns[1:])\ndj_df.index = dj_df\nstock18 = dj_df[dj_df.columns[1:]].iloc[-252:]\nstock18\ndj_df\ndj_df = pd.read_csv('data/djindex.csv')\ndj_df.index = dj_df['Date']\nreturn_df = pd.DataFrame(columns=list(dj_df.columns[1:]))\nfor i in dj_df.columns[1:]:\n return_df[i] = list(np.log(stock18[i] / stock18[i].shift(1)))\nreturn_df = return_df.dropna()\nreturn_df\ncov = return_df.cov()\ncov\ncov.to_csv('cov.csv')\nresult = pd.DataFrame(columns=stocks)\nADXR_f = pd.read_csv('data/tesingadxr740.csv')\nATR_f = pd.read_csv('data/tesingatr740.csv')\nSMA_f = pd.read_csv('data/sma_forecast.csv')\nHurst_f = pd.read_csv('data/hurst_forecast.csv')\nEMA_f = pd.read_csv('data/ema_fcast.csv')\nMACD_f = pd.read_csv('data/macd_fcast.csv')\nVIX_f = pd.read_csv('data/vix_fcast.csv')\nRSI_f = pd.read_csv('data/rsi_fcast.csv')\nsc_X = StandardScaler()\nsc_y = StandardScaler()\nregressor = SVR(kernel='rbf')\ntemp = pd.DataFrame(columns=['ADXR', 'ATR', 'SMA', 'Hurst', 'EMA', 'MACD',\n 'VIX', 'RSI'])\ntemp['VIX'] = list(VIX['VIX'].iloc[40:2516])\ntemp_f = pd.DataFrame(columns=['ADXR', 'ATR', 'SMA', 'Hurst', 'EMA', 'MACD',\n 'VIX', 'RSI'])\ntemp_f['VIX'] = list(VIX_f['VIX Forecast'].iloc[0:250])\nfor i in ['MCD']:\n temp['ADXR'] = list(ADXR[i].iloc[40:2516])\n temp['ATR'] = list(ATR[i].iloc[40:2516])\n temp['SMA'] = list(SMA['SMA_' + i].iloc[40:2516])\n temp['Hurst'] = list(Hurst['Hurst' + i].iloc[40:2516])\n temp['EMA'] = list(EMA[i].iloc[40:2516])\n temp['MACD'] = list(MACD[i].iloc[40:2516])\n temp['RSI'] = list(RSI[i].iloc[40:2516])\n X = sc_X.fit_transform(temp[['ADXR', 'ATR', 'SMA', 'Hurst', 'EMA',\n 'MACD', 'VIX', 'RSI']])\n y = sc_y.fit_transform(np.array(return_df[i].dropna()).reshape(2476, 1))\n regressor.fit(X, y)\n temp_f['ADXR'] = list(ADXR_f[i + '.1976.10.11.20.00.00'].iloc[0:250])\n temp_f['ATR'] = list(ATR_f[i + '.1976.11.07.19.00.00'].iloc[0:250])\n temp_f['SMA'] = list(SMA_f[i].iloc[0:250])\n temp_f['Hurst'] = list(Hurst_f[i].iloc[0:250])\n temp_f['EMA'] = list(EMA_f[i].iloc[0:250])\n temp_f['MACD'] = list(MACD_f[i].iloc[0:250])\n temp_f['RSI'] = list(RSI_f[i].iloc[0:250])\n X_test = temp_f[['ADXR', 'ATR', 'SMA', 'Hurst', 'EMA', 'MACD', 'VIX',\n 'RSI']]\n X_test = sc_X.fit_transform(X_test)\n y_pred = regressor.predict(X_test)\n y_pred = sc_y.inverse_transform(y_pred)\n result[i] = y_pred\n print(i)\nresult.to_csv('mac_fcast.csv')\nADXR['AAPL'].iloc[40:2516]\n",
"step-5": "\n# coding: utf-8\n\n# In[1]:\n\nimport numpy as np\n\nimport pandas as pd\nfrom sklearn.svm import SVR\n\n\n# In[2]:\n\nfrom sklearn.preprocessing import StandardScaler\n\n\n# In[3]:\n\n#import matplotlib.pyplot as plt\n# %matplotlib inline\n\n\n# In[90]:\n\naapl = pd.read_csv('return_fcast.csv')\n\n\n# In[79]:\n\ny = aapl['return']\n\n\n# In[80]:\n\nX = aapl[['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI']]\n\n\n# In[14]:\n\nX = X.reshape((2475,8))\n\n\n# In[21]:\n\ny = np.array(y).reshape((2475,1))\n\n\n# In[22]:\n\nsc_X = StandardScaler()\nsc_y = StandardScaler()\nX = sc_X.fit_transform(X)\ny = sc_y.fit_transform(y)\n\n\n# In[25]:\n\nregressor = SVR(kernel='rbf')\nregressor.fit(X,y)\n\n\n# In[27]:\n\ntesting_df = pd.read_csv('testing.csv')\n\n\n# In[28]:\n\nX_test = testing_df[['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI']]\n\n\n# In[29]:\n\nX_test.shape\n\n\n# In[33]:\n\nX_test = sc_X.fit_transform(X_test)\n\n\n# In[34]:\n\n\n\n\n# In[35]:\n\ny_pred\n\n\n# In[36]:\n\ny_pred = regressor.predict(X_test)\ny_pred = sc_y.inverse_transform(y_pred)\n\n\n# In[37]:\n\ny_pred\n\n\n# In[38]:\n\nfor i in range(len(y_pred)):\n print(y_pred[i])\n\n\n# In[3]:\n\n# run SVR for the AXP-...DD stocks\naxp = pd.DataFrame(columns=aapl.columns)\n\n\n# In[3]:\n\n#stocks = ['AAPL','AXP','BA','CAT','CSCO','CVX','DIS','DD','GS']\nstocks = ['MCD']\n\n\n# In[4]:\n\n# read indicators 09-18\nADXR = pd.read_csv('data/djADXR.csv')\nATR = pd.read_csv('data/djATR.csv')\nSMA = pd.read_csv('data/sma.csv')\nHurst = pd.read_csv('data/hurst.csv')\nEMA = pd.read_csv('data/ema.csv')\nMACD = pd.read_csv('data/macd.csv')\nVIX = pd.read_csv('data/vix.csv')\nRSI = pd.read_csv('data/rsi.csv')\n\n\n# In[5]:\n\nVIX.iloc[40:2476]\n\n\n# In[121]:\n\n# read stock prices 09-18\n\n\n# In[3]:\n\ndj_df = pd.read_csv('data/djindex.csv')\n\n\n# In[7]:\n\ndj_df = pd.read_csv('data/djindex.csv')\n#dj_df = dj_df[['Date','AAPL','AXP','BA','CAT','CSCO','CVX','DIS','DD','GS']].iloc[39:2516]\n#dj_df = dj_df[['Date','MCD']].iloc[39:2516]\n\nreturn_df = pd.DataFrame(columns=dj_df.columns[1:],index=dj_df['Date'])\n\nfor i in dj_df.columns[1:]:\n return_df[i] = list(np.log(dj_df[i]/dj_df[i].shift(1)))\n\n\n# In[9]:\n\nreturn_df = return_df.dropna()\n\n\n# In[10]:\n\ncov = return_df.cov()\ncov.to_csv('cov0918.csv')\n\n\n# In[41]:\n\nlist(dj_df.columns[1:])\n\n\n# In[39]:\n\ndj_df.index = dj_df\n\n\n# In[65]:\n\nstock18 = dj_df[dj_df.columns[1:]].iloc[-252:]\n\n\n# In[67]:\n\nstock18\n\n\n# In[44]:\n\ndj_df\n\n\n# In[68]:\n\ndj_df = pd.read_csv('data/djindex.csv')\ndj_df.index = dj_df['Date']\n\nreturn_df = pd.DataFrame(columns=list(dj_df.columns[1:]))\n\nfor i in dj_df.columns[1:]:\n return_df[i] = list(np.log(stock18[i]/stock18[i].shift(1)))\n\n\n# In[69]:\n\nreturn_df = return_df.dropna()\n\n\n# In[70]:\n\nreturn_df\n\n\n# In[71]:\n\ncov = return_df.cov()\n\n\n# In[72]:\n\ncov\n\n\n# In[73]:\n\ncov.to_csv('cov.csv')\n\n\n# In[10]:\n\n# store return prediction\nresult = pd.DataFrame(columns=stocks)\n\n\n# In[21]:\n\n# indicators forecast\nADXR_f = pd.read_csv('data/tesingadxr740.csv')\nATR_f = pd.read_csv('data/tesingatr740.csv')\nSMA_f = pd.read_csv('data/sma_forecast.csv')\nHurst_f = pd.read_csv('data/hurst_forecast.csv')\nEMA_f = pd.read_csv('data/ema_fcast.csv')\nMACD_f = pd.read_csv('data/macd_fcast.csv')\nVIX_f = pd.read_csv('data/vix_fcast.csv')\nRSI_f = pd.read_csv('data/rsi_fcast.csv')\n\n\n# In[22]:\n\n# Initialized scaler in order to transform variables into (-1,1)\nsc_X = StandardScaler()\nsc_y = StandardScaler()\nregressor = SVR(kernel='rbf')\n\ntemp = pd.DataFrame(columns=['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI'])\ntemp['VIX'] = list(VIX['VIX'].iloc[40:2516]) # all stocks share the same vix\n\ntemp_f = pd.DataFrame(columns=['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI'])\ntemp_f['VIX'] = list(VIX_f['VIX Forecast'].iloc[0:250])\n\nfor i in ['MCD']: # iterate each stock\n \n # First, extract training data set, including indicators(X) and return(y) \n temp['ADXR'] = list(ADXR[i].iloc[40:2516])\n temp['ATR'] = list(ATR[i].iloc[40:2516])\n temp['SMA'] = list(SMA['SMA_'+i].iloc[40:2516])\n temp['Hurst'] = list(Hurst['Hurst'+i].iloc[40:2516])\n temp['EMA'] = list(EMA[i].iloc[40:2516])\n temp['MACD'] = list(MACD[i].iloc[40:2516])\n temp['RSI'] = list(RSI[i].iloc[40:2516])\n # transformation\n X = sc_X.fit_transform(temp[['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI']])\n #print(X.shape)\n y = sc_y.fit_transform(np.array(return_df[i].dropna()).reshape(2476,1))\n #print(y.shape)\n # training \n regressor.fit(X,y)\n # predicting\n temp_f['ADXR'] = list(ADXR_f[i+'.1976.10.11.20.00.00'].iloc[0:250])\n temp_f['ATR'] = list(ATR_f[i+'.1976.11.07.19.00.00'].iloc[0:250])\n temp_f['SMA'] = list(SMA_f[i].iloc[0:250])\n temp_f['Hurst'] = list(Hurst_f[i].iloc[0:250])\n temp_f['EMA'] = list(EMA_f[i].iloc[0:250])\n temp_f['MACD'] = list(MACD_f[i].iloc[0:250])\n temp_f['RSI'] = list(RSI_f[i].iloc[0:250])\n X_test = temp_f[['ADXR','ATR','SMA','Hurst','EMA','MACD','VIX','RSI']]\n X_test = sc_X.fit_transform(X_test)\n y_pred = regressor.predict(X_test)\n y_pred = sc_y.inverse_transform(y_pred)\n # write predicted returns into result\n result[i] = y_pred\n print(i)\n\n\n# In[23]:\n\nresult.to_csv('mac_fcast.csv')\n\n\n# In[161]:\n\nADXR['AAPL'].iloc[40:2516]\n\n\n# In[ ]:\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.shortcuts import render, HttpResponse, redirect
from ..login.models import *
from ..dashboard.models import *
def display(request, id):
context = {'job': Job.objects.get(id=int(id))}
return render(request, 'handy_helper_exam/display.html', context)
|
normal
|
{
"blob_id": "f1fdba1c07a29aa22ee8d0dcbd6f902aa2e8b4c2",
"index": 9342,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef display(request, id):\n context = {'job': Job.objects.get(id=int(id))}\n return render(request, 'handy_helper_exam/display.html', context)\n",
"step-3": "from django.shortcuts import render, HttpResponse, redirect\nfrom ..login.models import *\nfrom ..dashboard.models import *\n\n\ndef display(request, id):\n context = {'job': Job.objects.get(id=int(id))}\n return render(request, 'handy_helper_exam/display.html', context)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 3 16:04:19 2018
@author: khanhle
"""
# Create first network with Keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Activation
from keras.utils import np_utils
from keras.layers.convolutional import Convolution2D, ZeroPadding2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.core import Dropout, Flatten
from keras.callbacks import ModelCheckpoint
import numpy as np
from sklearn.metrics import confusion_matrix
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
print(__doc__)
import h5py
import os
import sys
from keras.models import model_from_json
#define params
trn_file = sys.argv[1]
tst_file = sys.argv[2]
json_file = sys.argv[3]
h5_file = sys.argv[4]
nb_classes = 2
nb_kernels = 3
nb_pools = 2
window_sizes = 19
# load training dataset
dataset = np.loadtxt(trn_file, delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:,0:window_sizes*20].reshape(len(dataset),1,20,window_sizes)
Y = dataset[:,window_sizes*20]
Y = np_utils.to_categorical(Y,nb_classes)
#print X,Y
#nb_classes = Y.shape[1]
#print nb_classes
# load testing dataset
dataset1 = np.loadtxt(tst_file, delimiter=",")
# split into input (X) and output (Y) variables
X1 = dataset1[:,0:window_sizes*20].reshape(len(dataset1),1,20,window_sizes)
Y1 = dataset1[:,window_sizes*20]
true_labels = np.asarray(Y1)
Y1 = np_utils.to_categorical(Y1,nb_classes)
#print('label : ', Y[i,:])
def cnn_model():
model = Sequential()
model.add(ZeroPadding2D((1,1), input_shape = (1,20,window_sizes)))
model.add(Convolution2D(32, nb_kernels, nb_kernels))
model.add(Activation('relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
# model.add(ZeroPadding2D((1,1)))
# model.add(Convolution2D(32, nb_kernels, nb_kernels, activation='relu'))
# model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))
# model.add(Activation('relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
model.add(ZeroPadding2D((1,1)))
model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))
model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
# model.add(ZeroPadding2D((1,1)))
# model.add(Convolution2D(256, nb_kernels, nb_kernels, activation='relu'))
# model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering="th"))
## add the model on top of the convolutional base
#model.add(top_model)
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(128))
#model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(nb_classes))
#model.add(BatchNormalization())
model.add(Activation('softmax'))
# f = open('model_summary.txt','w')
# f.write(str(model.summary()))
# f.close()
#model.compile(loss='categorical_crossentropy', optimizer='adadelta')
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics=['accuracy'])
return model
#plot_filters(model.layers[0],32,1)
# Fit the model
# save best weights
model = cnn_model()
#plot_model(model, to_file='model.png')
filepath = "weights.best.hdf5"
checkpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True)
# balance data
model.fit(X, Y, nb_epoch=150, batch_size=10, class_weight = 'auto', validation_data=(X1,Y1), callbacks=[checkpointer])
## evaluate the model
scores = model.evaluate(X1, Y1)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
model.load_weights(filepath)
predictions = model.predict_classes(X1)
print(confusion_matrix(true_labels, predictions))
#serialize model to JSON
model_json = model.to_json()
with open(json_file, "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(h5_file)
print("Saved model to disk")
|
normal
|
{
"blob_id": "721f23d2b6109194b8bca54b1cd04263e30cdf24",
"index": 3964,
"step-1": "<mask token>\n\n\ndef cnn_model():\n model = Sequential()\n model.add(ZeroPadding2D((1, 1), input_shape=(1, 20, window_sizes)))\n model.add(Convolution2D(32, nb_kernels, nb_kernels))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(Flatten())\n model.add(Dropout(0.5))\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics\n =['accuracy'])\n return model\n\n\n<mask token>\n",
"step-2": "<mask token>\nnp.random.seed(seed)\nprint(__doc__)\n<mask token>\n\n\ndef cnn_model():\n model = Sequential()\n model.add(ZeroPadding2D((1, 1), input_shape=(1, 20, window_sizes)))\n model.add(Convolution2D(32, nb_kernels, nb_kernels))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(Flatten())\n model.add(Dropout(0.5))\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics\n =['accuracy'])\n return model\n\n\n<mask token>\nmodel.fit(X, Y, nb_epoch=150, batch_size=10, class_weight='auto',\n validation_data=(X1, Y1), callbacks=[checkpointer])\n<mask token>\nprint('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))\nmodel.load_weights(filepath)\n<mask token>\nprint(confusion_matrix(true_labels, predictions))\n<mask token>\nwith open(json_file, 'w') as json_file:\n json_file.write(model_json)\nmodel.save_weights(h5_file)\nprint('Saved model to disk')\n",
"step-3": "<mask token>\nseed = 7\nnp.random.seed(seed)\nprint(__doc__)\n<mask token>\ntrn_file = sys.argv[1]\ntst_file = sys.argv[2]\njson_file = sys.argv[3]\nh5_file = sys.argv[4]\nnb_classes = 2\nnb_kernels = 3\nnb_pools = 2\nwindow_sizes = 19\ndataset = np.loadtxt(trn_file, delimiter=',')\nX = dataset[:, 0:window_sizes * 20].reshape(len(dataset), 1, 20, window_sizes)\nY = dataset[:, window_sizes * 20]\nY = np_utils.to_categorical(Y, nb_classes)\ndataset1 = np.loadtxt(tst_file, delimiter=',')\nX1 = dataset1[:, 0:window_sizes * 20].reshape(len(dataset1), 1, 20,\n window_sizes)\nY1 = dataset1[:, window_sizes * 20]\ntrue_labels = np.asarray(Y1)\nY1 = np_utils.to_categorical(Y1, nb_classes)\n\n\ndef cnn_model():\n model = Sequential()\n model.add(ZeroPadding2D((1, 1), input_shape=(1, 20, window_sizes)))\n model.add(Convolution2D(32, nb_kernels, nb_kernels))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(Flatten())\n model.add(Dropout(0.5))\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics\n =['accuracy'])\n return model\n\n\nmodel = cnn_model()\nfilepath = 'weights.best.hdf5'\ncheckpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,\n save_best_only=True, save_weights_only=True)\nmodel.fit(X, Y, nb_epoch=150, batch_size=10, class_weight='auto',\n validation_data=(X1, Y1), callbacks=[checkpointer])\nscores = model.evaluate(X1, Y1)\nprint('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))\nmodel.load_weights(filepath)\npredictions = model.predict_classes(X1)\nprint(confusion_matrix(true_labels, predictions))\nmodel_json = model.to_json()\nwith open(json_file, 'w') as json_file:\n json_file.write(model_json)\nmodel.save_weights(h5_file)\nprint('Saved model to disk')\n",
"step-4": "<mask token>\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Activation\nfrom keras.utils import np_utils\nfrom keras.layers.convolutional import Convolution2D, ZeroPadding2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.layers.core import Dropout, Flatten\nfrom keras.callbacks import ModelCheckpoint\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nseed = 7\nnp.random.seed(seed)\nprint(__doc__)\nimport h5py\nimport os\nimport sys\nfrom keras.models import model_from_json\ntrn_file = sys.argv[1]\ntst_file = sys.argv[2]\njson_file = sys.argv[3]\nh5_file = sys.argv[4]\nnb_classes = 2\nnb_kernels = 3\nnb_pools = 2\nwindow_sizes = 19\ndataset = np.loadtxt(trn_file, delimiter=',')\nX = dataset[:, 0:window_sizes * 20].reshape(len(dataset), 1, 20, window_sizes)\nY = dataset[:, window_sizes * 20]\nY = np_utils.to_categorical(Y, nb_classes)\ndataset1 = np.loadtxt(tst_file, delimiter=',')\nX1 = dataset1[:, 0:window_sizes * 20].reshape(len(dataset1), 1, 20,\n window_sizes)\nY1 = dataset1[:, window_sizes * 20]\ntrue_labels = np.asarray(Y1)\nY1 = np_utils.to_categorical(Y1, nb_classes)\n\n\ndef cnn_model():\n model = Sequential()\n model.add(ZeroPadding2D((1, 1), input_shape=(1, 20, window_sizes)))\n model.add(Convolution2D(32, nb_kernels, nb_kernels))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering='th'))\n model.add(Flatten())\n model.add(Dropout(0.5))\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics\n =['accuracy'])\n return model\n\n\nmodel = cnn_model()\nfilepath = 'weights.best.hdf5'\ncheckpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,\n save_best_only=True, save_weights_only=True)\nmodel.fit(X, Y, nb_epoch=150, batch_size=10, class_weight='auto',\n validation_data=(X1, Y1), callbacks=[checkpointer])\nscores = model.evaluate(X1, Y1)\nprint('%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))\nmodel.load_weights(filepath)\npredictions = model.predict_classes(X1)\nprint(confusion_matrix(true_labels, predictions))\nmodel_json = model.to_json()\nwith open(json_file, 'w') as json_file:\n json_file.write(model_json)\nmodel.save_weights(h5_file)\nprint('Saved model to disk')\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 3 16:04:19 2018\r\n\r\n@author: khanhle\r\n\"\"\"\r\n\r\n\r\n\r\n# Create first network with Keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Activation\r\nfrom keras.utils import np_utils\r\nfrom keras.layers.convolutional import Convolution2D, ZeroPadding2D\r\nfrom keras.layers.pooling import MaxPooling2D\r\nfrom keras.layers.core import Dropout, Flatten\r\nfrom keras.callbacks import ModelCheckpoint\r\nimport numpy as np\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\n# fix random seed for reproducibility\r\nseed = 7\r\nnp.random.seed(seed)\r\n\r\nprint(__doc__)\r\n\r\nimport h5py\r\nimport os\r\nimport sys\r\nfrom keras.models import model_from_json\r\n\r\n#define params\r\ntrn_file = sys.argv[1]\r\ntst_file = sys.argv[2]\r\njson_file = sys.argv[3]\r\nh5_file = sys.argv[4]\r\n\r\nnb_classes = 2\r\nnb_kernels = 3\r\nnb_pools = 2\r\nwindow_sizes = 19\r\n\r\n# load training dataset\r\ndataset = np.loadtxt(trn_file, delimiter=\",\")\r\n# split into input (X) and output (Y) variables\r\nX = dataset[:,0:window_sizes*20].reshape(len(dataset),1,20,window_sizes)\r\nY = dataset[:,window_sizes*20]\r\n\r\nY = np_utils.to_categorical(Y,nb_classes)\r\n#print X,Y\r\n#nb_classes = Y.shape[1]\r\n#print nb_classes\r\n\r\n# load testing dataset\r\ndataset1 = np.loadtxt(tst_file, delimiter=\",\")\r\n# split into input (X) and output (Y) variables\r\nX1 = dataset1[:,0:window_sizes*20].reshape(len(dataset1),1,20,window_sizes)\r\nY1 = dataset1[:,window_sizes*20]\r\ntrue_labels = np.asarray(Y1)\r\n\r\nY1 = np_utils.to_categorical(Y1,nb_classes)\r\n#print('label : ', Y[i,:])\r\n\r\ndef cnn_model():\r\n model = Sequential()\r\n\r\n model.add(ZeroPadding2D((1,1), input_shape = (1,20,window_sizes)))\r\n model.add(Convolution2D(32, nb_kernels, nb_kernels))\r\n model.add(Activation('relu'))\r\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering=\"th\"))\r\n\r\n # model.add(ZeroPadding2D((1,1)))\r\n # model.add(Convolution2D(32, nb_kernels, nb_kernels, activation='relu'))\r\n # model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering=\"th\"))\r\n\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(64, nb_kernels, nb_kernels, activation='relu'))\r\n # model.add(Activation('relu'))\r\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering=\"th\"))\r\n\r\n model.add(ZeroPadding2D((1,1)))\r\n model.add(Convolution2D(128, nb_kernels, nb_kernels, activation='relu'))\r\n model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering=\"th\"))\r\n\r\n # model.add(ZeroPadding2D((1,1)))\r\n # model.add(Convolution2D(256, nb_kernels, nb_kernels, activation='relu'))\r\n # model.add(MaxPooling2D(strides=(nb_pools, nb_pools), dim_ordering=\"th\"))\r\n\r\n ## add the model on top of the convolutional base\r\n #model.add(top_model)\r\n model.add(Flatten())\r\n model.add(Dropout(0.5))\r\n model.add(Dense(128))\r\n #model.add(BatchNormalization())\r\n model.add(Activation('relu'))\r\n\r\n model.add(Dense(nb_classes))\r\n #model.add(BatchNormalization())\r\n model.add(Activation('softmax'))\r\n\r\n # f = open('model_summary.txt','w')\r\n # f.write(str(model.summary()))\r\n # f.close()\r\n\r\n #model.compile(loss='categorical_crossentropy', optimizer='adadelta')\r\n # Compile model\r\n model.compile(loss='binary_crossentropy', optimizer='adadelta', metrics=['accuracy'])\r\n return model\r\n\r\n#plot_filters(model.layers[0],32,1)\r\n# Fit the model\r\n# save best weights\r\nmodel = cnn_model()\r\n#plot_model(model, to_file='model.png')\r\n\r\nfilepath = \"weights.best.hdf5\"\r\ncheckpointer = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True)\r\n# balance data\r\nmodel.fit(X, Y, nb_epoch=150, batch_size=10, class_weight = 'auto', validation_data=(X1,Y1), callbacks=[checkpointer])\r\n## evaluate the model\r\nscores = model.evaluate(X1, Y1)\r\nprint(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\r\n\r\nmodel.load_weights(filepath)\r\npredictions = model.predict_classes(X1)\r\n\r\nprint(confusion_matrix(true_labels, predictions))\r\n\r\n#serialize model to JSON\r\nmodel_json = model.to_json()\r\nwith open(json_file, \"w\") as json_file:\r\n json_file.write(model_json)\r\n# serialize weights to HDF5\r\nmodel.save_weights(h5_file)\r\nprint(\"Saved model to disk\")\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!python3
import requests
import time
log_file = open("logfile.txt", "w")
def generateLog(ctime1, request_obj):
log_file.write(ctime1 + "\t")
log_file.write("Status code: " + str(request_obj.status_code))
log_file.write("\n")
def is_internet():
"""Internet function"""
print(time.ctime())
current_time = time.ctime()
try:
r = requests.get("https://www.google.com/") # sends requests to google.com
if r.status_code == 200: # if ok, prints msg
print("Connection established successfully!")
else: # if not ok, prints msg
print("Error, try again")
# generateLog("logfile", current _time, r)
except ConnectionError: # if this error is enountered, it will print this message
print(f"No internet connection, time: {time.ctime()}")
finally:
print("Generating log file...")
# time.sleep(0.25)
generateLog(current_time, r) # calls the generateLog function
print("Exiting the program...")
t = 0
while t < 30:
try:
is_internet()
# time.sleep(10)
except KeyboardInterrupt:
print("Keyboard Interrupt error ")
break
finally:
t += 1
log_file.close()
input()
|
normal
|
{
"blob_id": "f229f525c610d9925c9300ef22208f9926d6cb69",
"index": 9985,
"step-1": "<mask token>\n\n\ndef generateLog(ctime1, request_obj):\n log_file.write(ctime1 + '\\t')\n log_file.write('Status code: ' + str(request_obj.status_code))\n log_file.write('\\n')\n\n\ndef is_internet():\n \"\"\"Internet function\"\"\"\n print(time.ctime())\n current_time = time.ctime()\n try:\n r = requests.get('https://www.google.com/')\n if r.status_code == 200:\n print('Connection established successfully!')\n else:\n print('Error, try again')\n except ConnectionError:\n print(f'No internet connection, time: {time.ctime()}')\n finally:\n print('Generating log file...')\n generateLog(current_time, r)\n print('Exiting the program...')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generateLog(ctime1, request_obj):\n log_file.write(ctime1 + '\\t')\n log_file.write('Status code: ' + str(request_obj.status_code))\n log_file.write('\\n')\n\n\ndef is_internet():\n \"\"\"Internet function\"\"\"\n print(time.ctime())\n current_time = time.ctime()\n try:\n r = requests.get('https://www.google.com/')\n if r.status_code == 200:\n print('Connection established successfully!')\n else:\n print('Error, try again')\n except ConnectionError:\n print(f'No internet connection, time: {time.ctime()}')\n finally:\n print('Generating log file...')\n generateLog(current_time, r)\n print('Exiting the program...')\n\n\n<mask token>\nwhile t < 30:\n try:\n is_internet()\n except KeyboardInterrupt:\n print('Keyboard Interrupt error ')\n break\n finally:\n t += 1\nlog_file.close()\ninput()\n",
"step-3": "<mask token>\nlog_file = open('logfile.txt', 'w')\n\n\ndef generateLog(ctime1, request_obj):\n log_file.write(ctime1 + '\\t')\n log_file.write('Status code: ' + str(request_obj.status_code))\n log_file.write('\\n')\n\n\ndef is_internet():\n \"\"\"Internet function\"\"\"\n print(time.ctime())\n current_time = time.ctime()\n try:\n r = requests.get('https://www.google.com/')\n if r.status_code == 200:\n print('Connection established successfully!')\n else:\n print('Error, try again')\n except ConnectionError:\n print(f'No internet connection, time: {time.ctime()}')\n finally:\n print('Generating log file...')\n generateLog(current_time, r)\n print('Exiting the program...')\n\n\nt = 0\nwhile t < 30:\n try:\n is_internet()\n except KeyboardInterrupt:\n print('Keyboard Interrupt error ')\n break\n finally:\n t += 1\nlog_file.close()\ninput()\n",
"step-4": "import requests\nimport time\nlog_file = open('logfile.txt', 'w')\n\n\ndef generateLog(ctime1, request_obj):\n log_file.write(ctime1 + '\\t')\n log_file.write('Status code: ' + str(request_obj.status_code))\n log_file.write('\\n')\n\n\ndef is_internet():\n \"\"\"Internet function\"\"\"\n print(time.ctime())\n current_time = time.ctime()\n try:\n r = requests.get('https://www.google.com/')\n if r.status_code == 200:\n print('Connection established successfully!')\n else:\n print('Error, try again')\n except ConnectionError:\n print(f'No internet connection, time: {time.ctime()}')\n finally:\n print('Generating log file...')\n generateLog(current_time, r)\n print('Exiting the program...')\n\n\nt = 0\nwhile t < 30:\n try:\n is_internet()\n except KeyboardInterrupt:\n print('Keyboard Interrupt error ')\n break\n finally:\n t += 1\nlog_file.close()\ninput()\n",
"step-5": "#!python3\r\nimport requests\r\nimport time\r\n\r\nlog_file = open(\"logfile.txt\", \"w\")\r\n\r\n\r\ndef generateLog(ctime1, request_obj):\r\n log_file.write(ctime1 + \"\\t\")\r\n log_file.write(\"Status code: \" + str(request_obj.status_code))\r\n log_file.write(\"\\n\")\r\n\r\n\r\ndef is_internet():\r\n \"\"\"Internet function\"\"\"\r\n print(time.ctime())\r\n current_time = time.ctime()\r\n try:\r\n r = requests.get(\"https://www.google.com/\") # sends requests to google.com\r\n if r.status_code == 200: # if ok, prints msg\r\n print(\"Connection established successfully!\")\r\n else: # if not ok, prints msg\r\n print(\"Error, try again\")\r\n # generateLog(\"logfile\", current _time, r)\r\n except ConnectionError: # if this error is enountered, it will print this message\r\n print(f\"No internet connection, time: {time.ctime()}\")\r\n finally:\r\n print(\"Generating log file...\")\r\n # time.sleep(0.25)\r\n generateLog(current_time, r) # calls the generateLog function\r\n print(\"Exiting the program...\")\r\n\r\n\r\nt = 0\r\nwhile t < 30:\r\n try:\r\n is_internet()\r\n # time.sleep(10)\r\n except KeyboardInterrupt:\r\n print(\"Keyboard Interrupt error \")\r\n break\r\n finally:\r\n t += 1\r\nlog_file.close()\r\ninput()\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import argparse
from ray.tune.config_parser import make_parser
from ray.tune.result import DEFAULT_RESULTS_DIR
EXAMPLE_USAGE = """
Training example:
python ./train.py --run DQN --env CartPole-v0 --no-log-flatland-stats
Training with Config:
python ./train.py -f experiments/flatland_random_sparse_small/global_obs/ppo.yaml
Note that -f overrides all other trial-specific command-line options.
"""
def create_parser(parser_creator=None):
parser = make_parser(
parser_creator=parser_creator,
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Train a reinforcement learning agent.",
epilog=EXAMPLE_USAGE)
# See also the base parser definition in ray/tune/config_parser.py
parser.add_argument(
"--ray-address",
default=None,
type=str,
help="Connect to an existing Ray cluster at this address instead "
"of starting a new one.")
parser.add_argument(
"--ray-num-cpus",
default=None,
type=int,
help="--num-cpus to use if starting a new cluster.")
parser.add_argument(
"--ray-num-gpus",
default=None,
type=int,
help="--num-gpus to use if starting a new cluster.")
parser.add_argument(
"--ray-num-nodes",
default=None,
type=int,
help="Emulate multiple cluster nodes for debugging.")
parser.add_argument(
"--ray-redis-max-memory",
default=None,
type=int,
help="--redis-max-memory to use if starting a new cluster.")
parser.add_argument(
"--ray-memory",
default=None,
type=int,
help="--memory to use if starting a new cluster.")
parser.add_argument(
"--ray-object-store-memory",
default=None,
type=int,
help="--object-store-memory to use if starting a new cluster.")
parser.add_argument(
"--experiment-name",
default="default",
type=str,
help="Name of the subdirectory under `local_dir` to put results in.")
parser.add_argument(
"--local-dir",
default=DEFAULT_RESULTS_DIR,
type=str,
help="Local dir to save training results to. Defaults to '{}'.".format(
DEFAULT_RESULTS_DIR))
parser.add_argument(
"--upload-dir",
default="",
type=str,
help="Optional URI to sync training results to (e.g. s3://bucket).")
parser.add_argument(
"-v", action="store_true", help="Whether to use INFO level logging.")
parser.add_argument(
"-vv", action="store_true", help="Whether to use DEBUG level logging.")
parser.add_argument(
"--resume",
action="store_true",
help="Whether to attempt to resume previous Tune experiments.")
parser.add_argument(
"--torch",
action="store_true",
help="Whether to use PyTorch (instead of tf) as the DL framework.")
parser.add_argument(
"--eager",
action="store_true",
help="Whether to attempt to enable TF eager execution.")
parser.add_argument(
"--trace",
action="store_true",
help="Whether to attempt to enable tracing for eager mode.")
parser.add_argument(
"--log-flatland-stats",
action="store_true",
default=True,
help="Whether to log additional flatland specfic metrics such as percentage complete or normalized score.")
parser.add_argument(
"-e",
"--eval",
action="store_true",
help="Whether to run evaluation. Default evaluation config is default.yaml "
"to use custom evaluation config set (eval_generator:high_eval) under configs")
parser.add_argument(
"--bind-all",
action="store_true",
default=False,
help="Whether to expose on network (binding on all network interfaces).")
parser.add_argument(
"--env", default=None, type=str, help="The gym environment to use.")
parser.add_argument(
"--queue-trials",
action="store_true",
help=(
"Whether to queue trials when the cluster does not currently have "
"enough resources to launch one. This should be set to True when "
"running on an autoscaling cluster to enable automatic scale-up."))
parser.add_argument(
"-f",
"--config-file",
default=None,
type=str,
help="If specified, use config options from this file. Note that this "
"overrides any trial-specific options set via flags above.")
return parser
|
normal
|
{
"blob_id": "79a8ff0000f3be79a62d693ed6bae7480673d970",
"index": 6075,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_parser(parser_creator=None):\n parser = make_parser(parser_creator=parser_creator, formatter_class=\n argparse.RawDescriptionHelpFormatter, description=\n 'Train a reinforcement learning agent.', epilog=EXAMPLE_USAGE)\n parser.add_argument('--ray-address', default=None, type=str, help=\n 'Connect to an existing Ray cluster at this address instead of starting a new one.'\n )\n parser.add_argument('--ray-num-cpus', default=None, type=int, help=\n '--num-cpus to use if starting a new cluster.')\n parser.add_argument('--ray-num-gpus', default=None, type=int, help=\n '--num-gpus to use if starting a new cluster.')\n parser.add_argument('--ray-num-nodes', default=None, type=int, help=\n 'Emulate multiple cluster nodes for debugging.')\n parser.add_argument('--ray-redis-max-memory', default=None, type=int,\n help='--redis-max-memory to use if starting a new cluster.')\n parser.add_argument('--ray-memory', default=None, type=int, help=\n '--memory to use if starting a new cluster.')\n parser.add_argument('--ray-object-store-memory', default=None, type=int,\n help='--object-store-memory to use if starting a new cluster.')\n parser.add_argument('--experiment-name', default='default', type=str,\n help='Name of the subdirectory under `local_dir` to put results in.')\n parser.add_argument('--local-dir', default=DEFAULT_RESULTS_DIR, type=\n str, help=\n \"Local dir to save training results to. Defaults to '{}'.\".format(\n DEFAULT_RESULTS_DIR))\n parser.add_argument('--upload-dir', default='', type=str, help=\n 'Optional URI to sync training results to (e.g. s3://bucket).')\n parser.add_argument('-v', action='store_true', help=\n 'Whether to use INFO level logging.')\n parser.add_argument('-vv', action='store_true', help=\n 'Whether to use DEBUG level logging.')\n parser.add_argument('--resume', action='store_true', help=\n 'Whether to attempt to resume previous Tune experiments.')\n parser.add_argument('--torch', action='store_true', help=\n 'Whether to use PyTorch (instead of tf) as the DL framework.')\n parser.add_argument('--eager', action='store_true', help=\n 'Whether to attempt to enable TF eager execution.')\n parser.add_argument('--trace', action='store_true', help=\n 'Whether to attempt to enable tracing for eager mode.')\n parser.add_argument('--log-flatland-stats', action='store_true',\n default=True, help=\n 'Whether to log additional flatland specfic metrics such as percentage complete or normalized score.'\n )\n parser.add_argument('-e', '--eval', action='store_true', help=\n 'Whether to run evaluation. Default evaluation config is default.yaml to use custom evaluation config set (eval_generator:high_eval) under configs'\n )\n parser.add_argument('--bind-all', action='store_true', default=False,\n help=\n 'Whether to expose on network (binding on all network interfaces).')\n parser.add_argument('--env', default=None, type=str, help=\n 'The gym environment to use.')\n parser.add_argument('--queue-trials', action='store_true', help=\n 'Whether to queue trials when the cluster does not currently have enough resources to launch one. This should be set to True when running on an autoscaling cluster to enable automatic scale-up.'\n )\n parser.add_argument('-f', '--config-file', default=None, type=str, help\n =\n 'If specified, use config options from this file. Note that this overrides any trial-specific options set via flags above.'\n )\n return parser\n",
"step-3": "<mask token>\nEXAMPLE_USAGE = \"\"\"\nTraining example:\n python ./train.py --run DQN --env CartPole-v0 --no-log-flatland-stats\n\nTraining with Config:\n python ./train.py -f experiments/flatland_random_sparse_small/global_obs/ppo.yaml\n\n\nNote that -f overrides all other trial-specific command-line options.\n\"\"\"\n\n\ndef create_parser(parser_creator=None):\n parser = make_parser(parser_creator=parser_creator, formatter_class=\n argparse.RawDescriptionHelpFormatter, description=\n 'Train a reinforcement learning agent.', epilog=EXAMPLE_USAGE)\n parser.add_argument('--ray-address', default=None, type=str, help=\n 'Connect to an existing Ray cluster at this address instead of starting a new one.'\n )\n parser.add_argument('--ray-num-cpus', default=None, type=int, help=\n '--num-cpus to use if starting a new cluster.')\n parser.add_argument('--ray-num-gpus', default=None, type=int, help=\n '--num-gpus to use if starting a new cluster.')\n parser.add_argument('--ray-num-nodes', default=None, type=int, help=\n 'Emulate multiple cluster nodes for debugging.')\n parser.add_argument('--ray-redis-max-memory', default=None, type=int,\n help='--redis-max-memory to use if starting a new cluster.')\n parser.add_argument('--ray-memory', default=None, type=int, help=\n '--memory to use if starting a new cluster.')\n parser.add_argument('--ray-object-store-memory', default=None, type=int,\n help='--object-store-memory to use if starting a new cluster.')\n parser.add_argument('--experiment-name', default='default', type=str,\n help='Name of the subdirectory under `local_dir` to put results in.')\n parser.add_argument('--local-dir', default=DEFAULT_RESULTS_DIR, type=\n str, help=\n \"Local dir to save training results to. Defaults to '{}'.\".format(\n DEFAULT_RESULTS_DIR))\n parser.add_argument('--upload-dir', default='', type=str, help=\n 'Optional URI to sync training results to (e.g. s3://bucket).')\n parser.add_argument('-v', action='store_true', help=\n 'Whether to use INFO level logging.')\n parser.add_argument('-vv', action='store_true', help=\n 'Whether to use DEBUG level logging.')\n parser.add_argument('--resume', action='store_true', help=\n 'Whether to attempt to resume previous Tune experiments.')\n parser.add_argument('--torch', action='store_true', help=\n 'Whether to use PyTorch (instead of tf) as the DL framework.')\n parser.add_argument('--eager', action='store_true', help=\n 'Whether to attempt to enable TF eager execution.')\n parser.add_argument('--trace', action='store_true', help=\n 'Whether to attempt to enable tracing for eager mode.')\n parser.add_argument('--log-flatland-stats', action='store_true',\n default=True, help=\n 'Whether to log additional flatland specfic metrics such as percentage complete or normalized score.'\n )\n parser.add_argument('-e', '--eval', action='store_true', help=\n 'Whether to run evaluation. Default evaluation config is default.yaml to use custom evaluation config set (eval_generator:high_eval) under configs'\n )\n parser.add_argument('--bind-all', action='store_true', default=False,\n help=\n 'Whether to expose on network (binding on all network interfaces).')\n parser.add_argument('--env', default=None, type=str, help=\n 'The gym environment to use.')\n parser.add_argument('--queue-trials', action='store_true', help=\n 'Whether to queue trials when the cluster does not currently have enough resources to launch one. This should be set to True when running on an autoscaling cluster to enable automatic scale-up.'\n )\n parser.add_argument('-f', '--config-file', default=None, type=str, help\n =\n 'If specified, use config options from this file. Note that this overrides any trial-specific options set via flags above.'\n )\n return parser\n",
"step-4": "import argparse\nfrom ray.tune.config_parser import make_parser\nfrom ray.tune.result import DEFAULT_RESULTS_DIR\nEXAMPLE_USAGE = \"\"\"\nTraining example:\n python ./train.py --run DQN --env CartPole-v0 --no-log-flatland-stats\n\nTraining with Config:\n python ./train.py -f experiments/flatland_random_sparse_small/global_obs/ppo.yaml\n\n\nNote that -f overrides all other trial-specific command-line options.\n\"\"\"\n\n\ndef create_parser(parser_creator=None):\n parser = make_parser(parser_creator=parser_creator, formatter_class=\n argparse.RawDescriptionHelpFormatter, description=\n 'Train a reinforcement learning agent.', epilog=EXAMPLE_USAGE)\n parser.add_argument('--ray-address', default=None, type=str, help=\n 'Connect to an existing Ray cluster at this address instead of starting a new one.'\n )\n parser.add_argument('--ray-num-cpus', default=None, type=int, help=\n '--num-cpus to use if starting a new cluster.')\n parser.add_argument('--ray-num-gpus', default=None, type=int, help=\n '--num-gpus to use if starting a new cluster.')\n parser.add_argument('--ray-num-nodes', default=None, type=int, help=\n 'Emulate multiple cluster nodes for debugging.')\n parser.add_argument('--ray-redis-max-memory', default=None, type=int,\n help='--redis-max-memory to use if starting a new cluster.')\n parser.add_argument('--ray-memory', default=None, type=int, help=\n '--memory to use if starting a new cluster.')\n parser.add_argument('--ray-object-store-memory', default=None, type=int,\n help='--object-store-memory to use if starting a new cluster.')\n parser.add_argument('--experiment-name', default='default', type=str,\n help='Name of the subdirectory under `local_dir` to put results in.')\n parser.add_argument('--local-dir', default=DEFAULT_RESULTS_DIR, type=\n str, help=\n \"Local dir to save training results to. Defaults to '{}'.\".format(\n DEFAULT_RESULTS_DIR))\n parser.add_argument('--upload-dir', default='', type=str, help=\n 'Optional URI to sync training results to (e.g. s3://bucket).')\n parser.add_argument('-v', action='store_true', help=\n 'Whether to use INFO level logging.')\n parser.add_argument('-vv', action='store_true', help=\n 'Whether to use DEBUG level logging.')\n parser.add_argument('--resume', action='store_true', help=\n 'Whether to attempt to resume previous Tune experiments.')\n parser.add_argument('--torch', action='store_true', help=\n 'Whether to use PyTorch (instead of tf) as the DL framework.')\n parser.add_argument('--eager', action='store_true', help=\n 'Whether to attempt to enable TF eager execution.')\n parser.add_argument('--trace', action='store_true', help=\n 'Whether to attempt to enable tracing for eager mode.')\n parser.add_argument('--log-flatland-stats', action='store_true',\n default=True, help=\n 'Whether to log additional flatland specfic metrics such as percentage complete or normalized score.'\n )\n parser.add_argument('-e', '--eval', action='store_true', help=\n 'Whether to run evaluation. Default evaluation config is default.yaml to use custom evaluation config set (eval_generator:high_eval) under configs'\n )\n parser.add_argument('--bind-all', action='store_true', default=False,\n help=\n 'Whether to expose on network (binding on all network interfaces).')\n parser.add_argument('--env', default=None, type=str, help=\n 'The gym environment to use.')\n parser.add_argument('--queue-trials', action='store_true', help=\n 'Whether to queue trials when the cluster does not currently have enough resources to launch one. This should be set to True when running on an autoscaling cluster to enable automatic scale-up.'\n )\n parser.add_argument('-f', '--config-file', default=None, type=str, help\n =\n 'If specified, use config options from this file. Note that this overrides any trial-specific options set via flags above.'\n )\n return parser\n",
"step-5": "import argparse\n\nfrom ray.tune.config_parser import make_parser\nfrom ray.tune.result import DEFAULT_RESULTS_DIR\n\nEXAMPLE_USAGE = \"\"\"\nTraining example:\n python ./train.py --run DQN --env CartPole-v0 --no-log-flatland-stats\n\nTraining with Config:\n python ./train.py -f experiments/flatland_random_sparse_small/global_obs/ppo.yaml\n\n\nNote that -f overrides all other trial-specific command-line options.\n\"\"\"\n\n\ndef create_parser(parser_creator=None):\n parser = make_parser(\n parser_creator=parser_creator,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"Train a reinforcement learning agent.\",\n epilog=EXAMPLE_USAGE)\n\n # See also the base parser definition in ray/tune/config_parser.py\n parser.add_argument(\n \"--ray-address\",\n default=None,\n type=str,\n help=\"Connect to an existing Ray cluster at this address instead \"\n \"of starting a new one.\")\n parser.add_argument(\n \"--ray-num-cpus\",\n default=None,\n type=int,\n help=\"--num-cpus to use if starting a new cluster.\")\n parser.add_argument(\n \"--ray-num-gpus\",\n default=None,\n type=int,\n help=\"--num-gpus to use if starting a new cluster.\")\n parser.add_argument(\n \"--ray-num-nodes\",\n default=None,\n type=int,\n help=\"Emulate multiple cluster nodes for debugging.\")\n parser.add_argument(\n \"--ray-redis-max-memory\",\n default=None,\n type=int,\n help=\"--redis-max-memory to use if starting a new cluster.\")\n parser.add_argument(\n \"--ray-memory\",\n default=None,\n type=int,\n help=\"--memory to use if starting a new cluster.\")\n parser.add_argument(\n \"--ray-object-store-memory\",\n default=None,\n type=int,\n help=\"--object-store-memory to use if starting a new cluster.\")\n parser.add_argument(\n \"--experiment-name\",\n default=\"default\",\n type=str,\n help=\"Name of the subdirectory under `local_dir` to put results in.\")\n parser.add_argument(\n \"--local-dir\",\n default=DEFAULT_RESULTS_DIR,\n type=str,\n help=\"Local dir to save training results to. Defaults to '{}'.\".format(\n DEFAULT_RESULTS_DIR))\n parser.add_argument(\n \"--upload-dir\",\n default=\"\",\n type=str,\n help=\"Optional URI to sync training results to (e.g. s3://bucket).\")\n parser.add_argument(\n \"-v\", action=\"store_true\", help=\"Whether to use INFO level logging.\")\n parser.add_argument(\n \"-vv\", action=\"store_true\", help=\"Whether to use DEBUG level logging.\")\n parser.add_argument(\n \"--resume\",\n action=\"store_true\",\n help=\"Whether to attempt to resume previous Tune experiments.\")\n parser.add_argument(\n \"--torch\",\n action=\"store_true\",\n help=\"Whether to use PyTorch (instead of tf) as the DL framework.\")\n parser.add_argument(\n \"--eager\",\n action=\"store_true\",\n help=\"Whether to attempt to enable TF eager execution.\")\n parser.add_argument(\n \"--trace\",\n action=\"store_true\",\n help=\"Whether to attempt to enable tracing for eager mode.\")\n parser.add_argument(\n \"--log-flatland-stats\",\n action=\"store_true\",\n default=True,\n help=\"Whether to log additional flatland specfic metrics such as percentage complete or normalized score.\")\n parser.add_argument(\n \"-e\",\n \"--eval\",\n action=\"store_true\",\n help=\"Whether to run evaluation. Default evaluation config is default.yaml \"\n \"to use custom evaluation config set (eval_generator:high_eval) under configs\")\n parser.add_argument(\n \"--bind-all\",\n action=\"store_true\",\n default=False,\n help=\"Whether to expose on network (binding on all network interfaces).\")\n parser.add_argument(\n \"--env\", default=None, type=str, help=\"The gym environment to use.\")\n parser.add_argument(\n \"--queue-trials\",\n action=\"store_true\",\n help=(\n \"Whether to queue trials when the cluster does not currently have \"\n \"enough resources to launch one. This should be set to True when \"\n \"running on an autoscaling cluster to enable automatic scale-up.\"))\n parser.add_argument(\n \"-f\",\n \"--config-file\",\n default=None,\n type=str,\n help=\"If specified, use config options from this file. Note that this \"\n \"overrides any trial-specific options set via flags above.\")\n return parser\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Unit(Atom, RelMeths, ArithMeths):
is_positive = True
is_commutative = True
def __init__(self, name, abbrev):
self.name = name
self.abbrev = abbrev
def tostr(self, level=0):
return self.abbrev
def __eq__(self, other):
return isinstance(other, Unit) and self.name == other.name
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Unit(Atom, RelMeths, ArithMeths):
is_positive = True
is_commutative = True
def __init__(self, name, abbrev):
self.name = name
self.abbrev = abbrev
def tostr(self, level=0):
return self.abbrev
def __eq__(self, other):
return isinstance(other, Unit) and self.name == other.name
def defunit(value, *names):
u = value
g = globals()
for name in names:
g[name] = u
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Unit(Atom, RelMeths, ArithMeths):
is_positive = True
is_commutative = True
def __init__(self, name, abbrev):
self.name = name
self.abbrev = abbrev
def tostr(self, level=0):
return self.abbrev
def __eq__(self, other):
return isinstance(other, Unit) and self.name == other.name
def defunit(value, *names):
u = value
g = globals()
for name in names:
g[name] = u
<|reserved_special_token_0|>
defunit(Unit('meter', 'm'), 'm', 'meter', 'meters')
defunit(Unit('kilogram', 'kg'), 'kg', 'kilogram', 'kilograms')
defunit(Unit('second', 's'), 's', 'second', 'seconds')
defunit(Unit('ampere', 'A'), 'A', 'ampere', 'amperes')
defunit(Unit('kelvin', 'K'), 'K', 'kelvin', 'kelvins')
defunit(Unit('mole', 'mol'), 'mol', 'mole', 'moles')
defunit(Unit('candela', 'cd'), 'cd', 'candela', 'candelas')
defunit(1 / s, 'Hz', 'hz', 'hertz')
defunit(m * kg / s ** 2, 'N', 'newton', 'newtons')
defunit(N * m, 'J', 'joule', 'joules')
defunit(J / s, 'W', 'watt', 'watts')
defunit(N / m ** 2, 'Pa', 'pa', 'pascal', 'pascals')
defunit(s * A, 'C', 'coulomb', 'coulombs')
defunit(W / A, 'v', 'V', 'volt', 'volts')
defunit(V / A, 'ohm', 'ohms')
defunit(A / V, 'S', 'siemens', 'mho', 'mhos')
defunit(C / V, 'F', 'farad', 'farads')
defunit(J / A, 'Wb', 'wb', 'weber', 'webers')
defunit(V * s / m ** 2, 'T', 'tesla', 'teslas')
defunit(V * s / A, 'H', 'henry', 'henrys')
defunit(kilo * m, 'km', 'kilometer', 'kilometers')
defunit(deci * m, 'dm', 'decimeter', 'decimeters')
defunit(centi * m, 'cm', 'centimeter', 'centimeters')
defunit(milli * m, 'mm', 'millimeter', 'millimeters')
defunit(micro * m, 'um', 'micrometer', 'micrometers', 'micron', 'microns')
defunit(nano * m, 'nm', 'nanometer', 'nanometers')
defunit(pico * m, 'pm', 'picometer', 'picometers')
defunit(Rational('0.3048') * m, 'ft', 'foot', 'feet')
defunit(Rational('25.4') * mm, 'inch', 'inches')
defunit(3 * ft, 'yd', 'yard', 'yards')
defunit(5280 * ft, 'mi', 'mile', 'miles')
defunit(m ** 3 / 1000, 'l', 'liter', 'liters')
defunit(deci * l, 'dl', 'deciliter', 'deciliters')
defunit(centi * l, 'cl', 'centiliter', 'centiliters')
defunit(milli * l, 'ml', 'milliliter', 'milliliters')
defunit(milli * s, 'ms', 'millisecond', 'milliseconds')
defunit(micro * s, 'us', 'microsecond', 'microseconds')
defunit(nano * s, 'ns', 'nanosecond', 'nanoseconds')
defunit(pico * s, 'ps', 'picosecond', 'picoseconds')
defunit(60 * s, 'minute', 'minutes')
defunit(60 * minute, 'h', 'hour', 'hours')
defunit(24 * hour, 'day', 'days')
defunit(Rational('31558149.540') * s, 'sidereal_year', 'sidereal_years')
defunit(Rational('365.24219') * day, 'tropical_year', 'tropical_years')
defunit(Rational('365') * day, 'common_year', 'common_years')
defunit(Rational('365.25') * day, 'julian_year', 'julian_years')
<|reserved_special_token_0|>
defunit(kilogram / kilo, 'g', 'gram', 'grams')
defunit(milli * g, 'mg', 'milligram', 'milligrams')
defunit(micro * g, 'ug', 'microgram', 'micrograms')
<|reserved_special_token_0|>
defunit(c * julian_year, 'ly', 'lightyear', 'lightyears')
defunit(149597870691 * m, 'au', 'astronomical_unit', 'astronomical_units')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from sympy import *
from sympy.core.basic import Atom
from sympy.core.methods import ArithMeths, RelMeths
class Unit(Atom, RelMeths, ArithMeths):
is_positive = True
is_commutative = True
def __init__(self, name, abbrev):
self.name = name
self.abbrev = abbrev
def tostr(self, level=0):
return self.abbrev
def __eq__(self, other):
return isinstance(other, Unit) and self.name == other.name
def defunit(value, *names):
u = value
g = globals()
for name in names:
g[name] = u
percent = percents = Rational(1, 100)
permille = permille = Rational(1, 1000)
ten = Rational(10)
yotta = ten ** 24
zetta = ten ** 21
exa = ten ** 18
peta = ten ** 15
tera = ten ** 12
giga = ten ** 9
mega = ten ** 6
kilo = ten ** 3
deca = ten ** 1
deci = ten ** -1
centi = ten ** -2
milli = ten ** -3
micro = ten ** -6
nano = ten ** -9
pico = ten ** -12
femto = ten ** -15
atto = ten ** -18
zepto = ten ** -21
yocto = ten ** -24
rad = radian = radians = 1
deg = degree = degrees = pi / 180
defunit(Unit('meter', 'm'), 'm', 'meter', 'meters')
defunit(Unit('kilogram', 'kg'), 'kg', 'kilogram', 'kilograms')
defunit(Unit('second', 's'), 's', 'second', 'seconds')
defunit(Unit('ampere', 'A'), 'A', 'ampere', 'amperes')
defunit(Unit('kelvin', 'K'), 'K', 'kelvin', 'kelvins')
defunit(Unit('mole', 'mol'), 'mol', 'mole', 'moles')
defunit(Unit('candela', 'cd'), 'cd', 'candela', 'candelas')
defunit(1 / s, 'Hz', 'hz', 'hertz')
defunit(m * kg / s ** 2, 'N', 'newton', 'newtons')
defunit(N * m, 'J', 'joule', 'joules')
defunit(J / s, 'W', 'watt', 'watts')
defunit(N / m ** 2, 'Pa', 'pa', 'pascal', 'pascals')
defunit(s * A, 'C', 'coulomb', 'coulombs')
defunit(W / A, 'v', 'V', 'volt', 'volts')
defunit(V / A, 'ohm', 'ohms')
defunit(A / V, 'S', 'siemens', 'mho', 'mhos')
defunit(C / V, 'F', 'farad', 'farads')
defunit(J / A, 'Wb', 'wb', 'weber', 'webers')
defunit(V * s / m ** 2, 'T', 'tesla', 'teslas')
defunit(V * s / A, 'H', 'henry', 'henrys')
defunit(kilo * m, 'km', 'kilometer', 'kilometers')
defunit(deci * m, 'dm', 'decimeter', 'decimeters')
defunit(centi * m, 'cm', 'centimeter', 'centimeters')
defunit(milli * m, 'mm', 'millimeter', 'millimeters')
defunit(micro * m, 'um', 'micrometer', 'micrometers', 'micron', 'microns')
defunit(nano * m, 'nm', 'nanometer', 'nanometers')
defunit(pico * m, 'pm', 'picometer', 'picometers')
defunit(Rational('0.3048') * m, 'ft', 'foot', 'feet')
defunit(Rational('25.4') * mm, 'inch', 'inches')
defunit(3 * ft, 'yd', 'yard', 'yards')
defunit(5280 * ft, 'mi', 'mile', 'miles')
defunit(m ** 3 / 1000, 'l', 'liter', 'liters')
defunit(deci * l, 'dl', 'deciliter', 'deciliters')
defunit(centi * l, 'cl', 'centiliter', 'centiliters')
defunit(milli * l, 'ml', 'milliliter', 'milliliters')
defunit(milli * s, 'ms', 'millisecond', 'milliseconds')
defunit(micro * s, 'us', 'microsecond', 'microseconds')
defunit(nano * s, 'ns', 'nanosecond', 'nanoseconds')
defunit(pico * s, 'ps', 'picosecond', 'picoseconds')
defunit(60 * s, 'minute', 'minutes')
defunit(60 * minute, 'h', 'hour', 'hours')
defunit(24 * hour, 'day', 'days')
defunit(Rational('31558149.540') * s, 'sidereal_year', 'sidereal_years')
defunit(Rational('365.24219') * day, 'tropical_year', 'tropical_years')
defunit(Rational('365') * day, 'common_year', 'common_years')
defunit(Rational('365.25') * day, 'julian_year', 'julian_years')
year = years = tropical_year
defunit(kilogram / kilo, 'g', 'gram', 'grams')
defunit(milli * g, 'mg', 'milligram', 'milligrams')
defunit(micro * g, 'ug', 'microgram', 'micrograms')
c = speed_of_light = 299792458 * m / s
G = gravitational_constant = Rational('6.67428'
) * ten ** -11 * m ** 3 / kg / s ** 2
u0 = magnetic_constant = 4 * pi * ten ** -7 * N / A ** 2
e0 = electric_constant = 1 / (u0 * c ** 2)
Z0 = vacuum_impedance = u0 * c
planck = Rational('6.2606896') * ten ** -34 * J * s
hbar = planck / (2 * pi)
avogadro = Rational('6.02214179') * 10 ** 23 / mol
boltzmann = Rational('1.3806505') * ten ** -23 * J / K
gee = gees = Rational('9.80665') * m / s ** 2
atmosphere = atmospheres = atm = 101325 * pascal
defunit(c * julian_year, 'ly', 'lightyear', 'lightyears')
defunit(149597870691 * m, 'au', 'astronomical_unit', 'astronomical_units')
<|reserved_special_token_1|>
"""
Physical units and dimensions
"""
from sympy import *
from sympy.core.basic import Atom
from sympy.core.methods import ArithMeths, RelMeths
class Unit(Atom, RelMeths, ArithMeths):
is_positive = True # make (m**2)**Rational(1,2) --> m
is_commutative = True
def __init__(self, name, abbrev):
self.name = name
self.abbrev = abbrev
def tostr(self, level=0):
return self.abbrev
def __eq__(self, other):
return isinstance(other, Unit) and self.name == other.name
def defunit(value, *names):
u = value
g = globals()
for name in names:
g[name] = u
# Dimensionless
percent = percents = Rational(1,100)
permille = permille = Rational(1,1000)
ten = Rational(10)
yotta = ten**24
zetta = ten**21
exa = ten**18
peta = ten**15
tera = ten**12
giga = ten**9
mega = ten**6
kilo = ten**3
deca = ten**1
deci = ten**-1
centi = ten**-2
milli = ten**-3
micro = ten**-6
nano = ten**-9
pico = ten**-12
femto = ten**-15
atto = ten**-18
zepto = ten**-21
yocto = ten**-24
rad = radian = radians = 1
deg = degree = degrees = pi/180
# Base units
defunit(Unit('meter', 'm'), 'm', 'meter', 'meters')
defunit(Unit('kilogram', 'kg'), 'kg', 'kilogram', 'kilograms')
defunit(Unit('second', 's'), 's', 'second', 'seconds')
defunit(Unit('ampere', 'A'), 'A', 'ampere', 'amperes')
defunit(Unit('kelvin', 'K'), 'K', 'kelvin', 'kelvins')
defunit(Unit('mole', 'mol'), 'mol', 'mole', 'moles')
defunit(Unit('candela', 'cd'), 'cd', 'candela', 'candelas')
# Derived units
defunit(1/s, 'Hz', 'hz', 'hertz')
defunit(m*kg/s**2, 'N', 'newton', 'newtons')
defunit(N*m, 'J', 'joule', 'joules')
defunit(J/s, 'W', 'watt', 'watts')
defunit(N/m**2, 'Pa', 'pa', 'pascal', 'pascals')
defunit(s*A, 'C', 'coulomb', 'coulombs')
defunit(W/A, 'v', 'V', 'volt', 'volts')
defunit(V/A, 'ohm', 'ohms')
defunit(A/V, 'S', 'siemens', 'mho', 'mhos')
defunit(C/V, 'F', 'farad', 'farads')
defunit(J/A, 'Wb', 'wb', 'weber', 'webers')
defunit(V*s/m**2, 'T', 'tesla', 'teslas')
defunit(V*s/A, 'H', 'henry', 'henrys')
# Common length units
defunit(kilo*m, 'km', 'kilometer', 'kilometers')
defunit(deci*m, 'dm', 'decimeter', 'decimeters')
defunit(centi*m, 'cm', 'centimeter', 'centimeters')
defunit(milli*m, 'mm', 'millimeter', 'millimeters')
defunit(micro*m, 'um', 'micrometer', 'micrometers', 'micron', 'microns')
defunit(nano*m, 'nm', 'nanometer', 'nanometers')
defunit(pico*m, 'pm', 'picometer', 'picometers')
defunit(Rational('0.3048')*m, 'ft', 'foot', 'feet')
defunit(Rational('25.4')*mm, 'inch', 'inches')
defunit(3*ft, 'yd', 'yard', 'yards')
defunit(5280*ft, 'mi', 'mile', 'miles')
# Common volume and area units
defunit(m**3 / 1000, 'l', 'liter', 'liters')
defunit(deci*l, 'dl', 'deciliter', 'deciliters')
defunit(centi*l, 'cl', 'centiliter', 'centiliters')
defunit(milli*l, 'ml', 'milliliter', 'milliliters')
# Common time units
defunit(milli*s, 'ms', 'millisecond', 'milliseconds')
defunit(micro*s, 'us', 'microsecond', 'microseconds')
defunit(nano*s, 'ns', 'nanosecond', 'nanoseconds')
defunit(pico*s, 'ps', 'picosecond', 'picoseconds')
defunit(60*s, 'minute', 'minutes')
defunit(60*minute, 'h', 'hour', 'hours')
defunit(24*hour, 'day', 'days')
defunit(Rational('31558149.540')*s, 'sidereal_year', 'sidereal_years')
defunit(Rational('365.24219')*day, 'tropical_year', 'tropical_years')
defunit(Rational('365')*day, 'common_year', 'common_years')
defunit(Rational('365.25')*day, 'julian_year', 'julian_years')
year = years = tropical_year
# Common mass units
defunit(kilogram / kilo, 'g', 'gram', 'grams')
defunit(milli * g, 'mg', 'milligram', 'milligrams')
defunit(micro * g, 'ug', 'microgram', 'micrograms')
#----------------------------------------------------------------------------
# Physical constants
#
c = speed_of_light = 299792458 * m/s
G = gravitational_constant = Rational('6.67428') * ten**-11 * m**3 / kg / s**2
u0 = magnetic_constant = 4*pi * ten**-7 * N/A**2
e0 = electric_constant = 1/(u0 * c**2)
Z0 = vacuum_impedance = u0 * c
planck = Rational('6.2606896') * ten**-34 * J*s
hbar = planck / (2*pi)
avogadro = (Rational('6.02214179') * 10**23) / mol
boltzmann = Rational('1.3806505') * ten**-23 * J / K
gee = gees = Rational('9.80665') * m/s**2
atmosphere = atmospheres = atm = 101325 * pascal
# Other convenient units and magnitudes
defunit(c*julian_year, 'ly', 'lightyear', 'lightyears')
defunit(149597870691*m, 'au', 'astronomical_unit', 'astronomical_units')
|
flexible
|
{
"blob_id": "c0e1c0c4545777a669fac19900239ab9baade242",
"index": 5993,
"step-1": "<mask token>\n\n\nclass Unit(Atom, RelMeths, ArithMeths):\n is_positive = True\n is_commutative = True\n\n def __init__(self, name, abbrev):\n self.name = name\n self.abbrev = abbrev\n\n def tostr(self, level=0):\n return self.abbrev\n\n def __eq__(self, other):\n return isinstance(other, Unit) and self.name == other.name\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Unit(Atom, RelMeths, ArithMeths):\n is_positive = True\n is_commutative = True\n\n def __init__(self, name, abbrev):\n self.name = name\n self.abbrev = abbrev\n\n def tostr(self, level=0):\n return self.abbrev\n\n def __eq__(self, other):\n return isinstance(other, Unit) and self.name == other.name\n\n\ndef defunit(value, *names):\n u = value\n g = globals()\n for name in names:\n g[name] = u\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Unit(Atom, RelMeths, ArithMeths):\n is_positive = True\n is_commutative = True\n\n def __init__(self, name, abbrev):\n self.name = name\n self.abbrev = abbrev\n\n def tostr(self, level=0):\n return self.abbrev\n\n def __eq__(self, other):\n return isinstance(other, Unit) and self.name == other.name\n\n\ndef defunit(value, *names):\n u = value\n g = globals()\n for name in names:\n g[name] = u\n\n\n<mask token>\ndefunit(Unit('meter', 'm'), 'm', 'meter', 'meters')\ndefunit(Unit('kilogram', 'kg'), 'kg', 'kilogram', 'kilograms')\ndefunit(Unit('second', 's'), 's', 'second', 'seconds')\ndefunit(Unit('ampere', 'A'), 'A', 'ampere', 'amperes')\ndefunit(Unit('kelvin', 'K'), 'K', 'kelvin', 'kelvins')\ndefunit(Unit('mole', 'mol'), 'mol', 'mole', 'moles')\ndefunit(Unit('candela', 'cd'), 'cd', 'candela', 'candelas')\ndefunit(1 / s, 'Hz', 'hz', 'hertz')\ndefunit(m * kg / s ** 2, 'N', 'newton', 'newtons')\ndefunit(N * m, 'J', 'joule', 'joules')\ndefunit(J / s, 'W', 'watt', 'watts')\ndefunit(N / m ** 2, 'Pa', 'pa', 'pascal', 'pascals')\ndefunit(s * A, 'C', 'coulomb', 'coulombs')\ndefunit(W / A, 'v', 'V', 'volt', 'volts')\ndefunit(V / A, 'ohm', 'ohms')\ndefunit(A / V, 'S', 'siemens', 'mho', 'mhos')\ndefunit(C / V, 'F', 'farad', 'farads')\ndefunit(J / A, 'Wb', 'wb', 'weber', 'webers')\ndefunit(V * s / m ** 2, 'T', 'tesla', 'teslas')\ndefunit(V * s / A, 'H', 'henry', 'henrys')\ndefunit(kilo * m, 'km', 'kilometer', 'kilometers')\ndefunit(deci * m, 'dm', 'decimeter', 'decimeters')\ndefunit(centi * m, 'cm', 'centimeter', 'centimeters')\ndefunit(milli * m, 'mm', 'millimeter', 'millimeters')\ndefunit(micro * m, 'um', 'micrometer', 'micrometers', 'micron', 'microns')\ndefunit(nano * m, 'nm', 'nanometer', 'nanometers')\ndefunit(pico * m, 'pm', 'picometer', 'picometers')\ndefunit(Rational('0.3048') * m, 'ft', 'foot', 'feet')\ndefunit(Rational('25.4') * mm, 'inch', 'inches')\ndefunit(3 * ft, 'yd', 'yard', 'yards')\ndefunit(5280 * ft, 'mi', 'mile', 'miles')\ndefunit(m ** 3 / 1000, 'l', 'liter', 'liters')\ndefunit(deci * l, 'dl', 'deciliter', 'deciliters')\ndefunit(centi * l, 'cl', 'centiliter', 'centiliters')\ndefunit(milli * l, 'ml', 'milliliter', 'milliliters')\ndefunit(milli * s, 'ms', 'millisecond', 'milliseconds')\ndefunit(micro * s, 'us', 'microsecond', 'microseconds')\ndefunit(nano * s, 'ns', 'nanosecond', 'nanoseconds')\ndefunit(pico * s, 'ps', 'picosecond', 'picoseconds')\ndefunit(60 * s, 'minute', 'minutes')\ndefunit(60 * minute, 'h', 'hour', 'hours')\ndefunit(24 * hour, 'day', 'days')\ndefunit(Rational('31558149.540') * s, 'sidereal_year', 'sidereal_years')\ndefunit(Rational('365.24219') * day, 'tropical_year', 'tropical_years')\ndefunit(Rational('365') * day, 'common_year', 'common_years')\ndefunit(Rational('365.25') * day, 'julian_year', 'julian_years')\n<mask token>\ndefunit(kilogram / kilo, 'g', 'gram', 'grams')\ndefunit(milli * g, 'mg', 'milligram', 'milligrams')\ndefunit(micro * g, 'ug', 'microgram', 'micrograms')\n<mask token>\ndefunit(c * julian_year, 'ly', 'lightyear', 'lightyears')\ndefunit(149597870691 * m, 'au', 'astronomical_unit', 'astronomical_units')\n",
"step-4": "<mask token>\nfrom sympy import *\nfrom sympy.core.basic import Atom\nfrom sympy.core.methods import ArithMeths, RelMeths\n\n\nclass Unit(Atom, RelMeths, ArithMeths):\n is_positive = True\n is_commutative = True\n\n def __init__(self, name, abbrev):\n self.name = name\n self.abbrev = abbrev\n\n def tostr(self, level=0):\n return self.abbrev\n\n def __eq__(self, other):\n return isinstance(other, Unit) and self.name == other.name\n\n\ndef defunit(value, *names):\n u = value\n g = globals()\n for name in names:\n g[name] = u\n\n\npercent = percents = Rational(1, 100)\npermille = permille = Rational(1, 1000)\nten = Rational(10)\nyotta = ten ** 24\nzetta = ten ** 21\nexa = ten ** 18\npeta = ten ** 15\ntera = ten ** 12\ngiga = ten ** 9\nmega = ten ** 6\nkilo = ten ** 3\ndeca = ten ** 1\ndeci = ten ** -1\ncenti = ten ** -2\nmilli = ten ** -3\nmicro = ten ** -6\nnano = ten ** -9\npico = ten ** -12\nfemto = ten ** -15\natto = ten ** -18\nzepto = ten ** -21\nyocto = ten ** -24\nrad = radian = radians = 1\ndeg = degree = degrees = pi / 180\ndefunit(Unit('meter', 'm'), 'm', 'meter', 'meters')\ndefunit(Unit('kilogram', 'kg'), 'kg', 'kilogram', 'kilograms')\ndefunit(Unit('second', 's'), 's', 'second', 'seconds')\ndefunit(Unit('ampere', 'A'), 'A', 'ampere', 'amperes')\ndefunit(Unit('kelvin', 'K'), 'K', 'kelvin', 'kelvins')\ndefunit(Unit('mole', 'mol'), 'mol', 'mole', 'moles')\ndefunit(Unit('candela', 'cd'), 'cd', 'candela', 'candelas')\ndefunit(1 / s, 'Hz', 'hz', 'hertz')\ndefunit(m * kg / s ** 2, 'N', 'newton', 'newtons')\ndefunit(N * m, 'J', 'joule', 'joules')\ndefunit(J / s, 'W', 'watt', 'watts')\ndefunit(N / m ** 2, 'Pa', 'pa', 'pascal', 'pascals')\ndefunit(s * A, 'C', 'coulomb', 'coulombs')\ndefunit(W / A, 'v', 'V', 'volt', 'volts')\ndefunit(V / A, 'ohm', 'ohms')\ndefunit(A / V, 'S', 'siemens', 'mho', 'mhos')\ndefunit(C / V, 'F', 'farad', 'farads')\ndefunit(J / A, 'Wb', 'wb', 'weber', 'webers')\ndefunit(V * s / m ** 2, 'T', 'tesla', 'teslas')\ndefunit(V * s / A, 'H', 'henry', 'henrys')\ndefunit(kilo * m, 'km', 'kilometer', 'kilometers')\ndefunit(deci * m, 'dm', 'decimeter', 'decimeters')\ndefunit(centi * m, 'cm', 'centimeter', 'centimeters')\ndefunit(milli * m, 'mm', 'millimeter', 'millimeters')\ndefunit(micro * m, 'um', 'micrometer', 'micrometers', 'micron', 'microns')\ndefunit(nano * m, 'nm', 'nanometer', 'nanometers')\ndefunit(pico * m, 'pm', 'picometer', 'picometers')\ndefunit(Rational('0.3048') * m, 'ft', 'foot', 'feet')\ndefunit(Rational('25.4') * mm, 'inch', 'inches')\ndefunit(3 * ft, 'yd', 'yard', 'yards')\ndefunit(5280 * ft, 'mi', 'mile', 'miles')\ndefunit(m ** 3 / 1000, 'l', 'liter', 'liters')\ndefunit(deci * l, 'dl', 'deciliter', 'deciliters')\ndefunit(centi * l, 'cl', 'centiliter', 'centiliters')\ndefunit(milli * l, 'ml', 'milliliter', 'milliliters')\ndefunit(milli * s, 'ms', 'millisecond', 'milliseconds')\ndefunit(micro * s, 'us', 'microsecond', 'microseconds')\ndefunit(nano * s, 'ns', 'nanosecond', 'nanoseconds')\ndefunit(pico * s, 'ps', 'picosecond', 'picoseconds')\ndefunit(60 * s, 'minute', 'minutes')\ndefunit(60 * minute, 'h', 'hour', 'hours')\ndefunit(24 * hour, 'day', 'days')\ndefunit(Rational('31558149.540') * s, 'sidereal_year', 'sidereal_years')\ndefunit(Rational('365.24219') * day, 'tropical_year', 'tropical_years')\ndefunit(Rational('365') * day, 'common_year', 'common_years')\ndefunit(Rational('365.25') * day, 'julian_year', 'julian_years')\nyear = years = tropical_year\ndefunit(kilogram / kilo, 'g', 'gram', 'grams')\ndefunit(milli * g, 'mg', 'milligram', 'milligrams')\ndefunit(micro * g, 'ug', 'microgram', 'micrograms')\nc = speed_of_light = 299792458 * m / s\nG = gravitational_constant = Rational('6.67428'\n ) * ten ** -11 * m ** 3 / kg / s ** 2\nu0 = magnetic_constant = 4 * pi * ten ** -7 * N / A ** 2\ne0 = electric_constant = 1 / (u0 * c ** 2)\nZ0 = vacuum_impedance = u0 * c\nplanck = Rational('6.2606896') * ten ** -34 * J * s\nhbar = planck / (2 * pi)\navogadro = Rational('6.02214179') * 10 ** 23 / mol\nboltzmann = Rational('1.3806505') * ten ** -23 * J / K\ngee = gees = Rational('9.80665') * m / s ** 2\natmosphere = atmospheres = atm = 101325 * pascal\ndefunit(c * julian_year, 'ly', 'lightyear', 'lightyears')\ndefunit(149597870691 * m, 'au', 'astronomical_unit', 'astronomical_units')\n",
"step-5": "\"\"\"\r\nPhysical units and dimensions\r\n\r\n\"\"\"\r\n\r\nfrom sympy import *\r\nfrom sympy.core.basic import Atom\r\nfrom sympy.core.methods import ArithMeths, RelMeths\r\n\r\n\r\nclass Unit(Atom, RelMeths, ArithMeths):\r\n is_positive = True # make (m**2)**Rational(1,2) --> m\r\n is_commutative = True\r\n\r\n def __init__(self, name, abbrev):\r\n self.name = name\r\n self.abbrev = abbrev\r\n\r\n def tostr(self, level=0):\r\n return self.abbrev\r\n\r\n def __eq__(self, other):\r\n return isinstance(other, Unit) and self.name == other.name\r\n\r\n\r\ndef defunit(value, *names):\r\n u = value\r\n g = globals()\r\n for name in names:\r\n g[name] = u\r\n\r\n\r\n# Dimensionless\r\n\r\npercent = percents = Rational(1,100)\r\npermille = permille = Rational(1,1000)\r\n\r\nten = Rational(10)\r\n\r\nyotta = ten**24\r\nzetta = ten**21\r\nexa = ten**18\r\npeta = ten**15\r\ntera = ten**12\r\ngiga = ten**9\r\nmega = ten**6\r\nkilo = ten**3\r\ndeca = ten**1\r\ndeci = ten**-1\r\ncenti = ten**-2\r\nmilli = ten**-3\r\nmicro = ten**-6\r\nnano = ten**-9\r\npico = ten**-12\r\nfemto = ten**-15\r\natto = ten**-18\r\nzepto = ten**-21\r\nyocto = ten**-24\r\n\r\nrad = radian = radians = 1\r\ndeg = degree = degrees = pi/180\r\n\r\n\r\n# Base units\r\n\r\ndefunit(Unit('meter', 'm'), 'm', 'meter', 'meters')\r\ndefunit(Unit('kilogram', 'kg'), 'kg', 'kilogram', 'kilograms')\r\ndefunit(Unit('second', 's'), 's', 'second', 'seconds')\r\ndefunit(Unit('ampere', 'A'), 'A', 'ampere', 'amperes')\r\ndefunit(Unit('kelvin', 'K'), 'K', 'kelvin', 'kelvins')\r\ndefunit(Unit('mole', 'mol'), 'mol', 'mole', 'moles')\r\ndefunit(Unit('candela', 'cd'), 'cd', 'candela', 'candelas')\r\n\r\n\r\n# Derived units\r\n\r\ndefunit(1/s, 'Hz', 'hz', 'hertz')\r\ndefunit(m*kg/s**2, 'N', 'newton', 'newtons')\r\ndefunit(N*m, 'J', 'joule', 'joules')\r\ndefunit(J/s, 'W', 'watt', 'watts')\r\ndefunit(N/m**2, 'Pa', 'pa', 'pascal', 'pascals')\r\ndefunit(s*A, 'C', 'coulomb', 'coulombs')\r\ndefunit(W/A, 'v', 'V', 'volt', 'volts')\r\ndefunit(V/A, 'ohm', 'ohms')\r\ndefunit(A/V, 'S', 'siemens', 'mho', 'mhos')\r\ndefunit(C/V, 'F', 'farad', 'farads')\r\ndefunit(J/A, 'Wb', 'wb', 'weber', 'webers')\r\ndefunit(V*s/m**2, 'T', 'tesla', 'teslas')\r\ndefunit(V*s/A, 'H', 'henry', 'henrys')\r\n\r\n\r\n# Common length units\r\n\r\ndefunit(kilo*m, 'km', 'kilometer', 'kilometers')\r\ndefunit(deci*m, 'dm', 'decimeter', 'decimeters')\r\ndefunit(centi*m, 'cm', 'centimeter', 'centimeters')\r\ndefunit(milli*m, 'mm', 'millimeter', 'millimeters')\r\ndefunit(micro*m, 'um', 'micrometer', 'micrometers', 'micron', 'microns')\r\ndefunit(nano*m, 'nm', 'nanometer', 'nanometers')\r\ndefunit(pico*m, 'pm', 'picometer', 'picometers')\r\n\r\ndefunit(Rational('0.3048')*m, 'ft', 'foot', 'feet')\r\ndefunit(Rational('25.4')*mm, 'inch', 'inches')\r\ndefunit(3*ft, 'yd', 'yard', 'yards')\r\ndefunit(5280*ft, 'mi', 'mile', 'miles')\r\n\r\n\r\n# Common volume and area units\r\n\r\ndefunit(m**3 / 1000, 'l', 'liter', 'liters')\r\ndefunit(deci*l, 'dl', 'deciliter', 'deciliters')\r\ndefunit(centi*l, 'cl', 'centiliter', 'centiliters')\r\ndefunit(milli*l, 'ml', 'milliliter', 'milliliters')\r\n\r\n\r\n# Common time units\r\n\r\ndefunit(milli*s, 'ms', 'millisecond', 'milliseconds')\r\ndefunit(micro*s, 'us', 'microsecond', 'microseconds')\r\ndefunit(nano*s, 'ns', 'nanosecond', 'nanoseconds')\r\ndefunit(pico*s, 'ps', 'picosecond', 'picoseconds')\r\n\r\ndefunit(60*s, 'minute', 'minutes')\r\ndefunit(60*minute, 'h', 'hour', 'hours')\r\ndefunit(24*hour, 'day', 'days')\r\n\r\ndefunit(Rational('31558149.540')*s, 'sidereal_year', 'sidereal_years')\r\ndefunit(Rational('365.24219')*day, 'tropical_year', 'tropical_years')\r\ndefunit(Rational('365')*day, 'common_year', 'common_years')\r\ndefunit(Rational('365.25')*day, 'julian_year', 'julian_years')\r\n\r\nyear = years = tropical_year\r\n\r\n\r\n# Common mass units\r\n\r\ndefunit(kilogram / kilo, 'g', 'gram', 'grams')\r\ndefunit(milli * g, 'mg', 'milligram', 'milligrams')\r\ndefunit(micro * g, 'ug', 'microgram', 'micrograms')\r\n\r\n\r\n\r\n#----------------------------------------------------------------------------\r\n# Physical constants\r\n#\r\n\r\nc = speed_of_light = 299792458 * m/s\r\nG = gravitational_constant = Rational('6.67428') * ten**-11 * m**3 / kg / s**2\r\nu0 = magnetic_constant = 4*pi * ten**-7 * N/A**2\r\ne0 = electric_constant = 1/(u0 * c**2)\r\nZ0 = vacuum_impedance = u0 * c\r\n\r\nplanck = Rational('6.2606896') * ten**-34 * J*s\r\nhbar = planck / (2*pi)\r\n\r\navogadro = (Rational('6.02214179') * 10**23) / mol\r\nboltzmann = Rational('1.3806505') * ten**-23 * J / K\r\n\r\ngee = gees = Rational('9.80665') * m/s**2\r\natmosphere = atmospheres = atm = 101325 * pascal\r\n\r\n\r\n# Other convenient units and magnitudes\r\n\r\ndefunit(c*julian_year, 'ly', 'lightyear', 'lightyears')\r\ndefunit(149597870691*m, 'au', 'astronomical_unit', 'astronomical_units')\r\n",
"step-ids": [
5,
6,
7,
9,
10
]
}
|
[
5,
6,
7,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for key in my_dict:
print('{} - {}'.format(key, my_dict[key]))
<|reserved_special_token_1|>
my_dict = {'one': '1', 'two': '2'}
for key in my_dict:
print('{} - {}'.format(key, my_dict[key]))
|
flexible
|
{
"blob_id": "1d524312cbd3b735850046131f31c03fdfa90bbc",
"index": 483,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor key in my_dict:\n print('{} - {}'.format(key, my_dict[key]))\n",
"step-3": "my_dict = {'one': '1', 'two': '2'}\nfor key in my_dict:\n print('{} - {}'.format(key, my_dict[key]))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def similarity_metric(embedding1: numpy.ndarray, embedding2: numpy.ndarray
) ->float:
return numpy.nan_to_num(1 - cosine(embedding1, embedding2), 0)
<|reserved_special_token_1|>
import numpy
from scipy.spatial.distance import cosine
def similarity_metric(embedding1: numpy.ndarray, embedding2: numpy.ndarray
) ->float:
return numpy.nan_to_num(1 - cosine(embedding1, embedding2), 0)
|
flexible
|
{
"blob_id": "ec9f27b4313f72ae6eb7e8280d47de226aeb6bb1",
"index": 2270,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef similarity_metric(embedding1: numpy.ndarray, embedding2: numpy.ndarray\n ) ->float:\n return numpy.nan_to_num(1 - cosine(embedding1, embedding2), 0)\n",
"step-3": "import numpy\nfrom scipy.spatial.distance import cosine\n\n\ndef similarity_metric(embedding1: numpy.ndarray, embedding2: numpy.ndarray\n ) ->float:\n return numpy.nan_to_num(1 - cosine(embedding1, embedding2), 0)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
@tf.function
def train_discrepancy_1(main_data, main_labels, target_data):
with tf.GradientTape(persistent=True) as tape:
shared_main = [shared[i](main_data, training=True) for i in range(
NUM_MODELS)]
main_logits_1 = [main_classifier_1[i](shared_main[i], training=True
) for i in range(NUM_MODELS)]
main_logits_2 = [main_classifier_2[i](shared_main[i], training=True
) for i in range(NUM_MODELS)]
main_loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(
main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]
shared_target = [shared[i](target_data, training=True) for i in
range(NUM_MODELS)]
target_logits_1 = [main_classifier_1[i](shared_target[i], training=
True) for i in range(NUM_MODELS)]
target_logits_2 = [main_classifier_2[i](shared_target[i], training=
True) for i in range(NUM_MODELS)]
adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(
target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for
i in range(NUM_MODELS)]
loss = [(main_loss[i] - adv_loss[i]) for i in range(NUM_MODELS)]
gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1
[i].trainable_variables) for i in range(NUM_MODELS)]
gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2
[i].trainable_variables) for i in range(NUM_MODELS)]
[optimizer_main_classifier_1[i].apply_gradients(zip(
gradients_main_classifier_1[i], main_classifier_1[i].
trainable_variables)) for i in range(NUM_MODELS)]
[optimizer_main_classifier_2[i].apply_gradients(zip(
gradients_main_classifier_2[i], main_classifier_2[i].
trainable_variables)) for i in range(NUM_MODELS)]
return adv_loss
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@tf.function
def train_discrepancy_1(main_data, main_labels, target_data):
with tf.GradientTape(persistent=True) as tape:
shared_main = [shared[i](main_data, training=True) for i in range(
NUM_MODELS)]
main_logits_1 = [main_classifier_1[i](shared_main[i], training=True
) for i in range(NUM_MODELS)]
main_logits_2 = [main_classifier_2[i](shared_main[i], training=True
) for i in range(NUM_MODELS)]
main_loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(
main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]
shared_target = [shared[i](target_data, training=True) for i in
range(NUM_MODELS)]
target_logits_1 = [main_classifier_1[i](shared_target[i], training=
True) for i in range(NUM_MODELS)]
target_logits_2 = [main_classifier_2[i](shared_target[i], training=
True) for i in range(NUM_MODELS)]
adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(
target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for
i in range(NUM_MODELS)]
loss = [(main_loss[i] - adv_loss[i]) for i in range(NUM_MODELS)]
gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1
[i].trainable_variables) for i in range(NUM_MODELS)]
gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2
[i].trainable_variables) for i in range(NUM_MODELS)]
[optimizer_main_classifier_1[i].apply_gradients(zip(
gradients_main_classifier_1[i], main_classifier_1[i].
trainable_variables)) for i in range(NUM_MODELS)]
[optimizer_main_classifier_2[i].apply_gradients(zip(
gradients_main_classifier_2[i], main_classifier_2[i].
trainable_variables)) for i in range(NUM_MODELS)]
return adv_loss
@tf.function
def train_discrepancy_2(target_data):
with tf.GradientTape(persistent=True) as tape:
shared_target = [shared[i](target_data, training=True) for i in
range(NUM_MODELS)]
target_logits_1 = [main_classifier_1[i](shared_target[i], training=
True) for i in range(NUM_MODELS)]
target_logits_2 = [main_classifier_2[i](shared_target[i], training=
True) for i in range(NUM_MODELS)]
adv_loss = [tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) -
tf.nn.softmax(target_logits_2[i]))) for i in range(NUM_MODELS)]
gradients_shared = [tape.gradient(adv_loss[i], shared[i].
trainable_variables) for i in range(NUM_MODELS)]
[optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]
.trainable_variables)) for i in range(NUM_MODELS)]
return adv_loss
@tf.function
def train_step_erm(main_data, main_labels):
with tf.GradientTape(persistent=True) as tape:
shared_main = [shared[i](main_data, training=True) for i in range(
NUM_MODELS)]
main_logits_1 = [main_classifier_1[i](shared_main[i], training=True
) for i in range(NUM_MODELS)]
main_logits_2 = [main_classifier_2[i](shared_main[i], training=True
) for i in range(NUM_MODELS)]
loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(
main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]
gradients_shared = [tape.gradient(loss[i], shared[i].
trainable_variables) for i in range(NUM_MODELS)]
gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1
[i].trainable_variables) for i in range(NUM_MODELS)]
gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2
[i].trainable_variables) for i in range(NUM_MODELS)]
[optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]
.trainable_variables)) for i in range(NUM_MODELS)]
[optimizer_main_classifier_1[i].apply_gradients(zip(
gradients_main_classifier_1[i], main_classifier_1[i].
trainable_variables)) for i in range(NUM_MODELS)]
[optimizer_main_classifier_2[i].apply_gradients(zip(
gradients_main_classifier_2[i], main_classifier_2[i].
trainable_variables)) for i in range(NUM_MODELS)]
return loss
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('../')
<|reserved_special_token_0|>
parser.add_argument('--USE_POISON', type=int, default=1, help=
'POISON used or not')
<|reserved_special_token_0|>
@tf.function
def train_discrepancy_1(main_data, main_labels, target_data):
with tf.GradientTape(persistent=True) as tape:
shared_main = [shared[i](main_data, training=True) for i in range(
NUM_MODELS)]
main_logits_1 = [main_classifier_1[i](shared_main[i], training=True
) for i in range(NUM_MODELS)]
main_logits_2 = [main_classifier_2[i](shared_main[i], training=True
) for i in range(NUM_MODELS)]
main_loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(
main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]
shared_target = [shared[i](target_data, training=True) for i in
range(NUM_MODELS)]
target_logits_1 = [main_classifier_1[i](shared_target[i], training=
True) for i in range(NUM_MODELS)]
target_logits_2 = [main_classifier_2[i](shared_target[i], training=
True) for i in range(NUM_MODELS)]
adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(
target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for
i in range(NUM_MODELS)]
loss = [(main_loss[i] - adv_loss[i]) for i in range(NUM_MODELS)]
gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1
[i].trainable_variables) for i in range(NUM_MODELS)]
gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2
[i].trainable_variables) for i in range(NUM_MODELS)]
[optimizer_main_classifier_1[i].apply_gradients(zip(
gradients_main_classifier_1[i], main_classifier_1[i].
trainable_variables)) for i in range(NUM_MODELS)]
[optimizer_main_classifier_2[i].apply_gradients(zip(
gradients_main_classifier_2[i], main_classifier_2[i].
trainable_variables)) for i in range(NUM_MODELS)]
return adv_loss
@tf.function
def train_discrepancy_2(target_data):
with tf.GradientTape(persistent=True) as tape:
shared_target = [shared[i](target_data, training=True) for i in
range(NUM_MODELS)]
target_logits_1 = [main_classifier_1[i](shared_target[i], training=
True) for i in range(NUM_MODELS)]
target_logits_2 = [main_classifier_2[i](shared_target[i], training=
True) for i in range(NUM_MODELS)]
adv_loss = [tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) -
tf.nn.softmax(target_logits_2[i]))) for i in range(NUM_MODELS)]
gradients_shared = [tape.gradient(adv_loss[i], shared[i].
trainable_variables) for i in range(NUM_MODELS)]
[optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]
.trainable_variables)) for i in range(NUM_MODELS)]
return adv_loss
@tf.function
def train_step_erm(main_data, main_labels):
with tf.GradientTape(persistent=True) as tape:
shared_main = [shared[i](main_data, training=True) for i in range(
NUM_MODELS)]
main_logits_1 = [main_classifier_1[i](shared_main[i], training=True
) for i in range(NUM_MODELS)]
main_logits_2 = [main_classifier_2[i](shared_main[i], training=True
) for i in range(NUM_MODELS)]
loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(
main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]
gradients_shared = [tape.gradient(loss[i], shared[i].
trainable_variables) for i in range(NUM_MODELS)]
gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1
[i].trainable_variables) for i in range(NUM_MODELS)]
gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2
[i].trainable_variables) for i in range(NUM_MODELS)]
[optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]
.trainable_variables)) for i in range(NUM_MODELS)]
[optimizer_main_classifier_1[i].apply_gradients(zip(
gradients_main_classifier_1[i], main_classifier_1[i].
trainable_variables)) for i in range(NUM_MODELS)]
[optimizer_main_classifier_2[i].apply_gradients(zip(
gradients_main_classifier_2[i], main_classifier_2[i].
trainable_variables)) for i in range(NUM_MODELS)]
return loss
<|reserved_special_token_0|>
if USE_POISON:
x_poison = np.load('data/' + METHOD + '_GENERATED_POISON_DATA.npy')
y_poison = np.load('data/' + METHOD + '_GENERATED_POISON_LABELS.npy')
x_train_mnist = np.concatenate([x_train_mnist, x_poison])
y_train_mnist = np.concatenate([y_train_mnist, y_poison])
for epoch in range(EPOCHS):
nb_batches_train = int(len(x_train_mnist) / BATCH_SIZE)
if len(x_train_mnist) % BATCH_SIZE != 0:
nb_batches_train += 1
ind_shuf = np.arange(len(x_train_mnist))
np.random.shuffle(ind_shuf)
for batch in range(nb_batches_train):
ind_batch = range(BATCH_SIZE * batch, min(BATCH_SIZE * (1 + batch),
len(x_train_mnist)))
ind_source = ind_shuf[ind_batch]
ind_target = np.random.choice(len(x_train_mnistm), size=len(
ind_source), replace=False)
x_source_batch = x_train_mnist[ind_source]
y_source_batch = y_train_mnist[ind_source]
x_target_batch = x_train_mnistm[ind_target]
train_step_erm(x_source_batch, y_source_batch)
train_discrepancy_1(x_source_batch, y_source_batch, x_target_batch)
train_discrepancy_2(x_target_batch)
if epoch % 20 == 0:
print('Full training Poisoning:', USE_POISON, 'MNIST->MNIST_M:',
epoch, 'METHOD:', METHOD, '\n')
print([eval_accuracy_main_cdan(x_target_test,
y_target_test_incorrect_label, shared[i], main_classifier_1[i]) for
i in range(NUM_MODELS)])
print([eval_accuracy_main_cdan(x_target_test, y_target_test, shared
[i], main_classifier_1[i]) for i in range(NUM_MODELS)])
print([eval_accuracy_main_cdan(x_test_mnistm, y_test_mnist, shared[
i], main_classifier_1[i]) for i in range(NUM_MODELS)])
if USE_POISON:
print([eval_accuracy_main_cdan(x_poison, y_poison, shared[i],
main_classifier_1[i]) for i in range(NUM_MODELS)])
print('\n')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('../')
<|reserved_special_token_0|>
parser = argparse.ArgumentParser(description='Training', formatter_class=
argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--USE_POISON', type=int, default=1, help=
'POISON used or not')
args = parser.parse_args()
USE_POISON = bool(args.USE_POISON)
METHOD = 'mcd'
IMG_WIDTH = 28
IMG_HEIGHT = 28
NCH = 3
NUM_CLASSES_MAIN = 2
NUM_CLASSES_DC = 2
EPOCHS = 101
BATCH_SIZE = 64
PLOT_POINTS = 100
NUM_MODELS = 5
ce_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
shared = [mnist2mnistm_shared_discrepancy([50000, IMG_HEIGHT, IMG_WIDTH,
NCH]) for i in range(NUM_MODELS)]
main_classifier_1 = [mnist2mnistm_predictor_discrepancy(shared[i],
NUM_CLASSES_MAIN, 768) for i in range(NUM_MODELS)]
main_classifier_2 = [mnist2mnistm_predictor_discrepancy(shared[i],
NUM_CLASSES_MAIN, 768) for i in range(NUM_MODELS)]
optimizer_shared = [tf.keras.optimizers.Adam(0.001, beta_1=0.5) for i in
range(NUM_MODELS)]
optimizer_main_classifier_1 = [tf.keras.optimizers.Adam(0.001, beta_1=0.5) for
i in range(NUM_MODELS)]
optimizer_main_classifier_2 = [tf.keras.optimizers.Adam(0.001, beta_1=0.5) for
i in range(NUM_MODELS)]
@tf.function
def train_discrepancy_1(main_data, main_labels, target_data):
with tf.GradientTape(persistent=True) as tape:
shared_main = [shared[i](main_data, training=True) for i in range(
NUM_MODELS)]
main_logits_1 = [main_classifier_1[i](shared_main[i], training=True
) for i in range(NUM_MODELS)]
main_logits_2 = [main_classifier_2[i](shared_main[i], training=True
) for i in range(NUM_MODELS)]
main_loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(
main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]
shared_target = [shared[i](target_data, training=True) for i in
range(NUM_MODELS)]
target_logits_1 = [main_classifier_1[i](shared_target[i], training=
True) for i in range(NUM_MODELS)]
target_logits_2 = [main_classifier_2[i](shared_target[i], training=
True) for i in range(NUM_MODELS)]
adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(
target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for
i in range(NUM_MODELS)]
loss = [(main_loss[i] - adv_loss[i]) for i in range(NUM_MODELS)]
gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1
[i].trainable_variables) for i in range(NUM_MODELS)]
gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2
[i].trainable_variables) for i in range(NUM_MODELS)]
[optimizer_main_classifier_1[i].apply_gradients(zip(
gradients_main_classifier_1[i], main_classifier_1[i].
trainable_variables)) for i in range(NUM_MODELS)]
[optimizer_main_classifier_2[i].apply_gradients(zip(
gradients_main_classifier_2[i], main_classifier_2[i].
trainable_variables)) for i in range(NUM_MODELS)]
return adv_loss
@tf.function
def train_discrepancy_2(target_data):
with tf.GradientTape(persistent=True) as tape:
shared_target = [shared[i](target_data, training=True) for i in
range(NUM_MODELS)]
target_logits_1 = [main_classifier_1[i](shared_target[i], training=
True) for i in range(NUM_MODELS)]
target_logits_2 = [main_classifier_2[i](shared_target[i], training=
True) for i in range(NUM_MODELS)]
adv_loss = [tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) -
tf.nn.softmax(target_logits_2[i]))) for i in range(NUM_MODELS)]
gradients_shared = [tape.gradient(adv_loss[i], shared[i].
trainable_variables) for i in range(NUM_MODELS)]
[optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]
.trainable_variables)) for i in range(NUM_MODELS)]
return adv_loss
@tf.function
def train_step_erm(main_data, main_labels):
with tf.GradientTape(persistent=True) as tape:
shared_main = [shared[i](main_data, training=True) for i in range(
NUM_MODELS)]
main_logits_1 = [main_classifier_1[i](shared_main[i], training=True
) for i in range(NUM_MODELS)]
main_logits_2 = [main_classifier_2[i](shared_main[i], training=True
) for i in range(NUM_MODELS)]
loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(
main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]
gradients_shared = [tape.gradient(loss[i], shared[i].
trainable_variables) for i in range(NUM_MODELS)]
gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1
[i].trainable_variables) for i in range(NUM_MODELS)]
gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2
[i].trainable_variables) for i in range(NUM_MODELS)]
[optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]
.trainable_variables)) for i in range(NUM_MODELS)]
[optimizer_main_classifier_1[i].apply_gradients(zip(
gradients_main_classifier_1[i], main_classifier_1[i].
trainable_variables)) for i in range(NUM_MODELS)]
[optimizer_main_classifier_2[i].apply_gradients(zip(
gradients_main_classifier_2[i], main_classifier_2[i].
trainable_variables)) for i in range(NUM_MODELS)]
return loss
mnist = tf.keras.datasets.mnist
(x_train_mnist_all, y_train_mnist_all), (x_test_mnist_all, y_test_mnist_all
) = mnist.load_data()
x_train_mnist_all = np.stack((x_train_mnist_all,) * 3, axis=-1) / 255.0
x_test_mnist_all = np.stack((x_test_mnist_all,) * 3, axis=-1) / 255.0
mnistm = pkl.load(open('../../../../MNIST_MNIST-m/mnistm_data.pkl', 'rb'))
x_train_mnistm_all = mnistm['train'] / 255.0
x_test_mnistm_all = mnistm['test'] / 255.0
picked_class = 3
picked_class_next = 8
train_points_class_0 = np.argwhere(y_train_mnist_all == picked_class).flatten()
train_points_class_1 = np.argwhere(y_train_mnist_all == picked_class_next
).flatten()
test_points_class_0 = np.argwhere(y_test_mnist_all == picked_class).flatten()
test_points_class_1 = np.argwhere(y_test_mnist_all == picked_class_next
).flatten()
x_train_mnist = x_train_mnist_all[np.concatenate([train_points_class_0,
train_points_class_1])]
y_train_mnist = y_train_mnist_all[np.concatenate([train_points_class_0,
train_points_class_1])]
x_test_mnist = x_test_mnist_all[np.concatenate([test_points_class_0,
test_points_class_1])]
y_test_mnist = y_test_mnist_all[np.concatenate([test_points_class_0,
test_points_class_1])]
x_train_mnistm = x_train_mnistm_all[np.concatenate([train_points_class_0,
train_points_class_1])]
x_test_mnistm = x_test_mnistm_all[np.concatenate([test_points_class_0,
test_points_class_1])]
zeros_train = np.argwhere(y_train_mnist == picked_class).flatten()
ones_train = np.argwhere(y_train_mnist == picked_class_next).flatten()
zeros_test = np.argwhere(y_test_mnist == picked_class).flatten()
ones_test = np.argwhere(y_test_mnist == picked_class_next).flatten()
y_train_mnist[zeros_train] = 0
y_train_mnist[ones_train] = 1
y_test_mnist[zeros_test] = 0
y_test_mnist[ones_test] = 1
y_train_mnist = keras.utils.to_categorical(y_train_mnist, NUM_CLASSES_MAIN)
y_test_mnist = keras.utils.to_categorical(y_test_mnist, NUM_CLASSES_MAIN)
x_target_test = np.load('data/' + METHOD + '_TARGET_DATA.npy')
y_target_test = np.load('data/' + METHOD + '_TARGET_LABEL.npy')
y_target_test_incorrect_label = np.zeros([1, NUM_CLASSES_MAIN])
target_correct_label = np.argmax(y_target_test, 1).flatten()[0]
y_target_test_incorrect_label[0][(target_correct_label + 1) % NUM_CLASSES_MAIN
] = 1
if USE_POISON:
x_poison = np.load('data/' + METHOD + '_GENERATED_POISON_DATA.npy')
y_poison = np.load('data/' + METHOD + '_GENERATED_POISON_LABELS.npy')
x_train_mnist = np.concatenate([x_train_mnist, x_poison])
y_train_mnist = np.concatenate([y_train_mnist, y_poison])
for epoch in range(EPOCHS):
nb_batches_train = int(len(x_train_mnist) / BATCH_SIZE)
if len(x_train_mnist) % BATCH_SIZE != 0:
nb_batches_train += 1
ind_shuf = np.arange(len(x_train_mnist))
np.random.shuffle(ind_shuf)
for batch in range(nb_batches_train):
ind_batch = range(BATCH_SIZE * batch, min(BATCH_SIZE * (1 + batch),
len(x_train_mnist)))
ind_source = ind_shuf[ind_batch]
ind_target = np.random.choice(len(x_train_mnistm), size=len(
ind_source), replace=False)
x_source_batch = x_train_mnist[ind_source]
y_source_batch = y_train_mnist[ind_source]
x_target_batch = x_train_mnistm[ind_target]
train_step_erm(x_source_batch, y_source_batch)
train_discrepancy_1(x_source_batch, y_source_batch, x_target_batch)
train_discrepancy_2(x_target_batch)
if epoch % 20 == 0:
print('Full training Poisoning:', USE_POISON, 'MNIST->MNIST_M:',
epoch, 'METHOD:', METHOD, '\n')
print([eval_accuracy_main_cdan(x_target_test,
y_target_test_incorrect_label, shared[i], main_classifier_1[i]) for
i in range(NUM_MODELS)])
print([eval_accuracy_main_cdan(x_target_test, y_target_test, shared
[i], main_classifier_1[i]) for i in range(NUM_MODELS)])
print([eval_accuracy_main_cdan(x_test_mnistm, y_test_mnist, shared[
i], main_classifier_1[i]) for i in range(NUM_MODELS)])
if USE_POISON:
print([eval_accuracy_main_cdan(x_poison, y_poison, shared[i],
main_classifier_1[i]) for i in range(NUM_MODELS)])
print('\n')
<|reserved_special_token_1|>
import sys
sys.path.append("../")
import numpy as np
import tensorflow as tf
from utils import eval_accuracy_main_cdan
from models import mnist2mnistm_shared_discrepancy, mnist2mnistm_predictor_discrepancy
import keras
import argparse
import pickle as pkl
parser = argparse.ArgumentParser(description='Training', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--USE_POISON', type=int, default=1, help='POISON used or not')
args = parser.parse_args()
USE_POISON = bool(args.USE_POISON)
METHOD = "mcd"
IMG_WIDTH = 28
IMG_HEIGHT = 28
NCH = 3
NUM_CLASSES_MAIN = 2
NUM_CLASSES_DC = 2
EPOCHS = 101
BATCH_SIZE = 64
PLOT_POINTS = 100
NUM_MODELS = 5
ce_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
shared = [mnist2mnistm_shared_discrepancy([50000, IMG_HEIGHT, IMG_WIDTH, NCH]) for i in range(NUM_MODELS)]
main_classifier_1 = [mnist2mnistm_predictor_discrepancy(shared[i], NUM_CLASSES_MAIN, 768) for i in range(NUM_MODELS)]#48*4*4, 500
main_classifier_2 = [mnist2mnistm_predictor_discrepancy(shared[i], NUM_CLASSES_MAIN, 768) for i in range(NUM_MODELS)]
optimizer_shared = [tf.keras.optimizers.Adam(1E-3, beta_1=0.5) for i in range(NUM_MODELS)]
optimizer_main_classifier_1 = [tf.keras.optimizers.Adam(1E-3, beta_1=0.5) for i in range(NUM_MODELS)]
optimizer_main_classifier_2 = [tf.keras.optimizers.Adam(1E-3, beta_1=0.5) for i in range(NUM_MODELS)]
@tf.function
def train_discrepancy_1(main_data, main_labels, target_data):
# persistent is set to True because the tape is used more than
# once to calculate the gradients.
with tf.GradientTape(persistent=True) as tape:
shared_main = [shared[i](main_data, training=True) for i in range(NUM_MODELS)]
main_logits_1 = [main_classifier_1[i](shared_main[i], training=True) for i in range(NUM_MODELS)]
main_logits_2 = [main_classifier_2[i](shared_main[i], training=True) for i in range(NUM_MODELS)]
main_loss = [ce_loss(main_labels, main_logits_1[i]) + ce_loss(main_labels, main_logits_2[i]) for i in range(NUM_MODELS)]
shared_target = [shared[i](target_data, training=True) for i in range(NUM_MODELS)]
target_logits_1 = [main_classifier_1[i](shared_target[i], training=True) for i in range(NUM_MODELS)]
target_logits_2 = [main_classifier_2[i](shared_target[i], training=True) for i in range(NUM_MODELS)]
adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for i in range(NUM_MODELS)]
loss = [main_loss[i] - adv_loss[i] for i in range(NUM_MODELS)]
gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1[i].trainable_variables) for i in range(NUM_MODELS)]
gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2[i].trainable_variables) for i in range(NUM_MODELS)]
[optimizer_main_classifier_1[i].apply_gradients(zip(gradients_main_classifier_1[i], main_classifier_1[i].trainable_variables)) for i in range(NUM_MODELS)]
[optimizer_main_classifier_2[i].apply_gradients(zip(gradients_main_classifier_2[i], main_classifier_2[i].trainable_variables)) for i in range(NUM_MODELS)]
return adv_loss
@tf.function
def train_discrepancy_2(target_data):
# persistent is set to True because the tape is used more than
# once to calculate the gradients.
with tf.GradientTape(persistent=True) as tape:
shared_target = [shared[i](target_data, training=True) for i in range(NUM_MODELS)]
target_logits_1 = [main_classifier_1[i](shared_target[i], training=True) for i in range(NUM_MODELS)]
target_logits_2 = [main_classifier_2[i](shared_target[i], training=True) for i in range(NUM_MODELS)]
adv_loss = [tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) - tf.nn.softmax(target_logits_2[i]))) for i in range(NUM_MODELS)]
gradients_shared = [tape.gradient(adv_loss[i], shared[i].trainable_variables) for i in range(NUM_MODELS)]
[optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i].trainable_variables)) for i in range(NUM_MODELS)]
return adv_loss
@tf.function
def train_step_erm(main_data, main_labels):
# persistent is set to True because the tape is used more than
# once to calculate the gradients.
with tf.GradientTape(persistent=True) as tape:
shared_main = [shared[i](main_data, training=True) for i in range(NUM_MODELS)]
main_logits_1 = [main_classifier_1[i](shared_main[i], training=True) for i in range(NUM_MODELS)]
main_logits_2 = [main_classifier_2[i](shared_main[i], training=True) for i in range(NUM_MODELS)]
loss = [ce_loss(main_labels, main_logits_1[i]) + ce_loss(main_labels, main_logits_2[i]) for i in range(NUM_MODELS)]
gradients_shared = [tape.gradient(loss[i], shared[i].trainable_variables) for i in range(NUM_MODELS)]
gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1[i].trainable_variables) for i in range(NUM_MODELS)]
gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2[i].trainable_variables) for i in range(NUM_MODELS)]
[optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i].trainable_variables)) for i in range(NUM_MODELS)]
[optimizer_main_classifier_1[i].apply_gradients(zip(gradients_main_classifier_1[i], main_classifier_1[i].trainable_variables)) for i in range(NUM_MODELS)]
[optimizer_main_classifier_2[i].apply_gradients(zip(gradients_main_classifier_2[i], main_classifier_2[i].trainable_variables)) for i in range(NUM_MODELS)]
return loss
mnist = tf.keras.datasets.mnist
(x_train_mnist_all, y_train_mnist_all), (x_test_mnist_all, y_test_mnist_all) = mnist.load_data()
x_train_mnist_all = np.stack((x_train_mnist_all,)*3, axis=-1)/255.
x_test_mnist_all = np.stack((x_test_mnist_all,)*3, axis=-1)/255.
mnistm = pkl.load(open('../../../../MNIST_MNIST-m/mnistm_data.pkl', 'rb'))
x_train_mnistm_all = mnistm['train']/255.
x_test_mnistm_all = mnistm['test']/255.
picked_class = 3
picked_class_next = 8
train_points_class_0 = np.argwhere(y_train_mnist_all == picked_class).flatten()
train_points_class_1 = np.argwhere(y_train_mnist_all == picked_class_next).flatten()
test_points_class_0 = np.argwhere(y_test_mnist_all == picked_class).flatten()
test_points_class_1 = np.argwhere(y_test_mnist_all == picked_class_next).flatten()
x_train_mnist = x_train_mnist_all[np.concatenate([train_points_class_0, train_points_class_1])]
y_train_mnist = y_train_mnist_all[np.concatenate([train_points_class_0, train_points_class_1])]
x_test_mnist = x_test_mnist_all[np.concatenate([test_points_class_0, test_points_class_1])]
y_test_mnist = y_test_mnist_all[np.concatenate([test_points_class_0, test_points_class_1])]
x_train_mnistm = x_train_mnistm_all[np.concatenate([train_points_class_0, train_points_class_1])]
x_test_mnistm = x_test_mnistm_all[np.concatenate([test_points_class_0, test_points_class_1])]
zeros_train = np.argwhere(y_train_mnist == picked_class).flatten()
ones_train = np.argwhere(y_train_mnist == picked_class_next).flatten()
zeros_test = np.argwhere(y_test_mnist == picked_class).flatten()
ones_test = np.argwhere(y_test_mnist == picked_class_next).flatten()
y_train_mnist[zeros_train] = 0
y_train_mnist[ones_train] = 1
y_test_mnist[zeros_test] = 0
y_test_mnist[ones_test] = 1
y_train_mnist = keras.utils.to_categorical(y_train_mnist, NUM_CLASSES_MAIN)
y_test_mnist = keras.utils.to_categorical(y_test_mnist, NUM_CLASSES_MAIN)
x_target_test = np.load("data/" + METHOD + "_TARGET_DATA.npy")
y_target_test = np.load("data/" + METHOD + "_TARGET_LABEL.npy")
y_target_test_incorrect_label = np.zeros([1, NUM_CLASSES_MAIN])
target_correct_label = np.argmax(y_target_test,1).flatten()[0]
y_target_test_incorrect_label[0][(target_correct_label+1)%NUM_CLASSES_MAIN]=1
if USE_POISON:
x_poison = np.load("data/" + METHOD + "_GENERATED_POISON_DATA.npy")
y_poison = np.load("data/" + METHOD + "_GENERATED_POISON_LABELS.npy")
x_train_mnist = np.concatenate([x_train_mnist, x_poison])
y_train_mnist = np.concatenate([y_train_mnist, y_poison])
for epoch in range(EPOCHS):
nb_batches_train = int(len(x_train_mnist)/BATCH_SIZE)
if len(x_train_mnist) % BATCH_SIZE != 0:
nb_batches_train += 1
ind_shuf = np.arange(len(x_train_mnist))
np.random.shuffle(ind_shuf)
for batch in range(nb_batches_train):
ind_batch = range(BATCH_SIZE * batch, min(BATCH_SIZE * (1+batch), len(x_train_mnist)))
ind_source = ind_shuf[ind_batch]
ind_target = np.random.choice(len(x_train_mnistm), size=len(ind_source), replace=False)
x_source_batch = x_train_mnist[ind_source]
y_source_batch = y_train_mnist[ind_source]
x_target_batch = x_train_mnistm[ind_target]
train_step_erm(x_source_batch, y_source_batch)
train_discrepancy_1(x_source_batch, y_source_batch, x_target_batch)
train_discrepancy_2(x_target_batch)
if epoch % 20 == 0:
print("Full training Poisoning:", USE_POISON, "MNIST->MNIST_M:", epoch, "METHOD:", METHOD, "\n")
print([eval_accuracy_main_cdan(x_target_test, y_target_test_incorrect_label, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])
print([eval_accuracy_main_cdan(x_target_test, y_target_test, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])
print([eval_accuracy_main_cdan(x_test_mnistm, y_test_mnist, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])
if USE_POISON:
print([eval_accuracy_main_cdan(x_poison, y_poison, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])
print("\n")
|
flexible
|
{
"blob_id": "465d5baae8d5be77fbf3d550d10667da420a8fbe",
"index": 8608,
"step-1": "<mask token>\n\n\n@tf.function\ndef train_discrepancy_1(main_data, main_labels, target_data):\n with tf.GradientTape(persistent=True) as tape:\n shared_main = [shared[i](main_data, training=True) for i in range(\n NUM_MODELS)]\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(\n main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]\n shared_target = [shared[i](target_data, training=True) for i in\n range(NUM_MODELS)]\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n target_logits_2 = [main_classifier_2[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(\n target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for\n i in range(NUM_MODELS)]\n loss = [(main_loss[i] - adv_loss[i]) for i in range(NUM_MODELS)]\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1\n [i].trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2\n [i].trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_1[i].apply_gradients(zip(\n gradients_main_classifier_1[i], main_classifier_1[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_2[i].apply_gradients(zip(\n gradients_main_classifier_2[i], main_classifier_2[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n return adv_loss\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@tf.function\ndef train_discrepancy_1(main_data, main_labels, target_data):\n with tf.GradientTape(persistent=True) as tape:\n shared_main = [shared[i](main_data, training=True) for i in range(\n NUM_MODELS)]\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(\n main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]\n shared_target = [shared[i](target_data, training=True) for i in\n range(NUM_MODELS)]\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n target_logits_2 = [main_classifier_2[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(\n target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for\n i in range(NUM_MODELS)]\n loss = [(main_loss[i] - adv_loss[i]) for i in range(NUM_MODELS)]\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1\n [i].trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2\n [i].trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_1[i].apply_gradients(zip(\n gradients_main_classifier_1[i], main_classifier_1[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_2[i].apply_gradients(zip(\n gradients_main_classifier_2[i], main_classifier_2[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n return adv_loss\n\n\n@tf.function\ndef train_discrepancy_2(target_data):\n with tf.GradientTape(persistent=True) as tape:\n shared_target = [shared[i](target_data, training=True) for i in\n range(NUM_MODELS)]\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n target_logits_2 = [main_classifier_2[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n adv_loss = [tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) -\n tf.nn.softmax(target_logits_2[i]))) for i in range(NUM_MODELS)]\n gradients_shared = [tape.gradient(adv_loss[i], shared[i].\n trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]\n .trainable_variables)) for i in range(NUM_MODELS)]\n return adv_loss\n\n\n@tf.function\ndef train_step_erm(main_data, main_labels):\n with tf.GradientTape(persistent=True) as tape:\n shared_main = [shared[i](main_data, training=True) for i in range(\n NUM_MODELS)]\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(\n main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]\n gradients_shared = [tape.gradient(loss[i], shared[i].\n trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1\n [i].trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2\n [i].trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]\n .trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_1[i].apply_gradients(zip(\n gradients_main_classifier_1[i], main_classifier_1[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_2[i].apply_gradients(zip(\n gradients_main_classifier_2[i], main_classifier_2[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n return loss\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('../')\n<mask token>\nparser.add_argument('--USE_POISON', type=int, default=1, help=\n 'POISON used or not')\n<mask token>\n\n\n@tf.function\ndef train_discrepancy_1(main_data, main_labels, target_data):\n with tf.GradientTape(persistent=True) as tape:\n shared_main = [shared[i](main_data, training=True) for i in range(\n NUM_MODELS)]\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(\n main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]\n shared_target = [shared[i](target_data, training=True) for i in\n range(NUM_MODELS)]\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n target_logits_2 = [main_classifier_2[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(\n target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for\n i in range(NUM_MODELS)]\n loss = [(main_loss[i] - adv_loss[i]) for i in range(NUM_MODELS)]\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1\n [i].trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2\n [i].trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_1[i].apply_gradients(zip(\n gradients_main_classifier_1[i], main_classifier_1[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_2[i].apply_gradients(zip(\n gradients_main_classifier_2[i], main_classifier_2[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n return adv_loss\n\n\n@tf.function\ndef train_discrepancy_2(target_data):\n with tf.GradientTape(persistent=True) as tape:\n shared_target = [shared[i](target_data, training=True) for i in\n range(NUM_MODELS)]\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n target_logits_2 = [main_classifier_2[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n adv_loss = [tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) -\n tf.nn.softmax(target_logits_2[i]))) for i in range(NUM_MODELS)]\n gradients_shared = [tape.gradient(adv_loss[i], shared[i].\n trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]\n .trainable_variables)) for i in range(NUM_MODELS)]\n return adv_loss\n\n\n@tf.function\ndef train_step_erm(main_data, main_labels):\n with tf.GradientTape(persistent=True) as tape:\n shared_main = [shared[i](main_data, training=True) for i in range(\n NUM_MODELS)]\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(\n main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]\n gradients_shared = [tape.gradient(loss[i], shared[i].\n trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1\n [i].trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2\n [i].trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]\n .trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_1[i].apply_gradients(zip(\n gradients_main_classifier_1[i], main_classifier_1[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_2[i].apply_gradients(zip(\n gradients_main_classifier_2[i], main_classifier_2[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n return loss\n\n\n<mask token>\nif USE_POISON:\n x_poison = np.load('data/' + METHOD + '_GENERATED_POISON_DATA.npy')\n y_poison = np.load('data/' + METHOD + '_GENERATED_POISON_LABELS.npy')\n x_train_mnist = np.concatenate([x_train_mnist, x_poison])\n y_train_mnist = np.concatenate([y_train_mnist, y_poison])\nfor epoch in range(EPOCHS):\n nb_batches_train = int(len(x_train_mnist) / BATCH_SIZE)\n if len(x_train_mnist) % BATCH_SIZE != 0:\n nb_batches_train += 1\n ind_shuf = np.arange(len(x_train_mnist))\n np.random.shuffle(ind_shuf)\n for batch in range(nb_batches_train):\n ind_batch = range(BATCH_SIZE * batch, min(BATCH_SIZE * (1 + batch),\n len(x_train_mnist)))\n ind_source = ind_shuf[ind_batch]\n ind_target = np.random.choice(len(x_train_mnistm), size=len(\n ind_source), replace=False)\n x_source_batch = x_train_mnist[ind_source]\n y_source_batch = y_train_mnist[ind_source]\n x_target_batch = x_train_mnistm[ind_target]\n train_step_erm(x_source_batch, y_source_batch)\n train_discrepancy_1(x_source_batch, y_source_batch, x_target_batch)\n train_discrepancy_2(x_target_batch)\n if epoch % 20 == 0:\n print('Full training Poisoning:', USE_POISON, 'MNIST->MNIST_M:',\n epoch, 'METHOD:', METHOD, '\\n')\n print([eval_accuracy_main_cdan(x_target_test,\n y_target_test_incorrect_label, shared[i], main_classifier_1[i]) for\n i in range(NUM_MODELS)])\n print([eval_accuracy_main_cdan(x_target_test, y_target_test, shared\n [i], main_classifier_1[i]) for i in range(NUM_MODELS)])\n print([eval_accuracy_main_cdan(x_test_mnistm, y_test_mnist, shared[\n i], main_classifier_1[i]) for i in range(NUM_MODELS)])\n if USE_POISON:\n print([eval_accuracy_main_cdan(x_poison, y_poison, shared[i],\n main_classifier_1[i]) for i in range(NUM_MODELS)])\n print('\\n')\n",
"step-4": "<mask token>\nsys.path.append('../')\n<mask token>\nparser = argparse.ArgumentParser(description='Training', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--USE_POISON', type=int, default=1, help=\n 'POISON used or not')\nargs = parser.parse_args()\nUSE_POISON = bool(args.USE_POISON)\nMETHOD = 'mcd'\nIMG_WIDTH = 28\nIMG_HEIGHT = 28\nNCH = 3\nNUM_CLASSES_MAIN = 2\nNUM_CLASSES_DC = 2\nEPOCHS = 101\nBATCH_SIZE = 64\nPLOT_POINTS = 100\nNUM_MODELS = 5\nce_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)\nshared = [mnist2mnistm_shared_discrepancy([50000, IMG_HEIGHT, IMG_WIDTH,\n NCH]) for i in range(NUM_MODELS)]\nmain_classifier_1 = [mnist2mnistm_predictor_discrepancy(shared[i],\n NUM_CLASSES_MAIN, 768) for i in range(NUM_MODELS)]\nmain_classifier_2 = [mnist2mnistm_predictor_discrepancy(shared[i],\n NUM_CLASSES_MAIN, 768) for i in range(NUM_MODELS)]\noptimizer_shared = [tf.keras.optimizers.Adam(0.001, beta_1=0.5) for i in\n range(NUM_MODELS)]\noptimizer_main_classifier_1 = [tf.keras.optimizers.Adam(0.001, beta_1=0.5) for\n i in range(NUM_MODELS)]\noptimizer_main_classifier_2 = [tf.keras.optimizers.Adam(0.001, beta_1=0.5) for\n i in range(NUM_MODELS)]\n\n\n@tf.function\ndef train_discrepancy_1(main_data, main_labels, target_data):\n with tf.GradientTape(persistent=True) as tape:\n shared_main = [shared[i](main_data, training=True) for i in range(\n NUM_MODELS)]\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(\n main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]\n shared_target = [shared[i](target_data, training=True) for i in\n range(NUM_MODELS)]\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n target_logits_2 = [main_classifier_2[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(\n target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for\n i in range(NUM_MODELS)]\n loss = [(main_loss[i] - adv_loss[i]) for i in range(NUM_MODELS)]\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1\n [i].trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2\n [i].trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_1[i].apply_gradients(zip(\n gradients_main_classifier_1[i], main_classifier_1[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_2[i].apply_gradients(zip(\n gradients_main_classifier_2[i], main_classifier_2[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n return adv_loss\n\n\n@tf.function\ndef train_discrepancy_2(target_data):\n with tf.GradientTape(persistent=True) as tape:\n shared_target = [shared[i](target_data, training=True) for i in\n range(NUM_MODELS)]\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n target_logits_2 = [main_classifier_2[i](shared_target[i], training=\n True) for i in range(NUM_MODELS)]\n adv_loss = [tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) -\n tf.nn.softmax(target_logits_2[i]))) for i in range(NUM_MODELS)]\n gradients_shared = [tape.gradient(adv_loss[i], shared[i].\n trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]\n .trainable_variables)) for i in range(NUM_MODELS)]\n return adv_loss\n\n\n@tf.function\ndef train_step_erm(main_data, main_labels):\n with tf.GradientTape(persistent=True) as tape:\n shared_main = [shared[i](main_data, training=True) for i in range(\n NUM_MODELS)]\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True\n ) for i in range(NUM_MODELS)]\n loss = [(ce_loss(main_labels, main_logits_1[i]) + ce_loss(\n main_labels, main_logits_2[i])) for i in range(NUM_MODELS)]\n gradients_shared = [tape.gradient(loss[i], shared[i].\n trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1\n [i].trainable_variables) for i in range(NUM_MODELS)]\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2\n [i].trainable_variables) for i in range(NUM_MODELS)]\n [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i]\n .trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_1[i].apply_gradients(zip(\n gradients_main_classifier_1[i], main_classifier_1[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n [optimizer_main_classifier_2[i].apply_gradients(zip(\n gradients_main_classifier_2[i], main_classifier_2[i].\n trainable_variables)) for i in range(NUM_MODELS)]\n return loss\n\n\nmnist = tf.keras.datasets.mnist\n(x_train_mnist_all, y_train_mnist_all), (x_test_mnist_all, y_test_mnist_all\n ) = mnist.load_data()\nx_train_mnist_all = np.stack((x_train_mnist_all,) * 3, axis=-1) / 255.0\nx_test_mnist_all = np.stack((x_test_mnist_all,) * 3, axis=-1) / 255.0\nmnistm = pkl.load(open('../../../../MNIST_MNIST-m/mnistm_data.pkl', 'rb'))\nx_train_mnistm_all = mnistm['train'] / 255.0\nx_test_mnistm_all = mnistm['test'] / 255.0\npicked_class = 3\npicked_class_next = 8\ntrain_points_class_0 = np.argwhere(y_train_mnist_all == picked_class).flatten()\ntrain_points_class_1 = np.argwhere(y_train_mnist_all == picked_class_next\n ).flatten()\ntest_points_class_0 = np.argwhere(y_test_mnist_all == picked_class).flatten()\ntest_points_class_1 = np.argwhere(y_test_mnist_all == picked_class_next\n ).flatten()\nx_train_mnist = x_train_mnist_all[np.concatenate([train_points_class_0,\n train_points_class_1])]\ny_train_mnist = y_train_mnist_all[np.concatenate([train_points_class_0,\n train_points_class_1])]\nx_test_mnist = x_test_mnist_all[np.concatenate([test_points_class_0,\n test_points_class_1])]\ny_test_mnist = y_test_mnist_all[np.concatenate([test_points_class_0,\n test_points_class_1])]\nx_train_mnistm = x_train_mnistm_all[np.concatenate([train_points_class_0,\n train_points_class_1])]\nx_test_mnistm = x_test_mnistm_all[np.concatenate([test_points_class_0,\n test_points_class_1])]\nzeros_train = np.argwhere(y_train_mnist == picked_class).flatten()\nones_train = np.argwhere(y_train_mnist == picked_class_next).flatten()\nzeros_test = np.argwhere(y_test_mnist == picked_class).flatten()\nones_test = np.argwhere(y_test_mnist == picked_class_next).flatten()\ny_train_mnist[zeros_train] = 0\ny_train_mnist[ones_train] = 1\ny_test_mnist[zeros_test] = 0\ny_test_mnist[ones_test] = 1\ny_train_mnist = keras.utils.to_categorical(y_train_mnist, NUM_CLASSES_MAIN)\ny_test_mnist = keras.utils.to_categorical(y_test_mnist, NUM_CLASSES_MAIN)\nx_target_test = np.load('data/' + METHOD + '_TARGET_DATA.npy')\ny_target_test = np.load('data/' + METHOD + '_TARGET_LABEL.npy')\ny_target_test_incorrect_label = np.zeros([1, NUM_CLASSES_MAIN])\ntarget_correct_label = np.argmax(y_target_test, 1).flatten()[0]\ny_target_test_incorrect_label[0][(target_correct_label + 1) % NUM_CLASSES_MAIN\n ] = 1\nif USE_POISON:\n x_poison = np.load('data/' + METHOD + '_GENERATED_POISON_DATA.npy')\n y_poison = np.load('data/' + METHOD + '_GENERATED_POISON_LABELS.npy')\n x_train_mnist = np.concatenate([x_train_mnist, x_poison])\n y_train_mnist = np.concatenate([y_train_mnist, y_poison])\nfor epoch in range(EPOCHS):\n nb_batches_train = int(len(x_train_mnist) / BATCH_SIZE)\n if len(x_train_mnist) % BATCH_SIZE != 0:\n nb_batches_train += 1\n ind_shuf = np.arange(len(x_train_mnist))\n np.random.shuffle(ind_shuf)\n for batch in range(nb_batches_train):\n ind_batch = range(BATCH_SIZE * batch, min(BATCH_SIZE * (1 + batch),\n len(x_train_mnist)))\n ind_source = ind_shuf[ind_batch]\n ind_target = np.random.choice(len(x_train_mnistm), size=len(\n ind_source), replace=False)\n x_source_batch = x_train_mnist[ind_source]\n y_source_batch = y_train_mnist[ind_source]\n x_target_batch = x_train_mnistm[ind_target]\n train_step_erm(x_source_batch, y_source_batch)\n train_discrepancy_1(x_source_batch, y_source_batch, x_target_batch)\n train_discrepancy_2(x_target_batch)\n if epoch % 20 == 0:\n print('Full training Poisoning:', USE_POISON, 'MNIST->MNIST_M:',\n epoch, 'METHOD:', METHOD, '\\n')\n print([eval_accuracy_main_cdan(x_target_test,\n y_target_test_incorrect_label, shared[i], main_classifier_1[i]) for\n i in range(NUM_MODELS)])\n print([eval_accuracy_main_cdan(x_target_test, y_target_test, shared\n [i], main_classifier_1[i]) for i in range(NUM_MODELS)])\n print([eval_accuracy_main_cdan(x_test_mnistm, y_test_mnist, shared[\n i], main_classifier_1[i]) for i in range(NUM_MODELS)])\n if USE_POISON:\n print([eval_accuracy_main_cdan(x_poison, y_poison, shared[i],\n main_classifier_1[i]) for i in range(NUM_MODELS)])\n print('\\n')\n",
"step-5": "import sys\r\nsys.path.append(\"../\")\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom utils import eval_accuracy_main_cdan\r\nfrom models import mnist2mnistm_shared_discrepancy, mnist2mnistm_predictor_discrepancy\r\nimport keras\r\nimport argparse\r\nimport pickle as pkl \r\n\r\nparser = argparse.ArgumentParser(description='Training', formatter_class=argparse.ArgumentDefaultsHelpFormatter)\r\nparser.add_argument('--USE_POISON', type=int, default=1, help='POISON used or not')\r\nargs = parser.parse_args()\r\nUSE_POISON = bool(args.USE_POISON)\r\nMETHOD = \"mcd\"\r\n\r\nIMG_WIDTH = 28\r\nIMG_HEIGHT = 28\r\nNCH = 3\r\n\r\nNUM_CLASSES_MAIN = 2\r\nNUM_CLASSES_DC = 2\r\n\r\nEPOCHS = 101\r\nBATCH_SIZE = 64\r\nPLOT_POINTS = 100\r\n\r\nNUM_MODELS = 5\r\n\r\nce_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)\r\n\r\nshared = [mnist2mnistm_shared_discrepancy([50000, IMG_HEIGHT, IMG_WIDTH, NCH]) for i in range(NUM_MODELS)]\r\n\r\nmain_classifier_1 = [mnist2mnistm_predictor_discrepancy(shared[i], NUM_CLASSES_MAIN, 768) for i in range(NUM_MODELS)]#48*4*4, 500\r\nmain_classifier_2 = [mnist2mnistm_predictor_discrepancy(shared[i], NUM_CLASSES_MAIN, 768) for i in range(NUM_MODELS)]\r\n\r\noptimizer_shared = [tf.keras.optimizers.Adam(1E-3, beta_1=0.5) for i in range(NUM_MODELS)]\r\n\r\noptimizer_main_classifier_1 = [tf.keras.optimizers.Adam(1E-3, beta_1=0.5) for i in range(NUM_MODELS)]\r\noptimizer_main_classifier_2 = [tf.keras.optimizers.Adam(1E-3, beta_1=0.5) for i in range(NUM_MODELS)]\r\n\r\n@tf.function\r\ndef train_discrepancy_1(main_data, main_labels, target_data):\r\n # persistent is set to True because the tape is used more than\r\n # once to calculate the gradients.\r\n \r\n with tf.GradientTape(persistent=True) as tape:\r\n shared_main = [shared[i](main_data, training=True) for i in range(NUM_MODELS)]\r\n \r\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True) for i in range(NUM_MODELS)]\r\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True) for i in range(NUM_MODELS)]\r\n \r\n main_loss = [ce_loss(main_labels, main_logits_1[i]) + ce_loss(main_labels, main_logits_2[i]) for i in range(NUM_MODELS)]\r\n \r\n shared_target = [shared[i](target_data, training=True) for i in range(NUM_MODELS)]\r\n \r\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=True) for i in range(NUM_MODELS)]\r\n target_logits_2 = [main_classifier_2[i](shared_target[i], training=True) for i in range(NUM_MODELS)]\r\n \r\n adv_loss = [tf.reduce_mean(tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) - tf.nn.softmax(target_logits_2[i])), 1)) for i in range(NUM_MODELS)]\r\n \r\n loss = [main_loss[i] - adv_loss[i] for i in range(NUM_MODELS)]\r\n \r\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1[i].trainable_variables) for i in range(NUM_MODELS)]\r\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2[i].trainable_variables) for i in range(NUM_MODELS)]\r\n \r\n [optimizer_main_classifier_1[i].apply_gradients(zip(gradients_main_classifier_1[i], main_classifier_1[i].trainable_variables)) for i in range(NUM_MODELS)]\r\n [optimizer_main_classifier_2[i].apply_gradients(zip(gradients_main_classifier_2[i], main_classifier_2[i].trainable_variables)) for i in range(NUM_MODELS)]\r\n \r\n return adv_loss\r\n\r\n@tf.function\r\ndef train_discrepancy_2(target_data):\r\n # persistent is set to True because the tape is used more than\r\n # once to calculate the gradients.\r\n \r\n with tf.GradientTape(persistent=True) as tape:\r\n shared_target = [shared[i](target_data, training=True) for i in range(NUM_MODELS)]\r\n \r\n target_logits_1 = [main_classifier_1[i](shared_target[i], training=True) for i in range(NUM_MODELS)]\r\n target_logits_2 = [main_classifier_2[i](shared_target[i], training=True) for i in range(NUM_MODELS)]\r\n \r\n adv_loss = [tf.reduce_mean(tf.abs(tf.nn.softmax(target_logits_1[i]) - tf.nn.softmax(target_logits_2[i]))) for i in range(NUM_MODELS)]\r\n \r\n gradients_shared = [tape.gradient(adv_loss[i], shared[i].trainable_variables) for i in range(NUM_MODELS)]\r\n [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i].trainable_variables)) for i in range(NUM_MODELS)]\r\n \r\n return adv_loss\r\n\r\n\r\n@tf.function\r\ndef train_step_erm(main_data, main_labels):\r\n # persistent is set to True because the tape is used more than\r\n # once to calculate the gradients.\r\n \r\n with tf.GradientTape(persistent=True) as tape:\r\n shared_main = [shared[i](main_data, training=True) for i in range(NUM_MODELS)]\r\n \r\n main_logits_1 = [main_classifier_1[i](shared_main[i], training=True) for i in range(NUM_MODELS)]\r\n main_logits_2 = [main_classifier_2[i](shared_main[i], training=True) for i in range(NUM_MODELS)]\r\n \r\n loss = [ce_loss(main_labels, main_logits_1[i]) + ce_loss(main_labels, main_logits_2[i]) for i in range(NUM_MODELS)]\r\n\r\n gradients_shared = [tape.gradient(loss[i], shared[i].trainable_variables) for i in range(NUM_MODELS)]\r\n gradients_main_classifier_1 = [tape.gradient(loss[i], main_classifier_1[i].trainable_variables) for i in range(NUM_MODELS)]\r\n gradients_main_classifier_2 = [tape.gradient(loss[i], main_classifier_2[i].trainable_variables) for i in range(NUM_MODELS)]\r\n \r\n [optimizer_shared[i].apply_gradients(zip(gradients_shared[i], shared[i].trainable_variables)) for i in range(NUM_MODELS)]\r\n [optimizer_main_classifier_1[i].apply_gradients(zip(gradients_main_classifier_1[i], main_classifier_1[i].trainable_variables)) for i in range(NUM_MODELS)]\r\n [optimizer_main_classifier_2[i].apply_gradients(zip(gradients_main_classifier_2[i], main_classifier_2[i].trainable_variables)) for i in range(NUM_MODELS)]\r\n \r\n return loss\r\n\r\nmnist = tf.keras.datasets.mnist\r\n(x_train_mnist_all, y_train_mnist_all), (x_test_mnist_all, y_test_mnist_all) = mnist.load_data()\r\n\r\nx_train_mnist_all = np.stack((x_train_mnist_all,)*3, axis=-1)/255.\r\nx_test_mnist_all = np.stack((x_test_mnist_all,)*3, axis=-1)/255.\r\n\r\nmnistm = pkl.load(open('../../../../MNIST_MNIST-m/mnistm_data.pkl', 'rb'))\r\nx_train_mnistm_all = mnistm['train']/255.\r\nx_test_mnistm_all = mnistm['test']/255.\r\n\r\npicked_class = 3\r\npicked_class_next = 8\r\n\r\ntrain_points_class_0 = np.argwhere(y_train_mnist_all == picked_class).flatten()\r\ntrain_points_class_1 = np.argwhere(y_train_mnist_all == picked_class_next).flatten()\r\n\r\ntest_points_class_0 = np.argwhere(y_test_mnist_all == picked_class).flatten()\r\ntest_points_class_1 = np.argwhere(y_test_mnist_all == picked_class_next).flatten()\r\n\r\nx_train_mnist = x_train_mnist_all[np.concatenate([train_points_class_0, train_points_class_1])]\r\ny_train_mnist = y_train_mnist_all[np.concatenate([train_points_class_0, train_points_class_1])]\r\n\r\nx_test_mnist = x_test_mnist_all[np.concatenate([test_points_class_0, test_points_class_1])]\r\ny_test_mnist = y_test_mnist_all[np.concatenate([test_points_class_0, test_points_class_1])]\r\n\r\nx_train_mnistm = x_train_mnistm_all[np.concatenate([train_points_class_0, train_points_class_1])]\r\nx_test_mnistm = x_test_mnistm_all[np.concatenate([test_points_class_0, test_points_class_1])]\r\n\r\nzeros_train = np.argwhere(y_train_mnist == picked_class).flatten()\r\nones_train = np.argwhere(y_train_mnist == picked_class_next).flatten()\r\nzeros_test = np.argwhere(y_test_mnist == picked_class).flatten()\r\nones_test = np.argwhere(y_test_mnist == picked_class_next).flatten()\r\n\r\ny_train_mnist[zeros_train] = 0\r\ny_train_mnist[ones_train] = 1\r\ny_test_mnist[zeros_test] = 0\r\ny_test_mnist[ones_test] = 1\r\n\r\ny_train_mnist = keras.utils.to_categorical(y_train_mnist, NUM_CLASSES_MAIN)\r\ny_test_mnist = keras.utils.to_categorical(y_test_mnist, NUM_CLASSES_MAIN)\r\n\r\nx_target_test = np.load(\"data/\" + METHOD + \"_TARGET_DATA.npy\")\r\ny_target_test = np.load(\"data/\" + METHOD + \"_TARGET_LABEL.npy\")\r\ny_target_test_incorrect_label = np.zeros([1, NUM_CLASSES_MAIN])\r\ntarget_correct_label = np.argmax(y_target_test,1).flatten()[0]\r\ny_target_test_incorrect_label[0][(target_correct_label+1)%NUM_CLASSES_MAIN]=1\r\n\r\nif USE_POISON:\r\n\r\n x_poison = np.load(\"data/\" + METHOD + \"_GENERATED_POISON_DATA.npy\")\r\n y_poison = np.load(\"data/\" + METHOD + \"_GENERATED_POISON_LABELS.npy\") \r\n\r\n x_train_mnist = np.concatenate([x_train_mnist, x_poison])\r\n y_train_mnist = np.concatenate([y_train_mnist, y_poison])\r\n \r\nfor epoch in range(EPOCHS):\r\n nb_batches_train = int(len(x_train_mnist)/BATCH_SIZE)\r\n if len(x_train_mnist) % BATCH_SIZE != 0:\r\n nb_batches_train += 1\r\n ind_shuf = np.arange(len(x_train_mnist))\r\n np.random.shuffle(ind_shuf)\r\n \r\n for batch in range(nb_batches_train):\r\n ind_batch = range(BATCH_SIZE * batch, min(BATCH_SIZE * (1+batch), len(x_train_mnist)))\r\n ind_source = ind_shuf[ind_batch]\r\n \r\n ind_target = np.random.choice(len(x_train_mnistm), size=len(ind_source), replace=False)\r\n \r\n x_source_batch = x_train_mnist[ind_source]\r\n y_source_batch = y_train_mnist[ind_source]\r\n \r\n x_target_batch = x_train_mnistm[ind_target]\r\n \r\n train_step_erm(x_source_batch, y_source_batch)\r\n train_discrepancy_1(x_source_batch, y_source_batch, x_target_batch)\r\n train_discrepancy_2(x_target_batch)\r\n \r\n if epoch % 20 == 0: \r\n print(\"Full training Poisoning:\", USE_POISON, \"MNIST->MNIST_M:\", epoch, \"METHOD:\", METHOD, \"\\n\")\r\n print([eval_accuracy_main_cdan(x_target_test, y_target_test_incorrect_label, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])\r\n print([eval_accuracy_main_cdan(x_target_test, y_target_test, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])\r\n print([eval_accuracy_main_cdan(x_test_mnistm, y_test_mnist, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])\r\n if USE_POISON:\r\n print([eval_accuracy_main_cdan(x_poison, y_poison, shared[i], main_classifier_1[i]) for i in range(NUM_MODELS)])\r\n print(\"\\n\")\r\n \r\n",
"step-ids": [
1,
3,
4,
5,
7
]
}
|
[
1,
3,
4,
5,
7
] |
<|reserved_special_token_0|>
class RecipeAdmin(admin.ModelAdmin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
model = Recipe
def make_visible(self, request, queryset):
queryset.update(visible=True)
queryset.update(date_posted=timezone.now())
def make_hidden(self, request, queryset):
queryset.update(visible=False)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ChosenIngredientInLine(admin.TabularInline):
<|reserved_special_token_0|>
class RecipeAdmin(admin.ModelAdmin):
list_display = 'title', 'visible', 'author'
actions = ['make_visible', 'make_hidden', 'delete_selected']
exclude = 'date_posted', 'ingredients'
inlines = [ChosenIngredientInLine]
class Meta:
model = Recipe
def make_visible(self, request, queryset):
queryset.update(visible=True)
queryset.update(date_posted=timezone.now())
def make_hidden(self, request, queryset):
queryset.update(visible=False)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ChosenIngredientInLine(admin.TabularInline):
model = ChosenIngredient
class RecipeAdmin(admin.ModelAdmin):
list_display = 'title', 'visible', 'author'
actions = ['make_visible', 'make_hidden', 'delete_selected']
exclude = 'date_posted', 'ingredients'
inlines = [ChosenIngredientInLine]
class Meta:
model = Recipe
def make_visible(self, request, queryset):
queryset.update(visible=True)
queryset.update(date_posted=timezone.now())
def make_hidden(self, request, queryset):
queryset.update(visible=False)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.site.register(Ingredient)
admin.site.site_header = "Chef's Apprentice Admin"
admin.site.site_title = "Chef's Apprentice Admin Portal"
admin.site.index_title = "Welcome to Chef's Apprentice Admin Portal"
class ChosenIngredientInLine(admin.TabularInline):
model = ChosenIngredient
class RecipeAdmin(admin.ModelAdmin):
list_display = 'title', 'visible', 'author'
actions = ['make_visible', 'make_hidden', 'delete_selected']
exclude = 'date_posted', 'ingredients'
inlines = [ChosenIngredientInLine]
class Meta:
model = Recipe
def make_visible(self, request, queryset):
queryset.update(visible=True)
queryset.update(date_posted=timezone.now())
def make_hidden(self, request, queryset):
queryset.update(visible=False)
admin.site.register(Recipe, RecipeAdmin)
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Recipe, Ingredient, ChosenIngredient, timezone
# Register your models here.)
admin.site.register(Ingredient)
admin.site.site_header = "Chef's Apprentice Admin"
admin.site.site_title = "Chef's Apprentice Admin Portal"
admin.site.index_title = "Welcome to Chef's Apprentice Admin Portal"
class ChosenIngredientInLine(admin.TabularInline):
model = ChosenIngredient
# definerer hva som skal vises på Recipe displayet i admin siden
class RecipeAdmin(admin.ModelAdmin):
list_display = ("title", "visible", "author")
actions = ["make_visible", "make_hidden", "delete_selected"]
exclude = ('date_posted', 'ingredients')
inlines = [
ChosenIngredientInLine,
]
class Meta:
model = Recipe
# funksjon for å sette make_visible og hidden som actions i admin siden
def make_visible(self, request, queryset):
queryset.update(visible=True)
queryset.update(date_posted=timezone.now())
def make_hidden(self, request, queryset):
queryset.update(visible=False)
# synliggjør disse modellene i admin-siden
admin.site.register(Recipe, RecipeAdmin)
|
flexible
|
{
"blob_id": "65bb3743ca569c295d85016c82c4f6f043778d3f",
"index": 8848,
"step-1": "<mask token>\n\n\nclass RecipeAdmin(admin.ModelAdmin):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Recipe\n\n def make_visible(self, request, queryset):\n queryset.update(visible=True)\n queryset.update(date_posted=timezone.now())\n\n def make_hidden(self, request, queryset):\n queryset.update(visible=False)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ChosenIngredientInLine(admin.TabularInline):\n <mask token>\n\n\nclass RecipeAdmin(admin.ModelAdmin):\n list_display = 'title', 'visible', 'author'\n actions = ['make_visible', 'make_hidden', 'delete_selected']\n exclude = 'date_posted', 'ingredients'\n inlines = [ChosenIngredientInLine]\n\n\n class Meta:\n model = Recipe\n\n def make_visible(self, request, queryset):\n queryset.update(visible=True)\n queryset.update(date_posted=timezone.now())\n\n def make_hidden(self, request, queryset):\n queryset.update(visible=False)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ChosenIngredientInLine(admin.TabularInline):\n model = ChosenIngredient\n\n\nclass RecipeAdmin(admin.ModelAdmin):\n list_display = 'title', 'visible', 'author'\n actions = ['make_visible', 'make_hidden', 'delete_selected']\n exclude = 'date_posted', 'ingredients'\n inlines = [ChosenIngredientInLine]\n\n\n class Meta:\n model = Recipe\n\n def make_visible(self, request, queryset):\n queryset.update(visible=True)\n queryset.update(date_posted=timezone.now())\n\n def make_hidden(self, request, queryset):\n queryset.update(visible=False)\n\n\n<mask token>\n",
"step-4": "<mask token>\nadmin.site.register(Ingredient)\nadmin.site.site_header = \"Chef's Apprentice Admin\"\nadmin.site.site_title = \"Chef's Apprentice Admin Portal\"\nadmin.site.index_title = \"Welcome to Chef's Apprentice Admin Portal\"\n\n\nclass ChosenIngredientInLine(admin.TabularInline):\n model = ChosenIngredient\n\n\nclass RecipeAdmin(admin.ModelAdmin):\n list_display = 'title', 'visible', 'author'\n actions = ['make_visible', 'make_hidden', 'delete_selected']\n exclude = 'date_posted', 'ingredients'\n inlines = [ChosenIngredientInLine]\n\n\n class Meta:\n model = Recipe\n\n def make_visible(self, request, queryset):\n queryset.update(visible=True)\n queryset.update(date_posted=timezone.now())\n\n def make_hidden(self, request, queryset):\n queryset.update(visible=False)\n\n\nadmin.site.register(Recipe, RecipeAdmin)\n",
"step-5": "from django.contrib import admin\nfrom .models import Recipe, Ingredient, ChosenIngredient, timezone\n\n# Register your models here.)\nadmin.site.register(Ingredient)\nadmin.site.site_header = \"Chef's Apprentice Admin\"\nadmin.site.site_title = \"Chef's Apprentice Admin Portal\"\nadmin.site.index_title = \"Welcome to Chef's Apprentice Admin Portal\"\n\n\nclass ChosenIngredientInLine(admin.TabularInline):\n model = ChosenIngredient\n\n# definerer hva som skal vises på Recipe displayet i admin siden\nclass RecipeAdmin(admin.ModelAdmin):\n list_display = (\"title\", \"visible\", \"author\")\n actions = [\"make_visible\", \"make_hidden\", \"delete_selected\"]\n exclude = ('date_posted', 'ingredients')\n inlines = [\n ChosenIngredientInLine,\n ]\n\n class Meta:\n model = Recipe\n\n # funksjon for å sette make_visible og hidden som actions i admin siden\n def make_visible(self, request, queryset):\n queryset.update(visible=True)\n queryset.update(date_posted=timezone.now())\n\n def make_hidden(self, request, queryset):\n queryset.update(visible=False)\n\n# synliggjør disse modellene i admin-siden\nadmin.site.register(Recipe, RecipeAdmin)\n",
"step-ids": [
3,
5,
6,
8,
10
]
}
|
[
3,
5,
6,
8,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ShortenConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ShortenConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'shorten'
<|reserved_special_token_1|>
from django.apps import AppConfig
class ShortenConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'shorten'
|
flexible
|
{
"blob_id": "8c2920db7fc49d56aa8da6289cd22272ed3e3283",
"index": 4402,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ShortenConfig(AppConfig):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ShortenConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'shorten'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass ShortenConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'shorten'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
@DatasetReader.register('bertclassification')
class ClassificationReader(DatasetReader):
<|reserved_special_token_0|>
@overrides
def _read(self, file_path: str) ->Iterable[Instance]:
file_path = cached_path(file_path)
with open(file_path, 'r') as data_file:
logger.info('Reading instances from lines in file at: %s',
file_path)
for line in data_file:
polar, sent = line.strip().split(',')
tokens = [Token(token) for token in sent]
yield self.text_to_instance(tokens, polar)
def text_to_instance(self, tokens: List[Token], polar) ->Instance:
sequence = TextField(tokens, self._token_indexers)
instance_fields: Dict[str, Field] = {'tokens': sequence}
instance_fields['label'] = LabelField(polar)
return Instance(instance_fields)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@DatasetReader.register('bertclassification')
class ClassificationReader(DatasetReader):
def __init__(self, token_indexers: Dict[str, TokenIndexer]=None, lazy:
bool=False) ->None:
super().__init__(lazy)
self._token_indexers = token_indexers or {'tokens':
SingleIdTokenIndexer()}
@overrides
def _read(self, file_path: str) ->Iterable[Instance]:
file_path = cached_path(file_path)
with open(file_path, 'r') as data_file:
logger.info('Reading instances from lines in file at: %s',
file_path)
for line in data_file:
polar, sent = line.strip().split(',')
tokens = [Token(token) for token in sent]
yield self.text_to_instance(tokens, polar)
def text_to_instance(self, tokens: List[Token], polar) ->Instance:
sequence = TextField(tokens, self._token_indexers)
instance_fields: Dict[str, Field] = {'tokens': sequence}
instance_fields['label'] = LabelField(polar)
return Instance(instance_fields)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = logging.getLogger(__name__)
polar_dict = {'1': 'Ture', '0': 'False'}
@DatasetReader.register('bertclassification')
class ClassificationReader(DatasetReader):
def __init__(self, token_indexers: Dict[str, TokenIndexer]=None, lazy:
bool=False) ->None:
super().__init__(lazy)
self._token_indexers = token_indexers or {'tokens':
SingleIdTokenIndexer()}
@overrides
def _read(self, file_path: str) ->Iterable[Instance]:
file_path = cached_path(file_path)
with open(file_path, 'r') as data_file:
logger.info('Reading instances from lines in file at: %s',
file_path)
for line in data_file:
polar, sent = line.strip().split(',')
tokens = [Token(token) for token in sent]
yield self.text_to_instance(tokens, polar)
def text_to_instance(self, tokens: List[Token], polar) ->Instance:
sequence = TextField(tokens, self._token_indexers)
instance_fields: Dict[str, Field] = {'tokens': sequence}
instance_fields['label'] = LabelField(polar)
return Instance(instance_fields)
<|reserved_special_token_1|>
from typing import Dict, List, Sequence, Iterable, Tuple
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.common.file_utils import cached_path
import logging
from overrides import overrides
import itertools
from allennlp.data.tokenizers import Token
from allennlp.data.fields import ListField, TextField, SequenceLabelField, Field, MetadataField, SpanField, LabelField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
logger = logging.getLogger(__name__)
polar_dict = {'1': 'Ture', '0': 'False'}
@DatasetReader.register('bertclassification')
class ClassificationReader(DatasetReader):
def __init__(self, token_indexers: Dict[str, TokenIndexer]=None, lazy:
bool=False) ->None:
super().__init__(lazy)
self._token_indexers = token_indexers or {'tokens':
SingleIdTokenIndexer()}
@overrides
def _read(self, file_path: str) ->Iterable[Instance]:
file_path = cached_path(file_path)
with open(file_path, 'r') as data_file:
logger.info('Reading instances from lines in file at: %s',
file_path)
for line in data_file:
polar, sent = line.strip().split(',')
tokens = [Token(token) for token in sent]
yield self.text_to_instance(tokens, polar)
def text_to_instance(self, tokens: List[Token], polar) ->Instance:
sequence = TextField(tokens, self._token_indexers)
instance_fields: Dict[str, Field] = {'tokens': sequence}
instance_fields['label'] = LabelField(polar)
return Instance(instance_fields)
<|reserved_special_token_1|>
from typing import Dict, List, Sequence, Iterable, Tuple
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.common.file_utils import cached_path
import logging
from overrides import overrides
import itertools
from allennlp.data.tokenizers import Token
from allennlp.data.fields import ListField, TextField, SequenceLabelField, Field, MetadataField, SpanField, LabelField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
logger = logging.getLogger(__name__)
polar_dict = {
"1": "Ture",
"0": "False"
}
@DatasetReader.register("bertclassification")
class ClassificationReader(DatasetReader):
def __init__(
self,
token_indexers: Dict[str, TokenIndexer] = None,
lazy: bool = False
) -> None:
super().__init__(lazy)
self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
@overrides
def _read(self, file_path: str) -> Iterable[Instance]:
file_path = cached_path(file_path)
with open(file_path, "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
for line in data_file:
polar, sent = line.strip().split(",")
tokens = [Token(token) for token in sent]
yield self.text_to_instance(tokens, polar)
def text_to_instance(
self,
tokens:List[Token],
polar
) -> Instance:
sequence = TextField(tokens, self._token_indexers)
instance_fields: Dict[str, Field] = {'tokens': sequence}
instance_fields['label'] = LabelField(polar)
return Instance(instance_fields)
|
flexible
|
{
"blob_id": "21172985bf36302f6b0b2101e353d9fbcafb0673",
"index": 6653,
"step-1": "<mask token>\n\n\n@DatasetReader.register('bertclassification')\nclass ClassificationReader(DatasetReader):\n <mask token>\n\n @overrides\n def _read(self, file_path: str) ->Iterable[Instance]:\n file_path = cached_path(file_path)\n with open(file_path, 'r') as data_file:\n logger.info('Reading instances from lines in file at: %s',\n file_path)\n for line in data_file:\n polar, sent = line.strip().split(',')\n tokens = [Token(token) for token in sent]\n yield self.text_to_instance(tokens, polar)\n\n def text_to_instance(self, tokens: List[Token], polar) ->Instance:\n sequence = TextField(tokens, self._token_indexers)\n instance_fields: Dict[str, Field] = {'tokens': sequence}\n instance_fields['label'] = LabelField(polar)\n return Instance(instance_fields)\n",
"step-2": "<mask token>\n\n\n@DatasetReader.register('bertclassification')\nclass ClassificationReader(DatasetReader):\n\n def __init__(self, token_indexers: Dict[str, TokenIndexer]=None, lazy:\n bool=False) ->None:\n super().__init__(lazy)\n self._token_indexers = token_indexers or {'tokens':\n SingleIdTokenIndexer()}\n\n @overrides\n def _read(self, file_path: str) ->Iterable[Instance]:\n file_path = cached_path(file_path)\n with open(file_path, 'r') as data_file:\n logger.info('Reading instances from lines in file at: %s',\n file_path)\n for line in data_file:\n polar, sent = line.strip().split(',')\n tokens = [Token(token) for token in sent]\n yield self.text_to_instance(tokens, polar)\n\n def text_to_instance(self, tokens: List[Token], polar) ->Instance:\n sequence = TextField(tokens, self._token_indexers)\n instance_fields: Dict[str, Field] = {'tokens': sequence}\n instance_fields['label'] = LabelField(polar)\n return Instance(instance_fields)\n",
"step-3": "<mask token>\nlogger = logging.getLogger(__name__)\npolar_dict = {'1': 'Ture', '0': 'False'}\n\n\n@DatasetReader.register('bertclassification')\nclass ClassificationReader(DatasetReader):\n\n def __init__(self, token_indexers: Dict[str, TokenIndexer]=None, lazy:\n bool=False) ->None:\n super().__init__(lazy)\n self._token_indexers = token_indexers or {'tokens':\n SingleIdTokenIndexer()}\n\n @overrides\n def _read(self, file_path: str) ->Iterable[Instance]:\n file_path = cached_path(file_path)\n with open(file_path, 'r') as data_file:\n logger.info('Reading instances from lines in file at: %s',\n file_path)\n for line in data_file:\n polar, sent = line.strip().split(',')\n tokens = [Token(token) for token in sent]\n yield self.text_to_instance(tokens, polar)\n\n def text_to_instance(self, tokens: List[Token], polar) ->Instance:\n sequence = TextField(tokens, self._token_indexers)\n instance_fields: Dict[str, Field] = {'tokens': sequence}\n instance_fields['label'] = LabelField(polar)\n return Instance(instance_fields)\n",
"step-4": "from typing import Dict, List, Sequence, Iterable, Tuple\nfrom allennlp.data.dataset_readers.dataset_reader import DatasetReader\nfrom allennlp.data.instance import Instance\nfrom allennlp.common.file_utils import cached_path\nimport logging\nfrom overrides import overrides\nimport itertools\nfrom allennlp.data.tokenizers import Token\nfrom allennlp.data.fields import ListField, TextField, SequenceLabelField, Field, MetadataField, SpanField, LabelField\nfrom allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer\nlogger = logging.getLogger(__name__)\npolar_dict = {'1': 'Ture', '0': 'False'}\n\n\n@DatasetReader.register('bertclassification')\nclass ClassificationReader(DatasetReader):\n\n def __init__(self, token_indexers: Dict[str, TokenIndexer]=None, lazy:\n bool=False) ->None:\n super().__init__(lazy)\n self._token_indexers = token_indexers or {'tokens':\n SingleIdTokenIndexer()}\n\n @overrides\n def _read(self, file_path: str) ->Iterable[Instance]:\n file_path = cached_path(file_path)\n with open(file_path, 'r') as data_file:\n logger.info('Reading instances from lines in file at: %s',\n file_path)\n for line in data_file:\n polar, sent = line.strip().split(',')\n tokens = [Token(token) for token in sent]\n yield self.text_to_instance(tokens, polar)\n\n def text_to_instance(self, tokens: List[Token], polar) ->Instance:\n sequence = TextField(tokens, self._token_indexers)\n instance_fields: Dict[str, Field] = {'tokens': sequence}\n instance_fields['label'] = LabelField(polar)\n return Instance(instance_fields)\n",
"step-5": "from typing import Dict, List, Sequence, Iterable, Tuple\nfrom allennlp.data.dataset_readers.dataset_reader import DatasetReader\nfrom allennlp.data.instance import Instance\nfrom allennlp.common.file_utils import cached_path\nimport logging\nfrom overrides import overrides\nimport itertools\nfrom allennlp.data.tokenizers import Token\nfrom allennlp.data.fields import ListField, TextField, SequenceLabelField, Field, MetadataField, SpanField, LabelField\nfrom allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer\n\nlogger = logging.getLogger(__name__)\npolar_dict = {\n \"1\": \"Ture\",\n \"0\": \"False\"\n}\n\n@DatasetReader.register(\"bertclassification\")\nclass ClassificationReader(DatasetReader):\n def __init__(\n self,\n token_indexers: Dict[str, TokenIndexer] = None,\n lazy: bool = False\n ) -> None:\n super().__init__(lazy)\n self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}\n \n @overrides\n def _read(self, file_path: str) -> Iterable[Instance]:\n file_path = cached_path(file_path)\n\n with open(file_path, \"r\") as data_file:\n logger.info(\"Reading instances from lines in file at: %s\", file_path)\n for line in data_file:\n polar, sent = line.strip().split(\",\")\n tokens = [Token(token) for token in sent]\n yield self.text_to_instance(tokens, polar)\n \n def text_to_instance(\n self,\n tokens:List[Token],\n polar\n ) -> Instance:\n sequence = TextField(tokens, self._token_indexers)\n instance_fields: Dict[str, Field] = {'tokens': sequence}\n instance_fields['label'] = LabelField(polar)\n return Instance(instance_fields) \n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def main():
x = float(input('Coordenada x: '))
y = float(input('Coordenada y: '))
if 1 <= y <= 2 and -3 <= x <= 3:
print('dentro')
elif (4 <= y <= 5 or 6 <= x <= 7) and (-4 <= x <= -3 or -2 <= x <= -1 or
1 <= x <= 2 or 3 <= x <= 4):
print('dentro')
else:
print('fora')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def main():
x = float(input('Coordenada x: '))
y = float(input('Coordenada y: '))
if 1 <= y <= 2 and -3 <= x <= 3:
print('dentro')
elif (4 <= y <= 5 or 6 <= x <= 7) and (-4 <= x <= -3 or -2 <= x <= -1 or
1 <= x <= 2 or 3 <= x <= 4):
print('dentro')
else:
print('fora')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
def main():
x = float(input("Coordenada x: "))
y = float(input("Coordenada y: "))
if 1 <= y <= 2 and -3 <= x <= 3:
print("dentro")
elif (4 <= y <= 5 or 6 <= x <= 7) and ( -4 <= x <= -3 or -2 <= x <= -1 or 1 <= x <= 2 or 3 <= x <= 4):
print("dentro")
else:
print("fora")
#-----------------------------------------------------
if __name__ == '__main__': # chamada da funcao principal
main()
|
flexible
|
{
"blob_id": "06cb832c3adae95fcd1d1d2d0663641d3ac671ef",
"index": 9132,
"step-1": "<mask token>\n",
"step-2": "def main():\n x = float(input('Coordenada x: '))\n y = float(input('Coordenada y: '))\n if 1 <= y <= 2 and -3 <= x <= 3:\n print('dentro')\n elif (4 <= y <= 5 or 6 <= x <= 7) and (-4 <= x <= -3 or -2 <= x <= -1 or\n 1 <= x <= 2 or 3 <= x <= 4):\n print('dentro')\n else:\n print('fora')\n\n\n<mask token>\n",
"step-3": "def main():\n x = float(input('Coordenada x: '))\n y = float(input('Coordenada y: '))\n if 1 <= y <= 2 and -3 <= x <= 3:\n print('dentro')\n elif (4 <= y <= 5 or 6 <= x <= 7) and (-4 <= x <= -3 or -2 <= x <= -1 or\n 1 <= x <= 2 or 3 <= x <= 4):\n print('dentro')\n else:\n print('fora')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "def main():\r\n x = float(input(\"Coordenada x: \"))\r\n y = float(input(\"Coordenada y: \"))\r\n \r\n if 1 <= y <= 2 and -3 <= x <= 3:\r\n print(\"dentro\")\r\n \r\n elif (4 <= y <= 5 or 6 <= x <= 7) and ( -4 <= x <= -3 or -2 <= x <= -1 or 1 <= x <= 2 or 3 <= x <= 4):\r\n print(\"dentro\")\r\n \r\n else:\r\n print(\"fora\")\r\n\r\n\r\n\r\n#-----------------------------------------------------\r\nif __name__ == '__main__': # chamada da funcao principal\r\n main()\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def mk_dir_recursive(dir_path):
if os.path.isdir(dir_path):
return
h, t = os.path.split(dir_path)
if not os.path.isdir(h):
mk_dir_recursive(h)
new_path = join_paths(h, t)
if not os.path.isdir(new_path):
os.mkdir(new_path)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mk_dir_recursive(dir_path):
if os.path.isdir(dir_path):
return
h, t = os.path.split(dir_path)
if not os.path.isdir(h):
mk_dir_recursive(h)
new_path = join_paths(h, t)
if not os.path.isdir(new_path):
os.mkdir(new_path)
<|reserved_special_token_0|>
if not os.path.exists(workpath):
mk_dir_recursive(workpath)
if domain == 'OSMOSIS':
extent = [-19.5, -11.5, 45.0, 55.0]
indLat = 200
indLon = 160
elif domain == 'GULFSTREAM':
extent = [-65.0, -55.0, 33.0, 43.0]
indLat = 200
indLon = 200
else:
extent = [-65.0, -55.0, 30.0, 40.0]
indLat = 200
indLon = 200
<|reserved_special_token_0|>
with open(AnDA_nadir_lag_0_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadir_0 = AnDA_ssh_1
itrp_dineof_nadir_0 = itrp_dineof
with open(AnDA_nadirswot_lag_0_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1
itrp_dineof_nadirswot_0 = itrp_dineof
with open(AnDA_nadir_lag_5_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadir_5 = AnDA_ssh_1
itrp_dineof_nadir_5 = itrp_dineof
with open(AnDA_nadirswot_lag_5_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1
itrp_dineof_nadirswot_5 = itrp_dineof
with open(FP_GENN_nadir_lag_0_file, 'rb') as handle:
itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]
with open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:
itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9
]
with open(FP_GENN_nadir_lag_5_file, 'rb') as handle:
itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]
with open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:
itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9
]
<|reserved_special_token_0|>
list_data.append(GT)
list_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:, :indLat, :indLon])
list_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:, :indLat, :indLon])
<|reserved_special_token_0|>
for i in range(len(list_data)):
min_res = min(min_res, list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1] > min_res:
dwscale = int(list_data[i].shape[1] / min_res)
list_data[i] = einops.reduce(list_data[i],
'(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,
reduction=np.nanmean)
print(list_data[i].shape)
<|reserved_special_token_0|>
plot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,
resfile, gradient=False)
<|reserved_special_token_0|>
list_data.append(GT)
list_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:, :indLat, :indLon])
list_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:, :indLat, :indLon])
<|reserved_special_token_0|>
for i in range(len(list_data)):
min_res = min(min_res, list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1] > min_res:
dwscale = int(list_data[i].shape[1] / min_res)
list_data[i] = einops.reduce(list_data[i],
'(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,
reduction=np.nanmean)
print(list_data[i].shape)
<|reserved_special_token_0|>
plot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,
resfile, gradient=False)
<|reserved_special_token_0|>
list_data.append(GT)
list_data.append(itrp_FP_GENN_nadir_0[:, :indLat, :indLon])
list_data.append(itrp_FP_GENN_nadir_5[:, :indLat, :indLon])
<|reserved_special_token_0|>
for i in range(len(list_data)):
min_res = min(min_res, list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1] > min_res:
dwscale = int(list_data[i].shape[1] / min_res)
list_data[i] = einops.reduce(list_data[i],
'(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,
reduction=np.nanmean)
print(list_data[i].shape)
<|reserved_special_token_0|>
plot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,
resfile, gradient=False)
<|reserved_special_token_0|>
list_data.append(GT)
list_data.append(itrp_FP_GENN_nadirswot_0[:, :indLat, :indLon])
list_data.append(itrp_FP_GENN_nadirswot_5[:, :indLat, :indLon])
<|reserved_special_token_0|>
for i in range(len(list_data)):
min_res = min(min_res, list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1] > min_res:
dwscale = int(list_data[i].shape[1] / min_res)
list_data[i] = einops.reduce(list_data[i],
'(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,
reduction=np.nanmean)
print(list_data[i].shape)
<|reserved_special_token_0|>
plot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,
resfile, gradient=False)
<|reserved_special_token_1|>
__author__ = 'Maxime Beauchamp'
__version__ = '0.1'
__date__ = '2020-12-10'
__email__ = 'maxime.beauchamp@imt-atantique.fr'
<|reserved_special_token_0|>
def mk_dir_recursive(dir_path):
if os.path.isdir(dir_path):
return
h, t = os.path.split(dir_path)
if not os.path.isdir(h):
mk_dir_recursive(h)
new_path = join_paths(h, t)
if not os.path.isdir(new_path):
os.mkdir(new_path)
type_obs = sys.argv[1]
domain = sys.argv[2]
workpath = ('/users/local/m19beauc/4DVARNN-DinAE_xp/' + domain +
'/OSSE/scores_allmethods_nadlag_' + type_obs)
scratchpath = '/users/local/m19beauc/4DVARNN-DinAE_xp/' + domain + '/OSSE'
if not os.path.exists(workpath):
mk_dir_recursive(workpath)
if domain == 'OSMOSIS':
extent = [-19.5, -11.5, 45.0, 55.0]
indLat = 200
indLon = 160
elif domain == 'GULFSTREAM':
extent = [-65.0, -55.0, 33.0, 43.0]
indLat = 200
indLon = 200
else:
extent = [-65.0, -55.0, 30.0, 40.0]
indLat = 200
indLon = 200
AnDA_nadir_lag_0_file = (scratchpath + '/resAnDA_nadir_nadlag_0_' +
type_obs + '/saved_path.pickle')
FP_GENN_nadir_lag_0_file = (scratchpath + '/resIA_nadir_nadlag_0' +
type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')
AnDA_nadir_lag_5_file = (scratchpath + '/resAnDA_nadir_nadlag_5_' +
type_obs + '/saved_path.pickle')
FP_GENN_nadir_lag_5_file = (scratchpath + '/resIA_nadir_nadlag_5' +
type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')
AnDA_nadirswot_lag_0_file = (scratchpath + '/resAnDA_nadirswot_nadlag_0' +
type_obs + '/saved_path.pickle')
FP_GENN_nadirswot_lag_0_file = (scratchpath + '/resIA_nadirswot_nadlag_0' +
type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')
AnDA_nadirswot_lag_5_file = (scratchpath + '/resAnDA_nadirswot_nadlag_5' +
type_obs + '/saved_path.pickle')
FP_GENN_nadirswot_lag_5_file = (scratchpath + '/resIA_nadirswot_nadlag_5' +
type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')
with open(AnDA_nadir_lag_0_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadir_0 = AnDA_ssh_1
itrp_dineof_nadir_0 = itrp_dineof
with open(AnDA_nadirswot_lag_0_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1
itrp_dineof_nadirswot_0 = itrp_dineof
with open(AnDA_nadir_lag_5_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadir_5 = AnDA_ssh_1
itrp_dineof_nadir_5 = itrp_dineof
with open(AnDA_nadirswot_lag_5_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1
itrp_dineof_nadirswot_5 = itrp_dineof
with open(FP_GENN_nadir_lag_0_file, 'rb') as handle:
itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]
with open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:
itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9
]
with open(FP_GENN_nadir_lag_5_file, 'rb') as handle:
itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]
with open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:
itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9
]
lday1 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +
timedelta(days=60 + i), '%Y-%m-%d') for i in range(20)]
lday2 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +
timedelta(days=140 + i), '%Y-%m-%d') for i in range(20)]
lday3 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +
timedelta(days=220 + i), '%Y-%m-%d') for i in range(20)]
lday4 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +
timedelta(days=300 + i), '%Y-%m-%d') for i in range(20)]
lday = np.concatenate([lday1, lday2, lday3, lday4])
lday2 = [datetime.strptime(lday[i], '%Y-%m-%d') for i in range(len(lday))]
GT = AnDA_ssh_1_nadir.GT[:, :indLat, :indLon]
list_data = []
list_data.append(GT)
list_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:, :indLat, :indLon])
list_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:, :indLat, :indLon])
labels_data = np.array(['GT', 'Obs', 'Post-AnDA (lag=0)', 'Post-AnDA (lag=5)'])
colors = np.array(['k', '', 'red', 'blue'])
symbols = np.array(['k', '', 'o', 'o'])
lstyle = np.array(['solid', '', 'solid', 'solid'])
lwidth = np.array([2, 2, 1, 1])
min_res = 1000000000.0
for i in range(len(list_data)):
min_res = min(min_res, list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1] > min_res:
dwscale = int(list_data[i].shape[1] / min_res)
list_data[i] = einops.reduce(list_data[i],
'(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,
reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200 / min_res)
indLon = int(indLon / dwscale)
indLat = int(indLat / dwscale)
lon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))
lat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))
resfile = workpath + '/TS_AnDA_nadir_nadlag.png'
plot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,
resfile, gradient=False)
list_data = []
list_data.append(GT)
list_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:, :indLat, :indLon])
list_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:, :indLat, :indLon])
labels_data = np.array(['GT', 'Obs', 'Post-AnDA (lag=0)', 'Post-AnDA (lag=5)'])
colors = np.array(['k', '', 'red', 'blue'])
symbols = np.array(['k', '', 'o', 'o'])
lstyle = np.array(['solid', '', 'solid', 'solid'])
lwidth = np.array([2, 2, 1, 1])
min_res = 1000000000.0
for i in range(len(list_data)):
min_res = min(min_res, list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1] > min_res:
dwscale = int(list_data[i].shape[1] / min_res)
list_data[i] = einops.reduce(list_data[i],
'(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,
reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200 / min_res)
indLon = int(indLon / dwscale)
indLat = int(indLat / dwscale)
lon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))
lat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))
resfile = workpath + '/TS_AnDA_nadirswot_nadlag.png'
plot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,
resfile, gradient=False)
list_data = []
list_data.append(GT)
list_data.append(itrp_FP_GENN_nadir_0[:, :indLat, :indLon])
list_data.append(itrp_FP_GENN_nadir_5[:, :indLat, :indLon])
labels_data = np.array(['GT', 'Obs', 'FP-GENN (lag=0)', 'FP-GENN (lag=5)'])
colors = np.array(['k', '', 'red', 'blue'])
symbols = np.array(['k', '', 'o', 'o'])
lstyle = np.array(['solid', '', 'solid', 'solid'])
lwidth = np.array([2, 2, 1, 1])
min_res = 1000000000.0
for i in range(len(list_data)):
min_res = min(min_res, list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1] > min_res:
dwscale = int(list_data[i].shape[1] / min_res)
list_data[i] = einops.reduce(list_data[i],
'(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,
reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200 / min_res)
indLon = int(indLon / dwscale)
indLat = int(indLat / dwscale)
lon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))
lat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))
resfile = workpath + '/TS_GENN_nadir_nadlag.png'
plot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,
resfile, gradient=False)
list_data = []
list_data.append(GT)
list_data.append(itrp_FP_GENN_nadirswot_0[:, :indLat, :indLon])
list_data.append(itrp_FP_GENN_nadirswot_5[:, :indLat, :indLon])
labels_data = np.array(['GT', 'Obs', 'FP-GENN (lag=0)', 'FP-GENN (lag=5)'])
colors = np.array(['k', '', 'red', 'blue'])
symbols = np.array(['k', '', 'o', 'o'])
lstyle = np.array(['solid', '', 'solid', 'solid'])
lwidth = np.array([2, 2, 1, 1])
min_res = 1000000000.0
for i in range(len(list_data)):
min_res = min(min_res, list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1] > min_res:
dwscale = int(list_data[i].shape[1] / min_res)
list_data[i] = einops.reduce(list_data[i],
'(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,
reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200 / min_res)
indLon = int(indLon / dwscale)
indLat = int(indLat / dwscale)
lon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))
lat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))
resfile = workpath + '/TS_GENN_nadirswot_nadlag.png'
plot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,
resfile, gradient=False)
<|reserved_special_token_1|>
__author__ = 'Maxime Beauchamp'
__version__ = '0.1'
__date__ = '2020-12-10'
__email__ = 'maxime.beauchamp@imt-atantique.fr'
from graphics_OSSE import *
def mk_dir_recursive(dir_path):
if os.path.isdir(dir_path):
return
h, t = os.path.split(dir_path)
if not os.path.isdir(h):
mk_dir_recursive(h)
new_path = join_paths(h, t)
if not os.path.isdir(new_path):
os.mkdir(new_path)
type_obs = sys.argv[1]
domain = sys.argv[2]
workpath = ('/users/local/m19beauc/4DVARNN-DinAE_xp/' + domain +
'/OSSE/scores_allmethods_nadlag_' + type_obs)
scratchpath = '/users/local/m19beauc/4DVARNN-DinAE_xp/' + domain + '/OSSE'
if not os.path.exists(workpath):
mk_dir_recursive(workpath)
if domain == 'OSMOSIS':
extent = [-19.5, -11.5, 45.0, 55.0]
indLat = 200
indLon = 160
elif domain == 'GULFSTREAM':
extent = [-65.0, -55.0, 33.0, 43.0]
indLat = 200
indLon = 200
else:
extent = [-65.0, -55.0, 30.0, 40.0]
indLat = 200
indLon = 200
AnDA_nadir_lag_0_file = (scratchpath + '/resAnDA_nadir_nadlag_0_' +
type_obs + '/saved_path.pickle')
FP_GENN_nadir_lag_0_file = (scratchpath + '/resIA_nadir_nadlag_0' +
type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')
AnDA_nadir_lag_5_file = (scratchpath + '/resAnDA_nadir_nadlag_5_' +
type_obs + '/saved_path.pickle')
FP_GENN_nadir_lag_5_file = (scratchpath + '/resIA_nadir_nadlag_5' +
type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')
AnDA_nadirswot_lag_0_file = (scratchpath + '/resAnDA_nadirswot_nadlag_0' +
type_obs + '/saved_path.pickle')
FP_GENN_nadirswot_lag_0_file = (scratchpath + '/resIA_nadirswot_nadlag_0' +
type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')
AnDA_nadirswot_lag_5_file = (scratchpath + '/resAnDA_nadirswot_nadlag_5' +
type_obs + '/saved_path.pickle')
FP_GENN_nadirswot_lag_5_file = (scratchpath + '/resIA_nadirswot_nadlag_5' +
type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')
with open(AnDA_nadir_lag_0_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadir_0 = AnDA_ssh_1
itrp_dineof_nadir_0 = itrp_dineof
with open(AnDA_nadirswot_lag_0_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1
itrp_dineof_nadirswot_0 = itrp_dineof
with open(AnDA_nadir_lag_5_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadir_5 = AnDA_ssh_1
itrp_dineof_nadir_5 = itrp_dineof
with open(AnDA_nadirswot_lag_5_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1
itrp_dineof_nadirswot_5 = itrp_dineof
with open(FP_GENN_nadir_lag_0_file, 'rb') as handle:
itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]
with open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:
itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9
]
with open(FP_GENN_nadir_lag_5_file, 'rb') as handle:
itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]
with open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:
itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9
]
lday1 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +
timedelta(days=60 + i), '%Y-%m-%d') for i in range(20)]
lday2 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +
timedelta(days=140 + i), '%Y-%m-%d') for i in range(20)]
lday3 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +
timedelta(days=220 + i), '%Y-%m-%d') for i in range(20)]
lday4 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +
timedelta(days=300 + i), '%Y-%m-%d') for i in range(20)]
lday = np.concatenate([lday1, lday2, lday3, lday4])
lday2 = [datetime.strptime(lday[i], '%Y-%m-%d') for i in range(len(lday))]
GT = AnDA_ssh_1_nadir.GT[:, :indLat, :indLon]
list_data = []
list_data.append(GT)
list_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:, :indLat, :indLon])
list_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:, :indLat, :indLon])
labels_data = np.array(['GT', 'Obs', 'Post-AnDA (lag=0)', 'Post-AnDA (lag=5)'])
colors = np.array(['k', '', 'red', 'blue'])
symbols = np.array(['k', '', 'o', 'o'])
lstyle = np.array(['solid', '', 'solid', 'solid'])
lwidth = np.array([2, 2, 1, 1])
min_res = 1000000000.0
for i in range(len(list_data)):
min_res = min(min_res, list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1] > min_res:
dwscale = int(list_data[i].shape[1] / min_res)
list_data[i] = einops.reduce(list_data[i],
'(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,
reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200 / min_res)
indLon = int(indLon / dwscale)
indLat = int(indLat / dwscale)
lon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))
lat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))
resfile = workpath + '/TS_AnDA_nadir_nadlag.png'
plot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,
resfile, gradient=False)
list_data = []
list_data.append(GT)
list_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:, :indLat, :indLon])
list_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:, :indLat, :indLon])
labels_data = np.array(['GT', 'Obs', 'Post-AnDA (lag=0)', 'Post-AnDA (lag=5)'])
colors = np.array(['k', '', 'red', 'blue'])
symbols = np.array(['k', '', 'o', 'o'])
lstyle = np.array(['solid', '', 'solid', 'solid'])
lwidth = np.array([2, 2, 1, 1])
min_res = 1000000000.0
for i in range(len(list_data)):
min_res = min(min_res, list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1] > min_res:
dwscale = int(list_data[i].shape[1] / min_res)
list_data[i] = einops.reduce(list_data[i],
'(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,
reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200 / min_res)
indLon = int(indLon / dwscale)
indLat = int(indLat / dwscale)
lon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))
lat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))
resfile = workpath + '/TS_AnDA_nadirswot_nadlag.png'
plot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,
resfile, gradient=False)
list_data = []
list_data.append(GT)
list_data.append(itrp_FP_GENN_nadir_0[:, :indLat, :indLon])
list_data.append(itrp_FP_GENN_nadir_5[:, :indLat, :indLon])
labels_data = np.array(['GT', 'Obs', 'FP-GENN (lag=0)', 'FP-GENN (lag=5)'])
colors = np.array(['k', '', 'red', 'blue'])
symbols = np.array(['k', '', 'o', 'o'])
lstyle = np.array(['solid', '', 'solid', 'solid'])
lwidth = np.array([2, 2, 1, 1])
min_res = 1000000000.0
for i in range(len(list_data)):
min_res = min(min_res, list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1] > min_res:
dwscale = int(list_data[i].shape[1] / min_res)
list_data[i] = einops.reduce(list_data[i],
'(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,
reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200 / min_res)
indLon = int(indLon / dwscale)
indLat = int(indLat / dwscale)
lon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))
lat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))
resfile = workpath + '/TS_GENN_nadir_nadlag.png'
plot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,
resfile, gradient=False)
list_data = []
list_data.append(GT)
list_data.append(itrp_FP_GENN_nadirswot_0[:, :indLat, :indLon])
list_data.append(itrp_FP_GENN_nadirswot_5[:, :indLat, :indLon])
labels_data = np.array(['GT', 'Obs', 'FP-GENN (lag=0)', 'FP-GENN (lag=5)'])
colors = np.array(['k', '', 'red', 'blue'])
symbols = np.array(['k', '', 'o', 'o'])
lstyle = np.array(['solid', '', 'solid', 'solid'])
lwidth = np.array([2, 2, 1, 1])
min_res = 1000000000.0
for i in range(len(list_data)):
min_res = min(min_res, list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1] > min_res:
dwscale = int(list_data[i].shape[1] / min_res)
list_data[i] = einops.reduce(list_data[i],
'(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,
reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200 / min_res)
indLon = int(indLon / dwscale)
indLat = int(indLat / dwscale)
lon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))
lat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))
resfile = workpath + '/TS_GENN_nadirswot_nadlag.png'
plot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,
resfile, gradient=False)
<|reserved_special_token_1|>
#!/usr/bin/env python
__author__ = "Maxime Beauchamp"
__version__ = "0.1"
__date__ = "2020-12-10"
__email__ = "maxime.beauchamp@imt-atantique.fr"
from graphics_OSSE import *
# function to create recursive paths
def mk_dir_recursive(dir_path):
if os.path.isdir(dir_path):
return
h, t = os.path.split(dir_path) # head/tail
if not os.path.isdir(h):
mk_dir_recursive(h)
new_path = join_paths(h, t)
if not os.path.isdir(new_path):
os.mkdir(new_path)
type_obs = sys.argv[1]
domain = sys.argv[2]
workpath = "/users/local/m19beauc/4DVARNN-DinAE_xp/"+domain+"/OSSE/scores_allmethods_nadlag_"+type_obs
scratchpath = "/users/local/m19beauc/4DVARNN-DinAE_xp/"+domain+"/OSSE"
if not os.path.exists(workpath):
mk_dir_recursive(workpath)
#else:
# shutil.rmtree(workpath)
# mk_dir_recursive(workpath)
## parameters
if domain=="OSMOSIS":
extent = [-19.5,-11.5,45.,55.]
indLat = 200
indLon = 160
elif domain=='GULFSTREAM':
extent = [-65.,-55.,33.,43.]
indLat = 200
indLon = 200
else:
extent=[-65.,-55.,30.,40.]
indLat = 200
indLon = 200
#lon = lon[:indLon]
#lat = lat[:indLat]
## store all data in a list
AnDA_nadir_lag_0_file = scratchpath+'/resAnDA_nadir_nadlag_0_'+type_obs+'/saved_path.pickle'
FP_GENN_nadir_lag_0_file = scratchpath+'/resIA_nadir_nadlag_0'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'
AnDA_nadir_lag_5_file = scratchpath+'/resAnDA_nadir_nadlag_5_'+type_obs+'/saved_path.pickle'
FP_GENN_nadir_lag_5_file = scratchpath+'/resIA_nadir_nadlag_5'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'
AnDA_nadirswot_lag_0_file = scratchpath+'/resAnDA_nadirswot_nadlag_0'+type_obs+'/saved_path.pickle'
FP_GENN_nadirswot_lag_0_file = scratchpath+'/resIA_nadirswot_nadlag_0'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'
AnDA_nadirswot_lag_5_file = scratchpath+'/resAnDA_nadirswot_nadlag_5'+type_obs+'/saved_path.pickle'
FP_GENN_nadirswot_lag_5_file = scratchpath+'/resIA_nadirswot_nadlag_5'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'
# Reload saved AnDA result
with open(AnDA_nadir_lag_0_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadir_0 = AnDA_ssh_1
itrp_dineof_nadir_0 = itrp_dineof
with open(AnDA_nadirswot_lag_0_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1
itrp_dineof_nadirswot_0 = itrp_dineof
with open(AnDA_nadir_lag_5_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadir_5 = AnDA_ssh_1
itrp_dineof_nadir_5 = itrp_dineof
with open(AnDA_nadirswot_lag_5_file, 'rb') as handle:
AnDA_ssh_1, itrp_dineof = pickle.load(handle)
AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1
itrp_dineof_nadirswot_5 = itrp_dineof
# Reload saved ConvAE and GE-NN results
with open(FP_GENN_nadir_lag_0_file, 'rb') as handle:
itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]
with open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:
itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9]
with open(FP_GENN_nadir_lag_5_file, 'rb') as handle:
itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]
with open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:
itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9]
## list of dates
lday1 = [ datetime.strftime(datetime.strptime("2012-10-01",'%Y-%m-%d')\
+ timedelta(days=60+i),"%Y-%m-%d") for i in range(20) ]
lday2 = [ datetime.strftime(datetime.strptime("2012-10-01",'%Y-%m-%d')\
+ timedelta(days=140+i),"%Y-%m-%d") for i in range(20) ]
lday3 = [ datetime.strftime(datetime.strptime("2012-10-01",'%Y-%m-%d')\
+ timedelta(days=220+i),"%Y-%m-%d") for i in range(20) ]
lday4 = [ datetime.strftime(datetime.strptime("2012-10-01",'%Y-%m-%d')\
+ timedelta(days=300+i),"%Y-%m-%d") for i in range(20) ]
lday = np.concatenate([lday1,lday2,lday3,lday4])
lday2 = [ datetime.strptime(lday[i],'%Y-%m-%d') for i in range(len(lday)) ]
GT = AnDA_ssh_1_nadir.GT[:,:indLat,:indLon]
# list_data (AnDA nadir)
list_data = []
list_data.append(GT)
list_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:,:indLat,:indLon])
list_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:,:indLat,:indLon])
# arguments for plots (nadir)
labels_data = np.array(['GT','Obs','Post-AnDA (lag=0)','Post-AnDA (lag=5)'])
colors = np.array(['k','','red','blue'])
symbols = np.array(['k','','o','o'])
lstyle = np.array(['solid','','solid','solid'])
lwidth = np.array([2,2,1,1])
# compare shapes and do appropriate downscaling with minimal resolution
min_res=1e9
for i in range(len(list_data)):
min_res=min(min_res,list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1]>min_res:
dwscale = int(list_data[i].shape[1]/min_res)
list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200/min_res)
indLon = int(indLon/dwscale)
indLat = int(indLat/dwscale)
lon = np.arange(extent[0],extent[1],1/(20/dwscale))
lat = np.arange(extent[2],extent[3],1/(20/dwscale))
## nRMSE time series
resfile=workpath+"/TS_AnDA_nadir_nadlag.png"
plot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)
# list_data (AnDA nadirswot)
list_data = []
list_data.append(GT)
list_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:,:indLat,:indLon])
list_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:,:indLat,:indLon])
# arguments for plots (nadirswot)
labels_data = np.array(['GT','Obs','Post-AnDA (lag=0)','Post-AnDA (lag=5)'])
colors = np.array(['k','','red','blue'])
symbols = np.array(['k','','o','o'])
lstyle = np.array(['solid','','solid','solid'])
lwidth = np.array([2,2,1,1])
# compare shapes and do appropriate downscaling with minimal resolution
min_res=1e9
for i in range(len(list_data)):
min_res=min(min_res,list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1]>min_res:
dwscale = int(list_data[i].shape[1]/min_res)
list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200/min_res)
indLon = int(indLon/dwscale)
indLat = int(indLat/dwscale)
lon = np.arange(extent[0],extent[1],1/(20/dwscale))
lat = np.arange(extent[2],extent[3],1/(20/dwscale))
## nRMSE time series
resfile=workpath+"/TS_AnDA_nadirswot_nadlag.png"
plot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)
# list_data (GENN nadir)
list_data = []
list_data.append(GT)
list_data.append(itrp_FP_GENN_nadir_0[:,:indLat,:indLon])
list_data.append(itrp_FP_GENN_nadir_5[:,:indLat,:indLon])
# arguments for plots (nadir)
labels_data = np.array(['GT','Obs','FP-GENN (lag=0)','FP-GENN (lag=5)'])
colors = np.array(['k','','red','blue'])
symbols = np.array(['k','','o','o'])
lstyle = np.array(['solid','','solid','solid'])
lwidth = np.array([2,2,1,1])
# compare shapes and do appropriate downscaling with minimal resolution
min_res=1e9
for i in range(len(list_data)):
min_res=min(min_res,list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1]>min_res:
dwscale = int(list_data[i].shape[1]/min_res)
list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200/min_res)
indLon = int(indLon/dwscale)
indLat = int(indLat/dwscale)
lon = np.arange(extent[0],extent[1],1/(20/dwscale))
lat = np.arange(extent[2],extent[3],1/(20/dwscale))
## nRMSE time series
resfile=workpath+"/TS_GENN_nadir_nadlag.png"
plot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)
# list_data (GENN nadirswot)
list_data = []
list_data.append(GT)
list_data.append(itrp_FP_GENN_nadirswot_0[:,:indLat,:indLon])
list_data.append(itrp_FP_GENN_nadirswot_5[:,:indLat,:indLon])
# arguments for plots (nadirswot)
labels_data = np.array(['GT','Obs','FP-GENN (lag=0)','FP-GENN (lag=5)'])
colors = np.array(['k','','red','blue'])
symbols = np.array(['k','','o','o'])
lstyle = np.array(['solid','','solid','solid'])
lwidth = np.array([2,2,1,1])
# compare shapes and do appropriate downscaling with minimal resolution
min_res=1e9
for i in range(len(list_data)):
min_res=min(min_res,list_data[i].shape[1])
for i in range(len(list_data)):
if list_data[i].shape[1]>min_res:
dwscale = int(list_data[i].shape[1]/min_res)
list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)
print(list_data[i].shape)
dwscale = int(200/min_res)
indLon = int(indLon/dwscale)
indLat = int(indLat/dwscale)
lon = np.arange(extent[0],extent[1],1/(20/dwscale))
lat = np.arange(extent[2],extent[3],1/(20/dwscale))
## nRMSE time series
resfile=workpath+"/TS_GENN_nadirswot_nadlag.png"
plot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)
|
flexible
|
{
"blob_id": "9f4cd9ed8aea03f5908aef4a154d964f0810619b",
"index": 9820,
"step-1": "<mask token>\n\n\ndef mk_dir_recursive(dir_path):\n if os.path.isdir(dir_path):\n return\n h, t = os.path.split(dir_path)\n if not os.path.isdir(h):\n mk_dir_recursive(h)\n new_path = join_paths(h, t)\n if not os.path.isdir(new_path):\n os.mkdir(new_path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mk_dir_recursive(dir_path):\n if os.path.isdir(dir_path):\n return\n h, t = os.path.split(dir_path)\n if not os.path.isdir(h):\n mk_dir_recursive(h)\n new_path = join_paths(h, t)\n if not os.path.isdir(new_path):\n os.mkdir(new_path)\n\n\n<mask token>\nif not os.path.exists(workpath):\n mk_dir_recursive(workpath)\nif domain == 'OSMOSIS':\n extent = [-19.5, -11.5, 45.0, 55.0]\n indLat = 200\n indLon = 160\nelif domain == 'GULFSTREAM':\n extent = [-65.0, -55.0, 33.0, 43.0]\n indLat = 200\n indLon = 200\nelse:\n extent = [-65.0, -55.0, 30.0, 40.0]\n indLat = 200\n indLon = 200\n<mask token>\nwith open(AnDA_nadir_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_0 = AnDA_ssh_1\n itrp_dineof_nadir_0 = itrp_dineof\nwith open(AnDA_nadirswot_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1\n itrp_dineof_nadirswot_0 = itrp_dineof\nwith open(AnDA_nadir_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_5 = AnDA_ssh_1\n itrp_dineof_nadir_5 = itrp_dineof\nwith open(AnDA_nadirswot_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1\n itrp_dineof_nadirswot_5 = itrp_dineof\nwith open(FP_GENN_nadir_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9\n ]\nwith open(FP_GENN_nadir_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9\n ]\n<mask token>\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:, :indLat, :indLon])\n<mask token>\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\n<mask token>\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n<mask token>\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:, :indLat, :indLon])\n<mask token>\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\n<mask token>\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n<mask token>\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadir_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadir_5[:, :indLat, :indLon])\n<mask token>\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\n<mask token>\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n<mask token>\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadirswot_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadirswot_5[:, :indLat, :indLon])\n<mask token>\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\n<mask token>\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n",
"step-3": "__author__ = 'Maxime Beauchamp'\n__version__ = '0.1'\n__date__ = '2020-12-10'\n__email__ = 'maxime.beauchamp@imt-atantique.fr'\n<mask token>\n\n\ndef mk_dir_recursive(dir_path):\n if os.path.isdir(dir_path):\n return\n h, t = os.path.split(dir_path)\n if not os.path.isdir(h):\n mk_dir_recursive(h)\n new_path = join_paths(h, t)\n if not os.path.isdir(new_path):\n os.mkdir(new_path)\n\n\ntype_obs = sys.argv[1]\ndomain = sys.argv[2]\nworkpath = ('/users/local/m19beauc/4DVARNN-DinAE_xp/' + domain +\n '/OSSE/scores_allmethods_nadlag_' + type_obs)\nscratchpath = '/users/local/m19beauc/4DVARNN-DinAE_xp/' + domain + '/OSSE'\nif not os.path.exists(workpath):\n mk_dir_recursive(workpath)\nif domain == 'OSMOSIS':\n extent = [-19.5, -11.5, 45.0, 55.0]\n indLat = 200\n indLon = 160\nelif domain == 'GULFSTREAM':\n extent = [-65.0, -55.0, 33.0, 43.0]\n indLat = 200\n indLon = 200\nelse:\n extent = [-65.0, -55.0, 30.0, 40.0]\n indLat = 200\n indLon = 200\nAnDA_nadir_lag_0_file = (scratchpath + '/resAnDA_nadir_nadlag_0_' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadir_lag_0_file = (scratchpath + '/resIA_nadir_nadlag_0' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadir_lag_5_file = (scratchpath + '/resAnDA_nadir_nadlag_5_' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadir_lag_5_file = (scratchpath + '/resIA_nadir_nadlag_5' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadirswot_lag_0_file = (scratchpath + '/resAnDA_nadirswot_nadlag_0' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadirswot_lag_0_file = (scratchpath + '/resIA_nadirswot_nadlag_0' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadirswot_lag_5_file = (scratchpath + '/resAnDA_nadirswot_nadlag_5' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadirswot_lag_5_file = (scratchpath + '/resIA_nadirswot_nadlag_5' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nwith open(AnDA_nadir_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_0 = AnDA_ssh_1\n itrp_dineof_nadir_0 = itrp_dineof\nwith open(AnDA_nadirswot_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1\n itrp_dineof_nadirswot_0 = itrp_dineof\nwith open(AnDA_nadir_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_5 = AnDA_ssh_1\n itrp_dineof_nadir_5 = itrp_dineof\nwith open(AnDA_nadirswot_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1\n itrp_dineof_nadirswot_5 = itrp_dineof\nwith open(FP_GENN_nadir_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9\n ]\nwith open(FP_GENN_nadir_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9\n ]\nlday1 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=60 + i), '%Y-%m-%d') for i in range(20)]\nlday2 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=140 + i), '%Y-%m-%d') for i in range(20)]\nlday3 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=220 + i), '%Y-%m-%d') for i in range(20)]\nlday4 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=300 + i), '%Y-%m-%d') for i in range(20)]\nlday = np.concatenate([lday1, lday2, lday3, lday4])\nlday2 = [datetime.strptime(lday[i], '%Y-%m-%d') for i in range(len(lday))]\nGT = AnDA_ssh_1_nadir.GT[:, :indLat, :indLon]\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'Post-AnDA (lag=0)', 'Post-AnDA (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_AnDA_nadir_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'Post-AnDA (lag=0)', 'Post-AnDA (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_AnDA_nadirswot_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadir_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadir_5[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'FP-GENN (lag=0)', 'FP-GENN (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_GENN_nadir_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadirswot_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadirswot_5[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'FP-GENN (lag=0)', 'FP-GENN (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_GENN_nadirswot_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n",
"step-4": "__author__ = 'Maxime Beauchamp'\n__version__ = '0.1'\n__date__ = '2020-12-10'\n__email__ = 'maxime.beauchamp@imt-atantique.fr'\nfrom graphics_OSSE import *\n\n\ndef mk_dir_recursive(dir_path):\n if os.path.isdir(dir_path):\n return\n h, t = os.path.split(dir_path)\n if not os.path.isdir(h):\n mk_dir_recursive(h)\n new_path = join_paths(h, t)\n if not os.path.isdir(new_path):\n os.mkdir(new_path)\n\n\ntype_obs = sys.argv[1]\ndomain = sys.argv[2]\nworkpath = ('/users/local/m19beauc/4DVARNN-DinAE_xp/' + domain +\n '/OSSE/scores_allmethods_nadlag_' + type_obs)\nscratchpath = '/users/local/m19beauc/4DVARNN-DinAE_xp/' + domain + '/OSSE'\nif not os.path.exists(workpath):\n mk_dir_recursive(workpath)\nif domain == 'OSMOSIS':\n extent = [-19.5, -11.5, 45.0, 55.0]\n indLat = 200\n indLon = 160\nelif domain == 'GULFSTREAM':\n extent = [-65.0, -55.0, 33.0, 43.0]\n indLat = 200\n indLon = 200\nelse:\n extent = [-65.0, -55.0, 30.0, 40.0]\n indLat = 200\n indLon = 200\nAnDA_nadir_lag_0_file = (scratchpath + '/resAnDA_nadir_nadlag_0_' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadir_lag_0_file = (scratchpath + '/resIA_nadir_nadlag_0' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadir_lag_5_file = (scratchpath + '/resAnDA_nadir_nadlag_5_' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadir_lag_5_file = (scratchpath + '/resIA_nadir_nadlag_5' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadirswot_lag_0_file = (scratchpath + '/resAnDA_nadirswot_nadlag_0' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadirswot_lag_0_file = (scratchpath + '/resIA_nadirswot_nadlag_0' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nAnDA_nadirswot_lag_5_file = (scratchpath + '/resAnDA_nadirswot_nadlag_5' +\n type_obs + '/saved_path.pickle')\nFP_GENN_nadirswot_lag_5_file = (scratchpath + '/resIA_nadirswot_nadlag_5' +\n type_obs + '/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle')\nwith open(AnDA_nadir_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_0 = AnDA_ssh_1\n itrp_dineof_nadir_0 = itrp_dineof\nwith open(AnDA_nadirswot_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1\n itrp_dineof_nadirswot_0 = itrp_dineof\nwith open(AnDA_nadir_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_5 = AnDA_ssh_1\n itrp_dineof_nadir_5 = itrp_dineof\nwith open(AnDA_nadirswot_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1\n itrp_dineof_nadirswot_5 = itrp_dineof\nwith open(FP_GENN_nadir_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9\n ]\nwith open(FP_GENN_nadir_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9\n ]\nlday1 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=60 + i), '%Y-%m-%d') for i in range(20)]\nlday2 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=140 + i), '%Y-%m-%d') for i in range(20)]\nlday3 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=220 + i), '%Y-%m-%d') for i in range(20)]\nlday4 = [datetime.strftime(datetime.strptime('2012-10-01', '%Y-%m-%d') +\n timedelta(days=300 + i), '%Y-%m-%d') for i in range(20)]\nlday = np.concatenate([lday1, lday2, lday3, lday4])\nlday2 = [datetime.strptime(lday[i], '%Y-%m-%d') for i in range(len(lday))]\nGT = AnDA_ssh_1_nadir.GT[:, :indLat, :indLon]\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'Post-AnDA (lag=0)', 'Post-AnDA (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_AnDA_nadir_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:, :indLat, :indLon])\nlist_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'Post-AnDA (lag=0)', 'Post-AnDA (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_AnDA_nadirswot_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadir_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadir_5[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'FP-GENN (lag=0)', 'FP-GENN (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_GENN_nadir_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadirswot_0[:, :indLat, :indLon])\nlist_data.append(itrp_FP_GENN_nadirswot_5[:, :indLat, :indLon])\nlabels_data = np.array(['GT', 'Obs', 'FP-GENN (lag=0)', 'FP-GENN (lag=5)'])\ncolors = np.array(['k', '', 'red', 'blue'])\nsymbols = np.array(['k', '', 'o', 'o'])\nlstyle = np.array(['solid', '', 'solid', 'solid'])\nlwidth = np.array([2, 2, 1, 1])\nmin_res = 1000000000.0\nfor i in range(len(list_data)):\n min_res = min(min_res, list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1] > min_res:\n dwscale = int(list_data[i].shape[1] / min_res)\n list_data[i] = einops.reduce(list_data[i],\n '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale,\n reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200 / min_res)\nindLon = int(indLon / dwscale)\nindLat = int(indLat / dwscale)\nlon = np.arange(extent[0], extent[1], 1 / (20 / dwscale))\nlat = np.arange(extent[2], extent[3], 1 / (20 / dwscale))\nresfile = workpath + '/TS_GENN_nadirswot_nadlag.png'\nplot_nRMSE(list_data, labels_data, colors, symbols, lstyle, lwidth, lday,\n resfile, gradient=False)\n",
"step-5": "#!/usr/bin/env python\n\n__author__ = \"Maxime Beauchamp\"\n__version__ = \"0.1\"\n__date__ = \"2020-12-10\"\n__email__ = \"maxime.beauchamp@imt-atantique.fr\"\n\nfrom graphics_OSSE import *\n\n# function to create recursive paths\ndef mk_dir_recursive(dir_path):\n if os.path.isdir(dir_path):\n return\n h, t = os.path.split(dir_path) # head/tail\n if not os.path.isdir(h):\n mk_dir_recursive(h)\n\n new_path = join_paths(h, t)\n if not os.path.isdir(new_path):\n os.mkdir(new_path)\n\ntype_obs = sys.argv[1]\ndomain = sys.argv[2] \n\nworkpath = \"/users/local/m19beauc/4DVARNN-DinAE_xp/\"+domain+\"/OSSE/scores_allmethods_nadlag_\"+type_obs\nscratchpath = \"/users/local/m19beauc/4DVARNN-DinAE_xp/\"+domain+\"/OSSE\"\nif not os.path.exists(workpath):\n mk_dir_recursive(workpath)\n#else:\n# shutil.rmtree(workpath)\n# mk_dir_recursive(workpath) \n\n## parameters\nif domain==\"OSMOSIS\":\n extent = [-19.5,-11.5,45.,55.]\n indLat = 200\n indLon = 160\nelif domain=='GULFSTREAM':\n extent = [-65.,-55.,33.,43.]\n indLat = 200\n indLon = 200\nelse:\n extent=[-65.,-55.,30.,40.]\n indLat = 200\n indLon = 200\n#lon = lon[:indLon]\n#lat = lat[:indLat]\n\n## store all data in a list\nAnDA_nadir_lag_0_file = scratchpath+'/resAnDA_nadir_nadlag_0_'+type_obs+'/saved_path.pickle'\nFP_GENN_nadir_lag_0_file = scratchpath+'/resIA_nadir_nadlag_0'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'\nAnDA_nadir_lag_5_file = scratchpath+'/resAnDA_nadir_nadlag_5_'+type_obs+'/saved_path.pickle'\nFP_GENN_nadir_lag_5_file = scratchpath+'/resIA_nadir_nadlag_5'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'\nAnDA_nadirswot_lag_0_file = scratchpath+'/resAnDA_nadirswot_nadlag_0'+type_obs+'/saved_path.pickle'\nFP_GENN_nadirswot_lag_0_file = scratchpath+'/resIA_nadirswot_nadlag_0'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'\nAnDA_nadirswot_lag_5_file = scratchpath+'/resAnDA_nadirswot_nadlag_5'+type_obs+'/saved_path.pickle'\nFP_GENN_nadirswot_lag_5_file = scratchpath+'/resIA_nadirswot_nadlag_5'+type_obs+'/FP_GENN_wmissing_wOI/saved_path_000_GENN_wmissing.pickle'\n\n# Reload saved AnDA result\nwith open(AnDA_nadir_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_0 = AnDA_ssh_1 \n itrp_dineof_nadir_0 = itrp_dineof\nwith open(AnDA_nadirswot_lag_0_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_0 = AnDA_ssh_1 \n itrp_dineof_nadirswot_0 = itrp_dineof\nwith open(AnDA_nadir_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadir_5 = AnDA_ssh_1\n itrp_dineof_nadir_5 = itrp_dineof\nwith open(AnDA_nadirswot_lag_5_file, 'rb') as handle:\n AnDA_ssh_1, itrp_dineof = pickle.load(handle)\n AnDA_ssh_1_nadirswot_5 = AnDA_ssh_1\n itrp_dineof_nadirswot_5 = itrp_dineof\n# Reload saved ConvAE and GE-NN results\nwith open(FP_GENN_nadir_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadir_0, rec_FP_GENN_nadir_0 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_0_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_0, rec_FP_GENN_nadirswot_0 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadir_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadir_5, rec_FP_GENN_nadir_5 = pickle.load(handle)[7:9]\nwith open(FP_GENN_nadirswot_lag_5_file, 'rb') as handle:\n itrp_FP_GENN_nadirswot_5, rec_FP_GENN_nadirswot_5 = pickle.load(handle)[7:9]\n\n\n## list of dates\nlday1 = [ datetime.strftime(datetime.strptime(\"2012-10-01\",'%Y-%m-%d')\\\n + timedelta(days=60+i),\"%Y-%m-%d\") for i in range(20) ]\nlday2 = [ datetime.strftime(datetime.strptime(\"2012-10-01\",'%Y-%m-%d')\\\n + timedelta(days=140+i),\"%Y-%m-%d\") for i in range(20) ]\nlday3 = [ datetime.strftime(datetime.strptime(\"2012-10-01\",'%Y-%m-%d')\\\n + timedelta(days=220+i),\"%Y-%m-%d\") for i in range(20) ]\nlday4 = [ datetime.strftime(datetime.strptime(\"2012-10-01\",'%Y-%m-%d')\\\n + timedelta(days=300+i),\"%Y-%m-%d\") for i in range(20) ]\nlday = np.concatenate([lday1,lday2,lday3,lday4])\nlday2 = [ datetime.strptime(lday[i],'%Y-%m-%d') for i in range(len(lday)) ]\n\nGT = AnDA_ssh_1_nadir.GT[:,:indLat,:indLon]\n# list_data (AnDA nadir)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadir_0.itrp_postAnDA[:,:indLat,:indLon])\nlist_data.append(AnDA_ssh_1_nadir_5.itrp_postAnDA[:,:indLat,:indLon])\n# arguments for plots (nadir)\nlabels_data = np.array(['GT','Obs','Post-AnDA (lag=0)','Post-AnDA (lag=5)'])\ncolors = np.array(['k','','red','blue'])\nsymbols = np.array(['k','','o','o'])\nlstyle = np.array(['solid','','solid','solid'])\nlwidth = np.array([2,2,1,1])\n# compare shapes and do appropriate downscaling with minimal resolution\nmin_res=1e9\nfor i in range(len(list_data)):\n min_res=min(min_res,list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1]>min_res:\n dwscale = int(list_data[i].shape[1]/min_res)\n list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200/min_res)\nindLon = int(indLon/dwscale)\nindLat = int(indLat/dwscale)\nlon = np.arange(extent[0],extent[1],1/(20/dwscale))\nlat = np.arange(extent[2],extent[3],1/(20/dwscale))\n## nRMSE time series\nresfile=workpath+\"/TS_AnDA_nadir_nadlag.png\"\nplot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)\n\n# list_data (AnDA nadirswot)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(AnDA_ssh_1_nadirswot_0.itrp_postAnDA[:,:indLat,:indLon])\nlist_data.append(AnDA_ssh_1_nadirswot_5.itrp_postAnDA[:,:indLat,:indLon])\n# arguments for plots (nadirswot)\nlabels_data = np.array(['GT','Obs','Post-AnDA (lag=0)','Post-AnDA (lag=5)'])\ncolors = np.array(['k','','red','blue'])\nsymbols = np.array(['k','','o','o'])\nlstyle = np.array(['solid','','solid','solid'])\nlwidth = np.array([2,2,1,1])\n# compare shapes and do appropriate downscaling with minimal resolution\nmin_res=1e9\nfor i in range(len(list_data)):\n min_res=min(min_res,list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1]>min_res:\n dwscale = int(list_data[i].shape[1]/min_res)\n list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200/min_res)\nindLon = int(indLon/dwscale)\nindLat = int(indLat/dwscale)\nlon = np.arange(extent[0],extent[1],1/(20/dwscale))\nlat = np.arange(extent[2],extent[3],1/(20/dwscale))\n## nRMSE time series\nresfile=workpath+\"/TS_AnDA_nadirswot_nadlag.png\"\nplot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)\n\n# list_data (GENN nadir)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadir_0[:,:indLat,:indLon])\nlist_data.append(itrp_FP_GENN_nadir_5[:,:indLat,:indLon])\n# arguments for plots (nadir)\nlabels_data = np.array(['GT','Obs','FP-GENN (lag=0)','FP-GENN (lag=5)'])\ncolors = np.array(['k','','red','blue'])\nsymbols = np.array(['k','','o','o'])\nlstyle = np.array(['solid','','solid','solid'])\nlwidth = np.array([2,2,1,1])\n# compare shapes and do appropriate downscaling with minimal resolution\nmin_res=1e9\nfor i in range(len(list_data)):\n min_res=min(min_res,list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1]>min_res:\n dwscale = int(list_data[i].shape[1]/min_res)\n list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200/min_res)\nindLon = int(indLon/dwscale)\nindLat = int(indLat/dwscale)\nlon = np.arange(extent[0],extent[1],1/(20/dwscale))\nlat = np.arange(extent[2],extent[3],1/(20/dwscale))\n## nRMSE time series\nresfile=workpath+\"/TS_GENN_nadir_nadlag.png\"\nplot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)\n\n# list_data (GENN nadirswot)\nlist_data = []\nlist_data.append(GT)\nlist_data.append(itrp_FP_GENN_nadirswot_0[:,:indLat,:indLon])\nlist_data.append(itrp_FP_GENN_nadirswot_5[:,:indLat,:indLon])\n# arguments for plots (nadirswot)\nlabels_data = np.array(['GT','Obs','FP-GENN (lag=0)','FP-GENN (lag=5)'])\ncolors = np.array(['k','','red','blue'])\nsymbols = np.array(['k','','o','o'])\nlstyle = np.array(['solid','','solid','solid'])\nlwidth = np.array([2,2,1,1])\n# compare shapes and do appropriate downscaling with minimal resolution\nmin_res=1e9\nfor i in range(len(list_data)):\n min_res=min(min_res,list_data[i].shape[1])\nfor i in range(len(list_data)):\n if list_data[i].shape[1]>min_res:\n dwscale = int(list_data[i].shape[1]/min_res)\n list_data[i] = einops.reduce(list_data[i], '(t t1) (h h1) (w w1) -> t h w', t1=1, h1=dwscale, w1=dwscale, reduction=np.nanmean)\n print(list_data[i].shape)\ndwscale = int(200/min_res)\nindLon = int(indLon/dwscale)\nindLat = int(indLat/dwscale)\nlon = np.arange(extent[0],extent[1],1/(20/dwscale))\nlat = np.arange(extent[2],extent[3],1/(20/dwscale))\n## nRMSE time series\nresfile=workpath+\"/TS_GENN_nadirswot_nadlag.png\"\nplot_nRMSE(list_data,labels_data,colors,symbols,lstyle,lwidth,lday,resfile,gradient=False)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print((lambda myself: lambda n: IF(IS_ZERO(n))(lambda _: ONE)(lambda _:
MULT(n)(myself(myself)(SUB1(n)))))(lambda myself: lambda n: IF(IS_ZERO(
n))(lambda _: ONE)(lambda _: MULT(n)(myself(myself)(SUB1(n)))))(6))
<|reserved_special_token_1|>
IS_ZERO = lambda x: x == 0
ONE = 1
SUB1 = lambda x: x - 1
MULT = lambda x: lambda y: x * y
IF = lambda cond: lambda t_func: lambda f_func: t_func(None
) if cond else f_func(None)
print((lambda myself: lambda n: IF(IS_ZERO(n))(lambda _: ONE)(lambda _:
MULT(n)(myself(myself)(SUB1(n)))))(lambda myself: lambda n: IF(IS_ZERO(
n))(lambda _: ONE)(lambda _: MULT(n)(myself(myself)(SUB1(n)))))(6))
<|reserved_special_token_1|>
IS_ZERO = lambda x: x == 0
ONE = 1
SUB1 = lambda x: x - 1
MULT = lambda x: lambda y: x * y
IF = lambda cond: lambda t_func: lambda f_func: t_func(None) if cond else f_func(None)
print(
(
lambda myself: (
lambda n: (
IF(
IS_ZERO(n)
)(
lambda _: ONE
)(
lambda _: MULT(n)( myself(myself)(SUB1(n)) )
)
)
)
)(
lambda myself: (
lambda n: (
IF(
IS_ZERO(n)
)(
lambda _: ONE
)(
lambda _: MULT(n)( myself(myself)(SUB1(n)) )
)
)
)
)
(6)
)
|
flexible
|
{
"blob_id": "f8601ed7ba7c2b8d2dd8d5f74f7b5ae8e99dad78",
"index": 186,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint((lambda myself: lambda n: IF(IS_ZERO(n))(lambda _: ONE)(lambda _:\n MULT(n)(myself(myself)(SUB1(n)))))(lambda myself: lambda n: IF(IS_ZERO(\n n))(lambda _: ONE)(lambda _: MULT(n)(myself(myself)(SUB1(n)))))(6))\n",
"step-3": "IS_ZERO = lambda x: x == 0\nONE = 1\nSUB1 = lambda x: x - 1\nMULT = lambda x: lambda y: x * y\nIF = lambda cond: lambda t_func: lambda f_func: t_func(None\n ) if cond else f_func(None)\nprint((lambda myself: lambda n: IF(IS_ZERO(n))(lambda _: ONE)(lambda _:\n MULT(n)(myself(myself)(SUB1(n)))))(lambda myself: lambda n: IF(IS_ZERO(\n n))(lambda _: ONE)(lambda _: MULT(n)(myself(myself)(SUB1(n)))))(6))\n",
"step-4": "IS_ZERO = lambda x: x == 0\nONE = 1\nSUB1 = lambda x: x - 1\nMULT = lambda x: lambda y: x * y\nIF = lambda cond: lambda t_func: lambda f_func: t_func(None) if cond else f_func(None)\n\nprint(\n (\n lambda myself: (\n lambda n: (\n IF(\n IS_ZERO(n)\n )(\n lambda _: ONE\n )(\n lambda _: MULT(n)( myself(myself)(SUB1(n)) )\n )\n )\n )\n )(\n lambda myself: (\n lambda n: (\n IF(\n IS_ZERO(n)\n )(\n lambda _: ONE\n )(\n lambda _: MULT(n)( myself(myself)(SUB1(n)) )\n )\n )\n )\n )\n (6)\n)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyTestCase(TestCase):
def test_mark_done(self):
user = User.objects.create_user(email='user@…', username='user',
password='somepasswd')
todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)
res = todo.mark_done(user)
self.assertTrue(res)
self.assertEqual(Todo.objects.count(), 1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyTestCase(TestCase):
def test_mark_done(self):
user = User.objects.create_user(email='user@…', username='user',
password='somepasswd')
todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)
res = todo.mark_done(user)
self.assertTrue(res)
self.assertEqual(Todo.objects.count(), 1)
def test_mark_done_already_done(self):
user = User.objects.create_user(email='user@…', username='user',
password='somepasswd')
todo = Todo(title='SomeTitle', description='SomeDescr', is_done=
True, done_by=user, owner=user)
res = todo.mark_done(user)
self.assertIsNone(res)
self.assertEqual(Todo.objects.count(), 0)
<|reserved_special_token_1|>
from django.test import TestCase
from django.contrib.auth.models import User
from ..models import Todo
class MyTestCase(TestCase):
def test_mark_done(self):
user = User.objects.create_user(email='user@…', username='user',
password='somepasswd')
todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)
res = todo.mark_done(user)
self.assertTrue(res)
self.assertEqual(Todo.objects.count(), 1)
def test_mark_done_already_done(self):
user = User.objects.create_user(email='user@…', username='user',
password='somepasswd')
todo = Todo(title='SomeTitle', description='SomeDescr', is_done=
True, done_by=user, owner=user)
res = todo.mark_done(user)
self.assertIsNone(res)
self.assertEqual(Todo.objects.count(), 0)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.contrib.auth.models import User
from ..models import Todo
class MyTestCase(TestCase):
def test_mark_done(self):
user = User.objects.create_user(email='user@…', username='user', password='somepasswd')
todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)
res = todo.mark_done(user)
self.assertTrue(res)
self.assertEqual(Todo.objects.count(), 1)
def test_mark_done_already_done(self):
user = User.objects.create_user(email='user@…', username='user', password='somepasswd')
todo = Todo(title='SomeTitle', description='SomeDescr', is_done=True, done_by=user, owner=user)
res = todo.mark_done(user)
self.assertIsNone(res)
# todo not saved because mark_done don't save already done todos
self.assertEqual(Todo.objects.count(), 0)
|
flexible
|
{
"blob_id": "5c81ddbc8f5a162949a100dbef1c69551d9e267a",
"index": 37,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyTestCase(TestCase):\n\n def test_mark_done(self):\n user = User.objects.create_user(email='user@…', username='user',\n password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)\n res = todo.mark_done(user)\n self.assertTrue(res)\n self.assertEqual(Todo.objects.count(), 1)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass MyTestCase(TestCase):\n\n def test_mark_done(self):\n user = User.objects.create_user(email='user@…', username='user',\n password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)\n res = todo.mark_done(user)\n self.assertTrue(res)\n self.assertEqual(Todo.objects.count(), 1)\n\n def test_mark_done_already_done(self):\n user = User.objects.create_user(email='user@…', username='user',\n password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', is_done=\n True, done_by=user, owner=user)\n res = todo.mark_done(user)\n self.assertIsNone(res)\n self.assertEqual(Todo.objects.count(), 0)\n",
"step-4": "from django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom ..models import Todo\n\n\nclass MyTestCase(TestCase):\n\n def test_mark_done(self):\n user = User.objects.create_user(email='user@…', username='user',\n password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)\n res = todo.mark_done(user)\n self.assertTrue(res)\n self.assertEqual(Todo.objects.count(), 1)\n\n def test_mark_done_already_done(self):\n user = User.objects.create_user(email='user@…', username='user',\n password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', is_done=\n True, done_by=user, owner=user)\n res = todo.mark_done(user)\n self.assertIsNone(res)\n self.assertEqual(Todo.objects.count(), 0)\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom ..models import Todo\n\n\nclass MyTestCase(TestCase):\n\n def test_mark_done(self):\n user = User.objects.create_user(email='user@…', username='user', password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', owner=user)\n res = todo.mark_done(user)\n self.assertTrue(res)\n self.assertEqual(Todo.objects.count(), 1)\n\n def test_mark_done_already_done(self):\n user = User.objects.create_user(email='user@…', username='user', password='somepasswd')\n todo = Todo(title='SomeTitle', description='SomeDescr', is_done=True, done_by=user, owner=user)\n res = todo.mark_done(user)\n self.assertIsNone(res)\n # todo not saved because mark_done don't save already done todos\n self.assertEqual(Todo.objects.count(), 0)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
@csrf_exempt
def TBGRApi(request, tbgrno=0):
if request.method == 'GET':
tbgrs = TBGR.objects.all()
tbgrs_serializer = TBGRSerializer(tbgrs, many=True)
return JsonResponse(tbgrs_serializer.data, safe=False)
elif request.method == 'POST':
tbgr_data = JSONParser().parse(request)
tbgr_serializer = TBGRSerializer(data=tbgr_data)
if tbgr_serializer.is_valid():
tbgr_serializer.save()
return JsonResponse('Added Successfully!!', safe=False)
return JsonResponse('Failed to Add.', safe=False)
elif request.method == 'PUT':
tbgr_data = JSONParser().parse(request)
tbgr = TBGR.objects.get(tbgrno=tbgr_data['tbgrno'])
tbgr_serializer = TBGRSerializer(tbgr, data=tbgr_data)
if tbgr_serializer.is_valid():
tbgr_serializer.save()
return JsonResponse('Updated Successfully!!', safe=False)
return JsonResponse('Failed to Update.', safe=False)
elif request.method == 'DELETE':
tbgr = TBGR.objects.get(tbgrno=tbgrno)
tbgr.delete()
return JsonResponse('Deleted Succeffully!!', safe=False)
<|reserved_special_token_0|>
@csrf_exempt
def VillageApi(request, villageid=0):
if request.method == 'GET':
villages = Village.objects.all()
villages_serializer = VillageSerializer(villages, many=True)
return JsonResponse(villages_serializer.data, safe=False)
elif request.method == 'POST':
village_data = JSONParser().parse(request)
village_serializer = VillageSerializer(data=village_data)
if village_serializer.is_valid():
village_serializer.save()
return JsonResponse('Added Successfully!!', safe=False)
return JsonResponse('Failed to Add.', safe=False)
elif request.method == 'PUT':
village_data = JSONParser().parse(request)
village = Village.objects.get(villageid=village_data['villageid'])
village_serializer = VillageSerializer(village, data=village_data)
if village_serializer.is_valid():
village_serializer.save()
return JsonResponse('Updated Successfully!!', safe=False)
return JsonResponse('Failed to Update.', safe=False)
elif request.method == 'DELETE':
village = Village.objects.get(villageid=villageid)
village.delete()
return JsonResponse('Deleted Succeffully!!', safe=False)
<|reserved_special_token_0|>
@csrf_exempt
def ContactApi(request, phone=0):
if request.method == 'GET':
contacts = Contacts.objects.all()
contacts_serializer = ContactSerializer(contacts, many=True)
return JsonResponse(contacts_serializer.data, safe=False)
elif request.method == 'POST':
contact_data = JSONParser().parse(request)
contact_serializer = ContactSerializer(data=contact_data)
if contact_serializer.is_valid():
contact_serializer.save()
return JsonResponse('Added Successfully!!', safe=False)
return JsonResponse('Failed to Add.', safe=False, status=404)
elif request.method == 'PUT':
contact_data = JSONParser().parse(request)
contact = Contacts.objects.get(phone=contact_data['phone'])
contact_serializer = ContactSerializer(contact, data=contact_data)
if contact_serializer.is_valid():
contact_serializer.save()
return JsonResponse('Updated Successfully!!', safe=False)
return JsonResponse('Failed to Update.', safe=False)
elif request.method == 'DELETE':
contact = Contacts.objects.get(phone=phone)
contact.delete()
return JsonResponse('Deleted Succeffully!!', safe=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@csrf_exempt
def TBGRApi(request, tbgrno=0):
if request.method == 'GET':
tbgrs = TBGR.objects.all()
tbgrs_serializer = TBGRSerializer(tbgrs, many=True)
return JsonResponse(tbgrs_serializer.data, safe=False)
elif request.method == 'POST':
tbgr_data = JSONParser().parse(request)
tbgr_serializer = TBGRSerializer(data=tbgr_data)
if tbgr_serializer.is_valid():
tbgr_serializer.save()
return JsonResponse('Added Successfully!!', safe=False)
return JsonResponse('Failed to Add.', safe=False)
elif request.method == 'PUT':
tbgr_data = JSONParser().parse(request)
tbgr = TBGR.objects.get(tbgrno=tbgr_data['tbgrno'])
tbgr_serializer = TBGRSerializer(tbgr, data=tbgr_data)
if tbgr_serializer.is_valid():
tbgr_serializer.save()
return JsonResponse('Updated Successfully!!', safe=False)
return JsonResponse('Failed to Update.', safe=False)
elif request.method == 'DELETE':
tbgr = TBGR.objects.get(tbgrno=tbgrno)
tbgr.delete()
return JsonResponse('Deleted Succeffully!!', safe=False)
<|reserved_special_token_0|>
@csrf_exempt
def VillageApi(request, villageid=0):
if request.method == 'GET':
villages = Village.objects.all()
villages_serializer = VillageSerializer(villages, many=True)
return JsonResponse(villages_serializer.data, safe=False)
elif request.method == 'POST':
village_data = JSONParser().parse(request)
village_serializer = VillageSerializer(data=village_data)
if village_serializer.is_valid():
village_serializer.save()
return JsonResponse('Added Successfully!!', safe=False)
return JsonResponse('Failed to Add.', safe=False)
elif request.method == 'PUT':
village_data = JSONParser().parse(request)
village = Village.objects.get(villageid=village_data['villageid'])
village_serializer = VillageSerializer(village, data=village_data)
if village_serializer.is_valid():
village_serializer.save()
return JsonResponse('Updated Successfully!!', safe=False)
return JsonResponse('Failed to Update.', safe=False)
elif request.method == 'DELETE':
village = Village.objects.get(villageid=villageid)
village.delete()
return JsonResponse('Deleted Succeffully!!', safe=False)
<|reserved_special_token_0|>
@csrf_exempt
def GradeApi(request):
if request.method == 'GET':
grades = Grades.objects.all()
grades_serializer = GradeSerializer(grades, many=True)
return JsonResponse(grades_serializer.data, safe=False)
@csrf_exempt
def ContactApi(request, phone=0):
if request.method == 'GET':
contacts = Contacts.objects.all()
contacts_serializer = ContactSerializer(contacts, many=True)
return JsonResponse(contacts_serializer.data, safe=False)
elif request.method == 'POST':
contact_data = JSONParser().parse(request)
contact_serializer = ContactSerializer(data=contact_data)
if contact_serializer.is_valid():
contact_serializer.save()
return JsonResponse('Added Successfully!!', safe=False)
return JsonResponse('Failed to Add.', safe=False, status=404)
elif request.method == 'PUT':
contact_data = JSONParser().parse(request)
contact = Contacts.objects.get(phone=contact_data['phone'])
contact_serializer = ContactSerializer(contact, data=contact_data)
if contact_serializer.is_valid():
contact_serializer.save()
return JsonResponse('Updated Successfully!!', safe=False)
return JsonResponse('Failed to Update.', safe=False)
elif request.method == 'DELETE':
contact = Contacts.objects.get(phone=phone)
contact.delete()
return JsonResponse('Deleted Succeffully!!', safe=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@csrf_exempt
def TBGRApi(request, tbgrno=0):
if request.method == 'GET':
tbgrs = TBGR.objects.all()
tbgrs_serializer = TBGRSerializer(tbgrs, many=True)
return JsonResponse(tbgrs_serializer.data, safe=False)
elif request.method == 'POST':
tbgr_data = JSONParser().parse(request)
tbgr_serializer = TBGRSerializer(data=tbgr_data)
if tbgr_serializer.is_valid():
tbgr_serializer.save()
return JsonResponse('Added Successfully!!', safe=False)
return JsonResponse('Failed to Add.', safe=False)
elif request.method == 'PUT':
tbgr_data = JSONParser().parse(request)
tbgr = TBGR.objects.get(tbgrno=tbgr_data['tbgrno'])
tbgr_serializer = TBGRSerializer(tbgr, data=tbgr_data)
if tbgr_serializer.is_valid():
tbgr_serializer.save()
return JsonResponse('Updated Successfully!!', safe=False)
return JsonResponse('Failed to Update.', safe=False)
elif request.method == 'DELETE':
tbgr = TBGR.objects.get(tbgrno=tbgrno)
tbgr.delete()
return JsonResponse('Deleted Succeffully!!', safe=False)
<|reserved_special_token_0|>
@csrf_exempt
def VillageApi(request, villageid=0):
if request.method == 'GET':
villages = Village.objects.all()
villages_serializer = VillageSerializer(villages, many=True)
return JsonResponse(villages_serializer.data, safe=False)
elif request.method == 'POST':
village_data = JSONParser().parse(request)
village_serializer = VillageSerializer(data=village_data)
if village_serializer.is_valid():
village_serializer.save()
return JsonResponse('Added Successfully!!', safe=False)
return JsonResponse('Failed to Add.', safe=False)
elif request.method == 'PUT':
village_data = JSONParser().parse(request)
village = Village.objects.get(villageid=village_data['villageid'])
village_serializer = VillageSerializer(village, data=village_data)
if village_serializer.is_valid():
village_serializer.save()
return JsonResponse('Updated Successfully!!', safe=False)
return JsonResponse('Failed to Update.', safe=False)
elif request.method == 'DELETE':
village = Village.objects.get(villageid=villageid)
village.delete()
return JsonResponse('Deleted Succeffully!!', safe=False)
@csrf_exempt
def SlipApi(request, lotno=0):
if request.method == 'GET':
slips = Slip.objects.all()
slips_serializer = SlipSerializer(slips, many=True)
return JsonResponse(slips_serializer.data, safe=False)
elif request.method == 'POST':
slip_data = JSONParser().parse(request)
slip_serializer = SlipSerializer(data=slip_data)
if slip_serializer.is_valid():
slip_serializer.save()
return JsonResponse('Added Successfully!!', safe=False)
return JsonResponse('Failed to Add.', safe=False)
elif request.method == 'PUT':
slip_data = JSONParser().parse(request)
slip = Slip.objects.get(lotno=slip_data['lotno'])
slip_serializer = SlipSerializer(slip, data=slip_data)
if slip_serializer.is_valid():
slip_serializer.save()
return JsonResponse('Updated Successfully!!', safe=False)
return JsonResponse('Failed to Update.', safe=False)
elif request.method == 'DELETE':
slip = Slip.objects.get(lotno=lotno)
slip.delete()
return JsonResponse('Deleted Succeffully!!', safe=False)
@csrf_exempt
def GradeApi(request):
if request.method == 'GET':
grades = Grades.objects.all()
grades_serializer = GradeSerializer(grades, many=True)
return JsonResponse(grades_serializer.data, safe=False)
@csrf_exempt
def ContactApi(request, phone=0):
if request.method == 'GET':
contacts = Contacts.objects.all()
contacts_serializer = ContactSerializer(contacts, many=True)
return JsonResponse(contacts_serializer.data, safe=False)
elif request.method == 'POST':
contact_data = JSONParser().parse(request)
contact_serializer = ContactSerializer(data=contact_data)
if contact_serializer.is_valid():
contact_serializer.save()
return JsonResponse('Added Successfully!!', safe=False)
return JsonResponse('Failed to Add.', safe=False, status=404)
elif request.method == 'PUT':
contact_data = JSONParser().parse(request)
contact = Contacts.objects.get(phone=contact_data['phone'])
contact_serializer = ContactSerializer(contact, data=contact_data)
if contact_serializer.is_valid():
contact_serializer.save()
return JsonResponse('Updated Successfully!!', safe=False)
return JsonResponse('Failed to Update.', safe=False)
elif request.method == 'DELETE':
contact = Contacts.objects.get(phone=phone)
contact.delete()
return JsonResponse('Deleted Succeffully!!', safe=False)
<|reserved_special_token_1|>
from django.shortcuts import render
from django_filters.rest_framework import DjangoFilterBackend
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from django.http import JsonResponse, Http404
from .serializers import *
from .models import *
from .filter import *
from rest_framework import generics
from rest_framework.filters import SearchFilter, OrderingFilter
@csrf_exempt
def TBGRApi(request, tbgrno=0):
if request.method == 'GET':
tbgrs = TBGR.objects.all()
tbgrs_serializer = TBGRSerializer(tbgrs, many=True)
return JsonResponse(tbgrs_serializer.data, safe=False)
elif request.method == 'POST':
tbgr_data = JSONParser().parse(request)
tbgr_serializer = TBGRSerializer(data=tbgr_data)
if tbgr_serializer.is_valid():
tbgr_serializer.save()
return JsonResponse('Added Successfully!!', safe=False)
return JsonResponse('Failed to Add.', safe=False)
elif request.method == 'PUT':
tbgr_data = JSONParser().parse(request)
tbgr = TBGR.objects.get(tbgrno=tbgr_data['tbgrno'])
tbgr_serializer = TBGRSerializer(tbgr, data=tbgr_data)
if tbgr_serializer.is_valid():
tbgr_serializer.save()
return JsonResponse('Updated Successfully!!', safe=False)
return JsonResponse('Failed to Update.', safe=False)
elif request.method == 'DELETE':
tbgr = TBGR.objects.get(tbgrno=tbgrno)
tbgr.delete()
return JsonResponse('Deleted Succeffully!!', safe=False)
@csrf_exempt
def BoardApi(request):
if request.method == 'GET':
boards = Board.objects.all()
boards_serializer = BoardSerializer(boards, many=True)
return JsonResponse(boards_serializer.data, safe=False)
@csrf_exempt
def VillageApi(request, villageid=0):
if request.method == 'GET':
villages = Village.objects.all()
villages_serializer = VillageSerializer(villages, many=True)
return JsonResponse(villages_serializer.data, safe=False)
elif request.method == 'POST':
village_data = JSONParser().parse(request)
village_serializer = VillageSerializer(data=village_data)
if village_serializer.is_valid():
village_serializer.save()
return JsonResponse('Added Successfully!!', safe=False)
return JsonResponse('Failed to Add.', safe=False)
elif request.method == 'PUT':
village_data = JSONParser().parse(request)
village = Village.objects.get(villageid=village_data['villageid'])
village_serializer = VillageSerializer(village, data=village_data)
if village_serializer.is_valid():
village_serializer.save()
return JsonResponse('Updated Successfully!!', safe=False)
return JsonResponse('Failed to Update.', safe=False)
elif request.method == 'DELETE':
village = Village.objects.get(villageid=villageid)
village.delete()
return JsonResponse('Deleted Succeffully!!', safe=False)
@csrf_exempt
def SlipApi(request, lotno=0):
if request.method == 'GET':
slips = Slip.objects.all()
slips_serializer = SlipSerializer(slips, many=True)
return JsonResponse(slips_serializer.data, safe=False)
elif request.method == 'POST':
slip_data = JSONParser().parse(request)
slip_serializer = SlipSerializer(data=slip_data)
if slip_serializer.is_valid():
slip_serializer.save()
return JsonResponse('Added Successfully!!', safe=False)
return JsonResponse('Failed to Add.', safe=False)
elif request.method == 'PUT':
slip_data = JSONParser().parse(request)
slip = Slip.objects.get(lotno=slip_data['lotno'])
slip_serializer = SlipSerializer(slip, data=slip_data)
if slip_serializer.is_valid():
slip_serializer.save()
return JsonResponse('Updated Successfully!!', safe=False)
return JsonResponse('Failed to Update.', safe=False)
elif request.method == 'DELETE':
slip = Slip.objects.get(lotno=lotno)
slip.delete()
return JsonResponse('Deleted Succeffully!!', safe=False)
@csrf_exempt
def GradeApi(request):
if request.method == 'GET':
grades = Grades.objects.all()
grades_serializer = GradeSerializer(grades, many=True)
return JsonResponse(grades_serializer.data, safe=False)
@csrf_exempt
def ContactApi(request, phone=0):
if request.method == 'GET':
contacts = Contacts.objects.all()
contacts_serializer = ContactSerializer(contacts, many=True)
return JsonResponse(contacts_serializer.data, safe=False)
elif request.method == 'POST':
contact_data = JSONParser().parse(request)
contact_serializer = ContactSerializer(data=contact_data)
if contact_serializer.is_valid():
contact_serializer.save()
return JsonResponse('Added Successfully!!', safe=False)
return JsonResponse('Failed to Add.', safe=False, status=404)
elif request.method == 'PUT':
contact_data = JSONParser().parse(request)
contact = Contacts.objects.get(phone=contact_data['phone'])
contact_serializer = ContactSerializer(contact, data=contact_data)
if contact_serializer.is_valid():
contact_serializer.save()
return JsonResponse('Updated Successfully!!', safe=False)
return JsonResponse('Failed to Update.', safe=False)
elif request.method == 'DELETE':
contact = Contacts.objects.get(phone=phone)
contact.delete()
return JsonResponse('Deleted Succeffully!!', safe=False)
<|reserved_special_token_1|>
from django.shortcuts import render
from django_filters.rest_framework import DjangoFilterBackend
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from django.http import JsonResponse, Http404
from .serializers import *
from .models import *
from .filter import *
from rest_framework import generics
from rest_framework.filters import SearchFilter, OrderingFilter
# Create your views here.
@csrf_exempt
def TBGRApi(request, tbgrno=0):
if request.method == 'GET':
tbgrs = TBGR.objects.all()
tbgrs_serializer = TBGRSerializer(tbgrs, many=True)
return JsonResponse(tbgrs_serializer.data, safe=False)
elif request.method == 'POST':
tbgr_data = JSONParser().parse(request)
tbgr_serializer = TBGRSerializer(data=tbgr_data)
if tbgr_serializer.is_valid():
tbgr_serializer.save()
return JsonResponse("Added Successfully!!", safe=False)
return JsonResponse("Failed to Add.", safe=False)
elif request.method == 'PUT':
tbgr_data = JSONParser().parse(request)
tbgr = TBGR.objects.get(tbgrno=tbgr_data['tbgrno'])
tbgr_serializer = TBGRSerializer(tbgr, data=tbgr_data)
if tbgr_serializer.is_valid():
tbgr_serializer.save()
return JsonResponse("Updated Successfully!!", safe=False)
return JsonResponse("Failed to Update.", safe=False)
elif request.method == 'DELETE':
tbgr = TBGR.objects.get(tbgrno=tbgrno)
tbgr.delete()
return JsonResponse("Deleted Succeffully!!", safe=False)
@csrf_exempt
def BoardApi(request):
if request.method=='GET':
boards = Board.objects.all()
boards_serializer = BoardSerializer(boards, many=True)
return JsonResponse(boards_serializer.data, safe=False)
@csrf_exempt
def VillageApi(request, villageid=0):
if request.method == 'GET':
villages = Village.objects.all()
villages_serializer = VillageSerializer(villages, many=True)
return JsonResponse(villages_serializer.data, safe=False)
elif request.method == 'POST':
village_data = JSONParser().parse(request)
village_serializer = VillageSerializer(data=village_data)
if village_serializer.is_valid():
village_serializer.save()
return JsonResponse("Added Successfully!!", safe=False)
return JsonResponse("Failed to Add.", safe=False)
elif request.method == 'PUT':
village_data = JSONParser().parse(request)
village = Village.objects.get(villageid=village_data['villageid'])
village_serializer = VillageSerializer(village, data=village_data)
if village_serializer.is_valid():
village_serializer.save()
return JsonResponse("Updated Successfully!!", safe=False)
return JsonResponse("Failed to Update.", safe=False)
elif request.method == 'DELETE':
village = Village.objects.get(villageid=villageid)
village.delete()
return JsonResponse("Deleted Succeffully!!", safe=False)
@csrf_exempt
def SlipApi(request, lotno=0):
if request.method == 'GET':
slips = Slip.objects.all()
slips_serializer = SlipSerializer(slips, many=True)
return JsonResponse(slips_serializer.data, safe=False)
elif request.method == 'POST':
slip_data = JSONParser().parse(request)
slip_serializer = SlipSerializer(data=slip_data)
if slip_serializer.is_valid():
slip_serializer.save()
return JsonResponse("Added Successfully!!", safe=False)
return JsonResponse("Failed to Add.", safe=False)
# elif request.method == 'POST':
# slip_data = JSONParser().parse(request)
# slips = Slip.objects.all()
# if slip_data['lotno']:
# slips = slips.filter(lotno=slip_data['lotno'])
# if slip_data['tbgrno']:
# slips = slips.filter(tbgrno=slip_data['tbgrno'])
# if slip_data['grade']:
# slips = slips.filter(grade=slip_data['grade'])
# slips_serializer = SlipSerializer(slips, many=True)
# return JsonResponse(slips_serializer.data, safe=False)
elif request.method == 'PUT':
slip_data = JSONParser().parse(request)
slip = Slip.objects.get(lotno=slip_data['lotno'])
slip_serializer = SlipSerializer(slip, data=slip_data)
if slip_serializer.is_valid():
slip_serializer.save()
return JsonResponse("Updated Successfully!!", safe=False)
return JsonResponse("Failed to Update.", safe=False)
elif request.method == 'DELETE':
slip = Slip.objects.get(lotno=lotno)
slip.delete()
return JsonResponse("Deleted Succeffully!!", safe=False)
@csrf_exempt
def GradeApi(request):
if request.method == 'GET':
grades = Grades.objects.all()
grades_serializer = GradeSerializer(grades, many=True)
return JsonResponse(grades_serializer.data, safe=False)
@csrf_exempt
def ContactApi(request, phone=0):
if request.method == 'GET':
contacts = Contacts.objects.all()
contacts_serializer = ContactSerializer(contacts, many=True)
return JsonResponse(contacts_serializer.data, safe=False)
elif request.method == 'POST':
contact_data = JSONParser().parse(request)
contact_serializer = ContactSerializer(data=contact_data)
if contact_serializer.is_valid():
contact_serializer.save()
return JsonResponse("Added Successfully!!", safe=False)
return JsonResponse("Failed to Add.", safe=False, status=404)
elif request.method == 'PUT':
contact_data = JSONParser().parse(request)
contact = Contacts.objects.get(phone=contact_data['phone'])
contact_serializer = ContactSerializer(contact, data=contact_data)
if contact_serializer.is_valid():
contact_serializer.save()
return JsonResponse("Updated Successfully!!", safe=False)
return JsonResponse("Failed to Update.", safe=False)
elif request.method == 'DELETE':
contact = Contacts.objects.get(phone=phone)
contact.delete()
return JsonResponse("Deleted Succeffully!!", safe=False)
|
flexible
|
{
"blob_id": "e0c6fb414d87c0a6377538089226e37b044edc70",
"index": 8383,
"step-1": "<mask token>\n\n\n@csrf_exempt\ndef TBGRApi(request, tbgrno=0):\n if request.method == 'GET':\n tbgrs = TBGR.objects.all()\n tbgrs_serializer = TBGRSerializer(tbgrs, many=True)\n return JsonResponse(tbgrs_serializer.data, safe=False)\n elif request.method == 'POST':\n tbgr_data = JSONParser().parse(request)\n tbgr_serializer = TBGRSerializer(data=tbgr_data)\n if tbgr_serializer.is_valid():\n tbgr_serializer.save()\n return JsonResponse('Added Successfully!!', safe=False)\n return JsonResponse('Failed to Add.', safe=False)\n elif request.method == 'PUT':\n tbgr_data = JSONParser().parse(request)\n tbgr = TBGR.objects.get(tbgrno=tbgr_data['tbgrno'])\n tbgr_serializer = TBGRSerializer(tbgr, data=tbgr_data)\n if tbgr_serializer.is_valid():\n tbgr_serializer.save()\n return JsonResponse('Updated Successfully!!', safe=False)\n return JsonResponse('Failed to Update.', safe=False)\n elif request.method == 'DELETE':\n tbgr = TBGR.objects.get(tbgrno=tbgrno)\n tbgr.delete()\n return JsonResponse('Deleted Succeffully!!', safe=False)\n\n\n<mask token>\n\n\n@csrf_exempt\ndef VillageApi(request, villageid=0):\n if request.method == 'GET':\n villages = Village.objects.all()\n villages_serializer = VillageSerializer(villages, many=True)\n return JsonResponse(villages_serializer.data, safe=False)\n elif request.method == 'POST':\n village_data = JSONParser().parse(request)\n village_serializer = VillageSerializer(data=village_data)\n if village_serializer.is_valid():\n village_serializer.save()\n return JsonResponse('Added Successfully!!', safe=False)\n return JsonResponse('Failed to Add.', safe=False)\n elif request.method == 'PUT':\n village_data = JSONParser().parse(request)\n village = Village.objects.get(villageid=village_data['villageid'])\n village_serializer = VillageSerializer(village, data=village_data)\n if village_serializer.is_valid():\n village_serializer.save()\n return JsonResponse('Updated Successfully!!', safe=False)\n return JsonResponse('Failed to Update.', safe=False)\n elif request.method == 'DELETE':\n village = Village.objects.get(villageid=villageid)\n village.delete()\n return JsonResponse('Deleted Succeffully!!', safe=False)\n\n\n<mask token>\n\n\n@csrf_exempt\ndef ContactApi(request, phone=0):\n if request.method == 'GET':\n contacts = Contacts.objects.all()\n contacts_serializer = ContactSerializer(contacts, many=True)\n return JsonResponse(contacts_serializer.data, safe=False)\n elif request.method == 'POST':\n contact_data = JSONParser().parse(request)\n contact_serializer = ContactSerializer(data=contact_data)\n if contact_serializer.is_valid():\n contact_serializer.save()\n return JsonResponse('Added Successfully!!', safe=False)\n return JsonResponse('Failed to Add.', safe=False, status=404)\n elif request.method == 'PUT':\n contact_data = JSONParser().parse(request)\n contact = Contacts.objects.get(phone=contact_data['phone'])\n contact_serializer = ContactSerializer(contact, data=contact_data)\n if contact_serializer.is_valid():\n contact_serializer.save()\n return JsonResponse('Updated Successfully!!', safe=False)\n return JsonResponse('Failed to Update.', safe=False)\n elif request.method == 'DELETE':\n contact = Contacts.objects.get(phone=phone)\n contact.delete()\n return JsonResponse('Deleted Succeffully!!', safe=False)\n",
"step-2": "<mask token>\n\n\n@csrf_exempt\ndef TBGRApi(request, tbgrno=0):\n if request.method == 'GET':\n tbgrs = TBGR.objects.all()\n tbgrs_serializer = TBGRSerializer(tbgrs, many=True)\n return JsonResponse(tbgrs_serializer.data, safe=False)\n elif request.method == 'POST':\n tbgr_data = JSONParser().parse(request)\n tbgr_serializer = TBGRSerializer(data=tbgr_data)\n if tbgr_serializer.is_valid():\n tbgr_serializer.save()\n return JsonResponse('Added Successfully!!', safe=False)\n return JsonResponse('Failed to Add.', safe=False)\n elif request.method == 'PUT':\n tbgr_data = JSONParser().parse(request)\n tbgr = TBGR.objects.get(tbgrno=tbgr_data['tbgrno'])\n tbgr_serializer = TBGRSerializer(tbgr, data=tbgr_data)\n if tbgr_serializer.is_valid():\n tbgr_serializer.save()\n return JsonResponse('Updated Successfully!!', safe=False)\n return JsonResponse('Failed to Update.', safe=False)\n elif request.method == 'DELETE':\n tbgr = TBGR.objects.get(tbgrno=tbgrno)\n tbgr.delete()\n return JsonResponse('Deleted Succeffully!!', safe=False)\n\n\n<mask token>\n\n\n@csrf_exempt\ndef VillageApi(request, villageid=0):\n if request.method == 'GET':\n villages = Village.objects.all()\n villages_serializer = VillageSerializer(villages, many=True)\n return JsonResponse(villages_serializer.data, safe=False)\n elif request.method == 'POST':\n village_data = JSONParser().parse(request)\n village_serializer = VillageSerializer(data=village_data)\n if village_serializer.is_valid():\n village_serializer.save()\n return JsonResponse('Added Successfully!!', safe=False)\n return JsonResponse('Failed to Add.', safe=False)\n elif request.method == 'PUT':\n village_data = JSONParser().parse(request)\n village = Village.objects.get(villageid=village_data['villageid'])\n village_serializer = VillageSerializer(village, data=village_data)\n if village_serializer.is_valid():\n village_serializer.save()\n return JsonResponse('Updated Successfully!!', safe=False)\n return JsonResponse('Failed to Update.', safe=False)\n elif request.method == 'DELETE':\n village = Village.objects.get(villageid=villageid)\n village.delete()\n return JsonResponse('Deleted Succeffully!!', safe=False)\n\n\n<mask token>\n\n\n@csrf_exempt\ndef GradeApi(request):\n if request.method == 'GET':\n grades = Grades.objects.all()\n grades_serializer = GradeSerializer(grades, many=True)\n return JsonResponse(grades_serializer.data, safe=False)\n\n\n@csrf_exempt\ndef ContactApi(request, phone=0):\n if request.method == 'GET':\n contacts = Contacts.objects.all()\n contacts_serializer = ContactSerializer(contacts, many=True)\n return JsonResponse(contacts_serializer.data, safe=False)\n elif request.method == 'POST':\n contact_data = JSONParser().parse(request)\n contact_serializer = ContactSerializer(data=contact_data)\n if contact_serializer.is_valid():\n contact_serializer.save()\n return JsonResponse('Added Successfully!!', safe=False)\n return JsonResponse('Failed to Add.', safe=False, status=404)\n elif request.method == 'PUT':\n contact_data = JSONParser().parse(request)\n contact = Contacts.objects.get(phone=contact_data['phone'])\n contact_serializer = ContactSerializer(contact, data=contact_data)\n if contact_serializer.is_valid():\n contact_serializer.save()\n return JsonResponse('Updated Successfully!!', safe=False)\n return JsonResponse('Failed to Update.', safe=False)\n elif request.method == 'DELETE':\n contact = Contacts.objects.get(phone=phone)\n contact.delete()\n return JsonResponse('Deleted Succeffully!!', safe=False)\n",
"step-3": "<mask token>\n\n\n@csrf_exempt\ndef TBGRApi(request, tbgrno=0):\n if request.method == 'GET':\n tbgrs = TBGR.objects.all()\n tbgrs_serializer = TBGRSerializer(tbgrs, many=True)\n return JsonResponse(tbgrs_serializer.data, safe=False)\n elif request.method == 'POST':\n tbgr_data = JSONParser().parse(request)\n tbgr_serializer = TBGRSerializer(data=tbgr_data)\n if tbgr_serializer.is_valid():\n tbgr_serializer.save()\n return JsonResponse('Added Successfully!!', safe=False)\n return JsonResponse('Failed to Add.', safe=False)\n elif request.method == 'PUT':\n tbgr_data = JSONParser().parse(request)\n tbgr = TBGR.objects.get(tbgrno=tbgr_data['tbgrno'])\n tbgr_serializer = TBGRSerializer(tbgr, data=tbgr_data)\n if tbgr_serializer.is_valid():\n tbgr_serializer.save()\n return JsonResponse('Updated Successfully!!', safe=False)\n return JsonResponse('Failed to Update.', safe=False)\n elif request.method == 'DELETE':\n tbgr = TBGR.objects.get(tbgrno=tbgrno)\n tbgr.delete()\n return JsonResponse('Deleted Succeffully!!', safe=False)\n\n\n<mask token>\n\n\n@csrf_exempt\ndef VillageApi(request, villageid=0):\n if request.method == 'GET':\n villages = Village.objects.all()\n villages_serializer = VillageSerializer(villages, many=True)\n return JsonResponse(villages_serializer.data, safe=False)\n elif request.method == 'POST':\n village_data = JSONParser().parse(request)\n village_serializer = VillageSerializer(data=village_data)\n if village_serializer.is_valid():\n village_serializer.save()\n return JsonResponse('Added Successfully!!', safe=False)\n return JsonResponse('Failed to Add.', safe=False)\n elif request.method == 'PUT':\n village_data = JSONParser().parse(request)\n village = Village.objects.get(villageid=village_data['villageid'])\n village_serializer = VillageSerializer(village, data=village_data)\n if village_serializer.is_valid():\n village_serializer.save()\n return JsonResponse('Updated Successfully!!', safe=False)\n return JsonResponse('Failed to Update.', safe=False)\n elif request.method == 'DELETE':\n village = Village.objects.get(villageid=villageid)\n village.delete()\n return JsonResponse('Deleted Succeffully!!', safe=False)\n\n\n@csrf_exempt\ndef SlipApi(request, lotno=0):\n if request.method == 'GET':\n slips = Slip.objects.all()\n slips_serializer = SlipSerializer(slips, many=True)\n return JsonResponse(slips_serializer.data, safe=False)\n elif request.method == 'POST':\n slip_data = JSONParser().parse(request)\n slip_serializer = SlipSerializer(data=slip_data)\n if slip_serializer.is_valid():\n slip_serializer.save()\n return JsonResponse('Added Successfully!!', safe=False)\n return JsonResponse('Failed to Add.', safe=False)\n elif request.method == 'PUT':\n slip_data = JSONParser().parse(request)\n slip = Slip.objects.get(lotno=slip_data['lotno'])\n slip_serializer = SlipSerializer(slip, data=slip_data)\n if slip_serializer.is_valid():\n slip_serializer.save()\n return JsonResponse('Updated Successfully!!', safe=False)\n return JsonResponse('Failed to Update.', safe=False)\n elif request.method == 'DELETE':\n slip = Slip.objects.get(lotno=lotno)\n slip.delete()\n return JsonResponse('Deleted Succeffully!!', safe=False)\n\n\n@csrf_exempt\ndef GradeApi(request):\n if request.method == 'GET':\n grades = Grades.objects.all()\n grades_serializer = GradeSerializer(grades, many=True)\n return JsonResponse(grades_serializer.data, safe=False)\n\n\n@csrf_exempt\ndef ContactApi(request, phone=0):\n if request.method == 'GET':\n contacts = Contacts.objects.all()\n contacts_serializer = ContactSerializer(contacts, many=True)\n return JsonResponse(contacts_serializer.data, safe=False)\n elif request.method == 'POST':\n contact_data = JSONParser().parse(request)\n contact_serializer = ContactSerializer(data=contact_data)\n if contact_serializer.is_valid():\n contact_serializer.save()\n return JsonResponse('Added Successfully!!', safe=False)\n return JsonResponse('Failed to Add.', safe=False, status=404)\n elif request.method == 'PUT':\n contact_data = JSONParser().parse(request)\n contact = Contacts.objects.get(phone=contact_data['phone'])\n contact_serializer = ContactSerializer(contact, data=contact_data)\n if contact_serializer.is_valid():\n contact_serializer.save()\n return JsonResponse('Updated Successfully!!', safe=False)\n return JsonResponse('Failed to Update.', safe=False)\n elif request.method == 'DELETE':\n contact = Contacts.objects.get(phone=phone)\n contact.delete()\n return JsonResponse('Deleted Succeffully!!', safe=False)\n",
"step-4": "from django.shortcuts import render\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.parsers import JSONParser\nfrom django.http import JsonResponse, Http404\nfrom .serializers import *\nfrom .models import *\nfrom .filter import *\nfrom rest_framework import generics\nfrom rest_framework.filters import SearchFilter, OrderingFilter\n\n\n@csrf_exempt\ndef TBGRApi(request, tbgrno=0):\n if request.method == 'GET':\n tbgrs = TBGR.objects.all()\n tbgrs_serializer = TBGRSerializer(tbgrs, many=True)\n return JsonResponse(tbgrs_serializer.data, safe=False)\n elif request.method == 'POST':\n tbgr_data = JSONParser().parse(request)\n tbgr_serializer = TBGRSerializer(data=tbgr_data)\n if tbgr_serializer.is_valid():\n tbgr_serializer.save()\n return JsonResponse('Added Successfully!!', safe=False)\n return JsonResponse('Failed to Add.', safe=False)\n elif request.method == 'PUT':\n tbgr_data = JSONParser().parse(request)\n tbgr = TBGR.objects.get(tbgrno=tbgr_data['tbgrno'])\n tbgr_serializer = TBGRSerializer(tbgr, data=tbgr_data)\n if tbgr_serializer.is_valid():\n tbgr_serializer.save()\n return JsonResponse('Updated Successfully!!', safe=False)\n return JsonResponse('Failed to Update.', safe=False)\n elif request.method == 'DELETE':\n tbgr = TBGR.objects.get(tbgrno=tbgrno)\n tbgr.delete()\n return JsonResponse('Deleted Succeffully!!', safe=False)\n\n\n@csrf_exempt\ndef BoardApi(request):\n if request.method == 'GET':\n boards = Board.objects.all()\n boards_serializer = BoardSerializer(boards, many=True)\n return JsonResponse(boards_serializer.data, safe=False)\n\n\n@csrf_exempt\ndef VillageApi(request, villageid=0):\n if request.method == 'GET':\n villages = Village.objects.all()\n villages_serializer = VillageSerializer(villages, many=True)\n return JsonResponse(villages_serializer.data, safe=False)\n elif request.method == 'POST':\n village_data = JSONParser().parse(request)\n village_serializer = VillageSerializer(data=village_data)\n if village_serializer.is_valid():\n village_serializer.save()\n return JsonResponse('Added Successfully!!', safe=False)\n return JsonResponse('Failed to Add.', safe=False)\n elif request.method == 'PUT':\n village_data = JSONParser().parse(request)\n village = Village.objects.get(villageid=village_data['villageid'])\n village_serializer = VillageSerializer(village, data=village_data)\n if village_serializer.is_valid():\n village_serializer.save()\n return JsonResponse('Updated Successfully!!', safe=False)\n return JsonResponse('Failed to Update.', safe=False)\n elif request.method == 'DELETE':\n village = Village.objects.get(villageid=villageid)\n village.delete()\n return JsonResponse('Deleted Succeffully!!', safe=False)\n\n\n@csrf_exempt\ndef SlipApi(request, lotno=0):\n if request.method == 'GET':\n slips = Slip.objects.all()\n slips_serializer = SlipSerializer(slips, many=True)\n return JsonResponse(slips_serializer.data, safe=False)\n elif request.method == 'POST':\n slip_data = JSONParser().parse(request)\n slip_serializer = SlipSerializer(data=slip_data)\n if slip_serializer.is_valid():\n slip_serializer.save()\n return JsonResponse('Added Successfully!!', safe=False)\n return JsonResponse('Failed to Add.', safe=False)\n elif request.method == 'PUT':\n slip_data = JSONParser().parse(request)\n slip = Slip.objects.get(lotno=slip_data['lotno'])\n slip_serializer = SlipSerializer(slip, data=slip_data)\n if slip_serializer.is_valid():\n slip_serializer.save()\n return JsonResponse('Updated Successfully!!', safe=False)\n return JsonResponse('Failed to Update.', safe=False)\n elif request.method == 'DELETE':\n slip = Slip.objects.get(lotno=lotno)\n slip.delete()\n return JsonResponse('Deleted Succeffully!!', safe=False)\n\n\n@csrf_exempt\ndef GradeApi(request):\n if request.method == 'GET':\n grades = Grades.objects.all()\n grades_serializer = GradeSerializer(grades, many=True)\n return JsonResponse(grades_serializer.data, safe=False)\n\n\n@csrf_exempt\ndef ContactApi(request, phone=0):\n if request.method == 'GET':\n contacts = Contacts.objects.all()\n contacts_serializer = ContactSerializer(contacts, many=True)\n return JsonResponse(contacts_serializer.data, safe=False)\n elif request.method == 'POST':\n contact_data = JSONParser().parse(request)\n contact_serializer = ContactSerializer(data=contact_data)\n if contact_serializer.is_valid():\n contact_serializer.save()\n return JsonResponse('Added Successfully!!', safe=False)\n return JsonResponse('Failed to Add.', safe=False, status=404)\n elif request.method == 'PUT':\n contact_data = JSONParser().parse(request)\n contact = Contacts.objects.get(phone=contact_data['phone'])\n contact_serializer = ContactSerializer(contact, data=contact_data)\n if contact_serializer.is_valid():\n contact_serializer.save()\n return JsonResponse('Updated Successfully!!', safe=False)\n return JsonResponse('Failed to Update.', safe=False)\n elif request.method == 'DELETE':\n contact = Contacts.objects.get(phone=phone)\n contact.delete()\n return JsonResponse('Deleted Succeffully!!', safe=False)\n",
"step-5": "from django.shortcuts import render\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.parsers import JSONParser\nfrom django.http import JsonResponse, Http404\nfrom .serializers import *\nfrom .models import *\nfrom .filter import *\nfrom rest_framework import generics\nfrom rest_framework.filters import SearchFilter, OrderingFilter\n\n# Create your views here.\n@csrf_exempt\ndef TBGRApi(request, tbgrno=0):\n if request.method == 'GET':\n tbgrs = TBGR.objects.all()\n tbgrs_serializer = TBGRSerializer(tbgrs, many=True)\n return JsonResponse(tbgrs_serializer.data, safe=False)\n\n elif request.method == 'POST':\n tbgr_data = JSONParser().parse(request)\n tbgr_serializer = TBGRSerializer(data=tbgr_data)\n if tbgr_serializer.is_valid():\n tbgr_serializer.save()\n return JsonResponse(\"Added Successfully!!\", safe=False)\n return JsonResponse(\"Failed to Add.\", safe=False)\n\n elif request.method == 'PUT':\n tbgr_data = JSONParser().parse(request)\n tbgr = TBGR.objects.get(tbgrno=tbgr_data['tbgrno'])\n tbgr_serializer = TBGRSerializer(tbgr, data=tbgr_data)\n if tbgr_serializer.is_valid():\n tbgr_serializer.save()\n return JsonResponse(\"Updated Successfully!!\", safe=False)\n return JsonResponse(\"Failed to Update.\", safe=False)\n\n elif request.method == 'DELETE':\n tbgr = TBGR.objects.get(tbgrno=tbgrno)\n tbgr.delete()\n return JsonResponse(\"Deleted Succeffully!!\", safe=False)\n\n@csrf_exempt\ndef BoardApi(request):\n if request.method=='GET':\n boards = Board.objects.all()\n boards_serializer = BoardSerializer(boards, many=True)\n return JsonResponse(boards_serializer.data, safe=False)\n\n@csrf_exempt\ndef VillageApi(request, villageid=0):\n if request.method == 'GET':\n villages = Village.objects.all()\n villages_serializer = VillageSerializer(villages, many=True)\n return JsonResponse(villages_serializer.data, safe=False)\n\n elif request.method == 'POST':\n village_data = JSONParser().parse(request)\n village_serializer = VillageSerializer(data=village_data)\n if village_serializer.is_valid():\n village_serializer.save()\n return JsonResponse(\"Added Successfully!!\", safe=False)\n return JsonResponse(\"Failed to Add.\", safe=False)\n\n elif request.method == 'PUT':\n village_data = JSONParser().parse(request)\n village = Village.objects.get(villageid=village_data['villageid'])\n village_serializer = VillageSerializer(village, data=village_data)\n if village_serializer.is_valid():\n village_serializer.save()\n return JsonResponse(\"Updated Successfully!!\", safe=False)\n return JsonResponse(\"Failed to Update.\", safe=False)\n\n elif request.method == 'DELETE':\n village = Village.objects.get(villageid=villageid)\n village.delete()\n return JsonResponse(\"Deleted Succeffully!!\", safe=False)\n\n@csrf_exempt\n\ndef SlipApi(request, lotno=0):\n if request.method == 'GET':\n slips = Slip.objects.all()\n slips_serializer = SlipSerializer(slips, many=True)\n return JsonResponse(slips_serializer.data, safe=False)\n\n elif request.method == 'POST':\n slip_data = JSONParser().parse(request)\n slip_serializer = SlipSerializer(data=slip_data)\n if slip_serializer.is_valid():\n slip_serializer.save()\n return JsonResponse(\"Added Successfully!!\", safe=False)\n return JsonResponse(\"Failed to Add.\", safe=False)\n\n # elif request.method == 'POST':\n # slip_data = JSONParser().parse(request)\n # slips = Slip.objects.all()\n # if slip_data['lotno']:\n # slips = slips.filter(lotno=slip_data['lotno'])\n # if slip_data['tbgrno']:\n # slips = slips.filter(tbgrno=slip_data['tbgrno'])\n # if slip_data['grade']:\n # slips = slips.filter(grade=slip_data['grade'])\n # slips_serializer = SlipSerializer(slips, many=True)\n # return JsonResponse(slips_serializer.data, safe=False)\n\n elif request.method == 'PUT':\n slip_data = JSONParser().parse(request)\n slip = Slip.objects.get(lotno=slip_data['lotno'])\n slip_serializer = SlipSerializer(slip, data=slip_data)\n if slip_serializer.is_valid():\n slip_serializer.save()\n return JsonResponse(\"Updated Successfully!!\", safe=False)\n return JsonResponse(\"Failed to Update.\", safe=False)\n\n elif request.method == 'DELETE':\n slip = Slip.objects.get(lotno=lotno)\n slip.delete()\n return JsonResponse(\"Deleted Succeffully!!\", safe=False)\n\n@csrf_exempt\ndef GradeApi(request):\n if request.method == 'GET':\n grades = Grades.objects.all()\n grades_serializer = GradeSerializer(grades, many=True)\n return JsonResponse(grades_serializer.data, safe=False)\n\n@csrf_exempt\ndef ContactApi(request, phone=0):\n if request.method == 'GET':\n contacts = Contacts.objects.all()\n contacts_serializer = ContactSerializer(contacts, many=True)\n return JsonResponse(contacts_serializer.data, safe=False)\n\n elif request.method == 'POST':\n contact_data = JSONParser().parse(request)\n contact_serializer = ContactSerializer(data=contact_data)\n if contact_serializer.is_valid():\n contact_serializer.save()\n return JsonResponse(\"Added Successfully!!\", safe=False)\n return JsonResponse(\"Failed to Add.\", safe=False, status=404)\n\n elif request.method == 'PUT':\n contact_data = JSONParser().parse(request)\n contact = Contacts.objects.get(phone=contact_data['phone'])\n contact_serializer = ContactSerializer(contact, data=contact_data)\n if contact_serializer.is_valid():\n contact_serializer.save()\n return JsonResponse(\"Updated Successfully!!\", safe=False)\n return JsonResponse(\"Failed to Update.\", safe=False)\n\n elif request.method == 'DELETE':\n contact = Contacts.objects.get(phone=phone)\n contact.delete()\n return JsonResponse(\"Deleted Succeffully!!\", safe=False)\n",
"step-ids": [
3,
4,
5,
7,
8
]
}
|
[
3,
4,
5,
7,
8
] |
# O(logn) T O(1) S
def binarySearch(array, target):
if len(array) == 0:
return -1
else:
return binarySearchR(array, target, 0, len(array) - 1)
def binarySearchR(array, target, leftPointer, rightPointer):
if leftPointer > rightPointer:
return -1
else:
midPointer = (leftPointer + rightPointer) // 2
if target == array[midPointer]:
return midPointer
elif target < array[midPointer]:
return binarySearchR(array, target, leftPointer, midPointer - 1)
else:
return binarySearchR(array, target, midPointer + 1, rightPointer)
|
normal
|
{
"blob_id": "57d6b9e7f48d32e5d10bfd6a340ea56281f5d82d",
"index": 1890,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef binarySearchR(array, target, leftPointer, rightPointer):\n if leftPointer > rightPointer:\n return -1\n else:\n midPointer = (leftPointer + rightPointer) // 2\n if target == array[midPointer]:\n return midPointer\n elif target < array[midPointer]:\n return binarySearchR(array, target, leftPointer, midPointer - 1)\n else:\n return binarySearchR(array, target, midPointer + 1, rightPointer)\n",
"step-3": "def binarySearch(array, target):\n if len(array) == 0:\n return -1\n else:\n return binarySearchR(array, target, 0, len(array) - 1)\n\n\ndef binarySearchR(array, target, leftPointer, rightPointer):\n if leftPointer > rightPointer:\n return -1\n else:\n midPointer = (leftPointer + rightPointer) // 2\n if target == array[midPointer]:\n return midPointer\n elif target < array[midPointer]:\n return binarySearchR(array, target, leftPointer, midPointer - 1)\n else:\n return binarySearchR(array, target, midPointer + 1, rightPointer)\n",
"step-4": "# O(logn) T O(1) S\ndef binarySearch(array, target):\n if len(array) == 0:\n return -1\n else:\n return binarySearchR(array, target, 0, len(array) - 1)\n\n\ndef binarySearchR(array, target, leftPointer, rightPointer):\n if leftPointer > rightPointer:\n return -1\n else:\n midPointer = (leftPointer + rightPointer) // 2\n if target == array[midPointer]:\n return midPointer\n elif target < array[midPointer]:\n return binarySearchR(array, target, leftPointer, midPointer - 1)\n else:\n return binarySearchR(array, target, midPointer + 1, rightPointer)\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
import scrapy
import re
class LeedsAcUkSpider(scrapy.Spider):
name = 'leeds_ac_uk'
allowed_domains = ['webprod3.leeds.ac.uk']
start_urls = ['http://webprod3.leeds.ac.uk/catalogue/dynmodules.asp?Y=201920&M=ANAT-3105']
def parse(self, response):
item = {}
item['Subject'] = response.css('div#module-programmes h2::text').get().split()[-1]
item['Subject short'] = response.css('div#module-programmes h2::text').get().split()[0].split('3')[0]
item['Subject code1'] = response.css('div#module-programmes h2::text').get().split()[0]
item['Topic'] = response.css('div#module-programmes h2::text').get().split('\n')[-1]
Syllabus = response.css('div#module-programmes')
|
normal
|
{
"blob_id": "fb4a95197882cc6fe72a5f3c2420a474d9cd97aa",
"index": 7751,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass LeedsAcUkSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n item = {}\n item['Subject'] = response.css('div#module-programmes h2::text').get(\n ).split()[-1]\n item['Subject short'] = response.css('div#module-programmes h2::text'\n ).get().split()[0].split('3')[0]\n item['Subject code1'] = response.css('div#module-programmes h2::text'\n ).get().split()[0]\n item['Topic'] = response.css('div#module-programmes h2::text').get(\n ).split('\\n')[-1]\n Syllabus = response.css('div#module-programmes')\n",
"step-3": "<mask token>\n\n\nclass LeedsAcUkSpider(scrapy.Spider):\n name = 'leeds_ac_uk'\n allowed_domains = ['webprod3.leeds.ac.uk']\n start_urls = [\n 'http://webprod3.leeds.ac.uk/catalogue/dynmodules.asp?Y=201920&M=ANAT-3105'\n ]\n\n def parse(self, response):\n item = {}\n item['Subject'] = response.css('div#module-programmes h2::text').get(\n ).split()[-1]\n item['Subject short'] = response.css('div#module-programmes h2::text'\n ).get().split()[0].split('3')[0]\n item['Subject code1'] = response.css('div#module-programmes h2::text'\n ).get().split()[0]\n item['Topic'] = response.css('div#module-programmes h2::text').get(\n ).split('\\n')[-1]\n Syllabus = response.css('div#module-programmes')\n",
"step-4": "import scrapy\nimport re\n\n\nclass LeedsAcUkSpider(scrapy.Spider):\n name = 'leeds_ac_uk'\n allowed_domains = ['webprod3.leeds.ac.uk']\n start_urls = [\n 'http://webprod3.leeds.ac.uk/catalogue/dynmodules.asp?Y=201920&M=ANAT-3105'\n ]\n\n def parse(self, response):\n item = {}\n item['Subject'] = response.css('div#module-programmes h2::text').get(\n ).split()[-1]\n item['Subject short'] = response.css('div#module-programmes h2::text'\n ).get().split()[0].split('3')[0]\n item['Subject code1'] = response.css('div#module-programmes h2::text'\n ).get().split()[0]\n item['Topic'] = response.css('div#module-programmes h2::text').get(\n ).split('\\n')[-1]\n Syllabus = response.css('div#module-programmes')\n",
"step-5": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nclass LeedsAcUkSpider(scrapy.Spider):\n name = 'leeds_ac_uk'\n allowed_domains = ['webprod3.leeds.ac.uk']\n start_urls = ['http://webprod3.leeds.ac.uk/catalogue/dynmodules.asp?Y=201920&M=ANAT-3105']\n\n def parse(self, response):\n item = {}\n item['Subject'] = response.css('div#module-programmes h2::text').get().split()[-1]\n item['Subject short'] = response.css('div#module-programmes h2::text').get().split()[0].split('3')[0]\n item['Subject code1'] = response.css('div#module-programmes h2::text').get().split()[0]\n item['Topic'] = response.css('div#module-programmes h2::text').get().split('\\n')[-1]\n Syllabus = response.css('div#module-programmes')",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.