| code (string, 22 – 1.05M chars) | apis (list, 1 – 3.31k entries) | extract_api (string, 75 – 3.25M chars) |
|---|---|---|
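Each row below pairs a source snippet (`code`) with the qualified API names it calls (`apis`) and per-call metadata (`extract_api`). As a reading aid, here is a minimal sketch of how an `extract_api` cell could be decoded. It assumes the cell is a well-formed Python literal and that the first two fields of each entry are the character span of the call and its fully qualified name, which is what the rows below show; the helper name `api_calls` is ours, not part of the dataset.

```python
import ast

def api_calls(extract_api_cell: str):
    """Yield (span, qualified_name) for every call recorded in one cell."""
    for entry in ast.literal_eval(extract_api_cell):
        span, qualified_name = entry[0], entry[1]
        # The remaining fields appear to carry the call text, its arguments,
        # and the originating import statement; their exact layout is not
        # documented here, so only the first two are relied on.
        yield span, qualified_name
```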
import argparse
import os
from tqdm import tqdm
from couch import db
from jpeg import get_QTs, identify_quality
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='+')
parser.add_argument('--type', default='JpgImg')
parser.add_argument('--dataset')
args = parser.parse_args()
for filename in tqdm(args.filenames):
data = {
"type": args.type,
"name": os.path.basename(filename),
"quantization": get_QTs(filename),
"quality": identify_quality(filename),
}
if args.dataset:
data["dataset"] = args.dataset
db.save(data)
|
[
"tqdm.tqdm",
"argparse.ArgumentParser",
"os.path.basename",
"couch.db.save",
"jpeg.identify_quality",
"jpeg.get_QTs"
] |
[((124, 149), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (147, 149), False, 'import argparse\n'), ((319, 339), 'tqdm.tqdm', 'tqdm', (['args.filenames'], {}), '(args.filenames)\n', (323, 339), False, 'from tqdm import tqdm\n'), ((587, 600), 'couch.db.save', 'db.save', (['data'], {}), '(data)\n', (594, 600), False, 'from couch import db\n'), ((397, 423), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (413, 423), False, 'import os\n'), ((449, 466), 'jpeg.get_QTs', 'get_QTs', (['filename'], {}), '(filename)\n', (456, 466), False, 'from jpeg import get_QTs, identify_quality\n'), ((487, 513), 'jpeg.identify_quality', 'identify_quality', (['filename'], {}), '(filename)\n', (503, 513), False, 'from jpeg import get_QTs, identify_quality\n')]
|
#!/usr/bin/env python3
# Author: @haithamaouati
# Version:1.0
import argparse
import colorama
import os
import requests
import time
from colorama import Fore, Back, Style
colorama.init()
os.system('cls' if os.name == 'nt' else 'clear')
print('''\
 _._     _,-'""`-._
(,-.`._,'(       |\`-/|
    `-.-' \ )-`( , o o)
          `-    \`_`"'-
''')
print(' Author: ' + Fore.CYAN + '@haithamaouati' + Fore.WHITE + ' Version: ' + Fore.YELLOW + '1.0\n' + Fore.WHITE)
print(' A simple admin panel finder tool\n')
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--url', metavar='<url>', type=str, help='URL website (e.g. http://127.0.0.1/)')
parser.add_argument('-w', '--wordlist', metavar='<wordlist>', type=str, help='Wordlist file (e.g. wordlist.txt)')
args = parser.parse_args()
if args.url is None or args.wordlist is None:
parser.print_help()
exit()
url = args.url
wordlist = args.wordlist
with open(wordlist,'r') as list:
for i in list:
time.sleep(1)
x = i.rstrip('\n')
check = requests.get(url + x)
try:
if check.status_code == 200:
with open("result.txt",'a') as result:
result.write(url + x +"\n")
print(Fore.GREEN + '[+] ' + Fore.WHITE + url + x + Fore.GREEN + ' [200]')
print(Fore.GREEN + '[*] ' + Fore.WHITE + 'Saved to: ' + Fore.YELLOW + 'result.txt')
result.close()
else:
print(Fore.RED + '[-] ' + Fore.WHITE + url + x + Fore.RED + ' [404]')
except ValueError:
print(Fore.RED + '[!] ' + Fore.WHITE + 'Something wrong')
|
[
"colorama.init",
"argparse.ArgumentParser",
"os.system",
"time.sleep",
"requests.get"
] |
[((184, 199), 'colorama.init', 'colorama.init', ([], {}), '()\n', (197, 199), False, 'import colorama\n'), ((203, 251), 'os.system', 'os.system', (["('cls' if os.name == 'nt' else 'clear')"], {}), "('cls' if os.name == 'nt' else 'clear')\n", (212, 251), False, 'import os\n'), ((550, 575), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (573, 575), False, 'import argparse\n'), ((1011, 1024), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1021, 1024), False, 'import time\n'), ((1062, 1083), 'requests.get', 'requests.get', (['(url + x)'], {}), '(url + x)\n', (1074, 1083), False, 'import requests\n')]
|
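A caveat about the panel finder above: `requests.get` is called outside the `try` block, and `ValueError` is not what `requests` raises on connection problems, so a network failure will abort the scan. Below is a hedged sketch of a more defensive probe; the function name `probe` and the timeout value are ours, not taken from the script.

```python
import requests

def probe(url: str, path: str, timeout: float = 5.0):
    """Return the HTTP status code for url + path, or None if the request fails."""
    try:
        response = requests.get(url + path, timeout=timeout)
    except requests.exceptions.RequestException:
        # Covers connection errors, timeouts, invalid URLs, etc.
        return None
    return response.status_code
```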
"""
Module for collection cog
"""
import math
import discord
from discord.ext import commands
from discord_slash import cog_ext, SlashContext
from discord_slash.utils.manage_commands import create_choice, create_option
from unsigned_bot import IMAGE_PATH
from unsigned_bot.constants import MAX_AMOUNT
from unsigned_bot.config import GUILD_IDS
from unsigned_bot.log import logger
from unsigned_bot.emojis import *
from unsigned_bot.draw import (
gen_grid,
delete_image_files
)
from unsigned_bot.matching import get_similar_unsigs
from unsigned_bot.parsing import get_numbers_from_string
from unsigned_bot.embedding import add_disclaimer
from unsigned_bot.cogs.checks import valid_channel, valid_unsig
from .embeds import embed_siblings, embed_collection_grid
class CollectionCog(commands.Cog, name="Collection"):
"""commands for your unsig collection"""
COG_EMOJI = EMOJI_FRAME
def __init__(self, bot: commands.Bot):
self.bot = bot
@cog_ext.cog_slash(
name="siblings",
description="show siblings of your unsig",
guild_ids=GUILD_IDS,
options=[
create_option(
name="number",
description="number of your unsig",
required=True,
option_type=3,
)
]
)
async def _siblings(self, ctx: SlashContext, number: str):
"show siblings of your unsig"
if not await valid_channel(ctx):
return
if not await valid_unsig(ctx, number):
return
collection_numbers = range(0,MAX_AMOUNT)
similar_unsigs = get_similar_unsigs(number, collection_numbers, structural=False)
siblings_numbers = list(set().union(*similar_unsigs.values()))
selected_numbers = [int(number), *siblings_numbers]
embed = embed_siblings(number, siblings_numbers, selected_numbers, self.bot.offers, cols=2)
if self.bot.offers and siblings_numbers:
add_disclaimer(embed, self.bot.offers_updated)
if not siblings_numbers:
await ctx.send(embed=embed)
return
try:
image_path = await gen_grid(selected_numbers, cols=2)
image_file = discord.File(image_path, filename="siblings.png")
embed.set_image(url="attachment://siblings.png")
delete_image_files(IMAGE_PATH)
except:
await ctx.send(content=f"I can't generate the siblings of your unsig.")
return
else:
await ctx.send(file=image_file, embed=embed)
@cog_ext.cog_slash(
name="show",
description="show collection of your unsigs",
guild_ids=GUILD_IDS,
options=[
create_option(
name="numbers",
description="Numbers of your unsigs",
required=True,
option_type=3,
),
create_option(
name="columns",
description="no. of unsigs side by side",
required=False,
option_type=3,
),
]
)
async def _show(self, ctx: SlashContext, numbers: str, columns: str = None):
"""show collection of your unsigs"""
if not await valid_channel(ctx):
return
unsig_numbers = get_numbers_from_string(numbers)
if not unsig_numbers:
await ctx.send(content=f"Please enter numbers of your unsigs")
return
numbers_cleaned = list()
for number in unsig_numbers:
try:
number = str(int(number))
except:
await ctx.send(content=f"unsig{number} does not exist!\nPlease enter number between 0 and {MAX_AMOUNT-1}.")
return
else:
numbers_cleaned.append(number)
LIMIT_DISPLAY = 20
if len(numbers_cleaned) > LIMIT_DISPLAY:
numbers_cleaned = numbers_cleaned[:LIMIT_DISPLAY]
if not columns:
columns = math.ceil(math.sqrt(len(numbers_cleaned)))
else:
try:
columns = int(columns)
except:
await ctx.send(content=f"Please enter the number of unsigs you want to show")
return
embed = embed_collection_grid(numbers_cleaned)
try:
image_path = await gen_grid(numbers_cleaned, columns)
image_file = discord.File(image_path, filename="collection.png")
embed.set_image(url="attachment://collection.png")
delete_image_files(IMAGE_PATH)
except:
await ctx.send(content=f"I can't generate the collection of your unsigs.")
return
else:
await ctx.send(file=image_file, embed=embed)
def setup(bot: commands.Bot):
bot.add_cog(CollectionCog(bot))
logger.debug(f"{CollectionCog.__name__} loaded")
|
[
"unsigned_bot.parsing.get_numbers_from_string",
"unsigned_bot.cogs.checks.valid_channel",
"discord.File",
"unsigned_bot.matching.get_similar_unsigs",
"discord_slash.utils.manage_commands.create_option",
"unsigned_bot.draw.delete_image_files",
"unsigned_bot.embedding.add_disclaimer",
"unsigned_bot.draw.gen_grid",
"unsigned_bot.log.logger.debug",
"unsigned_bot.cogs.checks.valid_unsig"
] |
[((4911, 4959), 'unsigned_bot.log.logger.debug', 'logger.debug', (['f"""{CollectionCog.__name__} loaded"""'], {}), "(f'{CollectionCog.__name__} loaded')\n", (4923, 4959), False, 'from unsigned_bot.log import logger\n'), ((1635, 1699), 'unsigned_bot.matching.get_similar_unsigs', 'get_similar_unsigs', (['number', 'collection_numbers'], {'structural': '(False)'}), '(number, collection_numbers, structural=False)\n', (1653, 1699), False, 'from unsigned_bot.matching import get_similar_unsigs\n'), ((3363, 3395), 'unsigned_bot.parsing.get_numbers_from_string', 'get_numbers_from_string', (['numbers'], {}), '(numbers)\n', (3386, 3395), False, 'from unsigned_bot.parsing import get_numbers_from_string\n'), ((2003, 2049), 'unsigned_bot.embedding.add_disclaimer', 'add_disclaimer', (['embed', 'self.bot.offers_updated'], {}), '(embed, self.bot.offers_updated)\n', (2017, 2049), False, 'from unsigned_bot.embedding import add_disclaimer\n'), ((2248, 2297), 'discord.File', 'discord.File', (['image_path'], {'filename': '"""siblings.png"""'}), "(image_path, filename='siblings.png')\n", (2260, 2297), False, 'import discord\n'), ((2372, 2402), 'unsigned_bot.draw.delete_image_files', 'delete_image_files', (['IMAGE_PATH'], {}), '(IMAGE_PATH)\n', (2390, 2402), False, 'from unsigned_bot.draw import gen_grid, delete_image_files\n'), ((4487, 4538), 'discord.File', 'discord.File', (['image_path'], {'filename': '"""collection.png"""'}), "(image_path, filename='collection.png')\n", (4499, 4538), False, 'import discord\n'), ((4615, 4645), 'unsigned_bot.draw.delete_image_files', 'delete_image_files', (['IMAGE_PATH'], {}), '(IMAGE_PATH)\n', (4633, 4645), False, 'from unsigned_bot.draw import gen_grid, delete_image_files\n'), ((1446, 1464), 'unsigned_bot.cogs.checks.valid_channel', 'valid_channel', (['ctx'], {}), '(ctx)\n', (1459, 1464), False, 'from unsigned_bot.cogs.checks import valid_channel, valid_unsig\n'), ((1515, 1539), 'unsigned_bot.cogs.checks.valid_unsig', 'valid_unsig', (['ctx', 'number'], {}), '(ctx, number)\n', (1526, 1539), False, 'from unsigned_bot.cogs.checks import valid_channel, valid_unsig\n'), ((2188, 2222), 'unsigned_bot.draw.gen_grid', 'gen_grid', (['selected_numbers'], {'cols': '(2)'}), '(selected_numbers, cols=2)\n', (2196, 2222), False, 'from unsigned_bot.draw import gen_grid, delete_image_files\n'), ((1127, 1226), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""number"""', 'description': '"""number of your unsig"""', 'required': '(True)', 'option_type': '(3)'}), "(name='number', description='number of your unsig', required=\n True, option_type=3)\n", (1140, 1226), False, 'from discord_slash.utils.manage_commands import create_choice, create_option\n'), ((3299, 3317), 'unsigned_bot.cogs.checks.valid_channel', 'valid_channel', (['ctx'], {}), '(ctx)\n', (3312, 3317), False, 'from unsigned_bot.cogs.checks import valid_channel, valid_unsig\n'), ((4427, 4461), 'unsigned_bot.draw.gen_grid', 'gen_grid', (['numbers_cleaned', 'columns'], {}), '(numbers_cleaned, columns)\n', (4435, 4461), False, 'from unsigned_bot.draw import gen_grid, delete_image_files\n'), ((2762, 2863), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""numbers"""', 'description': '"""Numbers of your unsigs"""', 'required': '(True)', 'option_type': '(3)'}), "(name='numbers', description='Numbers of your unsigs',\n required=True, option_type=3)\n", (2775, 2863), False, 'from discord_slash.utils.manage_commands import create_choice, create_option\n'), ((2952, 3058), 
'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""columns"""', 'description': '"""no. of unsigs side by side"""', 'required': '(False)', 'option_type': '(3)'}), "(name='columns', description='no. of unsigs side by side',\n required=False, option_type=3)\n", (2965, 3058), False, 'from discord_slash.utils.manage_commands import create_choice, create_option\n')]
|
import datetime
import logging
import tushare as ts
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow import keras
import tensorflow as tf
from ts.build_model import BuildModel
from ts.db_utils import get_daily_by_trade_date
from ts.simulation_history import SimulationHistory
from ts.st_history_data import x_train_col_index
class Change(SimulationHistory):
model_cache = {}
t1_predictions = None
t0_predictions = 0
t0_index = ''
def is_sell(self, index, row):
logging.debug('index: %s, date: %s', index, row['date'])
today = datetime.datetime.strptime(row['date'], '%Y-%m-%d %H:%M:%S')
df = get_daily_by_trade_date(self.get_code(), today.strftime('%Y%m%d'))
change_predictions, true_predictions = self.predictions(df, ['open', 'high', 'low', 'close'], 'pct_chg',
self.get_code() + '_pct_chg_model.h5')
logging.debug('change_predictions:%s, true_predictions:%s', change_predictions, true_predictions)
if len(df) == 0:
return False
if self.t0_predictions is None:
return False
if self.t0_predictions <= 0:
return False
logging.debug('row[ma5] * (1+self.t0_predictions/100) :%s, ma5: %s, price:%s', row['ma5'] * (1+self.t0_predictions/100), row['ma5'], row['close'])
return row['close'] > row['ma5'] * (1+self.t0_predictions/100)
def is_buy(self, index, row):
logging.debug('index: %s, date: %s', index, row['date'])
today = datetime.datetime.strptime(row['date'], '%Y-%m-%d %H:%M:%S')
df = get_daily_by_trade_date(self.get_code(), today.strftime('%Y%m%d'))
change_predictions, true_predictions = self.predictions(df, ['open', 'high', 'low', 'close'], 'pct_chg',
self.get_code() + '_pct_chg_model.h5')
self.t0_predictions = change_predictions
logging.debug('change_predictions:%s, true_predictions:%s', change_predictions, true_predictions)
if self.t0_index != index:
self.t1_predictions = self.t0_predictions
self.t0_index = index
if len(df) == 0:
return False
if self.t0_predictions <= 0:
return False
logging.debug('row[ma5] * (1-change_predictions/100) :%s, ma5: %s, price:%s', row['ma5'] * (1-self.t0_predictions/100), row['ma5'], row['close'])
return row['close'] < row['ma5'] * (1-self.t0_predictions/100)
def predictions(self, df, column_names, label_name, module_name):
columns = df.columns.values.tolist()
stock_data = np.array(df)
x_train_col = x_train_col_index(columns, column_names)
y_train_col = x_train_col_index(columns, [label_name])[0]
x = np.array(stock_data[:, x_train_col])
y = np.array(stock_data[:, y_train_col])
if len(x) == 0:
return 0, 0
model = self.model_cache.get(module_name)
if model is None:
model = keras.models.load_model(module_name)
optimizer = tf.train.RMSPropOptimizer(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae'])
self.model_cache[module_name] = model
predictions = model.predict(x).flatten()[0]/10 + 1.5
return predictions, y[0]
|
[
"logging.debug",
"tensorflow.keras.models.load_model",
"tensorflow.train.RMSPropOptimizer",
"datetime.datetime.strptime",
"ts.st_history_data.x_train_col_index",
"numpy.array"
] |
[((532, 588), 'logging.debug', 'logging.debug', (['"""index: %s, date: %s"""', 'index', "row['date']"], {}), "('index: %s, date: %s', index, row['date'])\n", (545, 588), False, 'import logging\n'), ((605, 665), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["row['date']", '"""%Y-%m-%d %H:%M:%S"""'], {}), "(row['date'], '%Y-%m-%d %H:%M:%S')\n", (631, 665), False, 'import datetime\n'), ((966, 1067), 'logging.debug', 'logging.debug', (['"""change_predictions:%s, true_predictions:%s"""', 'change_predictions', 'true_predictions'], {}), "('change_predictions:%s, true_predictions:%s',\n change_predictions, true_predictions)\n", (979, 1067), False, 'import logging\n'), ((1253, 1407), 'logging.debug', 'logging.debug', (['"""row[ma5] * (1+self.t0_predictions/100) :%s, ma5: %s, price:%s"""', "(row['ma5'] * (1 + self.t0_predictions / 100))", "row['ma5']", "row['close']"], {}), "('row[ma5] * (1+self.t0_predictions/100) :%s, ma5: %s, price:%s',\n row['ma5'] * (1 + self.t0_predictions / 100), row['ma5'], row['close'])\n", (1266, 1407), False, 'import logging\n'), ((1515, 1571), 'logging.debug', 'logging.debug', (['"""index: %s, date: %s"""', 'index', "row['date']"], {}), "('index: %s, date: %s', index, row['date'])\n", (1528, 1571), False, 'import logging\n'), ((1588, 1648), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["row['date']", '"""%Y-%m-%d %H:%M:%S"""'], {}), "(row['date'], '%Y-%m-%d %H:%M:%S')\n", (1614, 1648), False, 'import datetime\n'), ((1999, 2100), 'logging.debug', 'logging.debug', (['"""change_predictions:%s, true_predictions:%s"""', 'change_predictions', 'true_predictions'], {}), "('change_predictions:%s, true_predictions:%s',\n change_predictions, true_predictions)\n", (2012, 2100), False, 'import logging\n'), ((2344, 2497), 'logging.debug', 'logging.debug', (['"""row[ma5] * (1-change_predictions/100) :%s, ma5: %s, price:%s"""', "(row['ma5'] * (1 - self.t0_predictions / 100))", "row['ma5']", "row['close']"], {}), "('row[ma5] * (1-change_predictions/100) :%s, ma5: %s, price:%s',\n row['ma5'] * (1 - self.t0_predictions / 100), row['ma5'], row['close'])\n", (2357, 2497), False, 'import logging\n'), ((2699, 2711), 'numpy.array', 'np.array', (['df'], {}), '(df)\n', (2707, 2711), True, 'import numpy as np\n'), ((2735, 2775), 'ts.st_history_data.x_train_col_index', 'x_train_col_index', (['columns', 'column_names'], {}), '(columns, column_names)\n', (2752, 2775), False, 'from ts.st_history_data import x_train_col_index\n'), ((2855, 2891), 'numpy.array', 'np.array', (['stock_data[:, x_train_col]'], {}), '(stock_data[:, x_train_col])\n', (2863, 2891), True, 'import numpy as np\n'), ((2904, 2940), 'numpy.array', 'np.array', (['stock_data[:, y_train_col]'], {}), '(stock_data[:, y_train_col])\n', (2912, 2940), True, 'import numpy as np\n'), ((2798, 2838), 'ts.st_history_data.x_train_col_index', 'x_train_col_index', (['columns', '[label_name]'], {}), '(columns, [label_name])\n', (2815, 2838), False, 'from ts.st_history_data import x_train_col_index\n'), ((3088, 3124), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['module_name'], {}), '(module_name)\n', (3111, 3124), False, 'from tensorflow import keras\n'), ((3150, 3182), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['(0.001)'], {}), '(0.001)\n', (3175, 3182), True, 'import tensorflow as tf\n')]
|
import numpy as np
from tensorflow.keras.models import load_model
from buffer import (
ReplayBuffer,
build
)
class DQNAgent:
def __init__(self, alpha, gamma, n_actions, epsilon,
batch_size, input_dims, fc1_dims, fc2_dims, epsilon_dec=0.996,
epsilon_end=0.01, mem_size=1000000,
fname='dqn_model.h5'):
self.action_space = [i for i in range(n_actions)]
self.n_actions = n_actions
self.gamma = gamma
self.epsilon = epsilon
self.epsilon_dec = epsilon_dec
self.epsilon_min = epsilon_end
self.batch_size = batch_size
self.model_file = fname
self.memory = ReplayBuffer(mem_size, input_dims, n_actions, discrete=True)
self.q_eval = build(alpha, n_actions, input_dims, fc1_dims, fc2_dims)
def remember(self, state, action, reward, new_state, done):
self.memory.store_transition(state, action, reward, new_state, done)
def choose_action(self, state):
"""
Choose action given state of the game.
"""
state = state[np.newaxis, :]
# epsilon greedy
rand = np.random.random()
if rand < self.epsilon:
action = np.random.choice(self.action_space)
else:
actions = self.q_eval.predict(state)
action = np.argmax(actions)
return action
def learn(self):
if self.memory.mem_cntr < self.batch_size:
return
state, action, reward, new_state, done = self.memory.sample_buffer(self.batch_size)
action_values = np.array(self.action_space, dtype=np.int8)
action_indices = np.dot(action, action_values)
q_eval = self.q_eval.predict(state)
q_next = self.q_eval.predict(new_state)
q_target = q_eval.copy()
batch_index = np.arange(self.batch_size, dtype=np.int32)
q_target[batch_index, action_indices] = reward + self.gamma * np.max(q_next, axis=1) * done
_ = self.q_eval.fit(state, q_target, verbose=0)
self.epsilon = self.epsilon*self.epsilon_dec if self.epsilon > self.epsilon_min else self.epsilon_min
def save_model(self):
self.q_eval.save(self.model_file)
def load_model(self):
self.q_eval = load_model(self.model_file)
|
[
"tensorflow.keras.models.load_model",
"numpy.argmax",
"buffer.build",
"numpy.max",
"numpy.random.random",
"numpy.array",
"numpy.arange",
"numpy.random.choice",
"numpy.dot",
"buffer.ReplayBuffer"
] |
[((717, 777), 'buffer.ReplayBuffer', 'ReplayBuffer', (['mem_size', 'input_dims', 'n_actions'], {'discrete': '(True)'}), '(mem_size, input_dims, n_actions, discrete=True)\n', (729, 777), False, 'from buffer import ReplayBuffer, build\n'), ((809, 864), 'buffer.build', 'build', (['alpha', 'n_actions', 'input_dims', 'fc1_dims', 'fc2_dims'], {}), '(alpha, n_actions, input_dims, fc1_dims, fc2_dims)\n', (814, 864), False, 'from buffer import ReplayBuffer, build\n'), ((1222, 1240), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1238, 1240), True, 'import numpy as np\n'), ((1707, 1749), 'numpy.array', 'np.array', (['self.action_space'], {'dtype': 'np.int8'}), '(self.action_space, dtype=np.int8)\n', (1715, 1749), True, 'import numpy as np\n'), ((1775, 1804), 'numpy.dot', 'np.dot', (['action', 'action_values'], {}), '(action, action_values)\n', (1781, 1804), True, 'import numpy as np\n'), ((1970, 2012), 'numpy.arange', 'np.arange', (['self.batch_size'], {'dtype': 'np.int32'}), '(self.batch_size, dtype=np.int32)\n', (1979, 2012), True, 'import numpy as np\n'), ((2436, 2463), 'tensorflow.keras.models.load_model', 'load_model', (['self.model_file'], {}), '(self.model_file)\n', (2446, 2463), False, 'from tensorflow.keras.models import load_model\n'), ((1294, 1329), 'numpy.random.choice', 'np.random.choice', (['self.action_space'], {}), '(self.action_space)\n', (1310, 1329), True, 'import numpy as np\n'), ((1414, 1432), 'numpy.argmax', 'np.argmax', (['actions'], {}), '(actions)\n', (1423, 1432), True, 'import numpy as np\n'), ((2083, 2105), 'numpy.max', 'np.max', (['q_next'], {'axis': '(1)'}), '(q_next, axis=1)\n', (2089, 2105), True, 'import numpy as np\n')]
|
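A minimal sketch of how an agent like the one above is typically driven, assuming a classic Gym-style environment (reset returns an observation, step returns a 4-tuple) and that the class is importable from a module; the module name `dqn_agent`, the environment, and the hyperparameters are placeholders, not values from the source.

```python
import gym
from dqn_agent import DQNAgent  # assumption: the class above lives in dqn_agent.py

env = gym.make('LunarLander-v2')     # 8-dim observations, 4 discrete actions
agent = DQNAgent(alpha=0.0005, gamma=0.99, n_actions=4, epsilon=1.0,
                 batch_size=64, input_dims=8, fc1_dims=256, fc2_dims=256)

for episode in range(500):
    observation = env.reset()      # classic Gym API: reset() returns the observation
    done = False
    while not done:
        action = agent.choose_action(observation)
        next_observation, reward, done, info = env.step(action)
        agent.remember(observation, action, reward, next_observation, done)
        agent.learn()              # trains once the replay buffer holds a full batch
        observation = next_observation
```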
from __future__ import print_function, division
from sympy.core.containers import Tuple
from sympy.core.core import C
from sympy.core.expr import Expr
from sympy.core.mul import Mul
from sympy.core.singleton import S
from sympy.core.sympify import sympify
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.polys import quo, roots
from sympy.simplify import powsimp
from sympy.core.compatibility import xrange
class Product(Expr):
r"""Represents unevaluated products.
``Product`` represents a finite or infinite product, with the first
argument being the general form of terms in the series, and the second
argument being ``(dummy_variable, start, end)``, with ``dummy_variable``
taking all integer values from ``start`` through ``end``. In accordance
with long-standing mathematical convention, the end term is included in
the product.
Finite products
===============
For finite products (and products with symbolic limits assumed to be finite)
we follow the analogue of the summation convention described by Karr [1],
especially definition 3 of section 1.4. The product:
.. math::
\prod_{m \leq i < n} f(i)
has *the obvious meaning* for `m < n`, namely:
.. math::
\prod_{m \leq i < n} f(i) = f(m) f(m+1) \cdot \ldots \cdot f(n-2) f(n-1)
with the upper limit value `f(n)` excluded. The product over an empty set is
one if and only if `m = n`:
.. math::
\prod_{m \leq i < n} f(i) = 1 \quad \mathrm{for} \quad m = n
Finally, for all other products over empty sets we assume the following
definition:
.. math::
\prod_{m \leq i < n} f(i) = \frac{1}{\prod_{n \leq i < m} f(i)} \quad \mathrm{for} \quad m > n
It is important to note that above we define all products with the upper
limit being exclusive. This is in contrast to the usual mathematical notation,
but does not affect the product convention. Indeed we have:
.. math::
\prod_{m \leq i < n} f(i) = \prod_{i = m}^{n - 1} f(i)
where the difference in notation is intentional to emphasize the meaning,
with limits typeset on the top being inclusive.
Examples
========
>>> from sympy.abc import a, b, i, k, m, n, x
>>> from sympy import Product, factorial, oo
>>> Product(k,(k,1,m))
Product(k, (k, 1, m))
>>> Product(k,(k,1,m)).doit()
factorial(m)
>>> Product(k**2,(k,1,m))
Product(k**2, (k, 1, m))
>>> Product(k**2,(k,1,m)).doit()
(factorial(m))**2
Wallis' product for pi:
>>> W = Product(2*i/(2*i-1) * 2*i/(2*i+1), (i, 1, oo))
>>> W
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
Direct computation currently fails:
>>> W.doit()
nan
But we can approach the infinite product by a limit of finite products:
>>> from sympy import limit
>>> W2 = Product(2*i/(2*i-1)*2*i/(2*i+1), (i, 1, n))
>>> W2
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, n))
>>> W2e = W2.doit()
>>> W2e
2**(-2*n)*4**n*(factorial(n))**2/(RisingFactorial(1/2, n)*RisingFactorial(3/2, n))
>>> limit(W2e, n, oo)
pi/2
By the same formula we can compute sin(pi/2):
>>> from sympy import pi, gamma, simplify
>>> P = pi * x * Product(1 - x**2/k**2,(k,1,n))
>>> P = P.subs(x, pi/2)
>>> P
pi**2*Product(1 - pi**2/(4*k**2), (k, 1, n))/2
>>> Pe = P.doit()
>>> Pe
pi**2*RisingFactorial(1 + pi/2, n)*RisingFactorial(-pi/2 + 1, n)/(2*(factorial(n))**2)
>>> Pe = Pe.rewrite(gamma)
>>> Pe
pi**2*gamma(n + 1 + pi/2)*gamma(n - pi/2 + 1)/(2*gamma(1 + pi/2)*gamma(-pi/2 + 1)*gamma(n + 1)**2)
>>> Pe = simplify(Pe)
>>> Pe
sin(pi**2/2)*gamma(n + 1 + pi/2)*gamma(n - pi/2 + 1)/gamma(n + 1)**2
>>> limit(Pe, n, oo)
sin(pi**2/2)
Products with the lower limit being larger than the upper one:
>>> Product(1/i, (i, 6, 1)).doit()
120
>>> Product(i, (i, 2, 5)).doit()
120
The empty product:
>>> Product(i, (i, n, n-1)).doit()
1
An example showing that the symbolic result of a product is still
valid for seemingly nonsensical values of the limits. Then the Karr
convention allows us to give a perfectly valid interpretation to
those products by interchanging the limits according to the above rules:
>>> P = Product(2, (i, 10, n)).doit()
>>> P
2**(n - 9)
>>> P.subs(n, 5)
1/16
>>> Product(2, (i, 10, 5)).doit()
1/16
>>> 1/Product(2, (i, 6, 9)).doit()
1/16
An explicit example of the Karr summation convention applied to products:
>>> P1 = Product(x, (i, a, b)).doit()
>>> P1
x**(-a + b + 1)
>>> P2 = Product(x, (i, b+1, a-1)).doit()
>>> P2
x**(a - b - 1)
>>> simplify(P1 * P2)
1
And another one:
>>> P1 = Product(i, (i, b, a)).doit()
>>> P1
RisingFactorial(b, a - b + 1)
>>> P2 = Product(i, (i, a+1, b-1)).doit()
>>> P2
RisingFactorial(a + 1, -a + b - 1)
>>> P1 * P2
RisingFactorial(b, a - b + 1)*RisingFactorial(a + 1, -a + b - 1)
>>> simplify(P1 * P2)
1
See Also
========
Sum, summation
product
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
.. [2] http://en.wikipedia.org/wiki/Multiplication#Capital_Pi_notation
.. [3] http://en.wikipedia.org/wiki/Empty_product
"""
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
from sympy.integrals.integrals import _process_limits
# Any embedded piecewise functions need to be brought out to the
# top level so that integration can go into piecewise mode at the
# earliest possible moment.
function = piecewise_fold(sympify(function))
if function is S.NaN:
return S.NaN
if not symbols:
raise ValueError("Product variables must be given")
limits, sign = _process_limits(*symbols)
# Only limits with lower and upper bounds are supported; the indefinite
# Product is not supported
if any(len(l) != 3 or None in l for l in limits):
raise ValueError(
'Product requires values for lower and upper bounds.')
obj = Expr.__new__(cls, **assumptions)
arglist = [sign*function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
@property
def term(self):
return self._args[0]
function = term
@property
def limits(self):
return self._args[1:]
@property
def variables(self):
"""Return a list of the product variables
>>> from sympy import Product
>>> from sympy.abc import x, i
>>> Product(x**i, (i, 1, 3)).variables
[i]
"""
return [l[0] for l in self.limits]
@property
def free_symbols(self):
"""
This method returns the symbols that will affect the value of
the Product when evaluated. This is useful if one is trying to
determine whether a product depends on a certain symbol or not.
>>> from sympy import Product
>>> from sympy.abc import x, y
>>> Product(x, (x, y, 1)).free_symbols
set([y])
"""
from sympy.integrals.integrals import _free_symbols
if self.function.is_zero or self.function == 1:
return set()
return _free_symbols(self)
@property
def is_zero(self):
"""A Product is zero only if its term is zero.
"""
return self.term.is_zero
@property
def is_number(self):
"""
Return True if the Product will result in a number, else False.
Examples
========
>>> from sympy import log, Product
>>> from sympy.abc import x, y, z
>>> log(2).is_number
True
>>> Product(x, (x, 1, 2)).is_number
True
>>> Product(y, (x, 1, 2)).is_number
False
>>> Product(1, (x, y, z)).is_number
True
>>> Product(2, (x, y, z)).is_number
False
"""
return self.function.is_zero or self.function == 1 or not self.free_symbols
def as_dummy(self):
from sympy.integrals.integrals import _as_dummy
return _as_dummy(self)
def doit(self, **hints):
f = self.function
for index, limit in enumerate(self.limits):
i, a, b = limit
dif = b - a
if dif.is_Integer and dif < 0:
a, b = b + 1, a - 1
f = 1 / f
g = self._eval_product(f, (i, a, b))
if g is None:
return self.func(powsimp(f), *self.limits[index:])
else:
f = g
if hints.get('deep', True):
return f.doit(**hints)
else:
return powsimp(f)
def _eval_adjoint(self):
if self.is_commutative:
return self.func(self.function.adjoint(), *self.limits)
return None
def _eval_conjugate(self):
return self.func(self.function.conjugate(), *self.limits)
def _eval_product(self, term, limits):
from sympy.concrete.delta import deltaproduct, _has_simple_delta
from sympy.concrete.summations import summation
from sympy.functions import KroneckerDelta
(k, a, n) = limits
if k not in term.free_symbols:
return term**(n - a + 1)
if a == n:
return term.subs(k, a)
if term.has(KroneckerDelta) and _has_simple_delta(term, limits[0]):
return deltaproduct(term, limits)
dif = n - a
if dif.is_Integer:
return Mul(*[term.subs(k, a + i) for i in xrange(dif + 1)])
elif term.is_polynomial(k):
poly = term.as_poly(k)
A = B = Q = S.One
all_roots = roots(poly, multiple=True)
for r in all_roots:
A *= C.RisingFactorial(a - r, n - a + 1)
Q *= n - r
if len(all_roots) < poly.degree():
arg = quo(poly, Q.as_poly(k))
B = self.func(arg, (k, a, n)).doit()
return poly.LC()**(n - a + 1) * A * B
elif term.is_Add:
p, q = term.as_numer_denom()
p = self._eval_product(p, (k, a, n))
q = self._eval_product(q, (k, a, n))
return p / q
elif term.is_Mul:
exclude, include = [], []
for t in term.args:
p = self._eval_product(t, (k, a, n))
if p is not None:
exclude.append(p)
else:
include.append(t)
if not exclude:
return None
else:
arg = term._new_rawargs(*include)
A = Mul(*exclude)
B = self.func(arg, (k, a, n)).doit()
return A * B
elif term.is_Pow:
if not term.base.has(k):
s = summation(term.exp, (k, a, n))
return term.base**s
elif not term.exp.has(k):
p = self._eval_product(term.base, (k, a, n))
if p is not None:
return p**term.exp
elif isinstance(term, Product):
evaluated = term.doit()
f = self._eval_product(evaluated, limits)
if f is None:
return self.func(evaluated, limits)
else:
return f
def _eval_transpose(self):
if self.is_commutative:
return self.func(self.function.transpose(), *self.limits)
return None
def _eval_subs(self, old, new):
from sympy.integrals.integrals import _eval_subs
return _eval_subs(self, old, new)
def product(*args, **kwargs):
r"""
Compute the product.
The notation for symbols is similar to the notation used in Sum or
Integral. product(f, (i, a, b)) computes the product of f with
respect to i from a to b, i.e.,
::
                             b
                           _____
product(f(n), (i, a, b)) = |   | f(n)
                           |   |
                          i = a
If it cannot compute the product, it returns an unevaluated Product object.
Repeated products can be computed by introducing additional symbols tuples::
>>> from sympy import product, symbols
>>> i, n, m, k = symbols('i n m k', integer=True)
>>> product(i, (i, 1, k))
factorial(k)
>>> product(m, (i, 1, k))
m**k
>>> product(i, (i, 1, k), (k, 1, n))
Product(factorial(k), (k, 1, n))
"""
prod = Product(*args, **kwargs)
if isinstance(prod, Product):
return prod.doit(deep=False)
else:
return prod
|
[
"sympy.integrals.integrals._free_symbols",
"sympy.simplify.powsimp",
"sympy.integrals.integrals._as_dummy",
"sympy.concrete.summations.summation",
"sympy.core.expr.Expr.__new__",
"sympy.integrals.integrals._eval_subs",
"sympy.polys.roots",
"sympy.core.core.C.RisingFactorial",
"sympy.core.mul.Mul",
"sympy.concrete.delta._has_simple_delta",
"sympy.integrals.integrals._process_limits",
"sympy.concrete.delta.deltaproduct",
"sympy.core.compatibility.xrange",
"sympy.core.sympify.sympify"
] |
[((6090, 6115), 'sympy.integrals.integrals._process_limits', '_process_limits', (['*symbols'], {}), '(*symbols)\n', (6105, 6115), False, 'from sympy.integrals.integrals import _process_limits\n'), ((6406, 6438), 'sympy.core.expr.Expr.__new__', 'Expr.__new__', (['cls'], {}), '(cls, **assumptions)\n', (6418, 6438), False, 'from sympy.core.expr import Expr\n'), ((7649, 7668), 'sympy.integrals.integrals._free_symbols', '_free_symbols', (['self'], {}), '(self)\n', (7662, 7668), False, 'from sympy.integrals.integrals import _free_symbols\n'), ((8517, 8532), 'sympy.integrals.integrals._as_dummy', '_as_dummy', (['self'], {}), '(self)\n', (8526, 8532), False, 'from sympy.integrals.integrals import _as_dummy\n'), ((12002, 12028), 'sympy.integrals.integrals._eval_subs', '_eval_subs', (['self', 'old', 'new'], {}), '(self, old, new)\n', (12012, 12028), False, 'from sympy.integrals.integrals import _eval_subs\n'), ((5902, 5919), 'sympy.core.sympify.sympify', 'sympify', (['function'], {}), '(function)\n', (5909, 5919), False, 'from sympy.core.sympify import sympify\n'), ((9087, 9097), 'sympy.simplify.powsimp', 'powsimp', (['f'], {}), '(f)\n', (9094, 9097), False, 'from sympy.simplify import powsimp\n'), ((9771, 9805), 'sympy.concrete.delta._has_simple_delta', '_has_simple_delta', (['term', 'limits[0]'], {}), '(term, limits[0])\n', (9788, 9805), False, 'from sympy.concrete.delta import deltaproduct, _has_simple_delta\n'), ((9826, 9852), 'sympy.concrete.delta.deltaproduct', 'deltaproduct', (['term', 'limits'], {}), '(term, limits)\n', (9838, 9852), False, 'from sympy.concrete.delta import deltaproduct, _has_simple_delta\n'), ((10101, 10127), 'sympy.polys.roots', 'roots', (['poly'], {'multiple': '(True)'}), '(poly, multiple=True)\n', (10106, 10127), False, 'from sympy.polys import quo, roots\n'), ((8908, 8918), 'sympy.simplify.powsimp', 'powsimp', (['f'], {}), '(f)\n', (8915, 8918), False, 'from sympy.simplify import powsimp\n'), ((10182, 10217), 'sympy.core.core.C.RisingFactorial', 'C.RisingFactorial', (['(a - r)', '(n - a + 1)'], {}), '(a - r, n - a + 1)\n', (10199, 10217), False, 'from sympy.core.core import C\n'), ((9955, 9970), 'sympy.core.compatibility.xrange', 'xrange', (['(dif + 1)'], {}), '(dif + 1)\n', (9961, 9970), False, 'from sympy.core.compatibility import xrange\n'), ((11065, 11078), 'sympy.core.mul.Mul', 'Mul', (['*exclude'], {}), '(*exclude)\n', (11068, 11078), False, 'from sympy.core.mul import Mul\n'), ((11245, 11275), 'sympy.concrete.summations.summation', 'summation', (['term.exp', '(k, a, n)'], {}), '(term.exp, (k, a, n))\n', (11254, 11275), False, 'from sympy.concrete.summations import summation\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests the BigQuery collector."""
import unittest
import mock
from dftimewolf.lib import state
from dftimewolf.lib.collectors import bigquery
from dftimewolf import config
class BigQueryCollectorTest(unittest.TestCase):
"""Tests for the BigQuery collector."""
def testInitialization(self):
"""Tests that the collector can be initialized."""
test_state = state.DFTimewolfState(config.Config)
bq_collector = bigquery.BigQueryCollector(test_state)
self.assertIsNotNone(bq_collector)
@mock.patch('google.cloud.bigquery.Client')
def testQuery(self, mock_bq):
"""Tests that the collector calls the BQ client."""
mock_bq().query().to_dataframe().to_json.return_value = "{'foo':1}"
test_state = state.DFTimewolfState(config.Config)
bq_collector = bigquery.BigQueryCollector(test_state)
bq_collector.SetUp('test_project', 'test_query', 'test_description')
bq_collector.Process()
mock_bq().query.assert_called_with('test_query')
mock_bq().query().to_dataframe().to_json.assert_called_once()
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"dftimewolf.lib.collectors.bigquery.BigQueryCollector",
"dftimewolf.lib.state.DFTimewolfState",
"mock.patch"
] |
[((559, 601), 'mock.patch', 'mock.patch', (['"""google.cloud.bigquery.Client"""'], {}), "('google.cloud.bigquery.Client')\n", (569, 601), False, 'import mock\n'), ((1124, 1139), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1137, 1139), False, 'import unittest\n'), ((421, 457), 'dftimewolf.lib.state.DFTimewolfState', 'state.DFTimewolfState', (['config.Config'], {}), '(config.Config)\n', (442, 457), False, 'from dftimewolf.lib import state\n'), ((477, 515), 'dftimewolf.lib.collectors.bigquery.BigQueryCollector', 'bigquery.BigQueryCollector', (['test_state'], {}), '(test_state)\n', (503, 515), False, 'from dftimewolf.lib.collectors import bigquery\n'), ((779, 815), 'dftimewolf.lib.state.DFTimewolfState', 'state.DFTimewolfState', (['config.Config'], {}), '(config.Config)\n', (800, 815), False, 'from dftimewolf.lib import state\n'), ((835, 873), 'dftimewolf.lib.collectors.bigquery.BigQueryCollector', 'bigquery.BigQueryCollector', (['test_state'], {}), '(test_state)\n', (861, 873), False, 'from dftimewolf.lib.collectors import bigquery\n')]
|
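The chained `return_value` assignment in `testQuery` above works because a `MagicMock` hands back the same auto-created child for every call, so the configured chain lines up with the exact chain the collector invokes. A small standalone illustration, assuming the standalone `mock` package that the test itself imports:

```python
import mock

m = mock.MagicMock()
m().query().to_dataframe().to_json.return_value = "{'foo': 1}"
# Each call on a MagicMock returns the same child mock, so repeating the
# chain later hits the value configured above.
assert m().query().to_dataframe().to_json() == "{'foo': 1}"
```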
#!/usr/bin/env python3
import re
import sys
import jwt
import requests
from Crypto.PublicKey import RSA
from Crypto.Util.number import inverse
def recover_d(n, e, p):
q = n // p
phi = (p - 1) * (q - 1)
return inverse(e, phi)
def main(url):
n = 16158503035655503650357438344334975980222051334857742016065172713762327569433945446598600705761456731844358980460949009747059779575245460547544076193224141560315438683650498045875098875194826053398028819192033784138396109321309878080919047169238085235290822926018152521443787945791354642779162369073575510464676307738745137368236340488336468229438062757591864495832519542800353637624510899960409953559448637052209587086542698189198304456374481505084668512138820151645511298243115434132458196567714649584872980882921433923431323025741438248940081524739535046106494564661952078162997692569171205852699326923237775905163
e = 65537
p = 0b10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010111011001
d = recover_d(n, e, p)
rsa = RSA.construct((n, e, d))
session = {'broken': False}
cookie = jwt.encode(session, rsa.exportKey(), algorithm='RS512')
response = requests.get(url, cookies={'session': cookie.decode()}).text
flag = re.search(r'flag\{.*?\}', response).group(0)
print(flag)
if __name__ == '__main__':
main(sys.argv[1])
|
[
"Crypto.PublicKey.RSA.construct",
"Crypto.Util.number.inverse",
"re.search"
] |
[((225, 240), 'Crypto.Util.number.inverse', 'inverse', (['e', 'phi'], {}), '(e, phi)\n', (232, 240), False, 'from Crypto.Util.number import inverse\n'), ((1884, 1908), 'Crypto.PublicKey.RSA.construct', 'RSA.construct', (['(n, e, d)'], {}), '((n, e, d))\n', (1897, 1908), False, 'from Crypto.PublicKey import RSA\n'), ((2097, 2133), 're.search', 're.search', (['"""flag\\\\{.*?\\\\}"""', 'response'], {}), "('flag\\\\{.*?\\\\}', response)\n", (2106, 2133), False, 'import re\n')]
|
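The `recover_d` helper above is the textbook relation d = e^-1 mod phi(n), usable as soon as one prime factor of n is known (here p is read off the challenge modulus). A self-contained sanity check with small textbook primes rather than the challenge values:

```python
from Crypto.Util.number import inverse

p, q, e = 61, 53, 17
n = p * q                    # 3233
phi = (p - 1) * (q - 1)      # 3120
d = inverse(e, phi)          # 2753
assert (e * d) % phi == 1    # d really is the modular inverse of e
assert pow(pow(42, e, n), d, n) == 42  # encrypt-then-decrypt round trip
```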
from tornado import escape
from tornado.web import asynchronous
from tornado.options import options
from tornado.auth import TwitterMixin
from june.lib.decorators import require_user, require_system
from june.lib.handler import BaseHandler
from june.models import Social
class TwitterHandler(BaseHandler, TwitterMixin):
def check_xsrf_cookie(self):
# disable xsrf check
return
def _oauth_consumer_token(self):
# reset method to get consumer token
return {'key': options.twitter_key, 'secret': options.twitter_secret}
@require_user
@asynchronous
def get(self):
if 'twitter' in self.get_user_social(self.current_user.id):
enabled = self.get_argument('enabled', 'a')
if enabled not in ('y', 'n'):
self.redirect('/account/setting')
return
q = self.db.query(Social).filter_by(service='twitter')
t = q.filter_by(user_id=self.current_user.id).first()
t.enabled = enabled
self.db.add(t)
self.db.commit()
self.cache.delete('social:%s' % self.current_user.id)
self.redirect('/account/setting')
return
if self.get_argument('oauth_token', None):
self.get_authenticated_user(self._on_auth)
return
self.authorize_redirect()
def _on_auth(self, user):
if not user:
self.write('Twitter auth failed')
self.finish()
return
access_token = escape.json_encode(user['access_token'])
network = Social(service='twitter', user_id=self.current_user.id,
token=access_token)
self.db.add(network)
self.db.commit()
self.cache.delete('social:%s' % self.current_user.id)
self.redirect('/account/setting')
@require_system
@asynchronous
def post(self):
content = self.get_argument('content', None)
token = self.get_argument('token', None)
if not (content and token):
self.finish('deny')
return
token = escape.json_decode(token)
status = escape.utf8(content)
self.twitter_request(
'/statuses/update',
post_args={'status': status},
access_token=token,
callback=self._on_post)
def _on_post(self, entry):
if not entry:
self.finish('fail')
return
self.finish('ok')
handlers = [
('/social/twitter', TwitterHandler),
]
|
[
"tornado.escape.json_decode",
"june.models.Social",
"tornado.escape.json_encode",
"tornado.escape.utf8"
] |
[((1532, 1572), 'tornado.escape.json_encode', 'escape.json_encode', (["user['access_token']"], {}), "(user['access_token'])\n", (1550, 1572), False, 'from tornado import escape\n'), ((1591, 1666), 'june.models.Social', 'Social', ([], {'service': '"""twitter"""', 'user_id': 'self.current_user.id', 'token': 'access_token'}), "(service='twitter', user_id=self.current_user.id, token=access_token)\n", (1597, 1666), False, 'from june.models import Social\n'), ((2114, 2139), 'tornado.escape.json_decode', 'escape.json_decode', (['token'], {}), '(token)\n', (2132, 2139), False, 'from tornado import escape\n'), ((2157, 2177), 'tornado.escape.utf8', 'escape.utf8', (['content'], {}), '(content)\n', (2168, 2177), False, 'from tornado import escape\n')]
|
#!/usr/bin python3.7
# -*- coding: utf-8 -*-
from pymongo.errors import DuplicateKeyError
from lib.mongoConnector import connect_to_mongodb, pop_offers_already_saved
from lib.queryBuilder import query_immo_offers, get_amount_of_pages
def get_offers_ids(immotype, total_pages, sort="asc"):
ids = []
if total_pages == 0:
return
i = 0
while i <= total_pages and i <= 500:
result = query_immo_offers(i, immotype, sort)
print("Processing data... " + "{0:.2f}".format((i / total_pages) * 100) + "%, i=" + str(i) + ", total="
+ str(total_pages) + '\n')
if result:
for k in result:
ids.append(k['id'])
i += 1
return ids
if __name__ == "__main__":
client = connect_to_mongodb()
db = client['antunedo']
mongo_collection = db['offers']
online_offers = []
categories_done = []
print('starting ...')
for j in range(0, 52):
immotype_id = str(j + 1)
total_page_count = get_amount_of_pages(immotype_id)
print("\nNouvel Immotype : " + immotype_id + ", avec un total de " + str(total_page_count) + " pages\n\n")
if total_page_count > 0:
if total_page_count > 1000:
# TODO : filter this immo category into smaller elements
print('DEAAAAAD --- immotype ID : ' + immotype_id + '\n\n')
elif total_page_count > 500:
online_offers += get_offers_ids(immotype_id, total_page_count)
online_offers += get_offers_ids(immotype_id, total_page_count - 500, "desc")
categories_done.append(int(immotype_id))
else:
online_offers += get_offers_ids(immotype_id, total_page_count)
categories_done.append(int(immotype_id))
mongo_collection.update_many({'id': {'$in': online_offers}}, {'$set': {'is_online': True}})
print(online_offers)
online_offers = []
print(categories_done)
client.close()
|
[
"lib.queryBuilder.query_immo_offers",
"lib.queryBuilder.get_amount_of_pages",
"lib.mongoConnector.connect_to_mongodb"
] |
[((759, 779), 'lib.mongoConnector.connect_to_mongodb', 'connect_to_mongodb', ([], {}), '()\n', (777, 779), False, 'from lib.mongoConnector import connect_to_mongodb, pop_offers_already_saved\n'), ((413, 449), 'lib.queryBuilder.query_immo_offers', 'query_immo_offers', (['i', 'immotype', 'sort'], {}), '(i, immotype, sort)\n', (430, 449), False, 'from lib.queryBuilder import query_immo_offers, get_amount_of_pages\n'), ((1006, 1038), 'lib.queryBuilder.get_amount_of_pages', 'get_amount_of_pages', (['immotype_id'], {}), '(immotype_id)\n', (1025, 1038), False, 'from lib.queryBuilder import query_immo_offers, get_amount_of_pages\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import yaml
from metrics.instance_property import instance_property
from metrics.metrics_dicts import (
delta_metrics_dict,
eval_metrics_dict,
job_metrics_dict,
metrics_meta_dict,
)
def get_eval_metrics(task, predictions: list, targets: list) -> tuple:
perf_metric_type = yaml.load(task.config_yaml, yaml.SafeLoader)["perf_metric"][
"type"
]
# NOTE:
# right now, the returned eval metric scores are just the perf metric, but we
# could add a feature that allows for the display of multiple eval metrics
metric_result = eval_metrics_dict[perf_metric_type](predictions, targets)
if isinstance(metric_result, dict):
score_dict = metric_result
else:
score_dict = {perf_metric_type: metric_result}
return score_dict[perf_metric_type], score_dict
def get_job_metrics(job, dataset, decen=False) -> dict:
if not job.aws_metrics:
return {}
instance_config = instance_property[dataset.task.instance_type]
job_metrics = instance_config["aws_metrics"]
return_dict = {}
for key in job_metrics:
if key == "examples_per_second":
return_dict[key] = job_metrics_dict[key](job, dataset, decen=decen)
else:
return_dict[key] = job_metrics_dict[key](job, dataset)
return return_dict
def get_delta_metrics(
task, predictions: list, targets: list, perturb_prefix: str
) -> dict:
"""
predictions: a list of list of predictions
targets: a list of labels
"""
perf_metric_type = yaml.load(task.config_yaml, yaml.SafeLoader)["perf_metric"][
"type"
]
perf_metric = eval_metrics_dict[perf_metric_type]
delta_metrics_scores = {
perturb_prefix: delta_metrics_dict[perturb_prefix](
predictions, targets, perf_metric
)
}
return delta_metrics_scores
def get_task_metrics_meta(task):
instance_config = instance_property[task.instance_type]
task_config = yaml.load(task.config_yaml, yaml.SafeLoader)
perf_metric_type = task_config["perf_metric"]["type"]
delta_metric_types = [obj["type"] for obj in task_config.get("delta_metrics", [])]
aws_metric_names = instance_config["aws_metrics"]
# TODO: make it possible to display some models with aws metrics and some
# models without aws metrics on the same leaderboard?
if task.has_predictions_upload or "train_file_metric" in task_config:
aws_metric_names = []
ordered_metric_field_names = (
[perf_metric_type] + aws_metric_names + delta_metric_types
)
metrics_meta = {
metric: metrics_meta_dict.get(metric, metrics_meta_dict[perf_metric_type])(task)
for metric in ordered_metric_field_names
}
return metrics_meta, ordered_metric_field_names
|
[
"yaml.load",
"metrics.metrics_dicts.metrics_meta_dict.get"
] |
[((2140, 2184), 'yaml.load', 'yaml.load', (['task.config_yaml', 'yaml.SafeLoader'], {}), '(task.config_yaml, yaml.SafeLoader)\n', (2149, 2184), False, 'import yaml\n'), ((470, 514), 'yaml.load', 'yaml.load', (['task.config_yaml', 'yaml.SafeLoader'], {}), '(task.config_yaml, yaml.SafeLoader)\n', (479, 514), False, 'import yaml\n'), ((1708, 1752), 'yaml.load', 'yaml.load', (['task.config_yaml', 'yaml.SafeLoader'], {}), '(task.config_yaml, yaml.SafeLoader)\n', (1717, 1752), False, 'import yaml\n'), ((2770, 2836), 'metrics.metrics_dicts.metrics_meta_dict.get', 'metrics_meta_dict.get', (['metric', 'metrics_meta_dict[perf_metric_type]'], {}), '(metric, metrics_meta_dict[perf_metric_type])\n', (2791, 2836), False, 'from metrics.metrics_dicts import delta_metrics_dict, eval_metrics_dict, job_metrics_dict, metrics_meta_dict\n')]
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines DSL for the RobustFill domain."""
import abc
import collections
import enum
import functools
import inspect
import re
import string
from typing import TypeVar, List, Dict, Tuple, Any, Optional
ProgramTask = collections.namedtuple('ProgramTask',
['program', 'inputs', 'outputs'])
# Describes range of possible indices for a character (for SubStr expression).
POSITION = [-100, 100]
# Describes range of possible indices for a regex.
INDEX = [-5, -4, -3, -2, -1, 1, 2, 3, 4, 5]
DELIMITER = '&,.?!@()[]%{}/:;$#"\' '
CHARACTER = string.ascii_letters + string.digits + DELIMITER
BOS = 'BOS'
EOS = 'EOS'
class Type(enum.Enum):
NUMBER = 1
WORD = 2
ALPHANUM = 3
ALL_CAPS = 4
PROP_CASE = 5
LOWER = 6
DIGIT = 7
CHAR = 8
class Case(enum.Enum):
PROPER = 1
ALL_CAPS = 2
LOWER = 3
class Boundary(enum.Enum):
START = 1
END = 2
Regex = TypeVar('Regex', Type, str)
def regex_for_type(t):
"""Map types to their regex string."""
if t == Type.NUMBER:
return '[0-9]+'
elif t == Type.WORD:
return '[A-Za-z]+'
elif t == Type.ALPHANUM:
return '[A-Za-z0-9]+'
elif t == Type.ALL_CAPS:
return '[A-Z]+'
elif t == Type.PROP_CASE:
return '[A-Z][a-z]+'
elif t == Type.LOWER:
return '[a-z]+'
elif t == Type.DIGIT:
return '[0-9]'
elif t == Type.CHAR:
return '[A-Za-z0-9' + ''.join([re.escape(x) for x in DELIMITER]) + ']'
else:
raise ValueError('Unsupported type: {}'.format(t))
def match_regex_substr(t, value):
regex = regex_for_type(t)
return re.findall(regex, value)
def match_regex_span(r, value):
if isinstance(r, Type):
regex = regex_for_type(r)
else:
assert (len(r) == 1) and (r in DELIMITER)
regex = '[' + re.escape(r) + ']'
return [match.span() for match in re.finditer(regex, value)]
class Base(abc.ABC):
"""Base class for DSL."""
@abc.abstractmethod
def __call__(self, value):
raise NotImplementedError
@abc.abstractmethod
def to_string(self):
raise NotImplementedError
def __repr__(self):
return self.to_string()
@abc.abstractmethod
def encode(self, token_id_table):
raise NotImplementedError
class Program(Base):
pass
class Concat(Program):
"""Concatenation of expressions."""
def __init__(self, *args):
self.expressions = args
def __call__(self, value):
return ''.join([e(value) for e in self.expressions])
def to_string(self):
return ' | '.join([e.to_string() for e in self.expressions])
def encode(self, token_id_table):
sub_token_ids = [e.encode(token_id_table) for e in self.expressions]
return (functools.reduce(lambda a, b: a + b, sub_token_ids)
+ [token_id_table[EOS]])
class Expression(Base):
pass
class Substring(Expression):
pass
class Modification(Expression):
pass
class Compose(Expression):
"""Composition of two modifications or modification and substring."""
def __init__(self, modification,
modification_or_substring):
self.modification = modification
self.modification_or_substring = modification_or_substring
def __call__(self, value):
return self.modification(self.modification_or_substring(value))
def to_string(self):
return (self.modification.to_string() + '('
+ self.modification_or_substring.to_string() + ')')
def encode(self, token_id_table):
return ([token_id_table[self.__class__]]
+ self.modification.encode(token_id_table)
+ self.modification_or_substring.encode(token_id_table))
class ConstStr(Expression):
"""Fixed character."""
def __init__(self, char):
self.char = char
def __call__(self, value):
return self.char
def to_string(self):
return 'Const(' + self.char + ')'
def encode(self, token_id_table):
return [token_id_table[self.__class__], token_id_table[self.char]]
class SubStr(Substring):
"""Return substring given indices."""
def __init__(self, pos1, pos2):
self.pos1 = pos1
self.pos2 = pos2
def __call__(self, value):
# Positive indices start at 1.
p1 = self.pos1 - 1 if self.pos1 > 0 else len(value) + self.pos1
p2 = self.pos2 - 1 if self.pos2 > 0 else len(value) + self.pos2
if p1 >= p2: # Handle edge cases.
return ''
if p2 == len(value):
return value[p1:]
return value[p1:p2 + 1]
def to_string(self):
return 'SubStr(' + str(self.pos1) + ', ' + str(self.pos2) + ')'
def encode(self, token_id_table):
return [
token_id_table[self.__class__],
token_id_table[self.pos1],
token_id_table[self.pos2],
]
class GetSpan(Substring):
"""Return substring given indices of regex matches."""
def __init__(self, regex1, index1, bound1,
regex2, index2, bound2):
self.regex1 = regex1
self.index1 = index1
self.bound1 = bound1
self.regex2 = regex2
self.index2 = index2
self.bound2 = bound2
@staticmethod
def _index(r, index, bound,
value):
"""Get index in string of regex match."""
matches = match_regex_span(r, value)
# Positive indices start at 1.
index = index - 1 if index > 0 else len(matches) + index
if not matches:
return -1
if index >= len(matches): # Handle edge cases.
return len(matches) - 1
if index < 0:
return 0
span = matches[index]
return span[0] if bound == Boundary.START else span[1]
def __call__(self, value):
p1 = GetSpan._index(self.regex1, self.index1, self.bound1, value)
p2 = GetSpan._index(self.regex2, self.index2, self.bound2, value)
if min(p1, p2) < 0: # pytype: disable=unsupported-operands
return ''
return value[p1:p2]
def to_string(self):
return ('GetSpan('
+ ', '.join(map(str, [self.regex1,
self.index1,
self.bound1,
self.regex2,
self.index2,
self.bound2]))
+ ')')
def encode(self, token_id_table):
return list(map(lambda x: token_id_table[x],
[self.__class__,
self.regex1,
self.index1,
self.bound1,
self.regex2,
self.index2,
self.bound2]))
class GetToken(Substring):
"""Get regex match."""
def __init__(self, regex_type, index):
self.regex_type = regex_type
self.index = index
def __call__(self, value):
matches = match_regex_substr(self.regex_type, value)
# Positive indices start at 1.
index = self.index - 1 if self.index > 0 else len(matches) + self.index
if not matches:
return ''
if index >= len(matches) or index < 0: # Handle edge cases.
return ''
return matches[index]
def to_string(self):
return 'GetToken_' + str(self.regex_type) + '_' + str(self.index)
def encode(self, token_id_table):
return [
token_id_table[self.__class__],
token_id_table[self.regex_type],
token_id_table[self.index],
]
class ToCase(Modification):
"""Convert to case."""
def __init__(self, case):
self.case = case
def __call__(self, value):
if self.case == Case.PROPER:
return value.capitalize()
elif self.case == Case.ALL_CAPS:
return value.upper()
elif self.case == Case.LOWER:
return value.lower()
else:
raise ValueError('Invalid case: {}'.format(self.case))
def to_string(self):
return 'ToCase_' + str(self.case)
def encode(self, token_id_table):
return [token_id_table[self.__class__], token_id_table[self.case]]
class Replace(Modification):
"""Replace delimitors."""
def __init__(self, delim1, delim2):
self.delim1 = delim1
self.delim2 = delim2
def __call__(self, value):
return value.replace(self.delim1, self.delim2)
def to_string(self):
return 'Replace_' + str(self.delim1) + '_' + str(self.delim2)
def encode(self, token_id_table):
return [
token_id_table[self.__class__],
token_id_table[self.delim1],
token_id_table[self.delim2],
]
class Trim(Modification):
"""Trim whitspace."""
def __init__(self):
pass
def __call__(self, value):
return value.strip()
def to_string(self):
return 'Trim'
def encode(self, token_id_table):
return [token_id_table[self.__class__]]
class GetUpto(Substring):
"""Get substring up to regex match."""
def __init__(self, regex):
self.regex = regex
def __call__(self, value):
matches = match_regex_span(self.regex, value)
if not matches:
return ''
first = matches[0]
return value[:first[1]]
def to_string(self):
return 'GetUpto_' + str(self.regex)
def encode(self, token_id_table):
return [token_id_table[self.__class__], token_id_table[self.regex]]
class GetFrom(Substring):
"""Get substring from regex match."""
def __init__(self, regex):
self.regex = regex
def __call__(self, value):
matches = match_regex_span(self.regex, value)
if not matches:
return ''
first = matches[0]
return value[first[1]:]
def to_string(self):
return 'GetFrom_' + str(self.regex)
def encode(self, token_id_table):
return [token_id_table[self.__class__], token_id_table[self.regex]]
class GetFirst(Modification):
"""Get first occurrences of regex match."""
def __init__(self, regex_type, index):
self.regex_type = regex_type
self.index = index
def __call__(self, value):
matches = match_regex_substr(self.regex_type, value)
if not matches:
return ''
if self.index >= len(matches):
return ''.join(matches)
return ''.join(matches[:self.index])
def to_string(self):
return 'GetFirst_' + str(self.regex_type) + '_' + str(self.index)
def encode(self, token_id_table):
return [
token_id_table[self.__class__],
token_id_table[self.regex_type],
token_id_table[self.index],
]
class GetAll(Modification):
"""Get all occurrences of regex match."""
def __init__(self, regex_type):
self.regex_type = regex_type
def __call__(self, value):
return ''.join(match_regex_substr(self.regex_type, value))
def to_string(self):
return 'GetAll_' + str(self.regex_type)
def encode(self, token_id_table):
return [token_id_table[self.__class__], token_id_table[self.regex_type]]
# New Functions
# ---------------------------------------------------------------------------
class Substitute(Modification):
"""Replace i-th occurence of regex match with constant."""
def __init__(self, regex_type, index, char):
self.regex_type = regex_type
self.index = index
self.char = char
def __call__(self, value):
matches = match_regex_substr(self.regex_type, value)
# Positive indices start at 1.
index = self.index - 1 if self.index > 0 else len(matches) + self.index
if not matches:
return value
if index >= len(matches) or index < 0: # Handle edge cases.
return value
return value.replace(matches[index], self.char, 1)
def to_string(self):
return ('Substitute_' + str(self.regex_type) + '_' + str(self.index) + '_'
+ self.char)
def encode(self, token_id_table):
return [
token_id_table[self.__class__],
token_id_table[self.regex_type],
token_id_table[self.index],
token_id_table[self.char],
]
class SubstituteAll(Modification):
"""Replace all occurences of regex match with constant."""
def __init__(self, regex_type, char):
self.regex_type = regex_type
self.char = char
def __call__(self, value):
matches = match_regex_substr(self.regex_type, value)
for match in matches:
value = value.replace(match, self.char, 1)
return value
def to_string(self):
return 'SubstituteAll_' + str(self.regex_type) + '_' + self.char
def encode(self, token_id_table):
return [
token_id_table[self.__class__],
token_id_table[self.regex_type],
token_id_table[self.char],
]
class Remove(Modification):
"""Remove i-th occurence of regex match."""
def __init__(self, regex_type, index):
self.regex_type = regex_type
self.index = index
def __call__(self, value):
matches = match_regex_substr(self.regex_type, value)
# Positive indices start at 1.
index = self.index - 1 if self.index > 0 else len(matches) + self.index
if not matches:
return value
if index >= len(matches) or index < 0: # Handle edge cases.
return value
return value.replace(matches[index], '', 1)
def to_string(self):
return 'Remove_' + str(self.regex_type) + '_' + str(self.index)
def encode(self, token_id_table):
return [
token_id_table[self.__class__],
token_id_table[self.regex_type],
token_id_table[self.index],
]
class RemoveAll(Modification):
"""Remove all occurences of regex match."""
def __init__(self, regex_type):
self.regex_type = regex_type
def __call__(self, value):
matches = match_regex_substr(self.regex_type, value)
for match in matches:
value = value.replace(match, '', 1)
return value
def to_string(self):
return 'RemoveAll_' + str(self.regex_type)
def encode(self, token_id_table):
return [
token_id_table[self.__class__],
token_id_table[self.regex_type],
]
def decode_expression(encoding,
id_token_table):
"""Decode sequence of token ids to expression (excluding Compose)."""
cls = id_token_table[encoding[0]]
return cls(*list(map(lambda x: id_token_table[x], encoding[1:])))
def decode_program(encoding,
id_token_table):
"""Decode sequence of token ids into a Concat program."""
expressions = []
idx = 0
while idx < len(encoding) - 1:
elem = id_token_table[encoding[idx]]
if elem == Compose: # Handle Compose separately.
idx += 1
modification_elem = id_token_table[encoding[idx]]
n_args = len(inspect.signature(modification_elem.__init__).parameters)
modification = decode_expression(encoding[idx:idx+n_args], id_token_table)
idx += n_args
modification_or_substring_elem = id_token_table[encoding[idx]]
n_args = len(
inspect.signature(modification_or_substring_elem.__init__).parameters)
modification_or_substring = decode_expression(encoding[idx:idx+n_args],
id_token_table)
idx += n_args
next_e = Compose(modification, modification_or_substring)
else:
n_args = len(inspect.signature(elem.__init__).parameters)
next_e = decode_expression(encoding[idx:idx+n_args], id_token_table)
idx += n_args
expressions.append(next_e)
assert id_token_table[encoding[idx]] == EOS
return Concat(*expressions)
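# A rough round-trip sketch (hypothetical usage; assumes token_id_table /
# id_token_table map every class, literal argument, and EOS to unique ids, and
# that Concat.encode terminates the sequence with the EOS id; append it
# manually otherwise):
#
#   program = Concat(Const('-'), Compose(ToCase(Case.LOWER), SubStr(1, 3)))
#   encoding = program.encode(token_id_table)
#   assert decode_program(encoding, id_token_table).to_string() == program.to_string()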
|
[
"re.finditer",
"re.escape",
"re.findall",
"inspect.signature",
"collections.namedtuple",
"functools.reduce",
"typing.TypeVar"
] |
[((829, 900), 'collections.namedtuple', 'collections.namedtuple', (['"""ProgramTask"""', "['program', 'inputs', 'outputs']"], {}), "('ProgramTask', ['program', 'inputs', 'outputs'])\n", (851, 900), False, 'import collections\n'), ((1516, 1543), 'typing.TypeVar', 'TypeVar', (['"""Regex"""', 'Type', 'str'], {}), "('Regex', Type, str)\n", (1523, 1543), False, 'from typing import TypeVar, List, Dict, Tuple, Any, Optional\n'), ((2173, 2197), 're.findall', 're.findall', (['regex', 'value'], {}), '(regex, value)\n', (2183, 2197), False, 'import re\n'), ((2416, 2441), 're.finditer', 're.finditer', (['regex', 'value'], {}), '(regex, value)\n', (2427, 2441), False, 'import re\n'), ((3242, 3293), 'functools.reduce', 'functools.reduce', (['(lambda a, b: a + b)', 'sub_token_ids'], {}), '(lambda a, b: a + b, sub_token_ids)\n', (3258, 3293), False, 'import functools\n'), ((2360, 2372), 're.escape', 're.escape', (['r'], {}), '(r)\n', (2369, 2372), False, 'import re\n'), ((14709, 14754), 'inspect.signature', 'inspect.signature', (['modification_elem.__init__'], {}), '(modification_elem.__init__)\n', (14726, 14754), False, 'import inspect\n'), ((14967, 15025), 'inspect.signature', 'inspect.signature', (['modification_or_substring_elem.__init__'], {}), '(modification_or_substring_elem.__init__)\n', (14984, 15025), False, 'import inspect\n'), ((15297, 15329), 'inspect.signature', 'inspect.signature', (['elem.__init__'], {}), '(elem.__init__)\n', (15314, 15329), False, 'import inspect\n'), ((1997, 2009), 're.escape', 're.escape', (['x'], {}), '(x)\n', (2006, 2009), False, 'import re\n')]
|
import mysamplefunctions
variable = -5
print("variable is:", variable)
absolute_value = mysamplefunctions.absolute(variable)
print("absolute value is:", absolute_value)
from mysamplefunctions import areatriangle
w = 5
h = 10
print("area is:", areatriangle(width=w, height=h) )
from mysamplefunctions import areacircle, summation
radius = 10
area1 = areacircle(radius)
area2 = areacircle(radius, 3)
area3 = areacircle(radius, pi = 3.14)
print("area1: ", area1, "area2: ", area2, "area3: ", area3)
total1 = summation (1,2,3,4,5)
print("total1:", total1)
from mysamplefunctions import *
mynumbers = [1,2,3,4,5]
total3 = sumbylist (mynumbers)
print("total3:", total3)
total4 = summation (1,2,3,4,5)
print("total4:", total4)
|
[
"mysamplefunctions.absolute",
"mysamplefunctions.areacircle",
"mysamplefunctions.summation",
"mysamplefunctions.areatriangle"
] |
[((78, 114), 'mysamplefunctions.absolute', 'mysamplefunctions.absolute', (['variable'], {}), '(variable)\n', (104, 114), False, 'import mysamplefunctions\n'), ((333, 351), 'mysamplefunctions.areacircle', 'areacircle', (['radius'], {}), '(radius)\n', (343, 351), False, 'from mysamplefunctions import areacircle, summation\n'), ((360, 381), 'mysamplefunctions.areacircle', 'areacircle', (['radius', '(3)'], {}), '(radius, 3)\n', (370, 381), False, 'from mysamplefunctions import areacircle, summation\n'), ((390, 417), 'mysamplefunctions.areacircle', 'areacircle', (['radius'], {'pi': '(3.14)'}), '(radius, pi=3.14)\n', (400, 417), False, 'from mysamplefunctions import areacircle, summation\n'), ((490, 514), 'mysamplefunctions.summation', 'summation', (['(1)', '(2)', '(3)', '(4)', '(5)'], {}), '(1, 2, 3, 4, 5)\n', (499, 514), False, 'from mysamplefunctions import areacircle, summation\n'), ((661, 685), 'mysamplefunctions.summation', 'summation', (['(1)', '(2)', '(3)', '(4)', '(5)'], {}), '(1, 2, 3, 4, 5)\n', (670, 685), False, 'from mysamplefunctions import areacircle, summation\n'), ((225, 256), 'mysamplefunctions.areatriangle', 'areatriangle', ([], {'width': 'w', 'height': 'h'}), '(width=w, height=h)\n', (237, 256), False, 'from mysamplefunctions import areatriangle\n')]
|
'''H5 data prep'''
## External modules.
import csv
import numpy as np
import os
import tables
## Internal modules.
from mml.config import dir_data_toread
from mml.config import dir_data_towrite
from mml.utils import makedir_safe
###############################################################################
## Clerical setup.
data_name = "adult"
toread_tr = os.path.join(dir_data_toread, data_name, "adult.data")
toread_te = os.path.join(dir_data_toread, data_name, "adult.test")
newdir = os.path.join(dir_data_towrite, data_name)
towrite = os.path.join(newdir, "adult.h5")
attribute_names = [
"age", "workclass", "fnlwgt", "education", "education-num",
"marital-status", "occupation", "relationship", "race", "sex",
"capital-gain", "capital-loss", "hours-per-week", "native-country"
] # order is important.
attribute_dict = {
"age": ["continuous"],
"workclass": ["Private", "Self-emp-not-inc", "Self-emp-inc",
"Federal-gov", "Local-gov", "State-gov",
"Without-pay", "Never-worked"],
"fnlwgt": ["continuous"],
"education": ["Bachelors", "Some-college", "11th", "HS-grad",
"Prof-school", "Assoc-acdm", "Assoc-voc", "9th",
"7th-8th", "12th", "Masters", "1st-4th", "10th",
"Doctorate", "5th-6th", "Preschool"],
"education-num": ["continuous"],
"marital-status": ["Married-civ-spouse", "Divorced",
"Never-married", "Separated", "Widowed",
"Married-spouse-absent", "Married-AF-spouse"],
"occupation": ["Tech-support", "Craft-repair", "Other-service",
"Sales", "Exec-managerial", "Prof-specialty",
"Handlers-cleaners", "Machine-op-inspct",
"Adm-clerical", "Farming-fishing",
"Transport-moving", "Priv-house-serv",
"Protective-serv", "Armed-Forces"],
"relationship": ["Wife", "Own-child", "Husband", "Not-in-family",
"Other-relative", "Unmarried"],
"race": ["White", "Asian-Pac-Islander", "Amer-Indian-Eskimo",
"Other", "Black"],
"sex": ["Female", "Male"],
"capital-gain": ["continuous"],
"capital-loss": ["continuous"],
"hours-per-week": ["continuous"],
"native-country": ["United-States", "Cambodia", "England",
"Puerto-Rico", "Canada", "Germany",
"Outlying-US(Guam-USVI-etc)", "India",
"Japan", "Greece", "South", "China", "Cuba",
"Iran", "Honduras", "Philippines", "Italy",
"Poland", "Jamaica", "Vietnam", "Mexico",
"Portugal", "Ireland", "France",
"Dominican-Republic", "Laos", "Ecuador",
"Taiwan", "Haiti", "Columbia", "Hungary",
"Guatemala", "Nicaragua", "Scotland",
"Thailand", "Yugoslavia", "El-Salvador",
"Trinadad&Tobago", "Peru", "Hong",
"Holand-Netherlands"]
}
label_dict = {"<=50K": 0,
">50K": 1}
n_tr = 30162 # number of clean instances.
n_te = 15060 # number of clean instances.
n_all = n_tr+n_te
num_features = np.array(
[ len(attribute_dict[key]) for key in attribute_dict.keys() ]
).sum() # number of features after a one-hot encoding.
num_classes = 2
num_labels = 1
title = data_name+": Full dataset"
title_X = data_name+": Features"
title_y = data_name+": Labels"
dtype_X = np.float32
atom_X = tables.Float32Atom()
dtype_y = np.uint8
atom_y = tables.UInt8Atom()
def parse_line(x, y):
## Inputs are a bit complicated.
x_out_list = []
for j in range(len(x)):
value = x[j]
attribute = attribute_names[j]
num_distinct = len(attribute_dict[attribute])
## Ignore all points with missing entries.
if value == "?":
return (None, None)
else:
if num_distinct > 1:
idx_hot = attribute_dict[attribute].index(value)
onehot = np.zeros(num_distinct, dtype=dtype_X)
onehot[idx_hot] = 1.0
x_out_list.append(onehot)
else:
x_out_list.append(np.array([value], dtype=dtype_X))
x_out = np.concatenate(x_out_list)
if len(x_out) != num_features:
raise ValueError("Something is wrong with the feature vec parser.")
## Labels are easy.
y_out = np.array([label_dict[y]], dtype=dtype_y)
return x_out, y_out
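# For reference: a clean row yields a (num_features,) float32 vector (one-hot
# blocks for categorical attributes, raw values for continuous ones) and a
# (1,) uint8 label, while any row containing "?" is rejected as (None, None).
# For example, the "sex" attribute alone contributes a 2-dimensional one-hot
# block ("Female" -> [1, 0], "Male" -> [0, 1]), whereas "age" contributes a
# single raw value.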
def raw_to_h5():
'''
Transform the raw dataset into one of HDF5 type.
'''
X_raw_tr = np.zeros((n_tr,num_features), dtype=dtype_X)
y_raw_tr = np.zeros((n_tr,num_labels), dtype=dtype_y)
X_raw_te = np.zeros((n_te,num_features), dtype=dtype_X)
y_raw_te = np.zeros((n_te,num_labels), dtype=dtype_y)
print("Preparation: {}".format(data_name))
## Read in the raw training data.
with open(toread_tr, newline="") as f_table:
print("Read {}.".format(toread_tr))
f_reader = csv.reader(f_table, delimiter=",",
skipinitialspace=True)
## Populate the placeholder numpy arrays.
idx = 0
for line in f_reader:
if len(line) == 0:
continue # do nothing for blank lines.
## Numpy arrays for individual instance.
x, y = parse_line(x=line[0:-1], y=line[-1])
if x is None:
continue # skip instances with missing values.
else:
X_raw_tr[idx,:] = x
y_raw_tr[idx,0] = y
## Update the index (also counts the clean data points).
idx += 1
## Check that number of *clean* instances is as expected.
print(
"Number of clean guys (tr): {}. Note n_tr = {}".format(idx,n_tr)
)
## Read in the raw test data.
with open(toread_te, newline="") as f_table:
print("Read {}.".format(toread_te))
f_reader = csv.reader(f_table, delimiter=",",
skipinitialspace=True)
## Populate the placeholder numpy arrays.
idx = 0
for i, line in enumerate(f_reader):
if i == 0:
continue # skip the first line, only for TEST data.
if len(line) == 0:
continue # do nothing for blank lines.
## Numpy arrays for individual instance.
x, y = parse_line(x=line[0:-1], y=line[-1][0:-1])
      # Note: for test data, we strip the trailing "." from labels.
if x is None:
continue # skip instances with missing values.
else:
X_raw_te[idx,:] = x
y_raw_te[idx,0] = y
## Update the index (also counts the clean data points).
idx += 1
## Check that number of *clean* instances is as expected.
print(
"Number of clean guys (te): {}. Note n_te = {}".format(idx,n_te)
)
## Concatenate.
X_raw = np.vstack((X_raw_tr, X_raw_te))
y_raw = np.vstack((y_raw_tr, y_raw_te))
## Create and populate the HDF5 file.
makedir_safe(newdir)
with tables.open_file(towrite, mode="w", title=title) as myh5:
myh5.create_array(where=myh5.root,
name="X",
obj=X_raw,
atom=atom_X,
title=title_X)
myh5.create_array(where=myh5.root,
name="y",
obj=y_raw,
atom=atom_y,
title=title_y)
print(myh5)
print("Wrote {}.".format(towrite))
## Exit all context managers before returning.
print("Done ({}).".format(data_name))
return None
if __name__ == "__main__":
raw_to_h5()
###############################################################################
|
[
"csv.reader",
"numpy.zeros",
"tables.Float32Atom",
"numpy.vstack",
"mml.utils.makedir_safe",
"numpy.array",
"tables.UInt8Atom",
"tables.open_file",
"os.path.join",
"numpy.concatenate"
] |
[((368, 422), 'os.path.join', 'os.path.join', (['dir_data_toread', 'data_name', '"""adult.data"""'], {}), "(dir_data_toread, data_name, 'adult.data')\n", (380, 422), False, 'import os\n'), ((435, 489), 'os.path.join', 'os.path.join', (['dir_data_toread', 'data_name', '"""adult.test"""'], {}), "(dir_data_toread, data_name, 'adult.test')\n", (447, 489), False, 'import os\n'), ((499, 540), 'os.path.join', 'os.path.join', (['dir_data_towrite', 'data_name'], {}), '(dir_data_towrite, data_name)\n', (511, 540), False, 'import os\n'), ((551, 583), 'os.path.join', 'os.path.join', (['newdir', '"""adult.h5"""'], {}), "(newdir, 'adult.h5')\n", (563, 583), False, 'import os\n'), ((3551, 3571), 'tables.Float32Atom', 'tables.Float32Atom', ([], {}), '()\n', (3569, 3571), False, 'import tables\n'), ((3600, 3618), 'tables.UInt8Atom', 'tables.UInt8Atom', ([], {}), '()\n', (3616, 3618), False, 'import tables\n'), ((4331, 4357), 'numpy.concatenate', 'np.concatenate', (['x_out_list'], {}), '(x_out_list)\n', (4345, 4357), True, 'import numpy as np\n'), ((4506, 4546), 'numpy.array', 'np.array', (['[label_dict[y]]'], {'dtype': 'dtype_y'}), '([label_dict[y]], dtype=dtype_y)\n', (4514, 4546), True, 'import numpy as np\n'), ((4680, 4725), 'numpy.zeros', 'np.zeros', (['(n_tr, num_features)'], {'dtype': 'dtype_X'}), '((n_tr, num_features), dtype=dtype_X)\n', (4688, 4725), True, 'import numpy as np\n'), ((4740, 4783), 'numpy.zeros', 'np.zeros', (['(n_tr, num_labels)'], {'dtype': 'dtype_y'}), '((n_tr, num_labels), dtype=dtype_y)\n', (4748, 4783), True, 'import numpy as np\n'), ((4798, 4843), 'numpy.zeros', 'np.zeros', (['(n_te, num_features)'], {'dtype': 'dtype_X'}), '((n_te, num_features), dtype=dtype_X)\n', (4806, 4843), True, 'import numpy as np\n'), ((4858, 4901), 'numpy.zeros', 'np.zeros', (['(n_te, num_labels)'], {'dtype': 'dtype_y'}), '((n_te, num_labels), dtype=dtype_y)\n', (4866, 4901), True, 'import numpy as np\n'), ((7196, 7227), 'numpy.vstack', 'np.vstack', (['(X_raw_tr, X_raw_te)'], {}), '((X_raw_tr, X_raw_te))\n', (7205, 7227), True, 'import numpy as np\n'), ((7240, 7271), 'numpy.vstack', 'np.vstack', (['(y_raw_tr, y_raw_te)'], {}), '((y_raw_tr, y_raw_te))\n', (7249, 7271), True, 'import numpy as np\n'), ((7319, 7339), 'mml.utils.makedir_safe', 'makedir_safe', (['newdir'], {}), '(newdir)\n', (7331, 7339), False, 'from mml.utils import makedir_safe\n'), ((5118, 5175), 'csv.reader', 'csv.reader', (['f_table'], {'delimiter': '""","""', 'skipinitialspace': '(True)'}), "(f_table, delimiter=',', skipinitialspace=True)\n", (5128, 5175), False, 'import csv\n'), ((6118, 6175), 'csv.reader', 'csv.reader', (['f_table'], {'delimiter': '""","""', 'skipinitialspace': '(True)'}), "(f_table, delimiter=',', skipinitialspace=True)\n", (6128, 6175), False, 'import csv\n'), ((7349, 7397), 'tables.open_file', 'tables.open_file', (['towrite'], {'mode': '"""w"""', 'title': 'title'}), "(towrite, mode='w', title=title)\n", (7365, 7397), False, 'import tables\n'), ((4098, 4135), 'numpy.zeros', 'np.zeros', (['num_distinct'], {'dtype': 'dtype_X'}), '(num_distinct, dtype=dtype_X)\n', (4106, 4135), True, 'import numpy as np\n'), ((4268, 4300), 'numpy.array', 'np.array', (['[value]'], {'dtype': 'dtype_X'}), '([value], dtype=dtype_X)\n', (4276, 4300), True, 'import numpy as np\n')]
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to read, decode and pre-process input data for the Model.
"""
import collections
import sys
import tensorflow as tf
from tensorflow.python.data.experimental.ops import threadpool
# from tensorflow.contrib import slim
InputEndpoints = collections.namedtuple(
'InputEndpoints', ['images', 'images_orig', 'labels', 'labels_one_hot'])
ShuffleBatchConfig = collections.namedtuple('ShuffleBatchConfig', [
'num_batching_threads', 'queue_capacity', 'min_after_dequeue'
])
DEFAULT_SHUFFLE_CONFIG = ShuffleBatchConfig(
num_batching_threads=8, queue_capacity=3000, min_after_dequeue=1000)
def get_data_files(data_sources):
from tensorflow.python.platform import gfile
if isinstance(data_sources, (list, tuple)):
data_files = []
for source in data_sources:
data_files += get_data_files(source)
else:
if '*' in data_sources or '?' in data_sources or '[' in data_sources:
data_files = gfile.Glob(data_sources)
else:
data_files = [data_sources]
if not data_files:
raise ValueError('No data files found in %s' % (data_sources,))
return data_files
def preprocess_image(image, location, label_one_hot, height=224, width=224):
"""Prepare one image for evaluation.
If height and width are specified it would output an image with that size by
applying resize_bilinear.
  If central_fraction is specified it would crop the central fraction of the
input image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would be converted to tf.float32 assuming that the range
      is [0, MAX], where MAX is the largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details)
height: integer
width: integer
central_fraction: Optional Float, fraction of the image to crop.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image.
"""
# if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Crop the central region of the image with an area containing 87.5% of
# the original image.
# if central_fraction:
# image = tf.image.central_crop(image, central_fraction=central_fraction)
# if height and width:
# Resize the image to the specified height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
image = tf.squeeze(image, [0])
# image = tf.cast(image, tf.float32)
# image = tf.multiply(image, 1/255.)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image, location, label_one_hot
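# Net effect of preprocess_image: pixels are scaled to [0, 1] by
# convert_image_dtype, resized to (height, width) with bilinear interpolation,
# then shifted and scaled to [-1, 1] (Inception-style preprocessing); the
# location and one-hot label pass through unchanged.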
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def parse_example_proto(example_serialized, num_classes, labels_offset, image_preprocessing_fn):
feature_map = {
'image/encoded': tf.FixedLenFeature([], tf.string, ''),
'image/class/label': tf.FixedLenFeature([1], tf.int64, -1),
'image/class/text': tf.FixedLenFeature([], tf.string, ''),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32)
}
with tf.compat.v1.name_scope('deserialize_image_record'):
obj = tf.io.parse_single_example(serialized=example_serialized, features=feature_map)
image = tf.image.decode_jpeg(obj['image/encoded'], channels=3, fancy_upscaling=False,
dct_method='INTEGER_FAST')
if image_preprocessing_fn:
image = image_preprocessing_fn(image, 224, 224)
else:
image = tf.image.resize(image, [224, 224])
label = tf.cast(obj['image/class/label'], tf.int32)
label = tf.squeeze(label)
label -= labels_offset
label = tf.one_hot(label, num_classes - labels_offset)
return image, label
def parse_example_decode(example_serialized):
feature_map = {
'image/encoded': tf.FixedLenFeature([], tf.string, ''),
'image/class/label': tf.FixedLenFeature([1], tf.int64, -1),
'image/class/text': tf.FixedLenFeature([], tf.string, ''),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32)
}
with tf.compat.v1.name_scope('deserialize_image_record'):
obj = tf.io.parse_single_example(serialized=example_serialized, features=feature_map)
image = tf.image.decode_jpeg(obj['image/encoded'], channels=3, fancy_upscaling=False,
dct_method='INTEGER_FAST')
return image, obj['image/class/label']
def parse_example(image, label, num_classes, labels_offset, image_preprocessing_fn):
with tf.compat.v1.name_scope('deserialize_image_record'):
if image_preprocessing_fn:
image = image_preprocessing_fn(image, 224, 224)
else:
image = tf.image.resize(image, [224, 224])
label = tf.cast(label, tf.int32)
label = tf.squeeze(label)
label -= labels_offset
label = tf.one_hot(label, num_classes - labels_offset)
return image, label
def parse_example1(example_serialized, image_preprocessing_fn1):
feature_map = {
'image/encoded': tf.FixedLenFeature([], tf.string, ''),
'image/class/label': tf.FixedLenFeature([1], tf.int64, -1),
'image/class/text': tf.FixedLenFeature([], tf.string, ''),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32)
}
with tf.compat.v1.name_scope('deserialize_image_record'):
obj = tf.io.parse_single_example(serialized=example_serialized, features=feature_map)
image = tf.image.decode_jpeg(obj['image/encoded'], channels=3, fancy_upscaling=False,
dct_method='INTEGER_FAST')
image = image_preprocessing_fn1(image, 224, 224)
return image, obj['image/class/label']
def parse_example2(image, label, num_classes, labels_offset, image_preprocessing_fn2):
with tf.compat.v1.name_scope('deserialize_image_record'):
image = image_preprocessing_fn2(image, 224, 224)
label = tf.cast(label, tf.int32)
label = tf.squeeze(label)
label -= labels_offset
label = tf.one_hot(label, num_classes - labels_offset)
return image, label
def get_data(dataset, batch_size, num_classes, labels_offset, is_training,
preprocessing_name=None, use_grayscale=None, add_image_summaries=False):
return get_data_united(dataset, batch_size, num_classes, labels_offset, is_training,
preprocessing_name, use_grayscale, add_image_summaries)
def create_ds(data_sources, is_training):
data_files = get_data_files(data_sources)
ds = tf.data.Dataset.from_tensor_slices(data_files)
if is_training:
ds = ds.shuffle(1000)
# add for eval
else:
ds = ds.take(50000)
##### change #####
num_readers = 10
ds = ds.interleave(
tf.data.TFRecordDataset, cycle_length=num_readers, block_length=1, num_parallel_calls=tf.data.experimental.AUTOTUNE)
counter = tf.data.Dataset.range(sys.maxsize)
ds = tf.data.Dataset.zip((ds, counter))
##### change #####
if is_training:
ds = ds.repeat()
return ds
def get_data_united(dataset, batch_size, num_classes, labels_offset, is_training,
preprocessing_name=None, use_grayscale=None, add_image_summaries=False):
from preprocessing import preprocessing_factory
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
name='inception_v2',
is_training=is_training,
use_grayscale=use_grayscale,
add_image_summaries=add_image_summaries
)
ds = create_ds(dataset.data_sources, is_training)
ds = ds.map(lambda example, counter: parse_example_proto(example, num_classes, labels_offset, image_preprocessing_fn), num_parallel_calls=24)
ds = ds.batch(batch_size, drop_remainder=True)
  ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
iterator = ds.make_initializable_iterator()
ds = threadpool.override_threadpool(ds,threadpool.PrivateThreadPool(128, display_name='input_pipeline_thread_pool'))
return iterator, ds
|
[
"tensorflow.train.Int64List",
"tensorflow.image.resize_bilinear",
"tensorflow.multiply",
"tensorflow.one_hot",
"tensorflow.subtract",
"tensorflow.compat.v1.name_scope",
"tensorflow.cast",
"tensorflow.squeeze",
"preprocessing.preprocessing_factory.get_preprocessing",
"tensorflow.VarLenFeature",
"tensorflow.data.Dataset.range",
"tensorflow.data.Dataset.zip",
"tensorflow.python.data.experimental.ops.threadpool.PrivateThreadPool",
"tensorflow.expand_dims",
"tensorflow.io.parse_single_example",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.python.platform.gfile.Glob",
"collections.namedtuple",
"tensorflow.FixedLenFeature",
"tensorflow.image.decode_jpeg",
"tensorflow.image.resize",
"tensorflow.image.convert_image_dtype"
] |
[((939, 1038), 'collections.namedtuple', 'collections.namedtuple', (['"""InputEndpoints"""', "['images', 'images_orig', 'labels', 'labels_one_hot']"], {}), "('InputEndpoints', ['images', 'images_orig', 'labels',\n 'labels_one_hot'])\n", (961, 1038), False, 'import collections\n'), ((1061, 1174), 'collections.namedtuple', 'collections.namedtuple', (['"""ShuffleBatchConfig"""', "['num_batching_threads', 'queue_capacity', 'min_after_dequeue']"], {}), "('ShuffleBatchConfig', ['num_batching_threads',\n 'queue_capacity', 'min_after_dequeue'])\n", (1083, 1174), False, 'import collections\n'), ((2758, 2811), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['image'], {'dtype': 'tf.float32'}), '(image, dtype=tf.float32)\n', (2786, 2811), True, 'import tensorflow as tf\n'), ((3118, 3142), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (3132, 3142), True, 'import tensorflow as tf\n'), ((3155, 3224), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['image', '[height, width]'], {'align_corners': '(False)'}), '(image, [height, width], align_corners=False)\n', (3179, 3224), True, 'import tensorflow as tf\n'), ((3237, 3259), 'tensorflow.squeeze', 'tf.squeeze', (['image', '[0]'], {}), '(image, [0])\n', (3247, 3259), True, 'import tensorflow as tf\n'), ((3355, 3378), 'tensorflow.subtract', 'tf.subtract', (['image', '(0.5)'], {}), '(image, 0.5)\n', (3366, 3378), True, 'import tensorflow as tf\n'), ((3391, 3414), 'tensorflow.multiply', 'tf.multiply', (['image', '(2.0)'], {}), '(image, 2.0)\n', (3402, 3414), True, 'import tensorflow as tf\n'), ((8280, 8326), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['data_files'], {}), '(data_files)\n', (8314, 8326), True, 'import tensorflow as tf\n'), ((8643, 8677), 'tensorflow.data.Dataset.range', 'tf.data.Dataset.range', (['sys.maxsize'], {}), '(sys.maxsize)\n', (8664, 8677), True, 'import tensorflow as tf\n'), ((8687, 8721), 'tensorflow.data.Dataset.zip', 'tf.data.Dataset.zip', (['(ds, counter)'], {}), '((ds, counter))\n', (8706, 8721), True, 'import tensorflow as tf\n'), ((9048, 9213), 'preprocessing.preprocessing_factory.get_preprocessing', 'preprocessing_factory.get_preprocessing', ([], {'name': '"""inception_v2"""', 'is_training': 'is_training', 'use_grayscale': 'use_grayscale', 'add_image_summaries': 'add_image_summaries'}), "(name='inception_v2', is_training=\n is_training, use_grayscale=use_grayscale, add_image_summaries=\n add_image_summaries)\n", (9087, 9213), False, 'from preprocessing import preprocessing_factory\n'), ((3830, 3867), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string', '""""""'], {}), "([], tf.string, '')\n", (3848, 3867), True, 'import tensorflow as tf\n'), ((3898, 3935), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[1]', 'tf.int64', '(-1)'], {}), '([1], tf.int64, -1)\n', (3916, 3935), True, 'import tensorflow as tf\n'), ((3965, 4002), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string', '""""""'], {}), "([], tf.string, '')\n", (3983, 4002), True, 'import tensorflow as tf\n'), ((4038, 4072), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (4054, 4072), True, 'import tensorflow as tf\n'), ((4108, 4142), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (4124, 4142), True, 'import tensorflow as tf\n'), ((4178, 4212), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], 
{'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (4194, 4212), True, 'import tensorflow as tf\n'), ((4248, 4282), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (4264, 4282), True, 'import tensorflow as tf\n'), ((4298, 4349), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""deserialize_image_record"""'], {}), "('deserialize_image_record')\n", (4321, 4349), True, 'import tensorflow as tf\n'), ((4365, 4444), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', ([], {'serialized': 'example_serialized', 'features': 'feature_map'}), '(serialized=example_serialized, features=feature_map)\n', (4391, 4444), True, 'import tensorflow as tf\n'), ((4461, 4570), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (["obj['image/encoded']"], {'channels': '(3)', 'fancy_upscaling': '(False)', 'dct_method': '"""INTEGER_FAST"""'}), "(obj['image/encoded'], channels=3, fancy_upscaling=\n False, dct_method='INTEGER_FAST')\n", (4481, 4570), True, 'import tensorflow as tf\n'), ((4812, 4855), 'tensorflow.cast', 'tf.cast', (["obj['image/class/label']", 'tf.int32'], {}), "(obj['image/class/label'], tf.int32)\n", (4819, 4855), True, 'import tensorflow as tf\n'), ((4872, 4889), 'tensorflow.squeeze', 'tf.squeeze', (['label'], {}), '(label)\n', (4882, 4889), True, 'import tensorflow as tf\n'), ((4937, 4983), 'tensorflow.one_hot', 'tf.one_hot', (['label', '(num_classes - labels_offset)'], {}), '(label, num_classes - labels_offset)\n', (4947, 4983), True, 'import tensorflow as tf\n'), ((5105, 5142), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string', '""""""'], {}), "([], tf.string, '')\n", (5123, 5142), True, 'import tensorflow as tf\n'), ((5173, 5210), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[1]', 'tf.int64', '(-1)'], {}), '([1], tf.int64, -1)\n', (5191, 5210), True, 'import tensorflow as tf\n'), ((5240, 5277), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string', '""""""'], {}), "([], tf.string, '')\n", (5258, 5277), True, 'import tensorflow as tf\n'), ((5313, 5347), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (5329, 5347), True, 'import tensorflow as tf\n'), ((5383, 5417), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (5399, 5417), True, 'import tensorflow as tf\n'), ((5453, 5487), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (5469, 5487), True, 'import tensorflow as tf\n'), ((5523, 5557), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (5539, 5557), True, 'import tensorflow as tf\n'), ((5573, 5624), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""deserialize_image_record"""'], {}), "('deserialize_image_record')\n", (5596, 5624), True, 'import tensorflow as tf\n'), ((5640, 5719), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', ([], {'serialized': 'example_serialized', 'features': 'feature_map'}), '(serialized=example_serialized, features=feature_map)\n', (5666, 5719), True, 'import tensorflow as tf\n'), ((5736, 5845), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (["obj['image/encoded']"], {'channels': '(3)', 'fancy_upscaling': '(False)', 'dct_method': '"""INTEGER_FAST"""'}), "(obj['image/encoded'], channels=3, fancy_upscaling=\n False, dct_method='INTEGER_FAST')\n", (5756, 5845), True, 'import 
tensorflow as tf\n'), ((6046, 6097), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""deserialize_image_record"""'], {}), "('deserialize_image_record')\n", (6069, 6097), True, 'import tensorflow as tf\n'), ((6280, 6304), 'tensorflow.cast', 'tf.cast', (['label', 'tf.int32'], {}), '(label, tf.int32)\n', (6287, 6304), True, 'import tensorflow as tf\n'), ((6321, 6338), 'tensorflow.squeeze', 'tf.squeeze', (['label'], {}), '(label)\n', (6331, 6338), True, 'import tensorflow as tf\n'), ((6386, 6432), 'tensorflow.one_hot', 'tf.one_hot', (['label', '(num_classes - labels_offset)'], {}), '(label, num_classes - labels_offset)\n', (6396, 6432), True, 'import tensorflow as tf\n'), ((6569, 6606), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string', '""""""'], {}), "([], tf.string, '')\n", (6587, 6606), True, 'import tensorflow as tf\n'), ((6637, 6674), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[1]', 'tf.int64', '(-1)'], {}), '([1], tf.int64, -1)\n', (6655, 6674), True, 'import tensorflow as tf\n'), ((6704, 6741), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string', '""""""'], {}), "([], tf.string, '')\n", (6722, 6741), True, 'import tensorflow as tf\n'), ((6777, 6811), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (6793, 6811), True, 'import tensorflow as tf\n'), ((6847, 6881), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (6863, 6881), True, 'import tensorflow as tf\n'), ((6917, 6951), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (6933, 6951), True, 'import tensorflow as tf\n'), ((6987, 7021), 'tensorflow.VarLenFeature', 'tf.VarLenFeature', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (7003, 7021), True, 'import tensorflow as tf\n'), ((7037, 7088), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""deserialize_image_record"""'], {}), "('deserialize_image_record')\n", (7060, 7088), True, 'import tensorflow as tf\n'), ((7104, 7183), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', ([], {'serialized': 'example_serialized', 'features': 'feature_map'}), '(serialized=example_serialized, features=feature_map)\n', (7130, 7183), True, 'import tensorflow as tf\n'), ((7200, 7309), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (["obj['image/encoded']"], {'channels': '(3)', 'fancy_upscaling': '(False)', 'dct_method': '"""INTEGER_FAST"""'}), "(obj['image/encoded'], channels=3, fancy_upscaling=\n False, dct_method='INTEGER_FAST')\n", (7220, 7309), True, 'import tensorflow as tf\n'), ((7569, 7620), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""deserialize_image_record"""'], {}), "('deserialize_image_record')\n", (7592, 7620), True, 'import tensorflow as tf\n'), ((7696, 7720), 'tensorflow.cast', 'tf.cast', (['label', 'tf.int32'], {}), '(label, tf.int32)\n', (7703, 7720), True, 'import tensorflow as tf\n'), ((7737, 7754), 'tensorflow.squeeze', 'tf.squeeze', (['label'], {}), '(label)\n', (7747, 7754), True, 'import tensorflow as tf\n'), ((7802, 7848), 'tensorflow.one_hot', 'tf.one_hot', (['label', '(num_classes - labels_offset)'], {}), '(label, num_classes - labels_offset)\n', (7812, 7848), True, 'import tensorflow as tf\n'), ((9649, 9725), 'tensorflow.python.data.experimental.ops.threadpool.PrivateThreadPool', 'threadpool.PrivateThreadPool', (['(128)'], {'display_name': 
'"""input_pipeline_thread_pool"""'}), "(128, display_name='input_pipeline_thread_pool')\n", (9677, 9725), False, 'from tensorflow.python.data.experimental.ops import threadpool\n'), ((1651, 1675), 'tensorflow.python.platform.gfile.Glob', 'gfile.Glob', (['data_sources'], {}), '(data_sources)\n', (1661, 1675), False, 'from tensorflow.python.platform import gfile\n'), ((3653, 3684), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'value'}), '(value=value)\n', (3671, 3684), True, 'import tensorflow as tf\n'), ((4760, 4794), 'tensorflow.image.resize', 'tf.image.resize', (['image', '[224, 224]'], {}), '(image, [224, 224])\n', (4775, 4794), True, 'import tensorflow as tf\n'), ((6228, 6262), 'tensorflow.image.resize', 'tf.image.resize', (['image', '[224, 224]'], {}), '(image, [224, 224])\n', (6243, 6262), True, 'import tensorflow as tf\n')]
|
import board
import audioio
import audiobusio
import digitalio
import time
import array
import math
buf = bytearray(16000)
print(3)
time.sleep(1)
print(2)
time.sleep(1)
print(1)
time.sleep(1)
#print("recording", time.monotonic())
print("recording")
#trigger = digitalio.DigitalInOut(board.A1)
#trigger.switch_to_output(value = True)
with audiobusio.PDMIn(board.MICROPHONE_CLOCK, board.MICROPHONE_DATA) as mic:
mic.record(buf, len(buf))
#trigger.value = False
#print("done recording", time.monotonic())
print("done recording")
speaker_enable = digitalio.DigitalInOut(board.SPEAKER_ENABLE)
speaker_enable.switch_to_output(value=True)
time.sleep(1)
#trigger.value = True
#print("playback", time.monotonic())
print("playback")
with audioio.AudioOut(board.SPEAKER, buf) as speaker:
speaker.frequency = 8000
speaker.play()
while speaker.playing:
pass
#trigger.value = False
|
[
"audioio.AudioOut",
"digitalio.DigitalInOut",
"audiobusio.PDMIn",
"time.sleep"
] |
[((134, 147), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (144, 147), False, 'import time\n'), ((157, 170), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (167, 170), False, 'import time\n'), ((180, 193), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (190, 193), False, 'import time\n'), ((550, 594), 'digitalio.DigitalInOut', 'digitalio.DigitalInOut', (['board.SPEAKER_ENABLE'], {}), '(board.SPEAKER_ENABLE)\n', (572, 594), False, 'import digitalio\n'), ((639, 652), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (649, 652), False, 'import time\n'), ((340, 403), 'audiobusio.PDMIn', 'audiobusio.PDMIn', (['board.MICROPHONE_CLOCK', 'board.MICROPHONE_DATA'], {}), '(board.MICROPHONE_CLOCK, board.MICROPHONE_DATA)\n', (356, 403), False, 'import audiobusio\n'), ((736, 772), 'audioio.AudioOut', 'audioio.AudioOut', (['board.SPEAKER', 'buf'], {}), '(board.SPEAKER, buf)\n', (752, 772), False, 'import audioio\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
import travel.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='TravelBucketList',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100)),
('is_public', models.BooleanField(default=True)),
('description', models.TextField(blank=True)),
('last_update', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'travel_bucket_list',
},
),
migrations.CreateModel(
name='TravelCurrency',
fields=[
('iso', models.CharField(max_length=4, serialize=False, primary_key=True)),
('name', models.CharField(max_length=50)),
('fraction', models.CharField(max_length=8, blank=True)),
('fraction_name', models.CharField(max_length=15, blank=True)),
('sign', models.CharField(max_length=4, blank=True)),
('alt_sign', models.CharField(max_length=4, blank=True)),
],
options={
'db_table': 'travel_currency',
},
),
migrations.CreateModel(
name='TravelEntity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('geonameid', models.IntegerField(default=0)),
('code', models.CharField(max_length=6, db_index=True)),
('name', models.CharField(max_length=175)),
('full_name', models.CharField(max_length=175)),
('lat', models.DecimalField(null=True, max_digits=7, decimal_places=4, blank=True)),
('lon', models.DecimalField(null=True, max_digits=7, decimal_places=4, blank=True)),
('category', models.CharField(max_length=4, blank=True)),
('locality', models.CharField(max_length=256, blank=True)),
('tz', models.CharField(max_length=40, verbose_name=b'timezone', blank=True)),
('capital', models.ForeignKey(on_delete=django.db.models.SET_NULL, related_name='capital_set', blank=True, to='travel.TravelEntity', null=True)),
('continent', models.ForeignKey(on_delete=django.db.models.SET_NULL, related_name='continent_set', blank=True, to='travel.TravelEntity', null=True)),
('country', models.ForeignKey(on_delete=django.db.models.SET_NULL, related_name='country_set', blank=True, to='travel.TravelEntity', null=True)),
],
options={
'ordering': ('name',),
'db_table': 'travel_entity',
},
),
migrations.CreateModel(
name='TravelEntityInfo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('iso3', models.CharField(max_length=3, blank=True)),
('denom', models.CharField(max_length=40, blank=True)),
('denoms', models.CharField(max_length=60, blank=True)),
('language_codes', models.CharField(max_length=100, blank=True)),
('phone', models.CharField(max_length=20, blank=True)),
('electrical', models.CharField(max_length=40, blank=True)),
('postal_code', models.CharField(max_length=60, blank=True)),
('tld', models.CharField(max_length=8, blank=True)),
('population', models.IntegerField(default=None, null=True, blank=True)),
('area', models.IntegerField(default=None, null=True, blank=True)),
('currency', models.ForeignKey(on_delete=django.db.models.SET_NULL, blank=True, to='travel.TravelCurrency', null=True)),
('entity', models.OneToOneField(on_delete=django.db.models.CASCADE, related_name='entityinfo', to='travel.TravelEntity')),
],
options={
'db_table': 'travel_entityinfo',
},
),
migrations.CreateModel(
name='TravelEntityType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('abbr', models.CharField(max_length=4, db_index=True)),
('title', models.CharField(max_length=25)),
],
options={
'db_table': 'travel_entitytype',
},
),
migrations.CreateModel(
name='TravelFlag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('source', models.CharField(max_length=255)),
('base_dir', models.CharField(max_length=8)),
('ref', models.CharField(max_length=6)),
('thumb', models.ImageField(blank=True)),
('large', models.ImageField(blank=True)),
('svg', models.FileField(upload_to=travel.models.svg_upload, blank=True)),
('is_locked', models.BooleanField(default=False)),
],
options={
'db_table': 'travel_flag',
},
),
migrations.CreateModel(
name='TravelLanguage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('iso639_1', models.CharField(max_length=2, blank=True)),
('iso639_2', models.CharField(max_length=12, blank=True)),
('iso639_3', models.CharField(max_length=3, blank=True)),
('name', models.CharField(max_length=60)),
],
),
migrations.CreateModel(
name='TravelLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('arrival', models.DateTimeField()),
('rating', models.PositiveSmallIntegerField(default=3, choices=[(1, b'★★★★★'), (2, b'★★★★'), (3, b'★★★'), (4, b'★★'), (5, b'★')])),
('notes', models.TextField(blank=True)),
('entity', models.ForeignKey(on_delete=django.db.models.CASCADE, to='travel.TravelEntity')),
('user', models.ForeignKey(on_delete=django.db.models.CASCADE, related_name='travellog_set', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-arrival',),
'get_latest_by': 'arrival',
},
),
migrations.CreateModel(
name='TravelProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('access', models.CharField(default='PRO', max_length=3, choices=[('PUB', b'Public'), ('PRI', b'Private'), ('PRO', b'Protected')])),
('user', models.OneToOneField(on_delete=django.db.models.CASCADE, related_name='travel_profile', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'travel_profile',
},
),
migrations.AddField(
model_name='travelentityinfo',
name='languages',
field=models.ManyToManyField(to='travel.TravelLanguage', blank=True),
),
migrations.AddField(
model_name='travelentityinfo',
name='neighbors',
field=models.ManyToManyField(to='travel.TravelEntity', blank=True),
),
migrations.AddField(
model_name='travelentity',
name='flag',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='travel.TravelFlag', null=True),
),
migrations.AddField(
model_name='travelentity',
name='state',
field=models.ForeignKey(on_delete=django.db.models.SET_NULL, related_name='state_set', blank=True, to='travel.TravelEntity', null=True),
),
migrations.AddField(
model_name='travelentity',
name='type',
field=models.ForeignKey(on_delete=django.db.models.PROTECT, related_name='entity_set', to='travel.TravelEntityType'),
),
migrations.AddField(
model_name='travelbucketlist',
name='entities',
field=models.ManyToManyField(to='travel.TravelEntity'),
),
migrations.AddField(
model_name='travelbucketlist',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.SET_NULL, default=None, blank=True, to=settings.AUTH_USER_MODEL, null=True),
),
]
|
[
"django.db.models.FileField",
"django.db.models.TextField",
"django.db.models.OneToOneField",
"django.db.migrations.swappable_dependency",
"django.db.models.ManyToManyField",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.ImageField",
"django.db.models.IntegerField",
"django.db.models.DecimalField",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.DateTimeField"
] |
[((264, 321), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (295, 321), False, 'from django.db import migrations, models\n'), ((7870, 7932), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""travel.TravelLanguage"""', 'blank': '(True)'}), "(to='travel.TravelLanguage', blank=True)\n", (7892, 7932), False, 'from django.db import migrations, models\n'), ((8065, 8125), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""travel.TravelEntity"""', 'blank': '(True)'}), "(to='travel.TravelEntity', blank=True)\n", (8087, 8125), False, 'from django.db import migrations, models\n'), ((8249, 8363), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.SET_NULL', 'blank': '(True)', 'to': '"""travel.TravelFlag"""', 'null': '(True)'}), "(on_delete=django.db.models.deletion.SET_NULL, blank=True,\n to='travel.TravelFlag', null=True)\n", (8266, 8363), False, 'from django.db import migrations, models\n'), ((8484, 8618), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.SET_NULL', 'related_name': '"""state_set"""', 'blank': '(True)', 'to': '"""travel.TravelEntity"""', 'null': '(True)'}), "(on_delete=django.db.models.SET_NULL, related_name=\n 'state_set', blank=True, to='travel.TravelEntity', null=True)\n", (8501, 8618), False, 'from django.db import migrations, models\n'), ((8737, 8852), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.PROTECT', 'related_name': '"""entity_set"""', 'to': '"""travel.TravelEntityType"""'}), "(on_delete=django.db.models.PROTECT, related_name=\n 'entity_set', to='travel.TravelEntityType')\n", (8754, 8852), False, 'from django.db import migrations, models\n'), ((8979, 9027), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""travel.TravelEntity"""'}), "(to='travel.TravelEntity')\n", (9001, 9027), False, 'from django.db import migrations, models\n'), ((9156, 9281), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.SET_NULL', 'default': 'None', 'blank': '(True)', 'to': 'settings.AUTH_USER_MODEL', 'null': '(True)'}), '(on_delete=django.db.models.SET_NULL, default=None, blank=\n True, to=settings.AUTH_USER_MODEL, null=True)\n', (9173, 9281), False, 'from django.db import migrations, models\n'), ((462, 555), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (478, 555), False, 'from django.db import migrations, models\n'), ((580, 612), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (596, 612), False, 'from django.db import migrations, models\n'), ((645, 678), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (664, 678), False, 'from django.db import migrations, models\n'), ((713, 741), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (729, 741), False, 'from django.db import migrations, models\n'), ((776, 811), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (796, 811), False, 'from django.db import migrations, models\n'), ((1039, 1104), 
'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'serialize': '(False)', 'primary_key': '(True)'}), '(max_length=4, serialize=False, primary_key=True)\n', (1055, 1104), False, 'from django.db import migrations, models\n'), ((1132, 1163), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1148, 1163), False, 'from django.db import migrations, models\n'), ((1195, 1237), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(8)', 'blank': '(True)'}), '(max_length=8, blank=True)\n', (1211, 1237), False, 'from django.db import migrations, models\n'), ((1274, 1317), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)', 'blank': '(True)'}), '(max_length=15, blank=True)\n', (1290, 1317), False, 'from django.db import migrations, models\n'), ((1345, 1387), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'blank': '(True)'}), '(max_length=4, blank=True)\n', (1361, 1387), False, 'from django.db import migrations, models\n'), ((1419, 1461), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'blank': '(True)'}), '(max_length=4, blank=True)\n', (1435, 1461), False, 'from django.db import migrations, models\n'), ((1683, 1776), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (1699, 1776), False, 'from django.db import migrations, models\n'), ((1805, 1835), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1824, 1835), False, 'from django.db import migrations, models\n'), ((1863, 1908), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(6)', 'db_index': '(True)'}), '(max_length=6, db_index=True)\n', (1879, 1908), False, 'from django.db import migrations, models\n'), ((1936, 1968), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(175)'}), '(max_length=175)\n', (1952, 1968), False, 'from django.db import migrations, models\n'), ((2001, 2033), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(175)'}), '(max_length=175)\n', (2017, 2033), False, 'from django.db import migrations, models\n'), ((2060, 2134), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'null': '(True)', 'max_digits': '(7)', 'decimal_places': '(4)', 'blank': '(True)'}), '(null=True, max_digits=7, decimal_places=4, blank=True)\n', (2079, 2134), False, 'from django.db import migrations, models\n'), ((2161, 2235), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'null': '(True)', 'max_digits': '(7)', 'decimal_places': '(4)', 'blank': '(True)'}), '(null=True, max_digits=7, decimal_places=4, blank=True)\n', (2180, 2235), False, 'from django.db import migrations, models\n'), ((2267, 2309), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'blank': '(True)'}), '(max_length=4, blank=True)\n', (2283, 2309), False, 'from django.db import migrations, models\n'), ((2341, 2385), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)', 'blank': '(True)'}), '(max_length=256, blank=True)\n', (2357, 2385), False, 'from django.db import migrations, models\n'), ((2411, 2480), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)', 'verbose_name': "b'timezone'", 'blank': '(True)'}), 
"(max_length=40, verbose_name=b'timezone', blank=True)\n", (2427, 2480), False, 'from django.db import migrations, models\n'), ((2511, 2647), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.SET_NULL', 'related_name': '"""capital_set"""', 'blank': '(True)', 'to': '"""travel.TravelEntity"""', 'null': '(True)'}), "(on_delete=django.db.models.SET_NULL, related_name=\n 'capital_set', blank=True, to='travel.TravelEntity', null=True)\n", (2528, 2647), False, 'from django.db import migrations, models\n'), ((2675, 2813), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.SET_NULL', 'related_name': '"""continent_set"""', 'blank': '(True)', 'to': '"""travel.TravelEntity"""', 'null': '(True)'}), "(on_delete=django.db.models.SET_NULL, related_name=\n 'continent_set', blank=True, to='travel.TravelEntity', null=True)\n", (2692, 2813), False, 'from django.db import migrations, models\n'), ((2839, 2975), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.SET_NULL', 'related_name': '"""country_set"""', 'blank': '(True)', 'to': '"""travel.TravelEntity"""', 'null': '(True)'}), "(on_delete=django.db.models.SET_NULL, related_name=\n 'country_set', blank=True, to='travel.TravelEntity', null=True)\n", (2856, 2975), False, 'from django.db import migrations, models\n'), ((3233, 3326), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (3249, 3326), False, 'from django.db import migrations, models\n'), ((3350, 3392), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3)', 'blank': '(True)'}), '(max_length=3, blank=True)\n', (3366, 3392), False, 'from django.db import migrations, models\n'), ((3421, 3464), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)', 'blank': '(True)'}), '(max_length=40, blank=True)\n', (3437, 3464), False, 'from django.db import migrations, models\n'), ((3494, 3537), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)', 'blank': '(True)'}), '(max_length=60, blank=True)\n', (3510, 3537), False, 'from django.db import migrations, models\n'), ((3575, 3619), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)'}), '(max_length=100, blank=True)\n', (3591, 3619), False, 'from django.db import migrations, models\n'), ((3648, 3691), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)'}), '(max_length=20, blank=True)\n', (3664, 3691), False, 'from django.db import migrations, models\n'), ((3725, 3768), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)', 'blank': '(True)'}), '(max_length=40, blank=True)\n', (3741, 3768), False, 'from django.db import migrations, models\n'), ((3803, 3846), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)', 'blank': '(True)'}), '(max_length=60, blank=True)\n', (3819, 3846), False, 'from django.db import migrations, models\n'), ((3873, 3915), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(8)', 'blank': '(True)'}), '(max_length=8, blank=True)\n', (3889, 3915), False, 'from django.db import migrations, models\n'), ((3949, 4005), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': 
'(True)'}), '(default=None, null=True, blank=True)\n', (3968, 4005), False, 'from django.db import migrations, models\n'), ((4033, 4089), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': 'None', 'null': '(True)', 'blank': '(True)'}), '(default=None, null=True, blank=True)\n', (4052, 4089), False, 'from django.db import migrations, models\n'), ((4121, 4231), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.SET_NULL', 'blank': '(True)', 'to': '"""travel.TravelCurrency"""', 'null': '(True)'}), "(on_delete=django.db.models.SET_NULL, blank=True, to=\n 'travel.TravelCurrency', null=True)\n", (4138, 4231), False, 'from django.db import migrations, models\n'), ((4256, 4370), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.CASCADE', 'related_name': '"""entityinfo"""', 'to': '"""travel.TravelEntity"""'}), "(on_delete=django.db.models.CASCADE, related_name=\n 'entityinfo', to='travel.TravelEntity')\n", (4276, 4370), False, 'from django.db import migrations, models\n'), ((4593, 4686), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (4609, 4686), False, 'from django.db import migrations, models\n'), ((4710, 4755), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'db_index': '(True)'}), '(max_length=4, db_index=True)\n', (4726, 4755), False, 'from django.db import migrations, models\n'), ((4784, 4815), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(25)'}), '(max_length=25)\n', (4800, 4815), False, 'from django.db import migrations, models\n'), ((5037, 5130), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (5053, 5130), False, 'from django.db import migrations, models\n'), ((5156, 5188), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (5172, 5188), False, 'from django.db import migrations, models\n'), ((5220, 5250), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(8)'}), '(max_length=8)\n', (5236, 5250), False, 'from django.db import migrations, models\n'), ((5277, 5307), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(6)'}), '(max_length=6)\n', (5293, 5307), False, 'from django.db import migrations, models\n'), ((5336, 5365), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)'}), '(blank=True)\n', (5353, 5365), False, 'from django.db import migrations, models\n'), ((5394, 5423), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)'}), '(blank=True)\n', (5411, 5423), False, 'from django.db import migrations, models\n'), ((5450, 5514), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': 'travel.models.svg_upload', 'blank': '(True)'}), '(upload_to=travel.models.svg_upload, blank=True)\n', (5466, 5514), False, 'from django.db import migrations, models\n'), ((5547, 5581), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (5566, 5581), False, 'from django.db import migrations, models\n'), ((5801, 5894), 'django.db.models.AutoField', 
'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (5817, 5894), False, 'from django.db import migrations, models\n'), ((5922, 5964), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2)', 'blank': '(True)'}), '(max_length=2, blank=True)\n', (5938, 5964), False, 'from django.db import migrations, models\n'), ((5996, 6039), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(12)', 'blank': '(True)'}), '(max_length=12, blank=True)\n', (6012, 6039), False, 'from django.db import migrations, models\n'), ((6071, 6113), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3)', 'blank': '(True)'}), '(max_length=3, blank=True)\n', (6087, 6113), False, 'from django.db import migrations, models\n'), ((6141, 6172), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (6157, 6172), False, 'from django.db import migrations, models\n'), ((6307, 6400), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (6323, 6400), False, 'from django.db import migrations, models\n'), ((6427, 6449), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (6447, 6449), False, 'from django.db import migrations, models\n'), ((6479, 6699), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'default': '(3)', 'choices': "[(1, b'★★★★★'), (2,\n b'★★★★'), (3, b'★★★'), (4,\n b'★★'), (5, b'★')]"}), "(default=3, choices=[(1,\n b'★★★★★'), (2,\n b'★★★★'), (3, b'★★★'), (4,\n b'★★'), (5, b'★')])\n", (6511, 6699), False, 'from django.db import migrations, models\n'), ((6716, 6744), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (6732, 6744), False, 'from django.db import migrations, models\n'), ((6774, 6853), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.CASCADE', 'to': '"""travel.TravelEntity"""'}), "(on_delete=django.db.models.CASCADE, to='travel.TravelEntity')\n", (6791, 6853), False, 'from django.db import migrations, models\n'), ((6881, 6998), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.CASCADE', 'related_name': '"""travellog_set"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.CASCADE, related_name=\n 'travellog_set', to=settings.AUTH_USER_MODEL)\n", (6898, 6998), False, 'from django.db import migrations, models\n'), ((7256, 7349), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (7272, 7349), False, 'from django.db import migrations, models\n'), ((7375, 7498), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""PRO"""', 'max_length': '(3)', 'choices': "[('PUB', b'Public'), ('PRI', b'Private'), ('PRO', b'Protected')]"}), "(default='PRO', max_length=3, choices=[('PUB', b'Public'),\n ('PRI', b'Private'), ('PRO', b'Protected')])\n", (7391, 7498), False, 'from django.db import migrations, models\n'), ((7522, 7643), 'django.db.models.OneToOneField', 
'models.OneToOneField', ([], {'on_delete': 'django.db.models.CASCADE', 'related_name': '"""travel_profile"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.CASCADE, related_name=\n 'travel_profile', to=settings.AUTH_USER_MODEL)\n", (7542, 7643), False, 'from django.db import migrations, models\n')]
|
import sys
sys.path.insert(0, 'dependencies')
import boto3
import json
import os
import shutil
from glob import glob
from os.path import join
from sqlite3 import connect
S3 = boto3.client('s3')
BUCKET = os.environ['S3_BUCKET']
CAMPAIGN_TILES = 'campaign.mbtiles'
PATH = '/tmp'
def list_mbtiles(uuid):
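    # Return the S3 keys of all .mbtiles files stored under the campaign's mbtiles/ prefix.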
mbtiles_folder = 'campaigns/{0}/mbtiles/'.format(uuid)
mbtiles = S3.list_objects_v2(
Bucket=BUCKET,
Prefix=mbtiles_folder
)
mbtiles = [m['Key'] for m in mbtiles['Contents']
if m['Key'].endswith('.mbtiles')]
return mbtiles
def merge_tiles(folder_path, merge_file):
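    # Merge every .mbtiles file in folder_path into a single file: the first file is
    # copied as the base, then tiles from the remaining files are inserted (or
    # replaced) row by row via sqlite.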
mbtiles = glob('{0}/*.mbtiles'.format(folder_path))
mbtile = mbtiles.pop(0)
shutil.copy(mbtile, merge_file)
dst_conn = connect(merge_file)
dst_cursor = dst_conn.cursor()
query = '''INSERT OR REPLACE INTO
tiles(zoom_level, tile_column, tile_row, tile_data)
VALUES (?,?,?,?);'''
for mbtile in mbtiles:
src_conn = connect(mbtile)
src_cursor = src_conn.cursor()
sql_text = 'SELECT * FROM tiles'
src_cursor.execute(sql_text)
row = src_cursor.fetchone()
while row is not None:
dst_cursor.execute(query, row)
row = src_cursor.fetchone()
dst_conn.commit()
def lambda_handler(event, context):
try:
main(event)
except Exception as e:
error_dict = {'function': 'process_merge_mbtiles', 'failure': str(e)}
key = f'campaigns/{event["uuid"]}/failure.json'
S3.put_object(
Bucket=BUCKET,
Key=key,
Body=json.dumps(error_dict),
ACL='public-read')
def main(event):
uuid = event['uuid']
folder_path = join(PATH, uuid)
if os.path.isdir(folder_path):
shutil.rmtree(folder_path)
os.mkdir(folder_path)
# Download all one by one.
for mbtile in list_mbtiles(uuid):
file_name = mbtile.split('/')[-1]
S3.download_file(BUCKET,
mbtile,
join(folder_path, file_name)
)
# Merge using sqlite.
merge_file = join(PATH, CAMPAIGN_TILES)
merge_tiles(folder_path, merge_file)
key = 'campaigns/{0}/{1}'.format(uuid, CAMPAIGN_TILES)
with open(merge_file, "rb") as data:
S3.upload_fileobj(
Fileobj=data,
Bucket=BUCKET,
Key=key,
ExtraArgs={'ACL': 'public-read'}
)
|
[
"os.mkdir",
"boto3.client",
"os.path.isdir",
"sys.path.insert",
"json.dumps",
"sqlite3.connect",
"shutil.rmtree",
"os.path.join",
"shutil.copy"
] |
[((11, 45), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""dependencies"""'], {}), "(0, 'dependencies')\n", (26, 45), False, 'import sys\n'), ((177, 195), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (189, 195), False, 'import boto3\n'), ((705, 736), 'shutil.copy', 'shutil.copy', (['mbtile', 'merge_file'], {}), '(mbtile, merge_file)\n', (716, 736), False, 'import shutil\n'), ((753, 772), 'sqlite3.connect', 'connect', (['merge_file'], {}), '(merge_file)\n', (760, 772), False, 'from sqlite3 import connect\n'), ((1728, 1744), 'os.path.join', 'join', (['PATH', 'uuid'], {}), '(PATH, uuid)\n', (1732, 1744), False, 'from os.path import join\n'), ((1753, 1779), 'os.path.isdir', 'os.path.isdir', (['folder_path'], {}), '(folder_path)\n', (1766, 1779), False, 'import os\n'), ((1820, 1841), 'os.mkdir', 'os.mkdir', (['folder_path'], {}), '(folder_path)\n', (1828, 1841), False, 'import os\n'), ((2101, 2127), 'os.path.join', 'join', (['PATH', 'CAMPAIGN_TILES'], {}), '(PATH, CAMPAIGN_TILES)\n', (2105, 2127), False, 'from os.path import join\n'), ((983, 998), 'sqlite3.connect', 'connect', (['mbtile'], {}), '(mbtile)\n', (990, 998), False, 'from sqlite3 import connect\n'), ((1789, 1815), 'shutil.rmtree', 'shutil.rmtree', (['folder_path'], {}), '(folder_path)\n', (1802, 1815), False, 'import shutil\n'), ((2019, 2047), 'os.path.join', 'join', (['folder_path', 'file_name'], {}), '(folder_path, file_name)\n', (2023, 2047), False, 'from os.path import join\n'), ((1610, 1632), 'json.dumps', 'json.dumps', (['error_dict'], {}), '(error_dict)\n', (1620, 1632), False, 'import json\n')]
|
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Union, List, Type
import os
from ruamel.yaml.scalarstring import DoubleQuotedScalarString
from kipoi_containers.singularityhelper import (
build_singularity_image,
update_existing_singularity_container,
push_new_singularity_image,
test_singularity_image,
cleanup,
)
from kipoi_containers import zenodoclient
from kipoi_utils.external.torchvision.dataset_utils import check_integrity
@dataclass
class SingularityHandler:
"""This is a dataclass to be instantiated in order to update and
    add singularity images"""
model_group: str
docker_image_name: str
model_group_to_singularity_dict: Dict
workflow_release_data: Dict
singularity_image_folder: Union[str, Path] = None
zenodo_client: zenodoclient.Client = zenodoclient.Client()
def __post_init__(self):
"""If a location has not been specified for saving the downloaded
singularity containers to, a value is populated from
        the SINGULARITY_PULL_FOLDER environment variable. If there is no
        such variable, the directory containing this file is used as the default."""
if self.singularity_image_folder is None:
self.singularity_image_folder = os.environ.get(
"SINGULARITY_PULL_FOLDER", Path(__file__).parent.resolve()
)
def update_container_info(self, updated_singularity_dict: Dict) -> None:
"""Update url, md5 and name keys of the model group's singularity
        container dict with the corresponding values from updated_singularity_dict"""
self.model_group_to_singularity_dict[self.model_group] = {
k: v
for k, v in updated_singularity_dict.items()
if k in ["url", "name", "md5"]
}
def update_release_workflow(self) -> None:
"""Update .github/workflows/release-workflow.yml with the newly
added model group if it is not using one of the shared environments"""
if "shared" not in self.singularity_image_name:
self.workflow_release_data["jobs"]["buildtestandpushsingularity"][
"strategy"
]["matrix"]["image"].append(
DoubleQuotedScalarString(
self.docker_image_name.split(":")[1].replace("-slim", "")
)
)
def add(
self,
models_to_test: List,
docker_to_model_dict: Dict = {},
push: bool = True,
) -> None:
"""Adds a new singularity image. The steps are as follows -
1. First, the new image is built and saved in
singularity_image_folder from the docker image
2. This new singularity image is tested with the models in
<models_to_test>
3. If everything is fine, push the image to zenodo and return
the modified url, name and md5 as a dict
4. Update <model_group_to_singularity_dict> with the new model
        group as key and the dictionary with url, md5 and name as values"""
if "shared" in self.docker_image_name:
self.singularity_image_name = (
f"kipoi-docker_{self.docker_image_name.split(':')[1]}.sif"
)
else:
self.singularity_image_name = (
f"kipoi-docker_{self.model_group.lower()}-slim.sif"
)
self.singularity_dict = {
"url": "",
"name": self.singularity_image_name.replace(".sif", ""),
"md5": "",
}
build_singularity_image(
name_of_docker_image=self.docker_image_name,
singularity_image_name=self.singularity_image_name,
singularity_image_folder=self.singularity_image_folder,
)
for model in models_to_test:
test_singularity_image(
singularity_image_folder=self.singularity_image_folder,
singularity_image_name=self.singularity_image_name,
model=model,
)
if "shared" not in self.docker_image_name:
new_singularity_dict = push_new_singularity_image(
zenodo_client=self.zenodo_client,
singularity_image_folder=self.singularity_image_folder,
singularity_dict=self.singularity_dict,
model_group=self.model_group,
push=push,
)
else:
example_model = docker_to_model_dict[
self.docker_image_name.replace("-slim", "")
][0]
new_singularity_dict = self.model_group_to_singularity_dict[
example_model.split("/")[0]
]
self.update_container_info(new_singularity_dict)
self.update_release_workflow()
def update(self, models_to_test: List, push: bool = True) -> None:
"""Updates an existing singularity image. The steps are as follows -
1. First, a singularity image is built and saved in
singularity_image_folder from the docker image
2. A checksum is computed and compared against the existing md5 key
3. If the new image is identical to the existing one,
a cleanup is performed followed by an exit.
        4. Otherwise, this new singularity image is tested with the models in
        <models_to_test>
        5. If everything is fine, push the new image to zenodo as a new version
        and return the modified url, name and md5 as a dict
        6. Update <model_group_to_singularity_dict> with the new model
        group as key and the dictionary with url, md5 and name as values"""
self.singularity_dict = self.model_group_to_singularity_dict[
self.model_group
]
self.singularity_image_name = f'{self.singularity_dict["name"]}.sif'
singularity_image_path = build_singularity_image(
name_of_docker_image=self.docker_image_name,
singularity_image_name=self.singularity_image_name,
singularity_image_folder=self.singularity_image_folder,
)
checksum_match = check_integrity(
singularity_image_path, self.singularity_dict["md5"]
)
if checksum_match:
print(
f"No need to update the existing singularity container for {self.model_group}"
)
cleanup(singularity_image_path)
else:
for model in models_to_test:
test_singularity_image(
singularity_image_folder=self.singularity_image_folder,
singularity_image_name=self.singularity_image_name,
model=model,
)
updated_singularity_dict = update_existing_singularity_container(
zenodo_client=self.zenodo_client,
singularity_dict=self.singularity_dict,
singularity_image_folder=self.singularity_image_folder,
model_group=self.model_group,
push=push,
)
cleanup(singularity_image_path)
self.update_container_info(updated_singularity_dict)
|
[
"kipoi_containers.singularityhelper.test_singularity_image",
"kipoi_containers.singularityhelper.push_new_singularity_image",
"kipoi_containers.zenodoclient.Client",
"kipoi_containers.singularityhelper.cleanup",
"pathlib.Path",
"kipoi_utils.external.torchvision.dataset_utils.check_integrity",
"kipoi_containers.singularityhelper.update_existing_singularity_container",
"kipoi_containers.singularityhelper.build_singularity_image"
] |
[((849, 870), 'kipoi_containers.zenodoclient.Client', 'zenodoclient.Client', ([], {}), '()\n', (868, 870), False, 'from kipoi_containers import zenodoclient\n'), ((3520, 3704), 'kipoi_containers.singularityhelper.build_singularity_image', 'build_singularity_image', ([], {'name_of_docker_image': 'self.docker_image_name', 'singularity_image_name': 'self.singularity_image_name', 'singularity_image_folder': 'self.singularity_image_folder'}), '(name_of_docker_image=self.docker_image_name,\n singularity_image_name=self.singularity_image_name,\n singularity_image_folder=self.singularity_image_folder)\n', (3543, 3704), False, 'from kipoi_containers.singularityhelper import build_singularity_image, update_existing_singularity_container, push_new_singularity_image, test_singularity_image, cleanup\n'), ((5807, 5991), 'kipoi_containers.singularityhelper.build_singularity_image', 'build_singularity_image', ([], {'name_of_docker_image': 'self.docker_image_name', 'singularity_image_name': 'self.singularity_image_name', 'singularity_image_folder': 'self.singularity_image_folder'}), '(name_of_docker_image=self.docker_image_name,\n singularity_image_name=self.singularity_image_name,\n singularity_image_folder=self.singularity_image_folder)\n', (5830, 5991), False, 'from kipoi_containers.singularityhelper import build_singularity_image, update_existing_singularity_container, push_new_singularity_image, test_singularity_image, cleanup\n'), ((6056, 6125), 'kipoi_utils.external.torchvision.dataset_utils.check_integrity', 'check_integrity', (['singularity_image_path', "self.singularity_dict['md5']"], {}), "(singularity_image_path, self.singularity_dict['md5'])\n", (6071, 6125), False, 'from kipoi_utils.external.torchvision.dataset_utils import check_integrity\n'), ((3793, 3946), 'kipoi_containers.singularityhelper.test_singularity_image', 'test_singularity_image', ([], {'singularity_image_folder': 'self.singularity_image_folder', 'singularity_image_name': 'self.singularity_image_name', 'model': 'model'}), '(singularity_image_folder=self.\n singularity_image_folder, singularity_image_name=self.\n singularity_image_name, model=model)\n', (3815, 3946), False, 'from kipoi_containers.singularityhelper import build_singularity_image, update_existing_singularity_container, push_new_singularity_image, test_singularity_image, cleanup\n'), ((4086, 4295), 'kipoi_containers.singularityhelper.push_new_singularity_image', 'push_new_singularity_image', ([], {'zenodo_client': 'self.zenodo_client', 'singularity_image_folder': 'self.singularity_image_folder', 'singularity_dict': 'self.singularity_dict', 'model_group': 'self.model_group', 'push': 'push'}), '(zenodo_client=self.zenodo_client,\n singularity_image_folder=self.singularity_image_folder,\n singularity_dict=self.singularity_dict, model_group=self.model_group,\n push=push)\n', (4112, 4295), False, 'from kipoi_containers.singularityhelper import build_singularity_image, update_existing_singularity_container, push_new_singularity_image, test_singularity_image, cleanup\n'), ((6315, 6346), 'kipoi_containers.singularityhelper.cleanup', 'cleanup', (['singularity_image_path'], {}), '(singularity_image_path)\n', (6322, 6346), False, 'from kipoi_containers.singularityhelper import build_singularity_image, update_existing_singularity_container, push_new_singularity_image, test_singularity_image, cleanup\n'), ((6680, 6897), 'kipoi_containers.singularityhelper.update_existing_singularity_container', 'update_existing_singularity_container', ([], {'zenodo_client': 'self.zenodo_client', 
'singularity_dict': 'self.singularity_dict', 'singularity_image_folder': 'self.singularity_image_folder', 'model_group': 'self.model_group', 'push': 'push'}), '(zenodo_client=self.zenodo_client,\n singularity_dict=self.singularity_dict, singularity_image_folder=self.\n singularity_image_folder, model_group=self.model_group, push=push)\n', (6717, 6897), False, 'from kipoi_containers.singularityhelper import build_singularity_image, update_existing_singularity_container, push_new_singularity_image, test_singularity_image, cleanup\n'), ((6996, 7027), 'kipoi_containers.singularityhelper.cleanup', 'cleanup', (['singularity_image_path'], {}), '(singularity_image_path)\n', (7003, 7027), False, 'from kipoi_containers.singularityhelper import build_singularity_image, update_existing_singularity_container, push_new_singularity_image, test_singularity_image, cleanup\n'), ((6418, 6571), 'kipoi_containers.singularityhelper.test_singularity_image', 'test_singularity_image', ([], {'singularity_image_folder': 'self.singularity_image_folder', 'singularity_image_name': 'self.singularity_image_name', 'model': 'model'}), '(singularity_image_folder=self.\n singularity_image_folder, singularity_image_name=self.\n singularity_image_name, model=model)\n', (6440, 6571), False, 'from kipoi_containers.singularityhelper import build_singularity_image, update_existing_singularity_container, push_new_singularity_image, test_singularity_image, cleanup\n'), ((1328, 1342), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1332, 1342), False, 'from pathlib import Path\n')]
|
import unittest
import test
def provisioner(*args):
return test.scloud("provisioner", *args)
class TestProvisioner(unittest.TestCase):
def setUp(self):
# retrieve the selected tenant name
code, self.tname, _ = test.scloud("get", "tenant")
self.assertEqual(0, code)
self.assertIsNotNone(self.tname)
def test_tenants(self):
code, tenants, _ = provisioner("list-tenants")
self.assertEqual(0, code)
self.assertTrue(any(t["name"] == self.tname for t in tenants))
code, tenant, _ = provisioner("get-tenant", self.tname)
self.assertEqual(0, code)
self.assertEqual(self.tname, tenant["name"])
self.assertTrue("createdAt" in tenant)
self.assertTrue("createdBy" in tenant)
if __name__ == "__main__":
unittest.main()
|
[
"test.scloud",
"unittest.main"
] |
[((66, 99), 'test.scloud', 'test.scloud', (['"""provisioner"""', '*args'], {}), "('provisioner', *args)\n", (77, 99), False, 'import test\n'), ((812, 827), 'unittest.main', 'unittest.main', ([], {}), '()\n', (825, 827), False, 'import unittest\n'), ((240, 268), 'test.scloud', 'test.scloud', (['"""get"""', '"""tenant"""'], {}), "('get', 'tenant')\n", (251, 268), False, 'import test\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os.path
from django.utils import six
from django.db import models
from django import template
from django.urls import (
NoReverseMatch,
reverse,
)
from django.utils.html import escape
from django.utils.safestring import mark_safe
from cruds import utils
register = template.Library()
@register.filter
def get_attr(obj, attr):
"""
Filter returns obj attribute.
"""
return getattr(obj, attr)
@register.simple_tag
def crud_url(obj, action):
try:
url = reverse(
utils.crud_url_name(type(obj), action),
kwargs={'pk': obj.pk})
except NoReverseMatch:
url = None
return url
def format_value_instance(value):
url = crud_url(value, utils.ACTION_DETAIL)
if url:
return mark_safe('<a href="%s">%s</a>' % (url, escape(value)))
if hasattr(value, 'get_absolute_url'):
url = getattr(value, 'get_absolute_url')()
return mark_safe('<a href="%s">%s</a>' % (url, escape(value)))
return value
@register.filter
def format_value(obj, field_name):
"""
Simple value formatting.
    If the value is a model instance, returns a link to its detail view if one exists.
"""
display_func = getattr(obj, 'get_%s_display' % field_name, None)
if display_func:
return display_func()
value = getattr(obj, field_name)
if isinstance(value, models.fields.files.FieldFile):
if value:
return mark_safe('<a href="%s">%s</a>' % (
value.url,
os.path.basename(value.name),
))
else:
return ''
if isinstance(value, models.Model):
return format_value_instance(value)
if isinstance(value, models.Manager):
return mark_safe(', '.join(
[format_value_instance(instance) for instance in value.all()]
))
if value is None:
value = ""
return value
@register.inclusion_tag('cruds/templatetags/crud_fields.html')
def crud_fields(obj, fields=None):
"""
Display object fields in table rows::
<table>
        {% crud_fields object 'id, name' %}
</table>
* ``fields`` fields to include
If fields is ``None`` all fields will be displayed.
If fields is ``string`` comma separated field names will be
displayed.
    if fields is a dictionary, each key should be a field name and its value
    the field's verbose name.
"""
if fields is None:
fields = utils.get_fields(type(obj))
elif isinstance(fields, six.string_types):
field_names = [f.strip() for f in fields.split(',')]
fields = utils.get_fields(type(obj), include=field_names)
return {
'object': obj,
'fields': fields,
}
@register.simple_tag
def get_fields(model, fields=None):
"""
Assigns fields for model.
"""
include = [f.strip() for f in fields.split(',')] if fields else None
return utils.get_fields(
model,
include
)
|
[
"django.template.Library",
"cruds.utils.get_fields",
"django.utils.html.escape"
] |
[((349, 367), 'django.template.Library', 'template.Library', ([], {}), '()\n', (365, 367), False, 'from django import template\n'), ((2971, 3003), 'cruds.utils.get_fields', 'utils.get_fields', (['model', 'include'], {}), '(model, include)\n', (2987, 3003), False, 'from cruds import utils\n'), ((872, 885), 'django.utils.html.escape', 'escape', (['value'], {}), '(value)\n', (878, 885), False, 'from django.utils.html import escape\n'), ((1037, 1050), 'django.utils.html.escape', 'escape', (['value'], {}), '(value)\n', (1043, 1050), False, 'from django.utils.html import escape\n')]
|
from ferris.core.ndb import Behavior
from app.behaviors.sanitize import Sanitize
class MailBehavior(Behavior):
sanitizer = Sanitize()
def before_put(self, instance):
instance.sender = self.sanitizer.sanitize_email(instance.sender)
instance.recipient = self.sanitizer.sanitize_email(instance.recipient)
instance.subject = self.sanitizer.sanitize_text(instance.subject)
instance.message = self.sanitizer.sanitize_text(instance.message)
|
[
"app.behaviors.sanitize.Sanitize"
] |
[((129, 139), 'app.behaviors.sanitize.Sanitize', 'Sanitize', ([], {}), '()\n', (137, 139), False, 'from app.behaviors.sanitize import Sanitize\n')]
|
import numpy as np
import random as rd
import tensorflow as tf
from tensorflow import keras
class Brain():
def __init__(self,brain_spec, random = True, weights = None):
self.brain_spec = brain_spec
##INIT
        # This is a new brain
self.neurones = keras.Sequential()
for i in range(len(brain_spec)-2):
#init the weights between two layers, with matrix [layer_i,layer_i+1] and the bias
self.neurones.add(keras.layers.Dense(brain_spec[i+1],activation= "elu",input_shape=(brain_spec[i],)))
#output layer
self.neurones.add(keras.layers.Dense(brain_spec[-1], activation="softmax"))
        # In case specific weights were provided
if not(random):
assert(weights != None)
self.neurones.set_weights(weights)
#self.brain.compile(optimizer="adam", loss =t.tanh_custom_loss,metrics=[t.tanh_custom_loss])
self.optimizer = keras.optimizers.Adam(learning_rate=0.01)
def think(self, x):
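        # Forward pass on a single observation; returns the output activations as a 1-D numpy array.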
return(self.neurones(np.expand_dims(x,axis=0))).numpy()[0]
def mutate(self,mutation_factor = 0.1):
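        # Perturb a random subset of weights by a factor drawn uniformly from [-mutation_factor, mutation_factor].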
weights = self.neurones.get_weights()
for layer in weights:
layer += layer*rd.uniform(-1*mutation_factor,1*mutation_factor)*np.random.randint(2,size=layer.shape)
self.neurones.set_weights(weights)
def expand(self):
pass
def learn(self,memory):
pass
if __name__ == "__main__":
TEST = True
if TEST:
test_input = np.array([1,1,1,1])
output_size = 4
brain_spec = [test_input.shape[0],5,output_size]
print("#################### RANDOM INIT ######################################")
head = Brain(brain_spec,random = True)
print(head.neurones.get_weights())
print("#################### DEFINE INIT ######################################")
head = Brain(brain_spec,random = False, weights=head.neurones.get_weights())
print(head.neurones.get_weights())
print(head.neurones.summary())
print("#################### MUTATING ###########################################")
head.mutate()
print(head.neurones.get_weights())
##THINK
print("#################### THINKING ############################################")
print(head.think(test_input))
##LEARN
print(head.neurones.trainable_variables)
print("#################### LEARNING ############################################")
memory = [np.array([[1.0,1.0,10.0,10.0]]),np.array([2.0])]
head.learn(memory)
|
[
"tensorflow.keras.layers.Dense",
"random.uniform",
"numpy.expand_dims",
"numpy.random.randint",
"numpy.array",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.Sequential"
] |
[((284, 302), 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (300, 302), False, 'from tensorflow import keras\n'), ((941, 982), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (962, 982), False, 'from tensorflow import keras\n'), ((1545, 1567), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (1553, 1567), True, 'import numpy as np\n'), ((603, 659), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['brain_spec[-1]'], {'activation': '"""softmax"""'}), "(brain_spec[-1], activation='softmax')\n", (621, 659), False, 'from tensorflow import keras\n'), ((2561, 2595), 'numpy.array', 'np.array', (['[[1.0, 1.0, 10.0, 10.0]]'], {}), '([[1.0, 1.0, 10.0, 10.0]])\n', (2569, 2595), True, 'import numpy as np\n'), ((2593, 2608), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (2601, 2608), True, 'import numpy as np\n'), ((471, 561), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['brain_spec[i + 1]'], {'activation': '"""elu"""', 'input_shape': '(brain_spec[i],)'}), "(brain_spec[i + 1], activation='elu', input_shape=(\n brain_spec[i],))\n", (489, 561), False, 'from tensorflow import keras\n'), ((1279, 1317), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': 'layer.shape'}), '(2, size=layer.shape)\n', (1296, 1317), True, 'import numpy as np\n'), ((1230, 1283), 'random.uniform', 'rd.uniform', (['(-1 * mutation_factor)', '(1 * mutation_factor)'], {}), '(-1 * mutation_factor, 1 * mutation_factor)\n', (1240, 1283), True, 'import random as rd\n'), ((1039, 1064), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1053, 1064), True, 'import numpy as np\n')]
|
import time
from abc import abstractmethod
from typing import List
from alfi.models import LFM
import torch
import numpy as np
import gpytorch
from torch.utils.data.dataloader import DataLoader
from alfi.utilities.torch import is_cuda
from alfi.datasets import LFMDataset
class Trainer:
"""
An abstract LFM trainer. Subclasses must implement the `single_epoch` function.
Parameters
----------
lfm: The Latent Force Model.
optimizers: list of `torch.optim.Optimizer`s. For when natural gradients are used for variational models.
dataset: Dataset where t_observed (D, T), m_observed (J, T).
give_output: whether the trainers should give the first output (y_0) as initial value to the model `forward()`
track_parameters: the keys into `named_parameters()` of parameters that the trainer should track. The
tracked parameters can be accessed from `parameter_trace`
train_mask: boolean mask
"""
def __init__(self,
lfm: LFM,
optimizers: List[torch.optim.Optimizer],
dataset: LFMDataset,
batch_size=1,
give_output=False,
track_parameters=None,
train_mask=None,
checkpoint_dir=None):
self.lfm = lfm
self.num_epochs = 0
self.optimizers = optimizers
self.use_natural_gradient = len(self.optimizers) > 1
self.batch_size = batch_size
self.data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
self.losses = None
self.give_output = give_output
self.train_mask = train_mask
self.checkpoint_dir = checkpoint_dir
self.parameter_trace = None
if track_parameters is not None:
named_params = dict(lfm.named_parameters())
self.parameter_trace = {key: [named_params[key].detach()] for key in track_parameters}
def train(self, epochs=20, report_interval=1, reporter_callback=None, **kwargs):
"""
Parameters:
reporter_callback: function called every report_interval
"""
self.lfm.train()
losses = list()
times = list()
end_epoch = self.num_epochs+epochs
for epoch in range(epochs):
epoch_loss, split_loss = self.single_epoch(epoch=self.num_epochs, **kwargs)
t = time.time()
times.append((t, epoch_loss))
if (epoch % report_interval) == 0:
if reporter_callback is not None:
reporter_callback(self.num_epochs)
print('Epoch %03d/%03d - Loss: %.2f (' % (
self.num_epochs + 1, end_epoch, epoch_loss), end='')
print(' '.join(map(lambda l: '%.2f' % l, split_loss)), end='')
if isinstance(self.lfm, gpytorch.models.GP):
kernel = self.lfm.covar_module
print(f') λ: {str(kernel.lengthscale.view(-1).detach().numpy())}', end='')
elif hasattr(self.lfm, 'gp_model'):
print(f') kernel: {self.lfm.summarise_gp_hyp()}', end='')
else:
print(')', end='')
self.print_extra()
if self.checkpoint_dir is not None:
self.lfm.save(self.checkpoint_dir / f'epoch{epoch}')
losses.append(split_loss)
self.after_epoch()
self.num_epochs += 1
losses = torch.tensor(losses).cpu().numpy()
if self.losses is None:
self.losses = np.empty((0, losses.shape[1]))
self.losses = np.concatenate([self.losses, losses], axis=0)
return times
@abstractmethod
def single_epoch(self, epoch=0, **kwargs):
raise NotImplementedError
def set_optimizers(self, optimizers):
self.optimizers = optimizers
def print_extra(self):
print('')
def after_epoch(self):
if self.parameter_trace is not None:
params = dict(self.lfm.named_parameters())
for key in params:
if key in self.parameter_trace:
self.parameter_trace[key].append(params[key].detach().clone())
|
[
"numpy.concatenate",
"numpy.empty",
"time.time",
"torch.utils.data.dataloader.DataLoader",
"torch.tensor"
] |
[((1499, 1556), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(dataset, batch_size=batch_size, shuffle=False)\n', (1509, 1556), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((3636, 3681), 'numpy.concatenate', 'np.concatenate', (['[self.losses, losses]'], {'axis': '(0)'}), '([self.losses, losses], axis=0)\n', (3650, 3681), True, 'import numpy as np\n'), ((2393, 2404), 'time.time', 'time.time', ([], {}), '()\n', (2402, 2404), False, 'import time\n'), ((3583, 3613), 'numpy.empty', 'np.empty', (['(0, losses.shape[1])'], {}), '((0, losses.shape[1]))\n', (3591, 3613), True, 'import numpy as np\n'), ((3490, 3510), 'torch.tensor', 'torch.tensor', (['losses'], {}), '(losses)\n', (3502, 3510), False, 'import torch\n')]
|
import aepp
from aepp import connector
from copy import deepcopy
import requests
from typing import IO, Union
import logging
class DataIngestion:
"""
Class that manages sending data via authenticated methods.
For Batch and Streaming messages.
"""
loggingEnabled = False
logger = None
def __init__(
self,
config: dict = aepp.config.config_object,
header=aepp.config.header,
loggingObject: dict = None,
**kwargs,
):
"""
        Instantiate the DataIngestion class.
Arguments:
config : OPTIONAL : config object in the config module.
header : OPTIONAL : header object in the config module.
Additional kwargs will update the header.
"""
if loggingObject is not None and sorted(
["level", "stream", "format", "filename", "file"]
) == sorted(list(loggingObject.keys())):
self.loggingEnabled = True
self.logger = logging.getLogger(f"{__name__}")
self.logger.setLevel(loggingObject["level"])
formatter = logging.Formatter(loggingObject["format"])
if loggingObject["file"]:
fileHandler = logging.FileHandler(loggingObject["filename"])
fileHandler.setFormatter(formatter)
self.logger.addHandler(fileHandler)
if loggingObject["stream"]:
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
self.logger.addHandler(streamHandler)
self.connector = connector.AdobeRequest(config_object=config, header=header)
self.header = self.connector.header
self.header.update(**kwargs)
self.sandbox = self.connector.config["sandbox"]
self.endpoint = (
aepp.config.endpoints["global"] + aepp.config.endpoints["ingestion"]
)
self.endpoint_streaming = aepp.config.endpoints["streaming"]["collection"]
self.STREAMING_REFERENCE = {
"header": {
"schemaRef": {
"id": "https://ns.adobe.com/{TENANT_ID}/schemas/{SCHEMA_ID}",
"contentType": "application/vnd.adobe.xed-full+json;version={SCHEMA_VERSION}",
},
"imsOrgId": "{IMS_ORG_ID}",
"datasetId": "{DATASET_ID}",
"createdAt": "1526283801869",
"source": {"name": "{SOURCE_NAME}"},
},
"body": {
"xdmMeta": {
"schemaRef": {
"id": "https://ns.adobe.com/{TENANT_ID}/schemas/{SCHEMA_ID}",
"contentType": "application/vnd.adobe.xed-full+json;version={SCHEMA_VERSION}",
}
},
"xdmEntity": {
"person": {
"name": {
"firstName": "Jane",
"middleName": "F",
"lastName": "Doe",
},
"birthDate": "1969-03-14",
"gender": "female",
},
"workEmail": {
"primary": True,
"address": "<EMAIL>",
"type": "work",
"status": "active",
},
},
},
}
def createBatch(
self,
datasetId: str = None,
format: str = "json",
multiline: bool = False,
enableDiagnostic: bool = False,
partialIngestionPercentage: int = 0,
) -> dict:
"""
Create a new batch in Catalog Service.
Arguments:
datasetId : REQUIRED : The Dataset ID for the batch to upload data to.
            format : REQUIRED : the format of the data sent (default: json).
multiline : OPTIONAL : If you wish to upload multi-line JSON.
"""
if datasetId is None:
raise ValueError("Require a dataSetId")
if self.loggingEnabled:
self.logger.debug(f"Using createBatch with following format ({format})")
obj = {
"datasetId": datasetId,
"inputFormat": {"format": format, "isMultiLineJson": False},
}
if multiline is True:
obj["inputFormat"]["isMultiLineJson"] = True
if enableDiagnostic != False:
obj["enableErrorDiagnostics"] = True
if partialIngestionPercentage > 0:
obj["partialIngestionPercentage"] = partialIngestionPercentage
path = "/batches"
res = self.connector.postData(self.endpoint + path, data=obj)
return res
def deleteBatch(self, batchId: str = None) -> str:
"""
Delete a batch by applying the revert action on it.
Argument:
batchId : REQUIRED : Batch ID to be deleted
"""
if batchId is None:
raise ValueError("Require a batchId argument")
if self.loggingEnabled:
self.logger.debug(f"Starting deleteBatch for ID: ({batchId})")
path = f"/batches/{batchId}"
params = {"action": "REVERT"}
res = self.connector.postData(self.endpoint + path, params=params)
return res
def replayBatch(self, datasetId: str = None, batchIds: list = None) -> dict:
"""
        You can replay a batch that has already been ingested. You need to provide the datasetId and the list of batches to be replayed.
        Once specified through that action, you will need to re-upload the batch information via the uploadSmallFile method in JSON format and then signal the completion.
You will need to re-use the batchId provided for the re-upload.
Arguments:
dataSetId : REQUIRED : The dataset ID attached to the batch
batchIds : REQUIRED : The list of batchID to replay.
"""
if datasetId is None:
raise ValueError("Require a dataset ID")
if batchIds is None or type(batchIds) != list:
raise ValueError("Require a list of batch ID")
if self.loggingEnabled:
self.logger.debug(f"Starting replayBatch for dataset ID: ({datasetId})")
path = "/batches"
predecessors = [f"${batchId}" for batchId in batchIds]
data = {
"datasetId": datasetId,
"inputFormat": {"format": "json"},
"replay": {"predecessors": predecessors, "reason": "replace"},
}
res = self.connector.patchData(self.endpoint + path, data=data)
return res
def uploadSmallFile(
self,
batchId: str = None,
datasetId: str = None,
filePath: str = None,
data: Union[list, dict] = None,
verbose: bool = False,
) -> dict:
"""
Upload a small file (<256 MB) to the filePath location in the dataset.
Arguments:
batchId : REQUIRED : The batchId referencing the batch processed created beforehand.
datasetId : REQUIRED : The dataSetId related to where the data are ingested to.
filePath : REQUIRED : the filePath that will store the value.
data : REQUIRED : The data to be uploaded (following the type provided). List or Dictionary, depending if multiline is enabled.
            verbose: OPTIONAL : if you wish to see comments around the upload request.
"""
if batchId is None:
raise Exception("require a batchId")
if datasetId is None:
raise Exception("require a dataSetId")
if filePath is None:
raise Exception("require a filePath value")
if data is None:
raise Exception("require data to be passed")
if verbose:
print(f"Your data is in {type(data)} format")
if self.loggingEnabled:
self.logger.debug(f"uploadSmallFile as format: ({type(data)})")
privateHeader = deepcopy(self.header)
privateHeader["Content-Type"] = "application/octet-stream"
path = f"/batches/{batchId}/datasets/{datasetId}/files/{filePath}"
res = self.connector.putData(
self.endpoint + path, data=data, headers=privateHeader
)
return res
def uploadSmallFileFinish(
self, batchId: str = None, action: str = "COMPLETE", verbose: bool = False
) -> dict:
"""
Send an action to signify that the import is done.
Arguments:
batchId : REQUIRED : The batchId referencing the batch processed created beforehand.
action : REQUIRED : either one of these actions:
COMPLETE (default value)
ABORT
FAIL
REVERT
"""
if batchId is None:
raise Exception("require a batchId")
if action is None or action not in ["COMPLETE", "ABORT", "FAIL", "REVERT"]:
raise Exception("Not a valid action has been passed")
path = f"/batches/{batchId}"
if self.loggingEnabled:
self.logger.debug(f"Finishing upload for batch ID: ({batchId})")
params = {"action": action}
res = self.connector.postData(
self.endpoint + path, params=params, verbose=verbose
)
return res
def uploadLargeFileStartEnd(
self,
batchId: str = None,
datasetId: str = None,
filePath: str = None,
action: str = "INITIALIZE",
) -> dict:
"""
Start / End the upload of a large file with a POST method defining the action (see parameter)
Arguments:
batchId : REQUIRED : The batchId referencing the batch processed created beforehand.
datasetId : REQUIRED : The dataSetId related to where the data are ingested to.
filePath : REQUIRED : the filePath that will store the value.
action : REQUIRED : Action to either INITIALIZE or COMPLETE the upload.
"""
if batchId is None:
raise Exception("require a batchId")
if datasetId is None:
raise Exception("require a dataSetId")
if filePath is None:
raise Exception("require a filePath value")
params = {"action": action}
if self.loggingEnabled:
self.logger.debug(
f"Starting or Ending large upload for batch ID: ({batchId})"
)
path = f"/batches/{batchId}/datasets/{datasetId}/files/{filePath}"
res = self.connector.postData(self.endpoint + path, params=params)
return res
def uploadLargeFilePart(
self,
batchId: str = None,
datasetId: str = None,
filePath: str = None,
data: bytes = None,
contentRange: str = None,
) -> dict:
"""
Continue the upload of a large file with a PATCH method.
Arguments:
batchId : REQUIRED : The batchId referencing the batch processed created beforehand.
datasetId : REQUIRED : The dataSetId related to where the data are ingested to.
filePath : REQUIRED : the filePath that will store the value.
data : REQUIRED : The data to be uploaded (in bytes)
contentRange : REQUIRED : The range of bytes of the file being uploaded with this request.
"""
if batchId is None:
raise Exception("require a batchId")
if datasetId is None:
raise Exception("require a dataSetId")
if filePath is None:
raise Exception("require a filePath value")
if data is None:
raise Exception("require data to be passed")
if contentRange is None:
raise Exception("require the content range to be passed")
privateHeader = deepcopy(self.header)
privateHeader["Content-Type"] = "application/octet-stream"
privateHeader["Content-Range"] = contentRange
if self.loggingEnabled:
self.logger.debug(f"Uploading large part for batch ID: ({batchId})")
path = f"/batches/{batchId}/datasets/{datasetId}/files/{filePath}"
res = requests.patch(self.endpoint + path, data=data, headers=privateHeader)
res_json = res.json()
return res_json
def headFileStatus(
self, batchId: str = None, datasetId: str = None, filePath: str = None
) -> dict:
"""
Check the status of a large file upload.
Arguments:
batchId : REQUIRED : The batchId referencing the batch processed created beforehand.
datasetId : REQUIRED : The dataSetId related to where the data are ingested to.
filePath : REQUIRED : the filePath that reference the file.
"""
if batchId is None:
raise Exception("require a batchId")
if datasetId is None:
raise Exception("require a dataSetId")
if filePath is None:
raise Exception("require a filePath value")
if self.loggingEnabled:
self.logger.debug(f"Head File Status batch ID: ({batchId})")
path = f"/batches/{batchId}/datasets/{datasetId}/files/{filePath}"
res = self.connector.headData(self.endpoint + path)
return res
def getPreviewBatchDataset(
self,
batchId: str = None,
datasetId: str = None,
format: str = "json",
delimiter: str = ",",
quote: str = '"',
escape: str = "\\",
charset: str = "utf-8",
header: bool = True,
nrow: int = 5,
) -> dict:
"""
Generates a data preview for the files uploaded to the batch so far. The preview can be generated for all the batch datasets collectively or for the selected datasets.
Arguments:
batchId : REQUIRED : The batchId referencing the batch processed created beforehand.
datasetId : REQUIRED : The dataSetId related to where the data are ingested to.
format : REQUIRED : Format of the file ('json' default)
delimiter : OPTIONAL : The delimiter to use for parsing column values.
quote : OPTIONAL : The quote value to use while parsing data.
escape : OPTIONAL : The escape character to use while parsing data.
charset : OPTIONAL : The encoding to be used (default utf-8)
header : OPTIONAL : The flag to indicate if the header is supplied in the dataset files.
nrow : OPTIONAL : The number of rows to parse. (default 5) - cannot be 10 or greater
"""
if batchId is None:
raise Exception("require a batchId")
if datasetId is None:
raise Exception("require a dataSetId")
if format is None:
raise Exception("require a format type")
params = {
"delimiter": delimiter,
"quote": quote,
"escape": escape,
"charset": charset,
"header": header,
"nrow": nrow,
}
if self.loggingEnabled:
self.logger.debug(f"getPreviewBatchDataset for dataset ID: ({datasetId})")
path = f"/batches/{batchId}/datasets/{datasetId}/preview"
res = self.connector.getData(self.endpoint + path, params=params)
return res
def streamMessage(
self,
inletId: str = None,
data: dict = None,
synchronousValidation: bool = False,
) -> dict:
"""
Send a dictionary to the connection for streaming ingestion.
Arguments:
inletId : REQUIRED : the connection ID to be used for ingestion
data : REQUIRED : The data that you want to ingest to Platform.
synchronousValidation : OPTIONAL : An optional query parameter, intended for development purposes.
If set to true, it can be used for immediate feedback to determine if the request was successfully sent.
"""
if inletId is None:
raise Exception("Require a connectionId to be present")
        if data is None or type(data) != dict:
            raise Exception("Require a dictionary to be sent for ingestion")
if self.loggingEnabled:
self.logger.debug(f"Starting Streaming single message")
params = {"synchronousValidation": synchronousValidation}
path = f"/collection/{inletId}"
res = self.connector.postData(
self.endpoint_streaming + path, data=data, params=params
)
return res
def streamMessages(
self,
inletId: str = None,
data: list = None,
synchronousValidation: bool = False,
) -> dict:
"""
        Send a list of dictionaries to the connection for streaming ingestion.
Arguments:
inletId : REQUIRED : the connection ID to be used for ingestion
data : REQUIRED : The list of data that you want to ingest to Platform.
synchronousValidation : OPTIONAL : An optional query parameter, intended for development purposes.
If set to true, it can be used for immediate feedback to determine if the request was successfully sent.
"""
if inletId is None:
raise Exception("Require a connectionId to be present")
        if data is None or type(data) != list:
            raise Exception("Require a list of dictionaries to be sent for ingestion")
if self.loggingEnabled:
self.logger.debug(f"Starting Streaming multiple messages")
params = {"synchronousValidation": synchronousValidation}
data = {"messages": data}
path = f"/collection/batch/{inletId}"
res = self.connector.postData(
self.endpoint_streaming + path, data=data, params=params
)
return res
|
[
"copy.deepcopy",
"requests.patch",
"logging.FileHandler",
"logging.StreamHandler",
"logging.Formatter",
"aepp.connector.AdobeRequest",
"logging.getLogger"
] |
[((1592, 1651), 'aepp.connector.AdobeRequest', 'connector.AdobeRequest', ([], {'config_object': 'config', 'header': 'header'}), '(config_object=config, header=header)\n', (1614, 1651), False, 'from aepp import connector\n'), ((7985, 8006), 'copy.deepcopy', 'deepcopy', (['self.header'], {}), '(self.header)\n', (7993, 8006), False, 'from copy import deepcopy\n'), ((11807, 11828), 'copy.deepcopy', 'deepcopy', (['self.header'], {}), '(self.header)\n', (11815, 11828), False, 'from copy import deepcopy\n'), ((12152, 12222), 'requests.patch', 'requests.patch', (['(self.endpoint + path)'], {'data': 'data', 'headers': 'privateHeader'}), '(self.endpoint + path, data=data, headers=privateHeader)\n', (12166, 12222), False, 'import requests\n'), ((987, 1019), 'logging.getLogger', 'logging.getLogger', (['f"""{__name__}"""'], {}), "(f'{__name__}')\n", (1004, 1019), False, 'import logging\n'), ((1101, 1143), 'logging.Formatter', 'logging.Formatter', (["loggingObject['format']"], {}), "(loggingObject['format'])\n", (1118, 1143), False, 'import logging\n'), ((1212, 1258), 'logging.FileHandler', 'logging.FileHandler', (["loggingObject['filename']"], {}), "(loggingObject['filename'])\n", (1231, 1258), False, 'import logging\n'), ((1435, 1458), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1456, 1458), False, 'import logging\n')]
|
#!/usr/bin/env python
import os
import sys
import shutil
import subprocess
import conf
import options
from build_paper import build_paper
output_dir = conf.output_dir
build_dir = conf.build_dir
bib_dir = conf.bib_dir
pdf_dir = conf.pdf_dir
toc_conf = conf.toc_conf
proc_conf = conf.proc_conf
dirs = conf.dirs
def paper_stats(paper_id, start):
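    # Read the paper's page statistics, write the LaTeX page-number snippet,
    # and return the table-of-contents entry together with its last page number.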
stats = options.cfg2dict(os.path.join(output_dir, paper_id, 'paper_stats.json'))
# Write page number snippet to be included in the LaTeX output
if 'pages' in stats:
pages = stats['pages']
else:
pages = 1
stop = start + pages - 1
print('"%s" from p. %s to %s' % (paper_id, start, stop))
with open(os.path.join(output_dir, paper_id, 'page_numbers.tex'), 'w') as f:
        f.write('\\setcounter{page}{%s}' % start)
# Build table of contents
stats.update({'page': {'start': start,
'stop': stop}})
stats.update({'paper_id': paper_id})
return stats, stop
if __name__ == "__main__":
start = 0
toc_entries = []
options.mkdir_p(pdf_dir)
for paper_id in dirs:
build_paper(paper_id)
stats, start = paper_stats(paper_id, start + 1)
toc_entries.append(stats)
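        # build again after paper_stats has written page_numbers.tex so the page counter is applied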
build_paper(paper_id)
src_pdf = os.path.join(output_dir, paper_id, 'paper.pdf')
dest_pdf = os.path.join(pdf_dir, paper_id+'.pdf')
shutil.copy(src_pdf, dest_pdf)
command_line = 'cd '+pdf_dir+' ; pdfannotextractor '+paper_id+'.pdf'
run = subprocess.Popen(command_line, shell=True, stdout=subprocess.PIPE)
out, err = run.communicate()
toc = {'toc': toc_entries}
options.dict2cfg(toc, toc_conf)
|
[
"subprocess.Popen",
"options.mkdir_p",
"build_paper.build_paper",
"shutil.copy",
"os.path.join",
"options.dict2cfg"
] |
[((1075, 1099), 'options.mkdir_p', 'options.mkdir_p', (['pdf_dir'], {}), '(pdf_dir)\n', (1090, 1099), False, 'import options\n'), ((1674, 1705), 'options.dict2cfg', 'options.dict2cfg', (['toc', 'toc_conf'], {}), '(toc, toc_conf)\n', (1690, 1705), False, 'import options\n'), ((393, 447), 'os.path.join', 'os.path.join', (['output_dir', 'paper_id', '"""paper_stats.json"""'], {}), "(output_dir, paper_id, 'paper_stats.json')\n", (405, 447), False, 'import os\n'), ((1134, 1155), 'build_paper.build_paper', 'build_paper', (['paper_id'], {}), '(paper_id)\n', (1145, 1155), False, 'from build_paper import build_paper\n'), ((1256, 1277), 'build_paper.build_paper', 'build_paper', (['paper_id'], {}), '(paper_id)\n', (1267, 1277), False, 'from build_paper import build_paper\n'), ((1297, 1344), 'os.path.join', 'os.path.join', (['output_dir', 'paper_id', '"""paper.pdf"""'], {}), "(output_dir, paper_id, 'paper.pdf')\n", (1309, 1344), False, 'import os\n'), ((1364, 1404), 'os.path.join', 'os.path.join', (['pdf_dir', "(paper_id + '.pdf')"], {}), "(pdf_dir, paper_id + '.pdf')\n", (1376, 1404), False, 'import os\n'), ((1411, 1441), 'shutil.copy', 'shutil.copy', (['src_pdf', 'dest_pdf'], {}), '(src_pdf, dest_pdf)\n', (1422, 1441), False, 'import shutil\n'), ((1534, 1600), 'subprocess.Popen', 'subprocess.Popen', (['command_line'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(command_line, shell=True, stdout=subprocess.PIPE)\n', (1550, 1600), False, 'import subprocess\n'), ((708, 762), 'os.path.join', 'os.path.join', (['output_dir', 'paper_id', '"""page_numbers.tex"""'], {}), "(output_dir, paper_id, 'page_numbers.tex')\n", (720, 762), False, 'import os\n')]
|
import pygame, time
import virtual_start_page
import two_players
def CreateGameWindow(width, height):
pygame.display.set_caption("Checkers !")
gamewindow = pygame.display.set_mode((width, height))
return gamewindow
def WriteText(text, text_pos_x, text_pos_y, text_size):
text_font = pygame.font.SysFont(None, text_size)
text_render = text_font.render(text, True, Black)
gameWindow.blit(text_render, (text_pos_x, text_pos_y))
class CreateButton():
def layout(self,button):
pygame.draw.rect(gameWindow, button[4], (button[0], button[1], button[2], button[3]))
def text(self, button, space_x, space_y):
WriteText(button[5], button[0] + space_x, button[1] + space_y, button[6])
def Animate(self, button, actual_color, animate_color):
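        # Highlight the button and bump its text size while the mouse hovers over it; restore it otherwise.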
mouse_x, mouse_y = pygame.mouse.get_pos()
if mouse_x >= button[0] and mouse_y >= button[1] and mouse_x <= button[0] + button[2] and mouse_y <= button[1] + button[3]:
button[7] += 1
if button[7] == 1:
button[6] += 1
button[4] = animate_color
else:
button[4] = actual_color
button[6] = 30
button[7] = 0
pygame.init()
#Colors:
White = (255,255,255)
LightWhite = (200,200,200)
Black = (0,0,0)
Gray = (128,128,128)
LightGreen = (0,200,0)
BrightGreen = (0,255,0)
LightBlue = (0,0,200)
BrightBlue = (0,0,255)
#Dimensions:
gameWindow_width = 680
gameWindow_height = 680
#-------------Lists of properties of Buttons------
twoPlayers = [gameWindow_width/4 - 60, gameWindow_height/3 + 60 , 200, 50, LightGreen, "Two Players", 30, 0]
team_play = [gameWindow_width/4 + 270 - 80, gameWindow_height/3 + 60, 200, 50, LightGreen, "Team Play", 30, 0]
back = [10, gameWindow_height - 80, 160, 50, Black, "Back", 30, 0]
gameWindow = CreateGameWindow(gameWindow_width, gameWindow_height)
#pygame.display.set_caption("Checkers")
#gameWindow = pygame.display.set_mode((gameWindow_width,gameWindow_height))
def Run_Game():
End = False
while not End:
gameWindow.fill(LightWhite)
WriteText("PLAY", 210, 100, 150)
createButton = CreateButton()
createButton.layout(twoPlayers)
createButton.text(twoPlayers, 36, 16)
createButton.Animate(twoPlayers, LightGreen, BrightGreen)
createButton.layout(team_play)
createButton.text(team_play, 45, 16)
createButton.Animate(team_play, LightGreen, BrightGreen)
createButton.layout(back)
createButton.text(back, 55, 16)
createButton.Animate(back, LightWhite, Gray)
#On_Click_Back_Button(back)
for key in pygame.event.get():
if key.type == pygame.KEYDOWN:
if key.key == pygame.K_ESCAPE:
                    End = True
                    pygame.quit()
                    return
            if key.type == pygame.QUIT:
                pygame.quit()
                return
mouse_x, mouse_y = pygame.mouse.get_pos()
if key.type == pygame.MOUSEBUTTONDOWN:
if mouse_x >= back[0] and mouse_y >= back[1] and mouse_x <= back[0] + back[2] and mouse_y <= back[1] + back[3]:
return
if mouse_x >= twoPlayers[0] and mouse_y >= twoPlayers[1] and mouse_x <= twoPlayers[0] + twoPlayers[2] \
and mouse_y <= twoPlayers[1] + twoPlayers[3]:
two_players.StartSinglePlayer(0, 0, 0)
pygame.display.update()
#Run_Game()
#pygame.quit()
|
[
"pygame.quit",
"pygame.font.SysFont",
"pygame.draw.rect",
"pygame.display.set_mode",
"pygame.event.get",
"two_players.StartSinglePlayer",
"pygame.init",
"pygame.display.update",
"pygame.mouse.get_pos",
"pygame.display.set_caption"
] |
[((1201, 1214), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1212, 1214), False, 'import pygame, time\n'), ((107, 147), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Checkers !"""'], {}), "('Checkers !')\n", (133, 147), False, 'import pygame, time\n'), ((165, 205), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(width, height)'], {}), '((width, height))\n', (188, 205), False, 'import pygame, time\n'), ((301, 337), 'pygame.font.SysFont', 'pygame.font.SysFont', (['None', 'text_size'], {}), '(None, text_size)\n', (320, 337), False, 'import pygame, time\n'), ((511, 600), 'pygame.draw.rect', 'pygame.draw.rect', (['gameWindow', 'button[4]', '(button[0], button[1], button[2], button[3])'], {}), '(gameWindow, button[4], (button[0], button[1], button[2],\n button[3]))\n', (527, 600), False, 'import pygame, time\n'), ((814, 836), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (834, 836), False, 'import pygame, time\n'), ((2645, 2663), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2661, 2663), False, 'import pygame, time\n'), ((3407, 3430), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (3428, 3430), False, 'import pygame, time\n'), ((2923, 2945), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (2943, 2945), False, 'import pygame, time\n'), ((2877, 2890), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2888, 2890), False, 'import pygame, time\n'), ((2806, 2819), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2817, 2819), False, 'import pygame, time\n'), ((3359, 3397), 'two_players.StartSinglePlayer', 'two_players.StartSinglePlayer', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (3388, 3397), False, 'import two_players\n')]
|
import os
import torch
import pickle
import numpy as np
from lib import inteutil
from lib import posematcher
from lib.models import networkinte
from tqdm import tqdm
from TorchSUL import Model as M
from collections import defaultdict
if __name__=='__main__':
## step 1: match the poses
print('Matching poses from two branches...')
matcher = posematcher.PoseMatcher(top_down_path='./mupots/pred/',
btm_up_path='./mupots/MUPOTS_Preds_btmup_transformed.pkl')
matcher.match(pts_out_path='./mupots/pred_bu/', dep_out_path='./mupots/pred_dep_bu/',
gt_dep_path='./mupots/depths/')
## step 2: infer the integrated results
print('Inferring the integrated poses...')
# create data loader
data = inteutil.InteDataset(bu_path='./mupots/pred_bu/', bu_dep_path='./mupots/pred_dep_bu/',
td_path='./mupots/pred/', td_dep_path='./mupots/pred_dep/')
# initialize the network
net = networkinte.IntegrationNet()
pts_dumb = torch.zeros(2, 102)
dep_dumb = torch.zeros(2, 2)
net(pts_dumb, dep_dumb)
M.Saver(net).restore('./ckpts/model_inte/')
net.cuda()
# create paths
if not os.path.exists('./mupots/pred_inte/'):
os.makedirs('./mupots/pred_inte/')
if not os.path.exists('./mupots/pred_dep_inte/'):
os.makedirs('./mupots/pred_dep_inte/')
with torch.no_grad():
all_pts = defaultdict(list)
for src_pts,src_dep,vid_inst in tqdm(data):
src_pts = torch.from_numpy(src_pts).cuda()
src_dep = torch.from_numpy(src_dep).cuda()
res_pts, res_dep = net(src_pts, src_dep)
res_pts = res_pts.cpu().numpy()
res_dep = res_dep.squeeze().cpu().numpy() * 1000 # the depth is scaled 1000
# save results
i,j = vid_inst
all_pts[i].insert(j, res_pts)
pickle.dump(res_dep, open('./mupots/pred_dep_inte/%02d_%02d.pkl'%(i,j), 'wb'))
for k in all_pts:
result = np.stack(all_pts[k], axis=1)
pickle.dump(result, open('./mupots/pred_inte/%d.pkl'%(k+1), 'wb'))
|
[
"numpy.stack",
"tqdm.tqdm",
"torch.from_numpy",
"os.makedirs",
"lib.models.networkinte.IntegrationNet",
"os.path.exists",
"lib.posematcher.PoseMatcher",
"collections.defaultdict",
"TorchSUL.Model.Saver",
"torch.zeros",
"torch.no_grad",
"lib.inteutil.InteDataset"
] |
[((354, 473), 'lib.posematcher.PoseMatcher', 'posematcher.PoseMatcher', ([], {'top_down_path': '"""./mupots/pred/"""', 'btm_up_path': '"""./mupots/MUPOTS_Preds_btmup_transformed.pkl"""'}), "(top_down_path='./mupots/pred/', btm_up_path=\n './mupots/MUPOTS_Preds_btmup_transformed.pkl')\n", (377, 473), False, 'from lib import posematcher\n'), ((720, 876), 'lib.inteutil.InteDataset', 'inteutil.InteDataset', ([], {'bu_path': '"""./mupots/pred_bu/"""', 'bu_dep_path': '"""./mupots/pred_dep_bu/"""', 'td_path': '"""./mupots/pred/"""', 'td_dep_path': '"""./mupots/pred_dep/"""'}), "(bu_path='./mupots/pred_bu/', bu_dep_path=\n './mupots/pred_dep_bu/', td_path='./mupots/pred/', td_dep_path=\n './mupots/pred_dep/')\n", (740, 876), False, 'from lib import inteutil\n'), ((908, 936), 'lib.models.networkinte.IntegrationNet', 'networkinte.IntegrationNet', ([], {}), '()\n', (934, 936), False, 'from lib.models import networkinte\n'), ((949, 968), 'torch.zeros', 'torch.zeros', (['(2)', '(102)'], {}), '(2, 102)\n', (960, 968), False, 'import torch\n'), ((981, 998), 'torch.zeros', 'torch.zeros', (['(2)', '(2)'], {}), '(2, 2)\n', (992, 998), False, 'import torch\n'), ((1107, 1144), 'os.path.exists', 'os.path.exists', (['"""./mupots/pred_inte/"""'], {}), "('./mupots/pred_inte/')\n", (1121, 1144), False, 'import os\n'), ((1148, 1182), 'os.makedirs', 'os.makedirs', (['"""./mupots/pred_inte/"""'], {}), "('./mupots/pred_inte/')\n", (1159, 1182), False, 'import os\n'), ((1191, 1232), 'os.path.exists', 'os.path.exists', (['"""./mupots/pred_dep_inte/"""'], {}), "('./mupots/pred_dep_inte/')\n", (1205, 1232), False, 'import os\n'), ((1236, 1274), 'os.makedirs', 'os.makedirs', (['"""./mupots/pred_dep_inte/"""'], {}), "('./mupots/pred_dep_inte/')\n", (1247, 1274), False, 'import os\n'), ((1282, 1297), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1295, 1297), False, 'import torch\n'), ((1311, 1328), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1322, 1328), False, 'from collections import defaultdict\n'), ((1363, 1373), 'tqdm.tqdm', 'tqdm', (['data'], {}), '(data)\n', (1367, 1373), False, 'from tqdm import tqdm\n'), ((1025, 1037), 'TorchSUL.Model.Saver', 'M.Saver', (['net'], {}), '(net)\n', (1032, 1037), True, 'from TorchSUL import Model as M\n'), ((1812, 1840), 'numpy.stack', 'np.stack', (['all_pts[k]'], {'axis': '(1)'}), '(all_pts[k], axis=1)\n', (1820, 1840), True, 'import numpy as np\n'), ((1388, 1413), 'torch.from_numpy', 'torch.from_numpy', (['src_pts'], {}), '(src_pts)\n', (1404, 1413), False, 'import torch\n'), ((1434, 1459), 'torch.from_numpy', 'torch.from_numpy', (['src_dep'], {}), '(src_dep)\n', (1450, 1459), False, 'import torch\n')]
|
from django.contrib import admin
from .models import ResItem
admin.site.register(ResItem)
|
[
"django.contrib.admin.site.register"
] |
[((63, 91), 'django.contrib.admin.site.register', 'admin.site.register', (['ResItem'], {}), '(ResItem)\n', (82, 91), False, 'from django.contrib import admin\n')]
|
import os
import sys
sys.path.append(os.path.join(os.getcwd(), '..'))
|
[
"os.getcwd"
] |
[((51, 62), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (60, 62), False, 'import os\n')]
|
# coding=utf-8
"""以MNIST为例,使用slim.data
"""
import os
import tensorflow as tf
slim = tf.contrib.slim
def get_data(data_dir, num_samples, num_class, file_pattern='*.tfrecord'):
"""返回slim.data.Dataset
:param data_dir: tfrecord文件路径
:param num_samples: 样本数目
:param num_class: 类别数目
:param file_pattern: tfrecord文件格式
:return:
"""
file_pattern = os.path.join(data_dir, file_pattern)
keys_to_features = {
"image/encoded": tf.FixedLenFeature((), tf.string, default_value=""),
"image/format": tf.FixedLenFeature((), tf.string, default_value="raw"),
'image/height': tf.FixedLenFeature((), tf.int64, default_value=tf.zeros([], dtype=tf.int64)),
'image/width': tf.FixedLenFeature((), tf.int64, default_value=tf.zeros([], dtype=tf.int64)),
"image/class/label": tf.FixedLenFeature((), tf.int64, default_value=tf.zeros([], dtype=tf.int64))
}
items_to_handlers = {
"image": slim.tfexample_decoder.Image(channels=1),
"label": slim.tfexample_decoder.Tensor("image/class/label")
}
decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)
items_to_descriptions = {
"image": 'A color image of varying size',
"label": 'A single interger between 0 and ' + str(num_class - 1)
}
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=tf.TFRecordReader,
decoder=decoder,
num_samples=num_samples,
items_to_descriptions=items_to_descriptions,
num_classes=num_class,
label_to_names=label_to_name
)
NUM_EPOCH = 2
BATCH_SIZE = 8
NUM_CLASS = 10
NUM_SAMPLE = 60000
label_to_name = {'0': 'one', '1': 'two', '3': 'three', '4': 'four', '5': 'five',
'6': 'six', '7': 'seven', '8': 'eight', '9': 'nine'}
data_dir = './'
dataset = get_data(data_dir, NUM_SAMPLE, NUM_CLASS, 'mnist_train.tfrecord')
data_provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
[image, label] = data_provider.get(['image', 'label'])
# Batch the data
images, labels = tf.train.batch([image, label], batch_size=BATCH_SIZE)
labels = slim.one_hot_encoding(labels, NUM_CLASS)
|
[
"tensorflow.FixedLenFeature",
"tensorflow.train.batch",
"os.path.join",
"tensorflow.zeros"
] |
[((2069, 2122), 'tensorflow.train.batch', 'tf.train.batch', (['[image, label]'], {'batch_size': 'BATCH_SIZE'}), '([image, label], batch_size=BATCH_SIZE)\n', (2083, 2122), True, 'import tensorflow as tf\n'), ((375, 411), 'os.path.join', 'os.path.join', (['data_dir', 'file_pattern'], {}), '(data_dir, file_pattern)\n', (387, 411), False, 'import os\n'), ((462, 513), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string'], {'default_value': '""""""'}), "((), tf.string, default_value='')\n", (480, 513), True, 'import tensorflow as tf\n'), ((539, 593), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string'], {'default_value': '"""raw"""'}), "((), tf.string, default_value='raw')\n", (557, 593), True, 'import tensorflow as tf\n'), ((666, 694), 'tensorflow.zeros', 'tf.zeros', (['[]'], {'dtype': 'tf.int64'}), '([], dtype=tf.int64)\n', (674, 694), True, 'import tensorflow as tf\n'), ((767, 795), 'tensorflow.zeros', 'tf.zeros', (['[]'], {'dtype': 'tf.int64'}), '([], dtype=tf.int64)\n', (775, 795), True, 'import tensorflow as tf\n'), ((874, 902), 'tensorflow.zeros', 'tf.zeros', (['[]'], {'dtype': 'tf.int64'}), '([], dtype=tf.int64)\n', (882, 902), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/python
import os
import sys
import csv
import shutil
from io import StringIO
import unittest
sys.path.append(".")
from mock_helper import import_data
from annogesiclib.format_fixer import FormatFixer
class TestFormatFixer(unittest.TestCase):
def setUp(self):
self.fixer = FormatFixer()
self.example = Example()
self.ratt_out = self.example.ratt_out
self.rnaplex_out = self.example.rnaplex_out
self.emboss_out = self.example.emboss_out
self.test_folder = "test_folder"
if (not os.path.exists(self.test_folder)):
os.mkdir(self.test_folder)
self.ratt_file = os.path.join(self.test_folder, "ratt.gff")
with open(self.ratt_file, "w") as rh:
rh.write(self.example.ratt_gff)
self.rnaplex_file = os.path.join(self.test_folder, "rnaplex.txt")
with open(self.rnaplex_file, "w") as rh:
rh.write(self.example.rnaplex_file)
self.emboss_file = os.path.join(self.test_folder, "emboss.txt")
with open(self.emboss_file, "w") as rh:
rh.write(self.example.emboss_file)
def tearDown(self):
if os.path.exists(self.test_folder):
shutil.rmtree(self.test_folder)
def test_fix_ratt(self):
out = os.path.join(self.test_folder, "ratt.out")
self.fixer.fix_ratt(self.ratt_file, "Staphylococcus_aureus_HG003", out)
datas = import_data(out)
self.assertEqual(set(datas), set(self.ratt_out.split("\n")))
def test_fix_rnaplex(self):
out_file = os.path.join(self.test_folder, "rnaplex.out")
self.fixer.fix_rnaplex(self.rnaplex_file, out_file)
datas = import_data(out_file)
self.assertEqual(set(datas), set(self.rnaplex_out.split("\n")))
def test_fix_emboss(self):
out_file = os.path.join(self.test_folder, "emboss.out")
self.fixer.fix_emboss(self.emboss_file, out_file)
datas = import_data(out_file)
self.assertEqual(set(datas), set(self.emboss_out.split("\n")))
class Example(object):
ratt_gff = """##gff-version 3
chromosome.Staphylococcus_aureus_HG003.final Refseq source 1 2821337 . + . mol_type=genomic DNA;db_xref=taxon:93061;strain=NCTC 8325;organism=Staphylococcus aureus subsp. aureus NCTC 8325;sub_species=aureus
chromosome.Staphylococcus_aureus_HG003.final Refseq gene 517 1878 . + . gene=dnaA;db_xref=GeneID:3919798;locus_tag=SAOUHSC_00001
chromosome.Staphylococcus_aureus_HG003.final Refseq CDS 517 1878 . + . gene=dnaA;db_xref=GI:88193824;db_xref=GeneID:3919798;transl_table=11;product=chromosomal replication initiation protein;note=binds to the dnaA-box as an ATP-bound complex at the origin of replication during the initiation of chromosomal replication, can also affect transcription of multiple genes including itself.;locus_tag=SAOUHSC_00001;protein_id=REF_uohsc:SAOUHSC00001;protein_id=YP_498609.1;codon_start=1
chromosome.Staphylococcus_aureus_HG003.final Refseq gene 2156 3289 . + . db_xref=GeneID:3919799;locus_tag=SAOUHSC_00002
chromosome.Staphylococcus_aureus_HG003.final Refseq tRNA 2156 3289 . + . EC_number=2.7.7.7;db_xref=GI:88193825;db_xref=GeneID:3919799;transl_table=11;product=DNA polymerase III subunit beta;note=binds the polymerase to DNA and acts as a sliding clamp;locus_tag=SAOUHSC_00002;protein_id=REF_uohsc:SAOUHSC00002;protein_id=YP_498610.1;codon_start=1"""
ratt_out = """##gff-version 3
Staphylococcus_aureus_HG003 Refseq source 1 2821337 . + . mol_type=genomic DNA;db_xref=taxon:93061;strain=NCTC 8325;organism=Staphylococcus aureus subsp. aureus NCTC 8325;sub_species=aureus
Staphylococcus_aureus_HG003 Refseq gene 517 1878 . + . ID=gene0;Name=dnaA;gene=dnaA;db_xref=GeneID:3919798;locus_tag=SAOUHSC_00001
Staphylococcus_aureus_HG003 Refseq CDS 517 1878 . + . ID=cds0;Name=YP_498609.1;Parent=gene0;gene=dnaA;db_xref=GI:88193824;db_xref=GeneID:3919798;transl_table=11;product=chromosomal replication initiation protein;note=binds to the dnaA-box as an ATP-bound complex at the origin of replication during the initiation of chromosomal replication, can also affect transcription of multiple genes including itself.;locus_tag=SAOUHSC_00001;protein_id=REF_uohsc:SAOUHSC00001;protein_id=YP_498609.1;codon_start=1
Staphylococcus_aureus_HG003 Refseq gene 2156 3289 . + . ID=gene1;Name=SAOUHSC_00002;db_xref=GeneID:3919799;locus_tag=SAOUHSC_00002
Staphylococcus_aureus_HG003 Refseq tRNA 2156 3289 . + . ID=rna0;Name=SAOUHSC_00002;EC_number=2.7.7.7;db_xref=GI:88193825;db_xref=GeneID:3919799;transl_table=11;product=DNA polymerase III subunit beta;note=binds the polymerase to DNA and acts as a sliding clamp;locus_tag=SAOUHSC_00002;protein_id=REF_uohsc:SAOUHSC00002;protein_id=YP_498610.1;codon_start=1"""
rnaplex_file = """>SAOUHSC_00001|dnaA
>srna1023
((((((&)))))) 571,576 : 20,25 (-5.30 = -7.89 + 0.18 + 2.41)
>SAOUHSC_00001|dnaA
>srna352
((((((((&)))))))) 163,170 : 24,31 (-1.91 = -8.31 + 0.60 + 5.80)
>SAOUHSC_00001|dnaA
>srna559
(((((((((((((&)))))))))).))) 301,313 : 4,17 (-5.43 = -9.60 + 3.14 + 1.03)
Error during initialization of the duplex in duplexfold_XS
>SAOUHSC_00002
>srna1023
((((((&)))))) 571,576 : 20,25 (-5.30 = -7.89 + 0.18 + 2.41)"""
rnaplex_out = """>SAOUHSC_00001|dnaA
>srna1023
((((((&)))))) 571,576 : 20,25 (-5.30 = -7.89 + 0.18 + 2.41)
>SAOUHSC_00001|dnaA
>srna352
((((((((&)))))))) 163,170 : 24,31 (-1.91 = -8.31 + 0.60 + 5.80)
>SAOUHSC_00001|dnaA
>srna559
(((((((((((((&)))))))))).))) 301,313 : 4,17 (-5.43 = -9.60 + 3.14 + 1.03)
>SAOUHSC_00002
>srna1023
((((((&)))))) 571,576 : 20,25 (-5.30 = -7.89 + 0.18 + 2.41)"""
emboss_file = """>A_1
DKSSNSFYKDLFIDFYIKILCITNKQDKVIHRLL
>B_1
NGIVPCLLSSPSILA*SALKRMSSLSLLVLLFAKAKX
>C_1
IELNHLSKQQKFGPTPYLSVVLFEESLLQYX"""
emboss_out = """>A
DKSSNSFYKDLFIDFYIKILCITNKQDKVIHRLL
>B
NGIVPCLLSSPSILA*SALKRMSSLSLLVLLFAKAKX
>C
IELNHLSKQQKFGPTPYLSVVLFEESLLQYX"""
if __name__ == "__main__":
unittest.main()
|
[
"sys.path.append",
"unittest.main",
"os.mkdir",
"annogesiclib.format_fixer.FormatFixer",
"os.path.exists",
"mock_helper.import_data",
"shutil.rmtree",
"os.path.join"
] |
[((105, 125), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (120, 125), False, 'import sys\n'), ((5938, 5953), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5951, 5953), False, 'import unittest\n'), ((299, 312), 'annogesiclib.format_fixer.FormatFixer', 'FormatFixer', ([], {}), '()\n', (310, 312), False, 'from annogesiclib.format_fixer import FormatFixer\n'), ((650, 692), 'os.path.join', 'os.path.join', (['self.test_folder', '"""ratt.gff"""'], {}), "(self.test_folder, 'ratt.gff')\n", (662, 692), False, 'import os\n'), ((811, 856), 'os.path.join', 'os.path.join', (['self.test_folder', '"""rnaplex.txt"""'], {}), "(self.test_folder, 'rnaplex.txt')\n", (823, 856), False, 'import os\n'), ((981, 1025), 'os.path.join', 'os.path.join', (['self.test_folder', '"""emboss.txt"""'], {}), "(self.test_folder, 'emboss.txt')\n", (993, 1025), False, 'import os\n'), ((1157, 1189), 'os.path.exists', 'os.path.exists', (['self.test_folder'], {}), '(self.test_folder)\n', (1171, 1189), False, 'import os\n'), ((1279, 1321), 'os.path.join', 'os.path.join', (['self.test_folder', '"""ratt.out"""'], {}), "(self.test_folder, 'ratt.out')\n", (1291, 1321), False, 'import os\n'), ((1418, 1434), 'mock_helper.import_data', 'import_data', (['out'], {}), '(out)\n', (1429, 1434), False, 'from mock_helper import import_data\n'), ((1556, 1601), 'os.path.join', 'os.path.join', (['self.test_folder', '"""rnaplex.out"""'], {}), "(self.test_folder, 'rnaplex.out')\n", (1568, 1601), False, 'import os\n'), ((1678, 1699), 'mock_helper.import_data', 'import_data', (['out_file'], {}), '(out_file)\n', (1689, 1699), False, 'from mock_helper import import_data\n'), ((1823, 1867), 'os.path.join', 'os.path.join', (['self.test_folder', '"""emboss.out"""'], {}), "(self.test_folder, 'emboss.out')\n", (1835, 1867), False, 'import os\n'), ((1942, 1963), 'mock_helper.import_data', 'import_data', (['out_file'], {}), '(out_file)\n', (1953, 1963), False, 'from mock_helper import import_data\n'), ((551, 583), 'os.path.exists', 'os.path.exists', (['self.test_folder'], {}), '(self.test_folder)\n', (565, 583), False, 'import os\n'), ((598, 624), 'os.mkdir', 'os.mkdir', (['self.test_folder'], {}), '(self.test_folder)\n', (606, 624), False, 'import os\n'), ((1203, 1234), 'shutil.rmtree', 'shutil.rmtree', (['self.test_folder'], {}), '(self.test_folder)\n', (1216, 1234), False, 'import shutil\n')]
|
import numpy as np
from PIL import Image
from src.data.rand_augment import RandAugmentMC
import torchvision.transforms as transforms
def pad(x, border=4):
return np.pad(x, [(0, 0), (border, border), (border, border)], mode='reflect')
class RandomPadandCrop(object):
"""Crop randomly the image.
Args:
output_size (tuple or int): Desired output size. If int, square crop
is made.
"""
def __init__(self, width=4, output_size=None):
self.width = width
if output_size is None:
self.output_size = output_size
# assert isinstance(output_size, (int, tuple))
elif isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self, x):
old_h, old_w = x.size[:2]
x = np.transpose(x, (2, 0, 1))
x = pad(x, self.width)
h, w = x.shape[1:]
if self.output_size is None:
new_h, new_w = old_h, old_w
else:
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
x = x[:, top: top + new_h, left: left + new_w]
return Image.fromarray(np.transpose(x, (1, 2, 0)))
# TODO Implement TransformKTimes
class TransformTwice:
def __init__(self, transform):
self.transform = transform
def __call__(self, inp):
out1 = self.transform(inp)
out2 = self.transform(inp)
return out1, out2
class TransformFix(object):
def __init__(self, base_transform):
self.weak = base_transform
# Inserting strong augmentation
self.strong = []
for transform in base_transform.transforms:
if isinstance(transform, transforms.ToTensor):
self.strong.append(RandAugmentMC(n=2, m=10))
self.strong.append(transform)
self.strong = transforms.Compose(self.strong)
def __call__(self, inp):
weak = self.weak(inp)
strong = self.strong(inp)
return weak, strong
def build_transforms(normalize=None, center_crop=None, image_size=None,
random_crop=None, flip=None, random_resize_crop=None):
"""
Args:
normalize (tuple or transforms.Normalize): Parameters for data normalization.
center_crop (int): Size for center crop.
image_size (int): Size for image size.
random_crop (int): Size for image random crop.
flip (bool): Randomly flip the data horizontally.
random_resize_crop (dict): Random resize crop the image.
Returns:
Transforms
"""
transform_ = []
if image_size:
if isinstance(image_size, int):
image_size = (image_size, image_size)
transform_.append(transforms.Resize(image_size))
if random_resize_crop:
transform_.append(transforms.RandomResizedCrop(random_resize_crop['size'], random_resize_crop['scale']))
elif random_crop:
transform_.append(transforms.RandomCrop(random_crop))
elif center_crop:
transform_.append(transforms.CenterCrop(center_crop))
if flip:
transform_.append(transforms.RandomHorizontalFlip())
transform_.append(transforms.ToTensor())
if normalize:
if isinstance(normalize, transforms.Normalize):
transform_.append(normalize)
else:
transform_.append(transforms.Normalize(*normalize))
transform = transforms.Compose(transform_)
return transform
|
[
"numpy.pad",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.Normalize",
"numpy.transpose",
"torchvision.transforms.RandomResizedCrop",
"numpy.random.randint",
"torchvision.transforms.Compose",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.Resize",
"src.data.rand_augment.RandAugmentMC",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.ToTensor"
] |
[((168, 239), 'numpy.pad', 'np.pad', (['x', '[(0, 0), (border, border), (border, border)]'], {'mode': '"""reflect"""'}), "(x, [(0, 0), (border, border), (border, border)], mode='reflect')\n", (174, 239), True, 'import numpy as np\n'), ((3549, 3579), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_'], {}), '(transform_)\n', (3567, 3579), True, 'import torchvision.transforms as transforms\n'), ((905, 931), 'numpy.transpose', 'np.transpose', (['x', '(2, 0, 1)'], {}), '(x, (2, 0, 1))\n', (917, 931), True, 'import numpy as np\n'), ((1141, 1172), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h - new_h)'], {}), '(0, h - new_h)\n', (1158, 1172), True, 'import numpy as np\n'), ((1188, 1219), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w - new_w)'], {}), '(0, w - new_w)\n', (1205, 1219), True, 'import numpy as np\n'), ((1996, 2027), 'torchvision.transforms.Compose', 'transforms.Compose', (['self.strong'], {}), '(self.strong)\n', (2014, 2027), True, 'import torchvision.transforms as transforms\n'), ((3316, 3337), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3335, 3337), True, 'import torchvision.transforms as transforms\n'), ((1308, 1334), 'numpy.transpose', 'np.transpose', (['x', '(1, 2, 0)'], {}), '(x, (1, 2, 0))\n', (1320, 1334), True, 'import numpy as np\n'), ((2878, 2907), 'torchvision.transforms.Resize', 'transforms.Resize', (['image_size'], {}), '(image_size)\n', (2895, 2907), True, 'import torchvision.transforms as transforms\n'), ((2963, 3053), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (["random_resize_crop['size']", "random_resize_crop['scale']"], {}), "(random_resize_crop['size'], random_resize_crop\n ['scale'])\n", (2991, 3053), True, 'import torchvision.transforms as transforms\n'), ((3258, 3291), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (3289, 3291), True, 'import torchvision.transforms as transforms\n'), ((3098, 3132), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['random_crop'], {}), '(random_crop)\n', (3119, 3132), True, 'import torchvision.transforms as transforms\n'), ((3499, 3531), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['*normalize'], {}), '(*normalize)\n', (3519, 3531), True, 'import torchvision.transforms as transforms\n'), ((1906, 1930), 'src.data.rand_augment.RandAugmentMC', 'RandAugmentMC', ([], {'n': '(2)', 'm': '(10)'}), '(n=2, m=10)\n', (1919, 1930), False, 'from src.data.rand_augment import RandAugmentMC\n'), ((3182, 3216), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['center_crop'], {}), '(center_crop)\n', (3203, 3216), True, 'import torchvision.transforms as transforms\n')]
|
"""
Implement credentials lookup using the ~/.netrc(5) file.
"""
import base64
import binascii
from netrc import netrc
import os.path
from ddupdate.ddplugin import AuthPlugin, AuthError
class AuthNetrc(AuthPlugin):
"""Get credentials stored in the .netrc(5) file.
This is the original storage used before 0.7.1. It is less secure
than for example the keyring but is convenient and, since it does
not require anything to be unlocked, a good candidate for servers.
"""
_name = 'netrc'
_oneliner = 'Store credentials in .netrc(5)'
__version__ = '0.7.1'
def get_auth(self, machine):
"""Implement AuthPlugin::get_auth()."""
path = os.environ.get('NETRC', '')
if path:
pass
elif os.path.exists(os.path.expanduser('~/.netrc')):
path = os.path.expanduser('~/.netrc')
elif os.path.exists('/etc/netrc'):
path = '/etc/netrc'
else:
raise AuthError("Cannot locate the netrc file (see manpage).")
auth = netrc(path).authenticators(machine)
if not auth:
raise AuthError("No .netrc data found for " + machine)
if not auth[2]:
raise AuthError("No password found for " + machine)
try:
pw = base64.b64decode(auth[2]).decode('ascii')
except (binascii.Error, UnicodeDecodeError):
pw = auth[2]
return auth[0], pw
def set_password(self, machine, username, password):
"""Implement AuthPlugin::set_password()."""
def is_matching_entry(line):
"""Return True if line contains 'machine' machine'."""
words = line.split(' ')
for i in range(0, len(words) - 1):
if words[i] == 'machine' \
and words[i + 1].lower() == machine.lower():
return True
return False
def new_entry():
"""Return new entry."""
pw = base64.b64encode(password.encode('utf-8')).decode('ascii')
line = 'machine ' + machine.lower()
if username:
line += ' login ' + username
line += ' password ' + pw
return line
path = os.path.expanduser('~/.netrc')
lines = []
if os.path.exists(path):
with open(path, 'r') as f:
lines = f.readlines()
lines = [line for line in lines if not is_matching_entry(line)]
lines.append(new_entry())
lines = [line.strip() + "\n" for line in lines]
with open(path, 'w') as f:
f.writelines(lines)
|
[
"netrc.netrc",
"ddupdate.ddplugin.AuthError",
"base64.b64decode"
] |
[((1111, 1159), 'ddupdate.ddplugin.AuthError', 'AuthError', (["('No .netrc data found for ' + machine)"], {}), "('No .netrc data found for ' + machine)\n", (1120, 1159), False, 'from ddupdate.ddplugin import AuthPlugin, AuthError\n'), ((1202, 1247), 'ddupdate.ddplugin.AuthError', 'AuthError', (["('No password found for ' + machine)"], {}), "('No password found for ' + machine)\n", (1211, 1247), False, 'from ddupdate.ddplugin import AuthPlugin, AuthError\n'), ((1036, 1047), 'netrc.netrc', 'netrc', (['path'], {}), '(path)\n', (1041, 1047), False, 'from netrc import netrc\n'), ((964, 1020), 'ddupdate.ddplugin.AuthError', 'AuthError', (['"""Cannot locate the netrc file (see manpage)."""'], {}), "('Cannot locate the netrc file (see manpage).')\n", (973, 1020), False, 'from ddupdate.ddplugin import AuthPlugin, AuthError\n'), ((1278, 1303), 'base64.b64decode', 'base64.b64decode', (['auth[2]'], {}), '(auth[2])\n', (1294, 1303), False, 'import base64\n')]
|
from trackGits import *
import os
def installTest():
"""
Test installView with install function
"""
global __CONFIG_NAME, __SRC_DIR
conf = os.path.join(__SRC_DIR,__CONFIG_NAME)
if not isInstalled(conf):
installed = installView(conf,installer=install)
if not installed: return False
else: return True
else:
return True
def addTest():
"""
Test addDir function
"""
global __CONFIG_NAME, __SRC_DIR
conf = os.path.join(__SRC_DIR,__CONFIG_NAME)
src = input("dirpath of git project")
addDir(src,conf)
|
[
"os.path.join"
] |
[((146, 184), 'os.path.join', 'os.path.join', (['__SRC_DIR', '__CONFIG_NAME'], {}), '(__SRC_DIR, __CONFIG_NAME)\n', (158, 184), False, 'import os\n'), ((428, 466), 'os.path.join', 'os.path.join', (['__SRC_DIR', '__CONFIG_NAME'], {}), '(__SRC_DIR, __CONFIG_NAME)\n', (440, 466), False, 'import os\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 13 14:13:10 2018
@author: antony
"""
import numpy as np
import pandas as pd
import sys
import matplotlib
from matplotlib.colors import Normalize
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import libplot
import matplotlib.gridspec as gridspec
# http://arep.med.harvard.edu/N-Regulation/Tolonen2006/GSEA/index.html
class ExtGSEA(object):
def __init__(self, ranked_gene_list, ranked_score, permutations=1000, w=1):
self.__w = w
self.__np = permutations
l = len(ranked_gene_list)
rk = np.concatenate((ranked_gene_list, ranked_gene_list))
rsc = np.concatenate((ranked_score, -ranked_score), axis=0)
ix = np.argsort(rsc)[::-1]
print(np.sort(rsc)[::-1])
pn = np.concatenate((np.ones(l), -np.ones(l)), axis=0)
self.__rk = ranked_gene_list
self.__rs = ranked_score
self.__rkc = rk[ix]
self.__rsc = rsc[ix]
self.__pn = pn[ix]
# Defaults if nothing found
self.__es = -1
self.__nes = -1
self.__pv = -1
self.__ledge = []
self.__bg = {}
self.__gsn1 = 'n1'
self.__gsn2 = 'n2'
self.__run = False
def enrichment_score(self, gs1):
l = len(self.__rk)
hits = np.zeros(l)
for i in range(0, l):
if self.__rk[i] in gs1:
hits[i] = 1
# Compute ES
if self.__w != 1:
score_hit = np.cumsum(np.abs(self.__rs * hits) ** self.__w)
else:
score_hit = np.cumsum(np.abs(self.__rs * hits))
score_hit = score_hit / score_hit[-1]
score_miss = np.cumsum(1 - hits)
score_miss = score_miss / score_miss[-1]
es_all = score_hit - score_miss
es = np.max(es_all) + np.min(es_all)
isen = np.zeros(l)
if es < 0:
ixpk = np.where(es_all == np.min(es_all))[0][0]
isen[ixpk:] = 1
ledge = self.__rk[(isen == 1) & (hits == 1)]
ledge = ledge[::-1]
else:
ixpk = np.where(es_all == np.max(es_all))[0][0]
print(ixpk)
isen[0:(ixpk + 1)] = 1
ledge = self.__rk[(isen == 1) & (hits == 1)]
return es, es_all, hits, ledge
def ext_gsea(self, gs1, gs2, name1='Gene set 1', name2='Gene set 2'):
self.__gs1 = gs1
self.__gs2 = gs2
self.__gsn1 = name1
self.__gsn2 = name2
l = len(self.__rk)
self.__hits1 = np.zeros(l)
self.__hits2 = np.zeros(l)
for i in range(0, l):
if self.__rk[i] in gs1:
self.__hits1[i] = 1
if self.__rk[i] in gs2:
self.__hits2[i] = 1
l = len(self.__rkc)
self.__isgs = np.zeros(l)
for i in range(0, l):
if (self.__pn[i] > 0 and self.__rkc[i] in gs1) or (self.__pn[i] < 0 and self.__rkc[i] in gs2):
self.__isgs[i] = 1
# Compute ES
if self.__w != 1:
self.__score_hit = np.cumsum(np.abs(self.__rsc * self.__isgs) ** self.__w)
else:
self.__score_hit = np.cumsum(np.abs(self.__rsc * self.__isgs))
self.__score_hit = self.__score_hit / self.__score_hit[-1]
self.__score_miss = np.cumsum(1 - self.__isgs)
self.__score_miss = self.__score_miss / self.__score_miss[-1]
self.__es_all = self.__score_hit - self.__score_miss
self.__es = np.max(self.__es_all) + np.min(self.__es_all)
isen = np.zeros(l)
if self.__es < 0:
ixpk = np.where(self.__es_all == np.min(self.__es_all))[0][0]
isen[ixpk:] = 1
self.__ledge = self.__rkc[(isen == 1) & (self.__isgs == 1)]
self.__ledge = self.__ledge[::-1]
else:
ixpk = np.where(self.__es_all == np.max(self.__es_all))[0][0]
isen[0:(ixpk + 1)] = 1
self.__ledge = self.__rkc[(isen == 1) & (self.__isgs == 1)]
if self.__np > 0:
self.__bg['es'] = np.zeros(self.__np)
for i in range(0, self.__np):
self.__bg['isgs'] = self.__isgs[np.random.permutation(l)];
if self.__w != 1:
self.__bg['hit'] = np.cumsum((np.abs(self.__rsc * self.__bg['isgs'])) ** self.__w)
else:
self.__bg['hit'] = np.cumsum(np.abs(self.__rsc * self.__bg['isgs']))
self.__bg['hit'] = self.__bg['hit'] / self.__bg['hit'][-1]
self.__bg['miss'] = np.cumsum(1 - self.__bg['isgs']);
self.__bg['miss'] = self.__bg['miss'] / self.__bg['miss'][-1]
self.__bg['all'] = self.__bg['hit'] - self.__bg['miss'];
self.__bg['es'][i] = max(self.__bg['all']) + min(self.__bg['all']);
if self.__es < 0:
self.__pv = np.sum(self.__bg['es'] <= self.__es) / self.__np
self.__nes = self.__es / np.abs(np.mean(self.__bg['es'][self.__bg['es'] < 0]))
else:
self.__pv = np.sum(self.__bg['es'] >= self.__es) / self.__np
self.__nes = self.__es / np.abs(np.mean(self.__bg['es'][self.__bg['es'] > 0]))
else:
self.__pv = -1
self.__nes = -1
self.__run = True
return self.__es, self.__nes, self.__pv, self.__ledge
@property
def bg(self):
return self.__bg
@property
def score_hit(self):
return self.__score_hit
@property
def isgs(self):
return self.__isgs
@property
def es(self):
return self.__es
@property
def es_all(self):
return self.__es_all
@property
def score_miss(self):
return self.__score_miss
def plot(self, title=None, out=None):
"""
Replot existing GSEA plot to make it better for publications
"""
if not self.__run:
return
libplot.setup()
# output truetype
#plt.rcParams.update({'pdf.fonttype':42,'ps.fonttype':42})
# in most case, we will have mangy plots, so do not display plots
# It's also convinient to run this script on command line.
fig = libplot.new_base_fig(w=10, h=7)
# GSEA Plots
gs = gridspec.GridSpec(16, 1)
x = np.array(list(range(0, len(self.__rk))))
es1, es_all1, hits1, ledge1 = self.enrichment_score(self.__gs1)
es2, es_all2, hits2, ledge2 = self.enrichment_score(self.__gs2)
# Ranked Metric Scores Plot
ix = list(range(0, len(x), 100))
print(ix)
x1 = x[ix]
y1 = self.__rs[ix]
print(hits1)
ax1 = fig.add_subplot(gs[10:])
ax1.fill_between(x1, y1=y1, y2=0, color='#2c5aa0')
ax1.set_ylabel("Ranked list metric", fontsize=14)
ax1.text(.05, .9, self.__gsn1, color='black', horizontalalignment='left', verticalalignment='top',
transform=ax1.transAxes)
ax1.text(.95, .05, self.__gsn2, color='red', horizontalalignment='right', verticalalignment='bottom',
transform=ax1.transAxes)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.set_xlim((0, len(x)))
#
# Hits
#
# gene hits
ax2 = fig.add_subplot(gs[8:9], sharex=ax1)
# the x coords of this transformation are data, and the y coord are axes
trans2 = transforms.blended_transform_factory(ax2.transData, ax2.transAxes)
ax2.vlines(np.where(hits1 == 1)[0], 0, 1, linewidth=.5, transform=trans2, color ='black')
libplot.invisible_axes(ax2)
ax3 = fig.add_subplot(gs[9:10], sharex=ax1)
# the x coords of this transformation are data, and the y coord are axes
trans3 = transforms.blended_transform_factory(ax3.transData, ax3.transAxes)
ax3.vlines(np.where(hits2 == 1)[0], 0, 1, linewidth=.5,transform=trans3, color ='red')
libplot.invisible_axes(ax3)
#
# Enrichment score plot
#
ax4 = fig.add_subplot(gs[:8], sharex=ax1)
# max es
y2 = np.max(es_all1)
x1 = np.where(es_all1 == y2)[0]
print(x1, y2)
ax4.vlines(x1, 0, y2, linewidth=.5, color='grey')
y2 = np.min(es_all2)
x1 = np.where(es_all2 == y2)[0]
print(x1, y2)
ax4.vlines(x1, 0, y2, linewidth=.5, color='grey')
y1 = es_all1
y2 = es_all2
ax4.plot(x, y1, linewidth=3, color ='black')
ax4.plot(x, y2, linewidth=3, color ='red')
ax4.tick_params(axis='both', which='both', color='dimgray')
#ax4.spines['left'].set_color('dimgray')
ax4.spines['bottom'].set_visible(False) #set_color('dimgray')
# the y coords of this transformation are data, and the x coord are axes
trans4 = transforms.blended_transform_factory(ax4.transAxes, ax4.transData)
ax4.hlines(0, 0, 1, linewidth=.5, transform=trans4, color='grey')
ax4.set_ylabel("Enrichment score (ES)", fontsize=14)
ax4.set_xlim(min(x), max(x))
ax4.spines['top'].set_visible(False)
ax4.spines['right'].set_visible(False)
ax4.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off')
ax4.locator_params(axis='y', nbins=5)
# FuncFormatter need two argment, I don't know why. this lambda function used to format yaxis tick labels.
ax4.yaxis.set_major_formatter(plt.FuncFormatter(lambda tick_loc,tick_num : '{:.1f}'.format(tick_loc)) )
if title is not None:
fig.suptitle(title)
fig.tight_layout(pad=2) #rect=[o, o, w, w])
if out is not None:
plt.savefig(out, dpi=600)
|
[
"numpy.abs",
"numpy.sum",
"numpy.ones",
"numpy.argsort",
"numpy.mean",
"libplot.setup",
"numpy.cumsum",
"numpy.max",
"matplotlib.transforms.blended_transform_factory",
"libplot.new_base_fig",
"numpy.sort",
"libplot.invisible_axes",
"numpy.min",
"numpy.random.permutation",
"numpy.concatenate",
"numpy.zeros",
"numpy.where",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.savefig"
] |
[((643, 695), 'numpy.concatenate', 'np.concatenate', (['(ranked_gene_list, ranked_gene_list)'], {}), '((ranked_gene_list, ranked_gene_list))\n', (657, 695), True, 'import numpy as np\n'), ((710, 763), 'numpy.concatenate', 'np.concatenate', (['(ranked_score, -ranked_score)'], {'axis': '(0)'}), '((ranked_score, -ranked_score), axis=0)\n', (724, 763), True, 'import numpy as np\n'), ((1446, 1457), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (1454, 1457), True, 'import numpy as np\n'), ((1848, 1867), 'numpy.cumsum', 'np.cumsum', (['(1 - hits)'], {}), '(1 - hits)\n', (1857, 1867), True, 'import numpy as np\n'), ((2035, 2046), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (2043, 2046), True, 'import numpy as np\n'), ((2751, 2762), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (2759, 2762), True, 'import numpy as np\n'), ((2786, 2797), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (2794, 2797), True, 'import numpy as np\n'), ((3071, 3082), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (3079, 3082), True, 'import numpy as np\n'), ((3647, 3673), 'numpy.cumsum', 'np.cumsum', (['(1 - self.__isgs)'], {}), '(1 - self.__isgs)\n', (3656, 3673), True, 'import numpy as np\n'), ((3904, 3915), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (3912, 3915), True, 'import numpy as np\n'), ((6479, 6494), 'libplot.setup', 'libplot.setup', ([], {}), '()\n', (6492, 6494), False, 'import libplot\n'), ((6761, 6792), 'libplot.new_base_fig', 'libplot.new_base_fig', ([], {'w': '(10)', 'h': '(7)'}), '(w=10, h=7)\n', (6781, 6792), False, 'import libplot\n'), ((6836, 6860), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(16)', '(1)'], {}), '(16, 1)\n', (6853, 6860), True, 'import matplotlib.gridspec as gridspec\n'), ((8139, 8205), 'matplotlib.transforms.blended_transform_factory', 'transforms.blended_transform_factory', (['ax2.transData', 'ax2.transAxes'], {}), '(ax2.transData, ax2.transAxes)\n', (8175, 8205), True, 'import matplotlib.transforms as transforms\n'), ((8312, 8339), 'libplot.invisible_axes', 'libplot.invisible_axes', (['ax2'], {}), '(ax2)\n', (8334, 8339), False, 'import libplot\n'), ((8508, 8574), 'matplotlib.transforms.blended_transform_factory', 'transforms.blended_transform_factory', (['ax3.transData', 'ax3.transAxes'], {}), '(ax3.transData, ax3.transAxes)\n', (8544, 8574), True, 'import matplotlib.transforms as transforms\n'), ((8678, 8705), 'libplot.invisible_axes', 'libplot.invisible_axes', (['ax3'], {}), '(ax3)\n', (8700, 8705), False, 'import libplot\n'), ((8874, 8889), 'numpy.max', 'np.max', (['es_all1'], {}), '(es_all1)\n', (8880, 8889), True, 'import numpy as np\n'), ((9032, 9047), 'numpy.min', 'np.min', (['es_all2'], {}), '(es_all2)\n', (9038, 9047), True, 'import numpy as np\n'), ((9662, 9728), 'matplotlib.transforms.blended_transform_factory', 'transforms.blended_transform_factory', (['ax4.transAxes', 'ax4.transData'], {}), '(ax4.transAxes, ax4.transData)\n', (9698, 9728), True, 'import matplotlib.transforms as transforms\n'), ((777, 792), 'numpy.argsort', 'np.argsort', (['rsc'], {}), '(rsc)\n', (787, 792), True, 'import numpy as np\n'), ((1979, 1993), 'numpy.max', 'np.max', (['es_all'], {}), '(es_all)\n', (1985, 1993), True, 'import numpy as np\n'), ((1996, 2010), 'numpy.min', 'np.min', (['es_all'], {}), '(es_all)\n', (2002, 2010), True, 'import numpy as np\n'), ((3834, 3855), 'numpy.max', 'np.max', (['self.__es_all'], {}), '(self.__es_all)\n', (3840, 3855), True, 'import numpy as np\n'), ((3858, 3879), 'numpy.min', 'np.min', (['self.__es_all'], {}), '(self.__es_all)\n', (3864, 3879), True, 'import numpy as np\n'), ((4435, 4454), 'numpy.zeros', 'np.zeros', (['self.__np'], {}), '(self.__np)\n', (4443, 4454), True, 'import numpy as np\n'), ((8903, 8926), 'numpy.where', 'np.where', (['(es_all1 == y2)'], {}), '(es_all1 == y2)\n', (8911, 8926), True, 'import numpy as np\n'), ((9061, 9084), 'numpy.where', 'np.where', (['(es_all2 == y2)'], {}), '(es_all2 == y2)\n', (9069, 9084), True, 'import numpy as np\n'), ((10608, 10633), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out'], {'dpi': '(600)'}), '(out, dpi=600)\n', (10619, 10633), True, 'import matplotlib.pyplot as plt\n'), ((822, 834), 'numpy.sort', 'np.sort', (['rsc'], {}), '(rsc)\n', (829, 834), True, 'import numpy as np\n'), ((880, 890), 'numpy.ones', 'np.ones', (['l'], {}), '(l)\n', (887, 890), True, 'import numpy as np\n'), ((1738, 1762), 'numpy.abs', 'np.abs', (['(self.__rs * hits)'], {}), '(self.__rs * hits)\n', (1744, 1762), True, 'import numpy as np\n'), ((3501, 3533), 'numpy.abs', 'np.abs', (['(self.__rsc * self.__isgs)'], {}), '(self.__rsc * self.__isgs)\n', (3507, 3533), True, 'import numpy as np\n'), ((4985, 5017), 'numpy.cumsum', 'np.cumsum', (["(1 - self.__bg['isgs'])"], {}), "(1 - self.__bg['isgs'])\n", (4994, 5017), True, 'import numpy as np\n'), ((8225, 8245), 'numpy.where', 'np.where', (['(hits1 == 1)'], {}), '(hits1 == 1)\n', (8233, 8245), True, 'import numpy as np\n'), ((8594, 8614), 'numpy.where', 'np.where', (['(hits2 == 1)'], {}), '(hits2 == 1)\n', (8602, 8614), True, 'import numpy as np\n'), ((893, 903), 'numpy.ones', 'np.ones', (['l'], {}), '(l)\n', (900, 903), True, 'import numpy as np\n'), ((1652, 1676), 'numpy.abs', 'np.abs', (['(self.__rs * hits)'], {}), '(self.__rs * hits)\n', (1658, 1676), True, 'import numpy as np\n'), ((3400, 3432), 'numpy.abs', 'np.abs', (['(self.__rsc * self.__isgs)'], {}), '(self.__rsc * self.__isgs)\n', (3406, 3432), True, 'import numpy as np\n'), ((4559, 4583), 'numpy.random.permutation', 'np.random.permutation', (['l'], {}), '(l)\n', (4580, 4583), True, 'import numpy as np\n'), ((5314, 5350), 'numpy.sum', 'np.sum', (["(self.__bg['es'] <= self.__es)"], {}), "(self.__bg['es'] <= self.__es)\n", (5320, 5350), True, 'import numpy as np\n'), ((5505, 5541), 'numpy.sum', 'np.sum', (["(self.__bg['es'] >= self.__es)"], {}), "(self.__bg['es'] >= self.__es)\n", (5511, 5541), True, 'import numpy as np\n'), ((4813, 4851), 'numpy.abs', 'np.abs', (["(self.__rsc * self.__bg['isgs'])"], {}), "(self.__rsc * self.__bg['isgs'])\n", (4819, 4851), True, 'import numpy as np\n'), ((5411, 5456), 'numpy.mean', 'np.mean', (["self.__bg['es'][self.__bg['es'] < 0]"], {}), "(self.__bg['es'][self.__bg['es'] < 0])\n", (5418, 5456), True, 'import numpy as np\n'), ((5602, 5647), 'numpy.mean', 'np.mean', (["self.__bg['es'][self.__bg['es'] > 0]"], {}), "(self.__bg['es'][self.__bg['es'] > 0])\n", (5609, 5647), True, 'import numpy as np\n'), ((2113, 2127), 'numpy.min', 'np.min', (['es_all'], {}), '(es_all)\n', (2119, 2127), True, 'import numpy as np\n'), ((2304, 2318), 'numpy.max', 'np.max', (['es_all'], {}), '(es_all)\n', (2310, 2318), True, 'import numpy as np\n'), ((3996, 4017), 'numpy.min', 'np.min', (['self.__es_all'], {}), '(self.__es_all)\n', (4002, 4017), True, 'import numpy as np\n'), ((4230, 4251), 'numpy.max', 'np.max', (['self.__es_all'], {}), '(self.__es_all)\n', (4236, 4251), True, 'import numpy as np\n'), ((4689, 4727), 'numpy.abs', 'np.abs', (["(self.__rsc * self.__bg['isgs'])"], {}), "(self.__rsc * self.__bg['isgs'])\n", (4695, 4727), True, 'import numpy as np\n')]
|
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data.sampler import BatchSampler
import numpy as np
import torch
import os
from elbo_functions import deviance_upper_bound, elbo, KL_closed, minibatch_KLD_upper_bound, minibatch_KLD_upper_bound_iter
from model_test import MSE_test_GPapprox, MSE_test
from utils import SubjectSampler, VaryingLengthSubjectSampler, VaryingLengthBatchSampler, HensmanDataLoader
from predict_HealthMNIST import recon_complete_gen, gen_rotated_mnist_plot, variational_complete_gen
from validation import validate
def hensman_training(nnet_model, type_nnet, epochs, dataset, optimiser, type_KL, num_samples, latent_dim, covar_module0,
covar_module1, likelihoods, m, H, zt_list, P, T, varying_T, Q, weight, id_covariate, loss_function,
natural_gradient=False, natural_gradient_lr=0.01, subjects_per_batch=20, memory_dbg=False,
eps=1e-6, results_path=None, validation_dataset=None, generation_dataset=None,
prediction_dataset=None, gp_model=None, csv_file_test_data=None, csv_file_test_label=None,
test_mask_file=None, data_source_path=None):
"""
    Perform training with minibatching and Stochastic Variational Inference [Hensman et al., 2013]. See L-VAE supplementary
materials
:param nnet_model: encoder/decoder neural network model
:param type_nnet: type of encoder/decoder
    :param epochs: number of epochs
:param dataset: dataset to use in training
:param optimiser: optimiser to be used
    :param type_KL: type of KL divergence computation to use
:param num_samples: number of samples to use
:param latent_dim: number of latent dimensions
:param covar_module0: additive kernel (sum of cross-covariances) without id covariate
:param covar_module1: additive kernel (sum of cross-covariances) with id covariate
:param likelihoods: GPyTorch likelihood model
:param m: variational mean
:param H: variational variance
:param zt_list: list of inducing points
:param P: number of unique instances
:param T: number of longitudinal samples per individual
:param Q: number of covariates
:param weight: value for the weight
:param id_covariate: covariate number of the id
:param loss_function: selected loss function
:param natural_gradient: use of natural gradients
:param natural_gradient_lr: natural gradients learning rate
    :param subjects_per_batch: number of subjects per batch (vectorisation)
:param memory_dbg: enable debugging
:param eps: jitter
:param results_path: path to results
    :param validation_dataset: dataset for the validation set
:param generation_dataset: dataset to help with sample image generation
    :param prediction_dataset: dataset with subjects for prediction
    :param gp_model: GPyTorch GP model
:param csv_file_test_data: path to test data
:param csv_file_test_label: path to test label
:param test_mask_file: path to test mask
:param data_source_path: path to data source
:return trained models and resulting losses
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
N = len(dataset)
assert type_KL == 'GPapprox_closed'
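    # Minibatches are formed from whole subjects (via the subject samplers below), so every
    # batch carries complete per-subject blocks of observations; the minibatch KLD bounds
    # used later appear to rely on this structure.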
if varying_T:
n_batches = (P + subjects_per_batch - 1)//subjects_per_batch
dataloader = HensmanDataLoader(dataset, batch_sampler=VaryingLengthBatchSampler(VaryingLengthSubjectSampler(dataset, id_covariate), subjects_per_batch), num_workers=4)
else:
batch_size = subjects_per_batch*T
n_batches = (P*T + batch_size - 1)//(batch_size)
dataloader = HensmanDataLoader(dataset, batch_sampler=BatchSampler(SubjectSampler(dataset, P, T), batch_size, drop_last=False), num_workers=4)
net_train_loss_arr = np.empty((0, 1))
recon_loss_arr = np.empty((0, 1))
nll_loss_arr = np.empty((0, 1))
kld_loss_arr = np.empty((0, 1))
penalty_term_arr = np.empty((0, 1))
best_val_pred_mse = np.Inf
best_epoch = 0
for epoch in range(1, epochs + 1):
recon_loss_sum = 0
nll_loss_sum = 0
kld_loss_sum = 0
net_loss_sum = 0
iid_kld_sum = 0
for batch_idx, sample_batched in enumerate(dataloader):
optimiser.zero_grad()
nnet_model.train()
covar_module0.train()
covar_module1.train()
indices = sample_batched['idx']
data = sample_batched['digit'].double().to(device)
train_x = sample_batched['label'].double().to(device)
mask = sample_batched['mask'].double().to(device)
N_batch = data.shape[0]
covariates = torch.cat((train_x[:, :id_covariate], train_x[:, id_covariate+1:]), dim=1)
recon_batch, mu, log_var = nnet_model(data)
[recon_loss, nll] = nnet_model.loss_function(recon_batch, data, mask)
recon_loss = torch.sum(recon_loss)
nll_loss = torch.sum(nll)
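            # When natural gradients are used, H is kept directly as the PSD variational covariance;
            # otherwise H is a free factor and H @ H^T below forms the PSD matrix.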
PSD_H = H if natural_gradient else torch.matmul(H, H.transpose(-1, -2))
if varying_T:
P_in_current_batch = torch.unique(train_x[:, id_covariate]).shape[0]
kld_loss, grad_m, grad_H = minibatch_KLD_upper_bound_iter(covar_module0, covar_module1, likelihoods, latent_dim, m, PSD_H, train_x, mu, log_var, zt_list, P, P_in_current_batch, N, natural_gradient, id_covariate, eps)
else:
P_in_current_batch = N_batch // T
kld_loss, grad_m, grad_H = minibatch_KLD_upper_bound(covar_module0, covar_module1, likelihoods, latent_dim, m, PSD_H, train_x, mu, log_var, zt_list, P, P_in_current_batch, T, natural_gradient, eps)
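            # Rescale the minibatch reconstruction/NLL terms so they are on the scale of the full dataset.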
recon_loss = recon_loss * P/P_in_current_batch
nll_loss = nll_loss * P/P_in_current_batch
if loss_function == 'nll':
net_loss = nll_loss + kld_loss
elif loss_function == 'mse':
kld_loss = kld_loss / latent_dim
net_loss = recon_loss + weight * kld_loss
net_loss.backward()
optimiser.step()
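            # Natural-gradient update of the variational parameters: the precision H^{-1} is updated
            # additively with the symmetrised gradient w.r.t. H (via Cholesky solves), and the new
            # mean m is then recovered from the updated natural parameters.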
if natural_gradient:
LH = torch.cholesky(H)
iH = torch.cholesky_solve(torch.eye(H.shape[-1], dtype=torch.double).to(device), LH)
iH_new = iH + natural_gradient_lr*(grad_H + grad_H.transpose(-1,-2))
LiH_new = torch.cholesky(iH_new)
H = torch.cholesky_solve(torch.eye(H.shape[-1], dtype=torch.double).to(device), LiH_new).detach()
m = torch.matmul(H, torch.matmul(iH, m) - natural_gradient_lr*(grad_m - 2*torch.matmul(grad_H, m))).detach()
net_loss_sum += net_loss.item() / n_batches
recon_loss_sum += recon_loss.item() / n_batches
nll_loss_sum += nll_loss.item() / n_batches
kld_loss_sum += kld_loss.item() / n_batches
print('Iter %d/%d - Loss: %.3f - GP loss: %.3f - NLL Loss: %.3f - Recon Loss: %.3f' % (
epoch, epochs, net_loss_sum, kld_loss_sum, nll_loss_sum, recon_loss_sum), flush=True)
penalty_term_arr = np.append(penalty_term_arr, 0.0)
net_train_loss_arr = np.append(net_train_loss_arr, net_loss_sum)
recon_loss_arr = np.append(recon_loss_arr, recon_loss_sum)
nll_loss_arr = np.append(nll_loss_arr, nll_loss_sum)
kld_loss_arr = np.append(kld_loss_arr, kld_loss_sum)
if (not epoch % 25) and epoch != epochs:
with torch.no_grad():
nnet_model.eval()
covar_module0.eval()
covar_module1.eval()
if validation_dataset is not None:
full_mu = torch.zeros(len(dataset), latent_dim, dtype=torch.double).to(device)
prediction_x = torch.zeros(len(dataset), Q, dtype=torch.double).to(device)
for batch_idx, sample_batched in enumerate(dataloader):
label_id = sample_batched['idx']
prediction_x[label_id] = sample_batched['label'].double().to(device)
data = sample_batched['digit'].double().to(device)
covariates = torch.cat((prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate+1:]), dim=1)
mu, log_var = nnet_model.encode(data)
full_mu[label_id] = mu
val_pred_mse = validate(nnet_model, type_nnet, validation_dataset, type_KL, num_samples, latent_dim, covar_module0, covar_module1, likelihoods, zt_list, T, weight, full_mu, prediction_x, id_covariate, loss_function, eps=1e-6)
if val_pred_mse < best_val_pred_mse:
best_val_pred_mse = val_pred_mse
best_epoch = epoch
prediction_dataloader = DataLoader(prediction_dataset, batch_sampler=VaryingLengthBatchSampler(
VaryingLengthSubjectSampler(prediction_dataset, id_covariate), subjects_per_batch),
num_workers=4)
full_mu = torch.zeros(len(prediction_dataset), latent_dim, dtype=torch.double).to(device)
prediction_x = torch.zeros(len(prediction_dataset), Q, dtype=torch.double).to(device)
with torch.no_grad():
for batch_idx, sample_batched in enumerate(prediction_dataloader):
label_id = sample_batched['idx']
prediction_x[label_id] = sample_batched['label'].double().to(device)
data = sample_batched['digit'].double().to(device)
covariates = torch.cat(
(prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate + 1:]),
dim=1)
mu, log_var = nnet_model.encode(data)
full_mu[label_id] = mu
covar_module0.eval()
covar_module1.eval()
if type_KL == 'GPapprox' or type_KL == 'GPapprox_closed':
MSE_test_GPapprox(csv_file_test_data, csv_file_test_label, test_mask_file,
data_source_path, type_nnet,
nnet_model, covar_module0, covar_module1, likelihoods, results_path,
latent_dim, prediction_x,
full_mu, zt_list, P, T, id_covariate, varying_T,
save_file='result_error_best.csv')
print('Saving better model')
try:
torch.save(nnet_model.state_dict(), os.path.join(results_path, 'nnet_model_best.pth'))
torch.save(gp_model.state_dict(), os.path.join(results_path, 'gp_model_best.pth'))
torch.save(zt_list, os.path.join(results_path, 'zt_list_best.pth'))
torch.save(m, os.path.join(results_path, 'm_best.pth'))
torch.save(H, os.path.join(results_path, 'H_best.pth'))
if results_path and generation_dataset:
prediction_dataloader = DataLoader(prediction_dataset,
batch_sampler=VaryingLengthBatchSampler(
VaryingLengthSubjectSampler(prediction_dataset,
id_covariate),
subjects_per_batch), num_workers=4)
full_mu = torch.zeros(len(prediction_dataset), latent_dim, dtype=torch.double).to(
device)
prediction_x = torch.zeros(len(prediction_dataset), Q, dtype=torch.double).to(device)
for batch_idx, sample_batched in enumerate(prediction_dataloader):
label_id = sample_batched['idx']
prediction_x[label_id] = sample_batched['label'].double().to(device)
data = sample_batched['digit'].double().to(device)
covariates = torch.cat((prediction_x[label_id, :id_covariate],
prediction_x[label_id, id_covariate + 1:]), dim=1)
mu, log_var = nnet_model.encode(data)
full_mu[label_id] = mu
recon_complete_gen(generation_dataset, nnet_model, type_nnet,
results_path, covar_module0,
covar_module1, likelihoods, latent_dim,
'./data', prediction_x, full_mu, epoch,
zt_list, P, T, id_covariate, varying_T)
                        except Exception as e:
                            print(e)
                            print('Saving intermediate model failed!')
if torch.cuda.is_available():
torch.cuda.empty_cache()
return penalty_term_arr, net_train_loss_arr, nll_loss_arr, recon_loss_arr, kld_loss_arr, m, H, best_epoch
def minibatch_training(nnet_model, type_nnet, epochs, dataset, optimiser, type_KL, num_samples, latent_dim,
covar_module0, covar_module1, likelihoods, zt_list, P, T, Q, weight, id_covariate,
loss_function, memory_dbg=False, eps=1e-6, results_path=None, validation_dataset=None,
generation_dataset=None, prediction_dataset=None):
"""
    Perform training with minibatching (pseudo-minibatching) similar to GPPVAE [Casale et al., 2018]. See L-VAE supplementary
materials
:param nnet_model: encoder/decoder neural network model
:param type_nnet: type of encoder/decoder
    :param epochs: number of epochs
:param dataset: dataset to use in training
:param optimiser: optimiser to be used
    :param type_KL: type of KL divergence computation to use
:param num_samples: number of samples to use
:param latent_dim: number of latent dimensions
:param covar_module0: additive kernel (sum of cross-covariances) without id covariate
:param covar_module1: additive kernel (sum of cross-covariances) with id covariate
:param likelihoods: GPyTorch likelihood model
:param zt_list: list of inducing points
:param P: number of unique instances
:param T: number of longitudinal samples per individual
:param Q: number of covariates
:param weight: value for the weight
:param id_covariate: covariate number of the id
:param loss_function: selected loss function
:param memory_dbg: enable debugging
:param eps: jitter
:param results_path: path to results
    :param validation_dataset: dataset for the validation set
:param generation_dataset: dataset to help with sample image generation
    :param prediction_dataset: dataset with subjects for prediction
:return trained models and resulting losses
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = T
assert (type_KL == 'GPapprox_closed' or type_KL == 'GPapprox')
# set up Data Loader for training
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=4)
net_train_loss_arr = np.empty((0, 1))
recon_loss_arr = np.empty((0, 1))
nll_loss_arr = np.empty((0, 1))
gp_loss_arr = np.empty((0, 1))
penalty_term_arr = np.empty((0, 1))
for epoch in range(1, epochs + 1):
optimiser.zero_grad()
full_mu = torch.zeros(len(dataset), latent_dim, dtype=torch.double, requires_grad=True).to(device)
full_log_var = torch.zeros(len(dataset), latent_dim, dtype=torch.double, requires_grad=True).to(device)
train_x = torch.zeros(len(dataset), Q, dtype=torch.double, requires_grad=False).to(device)
#Step 1: Encode the sample data to obtain \bar{\mu} and diag(W)
with torch.no_grad():
for batch_idx, sample_batched in enumerate(dataloader):
indices = sample_batched['idx']
data = sample_batched['digit'].double().to(device)
train_x[indices] = sample_batched['label'].double().to(device)
covariates = torch.cat((train_x[indices, :id_covariate], train_x[indices, id_covariate+1:]), dim=1)
mu, log_var = nnet_model.encode(data)
full_mu[indices] = mu
full_log_var[indices] = log_var
mu_grads = torch.zeros(len(dataset), latent_dim, dtype=torch.double, requires_grad=True).to(device)
log_var_grads = torch.zeros(len(dataset), latent_dim, dtype=torch.double, requires_grad=True).to(device)
gp_losses = 0
gp_loss_sum = 0
param_list = []
#Steps 2 & 3: compute d and E, compute gradients of KLD w.r.t S and theta
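        # The GP prior couples all samples, so the KL term (or its upper bound) is evaluated on
        # the full set of cached variational parameters rather than per batch: 'GPapprox' uses
        # Monte Carlo samples of Z, while 'GPapprox_closed' uses mu and log_var directly.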
if type_KL == 'GPapprox':
for sample in range(0, num_samples):
Z = nnet_model.sample_latent(full_mu, full_log_var)
for i in range(0, latent_dim):
Z_dim = Z[:, i]
gp_loss = -elbo(covar_module0[i], covar_module1[i], likelihoods[i], train_x, Z_dim,
zt_list[i].to(device), P, T, eps)
gp_loss_sum = gp_loss.item() + gp_loss_sum
gp_losses = gp_losses + gp_loss
gp_losses = gp_losses / num_samples
gp_loss_sum /= num_samples
elif type_KL == 'GPapprox_closed':
for i in range(0, latent_dim):
mu_sliced = full_mu[:, i]
log_var_sliced = full_log_var[:, i]
gp_loss = deviance_upper_bound(covar_module0[i], covar_module1[i],
likelihoods[i], train_x,
mu_sliced, log_var_sliced,
zt_list[i].to(device), P,
T, eps)
gp_loss_sum = gp_loss.item() + gp_loss_sum
gp_losses = gp_losses + gp_loss
for i in range(0, latent_dim):
param_list += list(covar_module0[i].parameters())
param_list += list(covar_module1[i].parameters())
# param_list.append(zt_list[i])
if loss_function == 'mse':
gp_losses = weight*gp_losses/latent_dim
gp_loss_sum /= latent_dim
mu_grads = torch.autograd.grad(gp_losses, full_mu, retain_graph=True)[0]
log_var_grads = torch.autograd.grad(gp_losses, full_log_var, retain_graph=True)[0]
grads = torch.autograd.grad(gp_losses, param_list)
for ind, p in enumerate(param_list):
p.grad = grads[ind]
recon_loss_sum = 0
nll_loss_sum = 0
#Step 4: compute reconstruction losses w.r.t phi and psi, add dKLD/dphi to the gradients
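        # mu.backward(mu_grads[indices]) and log_var.backward(log_var_grads[indices]) below
        # inject the cached dKL/dmu and dKL/dlog_var into the encoder graph, so the encoder
        # parameters receive gradients from both the KL term and the chosen reconstruction loss.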
for batch_idx, sample_batched in enumerate(dataloader):
data = sample_batched['digit'].double().to(device)
mask = sample_batched['mask'].double().to(device)
indices = sample_batched['idx']
label = sample_batched['label'].double().to(device)
covariates = torch.cat((label[:, :id_covariate], label[:, id_covariate+1:]), dim=1)
recon_batch, mu, log_var = nnet_model(data)
[recon_loss, nll] = nnet_model.loss_function(recon_batch, data, mask)
recon_loss = torch.sum(recon_loss)
nll = torch.sum(nll)
mu.backward(mu_grads[indices], retain_graph = True)
log_var.backward(log_var_grads[indices], retain_graph = True)
if loss_function == 'mse':
recon_loss.backward()
elif loss_function == 'nll':
nll.backward()
recon_loss_sum = recon_loss_sum + recon_loss.item()
nll_loss_sum = nll_loss_sum + nll.item()
#Do logging
print('Iter %d/%d - Loss: %.3f - GP loss: %.3f - NLL loss: %.3f - Recon Loss: %.3f' % (
epoch, epochs, recon_loss_sum + weight*gp_loss_sum, gp_loss_sum, nll_loss_sum, recon_loss_sum))
penalty_term_arr = np.append(penalty_term_arr, 0.0)
net_train_loss_arr = np.append(net_train_loss_arr, recon_loss_sum + weight*gp_loss_sum)
nll_loss_arr = np.append(nll_loss_arr, nll_loss_sum)
recon_loss_arr = np.append(recon_loss_arr, recon_loss_sum)
gp_loss_arr = np.append(gp_loss_arr, gp_loss_sum)
#Step 5: apply gradients using an Adam optimiser
optimiser.step()
if (not epoch % 100) and epoch != epochs:
if validation_dataset is not None:
validate(nnet_model, type_nnet, validation_dataset, type_KL, num_samples, latent_dim, covar_module0, covar_module1, likelihoods, zt_list, T, weight, full_mu, train_x, id_covariate, loss_function, eps=1e-6)
if torch.cuda.is_available():
torch.cuda.empty_cache()
if results_path and generation_dataset:
prediction_dataloader = DataLoader(prediction_dataset, batch_size=1000, shuffle=False, num_workers=4)
full_mu = torch.zeros(len(prediction_dataset), latent_dim, dtype=torch.double).to(device)
prediction_x = torch.zeros(len(prediction_dataset), Q, dtype=torch.double).to(device)
with torch.no_grad():
for batch_idx, sample_batched in enumerate(prediction_dataloader):
# no mini-batching. Instead get a batch of dataset size
label_id = sample_batched['idx']
prediction_x[label_id] = sample_batched['label'].double().to(device)
data = sample_batched['digit'].double().to(device)
covariates = torch.cat((prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate+1:]), dim=1)
mu, log_var = nnet_model.encode(data)
full_mu[label_id] = mu
recon_complete_gen(generation_dataset, nnet_model, type_nnet,
results_path, covar_module0,
covar_module1, likelihoods, latent_dim,
'./data', prediction_x, full_mu, epoch,
zt_list, P, T, id_covariate)
return penalty_term_arr, net_train_loss_arr, nll_loss_arr, recon_loss_arr, gp_loss_arr
def standard_training(nnet_model, type_nnet, epochs, dataset, optimiser, type_KL, num_samples,
latent_dim, covar_modules, likelihoods, zt_list, id_covariate, P, T, Q, weight, constrain_scales,
loss_function, memory_dbg=False, eps=1e-6, validation_dataset=None, generation_dataset=None, prediction_dataset=None):
"""
Perform training without minibatching.
:param nnet_model: encoder/decoder neural network model
:param type_nnet: type of encoder/decoder
    :param epochs: number of epochs
:param dataset: dataset to use in training
:param optimiser: optimiser to be used
    :param type_KL: type of KL divergence computation to use
:param num_samples: number of samples to use
:param latent_dim: number of latent dimensions
:param covar_modules: additive kernel (sum of cross-covariances)
:param likelihoods: GPyTorch likelihood model
:param zt_list: list of inducing points
:param id_covariate: covariate number of the id
:param P: number of unique instances
:param T: number of longitudinal samples per individual
:param Q: number of covariates
:param weight: value for the weight
:param constrain_scales: boolean to constrain scales to 1
:param loss_function: selected loss function
:param memory_dbg: enable debugging
:param eps: jitter
    :param validation_dataset: dataset for the validation set
:param generation_dataset: dataset to help with sample image generation
    :param prediction_dataset: dataset with subjects for prediction
    :return: trained models and resulting losses
"""
if type_KL == 'closed':
covar_module = covar_modules[0]
elif type_KL == 'GPapprox' or type_KL == 'GPapprox_closed':
covar_module0 = covar_modules[0]
covar_module1 = covar_modules[1]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# set up Data Loader for training
dataloader = DataLoader(dataset, batch_size=len(dataset), shuffle=False, num_workers=4)
net_train_loss_arr = np.empty((0, 1))
recon_loss_arr = np.empty((0, 1))
nll_loss_arr = np.empty((0, 1))
gp_loss_arr = np.empty((0, 1))
penalty_term_arr = np.empty((0, 1))
for epoch in range(1, epochs + 1):
for batch_idx, sample_batched in enumerate(dataloader):
# no mini-batching. Instead get a batch of dataset size.
optimiser.zero_grad() # clear gradients
label_id = sample_batched['idx']
label = sample_batched['label']
data = sample_batched['digit']
data = data.double().to(device)
mask = sample_batched['mask']
mask = mask.to(device)
train_x = label.double().to(device)
covariates = torch.cat((train_x[:, :id_covariate], train_x[:, id_covariate+1:]), dim=1)
# encode data
recon_batch, mu, log_var = nnet_model(data)
[recon_loss, nll] = nnet_model.loss_function(recon_batch, data, mask)
recon_loss = torch.sum(recon_loss)
nll_loss = torch.sum(nll)
gp_loss_avg = torch.tensor([0.0]).to(device)
net_loss = torch.tensor([0.0]).to(device)
penalty_term = torch.tensor([0.0]).to(device)
for sample_iter in range(0, num_samples):
# Iterate over specified number of samples. Default: num_samples = 1.
Z = nnet_model.sample_latent(mu, log_var)
gp_loss = torch.tensor([0.0]).to(device)
for i in range(0, latent_dim):
Z_dim = Z[:, i].view(-1).type(torch.DoubleTensor).to(device)
if type_KL == 'closed':
# Closed-form KL divergence formula
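                        # For a GP prior N(0, K) and a diagonal Gaussian posterior N(mu, W), the KL is
                        # 0.5 * (tr(K^-1 W) + mu^T K^-1 mu - N + log|K| - log|W|); KL_closed presumably
                        # evaluates this per latent dimension.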
kld1 = KL_closed(covar_module[i], train_x, likelihoods[i], data, mu[:, i], log_var[:, i])
gp_loss = gp_loss + kld1
elif type_KL == 'conj_gradient':
# GPyTorch default: use modified batch conjugate gradients
# See: https://arxiv.org/abs/1809.11165
gp_models[i].set_train_data(train_x.to(device), Z_dim.to(device))
gp_loss = gp_loss - mlls[i](gp_models[i](train_x.to(device)), Z_dim)
elif type_KL == 'GPapprox':
# Our proposed efficient approximate GP inference scheme
# See: http://arxiv.org/abs/2006.09763
loss = -elbo(covar_module0[i], covar_module1[i], likelihoods[i], train_x, Z_dim,
zt_list[i].to(device), P, T, eps)
gp_loss = gp_loss + loss
elif type_KL == 'GPapprox_closed':
# A variant of our proposed efficient approximate GP inference scheme.
# The key difference with GPapprox is the direct use of the variational mean and variance,
# instead of a sample from Z. We can call this a deviance upper bound.
# See the L-VAE supplement for more details: http://arxiv.org/abs/2006.09763
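                        # Compared with 'GPapprox', this avoids sampling Z (no Monte Carlo variance),
                        # at the price of optimising an upper bound of the KL term instead of a
                        # stochastic estimate of it.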
loss = deviance_upper_bound(covar_module0[i], covar_module1[i], likelihoods[i], train_x,
mu[:, i].view(-1), log_var[:, i].view(-1), zt_list[i].to(device), P,
T, eps)
gp_loss = gp_loss + loss
if type_KL == 'closed' or type_KL == 'GPapprox' or type_KL == 'GPapprox_closed':
if loss_function == 'mse':
gp_loss_avg = gp_loss_avg + (gp_loss / latent_dim)
elif loss_function == 'nll':
gp_loss_avg = gp_loss_avg + gp_loss
elif type_KL == 'conj_gradient':
if loss_function == 'mse':
gp_loss = gp_loss * data.shape[0] / latent_dim
elif loss_function == 'nll':
gp_loss = gp_loss * data.shape[0]
gp_loss_avg = gp_loss_avg + gp_loss
if type_KL == 'closed' or type_KL == 'GPapprox' or type_KL == 'GPapprox_closed':
gp_loss_avg = gp_loss_avg / num_samples
if loss_function == 'mse':
net_loss = recon_loss + weight * gp_loss_avg
elif loss_function == 'nll':
net_loss = nll_loss + gp_loss_avg
elif type_KL == 'conj_gradient':
gp_loss_avg = gp_loss_avg / num_samples
penalty_term = -0.5 * log_var.sum() / latent_dim
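                # -0.5 * log_var.sum() is, up to an additive constant, the negative entropy of the
                # Gaussian posterior; it is presumably added here because the exact marginal
                # likelihood objective above contains no entropy term for q(Z).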
if loss_function == 'mse':
net_loss = recon_loss + weight * (gp_loss_avg + penalty_term)
elif loss_function == 'nll':
net_loss = nll_loss + gp_loss_avg + penalty_term
net_loss.backward()
if type_KL == 'closed' or type_KL == 'GPapprox' or type_KL == 'GPapprox_closed':
print('Iter %d/%d - Loss: %.3f - GP loss: %.3f - NLL Loss: %.3f - Recon Loss: %.3f' % (
epoch, epochs, net_loss.item(), gp_loss_avg.item(), nll_loss.item(), recon_loss.item()))
elif type_KL == 'conj_gradient':
print('Iter %d/%d - Loss: %.3f - GP loss: %.3f - Penalty: %.3f - NLL Loss: %.3f - Recon Loss: %.3f' % (
epoch, epochs, net_loss.item(), gp_loss_avg.item(), penalty_term.item(), nll_loss.item(), recon_loss.item()))
penalty_term_arr = np.append(penalty_term_arr, penalty_term.cpu().item())
net_train_loss_arr = np.append(net_train_loss_arr, net_loss.cpu().item())
recon_loss_arr = np.append(recon_loss_arr, recon_loss.cpu().item())
nll_loss_arr = np.append(nll_loss_arr, nll_loss.cpu().item())
gp_loss_arr = np.append(gp_loss_arr, gp_loss_avg.cpu().item())
optimiser.step()
if constrain_scales:
for i in range(0, latent_dim):
likelihoods[i].noise = torch.tensor([1], dtype=torch.float).to(device)
if (not epoch % 100) and epoch != epochs:
if validation_dataset is not None:
standard_validate(nnet_model, type_nnet, validation_dataset, type_KL, num_samples, latent_dim, covar_module0, covar_module1, likelihoods, zt_list, T, weight, mu, train_x, id_covariate, loss_function, eps=1e-6)
if torch.cuda.is_available():
torch.cuda.empty_cache()
return penalty_term_arr, net_train_loss_arr, nll_loss_arr, recon_loss_arr, gp_loss_arr
def variational_inference_optimization(nnet_model, type_nnet, epochs, dataset, prediction_dataset, optimiser,
latent_dim, covar_module0, covar_module1, likelihoods, zt_list, P, T, Q, weight, constrain_scales,
id_covariate, loss_function, memory_dbg=False, eps=1e-6, results_path=None, save_path=None, gp_model_folder=None,
generation_dataset=None):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# set up Data Loader for training
dataloader = DataLoader(dataset, batch_size=len(dataset), shuffle=False, num_workers=4)
net_train_loss_arr = np.empty((0, 1))
recon_loss_arr = np.empty((0, 1))
nll_loss_arr = np.empty((0, 1))
gp_loss_arr = np.empty((0, 1))
penalty_term_arr = np.empty((0, 1))
for batch_idx, sample_batched in enumerate(dataloader):
label_id = sample_batched['idx']
label = sample_batched['label'].double().to(device)
data = sample_batched['digit'].double().to(device)
mask = sample_batched['mask'].double().to(device)
covariates = torch.cat((label[:, :id_covariate], label[:, id_covariate+1:]), dim=1)
# encode data
mu, log_var = nnet_model.encode(data)
mu = torch.nn.Parameter(mu.clone().detach(), requires_grad=True)
log_var = torch.nn.Parameter(log_var.clone().detach(), requires_grad=True)
try:
mu = torch.load(os.path.join(gp_model_folder, 'mu.pth'), map_location=torch.device(device)).detach().to(device).requires_grad_(True)
        log_var = torch.load(os.path.join(gp_model_folder, 'log_var.pth'), map_location=torch.device(device)).detach().to(device).requires_grad_(True)
except:
pass
optimiser.add_param_group({'params': mu})
optimiser.add_param_group({'params': log_var})
for epoch in range(1, epochs + 1):
optimiser.zero_grad()
Z = nnet_model.sample_latent(mu, log_var)
recon_batch = nnet_model.decode(Z)
[recon_loss, nll] = nnet_model.loss_function(recon_batch, data, mask)
recon_loss = torch.sum(recon_loss)
nll_loss = torch.sum(nll)
gp_loss_avg = torch.tensor([0.0]).to(device)
net_loss = torch.tensor([0.0]).to(device)
penalty_term = torch.tensor([0.0]).to(device)
for i in range(0, latent_dim):
loss = deviance_upper_bound(covar_module0[i], covar_module1[i], likelihoods[i], label,
mu[:, i].view(-1), log_var[:, i].view(-1), zt_list[i].to(device), P,
T, eps)
gp_loss_avg = gp_loss_avg + loss / latent_dim
if loss_function == 'mse':
net_loss = recon_loss + weight * gp_loss_avg
elif loss_function == 'nll':
net_loss = nll_loss + gp_loss_avg
net_loss.backward()
print('Iter %d/%d - Loss: %.3f - GP loss: %.3f - NLL Loss: %.3f - Recon Loss: %.3f' % (
epoch, epochs, net_loss.item(), gp_loss_avg.item(), nll_loss.item(), recon_loss.item()),
flush=True)
penalty_term_arr = np.append(penalty_term_arr, penalty_term.cpu().item())
net_train_loss_arr = np.append(net_train_loss_arr, net_loss.cpu().item())
recon_loss_arr = np.append(recon_loss_arr, recon_loss.cpu().item())
nll_loss_arr = np.append(nll_loss_arr, nll_loss.cpu().item())
gp_loss_arr = np.append(gp_loss_arr, gp_loss_avg.cpu().item())
optimiser.step()
if not epoch % 100:
sv_pth = os.path.join(save_path, 'recon_' + str(epoch) + '.pdf')
gen_rotated_mnist_plot(data[1920:2080].cpu().detach(), recon_batch[1920:2080].cpu().detach(), label[1920:2080].cpu().detach(), seq_length=20, num_sets=8, save_file=sv_pth)
torch.save(nnet_model.state_dict(), os.path.join(save_path, 'final-vae_model.pth'))
torch.save(mu, os.path.join(save_path, 'mu.pth'))
torch.save(log_var, os.path.join(save_path, 'log_var.pth'))
for i in range(0, latent_dim):
torch.save(covar_module0[i].state_dict(), os.path.join(save_path, 'cov_module0_' + str(i) + '.pth'))
torch.save(covar_module1[i].state_dict(), os.path.join(save_path, 'cov_module1_' + str(i) + '.pth'))
prediction_dataloader = DataLoader(prediction_dataset, batch_size=len(prediction_dataset), shuffle=False, num_workers=1)
for batch_idx, sample_batched in enumerate(prediction_dataloader):
label_pred = sample_batched['label'].double().to(device)
data_pred = sample_batched['digit'].double().to(device)
mask_pred = sample_batched['mask'].double().to(device)
covariates = torch.cat((label_pred[:, :id_covariate], label_pred[:, id_covariate+1:]), dim=1)
# encode data
mu_pred, log_var_pred = nnet_model.encode(data_pred)
break
try:
mu_pred = torch.load(os.path.join(gp_model_folder, 'mu_pred.pth'), map_location=torch.device(device)).detach().to(device).requires_grad_(True)
log_var_pred = torch.load(os.path.join(gp_model_folder, 'log_var_pred.pth'), map_location=torch.device(device)).detach().to(device).requires_grad_(True)
except:
pass
mu_pred = torch.nn.Parameter(mu_pred.clone().detach(), requires_grad=True)
log_var_pred = torch.nn.Parameter(log_var_pred.clone().detach(), requires_grad=True)
adam_param_list = []
adam_param_list.append({'params': mu_pred})
adam_param_list.append({'params': log_var_pred})
optimiser_pred = torch.optim.Adam(adam_param_list, lr=1e-3)
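    # At prediction time only the variational parameters (mu_pred, log_var_pred) of the new
    # subjects are optimised; the decoder weights and the GP/kernel hyperparameters stay fixed.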
for epoch in range(1, 1001):
optimiser_pred.zero_grad()
Z = nnet_model.sample_latent(mu_pred, log_var_pred)
recon_batch = nnet_model.decode(Z)
[recon_loss, nll] = nnet_model.loss_function(recon_batch,
data_pred,
mask_pred)
recon_loss = torch.sum(recon_loss)
nll_loss = torch.sum(nll)
gp_loss_avg = torch.tensor([0.0]).to(device)
prediction_mu = torch.cat((mu_pred, mu), dim=0)
prediction_log_var = torch.cat((log_var_pred, log_var), dim=0)
prediction_x = torch.cat((label_pred, label), dim=0)
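        # The prediction subjects are concatenated with the training subjects so the GP prior
        # ties them together; P+8 below appears to account for the 8 extra subjects in the
        # prediction set (cf. the 8 sets used for plotting further down).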
for i in range(0, latent_dim):
loss = deviance_upper_bound(covar_module0[i], covar_module1[i], likelihoods[i], prediction_x,
prediction_mu[:, i].view(-1), prediction_log_var[:, i].view(-1),
zt_list[i].to(device), P+8, T, eps)
gp_loss_avg = gp_loss_avg + loss / latent_dim
if loss_function == 'mse':
net_loss = recon_loss + weight * gp_loss_avg
elif loss_function == 'nll':
net_loss = nll_loss + gp_loss_avg
net_loss.backward()
print('Iter %d/1000 - Total Loss: %.3f - GP Loss: %.3f - Recon Loss: %.3f' % (
epoch, net_loss.item(), gp_loss_avg.item(), recon_loss.item()),
flush=True)
optimiser_pred.step()
torch.save(mu_pred, os.path.join(save_path, 'mu_pred.pth'))
torch.save(log_var_pred, os.path.join(save_path, 'log_var_pred.pth'))
l = [i*20 + k for i in range(0,8) for k in range(0,5)]
prediction_x = torch.cat((label_pred[l],
label))
prediction_mu = torch.cat((mu_pred[l],
mu))
if generation_dataset:
variational_complete_gen(generation_dataset, nnet_model, type_nnet,
results_path, covar_module0,
covar_module1, likelihoods, latent_dim,
'./data', prediction_x, prediction_mu, 'final',
zt_list, P, T, id_covariate)
exit(0)
|
[
"elbo_functions.minibatch_KLD_upper_bound_iter",
"torch.eye",
"torch.autograd.grad",
"numpy.empty",
"torch.cat",
"model_test.MSE_test_GPapprox",
"predict_HealthMNIST.variational_complete_gen",
"torch.device",
"torch.no_grad",
"os.path.join",
"predict_HealthMNIST.recon_complete_gen",
"torch.utils.data.DataLoader",
"numpy.append",
"torch.matmul",
"elbo_functions.minibatch_KLD_upper_bound",
"torch.unique",
"elbo_functions.KL_closed",
"torch.optim.Adam",
"torch.cuda.is_available",
"torch.sum",
"utils.SubjectSampler",
"validation.validate",
"utils.VaryingLengthSubjectSampler",
"torch.cholesky",
"torch.cuda.empty_cache",
"torch.tensor"
] |
[((3853, 3869), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (3861, 3869), True, 'import numpy as np\n'), ((3891, 3907), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (3899, 3907), True, 'import numpy as np\n'), ((3927, 3943), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (3935, 3943), True, 'import numpy as np\n'), ((3963, 3979), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (3971, 3979), True, 'import numpy as np\n'), ((4003, 4019), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (4011, 4019), True, 'import numpy as np\n'), ((15921, 15993), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(4)'}), '(dataset, batch_size=batch_size, shuffle=False, num_workers=4)\n', (15931, 15993), False, 'from torch.utils.data import DataLoader\n'), ((16024, 16040), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (16032, 16040), True, 'import numpy as np\n'), ((16062, 16078), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (16070, 16078), True, 'import numpy as np\n'), ((16098, 16114), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (16106, 16114), True, 'import numpy as np\n'), ((16133, 16149), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (16141, 16149), True, 'import numpy as np\n'), ((16173, 16189), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (16181, 16189), True, 'import numpy as np\n'), ((25318, 25334), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (25326, 25334), True, 'import numpy as np\n'), ((25356, 25372), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (25364, 25372), True, 'import numpy as np\n'), ((25392, 25408), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (25400, 25408), True, 'import numpy as np\n'), ((25427, 25443), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (25435, 25443), True, 'import numpy as np\n'), ((25467, 25483), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (25475, 25483), True, 'import numpy as np\n'), ((32624, 32640), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (32632, 32640), True, 'import numpy as np\n'), ((32662, 32678), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (32670, 32678), True, 'import numpy as np\n'), ((32698, 32714), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (32706, 32714), True, 'import numpy as np\n'), ((32733, 32749), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (32741, 32749), True, 'import numpy as np\n'), ((32773, 32789), 'numpy.empty', 'np.empty', (['(0, 1)'], {}), '((0, 1))\n', (32781, 32789), True, 'import numpy as np\n'), ((37476, 37519), 'torch.optim.Adam', 'torch.optim.Adam', (['adam_param_list'], {'lr': '(0.001)'}), '(adam_param_list, lr=0.001)\n', (37492, 37519), False, 'import torch\n'), ((39240, 39273), 'torch.cat', 'torch.cat', (['(label_pred[l], label)'], {}), '((label_pred[l], label))\n', (39249, 39273), False, 'import torch\n'), ((39325, 39352), 'torch.cat', 'torch.cat', (['(mu_pred[l], mu)'], {}), '((mu_pred[l], mu))\n', (39334, 39352), False, 'import torch\n'), ((7154, 7186), 'numpy.append', 'np.append', (['penalty_term_arr', '(0.0)'], {}), '(penalty_term_arr, 0.0)\n', (7163, 7186), True, 'import numpy as np\n'), ((7216, 7259), 'numpy.append', 'np.append', (['net_train_loss_arr', 'net_loss_sum'], {}), '(net_train_loss_arr, net_loss_sum)\n', (7225, 7259), True, 'import numpy as np\n'), ((7286, 7327), 'numpy.append', 
'np.append', (['recon_loss_arr', 'recon_loss_sum'], {}), '(recon_loss_arr, recon_loss_sum)\n', (7295, 7327), True, 'import numpy as np\n'), ((7351, 7388), 'numpy.append', 'np.append', (['nll_loss_arr', 'nll_loss_sum'], {}), '(nll_loss_arr, nll_loss_sum)\n', (7360, 7388), True, 'import numpy as np\n'), ((7412, 7449), 'numpy.append', 'np.append', (['kld_loss_arr', 'kld_loss_sum'], {}), '(kld_loss_arr, kld_loss_sum)\n', (7421, 7449), True, 'import numpy as np\n'), ((19373, 19415), 'torch.autograd.grad', 'torch.autograd.grad', (['gp_losses', 'param_list'], {}), '(gp_losses, param_list)\n', (19392, 19415), False, 'import torch\n'), ((20941, 20973), 'numpy.append', 'np.append', (['penalty_term_arr', '(0.0)'], {}), '(penalty_term_arr, 0.0)\n', (20950, 20973), True, 'import numpy as np\n'), ((21003, 21071), 'numpy.append', 'np.append', (['net_train_loss_arr', '(recon_loss_sum + weight * gp_loss_sum)'], {}), '(net_train_loss_arr, recon_loss_sum + weight * gp_loss_sum)\n', (21012, 21071), True, 'import numpy as np\n'), ((21094, 21131), 'numpy.append', 'np.append', (['nll_loss_arr', 'nll_loss_sum'], {}), '(nll_loss_arr, nll_loss_sum)\n', (21103, 21131), True, 'import numpy as np\n'), ((21157, 21198), 'numpy.append', 'np.append', (['recon_loss_arr', 'recon_loss_sum'], {}), '(recon_loss_arr, recon_loss_sum)\n', (21166, 21198), True, 'import numpy as np\n'), ((21221, 21256), 'numpy.append', 'np.append', (['gp_loss_arr', 'gp_loss_sum'], {}), '(gp_loss_arr, gp_loss_sum)\n', (21230, 21256), True, 'import numpy as np\n'), ((33091, 33163), 'torch.cat', 'torch.cat', (['(label[:, :id_covariate], label[:, id_covariate + 1:])'], {'dim': '(1)'}), '((label[:, :id_covariate], label[:, id_covariate + 1:]), dim=1)\n', (33100, 33163), False, 'import torch\n'), ((34066, 34087), 'torch.sum', 'torch.sum', (['recon_loss'], {}), '(recon_loss)\n', (34075, 34087), False, 'import torch\n'), ((34107, 34121), 'torch.sum', 'torch.sum', (['nll'], {}), '(nll)\n', (34116, 34121), False, 'import torch\n'), ((35806, 35852), 'os.path.join', 'os.path.join', (['save_path', '"""final-vae_model.pth"""'], {}), "(save_path, 'final-vae_model.pth')\n", (35818, 35852), False, 'import os\n'), ((35873, 35906), 'os.path.join', 'os.path.join', (['save_path', '"""mu.pth"""'], {}), "(save_path, 'mu.pth')\n", (35885, 35906), False, 'import os\n'), ((35932, 35970), 'os.path.join', 'os.path.join', (['save_path', '"""log_var.pth"""'], {}), "(save_path, 'log_var.pth')\n", (35944, 35970), False, 'import os\n'), ((36635, 36721), 'torch.cat', 'torch.cat', (['(label_pred[:, :id_covariate], label_pred[:, id_covariate + 1:])'], {'dim': '(1)'}), '((label_pred[:, :id_covariate], label_pred[:, id_covariate + 1:]),\n dim=1)\n', (36644, 36721), False, 'import torch\n'), ((37908, 37929), 'torch.sum', 'torch.sum', (['recon_loss'], {}), '(recon_loss)\n', (37917, 37929), False, 'import torch\n'), ((37949, 37963), 'torch.sum', 'torch.sum', (['nll'], {}), '(nll)\n', (37958, 37963), False, 'import torch\n'), ((38043, 38074), 'torch.cat', 'torch.cat', (['(mu_pred, mu)'], {'dim': '(0)'}), '((mu_pred, mu), dim=0)\n', (38052, 38074), False, 'import torch\n'), ((38104, 38145), 'torch.cat', 'torch.cat', (['(log_var_pred, log_var)'], {'dim': '(0)'}), '((log_var_pred, log_var), dim=0)\n', (38113, 38145), False, 'import torch\n'), ((38169, 38206), 'torch.cat', 'torch.cat', (['(label_pred, label)'], {'dim': '(0)'}), '((label_pred, label), dim=0)\n', (38178, 38206), False, 'import torch\n'), ((39047, 39085), 'os.path.join', 'os.path.join', (['save_path', '"""mu_pred.pth"""'], {}), 
"(save_path, 'mu_pred.pth')\n", (39059, 39085), False, 'import os\n'), ((39116, 39159), 'os.path.join', 'os.path.join', (['save_path', '"""log_var_pred.pth"""'], {}), "(save_path, 'log_var_pred.pth')\n", (39128, 39159), False, 'import os\n'), ((39420, 39646), 'predict_HealthMNIST.variational_complete_gen', 'variational_complete_gen', (['generation_dataset', 'nnet_model', 'type_nnet', 'results_path', 'covar_module0', 'covar_module1', 'likelihoods', 'latent_dim', '"""./data"""', 'prediction_x', 'prediction_mu', '"""final"""', 'zt_list', 'P', 'T', 'id_covariate'], {}), "(generation_dataset, nnet_model, type_nnet,\n results_path, covar_module0, covar_module1, likelihoods, latent_dim,\n './data', prediction_x, prediction_mu, 'final', zt_list, P, T, id_covariate\n )\n", (39444, 39646), False, 'from predict_HealthMNIST import recon_complete_gen, gen_rotated_mnist_plot, variational_complete_gen\n'), ((3200, 3225), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3223, 3225), False, 'import torch\n'), ((4729, 4805), 'torch.cat', 'torch.cat', (['(train_x[:, :id_covariate], train_x[:, id_covariate + 1:])'], {'dim': '(1)'}), '((train_x[:, :id_covariate], train_x[:, id_covariate + 1:]), dim=1)\n', (4738, 4805), False, 'import torch\n'), ((4968, 4989), 'torch.sum', 'torch.sum', (['recon_loss'], {}), '(recon_loss)\n', (4977, 4989), False, 'import torch\n'), ((5013, 5027), 'torch.sum', 'torch.sum', (['nll'], {}), '(nll)\n', (5022, 5027), False, 'import torch\n'), ((15740, 15765), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15763, 15765), False, 'import torch\n'), ((16666, 16681), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16679, 16681), False, 'import torch\n'), ((19204, 19262), 'torch.autograd.grad', 'torch.autograd.grad', (['gp_losses', 'full_mu'], {'retain_graph': '(True)'}), '(gp_losses, full_mu, retain_graph=True)\n', (19223, 19262), False, 'import torch\n'), ((19290, 19353), 'torch.autograd.grad', 'torch.autograd.grad', (['gp_losses', 'full_log_var'], {'retain_graph': '(True)'}), '(gp_losses, full_log_var, retain_graph=True)\n', (19309, 19353), False, 'import torch\n'), ((19967, 20039), 'torch.cat', 'torch.cat', (['(label[:, :id_covariate], label[:, id_covariate + 1:])'], {'dim': '(1)'}), '((label[:, :id_covariate], label[:, id_covariate + 1:]), dim=1)\n', (19976, 20039), False, 'import torch\n'), ((20214, 20235), 'torch.sum', 'torch.sum', (['recon_loss'], {}), '(recon_loss)\n', (20223, 20235), False, 'import torch\n'), ((20254, 20268), 'torch.sum', 'torch.sum', (['nll'], {}), '(nll)\n', (20263, 20268), False, 'import torch\n'), ((25123, 25148), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (25146, 25148), False, 'import torch\n'), ((26075, 26151), 'torch.cat', 'torch.cat', (['(train_x[:, :id_covariate], train_x[:, id_covariate + 1:])'], {'dim': '(1)'}), '((train_x[:, :id_covariate], train_x[:, id_covariate + 1:]), dim=1)\n', (26084, 26151), False, 'import torch\n'), ((26341, 26362), 'torch.sum', 'torch.sum', (['recon_loss'], {}), '(recon_loss)\n', (26350, 26362), False, 'import torch\n'), ((26386, 26400), 'torch.sum', 'torch.sum', (['nll'], {}), '(nll)\n', (26395, 26400), False, 'import torch\n'), ((32429, 32454), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (32452, 32454), False, 'import torch\n'), ((5268, 5465), 'elbo_functions.minibatch_KLD_upper_bound_iter', 'minibatch_KLD_upper_bound_iter', (['covar_module0', 'covar_module1', 'likelihoods', 'latent_dim', 'm', 'PSD_H', 'train_x', 
'mu', 'log_var', 'zt_list', 'P', 'P_in_current_batch', 'N', 'natural_gradient', 'id_covariate', 'eps'], {}), '(covar_module0, covar_module1, likelihoods,\n latent_dim, m, PSD_H, train_x, mu, log_var, zt_list, P,\n P_in_current_batch, N, natural_gradient, id_covariate, eps)\n', (5298, 5465), False, 'from elbo_functions import deviance_upper_bound, elbo, KL_closed, minibatch_KLD_upper_bound, minibatch_KLD_upper_bound_iter\n'), ((5569, 5747), 'elbo_functions.minibatch_KLD_upper_bound', 'minibatch_KLD_upper_bound', (['covar_module0', 'covar_module1', 'likelihoods', 'latent_dim', 'm', 'PSD_H', 'train_x', 'mu', 'log_var', 'zt_list', 'P', 'P_in_current_batch', 'T', 'natural_gradient', 'eps'], {}), '(covar_module0, covar_module1, likelihoods,\n latent_dim, m, PSD_H, train_x, mu, log_var, zt_list, P,\n P_in_current_batch, T, natural_gradient, eps)\n', (5594, 5747), False, 'from elbo_functions import deviance_upper_bound, elbo, KL_closed, minibatch_KLD_upper_bound, minibatch_KLD_upper_bound_iter\n'), ((6207, 6224), 'torch.cholesky', 'torch.cholesky', (['H'], {}), '(H)\n', (6221, 6224), False, 'import torch\n'), ((6437, 6459), 'torch.cholesky', 'torch.cholesky', (['iH_new'], {}), '(iH_new)\n', (6451, 6459), False, 'import torch\n'), ((7517, 7532), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7530, 7532), False, 'import torch\n'), ((16975, 17067), 'torch.cat', 'torch.cat', (['(train_x[indices, :id_covariate], train_x[indices, id_covariate + 1:])'], {'dim': '(1)'}), '((train_x[indices, :id_covariate], train_x[indices, id_covariate +\n 1:]), dim=1)\n', (16984, 17067), False, 'import torch\n'), ((21454, 21668), 'validation.validate', 'validate', (['nnet_model', 'type_nnet', 'validation_dataset', 'type_KL', 'num_samples', 'latent_dim', 'covar_module0', 'covar_module1', 'likelihoods', 'zt_list', 'T', 'weight', 'full_mu', 'train_x', 'id_covariate', 'loss_function'], {'eps': '(1e-06)'}), '(nnet_model, type_nnet, validation_dataset, type_KL, num_samples,\n latent_dim, covar_module0, covar_module1, likelihoods, zt_list, T,\n weight, full_mu, train_x, id_covariate, loss_function, eps=1e-06)\n', (21462, 21668), False, 'from validation import validate\n'), ((21679, 21704), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (21702, 21704), False, 'import torch\n'), ((21844, 21921), 'torch.utils.data.DataLoader', 'DataLoader', (['prediction_dataset'], {'batch_size': '(1000)', 'shuffle': '(False)', 'num_workers': '(4)'}), '(prediction_dataset, batch_size=1000, shuffle=False, num_workers=4)\n', (21854, 21921), False, 'from torch.utils.data import DataLoader\n'), ((34145, 34164), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (34157, 34164), False, 'import torch\n'), ((34195, 34214), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (34207, 34214), False, 'import torch\n'), ((34249, 34268), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (34261, 34268), False, 'import torch\n'), ((37987, 38006), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (37999, 38006), False, 'import torch\n'), ((3475, 3525), 'utils.VaryingLengthSubjectSampler', 'VaryingLengthSubjectSampler', (['dataset', 'id_covariate'], {}), '(dataset, id_covariate)\n', (3502, 3525), False, 'from utils import SubjectSampler, VaryingLengthSubjectSampler, VaryingLengthBatchSampler, HensmanDataLoader\n'), ((3751, 3780), 'utils.SubjectSampler', 'SubjectSampler', (['dataset', 'P', 'T'], {}), '(dataset, P, T)\n', (3765, 3780), False, 'from utils import SubjectSampler, 
VaryingLengthSubjectSampler, VaryingLengthBatchSampler, HensmanDataLoader\n'), ((8469, 8688), 'validation.validate', 'validate', (['nnet_model', 'type_nnet', 'validation_dataset', 'type_KL', 'num_samples', 'latent_dim', 'covar_module0', 'covar_module1', 'likelihoods', 'zt_list', 'T', 'weight', 'full_mu', 'prediction_x', 'id_covariate', 'loss_function'], {'eps': '(1e-06)'}), '(nnet_model, type_nnet, validation_dataset, type_KL, num_samples,\n latent_dim, covar_module0, covar_module1, likelihoods, zt_list, T,\n weight, full_mu, prediction_x, id_covariate, loss_function, eps=1e-06)\n', (8477, 8688), False, 'from validation import validate\n'), ((13657, 13682), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13680, 13682), False, 'import torch\n'), ((21726, 21750), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (21748, 21750), False, 'import torch\n'), ((22151, 22166), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22164, 22166), False, 'import torch\n'), ((22828, 23035), 'predict_HealthMNIST.recon_complete_gen', 'recon_complete_gen', (['generation_dataset', 'nnet_model', 'type_nnet', 'results_path', 'covar_module0', 'covar_module1', 'likelihoods', 'latent_dim', '"""./data"""', 'prediction_x', 'full_mu', 'epoch', 'zt_list', 'P', 'T', 'id_covariate'], {}), "(generation_dataset, nnet_model, type_nnet, results_path,\n covar_module0, covar_module1, likelihoods, latent_dim, './data',\n prediction_x, full_mu, epoch, zt_list, P, T, id_covariate)\n", (22846, 23035), False, 'from predict_HealthMNIST import recon_complete_gen, gen_rotated_mnist_plot, variational_complete_gen\n'), ((26428, 26447), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (26440, 26447), False, 'import torch\n'), ((26482, 26501), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (26494, 26501), False, 'import torch\n'), ((26540, 26559), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (26552, 26559), False, 'import torch\n'), ((31860, 31885), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (31883, 31885), False, 'import torch\n'), ((5177, 5215), 'torch.unique', 'torch.unique', (['train_x[:, id_covariate]'], {}), '(train_x[:, id_covariate])\n', (5189, 5215), False, 'import torch\n'), ((8225, 8330), 'torch.cat', 'torch.cat', (['(prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate +\n 1:])'], {'dim': '(1)'}), '((prediction_x[label_id, :id_covariate], prediction_x[label_id, \n id_covariate + 1:]), dim=1)\n', (8234, 8330), False, 'import torch\n'), ((13708, 13732), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (13730, 13732), False, 'import torch\n'), ((22597, 22702), 'torch.cat', 'torch.cat', (['(prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate +\n 1:])'], {'dim': '(1)'}), '((prediction_x[label_id, :id_covariate], prediction_x[label_id, \n id_covariate + 1:]), dim=1)\n', (22606, 22702), False, 'import torch\n'), ((26797, 26816), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (26809, 26816), False, 'import torch\n'), ((27094, 27181), 'elbo_functions.KL_closed', 'KL_closed', (['covar_module[i]', 'train_x', 'likelihoods[i]', 'data', 'mu[:, i]', 'log_var[:, i]'], {}), '(covar_module[i], train_x, likelihoods[i], data, mu[:, i], log_var\n [:, i])\n', (27103, 27181), False, 'from elbo_functions import deviance_upper_bound, elbo, KL_closed, minibatch_KLD_upper_bound, minibatch_KLD_upper_bound_iter\n'), ((31911, 31935), 
'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (31933, 31935), False, 'import torch\n'), ((6267, 6309), 'torch.eye', 'torch.eye', (['H.shape[-1]'], {'dtype': 'torch.double'}), '(H.shape[-1], dtype=torch.double)\n', (6276, 6309), False, 'import torch\n'), ((9398, 9413), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9411, 9413), False, 'import torch\n'), ((31453, 31489), 'torch.tensor', 'torch.tensor', (['[1]'], {'dtype': 'torch.float'}), '([1], dtype=torch.float)\n', (31465, 31489), False, 'import torch\n'), ((6610, 6629), 'torch.matmul', 'torch.matmul', (['iH', 'm'], {}), '(iH, m)\n', (6622, 6629), False, 'import torch\n'), ((9804, 9909), 'torch.cat', 'torch.cat', (['(prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate +\n 1:])'], {'dim': '(1)'}), '((prediction_x[label_id, :id_covariate], prediction_x[label_id, \n id_covariate + 1:]), dim=1)\n', (9813, 9909), False, 'import torch\n'), ((10320, 10614), 'model_test.MSE_test_GPapprox', 'MSE_test_GPapprox', (['csv_file_test_data', 'csv_file_test_label', 'test_mask_file', 'data_source_path', 'type_nnet', 'nnet_model', 'covar_module0', 'covar_module1', 'likelihoods', 'results_path', 'latent_dim', 'prediction_x', 'full_mu', 'zt_list', 'P', 'T', 'id_covariate', 'varying_T'], {'save_file': '"""result_error_best.csv"""'}), "(csv_file_test_data, csv_file_test_label, test_mask_file,\n data_source_path, type_nnet, nnet_model, covar_module0, covar_module1,\n likelihoods, results_path, latent_dim, prediction_x, full_mu, zt_list,\n P, T, id_covariate, varying_T, save_file='result_error_best.csv')\n", (10337, 10614), False, 'from model_test import MSE_test_GPapprox, MSE_test\n'), ((11000, 11049), 'os.path.join', 'os.path.join', (['results_path', '"""nnet_model_best.pth"""'], {}), "(results_path, 'nnet_model_best.pth')\n", (11012, 11049), False, 'import os\n'), ((11113, 11160), 'os.path.join', 'os.path.join', (['results_path', '"""gp_model_best.pth"""'], {}), "(results_path, 'gp_model_best.pth')\n", (11125, 11160), False, 'import os\n'), ((11210, 11256), 'os.path.join', 'os.path.join', (['results_path', '"""zt_list_best.pth"""'], {}), "(results_path, 'zt_list_best.pth')\n", (11222, 11256), False, 'import os\n'), ((11300, 11340), 'os.path.join', 'os.path.join', (['results_path', '"""m_best.pth"""'], {}), "(results_path, 'm_best.pth')\n", (11312, 11340), False, 'import os\n'), ((11384, 11424), 'os.path.join', 'os.path.join', (['results_path', '"""H_best.pth"""'], {}), "(results_path, 'H_best.pth')\n", (11396, 11424), False, 'import os\n'), ((13044, 13262), 'predict_HealthMNIST.recon_complete_gen', 'recon_complete_gen', (['generation_dataset', 'nnet_model', 'type_nnet', 'results_path', 'covar_module0', 'covar_module1', 'likelihoods', 'latent_dim', '"""./data"""', 'prediction_x', 'full_mu', 'epoch', 'zt_list', 'P', 'T', 'id_covariate', 'varying_T'], {}), "(generation_dataset, nnet_model, type_nnet, results_path,\n covar_module0, covar_module1, likelihoods, latent_dim, './data',\n prediction_x, full_mu, epoch, zt_list, P, T, id_covariate, varying_T)\n", (13062, 13262), False, 'from predict_HealthMNIST import recon_complete_gen, gen_rotated_mnist_plot, variational_complete_gen\n'), ((6501, 6543), 'torch.eye', 'torch.eye', (['H.shape[-1]'], {'dtype': 'torch.double'}), '(H.shape[-1], dtype=torch.double)\n', (6510, 6543), False, 'import torch\n'), ((8986, 9047), 'utils.VaryingLengthSubjectSampler', 'VaryingLengthSubjectSampler', (['prediction_dataset', 'id_covariate'], {}), '(prediction_dataset, id_covariate)\n', 
(9013, 9047), False, 'from utils import SubjectSampler, VaryingLengthSubjectSampler, VaryingLengthBatchSampler, HensmanDataLoader\n'), ((12716, 12821), 'torch.cat', 'torch.cat', (['(prediction_x[label_id, :id_covariate], prediction_x[label_id, id_covariate +\n 1:])'], {'dim': '(1)'}), '((prediction_x[label_id, :id_covariate], prediction_x[label_id, \n id_covariate + 1:]), dim=1)\n', (12725, 12821), False, 'import torch\n'), ((33414, 33453), 'os.path.join', 'os.path.join', (['gp_model_folder', '"""mu.pth"""'], {}), "(gp_model_folder, 'mu.pth')\n", (33426, 33453), False, 'import os\n'), ((33560, 33603), 'os.path.join', 'os.path.join', (['gp_model_foder', '"""log_var.pth"""'], {}), "(gp_model_foder, 'log_var.pth')\n", (33572, 33603), False, 'import os\n'), ((36852, 36896), 'os.path.join', 'os.path.join', (['gp_model_folder', '"""mu_pred.pth"""'], {}), "(gp_model_folder, 'mu_pred.pth')\n", (36864, 36896), False, 'import os\n'), ((37008, 37057), 'os.path.join', 'os.path.join', (['gp_model_folder', '"""log_var_pred.pth"""'], {}), "(gp_model_folder, 'log_var_pred.pth')\n", (37020, 37057), False, 'import os\n'), ((33468, 33488), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (33480, 33488), False, 'import torch\n'), ((33618, 33638), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (33630, 33638), False, 'import torch\n'), ((36911, 36931), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (36923, 36931), False, 'import torch\n'), ((37072, 37092), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (37084, 37092), False, 'import torch\n'), ((6664, 6687), 'torch.matmul', 'torch.matmul', (['grad_H', 'm'], {}), '(grad_H, m)\n', (6676, 6687), False, 'import torch\n'), ((11761, 11822), 'utils.VaryingLengthSubjectSampler', 'VaryingLengthSubjectSampler', (['prediction_dataset', 'id_covariate'], {}), '(prediction_dataset, id_covariate)\n', (11788, 11822), False, 'from utils import SubjectSampler, VaryingLengthSubjectSampler, VaryingLengthBatchSampler, HensmanDataLoader\n')]
|
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution('foundations_rest_api').version
except DistributionNotFound:
__version__ = None
|
[
"pkg_resources.get_distribution"
] |
[((88, 128), 'pkg_resources.get_distribution', 'get_distribution', (['"""foundations_rest_api"""'], {}), "('foundations_rest_api')\n", (104, 128), False, 'from pkg_resources import get_distribution, DistributionNotFound\n')]
|
# Copyright 2019 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.unit import base
import uuid
from openstack.load_balancer.v2 import amphora
IDENTIFIER = uuid.uuid4()
LB_ID = uuid.uuid4()
LISTENER_ID = uuid.uuid4()
COMPUTE_ID = uuid.uuid4()
VRRP_PORT_ID = uuid.uuid4()
HA_PORT_ID = uuid.uuid4()
IMAGE_ID = uuid.uuid4()
COMPUTE_FLAVOR = uuid.uuid4()
AMPHORA_ID = uuid.uuid4()
EXAMPLE = {
'id': IDENTIFIER,
'loadbalancer_id': LB_ID,
'compute_id': COMPUTE_ID,
'lb_network_ip': '192.168.1.2',
'vrrp_ip': '192.168.1.5',
'ha_ip': '192.168.1.10',
'vrrp_port_id': VRRP_PORT_ID,
'ha_port_id': HA_PORT_ID,
'cert_expiration': '2019-09-19 00:34:51',
'cert_busy': 0,
'role': 'MASTER',
'status': 'ALLOCATED',
'vrrp_interface': 'eth1',
'vrrp_id': 1,
'vrrp_priority': 100,
'cached_zone': 'zone1',
'created_at': '2017-05-10T18:14:44',
'updated_at': '2017-05-10T23:08:12',
'image_id': IMAGE_ID,
'compute_flavor': COMPUTE_FLAVOR
}
class TestAmphora(base.TestCase):
def test_basic(self):
test_amphora = amphora.Amphora()
self.assertEqual('amphora', test_amphora.resource_key)
self.assertEqual('amphorae', test_amphora.resources_key)
self.assertEqual('/octavia/amphorae', test_amphora.base_path)
self.assertFalse(test_amphora.allow_create)
self.assertTrue(test_amphora.allow_fetch)
self.assertFalse(test_amphora.allow_commit)
self.assertFalse(test_amphora.allow_delete)
self.assertTrue(test_amphora.allow_list)
def test_make_it(self):
test_amphora = amphora.Amphora(**EXAMPLE)
self.assertEqual(IDENTIFIER, test_amphora.id)
self.assertEqual(LB_ID, test_amphora.loadbalancer_id)
self.assertEqual(COMPUTE_ID, test_amphora.compute_id)
self.assertEqual(EXAMPLE['lb_network_ip'], test_amphora.lb_network_ip)
self.assertEqual(EXAMPLE['vrrp_ip'], test_amphora.vrrp_ip)
self.assertEqual(EXAMPLE['ha_ip'], test_amphora.ha_ip)
self.assertEqual(VRRP_PORT_ID, test_amphora.vrrp_port_id)
self.assertEqual(HA_PORT_ID, test_amphora.ha_port_id)
self.assertEqual(EXAMPLE['cert_expiration'],
test_amphora.cert_expiration)
self.assertEqual(EXAMPLE['cert_busy'], test_amphora.cert_busy)
self.assertEqual(EXAMPLE['role'], test_amphora.role)
self.assertEqual(EXAMPLE['status'], test_amphora.status)
self.assertEqual(EXAMPLE['vrrp_interface'],
test_amphora.vrrp_interface)
self.assertEqual(EXAMPLE['vrrp_id'], test_amphora.vrrp_id)
self.assertEqual(EXAMPLE['vrrp_priority'], test_amphora.vrrp_priority)
self.assertEqual(EXAMPLE['cached_zone'], test_amphora.cached_zone)
self.assertEqual(EXAMPLE['created_at'], test_amphora.created_at)
self.assertEqual(EXAMPLE['updated_at'], test_amphora.updated_at)
self.assertEqual(IMAGE_ID, test_amphora.image_id)
self.assertEqual(COMPUTE_FLAVOR, test_amphora.compute_flavor)
self.assertDictEqual(
{'limit': 'limit',
'marker': 'marker',
'id': 'id',
'loadbalancer_id': 'loadbalancer_id',
'compute_id': 'compute_id',
'lb_network_ip': 'lb_network_ip',
'vrrp_ip': 'vrrp_ip',
'ha_ip': 'ha_ip',
'vrrp_port_id': 'vrrp_port_id',
'ha_port_id': 'ha_port_id',
'cert_expiration': 'cert_expiration',
'cert_busy': 'cert_busy',
'role': 'role',
'status': 'status',
'vrrp_interface': 'vrrp_interface',
'vrrp_id': 'vrrp_id',
'vrrp_priority': 'vrrp_priority',
'cached_zone': 'cached_zone',
'created_at': 'created_at',
'updated_at': 'updated_at',
             'image_id': 'image_id'
},
test_amphora._query_mapping._mapping)
class TestAmphoraConfig(base.TestCase):
def test_basic(self):
test_amp_config = amphora.AmphoraConfig()
self.assertEqual('/octavia/amphorae/%(amphora_id)s/config',
test_amp_config.base_path)
self.assertFalse(test_amp_config.allow_create)
self.assertFalse(test_amp_config.allow_fetch)
self.assertTrue(test_amp_config.allow_commit)
self.assertFalse(test_amp_config.allow_delete)
self.assertFalse(test_amp_config.allow_list)
class TestAmphoraFailover(base.TestCase):
def test_basic(self):
test_amp_failover = amphora.AmphoraFailover()
self.assertEqual('/octavia/amphorae/%(amphora_id)s/failover',
test_amp_failover.base_path)
self.assertFalse(test_amp_failover.allow_create)
self.assertFalse(test_amp_failover.allow_fetch)
self.assertTrue(test_amp_failover.allow_commit)
self.assertFalse(test_amp_failover.allow_delete)
self.assertFalse(test_amp_failover.allow_list)
|
[
"openstack.load_balancer.v2.amphora.Amphora",
"openstack.load_balancer.v2.amphora.AmphoraFailover",
"openstack.load_balancer.v2.amphora.AmphoraConfig",
"uuid.uuid4"
] |
[((696, 708), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (706, 708), False, 'import uuid\n'), ((717, 729), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (727, 729), False, 'import uuid\n'), ((744, 756), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (754, 756), False, 'import uuid\n'), ((770, 782), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (780, 782), False, 'import uuid\n'), ((798, 810), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (808, 810), False, 'import uuid\n'), ((824, 836), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (834, 836), False, 'import uuid\n'), ((848, 860), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (858, 860), False, 'import uuid\n'), ((878, 890), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (888, 890), False, 'import uuid\n'), ((904, 916), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (914, 916), False, 'import uuid\n'), ((1621, 1638), 'openstack.load_balancer.v2.amphora.Amphora', 'amphora.Amphora', ([], {}), '()\n', (1636, 1638), False, 'from openstack.load_balancer.v2 import amphora\n'), ((2144, 2170), 'openstack.load_balancer.v2.amphora.Amphora', 'amphora.Amphora', ([], {}), '(**EXAMPLE)\n', (2159, 2170), False, 'from openstack.load_balancer.v2 import amphora\n'), ((4645, 4668), 'openstack.load_balancer.v2.amphora.AmphoraConfig', 'amphora.AmphoraConfig', ([], {}), '()\n', (4666, 4668), False, 'from openstack.load_balancer.v2 import amphora\n'), ((5159, 5184), 'openstack.load_balancer.v2.amphora.AmphoraFailover', 'amphora.AmphoraFailover', ([], {}), '()\n', (5182, 5184), False, 'from openstack.load_balancer.v2 import amphora\n')]
|
# coding: utf-8
from lxml import html
import requests
import pandas as pd
import config
# DATABASE THINGS
# this one is used by the database
import pymongo as pm
# connect to the database choosing the correct collection
mongo_db_url = config.mongo_db_url
mongo_client = pm.MongoClient(mongo_db_url)
db = mongo_client[config.database_name] # the selected database on your mongo server
collection = db[config.collection_name] # the collection to which to write the data
baseURL = 'https://coinmarketcap.com/historical/';
snapshotDates = ['20130505', '20130512', '20130519', '20130526', '20130602', '20130609', '20130616', '20130623', '20130630', '20130707', '20130714', '20130721', '20130728', '20130804', '20130811',
'20130818', '20130825', '20130901', '20130908', '20130915', '20130922', '20130929', '20131006', '20131013', '20131020', '20131027', '20131103', '20131110', '20131117', '20131124', '20131201',
'20131208', '20131215', '20131222', '20131229', '20140105', '20140112', '20140119', '20140126', '20140202', '20140209', '20140216', '20140223', '20140302', '20140309', '20140316', '20140323',
'20140330', '20140406', '20140413', '20140420', '20140427', '20140504', '20140511', '20140518', '20140525', '20140601', '20140608', '20140615', '20140622', '20140629', '20140706', '20140713',
'20140720', '20140727', '20140803', '20140810', '20140817', '20140824', '20140831', '20140907', '20140914', '20140921', '20140928', '20141005', '20141012', '20141019', '20141026', '20141102',
'20141109', '20141116', '20141123', '20141130', '20141207', '20141214', '20141221', '20141228', '20150104', '20150111', '20150118', '20150125', '20150201', '20150208', '20150215', '20150222',
'20150301', '20150308', '20150315', '20150322', '20150329', '20150405', '20150412', '20150419', '20150426', '20150503', '20150510', '20150517', '20150524', '20150531', '20150607', '20150614',
'20150621', '20150628', '20150705', '20150712', '20150719', '20150726', '20150802', '20150809', '20150816', '20150823', '20150830', '20150906', '20150913', '20150920', '20150927', '20151004',
'20151011', '20151018', '20151025', '20151101', '20151108', '20151115', '20151122', '20151129', '20151206', '20151213', '20151220', '20151227', '20160103', '20160110', '20160117', '20160124',
'20160131', '20160207', '20160214', '20160221', '20160228', '20160306', '20160313', '20160320', '20160327', '20160403', '20160410', '20160417', '20160424', '20160501', '20160508', '20160515',
'20160522', '20160529', '20160605', '20160612', '20160619', '20160626', '20160703', '20160710', '20160717', '20160724', '20160731', '20160807', '20160814', '20160821', '20160828', '20160904',
'20160911', '20160918', '20160925', '20161002', '20161009', '20161016', '20161023', '20161030', '20161106', '20161113', '20161120', '20161127', '20161204', '20161211', '20161218', '20161225',
'20170101', '20170108', '20170115', '20170122', '20170129', '20170205', '20170212', '20170219', '20170226', '20170305', '20170312', '20170319', '20170326', '20170402', '20170409', '20170416',
'20170423', '20170430', '20170507', '20170514', '20170521', '20170528', '20170604', '20170611', '20170618', '20170625', '20170702', '20170709', '20170716', '20170723', '20170730', '20170806',
'20170813', '20170820', '20170827', '20170903', '20170910', '20170917', '20170924', '20171001', '20171008', '20171015', '20171022', '20171029', '20171105', '20171112', '20171119', '20171126',
'20171203', '20171210', '20171217', '20171224', '20171231', '20180107', '20180114', '20180121', '20180128', '20180204', '20180211', '20180218', '20180225', '20180304', '20180311', '20180318',
'20180325', '20180401', '20180408', '20180415', '20180422', '20180429', '20180506', '20180513', '20180520', '20180527', '20180603', '20180610', '20180617', '20180624', '20180701', '20180708',
'20180715', '20180722', '20180729', '20180805', '20180812', '20180819', '20180826', '20180902', '20180909', '20180916', '20180923', '20180930', '20181007']
start_amount = len(snapshotDates)
def maybe_float( text ):
try:
if isinstance(text, pd.Series):
return float(text.tolist()[0])
return float(text)
except (ValueError, IndexError):
return 0
def parse_snapshot( date ):
fullURL = baseURL + date + '/';
if config.DEBUG:
print("starting URL parsing snapshot for: " + date);
resp = requests.get(fullURL)
h = html.fromstring(resp.content)
names = h.xpath('//a[@class="currency-name-container link-secondary"]/text()')
symbols = h.xpath('//td[@class="text-left col-symbol"]/text()')
symbols = [replaceSymbolCharacters(symbol) for symbol in symbols];
market_caps = [maybe_float(row) for row in h.xpath('//td[@class="no-wrap market-cap text-right"]/@data-usd')]
oneday_volumes = [maybe_float(row) for row in h.xpath('//a[@class="volume"]/@data-usd')]
prices_usd = [maybe_float(row) for row in h.xpath('//a[@class="price"]/@data-usd')]
prices_btc = [maybe_float(row) for row in h.xpath('//a[@class="price"]/@data-btc')]
formattedForReturn = {};
for x in range(0, len(symbols)):
formattedForReturn[symbols[x]] = {'name': names[x], 'symbol': symbols[x], 'market_cap': market_caps[x], 'oneday_volume': oneday_volumes[x], 'price_usd': prices_usd[x],
'price_btc': prices_btc[x]};
if config.DEBUG:
print("Finished parsing " + date);
return formattedForReturn
def write_snapshotresults_to_database( datesAndData ):
result = collection.insert_many(datesAndData)
#print("wrote " + str(len(datesAndData)) + " to db!");
result.inserted_ids
def replaceSymbolCharacters( stringToFix ):
symbols_that_does_not_work_in_mongo_and_their_replacements = {'$': 'SSS'};
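    # MongoDB restricts field names starting with '$', so ticker symbols containing that
    # character are rewritten ('$' -> 'SSS') before being used as document keys.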
for symbol, replacement in symbols_that_does_not_work_in_mongo_and_their_replacements.items():
# print("want to replace" + symbol + " in string " + stringToFix + " with " + replacement);
stringToFix = stringToFix.replace(symbol, replacement);
return stringToFix;
def parse_and_save_data( snapshotDatesToParse ):
while len(snapshotDatesToParse) > 0:
# first parse
parsedSnapshots = [];
limit = 2;
counter = 0;
while counter < limit and len(snapshotDatesToParse) > 0:
snapshotDate = snapshotDatesToParse.pop();
entry = {};
entry['date'] = snapshotDate;
entry['marketData'] = parse_snapshot(snapshotDate);
parsedSnapshots.append(entry);
# print(parsedSnapshots);
counter += 1;
# then save
write_snapshotresults_to_database(parsedSnapshots)
progress_number = float(start_amount - len(snapshotDatesToParse)) / float( start_amount) * 100
progress_string = "{:.1f}".format(progress_number) + "%"
print("wrote to database, progress: " + progress_string)
parse_and_save_data(snapshotDates); # write_snapshotresults_to_database(allRecordedSnapshots);
|
[
"pymongo.MongoClient",
"requests.get",
"lxml.html.fromstring"
] |
[((273, 301), 'pymongo.MongoClient', 'pm.MongoClient', (['mongo_db_url'], {}), '(mongo_db_url)\n', (287, 301), True, 'import pymongo as pm\n'), ((4412, 4433), 'requests.get', 'requests.get', (['fullURL'], {}), '(fullURL)\n', (4424, 4433), False, 'import requests\n'), ((4442, 4471), 'lxml.html.fromstring', 'html.fromstring', (['resp.content'], {}), '(resp.content)\n', (4457, 4471), False, 'from lxml import html\n')]
|
from urllib import parse
from typing import Optional
from logging import Logger
from lxml import etree
from aiohttp import ClientSession
from tenacity import retry
from tenacity import retry_if_exception_type, stop_after_attempt, wait_fixed
from infra.excepts.types import ReqSysAbnoramlError
from infra.asynchttp.resp import SyncHttpResponse
from settings.config import Config
class RetryableRequester(object):
def __init__(self, logger: Logger, abnormal_url: str) -> None:
self._logger = logger
self._abnormal_url = abnormal_url
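    # Each GET is attempted up to 3 times in total, with a fixed 5-second wait between attempts,
    # whenever the response is identified as the known "abnormal" page (signalled by
    # ReqSysAbnoramlError from _check_does_normal_resp).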
@retry(stop=stop_after_attempt(3), wait=wait_fixed(5), retry=retry_if_exception_type(ReqSysAbnoramlError))
async def get(self,
url: str,
params: dict,
headers: Optional[dict] = None,
cookies: Optional[dict] = None) -> SyncHttpResponse:
try:
encoded_params = parse.urlencode(params)
async with ClientSession() as session:
async with session.get(url, params=encoded_params, headers=headers, cookies=cookies) as resp:
sync_resp = SyncHttpResponse(await resp.read(),
await resp.text(),
resp.status,
resp.headers,
resp.cookies,
resp.url.human_repr())
self._logger.debug(f"Response Cookies: {sync_resp.cookies}")
await self._check_does_normal_resp(sync_resp)
return sync_resp
except ReqSysAbnoramlError as rse:
self._logger.warning(f" [ Warning ] 請求網址的回應異常 ! ")
self._logger.warning(f" 請求網址 : {url} | params: {params} | headers: {headers} | cookies: {cookies}")
self._logger.warning(f" 回應網址 : {rse.url} | 頁面狀態碼: {rse.http_code}\n" + rse.content)
raise rse
async def _check_does_normal_resp(self, resp: SyncHttpResponse) -> bool:
if resp.url == self._abnormal_url:
lxmltree = etree.HTML(resp.raw_content)
content = etree.tostring(lxmltree, method='html', pretty_print=True).decode('utf-8')
            raise ReqSysAbnoramlError(resp.status_code, "Abnormal hotel data parse result: all fields are None", resp.url, content)
return True
|
[
"infra.excepts.types.ReqSysAbnoramlError",
"tenacity.stop_after_attempt",
"tenacity.retry_if_exception_type",
"urllib.parse.urlencode",
"aiohttp.ClientSession",
"tenacity.wait_fixed",
"lxml.etree.tostring",
"lxml.etree.HTML"
] |
[((912, 935), 'urllib.parse.urlencode', 'parse.urlencode', (['params'], {}), '(params)\n', (927, 935), False, 'from urllib import parse\n'), ((570, 591), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['(3)'], {}), '(3)\n', (588, 591), False, 'from tenacity import retry_if_exception_type, stop_after_attempt, wait_fixed\n'), ((598, 611), 'tenacity.wait_fixed', 'wait_fixed', (['(5)'], {}), '(5)\n', (608, 611), False, 'from tenacity import retry_if_exception_type, stop_after_attempt, wait_fixed\n'), ((619, 663), 'tenacity.retry_if_exception_type', 'retry_if_exception_type', (['ReqSysAbnoramlError'], {}), '(ReqSysAbnoramlError)\n', (642, 663), False, 'from tenacity import retry_if_exception_type, stop_after_attempt, wait_fixed\n'), ((2152, 2180), 'lxml.etree.HTML', 'etree.HTML', (['resp.raw_content'], {}), '(resp.raw_content)\n', (2162, 2180), False, 'from lxml import etree\n'), ((2296, 2372), 'infra.excepts.types.ReqSysAbnoramlError', 'ReqSysAbnoramlError', (['resp.status_code', '"""解析旅館資料異常!皆為 None"""', 'resp.url', 'content'], {}), "(resp.status_code, '解析旅館資料異常!皆為 None', resp.url, content)\n", (2315, 2372), False, 'from infra.excepts.types import ReqSysAbnoramlError\n'), ((959, 974), 'aiohttp.ClientSession', 'ClientSession', ([], {}), '()\n', (972, 974), False, 'from aiohttp import ClientSession\n'), ((2203, 2261), 'lxml.etree.tostring', 'etree.tostring', (['lxmltree'], {'method': '"""html"""', 'pretty_print': '(True)'}), "(lxmltree, method='html', pretty_print=True)\n", (2217, 2261), False, 'from lxml import etree\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import pytest
import os
from os.path import relpath
from brainvisa.installer.bvi_utils.paths import Paths
FULLPATH = os.path.dirname(os.path.abspath(__file__))
ROOTPATH = "/" + relpath(FULLPATH + "/../.", "/")
def test_bvi_utils_Paths():
assert Paths.BV == ROOTPATH
assert Paths.BV_PYTHON == ROOTPATH + '/python'
assert Paths.BV_SHARE == ROOTPATH + '/share'
assert Paths.BVI_SHARE == ROOTPATH + '/share/brainvisa/installer'
assert Paths.BVI_SHARE_XML == ROOTPATH + '/share/brainvisa/installer/xml'
assert Paths.BVI_SHARE_IMAGES == ROOTPATH + '/share/brainvisa/installer/images'
assert Paths.BVI_SHARE_LICENSES == ROOTPATH + \
'/share/brainvisa/installer/licenses'
assert Paths.BVI_CONFIGURATION == ROOTPATH + \
'/share/brainvisa/installer/xml/configuration.xml'
@pytest.mark.win32
def test_bvi_utils_Paths_Binary_win():
assert Paths.BV_ENV == 'bv_env.exe'
assert Paths.BV_PACKAGING == 'bv_packaging'
assert Paths.IFW_BINARYCREATOR == 'binarycreator.exe'
assert Paths.IFW_REPOGEN == 'repogen.exe'
assert Paths.IFW_ARCHIVEGEN == 'archivegen.exe'
@pytest.mark.linux
def test_bvi_utils_Paths_Binary_linux():
assert Paths.BV_ENV == 'bv_env'
assert Paths.BV_PACKAGING == 'bv_packaging'
assert Paths.IFW_BINARYCREATOR == 'binarycreator'
assert Paths.IFW_REPOGEN == 'repogen'
assert Paths.IFW_ARCHIVEGEN == 'archivegen'
@pytest.mark.osx
def test_bvi_utils_Paths_Binary_osx():
assert Paths.BV_ENV == 'bv_env'
assert Paths.BV_PACKAGING == 'bv_packaging'
assert Paths.IFW_BINARYCREATOR == 'binarycreator'
assert Paths.IFW_REPOGEN == 'repogen'
assert Paths.IFW_ARCHIVEGEN == 'archivegen'
|
[
"os.path.abspath",
"os.path.relpath"
] |
[((220, 245), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (235, 245), False, 'import os\n'), ((264, 296), 'os.path.relpath', 'relpath', (["(FULLPATH + '/../.')", '"""/"""'], {}), "(FULLPATH + '/../.', '/')\n", (271, 296), False, 'from os.path import relpath\n')]
|
"""Handles the installation of downloaded modules."""
import os
import shutil
import tempfile
import zipfile
from _cloud import utils
def install(zip_bytes, metadata):
    """Install a module once it has been downloaded locally.
    Takes the GitHub repo zipped up in a BytesIO, as well as all the metadata
    about the package.
    """
    # Initial extraction (to a temporary directory)
    archive = zipfile.ZipFile(zip_bytes)
    extract_to = tempfile.gettempdir()
    archive.extractall(extract_to)
    # Moving of the main module to a site-packages dir
    extracted = os.path.join(extract_to, archive.namelist()[0])
source = os.path.join(extracted, metadata["entry_point"])
destination = os.path.join(
utils.pick_site_dir(metadata["py_versions"]),
os.path.basename(metadata["entry_point"])
)
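    # Move the extracted entry point into the chosen site-packages directory.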
shutil.move(source, destination)
|
[
"zipfile.extractall",
"zipfile.ZipFile",
"_cloud.utils.pick_site_dir",
"os.path.basename",
"tempfile.gettempdir",
"zipfile.namelist",
"shutil.move",
"os.path.join"
] |
[((406, 430), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zipfile'], {}), '(zipfile)\n', (421, 430), False, 'import zipfile\n'), ((448, 469), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (467, 469), False, 'import tempfile\n'), ((474, 504), 'zipfile.extractall', 'zipfile.extractall', (['extract_to'], {}), '(extract_to)\n', (492, 504), False, 'import zipfile\n'), ((637, 685), 'os.path.join', 'os.path.join', (['extracted', "metadata['entry_point']"], {}), "(extracted, metadata['entry_point'])\n", (649, 685), False, 'import os\n'), ((832, 864), 'shutil.move', 'shutil.move', (['source', 'destination'], {}), '(source, destination)\n', (843, 864), False, 'import shutil\n'), ((726, 770), '_cloud.utils.pick_site_dir', 'utils.pick_site_dir', (["metadata['py_versions']"], {}), "(metadata['py_versions'])\n", (745, 770), False, 'from _cloud import utils\n'), ((780, 821), 'os.path.basename', 'os.path.basename', (["metadata['entry_point']"], {}), "(metadata['entry_point'])\n", (796, 821), False, 'import os\n'), ((601, 619), 'zipfile.namelist', 'zipfile.namelist', ([], {}), '()\n', (617, 619), False, 'import zipfile\n')]
|
from __future__ import division
from __future__ import print_function
import sys
sys.path.append('..')
from joystick import JoyStick
from servo import ServoController
if __name__ == '__main__':
sc = ServoController(angle_range=80)
js = JoyStick()
while True:
# read serial
v = js.read()
# convert to servo angle
angle = sc.analog2deg(v)
print(angle)
for i in range(len(angle)):
sc.control([i, angle[i]])
|
[
"sys.path.append",
"servo.ServoController",
"joystick.JoyStick"
] |
[((82, 103), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (97, 103), False, 'import sys\n'), ((205, 236), 'servo.ServoController', 'ServoController', ([], {'angle_range': '(80)'}), '(angle_range=80)\n', (220, 236), False, 'from servo import ServoController\n'), ((246, 256), 'joystick.JoyStick', 'JoyStick', ([], {}), '()\n', (254, 256), False, 'from joystick import JoyStick\n')]
|
# Unix SMB/CIFS implementation.
# utility functions for provisioning a Samba4 server
# Copyright (C) <NAME> <<EMAIL>> 2007-2010
# Copyright (C) <NAME> <<EMAIL>> 2008-2009
# Copyright (C) <NAME> <<EMAIL>> 2008-2009
#
# Based on the original in EJS:
# Copyright (C) <NAME> <<EMAIL>> 2005
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Functions for setting up a Samba configuration."""
__docformat__ = "restructuredText"
import os
from samba import read_and_sub_file
from samba.param import setup_dir
FILL_FULL = "FULL"
FILL_SUBDOMAIN = "SUBDOMAIN"
FILL_NT4SYNC = "NT4SYNC"
FILL_DRS = "DRS"
def setup_path(file):
"""Return an absolute path to the provision tempate file specified by file"""
return os.path.join(setup_dir(), file)
def setup_add_ldif(ldb, ldif_path, subst_vars=None, controls=["relax:0"]):
"""Setup a ldb in the private dir.
:param ldb: LDB file to import data into
:param ldif_path: Path of the LDIF file to load
    :param subst_vars: Optional variables to substitute in LDIF.
    :param controls: Optional list of controls, can be None for no controls
"""
assert isinstance(ldif_path, str)
data = read_and_sub_file(ldif_path, subst_vars)
ldb.add_ldif(data, controls)
def setup_modify_ldif(ldb, ldif_path, subst_vars=None, controls=["relax:0"]):
"""Modify a ldb in the private dir.
:param ldb: LDB object.
:param ldif_path: LDIF file path.
:param subst_vars: Optional dictionary with substitution variables.
"""
data = read_and_sub_file(ldif_path, subst_vars)
ldb.modify_ldif(data, controls)
def setup_ldb(ldb, ldif_path, subst_vars):
"""Import a LDIF a file into a LDB handle, optionally substituting
variables.
:note: Either all LDIF data will be added or none (using transactions).
:param ldb: LDB file to import into.
:param ldif_path: Path to the LDIF file.
:param subst_vars: Dictionary with substitution variables.
"""
assert ldb is not None
ldb.transaction_start()
try:
setup_add_ldif(ldb, ldif_path, subst_vars)
except:
ldb.transaction_cancel()
raise
else:
ldb.transaction_commit()
|
[
"samba.read_and_sub_file",
"samba.param.setup_dir"
] |
[((1750, 1790), 'samba.read_and_sub_file', 'read_and_sub_file', (['ldif_path', 'subst_vars'], {}), '(ldif_path, subst_vars)\n', (1767, 1790), False, 'from samba import read_and_sub_file\n'), ((2101, 2141), 'samba.read_and_sub_file', 'read_and_sub_file', (['ldif_path', 'subst_vars'], {}), '(ldif_path, subst_vars)\n', (2118, 2141), False, 'from samba import read_and_sub_file\n'), ((1319, 1330), 'samba.param.setup_dir', 'setup_dir', ([], {}), '()\n', (1328, 1330), False, 'from samba.param import setup_dir\n')]
|
#!/bin/env python3
#
# Schooner - Simple Course Management System
# admin.insert.py / Add script owner as an Admin
# University of Turku / Faculty of Technology / Department of Computing
# <NAME> <<EMAIL>>
#
# 2021-08-13 Initial version.
#
# NOTE: Using Psycopg 3 (dev2) 2021-08-13
#
import os
import pwd
import syslog
import psycopg
# Owner of this file - the user who pulled/cloned this repository
GITUSER = pwd.getpwuid(os.stat(__file__).st_uid).pw_name
# This syntax doesn't work with psycopg3.. ? To-be-investigated...
#with psycopg.connect(dbname="schooner" user="postgres") as conn:
with psycopg.connect("dbname=schooner user=postgres") as conn:
with conn.cursor() as cur:
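        # Insert the repository owner into the admin table; on failure the error is logged to syslog and the script exits.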
try:
cur.execute(
"INSERT INTO admin (uid) VALUES (%(uid)s)",
{ "uid" : GITUSER }
)
except psycopg.Error as e:
syslog.syslog(
"Database error: " + e + ", SQL: " + cur.query
)
os._exit(1)
else:
syslog.syslog(
f"{GITUSER} added as an Admin"
)
|
[
"psycopg.connect",
"os._exit",
"syslog.syslog",
"os.stat"
] |
[((600, 648), 'psycopg.connect', 'psycopg.connect', (['"""dbname=schooner user=postgres"""'], {}), "('dbname=schooner user=postgres')\n", (615, 648), False, 'import psycopg\n'), ((427, 444), 'os.stat', 'os.stat', (['__file__'], {}), '(__file__)\n', (434, 444), False, 'import os\n'), ((1029, 1074), 'syslog.syslog', 'syslog.syslog', (['f"""{GITUSER} added as an Admin"""'], {}), "(f'{GITUSER} added as an Admin')\n", (1042, 1074), False, 'import syslog\n'), ((887, 948), 'syslog.syslog', 'syslog.syslog', (["('Database error: ' + e + ', SQL: ' + cur.query)"], {}), "('Database error: ' + e + ', SQL: ' + cur.query)\n", (900, 948), False, 'import syslog\n'), ((991, 1002), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (999, 1002), False, 'import os\n')]
|
__doc__ = r"""
>>> from formalchemy.tests import *
>>> FieldSet.default_renderers = original_renderers.copy()
# some low-level testing first
>>> fs = FieldSet(order1)
>>> fs._raw_fields()
[AttributeField(id), AttributeField(user_id), AttributeField(quantity), AttributeField(user)]
>>> fs.user.name
'user_id'
>>> fs = FieldSet(bill)
>>> fs._raw_fields()
[AttributeField(id), AttributeField(email), AttributeField(password), AttributeField(name), AttributeField(orders)]
>>> fs.orders.name
'orders'
binding should not change attribute order:
>>> fs = FieldSet(User)
>>> fs_bound = fs.bind(User)
>>> fs_bound._fields.values()
[AttributeField(id), AttributeField(email), AttributeField(password), AttributeField(name), AttributeField(orders)]
>>> fs = FieldSet(User2)
>>> fs._raw_fields()
[AttributeField(user_id), AttributeField(address_id), AttributeField(name), AttributeField(address)]
>>> fs.render() #doctest: +ELLIPSIS
Traceback (most recent call last):
...
Exception: No session found...
>>> fs = FieldSet(One)
>>> fs.configure(pk=True, focus=None)
>>> fs.id.is_required()
True
>>> print fs.render()
<div>
<label class="field_req" for="One--id">
Id
</label>
<input id="One--id" name="One--id" type="text" />
</div>
>>> fs = FieldSet(Two)
>>> fs
<FieldSet with ['id', 'foo']>
>>> fs.configure(pk=True)
>>> fs
<FieldSet (configured) with ['id', 'foo']>
>>> print fs.render()
<div>
<label class="field_req" for="Two--id">
Id
</label>
<input id="Two--id" name="Two--id" type="text" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Two--id").focus();
//]]>
</script>
<div>
<label class="field_opt" for="Two--foo">
Foo
</label>
<input id="Two--foo" name="Two--foo" type="text" value="133" />
</div>
>>> fs = FieldSet(Two)
>>> print fs.render()
<div>
<label class="field_opt" for="Two--foo">
Foo
</label>
<input id="Two--foo" name="Two--foo" type="text" value="133" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Two--foo").focus();
//]]>
</script>
>>> fs = FieldSet(Two)
>>> fs.configure(options=[fs.foo.label('A custom label')])
>>> print fs.render()
<div>
<label class="field_opt" for="Two--foo">
A custom label
</label>
<input id="Two--foo" name="Two--foo" type="text" value="133" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Two--foo").focus();
//]]>
</script>
>>> fs.configure(options=[fs.foo.label('')])
>>> print fs.render()
<div>
<label class="field_opt" for="Two--foo">
</label>
<input id="Two--foo" name="Two--foo" type="text" value="133" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Two--foo").focus();
//]]>
</script>
>>> fs = FieldSet(Two)
>>> assert fs.render() == configure_and_render(fs, include=[fs.foo])
>>> assert fs.render() == configure_and_render(fs, exclude=[fs.id])
>>> fs = FieldSet(Two)
>>> fs.configure(include=[fs.foo.hidden()])
>>> print fs.render()
<input id="Two--foo" name="Two--foo" type="hidden" value="133" />
>>> fs = FieldSet(Two)
>>> fs.configure(include=[fs.foo.dropdown([('option1', 'value1'), ('option2', 'value2')])])
>>> print fs.render()
<div>
<label class="field_opt" for="Two--foo">
Foo
</label>
<select id="Two--foo" name="Two--foo">
<option value="value1">
option1
</option>
<option value="value2">
option2
</option>
</select>
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Two--foo").focus();
//]]>
</script>
>>> fs = FieldSet(Two)
>>> assert configure_and_render(fs, include=[fs.foo.dropdown([('option1', 'value1'), ('option2', 'value2')])]) == configure_and_render(fs, options=[fs.foo.dropdown([('option1', 'value1'), ('option2', 'value2')])])
>>> print pretty_html(fs.foo.with_html(onblur='test()').render())
<select id="Two--foo" name="Two--foo" onblur="test()">
<option value="value1">
option1
</option>
<option value="value2">
option2
</option>
</select>
>>> print fs.foo.reset().with_html(onblur='test').render()
<input id="Two--foo" name="Two--foo" onblur="test" type="text" value="133" />
# Test with_metadata()
>>> fs = FieldSet(Three)
>>> fs.configure(include=[fs.foo.with_metadata(instructions=u'Answer well')])
>>> print fs.render()
<div>
<label class="field_opt" for="Three--foo">
Foo
</label>
<input id="Three--foo" name="Three--foo" type="text" />
<span class="instructions">
Answer well
</span>
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Three--foo").focus();
//]]>
</script>
# test sync
>>> print session.query(One).count()
0
>>> fs_1 = FieldSet(One, data={}, session=session)
>>> fs_1.sync()
>>> session.flush()
>>> print session.query(One).count()
1
>>> session.rollback()
>>> twof = TwoFloat(id=1, foo=32.2)
>>> fs_twof = FieldSet(twof)
>>> print '%.1f' % fs_twof.foo.value
32.2
>>> print pretty_html(fs_twof.foo.render())
<input id="TwoFloat-1-foo" name="TwoFloat-1-foo" type="text" value="32.2" />
>>> import datetime
>>> twoi = TwoInterval(id=1, foo=datetime.timedelta(2.2))
>>> fs_twoi = FieldSet(twoi)
>>> fs_twoi.foo.renderer
<IntervalFieldRenderer for AttributeField(foo)>
>>> fs_twoi.foo.value
datetime.timedelta(2, 17280)
>>> print pretty_html(fs_twoi.foo.render())
<input id="TwoInterval-1-foo" name="TwoInterval-1-foo" type="text" value="2.17280" />
>>> fs_twoi.rebind(data={"TwoInterval-1-foo": "3.1"})
>>> fs_twoi.sync()
>>> new_twoi = fs_twoi.model
>>> new_twoi.foo == datetime.timedelta(3.1)
True
# test render and sync fatypes.Numeric
# http://code.google.com/p/formalchemy/issues/detail?id=41
>>> twon = TwoNumeric(id=1, foo=Decimal('2.3'))
>>> fs_twon = FieldSet(twon)
>>> print pretty_html(fs_twon.foo.render())
<input id="TwoNumeric-1-foo" name="TwoNumeric-1-foo" type="text" value="2.3" />
>>> fs_twon.rebind(data={"TwoNumeric-1-foo": "6.7"})
>>> fs_twon.sync()
>>> new_twon = fs_twon.model
>>> new_twon.foo == Decimal("6.7")
True
# test sync when TwoNumeric-1-foo is empty
>>> fs_twon.rebind(data={"TwoNumeric-1-foo": ""})
>>> fs_twon.sync()
>>> new_twon = fs_twon.model
>>> str(new_twon.foo)
'None'
>>> fs_cb = FieldSet(CheckBox)
>>> fs_cb.field.value is None
True
>>> print pretty_html(fs_cb.field.dropdown().render())
<select id="CheckBox--field" name="CheckBox--field">
<option value="True">
Yes
</option>
<option value="False">
No
</option>
</select>
# test no checkbox/radio submitted
>>> fs_cb.rebind(data={})
>>> fs_cb.field.raw_value is None
True
>>> fs_cb.field.value
False
>>> fs_cb.field.renderer.value is None
True
>>> print fs_cb.field.render()
<input id="CheckBox--field" name="CheckBox--field" type="checkbox" value="True" />
>>> fs_cb.field.renderer #doctest: +ELLIPSIS
<CheckBoxFieldRenderer for AttributeField(field)>
>>> fs_cb.field.renderer._serialized_value() is None
True
>>> print pretty_html(fs_cb.field.radio().render())
<input id="CheckBox--field_0" name="CheckBox--field" type="radio" value="True" />
<label for="CheckBox--field_0">
Yes
</label>
<br />
<input id="CheckBox--field_1" name="CheckBox--field" type="radio" value="False" />
<label for="CheckBox--field_1">
No
</label>
>>> fs_cb.validate()
True
>>> fs_cb.errors
{}
>>> fs_cb.sync()
>>> cb = fs_cb.model
>>> cb.field
False
>>> fs_cb.rebind(data={'CheckBox--field': 'True'})
>>> fs_cb.validate()
True
>>> fs_cb.sync()
>>> cb.field
True
>>> fs_cb.configure(options=[fs_cb.field.dropdown()])
>>> fs_cb.rebind(data={'CheckBox--field': 'False'})
>>> fs_cb.sync()
>>> cb.field
False
>>> fs = FieldSet(Two)
>>> print pretty_html(fs.foo.dropdown(options=['one', 'two']).radio().render())
<input id="Two--foo_0" name="Two--foo" type="radio" value="one" />
<label for="Two--foo_0">
one
</label>
<br />
<input id="Two--foo_1" name="Two--foo" type="radio" value="two" />
<label for="Two--foo_1">
two
</label>
>>> assert fs.foo.radio(options=['one', 'two']).render() == fs.foo.dropdown(options=['one', 'two']).radio().render()
>>> print fs.foo.radio(options=['one', 'two']).dropdown().render()
<select id="Two--foo" name="Two--foo">
<option value="one">one</option>
<option value="two">two</option>
</select>
>>> assert fs.foo.dropdown(options=['one', 'two']).render() == fs.foo.radio(options=['one', 'two']).dropdown().render()
>>> print pretty_html(fs.foo.dropdown(options=['one', 'two'], multiple=True).checkbox().render())
<input id="Two--foo_0" name="Two--foo" type="checkbox" value="one" />
<label for="Two--foo_0">
one
</label>
<br />
<input id="Two--foo_1" name="Two--foo" type="checkbox" value="two" />
<label for="Two--foo_1">
two
</label>
>>> fs = FieldSet(User, session=session)
>>> print fs.render()
<div>
<label class="field_req" for="User--email">
Email
</label>
<input id="User--email" maxlength="40" name="User--email" type="text" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("User--email").focus();
//]]>
</script>
<div>
<label class="field_req" for="User--password">
Password
</label>
<input id="User--password" maxlength="20" name="User--password" type="text" />
</div>
<div>
<label class="field_opt" for="User--name">
Name
</label>
<input id="User--name" maxlength="30" name="User--name" type="text" />
</div>
<div>
<label class="field_opt" for="User--orders">
Orders
</label>
<select id="User--orders" multiple="multiple" name="User--orders" size="5">
<option value="2">
Quantity: 5
</option>
<option value="3">
Quantity: 6
</option>
<option value="1">
Quantity: 10
</option>
</select>
</div>
>>> fs = FieldSet(bill)
>>> print pretty_html(fs.orders.render())
<select id="User-1-orders" multiple="multiple" name="User-1-orders" size="5">
<option value="2">
Quantity: 5
</option>
<option value="3">
Quantity: 6
</option>
<option selected="selected" value="1">
Quantity: 10
</option>
</select>
>>> print pretty_html(fs.orders.checkbox().render())
<input id="User-1-orders_0" name="User-1-orders" type="checkbox" value="2" />
<label for="User-1-orders_0">
Quantity: 5
</label>
<br />
<input id="User-1-orders_1" name="User-1-orders" type="checkbox" value="3" />
<label for="User-1-orders_1">
Quantity: 6
</label>
<br />
<input checked="checked" id="User-1-orders_2" name="User-1-orders" type="checkbox" value="1" />
<label for="User-1-orders_2">
Quantity: 10
</label>
>>> print fs.orders.checkbox(options=session.query(Order).filter_by(id=1)).render()
<input checked="checked" id="User-1-orders_0" name="User-1-orders" type="checkbox" value="1" /><label for="User-1-orders_0">Quantity: 10</label>
>>> fs = FieldSet(bill, data={})
>>> fs.configure(include=[fs.orders.checkbox()])
>>> fs.validate()
True
>>> fs = FieldSet(bill, data={'User-1-orders': ['2', '3']})
>>> print pretty_html(fs.orders.render())
<select id="User-1-orders" multiple="multiple" name="User-1-orders" size="5">
<option selected="selected" value="2">
Quantity: 5
</option>
<option selected="selected" value="3">
Quantity: 6
</option>
<option value="1">
Quantity: 10
</option>
</select>
>>> fs.orders.model_value
[1]
>>> fs.orders.raw_value
[<Order for user 1: 10>]
>>> fs = FieldSet(Two)
>>> print fs.foo.render()
<input id="Two--foo" name="Two--foo" type="text" value="133" />
>>> fs = FieldSet(Two)
>>> print fs.foo.dropdown([('option1', 'value1'), ('option2', 'value2')]).render()
<select id="Two--foo" name="Two--foo">
<option value="value1">option1</option>
<option value="value2">option2</option>
</select>
>>> fs = FieldSet(Order, session)
>>> print fs.render()
<div>
<label class="field_req" for="Order--quantity">
Quantity
</label>
<input id="Order--quantity" name="Order--quantity" type="text" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Order--quantity").focus();
//]]>
</script>
<div>
<label class="field_req" for="Order--user_id">
User
</label>
<select id="Order--user_id" name="Order--user_id">
<option value="1">
Bill
</option>
<option value="2">
John
</option>
</select>
</div>
# this seems particularly prone to errors; break it out in its own test
>>> fs = FieldSet(order1)
>>> fs.user.value
1
# test re-binding
>>> fs = FieldSet(Order)
>>> fs.configure(pk=True, options=[fs.quantity.hidden()])
>>> fs.rebind(order1)
>>> fs.quantity.value
10
>>> fs.session == object_session(order1)
True
>>> print fs.render()
<div>
<label class="field_req" for="Order-1-id">
Id
</label>
<input id="Order-1-id" name="Order-1-id" type="text" value="1" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Order-1-id").focus();
//]]>
</script>
<input id="Order-1-quantity" name="Order-1-quantity" type="hidden" value="10" />
<div>
<label class="field_req" for="Order-1-user_id">
User
</label>
<select id="Order-1-user_id" name="Order-1-user_id">
<option selected="selected" value="1">
Bill
</option>
<option value="2">
John
</option>
</select>
</div>
>>> fs = FieldSet(One)
>>> fs.configure(pk=True)
>>> print fs.render()
<div>
<label class="field_req" for="One--id">
Id
</label>
<input id="One--id" name="One--id" type="text" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("One--id").focus();
//]]>
</script>
>>> fs.configure(include=[])
>>> print fs.render()
<BLANKLINE>
>>> fs.configure(pk=True, focus=None)
>>> print fs.render()
<div>
<label class="field_req" for="One--id">
Id
</label>
<input id="One--id" name="One--id" type="text" />
</div>
>>> fs = FieldSet(One)
>>> fs.rebind(Two) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...
>>> fs = FieldSet(Two)
>>> fs.configure()
>>> fs2 = fs.bind(Two)
>>> [fs2 == field.parent for field in fs2._render_fields.itervalues()]
[True]
>>> fs = FieldSet(OTOParent, session)
>>> print fs.render()
<div>
<label class="field_req" for="OTOParent--oto_child_id">
Child
</label>
<select id="OTOParent--oto_child_id" name="OTOParent--oto_child_id">
<option value="1">
baz
</option>
</select>
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("OTOParent--oto_child_id").focus();
//]]>
</script>
>>> fs.rebind(parent)
>>> fs.child.raw_value
<OTOChild baz>
# validation + sync
>>> fs_2 = FieldSet(Two, session=session, data={'Two--foo': ''})
>>> fs_2.foo.value # '' is deserialized to None, so default of 133 is used
'133'
>>> fs_2.validate()
True
>>> fs_2.configure(options=[fs_2.foo.required()], focus=None)
>>> fs_2.validate()
False
>>> fs_2.errors
{AttributeField(foo): ['Please enter a value']}
>>> print fs_2.render()
<div>
<label class="field_req" for="Two--foo">
Foo
</label>
<input id="Two--foo" name="Two--foo" type="text" value="133" />
<span class="field_error">
Please enter a value
</span>
</div>
>>> fs_2.rebind(data={'Two--foo': 'asdf'})
>>> fs_2.data
SimpleMultiDict([('Two--foo', u'asdf')])
>>> fs_2.validate()
False
>>> fs_2.errors
{AttributeField(foo): ['Value is not an integer']}
>>> print fs_2.render()
<div>
<label class="field_req" for="Two--foo">
Foo
</label>
<input id="Two--foo" name="Two--foo" type="text" value="asdf" />
<span class="field_error">
Value is not an integer
</span>
</div>
>>> fs_2.rebind(data={'Two--foo': '2'})
>>> fs_2.data
SimpleMultiDict([('Two--foo', u'2')])
>>> fs_2.validate()
True
>>> fs_2.errors
{}
>>> fs_2.sync()
>>> fs_2.model.foo
2
>>> session.flush()
>>> print fs_2.render() #doctest: +ELLIPSIS
Traceback (most recent call last):
...
PkError: Primary key of model has changed since binding, probably due to sync()ing a new instance (from None to 1)...
>>> session.rollback()
>>> fs_1 = FieldSet(One, session=session, data={'One--id': '1'})
>>> fs_1.configure(pk=True)
>>> fs_1.validate()
True
>>> fs_1.sync()
>>> fs_1.model.id
1
>>> fs_1.rebind(data={'One--id': 'asdf'})
>>> fs_1.id.renderer.name
u'One--id'
>>> fs_1.validate()
False
>>> fs_1.errors
{AttributeField(id): ['Value is not an integer']}
# test updating _bound_pk copy
>>> one = One(id=1)
>>> fs_11 = FieldSet(one)
>>> fs_11.id.renderer.name
u'One-1-id'
>>> one.id = 2
>>> fs_11.rebind(one)
>>> fs_11.id.renderer.name
u'One-2-id'
>>> fs_u = FieldSet(User, session=session, data={})
>>> fs_u.configure(include=[fs_u.orders])
>>> fs_u.validate()
True
>>> fs_u.sync()
>>> fs_u.model.orders
[]
>>> fs_u.rebind(User, session, data={'User--orders': [str(order1.id), str(order2.id)]})
>>> fs_u.validate()
True
>>> fs_u.sync()
>>> fs_u.model.orders == [order1, order2]
True
>>> session.rollback()
>>> fs_3 = FieldSet(Three, data={'Three--foo': 'asdf', 'Three--bar': 'fdsa'})
>>> fs_3.foo.value
u'asdf'
>>> print fs_3.foo.textarea().render()
<textarea id="Three--foo" name="Three--foo">asdf</textarea>
>>> print fs_3.foo.textarea("3x4").render()
<textarea cols="3" id="Three--foo" name="Three--foo" rows="4">asdf</textarea>
>>> print fs_3.foo.textarea((3,4)).render()
<textarea cols="3" id="Three--foo" name="Three--foo" rows="4">asdf</textarea>
>>> fs_3.bar.value
u'fdsa'
>>> def custom_validator(fs):
... if fs.foo.value != fs.bar.value:
... fs.foo.errors.append('does not match bar')
... raise ValidationError('foo and bar do not match')
>>> fs_3.configure(global_validator=custom_validator, focus=None)
>>> fs_3.validate()
False
>>> sorted(fs_3.errors.items())
[(None, ('foo and bar do not match',)), (AttributeField(foo), ['does not match bar'])]
>>> print fs_3.render()
<div class="fieldset_error">
foo and bar do not match
</div>
<div>
<label class="field_opt" for="Three--foo">
Foo
</label>
<input id="Three--foo" name="Three--foo" type="text" value="asdf" />
<span class="field_error">
does not match bar
</span>
</div>
<div>
<label class="field_opt" for="Three--bar">
Bar
</label>
<input id="Three--bar" name="Three--bar" type="text" value="fdsa" />
</div>
# custom renderer
>>> fs_3 = FieldSet(Three, data={'Three--foo': 'http://example.com/image.png'})
>>> fs_3.configure(include=[fs_3.foo.with_renderer(ImgRenderer)])
>>> print fs_3.foo.render()
<img src="http://example.com/image.png">
# natural PKs
>>> fs_npk = FieldSet(NaturalOrder, session)
>>> print fs_npk.render()
<div>
<label class="field_req" for="NaturalOrder--quantity">
Quantity
</label>
<input id="NaturalOrder--quantity" name="NaturalOrder--quantity" type="text" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("NaturalOrder--quantity").focus();
//]]>
</script>
<div>
<label class="field_req" for="NaturalOrder--user_email">
User
</label>
<select id="NaturalOrder--user_email" name="NaturalOrder--user_email">
<option value="<EMAIL>">
Natural Bill
</option>
<option value="<EMAIL>">
<NAME>
</option>
</select>
</div>
>>> fs_npk.rebind(norder2, session, data={'NaturalOrder-2-user_email': nbill.email, 'NaturalOrder-2-quantity': str(norder2.quantity)})
>>> fs_npk.user_email.renderer.name
u'NaturalOrder-2-user_email'
>>> fs_npk.sync()
>>> fs_npk.model.user_email == nbill.email
True
>>> session.rollback()
# allow attaching custom attributes to wrappers
>>> fs = FieldSet(User)
>>> fs.name.baz = 'asdf'
>>> fs2 = fs.bind(bill)
>>> fs2.name.baz
'asdf'
# equality can tell an field bound to an instance is the same as one bound to a type
>>> fs.name == fs2.name
True
# Field
>>> fs = FieldSet(One)
>>> fs.add(Field('foo'))
>>> print configure_and_render(fs, focus=None)
<div>
<label class="field_opt" for="One--foo">
Foo
</label>
<input id="One--foo" name="One--foo" type="text" />
</div>
>>> fs = FieldSet(One)
>>> fs.add(Field('foo', types.Integer, value=2))
>>> fs.foo.value
2
>>> print configure_and_render(fs, focus=None)
<div>
<label class="field_opt" for="One--foo">
Foo
</label>
<input id="One--foo" name="One--foo" type="text" value="2" />
</div>
>>> fs.rebind(One, data={'One--foo': '4'})
>>> fs.sync()
>>> fs.foo.value
4
>>> fs = FieldSet(One)
>>> fs.add(Field('foo', types.Integer, value=2).dropdown(options=[('1', 1), ('2', 2)]))
>>> print configure_and_render(fs, focus=None)
<div>
<label class="field_opt" for="One--foo">
Foo
</label>
<select id="One--foo" name="One--foo">
<option value="1">
1
</option>
<option selected="selected" value="2">
2
</option>
</select>
</div>
# test Field __hash__, __eq__
>>> fs.foo == fs.foo.dropdown(options=[('1', 1), ('2', 2)])
True
>>> fs2 = FieldSet(One)
>>> fs2.add(Field('foo', types.Integer, value=2))
>>> fs2.configure(options=[fs2.foo.dropdown(options=[('1', 1), ('2', 2)])], focus=None)
>>> fs.render() == fs2.render()
True
>>> fs_1 = FieldSet(One)
>>> fs_1.add(Field('foo', types.Integer, value=[2, 3]).dropdown(options=[('1', 1), ('2', 2), ('3', 3)], multiple=True))
>>> print configure_and_render(fs_1, focus=None)
<div>
<label class="field_opt" for="One--foo">
Foo
</label>
<select id="One--foo" multiple="multiple" name="One--foo" size="5">
<option value="1">
1
</option>
<option selected="selected" value="2">
2
</option>
<option selected="selected" value="3">
3
</option>
</select>
</div>
>>> fs_1.rebind(One, data={'One--foo': ['1', '2']})
>>> fs_1.sync()
>>> fs_1.foo.value
[1, 2]
# test attribute names
>>> fs = FieldSet(One)
>>> fs.add(Field('foo'))
>>> fs.foo == fs['foo']
True
>>> fs.add(Field('add'))
>>> fs.add == fs['add']
False
# change default renderer
>>> class BooleanSelectRenderer(SelectFieldRenderer):
... def render(self, **kwargs):
... kwargs['options'] = [('Yes', True), ('No', False)]
... return SelectFieldRenderer.render(self, **kwargs)
>>> d = dict(FieldSet.default_renderers)
>>> d[types.Boolean] = BooleanSelectRenderer
>>> fs = FieldSet(CheckBox)
>>> fs.default_renderers = d
>>> print fs.field.render()
<select id="CheckBox--field" name="CheckBox--field">
<option value="True">Yes</option>
<option value="False">No</option>
</select>
# test setter rejection
>>> fs = FieldSet(One)
>>> fs.id = fs.id.required()
Traceback (most recent call last):
...
AttributeError: Do not set field attributes manually. Use append() or configure() instead
# join
>>> fs = FieldSet(Order__User)
>>> fs._fields.values()
[AttributeField(orders_id), AttributeField(orders_user_id), AttributeField(orders_quantity), AttributeField(users_id), AttributeField(users_email), AttributeField(users_password), AttributeField(users_name)]
>>> fs.rebind(session.query(Order__User).filter_by(orders_id=1).one())
>>> print configure_and_render(fs, focus=None)
<div>
<label class="field_req" for="Order__User-1_1-orders_quantity">
Orders quantity
</label>
<input id="Order__User-1_1-orders_quantity" name="Order__User-1_1-orders_quantity" type="text" value="10" />
</div>
<div>
<label class="field_req" for="Order__User-1_1-users_email">
Users email
</label>
<input id="Order__User-1_1-users_email" maxlength="40" name="Order__User-1_1-users_email" type="text" value="<EMAIL>" />
</div>
<div>
<label class="field_req" for="Order__User-1_1-users_password">
Users password
</label>
<input id="Order__User-1_1-users_password" maxlength="20" name="Order__User-1_1-users_password" type="text" value="<PASSWORD>" />
</div>
<div>
<label class="field_opt" for="Order__User-1_1-users_name">
Users name
</label>
<input id="Order__User-1_1-users_name" maxlength="30" name="Order__User-1_1-users_name" type="text" value="Bill" />
</div>
>>> fs.rebind(session.query(Order__User).filter_by(orders_id=1).one(), data={'Order__User-1_1-orders_quantity': '5', 'Order__User-1_1-users_email': bill.email, 'Order__User-1_1-users_password': '<PASSWORD>', 'Order__User-1_1-users_name': 'Bill'})
>>> fs.validate()
True
>>> fs.sync()
>>> session.flush()
>>> session.refresh(bill)
>>> bill.password == '<PASSWORD>'
True
>>> session.rollback()
>>> FieldSet.default_renderers[Point] = PointFieldRenderer
>>> fs = FieldSet(Vertex)
>>> print pretty_html(fs.start.render())
<input id="Vertex--start-x" name="Vertex--start-x" type="text" value="" />
<input id="Vertex--start-y" name="Vertex--start-y" type="text" value="" />
>>> fs.rebind(Vertex)
>>> v = fs.model
>>> v.start = Point(1,2)
>>> v.end = Point(3,4)
>>> print pretty_html(fs.start.render())
<input id="Vertex--start-x" name="Vertex--start-x" type="text" value="1" />
<input id="Vertex--start-y" name="Vertex--start-y" type="text" value="2" />
>>> fs.rebind(v)
>>> fs.rebind(data={'Vertex--start-x': '10', 'Vertex--start-y': '20', 'Vertex--end-x': '30', 'Vertex--end-y': '40'})
>>> fs.validate()
True
>>> fs.sync()
>>> session.add(v)
>>> session.flush()
>>> v.id
1
>>> session.refresh(v)
>>> v.start.x
10
>>> v.end.y
40
>>> session.rollback()
# readonly tests
>>> t = FieldSet(john)
>>> john.name = None
>>> t.configure(readonly=True)
>>> t.readonly
True
>>> print t.render()
<tbody>
<tr>
<td class="field_readonly">
Email:
</td>
<td>
<EMAIL>
</td>
</tr>
<tr>
<td class="field_readonly">
Password:
</td>
<td>
5678
</td>
</tr>
<tr>
<td class="field_readonly">
Name:
</td>
<td>
</td>
</tr>
<tr>
<td class="field_readonly">
Orders:
</td>
<td>
Quantity: 5, Quantity: 6
</td>
</tr>
</tbody>
>>> session.rollback()
>>> session.refresh(john)
>>> fs_or = FieldSet(order1)
>>> print fs_or.user.render_readonly()
<a href="mailto:<EMAIL>">Bill</a>
>>> out = FieldSet(OrderUserTag, session=session)
>>> list(sorted(out._fields))
['id', 'order_id', 'order_user', 'tag', 'user_id']
>>> print out.order_user.name
order_user
>>> out.order_user.is_raw_foreign_key
False
>>> out.order_user.is_composite_foreign_key
True
>>> list(sorted(out.render_fields))
['order_user', 'tag']
>>> print pretty_html(out.order_user.render())
<select id="OrderUserTag--order_user" name="OrderUserTag--order_user">
<option value="(1, 1)">
OrderUser(1, 1)
</option>
<option value="(1, 2)">
OrderUser(1, 2)
</option>
</select>
>>> out.rebind(data={'OrderUserTag--order_user': '(1, 2)', 'OrderUserTag--tag': 'asdf'})
>>> out.validate()
True
>>> out.sync()
>>> print out.model.order_user
OrderUser(1, 2)
>>> fs = FieldSet(Function)
>>> fs.configure(pk=True)
>>> fs.foo.render().startswith('<span')
True
>>> fs_r = FieldSet(Recursive)
>>> fs_r.parent_id.is_raw_foreign_key
True
>>> fs_r.rebind(data={'Recursive--foo': 'asdf'})
>>> fs_r.validate()
True
>>> fs_oo = FieldSet(OptionalOrder, session=session)
>>> fs_oo.configure(options=[fs_oo.user.with_null_as(('No user', ''))])
>>> fs_oo.user._null_option
('No user', '')
>>> print pretty_html(fs_oo.user.render())
<select id="OptionalOrder--user_id" name="OptionalOrder--user_id">
<option selected="selected" value="">
No user
</option>
<option value="1">
Bill
</option>
<option value="2">
John
</option>
</select>
>>> fs_oo = FieldSet(OptionalOrder)
>>> fs_oo.rebind(data={'OptionalOrder--user_id': fs_oo.user_id._null_option[1], 'OptionalOrder--quantity': ''})
>>> fs_oo.validate()
True
>>> fs_oo.user_id.value is None
True
>>> fs_bad = FieldSet(One)
>>> fs_bad.configure(include=[Field('invalid')])
Traceback (most recent call last):
...
ValueError: Unrecognized Field `AttributeField(invalid)` in `include` -- did you mean to call append() first?
>>> fs_s = FieldSet(Synonym)
>>> fs_s._fields
{'foo': AttributeField(foo), 'id': AttributeField(id)}
>>> fs_prefix = FieldSet(Two, prefix="myprefix")
>>> print(fs_prefix.render())
<div>
<label class="field_opt" for="myprefix-Two--foo">
Foo
</label>
<input id="myprefix-Two--foo" name="myprefix-Two--foo" type="text" value="133" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("myprefix-Two--foo").focus();
//]]>
</script>
>>> fs_prefix.rebind(data={"myprefix-Two--foo": "42"})
>>> fs_prefix.validate()
True
>>> fs_prefix.sync()
>>> fs_prefix.model.foo
42
>>> fs_two = FieldSet(Two)
>>> fs_two.configure(options=[fs_two.foo.label('1 < 2')])
>>> print fs_two.render()
<div>
<label class="field_opt" for="Two--foo">
1 < 2
</label>
<input id="Two--foo" name="Two--foo" type="text" value="133" />
</div>
<script type="text/javascript">
//<![CDATA[
document.getElementById("Two--foo").focus();
//]]>
</script>
>>> fs_prop = FieldSet(Property)
>>> fs_prop.foo.is_readonly()
True
>>> fs_conflict = FieldSet(ConflictNames)
>>> fs_conflict.rebind(conflict_names)
>>> print fs_conflict.render() #doctest: +ELLIPSIS
<div>
...
"""
if __name__ == '__main__':
import doctest
doctest.testmod()
|
[
"doctest.testmod"
] |
[((28095, 28112), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (28110, 28112), False, 'import doctest\n')]
|
from setuptools import setup, find_packages
setup(name='pykl',
version='0.1.6',
packages=find_packages(),
author='pykl',
author_email='<EMAIL>',
description='kltool for python, toolset for web, http, cache, dht, xml, json and so on',
long_description=open('README.rst').read(),
keywords='kltool html graphql xml json',
url='http://github.com/wowngasb/pykl',
license='MIT',
install_requires=[
'graphql-core==1.1',
'graphene==1.4',
'flask-graphql>=1.2.0',
'pyquery==1.2.11',
'requests==2.9.1',
'SQLAlchemy==1.1.15',
'six',
'singledispatch'
],
tests_require=[
])
|
[
"setuptools.find_packages"
] |
[((102, 117), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (115, 117), False, 'from setuptools import setup, find_packages\n')]
|
import re
import config
from datetime import datetime
from urllib2 import urlopen
from twilio.rest import TwilioRestClient
def validateStatus(site):
'''Return False to trigger the canary'''
return urlopen(site).getcode() == 200
def validateString(site):
p = re.compile(config.CHECK_STR)
return p.match(urlopen(site).read())
def lambda_handler(event, context):
print('Checking {} at {}...'.format(config.SITE, event['time']))
try:
if not validateString(config.SITE):
raise Exception('Validation failed')
except:
print('Check failed!')
make_call()
raise
else:
print('Check passed!')
return event['time']
finally:
print('Check complete at {}'.format(str(datetime.now())))
def make_call():
client = TwilioRestClient(config.ACCOUNT_SID, config.AUTH_TOKEN)
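    # Place an outbound call; Twilio fetches the call instructions (TwiML) from CALL_URL when the call connects.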
call = client.calls.create(
to=config.PHONE_TO,
from_=config.PHONE_FROM,
url=config.CALL_URL,
method="GET",
fallback_method="GET",
status_callback_method="GET",
record="false"
)
return call.sid
|
[
"datetime.datetime.now",
"urllib2.urlopen",
"twilio.rest.TwilioRestClient",
"re.compile"
] |
[((272, 300), 're.compile', 're.compile', (['config.CHECK_STR'], {}), '(config.CHECK_STR)\n', (282, 300), False, 'import re\n'), ((807, 862), 'twilio.rest.TwilioRestClient', 'TwilioRestClient', (['config.ACCOUNT_SID', 'config.AUTH_TOKEN'], {}), '(config.ACCOUNT_SID, config.AUTH_TOKEN)\n', (823, 862), False, 'from twilio.rest import TwilioRestClient\n'), ((206, 219), 'urllib2.urlopen', 'urlopen', (['site'], {}), '(site)\n', (213, 219), False, 'from urllib2 import urlopen\n'), ((320, 333), 'urllib2.urlopen', 'urlopen', (['site'], {}), '(site)\n', (327, 333), False, 'from urllib2 import urlopen\n'), ((758, 772), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (770, 772), False, 'from datetime import datetime\n')]
|
#!/usr/bin/env python3
import time
from http.server import HTTPServer, BaseHTTPRequestHandler
import requests
import json
from functools import reduce
import re
class Server(BaseHTTPRequestHandler):
def do_HEAD(self):
self.send_json({
'success': False,
'message': 'Wrong method, POST expected'
}, 400)
def do_GET(self):
self.send_json({
'success': False,
'message': 'Wrong method, POST expected'
}, 400)
def do_POST(self):
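        # Request bodies may wrap raw multi-line text in <<<...>>>; escaper strips the markers and escapes backslashes, newlines and quotes so json.loads() accepts the payload.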
def escaper(s):
s = s[1]
print("Replacing:\n{}".format(s))
s = s.replace("\\", "\\\\")
s = s.replace("\r\n", "\\n")
s = s.replace("\n", "\\n")
s = s.replace("\"", "\\\"")
return s
if self.headers.get("Content-type") != "application/json":
print(self.headers)
self.send_json({
'success': False,
'message': 'Wrong content type (\'application/json\' expected)'
}, 400)
else:
content_length = int(self.headers.get('Content-Length', 0))
post_body = self.rfile.read(content_length).decode('utf-8')
print("Decoded: {}".format(post_body))
post_body = re.sub(r"\<\<\<([\s\S]*)\>\>\>", escaper, post_body, flags=re.M)
print("Edited: {}".format(post_body))
data = json.loads(post_body)
if data.get('authKey') == config['authKey']:
error = self.relay(data)
if not error:
self.send_json({'success': True})
else:
self.send_json({'success': False, 'message': error})
else:
self.send_json({
'success': False,
'message': 'Auth key missing or incorrect'
}, 403)
def relay(self, data):
if "url" not in data:
return "No video URL received"
if "author" not in data:
return "No video author name received"
if "title" not in data:
return "No video title name received"
if "description" not in data:
return "No video description received"
post_data = {
"content":
"@here {author} uploaded **{title}** at {url}".format(
author=data["author"][:256],
title=data["title"][:256],
url=data["url"][:256]
)
}
print("POST head")
self.relay_json(post_data)
# requests.post(config["discordWebhookURL"], data=post_data)
descriptions = split_text(filter_text(data["description"]), 2048)
page = 1
for description in descriptions:
post_data = {
"embeds": [{
"type": "rich",
"description": description
}]
}
if page == 1:
post_data["embeds"][0]["title"] = data["title"][:256]
print("POST description {}".format(page))
page += 1
self.relay_json(post_data)
# requests.post(config["discordWebhookURL"], data=post_data)
return None
def relay_json(self, data):
requests.post(
config["discordWebhookURL"],
data=json.dumps(data).encode('utf-8'),
headers={
"Content-Type": "application/json"
}
)
def send_json(self, obj, status=200):
self.send_response(status)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(obj).encode())
def filter_text(text):
paragraphs = text.split("\n")
def try_match(line):
for regexp in patterns:
if regexp.match(line):
return False
return True
# Filter paragraphs according to config
paragraphs = [paragraph for paragraph in paragraphs if try_match(paragraph)]
return "\n".join(paragraphs)
def split_text(text, limit):
def paragraph_splitter(result, paragraph):
if len(paragraph) <= limit:
# If a paragraph can fit in one message, just add it
result.append(paragraph)
else:
# If a paragraph is too long, split it
while len(paragraph):
if len(paragraph) > limit:
# Remaining portion still too long
# Try to split at the last space possible
idx = paragraph.rfind(' ', 0, limit - 5) + 1
if idx < 1:
# If no space found, split as far as possible
idx = limit - 5
# Add the chopped-off portion, proceed with rest
result.append(paragraph[:idx])
paragraph = paragraph[idx:]
else:
# Remaining portion OK, just add it
result.append(paragraph)
paragraph = ""
if len(paragraph):
# If this was not the last portion, add continuation mark
result[-1] += "[...]"
return result
if limit < 6:
raise RuntimeError("Limit too narrow to split")
# Split text into paragraphs
paragraphs = text.split("\n")
# Split up paragraphs that are too long
paragraphs = reduce(paragraph_splitter, paragraphs, [])
# Each paragraph should already be small enough
for paragraph in paragraphs:
assert(len(paragraph) < limit)
# Assemble chunks as large as possible out of paragraphs
result = []
candidate = ""
quota = limit
for paragraph in paragraphs:
if len(paragraph) + 1 <= quota:
# We still have space for the paragraph + "\n"
if len(candidate) > 0:
candidate += "\n"
quota -= 1
candidate += paragraph
quota -= len(paragraph)
else:
# We can't add another paragraph, output current chunk
if len(candidate) > 0:
result.append(candidate)
candidate = ""
quota = limit
assert(len(paragraph) < quota)
# Start a new candidate chunk
candidate += paragraph
quota -= len(paragraph)
# Add last chunk, if non-empty
if len(candidate.strip()):
result.append(candidate)
# Strip extra "\n"
result = [part.strip() for part in result]
for part in result:
assert(len(part) < limit)
return result
if __name__ == '__main__':
global config, patterns
try:
with open("config.json") as config_file:
config = json.load(config_file)
except IOError:
print("Error reading config file")
exit(1)
patterns = []
for pattern in config.get("filters", []):
patterns.append(re.compile(pattern))
httpd = HTTPServer((config["host"], config["port"]), Server)
print(time.asctime(), 'Server UP - %s:%s' % (config["host"], config["port"]))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print(time.asctime(), 'Server DOWN - %s:%s' % (config["host"], config["port"]))
|
[
"time.asctime",
"http.server.HTTPServer",
"json.load",
"json.loads",
"json.dumps",
"functools.reduce",
"re.sub",
"re.compile"
] |
[((5494, 5536), 'functools.reduce', 'reduce', (['paragraph_splitter', 'paragraphs', '[]'], {}), '(paragraph_splitter, paragraphs, [])\n', (5500, 5536), False, 'from functools import reduce\n'), ((7061, 7113), 'http.server.HTTPServer', 'HTTPServer', (["(config['host'], config['port'])", 'Server'], {}), "((config['host'], config['port']), Server)\n", (7071, 7113), False, 'from http.server import HTTPServer, BaseHTTPRequestHandler\n'), ((7124, 7138), 'time.asctime', 'time.asctime', ([], {}), '()\n', (7136, 7138), False, 'import time\n'), ((7313, 7327), 'time.asctime', 'time.asctime', ([], {}), '()\n', (7325, 7327), False, 'import time\n'), ((1288, 1359), 're.sub', 're.sub', (['"""\\\\<\\\\<\\\\<([\\\\s\\\\S]*)\\\\>\\\\>\\\\>"""', 'escaper', 'post_body'], {'flags': 're.M'}), "('\\\\<\\\\<\\\\<([\\\\s\\\\S]*)\\\\>\\\\>\\\\>', escaper, post_body, flags=re.M)\n", (1294, 1359), False, 'import re\n'), ((1422, 1443), 'json.loads', 'json.loads', (['post_body'], {}), '(post_body)\n', (1432, 1443), False, 'import json\n'), ((6836, 6858), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (6845, 6858), False, 'import json\n'), ((7027, 7046), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (7037, 7046), False, 'import re\n'), ((3714, 3729), 'json.dumps', 'json.dumps', (['obj'], {}), '(obj)\n', (3724, 3729), False, 'import json\n'), ((3392, 3408), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (3402, 3408), False, 'import json\n')]
|
import os
import argparse
parser = argparse.ArgumentParser(description="Run attention2vec generation embeddings suite.")
parser.add_argument('--dataset', nargs='?', default='cora',
help='Input graph name for saving files')
args = parser.parse_args()
dataset = args.dataset
gamma = [10]
R = [0.5]
T = [3, 4]
train = [20, 30]
for g in gamma:
for r in R:
for t in T:
for tr in train:
print("----------------------------")
print("Parameters : ", g, r, t, tr)
print("----------------------------")
cmd = "python main.py --dataset {} --attn2v_iter {} --r {} --t {} --train_per {}".format(dataset, g, r, t, tr)
print(cmd + "\n")
os.system(cmd)
print("Done!")
|
[
"os.system",
"argparse.ArgumentParser"
] |
[((36, 126), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run attention2vec generation embeddings suite."""'}), "(description=\n 'Run attention2vec generation embeddings suite.')\n", (59, 126), False, 'import argparse\n'), ((766, 780), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (775, 780), False, 'import os\n')]
|
import random
def delete_random_character(s):
"""Returns s with a random character deleted"""
if s == "":
return s
pos = random.randint(0, len(s) - 1)
# print("Deleting", repr(s[pos]), "at", pos)
return s[:pos] + s[pos + 1:]
def insert_random_character(s):
"""Returns s with a random character inserted"""
pos = random.randint(0, len(s))
random_character = chr(random.randrange(32, 127))
# print("Inserting", repr(random_character), "at", pos)
return s[:pos] + random_character + s[pos:]
def flip_random_character(s):
"""Returns s with a random bit flipped in a random position"""
if s == "":
return s
pos = random.randint(0, len(s) - 1)
c = s[pos]
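    # Flip one of the low seven bits, keeping the mutated character within the 7-bit ASCII range.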
bit = 1 << random.randint(0, 6)
new_c = chr(ord(c) ^ bit)
# print("Flipping", bit, "in", repr(c) + ", giving", repr(new_c))
return s[:pos] + new_c + s[pos + 1:]
def mutate_strings(s):
"""Return s with a random mutation applied"""
mutators = [
delete_random_character,
insert_random_character,
flip_random_character
]
mutator = random.choice(mutators)
# print(mutator)
return mutator(s)
|
[
"random.randint",
"random.randrange",
"random.choice"
] |
[((1110, 1133), 'random.choice', 'random.choice', (['mutators'], {}), '(mutators)\n', (1123, 1133), False, 'import random\n'), ((404, 429), 'random.randrange', 'random.randrange', (['(32)', '(127)'], {}), '(32, 127)\n', (420, 429), False, 'import random\n'), ((741, 761), 'random.randint', 'random.randint', (['(0)', '(6)'], {}), '(0, 6)\n', (755, 761), False, 'import random\n')]
|
import logging
import re
import pystache
def init_logger():
logging.basicConfig(
format=
"%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s",
level=logging.INFO,
)
def _convert_to_dict(obj) -> dict:
converted_obj = {}
if isinstance(obj, list):
for i, value in enumerate(obj):
converted_obj[str(i)] = value
elif isinstance(obj, dict):
for key, value in obj.items():
converted_obj[key] = _convert_to_dict(value)
else:
converted_obj = obj
return converted_obj
def enable_request_debug_log(func):
def wrapper(*args, **kwargs):
requests_log = logging.getLogger("urllib3")
level = requests_log.level
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
try:
return func(*args, **kwargs)
finally:
requests_log.setLevel(level)
requests_log.propagate = False
return wrapper
def render_string_with_secrets(string, secrets) -> str:
if not secrets:
return string
secret_dict = _convert_to_dict(secrets)
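    # Use <% %> as the template delimiters so literal {{ }} sequences in the string are left untouched.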
parsed = pystache.parse(string, delimiters=("<%", "%>"))
for token in parsed._parse_tree: #pylint: disable=protected-access
if isinstance(token, pystache.parser._EscapeNode): #pylint: disable=protected-access
token.key = re.sub(
r"\[(\d+)\]", r".\1",
token.key) # make format such as $secrets.data[0] works
return pystache.Renderer().render(parsed, {"$secrets": secret_dict})
|
[
"logging.basicConfig",
"pystache.parse",
"pystache.Renderer",
"re.sub",
"logging.getLogger"
] |
[((67, 193), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s',\n level=logging.INFO)\n", (86, 193), False, 'import logging\n'), ((1155, 1202), 'pystache.parse', 'pystache.parse', (['string'], {'delimiters': "('<%', '%>')"}), "(string, delimiters=('<%', '%>'))\n", (1169, 1202), False, 'import pystache\n'), ((675, 703), 'logging.getLogger', 'logging.getLogger', (['"""urllib3"""'], {}), "('urllib3')\n", (692, 703), False, 'import logging\n'), ((1393, 1434), 're.sub', 're.sub', (['"""\\\\[(\\\\d+)\\\\]"""', '""".\\\\1"""', 'token.key'], {}), "('\\\\[(\\\\d+)\\\\]', '.\\\\1', token.key)\n", (1399, 1434), False, 'import re\n'), ((1523, 1542), 'pystache.Renderer', 'pystache.Renderer', ([], {}), '()\n', (1540, 1542), False, 'import pystache\n')]
|
#!/usr/bin/env python
"""
Advent of Code 2020: Day 16
"""
import copy
from collections import defaultdict
import math
import itertools
import signal
import sys
from types import FrameType
from typing import List, Mapping
from pathlib import Path
DEBUG = False
ACTIVE = True
INACTIVE = False
# Common -----------------------------------------------------------------------
def decode(file: Path) -> dict[tuple[int, int], bool]:
"""
Decode file contents
:param file: file containing the input values
:return: 2d map of the initial slice
"""
fh = open(file)
decoded_map = dict()
for y, l in enumerate(fh):
for x, c in enumerate(l.strip()):
active = c == '#'
decoded_map[(x, y)] = active
return decoded_map
def list_indexes(map_: dict[tuple, any], axis: int) -> list:
"""
List the indexes of a given axis in a mapping
:param map_: mapping of a property (activation) per grid position
:param axis: selected grid axis
:return: set of indexes across the given axis
"""
axis_count: int = len(next(iter(map_.keys())))
if axis >= axis_count:
return [0]
indexes = set(position[axis] for position in map_.keys())
index_list = sorted(indexes)
return index_list
def visualize(map_: dict[tuple, any]) -> None:
"""
Visualize slices of a mapping
:param map_: mapping of a property (activation) per grid position
:return: nothing
"""
conv = lambda pos, axis_cnt: \
('X' if map_[pos[:axis_cnt]] else ".") if isinstance(
map_[pos[:axis_cnt]], bool) else str(map_[pos[:axis_cnt]])
axis_count: int = len(next(iter(map_.keys())))
for w in list_indexes(map_, 3):
for z in list_indexes(map_, 2):
if axis_count == 4:
print(f'z={z}, w={w}')
elif axis_count == 3:
print(f'z={z}')
for y in list_indexes(map_, 1):
print(f'{" ".join(conv((x, y, z, w), axis_count) for x in list_indexes(map_, 0))}')
def execute_cycle(state: dict[tuple, bool]) -> dict[tuple[int, int, int], bool]:
"""
Execute one single state update cycle
:param state: 3d mapping of the state
:return: 3d mapping of the state
"""
expanded_state = state
axis_count: int = len(next(iter(state.keys())))
for axis in range(axis_count):
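        # Grow the grid by one inactive layer along this axis wherever the boundary slice contains an active cube.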
state = copy.copy(expanded_state)
axis_values = list_indexes(map_=state, axis=axis)
for upper in [True, False]:
index = max(axis_values) if upper else min(axis_values)
state_slice = {pos: v for pos, v in state.items()
if pos[axis] == index}
slice_active = any(state_slice.values())
if slice_active:
new_index = index + (1 if upper else -1)
for pos, s in state_slice.items():
new_pos = tuple(new_index if i == axis else a
for i, a in enumerate(pos))
expanded_state[new_pos] = False
if DEBUG:
visualize(expanded_state)
state_dd = defaultdict(bool, expanded_state)
active_neighbors_map = dict()
moves = [[-1, 0, +1]] * axis_count
self = tuple([0] * axis_count)
directions = [m for m in list(itertools.product(*moves)) if m != self]
for pos in expanded_state.keys():
active_neighbors = 0
for dir_ in directions:
neighbor = tuple(pos[axis] + dir_[axis] for axis in range(axis_count))
if state_dd[neighbor]:
active_neighbors += 1
active_neighbors_map[pos] = active_neighbors
if DEBUG:
visualize(active_neighbors_map)
updated_state = expanded_state #copy.copy(expanded_state)
for pos, count in active_neighbors_map.items():
cube_active = expanded_state[pos] == ACTIVE
neighbors_active = active_neighbors_map[pos]
if cube_active and neighbors_active not in [2, 3]:
updated_state[pos] = INACTIVE
elif not cube_active and neighbors_active == 3:
updated_state[pos] = ACTIVE
if DEBUG:
visualize(updated_state)
return updated_state
# Part One ---------------------------------------------------------------------
def process(file: Path) -> int:
"""
Process input file yielding the submission value
:param file: file containing the input values
:return: value to submit
"""
initial_slice = decode(file=file)
initial_state = {pos + tuple([0]): state for pos, state in initial_slice.items()}
if DEBUG:
visualize(map_=initial_state)
state = initial_state
for cycle in range(6):
if DEBUG:
visualize(state)
new_state = execute_cycle(state=state)
state = new_state
active_cubes = sum(state.values())
submission = active_cubes
return submission
# Part Two ---------------------------------------------------------------------
def process_part2(file: Path) -> int:
"""
Process input file yielding the submission value
:param file: file containing the input values
:return: value to submit
"""
initial_slice = decode(file=file)
initial_state = {pos + tuple([0, 0]): state
for pos, state in initial_slice.items()}
if DEBUG:
visualize(map_=initial_state)
state = initial_state
for cycle in range(6):
if DEBUG:
visualize(state)
new_state = execute_cycle(state=state)
state = new_state
active_cubes = sum(state.values())
submission = active_cubes
return submission
# Main -------------------------------------------------------------------------
def main() -> int:
"""
Main function
:return: Shell exit code
"""
files = ['./example.txt', './input.txt']
#files = ['./example.txt']
#files = []
for f in files:
print(f'In file {f}:')
print(f'\tPart One: {process(file=Path(f))}')
files = ['./example.txt', './input.txt']
#files = ['./example.txt']
#files = []
for f in files:
print(f'In file {f}:')
print(f'\tPart Two: {process_part2(file=Path(f))}')
return 0
def handle_sigint(signal_value: signal.Signals, frame: FrameType) -> None:
"""
Interrupt signal call-back method
:param signal_value: signal (expected SIGINT)
:param frame: current stack frame at the time of signal
:return: nothing
"""
assert signal_value == signal.SIGINT
print(frame.f_locals)
sys.exit(1)
if __name__ == '__main__':
signal.signal(signal.SIGINT, handle_sigint)
sys.exit(main())
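# Hedged usage note (the seed pattern is the customary small example, not taken
# from the files above): an "example.txt" such as
#
#   .#.
#   ..#
#   ###
#
# is read by decode() into {(x, y): bool} with '#' mapped to True; process()
# pads each coordinate with one extra axis (z=0) and process_part2() with two
# (z=0, w=0), then both run six execute_cycle() passes and report the number of
# active cubes that remain.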
|
[
"copy.copy",
"collections.defaultdict",
"pathlib.Path",
"itertools.product",
"signal.signal",
"sys.exit"
] |
[((3140, 3173), 'collections.defaultdict', 'defaultdict', (['bool', 'expanded_state'], {}), '(bool, expanded_state)\n', (3151, 3173), False, 'from collections import defaultdict\n'), ((6576, 6587), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6584, 6587), False, 'import sys\n'), ((6621, 6664), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'handle_sigint'], {}), '(signal.SIGINT, handle_sigint)\n', (6634, 6664), False, 'import signal\n'), ((2404, 2429), 'copy.copy', 'copy.copy', (['expanded_state'], {}), '(expanded_state)\n', (2413, 2429), False, 'import copy\n'), ((3316, 3341), 'itertools.product', 'itertools.product', (['*moves'], {}), '(*moves)\n', (3333, 3341), False, 'import itertools\n'), ((6011, 6018), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (6015, 6018), False, 'from pathlib import Path\n'), ((6215, 6222), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (6219, 6222), False, 'from pathlib import Path\n')]
|
#!/usr/bin/python
"""
Test oracle client
Author: <NAME>
Copyright (c) 2018 aeternity developers
Permission to use, copy, modify, and/or distribute this software for
any purpose with or without fee is hereby granted, provided that the
above copyright notice and this permission notice appear in all
copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
"""
import asyncio
from epoch import Epoch
import json
import os
from websocket import create_connection
class Oracle:
def __init__(self):
self.pub_key = os.environ['AE_PUB_KEY']
self.url = "ws://localhost:" + os.environ['AE_WEBSOCKET'] + "/websocket"
self.websocket = None
self.local_port = os.environ['AE_LOCAL_PORT']
self.local_internal_port = os.environ['AE_LOCAL_INTERNAL_PORT']
self.epoch = Epoch()
def connect_websocket(self):
if not self.websocket:
self.websocket = create_connection(self.url)
def register(self, query_format, response_format, query_fee, ttl, fee):
self.connect_websocket()
query = { "target": "oracle",
"action": "register",
"payload": { "type": "OracleRegisterTxObject",
"vsn": 1,
"account": self.pub_key,
"query_format": query_format,
"response_format": response_format,
"query_fee": int(query_fee),
"ttl": {"type": "delta",
"value": int(ttl)},
"fee": int(fee) } }
j = json.dumps(query)
print(j)
self.epoch.update_top_block()
self.websocket.send(j)
response = json.loads(self.websocket.recv())
if not response['payload']['result'] == "ok":
raise RuntimeError(response)
oracle_id = response['payload']['oracle_id']
self.epoch.wait_for_block()
return oracle_id
def wait_for_block(self):
self.epoch.update_top_block()
self.epoch.wait_for_block()
def subscribe(self, oracle_id, callback = None):
self.connect_websocket()
query = {"target": "oracle",
"action": "subscribe",
"payload": {"type": "query",
"oracle_id": oracle_id }}
j = json.dumps(query)
self.websocket.send(j)
while True:
response = json.loads(self.websocket.recv())
print(response)
if response['action'] == 'mined_block':
continue
if not response['payload']['result'] == 'ok':
raise RuntimeError(response)
id = response['payload']['subscribed_to']['oracle_id']
break
mining_events = 0
while True:
data = self.websocket.recv()
j = json.loads(data)
print(j)
if j['action'] == 'mined_block':
mining_events += 1
continue
if j['action'] == 'new_oracle_query':
if callback:
callback(j)
else:
print("Unhandled")
if mining_events == 0:
self.epoch.wait_for_block()
def query(self, oracle_pubkey, query_fee, query_ttl, response_ttl,
fee, query):
self.connect_websocket()
request = {"target": "oracle",
"action": "query",
"payload": {"type": "OracleQueryTxObject",
"vsn": 1,
"oracle_pubkey": oracle_pubkey,
"query_fee": int(query_fee),
"query_ttl": {"type": "delta",
"value": int(query_ttl)},
"response_ttl": {"type": "delta",
"value": int(response_ttl)},
"fee": int(fee),
"query": query }}
j = json.dumps(request)
print(j)
self.websocket.send(j)
response = self.websocket.recv()
print(response)
response = json.loads(response)
if response['payload']['result'] == "ok":
return response['payload']['query_id']
self.epoch.wait_for_block()
return False
def subscribe_query(self, query_id, callback = None):
self.connect_websocket()
request = {"target": "oracle",
"action": "subscribe",
"payload": {"type": "response",
"query_id": query_id }}
j = json.dumps(request)
print(j)
self.websocket.send(j)
# check response, might have to consume a block mined message
while True:
blocks_mined = 0
response = self.websocket.recv()
response = json.loads(response)
print(response)
if response['action'] == 'mined_block':
blocks_mined += 1
continue
if response['action'] == 'new_oracle_response':
if callback:
callback(response['payload'])
else:
print(response['payload'])
break
# Should we get here?
if not response['payload']['result'] == 'ok':
raise RuntimeError(response)
def respond(self, query_id, fee, reply):
self.connect_websocket()
response = {"target": "oracle",
"action": "response",
"payload": {"type": "OracleResponseTxObject",
"vsn": 1,
"query_id": query_id,
"fee": int(fee),
"response": reply}}
response = json.dumps(response)
print(response)
self.websocket.send(response)
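# Hedged usage sketch (the fee/TTL numbers and the payload field names in the
# callback are assumptions, not taken from the class above):
#
#   oracle = Oracle()
#   oracle_id = oracle.register(query_format="string", response_format="string",
#                               query_fee=4, ttl=50, fee=5)
#   # Serve incoming queries with a fixed reply; the exact location of the
#   # query_id inside the websocket payload should be checked against the node.
#   oracle.subscribe(oracle_id,
#                    callback=lambda msg: oracle.respond(
#                        msg['payload']['query_id'], fee=5, reply="42"))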
|
[
"websocket.create_connection",
"epoch.Epoch",
"json.loads",
"json.dumps"
] |
[((1248, 1255), 'epoch.Epoch', 'Epoch', ([], {}), '()\n', (1253, 1255), False, 'from epoch import Epoch\n'), ((2094, 2111), 'json.dumps', 'json.dumps', (['query'], {}), '(query)\n', (2104, 2111), False, 'import json\n'), ((2854, 2871), 'json.dumps', 'json.dumps', (['query'], {}), '(query)\n', (2864, 2871), False, 'import json\n'), ((4581, 4600), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (4591, 4600), False, 'import json\n'), ((4733, 4753), 'json.loads', 'json.loads', (['response'], {}), '(response)\n', (4743, 4753), False, 'import json\n'), ((5203, 5222), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (5213, 5222), False, 'import json\n'), ((6441, 6461), 'json.dumps', 'json.dumps', (['response'], {}), '(response)\n', (6451, 6461), False, 'import json\n'), ((1350, 1377), 'websocket.create_connection', 'create_connection', (['self.url'], {}), '(self.url)\n', (1367, 1377), False, 'from websocket import create_connection\n'), ((3376, 3392), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (3386, 3392), False, 'import json\n'), ((5459, 5479), 'json.loads', 'json.loads', (['response'], {}), '(response)\n', (5469, 5479), False, 'import json\n')]
|
#!/usr/bin/env python
# coding=utf-8
from setuptools import setup,find_packages
setup(
name="sopt",
version="0.0.6.1",
description="sopt:a simple python optimizer library",
long_description=
'''
    sopt is a simple python optimizer library. Currently, it includes some stochastic optimization
    algorithms, like Genetic Algorithm (GA), Particle Swarm Optimization (PSO), Simulated Annealing
    (SA) and Random Walk (plus an improved version), as well as some gradient-based optimizers, like Gradient
    Descent, Momentum, AdaGrad, RMSProp and Adam. For the GA optimizer it includes many different
    selection and mutation methods, and the same goes for PSO and the other optimizers,
    so you can try many different kinds of optimizers with different settings. All the stochastic optimizers
    also support non-linear complex constraints, using penalty methods or dropout-bad-solution methods.
''',
author='lyrichu',
author_email='<EMAIL>',
url = "http://www.github.com/Lyrichu",
maintainer='lyrichu',
maintainer_email='<EMAIL>',
packages=['sopt','sopt/GA','sopt/SGA','sopt/PSO','sopt/test','sopt/util','sopt/Optimizers'],
package_dir={'sopt': 'sopt'},
install_requires=['numpy']
)
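# Hedged packaging note: once built and published, the distribution would
# normally be installed with `pip install sopt`; the `packages` list above is
# what makes subpackages such as `sopt.GA` and `sopt.Optimizers` importable
# from the installed package.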
|
[
"setuptools.setup"
] |
[((81, 1231), 'setuptools.setup', 'setup', ([], {'name': '"""sopt"""', 'version': '"""0.0.6.1"""', 'description': '"""sopt:a simple python optimizer library"""', 'long_description': '"""\n sopt is a simple python optimizer library.Currentlly,it includes some stochastic optimization\n algorithms,like Genetic Algorithm(GA),Particle Swarm Optimization(PSO),Simulated Anealing\n (SA),Random Walk(and its improvement version),and some gradient based optimizers,like Gradient\n Descent,Momentum,AdaGrad,RMSProp and Adam Optimizers.For the GA optimizers,it includes many\n kinds of different selected methods,mutation methods etc,as well as for PSO and other optimizers,\n so you can try many different kinds of optimizers with different settings,all the stochastic optimization\n also supports the non-linear complex constraints by using penalty methods or dropout-bad-solution methods.\n """', 'author': '"""lyrichu"""', 'author_email': '"""<EMAIL>"""', 'url': '"""http://www.github.com/Lyrichu"""', 'maintainer': '"""lyrichu"""', 'maintainer_email': '"""<EMAIL>"""', 'packages': "['sopt', 'sopt/GA', 'sopt/SGA', 'sopt/PSO', 'sopt/test', 'sopt/util',\n 'sopt/Optimizers']", 'package_dir': "{'sopt': 'sopt'}", 'install_requires': "['numpy']"}), '(name=\'sopt\', version=\'0.0.6.1\', description=\n \'sopt:a simple python optimizer library\', long_description=\n """\n sopt is a simple python optimizer library.Currentlly,it includes some stochastic optimization\n algorithms,like Genetic Algorithm(GA),Particle Swarm Optimization(PSO),Simulated Anealing\n (SA),Random Walk(and its improvement version),and some gradient based optimizers,like Gradient\n Descent,Momentum,AdaGrad,RMSProp and Adam Optimizers.For the GA optimizers,it includes many\n kinds of different selected methods,mutation methods etc,as well as for PSO and other optimizers,\n so you can try many different kinds of optimizers with different settings,all the stochastic optimization\n also supports the non-linear complex constraints by using penalty methods or dropout-bad-solution methods.\n """\n , author=\'lyrichu\', author_email=\'<EMAIL>\', url=\n \'http://www.github.com/Lyrichu\', maintainer=\'lyrichu\', maintainer_email\n =\'<EMAIL>\', packages=[\'sopt\', \'sopt/GA\', \'sopt/SGA\', \'sopt/PSO\',\n \'sopt/test\', \'sopt/util\', \'sopt/Optimizers\'], package_dir={\'sopt\':\n \'sopt\'}, install_requires=[\'numpy\'])\n', (86, 1231), False, 'from setuptools import setup, find_packages\n')]
|
import logging
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.models import AccessControlList
from mayan.apps.document_states.classes import WorkflowAction
from .models import UserMailer
from .permissions import permission_user_mailer_use
__all__ = ('EmailAction',)
logger = logging.getLogger(name=__name__)
class EmailAction(WorkflowAction):
fields = {
'mailing_profile': {
'label': _('Mailing profile'),
'class': 'django.forms.ModelChoiceField', 'kwargs': {
'help_text': _('Mailing profile to use when sending the email.'),
'queryset': UserMailer.objects.none(), 'required': True
}
},
'recipient': {
'label': _('Recipient'),
'class': 'django.forms.CharField', 'kwargs': {
'help_text': _(
'Email address of the recipient. Can be multiple addresses '
'separated by comma or semicolon. A template can be used '
'to reference properties of the document.'
),
'required': True
}
},
'cc': {
'label': _('CC'),
'class': 'django.forms.CharField', 'kwargs': {
'help_text': _(
'Address used in the "Bcc" header when sending the '
'email. Can be multiple addresses '
'separated by comma or semicolon. A template can be used '
'to reference properties of the document.'
),
'required': False
}
},
'bcc': {
'label': _('BCC'),
'class': 'django.forms.CharField', 'kwargs': {
'help_text': _(
'Address used in the "Bcc" header when sending the '
'email. Can be multiple addresses '
'separated by comma or semicolon. A template can be used '
'to reference properties of the document.'
),
'required': False
}
},
'reply_to': {
'label': _('Reply to'),
'class': 'django.forms.CharField', 'kwargs': {
'help_text': _(
'Address used in the "Reply-To" header when sending the '
'email. Can be multiple addresses '
'separated by comma or semicolon. A template can be used '
'to reference properties of the document.'
),
'required': False
}
},
'subject': {
'label': _('Subject'),
'class': 'django.forms.CharField', 'kwargs': {
'help_text': _(
'Subject of the email. Can be a string or a template.'
),
'required': True
}
},
'body': {
'label': _('Body'),
'class': 'django.forms.CharField', 'kwargs': {
'help_text': _(
'Body of the email to send. Can be a string or a template.'
),
'required': True
}
},
'attachment': {
'label': _('Attachment'),
'class': 'django.forms.BooleanField', 'default': False,
'help_text': _(
'Attach the document to the mail.'
),
'required': False
},
}
field_order = (
'mailing_profile', 'recipient', 'cc', 'bcc', 'reply_to', 'subject',
'body', 'attachment'
)
label = _('Send email')
widgets = {
'body': {
'class': 'django.forms.widgets.Textarea', 'kwargs': {}
}
}
permission = permission_user_mailer_use
def execute(self, context):
recipient = self.render_field(
field_name='recipient', context=context
)
cc = self.render_field(
field_name='cc', context=context
)
bcc = self.render_field(
field_name='bcc', context=context
)
reply_to = self.render_field(
field_name='reply_to', context=context
)
subject = self.render_field(
field_name='subject', context=context
)
body = self.render_field(
field_name='body', context=context
)
user_mailer = self.get_user_mailer()
kwargs = {
'bcc': bcc, 'cc': cc, 'body': body, 'reply_to': reply_to,
'subject': subject, 'to': recipient
}
if self.form_data.get('attachment', False):
kwargs.update(
{
'as_attachment': True,
'document': context['document']
}
)
user_mailer.send_document(**kwargs)
else:
user_mailer.send(**kwargs)
def get_form_schema(self, **kwargs):
result = super().get_form_schema(**kwargs)
queryset = AccessControlList.objects.restrict_queryset(
permission=self.permission, queryset=UserMailer.objects.all(),
user=kwargs['request'].user
)
result['fields']['mailing_profile']['kwargs']['queryset'] = queryset
return result
def get_user_mailer(self):
return UserMailer.objects.get(pk=self.form_data['mailing_profile'])
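# Hedged illustration (all values invented): the workflow state action stores
# its configuration in `form_data` roughly as
#
#   {'mailing_profile': 3,
#    'recipient': '{{ document.uuid }}@example.com',
#    'subject': 'New version of {{ document.label }}',
#    'body': 'A document was updated.',
#    'attachment': True}
#
# execute() then renders each templated field against the workflow context and,
# because 'attachment' is truthy, calls UserMailer.send_document() with the
# document from the context instead of plain send().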
|
[
"django.utils.translation.ugettext_lazy",
"logging.getLogger"
] |
[((320, 352), 'logging.getLogger', 'logging.getLogger', ([], {'name': '__name__'}), '(name=__name__)\n', (337, 352), False, 'import logging\n'), ((3751, 3766), 'django.utils.translation.ugettext_lazy', '_', (['"""Send email"""'], {}), "('Send email')\n", (3752, 3766), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((461, 481), 'django.utils.translation.ugettext_lazy', '_', (['"""Mailing profile"""'], {}), "('Mailing profile')\n", (462, 481), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((779, 793), 'django.utils.translation.ugettext_lazy', '_', (['"""Recipient"""'], {}), "('Recipient')\n", (780, 793), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1234, 1241), 'django.utils.translation.ugettext_lazy', '_', (['"""CC"""'], {}), "('CC')\n", (1235, 1241), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1733, 1741), 'django.utils.translation.ugettext_lazy', '_', (['"""BCC"""'], {}), "('BCC')\n", (1734, 1741), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2238, 2251), 'django.utils.translation.ugettext_lazy', '_', (['"""Reply to"""'], {}), "('Reply to')\n", (2239, 2251), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2752, 2764), 'django.utils.translation.ugettext_lazy', '_', (['"""Subject"""'], {}), "('Subject')\n", (2753, 2764), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3057, 3066), 'django.utils.translation.ugettext_lazy', '_', (['"""Body"""'], {}), "('Body')\n", (3058, 3066), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3370, 3385), 'django.utils.translation.ugettext_lazy', '_', (['"""Attachment"""'], {}), "('Attachment')\n", (3371, 3385), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3482, 3519), 'django.utils.translation.ugettext_lazy', '_', (['"""Attach the document to the mail."""'], {}), "('Attach the document to the mail.')\n", (3483, 3519), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((580, 631), 'django.utils.translation.ugettext_lazy', '_', (['"""Mailing profile to use when sending the email."""'], {}), "('Mailing profile to use when sending the email.')\n", (581, 631), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((885, 1049), 'django.utils.translation.ugettext_lazy', '_', (['"""Email address of the recipient. Can be multiple addresses separated by comma or semicolon. A template can be used to reference properties of the document."""'], {}), "('Email address of the recipient. Can be multiple addresses separated by comma or semicolon. A template can be used to reference properties of the document.'\n )\n", (886, 1049), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1333, 1522), 'django.utils.translation.ugettext_lazy', '_', (['"""Address used in the "Bcc" header when sending the email. Can be multiple addresses separated by comma or semicolon. A template can be used to reference properties of the document."""'], {}), '(\'Address used in the "Bcc" header when sending the email. Can be multiple addresses separated by comma or semicolon. A template can be used to reference properties of the document.\'\n )\n', (1334, 1522), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1833, 2022), 'django.utils.translation.ugettext_lazy', '_', (['"""Address used in the "Bcc" header when sending the email. Can be multiple addresses separated by comma or semicolon. 
A template can be used to reference properties of the document."""'], {}), '(\'Address used in the "Bcc" header when sending the email. Can be multiple addresses separated by comma or semicolon. A template can be used to reference properties of the document.\'\n )\n', (1834, 2022), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2343, 2537), 'django.utils.translation.ugettext_lazy', '_', (['"""Address used in the "Reply-To" header when sending the email. Can be multiple addresses separated by comma or semicolon. A template can be used to reference properties of the document."""'], {}), '(\'Address used in the "Reply-To" header when sending the email. Can be multiple addresses separated by comma or semicolon. A template can be used to reference properties of the document.\'\n )\n', (2344, 2537), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2856, 2913), 'django.utils.translation.ugettext_lazy', '_', (['"""Subject of the email. Can be a string or a template."""'], {}), "('Subject of the email. Can be a string or a template.')\n", (2857, 2913), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3158, 3220), 'django.utils.translation.ugettext_lazy', '_', (['"""Body of the email to send. Can be a string or a template."""'], {}), "('Body of the email to send. Can be a string or a template.')\n", (3159, 3220), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
from django.shortcuts import render, redirect
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormView
from django.contrib.auth.models import User
from wiki.models import Page
from wiki.forms import PageForm
class PageListView(ListView):
""" Renders a list of all Pages. """
model = Page
def get(self, request):
""" GET a list of Pages. """
pages = self.get_queryset().all()
return render(request, 'list.html', {
'pages': pages
})
class PageDetailView(DetailView):
    """ Renders a specific page based on its slug. """
model = Page
def get(self, request, slug):
""" Returns a specific wiki page by slug. """
page = self.get_queryset().get(slug__iexact=slug)
return render(request, 'page.html', {
'page': page
})
class PageCreateView(FormView):
template_name = 'create_page.html'
form_class = PageForm
success_url = '/'
def post(self, request):
page_form = PageForm(request.POST)
page = page_form.save(commit=False)
page.author = User.objects.get(id=request.POST['author'])
page.save()
return redirect(page)
def form_valid(self, form):
return super().form_valid(form)
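# Hedged wiring sketch (URL names and paths are assumptions, not part of the
# sample): these class-based views would typically be routed in a urls.py like
#
#   from django.urls import path
#   from wiki import views
#
#   urlpatterns = [
#       path('', views.PageListView.as_view(), name='wiki-list'),
#       path('create/', views.PageCreateView.as_view(), name='wiki-create'),
#       path('<slug:slug>/', views.PageDetailView.as_view(), name='wiki-page'),
#   ]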
|
[
"django.shortcuts.render",
"django.contrib.auth.models.User.objects.get",
"django.shortcuts.redirect",
"wiki.forms.PageForm"
] |
[((510, 556), 'django.shortcuts.render', 'render', (['request', '"""list.html"""', "{'pages': pages}"], {}), "(request, 'list.html', {'pages': pages})\n", (516, 556), False, 'from django.shortcuts import render, redirect\n'), ((846, 890), 'django.shortcuts.render', 'render', (['request', '"""page.html"""', "{'page': page}"], {}), "(request, 'page.html', {'page': page})\n", (852, 890), False, 'from django.shortcuts import render, redirect\n'), ((1081, 1103), 'wiki.forms.PageForm', 'PageForm', (['request.POST'], {}), '(request.POST)\n', (1089, 1103), False, 'from wiki.forms import PageForm\n'), ((1170, 1213), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': "request.POST['author']"}), "(id=request.POST['author'])\n", (1186, 1213), False, 'from django.contrib.auth.models import User\n'), ((1249, 1263), 'django.shortcuts.redirect', 'redirect', (['page'], {}), '(page)\n', (1257, 1263), False, 'from django.shortcuts import render, redirect\n')]
|
#from django.conf import settings
from django.conf.urls.static import static
from django import views
from django.contrib import admin
from django.shortcuts import redirect
from django.urls import path, include
from django.utils import decorators
from blog import settings #from .settings import local
import categoria.views, comentario.views, cuenta.views, publicacion.views
"""blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
urlpatterns = [
path('admin/', admin.site.urls), # sitio de administración
path('', publicacion.views.index, name='index'), # inicio de la web
path('nosotros', publicacion.views.nosotros, name='nosotros'), # inicio de la web
path('blog/post/nueva/', publicacion.views.nueva, name='publicacion_nueva'),
path('blog/post/editar/<int:id>/', publicacion.views.editar, name='publicacion_editar'),
path('blog/post/eliminar/<int:id>/', publicacion.views.eliminar, name='publicacion_eliminar'),
path('blog/post/<int:id>/', publicacion.views.ver, name='publicacion_ver'),
path('blog/post/autor/', publicacion.views.autor, name='publicacion_por_autor'),
path('blog/post/autor/<int:id>/', publicacion.views.autor, name='publicacion_por_autor'),
path('blog/post/<int:publicacion_id>/comentario/<int:comentario_id>/', comentario.views.ver, name='comentario_ver'),
path('categoria/listado/', categoria.views.listado, name='categoria_listado'),
path('categoria/nueva/', categoria.views.nueva, name='categoria_nueva'),
path('categoria/editar/<int:id>/', categoria.views.editar, name='categoria_editar'),
path('categoria/eliminar/<int:id>/', categoria.views.eliminar, name='categoria_eliminar'),
path('categoria/<int:id>/', categoria.views.filtrar, name='categoria_filtrar'),
path('usuario/listado/', cuenta.views.usuario_listado, name="usuario_listado"),
path('usuario/nuevo/', cuenta.views.usuario_nuevo, name="usuario_nuevo"),
path('usuario/editar/<int:id>/', cuenta.views.usuario_editar, name="usuario_editar"),
path('usuario/eliminar/<int:id>/', cuenta.views.usuario_eliminar, name="usuario_eliminar"),
path('usuario/tipo/', cuenta.views.tipo_listado, name="tipo_listado"),
path('usuario/tipo/nuevo/', cuenta.views.tipo_nuevo, name="tipo_nuevo"),
path('usuario/tipo/editar/<int:id>', cuenta.views.tipo_editar, name="tipo_editar"),
path('usuario/tipo/eliminar/<int:id>', cuenta.views.tipo_eliminar, name="tipo_eliminar"),
path('cuenta/', cuenta.views.cuenta, name="cuenta"),
path('cuenta/registrar/', cuenta.views.registrar, name="cuenta_registrar"),
path('cuenta/iniciar/', cuenta.views.iniciar_sesion, name="iniciar_sesion"),
path('cuenta/cerrar/', cuenta.views.cerrar_sesion, name="cerrar_sesion"),
path('cuenta/cambiarPassword/', cuenta.views.cambiarPassword, name="cambiarPassword"),
path('restringido/', cuenta.views.restringido, name='restringido'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"django.conf.urls.static.static",
"django.urls.path"
] |
[((3553, 3614), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (3559, 3614), False, 'from django.conf.urls.static import static\n'), ((1055, 1086), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (1059, 1086), False, 'from django.urls import path, include\n'), ((1121, 1168), 'django.urls.path', 'path', (['""""""', 'publicacion.views.index'], {'name': '"""index"""'}), "('', publicacion.views.index, name='index')\n", (1125, 1168), False, 'from django.urls import path, include\n'), ((1194, 1255), 'django.urls.path', 'path', (['"""nosotros"""', 'publicacion.views.nosotros'], {'name': '"""nosotros"""'}), "('nosotros', publicacion.views.nosotros, name='nosotros')\n", (1198, 1255), False, 'from django.urls import path, include\n'), ((1283, 1358), 'django.urls.path', 'path', (['"""blog/post/nueva/"""', 'publicacion.views.nueva'], {'name': '"""publicacion_nueva"""'}), "('blog/post/nueva/', publicacion.views.nueva, name='publicacion_nueva')\n", (1287, 1358), False, 'from django.urls import path, include\n'), ((1365, 1457), 'django.urls.path', 'path', (['"""blog/post/editar/<int:id>/"""', 'publicacion.views.editar'], {'name': '"""publicacion_editar"""'}), "('blog/post/editar/<int:id>/', publicacion.views.editar, name=\n 'publicacion_editar')\n", (1369, 1457), False, 'from django.urls import path, include\n'), ((1459, 1557), 'django.urls.path', 'path', (['"""blog/post/eliminar/<int:id>/"""', 'publicacion.views.eliminar'], {'name': '"""publicacion_eliminar"""'}), "('blog/post/eliminar/<int:id>/', publicacion.views.eliminar, name=\n 'publicacion_eliminar')\n", (1463, 1557), False, 'from django.urls import path, include\n'), ((1559, 1633), 'django.urls.path', 'path', (['"""blog/post/<int:id>/"""', 'publicacion.views.ver'], {'name': '"""publicacion_ver"""'}), "('blog/post/<int:id>/', publicacion.views.ver, name='publicacion_ver')\n", (1563, 1633), False, 'from django.urls import path, include\n'), ((1640, 1719), 'django.urls.path', 'path', (['"""blog/post/autor/"""', 'publicacion.views.autor'], {'name': '"""publicacion_por_autor"""'}), "('blog/post/autor/', publicacion.views.autor, name='publicacion_por_autor')\n", (1644, 1719), False, 'from django.urls import path, include\n'), ((1726, 1819), 'django.urls.path', 'path', (['"""blog/post/autor/<int:id>/"""', 'publicacion.views.autor'], {'name': '"""publicacion_por_autor"""'}), "('blog/post/autor/<int:id>/', publicacion.views.autor, name=\n 'publicacion_por_autor')\n", (1730, 1819), False, 'from django.urls import path, include\n'), ((1821, 1940), 'django.urls.path', 'path', (['"""blog/post/<int:publicacion_id>/comentario/<int:comentario_id>/"""', 'comentario.views.ver'], {'name': '"""comentario_ver"""'}), "('blog/post/<int:publicacion_id>/comentario/<int:comentario_id>/',\n comentario.views.ver, name='comentario_ver')\n", (1825, 1940), False, 'from django.urls import path, include\n'), ((1949, 2026), 'django.urls.path', 'path', (['"""categoria/listado/"""', 'categoria.views.listado'], {'name': '"""categoria_listado"""'}), "('categoria/listado/', categoria.views.listado, name='categoria_listado')\n", (1953, 2026), False, 'from django.urls import path, include\n'), ((2033, 2104), 'django.urls.path', 'path', (['"""categoria/nueva/"""', 'categoria.views.nueva'], {'name': '"""categoria_nueva"""'}), "('categoria/nueva/', categoria.views.nueva, name='categoria_nueva')\n", (2037, 
2104), False, 'from django.urls import path, include\n'), ((2111, 2199), 'django.urls.path', 'path', (['"""categoria/editar/<int:id>/"""', 'categoria.views.editar'], {'name': '"""categoria_editar"""'}), "('categoria/editar/<int:id>/', categoria.views.editar, name=\n 'categoria_editar')\n", (2115, 2199), False, 'from django.urls import path, include\n'), ((2201, 2295), 'django.urls.path', 'path', (['"""categoria/eliminar/<int:id>/"""', 'categoria.views.eliminar'], {'name': '"""categoria_eliminar"""'}), "('categoria/eliminar/<int:id>/', categoria.views.eliminar, name=\n 'categoria_eliminar')\n", (2205, 2295), False, 'from django.urls import path, include\n'), ((2297, 2375), 'django.urls.path', 'path', (['"""categoria/<int:id>/"""', 'categoria.views.filtrar'], {'name': '"""categoria_filtrar"""'}), "('categoria/<int:id>/', categoria.views.filtrar, name='categoria_filtrar')\n", (2301, 2375), False, 'from django.urls import path, include\n'), ((2384, 2462), 'django.urls.path', 'path', (['"""usuario/listado/"""', 'cuenta.views.usuario_listado'], {'name': '"""usuario_listado"""'}), "('usuario/listado/', cuenta.views.usuario_listado, name='usuario_listado')\n", (2388, 2462), False, 'from django.urls import path, include\n'), ((2469, 2541), 'django.urls.path', 'path', (['"""usuario/nuevo/"""', 'cuenta.views.usuario_nuevo'], {'name': '"""usuario_nuevo"""'}), "('usuario/nuevo/', cuenta.views.usuario_nuevo, name='usuario_nuevo')\n", (2473, 2541), False, 'from django.urls import path, include\n'), ((2548, 2637), 'django.urls.path', 'path', (['"""usuario/editar/<int:id>/"""', 'cuenta.views.usuario_editar'], {'name': '"""usuario_editar"""'}), "('usuario/editar/<int:id>/', cuenta.views.usuario_editar, name=\n 'usuario_editar')\n", (2552, 2637), False, 'from django.urls import path, include\n'), ((2639, 2734), 'django.urls.path', 'path', (['"""usuario/eliminar/<int:id>/"""', 'cuenta.views.usuario_eliminar'], {'name': '"""usuario_eliminar"""'}), "('usuario/eliminar/<int:id>/', cuenta.views.usuario_eliminar, name=\n 'usuario_eliminar')\n", (2643, 2734), False, 'from django.urls import path, include\n'), ((2736, 2805), 'django.urls.path', 'path', (['"""usuario/tipo/"""', 'cuenta.views.tipo_listado'], {'name': '"""tipo_listado"""'}), "('usuario/tipo/', cuenta.views.tipo_listado, name='tipo_listado')\n", (2740, 2805), False, 'from django.urls import path, include\n'), ((2812, 2883), 'django.urls.path', 'path', (['"""usuario/tipo/nuevo/"""', 'cuenta.views.tipo_nuevo'], {'name': '"""tipo_nuevo"""'}), "('usuario/tipo/nuevo/', cuenta.views.tipo_nuevo, name='tipo_nuevo')\n", (2816, 2883), False, 'from django.urls import path, include\n'), ((2890, 2977), 'django.urls.path', 'path', (['"""usuario/tipo/editar/<int:id>"""', 'cuenta.views.tipo_editar'], {'name': '"""tipo_editar"""'}), "('usuario/tipo/editar/<int:id>', cuenta.views.tipo_editar, name=\n 'tipo_editar')\n", (2894, 2977), False, 'from django.urls import path, include\n'), ((2979, 3072), 'django.urls.path', 'path', (['"""usuario/tipo/eliminar/<int:id>"""', 'cuenta.views.tipo_eliminar'], {'name': '"""tipo_eliminar"""'}), "('usuario/tipo/eliminar/<int:id>', cuenta.views.tipo_eliminar, name=\n 'tipo_eliminar')\n", (2983, 3072), False, 'from django.urls import path, include\n'), ((3076, 3127), 'django.urls.path', 'path', (['"""cuenta/"""', 'cuenta.views.cuenta'], {'name': '"""cuenta"""'}), "('cuenta/', cuenta.views.cuenta, name='cuenta')\n", (3080, 3127), False, 'from django.urls import path, include\n'), ((3134, 3208), 'django.urls.path', 'path', 
(['"""cuenta/registrar/"""', 'cuenta.views.registrar'], {'name': '"""cuenta_registrar"""'}), "('cuenta/registrar/', cuenta.views.registrar, name='cuenta_registrar')\n", (3138, 3208), False, 'from django.urls import path, include\n'), ((3215, 3290), 'django.urls.path', 'path', (['"""cuenta/iniciar/"""', 'cuenta.views.iniciar_sesion'], {'name': '"""iniciar_sesion"""'}), "('cuenta/iniciar/', cuenta.views.iniciar_sesion, name='iniciar_sesion')\n", (3219, 3290), False, 'from django.urls import path, include\n'), ((3297, 3369), 'django.urls.path', 'path', (['"""cuenta/cerrar/"""', 'cuenta.views.cerrar_sesion'], {'name': '"""cerrar_sesion"""'}), "('cuenta/cerrar/', cuenta.views.cerrar_sesion, name='cerrar_sesion')\n", (3301, 3369), False, 'from django.urls import path, include\n'), ((3376, 3466), 'django.urls.path', 'path', (['"""cuenta/cambiarPassword/"""', 'cuenta.views.cambiarPassword'], {'name': '"""cambiarPassword"""'}), "('cuenta/cambiarPassword/', cuenta.views.cambiarPassword, name=\n 'cambiarPassword')\n", (3380, 3466), False, 'from django.urls import path, include\n'), ((3474, 3540), 'django.urls.path', 'path', (['"""restringido/"""', 'cuenta.views.restringido'], {'name': '"""restringido"""'}), "('restringido/', cuenta.views.restringido, name='restringido')\n", (3478, 3540), False, 'from django.urls import path, include\n')]
|
from builtins import super
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
# from django.core.checks import messages
from django.shortcuts import render, get_object_or_404
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic import DeleteView, CreateView
from extra_views import CreateWithInlinesView, UpdateWithInlinesView, InlineFormSetFactory
from hitcount.views import HitCountDetailView
from accounts.decorators import UserRequiredMixin, employer_required
from category.models import Category
from company.models import Company, CompanyImage, OpeningHours, ClosingRules
# Category views
from jobcorner import settings
from location.models import Location
from reviews.forms import ReviewForm
from .filters import CompanyFilter
from .forms import CompanyForm, OpeningHoursForm, CompanyFilterForm
def company_list_view(request):
company_list = Company.published.all()
company_filter = CompanyFilter(request.GET, queryset=company_list)
form = CompanyFilterForm(data=request.GET)
facets = {
"selected": {},
"catego": {
"category": Category.objects.all(),
"location": Location.objects.all(),
},
}
if form.is_valid():
category = form.cleaned_data["category"]
if category:
facets["selected"]["category"] = category
company_list = company_list.filter(category=category).distinct()
location = form.cleaned_data["location"]
if location:
facets["selected"]["location"] = location
company_list = company_list.filter(location=location).distinct()
if settings.DEBUG:
from pprint import pprint
pprint(facets)
context = {
"form": form,
"facets": facets,
"object_list": company_list,
'filter': company_filter,
}
return render(request, 'company/list.html', context)
class PhotosInline(InlineFormSetFactory):
model = CompanyImage
# form_class = CompanyPhotoFormSet
fields = ['img', 'alt']
@method_decorator([login_required, employer_required], name='dispatch')
class CompanyCreate(CreateWithInlinesView):
model = Company
inlines = [PhotosInline]
form_class = CompanyForm
template_name = 'company/form.html'
def forms_valid(self, form, inlines):
form.instance.user = self.request.user
return super(CompanyCreate, self).forms_valid(form, inlines)
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# context['title'] = "Add your Company"
class CompanyEdit(LoginRequiredMixin, UserRequiredMixin, UpdateWithInlinesView):
model = Company
inlines = [PhotosInline]
slug_url_kwarg = 'slug'
form_class = CompanyForm
template_name = 'company/form.html'
# def get_context_data(self, **kwargs):
# context = super().get_context_data(**kwargs)
# # context['company'] = Company.objects.all()[:5]
# context['title'] = " Update Company "
def forms_valid(self, form, inlines):
form.instance.user = self.request.user
return super(CompanyEdit, self).forms_valid(form, inlines)
def get_success_url(self):
return self.object.get_absolute_url()
class CompanyDelete(LoginRequiredMixin, UserRequiredMixin, DeleteView):
model = Company
success_url = reverse_lazy('company:list')
template_name = 'delete.html'
class CompanyDetail(HitCountDetailView):
queryset = Company.published.all()
template_name = 'company/detail.html'
context_object_name = 'company'
slug_field = 'slug'
count_hit = True
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# context['meta'] = self.get_object().as_meta(self.request)
context['company_image'] = CompanyImage.objects.filter(company=self.get_object())
context['open_hours'] = OpeningHours.objects.filter(company=self.get_object())
context['closing_rules'] = ClosingRules.objects.filter(company=self.get_object())
context['form'] = ReviewForm()
context['related'] = self.object.tags.similar_objects()[:4]
return context
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super().post(request, *args, **kwargs)
#
# class OpeningHourCreate(LoginRequiredMixin, ModelFormSetView):
# model = OpeningHours
# form_class = OpeningHoursForm
# # formset_class = OpeningHoursFormset
# template_name = 'company/formset.html'
# flocationy_kwargs = {'can_order': False, 'can_delete': False}
# # formset_kwargs = {'auto_id': 'my_id_%s'}
#
#
# def form_valid(self, form):
# form.instance.company = get_object_or_404(Company, slug=self.kwargs['slug'])
# form.save()
# return super().form_valid(form)
#
# def form_invalid(self, form):
# """
# If the form is invalid, re-render the context data with the
# data-filled form and errors.
# """
# print('the is an error in your form')
# messages.warning(self.request, 'There was an error in this form')
# return self.render_to_response(self.get_context_data(form=form))
#
class OpeningHourCreate(LoginRequiredMixin, CreateView):
model = OpeningHours
form_class = OpeningHoursForm
template_name = 'form.html'
def form_valid(self, form):
form.instance.company = get_object_or_404(Company, slug=self.kwargs['slug'])
form.save()
return super().form_valid(form)
def form_invalid(self, form):
"""
If the form is invalid, re-render the context data with the
data-filled form and errors.
"""
print('the is an error in your form')
messages.warning(self.request, 'There was an error in this form')
return self.render_to_response(self.get_context_data(form=form))
# follow(request.user)
# unfollow(request.user)
# followers(request.user) # returns a list of Users who follow request.user
# following(request.user) # returns a list of locations who request.user is following
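# Hedged sketch (field definitions assumed): company_list_view above only needs
# CompanyFilterForm to expose optional `category` and `location` choices, e.g.
#
#   class CompanyFilterForm(forms.Form):
#       category = forms.ModelChoiceField(queryset=Category.objects.all(), required=False)
#       location = forms.ModelChoiceField(queryset=Location.objects.all(), required=False)
#
# so that form.cleaned_data['category'] / ['location'] drive the queryset filters.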
|
[
"reviews.forms.ReviewForm",
"location.models.Location.objects.all",
"django.utils.decorators.method_decorator",
"builtins.super",
"django.urls.reverse_lazy",
"django.shortcuts.get_object_or_404",
"company.models.Company.published.all",
"pprint.pprint",
"django.shortcuts.render",
"django.contrib.messages.warning",
"category.models.Category.objects.all"
] |
[((2175, 2245), 'django.utils.decorators.method_decorator', 'method_decorator', (['[login_required, employer_required]'], {'name': '"""dispatch"""'}), "([login_required, employer_required], name='dispatch')\n", (2191, 2245), False, 'from django.utils.decorators import method_decorator\n'), ((1022, 1045), 'company.models.Company.published.all', 'Company.published.all', ([], {}), '()\n', (1043, 1045), False, 'from company.models import Company, CompanyImage, OpeningHours, ClosingRules\n'), ((1990, 2035), 'django.shortcuts.render', 'render', (['request', '"""company/list.html"""', 'context'], {}), "(request, 'company/list.html', context)\n", (1996, 2035), False, 'from django.shortcuts import render, get_object_or_404\n'), ((3498, 3526), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""company:list"""'], {}), "('company:list')\n", (3510, 3526), False, 'from django.urls import reverse_lazy\n'), ((3619, 3642), 'company.models.Company.published.all', 'Company.published.all', ([], {}), '()\n', (3640, 3642), False, 'from company.models import Company, CompanyImage, OpeningHours, ClosingRules\n'), ((1821, 1835), 'pprint.pprint', 'pprint', (['facets'], {}), '(facets)\n', (1827, 1835), False, 'from pprint import pprint\n'), ((4223, 4235), 'reviews.forms.ReviewForm', 'ReviewForm', ([], {}), '()\n', (4233, 4235), False, 'from reviews.forms import ReviewForm\n'), ((5586, 5638), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Company'], {'slug': "self.kwargs['slug']"}), "(Company, slug=self.kwargs['slug'])\n", (5603, 5638), False, 'from django.shortcuts import render, get_object_or_404\n'), ((5917, 5982), 'django.contrib.messages.warning', 'messages.warning', (['self.request', '"""There was an error in this form"""'], {}), "(self.request, 'There was an error in this form')\n", (5933, 5982), False, 'from django.contrib import messages\n'), ((1248, 1270), 'category.models.Category.objects.all', 'Category.objects.all', ([], {}), '()\n', (1268, 1270), False, 'from category.models import Category\n'), ((1296, 1318), 'location.models.Location.objects.all', 'Location.objects.all', ([], {}), '()\n', (1316, 1318), False, 'from location.models import Location\n'), ((2513, 2539), 'builtins.super', 'super', (['CompanyCreate', 'self'], {}), '(CompanyCreate, self)\n', (2518, 2539), False, 'from builtins import super\n'), ((3256, 3280), 'builtins.super', 'super', (['CompanyEdit', 'self'], {}), '(CompanyEdit, self)\n', (3261, 3280), False, 'from builtins import super\n'), ((3827, 3834), 'builtins.super', 'super', ([], {}), '()\n', (3832, 3834), False, 'from builtins import super\n'), ((4429, 4436), 'builtins.super', 'super', ([], {}), '()\n', (4434, 4436), False, 'from builtins import super\n'), ((5674, 5681), 'builtins.super', 'super', ([], {}), '()\n', (5679, 5681), False, 'from builtins import super\n')]
|
# coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ReplaceStringRegexRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'text_content': 'str',
'regular_expression_string': 'str',
'replace_with_string': 'str'
}
attribute_map = {
'text_content': 'TextContent',
'regular_expression_string': 'RegularExpressionString',
'replace_with_string': 'ReplaceWithString'
}
def __init__(self, text_content=None, regular_expression_string=None, replace_with_string=None): # noqa: E501
"""ReplaceStringRegexRequest - a model defined in Swagger""" # noqa: E501
self._text_content = None
self._regular_expression_string = None
self._replace_with_string = None
self.discriminator = None
if text_content is not None:
self.text_content = text_content
if regular_expression_string is not None:
self.regular_expression_string = regular_expression_string
if replace_with_string is not None:
self.replace_with_string = replace_with_string
@property
def text_content(self):
"""Gets the text_content of this ReplaceStringRegexRequest. # noqa: E501
Input text content # noqa: E501
:return: The text_content of this ReplaceStringRegexRequest. # noqa: E501
:rtype: str
"""
return self._text_content
@text_content.setter
def text_content(self, text_content):
"""Sets the text_content of this ReplaceStringRegexRequest.
Input text content # noqa: E501
:param text_content: The text_content of this ReplaceStringRegexRequest. # noqa: E501
:type: str
"""
self._text_content = text_content
@property
def regular_expression_string(self):
"""Gets the regular_expression_string of this ReplaceStringRegexRequest. # noqa: E501
Target input regular expression (regex) string to match and be replaced; supports all regular expression values # noqa: E501
:return: The regular_expression_string of this ReplaceStringRegexRequest. # noqa: E501
:rtype: str
"""
return self._regular_expression_string
@regular_expression_string.setter
def regular_expression_string(self, regular_expression_string):
"""Sets the regular_expression_string of this ReplaceStringRegexRequest.
Target input regular expression (regex) string to match and be replaced; supports all regular expression values # noqa: E501
:param regular_expression_string: The regular_expression_string of this ReplaceStringRegexRequest. # noqa: E501
:type: str
"""
self._regular_expression_string = regular_expression_string
@property
def replace_with_string(self):
"""Gets the replace_with_string of this ReplaceStringRegexRequest. # noqa: E501
Replacement for target string; supports referencing indexed regex matched values from RegularExpressionString, such as $1, $2, and so on # noqa: E501
:return: The replace_with_string of this ReplaceStringRegexRequest. # noqa: E501
:rtype: str
"""
return self._replace_with_string
@replace_with_string.setter
def replace_with_string(self, replace_with_string):
"""Sets the replace_with_string of this ReplaceStringRegexRequest.
Replacement for target string; supports referencing indexed regex matched values from RegularExpressionString, such as $1, $2, and so on # noqa: E501
:param replace_with_string: The replace_with_string of this ReplaceStringRegexRequest. # noqa: E501
:type: str
"""
self._replace_with_string = replace_with_string
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ReplaceStringRegexRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReplaceStringRegexRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
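# Hedged usage sketch (the example strings are invented): build a request model,
# then serialize it the way the generated API client would before sending it.
if __name__ == "__main__":
    demo_request = ReplaceStringRegexRequest(
        text_content='Order #123 shipped',
        regular_expression_string=r'#(\d+)',
        replace_with_string=r'ref $1')
    # to_dict() walks swagger_types, so the keys mirror the attribute names.
    print(demo_request.to_dict())
    print(demo_request)  # __repr__ -> pprint-formatted dict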
|
[
"six.iteritems"
] |
[((4485, 4518), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (4498, 4518), False, 'import six\n')]
|
"""
WebObs class
"""
from astropy.table import Table
from astropy import units as u
from astropy.coordinates import SkyCoord
import numpy as np
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
import os
import io
import wget
import requests
class WebObs(object):
"""
Class for AAVSO web observation.
fileoutput = aavso.html
filtername = vis|ccd
"""
def __init__(self, nameID, filtername='Vis', fileoutput='aavso.html'):
self.nameID = nameID
self.filter = filtername
self.fileoutput = fileoutput
self.titlename = ''
self.comment = ''
self.observation = Table()
self.html = BeautifulSoup()
self.available = False
self._period = 0
self.filter = self.isfilter(filtername)
self.read
@property
def read(self):
"""
Return html of observation
Ex: wget --no-check-certificate 'https://app.aavso.org/webobs/results/?star=' -O aavso.html
"""
        if os.path.exists(self.fileoutput):
os.remove(self.fileoutput)
if ' ' in self.nameID:
nameID = self.nameID.replace(' ','%20')
else:
nameID = self.nameID
if self.isccd:
filtername = 'ccd'
else:
filtername = 'vis'
url = 'https://app.aavso.org/webobs/results/?star=' + nameID + '&num_results=200' + '&obs_types=' + filtername
filedownload = wget.download(url,out=self.fileoutput,bar=None)
with open(filedownload) as fp:
self.html = BeautifulSoup(fp, 'html.parser')
if self.noerror == 0 :
self.available = True
self.title
self.comments
self.table
else:
self.available = False
@property
def title(self):
self.titlename = self.html.title.contents[0] + ' -- ' + self.nameID
return self.titlename
@property
def comments(self):
if self.available:
comment = self.html.find(id='obsinfo').contents[0].string
comment = comment + self.html.find(id='obsinfo').contents[1].string
comment = comment + self.html.find(id='obsinfo').contents[2].string.replace('\n \n','').replace('\n','').replace(' ',' ')
comment = comment + self.html.find(id='obsinfo').contents[3].string
comment = comment + self.html.find(id='obsinfo').contents[4].string.replace('\n \n \n \n ','')
comment = comment + self.html.find(id='obsinfo').contents[5].string
comment = comment + self.html.find(id='obsinfo').contents[6].string.replace('\n \n \n \n \n ','')
self.comment = comment
return self.comment
def isfilter(self,filtername='vis'):
"""
Return filter
"""
if filtername in ['Vis','I','R','B','V']:
f = filtername
else:
f = 'Vis'
return f
@property
def isccd(self):
"""
Return true if in ccd filter
"""
if self.filter in ['I','R','B','V']:
return True
else:
return False
@property
def data(self):
"""
Return data of html file observations
"""
data = []
if self.available:
data = self.html.table.contents[3].get_text().replace('\n','|').replace('Details...|||||||||Comp Star|Check Star|Transformed|Chart|Comment Codes|Notes|||||','').replace('|||||||||','<>').replace('|||||||','').replace('|||','').replace('| (','(').replace('| ','').split('<>')
return data
@property
def table(self):
"""
Return Table of html file observations
"""
Star = []
JD = []
Calendar_Date = []
Magnitude = []
Error = []
Filter = []
Observer = []
Comp_Star = []
Check_Star = []
Transformed = []
Chart = []
Comment_Codes = []
Notes = []
if self.available:
for ligne in self.data:
data = ligne.split('|')
if self.filter in data[5]:
Star.append(data[0])
JD.append(float(data[1]))
Calendar_Date.append(data[2])
if isinstance(data[3], int) or isinstance(data[3], float):
Magnitude.append(float(data[3]))
else:
Magnitude.append(float(data[3].replace('<','')))
Error.append(data[4])
Filter.append(data[5])
Observer.append(data[6])
Comp_Star.append(data[7])
Check_Star.append(data[8])
Transformed.append(data[9])
Chart.append(data[10])
Comment_Codes.append(data[11])
Notes.append(data[12])
if len(Star) > 0:
self.observation = Table([Star,JD,Calendar_Date,Magnitude,Error,Filter,Observer,Comp_Star,Check_Star,Transformed,Chart,Comment_Codes,Notes],
names=['Star', 'JD', 'Calendar Date', 'Magnitude', 'Error', 'Filter', 'Observer', 'Comp Star', 'Check Star', 'Transformed', 'Chart', 'Comment Codes', 'Notes'])
self._period = self.observation['JD'][0] - self.observation['JD'][len(self.observation)-1]
return self.observation
@property
def period(self):
"""
Return period JD
"""
if self.observation:
return self._period
@property
def observations(self):
"""
Return observations table
"""
if self.observation:
return self.observation
@property
def JDMinMax(self):
"""
Return min and max JD in observations table
"""
if self.observation:
return self.observation['JD'][len(self.observation)-1],self.observation['JD'][0]
@property
def magnitudeMinMax(self):
"""
Return min and max of magnitude in observations table
"""
if self.observation:
return min(self.observation['Magnitude']),max(self.observation['Magnitude'])
def plot(self):
"""
Plot observations table
"""
if self.available:
jd_min,jd_max = self.JDMinMax
mv_min,mv_max = self.magnitudeMinMax
x = []
for value in self.observations:
x.append(value['JD']-jd_min)
y = self.observations['Magnitude']
mymodel = np.poly1d(np.polyfit(x, y, 5))
myline = np.linspace(0, jd_max-jd_min, 2000)
plt.xlim(-.5,round(jd_max-jd_min)+.5)
plt.ylim(round(mv_min)-0.5,round(mv_max)+0.5)
plt.gca().invert_yaxis()
plt.scatter(x, y, c = 'black', s = 2, alpha = 0.5)
plt.plot(myline, mymodel(myline))
plt.title(self.title, loc='center')
plt.xlabel(str(int(jd_min))+'\nJD', fontsize = 12)
if self.filter == 'Vis':
plt.ylabel(r'$m_v$', fontsize = 12)
else:
plt.ylabel(self.filter, fontsize = 12)
plt.show()
else:
print(self.comment)
@property
def noerror(self):
"""
Error handling
"""
error_code = 0
if 'errors' in self.html.p.get_text():
error_code = 404
self.comment = 'The star ' + self.nameID + ' cannot be found in our database.'
else:
if 'no results' in self.html.p.get_text():
error_code = 404
self.comment = 'The star ' + self.nameID + ' cannot be found in our database.'
return error_code
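# Hedged usage sketch for WebObs (star name and filter are illustrative; the
# call downloads up to 200 recent observations from app.aavso.org):
#
#   obs = WebObs('eta Car', filtername='V')
#   if obs.available:
#       print(obs.comments)        # summary line scraped from the results page
#       print(obs.JDMinMax)        # (oldest JD, newest JD) in the downloaded table
#       obs.plot()                 # magnitude vs. JD with a degree-5 polynomial fit
#   else:
#       print(obs.comment)         # e.g. star not found / no results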
class datadownload(object):
"""
Class for AAVSO for data download (https://www.aavso.org/data-download).
fileinput = datadownload.csv
filtername = Vis|B|V|R|I|TG|CV
"""
def __init__(self, filtername='Vis.', fileinput='aavsodata.csv'):
self.nameID = ''
self.filter = filtername
self.fileinput = fileinput
self.titlename = ''
self.comment = ''
self.observation = Table()
self.JDline = _JDline()
self.available = False
self._period = 0
self.filter = self.isfilter(filtername)
self.read
def isfilter(self,filtername='Vis.'):
"""
Return filter
"""
if filtername in ['Vis.','I','R','B','V','CV','TG']:
f = filtername
else:
f = 'Vis.'
return f
@property
def read(self):
"""
Return table of observation
"""
self.observation = Table.read(self.fileinput, format='ascii.csv')
if len(self.observation) > 0:
self.available = True
self.title
self.period
self.comments
else:
self.available = False
def filtername(self, filtername='Vis.'):
"""
Update filter
"""
if self.available:
self.filter = self.isfilter(filtername)
@property
def Vis(self):
if self.available:
self.filter = 'Vis.'
@property
def I(self):
if self.available:
self.filter = 'I'
@property
def R(self):
if self.available:
self.filter = 'R'
@property
def V(self):
if self.available:
self.filter = 'V'
@property
def B(self):
if self.available:
self.filter = 'B'
@property
def CV(self):
if self.available:
self.filter = 'CV'
@property
def TG(self):
if self.available:
self.filter = 'TG'
@property
def period(self):
"""
Return period JD
"""
if self.available:
self._period = self.observation['JD'][len(self.observation)-1] - self.observation['JD'][0]
return self._period
@property
def title(self):
if self.available:
self.titlename = 'AAVSO -- data-download -- ' + self.observation['Star Name'][0]
return self.titlename
@property
def comments(self):
if self.available:
observers = []
for i in self.observation['Observer Code'] :
if i not in observers:
observers.append(i)
comment = 'Showing ' + str(len(self.observation)) + ' observations for ' + self.observation['Star Name'][0] + ' from ' + str(len(observers)) + ' observers'
self.comment = comment
return self.comment
@property
def observations(self):
"""
Return observations table
"""
if self.observation:
return self.observation
@property
def JDMinMax(self):
"""
Return min and max JD in observations table
"""
if self.observation:
return self.observation['JD'][0],self.observation['JD'][len(self.observation)-1]
@property
def magnitudeMinMax(self):
"""
Return min and max of magnitude in observations table
"""
if self.observation:
mv = []
for value in self.observations:
if self.filter == value['Band']:
if '<' not in value['Magnitude']:
mv.append(float(value['Magnitude']))
return min(mv),max(mv)
@property
def JulianDay(self):
"""
Return JD table
"""
return self.JDline.JulianDay
@JulianDay.setter
def JulianDay(self,JDtable):
"""
Create JD table
"""
self.JDline.JulianDay = JDtable
def plot(self):
"""
Plot observations table
"""
if self.available:
jd_min,jd_max = self.JDMinMax
mv_min,mv_max = self.magnitudeMinMax
x = []
y = []
for value in self.observations:
if self.filter == value['Band']:
if '<' not in value['Magnitude']:
x.append(value['JD'])
y.append(float(value['Magnitude']))
plt.xlim(round(jd_min)-5,round(jd_max)+5)
plt.ylim(round(mv_min)-1,round(mv_max)+1)
plt.gca().invert_yaxis()
plt.scatter(x, y, c = 'black', s = 2, alpha = 0.2)
self.JDline.plot()
plt.title(self.title, loc='center')
plt.xlabel('JD', fontsize = 12)
if self.filter == 'Vis':
plt.ylabel(r'$m_v$', fontsize = 12)
else:
plt.ylabel(self.filter, fontsize = 12)
plt.show()
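# A minimal usage sketch for the class above; the CSV file name, the filter and
# the JD marker are illustrative values and assume an export from
# https://www.aavso.org/data-download saved next to this module.
def _demo_datadownload():
    dd = datadownload(filtername='V', fileinput='aavsodata.csv')
    if dd.available:
        print(dd.comments)           # "Showing N observations for <star> from M observers"
        dd.JulianDay = [2459000.5]   # draw a vertical marker at this Julian Day
        dd.plot()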
class vsx(object):
"""
    Class for querying AAVSO VSX; returns an astropy Table.
"""
def __init__(self, nameID):
self.nameID = nameID
self.vsx_table = Table()
self.available = False
self.read
@property
def read(self):
"""
Return TABLE of Variable
"""
self.table
@property
def data(self):
"""
Return JSON data
Source : https://www.aavso.org/direct-web-query-vsxvsp
"""
if ' ' in self.nameID:
nameID = self.nameID.replace(' ','%20')
else:
nameID = self.nameID
url = "http://www.aavso.org/vsx/index.php"
params = {}
params['view']='api.object'
params['format']='json'
params['ident']=self.nameID
response = requests.get(url,params=params)
        if response.status_code >= 400:  # any HTTP error status means the star lookup failed
self.available = False
else:
self.available = True
return response.json()
@property
def table(self):
"""
Return data table
"""
result = self.data['VSXObject']
header = []
value = []
types = []
for item in result:
value.append(result[item])
header.append(item)
types.append('str')
self.vsx_table = Table(names = header, dtype = types)
self.vsx_table.add_row(value)
@property
def observations(self):
"""
Return vsx table
"""
if self.available:
return self.vsx_table
@property
def name(self):
"""
Return vsx name
"""
if self.available:
return self.vsx_table['Name'][0]
@property
def coordinates(self):
"""
Return vsx RA,DEC (degree,degree)
"""
if self.available:
return float(self.vsx_table['RA2000']), float(self.vsx_table['Declination2000'])
@property
def hourdegree(self):
"""
Return vsx RA,DEC (Hour,Degree)
"""
if self.available:
c = SkyCoord(ra=float(self.vsx_table['RA2000'])*u.degree, dec=float(self.vsx_table['Declination2000'])*u.degree)
return c.ra.hour, c.dec.degree
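# A minimal sketch of querying VSX through the class above; the star name is
# illustrative and the constructor performs a live HTTP request to AAVSO.
def _demo_vsx():
    star = vsx('del Cep')
    if star.available:
        print(star.name)
        ra_deg, dec_deg = star.coordinates     # both in degrees
        ra_hours, _ = star.hourdegree          # RA in hours, Dec in degrees
        print(ra_deg, ra_hours, dec_deg)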
class _JDline(object):
"""
Class line Julian Day
"""
def __init__(self):
self.JDtable = []
@property
def JulianDay(self):
"""
Return JD table
"""
return self.JDtable
@JulianDay.setter
def JulianDay(self,JDtable):
"""
Add JD's
"""
if len(JDtable) > 0:
for number in JDtable:
self.JDtable.append(number)
else:
self.JDtable.clear()
def plot(self):
"""
Plot line of JD's
"""
plt.vlines(self.JDtable, -30,30 , linestyles = 'solid', colors = 'grey', alpha = 0.3)
|
[
"matplotlib.pyplot.title",
"os.remove",
"astropy.table.Table",
"matplotlib.pyplot.show",
"numpy.polyfit",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.scatter",
"os.path.exists",
"matplotlib.pyplot.vlines",
"wget.download",
"requests.get",
"numpy.linspace",
"bs4.BeautifulSoup",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"astropy.table.Table.read"
] |
[((647, 654), 'astropy.table.Table', 'Table', ([], {}), '()\n', (652, 654), False, 'from astropy.table import Table\n'), ((675, 690), 'bs4.BeautifulSoup', 'BeautifulSoup', ([], {}), '()\n', (688, 690), False, 'from bs4 import BeautifulSoup\n'), ((1024, 1055), 'os.path.exists', 'os.path.exists', (['self.fileoutput'], {}), '(self.fileoutput)\n', (1038, 1055), False, 'import os\n'), ((1486, 1535), 'wget.download', 'wget.download', (['url'], {'out': 'self.fileoutput', 'bar': 'None'}), '(url, out=self.fileoutput, bar=None)\n', (1499, 1535), False, 'import wget\n'), ((8417, 8424), 'astropy.table.Table', 'Table', ([], {}), '()\n', (8422, 8424), False, 'from astropy.table import Table\n'), ((8953, 8999), 'astropy.table.Table.read', 'Table.read', (['self.fileinput'], {'format': '"""ascii.csv"""'}), "(self.fileinput, format='ascii.csv')\n", (8963, 8999), False, 'from astropy.table import Table\n'), ((13314, 13321), 'astropy.table.Table', 'Table', ([], {}), '()\n', (13319, 13321), False, 'from astropy.table import Table\n'), ((13954, 13986), 'requests.get', 'requests.get', (['url'], {'params': 'params'}), '(url, params=params)\n', (13966, 13986), False, 'import requests\n'), ((14494, 14526), 'astropy.table.Table', 'Table', ([], {'names': 'header', 'dtype': 'types'}), '(names=header, dtype=types)\n', (14499, 14526), False, 'from astropy.table import Table\n'), ((16029, 16108), 'matplotlib.pyplot.vlines', 'plt.vlines', (['self.JDtable', '(-30)', '(30)'], {'linestyles': '"""solid"""', 'colors': '"""grey"""', 'alpha': '(0.3)'}), "(self.JDtable, -30, 30, linestyles='solid', colors='grey', alpha=0.3)\n", (16039, 16108), True, 'import matplotlib.pyplot as plt\n'), ((1070, 1096), 'os.remove', 'os.remove', (['self.fileoutput'], {}), '(self.fileoutput)\n', (1079, 1096), False, 'import os\n'), ((1599, 1631), 'bs4.BeautifulSoup', 'BeautifulSoup', (['fp', '"""html.parser"""'], {}), "(fp, 'html.parser')\n", (1612, 1631), False, 'from bs4 import BeautifulSoup\n'), ((5111, 5421), 'astropy.table.Table', 'Table', (['[Star, JD, Calendar_Date, Magnitude, Error, Filter, Observer, Comp_Star,\n Check_Star, Transformed, Chart, Comment_Codes, Notes]'], {'names': "['Star', 'JD', 'Calendar Date', 'Magnitude', 'Error', 'Filter', 'Observer',\n 'Comp Star', 'Check Star', 'Transformed', 'Chart', 'Comment Codes', 'Notes'\n ]"}), "([Star, JD, Calendar_Date, Magnitude, Error, Filter, Observer,\n Comp_Star, Check_Star, Transformed, Chart, Comment_Codes, Notes], names\n =['Star', 'JD', 'Calendar Date', 'Magnitude', 'Error', 'Filter',\n 'Observer', 'Comp Star', 'Check Star', 'Transformed', 'Chart',\n 'Comment Codes', 'Notes'])\n", (5116, 5421), False, 'from astropy.table import Table\n'), ((6800, 6837), 'numpy.linspace', 'np.linspace', (['(0)', '(jd_max - jd_min)', '(2000)'], {}), '(0, jd_max - jd_min, 2000)\n', (6811, 6837), True, 'import numpy as np\n'), ((7006, 7050), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': '"""black"""', 's': '(2)', 'alpha': '(0.5)'}), "(x, y, c='black', s=2, alpha=0.5)\n", (7017, 7050), True, 'import matplotlib.pyplot as plt\n'), ((7115, 7150), 'matplotlib.pyplot.title', 'plt.title', (['self.title'], {'loc': '"""center"""'}), "(self.title, loc='center')\n", (7124, 7150), True, 'import matplotlib.pyplot as plt\n'), ((7388, 7398), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7396, 7398), True, 'import matplotlib.pyplot as plt\n'), ((12796, 12840), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': '"""black"""', 's': '(2)', 'alpha': '(0.2)'}), "(x, y, c='black', s=2, 
alpha=0.2)\n", (12807, 12840), True, 'import matplotlib.pyplot as plt\n'), ((12890, 12925), 'matplotlib.pyplot.title', 'plt.title', (['self.title'], {'loc': '"""center"""'}), "(self.title, loc='center')\n", (12899, 12925), True, 'import matplotlib.pyplot as plt\n'), ((12938, 12967), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""JD"""'], {'fontsize': '(12)'}), "('JD', fontsize=12)\n", (12948, 12967), True, 'import matplotlib.pyplot as plt\n'), ((13144, 13154), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13152, 13154), True, 'import matplotlib.pyplot as plt\n'), ((6758, 6777), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(5)'], {}), '(x, y, 5)\n', (6768, 6777), True, 'import numpy as np\n'), ((7267, 7299), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$m_v$"""'], {'fontsize': '(12)'}), "('$m_v$', fontsize=12)\n", (7277, 7299), True, 'import matplotlib.pyplot as plt\n'), ((7337, 7373), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['self.filter'], {'fontsize': '(12)'}), '(self.filter, fontsize=12)\n', (7347, 7373), True, 'import matplotlib.pyplot as plt\n'), ((13023, 13055), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$m_v$"""'], {'fontsize': '(12)'}), "('$m_v$', fontsize=12)\n", (13033, 13055), True, 'import matplotlib.pyplot as plt\n'), ((13093, 13129), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['self.filter'], {'fontsize': '(12)'}), '(self.filter, fontsize=12)\n', (13103, 13129), True, 'import matplotlib.pyplot as plt\n'), ((6969, 6978), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6976, 6978), True, 'import matplotlib.pyplot as plt\n'), ((12759, 12768), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (12766, 12768), True, 'import matplotlib.pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 25 13:52:02 2018
@author: sunka
"""
import pandas as pd
dataframe = pd.read_csv('concrete.csv')
print(dataframe)
df1 = dataframe.iloc[:, 1:]    # every column except the first
df2 = dataframe.iloc[:, :8]    # the first eight columns
from sklearn import preprocessing
# note: sklearn's normalize rescales each row (each sample) to unit norm
df1 = preprocessing.normalize(df1)
df2 = preprocessing.normalize(df2)
from sklearn import model_selection
train_data , test_data , train_target , test_target = model_selection.train_test_split(df1,df2)
from sklearn import linear_model
regression = linear_model.LinearRegression()
fitting = regression.fit(train_data,train_target)
result = regression.predict(test_data)
print(result)
coefficient = regression.coef_
intercept = regression.intercept_
print("The coefficeint is " + str(coefficient))
print("Intercept is " + str(intercept))
from sklearn import metrics
mean_square_error = metrics.mean_squared_error(test_target,result)
print("Mean square error is " + str(mean_square_error))
varience = metrics.r2_score(test_target,result)
print("Varience is " + str(varience))
from matplotlib import pyplot
pyplot.hist(train_data)
pyplot.hist(result)
# Output
pyplot.scatter(test_target,result)
pyplot.title("Output")
pyplot.scatter(result,result-test_target)
pyplot.title('Residue')
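# A hedged alternative split, assuming the last column of concrete.csv holds the
# regression target (compressive strength) and the first eight columns hold the
# features; these column positions are assumptions, not taken from the data file.
features = dataframe.iloc[:, :8]
target = dataframe.iloc[:, 8]
X_train, X_test, y_train, y_test = model_selection.train_test_split(features, target)
alt_model = linear_model.LinearRegression().fit(X_train, y_train)
print("Held-out R^2 with this split: " + str(alt_model.score(X_test, y_test)))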
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.hist",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.scatter",
"sklearn.metrics.r2_score",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.normalize",
"sklearn.metrics.mean_squared_error"
] |
[((127, 154), 'pandas.read_csv', 'pd.read_csv', (['"""concrete.csv"""'], {}), "('concrete.csv')\n", (138, 154), True, 'import pandas as pd\n'), ((283, 311), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['df1'], {}), '(df1)\n', (306, 311), False, 'from sklearn import preprocessing\n'), ((321, 349), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['df2'], {}), '(df2)\n', (344, 349), False, 'from sklearn import preprocessing\n'), ((449, 491), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['df1', 'df2'], {}), '(df1, df2)\n', (481, 491), False, 'from sklearn import model_selection\n'), ((547, 578), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (576, 578), False, 'from sklearn import linear_model\n'), ((914, 961), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['test_target', 'result'], {}), '(test_target, result)\n', (940, 961), False, 'from sklearn import metrics\n'), ((1034, 1071), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['test_target', 'result'], {}), '(test_target, result)\n', (1050, 1071), False, 'from sklearn import metrics\n'), ((1154, 1177), 'matplotlib.pyplot.hist', 'pyplot.hist', (['train_data'], {}), '(train_data)\n', (1165, 1177), False, 'from matplotlib import pyplot\n'), ((1179, 1198), 'matplotlib.pyplot.hist', 'pyplot.hist', (['result'], {}), '(result)\n', (1190, 1198), False, 'from matplotlib import pyplot\n'), ((1216, 1251), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['test_target', 'result'], {}), '(test_target, result)\n', (1230, 1251), False, 'from matplotlib import pyplot\n'), ((1252, 1274), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Output"""'], {}), "('Output')\n", (1264, 1274), False, 'from matplotlib import pyplot\n'), ((1278, 1322), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['result', '(result - test_target)'], {}), '(result, result - test_target)\n', (1292, 1322), False, 'from matplotlib import pyplot\n'), ((1321, 1344), 'matplotlib.pyplot.title', 'pyplot.title', (['"""Residue"""'], {}), "('Residue')\n", (1333, 1344), False, 'from matplotlib import pyplot\n')]
|
from core.differential_geometry import DifferentialGeometry
from core.distribution1d import Distribution1D
from core.light_sample import LightSample
from core.ray import Ray
from core.shape import Shape
from maths.config import infinity_max_f
from maths.normal import Normal
from maths.point3d import Point3d
from maths.vector3d import Vector3d
class ShapeSet:
def __init__(self, shape: Shape):
self.shapes = []
self.sumArea = 0.0
self.areas = []
self.areaDistribution = []
todo = [shape]
while len(todo)>0:
sh = todo.pop()
if sh.get_can_intersect():
self.shapes.append(sh)
else:
sh.get_refine(todo)
self.sumArea = 0.0
for i in range(len(self.shapes)):
area = self.shapes[i].Area()
self.areas.append(area)
self.sumArea += area
self.areaDistribution = Distribution1D(self.areas)
def Sample1(self, p: Point3d, ls: LightSample, Ns: Normal)->Point3d:
pdf, sn = self.areaDistribution.SampleDiscrete(ls.uComponent)
pt = self.shapes[sn].Sample2(p, ls.uPos, Ns)
# Find closest intersection of ray with shapes in _ShapeSet_
r = Ray(p, (pt-p).get_normalized(), 1e-3, infinity_max_f)
anyHit = False
thit = 1.0
dg = DifferentialGeometry()
for i in range(len(self.shapes)):
anyHit_b, thit_f = self.shapes[i].get_intersection(r, dg)
if anyHit_b:
anyHit = True
thit = thit_f
if anyHit:
Ns.Set(dg.normal)
return r.get_at(thit)
def Sample2(self, ls: LightSample, n: Normal)->Point3d:
pdf, sn = self.areaDistribution.SampleDiscrete(ls.uComponent)
return self.shapes[sn].Sample1(ls.uPos, n)
def Area(self):
return self.sumArea
def Pdf1(self, p: Point3d)->float:
pdf = 0.0
for i in range(len(self.shapes)):
pdf += self.areas[i] * self.shapes[i].Pdf1(p)
return pdf / self.sumArea
def Pdf2(self, p: Point3d, wi: Vector3d)->float:
pdf = 0.0
for i in range(len(self.shapes)):
pdf += self.areas[i] * self.shapes[i].Pdf2(p, wi)
return pdf / self.sumArea
|
[
"core.differential_geometry.DifferentialGeometry",
"core.distribution1d.Distribution1D"
] |
[((970, 996), 'core.distribution1d.Distribution1D', 'Distribution1D', (['self.areas'], {}), '(self.areas)\n', (984, 996), False, 'from core.distribution1d import Distribution1D\n'), ((1395, 1417), 'core.differential_geometry.DifferentialGeometry', 'DifferentialGeometry', ([], {}), '()\n', (1415, 1417), False, 'from core.differential_geometry import DifferentialGeometry\n')]
|
from .models import MemberProfile
import io
import csv
TOKEN_NAME = 'names'
def get_some_objects(list):
return MemberProfile.objects.filter(pk__in=list)
def parse_token_data(request_post):
f = io.StringIO(request_post[TOKEN_NAME])
reader = csv.reader(f, delimiter=',')
    user_ids = set()
    for row in reader:
        # accumulate ids from every CSV row rather than keeping only the last one
        user_ids |= set(row)
    return user_ids
def get_bulk_members(request, method="POST"):
if method == "POST":
user_ids = parse_token_data(request.POST)
elif method == "GET":
user_ids = parse_token_data(request.GET)
else:
raise ValueError("Unsupported method")
members = get_some_objects(user_ids)
return members
|
[
"io.StringIO",
"csv.reader"
] |
[((207, 244), 'io.StringIO', 'io.StringIO', (['request_post[TOKEN_NAME]'], {}), '(request_post[TOKEN_NAME])\n', (218, 244), False, 'import io\n'), ((258, 286), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (268, 286), False, 'import csv\n')]
|
from tsx.db import get_session
import logging
import sys
import argparse
import csv
from tqdm import tqdm
log = logging.getLogger(__name__)
def main():
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)-15s %(name)s %(levelname)-8s %(message)s')
parser = argparse.ArgumentParser(description='Import data source information')
parser.add_argument('--relax', action='store_true', dest='relax', help="Ignore invalid source ids")
parser.add_argument('--update-source-table', action='store_true', dest='update_source_table', help="Also update `source` table with authors, provider and description information")
parser.add_argument('filename', type=str, help='Data source file (aka master list) (CSV)')
args = parser.parse_args()
session = get_session()
session.execute("DELETE FROM data_source");
with open(args.filename) as f:
reader = csv.DictReader(f)
for row in tqdm(list(reader)):
row = {k: v.strip() for k, v in row.items()}
data = {
'source_id': row['SourceID'],
'taxon_id': row['TaxonID'],
'data_agreement_id': row.get('AgreementSigned') or None,
'objective_of_monitoring_id': lookup(row, 'ObjectiveOfMonitoring'),
'absences_recorded': get_bool(row, 'AbsencesRecorded', True, unknown_value_default=True),
'standardisation_of_method_effort_id': lookup(row, 'StandardisationOfMethodEffort', optional=True),
'consistency_of_monitoring_id': lookup(row, 'ConsistencyOfMonitoring', optional=True),
'start_year': row.get('StartYear') or None,
'end_year': row.get('EndYear') or None,
'exclude_from_analysis': get_bool(row, 'NotInIndex', False, unknown_value_default=True, optional=True),
'suppress_aggregated_data': get_bool(row, 'SuppressAggregatedDataUntil', False, unknown_value_default=True, optional=True)
}
# In relaxed mode, silently skip rows without SourceID value
if args.relax and row['SourceID'].strip() in ('', 'NULL', 'NA'):
continue
r = session.execute("SELECT 1 FROM source WHERE id = :id", { 'id': data['source_id'] }).fetchall()
if len(r) == 0:
if args.relax:
log.warning("Skipping unknown source id: %s" % data['source_id'])
continue
else:
raise ValueError("Invalid source id: %s" % data['source_id'])
try:
if data['data_agreement_id']:
data['data_agreement_id'] = int(data['data_agreement_id'])
except:
if args.relax:
log.warning("Treating unknown AgreementSigned value as blank: %s" % data['data_agreement_id'])
data['data_agreement_id'] = None
else:
raise ValueError("Invalid AgreementSigned: %s" % data['data_agreement_id'])
if args.update_source_table:
def strip_and_warn(s):
stripped = s.strip(". ")
if s != stripped:
log.warning("Stripping leading/trailing space/periods from '%s'", s)
return stripped
data['authors'] = strip_and_warn(row['Authors'])
data['provider'] = strip_and_warn(row['SourceProvider'])
data['description'] = strip_and_warn(row['SourceDesc'])
session.execute("""UPDATE source SET authors = :authors, provider = :provider, description = :description WHERE id = :source_id""", data)
session.execute("""INSERT INTO data_source (
source_id,
taxon_id,
data_agreement_id,
objective_of_monitoring_id,
absences_recorded,
standardisation_of_method_effort_id,
consistency_of_monitoring_id,
start_year,
end_year,
exclude_from_analysis,
suppress_aggregated_data
) VALUES (
:source_id,
:taxon_id,
:data_agreement_id,
:objective_of_monitoring_id,
:absences_recorded,
:standardisation_of_method_effort_id,
:consistency_of_monitoring_id,
:start_year,
:end_year,
:exclude_from_analysis,
:suppress_aggregated_data
)""",
data
)
session.commit()
LOOKUPS = {
'ObjectiveOfMonitoring': {
'Monitoring for targeted conservation management': 4,
'Monitoring for general conservation management – ‘surveillance’ monitoring.': 3,
'Baseline monitoring': 2,
'Monitoring for community engagement': 1
},
'ConsistencyOfMonitoring': {
'Balanced; all (or virtually all) sites surveyed in each year sampled (no, or virtually no, site turnover)': 4,
'Imbalanced (low turnover); sites surveyed consistently through time as established, but new sites are added to program with time': 3,
'Imbalanced (high turnover); new sites are surveyed with time, but monitoring of older sites is often not maintained': 2,
'Highly Imbalanced (very high turnover); different sites surveyed in different sampling periods. Sites are generally not surveyed consistently through time (highly biased)': 1
},
'StandardisationOfMethodEffort': {
'Pre-defined sites/plots surveyed repeatedly through time using a single standardised method and effort across the whole monitoring program': 6,
'Pre-defined sites/plots surveyed repeatedly through time with methods and effort standardised within site units, but not across program - i.e. different sites surveyed have different survey effort/methods': 5,
'Pre-defined sites/plots surveyed repeatedly through time with varying methods and effort': 4,
'Data collection using standardised methods and effort but surveys not site-based (i.e. surveys spatially ad-hoc). Post-hoc site grouping possible - e.g. a lot of fixed area/time searches conducted within a region but not at pre-defined sites': 3,
'Data collection using standardised methods and effort but surveys not site-based (i.e. surveys spatially ad-hoc). Post-hoc site grouping not possible': 2,
'Unstandardised methods/effort, surveys not site-based': 1
}
}
def get_bool(row, column, default=None, unknown_value_default=None, optional=False):
raw_value = row.get(column)
if optional and raw_value is None:
return default
raw_value = raw_value.strip()
value = raw_value.lower()
if value in ('1', 'yes', 'true', 'y', 't'):
return True
if value in ('0', 'no', 'false', 'n', 'f'):
return False
if value in ('', 'na', 'null'):
return default
else:
log.warning("Unknown value for %s: '%s', defaulting to %s" % (column, raw_value, unknown_value_default))
return unknown_value_default
def lookup(row, column, optional=False):
value = row.get(column)
if optional and value is None:
return None
lookup = LOOKUPS[column]
if value in ('', 'NA', '0'):
return None
elif value.isdigit() and int(value) in lookup.values():
return value
elif value in lookup:
return lookup[value]
else:
log.warning("Unknown value for %s: '%s', defaulting to None" % (column, value))
return None
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"logging.basicConfig",
"tsx.db.get_session",
"csv.DictReader",
"logging.getLogger"
] |
[((113, 140), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (130, 140), False, 'import logging\n'), ((155, 280), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO', 'format': '"""%(asctime)-15s %(name)s %(levelname)-8s %(message)s"""'}), "(stream=sys.stdout, level=logging.INFO, format=\n '%(asctime)-15s %(name)s %(levelname)-8s %(message)s')\n", (174, 280), False, 'import logging\n'), ((287, 356), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Import data source information"""'}), "(description='Import data source information')\n", (310, 356), False, 'import argparse\n'), ((772, 785), 'tsx.db.get_session', 'get_session', ([], {}), '()\n', (783, 785), False, 'from tsx.db import get_session\n'), ((876, 893), 'csv.DictReader', 'csv.DictReader', (['f'], {}), '(f)\n', (890, 893), False, 'import csv\n')]
|
"""Torch Module for Topology Adaptive Graph Convolutional layer"""
# pylint: disable= no-member, arguments-differ, invalid-name
import torch as th
from torch import nn
from .... import function as fn
class TAGConv(nn.Module):
r"""Topology Adaptive Graph Convolutional layer from paper `Topology
Adaptive Graph Convolutional Networks <https://arxiv.org/pdf/1710.10370.pdf>`__.
.. math::
\mathbf{X}^{\prime} = \sum_{k=0}^K \mathbf{D}^{-1/2} \mathbf{A}
\mathbf{D}^{-1/2}\mathbf{X} \mathbf{\Theta}_{k},
where :math:`\mathbf{A}` denotes the adjacency matrix and
:math:`D_{ii} = \sum_{j=0} A_{ij}` its diagonal degree matrix.
Parameters
----------
in_feats : int
Input feature size.
out_feats : int
Output feature size.
k: int, optional
Number of hops :math: `k`. (default: 2)
bias: bool, optional
If True, adds a learnable bias to the output. Default: ``True``.
activation: callable activation function/layer or None, optional
If not None, applies an activation function to the updated node features.
Default: ``None``.
Attributes
----------
lin : torch.Module
The learnable linear module.
"""
def __init__(self,
in_feats,
out_feats,
k=2,
bias=True,
activation=None):
super(TAGConv, self).__init__()
self._in_feats = in_feats
self._out_feats = out_feats
self._k = k
self._activation = activation
self.lin = nn.Linear(in_feats * (self._k + 1), out_feats, bias=bias)
self.reset_parameters()
def reset_parameters(self):
"""Reinitialize learnable parameters."""
gain = nn.init.calculate_gain('relu')
nn.init.xavier_normal_(self.lin.weight, gain=gain)
def forward(self, graph, feat):
r"""Compute topology adaptive graph convolution.
Parameters
----------
graph : DGLGraph
The graph.
feat : torch.Tensor
The input feature of shape :math:`(N, D_{in})` where :math:`D_{in}`
is size of input feature, :math:`N` is the number of nodes.
Returns
-------
torch.Tensor
The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
is size of output feature.
"""
with graph.local_scope():
assert graph.is_homogeneous(), 'Graph is not homogeneous'
norm = th.pow(graph.in_degrees().float().clamp(min=1), -0.5)
shp = norm.shape + (1,) * (feat.dim() - 1)
norm = th.reshape(norm, shp).to(feat.device)
#D-1/2 A D -1/2 X
fstack = [feat]
for _ in range(self._k):
rst = fstack[-1] * norm
graph.ndata['h'] = rst
graph.update_all(fn.copy_src(src='h', out='m'),
fn.sum(msg='m', out='h'))
rst = graph.ndata['h']
rst = rst * norm
fstack.append(rst)
rst = self.lin(th.cat(fstack, dim=-1))
if self._activation is not None:
rst = self._activation(rst)
return rst
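# A minimal sketch of applying the layer above to a small random graph; it
# assumes a DGL version compatible with this module (the fn.copy_src era), and
# the graph size and feature widths are illustrative.
def _tagconv_example():
    import dgl
    g = dgl.rand_graph(6, 20)      # 6 nodes, 20 random edges
    feat = th.randn(6, 10)          # one 10-dimensional feature per node
    conv = TAGConv(in_feats=10, out_feats=4, k=2, activation=th.relu)
    return conv(g, feat)           # -> tensor of shape (6, 4)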
|
[
"torch.nn.init.xavier_normal_",
"torch.cat",
"torch.nn.Linear",
"torch.nn.init.calculate_gain",
"torch.reshape"
] |
[((1582, 1639), 'torch.nn.Linear', 'nn.Linear', (['(in_feats * (self._k + 1))', 'out_feats'], {'bias': 'bias'}), '(in_feats * (self._k + 1), out_feats, bias=bias)\n', (1591, 1639), False, 'from torch import nn\n'), ((1770, 1800), 'torch.nn.init.calculate_gain', 'nn.init.calculate_gain', (['"""relu"""'], {}), "('relu')\n", (1792, 1800), False, 'from torch import nn\n'), ((1809, 1859), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.lin.weight'], {'gain': 'gain'}), '(self.lin.weight, gain=gain)\n', (1831, 1859), False, 'from torch import nn\n'), ((3134, 3156), 'torch.cat', 'th.cat', (['fstack'], {'dim': '(-1)'}), '(fstack, dim=-1)\n', (3140, 3156), True, 'import torch as th\n'), ((2661, 2682), 'torch.reshape', 'th.reshape', (['norm', 'shp'], {}), '(norm, shp)\n', (2671, 2682), True, 'import torch as th\n')]
|
import sys
sys.path.append('../core')
import argparse
import torch
import cv2
import numpy as np
from viz import sim3_visualization
from lietorch import SO3, SE3, Sim3
from networks.sim3_net import Sim3Net
def normalize_images(images):
images = images[:, :, [2,1,0]]
mean = torch.as_tensor([0.485, 0.456, 0.406], device=images.device)
std = torch.as_tensor([0.229, 0.224, 0.225], device=images.device)
return (images/255.0).sub_(mean[:, None, None]).div_(std[:, None, None])
def load_example(i=0):
""" get demo example """
DEPTH_SCALE = 5.0
if i==0:
image1 = cv2.imread('assets/image1.png')
image2 = cv2.imread('assets/image2.png')
depth1 = np.load('assets/depth1.npy') / DEPTH_SCALE
depth2 = np.load('assets/depth2.npy') / DEPTH_SCALE
elif i==1:
image1 = cv2.imread('assets/image3.png')
image2 = cv2.imread('assets/image4.png')
depth1 = np.load('assets/depth3.npy') / DEPTH_SCALE
depth2 = np.load('assets/depth4.npy') / DEPTH_SCALE
images = np.stack([image1, image2], 0)
images = torch.from_numpy(images).permute(0,3,1,2)
depths = np.stack([depth1, depth2], 0)
depths = torch.from_numpy(depths).float()
intrinsics = np.array([320.0, 320.0, 320.0, 240.0])
intrinsics = np.tile(intrinsics[None], (2,1))
intrinsics = torch.from_numpy(intrinsics).float()
return images[None].cuda(), depths[None].cuda(), intrinsics[None].cuda()
@torch.no_grad()
def demo(model, index=0):
images, depths, intrinsics = load_example(index)
# initial transformation estimate
if args.transformation == 'SE3':
Gs = SE3.Identity(1, 2, device='cuda')
elif args.transformation == 'Sim3':
Gs = Sim3.Identity(1, 2, device='cuda')
depths[:,0] *= 2**(2*torch.rand(1) - 1.0).cuda()
images1 = normalize_images(images)
ests, _ = model(Gs, images1, depths, intrinsics, num_steps=12)
# only care about last transformation
Gs = ests[-1]
T = Gs[:,0] * Gs[:,1].inv()
T = T[0].matrix().double().cpu().numpy()
sim3_visualization(T, images, depths, intrinsics)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--transformation', default='SE3', help='checkpoint to restore')
parser.add_argument('--ckpt', help='checkpoint to restore')
args = parser.parse_args()
model = Sim3Net(args)
model.load_state_dict(torch.load(args.ckpt))
model.cuda()
model.eval()
# run two demos
demo(model, 0)
demo(model, 1)
|
[
"sys.path.append",
"numpy.stack",
"numpy.load",
"argparse.ArgumentParser",
"lietorch.SE3.Identity",
"lietorch.Sim3.Identity",
"torch.load",
"viz.sim3_visualization",
"cv2.imread",
"networks.sim3_net.Sim3Net",
"numpy.array",
"numpy.tile",
"torch.rand",
"torch.as_tensor",
"torch.no_grad",
"torch.from_numpy"
] |
[((11, 37), 'sys.path.append', 'sys.path.append', (['"""../core"""'], {}), "('../core')\n", (26, 37), False, 'import sys\n'), ((1469, 1484), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1482, 1484), False, 'import torch\n'), ((285, 345), 'torch.as_tensor', 'torch.as_tensor', (['[0.485, 0.456, 0.406]'], {'device': 'images.device'}), '([0.485, 0.456, 0.406], device=images.device)\n', (300, 345), False, 'import torch\n'), ((356, 416), 'torch.as_tensor', 'torch.as_tensor', (['[0.229, 0.224, 0.225]'], {'device': 'images.device'}), '([0.229, 0.224, 0.225], device=images.device)\n', (371, 416), False, 'import torch\n'), ((1052, 1081), 'numpy.stack', 'np.stack', (['[image1, image2]', '(0)'], {}), '([image1, image2], 0)\n', (1060, 1081), True, 'import numpy as np\n'), ((1151, 1180), 'numpy.stack', 'np.stack', (['[depth1, depth2]', '(0)'], {}), '([depth1, depth2], 0)\n', (1159, 1180), True, 'import numpy as np\n'), ((1245, 1283), 'numpy.array', 'np.array', (['[320.0, 320.0, 320.0, 240.0]'], {}), '([320.0, 320.0, 320.0, 240.0])\n', (1253, 1283), True, 'import numpy as np\n'), ((1301, 1334), 'numpy.tile', 'np.tile', (['intrinsics[None]', '(2, 1)'], {}), '(intrinsics[None], (2, 1))\n', (1308, 1334), True, 'import numpy as np\n'), ((2089, 2138), 'viz.sim3_visualization', 'sim3_visualization', (['T', 'images', 'depths', 'intrinsics'], {}), '(T, images, depths, intrinsics)\n', (2107, 2138), False, 'from viz import sim3_visualization\n'), ((2181, 2206), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2204, 2206), False, 'import argparse\n'), ((2404, 2417), 'networks.sim3_net.Sim3Net', 'Sim3Net', (['args'], {}), '(args)\n', (2411, 2417), False, 'from networks.sim3_net import Sim3Net\n'), ((599, 630), 'cv2.imread', 'cv2.imread', (['"""assets/image1.png"""'], {}), "('assets/image1.png')\n", (609, 630), False, 'import cv2\n'), ((648, 679), 'cv2.imread', 'cv2.imread', (['"""assets/image2.png"""'], {}), "('assets/image2.png')\n", (658, 679), False, 'import cv2\n'), ((1654, 1687), 'lietorch.SE3.Identity', 'SE3.Identity', (['(1)', '(2)'], {'device': '"""cuda"""'}), "(1, 2, device='cuda')\n", (1666, 1687), False, 'from lietorch import SO3, SE3, Sim3\n'), ((2444, 2465), 'torch.load', 'torch.load', (['args.ckpt'], {}), '(args.ckpt)\n', (2454, 2465), False, 'import torch\n'), ((697, 725), 'numpy.load', 'np.load', (['"""assets/depth1.npy"""'], {}), "('assets/depth1.npy')\n", (704, 725), True, 'import numpy as np\n'), ((757, 785), 'numpy.load', 'np.load', (['"""assets/depth2.npy"""'], {}), "('assets/depth2.npy')\n", (764, 785), True, 'import numpy as np\n'), ((837, 868), 'cv2.imread', 'cv2.imread', (['"""assets/image3.png"""'], {}), "('assets/image3.png')\n", (847, 868), False, 'import cv2\n'), ((886, 917), 'cv2.imread', 'cv2.imread', (['"""assets/image4.png"""'], {}), "('assets/image4.png')\n", (896, 917), False, 'import cv2\n'), ((1095, 1119), 'torch.from_numpy', 'torch.from_numpy', (['images'], {}), '(images)\n', (1111, 1119), False, 'import torch\n'), ((1194, 1218), 'torch.from_numpy', 'torch.from_numpy', (['depths'], {}), '(depths)\n', (1210, 1218), False, 'import torch\n'), ((1351, 1379), 'torch.from_numpy', 'torch.from_numpy', (['intrinsics'], {}), '(intrinsics)\n', (1367, 1379), False, 'import torch\n'), ((1742, 1776), 'lietorch.Sim3.Identity', 'Sim3.Identity', (['(1)', '(2)'], {'device': '"""cuda"""'}), "(1, 2, device='cuda')\n", (1755, 1776), False, 'from lietorch import SO3, SE3, Sim3\n'), ((935, 963), 'numpy.load', 'np.load', (['"""assets/depth3.npy"""'], {}), 
"('assets/depth3.npy')\n", (942, 963), True, 'import numpy as np\n'), ((995, 1023), 'numpy.load', 'np.load', (['"""assets/depth4.npy"""'], {}), "('assets/depth4.npy')\n", (1002, 1023), True, 'import numpy as np\n'), ((1806, 1819), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (1816, 1819), False, 'import torch\n')]
|
import numpy as np
import matplotlib.cm as cm
import matplotlib.pylab as plt
def generate_colors(count):
cm = plt.get_cmap('gist_rainbow')
return np.array([cm(1. * i / count) for i in range(count)]).astype(float)
def get_q_color(id, ant_num):
from PyQt4 import QtGui
r, g, b = get_color(id, ant_num)
return QtGui.QColor(r, g, b)
def get_color(id, ant_num):
colors = cm.rainbow(np.linspace(0, 1, ant_num))
return int(colors[id][0] * 255), int(colors[id][1] * 255), int(colors[id][2] * 255)
def get_opacity(current_depth, max_depth):
return float((max_depth - current_depth) + float(current_depth/max_depth))/max_depth/2
def get_contrast_color(r, g, b):
if (r+g+b)/3 < 128:
return 250, 250, 255
return 5, 0, 5
|
[
"PyQt4.QtGui.QColor",
"matplotlib.cm",
"numpy.linspace",
"matplotlib.pylab.get_cmap"
] |
[((116, 144), 'matplotlib.pylab.get_cmap', 'plt.get_cmap', (['"""gist_rainbow"""'], {}), "('gist_rainbow')\n", (128, 144), True, 'import matplotlib.pylab as plt\n'), ((331, 352), 'PyQt4.QtGui.QColor', 'QtGui.QColor', (['r', 'g', 'b'], {}), '(r, g, b)\n', (343, 352), False, 'from PyQt4 import QtGui\n'), ((407, 433), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'ant_num'], {}), '(0, 1, ant_num)\n', (418, 433), True, 'import numpy as np\n'), ((166, 185), 'matplotlib.cm', 'cm', (['(1.0 * i / count)'], {}), '(1.0 * i / count)\n', (168, 185), True, 'import matplotlib.cm as cm\n')]
|
from django.db import models
class FullName(models.Model):
full_name = models.CharField(verbose_name='ФИО', max_length=256)
def __str__(self):
return self.full_name
class Meta:
verbose_name = 'ФИО'
verbose_name_plural = 'ФИО'
|
[
"django.db.models.CharField"
] |
[((77, 129), 'django.db.models.CharField', 'models.CharField', ([], {'verbose_name': '"""ФИО"""', 'max_length': '(256)'}), "(verbose_name='ФИО', max_length=256)\n", (93, 129), False, 'from django.db import models\n')]
|
from cffconvert import BibtexObject
import unittest
import os
import ruamel.yaml as yaml
from test.contracts.BibtexObject import Contract
class BibtexObjectTest(Contract, unittest.TestCase):
def setUp(self):
fixture = os.path.join(os.path.dirname(__file__), "CITATION.cff")
with open(fixture, "r") as f:
cffstr = f.read()
cff_object = yaml.safe_load(cffstr)
self.bo = BibtexObject(cff_object, initialize_empty=True)
def test_author(self):
# CFF file is not valid, hence contract does not apply
pass
def test_check_cff_object(self):
with self.assertRaises(ValueError) as context:
self.bo.check_cff_object()
self.assertTrue('Missing key "cff-version" in CITATION.cff file.' in str(context.exception))
def test_doi(self):
# CFF file is not valid, hence contract does not apply
pass
def test_month(self):
# CFF file is not valid, hence contract does not apply
pass
def test_print(self):
# CFF file is not valid, hence contract does not apply
pass
def test_title(self):
# CFF file is not valid, hence contract does not apply
pass
def test_url(self):
# CFF file is not valid, hence contract does not apply
pass
def test_year(self):
# CFF file is not valid, hence contract does not apply
pass
|
[
"os.path.dirname",
"cffconvert.BibtexObject",
"ruamel.yaml.safe_load"
] |
[((246, 271), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (261, 271), False, 'import os\n'), ((382, 404), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['cffstr'], {}), '(cffstr)\n', (396, 404), True, 'import ruamel.yaml as yaml\n'), ((427, 474), 'cffconvert.BibtexObject', 'BibtexObject', (['cff_object'], {'initialize_empty': '(True)'}), '(cff_object, initialize_empty=True)\n', (439, 474), False, 'from cffconvert import BibtexObject\n')]
|
from nltk.corpus import stopwords
import requests
import re
import nltk
nltk.download('stopwords')
WIKI_API_URL = "https://en.wikipedia.org/w/api.php"
inputTitleRepr = './ShortReprLists'
def retrieveCategoryFromJson(pages):
categories = []
for k, v in pages.items():
for cat in v['categories']:
titleCategory = cat['title'].replace('Category:', '')
if 'All' in titleCategory:
continue
if 'Pages' in titleCategory:
continue
if 'Articles' in titleCategory:
continue
if 'Wikipedia' in titleCategory:
continue
if 'Wikidata' in titleCategory:
continue
categories.append(titleCategory)
return list(set(categories))
def FindCategory(session, title):
category = []
PARAMS = {
"action": "query",
"format": "json",
"prop": "categories",
"titles": title
}
response = session.get(url=WIKI_API_URL, params=PARAMS)
data = response.json()
pages = data['query']['pages']
category.extend(retrieveCategoryFromJson(pages))
while "continue" in data:
clcontinue = data["continue"]["clcontinue"]
PARAMS["clcontinue"] = clcontinue
response = session.get(url=WIKI_API_URL, params=PARAMS)
data = response.json()
pages = data['query']['pages']
category.extend(retrieveCategoryFromJson(pages))
return list(set(category))
def getAllBacklinksFromFile(filename):
backlinks = []
row_number = 0
with open(inputTitleRepr+'/'+filename+'.txt.txt', 'r') as f:
for row in f:
row_number += 1
splitted = row.split(' -')
splitted = splitted[0].split(' ')
backlinks.extend(splitted)
return (row_number, backlinks)
def routine(session, title):
print('Processing {}...'.format(title))
categoryOfTitle = FindCategory(session, title)
dictOfCategories = {el.capitalize(): 0 for el in categoryOfTitle}
infoFromBacklinks = getAllBacklinksFromFile(title)
backlinksNumber = infoFromBacklinks[0]
backlinks = infoFromBacklinks[1]
for bl in backlinks:
blCategories = FindCategory(session, bl)
for cat in blCategories:
if cat.capitalize() in dictOfCategories:
#print('{} is in'.format(cat.capitalize()))
dictOfCategories[cat.capitalize()] += 1
# print('--------')
maxCat = max(dictOfCategories, key=dictOfCategories.get)
cSim = dictOfCategories[maxCat]/backlinksNumber
print('{}\t{}\t{}'.format(title, maxCat, round(cSim, 2)), file=f)
session = requests.Session()
titles = ['Official_(tennis)', 'Maria_Pepe', 'SEAT_Arona', 'Dodge_Coronet',
'Christmas_window', 'Last.fm', 'Traditional_bluegrass']
with open('output.txt', 'w') as f:
print('Entity\t\tCategory\t\tc-Similarity\n', file=f)
for title in titles:
routine(session, title)
|
[
"nltk.download",
"requests.Session"
] |
[((72, 98), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (85, 98), False, 'import nltk\n'), ((2693, 2711), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2709, 2711), False, 'import requests\n')]
|
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class RecordRelatedTab:
def __init__(self, app):
self.app = app
def expand_Show_more_actions_dropdown_on_record_page(self):
driver = self.app.driver
dropdown_locator = "//div[contains(@class,'windowViewMode-normal')]//span[contains(text(),'Show')]/.."
dropdown_element = WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, dropdown_locator)))
dropdown_element.click()
def click_Edit_button(self):
driver = self.app.driver
driver.find_element_by_xpath("//*[text()='Edit']/parent::a").click()
def click_Edit_button_on_record_page(self):
driver = self.app.driver
# if "Edit" button is already displayed or not
if len(driver.find_elements_by_xpath("//*[text()='Edit']/parent::a")):
self.click_Edit_button()
else:
self.expand_Show_more_actions_dropdown_on_record_page()
self.click_Edit_button()
def check_that_page_title_contains_record_name(self, record_name):
driver = self.app.driver
print("\n" + driver.title)
assert record_name in driver.title, "Wrong page title :)"
def switch_to_Details_tab_of_record(self):
driver = self.app.driver
details_tab_element = WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH,
"//div[contains(@class,'active')]//a[text()='Details']")))
details_tab_element.click()
|
[
"selenium.webdriver.support.expected_conditions.element_to_be_clickable",
"selenium.webdriver.support.ui.WebDriverWait"
] |
[((510, 566), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (['(By.XPATH, dropdown_locator)'], {}), '((By.XPATH, dropdown_locator))\n', (536, 566), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1465, 1564), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (['(By.XPATH, "//div[contains(@class,\'active\')]//a[text()=\'Details\']")'], {}), '((By.XPATH,\n "//div[contains(@class,\'active\')]//a[text()=\'Details\']"))\n', (1491, 1564), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((479, 503), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(5)'], {}), '(driver, 5)\n', (492, 503), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((1434, 1458), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(5)'], {}), '(driver, 5)\n', (1447, 1458), False, 'from selenium.webdriver.support.ui import WebDriverWait\n')]
|
# <NAME>
# CS1400 - LW2 XL
# Assignment #11
from modules.orbian import Orbian
from time import sleep
from random import randint
from random import shuffle # Hint hint
def main():
print("WELCOME TO ORBIAN FAMILY")
print()
family = []
input("Hit Enter to Create the First Four Orbians")
for i in range(0, 4):
name = input("\tEnter a name for Orbian " + str(i + 1) + ": ")
# The first four Orbians are created with random values
headRadius = randint(2, 5)
bodyRadius = randint(3, 8)
bodyHeight = randint(5, 15)
family.append(Orbian(name, headRadius, bodyRadius, bodyHeight))
print("\tCreating your Orbian Family", end="")
thinking()
done = False
while not done:
print()
print("Menu")
print("\t1) Meet Orbian Family")
print("\t2) Compare Orbians")
print("\t3) Orbian Info")
print("\t4) Create Orbian Baby")
print("\t5) Send to Pasture")
print("\t6) Orbian Thanos")
print("\t7) Quit")
choice = int(input("Choose an option: "))
print()
if choice == 1:
listFamily(family)
elif choice == 2:
compare(family)
elif choice == 3:
info(family)
elif choice == 4:
createBaby(family)
elif choice == 5:
toPasture(family)
elif choice == 6:
thanosSnap(family)
elif choice == 7:
done = True
print("Thanks for playing Orbian Family!!!")
def thinking():
for i in range(5):
print(".", end="")
sleep(0.5) # You can comment this out while testing to make things go faster
print()
def selectOrbian(famList, selected=None):
count = 1
for i in famList:
print("\t" + str(count) + ") " + i.getName(), end="")
if selected is not None and i is selected:
print(" (already selected)")
else:
print()
count += 1
return famList[int(input("Select an Orbian: ")) - 1] # Returns an Orbian object
# DO NOT MODIFY ANY CODE ABOVE THIS LINE ##############
# Define/Complete the functions below ###################
def listFamily(famList):
# <<<<<<<<<<<<<< Write code to list the Orbian family >>>>>>>>>>>>>>>
for i in famList:
print("I am Orbian " + str(i))
def compare(famList):
orb1 = selectOrbian(famList)
orb2 = selectOrbian(famList, orb1)
# DO NOT MODIFY THIS FUNCTION BEYOND THIS LINE ############
if (orb1 == orb2):
print("\tOrbian " + orb1.getName() + " is equal to Orbian " + orb2.getName())
elif (orb1 > orb2):
print("\tOrbian " + orb1.getName() + " is bigger than Orbian " + orb2.getName())
else:
print("\tOrbian " + orb1.getName() + " is smaller than Orbian " + orb2.getName())
def createBaby(famList):
# <<<<<<<<<<<<<< Write code to select two orbians to be parents >>>>>>>>>>>>>>>
orb1 = selectOrbian(famList)
orb2 = selectOrbian(famList, orb1)
# ########## DO NOT MODIFY THIS FUNCTION BEYOND THIS LINE ############
famList.append(orb1 + orb2)
print("\tGreetings Orbian " + famList[len(famList) - 1].getName())
def info(famList):
# ########## DO NOT MODIFY THIS FUNCTION ############
print("Select an Orbian to view")
orbian = selectOrbian(famList)
print("Orbian " + orbian.getName() + " is " + str(orbian.getAge()) + " zungs old")
print("\tand is " + str(orbian.getVolume()) + " zogs, and " + str(len(orbian)) + " zings")
def toPasture(famList):
orb = selectOrbian(famList)
orbIndex = famList.index(orb)
famList.pop(orbIndex)
print("\nWe wish " + orb.getName() + " well in his future adventures!\nThey will be missed.")
def thanosSnap(famList):
print("Uh oh. Orbian Thanos just snapped his fingers")
thinking()
half = len(famList) // 2
shuffle(famList)
for i in range(half):
famList.pop()
main()
|
[
"random.shuffle",
"modules.orbian.Orbian",
"random.randint",
"time.sleep"
] |
[((3890, 3906), 'random.shuffle', 'shuffle', (['famList'], {}), '(famList)\n', (3897, 3906), False, 'from random import shuffle\n'), ((489, 502), 'random.randint', 'randint', (['(2)', '(5)'], {}), '(2, 5)\n', (496, 502), False, 'from random import randint\n'), ((524, 537), 'random.randint', 'randint', (['(3)', '(8)'], {}), '(3, 8)\n', (531, 537), False, 'from random import randint\n'), ((559, 573), 'random.randint', 'randint', (['(5)', '(15)'], {}), '(5, 15)\n', (566, 573), False, 'from random import randint\n'), ((1618, 1628), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (1623, 1628), False, 'from time import sleep\n'), ((596, 644), 'modules.orbian.Orbian', 'Orbian', (['name', 'headRadius', 'bodyRadius', 'bodyHeight'], {}), '(name, headRadius, bodyRadius, bodyHeight)\n', (602, 644), False, 'from modules.orbian import Orbian\n')]
|
from PIL import Image
from src.utils import int_clamp
from PIL import UnidentifiedImageError
import tkinter.messagebox as messagebox
def safe_open_image(filename, corner_creator):
image = None
try:
if filename is not None and filename != "":
image = PILCollageImage(filename, corner_creator)
except UnidentifiedImageError:
messagebox.showerror("Error", "Failed open file {0}".format(filename))
return image
class ViewingWindow:
"""
Class for managing viewing window in original image
"""
def __init__(self, original, scale_step=0.05, scale_value_min=0.2, move_step=5):
self.original = original
self._image_size = None
self.scale_value = 1
self.scale_step = scale_step
self.move_step = move_step
self.view_vector = (0, 0)
self.scale_value_min = scale_value_min
self._borders = None
self._corner = None
self._actual_im_size = None
width = property()
height = property()
@width.getter
def width(self):
return self._image_size[0] * self.scale_value
@height.getter
def height(self):
return self._image_size[1] * self.scale_value
def _update_params(self):
center = (self.original.width / 2 + self.view_vector[0], self.original.height / 2 + self.view_vector[1])
left = center[0] - self.width / 2
upper = center[1] - self.height / 2
right = center[0] + self.width / 2
lower = center[1] + self.height / 2
new_borders = (
int_clamp(left, min_val=0),
int_clamp(upper, min_val=0),
int_clamp(right, max_val=self.original.width),
int_clamp(lower, max_val=self.original.height)
)
new_width = int_clamp(
(new_borders[2] - new_borders[0]) / self.scale_value, min_val=1, max_val=self._image_size[0])
new_height = int_clamp(
(new_borders[3] - new_borders[1]) / self.scale_value, min_val=1, max_val=self._image_size[1])
corner_x = int_clamp(-left / self.scale_value, min_val=0, max_val=self._image_size[0] - 1)
corner_y = int_clamp(-upper / self.scale_value, min_val=0, max_val=self._image_size[1] - 1)
self._borders = new_borders
self._actual_im_size = (new_width, new_height)
self._corner = (corner_x, corner_y)
def get(self):
"""
Crops rectangle from original image and resizes it to image size
Returns cropped PIL Image
"""
return self.original.crop(self._borders).resize(self._actual_im_size)
def _scale(self, new_scale_value):
self.scale_value = new_scale_value
self._update_params()
def resize(self, size):
self._image_size = int_clamp(size[0], min_val=1), int_clamp(size[1], min_val=1)
self._update_params()
def move(self, dx, dy):
self.view_vector = (
self.view_vector[0] + dx * self.scale_value,
self.view_vector[1] + dy * self.scale_value)
self._update_params()
def zoom_in(self):
self._scale(max(self.scale_value - self.scale_step, self.scale_value_min))
def zoom_out(self):
self._scale(self.scale_value + self.scale_step)
def move_up(self):
self.move(dx=0, dy=-self.move_step)
def move_down(self):
self.move(dx=0, dy=self.move_step)
def move_left(self):
self.move(dx=-self.move_step, dy=0)
def move_right(self):
self.move(dx=self.move_step, dy=0)
corner = property()
@corner.getter
def corner(self):
return self._corner
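# A minimal sketch of driving the viewing window above directly, assuming a
# local image file 'photo.jpg'; the viewport size and the moves are illustrative.
def _demo_viewing_window():
    original = Image.open('photo.jpg')
    window = ViewingWindow(original)
    window.resize((400, 300))      # viewport size in pixels; also computes the crop box
    window.zoom_in()
    window.move_right()
    return window.get()            # cropped and rescaled PIL image ready for display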
class PILCollageImage:
def __init__(self, filename, corner_creator):
self.corners = corner_creator
original = Image.open(filename)
self.viewing_window = ViewingWindow(original)
self._corner = None
def resize(self, size):
"""
Resize the image
size – The requested size in pixels, as a 2-tuple: (width, height).
"""
self.viewing_window.resize(size)
def move_view_up(self):
self.viewing_window.move_up()
def move_view(self, dx, dy):
self.viewing_window.move(dx=dx, dy=dy)
def move_view_down(self):
self.viewing_window.move_down()
def move_view_left(self):
self.viewing_window.move_left()
def move_view_right(self):
self.viewing_window.move_right()
def zoom_in(self):
self.viewing_window.zoom_in()
def zoom_out(self):
self.viewing_window.zoom_out()
def _update_corners(self, img):
alpha = self.corners.get_alpha(img.size)
img.putalpha(alpha)
return img
PhotoImage = property()
corner = property()
ViewingWindow = property()
@PhotoImage.getter
def PIL(self):
return self._update_corners(self.viewing_window.get())
@corner.getter
def corner(self):
return self.viewing_window.corner
@ViewingWindow.getter
def ViewingWindow(self):
return self.viewing_window
|
[
"src.utils.int_clamp",
"PIL.Image.open"
] |
[((1784, 1891), 'src.utils.int_clamp', 'int_clamp', (['((new_borders[2] - new_borders[0]) / self.scale_value)'], {'min_val': '(1)', 'max_val': 'self._image_size[0]'}), '((new_borders[2] - new_borders[0]) / self.scale_value, min_val=1,\n max_val=self._image_size[0])\n', (1793, 1891), False, 'from src.utils import int_clamp\n'), ((1922, 2029), 'src.utils.int_clamp', 'int_clamp', (['((new_borders[3] - new_borders[1]) / self.scale_value)'], {'min_val': '(1)', 'max_val': 'self._image_size[1]'}), '((new_borders[3] - new_borders[1]) / self.scale_value, min_val=1,\n max_val=self._image_size[1])\n', (1931, 2029), False, 'from src.utils import int_clamp\n'), ((2059, 2138), 'src.utils.int_clamp', 'int_clamp', (['(-left / self.scale_value)'], {'min_val': '(0)', 'max_val': '(self._image_size[0] - 1)'}), '(-left / self.scale_value, min_val=0, max_val=self._image_size[0] - 1)\n', (2068, 2138), False, 'from src.utils import int_clamp\n'), ((2158, 2243), 'src.utils.int_clamp', 'int_clamp', (['(-upper / self.scale_value)'], {'min_val': '(0)', 'max_val': '(self._image_size[1] - 1)'}), '(-upper / self.scale_value, min_val=0, max_val=self._image_size[1] - 1\n )\n', (2167, 2243), False, 'from src.utils import int_clamp\n'), ((3759, 3779), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (3769, 3779), False, 'from PIL import Image\n'), ((1567, 1593), 'src.utils.int_clamp', 'int_clamp', (['left'], {'min_val': '(0)'}), '(left, min_val=0)\n', (1576, 1593), False, 'from src.utils import int_clamp\n'), ((1607, 1634), 'src.utils.int_clamp', 'int_clamp', (['upper'], {'min_val': '(0)'}), '(upper, min_val=0)\n', (1616, 1634), False, 'from src.utils import int_clamp\n'), ((1648, 1693), 'src.utils.int_clamp', 'int_clamp', (['right'], {'max_val': 'self.original.width'}), '(right, max_val=self.original.width)\n', (1657, 1693), False, 'from src.utils import int_clamp\n'), ((1707, 1753), 'src.utils.int_clamp', 'int_clamp', (['lower'], {'max_val': 'self.original.height'}), '(lower, max_val=self.original.height)\n', (1716, 1753), False, 'from src.utils import int_clamp\n'), ((2773, 2802), 'src.utils.int_clamp', 'int_clamp', (['size[0]'], {'min_val': '(1)'}), '(size[0], min_val=1)\n', (2782, 2802), False, 'from src.utils import int_clamp\n'), ((2804, 2833), 'src.utils.int_clamp', 'int_clamp', (['size[1]'], {'min_val': '(1)'}), '(size[1], min_val=1)\n', (2813, 2833), False, 'from src.utils import int_clamp\n')]
|
from django.test import SimpleTestCase
from django.urls import reverse , resolve
from article.views import *
class TestUrls(SimpleTestCase):
def test_articles_url_is_resolved(self):
url = reverse('mumbles-api-articles:articles')
self.assertEquals(resolve(url).func,articles)
def test_articles_created_url_is_resolved(self):
url = reverse('mumbles-api-articles:create-article')
self.assertEquals(resolve(url).func,createArticle)
def test_articles_vote_url_is_resolved(self):
url = reverse('mumbles-api-articles:article-vote')
self.assertEquals(resolve(url).func,updateVote)
def test_get_article_url_is_resolved(self):
url = reverse('mumbles-api-articles:get-article',args=['sOmE-iD'])
self.assertEquals(resolve(url).func,getArticle)
def test_edit_article_url_is_resolved(self):
url = reverse('mumbles-api-articles:edit-article',args=['sOmE-iD'])
self.assertEquals(resolve(url).func,editArticle)
def test_delete_article_url_is_resolved(self):
url = reverse('mumbles-api-articles:delete-article',args=['sOmE-iD'])
self.assertEquals(resolve(url).func,deleteArticle)
def test_edit_article_comment_url_is_resolved(self):
url = reverse('mumbles-api-articles:edit-article-comment',args=['sOmE-iD'])
self.assertEquals(resolve(url).func,editArticleComment)
def test_delete_article_comment_url_is_resolved(self):
url = reverse('mumbles-api-articles:delete-article-comment',args=['sOmE-iD'])
self.assertEquals(resolve(url).func,deleteArticleComment)
|
[
"django.urls.reverse",
"django.urls.resolve"
] |
[((206, 246), 'django.urls.reverse', 'reverse', (['"""mumbles-api-articles:articles"""'], {}), "('mumbles-api-articles:articles')\n", (213, 246), False, 'from django.urls import reverse, resolve\n'), ((369, 415), 'django.urls.reverse', 'reverse', (['"""mumbles-api-articles:create-article"""'], {}), "('mumbles-api-articles:create-article')\n", (376, 415), False, 'from django.urls import reverse, resolve\n'), ((540, 584), 'django.urls.reverse', 'reverse', (['"""mumbles-api-articles:article-vote"""'], {}), "('mumbles-api-articles:article-vote')\n", (547, 584), False, 'from django.urls import reverse, resolve\n'), ((704, 765), 'django.urls.reverse', 'reverse', (['"""mumbles-api-articles:get-article"""'], {'args': "['sOmE-iD']"}), "('mumbles-api-articles:get-article', args=['sOmE-iD'])\n", (711, 765), False, 'from django.urls import reverse, resolve\n'), ((885, 947), 'django.urls.reverse', 'reverse', (['"""mumbles-api-articles:edit-article"""'], {'args': "['sOmE-iD']"}), "('mumbles-api-articles:edit-article', args=['sOmE-iD'])\n", (892, 947), False, 'from django.urls import reverse, resolve\n'), ((1070, 1134), 'django.urls.reverse', 'reverse', (['"""mumbles-api-articles:delete-article"""'], {'args': "['sOmE-iD']"}), "('mumbles-api-articles:delete-article', args=['sOmE-iD'])\n", (1077, 1134), False, 'from django.urls import reverse, resolve\n'), ((1265, 1335), 'django.urls.reverse', 'reverse', (['"""mumbles-api-articles:edit-article-comment"""'], {'args': "['sOmE-iD']"}), "('mumbles-api-articles:edit-article-comment', args=['sOmE-iD'])\n", (1272, 1335), False, 'from django.urls import reverse, resolve\n'), ((1473, 1545), 'django.urls.reverse', 'reverse', (['"""mumbles-api-articles:delete-article-comment"""'], {'args': "['sOmE-iD']"}), "('mumbles-api-articles:delete-article-comment', args=['sOmE-iD'])\n", (1480, 1545), False, 'from django.urls import reverse, resolve\n'), ((273, 285), 'django.urls.resolve', 'resolve', (['url'], {}), '(url)\n', (280, 285), False, 'from django.urls import reverse, resolve\n'), ((442, 454), 'django.urls.resolve', 'resolve', (['url'], {}), '(url)\n', (449, 454), False, 'from django.urls import reverse, resolve\n'), ((611, 623), 'django.urls.resolve', 'resolve', (['url'], {}), '(url)\n', (618, 623), False, 'from django.urls import reverse, resolve\n'), ((791, 803), 'django.urls.resolve', 'resolve', (['url'], {}), '(url)\n', (798, 803), False, 'from django.urls import reverse, resolve\n'), ((973, 985), 'django.urls.resolve', 'resolve', (['url'], {}), '(url)\n', (980, 985), False, 'from django.urls import reverse, resolve\n'), ((1160, 1172), 'django.urls.resolve', 'resolve', (['url'], {}), '(url)\n', (1167, 1172), False, 'from django.urls import reverse, resolve\n'), ((1361, 1373), 'django.urls.resolve', 'resolve', (['url'], {}), '(url)\n', (1368, 1373), False, 'from django.urls import reverse, resolve\n'), ((1571, 1583), 'django.urls.resolve', 'resolve', (['url'], {}), '(url)\n', (1578, 1583), False, 'from django.urls import reverse, resolve\n')]
|
from game.field import FieldXO
from game.player import Player, Bot
class Game:
__field__: FieldXO = None
__status__: bool = False
__active_player__: Player
def __init__(self, player_x_id: str, player_o_id: str):
self.__playerX__ = self.__create_player(player_x_id, 'X')
self.__playerO__ = self.__create_player(player_o_id, 'O')
self.__active_player__ = self.__playerX__
@staticmethod
def __create_player(player_id: str, player_type: str) -> Player:
if player_id == 'bot':
return Bot(player_type=player_type)
else:
return Player(player_id=player_id, player_type=player_type)
def change_status(self, status: bool):
self.__status__ = status
def get_status(self):
return self.__status__
def get_field(self):
return self.__field__
def new_game(self):
self.__field__ = FieldXO()
self.__status__ = True
def end_game(self):
self.__field__ = None
self.__status__ = False
def __player_move(self, x: int, y: int, player: Player) -> bool:
if player.get_player_id() == self.__active_player__.get_player_id():
if self.__field__.fill_cell(x, y, player.get_player_type()):
self.__change_active_player()
return True
else:
return False
else:
return False
def __change_active_player(self):
if self.__active_player__ == self.__playerX__:
self.__active_player__ = self.__playerO__
else:
self.__active_player__ = self.__playerX__
def provide_player_move(self, x: int, y: int, player_id: str) -> str:
player = self.__define_player(player_id)
if not self.__player_move(x, y, player):
return "Клетка занята! или не ваш ход"
if self.get_field().is_winner(player_type=player.get_player_type()):
self.change_status(status=False)
return "Вы выиграли!"
if not self.get_field().has_empty_cell():
self.change_status(status=False)
return "Ничья"
else:
if self.__active_player__.get_player_id() == 'bot':
player = self.__active_player__
self.__bot_move(player)
if self.get_field().is_winner(player_type=player.get_player_type()):
self.change_status(status=False)
return "Вы проиграли!"
if not self.get_field().has_empty_cell():
self.change_status(status=False)
return "Ничья"
return "Ваш ход"
return "ожидание хода другого игрока"
def __define_player(self, player_id: str) -> Player:
if self.__playerX__.__player_id__ == player_id:
return self.__playerX__
elif self.__playerO__.__player_id__ == player_id:
return self.__playerO__
else:
            raise Exception('No player found with id={0}'.format(player_id))
def __bot_move(self, player: Player):
cell = player.get_move_cell(self.__field__)
if not self.__player_move(cell.x, cell.y, player):
            raise Exception('Game bot error: it returned invalid move coordinates, '
                            'cell={0}, or it is not its turn, active_player={1}'.format(cell, self.__active_player__))
|
[
"game.field.FieldXO",
"game.player.Bot",
"game.player.Player"
] |
[((908, 917), 'game.field.FieldXO', 'FieldXO', ([], {}), '()\n', (915, 917), False, 'from game.field import FieldXO\n'), ((552, 580), 'game.player.Bot', 'Bot', ([], {'player_type': 'player_type'}), '(player_type=player_type)\n', (555, 580), False, 'from game.player import Player, Bot\n'), ((614, 666), 'game.player.Player', 'Player', ([], {'player_id': 'player_id', 'player_type': 'player_type'}), '(player_id=player_id, player_type=player_type)\n', (620, 666), False, 'from game.player import Player, Bot\n')]
|
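A minimal usage sketch for the Game record above, driving a single human-vs-bot move with only the methods that record defines. The import path, the human player id 'u1', and the (0, 0) coordinates are illustrative assumptions, not part of the original code.
from game.game import Game  # assumed module path; only Game itself comes from the record
game = Game(player_x_id='u1', player_o_id='bot')  # 'bot' selects the Bot player, as in __create_player
game.new_game()
while game.get_status():
    # Coordinates would normally come from user input; (0, 0) is a placeholder.
    print(game.provide_player_move(0, 0, player_id='u1'))
    break  # one illustrative move; a real loop would keep reading input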
import numpy as np
from SOM import SOM
data = np.loadtxt('Data/output.txt', delimiter=';', usecols=range(40))
###SOM
som = SOM(10, 10) # initialize the SOM
som.fit(data, 10000, decay='hill')
# som = SOM(10, 10) # initialize the SOM
# som.load('Data/SOM')
targets = np.loadtxt('Data/target.txt', dtype='int')
targets = targets - 1
names = ['Car',
         'Truck 2',
         'Truck 3',
         'Truck 4+',
         'Bus 2',
         'Bus 3',
         '<NAME>'
         ]
# now visualize the learned representation with the class labels
som.plot_point_map(data, targets, names, filename='images/SOM/som.png')
# for name in names:
# som.plot_class_density(data, targets, t=names.index(name), name=name, filename='images/SOM/density ' + name + '.png')
# som.save('SOM')
|
[
"SOM.SOM",
"numpy.loadtxt"
] |
[((126, 137), 'SOM.SOM', 'SOM', (['(10)', '(10)'], {}), '(10, 10)\n', (129, 137), False, 'from SOM import SOM\n'), ((272, 314), 'numpy.loadtxt', 'np.loadtxt', (['"""Data/target.txt"""'], {'dtype': '"""int"""'}), "('Data/target.txt', dtype='int')\n", (282, 314), True, 'import numpy as np\n')]
|
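The per-class density plotting that is commented out in the SOM record above can be run once the map is fitted; this sketch only uncomments and reflows those lines, assuming som, data, targets, and names are the objects created in that record.
for name in names:
    som.plot_class_density(data, targets, t=names.index(name), name=name,
                           filename='images/SOM/density ' + name + '.png')
som.save('SOM')  # persist the trained map, as in the record's commented-out call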
'''
##### DO Not Touch your face ver.0.2
### Medical Imaging & Intelligent Reality Lab (MI2RL) @ Asan Medical Center(AMC)
# MI2RL website : https://www.mi2rl.co/
# AMC : http://www.amc.seoul.kr/asan/main.do
### Developer
# <NAME> : <EMAIL>
# <NAME> : <EMAIL>
# <NAME> : <EMAIL>
# <NAME> : <EMAIL>
# <NAME> : <EMAIL>
### Data contributor
# MI2RL researchers
# <NAME>, Emergency Medicine@AMC
# <NAME>, Convergence Medicine@AMC
### references
# I3D Network (https://github.com/hassony2/kinetics_i3d_pytorch)
#####
'''
import cv2
import sys
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from gui_viewer import GUIViewer
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
viewer = GUIViewer()
viewer.show()
app.exec_()
|
[
"PyQt5.QtWidgets.QApplication",
"gui_viewer.GUIViewer"
] |
[((731, 763), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (753, 763), False, 'from PyQt5 import QtWidgets\n'), ((778, 789), 'gui_viewer.GUIViewer', 'GUIViewer', ([], {}), '()\n', (787, 789), False, 'from gui_viewer import GUIViewer\n')]
|
import subprocess
import sys
import json
import datetime
import urllib.parse
import sys
def main():
files_by_date = {}
bucket = sys.argv[1]
days_to_keep = int(sys.argv[2])
print(f"Looking for binaries to delete older than {days_to_keep} days")
files_lines = execute_cli(f"b2 ls --long --versions {bucket} nightly").split("\n")
for x in files_lines:
parts = [y for y in x.split(' ') if y]
if parts and parts[0]:
date = datetime.datetime.strptime(parts[2], '%Y-%m-%d').replace(hour=0, minute=0, second=0, microsecond=0)
now = datetime.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
delta = now - date
if delta.days > days_to_keep:
print(f'Deleting {parts[5]}')
execute_cli(f'b2 delete-file-version {parts[0]}')
def execute_cli(command):
sb = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    return sb.stdout.read().decode("utf-8")
if __name__ == '__main__':
sys.exit(main())
|
[
"datetime.datetime.strptime",
"subprocess.Popen",
"datetime.datetime.utcnow"
] |
[((892, 953), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(command, shell=True, stdout=subprocess.PIPE)\n', (908, 953), False, 'import subprocess\n'), ((473, 521), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['parts[2]', '"""%Y-%m-%d"""'], {}), "(parts[2], '%Y-%m-%d')\n", (499, 521), False, 'import datetime\n'), ((591, 617), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (615, 617), False, 'import datetime\n')]
|
# Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import time
from newrelic.core.attribute import (check_name_is_string, check_name_length,
process_user_attribute, NameIsNotStringException, NameTooLongException,
MAX_NUM_USER_ATTRIBUTES)
_logger = logging.getLogger(__name__)
EVENT_TYPE_VALID_CHARS_REGEX = re.compile(r'^[a-zA-Z0-9:_ ]+$')
class NameInvalidCharactersException(Exception): pass
def check_event_type_valid_chars(name):
regex = EVENT_TYPE_VALID_CHARS_REGEX
if not regex.match(name):
raise NameInvalidCharactersException()
def process_event_type(name):
"""Perform all necessary validation on a potential event type.
If any of the validation checks fail, they will raise an exception
which we catch, so we can log a message, and return None.
Args:
name (str): The type (name) of the custom event.
Returns:
name, if name is OK.
NONE, if name isn't.
"""
FAILED_RESULT = None
try:
check_name_is_string(name)
check_name_length(name)
check_event_type_valid_chars(name)
except NameIsNotStringException:
_logger.debug('Event type must be a string. Dropping '
'event: %r', name)
return FAILED_RESULT
except NameTooLongException:
_logger.debug('Event type exceeds maximum length. Dropping '
'event: %r', name)
return FAILED_RESULT
except NameInvalidCharactersException:
_logger.debug('Event type has invalid characters. Dropping '
'event: %r', name)
return FAILED_RESULT
else:
return name
def create_custom_event(event_type, params):
"""Creates a valid custom event.
Ensures that the custom event has a valid name, and also checks
the format and number of attributes. No event is created, if the
name is invalid. An event is created, if any of the attributes are
invalid, but the invalid attributes are dropped.
Args:
event_type (str): The type (name) of the custom event.
params (dict): Attributes to add to the event.
Returns:
Custom event (list of 2 dicts), if successful.
None, if not successful.
"""
name = process_event_type(event_type)
if name is None:
return None
attributes = {}
try:
for k, v in params.items():
key, value = process_user_attribute(k, v)
if key:
if len(attributes) >= MAX_NUM_USER_ATTRIBUTES:
_logger.debug('Maximum number of attributes already '
'added to event %r. Dropping attribute: %r=%r',
name, key, value)
else:
attributes[key] = value
except Exception:
_logger.debug('Attributes failed to validate for unknown reason. '
'Check traceback for clues. Dropping event: %r.', name,
exc_info=True)
return None
intrinsics = {
'type': name,
'timestamp': int(1000.0 * time.time()),
}
event = [intrinsics, attributes]
return event
|
[
"newrelic.core.attribute.process_user_attribute",
"time.time",
"newrelic.core.attribute.check_name_is_string",
"newrelic.core.attribute.check_name_length",
"logging.getLogger",
"re.compile"
] |
[((822, 849), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (839, 849), False, 'import logging\n'), ((882, 913), 're.compile', 're.compile', (['"""^[a-zA-Z0-9:_ ]+$"""'], {}), "('^[a-zA-Z0-9:_ ]+$')\n", (892, 913), False, 'import re\n'), ((1558, 1584), 'newrelic.core.attribute.check_name_is_string', 'check_name_is_string', (['name'], {}), '(name)\n', (1578, 1584), False, 'from newrelic.core.attribute import check_name_is_string, check_name_length, process_user_attribute, NameIsNotStringException, NameTooLongException, MAX_NUM_USER_ATTRIBUTES\n'), ((1593, 1616), 'newrelic.core.attribute.check_name_length', 'check_name_length', (['name'], {}), '(name)\n', (1610, 1616), False, 'from newrelic.core.attribute import check_name_is_string, check_name_length, process_user_attribute, NameIsNotStringException, NameTooLongException, MAX_NUM_USER_ATTRIBUTES\n'), ((2962, 2990), 'newrelic.core.attribute.process_user_attribute', 'process_user_attribute', (['k', 'v'], {}), '(k, v)\n', (2984, 2990), False, 'from newrelic.core.attribute import check_name_is_string, check_name_length, process_user_attribute, NameIsNotStringException, NameTooLongException, MAX_NUM_USER_ATTRIBUTES\n'), ((3632, 3643), 'time.time', 'time.time', ([], {}), '()\n', (3641, 3643), False, 'import time\n')]
|
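A hedged sketch of calling create_custom_event from the record above: a valid event type yields a two-element [intrinsics, attributes] list, while a type with disallowed characters is rejected and None is returned. The event type and attribute values here are made up for illustration.
event = create_custom_event('OrderPlaced', {'order_id': 123, 'total': 9.99})
if event is not None:
    intrinsics, attributes = event
    print(intrinsics['type'], intrinsics['timestamp'], attributes)
rejected = create_custom_event('bad/type!', {'x': 1})  # '/' and '!' fail the regex check
assert rejected is None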
from .tractography import Tractography
from .trackvis import tractography_from_trackvis_file, tractography_to_trackvis_file
from warnings import warn
import numpy
__all__ = [
'Tractography',
'tractography_from_trackvis_file', 'tractography_to_trackvis_file',
'tractography_from_files',
'tractography_from_file', 'tractography_to_file',
]
try:
__all__ += [
'tractography_from_vtk_files', 'tractography_to_vtk_file',
'vtkPolyData_to_tracts', 'tracts_to_vtkPolyData'
]
from .vtkInterface import (
tractography_from_vtk_files, tractography_to_vtk_file,
vtkPolyData_to_tracts, tracts_to_vtkPolyData
)
except ImportError:
warn(
'VTK support not installed in this python distribution, '
'VTK files will not be read or written'
)
def tractography_from_files(filenames):
if isinstance(filenames, str):
filenames = [filenames]
tracts = tractography_from_file(filenames[0])
for filename in filenames[1:]:
tracts_ = tractography_from_file(filename)
tracts.append(tracts_.tracts(), tracts_.tracts_data())
return tracts
def tractography_from_file(filename):
if filename.endswith('trk'):
return tractography_from_trackvis_file(filename)
elif filename.endswith('vtk') or filename.endswith('vtp'):
if 'tractography_from_vtk_files' in __all__:
return tractography_from_vtk_files(filename)
else:
raise IOError("No VTK support installed, VTK files could not be read")
else:
raise IOError("File format not supported")
def tractography_to_file(filename, tractography, **kwargs):
if filename.endswith('trk'):
if 'affine' not in kwargs or kwargs['affine'] is None:
if (
hasattr(tractography, 'affine') and
tractography.affine is not None
):
kwargs['affine'] = tractography.affine
else:
warn('Setting affine of trk file to the identity')
kwargs['affine'] = numpy.eye(4)
if (
'image_dimensions' not in kwargs or
kwargs['image_dimensions'] is None
):
if (
hasattr(tractography, 'image_dims') and
tractography.image_dims is not None
):
kwargs['image_dimensions'] = tractography.image_dims
else:
warn('Setting image_dimensions of trk file to: 1 1 1')
kwargs['image_dimensions'] = numpy.ones(3)
return tractography_to_trackvis_file(filename, tractography, **kwargs)
elif filename.endswith('vtk') or filename.endswith('vtp'):
if 'tractography_from_vtk_files' in __all__:
return tractography_to_vtk_file(filename, tractography, **kwargs)
else:
raise IOError("No VTK support installed, VTK files could not be read")
else:
raise IOError("File format not supported")
|
[
"warnings.warn",
"numpy.ones",
"numpy.eye"
] |
[((688, 798), 'warnings.warn', 'warn', (['"""VTK support not installed in this python distribution, VTK files will not be read or written"""'], {}), "(\n 'VTK support not installed in this python distribution, VTK files will not be read or written'\n )\n", (692, 798), False, 'from warnings import warn\n'), ((1992, 2042), 'warnings.warn', 'warn', (['"""Setting affine of trk file to the identity"""'], {}), "('Setting affine of trk file to the identity')\n", (1996, 2042), False, 'from warnings import warn\n'), ((2078, 2090), 'numpy.eye', 'numpy.eye', (['(4)'], {}), '(4)\n', (2087, 2090), False, 'import numpy\n'), ((2462, 2516), 'warnings.warn', 'warn', (['"""Setting image_dimensions of trk file to: 1 1 1"""'], {}), "('Setting image_dimensions of trk file to: 1 1 1')\n", (2466, 2516), False, 'from warnings import warn\n'), ((2562, 2575), 'numpy.ones', 'numpy.ones', (['(3)'], {}), '(3)\n', (2572, 2575), False, 'import numpy\n')]
|
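A hedged sketch of the extension-based dispatch in the tractography record above: tractography_from_file picks the TrackVis or VTK reader from the suffix, and tractography_to_file does the same for writing. The file names are placeholders, and the VTK branch only works when VTK support is installed, as the record itself warns.
tracts = tractography_from_file('subject01.trk')      # TrackVis reader
tractography_to_file('subject01_copy.trk', tracts)    # affine/image_dimensions fall back with a warning
tractography_to_file('subject01.vtk', tracts)         # requires the optional VTK support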
import os
from setuptools import find_packages, setup
def read_requirements():
ret = []
fname = os.path.join(os.path.dirname(__file__), "requirements.txt")
with open(fname, "r") as f:
for line in f:
line = line.strip()
if line and not line.startswith("#"):
ret.append(line)
return ret
def read_long_description():
with open("README.rst", "r") as f:
return f.read()
setup(
name="mock-ssh-server",
version="0.9.1",
description="Mock SSH server for testing purposes",
long_description=read_long_description(),
url="https://github.com/carletes/mock-ssh-server",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Testing",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
package_dir={
"mockssh": "mockssh",
},
packages=find_packages(),
package_data={
"mockssh": [
"sample-user-key",
"sample-user-key.pub",
"server-key",
"server-key.pub",
]
},
install_requires=read_requirements(),
zip_safe=False,
)
|
[
"os.path.dirname",
"setuptools.find_packages"
] |
[((120, 145), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (135, 145), False, 'import os\n'), ((1383, 1398), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1396, 1398), False, 'from setuptools import find_packages, setup\n')]
|
from django.contrib import admin
from temp_tribes.models import Tribe
class TribeAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'creator', 'created', 'deleted')
admin.site.register(Tribe, TribeAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((179, 217), 'django.contrib.admin.site.register', 'admin.site.register', (['Tribe', 'TribeAdmin'], {}), '(Tribe, TribeAdmin)\n', (198, 217), False, 'from django.contrib import admin\n')]
|
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from app.models import ResourceLink
def index(request):
links = ResourceLink.objects.all()
return render(request, "links/index.html", context={
"links": links
})
def link(request, url_extension):
# Link exists
if ResourceLink.objects.filter(url_extension=url_extension).exists():
resource_link = ResourceLink.objects.get(url_extension=url_extension)
return HttpResponseRedirect(resource_link.link)
# Link doesn't exist
else:
return HttpResponse("That link doesn't exist.")
|
[
"app.models.ResourceLink.objects.all",
"app.models.ResourceLink.objects.get",
"django.http.HttpResponse",
"app.models.ResourceLink.objects.filter",
"django.shortcuts.render",
"django.http.HttpResponseRedirect"
] |
[((164, 190), 'app.models.ResourceLink.objects.all', 'ResourceLink.objects.all', ([], {}), '()\n', (188, 190), False, 'from app.models import ResourceLink\n'), ((202, 263), 'django.shortcuts.render', 'render', (['request', '"""links/index.html"""'], {'context': "{'links': links}"}), "(request, 'links/index.html', context={'links': links})\n", (208, 263), False, 'from django.shortcuts import render\n'), ((429, 482), 'app.models.ResourceLink.objects.get', 'ResourceLink.objects.get', ([], {'url_extension': 'url_extension'}), '(url_extension=url_extension)\n', (453, 482), False, 'from app.models import ResourceLink\n'), ((498, 538), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['resource_link.link'], {}), '(resource_link.link)\n', (518, 538), False, 'from django.http import HttpResponseRedirect, HttpResponse\n'), ((590, 630), 'django.http.HttpResponse', 'HttpResponse', (['"""That link doesn\'t exist."""'], {}), '("That link doesn\'t exist.")\n', (602, 630), False, 'from django.http import HttpResponseRedirect, HttpResponse\n'), ((338, 394), 'app.models.ResourceLink.objects.filter', 'ResourceLink.objects.filter', ([], {'url_extension': 'url_extension'}), '(url_extension=url_extension)\n', (365, 394), False, 'from app.models import ResourceLink\n')]
|
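A hedged sketch of the URL configuration these views imply; the pattern names and the urls.py module are assumptions, since only the two view functions appear in the record above.
# Hypothetical app/urls.py wiring for the index and link views.
from django.urls import path
from app import views

urlpatterns = [
    path('', views.index, name='index'),
    path('<str:url_extension>/', views.link, name='link'),
]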
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-06 20:14
from __future__ import unicode_literals
import uuid
from django.db import migrations, models
def gen_uuid(apps, schema_editor):
"""Generate unique UUID values"""
Profile = apps.get_model('profiles', 'Profile')
for profile in Profile.objects.all():
profile.mail_id = uuid.uuid4()
profile.save()
class Migration(migrations.Migration):
dependencies = [
('profiles', '0030_create_mail_id'),
]
operations = [
migrations.RunPython(gen_uuid, reverse_code=migrations.RunPython.noop),
]
|
[
"django.db.migrations.RunPython",
"uuid.uuid4"
] |
[((363, 375), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (373, 375), False, 'import uuid\n'), ((541, 611), 'django.db.migrations.RunPython', 'migrations.RunPython', (['gen_uuid'], {'reverse_code': 'migrations.RunPython.noop'}), '(gen_uuid, reverse_code=migrations.RunPython.noop)\n', (561, 611), False, 'from django.db import migrations, models\n')]
|
import pdb
import binascii
import os
from flask import g
from werkzeug.security import generate_password_hash, check_password_hash
from . import db
from .utils import timestamp
class User(db.Model):
"""The User model
Attributes:
__tablename__ (str): Table name for user model in database
id (SQLAlchemy table column, int): User ID
created_at (SQLAlchemy table column, int): Timestamp at which user was first created
updated_at (SQLAlchemy table column, int): Timestamp of last time user profile was updated
last_seen_at (SQLAlchemy table column, int): Timestamp of last time user was active
username (SQLAlchemy table column, str): User username
password_hash (SQLAlchemy table column, str): User password hash string
token (SQLAlchemy table column, str): User authentication token
online (SQLAlchemy table column, bool): Boolean that captures whether user is online
num_logins (SQLAlchemy table column, int): Number of user logins to page
frames (SQLAlchemy table relationship): Relationship property linking "user" model table to this one
"""
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
created_at = db.Column(db.Integer, default=timestamp)
updated_at = db.Column(db.Integer, default=timestamp, onupdate=timestamp)
last_seen_at = db.Column(db.Integer, default=timestamp)
username = db.Column(db.String(32), nullable=False, unique=True)
password_hash = db.Column(db.String(256), nullable=False)
token = db.Column(db.String(64), nullable=True, unique=True)
online = db.Column(db.Boolean, default=False)
num_logins = db.Column(db.Integer, default=1)
frames = db.relationship('Frame', lazy='dynamic', backref='user')
@property
def password(self):
"""Returns attribute error if user password is not readable"""
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
"""Generates password hash string and authentication token from user password
Args:
password (str): User password
"""
self.password_hash = generate_password_hash(password)
self.token = None # if user is changing passwords, also revoke token
def verify_password(self, password):
"""Verify password matches stored password hash string
Args:
password (str): Inputted user password
Returns:
(bool): True if password matches password hash string
"""
return check_password_hash(self.password_hash, password)
def generate_token(self):
"""Creates a 64 character long randomly generated token
Returns:
self.token (str): Generated token
"""
self.token = binascii.hexlify(os.urandom(32)).decode('utf-8')
return self.token
def ping(self):
"""Marks the user as recently seen and online"""
self.last_seen_at = timestamp()
self.online = True
def new_login(self):
"""Increments number of times user has logged in."""
self.num_logins += 1
@staticmethod
def create(data):
"""Create a new user
Args:
data (dict): Dictionary containing user's username and password
Returns:
user (object): Newly created user
"""
user = User()
user.from_dict(data)
return user
def from_dict(self, data):
"""Import user data from a dictionary
Args:
data (dict): Dictionary containing user's username and password
"""
for field in ['username', 'password']:
try:
setattr(self, field, data[field])
except KeyError:
                print(f'Key {field} not valid.')
def to_dict(self):
"""Export user to a dictionary"""
return {
'username': self.username,
'online': self.online,
}
@staticmethod
def find_offline_users():
"""Find users that haven't been active and mark them as offline
Returns:
users (list): List of offline users
"""
users = User.query.filter(User.last_seen_at < timestamp() - 60, User.online == True).all()
for user in users:
user.online = False
db.session.add(user)
db.session.commit()
return users
class Frame(db.Model):
"""The Frames model
Attributes:
__tablename__ (str): Table name for user model in database
instance (SQLAlchemy table column, str): Unique ID for processed frame
date (SQLAlchemy table column, datetime): Date that frame is processed
session_id (SQLAlchemy table column, int): User's login count
frame_count (SQLAlchemy table column, int): Frame number for user's current session
ip_address (SQLAlchemy table column, str): User's IP address
root_dir (SQLAlchemy table column, str): Root directory of user's image folder
raw_path (SQLAlchemy table column, str): Path for original image
processed_path (SQLAlchemy table column, str): Path for processed image
true_gest (SQLAlchemy table column, str): Ground-truth gesture inputted by user
pred_gest (SQLAlchemy table column, str): Predicted gesture
pred_conf (SQLAlchemy table column, float): Prediction confidence, percent
pred_time (SQLAlchemy table column, float): Prediction time, seconds
user_id (SQLAlchemy table column, int): User ID
"""
__tablename__ = 'frames'
instance = db.Column(db.String(), primary_key=True, nullable=False)
date = db.Column(db.DateTime(), nullable=False)
session_id = db.Column(db.Integer(), nullable=False)
frame_count = db.Column(db.Integer(), nullable=False)
ip_address = db.Column(db.String())
root_dir = db.Column(db.String(), nullable=False)
raw_path = db.Column(db.String(), nullable=False)
processed_path = db.Column(db.String())
true_gest = db.Column(db.String(), nullable=False)
pred_gest = db.Column(db.String(), nullable=False)
pred_conf = db.Column(db.Numeric(), nullable=False)
pred_time = db.Column(db.Numeric(), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
@staticmethod
def create(data, user=None):
"""Create a new frame. The user is obtained from the context unless provided explicitly.
Args:
data (dict): Dictionary containing values for some or all class attributes listed above
Returns:
frame (object): Newly generated frame
"""
frame = Frame(user=user or g.current_user)
frame.from_dict(data)
return frame
def from_dict(self, data):
"""Import frame data from a dictionary
Args:
data (dict): Dictionary containing values for some or all class attributes listed above
"""
for key in list(data.keys()):
try:
setattr(self, key, data[key])
except KeyError:
print(f'Key {key} not valid.')
|
[
"werkzeug.security.check_password_hash",
"os.urandom",
"werkzeug.security.generate_password_hash"
] |
[((2217, 2249), 'werkzeug.security.generate_password_hash', 'generate_password_hash', (['password'], {}), '(password)\n', (2239, 2249), False, 'from werkzeug.security import generate_password_hash, check_password_hash\n'), ((2619, 2668), 'werkzeug.security.check_password_hash', 'check_password_hash', (['self.password_hash', 'password'], {}), '(self.password_hash, password)\n', (2638, 2668), False, 'from werkzeug.security import generate_password_hash, check_password_hash\n'), ((2879, 2893), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (2889, 2893), False, 'import os\n')]
|
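A hedged sketch of the user lifecycle the User model's docstring above describes: create a user from a dict, verify the password against the stored hash, and issue an authentication token. Only methods defined in the record are used; the username and password are illustrative, and the db.session calls assume the same Flask-SQLAlchemy setup the record imports.
user = User.create({'username': 'alice', 'password': 's3cret'})  # placeholder credentials
db.session.add(user)
db.session.commit()

assert user.verify_password('s3cret')   # checks against the stored password_hash
token = user.generate_token()           # 64-character hex token
user.ping()                             # marks the user as online and recently seen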
from trodesnetwork import socket
from enum import Enum, auto
__all__ = ['CurrentScaling', 'GlobalStimulationSettings', 'StimulationCommand',
'TrodesHardware', 'TrodesInfoRequester', 'TrodesAnnotationRequester',
'TrodesAcquisitionRequester', 'TrodesEventSubscriber',
'TrodesAcquisitionSubscriber', 'TrodesSourceStatusSubscriber']
class CurrentScaling(Enum):
max10nA = auto()
max20nA = auto()
max50nA = auto()
max100nA = auto()
max200nA = auto()
max500nA = auto()
max1uA = auto()
max2uA = auto()
max5uA = auto()
max10uA = auto()
class GlobalStimulationSettings:
def setVoltageScale(self, scaleValue):
self.scaleValue = scaleValue
class StimulationCommand:
def setBiphasicPulseShape(self, leadingPulseWidth_Samples,
leadingPulseAmplitude, secondPulseWidth_Samples,
secondPulseAmplitude, interPhaseDwell_Samples, pulsePeriod_Samples,
startDelay_Samples):
self.leadingPulseWidth_Samples = leadingPulseWidth_Samples
self.leadingPulseAmplitude = leadingPulseAmplitude
self.secondPulseWidth_Samples = secondPulseWidth_Samples
self.secondPulseAmplitude = secondPulseAmplitude
self.interPhaseDwell_Samples = interPhaseDwell_Samples
self.pulsePeriod_Samples = pulsePeriod_Samples
self.startDelay_Samples = startDelay_Samples
def setNumPulsesInTrain(self, numPulsesInTrain):
self.numPulsesInTrain = numPulsesInTrain
def setChannels(self, cathodeID, cathodeChannel, anodeID, anodeChannel):
self.cathodeChannel = cathodeChannel
self.anodeChannel = anodeChannel
self.cathodeNtrodeID = cathodeID
self.anodeNtrodeID = anodeID
def setGroup(self, group):
self.group = group
def setSlot(self, slot):
self.slot = slot
class TrodesHardware:
def __init__(self, *, server_address="tcp://127.0.0.1:49152"):
self.service = socket.ServiceConsumer(
'trodes.hardware', server_address=server_address)
def settle_command_triggered(self):
data = ['tag', 'HRSettle']
return self.service.request(data)
def __startstop(self, startstop, slotgroup, number):
data = [
'tag',
'HRStartStopCommand',
{'startstop': startstop, 'slotgroup': slotgroup, 'number': number}
]
return self.service.request(data)
def sendStimulationStartSlot(self, slot):
return self.__startstop('START', 'SLOT', slot)
def sendStimulationStartGroup(self, group):
return self.__startstop('START', 'GROUP', group)
def sendStimulationStopSlot(self, slot):
return self.__startstop('STOP', 'SLOT', slot)
def sendStimulationStopGroup(self, group):
return self.__startstop('STOP', 'GROUP', group)
def sendStimulationParams(self, params):
'''
Takes StimulationCommand params
'''
data = [
'tag',
'HRSet',
{
'_group': params.group,
'slot': params.slot,
'cathodeChannel': params.cathodeChannel,
'anodeChannel': params.anodeChannel,
'cathodeNtrodeID': params.cathodeNtrodeID,
'anodeNtrodeID': params.anodeNtrodeID,
'leadingPulseWidth_Samples': params.leadingPulseWidth_Samples,
'leadingPulseAmplitude': params.leadingPulseAmplitude,
'secondPulseWidth_Samples': params.secondPulseWidth_Samples,
'secondPulseAmplitude': params.secondPulseAmplitude,
'interPhaseDwell_Samples': params.interPhaseDwell_Samples,
'pulsePeriod_Samples': params.pulsePeriod_Samples,
'startDelay_Samples': params.startDelay_Samples,
'numPulsesInTrain': params.numPulsesInTrain
}
]
return self.service.request(data)
def sendClearStimulationParams(self, slot):
'''
clear any existing commands in the given slot
'''
data = [
'tag',
'HRClear',
{ 'number': slot }
]
return self.service.request(data)
def sendGlobalStimulationSettings(self, settings):
def getScaleValue(scaleValue):
if scaleValue == CurrentScaling.max10nA:
return 'max10nA'
elif scaleValue == CurrentScaling.max20nA:
return 'max20nA'
elif scaleValue == CurrentScaling.max50nA:
return 'max50nA'
elif scaleValue == CurrentScaling.max100nA:
return 'max100nA'
elif scaleValue == CurrentScaling.max200nA:
return 'max200nA'
elif scaleValue == CurrentScaling.max500nA:
return 'max500nA'
elif scaleValue == CurrentScaling.max1uA:
return 'max1uA'
elif scaleValue == CurrentScaling.max2uA:
return 'max2uA'
elif scaleValue == CurrentScaling.max5uA:
return 'max5uA'
elif scaleValue == CurrentScaling.max10uA:
return 'max10uA'
else:
raise ValueError("unknown scaleValue enum")
data = [
'tag',
'HRSetGS',
{ 'scaleValue': getScaleValue(settings.scaleValue) }
]
return self.service.request(data)
def global_stimulation_command(self, resetSequencerCmd,
killStimulationCmd, clearDSPOffsetRemovalCmd,
enableStimulation):
data = [
'tag',
'HRSetGC',
{
'resetSequencerCmd': resetSequencerCmd,
'killStimulationCmd': killStimulationCmd,
'clearDSPOffsetRemovalCmd': clearDSPOffsetRemovalCmd,
'enableStimulation': enableStimulation,
}
]
return self.service.request(data)
def ecu_shortcut_message(self, fn):
data = [
'tag',
'HRSCTrig',
{ 'fn': fn }
]
return self.service.request(data)
class TrodesInfoRequester:
def __init__(self, *, server_address="tcp://127.0.0.1:49152"):
self.service = socket.ServiceConsumer(
'trodes.info', server_address=server_address)
def __request(self, item):
data = { 'request': item }
return self.service.request(data)
def request_time(self):
return self.__request('time')[2]['time']
def request_timerate(self):
return self.__request('timerate')[2]['timerate']
def request_config(self):
return self.__request('config')
class TrodesAnnotationRequester:
def __init__(self, *, server_address="tcp://127.0.0.1:49152"):
self.service = socket.ServiceConsumer(
'trodes.annotation', server_address=server_address)
def request_annotation(self, timestamp, sender, event):
data = {
'timestamp': timestamp,
'sender': sender,
'event': event
}
return self.service.request(data)
class TrodesAcquisitionRequester:
def __init__(self, *, server_address="tcp://127.0.0.1:49152"):
self.service = socket.ServiceConsumer(
'trodes.acquisition.service', server_address=server_address)
def __request(self, command, timestamp):
data = { 'command': command, 'timestamp': timestamp }
return self.service.request(data)
def request_play(self):
return self.__request('play', 0)
def request_pause(self):
return self.__request('pause', 0)
def request_stop(self):
return self.__request('stop', 0)
def request_seek(self, timestamp):
return self.__request('seek', timestamp)
class TrodesEventSubscriber:
def __init__(self, *, server_address="tcp://127.0.0.1:49152"):
self.subscriber = socket.SourceSubscriber(
'trodes.events', server_address=server_address)
def receive(self, *, noblock=False):
return self.subscriber.receive(noblock=noblock)
class TrodesAcquisitionSubscriber:
def __init__(self, *, server_address="tcp://127.0.0.1:49152"):
self.subscriber = socket.SourceSubscriber(
'trodes.acquisition', server_address=server_address)
def receive(self, *, noblock=False):
return self.subscriber.receive(noblock=noblock)
class TrodesSourceStatusSubscriber:
def __init__(self, *, server_address="tcp://127.0.0.1:49152"):
self.subscriber = socket.SourceSubscriber(
'trodes.source.pub', server_address=server_address)
def receive(self, *, noblock=False):
return self.subscriber.receive(noblock=noblock)
|
[
"enum.auto",
"trodesnetwork.socket.ServiceConsumer",
"trodesnetwork.socket.SourceSubscriber"
] |
[((407, 413), 'enum.auto', 'auto', ([], {}), '()\n', (411, 413), False, 'from enum import Enum, auto\n'), ((428, 434), 'enum.auto', 'auto', ([], {}), '()\n', (432, 434), False, 'from enum import Enum, auto\n'), ((449, 455), 'enum.auto', 'auto', ([], {}), '()\n', (453, 455), False, 'from enum import Enum, auto\n'), ((471, 477), 'enum.auto', 'auto', ([], {}), '()\n', (475, 477), False, 'from enum import Enum, auto\n'), ((493, 499), 'enum.auto', 'auto', ([], {}), '()\n', (497, 499), False, 'from enum import Enum, auto\n'), ((515, 521), 'enum.auto', 'auto', ([], {}), '()\n', (519, 521), False, 'from enum import Enum, auto\n'), ((535, 541), 'enum.auto', 'auto', ([], {}), '()\n', (539, 541), False, 'from enum import Enum, auto\n'), ((555, 561), 'enum.auto', 'auto', ([], {}), '()\n', (559, 561), False, 'from enum import Enum, auto\n'), ((575, 581), 'enum.auto', 'auto', ([], {}), '()\n', (579, 581), False, 'from enum import Enum, auto\n'), ((596, 602), 'enum.auto', 'auto', ([], {}), '()\n', (600, 602), False, 'from enum import Enum, auto\n'), ((1972, 2044), 'trodesnetwork.socket.ServiceConsumer', 'socket.ServiceConsumer', (['"""trodes.hardware"""'], {'server_address': 'server_address'}), "('trodes.hardware', server_address=server_address)\n", (1994, 2044), False, 'from trodesnetwork import socket\n'), ((6305, 6373), 'trodesnetwork.socket.ServiceConsumer', 'socket.ServiceConsumer', (['"""trodes.info"""'], {'server_address': 'server_address'}), "('trodes.info', server_address=server_address)\n", (6327, 6373), False, 'from trodesnetwork import socket\n'), ((6859, 6933), 'trodesnetwork.socket.ServiceConsumer', 'socket.ServiceConsumer', (['"""trodes.annotation"""'], {'server_address': 'server_address'}), "('trodes.annotation', server_address=server_address)\n", (6881, 6933), False, 'from trodesnetwork import socket\n'), ((7295, 7383), 'trodesnetwork.socket.ServiceConsumer', 'socket.ServiceConsumer', (['"""trodes.acquisition.service"""'], {'server_address': 'server_address'}), "('trodes.acquisition.service', server_address=\n server_address)\n", (7317, 7383), False, 'from trodesnetwork import socket\n'), ((7966, 8037), 'trodesnetwork.socket.SourceSubscriber', 'socket.SourceSubscriber', (['"""trodes.events"""'], {'server_address': 'server_address'}), "('trodes.events', server_address=server_address)\n", (7989, 8037), False, 'from trodesnetwork import socket\n'), ((8278, 8354), 'trodesnetwork.socket.SourceSubscriber', 'socket.SourceSubscriber', (['"""trodes.acquisition"""'], {'server_address': 'server_address'}), "('trodes.acquisition', server_address=server_address)\n", (8301, 8354), False, 'from trodesnetwork import socket\n'), ((8596, 8671), 'trodesnetwork.socket.SourceSubscriber', 'socket.SourceSubscriber', (['"""trodes.source.pub"""'], {'server_address': 'server_address'}), "('trodes.source.pub', server_address=server_address)\n", (8619, 8671), False, 'from trodesnetwork import socket\n')]
|
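A hedged sketch of composing a biphasic stimulation command with the classes in the record above: configure the global current scale, fill a StimulationCommand through its setters, upload it with sendStimulationParams, then start the slot. Every numeric value is a placeholder, and the server address defaults to the one hard-coded in the record.
hardware = TrodesHardware()
settings = GlobalStimulationSettings()
settings.setVoltageScale(CurrentScaling.max1uA)
hardware.sendGlobalStimulationSettings(settings)

cmd = StimulationCommand()
cmd.setBiphasicPulseShape(leadingPulseWidth_Samples=10, leadingPulseAmplitude=5,
                          secondPulseWidth_Samples=10, secondPulseAmplitude=-5,
                          interPhaseDwell_Samples=2, pulsePeriod_Samples=300,
                          startDelay_Samples=0)
cmd.setNumPulsesInTrain(10)
cmd.setChannels(cathodeID=1, cathodeChannel=0, anodeID=1, anodeChannel=1)
cmd.setGroup(0)
cmd.setSlot(1)
hardware.sendStimulationParams(cmd)
hardware.sendStimulationStartSlot(1)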
# This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import logging
import struct
from io import BytesIO
from typing import Any, Dict, Iterable, Optional
import requests
from .. import mapping, protobuf
from . import Transport, TransportException
LOG = logging.getLogger(__name__)
TREZORD_HOST = "http://127.0.0.1:21325"
TREZORD_ORIGIN_HEADER = {"Origin": "https://python.trezor.io"}
TREZORD_VERSION_MODERN = (2, 0, 25)
CONNECTION = requests.Session()
CONNECTION.headers.update(TREZORD_ORIGIN_HEADER)
def call_bridge(uri: str, data=None) -> requests.Response:
url = TREZORD_HOST + "/" + uri
r = CONNECTION.post(url, data=data)
if r.status_code != 200:
error_str = "trezord: {} failed with code {}: {}".format(
uri, r.status_code, r.json()["error"]
)
raise TransportException(error_str)
return r
def is_legacy_bridge() -> bool:
config = call_bridge("configure").json()
version_tuple = tuple(map(int, config["version"].split(".")))
return version_tuple < TREZORD_VERSION_MODERN
class BridgeHandle:
def __init__(self, transport: "BridgeTransport") -> None:
self.transport = transport
def read_buf(self) -> bytes:
raise NotImplementedError
def write_buf(self, buf: bytes) -> None:
raise NotImplementedError
class BridgeHandleModern(BridgeHandle):
def write_buf(self, buf: bytes) -> None:
self.transport._call("post", data=buf.hex())
def read_buf(self) -> bytes:
data = self.transport._call("read")
return bytes.fromhex(data.text)
class BridgeHandleLegacy(BridgeHandle):
def __init__(self, transport: "BridgeTransport") -> None:
super().__init__(transport)
self.request = None # type: Optional[str]
def write_buf(self, buf: bytes) -> None:
if self.request is not None:
raise TransportException("Can't write twice on legacy Bridge")
self.request = buf.hex()
def read_buf(self) -> bytes:
if self.request is None:
raise TransportException("Can't read without write on legacy Bridge")
try:
data = self.transport._call("call", data=self.request)
return bytes.fromhex(data.text)
finally:
self.request = None
class BridgeTransport(Transport):
"""
BridgeTransport implements transport through TREZOR Bridge (aka trezord).
"""
PATH_PREFIX = "bridge"
ENABLED = True
def __init__(
self, device: Dict[str, Any], legacy: bool, debug: bool = False
) -> None:
if legacy and debug:
raise TransportException("Debugging not supported on legacy Bridge")
self.device = device
self.session = None # type: Optional[str]
self.debug = debug
self.legacy = legacy
if legacy:
self.handle = BridgeHandleLegacy(self) # type: BridgeHandle
else:
self.handle = BridgeHandleModern(self)
def get_path(self) -> str:
return "%s:%s" % (self.PATH_PREFIX, self.device["path"])
def find_debug(self) -> "BridgeTransport":
if not self.device.get("debug"):
raise TransportException("Debug device not available")
return BridgeTransport(self.device, self.legacy, debug=True)
def _call(self, action: str, data: str = None) -> requests.Response:
session = self.session or "null"
uri = action + "/" + str(session)
if self.debug:
uri = "debug/" + uri
return call_bridge(uri, data=data)
@classmethod
def enumerate(cls) -> Iterable["BridgeTransport"]:
try:
legacy = is_legacy_bridge()
return [
BridgeTransport(dev, legacy) for dev in call_bridge("enumerate").json()
]
except Exception:
return []
def begin_session(self) -> None:
data = self._call("acquire/" + self.device["path"])
self.session = data.json()["session"]
def end_session(self) -> None:
if not self.session:
return
self._call("release")
self.session = None
def write(self, msg: protobuf.MessageType) -> None:
LOG.debug(
"sending message: {}".format(msg.__class__.__name__),
extra={"protobuf": msg},
)
buffer = BytesIO()
protobuf.dump_message(buffer, msg)
ser = buffer.getvalue()
header = struct.pack(">HL", mapping.get_type(msg), len(ser))
self.handle.write_buf(header + ser)
def read(self) -> protobuf.MessageType:
data = self.handle.read_buf()
headerlen = struct.calcsize(">HL")
msg_type, datalen = struct.unpack(">HL", data[:headerlen])
buffer = BytesIO(data[headerlen : headerlen + datalen])
msg = protobuf.load_message(buffer, mapping.get_class(msg_type))
LOG.debug(
"received message: {}".format(msg.__class__.__name__),
extra={"protobuf": msg},
)
return msg
|
[
"io.BytesIO",
"requests.Session",
"struct.unpack",
"struct.calcsize",
"logging.getLogger"
] |
[((884, 911), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (901, 911), False, 'import logging\n'), ((1067, 1085), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1083, 1085), False, 'import requests\n'), ((4962, 4971), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (4969, 4971), False, 'from io import BytesIO\n'), ((5264, 5286), 'struct.calcsize', 'struct.calcsize', (['""">HL"""'], {}), "('>HL')\n", (5279, 5286), False, 'import struct\n'), ((5315, 5353), 'struct.unpack', 'struct.unpack', (['""">HL"""', 'data[:headerlen]'], {}), "('>HL', data[:headerlen])\n", (5328, 5353), False, 'import struct\n'), ((5371, 5415), 'io.BytesIO', 'BytesIO', (['data[headerlen:headerlen + datalen]'], {}), '(data[headerlen:headerlen + datalen])\n', (5378, 5415), False, 'from io import BytesIO\n')]
|
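A hedged sketch of device discovery through the BridgeTransport in the record above: enumerate() asks the local Trezor Bridge for connected devices, and each transport exposes a path plus acquire/release style session handling. No message exchange is shown, since that needs a protobuf message type from the wider library.
devices = BridgeTransport.enumerate()   # [] if the Bridge is unreachable
for transport in devices:
    print(transport.get_path())         # "bridge:" followed by the device path
    transport.begin_session()
    try:
        pass  # transport.write(msg) / transport.read() would exchange messages here
    finally:
        transport.end_session()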
from django import forms
from .models import Project,Rate,Profile,DESIGN_CHOICES,USABILITY_CHOICES,CONTENT_CHOICES
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class GreazeRegistrationForm(UserCreationForm):
class Meta:
model = User
        fields = ['first_name', 'last_name', 'email', 'username','password1','password2' ]
widgets = {
'first_name':forms.TextInput(attrs = {'class':'form-control names', 'placeholder':"First Name", 'label': 'First Name'}),
'last_name':forms.TextInput(attrs = {'class':'form-control names', 'placeholder':"Second Name", 'label': 'Second Name'}),
'email':forms.TextInput(attrs = {'class':'form-control names', 'placeholder':"Email Address", 'label': 'Email Address'}),
'username':forms.TextInput(attrs = {'class':'form-control names', 'placeholder':"Username", 'label': 'Username'}),
'password1':forms.TextInput(attrs = {'class':'form-control ','type':'password', 'placeholder':"Password", 'label': 'Password'}),
'password2':forms.TextInput(attrs = {'class':'form-control', 'placeholder':"Confirm Password", 'label': 'Confirm Password'}),
}
class PostProjectForm(forms.ModelForm):
class Meta:
model = Project
fields = ['title','image','description','link']
widgets = {
'title':forms.TextInput(attrs={'class':'form-control','placeholder':'Project Title...'}),
'image':forms.TextInput(attrs= {'class':'form-control ','placeholder':'In a word...','label':'Put a name'}),
'description':forms.Textarea(attrs = {'class':'form-control','placeholder':"Write here..",'label':"Caption"}),
'link':forms.URLInput(attrs={'class':'form-control'}),
}
class RateForm(forms.ModelForm):
design = forms.ChoiceField(choices=DESIGN_CHOICES,widget=forms.Select(),required=True)
usability = forms.ChoiceField(choices=USABILITY_CHOICES,widget=forms.Select(),required=True)
content = forms.ChoiceField(choices=CONTENT_CHOICES,widget=forms.Select(),required=True)
class Meta:
model = Rate
fields = ['design','usability','content']
# widgets = {
# 'design': forms.SelectMultiple(attrs={'class':'form-control','name':'design'}),
# 'usability': forms.SelectMultiple(attrs={'class':'form-control','placeholder':'Input value','name':'usability'}),
# 'content': forms.SelectMultiple(attrs={'class':'form-control','name':'content'}),
# }
class EditProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['profile_photo','bio','gender','contact']
widgets = {
'profile_photo':forms.FileInput(attrs={'class':'form-control'}),
'bio':forms.Textarea(attrs={'class':'form-control ','placeholder':'Write here...','label':'Put a name'}),
}
class UpdateProjectForm(forms.ModelForm):
class Meta:
model = Project
fields = ['title','image','description','link']
widgets = {
'title':forms.TextInput(attrs={'class':'form-control','placeholder':'Project Title...'}),
'image':forms.TextInput(attrs= {'class':'form-control ','placeholder':'In a word...','label':'Put a name'}),
'description':forms.Textarea(attrs = {'class':'form-control','placeholder':"Caption",'label':"Caption"}),
'link':forms.URLInput(attrs={'class':'form-control'}),
}
|
[
"django.forms.Select",
"django.forms.URLInput",
"django.forms.TextInput",
"django.forms.FileInput",
"django.forms.Textarea"
] |
[((1421, 1508), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Project Title...'}"}), "(attrs={'class': 'form-control', 'placeholder':\n 'Project Title...'})\n", (1436, 1508), False, 'from django import forms\n'), ((1523, 1630), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control ', 'placeholder': 'In a word...', 'label': 'Put a name'\n }"}), "(attrs={'class': 'form-control ', 'placeholder':\n 'In a word...', 'label': 'Put a name'})\n", (1538, 1630), False, 'from django import forms\n'), ((1650, 1752), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Write here..', 'label': 'Caption'}"}), "(attrs={'class': 'form-control', 'placeholder':\n 'Write here..', 'label': 'Caption'})\n", (1664, 1752), False, 'from django import forms\n'), ((1766, 1813), 'django.forms.URLInput', 'forms.URLInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (1780, 1813), False, 'from django import forms\n'), ((3143, 3230), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Project Title...'}"}), "(attrs={'class': 'form-control', 'placeholder':\n 'Project Title...'})\n", (3158, 3230), False, 'from django import forms\n'), ((3245, 3352), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control ', 'placeholder': 'In a word...', 'label': 'Put a name'\n }"}), "(attrs={'class': 'form-control ', 'placeholder':\n 'In a word...', 'label': 'Put a name'})\n", (3260, 3352), False, 'from django import forms\n'), ((3372, 3469), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Caption', 'label': 'Caption'}"}), "(attrs={'class': 'form-control', 'placeholder': 'Caption',\n 'label': 'Caption'})\n", (3386, 3469), False, 'from django import forms\n'), ((3483, 3530), 'django.forms.URLInput', 'forms.URLInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (3497, 3530), False, 'from django import forms\n'), ((439, 549), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control names', 'placeholder': 'First Name', 'label':\n 'First Name'}"}), "(attrs={'class': 'form-control names', 'placeholder':\n 'First Name', 'label': 'First Name'})\n", (454, 549), False, 'from django import forms\n'), ((571, 683), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control names', 'placeholder': 'Second Name', 'label':\n 'Second Name'}"}), "(attrs={'class': 'form-control names', 'placeholder':\n 'Second Name', 'label': 'Second Name'})\n", (586, 683), False, 'from django import forms\n'), ((701, 817), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control names', 'placeholder': 'Email Address', 'label':\n 'Email Address'}"}), "(attrs={'class': 'form-control names', 'placeholder':\n 'Email Address', 'label': 'Email Address'})\n", (716, 817), False, 'from django import forms\n'), ((838, 944), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control names', 'placeholder': 'Username', 'label': 'Username'}"}), "(attrs={'class': 'form-control names', 'placeholder':\n 'Username', 'label': 'Username'})\n", (853, 944), False, 'from django import forms\n'), ((966, 1087), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control ', 'type': 'password', 'placeholder': 'Password',\n 
'label': 'Password'}"}), "(attrs={'class': 'form-control ', 'type': 'password',\n 'placeholder': 'Password', 'label': 'Password'})\n", (981, 1087), False, 'from django import forms\n'), ((1107, 1223), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'form-control', 'placeholder': 'Confirm Password', 'label':\n 'Confirm Password'}"}), "(attrs={'class': 'form-control', 'placeholder':\n 'Confirm Password', 'label': 'Confirm Password'})\n", (1122, 1223), False, 'from django import forms\n'), ((1920, 1934), 'django.forms.Select', 'forms.Select', ([], {}), '()\n', (1932, 1934), False, 'from django import forms\n'), ((2017, 2031), 'django.forms.Select', 'forms.Select', ([], {}), '()\n', (2029, 2031), False, 'from django import forms\n'), ((2110, 2124), 'django.forms.Select', 'forms.Select', ([], {}), '()\n', (2122, 2124), False, 'from django import forms\n'), ((2770, 2818), 'django.forms.FileInput', 'forms.FileInput', ([], {'attrs': "{'class': 'form-control'}"}), "(attrs={'class': 'form-control'})\n", (2785, 2818), False, 'from django import forms\n'), ((2837, 2944), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'class': 'form-control ', 'placeholder': 'Write here...', 'label':\n 'Put a name'}"}), "(attrs={'class': 'form-control ', 'placeholder':\n 'Write here...', 'label': 'Put a name'})\n", (2851, 2944), False, 'from django import forms\n')]
|
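A hedged sketch of a registration view driving GreazeRegistrationForm from the record above. The view name, template path, and redirect target are assumptions; only the form class itself comes from the record, and the handling pattern is standard Django.
# Hypothetical view; GreazeRegistrationForm is the form defined in the record.
from django.shortcuts import render, redirect
from .forms import GreazeRegistrationForm

def register(request):
    if request.method == 'POST':
        form = GreazeRegistrationForm(request.POST)
        if form.is_valid():
            form.save()                 # UserCreationForm creates the User
            return redirect('login')  # assumed URL name
    else:
        form = GreazeRegistrationForm()
    return render(request, 'registration/register.html', {'form': form})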
# encoding: utf-8
__author__ = "<NAME>"
# Taken and adapted from: https://github.com/wdika/NeMo/blob/main/tests/core/test_optimizers_schedulers.py
import math
import os
import random
import shutil
from abc import ABC
import numpy as np
import omegaconf
import pytest
import pytorch_lightning as pl
import torch
import torch.optim
from mridc.core import optim
from mridc.core.conf import optimizers
from mridc.core.conf.optimizers import NovogradParams, SGDParams
from mridc.core.conf.schedulers import CosineAnnealingParams
from mridc.core.optim.lr_scheduler import AVAILABLE_SCHEDULERS, SquareRootAnnealing
from mridc.core.optim.novograd import Novograd
from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer
from mridc.utils import logging
class TempModel(torch.nn.Module):
"""Create a dummy model for testing."""
def __init__(self):
super(TempModel, self).__init__()
self.layer = torch.nn.Linear(5, 1)
def forward(self, x):
"""Forward pass."""
x = self.layer(x)
return x
class OptCounter(torch.optim.SGD):
"""A simple optimizer that counts the number of calls to step()."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for group in self.param_groups:
group.setdefault("count", 0)
def step(self, closure=None):
"""Performs a single optimization step."""
for group in self.param_groups:
group["count"] += 1
super().step(closure)
class RandomDataset(torch.utils.data.Dataset):
"""A dataset that returns random tensors."""
def __init__(self, dataset_len):
super().__init__()
self.__dataset_len = dataset_len
def __getitem__(self, *args):
return torch.randn(2)
def __len__(self):
return self.__dataset_len
class ExampleModel(pl.LightningModule, ABC):
"""A dummy model for testing."""
def __init__(self, batch_size, dataset_len, drop_last, max_steps):
super().__init__()
self.l1 = torch.nn.modules.Linear(in_features=2, out_features=1)
self.batch_size = batch_size
self.dataset_len = dataset_len
self.drop_last = drop_last
self.max_steps = max_steps
self.my_opt = None
def train_dataloader(self):
"""Return a training data loader."""
dataset = RandomDataset(self.dataset_len)
return torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, drop_last=self.drop_last)
def training_step(self, batch, batch_idx):
"""Set training step."""
output = self.l1(batch)
output = torch.nn.functional.l1_loss(output, torch.ones(output.size()).to(output.device))
return {"loss": output}
def configure_optimizers(self):
"""Configure optimizers for the model."""
self.my_opt = OptCounter(self.parameters(), lr=0.02)
return self.my_opt
class Callback(pl.callbacks.Callback):
"""A dummy callback for testing."""
@pl.utilities.distributed.rank_zero_only
def on_train_end(self, trainer, module):
"""On train end, check that the number of steps is correct"""
count = module.my_opt.param_groups[0]["count"]
if trainer.global_step != count or trainer.global_step != module.max_steps:
logging.debug(f"max_epochs: {trainer.max_epochs}")
logging.debug(f"accumulate_grad_batches: {trainer.accumulate_grad_batches}")
logging.debug(f"limit_train_batches: {trainer.limit_train_batches}")
logging.debug(f"num_processes: {trainer.num_processes}")
logging.debug(f"batch_size: {module.batch_size}")
logging.debug(f"dataset_len: {module.dataset_len}")
logging.debug(f"drop_last: {module.drop_last}")
logging.debug(f"{len(trainer.train_dataloader)}")
logging.debug(f"{trainer.num_training_batches}")
self.assert_counts(trainer, module, count)
@staticmethod
def assert_counts(trainer, module, count):
"""Assert that the number of steps is correct"""
if trainer.global_step != count:
raise AssertionError(f"{trainer.global_step} != {count} != {module.max_steps}")
if trainer.global_step != module.max_steps:
raise AssertionError(f"{trainer.global_step} != {count} != {module.max_steps}")
class SchedulerNoOpCallback(Callback):
"""A dummy callback for testing."""
@staticmethod
def on_train_batch_end(trainer: pl.Trainer, pl_module, outputs, batch, batch_idx):
"""On each training batch end"""
# pl_module.max_steps is "original" max steps without trainer extra steps.
if (trainer.global_step + 1) % 3 == 0 and (trainer.global_step + 1) < pl_module.max_steps:
schedulers = trainer.lr_schedulers
for scheduler in schedulers:
# Decrement the counter by 2, then perform a scheduler.step() to perform a no-up
# as well as update the optimizer lr in all param groups
scheduler["scheduler"].last_epoch -= 2
scheduler["scheduler"].step()
# Increase the max step count by 1
trainer.fit_loop.max_steps = trainer.fit_loop.max_steps + 1
def assert_counts(self, trainer, module, count):
"""This is a no-op callback, so the counts should not change"""
num_skips = torch.div(module.max_steps, 3, rounding_mode="trunc")
extra_steps = module.max_steps + num_skips
if trainer.global_step != count:
raise AssertionError(f"{trainer.global_step} != {count} != {extra_steps}")
if trainer.global_step != extra_steps:
raise AssertionError(f"{trainer.global_step} != {count} != {extra_steps}")
class TestOptimizersSchedulers:
"""Test the optimizers and schedulers."""
INITIAL_LR = 0.1
MIN_LR = 1e-3
MAX_STEPS = 10
# fused_adam is looking for CUDA and this test is being run on CPU only tests
@pytest.mark.unit
def test_get_optimizer(self):
"""Test that the optimizer is correctly created"""
model = TempModel()
for opt_name in AVAILABLE_OPTIMIZERS:
if opt_name == "fused_adam" and not torch.cuda.is_available():
continue
opt_cls = get_optimizer(opt_name)
if opt_name == "adafactor":
# Adafactor's default mode uses relative_step without any lr.
opt = opt_cls(model.parameters())
else:
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
if not isinstance(opt, AVAILABLE_OPTIMIZERS[opt_name]):
raise AssertionError
@pytest.mark.unit
def test_register_optimizer(self):
"""Test that we can register a new optimizer"""
class TempOpt(torch.optim.SGD):
"""A dummy optimizer"""
class TempOptParams(optimizers.SGDParams):
"""A dummy optimizer params"""
register_optimizer("TempOpt", TempOpt, TempOptParams)
model = TempModel()
opt_cls = get_optimizer("TempOpt")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
if not isinstance(opt, TempOpt):
raise AssertionError
@pytest.mark.unit
def test_optim_config_parse_bypass(self):
"""Test that the optimizer config is parsed correctly when the optimizer is not registered."""
basic_optim_config = {"weight_decay": 0.001, "betas": [0.8, 0.5]}
parsed_params = parse_optimizer_args("novograd", basic_optim_config)
if parsed_params["weight_decay"] != basic_optim_config["weight_decay"]:
raise AssertionError
if parsed_params["betas"][0] != basic_optim_config["betas"][0]:
raise AssertionError
if parsed_params["betas"][1] != basic_optim_config["betas"][1]:
raise AssertionError
dict_config = omegaconf.OmegaConf.create(basic_optim_config)
parsed_params = parse_optimizer_args("novograd", dict_config)
if parsed_params["weight_decay"] != dict_config["weight_decay"]:
raise AssertionError
if parsed_params["betas"][0] != dict_config["betas"][0]:
raise AssertionError
if parsed_params["betas"][1] != dict_config["betas"][1]:
raise AssertionError
@pytest.mark.unit
def test_optim_config_parse_arg_by_target(self):
"""Test that the optimizer config is parsed correctly by target."""
basic_optim_config = {
"_target_": "mridc.core.conf.optimizers.NovogradParams",
"params": {"weight_decay": 0.001, "betas": [0.8, 0.5]},
}
basic_optim_config = omegaconf.OmegaConf.create(basic_optim_config)
parsed_params = parse_optimizer_args("novograd", basic_optim_config)
if parsed_params["weight_decay"] != basic_optim_config["params"]["weight_decay"]:
raise AssertionError
if parsed_params["betas"][0] != basic_optim_config["params"]["betas"][0]:
raise AssertionError
if parsed_params["betas"][1] != basic_optim_config["params"]["betas"][1]:
raise AssertionError
dict_config = omegaconf.OmegaConf.create(basic_optim_config)
parsed_params = parse_optimizer_args("novograd", dict_config)
if parsed_params["weight_decay"] != dict_config["params"]["weight_decay"]:
raise AssertionError
if parsed_params["betas"][0] != dict_config["params"]["betas"][0]:
raise AssertionError
if parsed_params["betas"][1] != dict_config["params"]["betas"][1]:
raise AssertionError
# Names are ignored when passing class path
# This will be captured during optimizer instantiation
output_config = parse_optimizer_args("sgd", dict_config)
sgd_config = vars(SGDParams())
novograd_config = vars(NovogradParams())
if set(output_config.keys()) == set(sgd_config.keys()):
raise AssertionError
if set(output_config.keys()) != set(novograd_config):
raise AssertionError
@pytest.mark.unit
def test_get_scheduler(self):
"""Test that get_scheduler returns the correct scheduler class."""
model = TempModel()
optimizer = Novograd(model.parameters(), lr=self.INITIAL_LR)
for sched_name in AVAILABLE_SCHEDULERS:
sched_cls = optim.lr_scheduler.get_scheduler(sched_name)
try:
sched = sched_cls(optimizer)
if not isinstance(sched, AVAILABLE_SCHEDULERS[sched_name]):
raise AssertionError
continue
except Exception:
pass
try:
sched = sched_cls(optimizer, max_steps=self.MAX_STEPS)
if not isinstance(sched, AVAILABLE_SCHEDULERS[sched_name]):
raise AssertionError
continue
except Exception:
pass
@pytest.mark.unit
def test_register_scheduler(self):
"""Test registering a new scheduler"""
class TempSched(optim.lr_scheduler.CosineAnnealing):
"""Temporary scheduler class."""
class TempSchedParams(CosineAnnealingParams):
"""Temporary scheduler class."""
optim.lr_scheduler.register_scheduler("TempSched", TempSched, TempSchedParams)
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
sched_cls = optim.lr_scheduler.get_scheduler("TempSched")
sched = sched_cls(opt, max_steps=self.MAX_STEPS)
if not isinstance(sched, TempSched):
raise AssertionError
@pytest.mark.unit
def test_sched_config_parse_simple(self):
"""Test that scheduler config is parsed correctly"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
basic_sched_config = {"name": "CosineAnnealing", "max_steps": 10}
scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, basic_sched_config)
if not isinstance(scheduler_setup["scheduler"], optim.lr_scheduler.CosineAnnealing):
raise AssertionError
dict_config = omegaconf.OmegaConf.create(basic_sched_config)
scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, dict_config)
if not isinstance(scheduler_setup["scheduler"], optim.lr_scheduler.CosineAnnealing):
raise AssertionError
@pytest.mark.unit
def test_sched_config_parse_from_cls(self):
"""Test that we can parse a scheduler from a class"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
basic_sched_config = {
"_target_": "mridc.core.conf.schedulers.CosineAnnealingParams",
"params": {"min_lr": 0.1},
"max_steps": self.MAX_STEPS,
}
scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, basic_sched_config)
if not isinstance(scheduler_setup["scheduler"], optim.lr_scheduler.CosineAnnealing):
raise AssertionError
dict_config = omegaconf.OmegaConf.create(basic_sched_config)
scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, dict_config)
if not isinstance(scheduler_setup["scheduler"], optim.lr_scheduler.CosineAnnealing):
raise AssertionError
@pytest.mark.unit
def test_WarmupPolicy(self):
"""Test WarmupPolicy"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.WarmupPolicy(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] != self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.WarmupPolicy(opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 4:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] != self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_WarmupHoldPolicy(self):
"""Test WarmupHoldPolicy"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.WarmupHoldPolicy(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] != self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr <= self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.WarmupHoldPolicy(opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 4:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] != self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr <= self.MIN_LR:
raise AssertionError
# Warmup + Hold steps available
policy = optim.lr_scheduler.WarmupHoldPolicy(
opt, warmup_steps=5, hold_steps=3, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 4:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] != self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr < self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_WarmupAnnealing(self):
"""Test that the warmup annealing policy works as expected."""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.WarmupAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr < self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.WarmupAnnealing(opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 5:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] >= self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
# Warmup + Hold steps available
policy = optim.lr_scheduler.WarmupHoldPolicy(
opt, warmup_steps=5, hold_steps=3, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 4:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] != self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr < self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_SquareAnnealing(self):
"""Test SquareAnnealing"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.SquareAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.SquareAnnealing(opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 5:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] >= self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_SquareRootAnnealing(self):
"""Test SquareRootAnnealing"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = SquareRootAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.SquareRootAnnealing(
opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 5:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] >= self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_CosineAnnealing(self):
"""Test CosineAnnealing"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.CosineAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.CosineAnnealing(opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 5:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] >= self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
# Warmup + Constant steps available
policy = optim.lr_scheduler.CosineAnnealing(
opt, warmup_steps=3, constant_steps=2, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 3:
if policy.get_last_lr()[0] > self.INITIAL_LR + 1e-5:
raise AssertionError
elif 3 < i <= 8:
if policy.get_last_lr()[0] != policy._get_lr(i)[0]:
raise AssertionError
elif policy.get_last_lr()[0] != self.MIN_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_PolynomialDecayAnnealing(self):
"""Test PolynomialDecayAnnealing"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.PolynomialDecayAnnealing(
opt, power=2, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.PolynomialDecayAnnealing(
opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 5:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] >= self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_PolynomialHoldDecayAnnealing(self):
"""Test PolynomialHoldDecayAnnealing"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.PolynomialHoldDecayAnnealing(
opt, power=2, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr <= self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.PolynomialHoldDecayAnnealing(
opt, power=2, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr < self.MIN_LR:
raise AssertionError
# Warmup + Hold steps available
policy = optim.lr_scheduler.PolynomialHoldDecayAnnealing(
opt, warmup_steps=5, hold_steps=3, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR, power=2
)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 4:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif i <= 8:
if policy.get_last_lr()[0] < self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr < self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_InverseSquareRootAnnealing(self):
"""Test InverseSquareRootAnnealing"""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.InverseSquareRootAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
for _ in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
# Warmup steps available
policy = optim.lr_scheduler.InverseSquareRootAnnealing(
opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
if initial_lr >= self.INITIAL_LR:
raise AssertionError
for i in range(self.MAX_STEPS):
if i <= 5:
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
elif policy.get_last_lr()[0] >= self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
if final_lr != self.MIN_LR:
raise AssertionError
@pytest.mark.unit
def test_CosineAnnealing_with_noop_steps(self):
"""Test CosineAnnealing with noop steps."""
model = TempModel()
opt_cls = get_optimizer("novograd")
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.CosineAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
if initial_lr != self.INITIAL_LR:
raise AssertionError
update_steps = 0
for i in range(self.MAX_STEPS):
if policy.get_last_lr()[0] > self.INITIAL_LR:
raise AssertionError
opt.step()
policy.step()
# Perform a No-Op for scheduler every 2 steps
if i % 2 == 0:
policy.last_epoch -= 1
else:
update_steps += 1
policy.step()
update_steps += 1
if update_steps >= self.MAX_STEPS:
raise AssertionError
final_lr = policy.get_last_lr()[0]
if final_lr <= self.MIN_LR:
raise AssertionError
# update step = true number of updates performed after some number of skipped steps
true_end_lr = policy._get_lr(step=update_steps)[0]
if final_lr != true_end_lr:
raise AssertionError
@pytest.mark.unit
@pytest.mark.run_only_on("CPU")
def test_max_step_computation(self):
"""Test that the max_step computation is correct."""
def train(
max_epochs, accumulate_grad_batches, limit_train_batches, num_processes, batch_size, dataset_len, drop_last
):
"""Set up the training environment"""
trainer = pl.Trainer(
max_epochs=max_epochs,
strategy="ddp_spawn",
accelerator="cpu",
num_processes=num_processes,
accumulate_grad_batches=accumulate_grad_batches,
limit_train_batches=limit_train_batches,
enable_checkpointing=False,
progress_bar_refresh_rate=0,
weights_summary=None,
)
max_steps = optim.lr_scheduler.compute_max_steps(
max_epochs,
accumulate_grad_batches,
limit_train_batches,
num_processes,
dataset_len,
batch_size,
drop_last,
)
model = ExampleModel(batch_size, dataset_len, drop_last, max_steps)
trainer.callbacks.append(Callback())
trainer.fit(model)
# This test will break once we and lightning upgrade to pytorch 1.7.0 due to a bug fix in pytorch 1.7.0
train(
31,
accumulate_grad_batches=1,
limit_train_batches=1.0,
num_processes=9,
batch_size=60,
dataset_len=1613,
drop_last=True,
)
train(
5,
accumulate_grad_batches=1,
limit_train_batches=0.17382691901706027,
num_processes=4,
batch_size=97,
dataset_len=498,
drop_last=False,
)
train(
5,
accumulate_grad_batches=8,
limit_train_batches=0.1663306588594945,
num_processes=4,
batch_size=54,
dataset_len=629,
drop_last=True,
)
train(
5,
accumulate_grad_batches=1,
limit_train_batches=0.2121376533631948,
num_processes=1,
batch_size=68,
dataset_len=488,
drop_last=False,
)
for _ in range(5):
drop_last = bool(random.randint(0, 1))
accumulate_grad_batches = random.randint(1, 10)
limit_train_batches_int = random.randint(1, 10)
limit_train_batches_float = random.uniform(0, 1)
limit_train_batches = random.choice([limit_train_batches_int, limit_train_batches_float])
max_epochs = random.randint(4, 20)
num_processes = random.randint(1, 5)
dataset_len = random.randint(20, num_processes * 500)
batch_size = random.randint(
math.ceil(5.0 / num_processes), min(np.floor_divide(dataset_len, num_processes), 128)
)
train(
max_epochs,
accumulate_grad_batches,
limit_train_batches,
num_processes,
batch_size,
dataset_len,
drop_last,
)
@pytest.mark.unit
@pytest.mark.run_only_on("CPU")
def test_max_step_computation_with_sched_no_ops(self):
"""Test that max_step is computed correctly when scheduler has no_ops"""
def train(
max_steps, accumulate_grad_batches, limit_train_batches, num_processes, batch_size, dataset_len, drop_last
):
"""Set up trainer and model"""
trainer = pl.Trainer(
max_steps=max_steps,
strategy="ddp_spawn",
accelerator="cpu",
num_processes=num_processes,
accumulate_grad_batches=accumulate_grad_batches,
limit_train_batches=limit_train_batches,
enable_checkpointing=False,
progress_bar_refresh_rate=0,
weights_summary=None,
)
model = ExampleModel(batch_size, dataset_len, drop_last, max_steps)
trainer.callbacks.append(SchedulerNoOpCallback())
trainer.fit(model)
# This test will break once we and lightning upgrade to pytorch 1.7.0 due to a bug fix in pytorch 1.7.0
train(
max_steps=20,
accumulate_grad_batches=1,
limit_train_batches=1.0,
num_processes=4,
batch_size=60,
dataset_len=2000,
drop_last=True,
)
@staticmethod
def test_remove_logs_left():
"""Remove logs left by the trainer."""
if os.path.exists(os.path.join(os.getcwd(), "lightning_logs")):
shutil.rmtree(os.path.join(os.getcwd(), "lightning_logs"))
|
[
"pytorch_lightning.Trainer",
"mridc.core.optim.lr_scheduler.register_scheduler",
"torch.randn",
"mridc.core.optim.optimizers.register_optimizer",
"mridc.core.optim.lr_scheduler.WarmupHoldPolicy",
"pytest.mark.run_only_on",
"mridc.core.optim.lr_scheduler.WarmupPolicy",
"mridc.core.optim.lr_scheduler.SquareAnnealing",
"mridc.core.optim.lr_scheduler.get_scheduler",
"mridc.core.optim.optimizers.get_optimizer",
"random.randint",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"mridc.core.optim.lr_scheduler.PolynomialDecayAnnealing",
"mridc.core.optim.lr_scheduler.compute_max_steps",
"torch.nn.modules.Linear",
"math.ceil",
"numpy.floor_divide",
"mridc.core.optim.lr_scheduler.SquareRootAnnealing",
"mridc.core.optim.lr_scheduler.CosineAnnealing",
"mridc.core.conf.optimizers.SGDParams",
"mridc.core.conf.optimizers.NovogradParams",
"torch.cuda.is_available",
"mridc.utils.logging.debug",
"mridc.core.optim.lr_scheduler.InverseSquareRootAnnealing",
"random.uniform",
"os.getcwd",
"mridc.core.optim.lr_scheduler.PolynomialHoldDecayAnnealing",
"random.choice",
"mridc.core.optim.optimizers.parse_optimizer_args",
"omegaconf.OmegaConf.create",
"mridc.core.optim.lr_scheduler.WarmupAnnealing",
"mridc.core.optim.lr_scheduler.prepare_lr_scheduler",
"torch.div"
] |
[((32233, 32263), 'pytest.mark.run_only_on', 'pytest.mark.run_only_on', (['"""CPU"""'], {}), "('CPU')\n", (32256, 32263), False, 'import pytest\n'), ((35520, 35550), 'pytest.mark.run_only_on', 'pytest.mark.run_only_on', (['"""CPU"""'], {}), "('CPU')\n", (35543, 35550), False, 'import pytest\n'), ((977, 998), 'torch.nn.Linear', 'torch.nn.Linear', (['(5)', '(1)'], {}), '(5, 1)\n', (992, 998), False, 'import torch\n'), ((1813, 1827), 'torch.randn', 'torch.randn', (['(2)'], {}), '(2)\n', (1824, 1827), False, 'import torch\n'), ((2087, 2141), 'torch.nn.modules.Linear', 'torch.nn.modules.Linear', ([], {'in_features': '(2)', 'out_features': '(1)'}), '(in_features=2, out_features=1)\n', (2110, 2141), False, 'import torch\n'), ((2459, 2554), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'drop_last': 'self.drop_last'}), '(dataset, batch_size=self.batch_size, drop_last=\n self.drop_last)\n', (2486, 2554), False, 'import torch\n'), ((5448, 5501), 'torch.div', 'torch.div', (['module.max_steps', '(3)'], {'rounding_mode': '"""trunc"""'}), "(module.max_steps, 3, rounding_mode='trunc')\n", (5457, 5501), False, 'import torch\n'), ((7034, 7087), 'mridc.core.optim.optimizers.register_optimizer', 'register_optimizer', (['"""TempOpt"""', 'TempOpt', 'TempOptParams'], {}), "('TempOpt', TempOpt, TempOptParams)\n", (7052, 7087), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((7135, 7159), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""TempOpt"""'], {}), "('TempOpt')\n", (7148, 7159), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((7567, 7619), 'mridc.core.optim.optimizers.parse_optimizer_args', 'parse_optimizer_args', (['"""novograd"""', 'basic_optim_config'], {}), "('novograd', basic_optim_config)\n", (7587, 7619), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((7966, 8012), 'omegaconf.OmegaConf.create', 'omegaconf.OmegaConf.create', (['basic_optim_config'], {}), '(basic_optim_config)\n', (7992, 8012), False, 'import omegaconf\n'), ((8037, 8082), 'mridc.core.optim.optimizers.parse_optimizer_args', 'parse_optimizer_args', (['"""novograd"""', 'dict_config'], {}), "('novograd', dict_config)\n", (8057, 8082), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((8744, 8790), 'omegaconf.OmegaConf.create', 'omegaconf.OmegaConf.create', (['basic_optim_config'], {}), '(basic_optim_config)\n', (8770, 8790), False, 'import omegaconf\n'), ((8815, 8867), 'mridc.core.optim.optimizers.parse_optimizer_args', 'parse_optimizer_args', (['"""novograd"""', 'basic_optim_config'], {}), "('novograd', basic_optim_config)\n", (8835, 8867), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((9244, 9290), 'omegaconf.OmegaConf.create', 'omegaconf.OmegaConf.create', (['basic_optim_config'], {}), '(basic_optim_config)\n', (9270, 9290), False, 'import omegaconf\n'), ((9315, 9360), 'mridc.core.optim.optimizers.parse_optimizer_args', 'parse_optimizer_args', (['"""novograd"""', 'dict_config'], {}), "('novograd', dict_config)\n", (9335, 9360), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, 
register_optimizer\n'), ((9833, 9873), 'mridc.core.optim.optimizers.parse_optimizer_args', 'parse_optimizer_args', (['"""sgd"""', 'dict_config'], {}), "('sgd', dict_config)\n", (9853, 9873), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((11365, 11443), 'mridc.core.optim.lr_scheduler.register_scheduler', 'optim.lr_scheduler.register_scheduler', (['"""TempSched"""', 'TempSched', 'TempSchedParams'], {}), "('TempSched', TempSched, TempSchedParams)\n", (11402, 11443), False, 'from mridc.core import optim\n'), ((11491, 11516), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (11504, 11516), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((11599, 11644), 'mridc.core.optim.lr_scheduler.get_scheduler', 'optim.lr_scheduler.get_scheduler', (['"""TempSched"""'], {}), "('TempSched')\n", (11631, 11644), False, 'from mridc.core import optim\n'), ((11957, 11982), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (11970, 11982), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((12146, 12210), 'mridc.core.optim.lr_scheduler.prepare_lr_scheduler', 'optim.lr_scheduler.prepare_lr_scheduler', (['opt', 'basic_sched_config'], {}), '(opt, basic_sched_config)\n', (12185, 12210), False, 'from mridc.core import optim\n'), ((12360, 12406), 'omegaconf.OmegaConf.create', 'omegaconf.OmegaConf.create', (['basic_sched_config'], {}), '(basic_sched_config)\n', (12386, 12406), False, 'import omegaconf\n'), ((12433, 12490), 'mridc.core.optim.lr_scheduler.prepare_lr_scheduler', 'optim.lr_scheduler.prepare_lr_scheduler', (['opt', 'dict_config'], {}), '(opt, dict_config)\n', (12472, 12490), False, 'from mridc.core import optim\n'), ((12796, 12821), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (12809, 12821), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((13108, 13172), 'mridc.core.optim.lr_scheduler.prepare_lr_scheduler', 'optim.lr_scheduler.prepare_lr_scheduler', (['opt', 'basic_sched_config'], {}), '(opt, basic_sched_config)\n', (13147, 13172), False, 'from mridc.core import optim\n'), ((13322, 13368), 'omegaconf.OmegaConf.create', 'omegaconf.OmegaConf.create', (['basic_sched_config'], {}), '(basic_sched_config)\n', (13348, 13368), False, 'import omegaconf\n'), ((13395, 13452), 'mridc.core.optim.lr_scheduler.prepare_lr_scheduler', 'optim.lr_scheduler.prepare_lr_scheduler', (['opt', 'dict_config'], {}), '(opt, dict_config)\n', (13434, 13452), False, 'from mridc.core import optim\n'), ((13713, 13738), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (13726, 13738), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((13844, 13931), 'mridc.core.optim.lr_scheduler.WarmupPolicy', 'optim.lr_scheduler.WarmupPolicy', (['opt'], {'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, max_steps=self.MAX_STEPS, min_lr=self.\n MIN_LR)\n', (13875, 13931), False, 'from mridc.core import optim\n'), ((14421, 14524), 'mridc.core.optim.lr_scheduler.WarmupPolicy', 'optim.lr_scheduler.WarmupPolicy', 
(['opt'], {'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, max_steps=self.\n MAX_STEPS, min_lr=self.MIN_LR)\n', (14452, 14524), False, 'from mridc.core import optim\n'), ((15233, 15258), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (15246, 15258), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((15364, 15455), 'mridc.core.optim.lr_scheduler.WarmupHoldPolicy', 'optim.lr_scheduler.WarmupHoldPolicy', (['opt'], {'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, max_steps=self.MAX_STEPS, min_lr=\n self.MIN_LR)\n', (15399, 15455), False, 'from mridc.core import optim\n'), ((15945, 16052), 'mridc.core.optim.lr_scheduler.WarmupHoldPolicy', 'optim.lr_scheduler.WarmupHoldPolicy', (['opt'], {'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, max_steps=self.\n MAX_STEPS, min_lr=self.MIN_LR)\n', (15980, 16052), False, 'from mridc.core import optim\n'), ((16678, 16798), 'mridc.core.optim.lr_scheduler.WarmupHoldPolicy', 'optim.lr_scheduler.WarmupHoldPolicy', (['opt'], {'warmup_steps': '(5)', 'hold_steps': '(3)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, hold_steps=3,\n max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)\n', (16713, 16798), False, 'from mridc.core import optim\n'), ((17563, 17588), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (17576, 17588), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((17694, 17784), 'mridc.core.optim.lr_scheduler.WarmupAnnealing', 'optim.lr_scheduler.WarmupAnnealing', (['opt'], {'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, max_steps=self.MAX_STEPS, min_lr=\n self.MIN_LR)\n', (17728, 17784), False, 'from mridc.core import optim\n'), ((18272, 18378), 'mridc.core.optim.lr_scheduler.WarmupAnnealing', 'optim.lr_scheduler.WarmupAnnealing', (['opt'], {'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, max_steps=self.\n MAX_STEPS, min_lr=self.MIN_LR)\n', (18306, 18378), False, 'from mridc.core import optim\n'), ((19004, 19124), 'mridc.core.optim.lr_scheduler.WarmupHoldPolicy', 'optim.lr_scheduler.WarmupHoldPolicy', (['opt'], {'warmup_steps': '(5)', 'hold_steps': '(3)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, hold_steps=3,\n max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)\n', (19039, 19124), False, 'from mridc.core import optim\n'), ((19853, 19878), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (19866, 19878), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((19984, 20074), 'mridc.core.optim.lr_scheduler.SquareAnnealing', 'optim.lr_scheduler.SquareAnnealing', (['opt'], {'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, max_steps=self.MAX_STEPS, min_lr=\n self.MIN_LR)\n', (20018, 20074), False, 'from mridc.core import optim\n'), ((20563, 20669), 'mridc.core.optim.lr_scheduler.SquareAnnealing', 'optim.lr_scheduler.SquareAnnealing', (['opt'], {'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, max_steps=self.\n MAX_STEPS, 
min_lr=self.MIN_LR)\n', (20597, 20669), False, 'from mridc.core import optim\n'), ((21385, 21410), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (21398, 21410), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((21516, 21586), 'mridc.core.optim.lr_scheduler.SquareRootAnnealing', 'SquareRootAnnealing', (['opt'], {'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)\n', (21535, 21586), False, 'from mridc.core.optim.lr_scheduler import AVAILABLE_SCHEDULERS, SquareRootAnnealing\n'), ((22080, 22190), 'mridc.core.optim.lr_scheduler.SquareRootAnnealing', 'optim.lr_scheduler.SquareRootAnnealing', (['opt'], {'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, max_steps=self.\n MAX_STEPS, min_lr=self.MIN_LR)\n', (22118, 22190), False, 'from mridc.core import optim\n'), ((22920, 22945), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (22933, 22945), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((23051, 23141), 'mridc.core.optim.lr_scheduler.CosineAnnealing', 'optim.lr_scheduler.CosineAnnealing', (['opt'], {'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, max_steps=self.MAX_STEPS, min_lr=\n self.MIN_LR)\n', (23085, 23141), False, 'from mridc.core import optim\n'), ((23630, 23736), 'mridc.core.optim.lr_scheduler.CosineAnnealing', 'optim.lr_scheduler.CosineAnnealing', (['opt'], {'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, max_steps=self.\n MAX_STEPS, min_lr=self.MIN_LR)\n', (23664, 23736), False, 'from mridc.core import optim\n'), ((24366, 24489), 'mridc.core.optim.lr_scheduler.CosineAnnealing', 'optim.lr_scheduler.CosineAnnealing', (['opt'], {'warmup_steps': '(3)', 'constant_steps': '(2)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=3, constant_steps=2,\n max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)\n', (24400, 24489), False, 'from mridc.core import optim\n'), ((25379, 25404), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (25392, 25404), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((25510, 25618), 'mridc.core.optim.lr_scheduler.PolynomialDecayAnnealing', 'optim.lr_scheduler.PolynomialDecayAnnealing', (['opt'], {'power': '(2)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, power=2, max_steps=self.\n MAX_STEPS, min_lr=self.MIN_LR)\n', (25553, 25618), False, 'from mridc.core import optim\n'), ((26129, 26244), 'mridc.core.optim.lr_scheduler.PolynomialDecayAnnealing', 'optim.lr_scheduler.PolynomialDecayAnnealing', (['opt'], {'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5, max_steps=\n self.MAX_STEPS, min_lr=self.MIN_LR)\n', (26172, 26244), False, 'from mridc.core import optim\n'), ((27000, 27025), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (27013, 27025), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((27131, 27243), 
'mridc.core.optim.lr_scheduler.PolynomialHoldDecayAnnealing', 'optim.lr_scheduler.PolynomialHoldDecayAnnealing', (['opt'], {'power': '(2)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, power=2, max_steps=\n self.MAX_STEPS, min_lr=self.MIN_LR)\n', (27178, 27243), False, 'from mridc.core import optim\n'), ((27754, 27882), 'mridc.core.optim.lr_scheduler.PolynomialHoldDecayAnnealing', 'optim.lr_scheduler.PolynomialHoldDecayAnnealing', (['opt'], {'power': '(2)', 'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, power=2, warmup_steps=\n 5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)\n', (27801, 27882), False, 'from mridc.core import optim\n'), ((28400, 28541), 'mridc.core.optim.lr_scheduler.PolynomialHoldDecayAnnealing', 'optim.lr_scheduler.PolynomialHoldDecayAnnealing', (['opt'], {'warmup_steps': '(5)', 'hold_steps': '(3)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR', 'power': '(2)'}), '(opt, warmup_steps=5,\n hold_steps=3, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR, power=2)\n', (28447, 28541), False, 'from mridc.core import optim\n'), ((29419, 29444), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (29432, 29444), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((29550, 29650), 'mridc.core.optim.lr_scheduler.InverseSquareRootAnnealing', 'optim.lr_scheduler.InverseSquareRootAnnealing', (['opt'], {'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, max_steps=self.MAX_STEPS,\n min_lr=self.MIN_LR)\n', (29595, 29650), False, 'from mridc.core import optim\n'), ((30140, 30256), 'mridc.core.optim.lr_scheduler.InverseSquareRootAnnealing', 'optim.lr_scheduler.InverseSquareRootAnnealing', (['opt'], {'warmup_steps': '(5)', 'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, warmup_steps=5,\n max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)\n', (30185, 30256), False, 'from mridc.core import optim\n'), ((31020, 31045), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['"""novograd"""'], {}), "('novograd')\n", (31033, 31045), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((31151, 31241), 'mridc.core.optim.lr_scheduler.CosineAnnealing', 'optim.lr_scheduler.CosineAnnealing', (['opt'], {'max_steps': 'self.MAX_STEPS', 'min_lr': 'self.MIN_LR'}), '(opt, max_steps=self.MAX_STEPS, min_lr=\n self.MIN_LR)\n', (31185, 31241), False, 'from mridc.core import optim\n'), ((3361, 3411), 'mridc.utils.logging.debug', 'logging.debug', (['f"""max_epochs: {trainer.max_epochs}"""'], {}), "(f'max_epochs: {trainer.max_epochs}')\n", (3374, 3411), False, 'from mridc.utils import logging\n'), ((3424, 3500), 'mridc.utils.logging.debug', 'logging.debug', (['f"""accumulate_grad_batches: {trainer.accumulate_grad_batches}"""'], {}), "(f'accumulate_grad_batches: {trainer.accumulate_grad_batches}')\n", (3437, 3500), False, 'from mridc.utils import logging\n'), ((3513, 3581), 'mridc.utils.logging.debug', 'logging.debug', (['f"""limit_train_batches: {trainer.limit_train_batches}"""'], {}), "(f'limit_train_batches: {trainer.limit_train_batches}')\n", (3526, 3581), False, 'from mridc.utils import logging\n'), ((3594, 3650), 'mridc.utils.logging.debug', 'logging.debug', (['f"""num_processes: {trainer.num_processes}"""'], {}), "(f'num_processes: {trainer.num_processes}')\n", (3607, 3650), False, 'from mridc.utils 
import logging\n'), ((3663, 3712), 'mridc.utils.logging.debug', 'logging.debug', (['f"""batch_size: {module.batch_size}"""'], {}), "(f'batch_size: {module.batch_size}')\n", (3676, 3712), False, 'from mridc.utils import logging\n'), ((3725, 3776), 'mridc.utils.logging.debug', 'logging.debug', (['f"""dataset_len: {module.dataset_len}"""'], {}), "(f'dataset_len: {module.dataset_len}')\n", (3738, 3776), False, 'from mridc.utils import logging\n'), ((3789, 3836), 'mridc.utils.logging.debug', 'logging.debug', (['f"""drop_last: {module.drop_last}"""'], {}), "(f'drop_last: {module.drop_last}')\n", (3802, 3836), False, 'from mridc.utils import logging\n'), ((3911, 3959), 'mridc.utils.logging.debug', 'logging.debug', (['f"""{trainer.num_training_batches}"""'], {}), "(f'{trainer.num_training_batches}')\n", (3924, 3959), False, 'from mridc.utils import logging\n'), ((6349, 6372), 'mridc.core.optim.optimizers.get_optimizer', 'get_optimizer', (['opt_name'], {}), '(opt_name)\n', (6362, 6372), False, 'from mridc.core.optim.optimizers import AVAILABLE_OPTIMIZERS, get_optimizer, parse_optimizer_args, register_optimizer\n'), ((9900, 9911), 'mridc.core.conf.optimizers.SGDParams', 'SGDParams', ([], {}), '()\n', (9909, 9911), False, 'from mridc.core.conf.optimizers import NovogradParams, SGDParams\n'), ((9944, 9960), 'mridc.core.conf.optimizers.NovogradParams', 'NovogradParams', ([], {}), '()\n', (9958, 9960), False, 'from mridc.core.conf.optimizers import NovogradParams, SGDParams\n'), ((10457, 10501), 'mridc.core.optim.lr_scheduler.get_scheduler', 'optim.lr_scheduler.get_scheduler', (['sched_name'], {}), '(sched_name)\n', (10489, 10501), False, 'from mridc.core import optim\n'), ((32589, 32878), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'max_epochs': 'max_epochs', 'strategy': '"""ddp_spawn"""', 'accelerator': '"""cpu"""', 'num_processes': 'num_processes', 'accumulate_grad_batches': 'accumulate_grad_batches', 'limit_train_batches': 'limit_train_batches', 'enable_checkpointing': '(False)', 'progress_bar_refresh_rate': '(0)', 'weights_summary': 'None'}), "(max_epochs=max_epochs, strategy='ddp_spawn', accelerator='cpu',\n num_processes=num_processes, accumulate_grad_batches=\n accumulate_grad_batches, limit_train_batches=limit_train_batches,\n enable_checkpointing=False, progress_bar_refresh_rate=0,\n weights_summary=None)\n", (32599, 32878), True, 'import pytorch_lightning as pl\n'), ((33045, 33194), 'mridc.core.optim.lr_scheduler.compute_max_steps', 'optim.lr_scheduler.compute_max_steps', (['max_epochs', 'accumulate_grad_batches', 'limit_train_batches', 'num_processes', 'dataset_len', 'batch_size', 'drop_last'], {}), '(max_epochs, accumulate_grad_batches,\n limit_train_batches, num_processes, dataset_len, batch_size, drop_last)\n', (33081, 33194), False, 'from mridc.core import optim\n'), ((34673, 34694), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (34687, 34694), False, 'import random\n'), ((34734, 34755), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (34748, 34755), False, 'import random\n'), ((34796, 34816), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (34810, 34816), False, 'import random\n'), ((34851, 34918), 'random.choice', 'random.choice', (['[limit_train_batches_int, limit_train_batches_float]'], {}), '([limit_train_batches_int, limit_train_batches_float])\n', (34864, 34918), False, 'import random\n'), ((34944, 34965), 'random.randint', 'random.randint', (['(4)', '(20)'], {}), '(4, 20)\n', (34958, 34965), False, 
'import random\n'), ((34994, 35014), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (35008, 35014), False, 'import random\n'), ((35041, 35080), 'random.randint', 'random.randint', (['(20)', '(num_processes * 500)'], {}), '(20, num_processes * 500)\n', (35055, 35080), False, 'import random\n'), ((35906, 36193), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'max_steps': 'max_steps', 'strategy': '"""ddp_spawn"""', 'accelerator': '"""cpu"""', 'num_processes': 'num_processes', 'accumulate_grad_batches': 'accumulate_grad_batches', 'limit_train_batches': 'limit_train_batches', 'enable_checkpointing': '(False)', 'progress_bar_refresh_rate': '(0)', 'weights_summary': 'None'}), "(max_steps=max_steps, strategy='ddp_spawn', accelerator='cpu',\n num_processes=num_processes, accumulate_grad_batches=\n accumulate_grad_batches, limit_train_batches=limit_train_batches,\n enable_checkpointing=False, progress_bar_refresh_rate=0,\n weights_summary=None)\n", (35916, 36193), True, 'import pytorch_lightning as pl\n'), ((34613, 34633), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (34627, 34633), False, 'import random\n'), ((35138, 35168), 'math.ceil', 'math.ceil', (['(5.0 / num_processes)'], {}), '(5.0 / num_processes)\n', (35147, 35168), False, 'import math\n'), ((37001, 37012), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (37010, 37012), False, 'import os\n'), ((6275, 6300), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6298, 6300), False, 'import torch\n'), ((35174, 35217), 'numpy.floor_divide', 'np.floor_divide', (['dataset_len', 'num_processes'], {}), '(dataset_len, num_processes)\n', (35189, 35217), True, 'import numpy as np\n'), ((37073, 37084), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (37082, 37084), False, 'import os\n')]
|
from __future__ import print_function, absolute_import, division, unicode_literals
# This file is part of the ISIS IBEX application.
# Copyright (C) 2012-2016 Science & Technology Facilities Council.
# All rights reserved.
#
# This program is distributed in the hope that it will be useful.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution.
# EXCEPT AS EXPRESSLY SET FORTH IN THE ECLIPSE PUBLIC LICENSE V1.0, THE PROGRAM
# AND ACCOMPANYING MATERIALS ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND. See the Eclipse Public License v1.0 for more details.
#
# You should have received a copy of the Eclipse Public License v1.0
# along with this program; if not, you can obtain a copy from
# https://www.eclipse.org/org/documents/epl-v10.php or
# http://opensource.org/licenses/eclipse-1.0.php
from server_common.channel_access import ChannelAccess
from server_common.utilities import print_and_log
class ProcServWrapper(object):
"""A wrapper for ProcSev to allow for control of IOCs"""
@staticmethod
def generate_prefix(prefix: str, ioc: str) -> str:
"""Creates a PV based on the given prefix and IOC name
Args:
prefix: The prefix of the instrument the IOC is being run on
ioc: The name of the requested IOC
"""
return "{}CS:PS:{}".format(prefix, ioc)
def start_ioc(self, prefix: str, ioc: str) -> None:
"""Starts the specified IOC
Args:
prefix: The prefix of the instrument the IOC is being run on
ioc: The name of the IOC to start
"""
print_and_log("Starting IOC {}".format(ioc))
ChannelAccess.caput(self.generate_prefix(prefix, ioc) + ":START", 1)
def stop_ioc(self, prefix: str, ioc: str) -> None:
"""Stops the specified IOC
Args:
prefix: The prefix of the instrument the IOC is being run on
ioc: The name of the IOC to stop
"""
print_and_log("Stopping IOC {}".format(ioc))
ChannelAccess.caput(self.generate_prefix(prefix, ioc) + ":STOP", 1)
def restart_ioc(self, prefix: str, ioc: str) -> None:
"""Restarts the specified IOC
Args:
prefix: The prefix of the instrument the IOC is being run on
ioc: The name of the IOC to restart
"""
print_and_log("Restarting IOC {}".format(ioc))
ChannelAccess.caput(self.generate_prefix(prefix, ioc) + ":RESTART", 1)
def get_ioc_status(self, prefix: str, ioc: str) -> str:
"""Gets the status of the specified IOC
Args:
prefix: The prefix of the instrument the IOC is being run on
ioc: The name of the IOC
Returns:
The status of the requested IOC
"""
pv = self.generate_prefix(prefix, ioc) + ":STATUS"
ans = ChannelAccess.caget(pv, as_string=True)
if ans is None:
raise IOError("Could not find IOC (%s)" % pv)
return ans.upper()
def ioc_exists(self, prefix: str, ioc: str) -> bool:
"""Checks if the IOC exists on ProcServ
Args:
prefix: The prefix of the instrument the IOC is being run on
ioc: The name of the IOC
Returns:
True if IOC exists, False otherwise
"""
try:
self.get_ioc_status(prefix, ioc)
return True
        except Exception:
return False
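# A minimal usage sketch (the instrument prefix "IN:DEMO:" and IOC name "SIMPLE" below
# are illustrative assumptions, not values defined by this module):
#
#   wrapper = ProcServWrapper()
#   if wrapper.ioc_exists("IN:DEMO:", "SIMPLE"):
#       # talks to the PVs IN:DEMO:CS:PS:SIMPLE:RESTART and IN:DEMO:CS:PS:SIMPLE:STATUS
#       wrapper.restart_ioc("IN:DEMO:", "SIMPLE")
#       print(wrapper.get_ioc_status("IN:DEMO:", "SIMPLE"))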
|
[
"server_common.channel_access.ChannelAccess.caget"
] |
[((2955, 2994), 'server_common.channel_access.ChannelAccess.caget', 'ChannelAccess.caget', (['pv'], {'as_string': '(True)'}), '(pv, as_string=True)\n', (2974, 2994), False, 'from server_common.channel_access import ChannelAccess\n')]
|
#!/usr/bin/python
import csv
import os
def find_headers(lines) :
    """Return ['host'] followed by the 8th comma-separated field of every rank-1 'State' line."""
host_headers = ['host']
for line in lines :
if(line.count("State") > 0 and line.count("rank-1") > 0) :
t = line.split(",")
host_headers.append(t[7])
return host_headers
def print_data(file_name) :
    """Parse ./dump_files/<file_name>.dump and write the link data (source, dest,
    hopcount) and the per-host state maxima out to CSV files under ./csv/."""
f = open("./dump_files/" + file_name + ".dump", "r")
lines = f.readlines()
links_data = [['source', 'dest', 'hopcount']]
hosts_map = {}
hosts_data = []
for line in lines :
if(line.count("MPI_LINK") > 0) :
t = line.split(",")[9].split("_")
src = (int(t[0]))
dst = (int(t[1]))
count = (int(t[3]))
links_data.append([src,dst,count])
if(line.count("State") > 0) :
t = line.split(",")
t[1] = t[1].strip()
t[7] = t[7].strip()
if( t[1] not in hosts_map.keys()) :
hosts_map[t[1]] = {}
hosts_map[t[1]][t[7]] = max(t[3:7])
# hosts_data.append([t[1] ])
    # Flatten hosts_map into hosts_data rows: a header row first, then one row per host
x = list(hosts_map['rank-1'].keys())
x.insert(0,"host")
hosts_data.append(x)
for x in hosts_map.keys() :
t = []
t.append(x)
for y in hosts_map[x].keys() :
t.append(hosts_map[x][y])
hosts_data.append(t)
with open('./csv/link_data/' + file_name + '.csv', 'w') as csvFile:
writer = csv.writer(csvFile)
writer.writerows(links_data)
with open('./csv/host_data/' + file_name + '.csv', 'w') as csvFile:
writer = csv.writer(csvFile)
writer.writerows(hosts_data)
f.close()
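# Input layout assumed by the parsing above (inferred from the code, not from a format spec):
#   "MPI_LINK" rows: the 10th comma-separated field looks like "<src>_<dst>_<?>_<hopcount>"
#   "State" rows:    field 2 is the rank/host name, field 8 is the metric name, and
#                    fields 4-7 hold the values that are reduced with max()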
for filename in os.listdir("./dump_files/"):
filename = (filename[0:-5])
print_data(filename)
|
[
"csv.writer",
"os.listdir"
] |
[((1682, 1709), 'os.listdir', 'os.listdir', (['"""./dump_files/"""'], {}), "('./dump_files/')\n", (1692, 1709), False, 'import os\n'), ((1446, 1465), 'csv.writer', 'csv.writer', (['csvFile'], {}), '(csvFile)\n', (1456, 1465), False, 'import csv\n'), ((1593, 1612), 'csv.writer', 'csv.writer', (['csvFile'], {}), '(csvFile)\n', (1603, 1612), False, 'import csv\n')]
|
#!/usr/bin/env python3
# Author: <NAME>
# INFO521 Homework 3 Problem 6
import numpy as np
import matplotlib.pyplot as plt
# --------------------------------------------------
def true_function(x):
"""$t = 5x+x^2-0.5x^3$"""
return (5 * x) + x**2 - (0.5 * x**3)
# --------------------------------------------------
def sample_from_function(N=100, noise_var=1000, xmin=-5., xmax=5.):
""" Sample data from the true function.
N: Number of samples
    Returns the sampled inputs x and the noisy targets t
    (true function values plus Gaussian noise). """
x = np.random.uniform(xmin, xmax, N)
t = true_function(x)
    # add Gaussian noise using np.random.randn
    # (np.random.randn samples the standard normal N(0, 1), i.e., mean 0, variance 1,
    # so multiplying by np.sqrt(noise_var) makes the noise N(0, noise_var))
t = t + np.random.randn(x.shape[0]) * np.sqrt(noise_var)
return x, t
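# Quick numerical check of the scaling above (values are illustrative, not part of the
# assignment): scaling N(0, 1) samples by sqrt(noise_var) yields variance noise_var, e.g.
#   e = np.random.randn(100000) * np.sqrt(6.0)
#   np.var(e)  # approximately 6.0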
# --------------------------------------------------
def main():
xmin = -4.
xmax = 5.
noise_var = 6
orders = [1, 3, 5, 9]
N = 25
num_samples = 20
# Make a set of N evenly-spaced x values between xmin and xmax
test_x = np.linspace(xmin, xmax, N)
true_y = true_function(test_x)
for i in orders:
plt.figure(0)
for _ in range(0, num_samples):
x, t = sample_from_function(
N=25, xmin=xmin, xmax=xmax, noise_var=noise_var)
X = np.zeros(shape=(x.shape[0], i + 1))
testX = np.zeros(shape=(test_x.shape[0], i + 1))
for k in range(i + 1):
X[:, k] = np.power(x, k)
testX[:, k] = np.power(test_x, k)
# fit model parameters
w = np.dot(np.linalg.inv(np.dot(X.T, X)), np.dot(X.T, t))
# calculate predictions
prediction_t = np.dot(testX, w)
plt.plot(test_x, prediction_t, color='blue')
# Plot the true function in red so it will be visible
plt.plot(test_x, true_y, color='red', linewidth=3)
plt.xlabel('x')
plt.ylabel('t')
        plt.title(r'Model order {} prediction of {}, $x \in [{},{}]$'.format(
i, true_function.__doc__, xmin, xmax))
plt.pause(.1) # required on some systems so that rendering can happen
outfile = 'model_bias-{}.png'.format(i)
plt.savefig(outfile, format='png')
plt.show()
# --------------------------------------------------
if __name__ == '__main__':
main()
|
[
"numpy.random.uniform",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"numpy.power",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.dot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((571, 603), 'numpy.random.uniform', 'np.random.uniform', (['xmin', 'xmax', 'N'], {}), '(xmin, xmax, N)\n', (588, 603), True, 'import numpy as np\n'), ((1166, 1192), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'N'], {}), '(xmin, xmax, N)\n', (1177, 1192), True, 'import numpy as np\n'), ((1258, 1271), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (1268, 1271), True, 'import matplotlib.pyplot as plt\n'), ((1973, 2023), 'matplotlib.pyplot.plot', 'plt.plot', (['test_x', 'true_y'], {'color': '"""red"""', 'linewidth': '(3)'}), "(test_x, true_y, color='red', linewidth=3)\n", (1981, 2023), True, 'import matplotlib.pyplot as plt\n'), ((2032, 2047), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2042, 2047), True, 'import matplotlib.pyplot as plt\n'), ((2056, 2071), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""t"""'], {}), "('t')\n", (2066, 2071), True, 'import matplotlib.pyplot as plt\n'), ((2208, 2222), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (2217, 2222), True, 'import matplotlib.pyplot as plt\n'), ((2335, 2369), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {'format': '"""png"""'}), "(outfile, format='png')\n", (2346, 2369), True, 'import matplotlib.pyplot as plt\n'), ((2378, 2388), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2386, 2388), True, 'import matplotlib.pyplot as plt\n'), ((848, 875), 'numpy.random.randn', 'np.random.randn', (['x.shape[0]'], {}), '(x.shape[0])\n', (863, 875), True, 'import numpy as np\n'), ((878, 896), 'numpy.sqrt', 'np.sqrt', (['noise_var'], {}), '(noise_var)\n', (885, 896), True, 'import numpy as np\n'), ((1435, 1470), 'numpy.zeros', 'np.zeros', ([], {'shape': '(x.shape[0], i + 1)'}), '(shape=(x.shape[0], i + 1))\n', (1443, 1470), True, 'import numpy as np\n'), ((1491, 1531), 'numpy.zeros', 'np.zeros', ([], {'shape': '(test_x.shape[0], i + 1)'}), '(shape=(test_x.shape[0], i + 1))\n', (1499, 1531), True, 'import numpy as np\n'), ((1828, 1844), 'numpy.dot', 'np.dot', (['testX', 'w'], {}), '(testX, w)\n', (1834, 1844), True, 'import numpy as np\n'), ((1857, 1901), 'matplotlib.pyplot.plot', 'plt.plot', (['test_x', 'prediction_t'], {'color': '"""blue"""'}), "(test_x, prediction_t, color='blue')\n", (1865, 1901), True, 'import matplotlib.pyplot as plt\n'), ((1593, 1607), 'numpy.power', 'np.power', (['x', 'k'], {}), '(x, k)\n', (1601, 1607), True, 'import numpy as np\n'), ((1638, 1657), 'numpy.power', 'np.power', (['test_x', 'k'], {}), '(test_x, k)\n', (1646, 1657), True, 'import numpy as np\n'), ((1748, 1762), 'numpy.dot', 'np.dot', (['X.T', 't'], {}), '(X.T, t)\n', (1754, 1762), True, 'import numpy as np\n'), ((1731, 1745), 'numpy.dot', 'np.dot', (['X.T', 'X'], {}), '(X.T, X)\n', (1737, 1745), True, 'import numpy as np\n')]
|
import sys
sys.path.append('../')
from matplotlib import figure
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
from tqdm import tqdm
from models import model
### Config folders
config_data = pd.read_csv('config.csv', sep=',', header=None, index_col=0)
figures_path = config_data.loc['figures_dir'][1]
results_path = config_data.loc['results_test_dir'][1]
ages_data_path = config_data.loc['bogota_age_data_dir'][1]
houses_data_path = config_data.loc['bogota_houses_data_dir'][1]
### Arguments
import argparse
parser = argparse.ArgumentParser(description='Dynamics visualization.')
parser.add_argument('--population', default=10000, type=int,
                    help='Specify the number of individuals')
parser.add_argument('--type_sim', default='intervention', type=str,
                    help='Specify the type of simulation to plot')
args = parser.parse_args()
number_nodes = args.population
pop = number_nodes
### Read functions
def load_results_ints(type_res,n,int_effec,schl_occup,layer,path=results_path):
read_path = os.path.join(path,'{}_layerInt_{}_inter_{}_schoolcap_{}_{}.csv'.format(str(n),str(layer),str(int_effec),
str(schl_occup),type_res))
read_file = pd.read_csv(read_path)
return read_file
### Read file
results_path = os.path.join(results_path,str(pop))
###------------------------------------------------------------------------------------------------------------------------------------------------------
### Bar plots
intervention_effcs = [0.0,0.2,0.4]
school_cap = [1.0] #,0.35]
layers_test = ['work','community','all']
layers_labels = ['Intervention over work','Intervention over community','Intervention over-all']
layers_labels = dict(zip(layers_test,layers_labels))
df_list = []
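# Collect, for every intervention layer / efficiency / school-capacity combination,
# the final cumulative case count ('E' column) of each stochastic iteration,
# scaled to the simulated population size.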
for l, layer_ in enumerate(layers_test):
for i, inter_ in enumerate(intervention_effcs):
for j, schl_cap_ in enumerate(school_cap):
res_read = load_results_ints('soln_cum',args.population,inter_,schl_cap_,layer_,results_path)
for itr_ in range(10):
res_read_i = res_read['iter'] == itr_
res_read_i = pd.DataFrame(res_read[res_read_i])
end_cases = res_read_i['E'].iloc[-1]
df_res_i = pd.DataFrame(columns=['iter','Inter.Layer','interven_eff','end_cases'])
df_res_i['iter'] = [int(itr_)]
df_res_i['Inter.Layer'] = layers_labels[layer_]
df_res_i['interven_eff'] = r'{}%'.format(int(inter_*100))
df_res_i['end_cases'] = end_cases*pop
df_list.append(df_res_i)
df_final_E = pd.concat(df_list)
fig,ax = plt.subplots(1,1,figsize=(9, 6))
sns.catplot(ax=ax, data=df_final_E, y='interven_eff', x='end_cases', hue='Inter.Layer',kind='bar',palette='winter',alpha=0.7,legend=False)
#ax.legend(bbox_to_anchor=(1.02,1)).set_title('')
plt.legend(bbox_to_anchor=(1.02,0.6),title='',frameon=False, fontsize=16)
#plt.setp(ax.get_legend().get_texts(), fontsize='17') # for legend text
plt.ylabel(r'Intervention efficiency ($\%$)',fontsize=17)
plt.xlabel(r'Infections per 10,000',fontsize=17)
plt.title(r'Total infections | schools at {}%'.format(str(int(school_cap[0]*100))),fontsize=17)
plt.xticks(size=16)
plt.yticks(size=16)
save_path = os.path.join(figures_path,'bar_plots','layersInter_totalInfections_n_{}_schoolcap_{}_.png'.format(str(pop),str(school_cap[0])))
plt.savefig(save_path,dpi=400, transparent=False, bbox_inches='tight', pad_inches=0.1 )
# Deaths
school_cap = [0.35] #,0.35]
layers_test = ['work','community','all']
layers_labels = ['Intervention over work','Intervention over community','Intervention over-all']
layers_labels = dict(zip(layers_test,layers_labels))
df_list = []
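# Same aggregation as above, but for the final cumulative death count ('D' column).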
for l, layer_ in enumerate(layers_test):
for i, inter_ in enumerate(intervention_effcs):
for j, schl_cap_ in enumerate(school_cap):
res_read = load_results_ints('soln_cum',args.population,inter_,schl_cap_,layer_,results_path)
for itr_ in range(10):
res_read_i = res_read['iter'] == itr_
res_read_i = pd.DataFrame(res_read[res_read_i])
end_dead = res_read_i['D'].iloc[-1]
df_res_i = pd.DataFrame(columns=['iter','Inter.Layer','interven_eff','end_dead'])
df_res_i['iter'] = [int(itr_)]
df_res_i['Inter.Layer'] = layers_labels[layer_]
df_res_i['interven_eff'] = r'{}%'.format(int(inter_*100))
df_res_i['end_dead'] = end_dead*pop
df_list.append(df_res_i)
df_final_D = pd.concat(df_list)
fig,ax = plt.subplots(1,1,figsize=(9, 6))
sns.catplot(ax=ax, data=df_final_D, y='interven_eff', x='end_dead', hue='Inter.Layer',kind='bar',palette='winter',alpha=0.7,legend=False)
#ax.legend(bbox_to_anchor=(1.02,1)).set_title('')
plt.legend(bbox_to_anchor=(1.02,0.6),title='',frameon=False, fontsize=16)
#plt.setp(ax.get_legend().get_texts(), fontsize='17') # for legend text
plt.ylabel(r'Intervention efficiency ($\%$)',fontsize=17)
plt.xlabel(r'Deaths per 10,000',fontsize=17)
plt.title(r'Total deaths | schools at {}%'.format(str(int(school_cap[0]*100))),fontsize=17)
plt.xticks(size=16)
plt.yticks(size=16)
plt.xlim([0,141])
save_path = os.path.join(figures_path,'bar_plots','layersInter_totalDeaths_n_{}_schoolcap_{}_.png'.format(str(pop),str(school_cap[0])))
plt.savefig(save_path,dpi=400, transparent=False, bbox_inches='tight', pad_inches=0.1 )
|
[
"sys.path.append",
"matplotlib.pyplot.xlim",
"pandas.DataFrame",
"seaborn.catplot",
"argparse.ArgumentParser",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"pandas.concat",
"matplotlib.pyplot.savefig"
] |
[((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((231, 291), 'pandas.read_csv', 'pd.read_csv', (['"""config.csv"""'], {'sep': '""","""', 'header': 'None', 'index_col': '(0)'}), "('config.csv', sep=',', header=None, index_col=0)\n", (242, 291), True, 'import pandas as pd\n'), ((560, 622), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Dynamics visualization."""'}), "(description='Dynamics visualization.')\n", (583, 622), False, 'import argparse\n'), ((2722, 2740), 'pandas.concat', 'pd.concat', (['df_list'], {}), '(df_list)\n', (2731, 2740), True, 'import pandas as pd\n'), ((2751, 2785), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(9, 6)'}), '(1, 1, figsize=(9, 6))\n', (2763, 2785), True, 'import matplotlib.pyplot as plt\n'), ((2784, 2931), 'seaborn.catplot', 'sns.catplot', ([], {'ax': 'ax', 'data': 'df_final_E', 'y': '"""interven_eff"""', 'x': '"""end_cases"""', 'hue': '"""Inter.Layer"""', 'kind': '"""bar"""', 'palette': '"""winter"""', 'alpha': '(0.7)', 'legend': '(False)'}), "(ax=ax, data=df_final_E, y='interven_eff', x='end_cases', hue=\n 'Inter.Layer', kind='bar', palette='winter', alpha=0.7, legend=False)\n", (2795, 2931), True, 'import seaborn as sns\n'), ((2973, 3049), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.02, 0.6)', 'title': '""""""', 'frameon': '(False)', 'fontsize': '(16)'}), "(bbox_to_anchor=(1.02, 0.6), title='', frameon=False, fontsize=16)\n", (2983, 3049), True, 'import matplotlib.pyplot as plt\n'), ((3119, 3177), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intervention efficiency ($\\\\%$)"""'], {'fontsize': '(17)'}), "('Intervention efficiency ($\\\\%$)', fontsize=17)\n", (3129, 3177), True, 'import matplotlib.pyplot as plt\n'), ((3177, 3225), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Infections per 10,000"""'], {'fontsize': '(17)'}), "('Infections per 10,000', fontsize=17)\n", (3187, 3225), True, 'import matplotlib.pyplot as plt\n'), ((3322, 3341), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(16)'}), '(size=16)\n', (3332, 3341), True, 'import matplotlib.pyplot as plt\n'), ((3342, 3361), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(16)'}), '(size=16)\n', (3352, 3361), True, 'import matplotlib.pyplot as plt\n'), ((3503, 3594), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {'dpi': '(400)', 'transparent': '(False)', 'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(save_path, dpi=400, transparent=False, bbox_inches='tight',\n pad_inches=0.1)\n", (3514, 3594), True, 'import matplotlib.pyplot as plt\n'), ((4703, 4721), 'pandas.concat', 'pd.concat', (['df_list'], {}), '(df_list)\n', (4712, 4721), True, 'import pandas as pd\n'), ((4732, 4766), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(9, 6)'}), '(1, 1, figsize=(9, 6))\n', (4744, 4766), True, 'import matplotlib.pyplot as plt\n'), ((4765, 4911), 'seaborn.catplot', 'sns.catplot', ([], {'ax': 'ax', 'data': 'df_final_D', 'y': '"""interven_eff"""', 'x': '"""end_dead"""', 'hue': '"""Inter.Layer"""', 'kind': '"""bar"""', 'palette': '"""winter"""', 'alpha': '(0.7)', 'legend': '(False)'}), "(ax=ax, data=df_final_D, y='interven_eff', x='end_dead', hue=\n 'Inter.Layer', kind='bar', palette='winter', alpha=0.7, legend=False)\n", (4776, 4911), True, 'import seaborn as sns\n'), ((4953, 5029), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.02, 0.6)', 'title': '""""""', 'frameon': '(False)', 'fontsize': '(16)'}), "(bbox_to_anchor=(1.02, 0.6), title='', frameon=False, fontsize=16)\n", (4963, 5029), True, 'import matplotlib.pyplot as plt\n'), ((5099, 5157), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intervention efficiency ($\\\\%$)"""'], {'fontsize': '(17)'}), "('Intervention efficiency ($\\\\%$)', fontsize=17)\n", (5109, 5157), True, 'import matplotlib.pyplot as plt\n'), ((5157, 5201), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Deaths per 10,000"""'], {'fontsize': '(17)'}), "('Deaths per 10,000', fontsize=17)\n", (5167, 5201), True, 'import matplotlib.pyplot as plt\n'), ((5294, 5313), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'size': '(16)'}), '(size=16)\n', (5304, 5313), True, 'import matplotlib.pyplot as plt\n'), ((5314, 5333), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'size': '(16)'}), '(size=16)\n', (5324, 5333), True, 'import matplotlib.pyplot as plt\n'), ((5334, 5352), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 141]'], {}), '([0, 141])\n', (5342, 5352), True, 'import matplotlib.pyplot as plt\n'), ((5489, 5580), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {'dpi': '(400)', 'transparent': '(False)', 'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(save_path, dpi=400, transparent=False, bbox_inches='tight',\n pad_inches=0.1)\n", (5500, 5580), True, 'import matplotlib.pyplot as plt\n'), ((1300, 1322), 'pandas.read_csv', 'pd.read_csv', (['read_path'], {}), '(read_path)\n', (1311, 1322), True, 'import pandas as pd\n'), ((2222, 2256), 'pandas.DataFrame', 'pd.DataFrame', (['res_read[res_read_i]'], {}), '(res_read[res_read_i])\n', (2234, 2256), True, 'import pandas as pd\n'), ((2338, 2412), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['iter', 'Inter.Layer', 'interven_eff', 'end_cases']"}), "(columns=['iter', 'Inter.Layer', 'interven_eff', 'end_cases'])\n", (2350, 2412), True, 'import pandas as pd\n'), ((4207, 4241), 'pandas.DataFrame', 'pd.DataFrame', (['res_read[res_read_i]'], {}), '(res_read[res_read_i])\n', (4219, 4241), True, 'import pandas as pd\n'), ((4322, 4395), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['iter', 'Inter.Layer', 'interven_eff', 'end_dead']"}), "(columns=['iter', 'Inter.Layer', 'interven_eff', 'end_dead'])\n", (4334, 4395), True, 'import pandas as pd\n')]
|
"""
webcam_train
Developer: <NAME>, <NAME>
Version: 0.1.0
Release Date: 2017-09-30
"""
import numpy as np
import cv2
import tensorflow as tf
import sys
from kwin import *
import time
import webcam
import dataset
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
########################################################################
#
########################################################################
#
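# Interactive loop: ask the user for a class name, create its folder under the
# training data path and record a webcam video for it, until the user types 'exit'.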
while True:
    target_name = input("What would you like to train? (type exit to quit) ")
target_name=target_name.strip()
target_dir = "%s/%s" %(dataset.train_data_path(), target_name)
if target_name == 'exit':
break
    if not os.path.exists(target_dir):
os.mkdir(target_dir)
webcam.record_avi(target_name=target_name, target_dir=target_dir)
print("[%s]에 대한 동영상 촬영이 완료되었습니다." %target_name)
#
print("학습을 시작합니다. 종료 메시지가 나타날 때까지 잠시 기다리십시오.")
#
import retrain
retrain.do_train()
#
print("학습이 종료되었습니다. bottleneck을 확인하십시오.")
|
[
"os.mkdir",
"os.path.exists",
"webcam.record_avi",
"dataset.train_data_path",
"retrain.do_train"
] |
[((898, 916), 'retrain.do_train', 'retrain.do_train', ([], {}), '()\n', (914, 916), False, 'import retrain\n'), ((712, 777), 'webcam.record_avi', 'webcam.record_avi', ([], {'target_name': 'target_name', 'target_dir': 'target_dir'}), '(target_name=target_name, target_dir=target_dir)\n', (729, 777), False, 'import webcam\n'), ((642, 668), 'os.path.exists', 'os.path.exists', (['target_dir'], {}), '(target_dir)\n', (656, 668), False, 'import os\n'), ((687, 707), 'os.mkdir', 'os.mkdir', (['target_dir'], {}), '(target_dir)\n', (695, 707), False, 'import os\n'), ((550, 575), 'dataset.train_data_path', 'dataset.train_data_path', ([], {}), '()\n', (573, 575), False, 'import dataset\n')]
|
from django.contrib.auth import get_user_model
from django.contrib import admin
# Register your models here.
from .models import Post, Link
User = get_user_model()
class LinkInline(admin.StackedInline):
model = Link
extra = 0
# readonly_fields = ['url',]
fields = ['url']
class PostAdmin(admin.ModelAdmin):
inlines = [LinkInline]
list_display = ['title', 'description']
readonly_fields = ['date',]
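    # raw_id_fields renders the author as an ID lookup widget instead of a
    # select box, which avoids loading every user on the Post change page.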
raw_id_fields = ['author']
admin.site.register(Post, PostAdmin)
|
[
"django.contrib.auth.get_user_model",
"django.contrib.admin.site.register"
] |
[((149, 165), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (163, 165), False, 'from django.contrib.auth import get_user_model\n'), ((462, 498), 'django.contrib.admin.site.register', 'admin.site.register', (['Post', 'PostAdmin'], {}), '(Post, PostAdmin)\n', (481, 498), False, 'from django.contrib import admin\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# @Author: <NAME>
#
# app/comm/psql_wrapper.py
#
# PostgresqlWrapper wraps the postgres connection
# and also allows our unit and integration tests to
# mock it more easily.
#
import psycopg2
from functools import wraps
class PostgresqlWrapper:
"""Allows for executing SQL statements to a postgresql database"""
def __init__(self, params: dict):
self.params_postgresql = params
def _connect_curs_postgresql(function):
"""Wrapper function that connects and authenticates to the PostgreSQL DB.
The passed function will receive the open cursor.
"""
@wraps(function)
def wrapper_connect(self, *args, **kwargs):
with psycopg2.connect(**self.params_postgresql) as conn:
with conn.cursor() as curs:
val = function(self, cursor=curs, *args, **kwargs)
return val
return wrapper_connect
@_connect_curs_postgresql
def execute(self, query: str, vars=None, cursor=None):
"""Connects to the postgresql DB and executes the statement.
Returns all results of the statement if applicable.
"""
cursor.execute(query, vars)
if cursor.description is not None:
return cursor.fetchall()
@_connect_curs_postgresql
def executemany(self, query: str, vars_list: list, cursor=None):
"""Connects to the postgresql DB and executes the many statement"""
cursor.executemany(query, vars_list)
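# A minimal usage sketch (the connection parameters below are illustrative
# assumptions, not values taken from this project's configuration):
#
#   db = PostgresqlWrapper({'host': 'localhost', 'dbname': 'app',
#                           'user': 'app', 'password': 'secret'})
#   rows = db.execute('SELECT id FROM items WHERE id = %s', (1,))
#   db.executemany('INSERT INTO items (name) VALUES (%s)', [('a',), ('b',)])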
|
[
"functools.wraps",
"psycopg2.connect"
] |
[((667, 682), 'functools.wraps', 'wraps', (['function'], {}), '(function)\n', (672, 682), False, 'from functools import wraps\n'), ((752, 794), 'psycopg2.connect', 'psycopg2.connect', ([], {}), '(**self.params_postgresql)\n', (768, 794), False, 'import psycopg2\n')]
|
import csv
from sklearn import tree
class tree_classifier():
classes_dict = {0: 'going_left',
1: 'going_right',
2: 'falling',
3: 'just_sitting',
4: 'just_standing',
5: 'just_lying',
6: 'sitting_down',
7: 'standing_up'}
def train():
going_left = tree_classifier.load_class('going_left')
going_left_value = [0 for _ in range(len(going_left))]
going_right = tree_classifier.load_class('going_right')
going_right_value = [1 for _ in range(len(going_right))]
falling = tree_classifier.load_class('falling')
falling_value = [2 for _ in range(len(falling))]
just_sitting = tree_classifier.load_class('just_sitting')
just_sitting_value = [3 for _ in range(len(just_sitting))]
just_standing = tree_classifier.load_class('just_standing')
just_standing_value = [4 for _ in range(len(just_standing))]
lying_down = tree_classifier.load_class('lying_down')
lying_down_value = [5 for _ in range(len(lying_down))]
sitting_down = tree_classifier.load_class('sitting_down')
sitting_down_value = [6 for _ in range(len(sitting_down))]
standing_up = tree_classifier.load_class('standing_up')
standing_up_value = [7 for _ in range(len(standing_up))]
X = going_left + going_right + falling + just_sitting + \
just_standing + lying_down + sitting_down + standing_up
Y = going_left_value + going_right_value + falling_value + just_sitting_value + \
just_standing_value + lying_down_value + sitting_down_value + standing_up_value
tree_classifier.clf = tree.DecisionTreeClassifier(max_depth = 10)
tree_classifier.clf.fit(X, Y)
return tree_classifier.clf.predict([[43.48047639929654, 4.3354936021207635, 3.59]])
def predict(sample):
return tree_classifier.clf.predict([sample])[0]
def load_class(class_name):
l2 = []
with open(class_name + '.tsv', 'r') as tsv:
for line in csv.reader(tsv, quotechar='\t'):
l2.append(line[0].split())
l = []
for x in l2:
l.append([float(r) for r in x])
return l
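# train() fits the decision tree on all eight activity classes and returns the
# prediction for one hard-coded sample; individual samples can then be labelled
# with tree_classifier.predict(sample).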
print(tree_classifier.train())
|
[
"csv.reader",
"sklearn.tree.DecisionTreeClassifier"
] |
[((1763, 1804), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {'max_depth': '(10)'}), '(max_depth=10)\n', (1790, 1804), False, 'from sklearn import tree\n'), ((2145, 2176), 'csv.reader', 'csv.reader', (['tsv'], {'quotechar': '"""\t"""'}), "(tsv, quotechar='\\t')\n", (2155, 2176), False, 'import csv\n')]
|
from CoolProp.CoolProp import get_fluid_param_string
lines = open('KunzWagner2012_TableA7.txt','r').read()
template = """{{"Name1" : "{Name1:s}",
"Name2" : "{Name2:s}",
"CAS1" : "{CAS1:s}",
"CAS2" : "{CAS2:s}",
"d" : {d:s},
"t" : {t:s},
"n" : {n:s},
"eta" : {eta:s},
"epsilon" : {epsilon:s},
"beta": {beta:s},
"gamma": {gamma:s}
}},"""
chunks = lines.split('\n\n')
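# Each blank-line separated chunk describes one binary pair: a 'Fluid1/Fluid2'
# name line followed by coefficient rows (4 columns for purely polynomial terms,
# 8 when the Gaussian parameters eta/epsilon/beta/gamma are present).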
for chunk in chunks:
lines = chunk.split('\n')
D,T,N,ETA,EPSILON,BETA,GAMMA = [0],[0],[0],[0],[0],[0],[0]
names = lines.pop(0)
for line in lines:
vals = line.strip().split(' ')
if len(vals) == 4:
i, d, t, n = vals
eta = 0
epsilon = 0
beta = 0
gamma = 0
else:
i, d, t, n, eta, epsilon, beta, gamma = vals
D.append(int(d))
T.append(float(t))
N.append(float(n))
ETA.append(float(eta))
EPSILON.append(float(epsilon))
BETA.append(float(beta))
GAMMA.append(float(gamma))
name1,name2 = names.split('/')
CAS1 = get_fluid_param_string(name1,'CAS')
CAS2 = get_fluid_param_string(name2,'CAS')
print(template.format(Name1 = name1,
Name2 = name2,
CAS1 = CAS1,
CAS2 = CAS2,
d = str(D),
t = str(T),
n = str(N),
eta = str(ETA),
epsilon= str(EPSILON),
beta = str(BETA),
gamma = str(GAMMA)
))
|
[
"CoolProp.CoolProp.get_fluid_param_string"
] |
[((1054, 1090), 'CoolProp.CoolProp.get_fluid_param_string', 'get_fluid_param_string', (['name1', '"""CAS"""'], {}), "(name1, 'CAS')\n", (1076, 1090), False, 'from CoolProp.CoolProp import get_fluid_param_string\n'), ((1101, 1137), 'CoolProp.CoolProp.get_fluid_param_string', 'get_fluid_param_string', (['name2', '"""CAS"""'], {}), "(name2, 'CAS')\n", (1123, 1137), False, 'from CoolProp.CoolProp import get_fluid_param_string\n')]
|