| code (string, lengths 22 to 1.05M) | apis (list, lengths 1 to 3.31k) | extract_api (string, lengths 75 to 3.25M) |
|---|---|---|
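Each row below is one record: the source file in `code`, the fully qualified APIs it calls in `apis`, and an `extract_api` cell recording every extracted call site. As a minimal sketch (an assumption inferred from the entries visible in the rows, not an official schema), each `extract_api` cell is a Python-literal list of tuples of the form (call span, qualified API name, call expression, (args, kwargs), then trailing metadata ending with the import statement), so it can be decoded with `ast.literal_eval`:

```python
import ast

def iter_extract_api(cell: str):
    # Sketch only: assumes the cell is a Python-literal list of tuples shaped
    # like the entries visible in the rows below.
    for entry in ast.literal_eval(cell):
        (start, end), qualified_name, call_expr, (args, kwargs), *rest = entry
        yield {
            "span": (start, end),      # character offsets of the call within `code`
            "api": qualified_name,     # e.g. 'logging.basicConfig'
            "call": call_expr,         # the call expression as written in the source
            "args": args,              # positional arguments as source strings
            "kwargs": kwargs,          # keyword arguments as source strings
            "import": rest[-1] if rest else None,  # import statement recorded for the API
        }
```

Applied to the first row, for example, this would yield an entry whose `api` is `envparse.env.read_envfile` with span (379, 397).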
import logging
from envparse import env
from telegram import Message, ReplyKeyboardMarkup, ReplyKeyboardRemove, Update
from telegram.ext import CommandHandler, MessageHandler, Updater
from telegram.ext.filters import BaseFilter, Filters
from . import celery as tasks
from .helpers import get_file, get_subject, reply
from .models import User, create_tables, get_user_instance
env.read_envfile()
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
@reply
def start(bot, update: Update, user: User, render):
update.message.reply_text(text=render('hello_message'))
@reply
def resend(bot, update: Update, user, render):
tasks.send_confirmation_mail.delay(user.pk)
update.message.reply_text(text=render('confirmation_message_is_sent'), reply_markup=ReplyKeyboardRemove())
@reply
def reset_email(bot, update: Update, user, render):
user.email = None
user.save()
update.message.reply_text(text=render('email_is_reset'), reply_markup=ReplyKeyboardRemove())
@reply
def send_text_message(bot, update: Update, user: User, render, **kwargs):
text = update.message.text
subject = get_subject(text)
message = update.message.reply_text(text=render('message_is_sent'))
tasks.send_text.delay(
user_id=user.pk,
subject=subject,
text=text,
variables=dict(
message_id=message.message_id,
chat_id=update.message.chat_id,
),
)
@reply
def send_photo(bot, update: Update, user: User, render):
file = update.message.photo[-1].get_file()
photo = get_file(file)
message = update.message.reply_text(text=render('photo_is_sent'))
tasks.send_file.delay(
user_id=user.pk,
file=photo,
subject='Photo note to self',
variables=dict(
message_id=message.message_id,
chat_id=update.message.chat_id,
),
)
@reply
def send_voice(bot, update: Update, user: User, render):
file = update.message.voice.get_file()
voice = get_file(file)
message = update.message.reply_text(text=render('voice_is_sent'))
tasks.send_file.delay(
user_id=user.pk,
file=voice,
subject='Voice note to self',
variables=dict(
message_id=message.message_id,
chat_id=update.message.chat_id,
),
)
@reply
def prompt_for_setting_email(bot, update: Update, user: User, render):
update.message.reply_text(text=render('please_send_email'))
@reply
def send_confirmation(bot, update: Update, user: User, render):
email = update.message.text.strip()
if User.select().where(User.email == email):
update.message.reply_text(text=render('email_is_occupied'))
return
user.email = email
user.save()
tasks.send_confirmation_mail.delay(user.pk)
update.message.reply_text(text=render('confirmation_message_is_sent'))
@reply
def prompt_for_confirm(bot, update: Update, user: User, render):
reply_markup = ReplyKeyboardMarkup([['Resend confirmation email'], ['Change email']])
update.message.reply_text(render('waiting_for_confirmation'), reply_markup=reply_markup)
class ConfirmedUserFilter(BaseFilter):
def filter(self, message: Message):
user = get_user_instance(message.from_user, message.chat_id)
return user.is_confirmed
class UserWithoutEmailFilter(BaseFilter):
def filter(self, message: Message):
user = get_user_instance(message.from_user, message.chat_id)
return user.email is None
class NonConfirmedUserFilter(BaseFilter):
def filter(self, message: Message):
user = get_user_instance(message.from_user, message.chat_id)
return user.email is not None and user.is_confirmed is False
updater = Updater(token=env('BOT_TOKEN'))
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler('start', start))
dispatcher.add_handler(MessageHandler(UserWithoutEmailFilter() & Filters.text & Filters.regex('@'), send_confirmation)) # looks like email, so send confirmation to it
dispatcher.add_handler(MessageHandler(NonConfirmedUserFilter() & Filters.text & Filters.regex('Resend confirmation email'), resend)) # resend confirmation email
dispatcher.add_handler(MessageHandler(NonConfirmedUserFilter() & Filters.text & Filters.regex('Change email'), reset_email)) # change email
dispatcher.add_handler(MessageHandler(UserWithoutEmailFilter(), prompt_for_setting_email))
dispatcher.add_handler(MessageHandler(NonConfirmedUserFilter(), prompt_for_confirm))
dispatcher.add_handler(MessageHandler(ConfirmedUserFilter() & Filters.text, send_text_message))
dispatcher.add_handler(MessageHandler(ConfirmedUserFilter() & Filters.photo, send_photo))
dispatcher.add_handler(MessageHandler(ConfirmedUserFilter() & Filters.voice, send_voice))
if __name__ == '__main__':
create_tables()
updater.start_polling()
|
[
"logging.basicConfig",
"envparse.env.read_envfile",
"telegram.ReplyKeyboardRemove",
"envparse.env",
"telegram.ReplyKeyboardMarkup",
"telegram.ext.CommandHandler",
"telegram.ext.filters.Filters.regex"
] |
[((379, 397), 'envparse.env.read_envfile', 'env.read_envfile', ([], {}), '()\n', (395, 397), False, 'from envparse import env\n'), ((398, 506), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (417, 506), False, 'import logging\n'), ((3043, 3113), 'telegram.ReplyKeyboardMarkup', 'ReplyKeyboardMarkup', (["[['Resend confirmation email'], ['Change email']]"], {}), "([['Resend confirmation email'], ['Change email']])\n", (3062, 3113), False, 'from telegram import Message, ReplyKeyboardMarkup, ReplyKeyboardRemove, Update\n'), ((3899, 3929), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""start"""', 'start'], {}), "('start', start)\n", (3913, 3929), False, 'from telegram.ext import CommandHandler, MessageHandler, Updater\n'), ((3825, 3841), 'envparse.env', 'env', (['"""BOT_TOKEN"""'], {}), "('BOT_TOKEN')\n", (3828, 3841), False, 'from envparse import env\n'), ((835, 856), 'telegram.ReplyKeyboardRemove', 'ReplyKeyboardRemove', ([], {}), '()\n', (854, 856), False, 'from telegram import Message, ReplyKeyboardMarkup, ReplyKeyboardRemove, Update\n'), ((1032, 1053), 'telegram.ReplyKeyboardRemove', 'ReplyKeyboardRemove', ([], {}), '()\n', (1051, 1053), False, 'from telegram import Message, ReplyKeyboardMarkup, ReplyKeyboardRemove, Update\n'), ((4011, 4029), 'telegram.ext.filters.Filters.regex', 'Filters.regex', (['"""@"""'], {}), "('@')\n", (4024, 4029), False, 'from telegram.ext.filters import BaseFilter, Filters\n'), ((4179, 4221), 'telegram.ext.filters.Filters.regex', 'Filters.regex', (['"""Resend confirmation email"""'], {}), "('Resend confirmation email')\n", (4192, 4221), False, 'from telegram.ext.filters import BaseFilter, Filters\n'), ((4341, 4370), 'telegram.ext.filters.Filters.regex', 'Filters.regex', (['"""Change email"""'], {}), "('Change email')\n", (4354, 4370), False, 'from telegram.ext.filters import BaseFilter, Filters\n')]
|
from trex.emu.api import *
import argparse
import get_args
class Prof1():
def __init__(self):
self.mac = Mac('00:00:00:70:00:01')
self.def_ns_plugs = {'ipv6' : {'dmac':self.mac.V()}}
self.def_c_plugs = None
def create_profile(self, ns_size, clients_size):
ns_list = []
# create different namespace each time
vport, tci, tpid = 0, [0, 0], [0x00, 0x00]
for j in range(vport, ns_size + vport):
ns_key = EMUNamespaceKey(vport = j,
tci = tci,
tpid = tpid)
ns = EMUNamespaceObj(ns_key = ns_key, def_c_plugs = self.def_c_plugs)
mac = self.mac
ipv6 = Ipv6("2001:DB8:1::2")
# create a different client each time
for i in range(clients_size):
client = EMUClientObj(mac = mac[i].V(),
ipv6 = ipv6[i].V(),
plugs = {'ipv6': {},
},
)
ns.add_clients(client)
ns_list.append(ns)
return EMUProfile(ns = ns_list, def_ns_plugs = self.def_ns_plugs)
def get_profile(self, tuneables):
args = get_args.get_args(tuneables)
return self.create_profile(args.ns, args.clients)
def register():
return Prof1()
|
[
"get_args.get_args"
] |
[((1341, 1369), 'get_args.get_args', 'get_args.get_args', (['tuneables'], {}), '(tuneables)\n', (1358, 1369), False, 'import get_args\n')]
|
import numpy as np
from .wavelength import wave_log10
def center2edge(x):
x = np.asarray(x)
dx = np.diff(x)
return np.hstack((x[0] - .5 * dx[0], x[:-1] + .5 * dx, x[-1] + .5 * dx[-1]))
def rebin(wave, flux=None, flux_err=None, mask=None, wave_new=None):
""" Rebin spectrum to a new wavelength grid
Parameters
----------
wave: array
old wavelength
flux: array
old flux
flux_err: array (optional)
old flux error
mask: array (optional)
old mask, True for bad.
wave_new:
new wavelength. if None, use log10 wavelength.
Return
------
re-binned (flux, [flux_err], [mask])
"""
wave = np.asarray(wave)
if wave_new is None:
wave_new = wave_log10(wave)
else:
wave_new = np.asarray(wave_new)
wave_edge = center2edge(wave)
wave_new_edge = center2edge(wave_new)
# I = interp1d(wave_edge[:-1], np.arange(len(wave)), kind="linear",
# bounds_error=False)
# wave_new_edge_pos = I(wave_new_edge) # accurate position projected to old
wave_new_edge_pos = np.interp(wave_new_edge,
wave_edge[:-1], np.arange(len(wave)),
left=np.nan, right=np.nan)
wave_new_edge_pos2 = np.array(
        [wave_new_edge_pos[:-1], wave_new_edge_pos[1:]]).T  # split to lo & hi
wave_new_ipix = np.floor(wave_new_edge_pos2).astype(int) # integer part
wave_new_frac = wave_new_edge_pos2 - wave_new_ipix # fraction part
flags = np.any(np.isnan(wave_new_edge_pos2), axis=1)
result = []
# rebin flux
if flux is not None:
flux = np.asarray(flux)
assert flux.shape == wave.shape
flux_new = np.zeros_like(wave_new, dtype=float)
for ipix, this_flag in enumerate(flags):
if not this_flag:
flux_new[ipix] = np.sum(
flux[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1]]) \
- flux[wave_new_ipix[ipix, 0]] * wave_new_frac[ipix, 0] \
+ flux[wave_new_ipix[ipix, 1]] * wave_new_frac[ipix, 1]
else:
flux_new[ipix] = np.nan
result.append(flux_new)
# rebin flux_err
if flux_err is not None:
flux_err2 = np.square(np.asarray(flux_err, dtype=float))
assert flux_err2.shape == wave.shape
flux_err2_new = np.zeros_like(wave_new, dtype=float)
for ipix, this_flag in enumerate(flags):
if not this_flag:
flux_err2_new[ipix] = np.sum(
flux_err2[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1]]) \
- flux_err2[wave_new_ipix[ipix, 0]] * wave_new_frac[ipix, 0] \
+ flux_err2[wave_new_ipix[ipix, 1]] * wave_new_frac[ipix, 1]
else:
flux_err2_new[ipix] = np.nan
result.append(np.sqrt(flux_err2_new))
# rebin mask
if mask is not None:
mask = np.asarray(mask)
assert mask.shape == wave.shape
mask_new = np.ones_like(wave_new, dtype=bool)
for ipix, this_flag in enumerate(flags):
if not this_flag:
mask_new[ipix] = np.any(
mask[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1] + 1])
result.append(mask_new)
if len(result) == 1:
return result[0]
elif len(result) > 1:
return result
else:
raise ValueError("@rebin: what to rebin?")
def _test():
wave, flux, wave_new = np.arange(10), np.ones(10), np.arange(0, 10, 2) + 0.5
flux[5] += 1
flux_err = flux
mask = ~ (flux > 0)
mask[5] = True
print("========================")
print(wave, flux)
print("========================")
print(wave, rebin(wave, flux, wave_new=wave_new))
print("========================")
print(wave_new, rebin(
wave, flux=flux, flux_err=flux_err, mask=mask, wave_new=wave_new))
print("========================")
# figure()
# plot(wave, flux, 'x-')
# plot(wave_new, rebin(wave, flux, wave_new), 's-')
return
if __name__ == "__main__":
_test()
|
[
"numpy.zeros_like",
"numpy.ones_like",
"numpy.sum",
"numpy.asarray",
"numpy.floor",
"numpy.ones",
"numpy.isnan",
"numpy.hstack",
"numpy.any",
"numpy.diff",
"numpy.array",
"numpy.arange",
"numpy.sqrt"
] |
[((85, 98), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (95, 98), True, 'import numpy as np\n'), ((108, 118), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (115, 118), True, 'import numpy as np\n'), ((130, 202), 'numpy.hstack', 'np.hstack', (['(x[0] - 0.5 * dx[0], x[:-1] + 0.5 * dx, x[-1] + 0.5 * dx[-1])'], {}), '((x[0] - 0.5 * dx[0], x[:-1] + 0.5 * dx, x[-1] + 0.5 * dx[-1]))\n', (139, 202), True, 'import numpy as np\n'), ((687, 703), 'numpy.asarray', 'np.asarray', (['wave'], {}), '(wave)\n', (697, 703), True, 'import numpy as np\n'), ((794, 814), 'numpy.asarray', 'np.asarray', (['wave_new'], {}), '(wave_new)\n', (804, 814), True, 'import numpy as np\n'), ((1292, 1349), 'numpy.array', 'np.array', (['[wave_new_edge_pos[:-1], wave_new_edge_pos[1:]]'], {}), '([wave_new_edge_pos[:-1], wave_new_edge_pos[1:]])\n', (1300, 1349), True, 'import numpy as np\n'), ((1550, 1578), 'numpy.isnan', 'np.isnan', (['wave_new_edge_pos2'], {}), '(wave_new_edge_pos2)\n', (1558, 1578), True, 'import numpy as np\n'), ((1663, 1679), 'numpy.asarray', 'np.asarray', (['flux'], {}), '(flux)\n', (1673, 1679), True, 'import numpy as np\n'), ((1739, 1775), 'numpy.zeros_like', 'np.zeros_like', (['wave_new'], {'dtype': 'float'}), '(wave_new, dtype=float)\n', (1752, 1775), True, 'import numpy as np\n'), ((2400, 2436), 'numpy.zeros_like', 'np.zeros_like', (['wave_new'], {'dtype': 'float'}), '(wave_new, dtype=float)\n', (2413, 2436), True, 'import numpy as np\n'), ((2973, 2989), 'numpy.asarray', 'np.asarray', (['mask'], {}), '(mask)\n', (2983, 2989), True, 'import numpy as np\n'), ((3049, 3083), 'numpy.ones_like', 'np.ones_like', (['wave_new'], {'dtype': 'bool'}), '(wave_new, dtype=bool)\n', (3061, 3083), True, 'import numpy as np\n'), ((3515, 3528), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3524, 3528), True, 'import numpy as np\n'), ((3530, 3541), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (3537, 3541), True, 'import numpy as np\n'), ((1402, 1430), 'numpy.floor', 'np.floor', (['wave_new_edge_pos2'], {}), '(wave_new_edge_pos2)\n', (1410, 1430), True, 'import numpy as np\n'), ((2296, 2329), 'numpy.asarray', 'np.asarray', (['flux_err'], {'dtype': 'float'}), '(flux_err, dtype=float)\n', (2306, 2329), True, 'import numpy as np\n'), ((2891, 2913), 'numpy.sqrt', 'np.sqrt', (['flux_err2_new'], {}), '(flux_err2_new)\n', (2898, 2913), True, 'import numpy as np\n'), ((3543, 3562), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(2)'], {}), '(0, 10, 2)\n', (3552, 3562), True, 'import numpy as np\n'), ((3196, 3259), 'numpy.any', 'np.any', (['mask[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1] + 1]'], {}), '(mask[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1] + 1])\n', (3202, 3259), True, 'import numpy as np\n'), ((1888, 1947), 'numpy.sum', 'np.sum', (['flux[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1]]'], {}), '(flux[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1]])\n', (1894, 1947), True, 'import numpy as np\n'), ((2554, 2618), 'numpy.sum', 'np.sum', (['flux_err2[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1]]'], {}), '(flux_err2[wave_new_ipix[ipix, 0]:wave_new_ipix[ipix, 1]])\n', (2560, 2618), True, 'import numpy as np\n')]
|
from typing import TYPE_CHECKING, List
if TYPE_CHECKING:
from Platforms.Discord.main_discord import PhaazebotDiscord
from Platforms.Web.main_web import PhaazebotWeb
import json
import discord
from aiohttp.web import Response
from Utils.Classes.discordregulardisabledchannel import DiscordRegularDisabledChannel
from Utils.Classes.authdiscordwebuser import AuthDiscordWebUser
from Utils.Classes.webrequestcontent import WebRequestContent
from Utils.Classes.extendedrequest import ExtendedRequest
from Utils.Classes.undefined import UNDEFINED
from Platforms.Discord.db import getDiscordServerRegularDisabledChannels
from Platforms.Web.utils import authDiscordWebUser
from Platforms.Web.Processing.Api.errors import apiMissingAuthorisation, apiMissingData
async def apiDiscordConfigsRegularDisabledChannelsDelete(cls:"PhaazebotWeb", WebRequest:ExtendedRequest) -> Response:
"""
Default url: /api/discord/configs/regulardisabledchannels/delete
"""
Data:WebRequestContent = WebRequestContent(WebRequest)
await Data.load()
# get required stuff
guild_id:str = Data.getStr("guild_id", UNDEFINED, must_be_digit=True)
entry_id:str = Data.getStr("entry_id", UNDEFINED, must_be_digit=True)
# checks
if not guild_id:
return await apiMissingData(cls, WebRequest, msg="missing or invalid 'guild_id'")
if not entry_id:
return await apiMissingData(cls, WebRequest, msg="missing or invalid 'entry_id'")
PhaazeDiscord:"PhaazebotDiscord" = cls.BASE.Discord
Guild:discord.Guild = discord.utils.get(PhaazeDiscord.guilds, id=int(guild_id))
if not Guild:
return await cls.Tree.Api.Discord.errors.apiDiscordGuildUnknown(cls, WebRequest)
# get user info
AuthDiscord:AuthDiscordWebUser = await authDiscordWebUser(cls, WebRequest)
if not AuthDiscord.found:
return await apiMissingAuthorisation(cls, WebRequest)
# get member
CheckMember:discord.Member = Guild.get_member(int(AuthDiscord.User.user_id))
if not CheckMember:
return await cls.Tree.Api.Discord.errors.apiDiscordMemberNotFound(cls, WebRequest, guild_id=guild_id, user_id=AuthDiscord.User.user_id)
# check permissions
if not (CheckMember.guild_permissions.administrator or CheckMember.guild_permissions.manage_guild):
return await cls.Tree.Api.Discord.errors.apiDiscordMissingPermission(cls, WebRequest, guild_id=guild_id, user_id=AuthDiscord.User.user_id)
# get channel entry
res_channel:List[DiscordRegularDisabledChannel] = await getDiscordServerRegularDisabledChannels(PhaazeDiscord, guild_id=guild_id, entry_id=entry_id)
if not res_channel:
return await cls.Tree.Api.Discord.Configs.Regulardisabledchannels.errors.apiDiscordConfigsRegularDisabledChannelNotExists(cls, WebRequest)
ChannelToDelete:DiscordRegularDisabledChannel = res_channel.pop(0)
cls.BASE.PhaazeDB.deleteQuery("""
DELETE FROM `discord_disabled_regularchannel` WHERE `guild_id` = %s AND `id` = %s""",
(ChannelToDelete.guild_id, ChannelToDelete.entry_id)
)
cls.BASE.Logger.debug(f"(API/Discord) Regular disabled channel: {guild_id=} deleted {entry_id=}", require="discord:configs")
return cls.response(
text=json.dumps(dict(msg=f"Regular disabled channel: Deleted entry", deleted=ChannelToDelete.channel_id, status=200)),
content_type="application/json",
status=200
)
|
[
"Utils.Classes.webrequestcontent.WebRequestContent",
"Platforms.Web.Processing.Api.errors.apiMissingData",
"Platforms.Web.Processing.Api.errors.apiMissingAuthorisation",
"Platforms.Web.utils.authDiscordWebUser",
"Platforms.Discord.db.getDiscordServerRegularDisabledChannels"
] |
[((977, 1006), 'Utils.Classes.webrequestcontent.WebRequestContent', 'WebRequestContent', (['WebRequest'], {}), '(WebRequest)\n', (994, 1006), False, 'from Utils.Classes.webrequestcontent import WebRequestContent\n'), ((1698, 1733), 'Platforms.Web.utils.authDiscordWebUser', 'authDiscordWebUser', (['cls', 'WebRequest'], {}), '(cls, WebRequest)\n', (1716, 1733), False, 'from Platforms.Web.utils import authDiscordWebUser\n'), ((2412, 2508), 'Platforms.Discord.db.getDiscordServerRegularDisabledChannels', 'getDiscordServerRegularDisabledChannels', (['PhaazeDiscord'], {'guild_id': 'guild_id', 'entry_id': 'entry_id'}), '(PhaazeDiscord, guild_id=guild_id,\n entry_id=entry_id)\n', (2451, 2508), False, 'from Platforms.Discord.db import getDiscordServerRegularDisabledChannels\n'), ((1235, 1303), 'Platforms.Web.Processing.Api.errors.apiMissingData', 'apiMissingData', (['cls', 'WebRequest'], {'msg': '"""missing or invalid \'guild_id\'"""'}), '(cls, WebRequest, msg="missing or invalid \'guild_id\'")\n', (1249, 1303), False, 'from Platforms.Web.Processing.Api.errors import apiMissingAuthorisation, apiMissingData\n'), ((1338, 1406), 'Platforms.Web.Processing.Api.errors.apiMissingData', 'apiMissingData', (['cls', 'WebRequest'], {'msg': '"""missing or invalid \'entry_id\'"""'}), '(cls, WebRequest, msg="missing or invalid \'entry_id\'")\n', (1352, 1406), False, 'from Platforms.Web.Processing.Api.errors import apiMissingAuthorisation, apiMissingData\n'), ((1776, 1816), 'Platforms.Web.Processing.Api.errors.apiMissingAuthorisation', 'apiMissingAuthorisation', (['cls', 'WebRequest'], {}), '(cls, WebRequest)\n', (1799, 1816), False, 'from Platforms.Web.Processing.Api.errors import apiMissingAuthorisation, apiMissingData\n')]
|
# Copyright Contributors to the Pyro-Cov project.
# SPDX-License-Identifier: Apache-2.0
import logging
from collections import defaultdict, namedtuple
import numpy as np
import torch
logger = logging.getLogger(__name__)
_SubClade = namedtuple("_SubClade", ("clade", "name"))
class Phylogeny:
"""
Tensor data structure to represent a (batched) phylogenetic tree.
    The tree is timed and is assumed to have only binary nodes; polytomies are
    represented as multiple binary nodes with zero branch length.
:param Tensor times: float tensor of times of each node. Must be ordered.
:param Tensor parents: int tensor of parent id of each node. The root node
must be first and have null id ``-1``.
:param Tensor leaves: int tensor of ids of all leaf nodes.
"""
_fields = ("times", "parents", "leaves")
def __init__(self, times, parents, leaves):
num_nodes = times.size(-1)
assert num_nodes % 2 == 1, "expected odd number of nodes"
num_leaves = (num_nodes + 1) // 2
assert parents.shape == times.shape
assert leaves.shape == times.shape[:-1] + (num_leaves,)
assert (
times[..., :-1] <= times[..., 1:]
).all(), "expected nodes ordered by time"
assert (parents[..., 0] == -1).all(), "expected root node first"
assert (parents[..., 1:] >= 0).all(), "multiple root nodes"
if __debug__:
_parents = parents[..., 1:]
is_leaf_1 = torch.ones_like(parents, dtype=torch.bool)
is_leaf_1.scatter_(-1, _parents, False)
is_leaf_2 = torch.zeros_like(is_leaf_1)
is_leaf_2.scatter_(-1, leaves, True)
assert (is_leaf_1.sum(-1) == num_leaves).all()
assert (is_leaf_2.sum(-1) == num_leaves).all()
assert (is_leaf_2 == is_leaf_1).all()
super().__init__()
self.times = times
self.parents = parents
self.leaves = leaves
@property
def num_nodes(self):
return self.times.size(-1)
@property
def num_leaves(self):
return self.leaves.size(-1)
@property
def batch_shape(self):
return self.times.shape[:-1]
def __len__(self):
return self.batch_shape[0]
def __getitem__(self, index):
kwargs = {name: getattr(self, name)[index] for name in self._fields}
return Phylogeny(**kwargs)
def __iter__(self):
for i in range(len(self)):
yield self[i]
def contiguous(self):
kwargs = {name: getattr(self, name).contiguous() for name in self._fields}
return Phylogeny(**kwargs)
def num_lineages(self):
_parents = self.parents[..., 1:]
sign = torch.ones_like(self.parents).scatter_(-1, _parents, -1.0)
num_lineages = sign.flip(-1).cumsum(-1).flip(-1)
return num_lineages
def hash_topology(self):
"""
Returns a hashable binary tree represented as nested frozensets.
"""
if self.batch_shape:
return tuple(p.hash_topology() for p in self)
trees = defaultdict(list)
for leaf, v in enumerate(self.leaves.tolist()):
trees[v] = leaf
for v, parent in enumerate(self.parents[1:].tolist()):
trees[parent].append(trees[v + 1])
def freeze(x):
if isinstance(x, int):
return x
assert len(x) == 2
return frozenset(map(freeze, x))
return freeze(trees[0])
def time_mrca(self):
"""
Computes all-pairs times to most recent common ancestor.
"""
if self.batch_shape:
return torch.stack([p.time_mrca() for p in self])
descendents = torch.eye(self.num_nodes, dtype=torch.bool)
children = defaultdict(list)
result = self.times.new_zeros(descendents.shape)
result.diagonal()[:] = self.times
for c in range(self.num_nodes - 1, 0, -1):
p = self.parents[c].item()
descendents[p] |= descendents[c]
children[p].append(c)
if len(children[p]) == 2:
c1, c2 = children[p]
d1 = descendents[c1]
d2 = descendents[c2]
mask = d1 & d2[:, None]
mask[p] |= d1 | d2
mask |= mask.T
result[mask] = self.times[p]
return result
def leaf_time_mrca(self):
"""
Computes times to most recent common ancestors for all leaves.
For phylogenies whose leaves are all at time 0, the result is an
ultrametric.
"""
if self.batch_shape:
return torch.stack([p.leaf_time_mrca() for p in self])
result = self.time_mrca()
return result[self.leaves[..., None], self.leaves]
@staticmethod
def stack(phylogenies):
"""
:param iterable phylogenies: An iterable of :class:`Phylogeny` objects
of identical shape.
:returns: A batched phylogeny.
:rtype: Phylogeny
"""
phylogenies = list(phylogenies)
kwargs = {
name: torch.stack([getattr(x, name) for x in phylogenies])
for name in Phylogeny._fields
}
return Phylogeny(**kwargs)
@staticmethod
def from_bio_phylo(tree):
"""
Builds a :class:`Phylogeny` object from a biopython tree structure.
:param Bio.Phylo.BaseTree.Clade tree: A phylogenetic tree.
:returns: A single phylogeny.
:rtype: Phylogeny
"""
# Compute time as cumulative branch length.
def get_branch_length(clade):
branch_length = clade.branch_length
return 1.0 if branch_length is None else branch_length
# Collect times and parents.
clades = list(tree.find_clades())
clade_to_time = {tree.root: get_branch_length(tree.root)}
clade_to_parent = {}
clade_to_children = defaultdict(list)
for clade in clades:
time = clade_to_time[clade]
for child in clade:
clade_to_time[child] = time + get_branch_length(child)
clade_to_parent[child] = clade
clade_to_children[clade].append(child)
# Binarize the tree.
for parent, children in clade_to_children.items():
while len(children) > 2:
c1 = children.pop()
c2 = children.pop()
c12 = _SubClade(parent, f"{parent.name}.{len(children):0>4d}")
clades.append(c12)
children.append(c12)
clade_to_time[c12] = clade_to_time[parent]
clade_to_parent[c1] = c12
clade_to_parent[c2] = c12
clade_to_parent[c12] = parent
del clade_to_children
# Serialize clades.
clades.sort(key=lambda c: (clade_to_time[c], str(c.name)))
assert clades[0] not in clade_to_parent, "invalid root"
clade_to_id = {clade: i for i, clade in enumerate(clades)}
times = torch.tensor([float(clade_to_time[clade]) for clade in clades])
parents = torch.tensor(
[-1] + [clade_to_id[clade_to_parent[clade]] for clade in clades[1:]]
)
# Construct leaf index ordered by clade.name.
leaves = [clade for clade in clades if len(clade) == 0]
leaves.sort(key=lambda clade: clade.name)
leaves = torch.tensor([clade_to_id[clade] for clade in leaves])
return Phylogeny(times, parents, leaves)
@staticmethod
def generate(num_leaves, *, num_samples=None):
"""
Generate a random (arbitrarily distributed) phylogeny for testing.
"""
if num_samples is not None:
return Phylogeny.stack(
Phylogeny.generate(num_leaves) for _ in range(num_samples)
)
num_nodes = 2 * num_leaves - 1
times = torch.randn(num_nodes)
nodes = list(range(num_leaves))
parents = torch.zeros(num_nodes, dtype=torch.long)
for w in range(num_leaves, num_nodes):
i, j = np.random.choice(len(nodes), 2, replace=False)
u = nodes[i]
v = nodes[j]
nodes[i] = w
del nodes[j]
parents[u] = w
parents[v] = w
times[w] = torch.min(times[u], times[v]) - torch.rand(()) / num_leaves
assert len(nodes) == 1
leaves = torch.arange(num_leaves)
return Phylogeny.from_unsorted(times, parents, leaves)
@staticmethod
def sort(times, parents, leaves):
if times.dim() > 1:
raise NotImplementedError("Phylogeny.sort() does not support batching")
num_nodes = times.size(-1)
times, new2old = times.sort(-1)
old2new = torch.empty(num_nodes, dtype=torch.long)
old2new[new2old] = torch.arange(num_nodes)
leaves = old2new[leaves]
parents = old2new[parents[new2old]]
parents[0] = -1
return Phylogeny(times, parents, leaves), old2new, new2old
@staticmethod
def from_unsorted(times, parents, leaves):
return Phylogeny.sort(times, parents, leaves)[0]
|
[
"torch.ones_like",
"torch.eye",
"torch.zeros_like",
"torch.empty",
"torch.randn",
"logging.getLogger",
"collections.defaultdict",
"collections.namedtuple",
"torch.arange",
"torch.rand",
"torch.zeros",
"torch.min",
"torch.tensor"
] |
[((195, 222), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (212, 222), False, 'import logging\n'), ((236, 278), 'collections.namedtuple', 'namedtuple', (['"""_SubClade"""', "('clade', 'name')"], {}), "('_SubClade', ('clade', 'name'))\n", (246, 278), False, 'from collections import defaultdict, namedtuple\n'), ((3090, 3107), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3101, 3107), False, 'from collections import defaultdict, namedtuple\n'), ((3723, 3766), 'torch.eye', 'torch.eye', (['self.num_nodes'], {'dtype': 'torch.bool'}), '(self.num_nodes, dtype=torch.bool)\n', (3732, 3766), False, 'import torch\n'), ((3786, 3803), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3797, 3803), False, 'from collections import defaultdict, namedtuple\n'), ((5956, 5973), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5967, 5973), False, 'from collections import defaultdict, namedtuple\n'), ((7141, 7227), 'torch.tensor', 'torch.tensor', (['([-1] + [clade_to_id[clade_to_parent[clade]] for clade in clades[1:]])'], {}), '([-1] + [clade_to_id[clade_to_parent[clade]] for clade in\n clades[1:]])\n', (7153, 7227), False, 'import torch\n'), ((7432, 7486), 'torch.tensor', 'torch.tensor', (['[clade_to_id[clade] for clade in leaves]'], {}), '([clade_to_id[clade] for clade in leaves])\n', (7444, 7486), False, 'import torch\n'), ((7922, 7944), 'torch.randn', 'torch.randn', (['num_nodes'], {}), '(num_nodes)\n', (7933, 7944), False, 'import torch\n'), ((8003, 8043), 'torch.zeros', 'torch.zeros', (['num_nodes'], {'dtype': 'torch.long'}), '(num_nodes, dtype=torch.long)\n', (8014, 8043), False, 'import torch\n'), ((8442, 8466), 'torch.arange', 'torch.arange', (['num_leaves'], {}), '(num_leaves)\n', (8454, 8466), False, 'import torch\n'), ((8792, 8832), 'torch.empty', 'torch.empty', (['num_nodes'], {'dtype': 'torch.long'}), '(num_nodes, dtype=torch.long)\n', (8803, 8832), False, 'import torch\n'), ((8860, 8883), 'torch.arange', 'torch.arange', (['num_nodes'], {}), '(num_nodes)\n', (8872, 8883), False, 'import torch\n'), ((1485, 1527), 'torch.ones_like', 'torch.ones_like', (['parents'], {'dtype': 'torch.bool'}), '(parents, dtype=torch.bool)\n', (1500, 1527), False, 'import torch\n'), ((1604, 1631), 'torch.zeros_like', 'torch.zeros_like', (['is_leaf_1'], {}), '(is_leaf_1)\n', (1620, 1631), False, 'import torch\n'), ((2716, 2745), 'torch.ones_like', 'torch.ones_like', (['self.parents'], {}), '(self.parents)\n', (2731, 2745), False, 'import torch\n'), ((8334, 8363), 'torch.min', 'torch.min', (['times[u]', 'times[v]'], {}), '(times[u], times[v])\n', (8343, 8363), False, 'import torch\n'), ((8366, 8380), 'torch.rand', 'torch.rand', (['()'], {}), '(())\n', (8376, 8380), False, 'import torch\n')]
|
#!/usr/bin/env python
#
# Copyright 2020 Google LLC
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
skslc = sys.argv[1]
lang = sys.argv[2]
settings = sys.argv[3]
inputs = sys.argv[4:]
def makeEmptyFile(path):
try:
open(path, 'wb').close()
except OSError:
pass
def compile(skslc, input, target, extension):
target += extension
try:
subprocess.check_output([skslc, input, target, settings], stderr=subprocess.STDOUT)
return True
except subprocess.CalledProcessError as err:
        with open(target, 'w') as dst:
            dst.write("### Compilation failed:\n\n")
            dst.write("\n".join(err.output.decode('utf-8', errors='replace').splitlines()))
            dst.write("\n")
return False
if settings != "--settings" and settings != "--nosettings":
sys.exit("### Expected --settings or --nosettings, got " + settings)
for input in inputs:
noExt, ext = os.path.splitext(input)
head, tail = os.path.split(noExt)
targetDir = os.path.join(head, "golden")
if not os.path.isdir(targetDir):
os.mkdir(targetDir)
target = os.path.join(targetDir, tail)
if settings == "--nosettings":
target += "StandaloneSettings"
if lang == "--fp":
# First, compile the CPP. If we get an error, stop here.
if compile(skslc, input, target, ".cpp"):
# Next, compile the header.
if compile(skslc, input, target, ".h"):
# Both files built successfully.
continue
else:
# The header generated an error; this counts as an overall failure for this test.
# Blank out the passing CPP output since it's not relevant in a failure case.
makeEmptyFile(target + ".cpp")
else:
# The CPP generated an error. We didn't actually generate a header at all, but Ninja
# expects an output file to exist or it won't reach steady-state.
makeEmptyFile(target + ".h")
elif lang == "--glsl":
compile(skslc, input, target, ".glsl")
elif lang == "--metal":
compile(skslc, input, target, ".metal")
else:
sys.exit("### Expected one of: --fp --glsl --metal, got " + lang)
|
[
"os.mkdir",
"os.path.isdir",
"subprocess.check_output",
"os.path.splitext",
"os.path.split",
"os.path.join",
"sys.exit"
] |
[((889, 957), 'sys.exit', 'sys.exit', (["('### Expected --settings or --nosettings, got ' + settings)"], {}), "('### Expected --settings or --nosettings, got ' + settings)\n", (897, 957), False, 'import sys\n'), ((997, 1020), 'os.path.splitext', 'os.path.splitext', (['input'], {}), '(input)\n', (1013, 1020), False, 'import os\n'), ((1038, 1058), 'os.path.split', 'os.path.split', (['noExt'], {}), '(noExt)\n', (1051, 1058), False, 'import os\n'), ((1075, 1103), 'os.path.join', 'os.path.join', (['head', '"""golden"""'], {}), "(head, 'golden')\n", (1087, 1103), False, 'import os\n'), ((1183, 1212), 'os.path.join', 'os.path.join', (['targetDir', 'tail'], {}), '(targetDir, tail)\n', (1195, 1212), False, 'import os\n'), ((470, 558), 'subprocess.check_output', 'subprocess.check_output', (['[skslc, input, target, settings]'], {'stderr': 'subprocess.STDOUT'}), '([skslc, input, target, settings], stderr=subprocess\n .STDOUT)\n', (493, 558), False, 'import subprocess\n'), ((1115, 1139), 'os.path.isdir', 'os.path.isdir', (['targetDir'], {}), '(targetDir)\n', (1128, 1139), False, 'import os\n'), ((1149, 1168), 'os.mkdir', 'os.mkdir', (['targetDir'], {}), '(targetDir)\n', (1157, 1168), False, 'import os\n'), ((2247, 2312), 'sys.exit', 'sys.exit', (["('### Expected one of: --fp --glsl --metal, got ' + lang)"], {}), "('### Expected one of: --fp --glsl --metal, got ' + lang)\n", (2255, 2312), False, 'import sys\n')]
|
import os
import pyautogui as him2
import pyttsx3
import pywhatkit as him
import speech_recognition as him1
import wikipedia
import webbrowser
import smtplib
import datetime
import folium
import cv2
import pytube
from pytube import YouTube
import numpy as np
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def greet():
hour = int(datetime.datetime.now().hour)
if hour>=0 and hour<12:
speak("Good Morning Himanshu Sir, Have Your Tea Or Coffee!")
elif hour>=12 and hour<18:
speak("Good Afternoon Himanshu Sir, Have Your Lunch or not!")
else:
speak("Good Evening Himanshu Sir ,Have Your Snacks or not!")
speak("I am MALGO Your Personal Assistant Sir. How may I help you")
def Command():
r = him1.Recognizer()
with him1.Microphone() as source:
r.adjust_for_ambient_noise(source,duration=4)
print("Listening You Sir......")
speak("Listening You Sir......")
r.pause_threshold = 0.6
audio = r.listen(source)
try:
print("Trying to understand...")
speak("Trying to understand...")
query = r.recognize_google(audio, language='en-in').lower()
print("Ok Sir :-)\n")
speak("Ok Sir \n")
except Exception as e:
print(e)
print("Please Say That Again Sir...")
speak("Please Say That Again Sir...")
return "None"
return query
def Email(to, content):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login('<EMAIL>', 'password')
server.sendmail('<EMAIL>', to, content)
server.close()
if __name__ == "__main__":
greet()
while (True):
query = Command().lower()
        if 'tell me about' in query:
            speak('Searching From Internet...')
            query = query.replace("tell me about", "")
results = wikipedia.summary(query, sentences=2)
speak("As Internet Says...")
print(results)
speak(results)
elif 'open my google' in query:
webbrowser.open("www.google.com")
print("Opening Google")
speak("Opening Google")
elif 'send whatsapp to me' in query:
him.sendwhatmsg("+919634470602","Hi sir how are you?",17,40)
print("Sending Whatsapp Message")
speak("Sending Whatsapp Message")
        elif 'map current location' in query:
curr_map=folium.Map(location=[27.1843328,77.98784]).save("E:\\TEST\\him.html")
webbrowser.open("E:\\TEST\\him.html")
print("Done Sir")
speak("Done Sir")
elif 'open my youtube' in query:
webbrowser.open("www.youtube.com")
print("Done Sir")
speak("Done Sir")
        elif 'open my github' in query:
webbrowser.open("www.github.com")
print("Done Sir")
speak("Done Sir")
        elif 'open image malgo' in query:
image_dir = "C:\\Users\\GOLASHBOY\\Pictures\\Camera Roll"
image = os.listdir(image_dir)
print(image)
os.startfile(os.path.join(image_dir, image[0]))
print("Done Sir")
speak("Done Sir")
        elif 'tell me the current time' in query:
            strTime = datetime.datetime.now().strftime("%H:%M:%S")
            speak(f"Sir, the time is {strTime}")
        elif 'email to sir' in query:
try:
speak("What should be in email Sir ?")
content = Command()
to = "<EMAIL>"
Email(to, content)
speak("Email has been sent Successfully to You Sir!")
except Exception as e:
print(e)
speak("Sorry Sir. I am not able to send this email to anyone.")
|
[
"webbrowser.open",
"os.path.join",
"smtplib.SMTP",
"pyttsx3.init",
"speech_recognition.Microphone",
"pywhatkit.sendwhatmsg",
"wikipedia.summary",
"folium.Map",
"datetime.datetime.now",
"os.listdir",
"speech_recognition.Recognizer"
] |
[((294, 315), 'pyttsx3.init', 'pyttsx3.init', (['"""sapi5"""'], {}), "('sapi5')\n", (306, 315), False, 'import pyttsx3\n'), ((942, 959), 'speech_recognition.Recognizer', 'him1.Recognizer', ([], {}), '()\n', (957, 959), True, 'import speech_recognition as him1\n'), ((1664, 1699), 'smtplib.SMTP', 'smtplib.SMTP', (['"""smtp.gmail.com"""', '(587)'], {}), "('smtp.gmail.com', 587)\n", (1676, 1699), False, 'import smtplib\n'), ((970, 987), 'speech_recognition.Microphone', 'him1.Microphone', ([], {}), '()\n', (985, 987), True, 'import speech_recognition as him1\n'), ((503, 526), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (524, 526), False, 'import datetime\n'), ((2119, 2156), 'wikipedia.summary', 'wikipedia.summary', (['query'], {'sentences': '(2)'}), '(query, sentences=2)\n', (2136, 2156), False, 'import wikipedia\n'), ((2311, 2344), 'webbrowser.open', 'webbrowser.open', (['"""www.google.com"""'], {}), "('www.google.com')\n", (2326, 2344), False, 'import webbrowser\n'), ((2519, 2582), 'pywhatkit.sendwhatmsg', 'him.sendwhatmsg', (['"""+919634470602"""', '"""Hi sir how are you?"""', '(17)', '(40)'], {}), "('+919634470602', 'Hi sir how are you?', 17, 40)\n", (2534, 2582), True, 'import pywhatkit as him\n'), ((2851, 2888), 'webbrowser.open', 'webbrowser.open', (['"""E:\\\\TEST\\\\him.html"""'], {}), "('E:\\\\TEST\\\\him.html')\n", (2866, 2888), False, 'import webbrowser\n'), ((3034, 3068), 'webbrowser.open', 'webbrowser.open', (['"""www.youtube.com"""'], {}), "('www.youtube.com')\n", (3049, 3068), False, 'import webbrowser\n'), ((2768, 2811), 'folium.Map', 'folium.Map', ([], {'location': '[27.1843328, 77.98784]'}), '(location=[27.1843328, 77.98784])\n', (2778, 2811), False, 'import folium\n'), ((3202, 3235), 'webbrowser.open', 'webbrowser.open', (['"""www.github.com"""'], {}), "('www.github.com')\n", (3217, 3235), False, 'import webbrowser\n'), ((3438, 3459), 'os.listdir', 'os.listdir', (['image_dir'], {}), '(image_dir)\n', (3448, 3459), False, 'import os\n'), ((3516, 3549), 'os.path.join', 'os.path.join', (['image_dir', 'image[0]'], {}), '(image_dir, image[0])\n', (3528, 3549), False, 'import os\n'), ((3689, 3712), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3710, 3712), False, 'import datetime\n')]
|
from datetime import datetime
from decimal import Decimal
from dateutil import tz
from yarl import URL
from gateway.schema.customfeedinfo import CustomFeedInfo
from gateway.schema.dynamodb_feedinfo_schema import DynamoDbFeedInfoSchema
from gateway.schema.dynamodb_site_schema import DynamoDbSiteSchema
from gateway.schema.dynamodb_sitepath_schema import DynamoDbSitePathSchema
from gateway.schema.sitehost import SiteHost
from gateway.schema.sitepath import SitePath
def test_sitefeed_schema_loads(sitefeed_schema, sitefeed_json):
sitefeed = sitefeed_schema.loads(sitefeed_json)
assert sitefeed
assert isinstance(sitefeed, dict)
assert sitefeed["host"] == "xkcd.com"
assert sitefeed["last_seen"]
assert len(sitefeed["feeds"]) == 2
feed1 = sitefeed["feeds"]["https://xkcd.com/rss.xml"]
assert feed1
assert isinstance(feed1, CustomFeedInfo)
assert feed1.title == "xkcd.com"
assert feed1.version == "rss20"
feed2 = sitefeed["feeds"]["https://xkcd.com/atom.xml"]
assert isinstance(feed2, CustomFeedInfo)
assert feed2.title == "xkcd.com"
assert feed2.version == "atom10"
def test_feedinfo_schema_loads():
pass
feedinfo_schema_dict = {
"PK": "SITE#en.wikipedia.org",
"SK": "FEED#https://en.wikipedia.org/?feed=potd&format=atom",
"bozo": 0,
"content_length": 1024,
"host": "en.wikipedia.org",
"hubs": ["https://pubsubhubbub.com", "https://test.com/hub"],
"is_podcast": False,
"is_push": True,
"item_count": 10,
"last_seen": "2019-11-03T08:50:43+00:00",
"score": 0,
"url": "https://en.wikipedia.org/?feed=potd&format=atom",
"velocity": 1,
}
def test_dynamodb_feedinfo_schema_load():
schema = DynamoDbFeedInfoSchema()
feed = schema.load(feedinfo_schema_dict)
assert isinstance(feed, CustomFeedInfo)
assert feed.host == "en.wikipedia.org"
assert feed.url == URL("https://en.wikipedia.org?feed=potd&format=atom")
assert feed.last_seen == datetime(2019, 11, 3, 8, 50, 43, tzinfo=tz.tzutc())
assert feed.hubs == ["https://pubsubhubbub.com", "https://test.com/hub"]
assert feed.velocity == 1
assert feed.is_push is True
assert feed.item_count == 10
assert feed.content_length == 1024
def test_dynamodb_feedinfo_schema_dump():
schema = DynamoDbFeedInfoSchema()
feed = CustomFeedInfo(
host="en.wikipedia.org",
url=URL("https://en.wikipedia.org?feed=potd&format=atom"),
last_seen=datetime(2019, 11, 3, 8, 50, 43, tzinfo=tz.tzutc()),
hubs=["https://pubsubhubbub.com", "https://test.com/hub"],
velocity=1,
is_push=True,
item_count=10,
content_length=1024,
)
dump = schema.dump(feed)
assert dump == feedinfo_schema_dict
site_schema_dict = {
"PK": "SITE#en.wikipedia.org",
"SK": "#METADATA#",
"host": "en.wikipedia.org",
"last_seen": "2019-11-03T08:50:43+00:00",
}
def test_dynamodb_site_schema_load():
schema = DynamoDbSiteSchema()
site = schema.load(site_schema_dict)
assert isinstance(site, SiteHost)
assert site.host == "en.wikipedia.org"
assert site.last_seen == datetime(2019, 11, 3, 8, 50, 43, tzinfo=tz.tzutc())
def test_dynamodb_site_schema_dump():
schema = DynamoDbSiteSchema()
site = SiteHost(
host="en.wikipedia.org",
last_seen=datetime(2019, 11, 3, 8, 50, 43, tzinfo=tz.tzutc()),
)
dump = schema.dump(site)
assert dump == site_schema_dict
def test_sitepath_schema():
schema = DynamoDbSitePathSchema()
feeds = ["test.com/testing/rss.xml", "test.com/testing/atom.xml"]
sitepath = SitePath(
host="test.com", path="/testing", last_seen=datetime(2019, 1, 1), feeds=feeds
)
serialized = schema.dump(sitepath)
assert serialized.get("PK") == "SITE#test.com"
assert serialized.get("SK") == "PATH#/testing"
assert serialized.get("feeds") == feeds
deserialized = schema.load(serialized)
assert isinstance(deserialized, SitePath)
assert deserialized.host == "test.com"
assert deserialized.path == "/testing"
assert deserialized.feeds == feeds
|
[
"datetime.datetime",
"gateway.schema.dynamodb_feedinfo_schema.DynamoDbFeedInfoSchema",
"gateway.schema.dynamodb_site_schema.DynamoDbSiteSchema",
"dateutil.tz.tzutc",
"gateway.schema.dynamodb_sitepath_schema.DynamoDbSitePathSchema",
"yarl.URL"
] |
[((1714, 1738), 'gateway.schema.dynamodb_feedinfo_schema.DynamoDbFeedInfoSchema', 'DynamoDbFeedInfoSchema', ([], {}), '()\n', (1736, 1738), False, 'from gateway.schema.dynamodb_feedinfo_schema import DynamoDbFeedInfoSchema\n'), ((2297, 2321), 'gateway.schema.dynamodb_feedinfo_schema.DynamoDbFeedInfoSchema', 'DynamoDbFeedInfoSchema', ([], {}), '()\n', (2319, 2321), False, 'from gateway.schema.dynamodb_feedinfo_schema import DynamoDbFeedInfoSchema\n'), ((2971, 2991), 'gateway.schema.dynamodb_site_schema.DynamoDbSiteSchema', 'DynamoDbSiteSchema', ([], {}), '()\n', (2989, 2991), False, 'from gateway.schema.dynamodb_site_schema import DynamoDbSiteSchema\n'), ((3248, 3268), 'gateway.schema.dynamodb_site_schema.DynamoDbSiteSchema', 'DynamoDbSiteSchema', ([], {}), '()\n', (3266, 3268), False, 'from gateway.schema.dynamodb_site_schema import DynamoDbSiteSchema\n'), ((3508, 3532), 'gateway.schema.dynamodb_sitepath_schema.DynamoDbSitePathSchema', 'DynamoDbSitePathSchema', ([], {}), '()\n', (3530, 3532), False, 'from gateway.schema.dynamodb_sitepath_schema import DynamoDbSitePathSchema\n'), ((1894, 1947), 'yarl.URL', 'URL', (['"""https://en.wikipedia.org?feed=potd&format=atom"""'], {}), "('https://en.wikipedia.org?feed=potd&format=atom')\n", (1897, 1947), False, 'from yarl import URL\n'), ((2394, 2447), 'yarl.URL', 'URL', (['"""https://en.wikipedia.org?feed=potd&format=atom"""'], {}), "('https://en.wikipedia.org?feed=potd&format=atom')\n", (2397, 2447), False, 'from yarl import URL\n'), ((3682, 3702), 'datetime.datetime', 'datetime', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (3690, 3702), False, 'from datetime import datetime\n'), ((2017, 2027), 'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (2025, 2027), False, 'from dateutil import tz\n'), ((3183, 3193), 'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (3191, 3193), False, 'from dateutil import tz\n'), ((2507, 2517), 'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (2515, 2517), False, 'from dateutil import tz\n'), ((3381, 3391), 'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (3389, 3391), False, 'from dateutil import tz\n')]
|
import re
import typing as T
from collections import Counter
from contextlib import contextmanager
from itertools import cycle
from pathlib import Path
from tempfile import TemporaryDirectory
from threading import Lock, Thread
from unittest import mock
import pytest
import yaml
from sqlalchemy import MetaData, create_engine
from cumulusci.core import exceptions as exc
from cumulusci.core.config import OrgConfig
from cumulusci.tasks.bulkdata.snowfakery import (
RunningTotals,
Snowfakery,
SnowfakeryWorkingDirectory,
)
from cumulusci.tasks.bulkdata.tests.integration_test_utils import ensure_accounts
from cumulusci.tasks.bulkdata.tests.utils import _make_task
from cumulusci.tasks.salesforce.BaseSalesforceApiTask import BaseSalesforceApiTask
from cumulusci.tests.util import DummyKeychain, DummyOrgConfig
from cumulusci.utils.parallel.task_worker_queues.tests.test_parallel_worker import (
DelaySpawner,
)
ensure_accounts = ensure_accounts # fixes 4 lint errors at once. Don't hate the player, hate the game.
simple_salesforce_yaml = (
Path(__file__).parent / "snowfakery/simple_snowfakery.recipe.yml"
)
sample_yaml = Path(__file__).parent / "snowfakery/gen_npsp_standard_objects.recipe.yml"
query_yaml = Path(__file__).parent / "snowfakery/query_snowfakery.recipe.yml"
original_refresh_token = OrgConfig.refresh_oauth_token
FAKE_LOAD_RESULTS = (
{
"Insert Account": {
"sobject": "Account",
"record_type": None,
"status": "Success",
"records_processed": 2,
"total_row_errors": 0,
},
"Insert Contact": {
"sobject": "Contact",
"record_type": None,
"status": "Success",
"records_processed": 2,
"total_row_errors": 0,
},
},
{
"Insert Account": {
"sobject": "Account",
"record_type": None,
"status": "Success",
"records_processed": 3,
"total_row_errors": 0,
},
"Insert Contact": {
"sobject": "Contact",
"record_type": None,
"status": "Success",
"records_processed": 3,
"total_row_errors": 0,
},
},
)
def table_values(connection, table):
query = f"select * from {table.name}"
values = [val for val in connection.execute(query)]
return values
class FakeLoadData(BaseSalesforceApiTask):
"""Simulates load results without doing a real load."""
# these are all used as mutable class variables
mock_calls: list # similar to how a mock.Mock() object works
fake_return_values: T.Iterator
fake_exception_on_request = -1
lock = Lock()
# Manipulating "self" from a mock side-effect is a challenge.
# So we need a "real function"
def __call__(self, *args, **kwargs):
"""Like the __call__ of _run_task, but also capture calls
in a normal mock_values structure."""
with self.lock: # the code below looks thread-safe but better safe than sorry
# tasks usually aren't called twice after being instantiated
# that would usually be a bug.
assert self not in self.mock_calls
self.__class__.mock_calls.append(self)
if (
len(self.__class__.mock_calls)
== self.__class__.fake_exception_on_request
):
raise AssertionError("You asked me to raise an exception")
# get the values that the Snowfakery task asked us to load and
# remember them for later inspection.
self.values_loaded = db_values_from_db_url(self.options["database_url"])
# return a fake return value so Snowfakery loader doesn't get confused
self.return_values = {"step_results": next(self.fake_return_values)}
# using mutable class variables is not something I would usually do
# because it is not thread safe, but the test intrinsically uses
# threads and therefore is not thread safe in general.
#
# Furthermore, attempts to use a closure instead of mutable class
# variables just doesn't work because of how Snowfakery instantiates
# tasks in sub-threads.
@classmethod
def reset(cls, fake_exception_on_request=-1):
cls.mock_calls = []
cls.fake_return_values = cycle(iter(FAKE_LOAD_RESULTS))
cls.fake_exception_on_request = fake_exception_on_request
def db_values_from_db_url(database_url):
engine = create_engine(database_url)
metadata = MetaData(engine)
metadata.reflect()
with engine.connect() as connection:
values = {
table_name: table_values(connection, table)
for table_name, table in metadata.tables.items()
if table_name[-6:] != "sf_ids"
}
return values
@pytest.fixture
def mock_load_data(
request,
    threads_instead_of_processes,  # mock patches wouldn't be inherited by child processes
):
fake_load_data = FakeLoadData
with mock.patch(
"cumulusci.tasks.bulkdata.generate_and_load_data.LoadData", fake_load_data
), mock.patch(
"cumulusci.tasks.bulkdata.snowfakery_utils.queue_manager.LoadData",
fake_load_data,
):
fake_load_data.reset()
yield fake_load_data
fake_load_data.reset()
@pytest.fixture
def threads_instead_of_processes(request):
with mock.patch(
"cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Process",
wraps=Thread,
) as t:
yield t
@pytest.fixture
def fake_processes_and_threads(request):
class FakeProcessManager:
def __init__(self):
self.processes = []
def __call__(self, target, args, daemon):
res = self.process_handler(target, args, daemon, index=len(self.processes))
self.processes.append(res)
return res
process_manager = FakeProcessManager()
with mock.patch(
"cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Thread",
process_manager,
), mock.patch(
"cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Process",
process_manager,
):
yield process_manager
@pytest.fixture
def snowfakery(request, create_task):
def snowfakery(**kwargs):
return create_task(Snowfakery, kwargs)
return snowfakery
@contextmanager
def temporary_file_path(filename):
with TemporaryDirectory() as tmpdirname:
path = Path(tmpdirname) / filename
yield path
class SnowfakeryTaskResults(T.NamedTuple):
"""Results from a Snowfakery data generation process"""
task: Snowfakery # The task, so we can inspect its return_values
working_dir: Path # The working directory, to look at mapping files, DB files, etc.
@pytest.fixture()
def run_snowfakery_and_inspect_mapping(
run_snowfakery_and_yield_results,
):
"""Run Snowfakery with some defaulted or overriden options.
Yield a mapping file for inspection that it was the right file.
Defaults are same as run_snowfakery_and_yield_results.
"""
def _run_snowfakery_and_inspect_mapping(**options):
with run_snowfakery_and_yield_results(**options) as results:
return get_mapping_from_snowfakery_task_results(results)
return _run_snowfakery_and_inspect_mapping
def get_mapping_from_snowfakery_task_results(results: SnowfakeryTaskResults):
"""Find the shared mapping file and return it."""
template_dir = SnowfakeryWorkingDirectory(results.working_dir / "template_1/")
temp_mapping = template_dir.mapping_file
with open(temp_mapping) as f:
mapping = yaml.safe_load(f)
other_mapping = Path(
str(temp_mapping).replace("template_1", "data_load_outbox/1_1")
)
# check that it's truly shared
assert temp_mapping.read_text() == other_mapping.read_text()
return mapping
def get_record_counts_from_snowfakery_results(
results: SnowfakeryTaskResults,
) -> Counter:
"""Collate the record counts from Snowfakery outbox directories.
    Note that records created by the initial, just_once seeding flow are not
    counted because they are deleted. If you need every single result, you
    should probably use return_values instead (but you may need to implement it)."""
rollups = Counter()
# when there is more than one channel, the directory structure is deeper
channeled_outboxes = tuple(results.working_dir.glob("*/data_load_outbox/*"))
regular_outboxes = tuple(results.working_dir.glob("data_load_outbox/*"))
assert bool(regular_outboxes) ^ bool(
channeled_outboxes
), f"One of regular_outboxes or channeled_outboxes should be available: {channeled_outboxes}, {regular_outboxes}"
outboxes = tuple(channeled_outboxes) + tuple(regular_outboxes)
for subdir in outboxes:
record_counts = SnowfakeryWorkingDirectory(subdir).get_record_counts()
rollups.update(record_counts)
return rollups
@pytest.fixture()
def run_snowfakery_and_yield_results(snowfakery, mock_load_data):
@contextmanager
def _run_snowfakery_and_inspect_mapping_and_example_records(**options):
with TemporaryDirectory() as workingdir:
workingdir = Path(workingdir) / "tempdir"
task = snowfakery(
run_until_recipe_repeated=2,
working_directory=workingdir,
**options,
)
task()
yield SnowfakeryTaskResults(task, workingdir)
return _run_snowfakery_and_inspect_mapping_and_example_records
class TestSnowfakery:
def test_no_options(self):
with pytest.raises(exc.TaskOptionsError, match="recipe"):
_make_task(Snowfakery, {})
@mock.patch(
"cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Process",
)
def test_simple_snowfakery(self, Process, mock_load_data, create_task):
task = create_task(
Snowfakery,
{
"recipe": sample_yaml,
},
)
task()
assert mock_load_data.mock_calls
# should not be called for a simple one-rep load
assert not Process.mock_calls
@mock.patch(
"cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Process",
)
@pytest.mark.vcr()
def test_snowfakery_query_salesforce(self, Process, mock_load_data, create_task):
task = create_task(
Snowfakery,
{
"recipe": query_yaml,
},
)
task()
assert mock_load_data.mock_calls
# should not be called for a simple one-rep load
assert not Process.mock_calls
@mock.patch("cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE", 3)
def test_small(
self, mock_load_data, threads_instead_of_processes, create_task_fixture
):
task = create_task_fixture(
Snowfakery,
{"recipe": sample_yaml, "run_until_recipe_repeated": "7"},
)
task()
# Batch size was 3, so 7 records takes
# one initial batch plus two parallel batches
assert len(mock_load_data.mock_calls) == 3, mock_load_data.mock_calls
# One should be in a sub-process/thread
assert len(threads_instead_of_processes.mock_calls) == 2
@mock.patch("cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE", 3)
def test_multi_part(
self, threads_instead_of_processes, mock_load_data, create_task_fixture
):
task = create_task_fixture(
Snowfakery,
{"recipe": sample_yaml, "run_until_recipe_repeated": 15},
)
task()
assert (
len(mock_load_data.mock_calls) > 3
) # depends on the details of the tuning
assert (
len(threads_instead_of_processes.mock_calls)
== len(mock_load_data.mock_calls) - 1
)
@mock.patch(
"cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Process",
)
def test_run_until_loaded(
self, create_subprocess, mock_load_data, create_task_fixture
):
task = create_task_fixture(
Snowfakery,
{"recipe": sample_yaml, "run_until_records_loaded": "Account:1"},
)
task()
assert mock_load_data.mock_calls
# should not be called for a simple one-rep load
assert not create_subprocess.mock_calls
@mock.patch("cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE", 3)
def test_run_until_loaded_2_parts(
self, threads_instead_of_processes, mock_load_data, create_task_fixture
):
task = create_task_fixture(
Snowfakery,
{"recipe": sample_yaml, "run_until_records_loaded": "Account:6"},
)
task()
assert len(mock_load_data.mock_calls) == 2
assert len(threads_instead_of_processes.mock_calls) == 1
# There was previously a failed attempt at testing the connected app here.
# Could try again after Snowfakery 2.0 launch.
    # https://github.com/SFDO-Tooling/CumulusCI/blob/c7e0d7552394b3ac268cb373ffb24b72b5c059f3/cumulusci/tasks/bulkdata/tests/test_snowfakery.py#L165-L197
@pytest.mark.vcr()
def test_run_until_records_in_org__none_needed(
self, threads_instead_of_processes, mock_load_data, create_task, ensure_accounts
):
with ensure_accounts(6):
task = create_task(
Snowfakery,
{"recipe": sample_yaml, "run_until_records_in_org": "Account:6"},
)
task()
assert len(mock_load_data.mock_calls) == 0, mock_load_data.mock_calls
assert (
len(threads_instead_of_processes.mock_calls) == 0
), threads_instead_of_processes.mock_calls
@pytest.mark.vcr()
def test_run_until_records_in_org__one_needed(
self,
sf,
threads_instead_of_processes,
mock_load_data,
create_task,
ensure_accounts,
):
with ensure_accounts(10):
# org reports 10 records in org
# so we only need 6 more.
# That will be one "initial" batch plus one "parallel" batch
task = create_task(
Snowfakery,
{"recipe": sample_yaml, "run_until_records_in_org": "Account:16"},
)
task.logger = mock.Mock()
task()
assert len(mock_load_data.mock_calls) == 2, mock_load_data.mock_calls
assert len(threads_instead_of_processes.mock_calls) == 1
@pytest.mark.vcr()
@mock.patch("cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE", 3)
def test_run_until_records_in_org__multiple_needed(
self,
threads_instead_of_processes,
mock_load_data,
snowfakery,
ensure_accounts,
):
with ensure_accounts(10):
task = snowfakery(recipe=sample_yaml, run_until_records_in_org="Account:16")
task()
assert len(mock_load_data.mock_calls) == 2, mock_load_data.mock_calls
assert (
len(threads_instead_of_processes.mock_calls) == 1
), threads_instead_of_processes.mock_calls
def test_inaccessible_generator_yaml(self, snowfakery):
with pytest.raises(exc.TaskOptionsError, match="recipe"):
task = snowfakery(
recipe=sample_yaml / "junk",
)
task()
@mock.patch("cumulusci.tasks.bulkdata.snowfakery.get_debug_mode", lambda: True)
@mock.patch("psutil.cpu_count", lambda logical: 11)
def test_snowfakery_debug_mode_and_cpu_count(self, snowfakery, mock_load_data):
task = snowfakery(recipe=sample_yaml, run_until_recipe_repeated="5")
with mock.patch.object(task, "logger") as logger:
task()
assert "Using 11 workers" in str(logger.mock_calls)
def test_record_count(self, snowfakery, mock_load_data):
task = snowfakery(recipe="datasets/recipe.yml", run_until_recipe_repeated="4")
with mock.patch.object(task, "logger") as logger, mock.patch.object(
task.project_config, "keychain", DummyKeychain()
) as keychain:
def get_org(username):
return DummyOrgConfig(
config={"keychain": keychain, "username": username}
)
keychain.get_org = mock.Mock(wraps=get_org)
task()
mock_calls_as_string = str(logger.mock_calls)
assert "Account: 5 successes" in mock_calls_as_string, mock_calls_as_string[
-500:
]
assert "Contact: 5 successes" in mock_calls_as_string, mock_calls_as_string[
-500:
]
def test_run_until_wrong_format(self, snowfakery):
with pytest.raises(exc.TaskOptionsError, match="Ten"):
task = snowfakery(
recipe=sample_yaml, run_until_records_loaded="Account:Ten"
)
task()
def test_run_until_wrong_format__2(self, snowfakery):
with pytest.raises(exc.TaskOptionsError, match="Ten"):
task = snowfakery(
recipe=sample_yaml, run_until_records_loaded="Account_Ten"
)
task()
def test_run_reps_wrong_format(self, snowfakery):
with pytest.raises(exc.TaskOptionsError, match="Ten"):
task = snowfakery(recipe=sample_yaml, run_until_recipe_repeated="Ten")
task()
def test_run_until_conflicting_params(self, snowfakery):
with pytest.raises(exc.TaskOptionsError, match="only one of"):
task = snowfakery(
recipe=sample_yaml,
run_until_records_loaded="Account_Ten",
run_until_recipe_repeated="1",
)
task()
def test_working_directory(self, snowfakery, mock_load_data):
with TemporaryDirectory() as t:
working_directory = Path(t) / "junkdir"
task = snowfakery(
recipe=sample_yaml,
run_until_recipe_repeated="1",
working_directory=str(working_directory),
)
task()
assert (working_directory / "data_load_outbox").exists()
@mock.patch("cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE", 1)
def xxx__test_failures_in_subprocesses__last_batch(
self, snowfakery, mock_load_data, fake_processes_and_threads
):
class FakeProcess(DelaySpawner):
def __init__(self, target, args, daemon, index):
super().__init__(target, args, daemon)
self.counter = 0
self.task_class = args[0]["task_class"]
self.index = index
try:
self._finish()
except AssertionError:
pass
def is_alive(self):
print("Alive?", self.task_class, self.index, self.counter, self)
self.counter += 1
if self.counter > 3:
return False
return True
fake_processes_and_threads.process_handler = FakeProcess
class LoadDataSucceedsOnceThenFails:
count = 0
def __call__(self, *args, **kwargs):
self.count += 1
if self.count > 1:
raise AssertionError("XYZZY")
mock_load_data.side_effect = LoadDataSucceedsOnceThenFails()
task = snowfakery(
recipe=sample_yaml,
run_until_records_loaded="Account:10",
num_processes=3, # todo: test this is enforced
)
with mock.patch.object(task, "logger") as logger:
with pytest.raises(exc.BulkDataException):
task()
assert "XYZZY" in str(logger.mock_calls)
def test_running_totals_repr(self):
r = RunningTotals()
r.errors = 12
r.successes = 11
assert "11" in repr(r)
def test_generate_mapping_file__loadfile__inferred(
self, run_snowfakery_and_inspect_mapping
):
mapping = run_snowfakery_and_inspect_mapping(recipe=simple_salesforce_yaml)
assert mapping["Insert Account"]["api"] == "bulk"
assert mapping["Insert Contact"].get("bulk_mode") is None
assert list(mapping.keys()) == ["Insert Account", "Insert Contact"]
def test_generate_mapping_file__loadfile__overridden(
self, run_snowfakery_and_inspect_mapping
):
loading_rules = str(simple_salesforce_yaml).replace(
".recipe.yml", "_2.load.yml"
)
mapping = run_snowfakery_and_inspect_mapping(
recipe=simple_salesforce_yaml, loading_rules=str(loading_rules)
)
assert mapping["Insert Account"].get("api") is None
assert mapping["Insert Contact"]["bulk_mode"].lower() == "parallel"
assert list(mapping.keys()) == ["Insert Contact", "Insert Account"]
def test_generate_mapping_file__loadfile_multiple_files(
self, run_snowfakery_and_inspect_mapping
):
loading_rules = (
str(simple_salesforce_yaml).replace(".recipe.yml", "_2.load.yml")
+ ","
+ str(simple_salesforce_yaml).replace(".recipe.yml", ".load.yml")
)
mapping = run_snowfakery_and_inspect_mapping(
recipe=simple_salesforce_yaml, loading_rules=str(loading_rules)
)
assert mapping["Insert Account"]["api"] == "bulk"
assert mapping["Insert Contact"]["bulk_mode"].lower() == "parallel"
assert list(mapping.keys()) == ["Insert Contact", "Insert Account"]
def test_options(
self,
mock_load_data,
run_snowfakery_and_yield_results,
):
options_yaml = str(sample_yaml).replace(
"gen_npsp_standard_objects.recipe.yml", "options.recipe.yml"
)
with run_snowfakery_and_yield_results(
recipe=options_yaml, recipe_options="row_count:7,account_name:aaaaa"
) as results:
record_counts = get_record_counts_from_snowfakery_results(results)
assert record_counts["Account"] == 7, record_counts["Account"]
@mock.patch("cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE", 3)
def test_multi_part_uniqueness(self, mock_load_data, create_task_fixture):
task = create_task_fixture(
Snowfakery,
{
"recipe": Path(__file__).parent / "snowfakery/unique_values.recipe.yml",
"run_until_recipe_repeated": 15,
},
)
task()
all_data_load_inputs = mock_load_data.mock_calls
all_rows = [
task_instance.values_loaded["blah"]
for task_instance in all_data_load_inputs
]
unique_values = [row.value for batchrows in all_rows for row in batchrows]
assert len(mock_load_data.mock_calls) == 6, len(mock_load_data.mock_calls)
assert len(unique_values) == 30, len(unique_values)
assert len(set(unique_values)) == 30, unique_values
@mock.patch("cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE", 2)
def test_two_channels(self, mock_load_data, create_task):
task = create_task(
Snowfakery,
{
"recipe": Path(__file__).parent
/ "snowfakery/simple_snowfakery_channels.recipe.yml",
"run_until_recipe_repeated": 15,
"recipe_options": {"xyzzy": "Nothing happens", "some_number": 42},
},
)
with mock.patch.object(
task.project_config, "keychain", DummyKeychain()
) as keychain:
def get_org(username):
return DummyOrgConfig(
config={"keychain": keychain, "username": username}
)
keychain.get_org = mock.Mock(wraps=get_org)
task()
assert keychain.get_org.mock_calls
assert keychain.get_org.call_args_list
assert keychain.get_org.call_args_list == [
(("channeltest",),),
(("channeltest-b",),),
(("channeltest-c",),),
(("Account",),),
], keychain.get_org.call_args_list
all_data_load_inputs = mock_load_data.mock_calls
all_data_load_inputs = sorted(
all_data_load_inputs,
key=lambda task_instance: task_instance.org_config.username,
)
usernames_values = [
(task_instance.org_config.username, task_instance.values_loaded)
for task_instance in all_data_load_inputs
]
count_loads = Counter(username for username, _ in usernames_values)
assert count_loads.keys() == {
"channeltest",
"channeltest-b",
"channeltest-c",
"Account",
}
# depends on threading. :(
for value in count_loads.values():
assert 1 <= value <= 4, value
assert sum(count_loads.values()) == 8
first_row_values = next(
value["Account"]
for username, value in usernames_values
if username == "channeltest"
)
assert len(first_row_values) == 1, len(first_row_values)
for username, values in usernames_values:
accounts = values["Account"]
if values["Account"] != first_row_values:
assert len(accounts) == 2, (values, first_row_values)
for account in accounts:
assert int(account.some_number) == 42
assert username in account.name, (username, account.name)
assert sum(len(v["Account"]) for _, v in usernames_values) == 15, sum(
len(v) for _, v in usernames_values
)
def test_channels_cli_options_conflict(self, create_task):
task = create_task(
Snowfakery,
{
"recipe": Path(__file__).parent
/ "snowfakery/simple_snowfakery_channels.recipe.yml",
"run_until_recipe_repeated": 15,
"recipe_options": {"xyzzy": "Nothing happens", "some_number": 37},
},
)
with pytest.raises(exc.TaskOptionsError) as e, mock.patch.object(
task.project_config, "keychain", DummyKeychain()
) as keychain:
def get_org(username):
return DummyOrgConfig(
config={"keychain": keychain, "username": username}
)
keychain.get_org = mock.Mock(wraps=get_org)
task()
assert "conflict" in str(e.value)
assert "some_number" in str(e.value)
@mock.patch(
"cumulusci.tasks.bulkdata.snowfakery.get_debug_mode", lambda: True
) # for coverage
@mock.patch("cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE", 2)
def test_explicit_channel_declarations(self, mock_load_data, create_task):
task = create_task(
Snowfakery,
{
"recipe": Path(__file__).parent
/ "snowfakery/simple_snowfakery.recipe.yml",
"run_until_recipe_repeated": 15,
"recipe_options": {"xyzzy": "Nothing happens", "some_number": 42},
"loading_rules": Path(__file__).parent
/ "snowfakery/simple_snowfakery_channels.load.yml",
},
)
with mock.patch.object(
task.project_config, "keychain", DummyKeychain()
) as keychain:
def get_org(username):
return DummyOrgConfig(
config={"keychain": keychain, "username": username}
)
keychain.get_org = mock.Mock(wraps=get_org)
task()
assert keychain.get_org.mock_calls
assert keychain.get_org.call_args_list
assert keychain.get_org.call_args_list == [
(("channeltest",),),
(("channeltest-b",),),
(("channeltest-c",),),
(("Account",),),
], keychain.get_org.call_args_list
all_data_load_inputs = mock_load_data.mock_calls
all_data_load_inputs = sorted(
all_data_load_inputs,
key=lambda task_instance: task_instance.org_config.username,
)
usernames_values = [
(task_instance.org_config.username, task_instance.values_loaded)
for task_instance in all_data_load_inputs
]
count_loads = Counter(username for username, _ in usernames_values)
assert count_loads.keys() == {
"channeltest",
"channeltest-b",
"channeltest-c",
"Account",
}
@mock.patch("cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE", 2)
def test_serial_mode(self, mock_load_data, create_task):
task = create_task(
Snowfakery,
{
"recipe": Path(__file__).parent
/ "snowfakery/simple_snowfakery.recipe.yml",
"run_until_recipe_repeated": 15,
"recipe_options": {"xyzzy": "Nothing happens", "some_number": 42},
"bulk_mode": "Serial",
},
)
with mock.patch.object(
task.project_config, "keychain", DummyKeychain()
) as keychain:
def get_org(username):
return DummyOrgConfig(
config={"keychain": keychain, "username": username}
)
keychain.get_org = mock.Mock(wraps=get_org)
task.logger = mock.Mock()
task()
for data_load_fake in mock_load_data.mock_calls:
assert data_load_fake.options["bulk_mode"] == "Serial"
pattern = r"Inprogress Loader Jobs: (\d+)"
loader_counts = re.findall(pattern, str(task.logger.mock_calls))
assert loader_counts, loader_counts
        assert all(0 <= int(count) <= 1 for count in loader_counts), loader_counts
@mock.patch("cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE", 2)
def test_bulk_mode_error(self, create_task, mock_load_data):
with pytest.raises(exc.TaskOptionsError):
task = create_task(
Snowfakery,
{
"recipe": Path(__file__).parent
/ "snowfakery/simple_snowfakery.recipe.yml",
"bulk_mode": "XYZZY",
},
)
task()
@mock.patch("cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE", 2)
def test_too_many_channel_declarations(self, mock_load_data, create_task):
task = create_task(
Snowfakery,
{
"recipe": Path(__file__).parent
/ "snowfakery/simple_snowfakery_channels.recipe.yml",
"run_until_recipe_repeated": 15,
"recipe_options": {"xyzzy": "Nothing happens", "some_number": 42},
"loading_rules": Path(__file__).parent
/ "snowfakery/simple_snowfakery_channels_2.load.yml",
},
)
with pytest.raises(exc.TaskOptionsError), mock.patch.object(
task.project_config, "keychain", DummyKeychain()
) as keychain:
def get_org(username):
return DummyOrgConfig(
config={"keychain": keychain, "username": username}
)
keychain.get_org = mock.Mock(wraps=get_org)
task()
@pytest.mark.skip() # TODO: make handling of errors more predictable and re-enable
@mock.patch("cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE", 2)
def test_error_handling_in_channels(self, mock_load_data, create_task):
task = create_task(
Snowfakery,
{
"recipe": Path(__file__).parent
/ "snowfakery/simple_snowfakery.recipe.yml",
"run_until_recipe_repeated": 15,
"loading_rules": Path(__file__).parent
/ "snowfakery/simple_snowfakery_channels.load.yml",
},
)
with mock.patch.object(
task.project_config, "keychain", DummyKeychain()
) as keychain:
def get_org(username):
return DummyOrgConfig(
config={"keychain": keychain, "username": username}
)
keychain.get_org = mock.Mock(wraps=get_org)
with pytest.raises(exc.BulkDataException):
mock_load_data.reset(fake_exception_on_request=3)
task()
# def test_generate_mapping_file(self):
# with temporary_file_path("mapping.yml") as temp_mapping:
# with temp_sqlite_database_url() as database_url:
# task = _make_task(
# GenerateDataFromYaml,
# {
# "options": {
# "generator_yaml": sample_yaml,
# "database_url": database_url,
# "generate_mapping_file": temp_mapping,
# }
# },
# )
# task()
# mapping = yaml.safe_load(open(temp_mapping))
# assert mapping["Insert Account"]["fields"]
# def test_use_mapping_file(self):
# assert vanilla_mapping_file.exists()
# with temp_sqlite_database_url() as database_url:
# task = _make_task(
# GenerateDataFromYaml,
# {
# "options": {
# "generator_yaml": sample_yaml,
# "database_url": database_url,
# "mapping": vanilla_mapping_file,
# }
# },
# )
# task()
# self.assertRowsCreated(database_url)
# def test_num_records(self):
# with temp_sqlite_database_url() as database_url:
# task = _make_task(
# GenerateDataFromYaml,
# {
# "options": {
# "generator_yaml": simple_yaml,
# "database_url": database_url,
# }
# },
# )
# task()
# assert len(self.assertRowsCreated(database_url)) == 1, len(
# self.assertRowsCreated(database_url)
# )
# @mock.patch(
# "cumulusci.tasks.bulkdata.generate_and_load_data_from_yaml.GenerateAndLoadDataFromYaml._dataload"
# )
# def test_simple_generate_and_load_with_numrecords(self, _dataload):
    #         task = _make_task(
# GenerateAndLoadDataFromYaml,
# {
# "options": {
# "generator_yaml": simple_yaml,
# "num_records": 11,
# "num_records_tablename": "Account",
# }
# },
# )
# task()
# assert len(_dataload.mock_calls) == 1
# @mock.patch(
# "cumulusci.tasks.bulkdata.generate_and_load_data_from_yaml.GenerateAndLoadDataFromYaml._dataload"
# )
# def test_simple_generate_and_load(self, _dataload):
# task = _make_task(
# GenerateAndLoadDataFromYaml,
# {
# "options": {
# "generator_yaml": simple_yaml,
# "num_records": 11,
# "num_records_tablename": "Account",
# }
# },
# )
# task()
# assert len(_dataload.mock_calls) == 1
# @mock.patch("cumulusci.tasks.bulkdata.generate_from_yaml.generate_data")
# def test_exception_handled_cleanly(self, generate_data):
# generate_data.side_effect = AssertionError("Foo")
# with pytest.raises(AssertionError) as e:
# task = _make_task(
# GenerateAndLoadDataFromYaml,
# {
# "options": {
# "generator_yaml": simple_yaml,
# "num_records": 11,
# "num_records_tablename": "Account",
# }
# },
# )
# task()
# assert "Foo" in str(e.value)
# assert len(generate_data.mock_calls) == 1
# @mock.patch(
# "cumulusci.tasks.bulkdata.generate_and_load_data_from_yaml.GenerateAndLoadDataFromYaml._dataload"
# )
# def test_batching(self, _dataload):
# with temp_sqlite_database_url() as database_url:
# task = _make_task(
# GenerateAndLoadDataFromYaml,
# {
# "options": {
# "generator_yaml": simple_yaml,
# "num_records": 14,
# "batch_size": 6,
# "database_url": database_url,
# "num_records_tablename": "Account",
# "data_generation_task": "cumulusci.tasks.bulkdata.generate_from_yaml.GenerateDataFromYaml",
# "reset_oids": False,
# }
# },
# )
# task()
# assert len(_dataload.mock_calls) == 3
# task = None # clean up db?
# engine = create_engine(database_url)
# connection = engine.connect()
# records = list(connection.execute("select * from Account"))
# connection.close()
# assert len(records) == 14 % 6 # leftovers
# def test_mismatched_options(self):
# with pytest.raises(exc.exc.TaskOptionsError) as e:
# task = _make_task(
# GenerateDataFromYaml,
# {"options": {"generator_yaml": sample_yaml, "num_records": 10}},
# )
# task()
# assert "without num_records_tablename" in str(e.value)
# def generate_continuation_data(self, fileobj):
# g = data_generator_runtime.Globals()
# o = data_generator_runtime.ObjectRow(
# "Account", {"Name": "<NAME>", "id": 5}
# )
# g.register_object(o, "The Company", False)
# for i in range(0, 5):
# # burn through 5 imaginary accounts
# g.id_manager.generate_id("Account")
# data_generator.save_continuation_yaml(g, fileobj)
# def test_with_continuation_file(self):
# with temp_sqlite_database_url() as database_url:
# with temporary_file_path("cont.yml") as continuation_file_path:
# with open(continuation_file_path, "w") as continuation_file:
# self.generate_continuation_data(continuation_file)
# task = _make_task(
# GenerateDataFromYaml,
# {
# "options": {
# "generator_yaml": sample_yaml,
# "database_url": database_url,
# "mapping": vanilla_mapping_file,
# "continuation_file": continuation_file_path,
# }
# },
# )
# task()
# rows = self.assertRowsCreated(database_url)
# assert dict(rows[0])["id"] == 6
# def test_with_nonexistent_continuation_file(self):
# with pytest.raises(exc.TaskOptionsError) as e:
# with temp_sqlite_database_url() as database_url:
# task = _make_task(
# GenerateDataFromYaml,
# {
# "options": {
# "generator_yaml": sample_yaml,
# "database_url": database_url,
# "mapping": vanilla_mapping_file,
# "continuation_file": "/tmp/foobar/baz/jazz/continuation.yml",
# }
# },
# )
# task()
# rows = self.assertRowsCreated(database_url)
# assert dict(rows[0])["id"] == 6
# assert "jazz" in str(e.value)
# assert "does not exist" in str(e.value)
# def test_generate_continuation_file(self):
# with temporary_file_path("cont.yml") as temp_continuation_file:
# with temp_sqlite_database_url() as database_url:
# task = _make_task(
# GenerateDataFromYaml,
# {
# "options": {
# "generator_yaml": sample_yaml,
# "database_url": database_url,
# "generate_continuation_file": temp_continuation_file,
# }
# },
# )
# task()
# continuation_file = yaml.safe_load(open(temp_continuation_file))
# assert continuation_file # internals of this file are not important to CumulusCI
# def _run_snowfakery_and_inspect_mapping(self, **options):
# with temporary_file_path("mapping.yml") as temp_mapping:
# with temp_sqlite_database_url() as database_url:
# task = _make_task(
# GenerateDataFromYaml,
# {
# "options": {
# "database_url": database_url,
# "generate_mapping_file": temp_mapping,
# **options,
# }
# },
# )
# task()
# with open(temp_mapping) as f:
# mapping = yaml.safe_load(f)
# return mapping
# def test_generate_mapping_file__loadfile_missing(self):
# loading_rules = str(simple_snowfakery_yaml).replace(
# ".recipe.yml", "_3.load.yml"
# )
# with pytest.raises(FileNotFoundError):
# self._run_snowfakery_and_inspect_mapping(
# generator_yaml=simple_snowfakery_yaml, loading_rules=str(loading_rules)
# )
|
[
"cumulusci.tasks.bulkdata.snowfakery.SnowfakeryWorkingDirectory",
"pathlib.Path",
"yaml.safe_load",
"pytest.mark.skip",
"unittest.mock.patch.object",
"tempfile.TemporaryDirectory",
"cumulusci.tasks.bulkdata.snowfakery.RunningTotals",
"threading.Lock",
"pytest.raises",
"collections.Counter",
"pytest.fixture",
"unittest.mock.patch",
"cumulusci.tasks.bulkdata.tests.utils._make_task",
"sqlalchemy.MetaData",
"cumulusci.tests.util.DummyKeychain",
"pytest.mark.vcr",
"unittest.mock.Mock",
"cumulusci.tasks.bulkdata.tests.integration_test_utils.ensure_accounts",
"sqlalchemy.create_engine",
"cumulusci.tests.util.DummyOrgConfig"
] |
[((6883, 6899), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (6897, 6899), False, 'import pytest\n'), ((9069, 9085), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (9083, 9085), False, 'import pytest\n'), ((2707, 2713), 'threading.Lock', 'Lock', ([], {}), '()\n', (2711, 2713), False, 'from threading import Lock, Thread\n'), ((4522, 4549), 'sqlalchemy.create_engine', 'create_engine', (['database_url'], {}), '(database_url)\n', (4535, 4549), False, 'from sqlalchemy import MetaData, create_engine\n'), ((4565, 4581), 'sqlalchemy.MetaData', 'MetaData', (['engine'], {}), '(engine)\n', (4573, 4581), False, 'from sqlalchemy import MetaData, create_engine\n'), ((7577, 7640), 'cumulusci.tasks.bulkdata.snowfakery.SnowfakeryWorkingDirectory', 'SnowfakeryWorkingDirectory', (["(results.working_dir / 'template_1/')"], {}), "(results.working_dir / 'template_1/')\n", (7603, 7640), False, 'from cumulusci.tasks.bulkdata.snowfakery import RunningTotals, Snowfakery, SnowfakeryWorkingDirectory\n'), ((8401, 8410), 'collections.Counter', 'Counter', ([], {}), '()\n', (8408, 8410), False, 'from collections import Counter\n'), ((9825, 9934), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Process"""'], {}), "(\n 'cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Process'\n )\n", (9835, 9934), False, 'from unittest import mock\n'), ((10303, 10412), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Process"""'], {}), "(\n 'cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Process'\n )\n", (10313, 10412), False, 'from unittest import mock\n'), ((10423, 10440), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (10438, 10440), False, 'import pytest\n'), ((10813, 10882), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE"""', '(3)'], {}), "('cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE', 3)\n", (10823, 10882), False, 'from unittest import mock\n'), ((11444, 11513), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE"""', '(3)'], {}), "('cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE', 3)\n", (11454, 11513), False, 'from unittest import mock\n'), ((12035, 12144), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Process"""'], {}), "(\n 'cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Process'\n )\n", (12045, 12144), False, 'from unittest import mock\n'), ((12572, 12641), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE"""', '(3)'], {}), "('cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE', 3)\n", (12582, 12641), False, 'from unittest import mock\n'), ((13485, 13502), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (13500, 13502), False, 'import pytest\n'), ((14073, 14090), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (14088, 14090), False, 'import pytest\n'), ((14835, 14852), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (14850, 14852), False, 'import pytest\n'), ((14858, 14927), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE"""', '(3)'], {}), "('cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE', 3)\n", (14868, 14927), False, 'from unittest import mock\n'), ((15705, 15784), 
'unittest.mock.patch', 'mock.patch', (['"""cumulusci.tasks.bulkdata.snowfakery.get_debug_mode"""', '(lambda : True)'], {}), "('cumulusci.tasks.bulkdata.snowfakery.get_debug_mode', lambda : True)\n", (15715, 15784), False, 'from unittest import mock\n'), ((15789, 15839), 'unittest.mock.patch', 'mock.patch', (['"""psutil.cpu_count"""', '(lambda logical: 11)'], {}), "('psutil.cpu_count', lambda logical: 11)\n", (15799, 15839), False, 'from unittest import mock\n'), ((18483, 18552), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE"""', '(1)'], {}), "('cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE', 1)\n", (18493, 18552), False, 'from unittest import mock\n'), ((22422, 22491), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE"""', '(3)'], {}), "('cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE', 3)\n", (22432, 22491), False, 'from unittest import mock\n'), ((23306, 23375), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE"""', '(2)'], {}), "('cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE', 2)\n", (23316, 23375), False, 'from unittest import mock\n'), ((26915, 26994), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.tasks.bulkdata.snowfakery.get_debug_mode"""', '(lambda : True)'], {}), "('cumulusci.tasks.bulkdata.snowfakery.get_debug_mode', lambda : True)\n", (26925, 26994), False, 'from unittest import mock\n'), ((27029, 27098), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE"""', '(2)'], {}), "('cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE', 2)\n", (27039, 27098), False, 'from unittest import mock\n'), ((29026, 29095), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE"""', '(2)'], {}), "('cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE', 2)\n", (29036, 29095), False, 'from unittest import mock\n'), ((30328, 30397), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE"""', '(2)'], {}), "('cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE', 2)\n", (30338, 30397), False, 'from unittest import mock\n'), ((30808, 30877), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE"""', '(2)'], {}), "('cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE', 2)\n", (30818, 30877), False, 'from unittest import mock\n'), ((31823, 31841), 'pytest.mark.skip', 'pytest.mark.skip', ([], {}), '()\n', (31839, 31841), False, 'import pytest\n'), ((31911, 31980), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE"""', '(2)'], {}), "('cumulusci.tasks.bulkdata.snowfakery.MIN_PORTION_SIZE', 2)\n", (31921, 31980), False, 'from unittest import mock\n'), ((1066, 1080), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1070, 1080), False, 'from pathlib import Path\n'), ((1148, 1162), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1152, 1162), False, 'from pathlib import Path\n'), ((1235, 1249), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1239, 1249), False, 'from pathlib import Path\n'), ((5042, 5132), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.tasks.bulkdata.generate_and_load_data.LoadData"""', 'fake_load_data'], {}), "('cumulusci.tasks.bulkdata.generate_and_load_data.LoadData',\n fake_load_data)\n", (5052, 5132), False, 'from unittest import mock\n'), ((5144, 5242), 
'unittest.mock.patch', 'mock.patch', (['"""cumulusci.tasks.bulkdata.snowfakery_utils.queue_manager.LoadData"""', 'fake_load_data'], {}), "('cumulusci.tasks.bulkdata.snowfakery_utils.queue_manager.LoadData',\n fake_load_data)\n", (5154, 5242), False, 'from unittest import mock\n'), ((5425, 5548), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Process"""'], {'wraps': 'Thread'}), "(\n 'cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Process'\n , wraps=Thread)\n", (5435, 5548), False, 'from unittest import mock\n'), ((5988, 6113), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Thread"""', 'process_manager'], {}), "(\n 'cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Thread'\n , process_manager)\n", (5998, 6113), False, 'from unittest import mock\n'), ((6128, 6254), 'unittest.mock.patch', 'mock.patch', (['"""cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Process"""', 'process_manager'], {}), "(\n 'cumulusci.utils.parallel.task_worker_queues.parallel_worker_queue.WorkerQueue.Process'\n , process_manager)\n", (6138, 6254), False, 'from unittest import mock\n'), ((6517, 6537), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (6535, 6537), False, 'from tempfile import TemporaryDirectory\n'), ((7738, 7755), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (7752, 7755), False, 'import yaml\n'), ((20122, 20137), 'cumulusci.tasks.bulkdata.snowfakery.RunningTotals', 'RunningTotals', ([], {}), '()\n', (20135, 20137), False, 'from cumulusci.tasks.bulkdata.snowfakery import RunningTotals, Snowfakery, SnowfakeryWorkingDirectory\n'), ((24891, 24944), 'collections.Counter', 'Counter', (['(username for username, _ in usernames_values)'], {}), '(username for username, _ in usernames_values)\n', (24898, 24944), False, 'from collections import Counter\n'), ((6568, 6584), 'pathlib.Path', 'Path', (['tmpdirname'], {}), '(tmpdirname)\n', (6572, 6584), False, 'from pathlib import Path\n'), ((9261, 9281), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (9279, 9281), False, 'from tempfile import TemporaryDirectory\n'), ((9727, 9778), 'pytest.raises', 'pytest.raises', (['exc.TaskOptionsError'], {'match': '"""recipe"""'}), "(exc.TaskOptionsError, match='recipe')\n", (9740, 9778), False, 'import pytest\n'), ((9792, 9818), 'cumulusci.tasks.bulkdata.tests.utils._make_task', '_make_task', (['Snowfakery', '{}'], {}), '(Snowfakery, {})\n', (9802, 9818), False, 'from cumulusci.tasks.bulkdata.tests.utils import _make_task\n'), ((13664, 13682), 'cumulusci.tasks.bulkdata.tests.integration_test_utils.ensure_accounts', 'ensure_accounts', (['(6)'], {}), '(6)\n', (13679, 13682), False, 'from cumulusci.tasks.bulkdata.tests.integration_test_utils import ensure_accounts\n'), ((14296, 14315), 'cumulusci.tasks.bulkdata.tests.integration_test_utils.ensure_accounts', 'ensure_accounts', (['(10)'], {}), '(10)\n', (14311, 14315), False, 'from cumulusci.tasks.bulkdata.tests.integration_test_utils import ensure_accounts\n'), ((14655, 14666), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (14664, 14666), False, 'from unittest import mock\n'), ((15125, 15144), 'cumulusci.tasks.bulkdata.tests.integration_test_utils.ensure_accounts', 'ensure_accounts', (['(10)'], {}), '(10)\n', (15140, 15144), False, 'from 
cumulusci.tasks.bulkdata.tests.integration_test_utils import ensure_accounts\n'), ((15537, 15588), 'pytest.raises', 'pytest.raises', (['exc.TaskOptionsError'], {'match': '"""recipe"""'}), "(exc.TaskOptionsError, match='recipe')\n", (15550, 15588), False, 'import pytest\n'), ((16014, 16047), 'unittest.mock.patch.object', 'mock.patch.object', (['task', '"""logger"""'], {}), "(task, 'logger')\n", (16031, 16047), False, 'from unittest import mock\n'), ((16300, 16333), 'unittest.mock.patch.object', 'mock.patch.object', (['task', '"""logger"""'], {}), "(task, 'logger')\n", (16317, 16333), False, 'from unittest import mock\n'), ((16645, 16669), 'unittest.mock.Mock', 'mock.Mock', ([], {'wraps': 'get_org'}), '(wraps=get_org)\n', (16654, 16669), False, 'from unittest import mock\n'), ((17038, 17086), 'pytest.raises', 'pytest.raises', (['exc.TaskOptionsError'], {'match': '"""Ten"""'}), "(exc.TaskOptionsError, match='Ten')\n", (17051, 17086), False, 'import pytest\n'), ((17299, 17347), 'pytest.raises', 'pytest.raises', (['exc.TaskOptionsError'], {'match': '"""Ten"""'}), "(exc.TaskOptionsError, match='Ten')\n", (17312, 17347), False, 'import pytest\n'), ((17556, 17604), 'pytest.raises', 'pytest.raises', (['exc.TaskOptionsError'], {'match': '"""Ten"""'}), "(exc.TaskOptionsError, match='Ten')\n", (17569, 17604), False, 'import pytest\n'), ((17783, 17839), 'pytest.raises', 'pytest.raises', (['exc.TaskOptionsError'], {'match': '"""only one of"""'}), "(exc.TaskOptionsError, match='only one of')\n", (17796, 17839), False, 'import pytest\n'), ((18124, 18144), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (18142, 18144), False, 'from tempfile import TemporaryDirectory\n'), ((19897, 19930), 'unittest.mock.patch.object', 'mock.patch.object', (['task', '"""logger"""'], {}), "(task, 'logger')\n", (19914, 19930), False, 'from unittest import mock\n'), ((24092, 24116), 'unittest.mock.Mock', 'mock.Mock', ([], {'wraps': 'get_org'}), '(wraps=get_org)\n', (24101, 24116), False, 'from unittest import mock\n'), ((26436, 26471), 'pytest.raises', 'pytest.raises', (['exc.TaskOptionsError'], {}), '(exc.TaskOptionsError)\n', (26449, 26471), False, 'import pytest\n'), ((26778, 26802), 'unittest.mock.Mock', 'mock.Mock', ([], {'wraps': 'get_org'}), '(wraps=get_org)\n', (26787, 26802), False, 'from unittest import mock\n'), ((27946, 27970), 'unittest.mock.Mock', 'mock.Mock', ([], {'wraps': 'get_org'}), '(wraps=get_org)\n', (27955, 27970), False, 'from unittest import mock\n'), ((28785, 28838), 'collections.Counter', 'Counter', (['(username for username, _ in usernames_values)'], {}), '(username for username, _ in usernames_values)\n', (28792, 28838), False, 'from collections import Counter\n'), ((29841, 29865), 'unittest.mock.Mock', 'mock.Mock', ([], {'wraps': 'get_org'}), '(wraps=get_org)\n', (29850, 29865), False, 'from unittest import mock\n'), ((29892, 29903), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (29901, 29903), False, 'from unittest import mock\n'), ((30476, 30511), 'pytest.raises', 'pytest.raises', (['exc.TaskOptionsError'], {}), '(exc.TaskOptionsError)\n', (30489, 30511), False, 'import pytest\n'), ((31436, 31471), 'pytest.raises', 'pytest.raises', (['exc.TaskOptionsError'], {}), '(exc.TaskOptionsError)\n', (31449, 31471), False, 'import pytest\n'), ((31773, 31797), 'unittest.mock.Mock', 'mock.Mock', ([], {'wraps': 'get_org'}), '(wraps=get_org)\n', (31782, 31797), False, 'from unittest import mock\n'), ((32742, 32766), 'unittest.mock.Mock', 'mock.Mock', ([], {'wraps': 'get_org'}), 
'(wraps=get_org)\n', (32751, 32766), False, 'from unittest import mock\n'), ((8953, 8987), 'cumulusci.tasks.bulkdata.snowfakery.SnowfakeryWorkingDirectory', 'SnowfakeryWorkingDirectory', (['subdir'], {}), '(subdir)\n', (8979, 8987), False, 'from cumulusci.tasks.bulkdata.snowfakery import RunningTotals, Snowfakery, SnowfakeryWorkingDirectory\n'), ((9322, 9338), 'pathlib.Path', 'Path', (['workingdir'], {}), '(workingdir)\n', (9326, 9338), False, 'from pathlib import Path\n'), ((16409, 16424), 'cumulusci.tests.util.DummyKeychain', 'DummyKeychain', ([], {}), '()\n', (16422, 16424), False, 'from cumulusci.tests.util import DummyKeychain, DummyOrgConfig\n'), ((16507, 16574), 'cumulusci.tests.util.DummyOrgConfig', 'DummyOrgConfig', ([], {'config': "{'keychain': keychain, 'username': username}"}), "(config={'keychain': keychain, 'username': username})\n", (16521, 16574), False, 'from cumulusci.tests.util import DummyKeychain, DummyOrgConfig\n'), ((18183, 18190), 'pathlib.Path', 'Path', (['t'], {}), '(t)\n', (18187, 18190), False, 'from pathlib import Path\n'), ((19959, 19995), 'pytest.raises', 'pytest.raises', (['exc.BulkDataException'], {}), '(exc.BulkDataException)\n', (19972, 19995), False, 'import pytest\n'), ((23856, 23871), 'cumulusci.tests.util.DummyKeychain', 'DummyKeychain', ([], {}), '()\n', (23869, 23871), False, 'from cumulusci.tests.util import DummyKeychain, DummyOrgConfig\n'), ((23954, 24021), 'cumulusci.tests.util.DummyOrgConfig', 'DummyOrgConfig', ([], {'config': "{'keychain': keychain, 'username': username}"}), "(config={'keychain': keychain, 'username': username})\n", (23968, 24021), False, 'from cumulusci.tests.util import DummyKeychain, DummyOrgConfig\n'), ((26542, 26557), 'cumulusci.tests.util.DummyKeychain', 'DummyKeychain', ([], {}), '()\n', (26555, 26557), False, 'from cumulusci.tests.util import DummyKeychain, DummyOrgConfig\n'), ((26640, 26707), 'cumulusci.tests.util.DummyOrgConfig', 'DummyOrgConfig', ([], {'config': "{'keychain': keychain, 'username': username}"}), "(config={'keychain': keychain, 'username': username})\n", (26654, 26707), False, 'from cumulusci.tests.util import DummyKeychain, DummyOrgConfig\n'), ((27710, 27725), 'cumulusci.tests.util.DummyKeychain', 'DummyKeychain', ([], {}), '()\n', (27723, 27725), False, 'from cumulusci.tests.util import DummyKeychain, DummyOrgConfig\n'), ((27808, 27875), 'cumulusci.tests.util.DummyOrgConfig', 'DummyOrgConfig', ([], {'config': "{'keychain': keychain, 'username': username}"}), "(config={'keychain': keychain, 'username': username})\n", (27822, 27875), False, 'from cumulusci.tests.util import DummyKeychain, DummyOrgConfig\n'), ((29605, 29620), 'cumulusci.tests.util.DummyKeychain', 'DummyKeychain', ([], {}), '()\n', (29618, 29620), False, 'from cumulusci.tests.util import DummyKeychain, DummyOrgConfig\n'), ((29703, 29770), 'cumulusci.tests.util.DummyOrgConfig', 'DummyOrgConfig', ([], {'config': "{'keychain': keychain, 'username': username}"}), "(config={'keychain': keychain, 'username': username})\n", (29717, 29770), False, 'from cumulusci.tests.util import DummyKeychain, DummyOrgConfig\n'), ((31537, 31552), 'cumulusci.tests.util.DummyKeychain', 'DummyKeychain', ([], {}), '()\n', (31550, 31552), False, 'from cumulusci.tests.util import DummyKeychain, DummyOrgConfig\n'), ((31635, 31702), 'cumulusci.tests.util.DummyOrgConfig', 'DummyOrgConfig', ([], {'config': "{'keychain': keychain, 'username': username}"}), "(config={'keychain': keychain, 'username': username})\n", (31649, 31702), False, 'from cumulusci.tests.util import 
DummyKeychain, DummyOrgConfig\n'), ((32506, 32521), 'cumulusci.tests.util.DummyKeychain', 'DummyKeychain', ([], {}), '()\n', (32519, 32521), False, 'from cumulusci.tests.util import DummyKeychain, DummyOrgConfig\n'), ((32604, 32671), 'cumulusci.tests.util.DummyOrgConfig', 'DummyOrgConfig', ([], {'config': "{'keychain': keychain, 'username': username}"}), "(config={'keychain': keychain, 'username': username})\n", (32618, 32671), False, 'from cumulusci.tests.util import DummyKeychain, DummyOrgConfig\n'), ((32784, 32820), 'pytest.raises', 'pytest.raises', (['exc.BulkDataException'], {}), '(exc.BulkDataException)\n', (32797, 32820), False, 'import pytest\n'), ((22671, 22685), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (22675, 22685), False, 'from pathlib import Path\n'), ((23530, 23544), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (23534, 23544), False, 'from pathlib import Path\n'), ((26174, 26188), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (26178, 26188), False, 'from pathlib import Path\n'), ((27270, 27284), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (27274, 27284), False, 'from pathlib import Path\n'), ((27518, 27532), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (27522, 27532), False, 'from pathlib import Path\n'), ((29249, 29263), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (29253, 29263), False, 'from pathlib import Path\n'), ((31049, 31063), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (31053, 31063), False, 'from pathlib import Path\n'), ((31306, 31320), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (31310, 31320), False, 'from pathlib import Path\n'), ((32149, 32163), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (32153, 32163), False, 'from pathlib import Path\n'), ((32314, 32328), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (32318, 32328), False, 'from pathlib import Path\n'), ((30621, 30635), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (30625, 30635), False, 'from pathlib import Path\n')]
|
# #Modular python server/client framework for Night Owl##
# #IMPORTS
import json
import server
# #Classes
json_job_template={
"job": {
"id": {},
"created": {},
"completedby": {},
"all_done": {}
},
"module": {
"steno": {
"bpf": {}
},
"pulledpork": {
"newrules": {}
}
}
}
json_watchman_config = {
"watchman": {
"server": {
"addr": "127.0.0.1",
"port": "5000",
"pollinterval": "5",
"authkey": "STATIC_KEY"
},
"modules": {
"steno": "true",
"pulledpork": "true",
"splunk": "true"
}
}
}
json_jobboard_config = {
"job_board": {
"api": {
"addr": "127.0.0.1",
"port": "5000",
"loglevel": "error",
"logfile": "/opt/nightwatch/log/api.log"
},
"authkey": "STATIC_KEY"
}
}
# ##Command and control functions###
apikey = "TEST"
def get_role(): # Tells if host is a job board or watchman
filename = "watch.json"
with open(filename, "ro") as file:
config = json.load(file)
return config
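# Illustrative sketch (not part of the original module): given the two config
# shapes defined above, the host role could be derived from the top-level key
# of the parsed JSON returned by get_role().
def _infer_role(config):
    if "watchman" in config:
        return "watchman"
    if "job_board" in config:
        return "job_board"
    return "unknown"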
def check_in():
print("Place Holder")
|
[
"json.load"
] |
[((1020, 1035), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1029, 1035), False, 'import json\n')]
|
import sys
import argparse
from xitorch.debug.modes import enable_debug
def parse_args():
    # parse the arguments
parser = argparse.ArgumentParser("Run python script by enabling xitorch debug mode")
parser.add_argument("scriptfile", type=str, help="Path to the script to run")
parser.add_argument("args", type=str, nargs=argparse.REMAINDER,
help="The arguments needed to run the script")
return parser.parse_args()
def main():
args = parse_args()
scriptfile = args.scriptfile
scriptargs = args.args if args.args is not None else []
scriptargs.insert(0, scriptfile)
sys.argv[:] = scriptargs[:]
# compile and run the code with debug mode enabled
with enable_debug():
with open(scriptfile, 'rb') as stream:
code = compile(stream.read(), scriptfile, 'exec')
globs = {
'__file__': scriptfile,
'__name__': '__main__',
'__package__': None,
'__cached__': None,
}
exec(code, globs, None)
if __name__ == "__main__":
main()
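# Example invocation (hypothetical file and script names; any script works):
#   python run_debug.py my_script.py --lr 0.01
# Inside my_script.py, sys.argv then reads ['my_script.py', '--lr', '0.01'],
# and xitorch debug mode stays enabled for the duration of the run.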
|
[
"argparse.ArgumentParser",
"xitorch.debug.modes.enable_debug"
] |
[((129, 204), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Run python script by enabling xitorch debug mode"""'], {}), "('Run python script by enabling xitorch debug mode')\n", (152, 204), False, 'import argparse\n'), ((706, 720), 'xitorch.debug.modes.enable_debug', 'enable_debug', ([], {}), '()\n', (718, 720), False, 'from xitorch.debug.modes import enable_debug\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='prettytable-extras',
version='0.1.0',
packages=find_packages(),
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'License :: OSI Approved :: BSD License',
'Topic :: Text Processing',
],
install_requires=[
'prettytable >= 0.7.2',
],
license="BSD (3 clause)",
description='An extension to the excellent prettytable Python library',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/moul/prettytable-extras',
py_modules=['prettytable_extras'],
test_suite='prettytable_extras_test',
)
|
[
"setuptools.find_packages"
] |
[((165, 180), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (178, 180), False, 'from setuptools import setup, find_packages\n')]
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import azure.cli.command_modules.backup.custom_help as cust_help
import azure.cli.command_modules.backup.custom_common as common
# pylint: disable=import-error
# pylint: disable=broad-except
# pylint: disable=too-many-locals
# pylint: disable=too-many-statements
from uuid import uuid4
from azure.cli.command_modules.backup._validators import datetime_type
from azure.mgmt.recoveryservicesbackup.models import AzureVMAppContainerProtectionContainer, \
AzureWorkloadBackupRequest, ProtectedItemResource, AzureRecoveryServiceVaultProtectionIntent, TargetRestoreInfo, \
RestoreRequestResource, BackupRequestResource, ProtectionIntentResource, SQLDataDirectoryMapping, \
ProtectionContainerResource, AzureWorkloadSAPHanaRestoreRequest, AzureWorkloadSQLRestoreRequest, \
AzureWorkloadSAPHanaPointInTimeRestoreRequest, AzureWorkloadSQLPointInTimeRestoreRequest, \
AzureVmWorkloadSAPHanaDatabaseProtectedItem, AzureVmWorkloadSQLDatabaseProtectedItem
from azure.cli.core.util import CLIError
from azure.cli.command_modules.backup._client_factory import backup_workload_items_cf, \
protectable_containers_cf, backup_protection_containers_cf, backup_protected_items_cf
from knack.log import get_logger
fabric_name = "Azure"
logger = get_logger(__name__)
# Mapping of workload type
workload_type_map = {'MSSQL': 'SQLDataBase',
'SAPHANA': 'SAPHanaDatabase',
'SAPASE': 'SAPAseDatabase'}
# Mapping of module name
module_map = {'sqldatabase': 'sql_database',
'saphanadatabase': 'sap_hana_database',
'sapasedatabase': 'sap_ase_database'}
# Mapping of attribute name
attr_map = {'sqldatabase': 'SQLDatabase',
'saphanadatabase': 'SAPHanaDatabase',
'sapasedatabase': 'SAPAseDatabase'}
protectable_item_type_map = {'SQLDatabase': 'SQLDataBase',
'HANADataBase': 'SAPHanaDatabase',
'HANAInstance': 'SAPHanaSystem',
'SQLInstance': 'SQLInstance',
'SQLAG': 'SQLAG'}
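# Illustration of the maps above: user-facing aliases are normalised to the
# service-side names, e.g. workload_type_map['MSSQL'] == 'SQLDataBase' and
# protectable_item_type_map['HANADataBase'] == 'SAPHanaDatabase'.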
def show_wl_policy(client, resource_group_name, vault_name, name):
return [client.get(vault_name, resource_group_name, name)]
def list_wl_policies(client, resource_group_name, vault_name, workload_type, backup_management_type):
if workload_type is None:
raise CLIError(
"""
Workload type is required for Azure Workload.
""")
if backup_management_type is None:
raise CLIError(
"""
Backup Management Type needs to be specified for Azure Workload.
""")
workload_type = workload_type_map[workload_type]
filter_string = cust_help.get_filter_string({
'backupManagementType': backup_management_type,
'workloadType': workload_type})
policies = client.list(vault_name, resource_group_name, filter_string)
return cust_help.get_list_from_paged_response(policies)
def list_protectable_containers(cmd, resource_group_name, vault_name, container_type="AzureWorkload"):
filter_string = cust_help.get_filter_string({
'backupManagementType': container_type})
client = protectable_containers_cf(cmd.cli_ctx)
paged_containers = client.list(vault_name, resource_group_name, fabric_name, filter_string)
return cust_help.get_list_from_paged_response(paged_containers)
def register_wl_container(cmd, client, vault_name, resource_group_name, workload_type, resource_id, container_type):
if not cust_help.is_id(resource_id):
raise CLIError(
"""
Resource ID is not a valid one.
""")
workload_type = workload_type_map[workload_type]
container_name = resource_id.split('/')[-1]
containers = list_protectable_containers(cmd, resource_group_name, vault_name)
for container in containers:
if cust_help.get_resource_id(container.properties.container_id) == cust_help.get_resource_id(resource_id):
container_name = container.name
break
if not cust_help.is_native_name(container_name):
raise CLIError(
"""
Container unavailable or already registered.
""")
properties = AzureVMAppContainerProtectionContainer(backup_management_type=container_type,
source_resource_id=resource_id,
workload_type=workload_type)
param = ProtectionContainerResource(properties=properties)
# Trigger register and wait for completion
result = client.register(vault_name, resource_group_name, fabric_name, container_name, param, raw=True)
return cust_help.track_register_operation(cmd.cli_ctx, result, vault_name, resource_group_name, container_name)
def re_register_wl_container(cmd, client, vault_name, resource_group_name, workload_type,
container_name, container_type):
workload_type = workload_type_map[workload_type]
if not cust_help.is_native_name(container_name):
raise CLIError(
"""
Container name passed cannot be a friendly name.
Please pass a native container name.
""")
backup_cf = backup_protection_containers_cf(cmd.cli_ctx)
containers = common.list_containers(backup_cf, resource_group_name, vault_name, container_type)
source_resource_id = None
for container in containers:
if container.name == container_name:
source_resource_id = container.properties.source_resource_id
break
if not source_resource_id:
raise CLIError(
"""
No such registered container exists.
""")
properties = AzureVMAppContainerProtectionContainer(backup_management_type=container_type,
workload_type=workload_type,
operation_type='Reregister',
source_resource_id=source_resource_id)
param = ProtectionContainerResource(properties=properties)
# Trigger register and wait for completion
result = client.register(vault_name, resource_group_name, fabric_name, container_name, param, raw=True)
return cust_help.track_register_operation(cmd.cli_ctx, result, vault_name, resource_group_name, container_name)
def unregister_wl_container(cmd, client, vault_name, resource_group_name, container_name):
if not cust_help.is_native_name(container_name):
raise CLIError(
"""
Container name passed cannot be a friendly name.
Please pass a native container name.
""")
# Trigger unregister and wait for completion
result = client.unregister(vault_name, resource_group_name, fabric_name, container_name, raw=True)
return cust_help.track_register_operation(cmd.cli_ctx, result, vault_name, resource_group_name, container_name)
def update_policy_for_item(cmd, client, resource_group_name, vault_name, item, policy):
if item.properties.backup_management_type != policy.properties.backup_management_type:
raise CLIError(
"""
The policy type should match with the workload being protected.
Use the relevant get-default policy command and use it to update the policy for the workload.
""")
item_properties = item.properties
item_properties.policy_id = policy.id
container_uri = cust_help.get_protection_container_uri_from_id(item.id)
item_uri = cust_help.get_protected_item_uri_from_id(item.id)
param = ProtectedItemResource(properties=item_properties)
# Update policy
result = client.create_or_update(vault_name, resource_group_name, fabric_name,
container_uri, item_uri, param, raw=True)
return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
def resume_protection(cmd, client, resource_group_name, vault_name, item, policy):
return update_policy_for_item(cmd, client, resource_group_name, vault_name, item, policy)
def initialize_protectable_items(client, resource_group_name, vault_name, container_name, workload_type):
workload_type = workload_type_map[workload_type]
filter_string = cust_help.get_filter_string({
'backupManagementType': 'AzureWorkload',
'workloadType': workload_type})
return client.inquire(vault_name, resource_group_name, fabric_name, container_name, filter_string)
def create_policy(client, resource_group_name, vault_name, policy_name, policy, workload_type):
workload_type = workload_type_map[workload_type]
policy_object = cust_help.get_policy_from_json(client, policy)
policy_object.properties.backup_management_type = "AzureWorkload"
policy_object.properties.workload_type = workload_type
policy_object.name = policy_name
return client.create_or_update(vault_name, resource_group_name, policy_name, policy_object)
def set_policy(client, resource_group_name, vault_name, policy, policy_name):
if policy_name is None:
raise CLIError(
"""
Policy name is required for set policy.
""")
policy_object = cust_help.get_policy_from_json(client, policy)
return client.create_or_update(vault_name, resource_group_name, policy_name, policy_object)
def show_protectable_item(items, name, server_name, protectable_item_type):
if protectable_item_type_map.get(protectable_item_type) is not None:
protectable_item_type = protectable_item_type_map[protectable_item_type]
# Name filter
if cust_help.is_native_name(name):
filtered_items = [item for item in items if item.name.lower() == name.lower()]
else:
filtered_items = [item for item in items if item.properties.friendly_name.lower() == name.lower()]
# Server Name filter
filtered_items = [item for item in filtered_items if item.properties.server_name.lower() == server_name.lower()]
# Protectable Item Type filter
filtered_items = [item for item in filtered_items if
item.properties.protectable_item_type.lower() == protectable_item_type.lower()]
return cust_help.get_none_one_or_many(filtered_items)
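# For example (illustrative call; the result comes from get_none_one_or_many,
# presumably None, a single item, or a list when several match):
#   show_protectable_item(items, 'db1', 'myserver', 'SQLDatabase')
# filters first on name (native or friendly), then on server name, then on
# protectable item type.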
def show_protectable_instance(items, server_name, protectable_item_type):
if protectable_item_type_map.get(protectable_item_type) is not None:
protectable_item_type = protectable_item_type_map[protectable_item_type]
# Server Name filter
filtered_items = [item for item in items if item.properties.server_name.lower() == server_name.lower()]
# Protectable Item Type filter
filtered_items = [item for item in filtered_items if
item.properties.protectable_item_type.lower() == protectable_item_type.lower()]
return cust_help.get_none_one_or_many(filtered_items)
def list_protectable_items(client, resource_group_name, vault_name, workload_type, container_uri=None):
workload_type = workload_type_map[workload_type]
filter_string = cust_help.get_filter_string({
'backupManagementType': "AzureWorkload",
'workloadType': workload_type})
# Items list
items = client.list(vault_name, resource_group_name, filter_string)
paged_items = cust_help.get_list_from_paged_response(items)
if container_uri:
return [item for item in paged_items if
cust_help.get_protection_container_uri_from_id(item.id).lower() == container_uri.lower()]
return paged_items
def list_wl_recovery_points(cmd, client, resource_group_name, vault_name, item, start_date=None, end_date=None,
extended_info=None):
# Get container and item URIs
container_uri = cust_help.get_protection_container_uri_from_id(item.id)
item_uri = cust_help.get_protected_item_uri_from_id(item.id)
query_end_date, query_start_date = cust_help.get_query_dates(end_date, start_date)
if query_end_date and query_start_date:
cust_help.is_range_valid(query_start_date, query_end_date)
filter_string = cust_help.get_filter_string({
'startDate': query_start_date,
'endDate': query_end_date})
if cmd.name.split()[2] == 'show-log-chain' or extended_info is not None:
filter_string = cust_help.get_filter_string({
'restorePointQueryType': 'Log',
'startDate': query_start_date,
'endDate': query_end_date,
'extendedInfo': extended_info})
# Get recovery points
recovery_points = client.list(vault_name, resource_group_name, fabric_name, container_uri, item_uri, filter_string)
paged_recovery_points = cust_help.get_list_from_paged_response(recovery_points)
return paged_recovery_points
def enable_protection_for_azure_wl(cmd, client, resource_group_name, vault_name, policy_object, protectable_item):
# Get protectable item.
protectable_item_object = protectable_item
protectable_item_type = protectable_item_object.properties.protectable_item_type
if protectable_item_type.lower() not in ["sqldatabase", "sqlinstance", "saphanadatabase", "saphanasystem"]:
raise CLIError(
"""
Protectable Item must be either of type SQLDataBase, HANADatabase, HANAInstance or SQLInstance.
""")
item_name = protectable_item_object.name
container_name = protectable_item_object.id.split('/')[12]
cust_help.validate_policy(policy_object)
policy_id = policy_object.id
properties = _get_protected_item_instance(protectable_item_type)
properties.backup_management_type = 'AzureWorkload'
properties.policy_id = policy_id
properties.workload_type = protectable_item_type
param = ProtectionContainerResource(properties=properties)
# Trigger enable protection and wait for completion
result = client.create_or_update(vault_name, resource_group_name, fabric_name,
container_name, item_name, param, raw=True)
return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
def backup_now(cmd, client, resource_group_name, vault_name, item, retain_until, backup_type,
enable_compression=False):
message = "For SAPHANA and SQL workload, retain-until parameter value will be overridden by the underlying policy"
if retain_until is not None:
logger.warning(message)
container_uri = cust_help.get_protection_container_uri_from_id(item.id)
item_uri = cust_help.get_protected_item_uri_from_id(item.id)
backup_item_type = item_uri.split(';')[0]
if not cust_help.is_sql(backup_item_type) and enable_compression:
raise CLIError(
"""
Enable compression is not applicable for SAPHanaDatabase item type.
""")
if cust_help.is_hana(backup_item_type) and backup_type in ['Log', 'CopyOnlyFull']:
raise CLIError(
"""
Backup type cannot be Log or CopyOnlyFull for SAPHanaDatabase item type.
""")
properties = AzureWorkloadBackupRequest(backup_type=backup_type, enable_compression=enable_compression,
recovery_point_expiry_time_in_utc=retain_until)
param = BackupRequestResource(properties=properties)
# Trigger backup and wait for completion
result = client.trigger(vault_name, resource_group_name, fabric_name, container_uri,
item_uri, param, raw=True)
return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
def disable_protection(cmd, client, resource_group_name, vault_name, item, delete_backup_data):
container_uri = cust_help.get_protection_container_uri_from_id(item.id)
item_uri = cust_help.get_protected_item_uri_from_id(item.id)
backup_item_type = item_uri.split(';')[0]
if not cust_help.is_sql(backup_item_type) and not cust_help.is_hana(backup_item_type):
raise CLIError(
"""
Item must be either of type SQLDataBase or SAPHanaDatabase.
""")
if delete_backup_data:
result = client.delete(vault_name, resource_group_name, fabric_name, container_uri, item_uri, raw=True)
return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
properties = _get_protected_item_instance(backup_item_type)
properties.protection_state = 'ProtectionStopped'
properties.policy_id = ''
param = ProtectedItemResource(properties=properties)
# Trigger disable protection and wait for completion
result = client.create_or_update(vault_name, resource_group_name, fabric_name,
container_uri, item_uri, param, raw=True)
return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
def auto_enable_for_azure_wl(client, resource_group_name, vault_name, policy_object, protectable_item):
protectable_item_object = protectable_item
item_id = protectable_item_object.id
protectable_item_type = protectable_item_object.properties.protectable_item_type
if protectable_item_type.lower() != 'sqlinstance':
raise CLIError(
"""
Protectable Item can only be of type SQLInstance.
""")
policy_id = policy_object.id
properties = AzureRecoveryServiceVaultProtectionIntent(backup_management_type='AzureWorkload',
policy_id=policy_id, item_id=item_id)
param = ProtectionIntentResource(properties=properties)
intent_object_name = str(uuid4())
try:
client.create_or_update(vault_name, resource_group_name, fabric_name, intent_object_name, param)
return {'status': True}
except Exception:
return {'status': False}
def disable_auto_for_azure_wl(client, resource_group_name, vault_name, item_name):
if not cust_help.is_native_name(item_name):
raise CLIError(
"""
Protectable Item name must be native.
""")
protectable_item_type = item_name.split(';')[0]
if protectable_item_type.lower() != 'sqlinstance':
raise CLIError(
"""
Protectable Item can only be of type SQLInstance.
""")
intent_object_name = str(uuid4())
try:
client.delete(vault_name, resource_group_name, fabric_name, intent_object_name)
return {'status': True}
except Exception:
return {'status': False}
def list_workload_items(cmd, vault_name, resource_group_name, container_name,
container_type="AzureWorkload", workload_type="SQLInstance"):
filter_string = cust_help.get_filter_string({
'backupManagementType': container_type,
'workloadItemType': workload_type})
items = backup_workload_items_cf(cmd.cli_ctx).list(vault_name, resource_group_name,
fabric_name, container_name, filter_string)
return cust_help.get_list_from_paged_response(items)
def restore_azure_wl(cmd, client, resource_group_name, vault_name, recovery_config):
recovery_config_object = cust_help.get_or_read_json(recovery_config)
restore_mode = recovery_config_object['restore_mode']
container_uri = recovery_config_object['container_uri']
item_uri = recovery_config_object['item_uri']
recovery_point_id = recovery_config_object['recovery_point_id']
log_point_in_time = recovery_config_object['log_point_in_time']
item_type = recovery_config_object['item_type']
source_resource_id = recovery_config_object['source_resource_id']
database_name = recovery_config_object['database_name']
container_id = recovery_config_object['container_id']
alternate_directory_paths = recovery_config_object['alternate_directory_paths']
recovery_mode = recovery_config_object['recovery_mode']
filepath = recovery_config_object['filepath']
# Construct trigger restore request object
trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time)
trigger_restore_properties.recovery_type = restore_mode
if restore_mode == 'AlternateLocation':
if recovery_mode != "FileRecovery":
setattr(trigger_restore_properties, 'source_resource_id', source_resource_id)
setattr(trigger_restore_properties, 'target_info', TargetRestoreInfo(overwrite_option='Overwrite',
database_name=database_name,
container_id=container_id))
if 'sql' in item_type.lower():
directory_map = []
for i in alternate_directory_paths:
directory_map.append(SQLDataDirectoryMapping(mapping_type=i[0], source_path=i[1],
source_logical_name=i[2], target_path=i[3]))
setattr(trigger_restore_properties, 'alternate_directory_paths', directory_map)
else:
target_info = TargetRestoreInfo(overwrite_option='Overwrite', container_id=container_id,
target_directory_for_file_restore=filepath)
setattr(trigger_restore_properties, 'target_info', target_info)
trigger_restore_properties.recovery_mode = recovery_mode
if log_point_in_time is not None:
setattr(trigger_restore_properties, 'point_in_time', datetime_type(log_point_in_time))
if 'sql' in item_type.lower():
setattr(trigger_restore_properties, 'should_use_alternate_target_location', True)
setattr(trigger_restore_properties, 'is_non_recoverable', False)
trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)
# Trigger restore and wait for completion
result = client.trigger(vault_name, resource_group_name, fabric_name, container_uri,
item_uri, recovery_point_id, trigger_restore_request, raw=True)
return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
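
# Illustrative sketch (editorial addition; every value below is a hypothetical
# placeholder): recovery_config is either a dict or a path to a JSON file carrying the
# keys read out above. show_recovery_config (below) emits exactly this shape, e.g.:
# {
#     "restore_mode": "AlternateLocation",
#     "container_uri": "<container name of the protected item>",
#     "item_uri": "<protected item name>",
#     "recovery_point_id": "<recovery point name>",
#     "log_point_in_time": null,
#     "item_type": "SQL",
#     "source_resource_id": "<ARM id of the source resource>",
#     "database_name": "<target server friendly name>/<target database name>",
#     "container_id": "<ARM id of the target container>",
#     "recovery_mode": null,
#     "alternate_directory_paths": [],
#     "filepath": null
# }
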
def show_recovery_config(cmd, client, resource_group_name, vault_name, restore_mode, container_name, item_name,
rp_name, target_item, target_item_name, log_point_in_time, from_full_rp_name,
filepath, target_container):
if log_point_in_time is not None:
datetime_type(log_point_in_time)
if restore_mode == 'AlternateWorkloadRestore':
if target_item is None:
raise CLIError(
"""
Target Item must be provided.
""")
protectable_item_type = target_item.properties.protectable_item_type
if protectable_item_type.lower() not in ["sqlinstance", "saphanasystem"]:
raise CLIError(
"""
Target Item must be either of type HANAInstance or SQLInstance.
""")
if restore_mode == 'RestoreAsFiles' and target_container is None:
raise CLIError("Target Container must be provided.")
if rp_name is None and log_point_in_time is None:
raise CLIError(
"""
Log point in time or recovery point name must be provided.
""")
item = common.show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name, vault_name,
container_name, item_name, "AzureWorkload")
cust_help.validate_item(item)
item_type = item.properties.workload_type
item_name = item.name
if not cust_help.is_sql(item_type) and not cust_help.is_hana(item_type):
raise CLIError(
"""
Item must be either of type SQLDataBase or SAPHanaDatabase.
""")
# Mapping of restore mode
restore_mode_map = {'OriginalWorkloadRestore': 'OriginalLocation',
'AlternateWorkloadRestore': 'AlternateLocation',
'RestoreAsFiles': 'AlternateLocation'}
if rp_name is None and restore_mode == "RestoreAsFiles" and from_full_rp_name is not None:
rp_name = from_full_rp_name
rp_name = rp_name if rp_name is not None else 'DefaultRangeRecoveryPoint'
if rp_name == 'DefaultRangeRecoveryPoint':
recovery_points = list_wl_recovery_points(cmd, client, resource_group_name, vault_name, item,
None, None, True)
recovery_points = [rp for rp in recovery_points if rp.name == rp_name]
if recovery_points == []:
raise CLIError(
"""
Invalid input.
""")
recovery_point = recovery_points[0]
else:
recovery_point = common.show_recovery_point(cmd, client, resource_group_name, vault_name, container_name,
item_name, rp_name, item_type,
backup_management_type="AzureWorkload")
alternate_directory_paths = []
if 'sql' in item_type.lower() and restore_mode == 'AlternateWorkloadRestore':
items = list_workload_items(cmd, vault_name, resource_group_name, container_name)
for titem in items:
if titem.properties.friendly_name == target_item.properties.friendly_name:
if titem.properties.server_name == target_item.properties.server_name:
for path in recovery_point.properties.extended_info.data_directory_paths:
target_path = cust_help.get_target_path(path.type, path.path, path.logical_name,
titem.properties.data_directory_paths)
alternate_directory_paths.append((path.type, path.path, path.logical_name, target_path))
db_name = None
if restore_mode == 'AlternateWorkloadRestore':
friendly_name = target_item.properties.friendly_name
db_name = friendly_name + '/' + target_item_name
container_id = None
if restore_mode == 'AlternateWorkloadRestore':
container_id = '/'.join(target_item.id.split('/')[:-2])
if not ('sql' in item_type.lower() and restore_mode == 'AlternateWorkloadRestore'):
alternate_directory_paths = None
recovery_mode = None
if restore_mode == 'RestoreAsFiles':
recovery_mode = 'FileRecovery'
container_id = target_container.id
return {
'restore_mode': restore_mode_map[restore_mode],
'container_uri': item.properties.container_name,
'item_uri': item_name,
'recovery_point_id': recovery_point.name,
'log_point_in_time': log_point_in_time,
'item_type': 'SQL' if 'sql' in item_type.lower() else 'SAPHana',
'source_resource_id': item.properties.source_resource_id,
'database_name': db_name,
'container_id': container_id,
'recovery_mode': recovery_mode,
'filepath': filepath,
'alternate_directory_paths': alternate_directory_paths}
def _get_restore_request_instance(item_type, log_point_in_time):
if item_type.lower() == "saphana":
if log_point_in_time is not None:
return AzureWorkloadSAPHanaPointInTimeRestoreRequest()
return AzureWorkloadSAPHanaRestoreRequest()
if item_type.lower() == "sql":
if log_point_in_time is not None:
return AzureWorkloadSQLPointInTimeRestoreRequest()
return AzureWorkloadSQLRestoreRequest()
return None
def _get_protected_item_instance(item_type):
if item_type.lower() == "saphanadatabase":
return AzureVmWorkloadSAPHanaDatabaseProtectedItem()
return AzureVmWorkloadSQLDatabaseProtectedItem()
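
# Editorial note (not part of the original module): the two private factories above
# encode a small type dispatch; the mapping they implement is
#   _get_restore_request_instance:
#     ('saphana', point-in-time) -> AzureWorkloadSAPHanaPointInTimeRestoreRequest
#     ('saphana', otherwise)     -> AzureWorkloadSAPHanaRestoreRequest
#     ('sql',     point-in-time) -> AzureWorkloadSQLPointInTimeRestoreRequest
#     ('sql',     otherwise)     -> AzureWorkloadSQLRestoreRequest
#   _get_protected_item_instance:
#     'saphanadatabase'          -> AzureVmWorkloadSAPHanaDatabaseProtectedItem
#     anything else              -> AzureVmWorkloadSQLDatabaseProtectedItem
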
|
"""
tests.helpers.test_init
~~~~~~~~~~~~~~~~~~~~~~~
Tests component helpers.
"""
# pylint: disable=protected-access,too-many-public-methods
import unittest
from homeassistant import helpers
from tests.common import get_test_home_assistant
class TestHelpers(unittest.TestCase):
""" Tests homeassistant.helpers module. """
def setUp(self): # pylint: disable=invalid-name
""" Init needed objects. """
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_extract_domain_configs(self):
config = {
'zone': None,
'zoner': None,
'zone ': None,
'zone Hallo': None,
'zone 100': None,
}
self.assertEqual(set(['zone', 'zone Hallo', 'zone 100']),
set(helpers.extract_domain_configs(config, 'zone')))
|
[
"homeassistant.helpers.extract_domain_configs",
"tests.common.get_test_home_assistant"
] |
[((441, 466), 'tests.common.get_test_home_assistant', 'get_test_home_assistant', ([], {}), '()\n', (464, 466), False, 'from tests.common import get_test_home_assistant\n'), ((904, 950), 'homeassistant.helpers.extract_domain_configs', 'helpers.extract_domain_configs', (['config', '"""zone"""'], {}), "(config, 'zone')\n", (934, 950), False, 'from homeassistant import helpers\n')]
|
"""Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 20
validate_train_set = True
save_every = 20
restart_from_save = False
dump_network_loaded_data = False
# Training (schedule) parameters
# - batch sizes
batch_size = 4
sunny_batch_size = 4
batches_per_chunk = 32
num_epochs_train = 150
# - learning rate and method
base_lr = 0.0001
learning_rate_schedule = {
0: base_lr,
8*num_epochs_train/10: base_lr/10,
19*num_epochs_train/20: base_lr/100,
}
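# Spelled out for num_epochs_train = 150 (a reading aid only): epoch 0 starts at
# 1e-4, epoch 120 (= 8*150/10) drops to 1e-5, and epoch 142.5 (= 19*150/20, a
# float key under Python 3 division) drops to 1e-6.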
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotate": (-180, 180),
"shear": (0, 0),
"zoom_x": (0.75, 1.25),
"zoom_y": (0.75, 1.25),
"skew_x": (-10, 10),
"skew_y": (-10, 10),
"translate_x": (-8, 8),
"translate_y": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0)
}
use_hough_roi = True
preprocess_train = functools.partial( # normscale_resize_and_augment has a bug
preprocess.preprocess_normscale,
normscale_resize_and_augment_function=functools.partial(
image_transform.normscale_resize_and_augment_2,
normalised_patch_size=(64,64)))
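# The nested functools.partial above pre-binds the augmentation pipeline: the
# outer partial fixes preprocess_normscale as the training preprocessor and the
# inner partial fixes the resize/augment routine to emit 64x64 normalised
# patches; preprocess_validation below reuses the same callable with
# augmentation switched off.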
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
def filter_samples(folders):
    # don't use patients who don't have more than 6 slices
import glob
return folders
# Input sizes
image_size = 64
nr_slices = 22
data_sizes = {
"sliced:data:sax": (batch_size, nr_slices, 30, image_size, image_size),
"sliced:data:sax:locations": (batch_size, nr_slices),
"sliced:data:sax:is_not_padded": (batch_size, nr_slices),
"sliced:data:randomslices": (batch_size, nr_slices, 30, image_size, image_size),
"sliced:data:singleslice:2ch": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:4ch": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size),
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size),
"sliced:data:singleslice": (batch_size, 30, image_size, image_size),
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size),
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
    # l2 regularization on certain layers
l2_penalty = nn.regularization.regularize_layer_params_weighted(
interface_layers["regularizable"], nn.regularization.l2)
# build objective
return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 100 # More augmentations since we only use single slices
tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
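# Reading of the line above (the utils helpers are not shown here): each
# augmented prediction is a CDF over volumes, utils.cdf_to_pdf converts it to a
# PDF, the PDFs are combined with a normalised geometric mean across the
# augmentations, and np.cumsum accumulates the result back into a CDF.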
# nonlinearity putting a lower bound on its output
def lb_softplus(lb):
return lambda x: nn.nonlinearities.softplus(x) + lb
init = nn.init.Orthogonal()
rnn_layer = functools.partial(nn.layers.RecurrentLayer,
W_in_to_hid=init,
W_hid_to_hid=init,
b=nn.init.Constant(0.1),
nonlinearity=nn.nonlinearities.rectify,
hid_init=nn.init.Constant(0.),
backwards=False,
learn_init=True,
gradient_steps=-1,
grad_clipping=False,
unroll_scan=False,
precompute_input=False)
# Architecture
def build_model():
from . import j6_2ch_gauss, j6_4ch_gauss
meta_2ch = j6_2ch_gauss.build_model()
meta_4ch = j6_4ch_gauss.build_model()
l_meta_2ch_systole = nn.layers.DenseLayer(meta_2ch["meta_outputs"]["systole"], num_units=64, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
l_meta_2ch_diastole = nn.layers.DenseLayer(meta_2ch["meta_outputs"]["diastole"], num_units=64, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
l_meta_4ch_systole = nn.layers.DenseLayer(meta_4ch["meta_outputs"]["systole"], num_units=64, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
l_meta_4ch_diastole = nn.layers.DenseLayer(meta_4ch["meta_outputs"]["diastole"], num_units=64, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
#################
# Regular model #
#################
input_size = data_sizes["sliced:data:sax"]
input_size_mask = data_sizes["sliced:data:sax:is_not_padded"]
input_size_locations = data_sizes["sliced:data:sax:locations"]
l0 = nn.layers.InputLayer(input_size)
lin_slice_mask = nn.layers.InputLayer(input_size_mask)
lin_slice_locations = nn.layers.InputLayer(input_size_locations)
    # PREPROCESS SLICES SEPARATELY
# Convolutional layers and some dense layers are defined in a submodel
l0_slices = nn.layers.ReshapeLayer(l0, (-1, [2], [3], [4]))
from . import je_ss_jonisc64small_360_gauss_longer
submodel = je_ss_jonisc64small_360_gauss_longer.build_model(l0_slices)
# Systole Dense layers
l_sys_mu = submodel["meta_outputs"]["systole:mu"]
l_sys_sigma = submodel["meta_outputs"]["systole:sigma"]
l_sys_meta = submodel["meta_outputs"]["systole"]
# Diastole Dense layers
l_dia_mu = submodel["meta_outputs"]["diastole:mu"]
l_dia_sigma = submodel["meta_outputs"]["diastole:sigma"]
l_dia_meta = submodel["meta_outputs"]["diastole"]
# AGGREGATE SLICES PER PATIENT
l_scaled_slice_locations = layers.TrainableScaleLayer(lin_slice_locations, scale=nn.init.Constant(0.1), trainable=False)
# Systole
l_pat_sys_ss_mu = nn.layers.ReshapeLayer(l_sys_mu, (-1, nr_slices))
l_pat_sys_ss_sigma = nn.layers.ReshapeLayer(l_sys_sigma, (-1, nr_slices))
l_pat_sys_aggr_mu_sigma = layers.JeroenLayer([l_pat_sys_ss_mu, l_pat_sys_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=100.)
l_systole = layers.MuSigmaErfLayer(l_pat_sys_aggr_mu_sigma)
l_sys_meta = nn.layers.DenseLayer(nn.layers.ReshapeLayer(l_sys_meta, (-1, nr_slices, 512)), num_units=64, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
l_meta_systole = nn.layers.ConcatLayer([l_meta_2ch_systole, l_meta_4ch_systole, l_sys_meta])
l_weights = nn.layers.DenseLayer(l_meta_systole, num_units=512, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
l_weight_mean = nn.layers.DenseLayer(l_weights, num_units=3, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.identity)
l_weight_certainty = nn.layers.DenseLayer(l_weights, num_units=1, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.identity)
l_weighted_mean = layers.WeightedMeanLayer(l_weight_mean, [l_systole, meta_2ch["outputs"]["systole"], meta_4ch["outputs"]["systole"]])
systole_output = layers.IncreaseCertaintyLayer(l_weight_certainty, l_weighted_mean)
# Diastole
l_pat_dia_ss_mu = nn.layers.ReshapeLayer(l_dia_mu, (-1, nr_slices))
l_pat_dia_ss_sigma = nn.layers.ReshapeLayer(l_dia_sigma, (-1, nr_slices))
l_pat_dia_aggr_mu_sigma = layers.JeroenLayer([l_pat_dia_ss_mu, l_pat_dia_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=100.)
l_diastole = layers.MuSigmaErfLayer(l_pat_dia_aggr_mu_sigma)
l_dia_meta = nn.layers.DenseLayer(nn.layers.ReshapeLayer(l_dia_meta, (-1, nr_slices, 512)), num_units=64, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
l_meta_diastole = nn.layers.ConcatLayer([l_meta_2ch_diastole, l_meta_4ch_diastole, l_dia_meta])
l_weights = nn.layers.DenseLayer(l_meta_diastole, num_units=512, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
l_weight_mean = nn.layers.DenseLayer(l_weights, num_units=3, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.identity)
l_weight_certainty = nn.layers.DenseLayer(l_weights, num_units=1, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.identity)
l_weighted_mean = layers.WeightedMeanLayer(l_weight_mean, [l_diastole, meta_2ch["outputs"]["diastole"], meta_4ch["outputs"]["diastole"]])
diastole_output = layers.IncreaseCertaintyLayer(l_weight_certainty, l_weighted_mean)
submodels = [submodel, meta_2ch, meta_4ch]
return {
"inputs":dict({
"sliced:data:sax": l0,
"sliced:data:sax:is_not_padded": lin_slice_mask,
"sliced:data:sax:locations": lin_slice_locations,
}, **{ k: v for d in [model["inputs"] for model in [meta_2ch, meta_4ch]]
for k, v in list(d.items()) }
),
"outputs": {
"systole": systole_output,
"diastole": diastole_output,
},
"regularizable": dict(
{},
**{
k: v
for d in [model["regularizable"] for model in submodels if "regularizable" in model]
for k, v in list(d.items()) }
),
"pretrained":{
je_ss_jonisc64small_360_gauss_longer.__name__: submodel["outputs"],
j6_2ch_gauss.__name__: meta_2ch["outputs"],
j6_4ch_gauss.__name__: meta_4ch["outputs"],
},
#"cutoff_gradients": [
#] + [ v for d in [model["meta_outputs"] for model in [meta_2ch, meta_4ch] if "meta_outputs" in model]
# for v in d.values() ]
}
|
[
"functools.partial",
"objectives.KaggleObjective",
"lasagne.layers.ConcatLayer",
"lasagne.layers.InputLayer",
"lasagne.regularization.regularize_layer_params_weighted",
"layers.MuSigmaErfLayer",
"layers.IncreaseCertaintyLayer",
"lasagne.layers.ReshapeLayer",
"lasagne.init.Constant",
"layers.WeightedMeanLayer",
"layers.JeroenLayer",
"lasagne.init.Orthogonal",
"lasagne.nonlinearities.softplus",
"utils.cdf_to_pdf"
] |
[((1971, 2021), 'functools.partial', 'functools.partial', (['preprocess_train'], {'augment': '(False)'}), '(preprocess_train, augment=False)\n', (1988, 2021), False, 'import functools\n'), ((2365, 2439), 'functools.partial', 'functools.partial', (['data_loader.generate_validation_batch'], {'set': '"""validation"""'}), "(data_loader.generate_validation_batch, set='validation')\n", (2382, 2439), False, 'import functools\n'), ((2465, 2534), 'functools.partial', 'functools.partial', (['data_loader.generate_validation_batch'], {'set': '"""train"""'}), "(data_loader.generate_validation_batch, set='train')\n", (2482, 2534), False, 'import functools\n'), ((2554, 2632), 'functools.partial', 'functools.partial', (['data_loader.generate_test_batch'], {'set': "['validation', 'test']"}), "(data_loader.generate_test_batch, set=['validation', 'test'])\n", (2571, 2632), False, 'import functools\n'), ((4598, 4618), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', (4616, 4618), True, 'import lasagne as nn\n'), ((1257, 1315), 'functools.partial', 'functools.partial', (['preprocess.normalize_contrast_zmuv'], {'z': '(2)'}), '(preprocess.normalize_contrast_zmuv, z=2)\n', (1274, 1315), False, 'import functools\n'), ((3989, 4101), 'lasagne.regularization.regularize_layer_params_weighted', 'nn.regularization.regularize_layer_params_weighted', (["interface_layers['regularizable']", 'nn.regularization.l2'], {}), "(interface_layers[\n 'regularizable'], nn.regularization.l2)\n", (4039, 4101), True, 'import lasagne as nn\n'), ((4142, 4217), 'objectives.KaggleObjective', 'objectives.KaggleObjective', (["interface_layers['outputs']"], {'penalty': 'l2_penalty'}), "(interface_layers['outputs'], penalty=l2_penalty)\n", (4168, 4217), False, 'import objectives\n'), ((6181, 6213), 'lasagne.layers.InputLayer', 'nn.layers.InputLayer', (['input_size'], {}), '(input_size)\n', (6201, 6213), True, 'import lasagne as nn\n'), ((6236, 6273), 'lasagne.layers.InputLayer', 'nn.layers.InputLayer', (['input_size_mask'], {}), '(input_size_mask)\n', (6256, 6273), True, 'import lasagne as nn\n'), ((6301, 6343), 'lasagne.layers.InputLayer', 'nn.layers.InputLayer', (['input_size_locations'], {}), '(input_size_locations)\n', (6321, 6343), True, 'import lasagne as nn\n'), ((6475, 6522), 'lasagne.layers.ReshapeLayer', 'nn.layers.ReshapeLayer', (['l0', '(-1, [2], [3], [4])'], {}), '(l0, (-1, [2], [3], [4]))\n', (6497, 6522), True, 'import lasagne as nn\n'), ((7265, 7314), 'lasagne.layers.ReshapeLayer', 'nn.layers.ReshapeLayer', (['l_sys_mu', '(-1, nr_slices)'], {}), '(l_sys_mu, (-1, nr_slices))\n', (7287, 7314), True, 'import lasagne as nn\n'), ((7341, 7393), 'lasagne.layers.ReshapeLayer', 'nn.layers.ReshapeLayer', (['l_sys_sigma', '(-1, nr_slices)'], {}), '(l_sys_sigma, (-1, nr_slices))\n', (7363, 7393), True, 'import lasagne as nn\n'), ((7425, 7549), 'layers.JeroenLayer', 'layers.JeroenLayer', (['[l_pat_sys_ss_mu, l_pat_sys_ss_sigma, lin_slice_mask, l_scaled_slice_locations]'], {'rescale_input': '(100.0)'}), '([l_pat_sys_ss_mu, l_pat_sys_ss_sigma, lin_slice_mask,\n l_scaled_slice_locations], rescale_input=100.0)\n', (7443, 7549), False, 'import layers\n'), ((7564, 7611), 'layers.MuSigmaErfLayer', 'layers.MuSigmaErfLayer', (['l_pat_sys_aggr_mu_sigma'], {}), '(l_pat_sys_aggr_mu_sigma)\n', (7586, 7611), False, 'import layers\n'), ((7838, 7913), 'lasagne.layers.ConcatLayer', 'nn.layers.ConcatLayer', (['[l_meta_2ch_systole, l_meta_4ch_systole, l_sys_meta]'], {}), '([l_meta_2ch_systole, l_meta_4ch_systole, l_sys_meta])\n', (7859, 
7913), True, 'import lasagne as nn\n'), ((8412, 8533), 'layers.WeightedMeanLayer', 'layers.WeightedMeanLayer', (['l_weight_mean', "[l_systole, meta_2ch['outputs']['systole'], meta_4ch['outputs']['systole']]"], {}), "(l_weight_mean, [l_systole, meta_2ch['outputs'][\n 'systole'], meta_4ch['outputs']['systole']])\n", (8436, 8533), False, 'import layers\n'), ((8551, 8617), 'layers.IncreaseCertaintyLayer', 'layers.IncreaseCertaintyLayer', (['l_weight_certainty', 'l_weighted_mean'], {}), '(l_weight_certainty, l_weighted_mean)\n', (8580, 8617), False, 'import layers\n'), ((8659, 8708), 'lasagne.layers.ReshapeLayer', 'nn.layers.ReshapeLayer', (['l_dia_mu', '(-1, nr_slices)'], {}), '(l_dia_mu, (-1, nr_slices))\n', (8681, 8708), True, 'import lasagne as nn\n'), ((8735, 8787), 'lasagne.layers.ReshapeLayer', 'nn.layers.ReshapeLayer', (['l_dia_sigma', '(-1, nr_slices)'], {}), '(l_dia_sigma, (-1, nr_slices))\n', (8757, 8787), True, 'import lasagne as nn\n'), ((8819, 8943), 'layers.JeroenLayer', 'layers.JeroenLayer', (['[l_pat_dia_ss_mu, l_pat_dia_ss_sigma, lin_slice_mask, l_scaled_slice_locations]'], {'rescale_input': '(100.0)'}), '([l_pat_dia_ss_mu, l_pat_dia_ss_sigma, lin_slice_mask,\n l_scaled_slice_locations], rescale_input=100.0)\n', (8837, 8943), False, 'import layers\n'), ((8959, 9006), 'layers.MuSigmaErfLayer', 'layers.MuSigmaErfLayer', (['l_pat_dia_aggr_mu_sigma'], {}), '(l_pat_dia_aggr_mu_sigma)\n', (8981, 9006), False, 'import layers\n'), ((9234, 9311), 'lasagne.layers.ConcatLayer', 'nn.layers.ConcatLayer', (['[l_meta_2ch_diastole, l_meta_4ch_diastole, l_dia_meta]'], {}), '([l_meta_2ch_diastole, l_meta_4ch_diastole, l_dia_meta])\n', (9255, 9311), True, 'import lasagne as nn\n'), ((9811, 9935), 'layers.WeightedMeanLayer', 'layers.WeightedMeanLayer', (['l_weight_mean', "[l_diastole, meta_2ch['outputs']['diastole'], meta_4ch['outputs']['diastole']]"], {}), "(l_weight_mean, [l_diastole, meta_2ch['outputs'][\n 'diastole'], meta_4ch['outputs']['diastole']])\n", (9835, 9935), False, 'import layers\n'), ((9954, 10020), 'layers.IncreaseCertaintyLayer', 'layers.IncreaseCertaintyLayer', (['l_weight_certainty', 'l_weighted_mean'], {}), '(l_weight_certainty, l_weighted_mean)\n', (9983, 10020), False, 'import layers\n'), ((1828, 1929), 'functools.partial', 'functools.partial', (['image_transform.normscale_resize_and_augment_2'], {'normalised_patch_size': '(64, 64)'}), '(image_transform.normscale_resize_and_augment_2,\n normalised_patch_size=(64, 64))\n', (1845, 1929), False, 'import functools\n'), ((4732, 4753), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (4748, 4753), True, 'import lasagne as nn\n'), ((4814, 4835), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.0)'], {}), '(0.0)\n', (4830, 4835), True, 'import lasagne as nn\n'), ((7653, 7709), 'lasagne.layers.ReshapeLayer', 'nn.layers.ReshapeLayer', (['l_sys_meta', '(-1, nr_slices, 512)'], {}), '(l_sys_meta, (-1, nr_slices, 512))\n', (7675, 7709), True, 'import lasagne as nn\n'), ((9048, 9104), 'lasagne.layers.ReshapeLayer', 'nn.layers.ReshapeLayer', (['l_dia_meta', '(-1, nr_slices, 512)'], {}), '(l_dia_meta, (-1, nr_slices, 512))\n', (9070, 9104), True, 'import lasagne as nn\n'), ((4428, 4447), 'utils.cdf_to_pdf', 'utils.cdf_to_pdf', (['x'], {}), '(x)\n', (4444, 4447), False, 'import utils\n'), ((4551, 4580), 'lasagne.nonlinearities.softplus', 'nn.nonlinearities.softplus', (['x'], {}), '(x)\n', (4577, 4580), True, 'import lasagne as nn\n'), ((5261, 5281), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', 
(5279, 5281), True, 'import lasagne as nn\n'), ((5285, 5306), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (5301, 5306), True, 'import lasagne as nn\n'), ((5450, 5470), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', (5468, 5470), True, 'import lasagne as nn\n'), ((5474, 5495), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (5490, 5495), True, 'import lasagne as nn\n'), ((5639, 5659), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', (5657, 5659), True, 'import lasagne as nn\n'), ((5663, 5684), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (5679, 5684), True, 'import lasagne as nn\n'), ((5828, 5848), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', (5846, 5848), True, 'import lasagne as nn\n'), ((5852, 5873), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (5868, 5873), True, 'import lasagne as nn\n'), ((7185, 7206), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (7201, 7206), True, 'import lasagne as nn\n'), ((7727, 7747), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', (7745, 7747), True, 'import lasagne as nn\n'), ((7751, 7772), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (7767, 7772), True, 'import lasagne as nn\n'), ((7985, 8005), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', (8003, 8005), True, 'import lasagne as nn\n'), ((8009, 8030), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (8025, 8030), True, 'import lasagne as nn\n'), ((8140, 8160), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', (8158, 8160), True, 'import lasagne as nn\n'), ((8164, 8185), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (8180, 8185), True, 'import lasagne as nn\n'), ((8301, 8321), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', (8319, 8321), True, 'import lasagne as nn\n'), ((8325, 8346), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (8341, 8346), True, 'import lasagne as nn\n'), ((9122, 9142), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', (9140, 9142), True, 'import lasagne as nn\n'), ((9146, 9167), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (9162, 9167), True, 'import lasagne as nn\n'), ((9384, 9404), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', (9402, 9404), True, 'import lasagne as nn\n'), ((9408, 9429), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (9424, 9429), True, 'import lasagne as nn\n'), ((9539, 9559), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', (9557, 9559), True, 'import lasagne as nn\n'), ((9563, 9584), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (9579, 9584), True, 'import lasagne as nn\n'), ((9700, 9720), 'lasagne.init.Orthogonal', 'nn.init.Orthogonal', ([], {}), '()\n', (9718, 9720), True, 'import lasagne as nn\n'), ((9724, 9745), 'lasagne.init.Constant', 'nn.init.Constant', (['(0.1)'], {}), '(0.1)\n', (9740, 9745), True, 'import lasagne as nn\n')]
|
import torch
import torch.nn as nn
import pdb
class ActionClassification(nn.Module):
def __init__(self, feature_size, num_classes, opts):
super(ActionClassification, self).__init__()
self.opts = opts
self.attention_flow_stream = StreamModule(feature_size, num_classes)
self.attention_rgb_stream = StreamModule(feature_size, num_classes)
self.classifier_flow_stream = StreamClassificationHead(feature_size,
num_classes)
self.classifier_rgb_stream = StreamClassificationHead(feature_size,
num_classes)
self.classifier_both = StreamClassificationHead(feature_size, num_classes)
self.multi_label_cross = nn.MultiLabelSoftMarginLoss()
def forward(self, rgb_features, flow_features):
# rgb B x T x 1024
# flow B x T x 1024
wt_ft_rgb, attn_rgb = self.attention_rgb_stream(rgb_features)
wt_ft_flow, attn_flow = self.attention_flow_stream(flow_features)
class_flow = self.classifier_flow_stream(wt_ft_flow)
class_rgb = self.classifier_rgb_stream(wt_ft_rgb)
features_both = wt_ft_flow + wt_ft_rgb
class_both = self.classifier_both(features_both)
if self.opts.log_now:
            self.opts.logger.histo_summary('rgb_wt_features',
wt_ft_rgb.data.cpu().numpy(),
self.opts.iteration)
outputs = dict()
outputs['class_both'] = class_both
outputs['class_rgb'] = class_rgb
outputs['class_flow'] = class_flow
outputs['attn_rgb'] = attn_rgb
outputs['attn_flow'] = attn_flow
return outputs
def get_attention_models(self):
models = {}
models[
'rgb_attention_model'] = self.attention_rgb_stream.class_attention_models
models[
'flow_attention_model'] = self.attention_flow_stream.class_attention_models
return models
def build_loss(self, outputs, target_class):
# target_class is B x C
self.rgb_loss = self.multi_label_cross(outputs['class_rgb'], target_class)
self.flow_loss = self.multi_label_cross(outputs['class_flow'], target_class)
self.both_loss = self.multi_label_cross(outputs['class_both'], target_class)
self.rgb_sparsity = self.attention_rgb_stream.attention_module.l1_sparsity_loss(
outputs['attn_rgb'])
self.flow_sparsity = self.attention_flow_stream.attention_module.l1_sparsity_loss(
outputs['attn_flow'])
losses = {}
losses['rgb_loss'] = self.opts.cls_wt_rgb * self.rgb_loss
losses['flow_loss'] = self.opts.cls_wt_flow * self.flow_loss
losses['both_loss'] = self.opts.cls_wt_both * self.both_loss
losses['rgb_sparsity_loss'] = self.opts.sparsity_wt_rgb * self.rgb_sparsity
losses[
'flow_sparsity_loss'] = self.opts.sparsity_wt_flow * self.flow_sparsity
self.total_loss = losses['rgb_loss'] + losses['flow_loss'] + losses[
'both_loss'] + losses['rgb_sparsity_loss'] + losses['flow_sparsity_loss']
losses['total_loss'] = self.total_loss
return losses
class StreamModule(nn.Module):
def __init__(self, feature_size, num_classes):
super(StreamModule, self).__init__()
self.feature_size = feature_size
self.num_classes = num_classes
self.attention_module = ClassAttentionModule(self.feature_size,
self.num_classes)
self.class_attention_models = ClassAttentionModule(self.feature_size,
self.num_classes)
def forward(self, x):
# x is B x T x feature_size
attention = self.class_attention_models.forward(x) # B x T x num_classes
attention_expand = attention.unsqueeze(3).expand(
torch.Size([attention.size(0), attention.size(1), attention.size(2),
                    x.size(2)])) # B x T x num_class x feature_size
x_expand = x.unsqueeze(2).expand(
attention_expand.size()) # B x T x num_class x feature_size
new_features = x_expand * attention_expand # B x T x num_classes x feature_size
weighted_features = torch.sum(new_features, 1)
return weighted_features, attention # ( B x num_class x feature_size , B x T x num_class)
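# In other words, StreamModule performs per-class temporal attention pooling:
# weighted_features[b, c, :] = sum over t of attention[b, t, c] * x[b, t, :],
# so each class gets its own attention-weighted summary of the segment features.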
class ClassifierModule(nn.Module):
# performs 0/1 classification
def __init__(self, class_index, feature_size):
super(ClassifierModule, self).__init__()
self.class_index = class_index
self.mlp1 = nn.Linear(feature_size, 256)
self.mlp2 = nn.Linear(256, 1)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.mlp1(x)
x = self.relu(x)
x = self.mlp2(x)
return x
class StreamClassificationHead(nn.Module):
def __init__(self, feature_size, num_classes):
super(StreamClassificationHead, self).__init__()
self.num_classes = num_classes
self.classifier_modules = nn.ModuleList(
[ClassifierModule(i, feature_size) for i in range(num_classes)])
def forward(self, x):
# x : B x num_class x feature_size
# x = self.classifier(x) # B x num_class x 1
outs = []
for i in range(self.num_classes):
outs.append(self.classifier_modules[i](x[:, i, :]))
        outs = torch.cat(outs, dim=1) # B x num_classes
# B x num_class
return outs
class ClassAttentionModule(nn.Module):
def __init__(self, feature_size, num_classes):
super(ClassAttentionModule, self).__init__()
self.num_classes = num_classes
self.attention_modules = nn.ModuleList(
[AttentionModule(i, feature_size) for i in range(num_classes)])
def forward(self, x, class_index=None):
if class_index is None:
out = []
for m in self.attention_modules:
out.append(m(x)) ## m(x) B x T x 1024
out = torch.stack(out).squeeze(3)
out = out.permute(1, 2, 0)
return out
else:
return self.attention_modules[class_index](x).squeeze(2) # B x T
def l1_sparsity_loss(self, x):
return x.sum(dim=2).sum(dim=1)
def build_binary_loss(self, class_index, pred_labels, target_labels,
label_weights):
return self.attention_modules[class_index].build_binary_loss(pred_labels,
target_labels,
label_weights)
class AttentionModule(nn.Module):
def __init__(self, class_id, feature_size):
super(AttentionModule, self).__init__()
self.class_id = class_id
self.feature_size = feature_size
self.net = nn.Sequential(nn.Linear(self.feature_size, 256),
nn.ReLU(),
nn.Linear(256, 1),
nn.Sigmoid())
def l1_penalty(self, var):
return torch.abs(var).sum()
def forward(self, feature_segments):
x = self.net(feature_segments)
        return x ## B x T x 1
def build_binary_loss(self, pred_labels, target_labels, label_weights,
lambda1=1.1E-3):
## pred_labels B x 1 ## target_labels B x 1
return torch.nn.functional.binary_cross_entropy(pred_labels, target_labels,
weight=label_weights) + lambda1 * self.l1_penalty(pred_labels)
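# Minimal usage sketch (shapes follow the comments above; `opts` is assumed to
# provide log_now, logger, iteration and the cls_wt_* / sparsity_wt_* weights):
#
#   model = ActionClassification(feature_size=1024, num_classes=20, opts=opts)
#   rgb_features = torch.randn(8, 32, 1024)   # B x T x 1024
#   flow_features = torch.randn(8, 32, 1024)  # B x T x 1024
#   outputs = model(rgb_features, flow_features)
#   losses = model.build_loss(outputs, torch.randint(0, 2, (8, 20)).float())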
|
[
"torch.nn.functional.binary_cross_entropy",
"torch.nn.ReLU",
"torch.stack",
"torch.cat",
"torch.nn.MultiLabelSoftMarginLoss",
"torch.abs",
"torch.nn.Linear",
"torch.sum",
"torch.nn.Sigmoid"
] |
[((753, 782), 'torch.nn.MultiLabelSoftMarginLoss', 'nn.MultiLabelSoftMarginLoss', ([], {}), '()\n', (780, 782), True, 'import torch.nn as nn\n'), ((4098, 4124), 'torch.sum', 'torch.sum', (['new_features', '(1)'], {}), '(new_features, 1)\n', (4107, 4124), False, 'import torch\n'), ((4434, 4462), 'torch.nn.Linear', 'nn.Linear', (['feature_size', '(256)'], {}), '(feature_size, 256)\n', (4443, 4462), True, 'import torch.nn as nn\n'), ((4479, 4496), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(1)'], {}), '(256, 1)\n', (4488, 4496), True, 'import torch.nn as nn\n'), ((4513, 4522), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4520, 4522), True, 'import torch.nn as nn\n'), ((4542, 4554), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4552, 4554), True, 'import torch.nn as nn\n'), ((5200, 5222), 'torch.cat', 'torch.cat', (['outs'], {'dim': '(1)'}), '(outs, dim=1)\n', (5209, 5222), False, 'import torch\n'), ((6559, 6592), 'torch.nn.Linear', 'nn.Linear', (['self.feature_size', '(256)'], {}), '(self.feature_size, 256)\n', (6568, 6592), True, 'import torch.nn as nn\n'), ((6623, 6632), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6630, 6632), True, 'import torch.nn as nn\n'), ((6663, 6680), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(1)'], {}), '(256, 1)\n', (6672, 6680), True, 'import torch.nn as nn\n'), ((6711, 6723), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (6721, 6723), True, 'import torch.nn as nn\n'), ((7065, 7160), 'torch.nn.functional.binary_cross_entropy', 'torch.nn.functional.binary_cross_entropy', (['pred_labels', 'target_labels'], {'weight': 'label_weights'}), '(pred_labels, target_labels, weight\n =label_weights)\n', (7105, 7160), False, 'import torch\n'), ((6768, 6782), 'torch.abs', 'torch.abs', (['var'], {}), '(var)\n', (6777, 6782), False, 'import torch\n'), ((5756, 5772), 'torch.stack', 'torch.stack', (['out'], {}), '(out)\n', (5767, 5772), False, 'import torch\n')]
|
import tensorflow as tf
class TtestSelectionTest(tf.test.TestCase):
def testTtestCorrectScore(self):
raise NotImplementedError
if __name__ == '__main__':
tf.test.main()
|
[
"tensorflow.test.main"
] |
[((175, 189), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (187, 189), True, 'import tensorflow as tf\n')]
|
import math
import numpy as np
import openmdao.api as om
from wisdem.commonse.akima import Akima
from wisdem.commonse.csystem import DirectionVector
from wisdem.commonse.utilities import cosd, sind # , linspace_with_deriv, interp_with_deriv, hstack, vstack
from wisdem.commonse.environment import LogWind, PowerWind, LinearWaves
# -----------------
# Helper Functions
# -----------------
# "Experiments on the Flow Past a Circular Cylinder at Very High Reynolds Numbers", Roshko
Re_pt = [
0.00001,
0.0001,
0.0010,
0.0100,
0.0200,
0.1220,
0.2000,
0.3000,
0.4000,
0.5000,
1.0000,
1.5000,
2.0000,
2.5000,
3.0000,
3.5000,
4.0000,
5.0000,
10.0000,
]
cd_pt = [
4.0000,
2.0000,
1.1100,
1.1100,
1.2000,
1.2000,
1.1700,
0.9000,
0.5400,
0.3100,
0.3800,
0.4600,
0.5300,
0.5700,
0.6100,
0.6400,
0.6700,
0.7000,
0.7000,
]
drag_spline = Akima(np.log10(Re_pt), cd_pt, delta_x=0.0) # exact akima because control points do not change
def cylinderDrag(Re):
"""Drag coefficient for a smooth circular cylinder.
Parameters
----------
Re : array_like
Reynolds number
Returns
-------
cd : array_like
drag coefficient (normalized by cylinder diameter)
"""
ReN = Re / 1.0e6
cd = np.zeros_like(Re)
dcd_dRe = np.zeros_like(Re)
idx = ReN > 0
cd[idx], dcd_dRe[idx], _, _ = drag_spline.interp(np.log10(ReN[idx]))
dcd_dRe[idx] /= Re[idx] * math.log(10) # chain rule
return cd, dcd_dRe
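# Quick illustration of the interface (values are only examples):
#
#   Re = np.array([1e5, 5e5, 2e6])
#   cd, dcd_dRe = cylinderDrag(Re)
#
# cd holds the spline-interpolated drag coefficients and dcd_dRe their
# derivatives with respect to Re, which the components below reuse when
# assembling analytic partials.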
# -----------------
# Components
# -----------------
class AeroHydroLoads(om.ExplicitComponent):
"""
Compute summed forces due to wind and wave loads.
Parameters
----------
windLoads_Px : numpy array[nPoints], [N/m]
distributed loads, force per unit length in x-direction
windLoads_Py : numpy array[nPoints], [N/m]
distributed loads, force per unit length in y-direction
windLoads_Pz : numpy array[nPoints], [N/m]
distributed loads, force per unit length in z-direction
windLoads_qdyn : numpy array[nPoints], [N/m**2]
dynamic pressure
windLoads_z : numpy array[nPoints], [m]
corresponding heights
windLoads_beta : float, [deg]
wind/wave angle relative to inertia c.s.
waveLoads_Px : numpy array[nPoints], [N/m]
distributed loads, force per unit length in x-direction
waveLoads_Py : numpy array[nPoints], [N/m]
distributed loads, force per unit length in y-direction
waveLoads_Pz : numpy array[nPoints], [N/m]
distributed loads, force per unit length in z-direction
waveLoads_qdyn : numpy array[nPoints], [N/m**2]
dynamic pressure
waveLoads_z : numpy array[nPoints], [m]
corresponding heights
waveLoads_beta : float, [deg]
wind/wave angle relative to inertia c.s.
z : numpy array[nPoints], [m]
locations along cylinder
yaw : float, [deg]
yaw angle
Returns
-------
Px : numpy array[nPoints], [N/m]
force per unit length in x-direction
Py : numpy array[nPoints], [N/m]
force per unit length in y-direction
Pz : numpy array[nPoints], [N/m]
force per unit length in z-direction
qdyn : numpy array[nPoints], [N/m**2]
dynamic pressure
"""
def initialize(self):
self.options.declare("nPoints")
def setup(self):
nPoints = self.options["nPoints"]
self.add_input("windLoads_Px", np.zeros(nPoints), units="N/m")
self.add_input("windLoads_Py", np.zeros(nPoints), units="N/m")
self.add_input("windLoads_Pz", np.zeros(nPoints), units="N/m")
self.add_input("windLoads_qdyn", np.zeros(nPoints), units="N/m**2")
self.add_input("windLoads_z", np.zeros(nPoints), units="m")
self.add_input("windLoads_beta", 0.0, units="deg")
self.add_input("waveLoads_Px", np.zeros(nPoints), units="N/m")
self.add_input("waveLoads_Py", np.zeros(nPoints), units="N/m")
self.add_input("waveLoads_Pz", np.zeros(nPoints), units="N/m")
self.add_input("waveLoads_qdyn", np.zeros(nPoints), units="N/m**2")
self.add_input("waveLoads_z", np.zeros(nPoints), units="m")
self.add_input("waveLoads_beta", 0.0, units="deg")
self.add_input("z", np.zeros(nPoints), units="m")
self.add_input("yaw", 0.0, units="deg")
self.add_output("Px", np.zeros(nPoints), units="N/m")
self.add_output("Py", np.zeros(nPoints), units="N/m")
self.add_output("Pz", np.zeros(nPoints), units="N/m")
self.add_output("qdyn", np.zeros(nPoints), units="N/m**2")
def compute(self, inputs, outputs):
z = inputs["z"]
windLoads = (
DirectionVector(inputs["windLoads_Px"], inputs["windLoads_Py"], inputs["windLoads_Pz"])
.inertialToWind(inputs["windLoads_beta"])
.windToYaw(inputs["yaw"])
)
waveLoads = (
DirectionVector(inputs["waveLoads_Px"], inputs["waveLoads_Py"], inputs["waveLoads_Pz"])
.inertialToWind(inputs["waveLoads_beta"])
.windToYaw(inputs["yaw"])
)
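        # Both load sets are rotated from the inertial frame into the yaw-aligned
        # frame (inertial -> wind -> yaw) before being interpolated onto the common
        # z stations of the cylinder and summed component-wise below.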
Px = np.interp(z, inputs["windLoads_z"], windLoads.x) + np.interp(z, inputs["waveLoads_z"], waveLoads.x)
Py = np.interp(z, inputs["windLoads_z"], windLoads.y) + np.interp(z, inputs["waveLoads_z"], waveLoads.y)
Pz = np.interp(z, inputs["windLoads_z"], windLoads.z) + np.interp(z, inputs["waveLoads_z"], waveLoads.z)
qdyn = np.interp(z, inputs["windLoads_z"], inputs["windLoads_qdyn"]) + np.interp(
z, inputs["waveLoads_z"], inputs["waveLoads_qdyn"]
)
# The following are redundant, at one point we will consolidate them to something that works for both cylinder (not using vartrees) and jacket (still using vartrees)
outputs["Px"] = Px
outputs["Py"] = Py
outputs["Pz"] = Pz
outputs["qdyn"] = qdyn
# -----------------
class CylinderWindDrag(om.ExplicitComponent):
"""
Compute drag forces on a cylindrical cylinder due to wind.
Parameters
----------
U : numpy array[nPoints], [m/s]
magnitude of wind speed
z : numpy array[nPoints], [m]
heights where wind speed was computed
d : numpy array[nPoints], [m]
corresponding diameter of cylinder section
beta_wind : float, [deg]
corresponding wind angles relative to inertial coordinate system
rho_air : float, [kg/m**3]
air density
    mu_air : float, [kg/(m*s)]
dynamic viscosity of air
cd_usr : float
User input drag coefficient to override Reynolds number based one
Returns
-------
windLoads_Px : numpy array[nPoints], [N/m]
distributed loads, force per unit length in x-direction
windLoads_Py : numpy array[nPoints], [N/m]
distributed loads, force per unit length in y-direction
windLoads_Pz : numpy array[nPoints], [N/m]
distributed loads, force per unit length in z-direction
windLoads_qdyn : numpy array[nPoints], [N/m**2]
dynamic pressure
windLoads_z : numpy array[nPoints], [m]
corresponding heights
windLoads_beta : float, [deg]
wind/wave angle relative to inertia c.s.
"""
def initialize(self):
self.options.declare("nPoints")
def setup(self):
nPoints = self.options["nPoints"]
# variables
self.add_input("U", np.zeros(nPoints), units="m/s")
self.add_input("z", np.zeros(nPoints), units="m")
self.add_input("d", np.zeros(nPoints), units="m")
self.add_input("beta_wind", 0.0, units="deg")
self.add_input("rho_air", 0.0, units="kg/m**3")
self.add_input("mu_air", 0.0, units="kg/(m*s)")
self.add_input("cd_usr", -1.0)
self.add_output("windLoads_Px", np.zeros(nPoints), units="N/m")
self.add_output("windLoads_Py", np.zeros(nPoints), units="N/m")
self.add_output("windLoads_Pz", np.zeros(nPoints), units="N/m")
self.add_output("windLoads_qdyn", np.zeros(nPoints), units="N/m**2")
self.add_output("windLoads_z", np.zeros(nPoints), units="m")
self.add_output("windLoads_beta", 0.0, units="deg")
arange = np.arange(nPoints)
self.declare_partials("windLoads_Px", "U", rows=arange, cols=arange)
self.declare_partials("windLoads_Px", "d", rows=arange, cols=arange)
self.declare_partials("windLoads_Py", "U", rows=arange, cols=arange)
self.declare_partials("windLoads_Py", "d", rows=arange, cols=arange)
self.declare_partials(["windLoads_Px", "windLoads_Py"], "cd_usr", method="fd")
self.declare_partials("windLoads_qdyn", "U", rows=arange, cols=arange)
self.declare_partials("windLoads_qdyn", "rho_air", method="fd")
self.declare_partials("windLoads_z", "z", rows=arange, cols=arange, val=1.0)
self.declare_partials("windLoads_beta", "beta_wind", val=1.0)
def compute(self, inputs, outputs):
rho = inputs["rho_air"]
U = inputs["U"]
d = inputs["d"]
mu = inputs["mu_air"]
beta = inputs["beta_wind"]
# dynamic pressure
q = 0.5 * rho * U ** 2
# Reynolds number and drag
if float(inputs["cd_usr"]) < 0.0:
Re = rho * U * d / mu
cd, dcd_dRe = cylinderDrag(Re)
else:
cd = inputs["cd_usr"]
Re = 1.0
dcd_dRe = 0.0
Fp = q * cd * d
# components of distributed loads
Px = Fp * cosd(beta)
Py = Fp * sind(beta)
Pz = 0 * Fp
# pack data
outputs["windLoads_Px"] = Px
outputs["windLoads_Py"] = Py
outputs["windLoads_Pz"] = Pz
outputs["windLoads_qdyn"] = q
outputs["windLoads_z"] = inputs["z"]
outputs["windLoads_beta"] = beta
def compute_partials(self, inputs, J):
# rename
rho = inputs["rho_air"]
U = inputs["U"]
d = inputs["d"]
mu = inputs["mu_air"]
beta = inputs["beta_wind"]
# dynamic pressure
q = 0.5 * rho * U ** 2
# Reynolds number and drag
if float(inputs["cd_usr"]) < 0.0:
Re = rho * U * d / mu
cd, dcd_dRe = cylinderDrag(Re)
else:
cd = inputs["cd_usr"]
Re = 1.0
dcd_dRe = 0.0
# derivatives
dq_dU = rho * U
const = (dq_dU * cd + q * dcd_dRe * rho * d / mu) * d
dPx_dU = const * cosd(beta)
dPy_dU = const * sind(beta)
const = (cd + dcd_dRe * Re) * q
dPx_dd = const * cosd(beta)
dPy_dd = const * sind(beta)
J["windLoads_Px", "U"] = dPx_dU
J["windLoads_Px", "d"] = dPx_dd
J["windLoads_Py", "U"] = dPy_dU
J["windLoads_Py", "d"] = dPy_dd
J["windLoads_qdyn", "U"] = dq_dU
# -----------------
class CylinderWaveDrag(om.ExplicitComponent):
"""
Compute drag forces on a cylindrical cylinder due to waves.
Parameters
----------
U : numpy array[nPoints], [m/s]
magnitude of wave speed
A : numpy array[nPoints], [m/s**2]
magnitude of wave acceleration
p : numpy array[nPoints], [N/m**2]
pressure oscillation
z : numpy array[nPoints], [m]
heights where wave speed was computed
d : numpy array[nPoints], [m]
corresponding diameter of cylinder section
beta_wave : float, [deg]
corresponding wave angles relative to inertial coordinate system
rho_water : float, [kg/m**3]
water density
    mu_water : float, [kg/(m*s)]
dynamic viscosity of water
cm : float
mass coefficient
cd_usr : float
User input drag coefficient to override Reynolds number based one
Returns
-------
waveLoads_Px : numpy array[nPoints], [N/m]
distributed loads, force per unit length in x-direction
waveLoads_Py : numpy array[nPoints], [N/m]
distributed loads, force per unit length in y-direction
waveLoads_Pz : numpy array[nPoints], [N/m]
distributed loads, force per unit length in z-direction
waveLoads_qdyn : numpy array[nPoints], [N/m**2]
dynamic pressure
waveLoads_pt : numpy array[nPoints], [N/m**2]
total (static+dynamic) pressure
waveLoads_z : numpy array[nPoints], [m]
corresponding heights
waveLoads_beta : float, [deg]
wind/wave angle relative to inertia c.s.
"""
def initialize(self):
self.options.declare("nPoints")
def setup(self):
nPoints = self.options["nPoints"]
# variables
self.add_input("U", np.zeros(nPoints), units="m/s")
self.add_input("A", np.zeros(nPoints), units="m/s**2")
self.add_input("p", np.zeros(nPoints), units="N/m**2")
self.add_input("z", np.zeros(nPoints), units="m")
self.add_input("d", np.zeros(nPoints), units="m")
self.add_input("beta_wave", 0.0, units="deg")
self.add_input("rho_water", 0.0, units="kg/m**3")
self.add_input("mu_water", 0.0, units="kg/(m*s)")
self.add_input("cm", 0.0)
self.add_input("cd_usr", -1.0)
self.add_output("waveLoads_Px", np.zeros(nPoints), units="N/m")
self.add_output("waveLoads_Py", np.zeros(nPoints), units="N/m")
self.add_output("waveLoads_Pz", np.zeros(nPoints), units="N/m")
self.add_output("waveLoads_qdyn", np.zeros(nPoints), units="N/m**2")
self.add_output("waveLoads_pt", np.zeros(nPoints), units="N/m**2")
self.add_output("waveLoads_z", np.zeros(nPoints), units="m")
self.add_output("waveLoads_beta", 0.0, units="deg")
self.declare_partials("*", "rho_water", method="fd")
arange = np.arange(nPoints)
self.declare_partials(["waveLoads_Px", "waveLoads_Py"], ["U", "d", "cm", "cd_usr", "beta_wave"], method="fd")
self.declare_partials("waveLoads_Px", "A", rows=arange, cols=arange)
self.declare_partials("waveLoads_Py", "A", rows=arange, cols=arange)
self.declare_partials("waveLoads_qdyn", "U", rows=arange, cols=arange)
self.declare_partials("waveLoads_pt", "U", rows=arange, cols=arange)
self.declare_partials("waveLoads_pt", "p", rows=arange, cols=arange, val=1.0)
self.declare_partials("waveLoads_z", "z", rows=arange, cols=arange, val=1.0)
self.declare_partials("waveLoads_beta", "beta_wave", val=1.0)
def compute(self, inputs, outputs):
# wlevel = inputs['wlevel']
# if wlevel > 0.0: wlevel *= -1.0
rho = inputs["rho_water"]
U = inputs["U"]
# U0 = inputs['U0']
d = inputs["d"]
# zrel= inputs['z']-wlevel
mu = inputs["mu_water"]
beta = inputs["beta_wave"]
# beta0 = inputs['beta0']
# dynamic pressure
q = 0.5 * rho * U * np.abs(U)
# q0= 0.5*rho*U0**2
# Reynolds number and drag
if float(inputs["cd_usr"]) < 0.0:
Re = rho * U * d / mu
cd, dcd_dRe = cylinderDrag(Re)
else:
cd = inputs["cd_usr"] * np.ones_like(d)
Re = 1.0
dcd_dRe = 0.0
# inertial and drag forces
        Fi = rho * inputs["cm"] * math.pi / 4.0 * d ** 2 * inputs["A"] # Morison's equation
Fd = q * cd * d
Fp = Fi + Fd
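        # Morison loading per unit length: an inertia term proportional to the fluid
        # acceleration A (through the added-mass coefficient cm) plus a quadratic drag
        # term q * cd * d built from the signed dynamic pressure computed above.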
# components of distributed loads
Px = Fp * cosd(beta)
Py = Fp * sind(beta)
Pz = 0.0 * Fp
# FORCES [N/m] AT z=0 m
# idx0 = np.abs(zrel).argmin() # closest index to z=0, used to find d at z=0
# d0 = d[idx0] # initialize
# cd0 = cd[idx0] # initialize
# if (zrel[idx0]<0.) and (idx0< (zrel.size-1)): # point below water
# d0 = np.mean(d[idx0:idx0+2])
# cd0 = np.mean(cd[idx0:idx0+2])
# elif (zrel[idx0]>0.) and (idx0>0): # point above water
# d0 = np.mean(d[idx0-1:idx0+1])
# cd0 = np.mean(cd[idx0-1:idx0+1])
# Fi0 = rho*inputs['cm']*math.pi/4.0*d0**2*inputs['A0'] # Morrison's equation
# Fd0 = q0*cd0*d0
# Fp0 = Fi0 + Fd0
# Px0 = Fp0*cosd(beta0)
# Py0 = Fp0*sind(beta0)
# Pz0 = 0.*Fp0
# Store qties at z=0 MSL
# outputs['waveLoads_Px0'] = Px0
# outputs['waveLoads_Py0'] = Py0
# outputs['waveLoads_Pz0'] = Pz0
# outputs['waveLoads_qdyn0'] = q0
# outputs['waveLoads_beta0'] = beta0
# pack data
outputs["waveLoads_Px"] = Px
outputs["waveLoads_Py"] = Py
outputs["waveLoads_Pz"] = Pz
outputs["waveLoads_qdyn"] = q
outputs["waveLoads_pt"] = q + inputs["p"]
outputs["waveLoads_z"] = inputs["z"]
outputs["waveLoads_beta"] = beta
def compute_partials(self, inputs, J):
# wlevel = inputs['wlevel']
# if wlevel > 0.0: wlevel *= -1.0
rho = inputs["rho_water"]
U = inputs["U"]
# U0 = inputs['U0']
d = inputs["d"]
# zrel= inputs['z']-wlevel
mu = inputs["mu_water"]
beta = inputs["beta_wave"]
# beta0 = inputs['beta0']
# dynamic pressure
q = 0.5 * rho * U ** 2
# q0= 0.5*rho*U0**2
# Reynolds number and drag
if float(inputs["cd_usr"]) < 0.0:
cd = inputs["cd_usr"] * np.ones_like(d)
Re = 1.0
dcd_dRe = 0.0
else:
Re = rho * U * d / mu
cd, dcd_dRe = cylinderDrag(Re)
# derivatives
dq_dU = rho * U
const = (dq_dU * cd + q * dcd_dRe * rho * d / mu) * d
dPx_dU = const * cosd(beta)
dPy_dU = const * sind(beta)
const = (cd + dcd_dRe * Re) * q + rho * inputs["cm"] * math.pi / 4.0 * 2 * d * inputs["A"]
dPx_dd = const * cosd(beta)
dPy_dd = const * sind(beta)
const = rho * inputs["cm"] * math.pi / 4.0 * d ** 2
dPx_dA = const * cosd(beta)
dPy_dA = const * sind(beta)
J["waveLoads_Px", "A"] = dPx_dA
J["waveLoads_Py", "A"] = dPy_dA
J["waveLoads_qdyn", "U"] = dq_dU
J["waveLoads_pt", "U"] = dq_dU
# ___________________________________________#
class CylinderEnvironment(om.Group):
def initialize(self):
self.options.declare("wind", default="power")
self.options.declare("nPoints")
self.options.declare("water_flag", default=True)
def setup(self):
nPoints = self.options["nPoints"]
wind = self.options["wind"]
water_flag = self.options["water_flag"]
self.set_input_defaults("z0", 0.0)
self.set_input_defaults("cd_usr", -1.0)
self.set_input_defaults("yaw", 0.0, units="deg")
self.set_input_defaults("beta_wind", 0.0, units="deg")
self.set_input_defaults("rho_air", 1.225, units="kg/m**3")
self.set_input_defaults("mu_air", 1.81206e-5, units="kg/m/s")
self.set_input_defaults("shearExp", 0.2)
if water_flag:
self.set_input_defaults("beta_wave", 0.0, units="deg")
self.set_input_defaults("rho_water", 1025.0, units="kg/m**3")
self.set_input_defaults("mu_water", 1.08e-3, units="kg/m/s")
# Wind profile and loads
promwind = ["Uref", "zref", "z", "z0"]
if wind is None or wind.lower() in ["power", "powerwind", ""]:
self.add_subsystem("wind", PowerWind(nPoints=nPoints), promotes=promwind + ["shearExp"])
elif wind.lower() == "logwind":
self.add_subsystem("wind", LogWind(nPoints=nPoints), promotes=promwind)
else:
raise ValueError("Unknown wind type, " + wind)
self.add_subsystem(
"windLoads",
CylinderWindDrag(nPoints=nPoints),
promotes=["cd_usr", "beta_wind", "rho_air", "mu_air", "z", "d"],
)
# Wave profile and loads
if water_flag:
self.add_subsystem(
"wave",
LinearWaves(nPoints=nPoints),
promotes=[
"z",
"Uc",
"Hsig_wave",
"Tsig_wave",
"rho_water",
("z_floor", "water_depth"),
("z_surface", "z0"),
],
)
self.add_subsystem(
"waveLoads",
CylinderWaveDrag(nPoints=nPoints),
promotes=["cm", "cd_usr", "beta_wave", "rho_water", "mu_water", "z", "d"],
)
# Combine all loads
self.add_subsystem(
"distLoads", AeroHydroLoads(nPoints=nPoints), promotes=["Px", "Py", "Pz", "qdyn", "yaw", "z"]
)
# Connections
self.connect("wind.U", "windLoads.U")
if water_flag:
self.connect("wave.U", "waveLoads.U")
self.connect("wave.A", "waveLoads.A")
self.connect("wave.p", "waveLoads.p")
self.connect("windLoads.windLoads_Px", "distLoads.windLoads_Px")
self.connect("windLoads.windLoads_Py", "distLoads.windLoads_Py")
self.connect("windLoads.windLoads_Pz", "distLoads.windLoads_Pz")
self.connect("windLoads.windLoads_qdyn", "distLoads.windLoads_qdyn")
self.connect("windLoads.windLoads_beta", "distLoads.windLoads_beta")
self.connect("windLoads.windLoads_z", "distLoads.windLoads_z")
if water_flag:
self.connect("waveLoads.waveLoads_Px", "distLoads.waveLoads_Px")
self.connect("waveLoads.waveLoads_Py", "distLoads.waveLoads_Py")
self.connect("waveLoads.waveLoads_Pz", "distLoads.waveLoads_Pz")
self.connect("waveLoads.waveLoads_pt", "distLoads.waveLoads_qdyn")
self.connect("waveLoads.waveLoads_beta", "distLoads.waveLoads_beta")
self.connect("waveLoads.waveLoads_z", "distLoads.waveLoads_z")
def main():
# initialize problem
U = np.array([20.0, 25.0, 30.0])
z = np.array([10.0, 30.0, 80.0])
d = np.array([5.5, 4.0, 3.0])
beta = np.array([45.0, 45.0, 45.0])
rho = 1.225
mu = 1.7934e-5
# cd_usr = 0.7
nPoints = len(z)
prob = om.Problem()
root = prob.model = om.Group()
    root.add_subsystem("p1", CylinderWindDrag(nPoints=nPoints))
prob.setup()
prob["p1.U"] = U
prob["p1.z"] = z
prob["p1.d"] = d
prob["p1.beta"] = beta
prob["p1.rho"] = rho
prob["p1.mu"] = mu
# prob['p1.cd_usr'] = cd_usr
# run
    prob.run_model()
# out
    Re = prob["p1.rho_air"] * prob["p1.U"] * prob["p1.d"] / prob["p1.mu_air"]
cd, dcd_dRe = cylinderDrag(Re)
print(cd)
import matplotlib.pyplot as plt
plt.plot(prob["p1.windLoads_Px"], prob["p1.windLoads_z"])
plt.plot(prob["p1.windLoads_Py"], prob["p1.windLoads_z"])
plt.plot(prob["p1.windLoads_qdyn"], prob["p1.windLoads_z"])
plt.show()
if __name__ == "__main__":
main()
|
[
"numpy.abs",
"wisdem.commonse.environment.PowerWind",
"numpy.arange",
"numpy.interp",
"openmdao.api.Group",
"numpy.zeros_like",
"math.log",
"numpy.log10",
"openmdao.api.Problem",
"matplotlib.pyplot.show",
"numpy.ones_like",
"wisdem.commonse.environment.LinearWaves",
"wisdem.commonse.csystem.DirectionVector",
"wisdem.commonse.utilities.cosd",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.array",
"wisdem.commonse.environment.LogWind",
"wisdem.commonse.utilities.sind"
] |
[((986, 1001), 'numpy.log10', 'np.log10', (['Re_pt'], {}), '(Re_pt)\n', (994, 1001), True, 'import numpy as np\n'), ((1375, 1392), 'numpy.zeros_like', 'np.zeros_like', (['Re'], {}), '(Re)\n', (1388, 1392), True, 'import numpy as np\n'), ((1407, 1424), 'numpy.zeros_like', 'np.zeros_like', (['Re'], {}), '(Re)\n', (1420, 1424), True, 'import numpy as np\n'), ((21955, 21983), 'numpy.array', 'np.array', (['[20.0, 25.0, 30.0]'], {}), '([20.0, 25.0, 30.0])\n', (21963, 21983), True, 'import numpy as np\n'), ((21992, 22020), 'numpy.array', 'np.array', (['[10.0, 30.0, 80.0]'], {}), '([10.0, 30.0, 80.0])\n', (22000, 22020), True, 'import numpy as np\n'), ((22029, 22054), 'numpy.array', 'np.array', (['[5.5, 4.0, 3.0]'], {}), '([5.5, 4.0, 3.0])\n', (22037, 22054), True, 'import numpy as np\n'), ((22067, 22095), 'numpy.array', 'np.array', (['[45.0, 45.0, 45.0]'], {}), '([45.0, 45.0, 45.0])\n', (22075, 22095), True, 'import numpy as np\n'), ((22184, 22196), 'openmdao.api.Problem', 'om.Problem', ([], {}), '()\n', (22194, 22196), True, 'import openmdao.api as om\n'), ((22222, 22232), 'openmdao.api.Group', 'om.Group', ([], {}), '()\n', (22230, 22232), True, 'import openmdao.api as om\n'), ((22672, 22729), 'matplotlib.pyplot.plot', 'plt.plot', (["prob['p1.windLoads_Px']", "prob['p1.windLoads_z']"], {}), "(prob['p1.windLoads_Px'], prob['p1.windLoads_z'])\n", (22680, 22729), True, 'import matplotlib.pyplot as plt\n'), ((22734, 22791), 'matplotlib.pyplot.plot', 'plt.plot', (["prob['p1.windLoads_Py']", "prob['p1.windLoads_z']"], {}), "(prob['p1.windLoads_Py'], prob['p1.windLoads_z'])\n", (22742, 22791), True, 'import matplotlib.pyplot as plt\n'), ((22796, 22855), 'matplotlib.pyplot.plot', 'plt.plot', (["prob['p1.windLoads_qdyn']", "prob['p1.windLoads_z']"], {}), "(prob['p1.windLoads_qdyn'], prob['p1.windLoads_z'])\n", (22804, 22855), True, 'import matplotlib.pyplot as plt\n'), ((22860, 22870), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22868, 22870), True, 'import matplotlib.pyplot as plt\n'), ((1496, 1514), 'numpy.log10', 'np.log10', (['ReN[idx]'], {}), '(ReN[idx])\n', (1504, 1514), True, 'import numpy as np\n'), ((1546, 1558), 'math.log', 'math.log', (['(10)'], {}), '(10)\n', (1554, 1558), False, 'import math\n'), ((8296, 8314), 'numpy.arange', 'np.arange', (['nPoints'], {}), '(nPoints)\n', (8305, 8314), True, 'import numpy as np\n'), ((13804, 13822), 'numpy.arange', 'np.arange', (['nPoints'], {}), '(nPoints)\n', (13813, 13822), True, 'import numpy as np\n'), ((3552, 3569), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (3560, 3569), True, 'import numpy as np\n'), ((3623, 3640), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (3631, 3640), True, 'import numpy as np\n'), ((3694, 3711), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (3702, 3711), True, 'import numpy as np\n'), ((3767, 3784), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (3775, 3784), True, 'import numpy as np\n'), ((3840, 3857), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (3848, 3857), True, 'import numpy as np\n'), ((3968, 3985), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (3976, 3985), True, 'import numpy as np\n'), ((4039, 4056), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4047, 4056), True, 'import numpy as np\n'), ((4110, 4127), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4118, 4127), True, 'import numpy as np\n'), ((4183, 4200), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4191, 4200), 
True, 'import numpy as np\n'), ((4256, 4273), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4264, 4273), True, 'import numpy as np\n'), ((4373, 4390), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4381, 4390), True, 'import numpy as np\n'), ((4482, 4499), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4490, 4499), True, 'import numpy as np\n'), ((4544, 4561), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4552, 4561), True, 'import numpy as np\n'), ((4606, 4623), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4614, 4623), True, 'import numpy as np\n'), ((4670, 4687), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (4678, 4687), True, 'import numpy as np\n'), ((5232, 5280), 'numpy.interp', 'np.interp', (['z', "inputs['windLoads_z']", 'windLoads.x'], {}), "(z, inputs['windLoads_z'], windLoads.x)\n", (5241, 5280), True, 'import numpy as np\n'), ((5283, 5331), 'numpy.interp', 'np.interp', (['z', "inputs['waveLoads_z']", 'waveLoads.x'], {}), "(z, inputs['waveLoads_z'], waveLoads.x)\n", (5292, 5331), True, 'import numpy as np\n'), ((5345, 5393), 'numpy.interp', 'np.interp', (['z', "inputs['windLoads_z']", 'windLoads.y'], {}), "(z, inputs['windLoads_z'], windLoads.y)\n", (5354, 5393), True, 'import numpy as np\n'), ((5396, 5444), 'numpy.interp', 'np.interp', (['z', "inputs['waveLoads_z']", 'waveLoads.y'], {}), "(z, inputs['waveLoads_z'], waveLoads.y)\n", (5405, 5444), True, 'import numpy as np\n'), ((5458, 5506), 'numpy.interp', 'np.interp', (['z', "inputs['windLoads_z']", 'windLoads.z'], {}), "(z, inputs['windLoads_z'], windLoads.z)\n", (5467, 5506), True, 'import numpy as np\n'), ((5509, 5557), 'numpy.interp', 'np.interp', (['z', "inputs['waveLoads_z']", 'waveLoads.z'], {}), "(z, inputs['waveLoads_z'], waveLoads.z)\n", (5518, 5557), True, 'import numpy as np\n'), ((5573, 5634), 'numpy.interp', 'np.interp', (['z', "inputs['windLoads_z']", "inputs['windLoads_qdyn']"], {}), "(z, inputs['windLoads_z'], inputs['windLoads_qdyn'])\n", (5582, 5634), True, 'import numpy as np\n'), ((5637, 5698), 'numpy.interp', 'np.interp', (['z', "inputs['waveLoads_z']", "inputs['waveLoads_qdyn']"], {}), "(z, inputs['waveLoads_z'], inputs['waveLoads_qdyn'])\n", (5646, 5698), True, 'import numpy as np\n'), ((7502, 7519), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (7510, 7519), True, 'import numpy as np\n'), ((7562, 7579), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (7570, 7579), True, 'import numpy as np\n'), ((7620, 7637), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (7628, 7637), True, 'import numpy as np\n'), ((7896, 7913), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (7904, 7913), True, 'import numpy as np\n'), ((7968, 7985), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (7976, 7985), True, 'import numpy as np\n'), ((8040, 8057), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (8048, 8057), True, 'import numpy as np\n'), ((8114, 8131), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (8122, 8131), True, 'import numpy as np\n'), ((8188, 8205), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (8196, 8205), True, 'import numpy as np\n'), ((9600, 9610), 'wisdem.commonse.utilities.cosd', 'cosd', (['beta'], {}), '(beta)\n', (9604, 9610), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((9629, 9639), 'wisdem.commonse.utilities.sind', 'sind', (['beta'], {}), '(beta)\n', (9633, 9639), False, 'from 
wisdem.commonse.utilities import cosd, sind\n'), ((10566, 10576), 'wisdem.commonse.utilities.cosd', 'cosd', (['beta'], {}), '(beta)\n', (10570, 10576), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((10602, 10612), 'wisdem.commonse.utilities.sind', 'sind', (['beta'], {}), '(beta)\n', (10606, 10612), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((10679, 10689), 'wisdem.commonse.utilities.cosd', 'cosd', (['beta'], {}), '(beta)\n', (10683, 10689), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((10715, 10725), 'wisdem.commonse.utilities.sind', 'sind', (['beta'], {}), '(beta)\n', (10719, 10725), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((12709, 12726), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (12717, 12726), True, 'import numpy as np\n'), ((12769, 12786), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (12777, 12786), True, 'import numpy as np\n'), ((12832, 12849), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (12840, 12849), True, 'import numpy as np\n'), ((12895, 12912), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (12903, 12912), True, 'import numpy as np\n'), ((12953, 12970), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (12961, 12970), True, 'import numpy as np\n'), ((13267, 13284), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (13275, 13284), True, 'import numpy as np\n'), ((13339, 13356), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (13347, 13356), True, 'import numpy as np\n'), ((13411, 13428), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (13419, 13428), True, 'import numpy as np\n'), ((13485, 13502), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (13493, 13502), True, 'import numpy as np\n'), ((13560, 13577), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (13568, 13577), True, 'import numpy as np\n'), ((13634, 13651), 'numpy.zeros', 'np.zeros', (['nPoints'], {}), '(nPoints)\n', (13642, 13651), True, 'import numpy as np\n'), ((14917, 14926), 'numpy.abs', 'np.abs', (['U'], {}), '(U)\n', (14923, 14926), True, 'import numpy as np\n'), ((15459, 15469), 'wisdem.commonse.utilities.cosd', 'cosd', (['beta'], {}), '(beta)\n', (15463, 15469), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((15488, 15498), 'wisdem.commonse.utilities.sind', 'sind', (['beta'], {}), '(beta)\n', (15492, 15498), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((17679, 17689), 'wisdem.commonse.utilities.cosd', 'cosd', (['beta'], {}), '(beta)\n', (17683, 17689), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((17715, 17725), 'wisdem.commonse.utilities.sind', 'sind', (['beta'], {}), '(beta)\n', (17719, 17725), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((17851, 17861), 'wisdem.commonse.utilities.cosd', 'cosd', (['beta'], {}), '(beta)\n', (17855, 17861), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((17887, 17897), 'wisdem.commonse.utilities.sind', 'sind', (['beta'], {}), '(beta)\n', (17891, 17897), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((17984, 17994), 'wisdem.commonse.utilities.cosd', 'cosd', (['beta'], {}), '(beta)\n', (17988, 17994), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((18020, 18030), 'wisdem.commonse.utilities.sind', 'sind', (['beta'], {}), '(beta)\n', (18024, 18030), False, 'from wisdem.commonse.utilities import cosd, sind\n'), ((15160, 15175), 'numpy.ones_like', 
'np.ones_like', (['d'], {}), '(d)\n', (15172, 15175), True, 'import numpy as np\n'), ((17391, 17406), 'numpy.ones_like', 'np.ones_like', (['d'], {}), '(d)\n', (17403, 17406), True, 'import numpy as np\n'), ((19433, 19459), 'wisdem.commonse.environment.PowerWind', 'PowerWind', ([], {'nPoints': 'nPoints'}), '(nPoints=nPoints)\n', (19442, 19459), False, 'from wisdem.commonse.environment import LogWind, PowerWind, LinearWaves\n'), ((20011, 20039), 'wisdem.commonse.environment.LinearWaves', 'LinearWaves', ([], {'nPoints': 'nPoints'}), '(nPoints=nPoints)\n', (20022, 20039), False, 'from wisdem.commonse.environment import LogWind, PowerWind, LinearWaves\n'), ((19575, 19599), 'wisdem.commonse.environment.LogWind', 'LogWind', ([], {'nPoints': 'nPoints'}), '(nPoints=nPoints)\n', (19582, 19599), False, 'from wisdem.commonse.environment import LogWind, PowerWind, LinearWaves\n'), ((4804, 4896), 'wisdem.commonse.csystem.DirectionVector', 'DirectionVector', (["inputs['windLoads_Px']", "inputs['windLoads_Py']", "inputs['windLoads_Pz']"], {}), "(inputs['windLoads_Px'], inputs['windLoads_Py'], inputs[\n 'windLoads_Pz'])\n", (4819, 4896), False, 'from wisdem.commonse.csystem import DirectionVector\n'), ((5028, 5120), 'wisdem.commonse.csystem.DirectionVector', 'DirectionVector', (["inputs['waveLoads_Px']", "inputs['waveLoads_Py']", "inputs['waveLoads_Pz']"], {}), "(inputs['waveLoads_Px'], inputs['waveLoads_Py'], inputs[\n 'waveLoads_Pz'])\n", (5043, 5120), False, 'from wisdem.commonse.csystem import DirectionVector\n')]
|
import re
import collections
import string
# copied from https://github.com/wenhuchen/HybridQA/blob/master/evaluate_script.py
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
if not s:
return []
return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
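    # Token-level overlap: intersecting the two Counters keeps, for each token,
    # the number of times it occurs in both the gold and the predicted answer.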
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def get_raw_scores(examples, reference):
"""
Computes the exact and f1 scores from the examples and the model predictions
"""
exact_scores = {}
f1_scores = {}
for example in examples:
qas_id = example['question_id']
gold_answers = [reference['reference'][qas_id]]
prediction = example['pred']
exact_scores[qas_id] = max(compute_exact(a, prediction) for a in gold_answers)
f1_scores[qas_id] = max(compute_f1(a, prediction) for a in gold_answers)
qid_list = reference['reference'].keys()
total = len(qid_list)
table_list = reference['table']
passage_list = reference['passage']
return collections.OrderedDict(
[
("table exact", 100.0 * sum(exact_scores[k] for k in table_list) / len(table_list)),
("table f1", 100.0 * sum(f1_scores[k] for k in table_list) / len(table_list)),
("passage exact", 100.0 * sum(exact_scores[k] for k in passage_list) / len(passage_list)),
("passage f1", 100.0 * sum(f1_scores[k] for k in passage_list) / len(passage_list)),
("total exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
("total f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
("total", total),
]
)
class EvaluateTool(object):
def __init__(self, args):
self.args = args
def evaluate(self, preds, golds, section):
summary = {}
exact_scores = {}
f1_scores = {}
for pred, gold in zip(preds, golds):
qas_id = gold['id']
gold_answers = [gold['answer_text']]
exact_scores[qas_id] = max(compute_exact(a, pred) for a in gold_answers)
f1_scores[qas_id] = max(compute_f1(a, pred) for a in gold_answers)
total = len(golds)
qid_list = list(exact_scores.keys())
summary["acc"] = sum(exact_scores[k] for k in qid_list) / total
summary["f1"] = sum(f1_scores[k] for k in qid_list) / total
return summary
|
[
"collections.Counter",
"re.sub",
"re.compile"
] |
[((287, 329), 're.compile', 're.compile', (['"""\\\\b(a|an|the)\\\\b"""', 're.UNICODE'], {}), "('\\\\b(a|an|the)\\\\b', re.UNICODE)\n", (297, 329), False, 'import re\n'), ((345, 369), 're.sub', 're.sub', (['regex', '""" """', 'text'], {}), "(regex, ' ', text)\n", (351, 369), False, 'import re\n'), ((1034, 1064), 'collections.Counter', 'collections.Counter', (['gold_toks'], {}), '(gold_toks)\n', (1053, 1064), False, 'import collections\n'), ((1067, 1097), 'collections.Counter', 'collections.Counter', (['pred_toks'], {}), '(pred_toks)\n', (1086, 1097), False, 'import collections\n')]
|
import os
def write_mol_file(material, simulation_path):
"""Writes .mol file for structural information."""
s = material.structure
file_name = os.path.join(simulation_path, "{}.mol".format(material.uuid))
with open(file_name, "w") as mol_file:
mol_file.write(
" Molecule_name: {}\n".format(material.uuid) +
"\n" +
" Coord_Info: Listed Cartesian None\n" +
" {}\n".format(len(s.atom_sites)))
for i in range(len(s.atom_sites)):
a = s.atom_sites[i]
mol_file.write(
"{:6} {:10.4f} {:10.4f} {:10.4f} {:5} {:10.8f} 0 0\n".format(
i + 1, round(a.x * s.a, 4), round(a.y * s.b, 4), round(a.z * s.c, 4),
str(a.atom_types.atom_type_index()), round(a.q, 8)))
mol_file.write(
"\n" +
"\n" +
"\n" +
" Fundcell_Info: Listed\n" +
" {:10.4f} {:10.4f} {:10.4f}\n".format(
round(s.a, 4), round(s.b, 4), round(s.c, 4)) +
" 90.0000 90.0000 90.0000\n" +
" 0.00000 0.00000 0.00000\n" +
" {:10.4f} {:10.4f} {:10.4f}\n".format(
round(s.a, 4), round(s.b, 4), round(s.c, 4)) +
"\n")
def write_mixing_rules(structure, simulation_path):
"""Writes .def file for forcefield information."""
adsorbate_LJ_atoms = [
['N_n2', 36.0, 3.31],
['C_co2', 27.0, 2.80],
['O_co2', 79.0, 3.05],
['CH4_sp3', 158.5, 3.72],
['He', 10.9, 2.64],
['H_com', 36.7, 2.958],
['Kr', 167.06, 3.924],
['Xe', 110.704, 3.690]
]
adsorbate_none_atoms = ['N_com', 'H_h2']
file_name = os.path.join(simulation_path, 'force_field_mixing_rules.def')
with open(file_name, "w") as mixing_rules_file:
mixing_rules_file.write(
"# general rule for shifted vs truncated\n" +
"shifted\n" +
"# general rule tailcorrections\n" +
"no\n" +
"# number of defined interactions\n" +
"{}\n".format(len(structure.atom_types) + 10) +
"# type interaction, parameters. " +
"IMPORTANT: define shortest matches first, so" +
" that more specific ones overwrites these\n"
)
for lj in structure.atom_types:
mixing_rules_file.write(
"{0:12} lennard-jones {1:8f} {2:8f}\n".format(lj.atom_type_index(),
round(lj.epsilon, 4), round(lj.sigma, 4)))
for at in adsorbate_LJ_atoms:
mixing_rules_file.write(
"{0:12} lennard-jones {1:8f} {2:8f}\n".format(at[0], at[1], at[2])
)
for at in adsorbate_none_atoms:
mixing_rules_file.write(
"{0:12} none\n".format(at)
)
mixing_rules_file.write(
"# general mixing rule for Lennard-Jones\n" +
"Lorentz-Berthelot")
def write_pseudo_atoms(structure, simulation_path):
"""Writes .def file for chemical information.
Args:
simulation_path (str): path to pseudo atoms definitions file
Returns:
NOTE: ALL CHARGES ARE 0. IN THIS VERSION.
"""
temporary_charge = 0.
file_name = os.path.join(simulation_path, 'pseudo_atoms.def')
with open(file_name, "w") as pseudo_atoms_file:
pseudo_atoms_file.write(
"#number of pseudo atoms\n" +
"%s\n" % (len(structure.atom_types) + 10) +
"#type print as chem oxidation mass charge polarization B-factor radii " +
"connectivity anisotropic anisotrop-type tinker-type\n")
for a in structure.atom_types:
pseudo_atoms_file.write(
"{0:7} yes C C 0 12.0 0.0 0.0 1.0 1.0 0 0 absolute 0\n".format(
str(a.atom_type_index())))
pseudo_atoms_file.write(
"N_n2 yes N N 0 14.00674 -0.4048 0.0 1.0 0.7 0 0 relative 0\n" +
"N_com no N - 0 0.0 0.8096 0.0 1.0 0.7 0 0 relative 0\n" +
"C_co2 yes C C 0 12.0 0.70 0.0 1.0 0.720 0 0 relative 0\n" +
"O_co2 yes O O 0 15.9994 -0.35 0.0 1.0 0.68 0 0 relative 0\n" +
"CH4_sp3 yes C C 0 16.04246 0.0 0.0 1.0 1.00 0 0 relative 0\n" +
"He yes He He 0 4.002602 0.0 0.0 1.0 1.0 0 0 relative 0\n" +
"H_h2 yes H H 0 1.00794 0.468 0.0 1.0 0.7 0 0 relative 0\n" +
"H_com no H H 0 0.0 0.936 0.0 1.0 0.7 0 0 relative 0\n" +
"Xe yes Xe Xe 0 131.293 0.0 0.0 1.0 2.459 0 0 relative 0\n" +
"Kr yes Kr Kr 0 83.798 0.0 0.0 1.0 2.27 0 0 relative 0\n"
)
def write_force_field(simulation_path):
"""Writes .def file to overwrite LJ-type interactions.
Args:
        simulation_path (str): directory in which force_field.def is written
NOTE: NO INTERACTIONS ARE OVERWRITTEN BY DEFAULT.
"""
file_name = os.path.join(simulation_path, 'force_field.def')
with open(file_name, "w") as force_field_file:
force_field_file.write(
"# rules to overwrite\n" +
"0\n" +
"# number of defined interactions\n" +
"0\n" +
"# mixing rules to overwrite\n" +
"0")
|
[
"os.path.join"
] |
[((1993, 2054), 'os.path.join', 'os.path.join', (['simulation_path', '"""force_field_mixing_rules.def"""'], {}), "(simulation_path, 'force_field_mixing_rules.def')\n", (2005, 2054), False, 'import os\n'), ((3539, 3588), 'os.path.join', 'os.path.join', (['simulation_path', '"""pseudo_atoms.def"""'], {}), "(simulation_path, 'pseudo_atoms.def')\n", (3551, 3588), False, 'import os\n'), ((5463, 5511), 'os.path.join', 'os.path.join', (['simulation_path', '"""force_field.def"""'], {}), "(simulation_path, 'force_field.def')\n", (5475, 5511), False, 'import os\n')]
|
#!/usr/bin/env python
"""
A module for embedding videos into the VGG-16 feature space.
Info:
type: eta.core.types.Module
version: 0.1.0
Copyright 2017-2021, Voxel51, Inc.
voxel51.com
"""
# pragma pylint: disable=redefined-builtin
# pragma pylint: disable=unused-wildcard-import
# pragma pylint: disable=wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
# pragma pylint: enable=redefined-builtin
# pragma pylint: enable=unused-wildcard-import
# pragma pylint: enable=wildcard-import
import logging
import sys
from eta.core.config import Config
import eta.core.features as etaf
import eta.core.module as etam
import eta.core.utils as etau
import eta.core.vgg16 as etav
logger = logging.getLogger(__name__)
class ModuleConfig(etam.BaseModuleConfig):
"""Module configuration settings.
Attributes:
data (DataConfig)
parameters (ParametersConfig)
"""
def __init__(self, d):
super(ModuleConfig, self).__init__(d)
self.data = self.parse_object_array(d, "data", DataConfig)
self.parameters = self.parse_object(d, "parameters", ParametersConfig)
class DataConfig(Config):
"""Data configuration settings.
Inputs:
video_path (eta.core.types.Video): the input video
Outputs:
backing_dir (eta.core.types.Directory): the directory to write the
embeddings
"""
def __init__(self, d):
self.video_path = self.parse_string(d, "video_path")
self.backing_dir = self.parse_string(d, "backing_dir")
class ParametersConfig(Config):
"""Parameter configuration settings.
Parameters:
vgg16 (eta.core.types.Config): [None] an optional VGG16FeaturizerConfig
describing the VGG16Featurizer to use
crop_box (eta.core.types.Config): [None] an optional region of interest
to extract from each frame before embedding
"""
def __init__(self, d):
self.vgg16 = self.parse_object(
d, "vgg16", etav.VGG16FeaturizerConfig, default=None
)
self.crop_box = self.parse_object(
d, "crop_box", RectangleConfig, default=None
)
class Point2Config(Config):
"""A simple 2D point."""
def __init__(self, d):
self.x = self.parse_number(d, "x")
self.y = self.parse_number(d, "y")
class RectangleConfig(Config):
"""A rectangle defined by two Point2Configs."""
def __init__(self, d):
self.top_left = self.parse_object(d, "top_left", Point2Config)
self.bottom_right = self.parse_object(d, "bottom_right", Point2Config)
def _embed_vgg16(config):
# Build featurizer
frame_featurizer = (
etaf.ImageFeaturizerConfig.builder()
.set(type=etau.get_class_name(etav.VGG16Featurizer))
.set(config=config.parameters.vgg16)
.validate()
)
featurizer = etaf.CachingVideoFeaturizer(
etaf.CachingVideoFeaturizerConfig.builder()
.set(frame_featurizer=frame_featurizer)
.set(delete_backing_directory=False)
.build()
)
# Set crop box, if provided
if config.parameters.crop_box is not None:
featurizer.frame_preprocessor = _crop(config.parameters.crop_box)
with featurizer:
for data in config.data:
# Manually set backing directory for each video
featurizer.set_manual_backing_dir(data.backing_dir)
logger.info("Featurizing video '%s'", data.video_path)
featurizer.featurize(data.video_path)
def _crop(crop_box):
def crop_image(img):
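        # The crop box stores fractional coordinates, so scale by the image
        # height/width before slicing out the region of interest.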
tl = crop_box.top_left
br = crop_box.bottom_right
xs = img.shape[1]
ys = img.shape[0]
return img[
int(tl.y * ys) : int(br.y * ys), int(tl.x * xs) : int(br.x * xs),
]
return crop_image
def run(config_path, pipeline_config_path=None):
"""Run the embed_vgg16 module.
Args:
config_path: path to a config file containing the fields to define
both an ModuleConfig and a VGG16FeaturizerConfig
pipeline_config_path: optional path to a PipelineConfig file
"""
config = ModuleConfig.from_json(config_path)
etam.setup(config, pipeline_config_path=pipeline_config_path)
_embed_vgg16(config)
if __name__ == "__main__":
run(*sys.argv[1:]) # pylint: disable=no-value-for-parameter
|
[
"eta.core.features.CachingVideoFeaturizerConfig.builder",
"eta.core.module.setup",
"eta.core.utils.get_class_name",
"eta.core.features.ImageFeaturizerConfig.builder",
"logging.getLogger"
] |
[((829, 856), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (846, 856), False, 'import logging\n'), ((4290, 4351), 'eta.core.module.setup', 'etam.setup', (['config'], {'pipeline_config_path': 'pipeline_config_path'}), '(config, pipeline_config_path=pipeline_config_path)\n', (4300, 4351), True, 'import eta.core.module as etam\n'), ((2796, 2832), 'eta.core.features.ImageFeaturizerConfig.builder', 'etaf.ImageFeaturizerConfig.builder', ([], {}), '()\n', (2830, 2832), True, 'import eta.core.features as etaf\n'), ((2851, 2892), 'eta.core.utils.get_class_name', 'etau.get_class_name', (['etav.VGG16Featurizer'], {}), '(etav.VGG16Featurizer)\n', (2870, 2892), True, 'import eta.core.utils as etau\n'), ((3019, 3062), 'eta.core.features.CachingVideoFeaturizerConfig.builder', 'etaf.CachingVideoFeaturizerConfig.builder', ([], {}), '()\n', (3060, 3062), True, 'import eta.core.features as etaf\n')]
|
import sys
import numpy as np
import os
import random
import time
import pygame
import math
from constants import *
from chromosome import Chromosome
from bird import Bird
from selection import *
from pillar import Pillar
from pygame.locals import *
# ata = np.zeros((580,600,2))
# y: [-280,300]
# x: [600,0]
"""Agent for the Q-learning flappym with the matrices and matrix updates
"""
class QAgent:
def __init__(self, discount=0.9):
"""Initialize matrix
Args:
discount (float): discount parameter value
"""
        # Q values indexed by (action, binned y, x)
        self.QMatrix = np.zeros((2, 118, 600))
self.discount = discount
self.stateCount = np.zeros((580, 600))
def updateQMatrix(self, action, state, value):
"""Update the q matrix based on the parameters
Args:
action (bool): action boolean
state (list): list depicting the state
value (float): current state q value
"""
y = int(state[0] / 5)
x = state[1]
self.QMatrix[action, y, x] = value
def updateStateCount(self, state):
"""Update the times one has been on this state
Args:
state (list): list depicting the current state
Returns:
            float: number of times this state has been visited
"""
self.stateCount[state[0], state[1]] += 1
return self.stateCount[state[0], state[1]]
def loadMatrix(self, name):
"""Load a q-matrix from the file
Args:
name (str): filename for the matrix to load to the object
"""
self.QMatrix = np.load("flappyQData//" + name + ".npy")
def getFromQ(self, action, state):
"""Get any item from the matrix
Args:
action (bool): boolean action
            state (list): state of the game
        Returns:
            float: Q-value stored for the given action and state
"""
y = int(state[0] / 5)
x = state[1]
return self.QMatrix.item((action, y, x))
|
[
"numpy.load",
"numpy.zeros"
] |
[((606, 629), 'numpy.zeros', 'np.zeros', (['(2, 118, 600)'], {}), '((2, 118, 600))\n', (614, 629), True, 'import numpy as np\n'), ((707, 727), 'numpy.zeros', 'np.zeros', (['(580, 600)'], {}), '((580, 600))\n', (715, 727), True, 'import numpy as np\n'), ((1619, 1659), 'numpy.load', 'np.load', (["('flappyQData//' + name + '.npy')"], {}), "('flappyQData//' + name + '.npy')\n", (1626, 1659), True, 'import numpy as np\n')]
|
import fastai
from fastai import URLs
from fastai.datasets import untar_data
from fastai.tabular.data import TabularList
from fastai_ext.model import tabular_learner, dae_learner
from fastai.tabular.transform import FillMissing, Normalize, Categorify
from sklearn.model_selection import KFold
import pandas as pd
import pdb
from fastai.metrics import accuracy
from fastai_ext.augmentations import TabularMixUpCallback, SwapNoiseCallback
from functools import partial
from matplotlib import pyplot as plt
from fastai_ext.hyperparameter import create_experiment, record_experiment, get_config_df, summarise_results, load_results
from fastai_ext.plot_utils import plot_best, plot_over_epochs
from fastai_ext.utils import request_lr, transfer_from_dae, freeze_but_last, unfreeze_all
print(fastai.show_install())
path = untar_data(URLs.ADULT_SAMPLE)
df = pd.read_csv(path/'adult.csv')
dep_var = '>=50k'
num_vars = ['age', 'fnlwgt', 'education-num', 'hours-per-week', 'capital-gain', 'capital-loss']
cat_vars = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country']
experiment_name, exp_path = create_experiment('pretraining', path)
print(exp_path)
config={'pretrain':[False, True]}
config_df = get_config_df(config)
procs = [FillMissing, Categorify, Normalize]
src = TabularList.from_df(df, path=path, cat_names=cat_vars, cont_names=num_vars, procs=procs)
kf = KFold(5, random_state=42, shuffle=True)
config_df.to_csv(exp_path/'config.csv')
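# For every configuration row, run 5-fold cross-validation; when pretraining is
# enabled, a denoising autoencoder is fit first and its weights are transferred
# into the tabular learner before the final training run.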
for i, params in config_df.iterrows():
for fold, (train_ids, valid_ids) in enumerate(kf.split(df)):
data = (src.split_by_idx(valid_ids).label_from_df(cols=dep_var).databunch(bs=512))
if params['pretrain']:
learn_dae = dae_learner(data, layers=[100,100], metrics=None, swap_noise=0.2)
lr = request_lr(learn_dae)
learn_dae.fit_one_cycle(15, lr)
learn = tabular_learner(data, layers=[100,100], metrics=accuracy, mixup_alpha=0)
if params['pretrain']:
transfer_from_dae(learn, learn_dae)
freeze_but_last(learn)
            lr = request_lr(learn)
learn.fit_one_cycle(1, lr)
unfreeze_all(learn)
lr = request_lr(learn)
record_experiment(learn, f'{i}-fold_{fold+1}', exp_path)
learn.fit_one_cycle(5, lr)
config_df, recorder_df, param_names, metric_names = load_results(exp_path)
summary_df = summarise_results(recorder_df, param_names, metric_names)
# plot_best(summary_df, param_names, metric_names)
# plt.savefig(exp_path/'best.png', bbox_inches='tight')
print(exp_path)
plot_over_epochs(summary_df, param_names, metric_names, config_df)
plt.savefig(exp_path/'all_epochs.png', bbox_inches='tight')
|
[
"fastai_ext.hyperparameter.record_experiment",
"fastai.datasets.untar_data",
"fastai_ext.plot_utils.plot_over_epochs",
"pandas.read_csv",
"fastai_ext.model.dae_learner",
"fastai_ext.hyperparameter.get_config_df",
"fastai.tabular.data.TabularList.from_df",
"sklearn.model_selection.KFold",
"fastai_ext.hyperparameter.load_results",
"fastai_ext.hyperparameter.create_experiment",
"fastai_ext.hyperparameter.summarise_results",
"fastai_ext.model.tabular_learner",
"fastai_ext.utils.transfer_from_dae",
"fastai_ext.utils.unfreeze_all",
"fastai_ext.utils.freeze_but_last",
"fastai.show_install",
"fastai_ext.utils.request_lr",
"matplotlib.pyplot.savefig"
] |
[((817, 846), 'fastai.datasets.untar_data', 'untar_data', (['URLs.ADULT_SAMPLE'], {}), '(URLs.ADULT_SAMPLE)\n', (827, 846), False, 'from fastai.datasets import untar_data\n'), ((852, 883), 'pandas.read_csv', 'pd.read_csv', (["(path / 'adult.csv')"], {}), "(path / 'adult.csv')\n", (863, 883), True, 'import pandas as pd\n'), ((1145, 1183), 'fastai_ext.hyperparameter.create_experiment', 'create_experiment', (['"""pretraining"""', 'path'], {}), "('pretraining', path)\n", (1162, 1183), False, 'from fastai_ext.hyperparameter import create_experiment, record_experiment, get_config_df, summarise_results, load_results\n'), ((1246, 1267), 'fastai_ext.hyperparameter.get_config_df', 'get_config_df', (['config'], {}), '(config)\n', (1259, 1267), False, 'from fastai_ext.hyperparameter import create_experiment, record_experiment, get_config_df, summarise_results, load_results\n'), ((1321, 1413), 'fastai.tabular.data.TabularList.from_df', 'TabularList.from_df', (['df'], {'path': 'path', 'cat_names': 'cat_vars', 'cont_names': 'num_vars', 'procs': 'procs'}), '(df, path=path, cat_names=cat_vars, cont_names=num_vars,\n procs=procs)\n', (1340, 1413), False, 'from fastai.tabular.data import TabularList\n'), ((1415, 1454), 'sklearn.model_selection.KFold', 'KFold', (['(5)'], {'random_state': '(42)', 'shuffle': '(True)'}), '(5, random_state=42, shuffle=True)\n', (1420, 1454), False, 'from sklearn.model_selection import KFold\n'), ((2424, 2446), 'fastai_ext.hyperparameter.load_results', 'load_results', (['exp_path'], {}), '(exp_path)\n', (2436, 2446), False, 'from fastai_ext.hyperparameter import create_experiment, record_experiment, get_config_df, summarise_results, load_results\n'), ((2460, 2517), 'fastai_ext.hyperparameter.summarise_results', 'summarise_results', (['recorder_df', 'param_names', 'metric_names'], {}), '(recorder_df, param_names, metric_names)\n', (2477, 2517), False, 'from fastai_ext.hyperparameter import create_experiment, record_experiment, get_config_df, summarise_results, load_results\n'), ((2642, 2708), 'fastai_ext.plot_utils.plot_over_epochs', 'plot_over_epochs', (['summary_df', 'param_names', 'metric_names', 'config_df'], {}), '(summary_df, param_names, metric_names, config_df)\n', (2658, 2708), False, 'from fastai_ext.plot_utils import plot_best, plot_over_epochs\n'), ((2709, 2770), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(exp_path / 'all_epochs.png')"], {'bbox_inches': '"""tight"""'}), "(exp_path / 'all_epochs.png', bbox_inches='tight')\n", (2720, 2770), True, 'from matplotlib import pyplot as plt\n'), ((786, 807), 'fastai.show_install', 'fastai.show_install', ([], {}), '()\n', (805, 807), False, 'import fastai\n'), ((1926, 1999), 'fastai_ext.model.tabular_learner', 'tabular_learner', (['data'], {'layers': '[100, 100]', 'metrics': 'accuracy', 'mixup_alpha': '(0)'}), '(data, layers=[100, 100], metrics=accuracy, mixup_alpha=0)\n', (1941, 1999), False, 'from fastai_ext.model import tabular_learner, dae_learner\n'), ((2253, 2270), 'fastai_ext.utils.request_lr', 'request_lr', (['learn'], {}), '(learn)\n', (2263, 2270), False, 'from fastai_ext.utils import request_lr, transfer_from_dae, freeze_but_last, unfreeze_all\n'), ((2279, 2337), 'fastai_ext.hyperparameter.record_experiment', 'record_experiment', (['learn', 'f"""{i}-fold_{fold + 1}"""', 'exp_path'], {}), "(learn, f'{i}-fold_{fold + 1}', exp_path)\n", (2296, 2337), False, 'from fastai_ext.hyperparameter import create_experiment, record_experiment, get_config_df, summarise_results, load_results\n'), ((1752, 1818), 
'fastai_ext.model.dae_learner', 'dae_learner', (['data'], {'layers': '[100, 100]', 'metrics': 'None', 'swap_noise': '(0.2)'}), '(data, layers=[100, 100], metrics=None, swap_noise=0.2)\n', (1763, 1818), False, 'from fastai_ext.model import tabular_learner, dae_learner\n'), ((1839, 1860), 'fastai_ext.utils.request_lr', 'request_lr', (['learn_dae'], {}), '(learn_dae)\n', (1849, 1860), False, 'from fastai_ext.utils import request_lr, transfer_from_dae, freeze_but_last, unfreeze_all\n'), ((2048, 2083), 'fastai_ext.utils.transfer_from_dae', 'transfer_from_dae', (['learn', 'learn_dae'], {}), '(learn, learn_dae)\n', (2065, 2083), False, 'from fastai_ext.utils import request_lr, transfer_from_dae, freeze_but_last, unfreeze_all\n'), ((2100, 2122), 'fastai_ext.utils.freeze_but_last', 'freeze_but_last', (['learn'], {}), '(learn)\n', (2115, 2122), False, 'from fastai_ext.utils import request_lr, transfer_from_dae, freeze_but_last, unfreeze_all\n'), ((2142, 2159), 'fastai_ext.utils.request_lr', 'request_lr', (['learn'], {}), '(learn)\n', (2152, 2159), False, 'from fastai_ext.utils import request_lr, transfer_from_dae, freeze_but_last, unfreeze_all\n'), ((2219, 2238), 'fastai_ext.utils.unfreeze_all', 'unfreeze_all', (['learn'], {}), '(learn)\n', (2231, 2238), False, 'from fastai_ext.utils import request_lr, transfer_from_dae, freeze_but_last, unfreeze_all\n')]
|
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT=1
import json
import math
import os
import sys
import matplotlib
import iotbx.phil
from cctbx import crystal, miller
from cctbx.array_family import flex
from scitbx import matrix
import dials.util
help_message = """
Calculates a stereographic projection image for the given crystal models and
the given miller indices (either specified individually, or for all miller indices
up to a given hkl_limit). By default the projection is in the plane
perpendicular to 0,0,1 reflection for the first crystal, however the projection
can optionally be performed in the laboratory frame (frame=laboratory) in the
plane perpendicular to the beam. Setting the parameter expand_to_p1=True will
also plot all symmetry equivalents of the given miller indices, and
eliminate_sys_absent=False will eliminate systematically absent reflections
before generating the projection.
Examples::
dials.stereographic_projection indexed.expt hkl=1,0,0 hkl=0,1,0
dials.stereographic_projection indexed.expt hkl_limit=2
dials.stereographic_projection indexed_1.expt indexed_2.expt hkl=1,0,0 expand_to_p1=True
"""
phil_scope = iotbx.phil.parse(
"""
hkl = None
.type = ints(size=3)
.multiple=True
hkl_limit = None
.type = int(value_min=1)
expand_to_p1 = True
.type = bool
.help = "Expand the given miller indices to symmetry equivalent reflections"
eliminate_sys_absent = False
.type = bool
.help = "Eliminate systematically absent reflections"
frame = *laboratory crystal
.type = choice
phi_angle = 0
.type = float
.help = "Phi rotation angle (degrees)"
use_starting_angle = False
.type = bool
.help = "If True, then the projection will be done for each crystal at the "
"starting phi angle for the scan associated with the crystal."
plane_normal = None
.type = ints(size=3)
save_coordinates = True
.type = bool
plot {
filename = stereographic_projection.png
.type = path
label_indices = False
.type = bool
colours = None
.type = strings
marker_size = 3
.type = int(value_min=1)
font_size = 6
.type = float(value_min=0)
colour_map = None
.type = str
gridsize = None
.type = int
labels = None
.type = strings
}
json {
filename = None
.type = path
}
"""
)
def reference_poles_perpendicular_to_beam(beam, goniometer):
# plane normal
d0 = matrix.col(beam.get_s0()).normalize()
if goniometer is not None:
d1 = d0.cross(matrix.col(goniometer.get_rotation_axis())).normalize()
else:
d1 = d0.ortho()
d2 = d1.cross(d0).normalize()
return (d0, d1, d2)
def reference_poles_crystal(crystal_model, plane_normal=(0, 0, 1)):
A = matrix.sqr(crystal_model.get_A())
B = matrix.sqr(crystal_model.get_B())
A_inv = A.inverse()
G = A_inv * A_inv.transpose()
G_star = A.transpose() * A
h0 = (G * matrix.col(plane_normal)).normalize()
h1 = matrix.col((1, 0, 0)).cross((G_star * h0).normalize())
h2 = (G_star * h1).cross(G_star * h0).normalize()
return tuple((B * h).normalize() for h in (h0, h1, h2))
def stereographic_projection(points, reference_poles):
# https://doi.org/10.1107/S0021889868005029
# J. Appl. Cryst. (1968). 1, 68-70
# The construction of stereographic projections by computer
# <NAME>, <NAME> and <NAME>
assert len(reference_poles) == 3
r_0, r_1, r_2 = reference_poles
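    # For each pole r_i: theta is the angle from the projection axis r_0, phi is
    # measured from r_1 in the projection plane, and the projected radius is
    # tan(theta/2), giving coordinates (r*cos(phi), r*sin(phi)).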
projections = flex.vec2_double()
for p in points:
r_i = matrix.col(p)
# theta is the angle between r_i and the plane normal, r_0
cos_theta = r_i.cos_angle(r_0)
if cos_theta < 0:
r_i = -r_i
cos_theta = r_i.cos_angle(r_0)
# alpha is the angle between r_i and r_1
cos_alpha = r_i.cos_angle(r_1)
theta = math.acos(cos_theta)
cos_phi = cos_alpha / math.sin(theta)
if abs(cos_phi) > 1:
cos_phi = math.copysign(1, cos_phi)
phi = math.acos(cos_phi)
N = r_i.dot(r_2)
r = math.tan(theta / 2)
x = r * cos_phi
y = r * math.sin(phi)
y = math.copysign(y, N)
projections.append((x, y))
return projections
def gcd_list(l):
# greatest common divisor for a list of numbers
from scitbx.math import gcd_int_simple as gcd
result = l[0]
for i in range(1, len(l)):
result = gcd(result, l[i])
return result
@dials.util.show_mail_handle_errors()
def run(args=None):
from dials.util.options import OptionParser, flatten_experiments
# The script usage
usage = "dials.stereographic_projection [options] [param.phil] indexed.expt"
parser = OptionParser(
usage=usage,
phil=phil_scope,
read_experiments=True,
check_format=False,
epilog=help_message,
)
params, options = parser.parse_args(args=args, show_diff_phil=True)
experiments = flatten_experiments(params.input.experiments)
if not experiments:
parser.print_help()
return
if not params.hkl and params.hkl_limit is None:
sys.exit("Please provide hkl or hkl_limit parameters.")
if params.plot.labels and len(params.plot.labels) != len(experiments):
sys.exit(
"Number of labels (%i) must equal number of experiments (%i)"
% (len(params.plot.labels), len(experiments))
)
if params.hkl is not None and len(params.hkl):
miller_indices = flex.miller_index(params.hkl)
elif params.hkl_limit is not None:
limit = params.hkl_limit
miller_indices = flex.miller_index()
for h in range(-limit, limit + 1):
for k in range(-limit, limit + 1):
for l in range(-limit, limit + 1):
if (h, k, l) == (0, 0, 0):
continue
miller_indices.append((h, k, l))
crystals = experiments.crystals()
symmetry = crystal.symmetry(
unit_cell=crystals[0].get_unit_cell(), space_group=crystals[0].get_space_group()
)
miller_set = miller.set(symmetry, miller_indices)
d_spacings = miller_set.d_spacings()
if params.eliminate_sys_absent:
d_spacings = d_spacings.eliminate_sys_absent()
if params.expand_to_p1:
d_spacings = d_spacings.as_non_anomalous_array().expand_to_p1()
miller_indices = d_spacings.indices()
# find the greatest common factor (divisor) between miller indices
miller_indices_unique = flex.miller_index()
for hkl in miller_indices:
gcd = gcd_list(hkl)
if gcd > 1:
miller_indices_unique.append(tuple(int(h / gcd) for h in hkl))
elif gcd < 1:
pass
else:
miller_indices_unique.append(hkl)
miller_indices = miller_indices_unique
miller_indices = flex.miller_index(list(set(miller_indices)))
ref_crystal = crystals[0]
U = matrix.sqr(ref_crystal.get_U())
B = matrix.sqr(ref_crystal.get_B())
R = matrix.identity(3)
if params.frame == "laboratory":
reference_poles = reference_poles_perpendicular_to_beam(
experiments[0].beam, experiments[0].goniometer
)
if params.use_starting_angle:
rotation_axis = matrix.col(experiments[0].goniometer.get_rotation_axis())
R = rotation_axis.axis_and_angle_as_r3_rotation_matrix(
experiments[0].sequence.get_oscillation()[0], deg=True
)
elif params.phi_angle != 0:
rotation_axis = matrix.col(experiments[0].goniometer.get_rotation_axis())
R = rotation_axis.axis_and_angle_as_r3_rotation_matrix(
params.phi_angle, deg=True
)
else:
if params.plane_normal is not None:
plane_normal = params.plane_normal
else:
plane_normal = (0, 0, 1)
reference_poles = reference_poles_crystal(
ref_crystal, plane_normal=plane_normal
)
if params.frame == "crystal":
U = matrix.identity(3)
reciprocal_space_points = list(R * U * B) * miller_indices.as_vec3_double()
projections_ref = stereographic_projection(reciprocal_space_points, reference_poles)
projections_all = [projections_ref]
if experiments:
from dials.algorithms.indexing.compare_orientation_matrices import (
difference_rotation_matrix_axis_angle,
)
for expt in experiments[1:]:
cryst = expt.crystal
if params.frame == "crystal":
R_ij, axis, angle, cb_op = difference_rotation_matrix_axis_angle(
ref_crystal, cryst
)
U = R_ij
elif params.use_starting_angle:
if params.use_starting_angle:
rotation_axis = matrix.col(expt.goniometer.get_rotation_axis())
R = rotation_axis.axis_and_angle_as_r3_rotation_matrix(
expt.sequence.get_oscillation()[0], deg=True
)
else:
U = matrix.sqr(cryst.get_U())
reciprocal_space_points = (
list(R * U * matrix.sqr(cryst.get_B()))
* miller_indices.as_vec3_double()
)
projections = stereographic_projection(
reciprocal_space_points, reference_poles
)
projections_all.append(projections)
if params.save_coordinates:
with open("projections.txt", "w") as f:
f.write("crystal h k l x y" + os.linesep)
for i_cryst, projections in enumerate(projections_all):
for hkl, proj in zip(miller_indices, projections):
f.write("%i " % (i_cryst + 1))
f.write("%i %i %i " % hkl)
f.write(("%f %f" + os.linesep) % proj)
if params.plot.filename:
epochs = None
if params.plot.colour_map is not None:
if experiments[0].sequence is not None:
epochs = [expt.sequence.get_epochs()[0] for expt in experiments]
else:
epochs = [i for i, expt in enumerate(experiments)]
plot_projections(
projections_all,
filename=params.plot.filename,
colours=params.plot.colours,
marker_size=params.plot.marker_size,
font_size=params.plot.font_size,
gridsize=params.plot.gridsize,
label_indices=miller_indices if params.plot.label_indices else False,
epochs=epochs,
colour_map=params.plot.colour_map,
)
if params.json.filename:
projections_as_json(
projections_all, filename=params.json.filename, labels=params.plot.labels
)
def plot_projections(
projections,
filename=None,
colours=None,
marker_size=3,
font_size=6,
gridsize=None,
label_indices=False,
epochs=None,
colour_map=None,
):
projections_all = projections
# http://matplotlib.org/faq/howto_faq.html#generate-images-without-having-a-window-appear
matplotlib.use("Agg") # use a non-interactive backend
from matplotlib import pylab, pyplot
if epochs is not None and colour_map is not None:
epochs = flex.double(epochs)
epochs -= flex.min(epochs)
epochs /= flex.max(epochs)
cmap = matplotlib.cm.get_cmap(colour_map)
colours = [cmap(e) for e in epochs]
elif colours is None or len(colours) == 0:
colours = ["b"] * len(projections_all)
elif len(colours) < len(projections_all):
colours = colours * len(projections_all)
fig = pyplot.figure()
pyplot.scatter([0], [0], marker="+", c="0.75", s=100)
cir = pylab.Circle((0, 0), radius=1.0, fill=False, color="0.75")
pylab.gca().add_patch(cir)
if gridsize is not None:
x = flex.double()
y = flex.double()
for i, projections in enumerate(projections_all):
x_, y_ = projections.parts()
x.extend(x_)
y.extend(y_)
hb = pyplot.hexbin(x, y, gridsize=gridsize, linewidths=0.2)
pyplot.colorbar(hb)
else:
for i, projections in enumerate(projections_all):
x, y = projections.parts()
pyplot.scatter(
x.as_numpy_array(),
y.as_numpy_array(),
c=colours[i],
s=marker_size,
edgecolors="none",
)
if label_indices:
for j, (hkl, proj) in enumerate(zip(label_indices, projections)):
# hack to not write two labels on top of each other
p1, p2 = (projections - proj).parts()
if (flex.sqrt(flex.pow2(p1) + flex.pow2(p2)) < 1e-3).iselection()[
0
] != j:
continue
pyplot.text(proj[0], proj[1], str(hkl), fontsize=font_size)
fig.axes[0].set_aspect("equal")
pyplot.xlim(-1.1, 1.1)
pyplot.ylim(-1.1, 1.1)
if filename is not None:
pyplot.savefig(filename, dpi=300)
def projections_as_dict(projections, labels):
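    # Flatten the per-experiment projections into a single scatter trace (plus a
    # central cross marker) inside a plotly-style figure dictionary.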
projections_all = flex.vec2_double()
if labels:
labels_all = []
assert len(projections) == len(labels)
for i, proj in enumerate(projections):
projections_all.extend(proj)
if labels:
labels_all.extend([labels[i]] * len(proj))
data = []
x, y = projections_all.parts()
data.append(
{
"x": list(x),
"y": list(y),
"mode": "markers",
"type": "scatter",
"name": "stereographic_projections",
"showlegend": False,
"hovertext": labels_all if labels else "",
"hoverinfo": "text",
}
)
data.append(
{
"x": [0],
"y": [0],
"mode": "markers",
"marker": {
"color": "black",
"size": 25,
"symbol": "cross-thin",
"line": {"width": 1},
},
"showlegend": False,
}
)
d = {
"data": data,
"layout": {
"title": "Stereographic projections",
"hovermode": "closest",
"xaxis": {
"range": [-1.0, 1.0],
"showgrid": False,
"zeroline": False,
"showline": False,
"ticks": "",
"showticklabels": False,
},
"yaxis": {
"range": [-1.0, 1.0],
"showgrid": False,
"zeroline": False,
"showline": False,
"ticks": "",
"showticklabels": False,
},
"shapes": [
{
"type": "circle",
"xref": "x",
"yref": "y",
"x0": -1,
"y0": -1,
"x1": 1,
"y1": 1,
"line": {"color": "black"},
}
],
},
}
return d
def projections_as_json(projections, filename=None, labels=None):
d = projections_as_dict(projections, labels=labels)
json_str = json.dumps(d)
if filename is not None:
with open(filename, "w") as f:
f.write(json_str)
return json_str
if __name__ == "__main__":
run()
|
[
"scitbx.math.gcd_int_simple",
"matplotlib.cm.get_cmap",
"cctbx.array_family.flex.miller_index",
"json.dumps",
"math.copysign",
"scitbx.matrix.identity",
"matplotlib.pyplot.figure",
"matplotlib.pylab.gca",
"cctbx.array_family.flex.double",
"cctbx.array_family.flex.min",
"cctbx.array_family.flex.pow2",
"matplotlib.pylab.Circle",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.hexbin",
"matplotlib.pyplot.ylim",
"cctbx.miller.set",
"math.sin",
"matplotlib.use",
"dials.algorithms.indexing.compare_orientation_matrices.difference_rotation_matrix_axis_angle",
"scitbx.matrix.col",
"dials.util.options.OptionParser",
"sys.exit",
"matplotlib.pyplot.xlim",
"cctbx.array_family.flex.vec2_double",
"matplotlib.pyplot.scatter",
"math.tan",
"dials.util.options.flatten_experiments",
"cctbx.array_family.flex.max",
"math.acos",
"matplotlib.pyplot.savefig"
] |
[((3436, 3454), 'cctbx.array_family.flex.vec2_double', 'flex.vec2_double', ([], {}), '()\n', (3452, 3454), False, 'from cctbx.array_family import flex\n'), ((4661, 4771), 'dials.util.options.OptionParser', 'OptionParser', ([], {'usage': 'usage', 'phil': 'phil_scope', 'read_experiments': '(True)', 'check_format': '(False)', 'epilog': 'help_message'}), '(usage=usage, phil=phil_scope, read_experiments=True,\n check_format=False, epilog=help_message)\n', (4673, 4771), False, 'from dials.util.options import OptionParser, flatten_experiments\n'), ((4906, 4951), 'dials.util.options.flatten_experiments', 'flatten_experiments', (['params.input.experiments'], {}), '(params.input.experiments)\n', (4925, 4951), False, 'from dials.util.options import OptionParser, flatten_experiments\n'), ((6056, 6092), 'cctbx.miller.set', 'miller.set', (['symmetry', 'miller_indices'], {}), '(symmetry, miller_indices)\n', (6066, 6092), False, 'from cctbx import crystal, miller\n'), ((6467, 6486), 'cctbx.array_family.flex.miller_index', 'flex.miller_index', ([], {}), '()\n', (6484, 6486), False, 'from cctbx.array_family import flex\n'), ((6968, 6986), 'scitbx.matrix.identity', 'matrix.identity', (['(3)'], {}), '(3)\n', (6983, 6986), False, 'from scitbx import matrix\n'), ((11069, 11090), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (11083, 11090), False, 'import matplotlib\n'), ((11621, 11636), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (11634, 11636), False, 'from matplotlib import pylab, pyplot\n'), ((11642, 11695), 'matplotlib.pyplot.scatter', 'pyplot.scatter', (['[0]', '[0]'], {'marker': '"""+"""', 'c': '"""0.75"""', 's': '(100)'}), "([0], [0], marker='+', c='0.75', s=100)\n", (11656, 11695), False, 'from matplotlib import pylab, pyplot\n'), ((11706, 11764), 'matplotlib.pylab.Circle', 'pylab.Circle', (['(0, 0)'], {'radius': '(1.0)', 'fill': '(False)', 'color': '"""0.75"""'}), "((0, 0), radius=1.0, fill=False, color='0.75')\n", (11718, 11764), False, 'from matplotlib import pylab, pyplot\n'), ((12976, 12998), 'matplotlib.pyplot.xlim', 'pyplot.xlim', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (12987, 12998), False, 'from matplotlib import pylab, pyplot\n'), ((13003, 13025), 'matplotlib.pyplot.ylim', 'pyplot.ylim', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (13014, 13025), False, 'from matplotlib import pylab, pyplot\n'), ((13167, 13185), 'cctbx.array_family.flex.vec2_double', 'flex.vec2_double', ([], {}), '()\n', (13183, 13185), False, 'from cctbx.array_family import flex\n'), ((15289, 15302), 'json.dumps', 'json.dumps', (['d'], {}), '(d)\n', (15299, 15302), False, 'import json\n'), ((3491, 3504), 'scitbx.matrix.col', 'matrix.col', (['p'], {}), '(p)\n', (3501, 3504), False, 'from scitbx import matrix\n'), ((3808, 3828), 'math.acos', 'math.acos', (['cos_theta'], {}), '(cos_theta)\n', (3817, 3828), False, 'import math\n'), ((3966, 3984), 'math.acos', 'math.acos', (['cos_phi'], {}), '(cos_phi)\n', (3975, 3984), False, 'import math\n'), ((4023, 4042), 'math.tan', 'math.tan', (['(theta / 2)'], {}), '(theta / 2)\n', (4031, 4042), False, 'import math\n'), ((4109, 4128), 'math.copysign', 'math.copysign', (['y', 'N'], {}), '(y, N)\n', (4122, 4128), False, 'import math\n'), ((4377, 4394), 'scitbx.math.gcd_int_simple', 'gcd', (['result', 'l[i]'], {}), '(result, l[i])\n', (4380, 4394), True, 'from scitbx.math import gcd_int_simple as gcd\n'), ((5081, 5136), 'sys.exit', 'sys.exit', (['"""Please provide hkl or hkl_limit parameters."""'], {}), "('Please provide hkl or hkl_limit 
parameters.')\n", (5089, 5136), False, 'import sys\n'), ((5450, 5479), 'cctbx.array_family.flex.miller_index', 'flex.miller_index', (['params.hkl'], {}), '(params.hkl)\n', (5467, 5479), False, 'from cctbx.array_family import flex\n'), ((7994, 8012), 'scitbx.matrix.identity', 'matrix.identity', (['(3)'], {}), '(3)\n', (8009, 8012), False, 'from scitbx import matrix\n'), ((11237, 11256), 'cctbx.array_family.flex.double', 'flex.double', (['epochs'], {}), '(epochs)\n', (11248, 11256), False, 'from cctbx.array_family import flex\n'), ((11275, 11291), 'cctbx.array_family.flex.min', 'flex.min', (['epochs'], {}), '(epochs)\n', (11283, 11291), False, 'from cctbx.array_family import flex\n'), ((11310, 11326), 'cctbx.array_family.flex.max', 'flex.max', (['epochs'], {}), '(epochs)\n', (11318, 11326), False, 'from cctbx.array_family import flex\n'), ((11342, 11376), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['colour_map'], {}), '(colour_map)\n', (11364, 11376), False, 'import matplotlib\n'), ((11838, 11851), 'cctbx.array_family.flex.double', 'flex.double', ([], {}), '()\n', (11849, 11851), False, 'from cctbx.array_family import flex\n'), ((11864, 11877), 'cctbx.array_family.flex.double', 'flex.double', ([], {}), '()\n', (11875, 11877), False, 'from cctbx.array_family import flex\n'), ((12040, 12094), 'matplotlib.pyplot.hexbin', 'pyplot.hexbin', (['x', 'y'], {'gridsize': 'gridsize', 'linewidths': '(0.2)'}), '(x, y, gridsize=gridsize, linewidths=0.2)\n', (12053, 12094), False, 'from matplotlib import pylab, pyplot\n'), ((12103, 12122), 'matplotlib.pyplot.colorbar', 'pyplot.colorbar', (['hb'], {}), '(hb)\n', (12118, 12122), False, 'from matplotlib import pylab, pyplot\n'), ((13063, 13096), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['filename'], {'dpi': '(300)'}), '(filename, dpi=300)\n', (13077, 13096), False, 'from matplotlib import pylab, pyplot\n'), ((2934, 2955), 'scitbx.matrix.col', 'matrix.col', (['(1, 0, 0)'], {}), '((1, 0, 0))\n', (2944, 2955), False, 'from scitbx import matrix\n'), ((3859, 3874), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (3867, 3874), False, 'import math\n'), ((3926, 3951), 'math.copysign', 'math.copysign', (['(1)', 'cos_phi'], {}), '(1, cos_phi)\n', (3939, 3951), False, 'import math\n'), ((4083, 4096), 'math.sin', 'math.sin', (['phi'], {}), '(phi)\n', (4091, 4096), False, 'import math\n'), ((5577, 5596), 'cctbx.array_family.flex.miller_index', 'flex.miller_index', ([], {}), '()\n', (5594, 5596), False, 'from cctbx.array_family import flex\n'), ((11769, 11780), 'matplotlib.pylab.gca', 'pylab.gca', ([], {}), '()\n', (11778, 11780), False, 'from matplotlib import pylab, pyplot\n'), ((2887, 2911), 'scitbx.matrix.col', 'matrix.col', (['plane_normal'], {}), '(plane_normal)\n', (2897, 2911), False, 'from scitbx import matrix\n'), ((8539, 8596), 'dials.algorithms.indexing.compare_orientation_matrices.difference_rotation_matrix_axis_angle', 'difference_rotation_matrix_axis_angle', (['ref_crystal', 'cryst'], {}), '(ref_crystal, cryst)\n', (8576, 8596), False, 'from dials.algorithms.indexing.compare_orientation_matrices import difference_rotation_matrix_axis_angle\n'), ((12716, 12729), 'cctbx.array_family.flex.pow2', 'flex.pow2', (['p1'], {}), '(p1)\n', (12725, 12729), False, 'from cctbx.array_family import flex\n'), ((12732, 12745), 'cctbx.array_family.flex.pow2', 'flex.pow2', (['p2'], {}), '(p2)\n', (12741, 12745), False, 'from cctbx.array_family import flex\n')]
|
import re
import sys
COUNT_RE = re.compile(r'^Total number of states checked is ([0-9]+)\.')
START_RE = re.compile(r'^== ([0-9]+) ==$')
END_RE = re.compile(r'^\[\[== End ([0-9]+) ==\]\]$')
class DealResult(object):
"""a deal result"""
IMP = 0
SOLVED = 1
INTRACT = 2
def __init__(self, idx, lines):
self.idx = idx
self.count = -1
for line in lines:
if line.startswith("I could not"):
self.verdict = self.IMP
break
elif line.startswith("This game is solv"):
self.verdict = self.SOLVED
break
elif line.startswith("Iterations count exceeded"):
self.verdict = self.INTRACT
for line_w_count in lines:
m = COUNT_RE.match(line_w_count)
if m:
self.count = int(m.group(1))
break
break
else:
print(lines)
raise BaseException("Invalid state - %d" % (idx))
def line_iter():
# 'vendu-3fc-output.txt'
FILENAMES = [
'vendu-7-3fc-output.txt',
'freecell-3fc-amazon.txt',
'vendu-8-3fc-output.txt',
'vendu-6-3fc-output.txt',
'vendu-9-3fc-output.txt',
'vendu-2-3fc-output.txt',
'vendu-3-3fc-output.txt',
'vendu-4-3fc-output.txt',
'vendu-10-3fc-output.txt',
'aws-3-3fc-output.txt',
'aws-2-3fc-output.txt',
'vendu-5-3fc-output.txt',
]
for fn in FILENAMES:
with open(fn, 'r') as fh:
for line in fh:
yield line.rstrip('\r\n')
def deal_iter():
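    # Each deal is delimited by a "== N ==" header and a "[[== End N ==]]" footer;
    # collect the lines in between and wrap them in a DealResult.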
it = line_iter()
    line = next(it, None)  # use the built-in next(); the .next() method is Python 2 only
prev_idx = -1
while line:
lines = [line]
        line = next(it, None)
while line is not None and not line.startswith('[[== End '):
lines.append(line)
            line = next(it, None)
lines.append(line)
idx1 = int(START_RE.match(lines[0]).group(1))
idx2 = int(END_RE.match(lines[-1]).group(1))
if idx1 != idx2:
raise BaseException("index mismatch - %d ; %d" % (idx1, idx2))
result = DealResult(idx1, lines)
if not idx1 > prev_idx:
raise BaseException("Wrong indexes order %d -> %d" %
(prev_idx, idx1))
prev_idx = idx1
yield result
        line = next(it, None)
def main(argv):
cnt = 0
verdict_names = {DealResult.IMP: "IMP", DealResult.SOLVED: "SOLVED",
DealResult.INTRACT: "INTRACT"}
verdict_counts = {}
for verdict in verdict_names.keys():
verdict_counts[verdict] = 0
with open('intract1.txt', 'w') as fh:
with open('solved1.txt', 'w') as sol_fh:
with open('impossible1.txt', 'w') as imp_fh:
with open('all-deals.txt', 'w') as all_fh:
for d in deal_iter():
i = d.idx
all_fh.write("%d\n" % (i))
if d.verdict == d.INTRACT:
fh.write("%d\n" % (i))
elif d.verdict == d.SOLVED:
sol_fh.write("%d\n" % (i))
else:
imp_fh.write("%d\n" % (i))
verdict_counts[d.verdict] += 1
cnt += 1
# print(d)
print("Deals Count = %d" % (cnt))
for verdict in verdict_names.keys():
print("%-10s : %d" % (verdict_names[verdict], verdict_counts[verdict]))
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
[
"re.compile"
] |
[((33, 93), 're.compile', 're.compile', (['"""^Total number of states checked is ([0-9]+)\\\\."""'], {}), "('^Total number of states checked is ([0-9]+)\\\\.')\n", (43, 93), False, 'import re\n'), ((105, 135), 're.compile', 're.compile', (['"""^== ([0-9]+) ==$"""'], {}), "('^== ([0-9]+) ==$')\n", (115, 135), False, 'import re\n'), ((146, 192), 're.compile', 're.compile', (['"""^\\\\[\\\\[== End ([0-9]+) ==\\\\]\\\\]$"""'], {}), "('^\\\\[\\\\[== End ([0-9]+) ==\\\\]\\\\]$')\n", (156, 192), False, 'import re\n')]
|
#!/pxrpythonsubst
#
# Copyright 2018 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from __future__ import print_function
import sys
from pxr.Usdviewq.qt import QtWidgets
# Remove any unwanted visuals from the view.
def _modifySettings(appController):
appController._dataModel.viewSettings.showBBoxes = False
appController._dataModel.viewSettings.showHUD = False
# Select one or more prim paths, then set active state of those prims.
def _selectAndSetActive(appController, active, paths):
selection = appController._dataModel.selection
with selection.batchPrimChanges:
selection.clearPrims()
for path in paths:
selection.addPrimPath(path)
if active:
appController.activateSelectedPrims()
# We must processEvents after every call to activateSelectedPrims() so the
# activated PrimViewItems can repopulate. (See _primViewUpdateTimer in
# appController.py)
QtWidgets.QApplication.processEvents()
else:
appController.deactivateSelectedPrims()
# Test deactivating then reactivating a single prim with no children.
def _testSingleDeactivate(appController):
_selectAndSetActive(appController, False, ["/spheres/a"])
appController._takeShot("singleDeactivate.png")
_selectAndSetActive(appController, True, ["/spheres/a"])
# Test deactivating then reactivating a single prim with some children.
def _testParentDeactivate(appController):
_selectAndSetActive(appController, False, ["/spheres"])
appController._takeShot("parentDeactivate.png")
_selectAndSetActive(appController, True, ["/spheres"])
# Test deactivating then reactivating a parent prim and one of its children.
def _testParentChildDeactivate(appController):
_selectAndSetActive(appController, False, ["/spheres", "/spheres/a"])
appController._takeShot("parentChildDeactivate1.png")
# Reactivation is a two-part process because we must activate the parent
# before we can even select the child. Take a snapshot in-between to verify
# this is working.
_selectAndSetActive(appController, True, ["/spheres"])
appController._takeShot("parentChildDeactivate2.png")
_selectAndSetActive(appController, True, ["/spheres/a"])
# In this case, the child prim has a shorter path than the parent due to a
# reference. If we deactivate the prims through Usd in sorted order where longer
# paths are deactivated first then this case fails.
def _testReferenceChildDeactivate(appController):
_selectAndSetActive(appController, False, ["/C2/D", "/A/B/C"])
# Test that instance proxies cannot be deactivated. The call does not raise an
# error, but prints a warning and does not perform the deactivation.
def _testInstanceProxyDeactivate(appController):
_selectAndSetActive(appController, False, ["/X/Y"])
prim = appController._dataModel.stage.GetPrimAtPath("/X/Y")
assert prim.IsActive() # Activation state should not have changed.
# Test that the complexity setting works properly in usdview.
def testUsdviewInputFunction(appController):
_modifySettings(appController)
_testSingleDeactivate(appController)
_testParentDeactivate(appController)
_testParentChildDeactivate(appController)
_testReferenceChildDeactivate(appController)
_testInstanceProxyDeactivate(appController)
|
[
"pxr.Usdviewq.qt.QtWidgets.QApplication.processEvents"
] |
[((1941, 1979), 'pxr.Usdviewq.qt.QtWidgets.QApplication.processEvents', 'QtWidgets.QApplication.processEvents', ([], {}), '()\n', (1977, 1979), False, 'from pxr.Usdviewq.qt import QtWidgets\n')]
|
import unittest
from nose.tools import (
raises,
assert_equal,
assert_true,
assert_false,
assert_count_equal,
)
from parliament import UnknownPrefixException, UnknownActionException
from parliament.statement import expand_action
class TestActionExpansion(unittest.TestCase):
"""Test class for expand_action function"""
def test_expand_action_no_expansion(self):
expanded_actions = expand_action("s3:listallmybuckets")
assert_count_equal(
expanded_actions, [{"service": "s3", "action": "ListAllMyBuckets"}]
)
def test_expand_action_with_expansion(self):
expanded_actions = expand_action("s3:listallmybucke*")
assert_count_equal(
expanded_actions, [{"service": "s3", "action": "ListAllMyBuckets"}]
)
def test_expand_action_with_casing(self):
expanded_actions = expand_action("iAm:li*sTuS*rs")
assert_count_equal(
expanded_actions, [{"service": "iam", "action": "ListUsers"}]
)
def test_expand_action_with_expansion_for_prefix_used_multiple_times(self):
expanded_actions = expand_action("ses:Describe*")
assert_count_equal(
expanded_actions,
[
{"service": "ses", "action": "DescribeActiveReceiptRuleSet"},
{"service": "ses", "action": "DescribeConfigurationSet"},
{"service": "ses", "action": "DescribeReceiptRule"},
{"service": "ses", "action": "DescribeReceiptRuleSet"},
],
)
def test_expand_action_with_permission_only_action(self):
        # There are 17 privileges listed as "logs.CreateLogDelivery [permission only]"
expanded_actions = expand_action("logs:GetLogDelivery")
assert_count_equal(
expanded_actions, [{"service": "logs", "action": "GetLogDelivery"}]
)
def test_exception_malformed(self):
try:
expand_action("malformed")
assert False
except ValueError as e:
assert True
def test_exception_bad_service(self):
try:
expand_action("333:listallmybuckets")
assert False
except UnknownPrefixException as e:
assert True
def test_exception_bad_action(self):
try:
expand_action("s3:zzz")
assert False
except UnknownActionException as e:
assert True
def test_exception_bad_expansion(self):
try:
expand_action("s3:zzz*")
assert False
except UnknownActionException as e:
assert True
def test_expand_all(self):
assert_true(len(expand_action("*")) > 1000)
assert_true(len(expand_action("*:*")) > 1000)
|
[
"parliament.statement.expand_action",
"nose.tools.assert_count_equal"
] |
[((421, 457), 'parliament.statement.expand_action', 'expand_action', (['"""s3:listallmybuckets"""'], {}), "('s3:listallmybuckets')\n", (434, 457), False, 'from parliament.statement import expand_action\n'), ((466, 557), 'nose.tools.assert_count_equal', 'assert_count_equal', (['expanded_actions', "[{'service': 's3', 'action': 'ListAllMyBuckets'}]"], {}), "(expanded_actions, [{'service': 's3', 'action':\n 'ListAllMyBuckets'}])\n", (484, 557), False, 'from nose.tools import raises, assert_equal, assert_true, assert_false, assert_count_equal\n'), ((653, 688), 'parliament.statement.expand_action', 'expand_action', (['"""s3:listallmybucke*"""'], {}), "('s3:listallmybucke*')\n", (666, 688), False, 'from parliament.statement import expand_action\n'), ((697, 788), 'nose.tools.assert_count_equal', 'assert_count_equal', (['expanded_actions', "[{'service': 's3', 'action': 'ListAllMyBuckets'}]"], {}), "(expanded_actions, [{'service': 's3', 'action':\n 'ListAllMyBuckets'}])\n", (715, 788), False, 'from nose.tools import raises, assert_equal, assert_true, assert_false, assert_count_equal\n'), ((881, 912), 'parliament.statement.expand_action', 'expand_action', (['"""iAm:li*sTuS*rs"""'], {}), "('iAm:li*sTuS*rs')\n", (894, 912), False, 'from parliament.statement import expand_action\n'), ((921, 1006), 'nose.tools.assert_count_equal', 'assert_count_equal', (['expanded_actions', "[{'service': 'iam', 'action': 'ListUsers'}]"], {}), "(expanded_actions, [{'service': 'iam', 'action':\n 'ListUsers'}])\n", (939, 1006), False, 'from nose.tools import raises, assert_equal, assert_true, assert_false, assert_count_equal\n'), ((1133, 1163), 'parliament.statement.expand_action', 'expand_action', (['"""ses:Describe*"""'], {}), "('ses:Describe*')\n", (1146, 1163), False, 'from parliament.statement import expand_action\n'), ((1172, 1455), 'nose.tools.assert_count_equal', 'assert_count_equal', (['expanded_actions', "[{'service': 'ses', 'action': 'DescribeActiveReceiptRuleSet'}, {'service':\n 'ses', 'action': 'DescribeConfigurationSet'}, {'service': 'ses',\n 'action': 'DescribeReceiptRule'}, {'service': 'ses', 'action':\n 'DescribeReceiptRuleSet'}]"], {}), "(expanded_actions, [{'service': 'ses', 'action':\n 'DescribeActiveReceiptRuleSet'}, {'service': 'ses', 'action':\n 'DescribeConfigurationSet'}, {'service': 'ses', 'action':\n 'DescribeReceiptRule'}, {'service': 'ses', 'action':\n 'DescribeReceiptRuleSet'}])\n", (1190, 1455), False, 'from nose.tools import raises, assert_equal, assert_true, assert_false, assert_count_equal\n'), ((1729, 1765), 'parliament.statement.expand_action', 'expand_action', (['"""logs:GetLogDelivery"""'], {}), "('logs:GetLogDelivery')\n", (1742, 1765), False, 'from parliament.statement import expand_action\n'), ((1774, 1865), 'nose.tools.assert_count_equal', 'assert_count_equal', (['expanded_actions', "[{'service': 'logs', 'action': 'GetLogDelivery'}]"], {}), "(expanded_actions, [{'service': 'logs', 'action':\n 'GetLogDelivery'}])\n", (1792, 1865), False, 'from nose.tools import raises, assert_equal, assert_true, assert_false, assert_count_equal\n'), ((1950, 1976), 'parliament.statement.expand_action', 'expand_action', (['"""malformed"""'], {}), "('malformed')\n", (1963, 1976), False, 'from parliament.statement import expand_action\n'), ((2126, 2163), 'parliament.statement.expand_action', 'expand_action', (['"""333:listallmybuckets"""'], {}), "('333:listallmybuckets')\n", (2139, 2163), False, 'from parliament.statement import expand_action\n'), ((2324, 2347), 'parliament.statement.expand_action', 
'expand_action', (['"""s3:zzz"""'], {}), "('s3:zzz')\n", (2337, 2347), False, 'from parliament.statement import expand_action\n'), ((2511, 2535), 'parliament.statement.expand_action', 'expand_action', (['"""s3:zzz*"""'], {}), "('s3:zzz*')\n", (2524, 2535), False, 'from parliament.statement import expand_action\n'), ((2685, 2703), 'parliament.statement.expand_action', 'expand_action', (['"""*"""'], {}), "('*')\n", (2698, 2703), False, 'from parliament.statement import expand_action\n'), ((2737, 2757), 'parliament.statement.expand_action', 'expand_action', (['"""*:*"""'], {}), "('*:*')\n", (2750, 2757), False, 'from parliament.statement import expand_action\n')]
|
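A short usage sketch built only on the behaviour the tests above establish: expand_action returns {'service', 'action'} dicts and raises ValueError, UnknownPrefixException, or UnknownActionException on bad input. The expand_statement_actions helper is hypothetical, not part of parliament.

from parliament import UnknownPrefixException, UnknownActionException
from parliament.statement import expand_action

def expand_statement_actions(actions):
    """Expand a policy statement's Action list, skipping entries that cannot be resolved."""
    expanded = []
    for action in actions:
        try:
            expanded.extend(expand_action(action))
        except (ValueError, UnknownPrefixException, UnknownActionException):
            continue  # malformed, unknown prefix, or unknown action
    return expanded

print(expand_statement_actions(["s3:listallmybuckets", "iAm:li*sTuS*rs", "s3:zzz"]))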
"""
Check the coverage of tests for Stan functions. It determines which functions
are covered from:
- the names of the test-*.R files
- functions listed in tests.yaml: some models test multiple functions, especially the ones that test the extractor functions.
"""
import sys
import os
from os import path
import re
import fnmatch
import yaml
from standoc import parse_stan_function_defs
def check_functions(filename, testdir):
covered = set()
for fname in os.listdir(testdir):
if fnmatch.fnmatch(fname, 'test-*.R'):
with open(path.join(testdir, fname), 'r') as f:
for line in f:
m = re.match(r'#function:\s+(\w+)', line)
if m:
covered.add(m.group(1))
with open(filename, 'r') as f:
functions = set(parse_stan_function_defs(f.read()))
missing = functions - covered
if len(missing) == 0:
print("All functions have tests!!!\n")
else:
print("Missing tests for %d functions:" % len(missing))
print('\n'.join("- %s" % x for x in missing))
unknown = covered - functions
if len(unknown) > 0:
print("\nThere are some unknown functions in tests")
print('\n'.join("- %s" % x for x in unknown))
def main():
filename, testdir = sys.argv[1:3]
check_functions(filename, testdir)
if __name__ == "__main__":
main()
|
[
"re.match",
"fnmatch.fnmatch",
"os.path.join",
"os.listdir"
] |
[((464, 483), 'os.listdir', 'os.listdir', (['testdir'], {}), '(testdir)\n', (474, 483), False, 'import os\n'), ((496, 530), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['fname', '"""test-*.R"""'], {}), "(fname, 'test-*.R')\n", (511, 530), False, 'import fnmatch\n'), ((554, 579), 'os.path.join', 'path.join', (['testdir', 'fname'], {}), '(testdir, fname)\n', (563, 579), False, 'from os import path\n'), ((647, 685), 're.match', 're.match', (['"""#function:\\\\s+(\\\\w+)"""', 'line'], {}), "('#function:\\\\s+(\\\\w+)', line)\n", (655, 685), False, 'import re\n')]
|
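For clarity, this is the comment convention the re.match(r'#function:\s+(\w+)', line) call above expects inside each test-*.R file; the R lines below are invented for illustration.

import re

# Lines like these near the top of a test-*.R file mark extra functions the model covers.
example_lines = [
    "#function: normal_lpdf",
    "#function: bernoulli_logit_rng",
    "test_that('model compiles', {",
]

covered = set()
for line in example_lines:
    m = re.match(r'#function:\s+(\w+)', line)
    if m:
        covered.add(m.group(1))

print(sorted(covered))  # ['bernoulli_logit_rng', 'normal_lpdf']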
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.models.tools.module_helper import ModuleHelper
from lib.utils.tools.logger import Logger as Log
class ProjectionHead(nn.Module):
def __init__(self, dim_in, proj_dim=256, proj='convmlp', bn_type='torchsyncbn'):
super(ProjectionHead, self).__init__()
Log.info('proj_dim: {}'.format(proj_dim))
if proj == 'linear':
self.proj = nn.Conv2d(dim_in, proj_dim, kernel_size=1)
elif proj == 'convmlp':
self.proj = nn.Sequential(
nn.Conv2d(dim_in, dim_in, kernel_size=1),
ModuleHelper.BNReLU(dim_in, bn_type=bn_type),
nn.Conv2d(dim_in, proj_dim, kernel_size=1)
)
def forward(self, x):
return F.normalize(self.proj(x), p=2, dim=1)
|
[
"torch.nn.Conv2d",
"lib.models.tools.module_helper.ModuleHelper.BNReLU"
] |
[((444, 486), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'proj_dim'], {'kernel_size': '(1)'}), '(dim_in, proj_dim, kernel_size=1)\n', (453, 486), True, 'import torch.nn as nn\n'), ((574, 614), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'dim_in'], {'kernel_size': '(1)'}), '(dim_in, dim_in, kernel_size=1)\n', (583, 614), True, 'import torch.nn as nn\n'), ((632, 676), 'lib.models.tools.module_helper.ModuleHelper.BNReLU', 'ModuleHelper.BNReLU', (['dim_in'], {'bn_type': 'bn_type'}), '(dim_in, bn_type=bn_type)\n', (651, 676), False, 'from lib.models.tools.module_helper import ModuleHelper\n'), ((694, 736), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'proj_dim'], {'kernel_size': '(1)'}), '(dim_in, proj_dim, kernel_size=1)\n', (703, 736), True, 'import torch.nn as nn\n')]
|
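A self-contained sketch of the same projection-head idea using only stock torch layers; nn.BatchNorm2d + ReLU stands in for ModuleHelper.BNReLU, whose exact behaviour depends on bn_type, so this is an approximation rather than the library's implementation.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SimpleProjectionHead(nn.Module):
    def __init__(self, dim_in, proj_dim=256):
        super().__init__()
        self.proj = nn.Sequential(
            nn.Conv2d(dim_in, dim_in, kernel_size=1),
            nn.BatchNorm2d(dim_in),
            nn.ReLU(inplace=True),
            nn.Conv2d(dim_in, proj_dim, kernel_size=1),
        )

    def forward(self, x):
        # L2-normalise along the channel axis, matching the forward() above.
        return F.normalize(self.proj(x), p=2, dim=1)

emb = SimpleProjectionHead(512)(torch.randn(2, 512, 16, 16))
print(emb.shape, emb.norm(dim=1).mean())  # torch.Size([2, 256, 16, 16]), norms ~1.0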
from PyQt5 import QtWidgets
from appGUI.preferences.cncjob.CNCJobAdvOptPrefGroupUI import CNCJobAdvOptPrefGroupUI
from appGUI.preferences.cncjob.CNCJobOptPrefGroupUI import CNCJobOptPrefGroupUI
from appGUI.preferences.cncjob.CNCJobGenPrefGroupUI import CNCJobGenPrefGroupUI
from appGUI.preferences.cncjob.CNCJobEditorPrefGroupUI import CNCJobEditorPrefGroupUI
class CNCJobPreferencesUI(QtWidgets.QWidget):
def __init__(self, decimals, parent=None):
QtWidgets.QWidget.__init__(self, parent=parent)
self.layout = QtWidgets.QHBoxLayout()
self.setLayout(self.layout)
self.decimals = decimals
self.cncjob_gen_group = CNCJobGenPrefGroupUI(decimals=self.decimals)
self.cncjob_gen_group.setMinimumWidth(260)
self.cncjob_opt_group = CNCJobOptPrefGroupUI(decimals=self.decimals)
self.cncjob_opt_group.setMinimumWidth(260)
self.cncjob_adv_opt_group = CNCJobAdvOptPrefGroupUI(decimals=self.decimals)
self.cncjob_adv_opt_group.setMinimumWidth(260)
self.cncjob_editor_group = CNCJobEditorPrefGroupUI(decimals=self.decimals)
self.cncjob_editor_group.setMinimumWidth(260)
vlay = QtWidgets.QVBoxLayout()
vlay.addWidget(self.cncjob_opt_group)
vlay.addWidget(self.cncjob_adv_opt_group)
self.layout.addWidget(self.cncjob_gen_group)
self.layout.addLayout(vlay)
self.layout.addWidget(self.cncjob_editor_group)
self.layout.addStretch()
|
[
"appGUI.preferences.cncjob.CNCJobOptPrefGroupUI.CNCJobOptPrefGroupUI",
"PyQt5.QtWidgets.QHBoxLayout",
"appGUI.preferences.cncjob.CNCJobEditorPrefGroupUI.CNCJobEditorPrefGroupUI",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QWidget.__init__",
"appGUI.preferences.cncjob.CNCJobGenPrefGroupUI.CNCJobGenPrefGroupUI",
"appGUI.preferences.cncjob.CNCJobAdvOptPrefGroupUI.CNCJobAdvOptPrefGroupUI"
] |
[((465, 512), 'PyQt5.QtWidgets.QWidget.__init__', 'QtWidgets.QWidget.__init__', (['self'], {'parent': 'parent'}), '(self, parent=parent)\n', (491, 512), False, 'from PyQt5 import QtWidgets\n'), ((535, 558), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (556, 558), False, 'from PyQt5 import QtWidgets\n'), ((661, 705), 'appGUI.preferences.cncjob.CNCJobGenPrefGroupUI.CNCJobGenPrefGroupUI', 'CNCJobGenPrefGroupUI', ([], {'decimals': 'self.decimals'}), '(decimals=self.decimals)\n', (681, 705), False, 'from appGUI.preferences.cncjob.CNCJobGenPrefGroupUI import CNCJobGenPrefGroupUI\n'), ((790, 834), 'appGUI.preferences.cncjob.CNCJobOptPrefGroupUI.CNCJobOptPrefGroupUI', 'CNCJobOptPrefGroupUI', ([], {'decimals': 'self.decimals'}), '(decimals=self.decimals)\n', (810, 834), False, 'from appGUI.preferences.cncjob.CNCJobOptPrefGroupUI import CNCJobOptPrefGroupUI\n'), ((922, 969), 'appGUI.preferences.cncjob.CNCJobAdvOptPrefGroupUI.CNCJobAdvOptPrefGroupUI', 'CNCJobAdvOptPrefGroupUI', ([], {'decimals': 'self.decimals'}), '(decimals=self.decimals)\n', (945, 969), False, 'from appGUI.preferences.cncjob.CNCJobAdvOptPrefGroupUI import CNCJobAdvOptPrefGroupUI\n'), ((1061, 1108), 'appGUI.preferences.cncjob.CNCJobEditorPrefGroupUI.CNCJobEditorPrefGroupUI', 'CNCJobEditorPrefGroupUI', ([], {'decimals': 'self.decimals'}), '(decimals=self.decimals)\n', (1084, 1108), False, 'from appGUI.preferences.cncjob.CNCJobEditorPrefGroupUI import CNCJobEditorPrefGroupUI\n'), ((1179, 1202), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (1200, 1202), False, 'from PyQt5 import QtWidgets\n')]
|
from typing import List
import torch
from beamforming.mvdr import PSD, MVDR
from parameterized import parameterized, param
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import (
TestBaseMixin,
get_whitenoise,
get_spectrogram,
skipIfRocm,
)
class AutogradTestMixin(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
*,
nondet_tol: float = 0.0,
):
transform = transform.to(dtype=torch.float64, device=self.device)
# gradcheck and gradgradcheck only pass if the input tensors are of dtype `torch.double` or
# `torch.cdouble`, when the default eps and tolerance values are used.
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(
dtype=torch.cdouble if i.is_complex() else torch.double,
device=self.device)
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)
def test_psd(self):
transform = PSD()
waveform = get_whitenoise(sample_rate=8000, duration=0.05, n_channels=2)
spectrogram = get_spectrogram(waveform, n_fft=400)
self.assert_grad(transform, [spectrogram])
@parameterized.expand([
[True],
[False],
])
def test_psd_with_mask(self, multi_mask):
transform = PSD(multi_mask=multi_mask)
waveform = get_whitenoise(sample_rate=8000, duration=0.05, n_channels=2)
spectrogram = get_spectrogram(waveform, n_fft=400)
if multi_mask:
mask = torch.rand(spectrogram.shape[-3:])
else:
mask = torch.rand(spectrogram.shape[-2:])
self.assert_grad(transform, [spectrogram, mask])
@parameterized.expand([
param(solution="ref_channel"),
param(solution="stv_power"),
# evd will fail since the eigenvalues are not distinct
# param(solution="stv_evd"),
])
@skipIfRocm
def test_mvdr(self, solution):
transform = MVDR(solution=solution)
waveform = get_whitenoise(sample_rate=8000, duration=0.05, n_channels=2)
spectrogram = get_spectrogram(waveform, n_fft=400)
mask = torch.rand(spectrogram.shape[-2:])
self.assert_grad(transform, [spectrogram, mask])
|
[
"torch.autograd.gradcheck",
"torchaudio_unittest.common_utils.get_whitenoise",
"beamforming.mvdr.MVDR",
"parameterized.param",
"parameterized.parameterized.expand",
"beamforming.mvdr.PSD",
"torch.autograd.gradgradcheck",
"torchaudio_unittest.common_utils.get_spectrogram",
"torch.rand",
"torch.is_tensor"
] |
[((1434, 1473), 'parameterized.parameterized.expand', 'parameterized.expand', (['[[True], [False]]'], {}), '([[True], [False]])\n', (1454, 1473), False, 'from parameterized import parameterized, param\n'), ((1084, 1113), 'torch.autograd.gradcheck', 'gradcheck', (['transform', 'inputs_'], {}), '(transform, inputs_)\n', (1093, 1113), False, 'from torch.autograd import gradcheck, gradgradcheck\n'), ((1129, 1185), 'torch.autograd.gradgradcheck', 'gradgradcheck', (['transform', 'inputs_'], {'nondet_tol': 'nondet_tol'}), '(transform, inputs_, nondet_tol=nondet_tol)\n', (1142, 1185), False, 'from torch.autograd import gradcheck, gradgradcheck\n'), ((1231, 1236), 'beamforming.mvdr.PSD', 'PSD', ([], {}), '()\n', (1234, 1236), False, 'from beamforming.mvdr import PSD, MVDR\n'), ((1256, 1317), 'torchaudio_unittest.common_utils.get_whitenoise', 'get_whitenoise', ([], {'sample_rate': '(8000)', 'duration': '(0.05)', 'n_channels': '(2)'}), '(sample_rate=8000, duration=0.05, n_channels=2)\n', (1270, 1317), False, 'from torchaudio_unittest.common_utils import TestBaseMixin, get_whitenoise, get_spectrogram, skipIfRocm\n'), ((1340, 1376), 'torchaudio_unittest.common_utils.get_spectrogram', 'get_spectrogram', (['waveform'], {'n_fft': '(400)'}), '(waveform, n_fft=400)\n', (1355, 1376), False, 'from torchaudio_unittest.common_utils import TestBaseMixin, get_whitenoise, get_spectrogram, skipIfRocm\n'), ((1563, 1589), 'beamforming.mvdr.PSD', 'PSD', ([], {'multi_mask': 'multi_mask'}), '(multi_mask=multi_mask)\n', (1566, 1589), False, 'from beamforming.mvdr import PSD, MVDR\n'), ((1609, 1670), 'torchaudio_unittest.common_utils.get_whitenoise', 'get_whitenoise', ([], {'sample_rate': '(8000)', 'duration': '(0.05)', 'n_channels': '(2)'}), '(sample_rate=8000, duration=0.05, n_channels=2)\n', (1623, 1670), False, 'from torchaudio_unittest.common_utils import TestBaseMixin, get_whitenoise, get_spectrogram, skipIfRocm\n'), ((1693, 1729), 'torchaudio_unittest.common_utils.get_spectrogram', 'get_spectrogram', (['waveform'], {'n_fft': '(400)'}), '(waveform, n_fft=400)\n', (1708, 1729), False, 'from torchaudio_unittest.common_utils import TestBaseMixin, get_whitenoise, get_spectrogram, skipIfRocm\n'), ((2216, 2239), 'beamforming.mvdr.MVDR', 'MVDR', ([], {'solution': 'solution'}), '(solution=solution)\n', (2220, 2239), False, 'from beamforming.mvdr import PSD, MVDR\n'), ((2259, 2320), 'torchaudio_unittest.common_utils.get_whitenoise', 'get_whitenoise', ([], {'sample_rate': '(8000)', 'duration': '(0.05)', 'n_channels': '(2)'}), '(sample_rate=8000, duration=0.05, n_channels=2)\n', (2273, 2320), False, 'from torchaudio_unittest.common_utils import TestBaseMixin, get_whitenoise, get_spectrogram, skipIfRocm\n'), ((2343, 2379), 'torchaudio_unittest.common_utils.get_spectrogram', 'get_spectrogram', (['waveform'], {'n_fft': '(400)'}), '(waveform, n_fft=400)\n', (2358, 2379), False, 'from torchaudio_unittest.common_utils import TestBaseMixin, get_whitenoise, get_spectrogram, skipIfRocm\n'), ((2395, 2429), 'torch.rand', 'torch.rand', (['spectrogram.shape[-2:]'], {}), '(spectrogram.shape[-2:])\n', (2405, 2429), False, 'import torch\n'), ((837, 855), 'torch.is_tensor', 'torch.is_tensor', (['i'], {}), '(i)\n', (852, 855), False, 'import torch\n'), ((1772, 1806), 'torch.rand', 'torch.rand', (['spectrogram.shape[-3:]'], {}), '(spectrogram.shape[-3:])\n', (1782, 1806), False, 'import torch\n'), ((1840, 1874), 'torch.rand', 'torch.rand', (['spectrogram.shape[-2:]'], {}), '(spectrogram.shape[-2:])\n', (1850, 1874), False, 'import torch\n'), 
((1970, 1999), 'parameterized.param', 'param', ([], {'solution': '"""ref_channel"""'}), "(solution='ref_channel')\n", (1975, 1999), False, 'from parameterized import parameterized, param\n'), ((2009, 2036), 'parameterized.param', 'param', ([], {'solution': '"""stv_power"""'}), "(solution='stv_power')\n", (2014, 2036), False, 'from parameterized import parameterized, param\n')]
|
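A minimal, self-contained illustration of the double-precision requirement noted in assert_grad above; the tiny Conv1d transform is invented for the example and is not one of the beamforming modules under test.

import torch
from torch.autograd import gradcheck

transform = torch.nn.Conv1d(1, 1, kernel_size=3).to(torch.float64)
x = torch.randn(1, 1, 8, dtype=torch.float64, requires_grad=True)

# gradcheck compares analytical and numerical Jacobians; with float32 inputs the
# default eps and tolerances would frequently report spurious failures.
assert gradcheck(transform, (x,))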
import numpy as np
from deepspeaker.audio_ds import read_mfcc
from deepspeaker.batcher import sample_from_mfcc
from deepspeaker.constants import SAMPLE_RATE, NUM_FRAMES, WIN_LENGTH
from deepspeaker.conv_models import DeepSpeakerModel
import tensorflow as tf
def build_model(ckpt_path):
model = DeepSpeakerModel()
model.m.load_weights(ckpt_path, by_name=True)
return model
def predict_embedding(model, audio, sr=SAMPLE_RATE, win_length=WIN_LENGTH, cuda=True):
mfcc = sample_from_mfcc(read_mfcc(audio, sr, win_length), NUM_FRAMES)
# Call the model to get the embeddings of shape (1, 512) for each file.
gpus = tf.config.experimental.list_physical_devices('GPU') if cuda else 0
if gpus:
try:
tf.config.experimental.set_visible_devices(gpus[0], 'GPU')
except RuntimeError as e:
print(e)
with tf.device('/device:GPU:0'):
embedding = model.m.predict(np.expand_dims(mfcc, axis=0)) # Female
else:
with tf.device('device:cpu:0'):
embedding = model.m.predict(np.expand_dims(mfcc, axis=0)) # Female
return embedding
|
[
"deepspeaker.audio_ds.read_mfcc",
"tensorflow.config.experimental.set_visible_devices",
"tensorflow.device",
"numpy.expand_dims",
"deepspeaker.conv_models.DeepSpeakerModel",
"tensorflow.config.experimental.list_physical_devices"
] |
[((299, 317), 'deepspeaker.conv_models.DeepSpeakerModel', 'DeepSpeakerModel', ([], {}), '()\n', (315, 317), False, 'from deepspeaker.conv_models import DeepSpeakerModel\n'), ((501, 533), 'deepspeaker.audio_ds.read_mfcc', 'read_mfcc', (['audio', 'sr', 'win_length'], {}), '(audio, sr, win_length)\n', (510, 533), False, 'from deepspeaker.audio_ds import read_mfcc\n'), ((634, 685), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (678, 685), True, 'import tensorflow as tf\n'), ((739, 797), 'tensorflow.config.experimental.set_visible_devices', 'tf.config.experimental.set_visible_devices', (['gpus[0]', '"""GPU"""'], {}), "(gpus[0], 'GPU')\n", (781, 797), True, 'import tensorflow as tf\n'), ((866, 892), 'tensorflow.device', 'tf.device', (['"""/device:GPU:0"""'], {}), "('/device:GPU:0')\n", (875, 892), True, 'import tensorflow as tf\n'), ((996, 1021), 'tensorflow.device', 'tf.device', (['"""device:cpu:0"""'], {}), "('device:cpu:0')\n", (1005, 1021), True, 'import tensorflow as tf\n'), ((934, 962), 'numpy.expand_dims', 'np.expand_dims', (['mfcc'], {'axis': '(0)'}), '(mfcc, axis=0)\n', (948, 962), True, 'import numpy as np\n'), ((1063, 1091), 'numpy.expand_dims', 'np.expand_dims', (['mfcc'], {'axis': '(0)'}), '(mfcc, axis=0)\n', (1077, 1091), True, 'import numpy as np\n')]
|
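predict_embedding returns a (1, 512) array per file (per the comment above); a generic cosine-similarity comparison, which is the usual way such speaker embeddings are consumed, might look like the sketch below. The helper and the 0.7 threshold are illustrative, not part of deepspeaker.

import numpy as np

def cosine_similarity(emb_a, emb_b):
    """Cosine similarity between two (1, 512) speaker embeddings."""
    a, b = emb_a.ravel(), emb_b.ravel()
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

# Typical usage (paths and threshold are placeholders):
# score = cosine_similarity(predict_embedding(model, "a.wav"), predict_embedding(model, "b.wav"))
# same_speaker = score > 0.7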
import numpy as np
import scipy.stats as st
import cv2
import time
import os
import glob
def gauss_kernel(size=21, sigma=3, inchannels=3, outchannels=3):
interval = (2 * sigma + 1.0) / size
x = np.linspace(-sigma-interval/2,sigma+interval/2,size+1)
ker1d = np.diff(st.norm.cdf(x))
kernel_raw = np.sqrt(np.outer(ker1d, ker1d))
kernel = kernel_raw / kernel_raw.sum()
out_filter = np.array(kernel, dtype=np.float32)
out_filter = out_filter.reshape((1, 1, size, size))
out_filter = np.tile(out_filter, [outchannels, inchannels, 1, 1])
return out_filter
def np_free_form_mask(maxVertex, maxLength, maxBrushWidth, maxAngle, h, w):
mask = np.zeros((h, w, 1), np.float32)
numVertex = np.random.randint(maxVertex + 1)
startY = np.random.randint(h)
startX = np.random.randint(w)
brushWidth = 0
for i in range(numVertex):
angle = np.random.randint(maxAngle + 1)
angle = angle / 360.0 * 2 * np.pi
if i % 2 == 0:
angle = 2 * np.pi - angle
length = np.random.randint(maxLength + 1)
brushWidth = np.random.randint(10, maxBrushWidth + 1) // 2 * 2
nextY = startY + length * np.cos(angle)
nextX = startX + length * np.sin(angle)
        nextY = np.maximum(np.minimum(nextY, h - 1), 0).astype(int)  # np.int was removed from NumPy; use the builtin int
        nextX = np.maximum(np.minimum(nextX, w - 1), 0).astype(int)
cv2.line(mask, (startY, startX), (nextY, nextX), 1, brushWidth)
cv2.circle(mask, (startY, startX), brushWidth // 2, 2)
startY, startX = nextY, nextX
cv2.circle(mask, (startY, startX), brushWidth // 2, 2)
return mask
def generate_rect_mask(im_size, mask_size, margin=8, rand_mask=True):
mask = np.zeros((im_size[0], im_size[1])).astype(np.float32)
if rand_mask:
sz0, sz1 = mask_size[0], mask_size[1]
of0 = np.random.randint(margin, im_size[0] - sz0 - margin)
of1 = np.random.randint(margin, im_size[1] - sz1 - margin)
else:
sz0, sz1 = mask_size[0], mask_size[1]
of0 = (im_size[0] - sz0) // 2
of1 = (im_size[1] - sz1) // 2
mask[of0:of0+sz0, of1:of1+sz1] = 1
mask = np.expand_dims(mask, axis=0)
mask = np.expand_dims(mask, axis=0)
rect = np.array([[of0, sz0, of1, sz1]], dtype=int)
return mask, rect
def generate_stroke_mask(im_size, parts=10, maxVertex=20, maxLength=100, maxBrushWidth=24, maxAngle=360):
mask = np.zeros((im_size[0], im_size[1], 1), dtype=np.float32)
for i in range(parts):
mask = mask + np_free_form_mask(maxVertex, maxLength, maxBrushWidth, maxAngle, im_size[0], im_size[1])
mask = np.minimum(mask, 1.0)
mask = np.transpose(mask, [2, 0, 1])
mask = np.expand_dims(mask, 0)
return mask
def generate_mask(type, im_size, mask_size):
if type == 'rect':
return generate_rect_mask(im_size, mask_size)
else:
return generate_stroke_mask(im_size), None
def getLatest(folder_path):
files = glob.glob(folder_path)
    # Sort by raw creation timestamps; time.ctime() strings would sort lexicographically, not chronologically.
    file_times = list(map(os.path.getctime, files))
return files[sorted(range(len(file_times)), key=lambda x: file_times[x])[-1]]
def get_file_mask(filepath):
mask = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
mask = np.minimum(mask, 1.0)
mask=np.expand_dims(mask, axis=0)
mask=np.expand_dims(mask, axis=0)
return mask
|
[
"cv2.line",
"cv2.circle",
"numpy.minimum",
"numpy.outer",
"numpy.zeros",
"numpy.expand_dims",
"numpy.transpose",
"os.path.getctime",
"scipy.stats.norm.cdf",
"cv2.imread",
"numpy.random.randint",
"numpy.array",
"numpy.tile",
"numpy.linspace",
"glob.glob",
"numpy.cos",
"numpy.sin"
] |
[((203, 269), 'numpy.linspace', 'np.linspace', (['(-sigma - interval / 2)', '(sigma + interval / 2)', '(size + 1)'], {}), '(-sigma - interval / 2, sigma + interval / 2, size + 1)\n', (214, 269), True, 'import numpy as np\n'), ((403, 437), 'numpy.array', 'np.array', (['kernel'], {'dtype': 'np.float32'}), '(kernel, dtype=np.float32)\n', (411, 437), True, 'import numpy as np\n'), ((511, 563), 'numpy.tile', 'np.tile', (['out_filter', '[outchannels, inchannels, 1, 1]'], {}), '(out_filter, [outchannels, inchannels, 1, 1])\n', (518, 563), True, 'import numpy as np\n'), ((675, 706), 'numpy.zeros', 'np.zeros', (['(h, w, 1)', 'np.float32'], {}), '((h, w, 1), np.float32)\n', (683, 706), True, 'import numpy as np\n'), ((723, 755), 'numpy.random.randint', 'np.random.randint', (['(maxVertex + 1)'], {}), '(maxVertex + 1)\n', (740, 755), True, 'import numpy as np\n'), ((769, 789), 'numpy.random.randint', 'np.random.randint', (['h'], {}), '(h)\n', (786, 789), True, 'import numpy as np\n'), ((803, 823), 'numpy.random.randint', 'np.random.randint', (['w'], {}), '(w)\n', (820, 823), True, 'import numpy as np\n'), ((1564, 1618), 'cv2.circle', 'cv2.circle', (['mask', '(startY, startX)', '(brushWidth // 2)', '(2)'], {}), '(mask, (startY, startX), brushWidth // 2, 2)\n', (1574, 1618), False, 'import cv2\n'), ((2152, 2180), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (2166, 2180), True, 'import numpy as np\n'), ((2192, 2220), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (2206, 2220), True, 'import numpy as np\n'), ((2232, 2275), 'numpy.array', 'np.array', (['[[of0, sz0, of1, sz1]]'], {'dtype': 'int'}), '([[of0, sz0, of1, sz1]], dtype=int)\n', (2240, 2275), True, 'import numpy as np\n'), ((2417, 2472), 'numpy.zeros', 'np.zeros', (['(im_size[0], im_size[1], 1)'], {'dtype': 'np.float32'}), '((im_size[0], im_size[1], 1), dtype=np.float32)\n', (2425, 2472), True, 'import numpy as np\n'), ((2622, 2643), 'numpy.minimum', 'np.minimum', (['mask', '(1.0)'], {}), '(mask, 1.0)\n', (2632, 2643), True, 'import numpy as np\n'), ((2655, 2684), 'numpy.transpose', 'np.transpose', (['mask', '[2, 0, 1]'], {}), '(mask, [2, 0, 1])\n', (2667, 2684), True, 'import numpy as np\n'), ((2696, 2719), 'numpy.expand_dims', 'np.expand_dims', (['mask', '(0)'], {}), '(mask, 0)\n', (2710, 2719), True, 'import numpy as np\n'), ((2963, 2985), 'glob.glob', 'glob.glob', (['folder_path'], {}), '(folder_path)\n', (2972, 2985), False, 'import glob\n'), ((3186, 3228), 'cv2.imread', 'cv2.imread', (['filepath', 'cv2.IMREAD_GRAYSCALE'], {}), '(filepath, cv2.IMREAD_GRAYSCALE)\n', (3196, 3228), False, 'import cv2\n'), ((3240, 3261), 'numpy.minimum', 'np.minimum', (['mask', '(1.0)'], {}), '(mask, 1.0)\n', (3250, 3261), True, 'import numpy as np\n'), ((3271, 3299), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (3285, 3299), True, 'import numpy as np\n'), ((3309, 3337), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (3323, 3337), True, 'import numpy as np\n'), ((278, 292), 'scipy.stats.norm.cdf', 'st.norm.cdf', (['x'], {}), '(x)\n', (289, 292), True, 'import scipy.stats as st\n'), ((319, 341), 'numpy.outer', 'np.outer', (['ker1d', 'ker1d'], {}), '(ker1d, ker1d)\n', (327, 341), True, 'import numpy as np\n'), ((890, 921), 'numpy.random.randint', 'np.random.randint', (['(maxAngle + 1)'], {}), '(maxAngle + 1)\n', (907, 921), True, 'import numpy as np\n'), ((1042, 1074), 'numpy.random.randint', 
'np.random.randint', (['(maxLength + 1)'], {}), '(maxLength + 1)\n', (1059, 1074), True, 'import numpy as np\n'), ((1394, 1457), 'cv2.line', 'cv2.line', (['mask', '(startY, startX)', '(nextY, nextX)', '(1)', 'brushWidth'], {}), '(mask, (startY, startX), (nextY, nextX), 1, brushWidth)\n', (1402, 1457), False, 'import cv2\n'), ((1466, 1520), 'cv2.circle', 'cv2.circle', (['mask', '(startY, startX)', '(brushWidth // 2)', '(2)'], {}), '(mask, (startY, startX), brushWidth // 2, 2)\n', (1476, 1520), False, 'import cv2\n'), ((1850, 1902), 'numpy.random.randint', 'np.random.randint', (['margin', '(im_size[0] - sz0 - margin)'], {}), '(margin, im_size[0] - sz0 - margin)\n', (1867, 1902), True, 'import numpy as np\n'), ((1917, 1969), 'numpy.random.randint', 'np.random.randint', (['margin', '(im_size[1] - sz1 - margin)'], {}), '(margin, im_size[1] - sz1 - margin)\n', (1934, 1969), True, 'import numpy as np\n'), ((1718, 1752), 'numpy.zeros', 'np.zeros', (['(im_size[0], im_size[1])'], {}), '((im_size[0], im_size[1]))\n', (1726, 1752), True, 'import numpy as np\n'), ((1096, 1136), 'numpy.random.randint', 'np.random.randint', (['(10)', '(maxBrushWidth + 1)'], {}), '(10, maxBrushWidth + 1)\n', (1113, 1136), True, 'import numpy as np\n'), ((1180, 1193), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1186, 1193), True, 'import numpy as np\n'), ((1228, 1241), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1234, 1241), True, 'import numpy as np\n'), ((1270, 1294), 'numpy.minimum', 'np.minimum', (['nextY', '(h - 1)'], {}), '(nextY, h - 1)\n', (1280, 1294), True, 'import numpy as np\n'), ((1341, 1365), 'numpy.minimum', 'np.minimum', (['nextX', '(w - 1)'], {}), '(nextX, w - 1)\n', (1351, 1365), True, 'import numpy as np\n'), ((3033, 3052), 'os.path.getctime', 'os.path.getctime', (['x'], {}), '(x)\n', (3049, 3052), False, 'import os\n')]
|
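A usage sketch for the two mask generators above, assuming the helpers are importable from a module (the name mask_utils is hypothetical); it only exercises the shapes and value ranges the code above produces.

from mask_utils import generate_rect_mask, generate_stroke_mask  # hypothetical module name

# Rectangular hole: mask has shape (1, 1, H, W); rect is [[row_offset, height, col_offset, width]].
mask, rect = generate_rect_mask(im_size=(256, 256), mask_size=(64, 64))
print(mask.shape, rect)

# Free-form strokes: values clipped to [0, 1], same (1, 1, H, W) layout.
stroke = generate_stroke_mask(im_size=(256, 256))
print(stroke.shape, float(stroke.max()))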
"""Support for SimpliSafe locks."""
import logging
from simplipy.errors import SimplipyError
from simplipy.lock import LockStates
from simplipy.websocket import EVENT_LOCK_LOCKED, EVENT_LOCK_UNLOCKED
from homeassistant.components.lock import LockDevice
from homeassistant.core import callback
from . import SimpliSafeEntity
from .const import DATA_CLIENT, DOMAIN
_LOGGER = logging.getLogger(__name__)
ATTR_LOCK_LOW_BATTERY = "lock_low_battery"
ATTR_JAMMED = "jammed"
ATTR_PIN_PAD_LOW_BATTERY = "pin_pad_low_battery"
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up SimpliSafe locks based on a config entry."""
simplisafe = hass.data[DOMAIN][DATA_CLIENT][entry.entry_id]
async_add_entities(
[
SimpliSafeLock(simplisafe, system, lock)
for system in simplisafe.systems.values()
for lock in system.locks.values()
]
)
class SimpliSafeLock(SimpliSafeEntity, LockDevice):
"""Define a SimpliSafe lock."""
def __init__(self, simplisafe, system, lock):
"""Initialize."""
super().__init__(simplisafe, system, lock.name, serial=lock.serial)
self._is_locked = False
self._lock = lock
for event_type in (EVENT_LOCK_LOCKED, EVENT_LOCK_UNLOCKED):
self.websocket_events_to_listen_for.append(event_type)
@property
def is_locked(self):
"""Return true if the lock is locked."""
return self._is_locked
async def async_lock(self, **kwargs):
"""Lock the lock."""
try:
await self._lock.lock()
except SimplipyError as err:
_LOGGER.error('Error while locking "%s": %s', self._lock.name, err)
return
self._is_locked = True
async def async_unlock(self, **kwargs):
"""Unlock the lock."""
try:
await self._lock.unlock()
except SimplipyError as err:
_LOGGER.error('Error while unlocking "%s": %s', self._lock.name, err)
return
self._is_locked = False
@callback
def async_update_from_rest_api(self):
"""Update the entity with the provided REST API data."""
self._attrs.update(
{
ATTR_LOCK_LOW_BATTERY: self._lock.lock_low_battery,
ATTR_JAMMED: self._lock.state == LockStates.jammed,
ATTR_PIN_PAD_LOW_BATTERY: self._lock.pin_pad_low_battery,
}
)
@callback
def async_update_from_websocket_event(self, event):
"""Update the entity with the provided websocket event data."""
if event.event_type == EVENT_LOCK_LOCKED:
self._is_locked = True
else:
self._is_locked = False
|
[
"logging.getLogger"
] |
[((377, 404), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (394, 404), False, 'import logging\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2020-11-30 12:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('time_based', '0038_timebasedactivity_preparation'),
]
operations = [
migrations.RenameField(
model_name='timecontribution',
old_name='end',
new_name='old_end',
),
migrations.RenameField(
model_name='timecontribution',
old_name='start',
new_name='old_start',
),
migrations.AlterField(
model_name='timebasedactivity',
name='preparation',
field=models.DurationField(blank=True, null=True, verbose_name='Preparation time'),
),
]
|
[
"django.db.models.DurationField",
"django.db.migrations.RenameField"
] |
[((315, 408), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""timecontribution"""', 'old_name': '"""end"""', 'new_name': '"""old_end"""'}), "(model_name='timecontribution', old_name='end',\n new_name='old_end')\n", (337, 408), False, 'from django.db import migrations, models\n'), ((461, 558), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""timecontribution"""', 'old_name': '"""start"""', 'new_name': '"""old_start"""'}), "(model_name='timecontribution', old_name='start',\n new_name='old_start')\n", (483, 558), False, 'from django.db import migrations, models\n'), ((728, 804), 'django.db.models.DurationField', 'models.DurationField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Preparation time"""'}), "(blank=True, null=True, verbose_name='Preparation time')\n", (748, 804), False, 'from django.db import migrations, models\n')]
|
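The rename above keeps the original values reachable as old_start/old_end on the historical model; a follow-up data migration in this style (hypothetical, not part of this app's actual migration history) is how those values would typically be read back:

from django.db import migrations

def forwards(apps, schema_editor):
    # Use the historical model so the migration stays valid as the model evolves.
    TimeContribution = apps.get_model('time_based', 'timecontribution')
    for contribution in TimeContribution.objects.all():
        # old_start / old_end now hold what used to live in start / end.
        _ = (contribution.old_start, contribution.old_end)

class Migration(migrations.Migration):
    dependencies = [('time_based', '0039_previous_migration')]  # hypothetical dependency
    operations = [migrations.RunPython(forwards, migrations.RunPython.noop)]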
# GenerateSpectraPages.py
import argparse
import os
import time
from astropy.coordinates import SkyCoord
from astropy.io import ascii, votable
from astropy.io.votable import parse_single_table, from_table, writeto
from astropy.table import Column, Table
import astropy.units as u
import numpy as np
def parseargs():
"""
Parse the command line arguments
:return: An args map with the parsed arguments
"""
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Produce preview pages for a set of spectra")
parser.add_argument("-s", "--sbid", help="The id of the ASKAP scheduling block",
type=int, required=True)
parser.add_argument("-g", "--good", help="The sigma threshold for spectra to be included in the detections.html page",
type=float, default=3.0)
parser.add_argument("-b", "--best", help="The sigma threshold for spectra to be included in the best.html page",
type=float, default=5.0)
parser.add_argument("-p", "--parent", help="The parent folder for the processing, will default to sbnnn/ where nnn is the sbid.",
required=False)
args = parser.parse_args()
return args
def output_header(f, title):
f.write('<!doctype html>\n<html lang="en">\n<head>\n<title>{}</title>'.format(title))
with open('style.html') as style:
f.write(style.read())
f.write('\n</head>\n<body>')
f.write('\n<div class="container-fluid">')
f.write('\n<h1 align="middle">{}</h1>'.format(title))
return
def output_location_plots(f, source_map='figures/source_loc.png'):
f.write('\n<div class="row px-3" id="maps">')
f.write('\n<div class="col-md-auto"><h2 class="d-inline font-weight-light text-center text-lg-left mt-4 mb-0">Location</hs></div>')
f.write('\n<div class="col-md-auto">')
f.write('\nField Location')
f.write('\n<a href="figures/field_loc.png" class="d-block mb-4 h-100" data-lightbox="maps">')
f.write('\n<img class="img-fluid img-thumbnail" style="height: 180px" src="figures/field_loc.png" alt="Map of the location of the field.">')
f.write('\n</a>\n</div>')
f.write('\n<div class="col-md-auto">')
has_mw_loc_plot = os.path.exists(os.path.dirname(f.name)+'/figures/source_loc_mw.png')
f.write('\n{}Absorption Locations'.format('Magellanic ' if has_mw_loc_plot else ''))
f.write('\n<a href="{}" class="d-block mb-4 h-100" data-lightbox="maps">'.format(source_map))
f.write('\n<img class="img-fluid img-thumbnail" style="height: 180px" src="{}" alt="Map of the location of the sources.">'.format(source_map))
f.write('\n</a>\n</div>')
if has_mw_loc_plot:
f.write('\n<div class="col-md-auto">')
f.write('\nMilky Way Absorption Locations')
f.write('\n<a href="{}" class="d-block mb-4 h-100" data-lightbox="maps">'.format('figures/source_loc_mw.png'))
f.write('\n<img class="img-fluid img-thumbnail" style="height: 180px" src="{}" alt="Map of the location of the Milky Way sources.">'.format('figures/source_loc_mw.png'))
f.write('\n</a>\n</div>')
print(os.path.dirname(f.name)+'/figures/long_vel.png')
if os.path.exists(os.path.dirname(f.name)+'/figures/long_vel.png'):
f.write('\n<div class="col-md-auto">')
f.write('\n<a href="figures/long_vel.png" class="d-block mb-4 h-100" data-lightbox="maps">')
f.write('\n<img class="img-fluid img-thumbnail" style="height: 180px" src="figures/long_vel.png" alt="Longitude-velocity plot of the spectra.">')
f.write('\n</a>\n</div>')
f.write('\n</div>')
def output_block_title(f, rating, first, count):
if not first:
f.write('\n\n</div><br/>\n')
spec = 'spectrum' if count == 1 else 'spectra'
title = '{} Rating {} {}'.format(count, rating, spec) if rating else '{} Missed {} (with closest source)'.format(count, spec)
f.write('\n<div>')
f.write('\n<div class="col-9 d-inline"><h2 class="d-inline font-weight-light text-center text-lg-left mt-4 mb-0">{}</h2></div>'.format(title))
f.write('\n<div class="col-3 pull-right d-inline"><a class="btn btn-primary" data-toggle="collapse" href="#spectra{0}" role="button" aria-expanded="false" aria-controls="spectra{0}" style="font-size: x-small;">Hide/Show</a></div>'.format(rating))
f.write('\n</div>')
f.write('\n<div class="row text-center text-lg-left collapse show" id="spectra{}">'.format(rating))
def output_img(f, comp_name, rating, id, comment, combined=False):
zoom_file_pattern = 'figures/{0}_combined.png' if combined else 'figures/{0}_spec_zoom.png'
zoom_filename = zoom_file_pattern.format(comp_name)
file_pattern = 'figures/{0}_combined.png' if combined else 'figures/{0}_spec.png'
filename = file_pattern.format(comp_name)
f.write('\n<div class="col-lg-3 col-md-4 col-6 px-2">')
f.write('<figure class="figure d-block">')
f.write('\n<a href="{0}" class="mb-4" data-lightbox="rating{1}">'.format(filename, rating))
f.write('\n<img class="img-fluid img-thumbnail" ')
f.write('src="{0}" alt="Zoomed preview of spectrum at {1}">'.format(zoom_filename, comp_name))
f.write('\n</a>')
f.write('<figcaption class="figure-caption text-right">Source #{} {} {}</figcaption>'.format(id, comp_name, comment))
f.write('\n</figure></div>')
return
def output_non_zoom_img(f, comp_name, rating, id):
file_pattern = 'figures/{0}_spec.png'
filename = file_pattern.format(comp_name)
f.write('\n<div class="col-lg-3 col-md-4 col-6 px-2">')
f.write('<figure class="figure d-block">')
f.write('\n<a href="{0}" class="d-block mb-4 h-100" data-lightbox="rating{1}">'.format(filename, rating))
f.write('\n<img class="img-fluid img-thumbnail" ')
f.write('src="{0}" alt="Preview of spectrum at {0}">'.format(filename))
f.write('\n</a>')
f.write('<figcaption class="figure-caption text-right">Source #{} {}</figcaption>'.format(id, comp_name))
f.write('\n</figure></div>')
return
def output_footer(f):
f.write('\n\n</div>\n</div>\n</body>\n</html>')
return
def output_j19_img(f, gaskap_name, j19_name, rating, sep=None):
name_text = gaskap_name
if sep:
name_text += ' at {:.1f} arcsec'.format(sep)
f.write('\n<div class="col-4">')
f.write('\n<a href="spectra/{0}_spec.png" class="d-block mb-4 h-100" data-lightbox="rating{1}">'.format(gaskap_name, rating))
f.write('\n<img class="img-fluid img-thumbnail" ')
f.write('src="spectra/{0}_spec_zoom.png" alt="Zoomed preview of spectrum at {0}">'.format(gaskap_name))
f.write('\n{0}</a>\n</div>'.format(name_text))
f.write('\n<div class="col-8">')
j19_filename = '../jameson2019figset2/{}_lr.jpg'.format(j19_name)
f.write('\n<a href="{0}" class="d-block mb-4 h-100" data-lightbox="rating{1}">'.format(j19_filename, rating))
f.write('\n<img class="img-fluid img-thumbnail" ')
f.write('src="{0}" alt="Zoomed preview of spectrum at {0}">'.format(j19_filename))
f.write('\n</a>\n</div>')
return
def output_spectra(sbid, table, title, filename, threshold=None, has_other_abs=False, has_mw_abs=False,
verbose=False, source_map=None, max_noise=None):
print (title, filename)
with open(filename, 'w') as f:
output_header(f, title)
if source_map:
output_location_plots(f, source_map=source_map)
else:
output_location_plots(f)
for rating in 'ABCDEF':
targets = table[table['rating']==rating]
if max_noise:
targets = targets[targets['sd_cont'] < max_noise]
if has_other_abs:
targets = targets[targets['has_other_abs'] == 1]
elif has_mw_abs:
targets = targets[targets['has_mw_abs'] == 1]
elif threshold:
targets = targets[(1-targets['min_opacity'])/targets['sd_cont'] > threshold]
sort_order = targets.argsort(['comp_name'])
sorted_targets = targets[sort_order]
comp_names = sorted_targets['comp_name']
ids = sorted_targets['id']
maj_axes = sorted_targets['semi_maj_axis']*2
min_axes = sorted_targets['semi_min_axis']*2
fluxes_int = sorted_targets['flux_int']
print('Rating {} has {} spectra'.format(rating, len(comp_names)))
if verbose:
print (comp_names)
output_block_title(f, rating, rating=='A', len(comp_names))
for idx, name in enumerate(comp_names):
comment = '{:.0f}x{:.0f}" {:.0f} mJy'.format(maj_axes[idx], min_axes[idx], fluxes_int[idx])
output_img(f, name, rating, ids[idx], comment, combined=True)
output_footer(f)
def output_listed_spectra(sbid, table, title, filename, comp_names_list, verbose=False, source_map=None, zoomed=True):
print (title, filename)
with open(filename, 'w') as f:
output_header(f, title)
if source_map:
output_location_plots(f, source_map=source_map)
else:
output_location_plots(f)
for rating in 'ABCDEF':
targets = table[table['rating']==rating]
targets = targets[np.in1d(targets['comp_name'], comp_names_list)]
sort_order = targets.argsort(['comp_name'])
sorted_targets = targets[sort_order]
comp_names = sorted_targets['comp_name']
ids = sorted_targets['id']
maj_axes = sorted_targets['semi_maj_axis']*2
min_axes = sorted_targets['semi_min_axis']*2
fluxes_int = sorted_targets['flux_int']
print('Rating {} has {} spectra'.format(rating, len(comp_names)))
if verbose:
print (comp_names)
output_block_title(f, rating, rating=='A', len(comp_names))
for idx, name in enumerate(comp_names):
comment = '{:.0f}x{:.0f}" {:.0f} mJy'.format(maj_axes[idx], min_axes[idx], fluxes_int[idx])
if zoomed:
output_img(f, name, rating, ids[idx], comment, combined=True)
else:
output_non_zoom_img(f, name, rating, ids[idx])
output_footer(f)
def output_diff_sigma_spectra(sbid, table, title, filename, verbose=False, source_map=None, zoomed=True):
print (title, filename)
with open(filename, 'w') as f:
output_header(f, title)
if source_map:
output_location_plots(f, source_map=source_map)
else:
output_location_plots(f)
sigma_name_map = {2.8: ['J005518-714450', 'J010401-720206', 'J005116-734000', 'J010431-720726', 'J011157-734129', 'J010532-721331', 'J002620-743741'],
2.7:['J011332-740758', 'J003037-742903', 'J013218-715348', 'J005448-725353', 'J010556-714607', 'J012924-733153', 'J003208-735038', 'J012037-703843', 'J004306-732828'],
2.6:['J011134-711414', 'J005715-704046', 'J003936-742018', 'J002411-735717', 'J012306-695600', 'J005014-730326', 'J002222-742825', 'J010932-713453'],
2.5:['J014924-730231', 'J012945-701803', 'J005141-725545', 'J002826-703501', 'J002034-705526'],
3: ['J010532-721331', 'J005448-725353', 'J010556-714607', 'J005715-704046']}
for k,v in sigma_name_map.items():
#v = sigma_name_map[k]
print (k, v)
comp_names_list = v
if k < 2.8:
f.write('\n</div>')
if k < 3:
f.write('\n<h2>Spectra included at 2+ channels of {} sigma cutoff</h2>'.format(k))
else:
f.write('\n</div>\n<h2>Spectra included at 3+ channels of 2.5 sigma cutoff</h2>')
first = True
# TODO: Switch to use source lists
for rating in 'ABCDEF':
targets = table[table['rating']==rating]
targets = targets[np.in1d(targets['comp_name'], comp_names_list)]
sort_order = targets.argsort(['comp_name'])
sorted_targets = targets[sort_order]
comp_names = sorted_targets['comp_name']
ids = sorted_targets['id']
maj_axes = sorted_targets['semi_maj_axis']*2
min_axes = sorted_targets['semi_min_axis']*2
fluxes_int = sorted_targets['flux_int']
print('Rating {} has {} spectra'.format(rating, len(comp_names)))
if len(comp_names) == 0:
continue
if verbose:
print (comp_names)
output_block_title(f, rating, first, len(comp_names))
first = False
for idx, name in enumerate(comp_names):
comment = '{:.0f}x{:.0f}" {:.0f} mJy'.format(maj_axes[idx], min_axes[idx], fluxes_int[idx])
if zoomed:
output_img(f, name, rating, ids[idx], comment, combined=True)
else:
output_non_zoom_img(f, name, rating, ids[idx])
output_footer(f)
def find_j19_matches(gaskap_table, no_match_cat=None):
print ('\nCross-matching with Jameson et al 2019', no_match_cat)
j19_table = ascii.read('jameson2019.csv', format='csv')
col_index = Column(name='index', data=1+np.arange(len(j19_table)))
j19_table.add_column(col_index)
coo_j19 = SkyCoord(j19_table['ra']*u.deg, j19_table['dec']*u.deg)
coo_gaskap = SkyCoord(gaskap_table['ra'], gaskap_table['dec'])
idx_j19, d2d_j19, d3d_j19 = coo_gaskap.match_to_catalog_sky(coo_j19)
matched = d2d_j19 <= 18.5*u.arcsec # This cutoff allows for the widest separation without adding duplicates
matched_j19_idx = idx_j19[matched]
un_matched_j19_idx = [i for i in np.arange(len(j19_table)) if i not in matched_j19_idx]
j19_unmatched = j19_table[un_matched_j19_idx]
print ("Found {} sources in Jameson et al 2019 not in GASKAP data.".format(len(j19_unmatched)))
coo_j19_unm = SkyCoord(j19_unmatched['ra']*u.deg, j19_unmatched['dec']*u.deg)
idx_gaskap, d2d_gaskap, d3d_gaskap = coo_j19_unm.match_to_catalog_sky(coo_gaskap)
close_gaskap_comp_names = gaskap_table[idx_gaskap]['comp_name']
col_closest = Column(name='closest_gaskap', data=close_gaskap_comp_names)
col_gaskap_ra = Column(name='gaskap_ra', data=gaskap_table[idx_gaskap]['ra'])
col_gaskap_dec = Column(name='gaskap_dec', data=gaskap_table[idx_gaskap]['dec'])
col_sep = Column(name='gaskap_sep', data=d2d_gaskap.to(u.arcsec))
j19_unmatched.add_columns([col_closest, col_gaskap_ra, col_gaskap_dec, col_sep])
if no_match_cat:
print (j19_unmatched)
j19_unm_vo_table = from_table(j19_unmatched)
writeto(j19_unm_vo_table, no_match_cat)
return j19_table, idx_j19, d2d_j19, matched, j19_unmatched
def output_j19_comparison(sbid, gaskap_table, j19_table, idx_j19, d2d_j19, j19_match, j19_unmatched, title, filename, match_cat=None):
print (title, filename)
gaskap_targets = gaskap_table[j19_match]
j19_targets = j19_table[idx_j19]
j19_targets = j19_targets[j19_match]
sort_order = gaskap_targets.argsort(['comp_name'])
#comp_names = sorted(targets['comp_name'])
gaskap_tgt_ordered = gaskap_targets[sort_order]
j19_tgt_ordered = j19_targets[sort_order]
with open(filename, 'w') as f:
output_header(f, title)
for rating in 'ABCDEF':
mask = gaskap_tgt_ordered['rating']==rating
subset = gaskap_tgt_ordered[mask]
j19_subset = j19_tgt_ordered[mask]
print('Rating {} has {} spectra'.format(rating, len(subset)))
output_block_title(f, rating, rating=='A', len(subset))
for idx, gaskap_src in enumerate(subset):
gaskap_name = gaskap_src['comp_name']
j19_name = j19_subset[idx]['Source']
output_j19_img(f, gaskap_name, j19_name, rating)
# Add a section for missed spectra
output_block_title(f, None, False, len(j19_unmatched))
for row in j19_unmatched:
gaskap_name = row['closest_gaskap']
j19_name = row['Source']
output_j19_img(f, gaskap_name, j19_name, rating, sep=row['gaskap_sep'])
output_footer(f)
if match_cat:
augmented_table = Table(gaskap_tgt_ordered)
close_j19_comp_names = j19_tgt_ordered['Source']
col_closest = Column(name='closest_j19', data=close_j19_comp_names)
col_gaskap_ra = Column(name='j19_ra', data=j19_tgt_ordered['ra']*u.deg)
col_gaskap_dec = Column(name='j19_dec', data=j19_tgt_ordered['dec']*u.deg)
sep_vals = d2d_j19[j19_match]
sep_vals_sorted = sep_vals[sort_order]
col_sep = Column(name='j19_sep', data=sep_vals_sorted.to(u.arcsec))
augmented_table.add_columns([col_closest, col_gaskap_ra, col_gaskap_dec, col_sep])
#print (augmented_table)
j19_match_vo_table = from_table(augmented_table)
writeto(j19_match_vo_table, match_cat)
def main():
args = parseargs()
start = time.time()
print("#### Started generating spectra pages for sbid {} at {} ####".format(args.sbid,
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start))))
parent_folder = 'sb{}/'.format(args.sbid)
if args.parent:
parent_folder = args.parent
if not os.path.exists(parent_folder):
print("Error: Folder {} does not exist.".format(parent_folder))
return 1
spectra_votable = votable.parse('{}/askap_spectra.vot'.format(parent_folder), pedantic=False)
spectra_table = spectra_votable.get_first_table().to_table()
output_spectra(args.sbid, spectra_table, 'Absorption spectra for SBID {}'.format(
args.sbid), '{}/all.html'.format(parent_folder))
output_spectra(args.sbid, spectra_table, 'Absorption spectra for SBID {} with non MW absorption features'.format(
args.sbid, args.good), '{}/detections.html'.format(parent_folder), has_other_abs=True)
output_spectra(args.sbid, spectra_table, 'Absorption spectra for SBID {} with {}σ candidate detections'.format(
args.sbid, args.best), '{}/best.html'.format(parent_folder), threshold=args.best)
output_spectra(args.sbid, spectra_table, 'Absorption spectra for SBID {} with MW absorption features'.format(
args.sbid, args.best), '{}/mw_detections.html'.format(parent_folder), has_mw_abs=True)
max_noise=0.03
output_spectra(args.sbid, spectra_table, 'Absorption spectra for SBID {} with less than {} noise level'.format(
args.sbid, max_noise), '{}/quiet.html'.format(parent_folder), max_noise=max_noise)
if args.sbid == 10944:
output_diff_sigma_spectra(args.sbid, spectra_table, 'Comparison of sigma cutoffs', '{}/sigmacomp.html'.format(parent_folder))
missed_sources = ['J005448-725353', 'J010532-721331', 'J005014-730326', 'J012924-733153', 'J005217-730157', 'J010556-714607', 'J005141-725545', 'J004306-732828', 'J010401-720206',
'J010359-720144', 'J010404-720145', 'J013032-731741', 'J003524-732223', 'J010919-725600', 'J013218-715348', 'J004718-723947', 'J010431-720726', 'J005116-734000', 'J003037-742903',
'J003037-742901', 'J012733-713639', 'J010932-713453', 'J003936-742018', 'J004808-741206', 'J002411-735717', 'J002143-741500']
output_listed_spectra(args.sbid, spectra_table, 'Absorption spectra for SBID {} excluded by changed noise'.format(
args.sbid), '{}/excluded.html'.format(parent_folder), missed_sources)
wide_added = ['J012639-731502', 'J012639-731502', 'J005644-725200', 'J011408-732006', 'J005217-730157']
output_listed_spectra(args.sbid, spectra_table, 'Absorption spectra for SBID {} added by using 3 channels with 2.3 sigma match'.format(
args.sbid), '{}/wide.html'.format(parent_folder), wide_added)
bad_noise = ['J003749-735128',
'J010932-713453',
'J013134-700042',
'J013742-733050',
'J014105-722748']
output_listed_spectra(args.sbid, spectra_table, 'Absorption spectra for SBID {} with poor noise estimates'.format(
args.sbid), '{}/bad_noise.html'.format(parent_folder), bad_noise)
if args.sbid in (8906, 10941, 10944):
j19_table, idx_j19, d2d_j19, j19_match, j19_unmatched = find_j19_matches(spectra_table, no_match_cat='{}/j19_not_matched.vot'.format(parent_folder))
output_j19_comparison(args.sbid, spectra_table, j19_table, idx_j19, d2d_j19, j19_match, j19_unmatched,
'Absorption spectra for SBID {} also in Jameson 19'.format(args.sbid), '{}/j19.html'.format(parent_folder), match_cat='{}/askap_spectra_in_j19.vot'.format(parent_folder))
non_j19_table = gaskap_targets = spectra_table[~j19_match]
print (len(non_j19_table))
output_spectra(args.sbid, non_j19_table, 'Absorption spectra for SBID {} not in J19 with absorption features'.format(
args.sbid), '{}/non_j19_detections.html'.format(parent_folder), has_other_abs=True, source_map='figures/source_loc_nonj19.png')
output_spectra(args.sbid, non_j19_table, 'Absorption spectra for SBID {} not in J19 with {}σ candidate detections'.format(
args.sbid, args.best), '{}/non_j19_best.html'.format(parent_folder), threshold=args.best, source_map='figures/source_loc_nonj19.png')
# Report
end = time.time()
print('#### Processing completed at %s ####' %
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end)))
print('Reported %d spectra in %.02f s' %
(len(spectra_table), end - start))
return 0
if __name__ == '__main__':
exit(main())
|
[
"astropy.io.votable.writeto",
"astropy.io.ascii.read",
"argparse.ArgumentParser",
"astropy.table.Table",
"os.path.dirname",
"os.path.exists",
"time.time",
"astropy.io.votable.from_table",
"astropy.table.Column",
"astropy.coordinates.SkyCoord",
"time.localtime",
"numpy.in1d"
] |
[((437, 584), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'description': '"""Produce preview pages for a set of spectra"""'}), "(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, description=\n 'Produce preview pages for a set of spectra')\n", (460, 584), False, 'import argparse\n'), ((13401, 13444), 'astropy.io.ascii.read', 'ascii.read', (['"""jameson2019.csv"""'], {'format': '"""csv"""'}), "('jameson2019.csv', format='csv')\n", (13411, 13444), False, 'from astropy.io import ascii, votable\n'), ((13567, 13626), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["(j19_table['ra'] * u.deg)", "(j19_table['dec'] * u.deg)"], {}), "(j19_table['ra'] * u.deg, j19_table['dec'] * u.deg)\n", (13575, 13626), False, 'from astropy.coordinates import SkyCoord\n'), ((13640, 13689), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["gaskap_table['ra']", "gaskap_table['dec']"], {}), "(gaskap_table['ra'], gaskap_table['dec'])\n", (13648, 13689), False, 'from astropy.coordinates import SkyCoord\n'), ((14176, 14243), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["(j19_unmatched['ra'] * u.deg)", "(j19_unmatched['dec'] * u.deg)"], {}), "(j19_unmatched['ra'] * u.deg, j19_unmatched['dec'] * u.deg)\n", (14184, 14243), False, 'from astropy.coordinates import SkyCoord\n'), ((14412, 14471), 'astropy.table.Column', 'Column', ([], {'name': '"""closest_gaskap"""', 'data': 'close_gaskap_comp_names'}), "(name='closest_gaskap', data=close_gaskap_comp_names)\n", (14418, 14471), False, 'from astropy.table import Column, Table\n'), ((14492, 14553), 'astropy.table.Column', 'Column', ([], {'name': '"""gaskap_ra"""', 'data': "gaskap_table[idx_gaskap]['ra']"}), "(name='gaskap_ra', data=gaskap_table[idx_gaskap]['ra'])\n", (14498, 14553), False, 'from astropy.table import Column, Table\n'), ((14575, 14638), 'astropy.table.Column', 'Column', ([], {'name': '"""gaskap_dec"""', 'data': "gaskap_table[idx_gaskap]['dec']"}), "(name='gaskap_dec', data=gaskap_table[idx_gaskap]['dec'])\n", (14581, 14638), False, 'from astropy.table import Column, Table\n'), ((17264, 17275), 'time.time', 'time.time', ([], {}), '()\n', (17273, 17275), False, 'import time\n'), ((21578, 21589), 'time.time', 'time.time', ([], {}), '()\n', (21587, 21589), False, 'import time\n'), ((14872, 14897), 'astropy.io.votable.from_table', 'from_table', (['j19_unmatched'], {}), '(j19_unmatched)\n', (14882, 14897), False, 'from astropy.io.votable import parse_single_table, from_table, writeto\n'), ((14906, 14945), 'astropy.io.votable.writeto', 'writeto', (['j19_unm_vo_table', 'no_match_cat'], {}), '(j19_unm_vo_table, no_match_cat)\n', (14913, 14945), False, 'from astropy.io.votable import parse_single_table, from_table, writeto\n'), ((16503, 16528), 'astropy.table.Table', 'Table', (['gaskap_tgt_ordered'], {}), '(gaskap_tgt_ordered)\n', (16508, 16528), False, 'from astropy.table import Column, Table\n'), ((16608, 16661), 'astropy.table.Column', 'Column', ([], {'name': '"""closest_j19"""', 'data': 'close_j19_comp_names'}), "(name='closest_j19', data=close_j19_comp_names)\n", (16614, 16661), False, 'from astropy.table import Column, Table\n'), ((16686, 16743), 'astropy.table.Column', 'Column', ([], {'name': '"""j19_ra"""', 'data': "(j19_tgt_ordered['ra'] * u.deg)"}), "(name='j19_ra', data=j19_tgt_ordered['ra'] * u.deg)\n", (16692, 16743), False, 'from astropy.table import Column, Table\n'), ((16767, 16826), 'astropy.table.Column', 'Column', ([], {'name': '"""j19_dec"""', 'data': "(j19_tgt_ordered['dec'] 
* u.deg)"}), "(name='j19_dec', data=j19_tgt_ordered['dec'] * u.deg)\n", (16773, 16826), False, 'from astropy.table import Column, Table\n'), ((17139, 17166), 'astropy.io.votable.from_table', 'from_table', (['augmented_table'], {}), '(augmented_table)\n', (17149, 17166), False, 'from astropy.io.votable import parse_single_table, from_table, writeto\n'), ((17175, 17213), 'astropy.io.votable.writeto', 'writeto', (['j19_match_vo_table', 'match_cat'], {}), '(j19_match_vo_table, match_cat)\n', (17182, 17213), False, 'from astropy.io.votable import parse_single_table, from_table, writeto\n'), ((17551, 17580), 'os.path.exists', 'os.path.exists', (['parent_folder'], {}), '(parent_folder)\n', (17565, 17580), False, 'import os\n'), ((2325, 2348), 'os.path.dirname', 'os.path.dirname', (['f.name'], {}), '(f.name)\n', (2340, 2348), False, 'import os\n'), ((3209, 3232), 'os.path.dirname', 'os.path.dirname', (['f.name'], {}), '(f.name)\n', (3224, 3232), False, 'import os\n'), ((3280, 3303), 'os.path.dirname', 'os.path.dirname', (['f.name'], {}), '(f.name)\n', (3295, 3303), False, 'import os\n'), ((9357, 9403), 'numpy.in1d', 'np.in1d', (["targets['comp_name']", 'comp_names_list'], {}), "(targets['comp_name'], comp_names_list)\n", (9364, 9403), True, 'import numpy as np\n'), ((17412, 17433), 'time.localtime', 'time.localtime', (['start'], {}), '(start)\n', (17426, 17433), False, 'import time\n'), ((21686, 21705), 'time.localtime', 'time.localtime', (['end'], {}), '(end)\n', (21700, 21705), False, 'import time\n'), ((12054, 12100), 'numpy.in1d', 'np.in1d', (["targets['comp_name']", 'comp_names_list'], {}), "(targets['comp_name'], comp_names_list)\n", (12061, 12100), True, 'import numpy as np\n')]
|
import base64
import os
import sys
import threading
from collections import defaultdict
from functools import partial
from io import BytesIO
from mimetypes import guess_extension
from typing import Any
import numpy as np
import six
from PIL import Image
from ...debugging.log import LoggerRoot
from ..frameworks import _patched_call, WeightsFileHandler, _Empty
from ..import_bind import PostImportHookPatching
from ...config import running_remotely
from ...model import InputModel, OutputModel, Framework
try:
from google.protobuf.json_format import MessageToDict # noqa
except ImportError:
MessageToDict = None
class TensorflowBinding(object):
@classmethod
def update_current_task(cls, task, patch_reporting=True, patch_model_io=True):
if not task:
IsTensorboardInit.clear_tensorboard_used()
EventTrainsWriter.update_current_task(task)
if patch_reporting:
PatchSummaryToEventTransformer.update_current_task(task)
PatchTensorFlowEager.update_current_task(task)
if patch_model_io:
PatchKerasModelIO.update_current_task(task)
PatchTensorflowModelIO.update_current_task(task)
PatchTensorflow2ModelIO.update_current_task(task)
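# Rough flow, inferred from this module rather than from any upstream documentation:
# a ClearML Task calls TensorflowBinding.update_current_task() once, which installs the
# patchers below; those intercept TensorBoard writers (EventTrainsWriter), eager summary
# ops, and Keras/TensorFlow model save/load calls, and forward them to the task's logger
# and model registry.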
class IsTensorboardInit(object):
_tensorboard_initialized = False
@classmethod
def tensorboard_used(cls):
return cls._tensorboard_initialized
@classmethod
def set_tensorboard_used(cls):
cls._tensorboard_initialized = True
@classmethod
def clear_tensorboard_used(cls):
cls._tensorboard_initialized = False
@staticmethod
def _patched_tb__init__(original_init, self, *args, **kwargs):
IsTensorboardInit._tensorboard_initialized = True
return original_init(self, *args, **kwargs)
# noinspection PyProtectedMember
class WeightsGradientHistHelper(object):
def __init__(self, logger, report_freq=100, histogram_update_freq_multiplier=10, histogram_granularity=50):
self._logger = logger
self.report_freq = report_freq
self._histogram_granularity = histogram_granularity
self._histogram_update_freq_multiplier = histogram_update_freq_multiplier
self._histogram_update_call_counter = 0
self._hist_report_cache = {}
self._hist_x_granularity = 50
@staticmethod
def _sample_histograms(_hist_iters, _histogram_granularity):
# re-sample history based on distribution of samples across time (steps)
ratio = ((_hist_iters[-1] - _hist_iters[_histogram_granularity]) /
(_hist_iters[_histogram_granularity - 1] - _hist_iters[0])) if \
_hist_iters.size > _histogram_granularity else 0.
cur_idx_below = np.arange(0, min(_hist_iters.size, _histogram_granularity - 1))
np.random.shuffle(cur_idx_below)
cur_idx_below = cur_idx_below[:int(_histogram_granularity * (1.0 - ratio / (1 + ratio)) + 0.5)]
if ratio > 0.0:
cur_idx_above = np.arange(_histogram_granularity - 1, _hist_iters.size)
np.random.shuffle(cur_idx_above)
cur_idx_above = cur_idx_above[:int(_histogram_granularity * ratio / (1 + ratio))]
else:
cur_idx_above = np.array([])
_cur_idx = np.unique(np.sort(np.concatenate((cur_idx_below, cur_idx_above)).astype(np.int)))
return _cur_idx
def add_histogram(self, title, series, step, hist_data):
# only collect histogram every specific interval
self._histogram_update_call_counter += 1
if self._histogram_update_call_counter % self.report_freq != 0 or \
self._histogram_update_call_counter < self.report_freq - 1:
return None
if isinstance(hist_data, dict):
pass
elif isinstance(hist_data, np.ndarray) and len(hist_data.shape) == 2 and np.atleast_2d(hist_data).shape[1] == 3:
# prepare the dictionary, assume numpy
# hist_data['bucketLimit'] is the histogram bucket right side limit, meaning X axis
# hist_data['bucket'] is the histogram height, meaning the Y axis
# notice hist_data[:, 1] is the right side limit, for backwards compatibility we take the left side
hist_data = {'bucketLimit': hist_data[:, 0].tolist(), 'bucket': hist_data[:, 2].tolist()}
else:
# assume we have to do the histogram on the data
hist_data = np.histogram(hist_data, bins=32)
hist_data = {'bucketLimit': hist_data[1].tolist(), 'bucket': hist_data[0].tolist()}
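        # At this point hist_data has been normalized to two equal-length lists, e.g.
        # (illustrative values only): {'bucketLimit': [0.1, 0.2, ...], 'bucket': [5, 9, ...]}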
self._add_histogram(title=title, series=series, step=step, hist_data=hist_data)
def _add_histogram(self, title, series, step, hist_data):
# only collect histogram every specific interval
self._histogram_update_call_counter += 1
if self._histogram_update_call_counter % self.report_freq != 0 or \
self._histogram_update_call_counter < self.report_freq - 1:
return None
# generate forward matrix of the histograms
# Y-axis (rows) is iteration (from 0 to current Step)
# X-axis averaged bins (conformed sample 'bucketLimit')
# Z-axis actual value (interpolated 'bucket')
step = EventTrainsWriter._fix_step_counter(title, series, step)
# get histograms from cache
hist_list, hist_iters, minmax = self._hist_report_cache.get((title, series), ([], np.array([]), None))
# resample data so we are always constrained in number of histogram we keep
if hist_iters.size >= self._histogram_granularity ** 2:
idx = self._sample_histograms(hist_iters, self._histogram_granularity)
hist_iters = hist_iters[idx]
hist_list = [hist_list[i] for i in idx]
# check if current sample is not already here (actually happens some times)
if step in hist_iters:
return None
# add current sample, if not already here
hist_iters = np.append(hist_iters, step)
# hist_data['bucketLimit'] is the histogram bucket right side limit, meaning X axis
# hist_data['bucket'] is the histogram height, meaning the Y axis
hist = np.array(list(zip(hist_data['bucketLimit'], hist_data['bucket'])), dtype=np.float32)
hist = hist[~np.isinf(hist[:, 0]), :]
hist_list.append(hist)
# keep track of min/max values of histograms (for later re-binning)
if minmax is None:
minmax = hist[:, 0].min(), hist[:, 0].max()
else:
# noinspection PyUnresolvedReferences
minmax = min(minmax[0], hist[:, 0].min()), max(minmax[1], hist[:, 0].max())
# update the cache
self._hist_report_cache[(title, series)] = hist_list, hist_iters, minmax
# only report histogram every specific interval, but do report the first few, so you know there are histograms
if hist_iters.size < 1 or (hist_iters.size >= self._histogram_update_freq_multiplier and
hist_iters.size % self._histogram_update_freq_multiplier != 0):
return None
# resample histograms on a unified bin axis +- epsilon
_epsilon = abs((minmax[1] - minmax[0])/float(self._hist_x_granularity))
if _epsilon == 0:
_epsilon = 0.01
_minmax = minmax[0] - _epsilon, minmax[1] + _epsilon
prev_xedge = np.arange(start=_minmax[0],
step=(_minmax[1] - _minmax[0]) / float(self._hist_x_granularity - 2), stop=_minmax[1])
# uniformly select histograms and the last one
cur_idx = self._sample_histograms(hist_iters, self._histogram_granularity)
report_hist = np.zeros(shape=(len(cur_idx), prev_xedge.size), dtype=np.float32)
for i, n in enumerate(cur_idx):
h = hist_list[n]
report_hist[i, :] = np.interp(prev_xedge, h[:, 0], h[:, 1], right=0, left=0)
yedges = hist_iters[cur_idx]
xedges = prev_xedge
# if only a single line make, add another zero line, for the scatter plot to draw
if report_hist.shape[0] < 2:
report_hist = np.vstack((np.zeros_like(report_hist), report_hist))
# create 3d line (scatter) of histograms
skipx = max(1, int(xedges.size / 10))
skipy = max(1, int(yedges.size / 10))
xlabels = ['%.2f' % v if i % skipx == 0 else '' for i, v in enumerate(xedges[:-1])]
ylabels = [str(int(v)) if i % skipy == 0 else '' for i, v in enumerate(yedges)]
self._logger.report_surface(
title=title,
series=series,
iteration=0,
xaxis=' ',
yaxis='iteration',
xlabels=xlabels,
ylabels=ylabels,
matrix=report_hist,
camera=(-0.1, +1.3, 1.4))
# noinspection PyMethodMayBeStatic,PyProtectedMember,SpellCheckingInspection
class EventTrainsWriter(object):
"""
    TF SummaryWriter implementation that converts TensorBoard summaries into
    ClearML events and reports the events (metrics) to a ClearML task (logger).
"""
__main_task = None
_add_lock = threading.RLock()
_series_name_lookup = {}
# store all the created tensorboard writers in the system
    # this allows us to ask whether a certain title/series already exists on some EventWriter
# and if it does, then we add to the series name the last token from the logdir
# (so we can differentiate between the two)
# key, value: key=hash(title, graph), value=EventTrainsWriter._id
_title_series_writers_lookup = {}
_event_writers_id_to_logdir = {}
# Protect against step (iteration) reuse, for example,
# steps counter inside an epoch, but wrapping around when epoch ends
# i.e. step = 0..100 then epoch ends and again step = 0..100
# We store the first report per title/series combination, and if wraparound occurs
# we synthetically continue to increase the step/iteration based on the previous epoch counter
# example: _title_series_wraparound_counter[('title', 'series')] =
# {'first_step':None, 'last_step':None, 'adjust_counter':0,}
_title_series_wraparound_counter = {}
@property
def variants(self):
return self._variants
def prepare_report(self):
return self.variants.copy()
def tag_splitter(self, tag, num_split_parts, split_char='/', join_char='_', default_title='variant',
logdir_header='series', auto_reduce_num_split=False, force_add_prefix=None):
"""
        Split a tf.summary tag into a (title, series) pair.
        The series is built from the last num_split_parts parts of the tag; the title from the remaining leading parts.
        :param str tag:
        :param int num_split_parts:
        :param str split_char: a character to split the tag on
        :param str join_char: a character to join the splits with
        :param str default_title: title to use in case no title can be inferred automatically
        :param str logdir_header: if 'series_last' then series=header: series, if 'series' then series=series :header,
            if 'title_last' then title=header title, if 'title' then title=title header
        :param bool auto_reduce_num_split: if True and the tag splits into fewer parts than requested,
            the requested number of split parts is adjusted accordingly.
        :param str force_add_prefix: always add the prefix to the series name
        :return: (str, str) title and series
"""
splitted_tag = tag.split(split_char)
if auto_reduce_num_split and num_split_parts > len(splitted_tag) - 1:
num_split_parts = max(1, len(splitted_tag) - 1)
series = join_char.join(splitted_tag[-num_split_parts:])
title = join_char.join(splitted_tag[:-num_split_parts]) or default_title
if force_add_prefix:
series = str(force_add_prefix)+series
# check if we already decided that we need to change the title/series
graph_id = hash((title, series))
if graph_id in self._graph_name_lookup:
return self._graph_name_lookup[graph_id]
# check if someone other than us used this combination
with self._add_lock:
event_writer_id = self._title_series_writers_lookup.get(graph_id, None)
if not event_writer_id:
# put us there
self._title_series_writers_lookup[graph_id] = self._id
elif event_writer_id != self._id:
# if there is someone else, change our series name and store us
org_series = series
org_title = title
other_logdir = self._event_writers_id_to_logdir[event_writer_id]
                split_logdir = self._logdir.split('/')
                unique_logdir = set(split_logdir) - set(other_logdir.split('/'))
                header = '/'.join(s for s in split_logdir if s in unique_logdir)
if logdir_header == 'series_last':
series = header + ': ' + series
elif logdir_header == 'series':
series = series + ' :' + header
elif logdir_header == 'title':
title = title + ' ' + header
else: # logdir_header == 'title_last':
title = header + ' ' + title
graph_id = hash((title, series))
# check if for some reason the new series is already occupied
new_event_writer_id = self._title_series_writers_lookup.get(graph_id)
if new_event_writer_id is not None and new_event_writer_id != self._id:
# well that's about it, nothing else we could do
if logdir_header == 'series_last':
series = str(self._logdir) + ': ' + org_series
elif logdir_header == 'series':
series = org_series + ' :' + str(self._logdir)
elif logdir_header == 'title':
title = org_title + ' ' + str(self._logdir)
else: # logdir_header == 'title_last':
title = str(self._logdir) + ' ' + org_title
graph_id = hash((title, series))
self._title_series_writers_lookup[graph_id] = self._id
# store for next time
self._graph_name_lookup[graph_id] = (title, series)
return title, series
def __init__(self, logger, logdir=None, report_freq=100, image_report_freq=None,
histogram_update_freq_multiplier=10, histogram_granularity=50, max_keep_images=None):
"""
Create a compatible ClearML backend to the TensorFlow SummaryToEventTransformer
Everything will be serialized directly to the ClearML backend, instead of to the standard TF FileWriter
:param logger: The task.logger to use for sending the metrics (def: task.get_logger())
:param report_freq: How often to update the statistics values
:param image_report_freq: How often to upload images (step % image_update_freq == 0)
:param histogram_update_freq_multiplier: How often to upload histogram
(step//update_freq) % histogram_update_freq_multiplier == 0
:param histogram_granularity: How many histograms (lines) to display in the 3d histogram plot
:param max_keep_images: Maximum number of images to save before starting to reuse files (per title/metric pair)
"""
# We are the events_writer, so that's what we'll pass
IsTensorboardInit.set_tensorboard_used()
self._logdir = logdir or ('unknown %d' % len(self._event_writers_id_to_logdir))
# conform directory structure to unix
if os.path.sep == '\\':
self._logdir = self._logdir.replace('\\', '/')
self._id = hash(self._logdir)
self._event_writers_id_to_logdir[self._id] = self._logdir
self.max_keep_images = max_keep_images
self.report_freq = report_freq
self.image_report_freq = image_report_freq if image_report_freq else report_freq
self.histogram_granularity = histogram_granularity
self.histogram_update_freq_multiplier = histogram_update_freq_multiplier
self._histogram_update_call_counter = 0
self._logger = logger
self._visualization_mode = 'RGB' # 'BGR'
self._variants = defaultdict(lambda: ())
self._scalar_report_cache = {}
self._hist_report_cache = {}
self._hist_x_granularity = 50
self._max_step = 0
self._graph_name_lookup = {}
self._generic_tensor_type_name_lookup = {}
self._grad_helper = WeightsGradientHistHelper(
logger=logger,
report_freq=report_freq,
histogram_update_freq_multiplier=histogram_update_freq_multiplier,
histogram_granularity=histogram_granularity
)
def _decode_image(self, img_str, width=None, height=None, color_channels=None):
# noinspection PyBroadException
try:
if isinstance(img_str, bytes):
imdata = img_str
else:
imdata = base64.b64decode(img_str)
output = BytesIO(imdata)
im = Image.open(output)
image = np.asarray(im)
output.close()
if height is not None and height > 0 and width is not None and width > 0:
# noinspection PyArgumentList
val = image.reshape(height, width, -1).astype(np.uint8)
else:
val = image.astype(np.uint8)
if val.ndim == 3 and val.shape[2] == 3:
if self._visualization_mode == 'BGR':
val = val[:, :, [2, 1, 0]]
else:
val = val
elif (val.ndim == 2) or (val.ndim == 3 and val.shape[2] == 1):
val = np.tile(np.atleast_3d(val), (1, 1, 3))
elif val.ndim == 3 and val.shape[2] == 4:
if self._visualization_mode == 'BGR':
val = val[:, :, [2, 1, 0]]
else:
val = val[:, :, [0, 1, 2]]
except KeyboardInterrupt:
raise
except Exception as e:
logger = LoggerRoot.get_base_logger(TensorflowBinding)
logger.warning('Failed decoding debug image [%s, %s, %s]' % (width, height, color_channels))
logger.warning('Error: %s' % e)
val = None
return val
def _add_image_numpy(self, tag, step, img_data_np, max_keep_images=None):
# type: (str, int, np.ndarray, int) -> ()
# only report images every specific interval
if step % self.image_report_freq != 0:
return None
if img_data_np is None:
return
# noinspection PyProtectedMember
title, series = self.tag_splitter(tag, num_split_parts=3, default_title='Images', logdir_header='title',
auto_reduce_num_split=True,
force_add_prefix=self._logger._get_tensorboard_series_prefix())
step = self._fix_step_counter(title, series, step)
if img_data_np.dtype != np.uint8:
# assume scale 0-1
img_data_np = (img_data_np * 255).astype(np.uint8)
# if 3d, pack into one big image
if img_data_np.ndim == 4:
dims = img_data_np.shape
stack_dim = int(np.sqrt(dims[0]))
# noinspection PyArgumentList
res = img_data_np.reshape(stack_dim, stack_dim, *dims[1:]).transpose((0, 2, 1, 3, 4))
tile_size_h = res.shape[0] * res.shape[1]
tile_size_w = res.shape[2] * res.shape[3]
img_data_np = res.reshape(tile_size_h, tile_size_w, -1)
self._logger.report_image(
title=title,
series=series,
iteration=step,
image=img_data_np,
max_image_history=self.max_keep_images if max_keep_images is None else max_keep_images,
)
def _add_image(self, tag, step, img_data):
# only report images every specific interval
if step % self.image_report_freq != 0:
return None
width = img_data.get('width')
height = img_data.get('height')
colorspace = img_data.get('colorspace')
img_str = img_data['encodedImageString']
matrix = self._decode_image(img_str, width=width, height=height, color_channels=colorspace)
if matrix is None:
return
return self._add_image_numpy(tag=tag, step=step, img_data_np=matrix)
def _add_scalar(self, tag, step, scalar_data):
default_title = tag if not self._logger._get_tensorboard_auto_group_scalars() else 'Scalars'
series_per_graph = self._logger._get_tensorboard_single_series_per_graph()
# noinspection PyProtectedMember
title, series = self.tag_splitter(
tag, num_split_parts=1, default_title=default_title,
logdir_header='title' if series_per_graph else 'series_last',
force_add_prefix=self._logger._get_tensorboard_series_prefix()
)
step = self._fix_step_counter(title, series, step)
tag = self._get_add_scalars_event_tag(default_title)
possible_title = tag if series_per_graph else None
possible_tag = None if series_per_graph else tag
title = title + possible_title if possible_title else title
series = possible_tag or series
# update scalar cache
num, value = self._scalar_report_cache.get((title, series), (0, 0))
        # if the scalar arrived as a string, it's probably a NaN
if isinstance(scalar_data, six.string_types):
# noinspection PyBroadException
try:
scalar_data = float(scalar_data)
except Exception:
scalar_data = float('nan')
# nan outputs nan
self._scalar_report_cache[(title, series)] = \
(num + 1,
(value + scalar_data) if scalar_data == scalar_data else scalar_data)
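        # The cache therefore holds (sample_count, running_sum) per (title, series); every
        # report_freq-th call the mean running_sum / sample_count is reported and the pair is reset.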
# only report images every specific interval
if step % self.report_freq != 0:
return None
# calculate mean and zero cache
num, value = self._scalar_report_cache.get((title, series), (0, 0))
scalar_data = value / num
self._scalar_report_cache[(title, series)] = (0, 0)
self._logger.report_scalar(
title=title,
series=series,
iteration=step,
value=scalar_data,
)
def _add_histogram(self, tag, step, hist_data):
# noinspection PyProtectedMember
title, series = self.tag_splitter(tag, num_split_parts=1, default_title='Histograms',
logdir_header='series',
force_add_prefix=self._logger._get_tensorboard_series_prefix())
self._grad_helper.add_histogram(
title=title,
series=series,
step=step,
hist_data=hist_data
)
def _add_plot(self, tag, step, values, vdict):
# noinspection PyBroadException
try:
if values.get('floatVal'):
plot_values = np.array(values.get('floatVal'), dtype=np.float32)
else:
plot_values = np.frombuffer(base64.b64decode(values['tensorContent'].encode('utf-8')),
dtype=np.float32)
plot_values = plot_values.reshape((int(values['tensorShape']['dim'][0]['size']),
int(values['tensorShape']['dim'][1]['size'])))
if 'metadata' in vdict:
if tag not in self._series_name_lookup:
self._series_name_lookup[tag] = [(tag, vdict['metadata'].get('displayName', ''),
vdict['metadata']['pluginData']['pluginName'])]
else:
                    # this should not happen, maybe it's another run, let's increase the value
self._series_name_lookup[tag] += [(tag + '_%d' % (len(self._series_name_lookup[tag]) + 1),
vdict['metadata'].get('displayName', ''),
vdict['metadata']['pluginData']['pluginName'])]
tag, series, plugin_name = self._series_name_lookup.get(tag, [(tag, tag, '')])[-1]
if 'pr_curve' in plugin_name:
# our thresholds are evenly distributed, in that
# width = 1.0 / (num_thresholds - 1)
# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
num_thresholds = plot_values.shape[1]
width = 1.0 / num_thresholds
thresholds = np.arange(0.0, 1.0, width, dtype=plot_values.dtype)
data_points = ['Threshold ', 'TP ', 'FP ', 'TN ', 'FN ', 'Precision ', ' Recall']
series = [{'name': series, 'data': np.vstack((plot_values[-1], plot_values[-2])).T,
'labels': [''.join(data_points) + '<br> {:.3f} '.format(thresholds[j]) +
' '.join(['%-3.2f' % v for v in plot_values[:, j]]) for j in
range(num_thresholds)]}]
reverse_xaxis = False
else:
reverse_xaxis = False
series = [{'name': series, 'data': plot_values}]
self._logger.report_line_plot(title=tag, series=series, xaxis='', yaxis='',
iteration=step, reverse_xaxis=reverse_xaxis)
except Exception:
pass
def _add_audio(self, tag, step, values, audio_data=None):
# only report images every specific interval
if step % self.image_report_freq != 0:
return None
if values:
audio_str = values['encodedAudioString']
audio_data = base64.b64decode(audio_str)
if audio_data is None:
return
# noinspection PyProtectedMember
title, series = self.tag_splitter(tag, num_split_parts=3, default_title='Audio', logdir_header='title',
auto_reduce_num_split=True,
force_add_prefix=self._logger._get_tensorboard_series_prefix())
step = self._fix_step_counter(title, series, step)
stream = BytesIO(audio_data)
if values:
file_extension = guess_extension(values['contentType']) or \
'.{}'.format(values['contentType'].split('/')[-1])
else:
# assume wav as default
file_extension = '.wav'
self._logger.report_media(
title=title,
series=series,
iteration=step,
stream=stream,
file_extension=file_extension,
max_history=self.max_keep_images,
)
def _add_text(self, tag, step, tensor_bytes):
# noinspection PyProtectedMember
title, series = self.tag_splitter(tag, num_split_parts=3, default_title='Text', logdir_header='title',
auto_reduce_num_split=True,
force_add_prefix=self._logger._get_tensorboard_series_prefix())
step = self._fix_step_counter(title, series, step)
text = tensor_bytes.decode('utf-8', errors='replace')
self._logger.report_media(
title=title,
series=series,
iteration=step,
stream=six.StringIO(text),
file_extension='.txt',
max_history=self.max_keep_images,
)
@staticmethod
def _fix_step_counter(title, series, step):
key = (title, series)
if key not in EventTrainsWriter._title_series_wraparound_counter:
EventTrainsWriter._title_series_wraparound_counter[key] = {'first_step': step, 'last_step': step,
'adjust_counter': 0}
return step
wraparound_counter = EventTrainsWriter._title_series_wraparound_counter[key]
# we decide on wrap around if the current step is less than 10% of the previous step
        # notice: since the counter is an int and we want to avoid rounding errors, we double-check in the if below
if step < wraparound_counter['last_step'] and step < 0.9 * wraparound_counter['last_step']:
# adjust step base line
wraparound_counter['adjust_counter'] += wraparound_counter['last_step'] + (1 if step <= 0 else step)
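            # Worked example, derived from this logic: if the previous epoch ended at
            # last_step=100 and a new step 0 arrives, adjust_counter becomes 101, so the new
            # epoch is reported as steps 101, 102, ... instead of 0, 1, ...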
# return adjusted step
wraparound_counter['last_step'] = step
return step + wraparound_counter['adjust_counter']
def add_event(self, event, step=None, walltime=None, **_):
supported_metrics = {
'simpleValue', 'image', 'histo', 'tensor', 'audio'
}
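        # Shape of MessageToDict(event) this method expects -- a hedged example, the exact
        # proto-JSON keys may vary slightly between TF versions:
        #   {'step': '12', 'wallTime': 1600000000.0,
        #    'summary': {'value': [{'tag': 'loss', 'simpleValue': 0.25}]}}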
def get_data(value_dict, metric_search_order):
data = None
metric_type = 'Unsupported'
for variant in metric_search_order:
data = value_dict.get(variant)
if data is not None:
metric_type = variant
break
return metric_type, data
# Support multiple threads accessing this instance (i.e. let TF/Keras do what they need)
with self._add_lock:
# TODO: add report frequency threshold (i.e. if we are sending too much data, increase the report_freq)
# we should measure reports per second and throttle back the reporting details accordingly
msg_dict = MessageToDict(event)
summary = msg_dict.get('summary')
if summary is None:
msg_dict.pop('step', None)
msg_dict.pop('wallTime', None)
keys_list = [key for key in msg_dict.keys() if len(key) > 0]
keys_list = ', '.join(keys_list)
LoggerRoot.get_base_logger(TensorflowBinding).debug(
'event summary not found, message type unsupported: %s' % keys_list)
return
value_dicts = summary.get('value')
# noinspection PyUnusedLocal
            walltime = walltime or msg_dict.get('wallTime')
step = step or msg_dict.get('step')
if step is None:
# when we start a new epoch there is no step in the msg_dict,
# we have to extract it manually
if hasattr(event, 'step'):
step = int(event.step)
else:
step = 0
LoggerRoot.get_base_logger(TensorflowBinding).debug(
'Received event without step, assuming step = {}'.format(step))
else:
step = int(step)
self._max_step = max(self._max_step, step)
if value_dicts is None:
LoggerRoot.get_base_logger(TensorflowBinding).debug("Summary arrived without 'value'")
return
for vdict in value_dicts:
tag = vdict.pop('tag', None)
if tag is None:
# we should not get here
LoggerRoot.get_base_logger(TensorflowBinding).debug(
'No tag for \'value\' existing keys %s' % ', '.join(vdict.keys()))
continue
metric, values = get_data(vdict, supported_metrics)
if metric == 'simpleValue':
self._add_scalar(tag=tag, step=step, scalar_data=values)
elif metric == 'histo':
self._add_histogram(tag=tag, step=step, hist_data=values)
elif metric == 'image':
self._add_image(tag=tag, step=step, img_data=values)
elif metric == 'audio':
self._add_audio(tag, step, values)
elif metric == 'tensor' and values.get('dtype') == 'DT_STRING':
# generic tensor
tensor_bytes = base64.b64decode('\n'.join(values['stringVal']))
plugin_type = self._generic_tensor_type_name_lookup.get(tag) or \
vdict.get('metadata', {}).get('pluginData', {}).get('pluginName', '').lower()
if plugin_type == 'audio':
self._generic_tensor_type_name_lookup[tag] = plugin_type
self._add_audio(tag, step, None, tensor_bytes)
elif plugin_type == 'text':
self._generic_tensor_type_name_lookup[tag] = plugin_type
self._add_text(tag, step, tensor_bytes)
else:
# we do not support it
pass
elif metric == 'tensor' and values.get('dtype') == 'DT_FLOAT':
self._add_plot(tag, step, values, vdict)
else:
LoggerRoot.get_base_logger(TensorflowBinding).debug(
'Event unsupported. tag = %s, vdict keys [%s]' % (tag, ', '.join(vdict.keys())))
continue
def get_logdir(self):
""" Returns a temporary directory name for compatibility with FileWriter. This directory is not actually used.
:return: '.'
"""
return '.'
def flush(self):
"""Flushes the event file to disk.
Call this method to make sure that all pending events have been written to
disk.
"""
self._logger.flush()
def close(self):
"""Flushes the event file to disk and close the file.
Call this method when you do not need the summary writer anymore.
"""
self._logger.flush()
def reopen(self):
"""Reopens the EventFileWriter.
Can be called after `close` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the EventFileWriter was not closed.
"""
pass
def _get_add_scalars_event_tag(self, title_prefix):
"""
:param str title_prefix: the table title prefix that was added to the series.
        :return: str the extracted sub-tag, in the same form tensorboard uses ('' if it cannot be derived)
"""
# HACK - this is tensorboard Summary util function, original path:
# ~/torch/utils/tensorboard/summary.py
def _clean_tag(name):
import re as _re
# noinspection RegExpRedundantEscape
_INVALID_TAG_CHARACTERS = _re.compile(r'[^-/\w\.]')
if name is not None:
new_name = _INVALID_TAG_CHARACTERS.sub('_', name)
new_name = new_name.lstrip('/') # Remove leading slashes
if new_name != name:
LoggerRoot.get_base_logger(TensorflowBinding).debug(
'Summary name %s is illegal; using %s instead.' % (name, new_name))
name = new_name
return name
main_path = self._logdir
# noinspection PyBroadException
try:
main_path = _clean_tag(main_path)
origin_tag = main_path.rpartition("/")[2].replace(title_prefix, "", 1)
if title_prefix and origin_tag[0] == "_": # add_scalars tag
origin_tag = origin_tag[1:] # Remove the first "_" that was added by the main_tag in tensorboard
else:
return ""
except Exception:
origin_tag = ""
return origin_tag
@classmethod
def update_current_task(cls, task):
if cls.__main_task != task:
with cls._add_lock:
cls._series_name_lookup = {}
cls._title_series_writers_lookup = {}
cls._event_writers_id_to_logdir = {}
cls._title_series_wraparound_counter = {}
cls.__main_task = task
# noinspection PyCallingNonCallable
class ProxyEventsWriter(object):
def __init__(self, events):
IsTensorboardInit.set_tensorboard_used()
self._events = events
def _get_sentinel_event(self):
ret = None
for ev in self._events:
if hasattr(ev, '_get_sentinel_event'):
# noinspection PyProtectedMember
ret = ev._get_sentinel_event()
return ret
def get_logdir(self):
ret = None
for ev in self._events:
if hasattr(ev, 'get_logdir'):
ret = ev.get_logdir()
return ret
def reopen(self):
ret = None
for ev in self._events:
if hasattr(ev, 'reopen'):
ret = ev.reopen()
return ret
def add_event(self, *args, **kwargs):
ret = None
for ev in self._events:
if hasattr(ev, 'add_event'):
ret = ev.add_event(*args, **kwargs)
return ret
def flush(self):
ret = None
for ev in self._events:
if hasattr(ev, 'flush'):
ret = ev.flush()
return ret
def close(self):
ret = None
for ev in self._events:
if hasattr(ev, 'close'):
ret = ev.close()
return ret
# noinspection PyPep8Naming
class PatchSummaryToEventTransformer(object):
__main_task = None
__original_getattribute = None
__original_getattributeX = None
_original_add_event = None
_original_add_eventT = None
_original_add_eventX = None
defaults_dict = dict(
report_freq=1, image_report_freq=1, histogram_update_freq_multiplier=5,
histogram_granularity=50)
@staticmethod
def trains_object(self):
if isinstance(self.event_writer, ProxyEventsWriter):
# noinspection PyProtectedMember
trains_writer = [e for e in self.event_writer._events if isinstance(e, EventTrainsWriter)]
return trains_writer[0] if trains_writer else None
elif isinstance(self.event_writer, EventTrainsWriter):
return self.event_writer
if not self.__dict__.get('_trains_defaults'):
self.__dict__['_trains_defaults'] = {}
return self.__dict__['_trains_defaults']
@staticmethod
def update_current_task(task, **kwargs):
PatchSummaryToEventTransformer.defaults_dict.update(kwargs)
PatchSummaryToEventTransformer.__main_task = task
# make sure we patched the SummaryToEventTransformer
PatchSummaryToEventTransformer._patch_summary_to_event_transformer()
PostImportHookPatching.add_on_import('tensorflow',
PatchSummaryToEventTransformer._patch_summary_to_event_transformer)
PostImportHookPatching.add_on_import('torch',
PatchSummaryToEventTransformer._patch_summary_to_event_transformer)
PostImportHookPatching.add_on_import('tensorboardX',
PatchSummaryToEventTransformer._patch_summary_to_event_transformer)
@staticmethod
def _patch_summary_to_event_transformer():
if 'tensorflow' in sys.modules:
try:
from tensorflow.python.summary.writer.writer import SummaryToEventTransformer # noqa
# only patch once
if PatchSummaryToEventTransformer.__original_getattribute is None:
PatchSummaryToEventTransformer.__original_getattribute = SummaryToEventTransformer.__getattribute__
SummaryToEventTransformer.__getattribute__ = PatchSummaryToEventTransformer._patched_getattribute
setattr(SummaryToEventTransformer, 'clearml',
property(PatchSummaryToEventTransformer.trains_object))
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))
if 'torch' in sys.modules:
try:
# only patch once
if PatchSummaryToEventTransformer._original_add_eventT is None:
# noinspection PyUnresolvedReferences
from torch.utils.tensorboard.writer import FileWriter as FileWriterT # noqa
PatchSummaryToEventTransformer._original_add_eventT = FileWriterT.add_event
FileWriterT.add_event = PatchSummaryToEventTransformer._patched_add_eventT
setattr(FileWriterT, 'clearml', None)
except ImportError:
                # torch.utils.tensorboard is not available in this torch version
pass
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))
if 'tensorboardX' in sys.modules:
try:
# only patch once
if PatchSummaryToEventTransformer.__original_getattributeX is None:
# noinspection PyUnresolvedReferences
from tensorboardX.writer import SummaryToEventTransformer as SummaryToEventTransformerX # noqa
PatchSummaryToEventTransformer.__original_getattributeX = \
SummaryToEventTransformerX.__getattribute__
SummaryToEventTransformerX.__getattribute__ = PatchSummaryToEventTransformer._patched_getattributeX
setattr(SummaryToEventTransformerX, 'clearml',
property(PatchSummaryToEventTransformer.trains_object))
except ImportError:
                # this is a new version of tensorboardX
pass
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))
if PatchSummaryToEventTransformer.__original_getattributeX is None:
try:
# only patch once
if PatchSummaryToEventTransformer._original_add_eventX is None:
from tensorboardX.writer import FileWriter as FileWriterX # noqa
PatchSummaryToEventTransformer._original_add_eventX = FileWriterX.add_event
FileWriterX.add_event = PatchSummaryToEventTransformer._patched_add_eventX
setattr(FileWriterX, 'clearml', None)
except ImportError:
                # this is a new version of tensorboardX
pass
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))
@staticmethod
def _patched_add_eventT(self, *args, **kwargs):
if not hasattr(self, 'clearml') or not PatchSummaryToEventTransformer.__main_task:
return PatchSummaryToEventTransformer._original_add_eventT(self, *args, **kwargs)
if not self.clearml: # noqa
# noinspection PyBroadException
try:
logdir = self.get_logdir()
except Exception:
logdir = None
self.clearml = EventTrainsWriter(PatchSummaryToEventTransformer.__main_task.get_logger(),
logdir=logdir, **PatchSummaryToEventTransformer.defaults_dict)
# noinspection PyBroadException
try:
self.clearml.add_event(*args, **kwargs)
except Exception:
pass
return PatchSummaryToEventTransformer._original_add_eventT(self, *args, **kwargs)
@staticmethod
def _patched_add_eventX(self, *args, **kwargs):
if not hasattr(self, 'clearml') or not PatchSummaryToEventTransformer.__main_task:
return PatchSummaryToEventTransformer._original_add_eventX(self, *args, **kwargs)
if not self.clearml:
# noinspection PyBroadException
try:
logdir = self.get_logdir()
except Exception:
logdir = None
self.clearml = EventTrainsWriter(PatchSummaryToEventTransformer.__main_task.get_logger(),
logdir=logdir, **PatchSummaryToEventTransformer.defaults_dict)
# noinspection PyBroadException
try:
self.clearml.add_event(*args, **kwargs)
except Exception:
pass
return PatchSummaryToEventTransformer._original_add_eventX(self, *args, **kwargs)
@staticmethod
def _patched_getattribute(self, attr):
get_base = PatchSummaryToEventTransformer.__original_getattribute
return PatchSummaryToEventTransformer._patched_getattribute_(self, attr, get_base)
@staticmethod
def _patched_getattributeX(self, attr):
get_base = PatchSummaryToEventTransformer.__original_getattributeX
return PatchSummaryToEventTransformer._patched_getattribute_(self, attr, get_base)
@staticmethod
def _patched_getattribute_(self, attr, get_base):
        # no main task, zero chance we have a ClearML event logger
if PatchSummaryToEventTransformer.__main_task is None:
return get_base(self, attr)
        # check if we already have a ClearML event logger
__dict__ = get_base(self, '__dict__')
if 'event_writer' not in __dict__ or \
isinstance(__dict__['event_writer'], (ProxyEventsWriter, EventTrainsWriter)):
return get_base(self, attr)
# patch the events writer field, and add a double Event Logger (ClearML and original)
base_eventwriter = __dict__['event_writer']
# noinspection PyBroadException
try:
logdir = base_eventwriter.get_logdir()
except Exception:
logdir = None
defaults_dict = __dict__.get('_trains_defaults') or PatchSummaryToEventTransformer.defaults_dict
trains_event = EventTrainsWriter(PatchSummaryToEventTransformer.__main_task.get_logger(),
logdir=logdir, **defaults_dict)
# order is important, the return value of ProxyEventsWriter is the last object in the list
__dict__['event_writer'] = ProxyEventsWriter([trains_event, base_eventwriter])
return get_base(self, attr)
class _ModelAdapter(object):
""" Model adapter which extends the save and save_weights methods of a Keras Model instance """
_model = None # type: Any
_output_model = None # type: OutputModel
def __init__(self, model, output_model):
super(_ModelAdapter, self).__init__()
super(_ModelAdapter, self).__setattr__('_model', model)
super(_ModelAdapter, self).__setattr__('_output_model', output_model)
super(_ModelAdapter, self).__setattr__('_logger', LoggerRoot.get_base_logger(TensorflowBinding))
def __getattr__(self, attr):
return getattr(self._model, attr)
def __setattr__(self, key, value):
return setattr(self._model, key, value)
def save(self, filepath, overwrite=True, include_optimizer=True):
self._model.save(filepath=filepath, overwrite=overwrite, include_optimizer=include_optimizer)
        # TODO: auto generate new objects if the filename changes
try:
self._output_model.update_weights(weights_filename=filepath, auto_delete_file=True)
except Exception as ex:
self._logger.error(str(ex))
def save_weights(self, filepath, overwrite=True):
self._model.save_weights(filepath=filepath, overwrite=overwrite)
        # TODO: auto generate new objects if the filename changes
try:
self._output_model.update_weights(weights_filename=filepath, auto_delete_file=True)
except Exception as ex:
self._logger.error(str(ex))
class PatchModelCheckPointCallback(object):
__main_task = None
__original_getattribute = None
defaults_dict = dict(
config_text=None,
config_dict=None,
label_enumeration=None,
name=None,
comment=None)
@staticmethod
def trains_object(self):
if isinstance(self.model, _ModelAdapter):
# noinspection PyProtectedMember
return self.model._output_model
if not self.__dict__.get('_trains_defaults'):
self.__dict__['_trains_defaults'] = {}
return self.__dict__['_trains_defaults']
@staticmethod
def update_current_task(task, **kwargs):
PatchModelCheckPointCallback.defaults_dict.update(kwargs)
PatchModelCheckPointCallback.__main_task = task
# make sure we patched the SummaryToEventTransformer
PatchModelCheckPointCallback._patch_model_checkpoint()
PostImportHookPatching.add_on_import('keras', PatchModelCheckPointCallback._patch_model_checkpoint)
PostImportHookPatching.add_on_import('tensorflow', PatchModelCheckPointCallback._patch_model_checkpoint)
@staticmethod
def _patch_model_checkpoint():
is_keras = 'keras' in sys.modules
is_tf_keras = 'tensorflow' in sys.modules
callbacks = None
if is_keras:
try:
import keras.callbacks as callbacks # noqa
except ImportError:
is_keras = False
if not is_keras and is_tf_keras:
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
import tensorflow.python.keras.callbacks as callbacks # noqa
except ImportError:
is_tf_keras = False
callbacks = None
# we have nothing, quit
if not is_keras and not is_tf_keras:
return
try:
# only patch once
if PatchModelCheckPointCallback.__original_getattribute is None and callbacks is not None:
PatchModelCheckPointCallback.__original_getattribute = callbacks.ModelCheckpoint.__getattribute__
callbacks.ModelCheckpoint.__getattribute__ = PatchModelCheckPointCallback._patched_getattribute
setattr(callbacks.ModelCheckpoint, 'clearml',
property(PatchModelCheckPointCallback.trains_object))
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
@staticmethod
def _patched_getattribute(self, attr):
get_base = PatchModelCheckPointCallback.__original_getattribute
        # no main task, zero chance we have a ClearML event logger
if PatchModelCheckPointCallback.__main_task is None:
return get_base(self, attr)
        # check if we already have a ClearML event logger
__dict__ = get_base(self, '__dict__')
if 'model' not in __dict__ or \
isinstance(__dict__['model'], _ModelAdapter):
return get_base(self, attr)
        # patch the model field, wrapping the Keras model with a _ModelAdapter (ClearML and original)
base_model = __dict__['model']
defaults_dict = __dict__.get('_trains_defaults') or PatchModelCheckPointCallback.defaults_dict
output_model = OutputModel(
PatchModelCheckPointCallback.__main_task,
config_text=defaults_dict.get('config_text'),
config_dict=defaults_dict.get('config_dict'),
name=defaults_dict.get('name'),
comment=defaults_dict.get('comment'),
label_enumeration=defaults_dict.get('label_enumeration') or
PatchModelCheckPointCallback.__main_task.get_labels_enumeration(),
framework=Framework.keras,
)
output_model.set_upload_destination(
PatchModelCheckPointCallback.__main_task.get_output_destination(raise_on_error=False))
trains_model = _ModelAdapter(base_model, output_model)
        # replace the callback's model with the wrapping adapter
__dict__['model'] = trains_model
return get_base(self, attr)
# noinspection PyProtectedMember,PyUnresolvedReferences
class PatchTensorFlowEager(object):
__main_task = None
__original_fn_scalar = None
__original_fn_hist = None
__original_fn_image = None
__trains_event_writer = {}
defaults_dict = dict(
report_freq=1, image_report_freq=1, histogram_update_freq_multiplier=5,
histogram_granularity=50)
@staticmethod
def update_current_task(task, **kwargs):
if task != PatchTensorFlowEager.__main_task:
PatchTensorFlowEager.__trains_event_writer = {}
PatchTensorFlowEager.defaults_dict.update(kwargs)
PatchTensorFlowEager.__main_task = task
# make sure we patched the SummaryToEventTransformer
PatchTensorFlowEager._patch_summary_ops()
PostImportHookPatching.add_on_import('tensorflow', PatchTensorFlowEager._patch_summary_ops)
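    # In eager/TF2 mode there is no SummaryToEventTransformer to wrap, so the binding instead
    # replaces the low-level gen_summary_ops.write_*_summary functions below. A hedged sketch of
    # the effect: a tf.summary.scalar('loss', 0.1, step=1) call issued against a patched writer
    # ends up in EventTrainsWriter._add_scalar().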
@staticmethod
def _patch_summary_ops():
if PatchTensorFlowEager.__original_fn_scalar is not None:
return
if 'tensorflow' in sys.modules:
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.ops import gen_summary_ops # noqa
PatchTensorFlowEager.__original_fn_scalar = gen_summary_ops.write_scalar_summary
gen_summary_ops.write_scalar_summary = PatchTensorFlowEager._write_scalar_summary
PatchTensorFlowEager.__original_fn_image = gen_summary_ops.write_image_summary
gen_summary_ops.write_image_summary = PatchTensorFlowEager._write_image_summary
PatchTensorFlowEager.__original_fn_hist = gen_summary_ops.write_histogram_summary
gen_summary_ops.write_histogram_summary = PatchTensorFlowEager._write_hist_summary
PatchTensorFlowEager.__write_summary = gen_summary_ops.write_summary
gen_summary_ops.write_summary = PatchTensorFlowEager._write_summary
gen_summary_ops.create_summary_file_writer = partial(IsTensorboardInit._patched_tb__init__,
gen_summary_ops.create_summary_file_writer)
gen_summary_ops.create_summary_db_writer = partial(IsTensorboardInit._patched_tb__init__,
gen_summary_ops.create_summary_db_writer)
except ImportError:
pass
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex))
@staticmethod
def _get_event_writer(writer):
if not PatchTensorFlowEager.__main_task:
return None
if not PatchTensorFlowEager.__trains_event_writer.get(id(writer)):
# noinspection PyBroadException
try:
logdir = writer.get_logdir()
except Exception:
                # check if we are in eager mode, let's get the global context logdir
# noinspection PyBroadException
try:
from tensorflow.python.eager import context # noqa
logdir = context.context().summary_writer._init_op_fn.keywords.get('logdir')
except Exception:
# noinspection PyBroadException
try:
from tensorflow.python.ops.summary_ops_v2 import _summary_state # noqa
logdir = _summary_state.writer._init_op_fn.keywords.get('logdir')
except Exception:
logdir = None
# noinspection PyBroadException
try:
if logdir is not None:
logdir = logdir.numpy().decode()
except Exception:
logdir = None
PatchTensorFlowEager.__trains_event_writer[id(writer)] = EventTrainsWriter(
logger=PatchTensorFlowEager.__main_task.get_logger(), logdir=logdir,
**PatchTensorFlowEager.defaults_dict)
return PatchTensorFlowEager.__trains_event_writer[id(writer)]
@staticmethod
def trains_object(self):
if not PatchTensorFlowEager.__trains_event_writer:
return None
return PatchTensorFlowEager.__trains_event_writer.get(
id(self), list(PatchTensorFlowEager.__trains_event_writer.values())[0])
@staticmethod
def _write_summary(writer, step, tensor, tag, summary_metadata, name=None, **kwargs):
event_writer = PatchTensorFlowEager._get_event_writer(writer)
# make sure we can get the tensors values
if event_writer and isinstance(step, int) or hasattr(step, 'numpy'):
# noinspection PyBroadException
try:
plugin_type = summary_metadata.decode()
if plugin_type.endswith('scalars'):
event_writer._add_scalar(tag=str(tag),
step=int(step.numpy()) if not isinstance(step, int) else step,
scalar_data=tensor.numpy())
elif plugin_type.endswith('images'):
img_data_np = tensor.numpy()
PatchTensorFlowEager._add_image_event_helper(event_writer, img_data_np=img_data_np,
tag=tag, step=step, **kwargs)
elif plugin_type.endswith('histograms'):
event_writer._add_histogram(
tag=str(tag), step=int(step.numpy()) if not isinstance(step, int) else step,
hist_data=tensor.numpy()
)
elif plugin_type.endswith('text'):
event_writer._add_text(
tag=str(tag), step=int(step.numpy()) if not isinstance(step, int) else step,
tensor_bytes=tensor.numpy()
)
elif 'audio' in plugin_type:
audio_bytes_list = [a for a in tensor.numpy().flatten() if a]
for i, audio_bytes in enumerate(audio_bytes_list):
event_writer._add_audio(tag=str(tag) + ('/{}'.format(i) if len(audio_bytes_list) > 1 else ''),
step=int(step.numpy()) if not isinstance(step, int) else step,
values=None, audio_data=audio_bytes)
else:
pass # print('unsupported plugin_type', plugin_type)
except Exception:
pass
return PatchTensorFlowEager.__write_summary(writer, step, tensor, tag, summary_metadata, name, **kwargs)
@staticmethod
def _write_scalar_summary(writer, step, tag, value, name=None, **kwargs):
event_writer = PatchTensorFlowEager._get_event_writer(writer)
if event_writer and isinstance(step, int) or hasattr(step, 'numpy'):
try:
event_writer._add_scalar(tag=str(tag),
step=int(step.numpy()) if not isinstance(step, int) else step,
scalar_data=value.numpy())
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
elif event_writer:
def _report_summary_op(a_writer, a_step, a_tag, a_value, a_name=None, **_):
if isinstance(a_step, int) or hasattr(a_step, 'numpy'):
try:
str_tag = a_tag.numpy()
str_tag = str_tag.decode() if isinstance(str_tag, bytes) else str(str_tag)
event_writer._add_scalar(
tag=str_tag,
step=int(a_step.numpy()) if not isinstance(a_step, int) else a_step,
scalar_data=a_value.numpy())
except Exception as a_ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(
'_report_summary_op: {}'.format(str(a_ex)))
# this is a mix of eager and graph execution
try:
from tensorflow.python.eager import context as _context
if not _context.executing_eagerly():
from tensorflow import py_function
# just creating the operator is enough (for some reason)
# to make sure it is added into the execution tree.
                    # the operator itself will do the reporting to the backend
py_function(
_report_summary_op,
inp=[writer, step, tag, value, name], Tout=[])
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
return PatchTensorFlowEager.__original_fn_scalar(writer, step, tag, value, name, **kwargs)
@staticmethod
def _write_hist_summary(writer, step, tag, values, name, **kwargs):
event_writer = PatchTensorFlowEager._get_event_writer(writer)
if event_writer and isinstance(step, int) or hasattr(step, 'numpy'):
try:
event_writer._add_histogram(
tag=str(tag), step=int(step.numpy()) if not isinstance(step, int) else step,
hist_data=values.numpy()
)
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
elif event_writer:
def _report_summary_op(a_writer, a_step, a_tag, a_value, a_name=None, **_):
if isinstance(a_step, int) or hasattr(a_step, 'numpy'):
try:
str_tag = a_tag.numpy()
str_tag = str_tag.decode() if isinstance(str_tag, bytes) else str(str_tag)
event_writer._add_histogram(
tag=str_tag,
step=int(a_step.numpy()) if not isinstance(a_step, int) else a_step,
hist_data=a_value.numpy()
)
except Exception as a_ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(
'_report_summary_op: {}'.format(str(a_ex)))
# this is a mix of eager and graph execution
try:
from tensorflow.python.eager import context as _context
if not _context.executing_eagerly():
from tensorflow import py_function
# just creating the operator is enough (for some reason)
# to make sure it is added into the execution tree.
                    # the operator itself will do the reporting to the backend
py_function(
_report_summary_op,
inp=[writer, step, tag, values, name], Tout=[])
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
return PatchTensorFlowEager.__original_fn_hist(writer, step, tag, values, name, **kwargs)
@staticmethod
def _write_image_summary(writer, step, tag, tensor, bad_color, max_images, name, **kwargs):
event_writer = PatchTensorFlowEager._get_event_writer(writer)
if event_writer and isinstance(step, int) or hasattr(step, 'numpy'):
try:
PatchTensorFlowEager._add_image_event_helper(event_writer, img_data_np=tensor.numpy(),
tag=tag, step=step, **kwargs)
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
elif event_writer:
def _report_summary_op(a_writer, a_step, a_tag, a_tensor, a_bad_color, a_max_images, a_name=None, **_):
if isinstance(a_step, int) or hasattr(a_step, 'numpy'):
try:
str_tag = a_tag.numpy()
str_tag = str_tag.decode() if isinstance(str_tag, bytes) else str(str_tag)
PatchTensorFlowEager._add_image_event_helper(
event_writer, img_data_np=a_tensor.numpy(),
tag=str_tag, step=a_step, **kwargs)
except Exception as a_ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(
'_report_summary_op: {}'.format(str(a_ex)))
# this is a mix of eager and graph execution
try:
from tensorflow.python.eager import context as _context
if not _context.executing_eagerly():
from tensorflow import py_function
# just creating the operator is enough (for some reason)
# to make sure it is added into the execution tree.
                    # the operator itself will do the reporting to the backend
py_function(
_report_summary_op,
inp=[writer, step, tag, tensor, bad_color, max_images, name], Tout=[])
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
return PatchTensorFlowEager.__original_fn_image(
writer, step, tag, tensor, bad_color, max_images, name, **kwargs)
@staticmethod
def _add_image_event_helper(event_writer, img_data_np, tag, step, **kwargs):
if img_data_np.ndim == 1 and img_data_np.size >= 3 and \
(len(img_data_np[0]) < 10 and len(img_data_np[1]) < 10):
# this is just for making sure these are actually valid numbers
width = int(img_data_np[0].decode()) # noqa: F841
height = int(img_data_np[1].decode()) # noqa: F841
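            # Illustrative layout (assumed values): np.array([b'640', b'480', png_1, png_2])
            # would produce two reports titled '<tag>/sample_0' and '<tag>/sample_1'.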
for i in range(2, img_data_np.size):
img_data = {'width': None, 'height': None,
'colorspace': 'RGB', 'encodedImageString': img_data_np[i]}
image_tag = str(tag) + '/sample_{}'.format(i - 2) if img_data_np.size > 3 else str(tag)
event_writer._add_image(tag=image_tag,
step=int(step.numpy()) if not isinstance(step, int) else step,
img_data=img_data)
else:
event_writer._add_image_numpy(tag=str(tag),
step=int(step.numpy()) if not isinstance(step, int) else step,
img_data_np=img_data_np,
max_keep_images=kwargs.get('max_images'))
@staticmethod
def _nothing_op(*_, **__):
"""Convenient else branch for when summaries do not record."""
from tensorflow.python.framework import constant_op
return constant_op.constant(False)
# noinspection PyPep8Naming,SpellCheckingInspection
class PatchKerasModelIO(object):
__main_task = None
__patched_keras = None
__patched_tensorflow = None
@staticmethod
def update_current_task(task, **_):
PatchKerasModelIO.__main_task = task
PatchKerasModelIO._patch_model_checkpoint()
PostImportHookPatching.add_on_import('tensorflow', PatchKerasModelIO._patch_model_checkpoint)
PostImportHookPatching.add_on_import('keras', PatchKerasModelIO._patch_model_checkpoint)
@staticmethod
def _patch_model_checkpoint():
if 'keras' in sys.modules and not PatchKerasModelIO.__patched_keras:
try:
from keras.engine.network import Network # noqa
except ImportError:
Network = None
try:
from keras.engine.functional import Functional # noqa
except ImportError:
Functional = None
try:
from keras.engine.sequential import Sequential # noqa
except ImportError:
Sequential = None
try:
from keras import models as keras_saving # noqa
except ImportError:
keras_saving = None
# check that we are not patching anything twice
if PatchKerasModelIO.__patched_tensorflow:
PatchKerasModelIO.__patched_keras = [
Network if PatchKerasModelIO.__patched_tensorflow[0] != Network else None,
Sequential if PatchKerasModelIO.__patched_tensorflow[1] != Sequential else None,
keras_saving if PatchKerasModelIO.__patched_tensorflow[2] != keras_saving else None,
Functional if PatchKerasModelIO.__patched_tensorflow[3] != Functional else None,
None,
None,
]
else:
PatchKerasModelIO.__patched_keras = [Network, Sequential, keras_saving, Functional, None, None]
PatchKerasModelIO._patch_io_calls(*PatchKerasModelIO.__patched_keras)
if 'tensorflow' in sys.modules and not PatchKerasModelIO.__patched_tensorflow:
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.keras.engine.network import Network # noqa
except ImportError:
Network = None
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.keras.engine.functional import Functional # noqa
except ImportError:
Functional = None
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.keras.engine.sequential import Sequential # noqa
except ImportError:
Sequential = None
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.keras import models as keras_saving_legacy # noqa
except ImportError:
keras_saving_legacy = None
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.keras import models as keras_saving # noqa
except ImportError:
keras_saving = None
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.keras.saving import hdf5_format as keras_hdf5 # noqa
except ImportError:
keras_hdf5 = None
if PatchKerasModelIO.__patched_keras:
PatchKerasModelIO.__patched_tensorflow = [
Network if PatchKerasModelIO.__patched_keras[0] != Network else None,
Sequential if PatchKerasModelIO.__patched_keras[1] != Sequential else None,
keras_saving if PatchKerasModelIO.__patched_keras[2] != keras_saving else None,
Functional if PatchKerasModelIO.__patched_keras[3] != Functional else None,
keras_saving_legacy if PatchKerasModelIO.__patched_keras[4] != keras_saving_legacy else None,
keras_hdf5 if PatchKerasModelIO.__patched_keras[5] != keras_hdf5 else None,
]
else:
PatchKerasModelIO.__patched_tensorflow = [
Network, Sequential, keras_saving, Functional, keras_saving_legacy, keras_hdf5]
PatchKerasModelIO._patch_io_calls(*PatchKerasModelIO.__patched_tensorflow)
@staticmethod
def _patch_io_calls(Network, Sequential, keras_saving, Functional, keras_saving_legacy=None, keras_hdf5=None):
try:
if Sequential is not None:
Sequential._updated_config = _patched_call(Sequential._updated_config,
PatchKerasModelIO._updated_config)
if hasattr(Sequential.from_config, '__func__'):
# noinspection PyUnresolvedReferences
Sequential.from_config = classmethod(_patched_call(Sequential.from_config.__func__,
PatchKerasModelIO._from_config))
else:
Sequential.from_config = _patched_call(Sequential.from_config, PatchKerasModelIO._from_config)
if Network is not None:
Network._updated_config = _patched_call(Network._updated_config, PatchKerasModelIO._updated_config)
if hasattr(Sequential.from_config, '__func__'):
# noinspection PyUnresolvedReferences
Network.from_config = classmethod(_patched_call(Network.from_config.__func__,
PatchKerasModelIO._from_config))
else:
Network.from_config = _patched_call(Network.from_config, PatchKerasModelIO._from_config)
Network.save = _patched_call(Network.save, PatchKerasModelIO._save)
Network.save_weights = _patched_call(Network.save_weights, PatchKerasModelIO._save_weights)
Network.load_weights = _patched_call(Network.load_weights, PatchKerasModelIO._load_weights)
elif Functional is not None:
Functional._updated_config = _patched_call(
Functional._updated_config, PatchKerasModelIO._updated_config)
if hasattr(Sequential.from_config, '__func__'):
# noinspection PyUnresolvedReferences
Functional.from_config = classmethod(_patched_call(Functional.from_config.__func__,
PatchKerasModelIO._from_config))
else:
Functional.from_config = _patched_call(Functional.from_config, PatchKerasModelIO._from_config)
Functional.save = _patched_call(Functional.save, PatchKerasModelIO._save)
Functional.save_weights = _patched_call(Functional.save_weights, PatchKerasModelIO._save_weights)
Functional.load_weights = _patched_call(Functional.load_weights, PatchKerasModelIO._load_weights)
if keras_saving is not None:
keras_saving.save_model = _patched_call(keras_saving.save_model, PatchKerasModelIO._save_model)
keras_saving.load_model = _patched_call(keras_saving.load_model, PatchKerasModelIO._load_model)
if keras_saving_legacy is not None:
keras_saving_legacy.save_model = _patched_call(
keras_saving_legacy.save_model, PatchKerasModelIO._save_model)
keras_saving_legacy.load_model = _patched_call(
keras_saving_legacy.load_model, PatchKerasModelIO._load_model)
if keras_hdf5 is not None:
keras_hdf5.save_weights_to_hdf5_group = _patched_call(
keras_hdf5.save_weights_to_hdf5_group, PatchKerasModelIO._save_weights)
keras_hdf5.load_weights_from_hdf5_group = _patched_call(
keras_hdf5.load_weights_from_hdf5_group, PatchKerasModelIO._load_weights)
keras_hdf5.load_weights_from_hdf5_group_by_name = _patched_call(
keras_hdf5.load_weights_from_hdf5_group_by_name, PatchKerasModelIO._load_weights)
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
@staticmethod
def _updated_config(original_fn, self):
config = original_fn(self)
# check if we have main task
if PatchKerasModelIO.__main_task is None:
return config
try:
# there is no actual file, so we create the OutputModel without one
# check if object already has InputModel
if not hasattr(self, 'trains_out_model'):
self.trains_out_model = []
# check if object already has InputModel
model_name_id = config.get('name', getattr(self, 'name', 'unknown'))
if self.trains_out_model:
self.trains_out_model[-1].config_dict = config
else:
# todo: support multiple models for the same task
self.trains_out_model.append(OutputModel(
task=PatchKerasModelIO.__main_task,
config_dict=config,
name=PatchKerasModelIO.__main_task.name + ' ' + model_name_id,
label_enumeration=PatchKerasModelIO.__main_task.get_labels_enumeration(),
framework=Framework.keras,
))
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
return config
@staticmethod
def _from_config(original_fn, *args, **kwargs):
try:
self = original_fn(*args, **kwargs)
except Exception as ex:
if not running_remotely():
raise ex
self = _Empty()
# check if we have main task
if PatchKerasModelIO.__main_task is None:
return self
try:
# check if object already has InputModel
if not hasattr(self, 'trains_in_model'):
self.trains_in_model = None
# get config
config_dict = kwargs['config'] if 'config' in kwargs else args[0]
# check if object already has InputModel
self.trains_in_model = InputModel.empty(
config_dict=config_dict,
label_enumeration=PatchKerasModelIO.__main_task.get_labels_enumeration(),
)
# todo: support multiple models for the same task
PatchKerasModelIO.__main_task.connect(self.trains_in_model)
# if we are running remotely we should deserialize the object
# because someone might have changed the configuration
# Hack: disabled
if False and running_remotely():
# reload the model
model_config = self.trains_in_model.config_dict
# verify that this is the same model so we are not deserializing a diff model
if (config_dict and config_dict.get('config') and model_config and model_config.get('config') and
config_dict.get('config').get('name') == model_config.get('config').get('name')) or \
(not config_dict and not model_config):
if 'config' in kwargs:
kwargs['config'] = model_config
else:
args = (model_config,) + args[1:]
model = original_fn(*args, **kwargs)
model.trains_in_model = self.trains_in_model
return model
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
return self
@staticmethod
def _load_weights(original_fn, self, *args, **kwargs):
# check if we have main task
if PatchKerasModelIO.__main_task is None:
return original_fn(self, *args, **kwargs)
# get filepath
if self and getattr(self, 'filename', None):
filepath = getattr(self, 'filename', None)
else:
filepath = kwargs['filepath'] if 'filepath' in kwargs else args[0]
# Hack: disabled
if False and running_remotely():
# register/load model weights
filepath = WeightsFileHandler.restore_weights_file(self, filepath, Framework.keras,
PatchKerasModelIO.__main_task)
if 'filepath' in kwargs:
kwargs['filepath'] = filepath
else:
args = (filepath,) + args[1:]
# load model
return original_fn(self, *args, **kwargs)
# try to load the files, if something happened exception will be raised before we register the file
model = original_fn(self, *args, **kwargs)
# register/load model weights
WeightsFileHandler.restore_weights_file(self, filepath, Framework.keras, PatchKerasModelIO.__main_task)
return model
@staticmethod
def _save(original_fn, self, *args, **kwargs):
if hasattr(self, 'trains_out_model') and self.trains_out_model:
# noinspection PyProtectedMember
self.trains_out_model[-1]._processed = False
original_fn(self, *args, **kwargs)
# no need to specially call, because the original save uses "save_model" which we overload
# noinspection PyProtectedMember
if not hasattr(self, 'trains_out_model') or not self.trains_out_model or \
not hasattr(self.trains_out_model[-1], '_processed') or not self.trains_out_model[-1]._processed:
PatchKerasModelIO._update_outputmodel(self, *args, **kwargs)
@staticmethod
def _save_weights(original_fn, self, *args, **kwargs):
original_fn(self, *args, **kwargs)
PatchKerasModelIO._update_outputmodel(self, *args, **kwargs)
@staticmethod
def _update_outputmodel(self, *args, **kwargs):
# check if we have main task
if PatchKerasModelIO.__main_task is None:
return
try:
# get filepath
if self and getattr(self, 'filename', None):
filepath = getattr(self, 'filename', None)
else:
filepath = kwargs['filepath'] if 'filepath' in kwargs else args[0]
# this will already generate an output model
# noinspection PyBroadException
try:
config = self._updated_config()
except Exception:
# we failed to convert the network to json, for some reason (most likely internal keras error)
config = {}
if filepath:
WeightsFileHandler.create_output_model(
self, filepath, Framework.keras, PatchKerasModelIO.__main_task,
config_obj=config or None, singlefile=True)
except Exception as ex:
LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex))
@staticmethod
def _save_model(original_fn, model, filepath, *args, **kwargs):
original_fn(model, filepath, *args, **kwargs)
if PatchKerasModelIO.__main_task:
PatchKerasModelIO._update_outputmodel(model, filepath)
@staticmethod
def _load_model(original_fn, filepath, *args, **kwargs):
if not PatchKerasModelIO.__main_task:
return original_fn(filepath, *args, **kwargs)
empty = _Empty()
# Hack: disabled
if False and running_remotely():
# register/load model weights
filepath = WeightsFileHandler.restore_weights_file(empty, filepath, Framework.keras,
PatchKerasModelIO.__main_task)
model = original_fn(filepath, *args, **kwargs)
else:
model = original_fn(filepath, *args, **kwargs)
# register/load model weights
WeightsFileHandler.restore_weights_file(empty, filepath, Framework.keras, PatchKerasModelIO.__main_task)
# update the input model object
if empty.trains_in_model:
# noinspection PyBroadException
try:
model.trains_in_model = empty.trains_in_model
except Exception:
pass
return model
class PatchTensorflowModelIO(object):
__main_task = None
__patched = None
@staticmethod
def update_current_task(task, **_):
PatchTensorflowModelIO.__main_task = task
PatchTensorflowModelIO._patch_model_checkpoint()
PostImportHookPatching.add_on_import('tensorflow', PatchTensorflowModelIO._patch_model_checkpoint)
@staticmethod
def _patch_model_checkpoint():
if PatchTensorflowModelIO.__patched:
return
if 'tensorflow' not in sys.modules:
return
PatchTensorflowModelIO.__patched = True
# noinspection PyBroadException
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
# noinspection PyUnresolvedReferences
from tensorflow.python.training.saver import Saver # noqa
# noinspection PyBroadException
try:
Saver.save = _patched_call(Saver.save, PatchTensorflowModelIO._save)
except Exception:
pass
# noinspection PyBroadException
try:
Saver.restore = _patched_call(Saver.restore, PatchTensorflowModelIO._restore)
except Exception:
pass
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow')
# noinspection PyBroadException
try:
# make sure we import the correct version of save
import tensorflow # noqa
from tensorflow.saved_model import save # noqa
# actual import
from tensorflow.python.saved_model import save as saved_model # noqa
except ImportError:
# noinspection PyBroadException
try:
# make sure we import the correct version of save
import tensorflow # noqa
from tensorflow.saved_model.experimental import save # noqa
# actual import
import tensorflow.saved_model.experimental as saved_model # noqa
except ImportError:
saved_model = None
except Exception:
saved_model = None
except Exception:
saved_model = None
if saved_model is not None:
saved_model.save = _patched_call(saved_model.save, PatchTensorflowModelIO._save_model)
# noinspection PyBroadException
try:
# make sure we import the correct version of save
import tensorflow # noqa
# actual import
from tensorflow.saved_model import load # noqa
# noinspection PyUnresolvedReferences
import tensorflow.saved_model as saved_model_load # noqa
saved_model_load.load = _patched_call(saved_model_load.load, PatchTensorflowModelIO._load)
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow')
# noinspection PyBroadException
try:
# make sure we import the correct version of save
import tensorflow # noqa
# actual import
# noinspection PyUnresolvedReferences
from tensorflow.saved_model import loader as loader1 # noqa
loader1.load = _patched_call(loader1.load, PatchTensorflowModelIO._load)
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow')
# noinspection PyBroadException
try:
# make sure we import the correct version of save
import tensorflow # noqa
# actual import
# noinspection PyUnresolvedReferences
from tensorflow.compat.v1.saved_model import loader as loader2 # noqa
loader2.load = _patched_call(loader2.load, PatchTensorflowModelIO._load)
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow')
# noinspection PyBroadException
try:
import tensorflow # noqa
from tensorflow.train import Checkpoint # noqa
# noinspection PyBroadException
try:
Checkpoint.save = _patched_call(Checkpoint.save, PatchTensorflowModelIO._ckpt_save)
except Exception:
pass
# noinspection PyBroadException
try:
Checkpoint.restore = _patched_call(Checkpoint.restore, PatchTensorflowModelIO._ckpt_restore)
except Exception:
pass
# noinspection PyBroadException
try:
Checkpoint.write = _patched_call(Checkpoint.write, PatchTensorflowModelIO._ckpt_write)
except Exception:
pass
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow')
@staticmethod
def _save(original_fn, self, sess, save_path, *args, **kwargs):
saved_path = original_fn(self, sess, save_path, *args, **kwargs)
if not saved_path:
return saved_path
# store output Model
return WeightsFileHandler.create_output_model(self, saved_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
@staticmethod
def _save_model(original_fn, obj, export_dir, *args, **kwargs):
original_fn(obj, export_dir, *args, **kwargs)
# store output Model
WeightsFileHandler.create_output_model(obj, export_dir, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
@staticmethod
def _restore(original_fn, self, sess, save_path, *args, **kwargs):
if PatchTensorflowModelIO.__main_task is None:
return original_fn(self, sess, save_path, *args, **kwargs)
# Hack: disabled
if False and running_remotely():
# register/load model weights
save_path = WeightsFileHandler.restore_weights_file(self, save_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
# load model
return original_fn(self, sess, save_path, *args, **kwargs)
# load model, if something is wrong, exception will be raised before we register the input model
model = original_fn(self, sess, save_path, *args, **kwargs)
# register/load model weights
WeightsFileHandler.restore_weights_file(self, save_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
return model
@staticmethod
def _load(original_fn, sess, tags, export_dir, *args, **saver_kwargs):
if PatchTensorflowModelIO.__main_task is None:
return original_fn(sess, tags, export_dir, *args, **saver_kwargs)
# register input model
empty = _Empty()
# Hack: disabled
if False and running_remotely():
export_dir = WeightsFileHandler.restore_weights_file(empty, export_dir, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
model = original_fn(sess, tags, export_dir, *args, **saver_kwargs)
else:
# try to load model before registering, it might fail
model = original_fn(sess, tags, export_dir, *args, **saver_kwargs)
WeightsFileHandler.restore_weights_file(empty, export_dir, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
if empty.trains_in_model:
# noinspection PyBroadException
try:
model.trains_in_model = empty.trains_in_model
except Exception:
pass
return model
@staticmethod
def _ckpt_save(original_fn, self, file_prefix, *args, **kwargs):
checkpoint_path = original_fn(self, file_prefix, *args, **kwargs)
if PatchTensorflowModelIO.__main_task is None:
return checkpoint_path
WeightsFileHandler.create_output_model(self, checkpoint_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
return checkpoint_path
@staticmethod
def _ckpt_write(original_fn, self, file_prefix, *args, **kwargs):
checkpoint_path = original_fn(self, file_prefix, *args, **kwargs)
if PatchTensorflowModelIO.__main_task is None:
return checkpoint_path
WeightsFileHandler.create_output_model(self, checkpoint_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
return checkpoint_path
@staticmethod
def _ckpt_restore(original_fn, self, save_path, *args, **kwargs):
if PatchTensorflowModelIO.__main_task is None:
return original_fn(self, save_path, *args, **kwargs)
# register input model
empty = _Empty()
# Hack: disabled
if False and running_remotely():
save_path = WeightsFileHandler.restore_weights_file(empty, save_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
model = original_fn(self, save_path, *args, **kwargs)
else:
# try to load model before registering it, in case it fails.
model = original_fn(self, save_path, *args, **kwargs)
WeightsFileHandler.restore_weights_file(empty, save_path, Framework.tensorflow,
PatchTensorflowModelIO.__main_task)
if empty.trains_in_model:
# noinspection PyBroadException
try:
model.trains_in_model = empty.trains_in_model
except Exception:
pass
return model
class PatchTensorflow2ModelIO(object):
__main_task = None
__patched = None
@staticmethod
def update_current_task(task, **_):
PatchTensorflow2ModelIO.__main_task = task
PatchTensorflow2ModelIO._patch_model_checkpoint()
PostImportHookPatching.add_on_import('tensorflow', PatchTensorflow2ModelIO._patch_model_checkpoint)
@staticmethod
def _patch_model_checkpoint():
if PatchTensorflow2ModelIO.__patched:
return
if 'tensorflow' not in sys.modules:
return
PatchTensorflow2ModelIO.__patched = True
# noinspection PyBroadException
try:
# hack: make sure tensorflow.__init__ is called
import tensorflow # noqa
from tensorflow.python.training.tracking import util # noqa
# noinspection PyBroadException
try:
util.TrackableSaver.save = _patched_call(util.TrackableSaver.save,
PatchTensorflow2ModelIO._save)
except Exception:
pass
# noinspection PyBroadException
try:
util.TrackableSaver.restore = _patched_call(util.TrackableSaver.restore,
PatchTensorflow2ModelIO._restore)
except Exception:
pass
except ImportError:
pass
except Exception:
LoggerRoot.get_base_logger(TensorflowBinding).debug('Failed patching tensorflow v2')
@staticmethod
def _save(original_fn, self, file_prefix, *args, **kwargs):
model = original_fn(self, file_prefix, *args, **kwargs)
# store output Model
# noinspection PyBroadException
try:
WeightsFileHandler.create_output_model(self, file_prefix, Framework.tensorflow,
PatchTensorflow2ModelIO.__main_task)
except Exception:
pass
return model
@staticmethod
def _restore(original_fn, self, save_path, *args, **kwargs):
if PatchTensorflow2ModelIO.__main_task is None:
return original_fn(self, save_path, *args, **kwargs)
# Hack: disabled
if False and running_remotely():
# register/load model weights
# noinspection PyBroadException
try:
save_path = WeightsFileHandler.restore_weights_file(self, save_path, Framework.tensorflow,
PatchTensorflow2ModelIO.__main_task)
except Exception:
pass
# load model
return original_fn(self, save_path, *args, **kwargs)
# load model, if something is wrong, exception will be raised before we register the input model
model = original_fn(self, save_path, *args, **kwargs)
# register/load model weights
# noinspection PyBroadException
try:
WeightsFileHandler.restore_weights_file(self, save_path, Framework.tensorflow,
PatchTensorflow2ModelIO.__main_task)
except Exception:
pass
return model
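# --- Editorial sketch (not part of the original module) ---
# Every patch above relies on a small wrapper helper, referred to in this excerpt
# as ``_patched_call`` (defined earlier in the file, not shown here). The pattern
# it implements is "let a hook run around the original function"; a minimal,
# self-contained version of that idea looks like this:
def _patched_call_sketch(original_fn, patched_fn):
    """Return a wrapper that invokes ``patched_fn(original_fn, *args, **kwargs)``."""
    def _inner(*args, **kwargs):
        return patched_fn(original_fn, *args, **kwargs)
    return _inner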
|
[
"tensorflow.python.framework.constant_op.constant",
"base64.b64decode",
"collections.defaultdict",
"numpy.histogram",
"numpy.arange",
"numpy.interp",
"google.protobuf.json_format.MessageToDict",
"numpy.atleast_2d",
"tensorflow.py_function",
"numpy.zeros_like",
"tensorflow.python.ops.summary_ops_v2._summary_state.writer._init_op_fn.keywords.get",
"numpy.append",
"tensorflow.python.eager.context.executing_eagerly",
"numpy.random.shuffle",
"tensorflow.python.eager.context.context",
"functools.partial",
"io.BytesIO",
"threading.RLock",
"numpy.asarray",
"numpy.isinf",
"numpy.concatenate",
"re.compile",
"numpy.vstack",
"numpy.atleast_3d",
"PIL.Image.open",
"six.StringIO",
"numpy.array",
"mimetypes.guess_extension",
"numpy.sqrt"
] |
[((9151, 9168), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (9166, 9168), False, 'import threading\n'), ((2812, 2844), 'numpy.random.shuffle', 'np.random.shuffle', (['cur_idx_below'], {}), '(cur_idx_below)\n', (2829, 2844), True, 'import numpy as np\n'), ((5990, 6017), 'numpy.append', 'np.append', (['hist_iters', 'step'], {}), '(hist_iters, step)\n', (5999, 6017), True, 'import numpy as np\n'), ((16393, 16417), 'collections.defaultdict', 'defaultdict', (['(lambda : ())'], {}), '(lambda : ())\n', (16404, 16417), False, 'from collections import defaultdict\n'), ((26576, 26595), 'io.BytesIO', 'BytesIO', (['audio_data'], {}), '(audio_data)\n', (26583, 26595), False, 'from io import BytesIO\n'), ((66940, 66967), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(False)'], {}), '(False)\n', (66960, 66967), False, 'from tensorflow.python.framework import constant_op\n'), ((3001, 3056), 'numpy.arange', 'np.arange', (['(_histogram_granularity - 1)', '_hist_iters.size'], {}), '(_histogram_granularity - 1, _hist_iters.size)\n', (3010, 3056), True, 'import numpy as np\n'), ((3069, 3101), 'numpy.random.shuffle', 'np.random.shuffle', (['cur_idx_above'], {}), '(cur_idx_above)\n', (3086, 3101), True, 'import numpy as np\n'), ((3238, 3250), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3246, 3250), True, 'import numpy as np\n'), ((7874, 7930), 'numpy.interp', 'np.interp', (['prev_xedge', 'h[:, 0]', 'h[:, 1]'], {'right': '(0)', 'left': '(0)'}), '(prev_xedge, h[:, 0], h[:, 1], right=0, left=0)\n', (7883, 7930), True, 'import numpy as np\n'), ((17214, 17229), 'io.BytesIO', 'BytesIO', (['imdata'], {}), '(imdata)\n', (17221, 17229), False, 'from io import BytesIO\n'), ((17247, 17265), 'PIL.Image.open', 'Image.open', (['output'], {}), '(output)\n', (17257, 17265), False, 'from PIL import Image\n'), ((17286, 17300), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (17296, 17300), True, 'import numpy as np\n'), ((26091, 26118), 'base64.b64decode', 'base64.b64decode', (['audio_str'], {}), '(audio_str)\n', (26107, 26118), False, 'import base64\n'), ((29789, 29809), 'google.protobuf.json_format.MessageToDict', 'MessageToDict', (['event'], {}), '(event)\n', (29802, 29809), False, 'from google.protobuf.json_format import MessageToDict\n'), ((34682, 34708), 're.compile', '_re.compile', (['"""[^-/\\\\w\\\\.]"""'], {}), "('[^-/\\\\w\\\\.]')\n", (34693, 34708), True, 'import re as _re\n'), ((4437, 4469), 'numpy.histogram', 'np.histogram', (['hist_data'], {'bins': '(32)'}), '(hist_data, bins=32)\n', (4449, 4469), True, 'import numpy as np\n'), ((5432, 5444), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5440, 5444), True, 'import numpy as np\n'), ((17167, 17192), 'base64.b64decode', 'base64.b64decode', (['img_str'], {}), '(img_str)\n', (17183, 17192), False, 'import base64\n'), ((19474, 19490), 'numpy.sqrt', 'np.sqrt', (['dims[0]'], {}), '(dims[0])\n', (19481, 19490), True, 'import numpy as np\n'), ((24899, 24950), 'numpy.arange', 'np.arange', (['(0.0)', '(1.0)', 'width'], {'dtype': 'plot_values.dtype'}), '(0.0, 1.0, width, dtype=plot_values.dtype)\n', (24908, 24950), True, 'import numpy as np\n'), ((26644, 26682), 'mimetypes.guess_extension', 'guess_extension', (["values['contentType']"], {}), "(values['contentType'])\n", (26659, 26682), False, 'from mimetypes import guess_extension\n'), ((27717, 27735), 'six.StringIO', 'six.StringIO', (['text'], {}), '(text)\n', (27729, 27735), False, 'import six\n'), ((53950, 54045), 'functools.partial', 'partial', 
(['IsTensorboardInit._patched_tb__init__', 'gen_summary_ops.create_summary_file_writer'], {}), '(IsTensorboardInit._patched_tb__init__, gen_summary_ops.\n create_summary_file_writer)\n', (53957, 54045), False, 'from functools import partial\n'), ((54169, 54262), 'functools.partial', 'partial', (['IsTensorboardInit._patched_tb__init__', 'gen_summary_ops.create_summary_db_writer'], {}), '(IsTensorboardInit._patched_tb__init__, gen_summary_ops.\n create_summary_db_writer)\n', (54176, 54262), False, 'from functools import partial\n'), ((6305, 6325), 'numpy.isinf', 'np.isinf', (['hist[:, 0]'], {}), '(hist[:, 0])\n', (6313, 6325), True, 'import numpy as np\n'), ((8161, 8187), 'numpy.zeros_like', 'np.zeros_like', (['report_hist'], {}), '(report_hist)\n', (8174, 8187), True, 'import numpy as np\n'), ((3288, 3334), 'numpy.concatenate', 'np.concatenate', (['(cur_idx_below, cur_idx_above)'], {}), '((cur_idx_below, cur_idx_above))\n', (3302, 3334), True, 'import numpy as np\n'), ((17905, 17923), 'numpy.atleast_3d', 'np.atleast_3d', (['val'], {}), '(val)\n', (17918, 17923), True, 'import numpy as np\n'), ((60249, 60277), 'tensorflow.python.eager.context.executing_eagerly', '_context.executing_eagerly', ([], {}), '()\n', (60275, 60277), True, 'from tensorflow.python.eager import context as _context\n'), ((60583, 60661), 'tensorflow.py_function', 'py_function', (['_report_summary_op'], {'inp': '[writer, step, tag, value, name]', 'Tout': '[]'}), '(_report_summary_op, inp=[writer, step, tag, value, name], Tout=[])\n', (60594, 60661), False, 'from tensorflow import py_function\n'), ((62498, 62526), 'tensorflow.python.eager.context.executing_eagerly', '_context.executing_eagerly', ([], {}), '()\n', (62524, 62526), True, 'from tensorflow.python.eager import context as _context\n'), ((62832, 62911), 'tensorflow.py_function', 'py_function', (['_report_summary_op'], {'inp': '[writer, step, tag, values, name]', 'Tout': '[]'}), '(_report_summary_op, inp=[writer, step, tag, values, name], Tout=[])\n', (62843, 62911), False, 'from tensorflow import py_function\n'), ((64724, 64752), 'tensorflow.python.eager.context.executing_eagerly', '_context.executing_eagerly', ([], {}), '()\n', (64750, 64752), True, 'from tensorflow.python.eager import context as _context\n'), ((65058, 65164), 'tensorflow.py_function', 'py_function', (['_report_summary_op'], {'inp': '[writer, step, tag, tensor, bad_color, max_images, name]', 'Tout': '[]'}), '(_report_summary_op, inp=[writer, step, tag, tensor, bad_color,\n max_images, name], Tout=[])\n', (65069, 65164), False, 'from tensorflow import py_function\n'), ((3859, 3883), 'numpy.atleast_2d', 'np.atleast_2d', (['hist_data'], {}), '(hist_data)\n', (3872, 3883), True, 'import numpy as np\n'), ((25116, 25161), 'numpy.vstack', 'np.vstack', (['(plot_values[-1], plot_values[-2])'], {}), '((plot_values[-1], plot_values[-2]))\n', (25125, 25161), True, 'import numpy as np\n'), ((55392, 55448), 'tensorflow.python.ops.summary_ops_v2._summary_state.writer._init_op_fn.keywords.get', '_summary_state.writer._init_op_fn.keywords.get', (['"""logdir"""'], {}), "('logdir')\n", (55438, 55448), False, 'from tensorflow.python.ops.summary_ops_v2 import _summary_state\n'), ((55084, 55101), 'tensorflow.python.eager.context.context', 'context.context', ([], {}), '()\n', (55099, 55101), False, 'from tensorflow.python.eager import context\n')]
|
import pandas as pd
import hoki.hrdiagrams
import hoki.cmd
import hoki.load as load
from hoki.constants import BPASS_TIME_BINS
import warnings
from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning
from hoki.utils.hoki_object import HokiObject
from hoki.utils.hoki_dialogue import HokiDialogue
import numpy as np
Dialogue = HokiDialogue()
deprecation='\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!' \
'\nDeprecated since hoki v1.6 ' \
'\nPLEASE USE THE hoki.age SUBPACKAGE AND MODULES WITHIN. ' \
'\ne.g. from hoki.age.wizard import AgeWizard' \
'\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n'
warnings.warn(HokiDeprecationWarning(deprecation))
class AgeWizard(HokiObject):
"""
AgeWizard object
"""
def __init__(self, obs_df, model):
"""
Initialisation of the AgeWizard object
Parameters
----------
obs_df: pandas.DataFrame
Observational data. MUST contain a logT and logL column (for HRD comparison) or a col and mag column
(for CMD comparison)
        model: str, hoki.hrdiagrams.HRDiagram or hoki.cmd.CMD
            Location of the modeled HRD or CMD. This can be an already instantiated HRDiagram or CMD object, or a
path to an HR Diagram file or a pickled CMD.
"""
        # Making sure the observational properties are given in a format we can use.
if not isinstance(obs_df, pd.DataFrame):
raise HokiFormatError("Observations should be stored in a Data Frame")
if 'name' not in obs_df.columns:
warnings.warn("We expect the name of sources to be given in the 'name' column. "
"If I can't find names I'll make my own ;)", HokiFormatWarning)
# Checking what format they giving for the model:
if isinstance(model, hoki.hrdiagrams.HRDiagram):
self.model = model
elif isinstance(model, hoki.cmd.CMD):
self.model = model
elif isinstance(model, str) and 'hrs' in model:
self.model = load.model_output(model, hr_type='TL')
elif isinstance(model, str):
try:
self.model = load.unpickle(path=model)
except AssertionError:
print('-----------------')
print(
'HOKI DEBUGGER:\nThe model param should be a path to \na BPASS HRDiagram output file or pickled CMD,'
'or\na hoki.hrdiagrams.HRDiagram or a hoki.cmd.CMD')
print('-----------------')
raise HokiFatalError('model is ' + str(type(model)))
else:
print('-----------------')
print('HOKI DEBUGGER:\nThe model param should be a path to \na BPASS HRDiagram output file or pickled CMD,'
'or\na hoki.hrdiagrams.HRDiagram or a hoki.cmd.CMD')
print('-----------------')
raise HokiFatalError('model is ' + str(type(model)))
self.obs_df = obs_df
self.coordinates = find_coordinates(self.obs_df, self.model)
# This line is obsolete but might need revival if we ever want to add the not normalised distributions again
# self._distributions = calculate_distributions_normalised(self.obs_df, self.model)
self.pdfs = calculate_individual_pdfs(self.obs_df, self.model).fillna(0)
self.sources = self.pdfs.columns.to_list()
self.sample_pdf = None
self._most_likely_age = None
def calculate_sample_pdf(self, not_you=None, return_df=False):
self.sample_pdf = calculate_sample_pdf(self.pdfs, not_you=not_you)
if return_df: return self.sample_pdf
@property
def most_likely_age(self):
"""
Finds the most likely age by finding the max value in self.calculate_sample_pdf
"""
if self._most_likely_age is not None: return self._most_likely_age
if self.sample_pdf is None:
warnings.warn('self.multiplied_pdf is not yet defined -- running AgeWizard.combined_pdfs()',
HokiUserWarning)
self.calculate_sample_pdf()
index = self.sample_pdf.index[self.sample_pdf.pdf == max(self.sample_pdf.pdf)].tolist()
return self.t[index]
@property
def most_likely_ages(self):
"""
Finds the most likely ages for all the sources given in the obs_df DataFrame.
"""
# index = self.pdfs.drop('time_bins', axis=1).idxmax(axis=0).tolist()
index = self.pdfs.idxmax(axis=0).tolist()
return self.t[index]
def calculate_p_given_age_range(self, age_range):
"""
Calculates the probability that each source has age within age_range
Parameters
----------
age_range: list or tuple of 2 values
Minimum and Maximum age to consider (inclusive).
Returns
-------
numpy.array containing the probabilities.
"""
# Selects only the rows corresponding to the range age_range[0] to age_range[1] (inclusive)
# and then we sum the probabilities up for each column.
probability = calculate_p_given_age_range(self.pdfs, age_range)
return probability
def find_coordinates(obs_df, model):
"""
Finds the coordinates on a BPASS CMD or HRD that correspond to the given observations
Parameters
----------
obs_df: pandas.DataFrame
Observational data. MUST contain a logT and logL column (for HRD comparison) or a col and mag column
(for CMD comparison)
    model: str, hoki.hrdiagrams.HRDiagram or hoki.cmd.CMD
        Location of the modeled HRD or CMD. This can be an already instantiated HRDiagram or CMD object, or a
path to an HR Diagram file or a pickled CMD.
Returns
-------
"""
if isinstance(model, hoki.hrdiagrams.HRDiagram):
return _find_hrd_coordinates(obs_df, model)
elif isinstance(model, hoki.cmd.CMD):
return _find_cmd_coordinates(obs_df, model)
else:
raise HokiFormatError("The model should be an instance of hoki.hrdiagrams.HRDiagrams or hoki.cmd.CMD")
def _find_hrd_coordinates(obs_df, myhrd):
"""
Find the BPASS HRD coordinates that match the given observations
Parameters
----------
obs_df: pandas.DataFrame
Observational data. MUST contain a logT and logL column.
myhrd: hoki.hrdiagrams.HRDiagrams
BPASS HRDiagram
Returns
-------
Tuple of lists:(logT coordinates, logL coordinates)
"""
if not isinstance(obs_df, pd.DataFrame):
raise HokiFormatError("obs_df should be a pandas.DataFrame")
if not isinstance(myhrd, hoki.hrdiagrams.HRDiagram):
raise HokiFormatError("model should be an instance of hoki.hrdiagrams.HRDiagrams")
    # List of indices that locate the HRD position most closely matching each observation
L_i = []
T_i = []
try:
logT, logL = obs_df.logT, obs_df.logL
except AttributeError:
raise HokiFormatError("obs_df should have a logT and a logL column")
# How this works:
# abs(model.L_coord-L)==abs(model.L_coord-L).min() *finds* the HRD location that most closely corresponds to obs.
# np.where(....)[0] *finds* the index of that location (which was originally in L or T space)
    # int( ....) just makes sure we get an integer index rather than a float
# Then we append that index to our list.
for T, L in zip(logT, logL):
try:
T = float(T)
# Finds the index that is at the minimum distance in Temperature space and adds it to the list
T_i.append(int((np.where(abs(myhrd.T_coord - T) == abs(myhrd.T_coord - T).min()))[0]))
except TypeError:
T_i.append(int((np.where(abs(myhrd.T_coord - T) == abs(myhrd.T_coord - T).min()))[0][0]))
except ValueError:
warnings.warn("T=" + str(T) + " cannot be converted to a float", HokiUserWarning)
T_i.append(np.nan)
try:
L = float(L)
# Finds the index that is at the minimum distance in Luminosity space and adds it to the list
L_i.append(int((np.where(abs(myhrd.L_coord - L) == abs(myhrd.L_coord - L).min()))[0]))
except TypeError:
L_i.append(int((np.where(abs(myhrd.L_coord - L) == abs(myhrd.L_coord - L).min()))[0][0]))
except ValueError:
warnings.warn("L=" + str(L) + " cannot be converted to a float", HokiUserWarning)
L_i.append(np.nan)
return T_i, L_i
def _find_cmd_coordinates(obs_df, mycmd):
"""
    Find the BPASS CMD coordinates that match the given observations
Parameters
----------
obs_df: pandas.DataFrame
Observational data. MUST contain a col and mag column.
mycmd: hoki.cmd.CMD
BPASS CMD
Returns
-------
Tuple of lists:(colour coordinates, magnitude coordinates)
"""
if not isinstance(obs_df, pd.DataFrame):
raise HokiFormatError("obs_df should be a pandas.DataFrame")
if not isinstance(mycmd, hoki.cmd.CMD):
raise HokiFormatError("cmd should be an instance of hoki.cmd.CMD")
    # List of indices that locate the CMD position most closely matching each observation
col_i = []
mag_i = []
try:
colours, magnitudes = obs_df.col, obs_df.mag
except AttributeError:
        raise HokiFormatError("obs_df should have a col and a mag column")
# How this works:
    # abs(mycmd.col_range-col)==abs(mycmd.col_range-col).min() *finds* the CMD location that most closely corresponds to obs.
    # np.where(....)[0] *finds* the index
    # of that location (which was originally in colour or magnitude space)
    # int( ....) just makes sure we get an integer index rather than a float
# Then we append that index to our list.
for col, mag in zip(colours, magnitudes):
try:
col = float(col)
# Finds the index that is at the minimum distance in Colour space and adds it to the list
col_i.append(int((np.where(abs(mycmd.col_range - col) == abs(mycmd.col_range - col).min()))[0]))
except TypeError:
col_i.append(int((np.where(abs(mycmd.col_range - col) == abs(mycmd.col_range - col).min()))[0][0]))
except ValueError:
warnings.warn("Colour=" + str(col) + " cannot be converted to a float", HokiUserWarning)
col_i.append(np.nan)
try:
mag = float(mag)
# Finds the index that is at the minimum distance in Magnitude space and adds it to the list
mag_i.append(int((np.where(abs(mycmd.mag_range - mag) == abs(mycmd.mag_range - mag).min()))[0]))
except TypeError:
mag_i.append(int((np.where(abs(mycmd.mag_range - mag) == abs(mycmd.mag_range - mag).min()))[0][0]))
except ValueError:
warnings.warn("Magnitude=" + str(mag) + " cannot be converted to a float", HokiUserWarning)
mag_i.append(np.nan)
return col_i, mag_i
def normalise_1d(distribution, crop_the_future=False):
"""
    Simple function that divides by the sum of the 1D array or DataFrame given.
"""
if crop_the_future:
distribution = _crop_the_future(distribution)
area = np.sum([bin_t for bin_t in distribution])
return distribution / area
def _crop_the_future(distribution):
    # Anything above 10.1 is the future - time bin 42 and above must have proba == 0
array_that_erases_the_future = np.array([1] * 42 + [0] * 9)
return np.array(distribution) * array_that_erases_the_future
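# Editorial sketch (not part of the original hoki source): a tiny self-contained
# check of the two helpers above. ``normalise_1d`` rescales a non-negative 1D
# array so it sums to 1; with ``crop_the_future=True`` the last 9 of the 51
# BPASS time bins (log(age/yr) above ~10.1) are zeroed before normalising.
def _normalise_1d_example():  # pragma: no cover - illustrative only
    demo = np.ones(51)
    pdf = normalise_1d(demo, crop_the_future=True)
    assert np.isclose(pdf.sum(), 1.0) and np.all(pdf[42:] == 0)
    return pdf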
def calculate_individual_pdfs(obs_df, model):
"""
Calculates the age pdfs of all the stars in the sample and returns them in a dataframe
Parameters
----------
obs_df: pandas.DataFrame
Dataframe containing the observational data
model: hoki.hrdiagrams.HRDiagrams or hoki.cmd.CMD
BPASS HRDiagram or CMD
Returns
-------
pandas.Dataframe containing the age pdfs of each star
"""
likelihoods = calculate_distributions_normalised(obs_df, model)
pdfs = []
for col in likelihoods.columns:
pdfs.append(normalise_1d(likelihoods[col].values))
return pd.DataFrame(np.array(pdfs).T, columns=likelihoods.columns)
def calculate_distributions(obs_df, model):
"""
Given observations and an HR Diagram, calculates the distribution across ages (not normalised)
Parameters
----------
obs_df: pandas.DataFrame
Observational data. MUST contain a logT and logL column.
model: hoki.hrdiagrams.HRDiagrams or hoki.cmd.CMD
BPASS HRDiagram or CMD
Returns
-------
Age Probability Distribution Functions in a pandas.DataFrame.
"""
    # Checking whether it's an HRD or a CMD
if isinstance(model, hoki.hrdiagrams.HRDiagram):
x_coord, y_coord = find_coordinates(obs_df, model)
if isinstance(model, hoki.cmd.CMD):
y_coord, x_coord = find_coordinates(obs_df, model) # yeah it's reversed... -_-
# If source names not given we make our own
try:
source_names = obs_df.name
except AttributeError:
warnings.warn("No source names given so I'll make my own", HokiUserWarning)
source_names = ["s" + str(i) for i in range(obs_df.shape[0])]
likelihoods = []
    # Time to calculate the pdfs
for i, name in zip(range(obs_df.shape[0]), source_names):
xi, yi = x_coord[i], y_coord[i] # just saving space
# Here we take care of the possibility that a coordinate is a NaN
if np.isnan(xi) or np.isnan(yi):
warnings.warn("NaN Value encountered in coordinates for source: " + name, HokiUserWarning)
likelihoods.append([0] * 51) # Probability is then 0 at all times - That star doesn't exist in our models
continue
        # Here we fill our not-yet-normalised distribution
distrib_i = []
for model_i in model:
# For each time step i, we retrieve the proba in CMD_i or HRD_i and fill our distribution element distrib_i
# with it. At the end of the for loop we have iterated over all 51 time bins
distrib_i.append(model_i[xi, yi])
# Then we normalise, so that we have proper probability distributions
# pdf_i = normalise_1d(distrib_i)
# finally our pdf is added to the list
likelihoods.append(distrib_i)
    # Our list of pdfs (which is a list of lists) is turned into a DataFrame with the source names as column names
likelihoods_df = pd.DataFrame((np.array(likelihoods)).T, columns=source_names)
# We add the time bins in there because it can make plotting extra convenient.
# distributions_df['time_bins'] = hoki.constants.BPASS_TIME_BINS
return likelihoods_df
def calculate_distributions_normalised(obs_df, model):
"""
Given observations and an HR Diagram, calculates the distribution across ages NORMALISED
Parameters
----------
obs_df: pandas.DataFrame
Observational data. MUST contain a logT and logL column.
model: hoki.hrdiagrams.HRDiagrams or hoki.cmd.CMD
BPASS HRDiagram or CMD
Returns
-------
Age Probability Distribution Functions in a pandas.DataFrame.
"""
    # Checking whether it's an HRD or a CMD
if isinstance(model, hoki.hrdiagrams.HRDiagram):
x_coord, y_coord = find_coordinates(obs_df, model)
if isinstance(model, hoki.cmd.CMD):
y_coord, x_coord = find_coordinates(obs_df, model) # yeah it's reversed... -_-
# If source names not given we make our own
try:
source_names = obs_df.name
except AttributeError:
warnings.warn("No source names given so I'll make my own", HokiUserWarning)
source_names = ["s" + str(i) for i in range(obs_df.shape[0])]
likelihoods = []
    # Time to calculate the pdfs
for i, name in zip(range(obs_df.shape[0]), source_names):
xi, yi = x_coord[i], y_coord[i] # just saving space
# Here we take care of the possibility that a coordinate is a NaN
if np.isnan(xi) or np.isnan(yi):
warnings.warn("NaN Value encountered in coordinates for source: " + name, HokiUserWarning)
likelihoods.append([0] * 51) # Probability is then 0 at all times - That star doesn't exist in our models
continue
        # Here we fill our not-yet-normalised distribution
distrib_i = []
for model_i in model:
# For each time step i, we retrieve the proba in CMD_i or HRD_i and fill our distribution element distrib_i
# with it. At the end of the for loop we have iterated over all 51 time bins
distrib_i.append(model_i[xi, yi])
# Then we normalise, so that we have proper probability distributions
# pdf_i = normalise_1d(distrib_i)
# finally our pdf is added to the list
likelihoods.append(normalise_1d(distrib_i, crop_the_future=True))
    # Our list of pdfs (which is a list of lists) is turned into a DataFrame with the source names as column names
likelihoods_df = pd.DataFrame((np.array(likelihoods)).T, columns=source_names)
# We add the time bins in there because it can make plotting extra convenient.
# distributions_df['time_bins'] = hoki.constants.BPASS_TIME_BINS
return likelihoods_df
def calculate_sample_pdf(distributions_df, not_you=None):
"""
    Adds together all the columns given in the DataFrame apart from the "time_bins" column
Parameters
----------
distributions_df: pandas.DataFrame
DataFrame containing probability distribution functions
not_you: list, optional
        List of the column names to ignore. Default is None so all the pdfs are combined
Returns
-------
Combined Probability Distribution Function in a pandas.DataFrame.
"""
assert isinstance(distributions_df, pd.DataFrame)
    # We start our combined pdf with a list of 0s. We'll then add each pdf in sequence.
combined_pdf = [0] * distributions_df.shape[0]
# We want to allow the user to exclude certain columns -- we drop them here.
if not_you:
try:
distributions_df = distributions_df.drop(labels=not_you, axis=1)
except KeyError as e:
message = 'FEATURE DISABLED' + '\nKeyError' + str(
e) + '\nHOKI DIALOGUE: Your labels could not be dropped -- ' \
'all pdfs will be combined \nDEBUGGING ASSISTANT: ' \
'Make sure the labels you listed are spelled correctly:)'
warnings.warn(message, HokiUserWarning)
    # We also must be careful not to include the time bin column in the sum, so we keep a list of the column names
# that remain after the "not_you" exclusion minus the time_bins column.
# columns = [col for col in distributions_df.columns if "time_bins" not in col]
columns = []
if "time_bins" not in distributions_df.columns:
for col in distributions_df.columns:
columns.append(col)
for col in columns:
# for col in distributions_df.columns:
combined_pdf += distributions_df[col].values
    combined_df = pd.DataFrame(normalise_1d(combined_pdf))
combined_df.columns = ['pdf']
return combined_df
def calculate_p_given_age_range(pdfs, age_range=None):
"""
Calculates the probability that each source has age within age_range
Parameters
----------
pdfs: pandas.DataFrame
Age Probability Distributions Functions
age_range: list or tuple of 2 values
Minimum and Maximum age to consider (inclusive).
Returns
-------
numpy.array containing the probabilities.
"""
# Selects only the rows corresponding to the range age_range[0] to age_range[1] (inclusive)
# and then we sum the probabilities up for each column.
probability = pdfs[(np.round(BPASS_TIME_BINS, 2) >= min(age_range))
& (np.round(BPASS_TIME_BINS, 2) <= max(age_range))].sum()
return probability
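# Editorial usage sketch (not part of hoki): ``calculate_p_given_age_range`` sums,
# per column, the pdf rows whose BPASS time bin falls inside the inclusive range.
# With a uniform toy pdf over the 51 BPASS bins, the mass between log(age/yr) 6.0
# and 7.0 (11 bins) is 11/51.
def _age_range_example():  # pragma: no cover - illustrative only
    toy_pdfs = pd.DataFrame({'s0': np.full(len(BPASS_TIME_BINS), 1 / len(BPASS_TIME_BINS))})
    return calculate_p_given_age_range(toy_pdfs, age_range=[6.0, 7.0])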
|
[
"numpy.sum",
"hoki.utils.exceptions.HokiDeprecationWarning",
"numpy.isnan",
"hoki.utils.hoki_dialogue.HokiDialogue",
"hoki.utils.exceptions.HokiFormatError",
"numpy.array",
"hoki.load.model_output",
"warnings.warn",
"numpy.round",
"hoki.load.unpickle"
] |
[((412, 426), 'hoki.utils.hoki_dialogue.HokiDialogue', 'HokiDialogue', ([], {}), '()\n', (424, 426), False, 'from hoki.utils.hoki_dialogue import HokiDialogue\n'), ((792, 827), 'hoki.utils.exceptions.HokiDeprecationWarning', 'HokiDeprecationWarning', (['deprecation'], {}), '(deprecation)\n', (814, 827), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((11476, 11517), 'numpy.sum', 'np.sum', (['[bin_t for bin_t in distribution]'], {}), '([bin_t for bin_t in distribution])\n', (11482, 11517), True, 'import numpy as np\n'), ((11708, 11736), 'numpy.array', 'np.array', (['([1] * 42 + [0] * 9)'], {}), '([1] * 42 + [0] * 9)\n', (11716, 11736), True, 'import numpy as np\n'), ((6740, 6794), 'hoki.utils.exceptions.HokiFormatError', 'HokiFormatError', (['"""obs_df should be a pandas.DataFrame"""'], {}), "('obs_df should be a pandas.DataFrame')\n", (6755, 6794), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((6866, 6942), 'hoki.utils.exceptions.HokiFormatError', 'HokiFormatError', (['"""model should be an instance of hoki.hrdiagrams.HRDiagrams"""'], {}), "('model should be an instance of hoki.hrdiagrams.HRDiagrams')\n", (6881, 6942), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((9173, 9227), 'hoki.utils.exceptions.HokiFormatError', 'HokiFormatError', (['"""obs_df should be a pandas.DataFrame"""'], {}), "('obs_df should be a pandas.DataFrame')\n", (9188, 9227), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((9286, 9346), 'hoki.utils.exceptions.HokiFormatError', 'HokiFormatError', (['"""cmd should be an instance of hoki.cmd.CMD"""'], {}), "('cmd should be an instance of hoki.cmd.CMD')\n", (9301, 9346), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((11748, 11770), 'numpy.array', 'np.array', (['distribution'], {}), '(distribution)\n', (11756, 11770), True, 'import numpy as np\n'), ((1618, 1682), 'hoki.utils.exceptions.HokiFormatError', 'HokiFormatError', (['"""Observations should be stored in a Data Frame"""'], {}), "('Observations should be stored in a Data Frame')\n", (1633, 1682), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((1737, 1888), 'warnings.warn', 'warnings.warn', (['"""We expect the name of sources to be given in the \'name\' column. If I can\'t find names I\'ll make my own ;)"""', 'HokiFormatWarning'], {}), '(\n "We expect the name of sources to be given in the \'name\' column. 
If I can\'t find names I\'ll make my own ;)"\n , HokiFormatWarning)\n', (1750, 1888), False, 'import warnings\n'), ((4099, 4218), 'warnings.warn', 'warnings.warn', (['"""self.multiplied_pdf is not yet defined -- running AgeWizard.combined_pdfs()"""', 'HokiUserWarning'], {}), "(\n 'self.multiplied_pdf is not yet defined -- running AgeWizard.combined_pdfs()'\n , HokiUserWarning)\n", (4112, 4218), False, 'import warnings\n'), ((6187, 6293), 'hoki.utils.exceptions.HokiFormatError', 'HokiFormatError', (['"""The model should be an instance of hoki.hrdiagrams.HRDiagrams or hoki.cmd.CMD"""'], {}), "(\n 'The model should be an instance of hoki.hrdiagrams.HRDiagrams or hoki.cmd.CMD'\n )\n", (6202, 6293), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((7158, 7220), 'hoki.utils.exceptions.HokiFormatError', 'HokiFormatError', (['"""obs_df should have a logT and a logL column"""'], {}), "('obs_df should have a logT and a logL column')\n", (7173, 7220), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((9573, 9635), 'hoki.utils.exceptions.HokiFormatError', 'HokiFormatError', (['"""obs_df should have a logT and a logL column"""'], {}), "('obs_df should have a logT and a logL column')\n", (9588, 9635), False, 'from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError, HokiFormatWarning, HokiDeprecationWarning\n'), ((12449, 12463), 'numpy.array', 'np.array', (['pdfs'], {}), '(pdfs)\n', (12457, 12463), True, 'import numpy as np\n'), ((13366, 13441), 'warnings.warn', 'warnings.warn', (['"""No source names given so I\'ll make my own"""', 'HokiUserWarning'], {}), '("No source names given so I\'ll make my own", HokiUserWarning)\n', (13379, 13441), False, 'import warnings\n'), ((13777, 13789), 'numpy.isnan', 'np.isnan', (['xi'], {}), '(xi)\n', (13785, 13789), True, 'import numpy as np\n'), ((13793, 13805), 'numpy.isnan', 'np.isnan', (['yi'], {}), '(yi)\n', (13801, 13805), True, 'import numpy as np\n'), ((13819, 13913), 'warnings.warn', 'warnings.warn', (["('NaN Value encountered in coordinates for source: ' + name)", 'HokiUserWarning'], {}), "('NaN Value encountered in coordinates for source: ' + name,\n HokiUserWarning)\n", (13832, 13913), False, 'import warnings\n'), ((14770, 14791), 'numpy.array', 'np.array', (['likelihoods'], {}), '(likelihoods)\n', (14778, 14791), True, 'import numpy as np\n'), ((15872, 15947), 'warnings.warn', 'warnings.warn', (['"""No source names given so I\'ll make my own"""', 'HokiUserWarning'], {}), '("No source names given so I\'ll make my own", HokiUserWarning)\n', (15885, 15947), False, 'import warnings\n'), ((16283, 16295), 'numpy.isnan', 'np.isnan', (['xi'], {}), '(xi)\n', (16291, 16295), True, 'import numpy as np\n'), ((16299, 16311), 'numpy.isnan', 'np.isnan', (['yi'], {}), '(yi)\n', (16307, 16311), True, 'import numpy as np\n'), ((16325, 16419), 'warnings.warn', 'warnings.warn', (["('NaN Value encountered in coordinates for source: ' + name)", 'HokiUserWarning'], {}), "('NaN Value encountered in coordinates for source: ' + name,\n HokiUserWarning)\n", (16338, 16419), False, 'import warnings\n'), ((17312, 17333), 'numpy.array', 'np.array', (['likelihoods'], {}), '(likelihoods)\n', (17320, 17333), True, 'import numpy as np\n'), ((18778, 18817), 'warnings.warn', 'warnings.warn', (['message', 'HokiUserWarning'], {}), '(message, HokiUserWarning)\n', (18791, 18817), False, 
'import warnings\n'), ((2213, 2251), 'hoki.load.model_output', 'load.model_output', (['model'], {'hr_type': '"""TL"""'}), "(model, hr_type='TL')\n", (2230, 2251), True, 'import hoki.load as load\n'), ((20085, 20113), 'numpy.round', 'np.round', (['BPASS_TIME_BINS', '(2)'], {}), '(BPASS_TIME_BINS, 2)\n', (20093, 20113), True, 'import numpy as np\n'), ((20159, 20187), 'numpy.round', 'np.round', (['BPASS_TIME_BINS', '(2)'], {}), '(BPASS_TIME_BINS, 2)\n', (20167, 20187), True, 'import numpy as np\n'), ((2335, 2360), 'hoki.load.unpickle', 'load.unpickle', ([], {'path': 'model'}), '(path=model)\n', (2348, 2360), True, 'import hoki.load as load\n')]
|
import os
import logging
import torch
import shutil
def allocate_tensors():
"""
init data tensors
:return: data tensors
"""
tensors = dict()
tensors['support_data'] = torch.FloatTensor()
tensors['support_label'] = torch.LongTensor()
tensors['query_data'] = torch.FloatTensor()
tensors['query_label'] = torch.LongTensor()
return tensors
def set_tensors(tensors, batch):
"""
set data to initialized tensors
:param tensors: initialized data tensors
:param batch: current batch of data
:return: None
"""
support_data, support_label, query_data, query_label = batch
tensors['support_data'].resize_(support_data.size()).copy_(support_data)
tensors['support_label'].resize_(support_label.size()).copy_(support_label)
tensors['query_data'].resize_(query_data.size()).copy_(query_data)
tensors['query_label'].resize_(query_label.size()).copy_(query_label)
def set_logging_config(logdir):
"""
set logging configuration
:param logdir: directory put logs
:return: None
"""
if not os.path.exists(logdir):
os.makedirs(logdir)
logging.basicConfig(format="[%(asctime)s] [%(name)s] %(message)s",
level=logging.INFO,
handlers=[logging.FileHandler(os.path.join(logdir, 'log.txt')),
logging.StreamHandler(os.sys.stdout)])
def save_checkpoint(state, is_best, exp_name):
"""
save the checkpoint during training stage
:param state: content to be saved
:param is_best: if DPGN model's performance is the best at current step
:param exp_name: experiment name
:return: None
"""
torch.save(state, os.path.join('{}'.format(exp_name), 'checkpoint.pth.tar'))
if is_best:
shutil.copyfile(os.path.join('{}'.format(exp_name), 'checkpoint.pth.tar'),
os.path.join('{}'.format(exp_name), 'model_best.pth.tar'))
def adjust_learning_rate(optimizers, lr, iteration, dec_lr_step):
"""
adjust learning rate after some iterations
:param optimizers: the optimizers
:param lr: learning rate
:param iteration: current iteration
:param dec_lr_step: decrease learning rate in how many step
:return: None
"""
new_lr = lr * (0.1 ** (int(iteration / dec_lr_step)))
for optimizer in optimizers:
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
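# Editorial sketch (not part of the original utils; the lr and dec_lr_step values
# below are made up): the step decay above multiplies the base lr by 0.1 every
# ``dec_lr_step`` iterations.
def _decayed_lr_example(lr=0.1, dec_lr_step=15000):  # pragma: no cover - illustrative only
    """Return the lr that adjust_learning_rate would set at a few iterations."""
    return [lr * 0.1 ** int(iteration / dec_lr_step) for iteration in (0, 14999, 15000, 30000)]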
def label2edge(label, device):
"""
convert ground truth labels into ground truth edges
:param label: ground truth labels
:param device: the gpu device that holds the ground truth edges
:return: ground truth edges
"""
# get size
num_samples = label.size(1)
# reshape
label_i = label.unsqueeze(-1).repeat(1, 1, num_samples)
label_j = label_i.transpose(1, 2)
# compute edge
edge = torch.eq(label_i, label_j).float().to(device)
return edge
def one_hot_encode(num_classes, class_idx, device):
"""
one-hot encode the ground truth
:param num_classes: number of total class
:param class_idx: belonging class's index
:param device: the gpu device that holds the one-hot encoded ground truth label
:return: one-hot encoded ground truth label
"""
return torch.eye(num_classes)[class_idx].to(device)
def preprocessing(num_ways, num_shots, num_queries, batch_size, device):
"""
prepare for train and evaluation
:param num_ways: number of classes for each few-shot task
:param num_shots: number of samples for each class in few-shot task
:param num_queries: number of queries for each class in few-shot task
:param batch_size: how many tasks per batch
:param device: the gpu device that holds all data
:return: number of samples in support set
number of total samples (support and query set)
             mask for edges connecting query nodes
mask for unlabeled data (for semi-supervised setting)
"""
# set size of support set, query set and total number of data in single task
num_supports = num_ways * num_shots
num_samples = num_supports + num_queries * num_ways
# set edge mask (to distinguish support and query edges)
support_edge_mask = torch.zeros(batch_size, num_samples, num_samples).to(device)
support_edge_mask[:, :num_supports, :num_supports] = 1
query_edge_mask = 1 - support_edge_mask
evaluation_mask = torch.ones(batch_size, num_samples, num_samples).to(device)
return num_supports, num_samples, query_edge_mask, evaluation_mask
def initialize_nodes_edges(batch, num_supports, tensors, batch_size, num_queries, num_ways, device):
"""
:param batch: data batch
:param num_supports: number of samples in support set
:param tensors: initialized tensors for holding data
:param batch_size: how many tasks per batch
:param num_queries: number of samples in query set
:param num_ways: number of classes for each few-shot task
:param device: the gpu device that holds all data
:return: data of support set,
label of support set,
data of query set,
label of query set,
data of support and query set,
label of support and query set,
initialized node features of distribution graph (Vd_(0)),
initialized edge features of point graph (Ep_(0)),
initialized edge_features_of distribution graph (Ed_(0))
"""
# allocate data in this batch to specific variables
set_tensors(tensors, batch)
support_data = tensors['support_data'].squeeze(0)
support_label = tensors['support_label'].squeeze(0)
query_data = tensors['query_data'].squeeze(0)
query_label = tensors['query_label'].squeeze(0)
# initialize nodes of distribution graph
node_gd_init_support = label2edge(support_label, device)
node_gd_init_query = (torch.ones([batch_size, num_queries * num_ways, num_supports])
* torch.tensor(1. / num_supports)).to(device)
node_feature_gd = torch.cat([node_gd_init_support, node_gd_init_query], dim=1)
# initialize edges of point graph
all_data = torch.cat([support_data, query_data], 1)
all_label = torch.cat([support_label, query_label], 1)
all_label_in_edge = label2edge(all_label, device)
edge_feature_gp = all_label_in_edge.clone()
# uniform initialization for point graph's edges
edge_feature_gp[:, num_supports:, :num_supports] = 1. / num_supports
edge_feature_gp[:, :num_supports, num_supports:] = 1. / num_supports
edge_feature_gp[:, num_supports:, num_supports:] = 0
for i in range(num_ways * num_queries):
edge_feature_gp[:, num_supports + i, num_supports + i] = 1
# initialize edges of distribution graph (same as point graph)
edge_feature_gd = edge_feature_gp.clone()
return support_data, support_label, query_data, query_label, all_data, all_label_in_edge, \
node_feature_gd, edge_feature_gp, edge_feature_gd
def backbone_two_stage_initialization(full_data, encoder):
"""
encode raw data by backbone network
:param full_data: raw data
:param encoder: backbone network
:return: last layer logits from backbone network
second last layer logits from backbone network
"""
# encode data
last_layer_data_temp = []
second_last_layer_data_temp = []
for data in full_data.chunk(full_data.size(1), dim=1):
# the encode step
encoded_result = encoder(data.squeeze(1))
# prepare for two stage initialization of DPGN
last_layer_data_temp.append(encoded_result[0])
second_last_layer_data_temp.append(encoded_result[1])
# last_layer_data: (batch_size, num_samples, embedding dimension)
last_layer_data = torch.stack(last_layer_data_temp, dim=1)
# second_last_layer_data: (batch_size, num_samples, embedding dimension)
second_last_layer_data = torch.stack(second_last_layer_data_temp, dim=1)
return last_layer_data, second_last_layer_data
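# Editorial sketch (not part of the original utils): a CPU-only shape check for
# ``label2edge`` and ``one_hot_encode`` using a toy batch of 5 samples from 5 classes.
def _edge_shapes_example():  # pragma: no cover - illustrative only
    device = torch.device('cpu')
    label = torch.tensor([[0, 1, 2, 3, 4]])                 # (batch=1, num_samples=5)
    edge = label2edge(label, device)                        # (1, 5, 5); 1.0 where labels match
    one_hot = one_hot_encode(5, torch.tensor([2]), device)  # (1, 5) row with a single 1
    return edge.shape, one_hot.shape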
|
[
"torch.eq",
"torch.ones",
"torch.eye",
"torch.stack",
"os.makedirs",
"torch.LongTensor",
"logging.StreamHandler",
"torch.FloatTensor",
"torch.cat",
"os.path.exists",
"torch.zeros",
"os.path.join",
"torch.tensor"
] |
[((193, 212), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (210, 212), False, 'import torch\n'), ((244, 262), 'torch.LongTensor', 'torch.LongTensor', ([], {}), '()\n', (260, 262), False, 'import torch\n'), ((291, 310), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (308, 310), False, 'import torch\n'), ((340, 358), 'torch.LongTensor', 'torch.LongTensor', ([], {}), '()\n', (356, 358), False, 'import torch\n'), ((6076, 6136), 'torch.cat', 'torch.cat', (['[node_gd_init_support, node_gd_init_query]'], {'dim': '(1)'}), '([node_gd_init_support, node_gd_init_query], dim=1)\n', (6085, 6136), False, 'import torch\n'), ((6191, 6231), 'torch.cat', 'torch.cat', (['[support_data, query_data]', '(1)'], {}), '([support_data, query_data], 1)\n', (6200, 6231), False, 'import torch\n'), ((6248, 6290), 'torch.cat', 'torch.cat', (['[support_label, query_label]', '(1)'], {}), '([support_label, query_label], 1)\n', (6257, 6290), False, 'import torch\n'), ((7815, 7855), 'torch.stack', 'torch.stack', (['last_layer_data_temp'], {'dim': '(1)'}), '(last_layer_data_temp, dim=1)\n', (7826, 7855), False, 'import torch\n'), ((7962, 8009), 'torch.stack', 'torch.stack', (['second_last_layer_data_temp'], {'dim': '(1)'}), '(second_last_layer_data_temp, dim=1)\n', (7973, 8009), False, 'import torch\n'), ((1082, 1104), 'os.path.exists', 'os.path.exists', (['logdir'], {}), '(logdir)\n', (1096, 1104), False, 'import os\n'), ((1114, 1133), 'os.makedirs', 'os.makedirs', (['logdir'], {}), '(logdir)\n', (1125, 1133), False, 'import os\n'), ((4257, 4306), 'torch.zeros', 'torch.zeros', (['batch_size', 'num_samples', 'num_samples'], {}), '(batch_size, num_samples, num_samples)\n', (4268, 4306), False, 'import torch\n'), ((4443, 4491), 'torch.ones', 'torch.ones', (['batch_size', 'num_samples', 'num_samples'], {}), '(batch_size, num_samples, num_samples)\n', (4453, 4491), False, 'import torch\n'), ((1371, 1407), 'logging.StreamHandler', 'logging.StreamHandler', (['os.sys.stdout'], {}), '(os.sys.stdout)\n', (1392, 1407), False, 'import logging\n'), ((3289, 3311), 'torch.eye', 'torch.eye', (['num_classes'], {}), '(num_classes)\n', (3298, 3311), False, 'import torch\n'), ((5919, 5981), 'torch.ones', 'torch.ones', (['[batch_size, num_queries * num_ways, num_supports]'], {}), '([batch_size, num_queries * num_ways, num_supports])\n', (5929, 5981), False, 'import torch\n'), ((6010, 6042), 'torch.tensor', 'torch.tensor', (['(1.0 / num_supports)'], {}), '(1.0 / num_supports)\n', (6022, 6042), False, 'import torch\n'), ((1303, 1334), 'os.path.join', 'os.path.join', (['logdir', '"""log.txt"""'], {}), "(logdir, 'log.txt')\n", (1315, 1334), False, 'import os\n'), ((2886, 2912), 'torch.eq', 'torch.eq', (['label_i', 'label_j'], {}), '(label_i, label_j)\n', (2894, 2912), False, 'import torch\n')]
|
#!/usr/bin/env python
import json
from pathlib import Path
from divorce_predictor.data import DataLoader
from divorce_predictor.experiment import Experiment
if __name__ == "__main__":
print("Begin train.py")
project_root = Path(__file__).parent.parent
dataset_path = project_root / "ml" / "input" / "data" / "divorce.csv"
output_path = project_root / "ml" / "output"
hyperparameters_file = project_root / "ml" / "input" / "hyperparameters.json"
hyperparameters = json.loads(hyperparameters_file.read_text())
data_loader = DataLoader(dataset_path=dataset_path, target_column="Class")
X, y = data_loader.load_dataset()
experiment = Experiment(X, y, hyperparameters)
experiment.setup()
experiment.run()
experiment.persist(output_path)
print("End train.py")
|
[
"divorce_predictor.data.DataLoader",
"divorce_predictor.experiment.Experiment",
"pathlib.Path"
] |
[((552, 612), 'divorce_predictor.data.DataLoader', 'DataLoader', ([], {'dataset_path': 'dataset_path', 'target_column': '"""Class"""'}), "(dataset_path=dataset_path, target_column='Class')\n", (562, 612), False, 'from divorce_predictor.data import DataLoader\n'), ((669, 702), 'divorce_predictor.experiment.Experiment', 'Experiment', (['X', 'y', 'hyperparameters'], {}), '(X, y, hyperparameters)\n', (679, 702), False, 'from divorce_predictor.experiment import Experiment\n'), ((234, 248), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (238, 248), False, 'from pathlib import Path\n')]
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Manages all plugins."""
import importlib
import importlib.machinery
import importlib.util
import inspect
import logging
import os
import sys
import types
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type
try:
import importlib_metadata
except ImportError:
from importlib import metadata as importlib_metadata
from airflow import settings
from airflow.utils.entry_points import entry_points_with_dist
from airflow.utils.file import find_path_from_directory
from airflow.utils.module_loading import as_importable_string
if TYPE_CHECKING:
from airflow.hooks.base import BaseHook
from airflow.timetables.base import Timetable
log = logging.getLogger(__name__)
import_errors: Dict[str, str] = {}
plugins = None # type: Optional[List[AirflowPlugin]]
# Plugin components to integrate as modules
registered_hooks: Optional[List['BaseHook']] = None
macros_modules: Optional[List[Any]] = None
executors_modules: Optional[List[Any]] = None
# Plugin components to integrate directly
admin_views: Optional[List[Any]] = None
flask_blueprints: Optional[List[Any]] = None
menu_links: Optional[List[Any]] = None
flask_appbuilder_views: Optional[List[Any]] = None
flask_appbuilder_menu_links: Optional[List[Any]] = None
global_operator_extra_links: Optional[List[Any]] = None
operator_extra_links: Optional[List[Any]] = None
registered_operator_link_classes: Optional[Dict[str, Type]] = None
"""Mapping of class names to class of OperatorLinks registered by plugins.
Used by the DAG serialization code to only allow specific classes to be created
during deserialization
"""
timetable_classes: Optional[Dict[str, Type["Timetable"]]] = None
PLUGINS_ATTRIBUTES_TO_DUMP = {
"hooks",
"executors",
"macros",
"flask_blueprints",
"appbuilder_views",
"appbuilder_menu_items",
"global_operator_extra_links",
"operator_extra_links",
"source",
}
class AirflowPluginSource:
"""Class used to define an AirflowPluginSource."""
def __str__(self):
raise NotImplementedError
def __html__(self):
raise NotImplementedError
class PluginsDirectorySource(AirflowPluginSource):
"""Class used to define Plugins loaded from Plugins Directory."""
def __init__(self, path):
self.path = os.path.relpath(path, settings.PLUGINS_FOLDER)
def __str__(self):
return f"$PLUGINS_FOLDER/{self.path}"
def __html__(self):
return f"<em>$PLUGINS_FOLDER/</em>{self.path}"
class EntryPointSource(AirflowPluginSource):
"""Class used to define Plugins loaded from entrypoint."""
def __init__(self, entrypoint: importlib_metadata.EntryPoint, dist: importlib_metadata.Distribution):
self.dist = dist.metadata['name']
self.version = dist.version
self.entrypoint = str(entrypoint)
def __str__(self):
return f"{self.dist}=={self.version}: {self.entrypoint}"
def __html__(self):
return f"<em>{self.dist}=={self.version}:</em> {self.entrypoint}"
class AirflowPluginException(Exception):
"""Exception when loading plugin."""
class AirflowPlugin:
"""Class used to define AirflowPlugin."""
name: Optional[str] = None
source: Optional[AirflowPluginSource] = None
hooks: List[Any] = []
executors: List[Any] = []
macros: List[Any] = []
admin_views: List[Any] = []
flask_blueprints: List[Any] = []
menu_links: List[Any] = []
appbuilder_views: List[Any] = []
appbuilder_menu_items: List[Any] = []
# A list of global operator extra links that can redirect users to
# external systems. These extra links will be available on the
# task page in the form of buttons.
#
# Note: the global operator extra link can be overridden at each
# operator level.
global_operator_extra_links: List[Any] = []
# A list of operator extra links to override or add operator links
# to existing Airflow Operators.
# These extra links will be available on the task page in form of
# buttons.
operator_extra_links: List[Any] = []
# A list of timetable classes that can be used for DAG scheduling.
timetables: List[Type["Timetable"]] = []
@classmethod
def validate(cls):
"""Validates that plugin has a name."""
if not cls.name:
raise AirflowPluginException("Your plugin needs a name.")
@classmethod
def on_load(cls, *args, **kwargs):
"""
Executed when the plugin is loaded.
This method is only called once during runtime.
:param args: If future arguments are passed in on call.
:param kwargs: If future arguments are passed in on call.
"""
def is_valid_plugin(plugin_obj):
"""
Check whether a potential object is a subclass of
the AirflowPlugin class.
:param plugin_obj: potential subclass of AirflowPlugin
:return: Whether or not the obj is a valid subclass of
AirflowPlugin
"""
global plugins
if (
inspect.isclass(plugin_obj)
and issubclass(plugin_obj, AirflowPlugin)
and (plugin_obj is not AirflowPlugin)
):
plugin_obj.validate()
return plugin_obj not in plugins
return False
def register_plugin(plugin_instance):
"""
Start plugin load and register it after successful initialization
:param plugin_instance: subclass of AirflowPlugin
"""
global plugins
plugin_instance.on_load()
plugins.append(plugin_instance)
def load_entrypoint_plugins():
"""
Load and register AirflowPlugin subclasses from entry points.
The entry_point group should be 'airflow.plugins'.
"""
global import_errors
log.debug("Loading plugins from entrypoints")
for entry_point, dist in entry_points_with_dist('airflow.plugins'):
log.debug('Importing entry_point plugin %s', entry_point.name)
try:
plugin_class = entry_point.load()
if not is_valid_plugin(plugin_class):
continue
plugin_instance = plugin_class()
plugin_instance.source = EntryPointSource(entry_point, dist)
register_plugin(plugin_instance)
except Exception as e:
log.exception("Failed to import plugin %s", entry_point.name)
import_errors[entry_point.module] = str(e)
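# Illustrative packaging snippet (assumed project files, not part of this
# module): a distribution advertises its plugin through the 'airflow.plugins'
# entry point group, e.g. in setup.cfg:
#
#     [options.entry_points]
#     airflow.plugins =
#         my_plugin = my_package.my_module:MyAirflowPlugin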
def load_plugins_from_plugin_directory():
"""Load and register Airflow Plugins from plugins directory"""
global import_errors
log.debug("Loading plugins from directory: %s", settings.PLUGINS_FOLDER)
for file_path in find_path_from_directory(settings.PLUGINS_FOLDER, ".airflowignore"):
if not os.path.isfile(file_path):
continue
mod_name, file_ext = os.path.splitext(os.path.split(file_path)[-1])
if file_ext != '.py':
continue
try:
loader = importlib.machinery.SourceFileLoader(mod_name, file_path)
spec = importlib.util.spec_from_loader(mod_name, loader)
mod = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = mod
loader.exec_module(mod)
log.debug('Importing plugin module %s', file_path)
for mod_attr_value in (m for m in mod.__dict__.values() if is_valid_plugin(m)):
plugin_instance = mod_attr_value()
plugin_instance.source = PluginsDirectorySource(file_path)
register_plugin(plugin_instance)
except Exception as e:
log.exception('Failed to import plugin %s', file_path)
import_errors[file_path] = str(e)
def make_module(name: str, objects: List[Any]):
"""Creates new module."""
if not objects:
return None
log.debug('Creating module %s', name)
name = name.lower()
module = types.ModuleType(name)
module._name = name.split('.')[-1] # type: ignore
module._objects = objects # type: ignore
module.__dict__.update((o.__name__, o) for o in objects)
return module
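# Illustrative usage (assumed objects): make_module('airflow.macros.my_plugin',
# [my_macro]) returns a module object whose my_macro attribute is the passed
# callable; once it is registered in sys.modules (as integrate_macros_plugins
# does below), `from airflow.macros.my_plugin import my_macro` works.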
def ensure_plugins_loaded():
"""
Load plugins from plugins directory and entrypoints.
Plugins are only loaded if they have not been previously loaded.
"""
from airflow.stats import Stats
global plugins, registered_hooks
if plugins is not None:
log.debug("Plugins are already loaded. Skipping.")
return
if not settings.PLUGINS_FOLDER:
raise ValueError("Plugins folder is not set")
log.debug("Loading plugins")
with Stats.timer() as timer:
plugins = []
registered_hooks = []
load_plugins_from_plugin_directory()
load_entrypoint_plugins()
# We don't do anything with these for now, but we want to keep track of
# them so we can integrate them into the UI's Connection screens
for plugin in plugins:
registered_hooks.extend(plugin.hooks)
num_loaded = len(plugins)
if num_loaded > 0:
log.debug("Loading %d plugin(s) took %.2f seconds", num_loaded, timer.duration)
def initialize_web_ui_plugins():
"""Collect extension points for WEB UI"""
global plugins
global flask_blueprints
global flask_appbuilder_views
global flask_appbuilder_menu_links
if (
flask_blueprints is not None
and flask_appbuilder_views is not None
and flask_appbuilder_menu_links is not None
):
return
ensure_plugins_loaded()
if plugins is None:
raise AirflowPluginException("Can't load plugins.")
log.debug("Initialize Web UI plugin")
flask_blueprints = []
flask_appbuilder_views = []
flask_appbuilder_menu_links = []
for plugin in plugins:
flask_appbuilder_views.extend(plugin.appbuilder_views)
flask_appbuilder_menu_links.extend(plugin.appbuilder_menu_items)
flask_blueprints.extend([{'name': plugin.name, 'blueprint': bp} for bp in plugin.flask_blueprints])
if (plugin.admin_views and not plugin.appbuilder_views) or (
plugin.menu_links and not plugin.appbuilder_menu_items
):
log.warning(
"Plugin \'%s\' may not be compatible with the current Airflow version. "
"Please contact the author of the plugin.",
plugin.name,
)
def initialize_extra_operators_links_plugins():
"""Creates modules for loaded extension from extra operators links plugins"""
global global_operator_extra_links
global operator_extra_links
global registered_operator_link_classes
if (
global_operator_extra_links is not None
and operator_extra_links is not None
and registered_operator_link_classes is not None
):
return
ensure_plugins_loaded()
if plugins is None:
raise AirflowPluginException("Can't load plugins.")
log.debug("Initialize extra operators links plugins")
global_operator_extra_links = []
operator_extra_links = []
registered_operator_link_classes = {}
for plugin in plugins:
global_operator_extra_links.extend(plugin.global_operator_extra_links)
operator_extra_links.extend(list(plugin.operator_extra_links))
registered_operator_link_classes.update(
{
f"{link.__class__.__module__}.{link.__class__.__name__}": link.__class__
for link in plugin.operator_extra_links
}
)
def initialize_timetables_plugins():
"""Collect timetable classes registered by plugins."""
global timetable_classes
if timetable_classes is not None:
return
ensure_plugins_loaded()
if plugins is None:
raise AirflowPluginException("Can't load plugins.")
log.debug("Initialize extra timetables plugins")
timetable_classes = {
as_importable_string(timetable_class): timetable_class
for plugin in plugins
for timetable_class in plugin.timetables
}
def integrate_executor_plugins() -> None:
"""Integrate executor plugins to the context."""
global plugins
global executors_modules
if executors_modules is not None:
return
ensure_plugins_loaded()
if plugins is None:
raise AirflowPluginException("Can't load plugins.")
log.debug("Integrate executor plugins")
executors_modules = []
for plugin in plugins:
if plugin.name is None:
raise AirflowPluginException("Invalid plugin name")
plugin_name: str = plugin.name
executors_module = make_module('airflow.executors.' + plugin_name, plugin.executors)
if executors_module:
executors_modules.append(executors_module)
sys.modules[executors_module.__name__] = executors_module
def integrate_macros_plugins() -> None:
"""Integrates macro plugins."""
global plugins
global macros_modules
from airflow import macros
if macros_modules is not None:
return
ensure_plugins_loaded()
if plugins is None:
raise AirflowPluginException("Can't load plugins.")
log.debug("Integrate DAG plugins")
macros_modules = []
for plugin in plugins:
if plugin.name is None:
raise AirflowPluginException("Invalid plugin name")
macros_module = make_module(f'airflow.macros.{plugin.name}', plugin.macros)
if macros_module:
macros_modules.append(macros_module)
sys.modules[macros_module.__name__] = macros_module
# Register the newly created module on airflow.macros such that it
# can be accessed when rendering templates.
setattr(macros, plugin.name, macros_module)
def get_plugin_info(attrs_to_dump: Optional[List[str]] = None) -> List[Dict[str, Any]]:
"""
Dump plugins attributes
:param attrs_to_dump: A list of plugin attributes to dump
:type attrs_to_dump: List
"""
ensure_plugins_loaded()
integrate_executor_plugins()
integrate_macros_plugins()
initialize_web_ui_plugins()
initialize_extra_operators_links_plugins()
if not attrs_to_dump:
attrs_to_dump = PLUGINS_ATTRIBUTES_TO_DUMP
plugins_info = []
if plugins:
for plugin in plugins:
info = {"name": plugin.name}
info.update({n: getattr(plugin, n) for n in attrs_to_dump})
plugins_info.append(info)
return plugins_info
|
[
"airflow.utils.module_loading.as_importable_string",
"importlib.util.spec_from_loader",
"inspect.isclass",
"types.ModuleType",
"airflow.utils.file.find_path_from_directory",
"airflow.stats.Stats.timer",
"os.path.isfile",
"importlib.machinery.SourceFileLoader",
"os.path.relpath",
"airflow.utils.entry_points.entry_points_with_dist",
"os.path.split",
"logging.getLogger",
"importlib.util.module_from_spec"
] |
[((1453, 1480), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1470, 1480), False, 'import logging\n'), ((6529, 6570), 'airflow.utils.entry_points.entry_points_with_dist', 'entry_points_with_dist', (['"""airflow.plugins"""'], {}), "('airflow.plugins')\n", (6551, 6570), False, 'from airflow.utils.entry_points import entry_points_with_dist\n'), ((7336, 7403), 'airflow.utils.file.find_path_from_directory', 'find_path_from_directory', (['settings.PLUGINS_FOLDER', '""".airflowignore"""'], {}), "(settings.PLUGINS_FOLDER, '.airflowignore')\n", (7360, 7403), False, 'from airflow.utils.file import find_path_from_directory\n'), ((8564, 8586), 'types.ModuleType', 'types.ModuleType', (['name'], {}), '(name)\n', (8580, 8586), False, 'import types\n'), ((3058, 3104), 'os.path.relpath', 'os.path.relpath', (['path', 'settings.PLUGINS_FOLDER'], {}), '(path, settings.PLUGINS_FOLDER)\n', (3073, 3104), False, 'import os\n'), ((5760, 5787), 'inspect.isclass', 'inspect.isclass', (['plugin_obj'], {}), '(plugin_obj)\n', (5775, 5787), False, 'import inspect\n'), ((9253, 9266), 'airflow.stats.Stats.timer', 'Stats.timer', ([], {}), '()\n', (9264, 9266), False, 'from airflow.stats import Stats\n'), ((12552, 12589), 'airflow.utils.module_loading.as_importable_string', 'as_importable_string', (['timetable_class'], {}), '(timetable_class)\n', (12572, 12589), False, 'from airflow.utils.module_loading import as_importable_string\n'), ((7420, 7445), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (7434, 7445), False, 'import os\n'), ((7630, 7687), 'importlib.machinery.SourceFileLoader', 'importlib.machinery.SourceFileLoader', (['mod_name', 'file_path'], {}), '(mod_name, file_path)\n', (7666, 7687), False, 'import importlib\n'), ((7707, 7756), 'importlib.util.spec_from_loader', 'importlib.util.spec_from_loader', (['mod_name', 'loader'], {}), '(mod_name, loader)\n', (7738, 7756), False, 'import importlib\n'), ((7775, 7812), 'importlib.util.module_from_spec', 'importlib.util.module_from_spec', (['spec'], {}), '(spec)\n', (7806, 7812), False, 'import importlib\n'), ((7514, 7538), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (7527, 7538), False, 'import os\n')]
|
from detectron2.engine import default_argument_parser
from liuy.implementation.CoCoSegModel import CoCoSegModel
from liuy.implementation.RandomSampler import CoCoRandomSampler
import numpy as np
import random
from liuy.utils.reg_dataset import register_coco_instances_from_selected_image_files
from liuy.utils.local_config import coco_data, debug_data
import copy
def generate_one_curve(
whole_image_id,
coco_data,
sampler,
ins_seg_model,
seed_batch,
batch_size
):
"""
:return:
"""
# initialize the quantity relationship
whole_train_size = len(whole_image_id)
if seed_batch < 1:
seed_batch = int(seed_batch * whole_train_size)
if batch_size < 1:
batch_size = int(batch_size * whole_train_size)
# initially, seed_batch images are selected at random
selected_image_id = random.sample(whole_image_id, seed_batch)
# register data set and build data loader
register_coco_instances_from_selected_image_files(
name='coco_from_selected_image',
json_file=coco_data[0]['json_file'],
image_root=coco_data[0]['image_root'],
selected_image_files=selected_image_id
)
data_loader_from_selected_image_files, _ = ins_seg_model.trainer.re_build_train_loader(
'coco_from_selected_image')
n_batches = int(np.ceil((whole_train_size - seed_batch) / batch_size)) + 1
for n in range(n_batches):
# compute the expected training-set size for this iteration
n_train_size = seed_batch + min((whole_train_size - seed_batch), n * batch_size)
assert n_train_size == len(selected_image_id)
print('{} data points for training in iter {}'.format(n_train_size, n))
# start training and test
ins_seg_model.save_selected_image_id(selected_image_id)
ins_seg_model.fit_on_subset(data_loader_from_selected_image_files)
# select new batch
n_sample = min(batch_size, whole_train_size - len(selected_image_id))
new_batch = sampler.select_batch(n_sample=n_sample, already_selected=copy.deepcopy(selected_image_id))
selected_image_id.extend(new_batch)
assert len(new_batch) == n_sample
print('Requested: %d, Selected: %d' % (n_sample, len(new_batch)))
# register dataset and build data loader
register_coco_instances_from_selected_image_files(
name='coco_from_selected_image',
json_file=coco_data[0]['json_file'],
image_root=coco_data[0]['image_root'],
selected_image_files=selected_image_id
)
data_loader_from_selected_image_files, _ = ins_seg_model.trainer.re_build_train_loader(
'coco_from_selected_image')
# reset model
print("--reset model")
ins_seg_model.reset_model()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
project_id = "random"
seg_model = CoCoSegModel(args, project_id=project_id, coco_data=debug_data, resume_or_load=True)
data_loader = seg_model.trainer.data_loader
whole_image_id = []
index_list = data_loader.dataset._dataset._lst
for item in index_list:
whole_image_id.append(item['image_id'])
randomsampler = CoCoRandomSampler("random_sampler", whole_image_id=whole_image_id)
generate_one_curve(
coco_data=copy.deepcopy(debug_data),
whole_image_id=copy.deepcopy(whole_image_id),
sampler=randomsampler,
ins_seg_model=seg_model,
batch_size=100,
seed_batch=100,
)
|
[
"liuy.utils.reg_dataset.register_coco_instances_from_selected_image_files",
"copy.deepcopy",
"numpy.ceil",
"random.sample",
"detectron2.engine.default_argument_parser",
"liuy.implementation.CoCoSegModel.CoCoSegModel",
"liuy.implementation.RandomSampler.CoCoRandomSampler"
] |
[((878, 919), 'random.sample', 'random.sample', (['whole_image_id', 'seed_batch'], {}), '(whole_image_id, seed_batch)\n', (891, 919), False, 'import random\n'), ((970, 1182), 'liuy.utils.reg_dataset.register_coco_instances_from_selected_image_files', 'register_coco_instances_from_selected_image_files', ([], {'name': '"""coco_from_selected_image"""', 'json_file': "coco_data[0]['json_file']", 'image_root': "coco_data[0]['image_root']", 'selected_image_files': 'selected_image_id'}), "(name=\n 'coco_from_selected_image', json_file=coco_data[0]['json_file'],\n image_root=coco_data[0]['image_root'], selected_image_files=\n selected_image_id)\n", (1019, 1182), False, 'from liuy.utils.reg_dataset import register_coco_instances_from_selected_image_files\n'), ((2928, 3016), 'liuy.implementation.CoCoSegModel.CoCoSegModel', 'CoCoSegModel', (['args'], {'project_id': 'project_id', 'coco_data': 'debug_data', 'resume_or_load': '(True)'}), '(args, project_id=project_id, coco_data=debug_data,\n resume_or_load=True)\n', (2940, 3016), False, 'from liuy.implementation.CoCoSegModel import CoCoSegModel\n'), ((3233, 3299), 'liuy.implementation.RandomSampler.CoCoRandomSampler', 'CoCoRandomSampler', (['"""random_sampler"""'], {'whole_image_id': 'whole_image_id'}), "('random_sampler', whole_image_id=whole_image_id)\n", (3250, 3299), False, 'from liuy.implementation.RandomSampler import CoCoRandomSampler\n'), ((2323, 2535), 'liuy.utils.reg_dataset.register_coco_instances_from_selected_image_files', 'register_coco_instances_from_selected_image_files', ([], {'name': '"""coco_from_selected_image"""', 'json_file': "coco_data[0]['json_file']", 'image_root': "coco_data[0]['image_root']", 'selected_image_files': 'selected_image_id'}), "(name=\n 'coco_from_selected_image', json_file=coco_data[0]['json_file'],\n image_root=coco_data[0]['image_root'], selected_image_files=\n selected_image_id)\n", (2372, 2535), False, 'from liuy.utils.reg_dataset import register_coco_instances_from_selected_image_files\n'), ((1356, 1413), 'numpy.ceil', 'np.ceil', (['((whole_train_size - seed_batch) * 1 / batch_size)'], {}), '((whole_train_size - seed_batch) * 1 / batch_size)\n', (1363, 1413), True, 'import numpy as np\n'), ((2847, 2872), 'detectron2.engine.default_argument_parser', 'default_argument_parser', ([], {}), '()\n', (2870, 2872), False, 'from detectron2.engine import default_argument_parser\n'), ((3342, 3367), 'copy.deepcopy', 'copy.deepcopy', (['debug_data'], {}), '(debug_data)\n', (3355, 3367), False, 'import copy\n'), ((3392, 3421), 'copy.deepcopy', 'copy.deepcopy', (['whole_image_id'], {}), '(whole_image_id)\n', (3405, 3421), False, 'import copy\n'), ((2070, 2102), 'copy.deepcopy', 'copy.deepcopy', (['selected_image_id'], {}), '(selected_image_id)\n', (2083, 2102), False, 'import copy\n')]
|
"""
Copyright (c) 2020 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
from tensorflow.keras import Model
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate
from tensorflow.keras.optimizers import Adam
from rl.agents import DDPGAgent
from rl.agents import DQNAgent
from rl.memory import SequentialMemory
from rl.policy import BoltzmannQPolicy
from rl.random import OrnsteinUhlenbeckProcess
class RLAgent:
def __init__(self, env, alg='ddpg'):
self.env = env
nb_actions = env.action_space.shape[0]
nb_states = env.observation_space.shape[0]
if alg == 'ddpg':
self.agent = self._build_ddpg(nb_actions, nb_states)
elif alg == 'dqn':
self.agent = self._build_dqn(nb_actions, nb_states)
else:
raise ValueError('Unsupported reinforcement learning algorithm.')
# DQN has not been regression-tested; DDPG is recommended.
@staticmethod
def _build_dqn(nb_actions, nb_states):
# build network
model = Sequential()
model.add(Flatten(input_shape=(1, nb_states)))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions, activation='linear'))
# build alg
memory = SequentialMemory(limit=10240, window_length=1)
policy = BoltzmannQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory,
nb_steps_warmup=10, enable_dueling_network=True, dueling_type='avg',
target_model_update=1e-2, policy=policy)
dqn.compile(Adam(), metrics=['mae'])
return dqn
@staticmethod
def _build_ddpg(nb_actions, nb_states):
# build an actor network
actor = Sequential()
actor.add(Flatten(input_shape=(1, nb_states)))
actor.add(Dense(16))
actor.add(Activation('relu'))
actor.add(Dense(16))
actor.add(Activation('relu'))
actor.add(Dense(nb_actions))
actor.add(Activation('sigmoid'))
# build a critic network
action_input = Input(shape=(nb_actions,), name='action_input')
observation_input = Input(shape=(1, nb_states), name='observation_input')
flattened_observation = Flatten()(observation_input)
x = Concatenate()([action_input, flattened_observation])
x = Dense(32)(x)
x = Activation('relu')(x)
x = Dense(32)(x)
x = Activation('relu')(x)
x = Dense(1)(x)
x = Activation('linear')(x)
critic = Model(inputs=[action_input, observation_input], outputs=x)
# tricks:
memory = SequentialMemory(limit=10240, window_length=1)
oup = OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.3)
# build ddpg alg
ddpg = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
memory=memory, nb_steps_warmup_actor=100, nb_steps_warmup_critic=100,
random_process=oup, gamma=.99, target_model_update=1e-3)
ddpg.compile(Adam(), metrics=['mae'])
return ddpg
def fit(self, steps, nb_max_episode_steps=100, verbose=0):
self.agent.fit(self.env, nb_steps=steps, nb_max_episode_steps=nb_max_episode_steps, verbose=verbose)
def save(self, filepath):
self.agent.save_weights(filepath, overwrite=True)
def load(self, filepath):
self.agent.load_weights(filepath)
def test(self, episodes, nb_max_episode_steps=10, verbose=0):
self.agent.test(self.env, nb_episodes=episodes, nb_max_episode_steps=nb_max_episode_steps, verbose=verbose)
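# Minimal usage sketch (assumes a Gym-style env with Box observation and
# action spaces; the weights path is illustrative):
#
#     agent = RLAgent(env, alg='ddpg')
#     agent.fit(steps=10000)
#     agent.save('ddpg_weights.h5f')
#     agent.test(episodes=5)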
|
[
"rl.memory.SequentialMemory",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.Dense",
"rl.agents.DDPGAgent",
"rl.policy.BoltzmannQPolicy",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Input",
"tensorflow.keras.Sequential",
"rl.random.OrnsteinUhlenbeckProcess",
"rl.agents.DQNAgent",
"tensorflow.keras.layers.Flatten"
] |
[((1531, 1543), 'tensorflow.keras.Sequential', 'Sequential', ([], {}), '()\n', (1541, 1543), False, 'from tensorflow.keras import Sequential\n'), ((1829, 1875), 'rl.memory.SequentialMemory', 'SequentialMemory', ([], {'limit': '(10240)', 'window_length': '(1)'}), '(limit=10240, window_length=1)\n', (1845, 1875), False, 'from rl.memory import SequentialMemory\n'), ((1893, 1911), 'rl.policy.BoltzmannQPolicy', 'BoltzmannQPolicy', ([], {}), '()\n', (1909, 1911), False, 'from rl.policy import BoltzmannQPolicy\n'), ((1926, 2104), 'rl.agents.DQNAgent', 'DQNAgent', ([], {'model': 'model', 'nb_actions': 'nb_actions', 'memory': 'memory', 'nb_steps_warmup': '(10)', 'enable_dueling_network': '(True)', 'dueling_type': '"""avg"""', 'target_model_update': '(0.01)', 'policy': 'policy'}), "(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup\n =10, enable_dueling_network=True, dueling_type='avg',\n target_model_update=0.01, policy=policy)\n", (1934, 2104), False, 'from rl.agents import DQNAgent\n'), ((2319, 2331), 'tensorflow.keras.Sequential', 'Sequential', ([], {}), '()\n', (2329, 2331), False, 'from tensorflow.keras import Sequential\n'), ((2656, 2703), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(nb_actions,)', 'name': '"""action_input"""'}), "(shape=(nb_actions,), name='action_input')\n", (2661, 2703), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((2732, 2785), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(1, nb_states)', 'name': '"""observation_input"""'}), "(shape=(1, nb_states), name='observation_input')\n", (2737, 2785), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((3107, 3165), 'tensorflow.keras.Model', 'Model', ([], {'inputs': '[action_input, observation_input]', 'outputs': 'x'}), '(inputs=[action_input, observation_input], outputs=x)\n', (3112, 3165), False, 'from tensorflow.keras import Model\n'), ((3202, 3248), 'rl.memory.SequentialMemory', 'SequentialMemory', ([], {'limit': '(10240)', 'window_length': '(1)'}), '(limit=10240, window_length=1)\n', (3218, 3248), False, 'from rl.memory import SequentialMemory\n'), ((3263, 3335), 'rl.random.OrnsteinUhlenbeckProcess', 'OrnsteinUhlenbeckProcess', ([], {'size': 'nb_actions', 'theta': '(0.15)', 'mu': '(0.0)', 'sigma': '(0.3)'}), '(size=nb_actions, theta=0.15, mu=0.0, sigma=0.3)\n', (3287, 3335), False, 'from rl.random import OrnsteinUhlenbeckProcess\n'), ((3374, 3610), 'rl.agents.DDPGAgent', 'DDPGAgent', ([], {'nb_actions': 'nb_actions', 'actor': 'actor', 'critic': 'critic', 'critic_action_input': 'action_input', 'memory': 'memory', 'nb_steps_warmup_actor': '(100)', 'nb_steps_warmup_critic': '(100)', 'random_process': 'oup', 'gamma': '(0.99)', 'target_model_update': '(0.001)'}), '(nb_actions=nb_actions, actor=actor, critic=critic,\n critic_action_input=action_input, memory=memory, nb_steps_warmup_actor=\n 100, nb_steps_warmup_critic=100, random_process=oup, gamma=0.99,\n target_model_update=0.001)\n', (3383, 3610), False, 'from rl.agents import DDPGAgent\n'), ((1562, 1597), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {'input_shape': '(1, nb_states)'}), '(input_shape=(1, nb_states))\n', (1569, 1597), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((1617, 1626), 'tensorflow.keras.layers.Dense', 'Dense', (['(16)'], {}), '(16)\n', (1622, 1626), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((1646, 1664), 
'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1656, 1664), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((1684, 1693), 'tensorflow.keras.layers.Dense', 'Dense', (['(16)'], {}), '(16)\n', (1689, 1693), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((1713, 1731), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1723, 1731), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((1751, 1789), 'tensorflow.keras.layers.Dense', 'Dense', (['nb_actions'], {'activation': '"""linear"""'}), "(nb_actions, activation='linear')\n", (1756, 1789), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((2162, 2168), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (2166, 2168), False, 'from tensorflow.keras.optimizers import Adam\n'), ((2350, 2385), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {'input_shape': '(1, nb_states)'}), '(input_shape=(1, nb_states))\n', (2357, 2385), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((2405, 2414), 'tensorflow.keras.layers.Dense', 'Dense', (['(16)'], {}), '(16)\n', (2410, 2414), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((2434, 2452), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2444, 2452), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((2472, 2481), 'tensorflow.keras.layers.Dense', 'Dense', (['(16)'], {}), '(16)\n', (2477, 2481), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((2501, 2519), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2511, 2519), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((2539, 2556), 'tensorflow.keras.layers.Dense', 'Dense', (['nb_actions'], {}), '(nb_actions)\n', (2544, 2556), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((2576, 2597), 'tensorflow.keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (2586, 2597), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((2818, 2827), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2825, 2827), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((2859, 2872), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (2870, 2872), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((2924, 2933), 'tensorflow.keras.layers.Dense', 'Dense', (['(32)'], {}), '(32)\n', (2929, 2933), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((2949, 2967), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2959, 2967), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((2983, 2992), 'tensorflow.keras.layers.Dense', 'Dense', (['(32)'], {}), '(32)\n', (2988, 2992), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((3008, 3026), 'tensorflow.keras.layers.Activation', 'Activation', 
(['"""relu"""'], {}), "('relu')\n", (3018, 3026), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((3042, 3050), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (3047, 3050), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((3066, 3086), 'tensorflow.keras.layers.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (3076, 3086), False, 'from tensorflow.keras.layers import Flatten, Dense, Activation, Input, Concatenate\n'), ((3667, 3673), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (3671, 3673), False, 'from tensorflow.keras.optimizers import Adam\n')]
|
""" A procedural map generator in Python, based on ARENA (Bhojan et. al. 2014) and <NAME>. """
import argparse
from map import Map
parser = argparse.ArgumentParser()
parser.add_argument("width", help="The width of the output. Required", type=int)
parser.add_argument("height", help="The height of the output. Required.", type=int)
args = parser.parse_args()
arena_map = Map(args.width, args.height)
arena_map = arena_map.run()
print(arena_map)
|
[
"argparse.ArgumentParser",
"map.Map"
] |
[((143, 168), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (166, 168), False, 'import argparse\n'), ((370, 398), 'map.Map', 'Map', (['args.width', 'args.height'], {}), '(args.width, args.height)\n', (373, 398), False, 'from map import Map\n')]
|
import time
import math
import numpy as np
import torch
from torch import nn, optim
import torch.nn.functional as F
import sys
import os
import random
sys.path.append("../")
# os.environ["CUDA_VISIBLE_DEVICES"] = '0,1,2,3'
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = 'cpu'
print(device)
def load_data_jay_lyrics():
with open('../data/jaychou_lyrics.txt') as f:
corpus_chars = f.read()
corpus_chars = corpus_chars.replace('\n', ' ').replace('\r', ' ')
corpus_chars = corpus_chars[0:10000]
idx_to_char = list(set(corpus_chars))
char_to_idx = dict([(char, i) for i, char in enumerate(idx_to_char)])
vocab_size = len(char_to_idx)
corpus_indices = [char_to_idx[char] for char in corpus_chars]
return corpus_indices, char_to_idx, idx_to_char, vocab_size
def one_hot(x, n_class, dtype=torch.float32):
result = torch.zeros(x.shape[0], n_class, dtype=dtype, device=x.device) # shape: (n, n_class)
result.scatter_(1, x.long().view(-1, 1), 1) # result[i, x[i, 0]] = 1
return result
def to_onehot(X, n_class):
return [one_hot(X[:, i], n_class) for i in range(X.shape[1])]
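# Shape sanity check (illustrative): for X = torch.arange(10).view(2, 5),
# to_onehot(X, vocab_size) returns a list of 5 tensors, one per time step,
# each of shape (2, vocab_size).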
def get_params():
def _one(shape):
param = torch.zeros(shape, device=device, dtype=torch.float32)
nn.init.normal_(param, 0, 0.01)
return torch.nn.Parameter(param)
# hidden layer parameters
W_xh = _one((num_inputs, num_hiddens))
W_hh = _one((num_hiddens, num_hiddens))
b_h = torch.nn.Parameter(torch.zeros(num_hiddens, device=device))
# output layer parameters
W_hq = _one((num_hiddens, num_outputs))
b_q = torch.nn.Parameter(torch.zeros(num_outputs, device=device))
return (W_xh, W_hh, b_h, W_hq, b_q)
def rnn(inputs, state, params):
# inputs and outputs are both lists of num_steps matrices of shape (batch_size, vocab_size)
W_xh, W_hh, b_h, W_hq, b_q = params
H, = state
outputs = []
for X in inputs:
H = torch.tanh(torch.matmul(X, W_xh) + torch.matmul(H, W_hh) + b_h)
Y = torch.matmul(H, W_hq) + b_q
outputs.append(Y)
return outputs, (H,)
def init_rnn_state(batch_size, num_hiddens, device):
return (torch.zeros((batch_size, num_hiddens), device=device), )
def grad_clipping(params, theta, device):
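# Clip by global norm: if the combined L2 norm of all parameter gradients
# exceeds theta, rescale every gradient by theta / norm.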
norm = torch.tensor([0.0], device=device)
for param in params:
norm += (param.grad.data ** 2).sum()
norm = norm.sqrt().item()
if norm > theta:
for param in params:
param.grad.data *= (theta / norm)
def predict_rnn(prefix, num_chars, rnn, params, init_rnn_state,
num_hiddens, vocab_size, device, idx_to_char, char_to_idx):
state = init_rnn_state(1, num_hiddens, device)
output = [char_to_idx[prefix[0]]] # output records prefix plus the num_chars predicted characters
for t in range(num_chars + len(prefix) - 1):
# use the output of the previous time step as the input of the current time step
X = to_onehot(torch.tensor([[output[-1]]], device=device), vocab_size)
# compute the output and update the hidden state
(Y, state) = rnn(X, state, params)
# the next input is the next character of prefix, or the current best prediction
if t < len(prefix) - 1:
output.append(char_to_idx[prefix[t + 1]])
else:
output.append(Y[0].argmax(dim=1).item())
return ''.join([idx_to_char[i] for i in output])
def data_iter_random(corpus_indices, batch_size, num_steps, device=None):
# subtract 1 because for a sequence of length n, X contains at most its first n - 1 characters
num_examples = (len(corpus_indices) - 1) // num_steps # floor division: number of non-overlapping examples
example_indices = [i * num_steps for i in range(num_examples)] # index in corpus_indices of each example's first character
random.shuffle(example_indices)
def _data(i):
# return the sequence of length num_steps starting at index i
return corpus_indices[i: i + num_steps]
if device is None:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
for i in range(0, num_examples, batch_size):
# pick batch_size random examples each time
batch_indices = example_indices[i: i + batch_size] # indices of the first character of each example in this batch
X = [_data(j) for j in batch_indices]
Y = [_data(j + 1) for j in batch_indices]
yield torch.tensor(X, device=device), torch.tensor(Y, device=device)
def data_iter_consecutive(corpus_indices, batch_size, num_steps, device=None):
if device is None:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
corpus_len = len(corpus_indices) // batch_size * batch_size # length of the sequence that is kept
corpus_indices = corpus_indices[: corpus_len] # keep only the first corpus_len characters
indices = torch.tensor(corpus_indices, device=device)
indices = indices.view(batch_size, -1) # reshape to (batch_size, corpus_len // batch_size)
batch_num = (indices.shape[1] - 1) // num_steps
for i in range(batch_num):
i = i * num_steps
X = indices[:, i: i + num_steps]
Y = indices[:, i + 1: i + num_steps + 1]
yield X, Y
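# Illustrative shapes (toy example, assumed): with corpus_indices = list(range(30)),
# batch_size=2 and num_steps=6, the first X is [[0..5], [15..20]] and Y is
# the same window shifted one character to the right.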
def sgd(params, lr, batch_size):
# We divide by batch_size to stay consistent with the original book, but this is not strictly
# necessary: PyTorch loss functions already average over the batch dimension by default.
for param in params:
param.data -= lr * param.grad / batch_size # note: update through param.data so autograd does not track the step
def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
vocab_size, device, corpus_indices, idx_to_char,
char_to_idx, is_random_iter, num_epochs, num_steps,
lr, clipping_theta, batch_size, pred_period,
pred_len, prefixes):
# rnn = torch.nn.DataParallel(rnn)
if is_random_iter:
data_iter_fn = data_iter_random
else:
data_iter_fn = data_iter_consecutive
params = get_params()
loss = nn.CrossEntropyLoss()
for epoch in range(num_epochs):
if not is_random_iter: # with consecutive sampling, initialize the hidden state at the start of the epoch
state = init_rnn_state(batch_size, num_hiddens, device)
l_sum, n, start = 0.0, 0, time.time()
data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, device)
for X, Y in data_iter:
if X.shape[0] != batch_size:
continue
if is_random_iter: # with random sampling, initialize the hidden state before each mini-batch update
state = init_rnn_state(batch_size, num_hiddens, device)
else: # otherwise, detach the hidden state from the computation graph
for s in state:
s.detach_()
# inputs is a list of num_steps matrices of shape (batch_size, vocab_size)
inputs = to_onehot(X, vocab_size)
# outputs is a list of num_steps matrices of shape (batch_size, vocab_size)
(outputs, state) = rnn(inputs, state, params)
# after concatenation, the shape is (num_steps * batch_size, vocab_size)
outputs = torch.cat(outputs, dim=0)
# Y has shape (batch_size, num_steps); transpose and flatten it into a vector of
# shape (num_steps * batch_size,) so its entries line up with the output rows
y = torch.flatten(Y.t())
# use cross-entropy loss to compute the mean classification error
l = loss(outputs, y.long())
# zero the gradients
if params[0].grad is not None:
for param in params:
param.grad.data.zero_()
l.backward()
grad_clipping(params, clipping_theta, device) # clip the gradients
sgd(params, lr, 1) # the loss is already averaged, so the gradients need no further averaging
l_sum += l.item() * y.shape[0]
n += y.shape[0]
if (epoch + 1) % pred_period == 0:
print('epoch %d, perplexity %f, time %.2f sec' % (
epoch + 1, math.exp(l_sum / n), time.time() - start))
for prefix in prefixes:
print(' -', predict_rnn(prefix, pred_len, rnn, params, init_rnn_state,
num_hiddens, vocab_size, device, idx_to_char, char_to_idx))
if __name__ == '__main__':
(corpus_indices, char_to_idx, idx_to_char, vocab_size) = load_data_jay_lyrics()
num_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size
# num_inputs: d
# num_hiddens: h, the number of hidden units (a hyperparameter)
# num_outputs: q
num_epochs, num_steps, batch_size, lr, clipping_theta = 250, 35, 32, 1e2, 1e-2
pred_period, pred_len, prefixes = 50, 50, ['分开', '不分开']
train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
vocab_size, device, corpus_indices, idx_to_char,
char_to_idx, True, num_epochs, num_steps, lr,
clipping_theta, batch_size, pred_period, pred_len,
prefixes)
|
[
"sys.path.append",
"torch.nn.Parameter",
"math.exp",
"random.shuffle",
"torch.nn.CrossEntropyLoss",
"torch.cat",
"time.time",
"torch.nn.init.normal_",
"torch.cuda.is_available",
"torch.zeros",
"torch.matmul",
"torch.tensor"
] |
[((151, 173), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (166, 173), False, 'import sys\n'), ((887, 949), 'torch.zeros', 'torch.zeros', (['x.shape[0]', 'n_class'], {'dtype': 'dtype', 'device': 'x.device'}), '(x.shape[0], n_class, dtype=dtype, device=x.device)\n', (898, 949), False, 'import torch\n'), ((2220, 2254), 'torch.tensor', 'torch.tensor', (['[0.0]'], {'device': 'device'}), '([0.0], device=device)\n', (2232, 2254), False, 'import torch\n'), ((3487, 3518), 'random.shuffle', 'random.shuffle', (['example_indices'], {}), '(example_indices)\n', (3501, 3518), False, 'import random\n'), ((4399, 4442), 'torch.tensor', 'torch.tensor', (['corpus_indices'], {'device': 'device'}), '(corpus_indices, device=device)\n', (4411, 4442), False, 'import torch\n'), ((5491, 5512), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (5510, 5512), False, 'from torch import nn, optim\n'), ((1215, 1269), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device', 'dtype': 'torch.float32'}), '(shape, device=device, dtype=torch.float32)\n', (1226, 1269), False, 'import torch\n'), ((1278, 1309), 'torch.nn.init.normal_', 'nn.init.normal_', (['param', '(0)', '(0.01)'], {}), '(param, 0, 0.01)\n', (1293, 1309), False, 'from torch import nn, optim\n'), ((1325, 1350), 'torch.nn.Parameter', 'torch.nn.Parameter', (['param'], {}), '(param)\n', (1343, 1350), False, 'import torch\n'), ((1480, 1519), 'torch.zeros', 'torch.zeros', (['num_hiddens'], {'device': 'device'}), '(num_hiddens, device=device)\n', (1491, 1519), False, 'import torch\n'), ((1606, 1645), 'torch.zeros', 'torch.zeros', (['num_outputs'], {'device': 'device'}), '(num_outputs, device=device)\n', (1617, 1645), False, 'import torch\n'), ((2109, 2162), 'torch.zeros', 'torch.zeros', (['(batch_size, num_hiddens)'], {'device': 'device'}), '((batch_size, num_hiddens), device=device)\n', (2120, 2162), False, 'import torch\n'), ((1964, 1985), 'torch.matmul', 'torch.matmul', (['H', 'W_hq'], {}), '(H, W_hq)\n', (1976, 1985), False, 'import torch\n'), ((2818, 2861), 'torch.tensor', 'torch.tensor', (['[[output[-1]]]'], {'device': 'device'}), '([[output[-1]]], device=device)\n', (2830, 2861), False, 'import torch\n'), ((5711, 5722), 'time.time', 'time.time', ([], {}), '()\n', (5720, 5722), False, 'import time\n'), ((6450, 6475), 'torch.cat', 'torch.cat', (['outputs'], {'dim': '(0)'}), '(outputs, dim=0)\n', (6459, 6475), False, 'import torch\n'), ((3682, 3707), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3705, 3707), False, 'import torch\n'), ((3992, 4022), 'torch.tensor', 'torch.tensor', (['X'], {'device': 'device'}), '(X, device=device)\n', (4004, 4022), False, 'import torch\n'), ((4024, 4054), 'torch.tensor', 'torch.tensor', (['Y'], {'device': 'device'}), '(Y, device=device)\n', (4036, 4054), False, 'import torch\n'), ((4198, 4223), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4221, 4223), False, 'import torch\n'), ((1899, 1920), 'torch.matmul', 'torch.matmul', (['X', 'W_xh'], {}), '(X, W_xh)\n', (1911, 1920), False, 'import torch\n'), ((1923, 1944), 'torch.matmul', 'torch.matmul', (['H', 'W_hh'], {}), '(H, W_hh)\n', (1935, 1944), False, 'import torch\n'), ((7186, 7205), 'math.exp', 'math.exp', (['(l_sum / n)'], {}), '(l_sum / n)\n', (7194, 7205), False, 'import math\n'), ((7207, 7218), 'time.time', 'time.time', ([], {}), '()\n', (7216, 7218), False, 'import time\n')]
|
from datetime import date, datetime, time, timezone
from typing import Generator, List, Optional, Tuple
import pandas as pd
from pandas import Timestamp
def get_current_time(tzinfo=timezone.utc):
"""Get current time."""
return datetime.utcnow().replace(tzinfo=tzinfo)
def parse_datetime(value: str, unit: str = "ns") -> datetime:
"""Parse datetime with pandas for nanosecond accuracy."""
return pd.to_datetime(value, unit=unit).replace(tzinfo=timezone.utc)
def timestamp_to_inclusive(
timestamp_from: datetime, timestamp_to: datetime, value: str = "1t"
):
"""Reduce timestamp_to by value, in case results are inclusive."""
ts_to = timestamp_to - pd.Timedelta(value)
if timestamp_from <= ts_to:
return ts_to
else:
return timestamp_to
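# Illustrative example (assumed values): with the default value="1t" (one
# minute), timestamp_to_inclusive(12:00, 12:05) returns 12:04, so an API
# with inclusive bounds is not queried past the requested range.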
def parse_period_from_to(
date_from: Optional[str] = None,
time_from: Optional[str] = None,
date_to: Optional[str] = None,
time_to: Optional[str] = None,
) -> Tuple[datetime, datetime]:
"""Parse period from/to command line arguments."""
now = get_current_time()
tomorrow = now.date() + pd.Timedelta("1d")
# timestamp_from
date_from = date.fromisoformat(date_from) if date_from else date(2009, 1, 3)
time_from = time.fromisoformat(time_from) if time_from else time.min
# timestamp_to
date_to = date.fromisoformat(date_to) if date_to else tomorrow
time_to = time.fromisoformat(time_to) if time_to else time.min
# UTC, please
timestamp_from = datetime.combine(date_from, time_from).replace(tzinfo=timezone.utc)
timestamp_to = datetime.combine(date_to, time_to).replace(tzinfo=timezone.utc)
# Sane defaults
timestamp_to = get_min_time(now, "1t") if timestamp_to >= now else timestamp_to
timestamp_from = timestamp_to if timestamp_from > timestamp_to else timestamp_from
return timestamp_from, timestamp_to
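# Illustrative call (assumed arguments): parse_period_from_to("2021-01-01",
# None, "2021-01-02", None) returns the UTC midnights
# (2021-01-01 00:00, 2021-01-02 00:00); a timestamp_to at or past the current
# time is clamped to the current minute.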
def to_pydatetime(timestamp: Timestamp) -> datetime:
"""Timestamp to datetime."""
return timestamp.replace(nanosecond=0).to_pydatetime().replace(tzinfo=timezone.utc)
def get_min_time(timestamp: datetime, value: str) -> datetime:
"""Get minimum time."""
return to_pydatetime(pd.to_datetime(timestamp).floor(value))
def get_next_time(timestamp: datetime, value: str) -> datetime:
"""Get next time."""
return get_min_time(timestamp, value=value) + pd.Timedelta(value)
def get_range(timestamp_from: datetime, timestamp_to: datetime, value: str = "1t"):
"""Get timestamps in range, step by value."""
ts_from_rounded = get_min_time(timestamp_from, value)
ts_to_rounded = get_next_time(timestamp_to, value)
return [
to_pydatetime(timestamp)
for timestamp in pd.date_range(ts_from_rounded, ts_to_rounded, freq=value)
if timestamp >= ts_from_rounded and timestamp <= timestamp_to
]
def iter_window(
timestamp_from: datetime,
timestamp_to: datetime,
value: str = "1t",
reverse: bool = False,
) -> Generator[Tuple[datetime, datetime], None, None]:
"""Iter window, by value."""
values = get_range(timestamp_from, timestamp_to, value)
return iter_timestamps(values, reverse=reverse)
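# Illustrative example (assumed values): iter_window over 00:00..00:03 of the
# same day with value="1t" yields (00:00, 00:01), (00:01, 00:02), (00:02, 00:03).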
def iter_timestamps(
values: List[datetime], reverse: bool = False
) -> Generator[Tuple[datetime, datetime], None, None]:
"""Iter tuples of timestamps, optionally reversed."""
if reverse:
values.reverse()
for start_time, end_time in zip(values, values[1:]):
if reverse:
yield end_time, start_time
else:
yield start_time, end_time
def iter_once(
timestamp_from: datetime, timestamp_to: datetime
) -> Generator[Tuple[datetime, datetime], None, None]:
"""Fake iter, once."""
yield get_min_time(timestamp_from, "1d"), get_next_time(timestamp_to, "1d")
def iter_timeframe(
timestamp_from: datetime,
timestamp_to: datetime,
value: str = "1d",
reverse: bool = False,
) -> Generator[Tuple[datetime, datetime], None, None]:
"""Iter timeframe, including partial increments."""
values = []
head = None
tail = None
step = pd.Timedelta(value)
# Is there at least 1 day?
delta = timestamp_to - timestamp_from
if delta >= step:
ts_from = get_min_time(timestamp_from, value)
ts_to = get_min_time(timestamp_to, value)
# Is there a head?
if timestamp_from != ts_from:
head = timestamp_from, ts_from + step
timestamp_from = ts_from + step
# Is there a tail?
if timestamp_to != ts_to:
tail = ts_to, timestamp_to
timestamp_to = ts_to
# Check again, is there at least 1 day?
delta = ts_to - ts_from
if delta >= step:
for val in iter_window(ts_from, ts_to, value, reverse=reverse):
if head:
if val[0] >= head[0]:
values.append(val)
elif tail:
if val[1] <= tail[1]:
values.append(val)
else:
values.append(val)
else:
values.append((timestamp_from, timestamp_to))
# Head or tail
if head and not reverse:
values.insert(0, head)
elif tail and reverse:
values.insert(0, tail)
elif head and reverse:
values.append(head)
elif tail and not reverse:
values.append(tail)
for window in values:
yield window
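# Illustrative example (assumed values): iter_timeframe from 2021-01-01 12:00
# to 2021-01-03 06:00 with value="1d" yields the partial head
# (01-01 12:00, 01-02 00:00), the full day (01-02 00:00, 01-03 00:00), and
# the partial tail (01-03 00:00, 01-03 06:00).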
def iter_missing(
timestamp_from: datetime,
timestamp_to: datetime,
existing: List[datetime],
reverse: bool = False,
) -> Generator[Tuple[datetime, datetime], None, None]:
"""Iter missing, by 1 minute intervals."""
values = []
for ts_from, ts_to in iter_window(timestamp_from, timestamp_to, reverse=reverse):
if ts_from not in existing:
values.append((ts_from, ts_to))
if reverse:
values.reverse()
if len(values):
index = 0
next_index = index + 1
counter = 0
one_minute = pd.Timedelta("1t")
start = values[0][0]
stop = None
while next_index < len(values):
next_start = values[next_index][0]
total_minutes = one_minute * (counter + 1)
if next_start == start + total_minutes:
# Don't increment next_index, b/c delete
stop = values[next_index][1]
del values[next_index]
counter += 1
else:
if stop:
values[index] = start, stop
index = next_index
stop = None
start = next_start
counter = 0
next_index += 1
if stop:
values[-1] = values[-1][0], stop
if reverse:
values.reverse()
return values
|
[
"pandas.date_range",
"datetime.date",
"datetime.date.fromisoformat",
"datetime.datetime.utcnow",
"pandas.to_datetime",
"datetime.time.fromisoformat",
"pandas.Timedelta",
"datetime.datetime.combine"
] |
[((4070, 4089), 'pandas.Timedelta', 'pd.Timedelta', (['value'], {}), '(value)\n', (4082, 4089), True, 'import pandas as pd\n'), ((681, 700), 'pandas.Timedelta', 'pd.Timedelta', (['value'], {}), '(value)\n', (693, 700), True, 'import pandas as pd\n'), ((1098, 1116), 'pandas.Timedelta', 'pd.Timedelta', (['"""1d"""'], {}), "('1d')\n", (1110, 1116), True, 'import pandas as pd\n'), ((1154, 1183), 'datetime.date.fromisoformat', 'date.fromisoformat', (['date_from'], {}), '(date_from)\n', (1172, 1183), False, 'from datetime import date, datetime, time, timezone\n'), ((1202, 1218), 'datetime.date', 'date', (['(2009)', '(1)', '(3)'], {}), '(2009, 1, 3)\n', (1206, 1218), False, 'from datetime import date, datetime, time, timezone\n'), ((1235, 1264), 'datetime.time.fromisoformat', 'time.fromisoformat', (['time_from'], {}), '(time_from)\n', (1253, 1264), False, 'from datetime import date, datetime, time, timezone\n'), ((1325, 1352), 'datetime.date.fromisoformat', 'date.fromisoformat', (['date_to'], {}), '(date_to)\n', (1343, 1352), False, 'from datetime import date, datetime, time, timezone\n'), ((1392, 1419), 'datetime.time.fromisoformat', 'time.fromisoformat', (['time_to'], {}), '(time_to)\n', (1410, 1419), False, 'from datetime import date, datetime, time, timezone\n'), ((2341, 2360), 'pandas.Timedelta', 'pd.Timedelta', (['value'], {}), '(value)\n', (2353, 2360), True, 'import pandas as pd\n'), ((5977, 5995), 'pandas.Timedelta', 'pd.Timedelta', (['"""1t"""'], {}), "('1t')\n", (5989, 5995), True, 'import pandas as pd\n'), ((238, 255), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (253, 255), False, 'from datetime import date, datetime, time, timezone\n'), ((416, 448), 'pandas.to_datetime', 'pd.to_datetime', (['value'], {'unit': 'unit'}), '(value, unit=unit)\n', (430, 448), True, 'import pandas as pd\n'), ((1484, 1522), 'datetime.datetime.combine', 'datetime.combine', (['date_from', 'time_from'], {}), '(date_from, time_from)\n', (1500, 1522), False, 'from datetime import date, datetime, time, timezone\n'), ((1571, 1605), 'datetime.datetime.combine', 'datetime.combine', (['date_to', 'time_to'], {}), '(date_to, time_to)\n', (1587, 1605), False, 'from datetime import date, datetime, time, timezone\n'), ((2681, 2738), 'pandas.date_range', 'pd.date_range', (['ts_from_rounded', 'ts_to_rounded'], {'freq': 'value'}), '(ts_from_rounded, ts_to_rounded, freq=value)\n', (2694, 2738), True, 'import pandas as pd\n'), ((2160, 2185), 'pandas.to_datetime', 'pd.to_datetime', (['timestamp'], {}), '(timestamp)\n', (2174, 2185), True, 'import pandas as pd\n')]
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from yandex.cloud.cdn.v1 import origin_pb2 as yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__pb2
from yandex.cloud.cdn.v1 import origin_service_pb2 as yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
class OriginServiceStub(object):
"""
Origin management service.
Origin is not a standalone entity. It can live only within origin group.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Get = channel.unary_unary(
'/yandex.cloud.cdn.v1.OriginService/Get',
request_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.GetOriginRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__pb2.Origin.FromString,
)
self.List = channel.unary_unary(
'/yandex.cloud.cdn.v1.OriginService/List',
request_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.ListOriginsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.ListOriginsResponse.FromString,
)
self.Create = channel.unary_unary(
'/yandex.cloud.cdn.v1.OriginService/Create',
request_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.CreateOriginRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Update = channel.unary_unary(
'/yandex.cloud.cdn.v1.OriginService/Update',
request_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.UpdateOriginRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Delete = channel.unary_unary(
'/yandex.cloud.cdn.v1.OriginService/Delete',
request_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.DeleteOriginRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
class OriginServiceServicer(object):
"""
Origin management service.
Origin is not a standalone entity. It can live only within origin group.
"""
def Get(self, request, context):
"""Get origin in origin group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
"""Lists origins of origin group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Create(self, request, context):
"""Creates origin inside origin group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
"""Updates origin from origin group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
"""Deletes origin from origin group.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_OriginServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.GetOriginRequest.FromString,
response_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__pb2.Origin.SerializeToString,
),
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.ListOriginsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.ListOriginsResponse.SerializeToString,
),
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.CreateOriginRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Update': grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.UpdateOriginRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.DeleteOriginRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'yandex.cloud.cdn.v1.OriginService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class OriginService(object):
"""
Origin management service.
Origin is not a standalone entity. It can live only within origin group.
"""
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.cdn.v1.OriginService/Get',
yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.GetOriginRequest.SerializeToString,
yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__pb2.Origin.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def List(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.cdn.v1.OriginService/List',
yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.ListOriginsRequest.SerializeToString,
yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.ListOriginsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Create(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.cdn.v1.OriginService/Create',
yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.CreateOriginRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Update(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.cdn.v1.OriginService/Update',
yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.UpdateOriginRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Delete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/yandex.cloud.cdn.v1.OriginService/Delete',
yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.DeleteOriginRequest.SerializeToString,
yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
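# --- Hedged usage sketch (not part of the generated code above) ---
# Minimal illustration of calling the synchronous OriginServiceStub defined above.
# The channel address is a placeholder assumption, real calls additionally need
# Yandex Cloud authentication (omitted here), and the request is left at protobuf
# defaults so no field names are assumed.
def _example_get_origin():
    channel = grpc.insecure_channel('cdn.example.invalid:443')  # placeholder endpoint
    stub = OriginServiceStub(channel)
    request = yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.GetOriginRequest()
    return stub.Get(request)  # unary call to /yandex.cloud.cdn.v1.OriginService/Get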
|
[
"grpc.method_handlers_generic_handler",
"grpc.unary_unary_rpc_method_handler",
"grpc.experimental.unary_unary"
] |
[((5978, 6076), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""yandex.cloud.cdn.v1.OriginService"""', 'rpc_method_handlers'], {}), "('yandex.cloud.cdn.v1.OriginService',\n rpc_method_handlers)\n", (6014, 6076), False, 'import grpc\n'), ((4171, 4436), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.Get'], {'request_deserializer': 'yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.GetOriginRequest.FromString', 'response_serializer': 'yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__pb2.Origin.SerializeToString'}), '(servicer.Get, request_deserializer=\n yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.\n GetOriginRequest.FromString, response_serializer=\n yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__pb2.Origin.SerializeToString)\n', (4206, 4436), False, 'import grpc\n'), ((4518, 4813), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.List'], {'request_deserializer': 'yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.ListOriginsRequest.FromString', 'response_serializer': 'yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.ListOriginsResponse.SerializeToString'}), '(servicer.List, request_deserializer=\n yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.\n ListOriginsRequest.FromString, response_serializer=\n yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.\n ListOriginsResponse.SerializeToString)\n', (4553, 4813), False, 'import grpc\n'), ((4892, 5173), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.Create'], {'request_deserializer': 'yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.CreateOriginRequest.FromString', 'response_serializer': 'yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString'}), '(servicer.Create, request_deserializer=\n yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.\n CreateOriginRequest.FromString, response_serializer=\n yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.\n SerializeToString)\n', (4927, 5173), False, 'import grpc\n'), ((5252, 5533), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.Update'], {'request_deserializer': 'yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.UpdateOriginRequest.FromString', 'response_serializer': 'yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString'}), '(servicer.Update, request_deserializer=\n yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.\n UpdateOriginRequest.FromString, response_serializer=\n yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.\n SerializeToString)\n', (5287, 5533), False, 'import grpc\n'), ((5612, 5893), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.Delete'], {'request_deserializer': 'yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.DeleteOriginRequest.FromString', 'response_serializer': 'yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString'}), '(servicer.Delete, request_deserializer=\n yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.\n DeleteOriginRequest.FromString, response_serializer=\n yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.\n SerializeToString)\n', (5647, 5893), False, 'import grpc\n'), ((6661, 7040), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/yandex.cloud.cdn.v1.OriginService/Get"""', 
'yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.GetOriginRequest.SerializeToString', 'yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__pb2.Origin.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/yandex.cloud.cdn.v1.OriginService/Get',\n yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.\n GetOriginRequest.SerializeToString,\n yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__pb2.Origin.FromString,\n options, channel_credentials, insecure, call_credentials, compression,\n wait_for_ready, timeout, metadata)\n", (6690, 7040), False, 'import grpc\n'), ((7382, 7787), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/yandex.cloud.cdn.v1.OriginService/List"""', 'yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.ListOriginsRequest.SerializeToString', 'yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.ListOriginsResponse.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/yandex.cloud.cdn.v1.OriginService/List',\n yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.\n ListOriginsRequest.SerializeToString,\n yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.\n ListOriginsResponse.FromString, options, channel_credentials, insecure,\n call_credentials, compression, wait_for_ready, timeout, metadata)\n", (7411, 7787), False, 'import grpc\n'), ((8130, 8520), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/yandex.cloud.cdn.v1.OriginService/Create"""', 'yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.CreateOriginRequest.SerializeToString', 'yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/yandex.cloud.cdn.v1.OriginService/Create',\n yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.\n CreateOriginRequest.SerializeToString,\n yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,\n options, channel_credentials, insecure, call_credentials, compression,\n wait_for_ready, timeout, metadata)\n", (8159, 8520), False, 'import grpc\n'), ((8864, 9254), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/yandex.cloud.cdn.v1.OriginService/Update"""', 'yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.UpdateOriginRequest.SerializeToString', 'yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/yandex.cloud.cdn.v1.OriginService/Update',\n yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.\n UpdateOriginRequest.SerializeToString,\n yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,\n options, channel_credentials, insecure, call_credentials, compression,\n wait_for_ready, timeout, metadata)\n", (8893, 9254), False, 'import grpc\n'), ((9598, 9988), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/yandex.cloud.cdn.v1.OriginService/Delete"""', 'yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.DeleteOriginRequest.SerializeToString', 
'yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/yandex.cloud.cdn.v1.OriginService/Delete',\n yandex_dot_cloud_dot_cdn_dot_v1_dot_origin__service__pb2.\n DeleteOriginRequest.SerializeToString,\n yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,\n options, channel_credentials, insecure, call_credentials, compression,\n wait_for_ready, timeout, metadata)\n", (9627, 9988), False, 'import grpc\n')]
|
import os
class Config:
'''
General config
'''
NEWS_BASE_URL = 'https://newsapi.org/v2/everything?q={}&apiKey={}'
NEWS_API_KEY = os.environ.get('NEWS_API_KEY') #<KEY>
SECRET_KEY = os.environ.get('SECRET_KEY')
class ProdConfig(Config):
pass
class DevConfig(Config):
DEBUG = True
config_options = {
'development': DevConfig,
'production': ProdConfig
}
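# --- Hedged usage sketch (not part of the config module above) ---
# One way the config_options mapping might be consumed, e.g. in a Flask app
# factory. Flask itself and the 'development' default are illustrative
# assumptions; only Config/DevConfig/ProdConfig and config_options come from
# the code above.
def _example_create_app(config_name='development'):
    from flask import Flask  # assumed dependency
    app = Flask(__name__)
    app.config.from_object(config_options[config_name])
    return app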
|
[
"os.environ.get"
] |
[((151, 181), 'os.environ.get', 'os.environ.get', (['"""NEWS_API_KEY"""'], {}), "('NEWS_API_KEY')\n", (165, 181), False, 'import os\n'), ((206, 234), 'os.environ.get', 'os.environ.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (220, 234), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main_test.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!
import sys
from PyQt5 import QtWidgets, QtGui, QtCore
import icons_rc
from xldigest.widgets.main_ui import Ui_MainXldigestWindow
from xldigest.widgets.template_manager_window import TemplateManagerWindow
class XldigestMainWindow(QtWidgets.QMainWindow, Ui_MainXldigestWindow):
def __init__(self, parent=None):
super(XldigestMainWindow, self).__init__(parent)
self.setupUi(self)
self.toolBar = QtWidgets.QToolBar("Main Toolbar", parent=self)
self.toolBar.setIconSize(QtCore.QSize(64, 64))
self.toolBar.setMovable(False)
self.toolBar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.overviewIcon = QtGui.QPixmap(':/play.png')
self.toolBar.addAction(QtGui.QIcon(self.overviewIcon), "Overview")
self.datamapIcon = QtGui.QPixmap(':/tools.png')
self.toolBar.addAction(QtGui.QIcon(self.datamapIcon), "Datamap")
self.returnsIcon = QtGui.QPixmap(':/upload.png')
self.toolBar.addAction(QtGui.QIcon(self.returnsIcon), "Returns")
self.templatesIcon = QtGui.QPixmap(':/dev.png')
self.toolBar.addAction(QtGui.QIcon(self.templatesIcon), "Templates")
self.importIcon = QtGui.QPixmap(':/arrow-down.png')
self.toolBar.addAction(QtGui.QIcon(self.importIcon), "Imports")
def main():
application = QtWidgets.QApplication(sys.argv)
window = XldigestMainWindow()
desktop = QtWidgets.QDesktopWidget().availableGeometry()
    # integer division: QWidget.move() expects int coordinates
    width = (desktop.width() - window.width()) // 2
    height = (desktop.height() - window.height()) // 3
window.show()
window.move(width, height)
sys.exit(application.exec_())
if __name__ == "__main__":
main()
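# --- Hedged build note (not part of the UI module above) ---
# The ':/play.png'-style paths refer to Qt resources bundled in icons_rc, which
# is normally generated from a Qt .qrc file with PyQt5's resource compiler,
# e.g. `pyrcc5 icons.qrc -o icons_rc.py`. The actual .qrc file name used by this
# project is an assumption here.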
|
[
"PyQt5.QtGui.QIcon",
"PyQt5.QtWidgets.QDesktopWidget",
"PyQt5.QtCore.QSize",
"PyQt5.QtGui.QPixmap",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QToolBar"
] |
[((1594, 1626), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (1616, 1626), False, 'from PyQt5 import QtWidgets, QtGui, QtCore\n'), ((621, 668), 'PyQt5.QtWidgets.QToolBar', 'QtWidgets.QToolBar', (['"""Main Toolbar"""'], {'parent': 'self'}), "('Main Toolbar', parent=self)\n", (639, 668), False, 'from PyQt5 import QtWidgets, QtGui, QtCore\n'), ((931, 958), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/play.png"""'], {}), "(':/play.png')\n", (944, 958), False, 'from PyQt5 import QtWidgets, QtGui, QtCore\n'), ((1062, 1090), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/tools.png"""'], {}), "(':/tools.png')\n", (1075, 1090), False, 'from PyQt5 import QtWidgets, QtGui, QtCore\n'), ((1192, 1221), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/upload.png"""'], {}), "(':/upload.png')\n", (1205, 1221), False, 'from PyQt5 import QtWidgets, QtGui, QtCore\n'), ((1325, 1351), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/dev.png"""'], {}), "(':/dev.png')\n", (1338, 1351), False, 'from PyQt5 import QtWidgets, QtGui, QtCore\n'), ((1456, 1489), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/arrow-down.png"""'], {}), "(':/arrow-down.png')\n", (1469, 1489), False, 'from PyQt5 import QtWidgets, QtGui, QtCore\n'), ((702, 722), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(64)', '(64)'], {}), '(64, 64)\n', (714, 722), False, 'from PyQt5 import QtWidgets, QtGui, QtCore\n'), ((990, 1020), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['self.overviewIcon'], {}), '(self.overviewIcon)\n', (1001, 1020), False, 'from PyQt5 import QtWidgets, QtGui, QtCore\n'), ((1122, 1151), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['self.datamapIcon'], {}), '(self.datamapIcon)\n', (1133, 1151), False, 'from PyQt5 import QtWidgets, QtGui, QtCore\n'), ((1253, 1282), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['self.returnsIcon'], {}), '(self.returnsIcon)\n', (1264, 1282), False, 'from PyQt5 import QtWidgets, QtGui, QtCore\n'), ((1383, 1414), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['self.templatesIcon'], {}), '(self.templatesIcon)\n', (1394, 1414), False, 'from PyQt5 import QtWidgets, QtGui, QtCore\n'), ((1521, 1549), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['self.importIcon'], {}), '(self.importIcon)\n', (1532, 1549), False, 'from PyQt5 import QtWidgets, QtGui, QtCore\n'), ((1675, 1701), 'PyQt5.QtWidgets.QDesktopWidget', 'QtWidgets.QDesktopWidget', ([], {}), '()\n', (1699, 1701), False, 'from PyQt5 import QtWidgets, QtGui, QtCore\n')]
|
from copy import copy
import cloudpickle
from sortedcontainers import SortedDict, SortedSet
from adaptive.learner.base_learner import BaseLearner
class _IgnoreFirstArgument:
"""Remove the first argument from the call signature.
The SequenceLearner's function receives a tuple ``(index, point)``
but the original function only takes ``point``.
This is the same as `lambda x: function(x[1])`, however, that is not
pickable.
"""
def __init__(self, function):
self.function = function
def __call__(self, index_point, *args, **kwargs):
index, point = index_point
return self.function(point, *args, **kwargs)
def __getstate__(self):
return self.function
def __setstate__(self, function):
self.__init__(function)
class SequenceLearner(BaseLearner):
r"""A learner that will learn a sequence. It simply returns
the points in the provided sequence when asked.
This is useful when your problem cannot be formulated in terms of
another adaptive learner, but you still want to use Adaptive's
routines to run, save, and plot.
Parameters
----------
function : callable
The function to learn. Must take a single element `sequence`.
sequence : sequence
The sequence to learn.
Attributes
----------
data : dict
The data as a mapping from "index of element in sequence" => value.
Notes
-----
From primitive tests, the `~adaptive.SequenceLearner` appears to have a
similar performance to `ipyparallel`\s ``load_balanced_view().map``. With
the added benefit of having results in the local kernel already.
"""
def __init__(self, function, sequence):
self._original_function = function
self.function = _IgnoreFirstArgument(function)
self._to_do_indices = SortedSet({i for i, _ in enumerate(sequence)})
self._ntotal = len(sequence)
self.sequence = copy(sequence)
self.data = SortedDict()
self.pending_points = set()
def ask(self, n, tell_pending=True):
indices = []
points = []
loss_improvements = []
for index in self._to_do_indices:
if len(points) >= n:
break
point = self.sequence[index]
indices.append(index)
points.append((index, point))
loss_improvements.append(1 / self._ntotal)
if tell_pending:
for i, p in zip(indices, points):
self.tell_pending((i, p))
return points, loss_improvements
def loss(self, real=True):
if not (self._to_do_indices or self.pending_points):
return 0
else:
npoints = self.npoints + (0 if real else len(self.pending_points))
return (self._ntotal - npoints) / self._ntotal
def remove_unfinished(self):
for i in self.pending_points:
self._to_do_indices.add(i)
self.pending_points = set()
def tell(self, point, value):
index, point = point
self.data[index] = value
self.pending_points.discard(index)
self._to_do_indices.discard(index)
def tell_pending(self, point):
index, point = point
self.pending_points.add(index)
self._to_do_indices.discard(index)
def done(self):
return not self._to_do_indices and not self.pending_points
def result(self):
"""Get the function values in the same order as ``sequence``."""
if not self.done():
raise Exception("Learner is not yet complete.")
return list(self.data.values())
@property
def npoints(self):
return len(self.data)
def _get_data(self):
return self.data
def _set_data(self, data):
if data:
indices, values = zip(*data.items())
# the points aren't used by tell, so we can safely pass None
points = [(i, None) for i in indices]
self.tell_many(points, values)
def __getstate__(self):
return (
cloudpickle.dumps(self._original_function),
self.sequence,
self._get_data(),
)
def __setstate__(self, state):
function, sequence, data = state
function = cloudpickle.loads(function)
self.__init__(function, sequence)
self._set_data(data)
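# --- Hedged usage sketch (not part of the library code above) ---
# Drives SequenceLearner by hand through its ask/tell interface, without any
# adaptive Runner. The squaring function and the 5-element sequence are made-up
# illustration values.
def _example_sequence_learner():
    learner = SequenceLearner(lambda x: x ** 2, sequence=range(5))
    while not learner.done():
        points, _ = learner.ask(2)
        for index_point in points:
            value = learner.function(index_point)  # wrapper strips the index part
            learner.tell(index_point, value)
    return learner.result()  # [0, 1, 4, 9, 16]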
|
[
"sortedcontainers.SortedDict",
"cloudpickle.loads",
"copy.copy",
"cloudpickle.dumps"
] |
[((1961, 1975), 'copy.copy', 'copy', (['sequence'], {}), '(sequence)\n', (1965, 1975), False, 'from copy import copy\n'), ((1996, 2008), 'sortedcontainers.SortedDict', 'SortedDict', ([], {}), '()\n', (2006, 2008), False, 'from sortedcontainers import SortedDict, SortedSet\n'), ((4287, 4314), 'cloudpickle.loads', 'cloudpickle.loads', (['function'], {}), '(function)\n', (4304, 4314), False, 'import cloudpickle\n'), ((4080, 4122), 'cloudpickle.dumps', 'cloudpickle.dumps', (['self._original_function'], {}), '(self._original_function)\n', (4097, 4122), False, 'import cloudpickle\n')]
|
import pytest
import json
@pytest.fixture
def post_body_share_user():
return json.dumps({
'data': {
'type': 'ShareUser',
'attributes': {
'username': 'TestUser'
}
}
})
@pytest.fixture(params=[
'/api/v2/user/',
'/api/v2/users/',
])
def endpoint(request):
return request.param
@pytest.mark.django_db
class TestSourcesGet:
def test_logged_in(self, endpoint, client, share_user):
resp = client.get(endpoint, HTTP_AUTHORIZATION=share_user.authorization())
assert resp.status_code == 200
user_data = resp.json()['data']
assert len(user_data) == 1
assert user_data[0]['attributes']['username'] == share_user.username
def test_not_logged_in(self, endpoint, client):
resp = client.get(endpoint)
assert resp.status_code == 200
user_data = resp.json()['data']
assert len(user_data) == 0
@pytest.mark.django_db
class TestSourcesPost:
def test_unauthorized_post(self, endpoint, client, post_body_share_user):
assert client.post(
endpoint,
post_body_share_user,
content_type='application/vnd.api+json'
).status_code == 401
def test_authorized_post(self, endpoint, client, share_user, post_body_share_user):
assert client.post(
endpoint,
post_body_share_user,
content_type='application/vnd.api+json',
HTTP_AUTHORIZATION=share_user.authorization(),
).status_code == 405
|
[
"pytest.fixture",
"json.dumps"
] |
[((248, 306), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['/api/v2/user/', '/api/v2/users/']"}), "(params=['/api/v2/user/', '/api/v2/users/'])\n", (262, 306), False, 'import pytest\n'), ((83, 170), 'json.dumps', 'json.dumps', (["{'data': {'type': 'ShareUser', 'attributes': {'username': 'TestUser'}}}"], {}), "({'data': {'type': 'ShareUser', 'attributes': {'username':\n 'TestUser'}}})\n", (93, 170), False, 'import json\n')]
|
import math
a = int(input())
area = 2 * math.sqrt(3) * math.pow(a, 2)
volume = math.pow(a, 3) * math.sqrt(2) / 3
print(round(area, 2), round(volume, 2))
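# Worked check: the formulas above match a regular octahedron with edge length a,
#   surface area = 2*sqrt(3)*a**2, volume = sqrt(2)/3 * a**3.
# For input a = 3 the program prints "31.18 12.73"
# (2*sqrt(3)*9 ≈ 31.18, sqrt(2)*27/3 ≈ 12.73).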
|
[
"math.sqrt",
"math.pow"
] |
[((57, 71), 'math.pow', 'math.pow', (['a', '(2)'], {}), '(a, 2)\n', (65, 71), False, 'import math\n'), ((42, 54), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (51, 54), False, 'import math\n'), ((81, 95), 'math.pow', 'math.pow', (['a', '(3)'], {}), '(a, 3)\n', (89, 95), False, 'import math\n'), ((98, 110), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (107, 110), False, 'import math\n')]
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for data_structures module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.operators import data_structures
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class ListTest(test.TestCase):
def test_new_list_empty(self):
l = data_structures.new_list()
# Can't evaluate an empty list.
# TODO(mdan): sess.run should allow tf.variant maybe?
self.assertTrue(isinstance(l, ops.Tensor))
def test_new_list_tensor(self):
l = data_structures.new_list([3, 4, 5])
self.assertAllEqual(l, [3, 4, 5])
def test_tf_tensor_list_new(self):
l = data_structures.tf_tensor_list_new([3, 4, 5])
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(t), [3, 4, 5])
def test_tf_tensor_list_new_empty(self):
l = data_structures.tf_tensor_list_new([],
element_dtype=dtypes.int32,
element_shape=())
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(t), [])
def test_tf_tensor_list_new_from_tensor(self):
l = data_structures.tf_tensor_list_new(constant_op.constant([3, 4, 5]))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(t), [3, 4, 5])
@test_util.run_deprecated_v1
def test_tf_tensor_list_new_illegal_input(self):
with self.assertRaises(ValueError):
data_structures.tf_tensor_list_new([3, 4.0])
# TODO(mdan): It might make more sense to type cast in this case.
with self.assertRaises(ValueError):
data_structures.tf_tensor_list_new([3, 4], element_dtype=dtypes.float32)
# Tensor lists do support heterogeneous lists.
self.assertIsNot(data_structures.tf_tensor_list_new([3, [4, 5]]), None)
with self.assertRaises(ValueError):
data_structures.tf_tensor_list_new([3, 4], element_shape=(2,))
with self.assertRaises(ValueError):
data_structures.tf_tensor_list_new(
constant_op.constant([1, 2, 3]), element_shape=[1])
def test_tf_tensor_array_new(self):
l = data_structures.tf_tensor_array_new([3, 4, 5])
t = l.stack()
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(t), [3, 4, 5])
def test_tf_tensor_array_new_illegal_input(self):
with self.assertRaises(ValueError):
data_structures.tf_tensor_array_new([3, 4.0])
with self.assertRaises(ValueError):
data_structures.tf_tensor_array_new([3, 4], element_dtype=dtypes.float32)
with self.assertRaises(ValueError):
data_structures.tf_tensor_array_new([3, [4, 5]])
with self.assertRaises(ValueError):
data_structures.tf_tensor_array_new([3, 4], element_shape=(2,))
with self.assertRaises(ValueError):
data_structures.tf_tensor_array_new([], element_shape=(2,))
# TAs can infer the shape.
self.assertIsNot(
data_structures.tf_tensor_array_new([], element_dtype=dtypes.float32),
None)
def test_append_tensor_list(self):
l = data_structures.new_list()
x = constant_op.constant([1, 2, 3])
l = data_structures.list_append(l, x)
t = list_ops.tensor_list_stack(l, element_dtype=x.dtype)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(t), [[1, 2, 3]])
@test_util.run_deprecated_v1
def test_append_tensorarray(self):
l = tensor_array_ops.TensorArray(dtypes.int32, size=0, dynamic_size=True)
l1 = data_structures.list_append(l, 1)
l2 = data_structures.list_append(l1, 2)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(l1.stack()), [1])
self.assertAllEqual(self.evaluate(l2.stack()), [1, 2])
def test_append_python(self):
l = []
self.assertAllEqual(data_structures.list_append(l, 1), [1])
self.assertAllEqual(data_structures.list_append(l, 2), [1, 2])
def test_pop_tensor_list(self):
initial_list = constant_op.constant([[1, 2], [3, 4]])
elem_shape = constant_op.constant([2])
l = list_ops.tensor_list_from_tensor(initial_list, element_shape=elem_shape)
opts = data_structures.ListPopOpts(
element_dtype=initial_list.dtype,
element_shape=(2,))
with self.assertRaises(NotImplementedError):
data_structures.list_pop(l, 0, opts)
with self.cached_session() as sess:
l, x = data_structures.list_pop(l, None, opts)
self.assertAllEqual(self.evaluate(x), [3, 4])
t = list_ops.tensor_list_stack(l, element_dtype=initial_list.dtype)
self.assertAllEqual(self.evaluate(t), [[1, 2]])
def test_pop_python(self):
l = [1, 2, 3]
opts = data_structures.ListPopOpts(element_dtype=None, element_shape=())
self.assertAllEqual(data_structures.list_pop(l, None, opts), ([1, 2], 3))
self.assertAllEqual(data_structures.list_pop(l, None, opts), ([1], 2))
def test_stack_tensor_list(self):
initial_list = constant_op.constant([[1, 2], [3, 4]])
elem_shape = constant_op.constant([2])
l = list_ops.tensor_list_from_tensor(initial_list, element_shape=elem_shape)
opts = data_structures.ListStackOpts(
element_dtype=initial_list.dtype, original_call=None)
with self.cached_session() as sess:
t = data_structures.list_stack(l, opts)
self.assertAllEqual(self.evaluate(t), self.evaluate(initial_list))
@test_util.run_deprecated_v1
def test_stack_tensor_list_empty(self):
l = list_ops.empty_tensor_list(
element_shape=None, element_dtype=dtypes.variant)
opts = data_structures.ListStackOpts(
element_dtype=dtypes.int32, original_call=None)
# TODO(mdan): Allow stacking empty lists if the dtype and shape are known.
with self.assertRaises(ValueError):
data_structures.list_stack(l, opts)
def test_stack_fallback(self):
def dummy_function(l):
# Lazy person's mock: just transform the argument in a way in which we
# can check that this function was indeed called.
return [x * 2 for x in l]
opts = data_structures.ListStackOpts(
element_dtype=None, original_call=dummy_function)
self.assertAllEqual(data_structures.list_stack([1, 2], opts), [2, 4])
if __name__ == '__main__':
test.main()
|
[
"tensorflow.python.platform.test.main",
"tensorflow.python.autograph.operators.data_structures.new_list",
"tensorflow.python.ops.list_ops.tensor_list_stack",
"tensorflow.python.autograph.operators.data_structures.ListPopOpts",
"tensorflow.python.ops.list_ops.tensor_list_from_tensor",
"tensorflow.python.ops.tensor_array_ops.TensorArray",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.autograph.operators.data_structures.list_pop",
"tensorflow.python.autograph.operators.data_structures.list_append",
"tensorflow.python.ops.list_ops.empty_tensor_list",
"tensorflow.python.autograph.operators.data_structures.ListStackOpts",
"tensorflow.python.autograph.operators.data_structures.list_stack",
"tensorflow.python.autograph.operators.data_structures.tf_tensor_list_new",
"tensorflow.python.autograph.operators.data_structures.tf_tensor_array_new"
] |
[((7390, 7401), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (7399, 7401), False, 'from tensorflow.python.platform import test\n'), ((1312, 1338), 'tensorflow.python.autograph.operators.data_structures.new_list', 'data_structures.new_list', ([], {}), '()\n', (1336, 1338), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((1523, 1558), 'tensorflow.python.autograph.operators.data_structures.new_list', 'data_structures.new_list', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (1547, 1558), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((1643, 1688), 'tensorflow.python.autograph.operators.data_structures.tf_tensor_list_new', 'data_structures.tf_tensor_list_new', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (1677, 1688), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((1697, 1754), 'tensorflow.python.ops.list_ops.tensor_list_stack', 'list_ops.tensor_list_stack', (['l'], {'element_dtype': 'dtypes.int32'}), '(l, element_dtype=dtypes.int32)\n', (1723, 1754), False, 'from tensorflow.python.ops import list_ops\n'), ((1902, 1990), 'tensorflow.python.autograph.operators.data_structures.tf_tensor_list_new', 'data_structures.tf_tensor_list_new', (['[]'], {'element_dtype': 'dtypes.int32', 'element_shape': '()'}), '([], element_dtype=dtypes.int32,\n element_shape=())\n', (1936, 1990), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((2081, 2138), 'tensorflow.python.ops.list_ops.tensor_list_stack', 'list_ops.tensor_list_stack', (['l'], {'element_dtype': 'dtypes.int32'}), '(l, element_dtype=dtypes.int32)\n', (2107, 2138), False, 'from tensorflow.python.ops import list_ops\n'), ((2361, 2418), 'tensorflow.python.ops.list_ops.tensor_list_stack', 'list_ops.tensor_list_stack', (['l'], {'element_dtype': 'dtypes.int32'}), '(l, element_dtype=dtypes.int32)\n', (2387, 2418), False, 'from tensorflow.python.ops import list_ops\n'), ((3304, 3350), 'tensorflow.python.autograph.operators.data_structures.tf_tensor_array_new', 'data_structures.tf_tensor_array_new', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (3339, 3350), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((4232, 4258), 'tensorflow.python.autograph.operators.data_structures.new_list', 'data_structures.new_list', ([], {}), '()\n', (4256, 4258), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((4267, 4298), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (4287, 4298), False, 'from tensorflow.python.framework import constant_op\n'), ((4307, 4340), 'tensorflow.python.autograph.operators.data_structures.list_append', 'data_structures.list_append', (['l', 'x'], {}), '(l, x)\n', (4334, 4340), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((4350, 4402), 'tensorflow.python.ops.list_ops.tensor_list_stack', 'list_ops.tensor_list_stack', (['l'], {'element_dtype': 'x.dtype'}), '(l, element_dtype=x.dtype)\n', (4376, 4402), False, 'from tensorflow.python.ops import list_ops\n'), ((4577, 4646), 'tensorflow.python.ops.tensor_array_ops.TensorArray', 'tensor_array_ops.TensorArray', (['dtypes.int32'], {'size': '(0)', 'dynamic_size': '(True)'}), '(dtypes.int32, size=0, dynamic_size=True)\n', (4605, 4646), False, 'from tensorflow.python.ops import tensor_array_ops\n'), ((4656, 4689), 'tensorflow.python.autograph.operators.data_structures.list_append', 'data_structures.list_append', (['l', '(1)'], 
{}), '(l, 1)\n', (4683, 4689), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((4699, 4733), 'tensorflow.python.autograph.operators.data_structures.list_append', 'data_structures.list_append', (['l1', '(2)'], {}), '(l1, 2)\n', (4726, 4733), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((5123, 5161), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (5143, 5161), False, 'from tensorflow.python.framework import constant_op\n'), ((5179, 5204), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[2]'], {}), '([2])\n', (5199, 5204), False, 'from tensorflow.python.framework import constant_op\n'), ((5213, 5285), 'tensorflow.python.ops.list_ops.tensor_list_from_tensor', 'list_ops.tensor_list_from_tensor', (['initial_list'], {'element_shape': 'elem_shape'}), '(initial_list, element_shape=elem_shape)\n', (5245, 5285), False, 'from tensorflow.python.ops import list_ops\n'), ((5298, 5384), 'tensorflow.python.autograph.operators.data_structures.ListPopOpts', 'data_structures.ListPopOpts', ([], {'element_dtype': 'initial_list.dtype', 'element_shape': '(2,)'}), '(element_dtype=initial_list.dtype, element_shape\n =(2,))\n', (5325, 5384), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((5824, 5889), 'tensorflow.python.autograph.operators.data_structures.ListPopOpts', 'data_structures.ListPopOpts', ([], {'element_dtype': 'None', 'element_shape': '()'}), '(element_dtype=None, element_shape=())\n', (5851, 5889), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((6099, 6137), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (6119, 6137), False, 'from tensorflow.python.framework import constant_op\n'), ((6155, 6180), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[2]'], {}), '([2])\n', (6175, 6180), False, 'from tensorflow.python.framework import constant_op\n'), ((6189, 6261), 'tensorflow.python.ops.list_ops.tensor_list_from_tensor', 'list_ops.tensor_list_from_tensor', (['initial_list'], {'element_shape': 'elem_shape'}), '(initial_list, element_shape=elem_shape)\n', (6221, 6261), False, 'from tensorflow.python.ops import list_ops\n'), ((6274, 6361), 'tensorflow.python.autograph.operators.data_structures.ListStackOpts', 'data_structures.ListStackOpts', ([], {'element_dtype': 'initial_list.dtype', 'original_call': 'None'}), '(element_dtype=initial_list.dtype,\n original_call=None)\n', (6303, 6361), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((6609, 6685), 'tensorflow.python.ops.list_ops.empty_tensor_list', 'list_ops.empty_tensor_list', ([], {'element_shape': 'None', 'element_dtype': 'dtypes.variant'}), '(element_shape=None, element_dtype=dtypes.variant)\n', (6635, 6685), False, 'from tensorflow.python.ops import list_ops\n'), ((6707, 6784), 'tensorflow.python.autograph.operators.data_structures.ListStackOpts', 'data_structures.ListStackOpts', ([], {'element_dtype': 'dtypes.int32', 'original_call': 'None'}), '(element_dtype=dtypes.int32, original_call=None)\n', (6736, 6784), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((7195, 7274), 'tensorflow.python.autograph.operators.data_structures.ListStackOpts', 'data_structures.ListStackOpts', ([], {'element_dtype': 'None', 'original_call': 'dummy_function'}), 
'(element_dtype=None, original_call=dummy_function)\n', (7224, 7274), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((2320, 2351), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (2340, 2351), False, 'from tensorflow.python.framework import constant_op\n'), ((2643, 2687), 'tensorflow.python.autograph.operators.data_structures.tf_tensor_list_new', 'data_structures.tf_tensor_list_new', (['[3, 4.0]'], {}), '([3, 4.0])\n', (2677, 2687), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((2804, 2876), 'tensorflow.python.autograph.operators.data_structures.tf_tensor_list_new', 'data_structures.tf_tensor_list_new', (['[3, 4]'], {'element_dtype': 'dtypes.float32'}), '([3, 4], element_dtype=dtypes.float32)\n', (2838, 2876), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((2949, 2996), 'tensorflow.python.autograph.operators.data_structures.tf_tensor_list_new', 'data_structures.tf_tensor_list_new', (['[3, [4, 5]]'], {}), '([3, [4, 5]])\n', (2983, 2996), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((3050, 3112), 'tensorflow.python.autograph.operators.data_structures.tf_tensor_list_new', 'data_structures.tf_tensor_list_new', (['[3, 4]'], {'element_shape': '(2,)'}), '([3, 4], element_shape=(2,))\n', (3084, 3112), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((3563, 3608), 'tensorflow.python.autograph.operators.data_structures.tf_tensor_array_new', 'data_structures.tf_tensor_array_new', (['[3, 4.0]'], {}), '([3, 4.0])\n', (3598, 3608), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((3655, 3728), 'tensorflow.python.autograph.operators.data_structures.tf_tensor_array_new', 'data_structures.tf_tensor_array_new', (['[3, 4]'], {'element_dtype': 'dtypes.float32'}), '([3, 4], element_dtype=dtypes.float32)\n', (3690, 3728), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((3775, 3823), 'tensorflow.python.autograph.operators.data_structures.tf_tensor_array_new', 'data_structures.tf_tensor_array_new', (['[3, [4, 5]]'], {}), '([3, [4, 5]])\n', (3810, 3823), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((3870, 3933), 'tensorflow.python.autograph.operators.data_structures.tf_tensor_array_new', 'data_structures.tf_tensor_array_new', (['[3, 4]'], {'element_shape': '(2,)'}), '([3, 4], element_shape=(2,))\n', (3905, 3933), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((3980, 4039), 'tensorflow.python.autograph.operators.data_structures.tf_tensor_array_new', 'data_structures.tf_tensor_array_new', (['[]'], {'element_shape': '(2,)'}), '([], element_shape=(2,))\n', (4015, 4039), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((4101, 4170), 'tensorflow.python.autograph.operators.data_structures.tf_tensor_array_new', 'data_structures.tf_tensor_array_new', (['[]'], {'element_dtype': 'dtypes.float32'}), '([], element_dtype=dtypes.float32)\n', (4136, 4170), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((4962, 4995), 'tensorflow.python.autograph.operators.data_structures.list_append', 'data_structures.list_append', (['l', '(1)'], {}), '(l, 1)\n', (4989, 4995), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((5026, 5059), 
'tensorflow.python.autograph.operators.data_structures.list_append', 'data_structures.list_append', (['l', '(2)'], {}), '(l, 2)\n', (5053, 5059), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((5453, 5489), 'tensorflow.python.autograph.operators.data_structures.list_pop', 'data_structures.list_pop', (['l', '(0)', 'opts'], {}), '(l, 0, opts)\n', (5477, 5489), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((5544, 5583), 'tensorflow.python.autograph.operators.data_structures.list_pop', 'data_structures.list_pop', (['l', 'None', 'opts'], {}), '(l, None, opts)\n', (5568, 5583), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((5647, 5710), 'tensorflow.python.ops.list_ops.tensor_list_stack', 'list_ops.tensor_list_stack', (['l'], {'element_dtype': 'initial_list.dtype'}), '(l, element_dtype=initial_list.dtype)\n', (5673, 5710), False, 'from tensorflow.python.ops import list_ops\n'), ((5914, 5953), 'tensorflow.python.autograph.operators.data_structures.list_pop', 'data_structures.list_pop', (['l', 'None', 'opts'], {}), '(l, None, opts)\n', (5938, 5953), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((5992, 6031), 'tensorflow.python.autograph.operators.data_structures.list_pop', 'data_structures.list_pop', (['l', 'None', 'opts'], {}), '(l, None, opts)\n', (6016, 6031), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((6418, 6453), 'tensorflow.python.autograph.operators.data_structures.list_stack', 'data_structures.list_stack', (['l', 'opts'], {}), '(l, opts)\n', (6444, 6453), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((6920, 6955), 'tensorflow.python.autograph.operators.data_structures.list_stack', 'data_structures.list_stack', (['l', 'opts'], {}), '(l, opts)\n', (6946, 6955), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((7309, 7349), 'tensorflow.python.autograph.operators.data_structures.list_stack', 'data_structures.list_stack', (['[1, 2]', 'opts'], {}), '([1, 2], opts)\n', (7335, 7349), False, 'from tensorflow.python.autograph.operators import data_structures\n'), ((3205, 3236), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (3225, 3236), False, 'from tensorflow.python.framework import constant_op\n')]
|
import pytest
import numpy as np
import torch
from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse
@pytest.mark.parametrize('val', [0, 1, 5])
def test_pad_col_start(val):
x = torch.ones((2, 3))
x_pad = pad_col(x, val, where='start')
pad = torch.ones(2, 1) * val
assert (x_pad == torch.cat([pad, x], dim=1)).all()
@pytest.mark.parametrize('val', [0, 1, 5])
def test_pad_col_end(val):
x = torch.ones((2, 3))
x_pad = pad_col(x, val)
pad = torch.ones(2, 1) * val
assert (x_pad == torch.cat([x, pad], dim=1)).all()
@pytest.mark.parametrize('n', [2, 13, 40])
def test_make_subgrid_1(n):
grid = np.random.uniform(0, 100, n)
grid = np.sort(grid)
new_grid = make_subgrid(grid, 1)
assert len(new_grid) == len(grid)
assert (new_grid == grid).all()
@pytest.mark.parametrize('sub', [2, 10, 20])
@pytest.mark.parametrize('start', [0, 2])
@pytest.mark.parametrize('stop', [4, 100])
@pytest.mark.parametrize('n', [5, 10])
def test_make_subgrid(sub, start, stop, n):
grid = np.linspace(start, stop, n)
new_grid = make_subgrid(grid, sub)
true_new = np.linspace(start, stop, n*sub - (sub-1))
assert len(new_grid) == len(true_new)
assert np.abs(true_new - new_grid).max() < 1e-13
def test_cumsum_reverse_error_dim():
x = torch.randn((5, 3))
with pytest.raises(NotImplementedError):
cumsum_reverse(x, dim=0)
with pytest.raises(NotImplementedError):
cumsum_reverse(x, dim=2)
def test_cumsum_reverse_dim_1():
torch.manual_seed(1234)
x = torch.randn(5, 16)
res_np = x.numpy()[:, ::-1].cumsum(1)[:, ::-1]
res = cumsum_reverse(x, dim=1)
assert np.abs(res.numpy() - res_np).max() < 1e-6
|
[
"numpy.random.uniform",
"torch.ones",
"pycox.models.utils.make_subgrid",
"numpy.abs",
"torch.manual_seed",
"torch.randn",
"torch.cat",
"numpy.sort",
"pytest.raises",
"numpy.linspace",
"pytest.mark.parametrize",
"pycox.models.utils.pad_col",
"pycox.models.utils.cumsum_reverse"
] |
[((117, 158), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', '[0, 1, 5]'], {}), "('val', [0, 1, 5])\n", (140, 158), False, 'import pytest\n'), ((348, 389), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', '[0, 1, 5]'], {}), "('val', [0, 1, 5])\n", (371, 389), False, 'import pytest\n'), ((562, 603), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n"""', '[2, 13, 40]'], {}), "('n', [2, 13, 40])\n", (585, 603), False, 'import pytest\n'), ((810, 853), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sub"""', '[2, 10, 20]'], {}), "('sub', [2, 10, 20])\n", (833, 853), False, 'import pytest\n'), ((855, 895), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""start"""', '[0, 2]'], {}), "('start', [0, 2])\n", (878, 895), False, 'import pytest\n'), ((897, 938), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stop"""', '[4, 100]'], {}), "('stop', [4, 100])\n", (920, 938), False, 'import pytest\n'), ((940, 977), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n"""', '[5, 10]'], {}), "('n', [5, 10])\n", (963, 977), False, 'import pytest\n'), ((196, 214), 'torch.ones', 'torch.ones', (['(2, 3)'], {}), '((2, 3))\n', (206, 214), False, 'import torch\n'), ((227, 257), 'pycox.models.utils.pad_col', 'pad_col', (['x', 'val'], {'where': '"""start"""'}), "(x, val, where='start')\n", (234, 257), False, 'from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse\n'), ((425, 443), 'torch.ones', 'torch.ones', (['(2, 3)'], {}), '((2, 3))\n', (435, 443), False, 'import torch\n'), ((456, 471), 'pycox.models.utils.pad_col', 'pad_col', (['x', 'val'], {}), '(x, val)\n', (463, 471), False, 'from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse\n'), ((643, 671), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(100)', 'n'], {}), '(0, 100, n)\n', (660, 671), True, 'import numpy as np\n'), ((683, 696), 'numpy.sort', 'np.sort', (['grid'], {}), '(grid)\n', (690, 696), True, 'import numpy as np\n'), ((712, 733), 'pycox.models.utils.make_subgrid', 'make_subgrid', (['grid', '(1)'], {}), '(grid, 1)\n', (724, 733), False, 'from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse\n'), ((1033, 1060), 'numpy.linspace', 'np.linspace', (['start', 'stop', 'n'], {}), '(start, stop, n)\n', (1044, 1060), True, 'import numpy as np\n'), ((1076, 1099), 'pycox.models.utils.make_subgrid', 'make_subgrid', (['grid', 'sub'], {}), '(grid, sub)\n', (1088, 1099), False, 'from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse\n'), ((1115, 1160), 'numpy.linspace', 'np.linspace', (['start', 'stop', '(n * sub - (sub - 1))'], {}), '(start, stop, n * sub - (sub - 1))\n', (1126, 1160), True, 'import numpy as np\n'), ((1298, 1317), 'torch.randn', 'torch.randn', (['(5, 3)'], {}), '((5, 3))\n', (1309, 1317), False, 'import torch\n'), ((1512, 1535), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (1529, 1535), False, 'import torch\n'), ((1544, 1562), 'torch.randn', 'torch.randn', (['(5)', '(16)'], {}), '(5, 16)\n', (1555, 1562), False, 'import torch\n'), ((1624, 1648), 'pycox.models.utils.cumsum_reverse', 'cumsum_reverse', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (1638, 1648), False, 'from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse\n'), ((268, 284), 'torch.ones', 'torch.ones', (['(2)', '(1)'], {}), '(2, 1)\n', (278, 284), False, 'import torch\n'), ((482, 498), 'torch.ones', 'torch.ones', (['(2)', '(1)'], {}), '(2, 1)\n', (492, 498), False, 'import torch\n'), ((1327, 
1361), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (1340, 1361), False, 'import pytest\n'), ((1371, 1395), 'pycox.models.utils.cumsum_reverse', 'cumsum_reverse', (['x'], {'dim': '(0)'}), '(x, dim=0)\n', (1385, 1395), False, 'from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse\n'), ((1405, 1439), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (1418, 1439), False, 'import pytest\n'), ((1449, 1473), 'pycox.models.utils.cumsum_reverse', 'cumsum_reverse', (['x'], {'dim': '(2)'}), '(x, dim=2)\n', (1463, 1473), False, 'from pycox.models.utils import pad_col, make_subgrid, cumsum_reverse\n'), ((312, 338), 'torch.cat', 'torch.cat', (['[pad, x]'], {'dim': '(1)'}), '([pad, x], dim=1)\n', (321, 338), False, 'import torch\n'), ((526, 552), 'torch.cat', 'torch.cat', (['[x, pad]'], {'dim': '(1)'}), '([x, pad], dim=1)\n', (535, 552), False, 'import torch\n'), ((1210, 1237), 'numpy.abs', 'np.abs', (['(true_new - new_grid)'], {}), '(true_new - new_grid)\n', (1216, 1237), True, 'import numpy as np\n')]
|
# Created by Martin.cz
# Copyright (c) <NAME>. All rights reserved.
# import modules
import wx
import core
from .. import events
from .. import mwx
from .. ids import *
from .list_ctrl import AuthorsList
from .top_bar import AuthorsTopBar
from .bottom_bar import AuthorsBottomBar
from .edit_dlg import AuthorsEditDlg
class AuthorsView(wx.Dialog):
"""Authors view panel."""
def __init__(self, parent, library, query=""):
"""Initializes authors view panel."""
# init panel
wx.Dialog.__init__(self, parent, -1, size=(470, 400), title="Authors List", style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
self.Bind(wx.EVT_CLOSE, self._on_close)
# set library
self._library = library
# init buffs
self._authors = []
self._changed = False
# make UI
self._make_ui()
# show frame
self.Layout()
self.Centre(wx.BOTH)
self.Show(True)
# set min size
self.SetMinSize(self.GetSize())
# show authors
self.SetQuery(query)
self.ShowAuthors()
def SetLibrary(self, library=None):
"""Sets current library."""
self._library = library
self.ShowAuthors()
def SetQuery(self, value):
"""Sets query to top bar."""
self._top_bar.ChangeQuery(value)
def SetSelectedAuthors(self, authors):
"""Selects specified authors."""
self._list.SetSelectedAuthors(authors)
def ShowAuthors(self):
"""Shows authors according to current query."""
# get current query
query = self._top_bar.GetQuery()
# shows authors
self._show_authors(query)
def GetSelectedAuthors(self):
"""Gets list of selected authors."""
return self._list.GetSelectedAuthors()
def _on_close(self, evt=None):
"""Handles close event."""
if self._changed:
self.EndModal(wx.ID_OK)
else:
self.EndModal(wx.ID_CANCEL)
def _on_query_changed(self, evt=None):
"""Handles query changed event."""
self.ShowAuthors()
def _on_item_activated(self, evt=None):
"""Handles row activated event."""
self._on_edit()
def _on_item_context_menu(self, evt=None):
"""Handles article item context menu event."""
# check library
if self._library is None:
return
# get selected authors
authors = self._list.GetSelectedAuthors()
# init menu
menu = wx.Menu()
menu.Append(ID_AUTHORS_EDIT, "Edit...")
menu.Append(ID_AUTHORS_MERGE, "Merge Selected...")
menu.AppendSeparator()
menu.Append(ID_AUTHORS_DELETE, "Delete Orphans")
# enable items
menu.Enable(ID_AUTHORS_EDIT, len(authors) == 1)
menu.Enable(ID_AUTHORS_MERGE, len(authors) > 1)
menu.Enable(ID_AUTHORS_DELETE, all(x.count == 0 for x in authors))
# bind events
self.Bind(wx.EVT_MENU, self._on_edit, id=ID_AUTHORS_EDIT)
self.Bind(wx.EVT_MENU, self._on_merge, id=ID_AUTHORS_MERGE)
self.Bind(wx.EVT_MENU, self._on_delete, id=ID_AUTHORS_DELETE)
# show menu
self.PopupMenu(menu)
menu.Destroy()
def _on_edit(self, evt=None):
"""Handles edit event."""
# get selected authors
authors = self._list.GetSelectedAuthors()
if len(authors) != 1:
return
# get author
author = authors[0]
# raise edit dialog
dlg = AuthorsEditDlg(self, author, "edit")
response = dlg.ShowModal()
dlg.Destroy()
# check response
if response != wx.ID_OK:
return
# update library
self._library.update(author)
self._changed = True
# update authors
self.ShowAuthors()
self.SetSelectedAuthors([author])
def _on_merge(self, evt=None):
"""Handles merge event."""
# get selected authors
authors = self._list.GetSelectedAuthors()
if len(authors) < 2:
return
# get master
master = max(authors, key=lambda a: len(a.firstname))
# raise edit dialog
dlg = AuthorsEditDlg(self, master, "merge")
response = dlg.ShowModal()
dlg.Destroy()
# check response
if response != wx.ID_OK:
return
# update library
self._library.update(master)
self._changed = True
# merge authors
others = [a for a in authors if a.dbid != master.dbid]
self._library.merge(master, others)
# update authors
self.ShowAuthors()
self.SetSelectedAuthors([master])
def _on_delete(self, evt=None):
"""Handles delete event."""
# get selected authors
authors = self._list.GetSelectedAuthors()
authors = [a for a in authors if a.count == 0]
if len(authors) == 0:
return
# confirm delete
cancel_butt = mwx.DlgButton(wx.ID_CANCEL, "Cancel", size=(80,-1), default=False, space=15)
delete_butt = mwx.DlgButton(wx.ID_OK, "Delete", size=(80,-1), default=True, space=0)
dlg = mwx.MessageDlg(self,
id = -1,
title = "Delete Authors",
message = "Do you really want to permanently delete\nselected authors?",
details = "This operation cannot be undone.",
buttons = [cancel_butt, delete_butt])
response = dlg.ShowModal()
dlg.Destroy()
# quit if canceled
if response != wx.ID_OK:
return
# delete authors
for author in authors:
self._library.delete(author)
self._changed = True
# update authors
self.ShowAuthors()
def _make_ui(self):
"""Makes panel UI."""
# make items
self._top_bar = AuthorsTopBar(self)
self._list = AuthorsList(self)
self._bottom_bar = AuthorsBottomBar(self)
# bind events
self.Bind(events.EVT_AUTHORS_QUERY_CHANGED, self._on_query_changed)
self.Bind(events.EVT_AUTHORS_ITEM_ACTIVATED, self._on_item_activated)
self.Bind(events.EVT_AUTHORS_ITEM_CONTEXT_MENU, self._on_item_context_menu)
# pack UI
self.Sizer = wx.BoxSizer(wx.VERTICAL)
self.Sizer.Add(self._top_bar, 0, wx.EXPAND)
self.Sizer.Add(self._list, 1, wx.EXPAND)
self.Sizer.Add(self._bottom_bar, 0, wx.EXPAND)
def _show_authors(self, query="", order_by=lambda x:x.lastname, reverse=False):
"""Shows authors according to given query."""
self._authors = []
# set status
self._bottom_bar.SetLabel("Loading...")
wx.Yield()
# parse query
if not isinstance(query, core.Query):
query = core.Query(query, core.Author.NAME)
# get authors
if self._library is not None:
self._authors = self._library.search(query)
for item in self._authors:
item.count = self._library.count(item)
# sort authors
self._authors.sort(key=order_by, reverse=reverse)
# update list
self._list.SetAuthors(self._authors)
# update bottom bar
self._update_bottom_bar()
def _update_bottom_bar(self):
"""Updates status label in bottom bar."""
# get label
if not self._authors:
label = "0 authors found"
elif len(self._authors) == 1:
label = "1 author found"
else:
label = "%s authors found" % len(self._authors)
# set label
self._bottom_bar.SetLabel(label)
|
[
"wx.Dialog.__init__",
"wx.Menu",
"core.Query",
"wx.BoxSizer",
"wx.Yield"
] |
[((527, 656), 'wx.Dialog.__init__', 'wx.Dialog.__init__', (['self', 'parent', '(-1)'], {'size': '(470, 400)', 'title': '"""Authors List"""', 'style': '(wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)'}), "(self, parent, -1, size=(470, 400), title='Authors List',\n style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n", (545, 656), False, 'import wx\n'), ((2781, 2790), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (2788, 2790), False, 'import wx\n'), ((6807, 6831), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (6818, 6831), False, 'import wx\n'), ((7258, 7268), 'wx.Yield', 'wx.Yield', ([], {}), '()\n', (7266, 7268), False, 'import wx\n'), ((7366, 7401), 'core.Query', 'core.Query', (['query', 'core.Author.NAME'], {}), '(query, core.Author.NAME)\n', (7376, 7401), False, 'import core\n')]
|
#
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import numpy as np
from numba import cuda
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
train_clustering_model,
run_query,
convert_datestring_to_days,
)
from bdb_tools.readers import build_reader
from dask import delayed
# q25 parameters
Q25_DATE = "2002-01-02"
N_CLUSTERS = 8
CLUSTER_ITERATIONS = 20
N_ITER = 5
def read_tables(config):
table_reader = build_reader(
data_format=config["file_format"],
basepath=config["data_dir"],
split_row_groups=config["split_row_groups"],
)
ss_cols = ["ss_customer_sk", "ss_sold_date_sk", "ss_ticket_number", "ss_net_paid"]
ws_cols = [
"ws_bill_customer_sk",
"ws_sold_date_sk",
"ws_order_number",
"ws_net_paid",
]
datedim_cols = ["d_date_sk", "d_date"]
ss_ddf = table_reader.read("store_sales", relevant_cols=ss_cols, index=False)
ws_ddf = table_reader.read("web_sales", relevant_cols=ws_cols, index=False)
datedim_ddf = table_reader.read("date_dim", relevant_cols=datedim_cols, index=False)
return (ss_ddf, ws_ddf, datedim_ddf)
def agg_count_distinct(df, group_key, counted_key, client):
"""Returns a Series that is the result of counting distinct instances of 'counted_key' within each 'group_key'.
The series' index will have one entry per unique 'group_key' value.
Workaround for lack of nunique aggregate function on Dask df.
"""
    # Deduplicate via repartition/shuffle rather than a single drop_duplicates with split_out
unique_df = df[[group_key, counted_key]].map_partitions(
lambda df: df.drop_duplicates()
)
unique_df = unique_df.shuffle(on=[group_key])
unique_df = unique_df.map_partitions(lambda df: df.drop_duplicates())
return unique_df.groupby(group_key)[counted_key].count(split_every=2)
def get_clusters(client, ml_input_df):
import dask_cudf
ml_tasks = [
delayed(train_clustering_model)(df, N_CLUSTERS, CLUSTER_ITERATIONS, N_ITER)
for df in ml_input_df.to_delayed()
]
results_dict = client.compute(*ml_tasks, sync=True)
output = ml_input_df.index.to_frame().reset_index(drop=True)
labels_final = dask_cudf.from_cudf(
results_dict["cid_labels"], npartitions=output.npartitions
)
output["label"] = labels_final.reset_index()[0]
# Sort based on CDH6.1 q25-result formatting
output = output.sort_values(["cid"])
results_dict["cid_labels"] = output
return results_dict
def main(client, config):
import dask_cudf
ss_ddf, ws_ddf, datedim_ddf = benchmark(
read_tables,
config=config,
compute_result=config["get_read_time"],
dask_profile=config["dask_profile"],
)
datedim_ddf = datedim_ddf.map_partitions(convert_datestring_to_days)
min_date = np.datetime64(Q25_DATE, "D").astype(int)
# Filter by date
valid_dates_ddf = datedim_ddf[datedim_ddf["d_date"] > min_date].reset_index(
drop=True
)
f_ss_ddf = ss_ddf[ss_ddf["ss_customer_sk"].notnull()].reset_index(drop=True)
f_ws_ddf = ws_ddf[ws_ddf["ws_bill_customer_sk"].notnull()].reset_index(drop=True)
# Merge
ss_merged_df = f_ss_ddf.merge(
valid_dates_ddf, left_on="ss_sold_date_sk", right_on="d_date_sk", how="inner"
)
ws_merged_df = f_ws_ddf.merge(
valid_dates_ddf, left_on="ws_sold_date_sk", right_on="d_date_sk", how="inner"
)
# Roll up store sales
agg_store_sales_ddf = ss_merged_df.groupby("ss_customer_sk").agg(
{"ss_sold_date_sk": "max", "ss_net_paid": "sum"}
)
agg_store_sales_ddf["frequency"] = agg_count_distinct(
ss_merged_df, "ss_customer_sk", "ss_ticket_number", client=client
) # Simulate count distinct
# Same rollup, just different columns for web sales
agg_web_sales_ddf = ws_merged_df.groupby("ws_bill_customer_sk").agg(
{"ws_sold_date_sk": "max", "ws_net_paid": "sum"}
)
agg_web_sales_ddf["frequency"] = agg_count_distinct(
ws_merged_df, "ws_bill_customer_sk", "ws_order_number", client=client
) # Simulate count distinct
agg_store_sales_ddf = agg_store_sales_ddf.reset_index()
agg_web_sales_ddf = agg_web_sales_ddf.reset_index()
shared_columns = ["cid", "most_recent_date", "amount", "frequency"]
agg_store_sales_ddf.columns = shared_columns
agg_web_sales_ddf.columns = shared_columns
agg_sales_ddf = dask_cudf.concat([agg_store_sales_ddf, agg_web_sales_ddf])
cluster_input_ddf = agg_sales_ddf.groupby("cid").agg(
{"most_recent_date": "max", "frequency": "sum", "amount": "sum"}
)
cluster_input_ddf["recency"] = (37621 - cluster_input_ddf["most_recent_date"]) < 60
    # Reorder to match reference examples
cluster_input_ddf = cluster_input_ddf[["recency", "frequency", "amount"]]
# Prepare df for KMeans clustering
cluster_input_ddf["recency"] = cluster_input_ddf["recency"].astype("int64")
cluster_input_ddf["amount"] = cluster_input_ddf["amount"].astype("float64")
cluster_input_ddf = cluster_input_ddf.persist()
results_dict = get_clusters(client=client, ml_input_df=cluster_input_ddf)
return results_dict
if __name__ == "__main__":
from bdb_tools.cluster_startup import attach_to_cluster
import cudf
import dask_cudf
config = gpubdb_argparser()
client, bc = attach_to_cluster(config)
run_query(config=config, client=client, query_func=main)
|
[
"dask.delayed",
"dask_cudf.from_cudf",
"numpy.datetime64",
"bdb_tools.utils.run_query",
"bdb_tools.utils.gpubdb_argparser",
"bdb_tools.cluster_startup.attach_to_cluster",
"bdb_tools.utils.benchmark",
"bdb_tools.readers.build_reader",
"dask_cudf.concat"
] |
[((1004, 1129), 'bdb_tools.readers.build_reader', 'build_reader', ([], {'data_format': "config['file_format']", 'basepath': "config['data_dir']", 'split_row_groups': "config['split_row_groups']"}), "(data_format=config['file_format'], basepath=config['data_dir'],\n split_row_groups=config['split_row_groups'])\n", (1016, 1129), False, 'from bdb_tools.readers import build_reader\n'), ((2758, 2837), 'dask_cudf.from_cudf', 'dask_cudf.from_cudf', (["results_dict['cid_labels']"], {'npartitions': 'output.npartitions'}), "(results_dict['cid_labels'], npartitions=output.npartitions)\n", (2777, 2837), False, 'import dask_cudf\n'), ((3144, 3263), 'bdb_tools.utils.benchmark', 'benchmark', (['read_tables'], {'config': 'config', 'compute_result': "config['get_read_time']", 'dask_profile': "config['dask_profile']"}), "(read_tables, config=config, compute_result=config['get_read_time'\n ], dask_profile=config['dask_profile'])\n", (3153, 3263), False, 'from bdb_tools.utils import benchmark, gpubdb_argparser, train_clustering_model, run_query, convert_datestring_to_days\n'), ((4983, 5041), 'dask_cudf.concat', 'dask_cudf.concat', (['[agg_store_sales_ddf, agg_web_sales_ddf]'], {}), '([agg_store_sales_ddf, agg_web_sales_ddf])\n', (4999, 5041), False, 'import dask_cudf\n'), ((5888, 5906), 'bdb_tools.utils.gpubdb_argparser', 'gpubdb_argparser', ([], {}), '()\n', (5904, 5906), False, 'from bdb_tools.utils import benchmark, gpubdb_argparser, train_clustering_model, run_query, convert_datestring_to_days\n'), ((5924, 5949), 'bdb_tools.cluster_startup.attach_to_cluster', 'attach_to_cluster', (['config'], {}), '(config)\n', (5941, 5949), False, 'from bdb_tools.cluster_startup import attach_to_cluster\n'), ((5954, 6010), 'bdb_tools.utils.run_query', 'run_query', ([], {'config': 'config', 'client': 'client', 'query_func': 'main'}), '(config=config, client=client, query_func=main)\n', (5963, 6010), False, 'from bdb_tools.utils import benchmark, gpubdb_argparser, train_clustering_model, run_query, convert_datestring_to_days\n'), ((2491, 2522), 'dask.delayed', 'delayed', (['train_clustering_model'], {}), '(train_clustering_model)\n', (2498, 2522), False, 'from dask import delayed\n'), ((3386, 3414), 'numpy.datetime64', 'np.datetime64', (['Q25_DATE', '"""D"""'], {}), "(Q25_DATE, 'D')\n", (3399, 3414), True, 'import numpy as np\n')]
|
import json
from abc import ABC, abstractmethod
class State(ABC):
def __init__(self, state_name: str, state_type: str):
"""
Abstract class to describe the base for Steps.
:param state_name: step's symbolic name.
:param state_type: step's type.
"""
self._name: str = state_name
self._type: str = state_type
@abstractmethod
def state_as_map(self) -> {}:
"""
Return object as a map.
:return: dictionary containing aws-relevant json properties.
"""
data = {"Type": self._type}
return data
class StateMachine:
def __init__(self, states: [State], startAt: str):
"""
State machine definition.
:param states: array of states.
:param startAt: name of the starting state.
"""
if not states:
raise Exception("You should provide at least one state in the argument array.")
self._states = states
if not startAt:
raise Exception("You should provide a starting step as argument.")
self._startAt = startAt
def get_as_map(self) -> {}:
"""
Return object as a map.
:return: dictionary containing aws-relevant json properties.
"""
data = {}
data["StartAt"] = self._startAt
data["States"] = self.__states_as_map()
return data
def __states_as_map(self) -> {}:
"""
Convert all of the states into maps.
:return: map of states.
"""
states = {}
for state in self._states:
states[state._name] = state.state_as_map()
return states
class ParallelState(State):
def __init__(self, state_name: str, branches: [StateMachine], next_step: str):
"""
Responsible for parallel execution of its branches.
:param branches: array of branches to be executed in parallel
:param next_step: next step to be executed after all the branches finish
"""
super().__init__(state_name, "Parallel")
if not next_step:
raise Exception("You should provide a valid next step as argument.")
self._next = next_step
if len(branches) < 1:
raise Exception("You should provide at least one branch to the parallel task.")
self._branches = branches
def state_as_map(self) -> {}:
data = super().state_as_map()
data["Next"] = self._next
mapped_branches = []
for branch in self._branches:
mapped_branches.append(branch.get_as_map())
data["Branches"] = mapped_branches
return data
class WaitState(State):
def __init__(self, state_name: str, next_state: str, seconds: int):
"""
State used for waiting.
:param next_state: step to be executed after this one.
:param seconds: time in seconds to be spent waiting in this step.
"""
super().__init__(state_name, "Wait")
if not next_state:
raise Exception("You should provide a valid next step as argument.")
self._next = next_state
if seconds < 0:
raise Exception("Wait time cannot be less than 0 seconds")
self._seconds = seconds
def state_as_map(self) -> {}:
data = super().state_as_map()
data["Next"] = self._next
data["Seconds"] = self._seconds
return data
class SucceedState(State):
def __init__(self, state_name: str):
"""
Terminal state.
"""
super().__init__(state_name, "Succeed")
def state_as_map(self) -> {}:
return super().state_as_map()
class FailState(State):
def __init__(self, state_name: str, error: str, cause: str):
"""
Terminal state that fails current scope.
:param error: error name.
:param cause: human-readable message.
"""
super().__init__(state_name, "Fail")
self._error = error
self._cause = cause
def state_as_map(self) -> {}:
data = super().state_as_map()
data["Error"] = self._error
data["Cause"] = self._cause
return data
class TaskState(State):
def __init__(
self,
state_name: str,
lambda_arn: str,
next_step: str = "",
timeout: int = 60,
is_end_state: bool = False,
):
"""
Task state class.
:param timeout: if the step runs longer than timeout - state fails with States.Timeout.
        :param lambda_arn: arn of the lambda function to be executed within the step.
        :param next_step: next step to be executed after the current one.
:param is_end_state: if set to True, this is a terminal state.
"""
super().__init__(state_name, "Task")
self._resource: str = lambda_arn
self._next: str = next_step
self._end: bool = is_end_state
if timeout <= 0:
raise Exception("Timeout value should be a positive value.")
self._timeout = timeout
def state_as_map(self) -> {}:
data = super().state_as_map()
data["Resource"] = self._resource
if self._next:
data["Next"] = self._next
elif self._end:
data["End"] = self._end
else:
raise Exception("No next step has been specified, nor this is a terminal state.")
data["TimeoutSeconds"] = self._timeout
return data
class StepMachine(StateMachine):
def __init__(self, name: str, states: [State], startAt: str, comment: str = ""):
"""
:param name: step machine symbolic name.
:param comment: optional comment.
"""
super().__init__(states, startAt)
if len(name.strip()) < 2:
raise Exception("Step machine name should have at least two characters.")
self._name = name
self._comment = comment
self.state_machine_arn = None
def get_as_map(self) -> {}:
data = super().get_as_map()
if self._comment:
data["Comment"] = self._comment
return data
def get_as_aws_json(self) -> str:
"""
Convert object to json string.
:return: json-aws state machine definition.
"""
return json.dumps(self.get_as_map())
def execute(self, client, role_arn: str, state_input: str = None) -> str:
"""
Execute the state machine within the current class.
:param client: boto3.client object.
:param role_arn: arn of the role.
:param state_input: optional input.
:return: execution arn.
"""
if not self.state_machine_arn:
self.state_machine_arn = self.__create(client, role_arn)
try:
execution_response = client.start_execution(
stateMachineArn=self.state_machine_arn, input=json.dumps(state_input)
)
except Exception as ex:
print("error during execution - ", ex)
return ""
execution_arn = execution_response.get("executionArn")
return execution_arn
def __create(self, client, role_arn) -> str:
"""
Create the state machine, requires object state.
:param client: boto3.client object.
:param role_arn: arn of the role.
:return: arn of the created state machine.
"""
try:
response = client.create_state_machine(
                name=self._name, definition=self.get_as_aws_json(), roleArn=role_arn
)
self.state_machine_arn = response["stateMachineArn"]
except Exception as ex:
print("error: state machine not created - ", ex)
return ""
return response["stateMachineArn"]
def delete(self, client, sm_arn: str = None) -> str:
"""
Delete local step machine, if no arn is specified.
:param client: boto3.client object.
:param sm_arn: optional state machine arn, if not specified - objects sm is deleted.
:return:
"""
arn = sm_arn
if not arn:
if not self.state_machine_arn:
raise Exception("Argument arn is empty and object has no created state machine.")
arn = self.state_machine_arn
try:
response = client.delete_state_machine(stateMachineArn=arn)
return response
except Exception as ex:
print("error: state machine was not deleted - ", ex)
# Example of state json creation, delete for production
starting_state: TaskState = TaskState(
"state_1", "arn:aws:lambda:us-east-1:807794332178:function:ex_lmbd", "LookupCustomerInfo"
)
first_branch_state1 = TaskState(
"LookupAddress", "arn:aws:lambda:us-east-1:807794332178:function:ex_lmbd", is_end_state=True
)
second_branch_state1 = TaskState(
"LookupPhone",
"arn:aws:lambda:us-east-1:807794332178:function:ex_lmbd",
next_step="LookupPhone2",
)
second_branch_state2 = TaskState(
"LookupPhone2", "arn:aws:lambda:us-east-1:807794332178:function:ex_lmbd", is_end_state=True
)
first_branch: StateMachine = StateMachine([first_branch_state1], "LookupAddress")
second_branch: StateMachine = StateMachine([second_branch_state1, second_branch_state2], "LookupPhone")
parallel_state: ParallelState = ParallelState(
"LookupCustomerInfo", [first_branch, second_branch], "NextState"
)
wait_state: WaitState = WaitState("NextState", "Last", 5)
fail_finish_state: FailState = FailState("Last", "TestException", "Human-readable")
sample_machine: StepMachine = StepMachine(
name="test",
comment="Simple step pipeline for test",
startAt="state_1",
states=[starting_state, parallel_state, wait_state, fail_finish_state],
)
print(sample_machine.get_as_aws_json())
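# A sketch of actually creating and running the machine (requires boto3; the role ARN below is a placeholder):
# import boto3
# sfn_client = boto3.client("stepfunctions")
# execution_arn = sample_machine.execute(sfn_client, role_arn="arn:aws:iam::123456789012:role/StepFnRole")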
# End of example
|
[
"json.dumps"
] |
[((6895, 6918), 'json.dumps', 'json.dumps', (['state_input'], {}), '(state_input)\n', (6905, 6918), False, 'import json\n')]
|
# Copyright (c) 2017 <NAME>.
# Cura is released under the terms of the LGPLv3 or higher.
from collections import defaultdict
import threading
from typing import Any, Dict, Optional
from PyQt5.QtCore import pyqtProperty
from UM.Application import Application
from UM.Decorators import override
from UM.MimeTypeDatabase import MimeType, MimeTypeDatabase
from UM.Settings.ContainerStack import ContainerStack
from UM.Settings.SettingInstance import InstanceState
from UM.Settings.ContainerRegistry import ContainerRegistry
from UM.Settings.Interfaces import PropertyEvaluationContext
from UM.Logger import Logger
from . import Exceptions
from .CuraContainerStack import CuraContainerStack
## Represents the Global or Machine stack and its related containers.
#
class GlobalStack(CuraContainerStack):
def __init__(self, container_id: str, *args, **kwargs):
super().__init__(container_id, *args, **kwargs)
self.addMetaDataEntry("type", "machine") # For backward compatibility
self._extruders = {} # type: Dict[str, "ExtruderStack"]
# This property is used to track which settings we are calculating the "resolve" for
# and if so, to bypass the resolve to prevent an infinite recursion that would occur
# if the resolve function tried to access the same property it is a resolve for.
# Per thread we have our own resolving_settings, or strange things sometimes occur.
self._resolving_settings = defaultdict(set) # keys are thread names
## Get the list of extruders of this stack.
#
# \return The extruders registered with this stack.
@pyqtProperty("QVariantMap")
def extruders(self) -> Dict[str, "ExtruderStack"]:
return self._extruders
@classmethod
def getLoadingPriority(cls) -> int:
return 2
@classmethod
def getConfigurationTypeFromSerialized(cls, serialized: str) -> Optional[str]:
configuration_type = super().getConfigurationTypeFromSerialized(serialized)
if configuration_type == "machine":
return "machine_stack"
return configuration_type
## Add an extruder to the list of extruders of this stack.
#
# \param extruder The extruder to add.
#
# \throws Exceptions.TooManyExtrudersError Raised when trying to add an extruder while we
# already have the maximum number of extruders.
def addExtruder(self, extruder: ContainerStack) -> None:
position = extruder.getMetaDataEntry("position")
if position is None:
Logger.log("w", "No position defined for extruder {extruder}, cannot add it to stack {stack}", extruder = extruder.id, stack = self.id)
return
if any(item.getId() == extruder.id for item in self._extruders.values()):
Logger.log("w", "Extruder [%s] has already been added to this stack [%s]", extruder.id, self.getId())
return
self._extruders[position] = extruder
Logger.log("i", "Extruder[%s] added to [%s] at position [%s]", extruder.id, self.id, position)
## Overridden from ContainerStack
#
# This will return the value of the specified property for the specified setting,
# unless the property is "value" and that setting has a "resolve" function set.
# When a resolve is set, it will instead try and execute the resolve first and
# then fall back to the normal "value" property.
#
# \param key The setting key to get the property of.
# \param property_name The property to get the value of.
#
# \return The value of the property for the specified setting, or None if not found.
@override(ContainerStack)
def getProperty(self, key: str, property_name: str, context: Optional[PropertyEvaluationContext] = None) -> Any:
if not self.definition.findDefinitions(key = key):
return None
if context is None:
context = PropertyEvaluationContext()
context.pushContainer(self)
# Handle the "resolve" property.
if self._shouldResolve(key, property_name, context):
current_thread = threading.current_thread()
self._resolving_settings[current_thread.name].add(key)
resolve = super().getProperty(key, "resolve", context)
self._resolving_settings[current_thread.name].remove(key)
if resolve is not None:
return resolve
# Handle the "limit_to_extruder" property.
limit_to_extruder = super().getProperty(key, "limit_to_extruder", context)
if limit_to_extruder is not None:
if limit_to_extruder == -1:
limit_to_extruder = int(Application.getInstance().getMachineManager().defaultExtruderPosition)
limit_to_extruder = str(limit_to_extruder)
if limit_to_extruder is not None and limit_to_extruder != "-1" and limit_to_extruder in self._extruders:
if super().getProperty(key, "settable_per_extruder", context):
result = self._extruders[str(limit_to_extruder)].getProperty(key, property_name, context)
if result is not None:
context.popContainer()
return result
else:
Logger.log("e", "Setting {setting} has limit_to_extruder but is not settable per extruder!", setting = key)
result = super().getProperty(key, property_name, context)
context.popContainer()
return result
## Overridden from ContainerStack
#
# This will simply raise an exception since the Global stack cannot have a next stack.
@override(ContainerStack)
def setNextStack(self, next_stack: ContainerStack) -> None:
raise Exceptions.InvalidOperationError("Global stack cannot have a next stack!")
# protected:
# Determine whether or not we should try to get the "resolve" property instead of the
# requested property.
def _shouldResolve(self, key: str, property_name: str, context: Optional[PropertyEvaluationContext] = None) -> bool:
if property_name is not "value":
# Do not try to resolve anything but the "value" property
return False
current_thread = threading.current_thread()
if key in self._resolving_settings[current_thread.name]:
# To prevent infinite recursion, if getProperty is called with the same key as
# we are already trying to resolve, we should not try to resolve again. Since
# this can happen multiple times when trying to resolve a value, we need to
# track all settings that are being resolved.
return False
setting_state = super().getProperty(key, "state", context = context)
if setting_state is not None and setting_state != InstanceState.Default:
# When the user has explicitly set a value, we should ignore any resolve and
# just return that value.
return False
return True
## private:
global_stack_mime = MimeType(
name = "application/x-cura-globalstack",
comment = "Cura Global Stack",
suffixes = ["global.cfg"]
)
MimeTypeDatabase.addMimeType(global_stack_mime)
ContainerRegistry.addContainerTypeByName(GlobalStack, "global_stack", global_stack_mime.name)
|
[
"UM.MimeTypeDatabase.MimeType",
"UM.MimeTypeDatabase.MimeTypeDatabase.addMimeType",
"UM.Logger.Logger.log",
"UM.Application.Application.getInstance",
"collections.defaultdict",
"UM.Decorators.override",
"PyQt5.QtCore.pyqtProperty",
"UM.Settings.Interfaces.PropertyEvaluationContext",
"threading.current_thread",
"UM.Settings.ContainerRegistry.ContainerRegistry.addContainerTypeByName"
] |
[((7074, 7179), 'UM.MimeTypeDatabase.MimeType', 'MimeType', ([], {'name': '"""application/x-cura-globalstack"""', 'comment': '"""Cura Global Stack"""', 'suffixes': "['global.cfg']"}), "(name='application/x-cura-globalstack', comment='Cura Global Stack',\n suffixes=['global.cfg'])\n", (7082, 7179), False, 'from UM.MimeTypeDatabase import MimeType, MimeTypeDatabase\n'), ((7197, 7244), 'UM.MimeTypeDatabase.MimeTypeDatabase.addMimeType', 'MimeTypeDatabase.addMimeType', (['global_stack_mime'], {}), '(global_stack_mime)\n', (7225, 7244), False, 'from UM.MimeTypeDatabase import MimeType, MimeTypeDatabase\n'), ((7245, 7342), 'UM.Settings.ContainerRegistry.ContainerRegistry.addContainerTypeByName', 'ContainerRegistry.addContainerTypeByName', (['GlobalStack', '"""global_stack"""', 'global_stack_mime.name'], {}), "(GlobalStack, 'global_stack',\n global_stack_mime.name)\n", (7285, 7342), False, 'from UM.Settings.ContainerRegistry import ContainerRegistry\n'), ((1630, 1657), 'PyQt5.QtCore.pyqtProperty', 'pyqtProperty', (['"""QVariantMap"""'], {}), "('QVariantMap')\n", (1642, 1657), False, 'from PyQt5.QtCore import pyqtProperty\n'), ((3699, 3723), 'UM.Decorators.override', 'override', (['ContainerStack'], {}), '(ContainerStack)\n', (3707, 3723), False, 'from UM.Decorators import override\n'), ((5668, 5692), 'UM.Decorators.override', 'override', (['ContainerStack'], {}), '(ContainerStack)\n', (5676, 5692), False, 'from UM.Decorators import override\n'), ((1469, 1485), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (1480, 1485), False, 'from collections import defaultdict\n'), ((3014, 3112), 'UM.Logger.Logger.log', 'Logger.log', (['"""i"""', '"""Extruder[%s] added to [%s] at position [%s]"""', 'extruder.id', 'self.id', 'position'], {}), "('i', 'Extruder[%s] added to [%s] at position [%s]', extruder.id,\n self.id, position)\n", (3024, 3112), False, 'from UM.Logger import Logger\n'), ((6264, 6290), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (6288, 6290), False, 'import threading\n'), ((2589, 2729), 'UM.Logger.Logger.log', 'Logger.log', (['"""w"""', '"""No position defined for extruder {extruder}, cannot add it to stack {stack}"""'], {'extruder': 'extruder.id', 'stack': 'self.id'}), "('w',\n 'No position defined for extruder {extruder}, cannot add it to stack {stack}'\n , extruder=extruder.id, stack=self.id)\n", (2599, 2729), False, 'from UM.Logger import Logger\n'), ((3975, 4002), 'UM.Settings.Interfaces.PropertyEvaluationContext', 'PropertyEvaluationContext', ([], {}), '()\n', (4000, 4002), False, 'from UM.Settings.Interfaces import PropertyEvaluationContext\n'), ((4171, 4197), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (4195, 4197), False, 'import threading\n'), ((5296, 5410), 'UM.Logger.Logger.log', 'Logger.log', (['"""e"""', '"""Setting {setting} has limit_to_extruder but is not settable per extruder!"""'], {'setting': 'key'}), "('e',\n 'Setting {setting} has limit_to_extruder but is not settable per extruder!'\n , setting=key)\n", (5306, 5410), False, 'from UM.Logger import Logger\n'), ((4726, 4751), 'UM.Application.Application.getInstance', 'Application.getInstance', ([], {}), '()\n', (4749, 4751), False, 'from UM.Application import Application\n')]
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard Summary Writer for TensorFlow Eager Execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid
from tensorflow.contrib.summary import gen_summary_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_op_util
from tensorflow.python.ops import variable_scope
def _maybe_cpu(v):
if isinstance(v, (ops.EagerTensor, ops.Tensor)):
return v.cpu()
else:
return v
def _summary_writer_function(name, tensor, function, family=None):
def record():
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
function(tag, scope)
return True
return record
class SummaryWriter(object):
"""Writes summaries for TensorBoard, compatible with eager execution.
This class is the supported way of writing TensorBoard summaries under
eager execution.
"""
_CPU_DEVICE = "cpu:0"
def __init__(self,
logdir,
max_queue=10,
flush_secs=120,
filename_suffix=""):
"""Summary writer for TensorBoard, compatible with eager execution.
If necessary, multiple instances of `SummaryWriter` can be created, with
distinct `logdir`s and `name`s. Each `SummaryWriter` instance will retain
its independent `global_step` counter and data writing destination.
Example:
```python
writer = tfe.SummaryWriter("my_model")
# ... Code that sets up the model and data batches ...
for _ in xrange(train_iters):
loss = model.train_batch(batch)
writer.scalar("loss", loss)
writer.step()
```
Args:
logdir: Directory in which summary files will be written.
max_queue: Number of summary items to buffer before flushing to
filesystem. If 0, summaries will be flushed immediately.
      flush_secs: Number of seconds between forced commits to disk.
filename_suffix: Suffix of the event protobuf files in which the summary
data are stored.
Raises:
ValueError: If this constructor is called not under eager execution.
"""
# TODO(apassos, ashankar): Make this class and the underlying
# contrib.summary_ops compatible with graph model and remove this check.
if not context.in_eager_mode():
raise ValueError(
"Use of SummaryWriter is currently supported only with eager "
"execution enabled. File an issue at "
"https://github.com/tensorflow/tensorflow/issues/new to express "
"interest in fixing this.")
# TODO(cais): Consider adding name keyword argument, which if None or empty,
# will register the global global_step that training_util.get_global_step()
# can find.
with context.device(self._CPU_DEVICE):
self._name = uuid.uuid4().hex
self._global_step = 0
self._global_step_tensor = variable_scope.get_variable(
"global_step/summary_writer/" + self._name,
shape=[], dtype=dtypes.int64,
initializer=init_ops.zeros_initializer())
self._global_step_dirty = False
self._resource = gen_summary_ops.summary_writer(shared_name=self._name)
gen_summary_ops.create_summary_file_writer(
self._resource, logdir, max_queue, flush_secs, filename_suffix)
# Delete the resource when this object is deleted
self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
handle=self._resource, handle_device=self._CPU_DEVICE)
def step(self):
"""Increment the global step counter of this SummaryWriter instance."""
self._global_step += 1
self._global_step_dirty = True
@property
def global_step(self):
"""Obtain the current global_step value of this SummaryWriter instance.
Returns:
An `int` representing the current value of the global_step of this
`SummaryWriter` instance.
"""
return self._global_step
def _update_global_step_tensor(self):
with context.device(self._CPU_DEVICE):
if self._global_step_dirty:
self._global_step_dirty = False
return state_ops.assign(self._global_step_tensor, self._global_step)
else:
return self._global_step_tensor
def generic(self, name, tensor, metadata, family=None):
"""Write a generic-type summary.
Args:
name: A name for the generated node. Will also serve as the series name in
TensorBoard.
tensor: A `Tensor` or compatible value type containing the value of the
summary.
metadata: Metadata about the summary.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
"""
with context.device(self._CPU_DEVICE):
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
gen_summary_ops.write_summary(
self._resource,
self._update_global_step_tensor(),
_maybe_cpu(tensor),
tag,
_maybe_cpu(metadata),
name=scope)
def scalar(self, name, tensor, family=None):
"""Write a scalar summary.
Args:
name: A name for the generated node. Will also serve as the series name in
TensorBoard.
tensor: A real numeric `Tensor` or compatible value type containing a
single value.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A summary writer function for scalars.
"""
with context.device(self._CPU_DEVICE):
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
gen_summary_ops.write_scalar_summary(
self._resource, self._update_global_step_tensor(),
tag, _maybe_cpu(tensor), name=scope)
def histogram(self, name, tensor, family=None):
"""Write a histogram summary.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A real numeric `Tensor` or compatible value type. Any shape.
Values to use to build the histogram.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
"""
with context.device(self._CPU_DEVICE):
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
gen_summary_ops.write_histogram_summary(
self._resource, self._update_global_step_tensor(),
tag, _maybe_cpu(tensor), name=scope)
def image(self, name, tensor, bad_color=None, max_images=3, family=None):
"""Write an image summary."""
with context.device(self._CPU_DEVICE):
      if bad_color is None:
        bad_color_ = constant_op.constant([255, 0, 0, 255], dtype=dtypes.uint8)
      else:
        bad_color_ = bad_color  # use the caller-provided bad-pixel color
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
gen_summary_ops.write_image_summary(
self._resource, self._update_global_step_tensor(),
tag, _maybe_cpu(tensor), bad_color_, max_images,
name=scope)
def audio(self, name, tensor, sample_rate, max_outputs, family=None):
"""Write an audio summary.
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`, or
compatible value type.
sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
"""
with context.device(self._CPU_DEVICE):
with summary_op_util.summary_scope(
name, family, values=[tensor]) as (tag, scope):
gen_summary_ops.write_audio_summary(
self._resource, self._update_global_step_tensor(),
tag,
_maybe_cpu(tensor),
sample_rate=_maybe_cpu(sample_rate),
max_outputs=max_outputs,
name=scope)
|
[
"tensorflow.python.ops.summary_op_util.summary_scope",
"uuid.uuid4",
"tensorflow.python.eager.context.in_eager_mode",
"tensorflow.python.eager.context.device",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.state_ops.assign",
"tensorflow.python.ops.resource_variable_ops.EagerResourceDeleter",
"tensorflow.contrib.summary.gen_summary_ops.summary_writer",
"tensorflow.python.ops.init_ops.zeros_initializer",
"tensorflow.contrib.summary.gen_summary_ops.create_summary_file_writer"
] |
[((1568, 1628), 'tensorflow.python.ops.summary_op_util.summary_scope', 'summary_op_util.summary_scope', (['name', 'family'], {'values': '[tensor]'}), '(name, family, values=[tensor])\n', (1597, 1628), False, 'from tensorflow.python.ops import summary_op_util\n'), ((3281, 3304), 'tensorflow.python.eager.context.in_eager_mode', 'context.in_eager_mode', ([], {}), '()\n', (3302, 3304), False, 'from tensorflow.python.eager import context\n'), ((3753, 3785), 'tensorflow.python.eager.context.device', 'context.device', (['self._CPU_DEVICE'], {}), '(self._CPU_DEVICE)\n', (3767, 3785), False, 'from tensorflow.python.eager import context\n'), ((4120, 4174), 'tensorflow.contrib.summary.gen_summary_ops.summary_writer', 'gen_summary_ops.summary_writer', ([], {'shared_name': 'self._name'}), '(shared_name=self._name)\n', (4150, 4174), False, 'from tensorflow.contrib.summary import gen_summary_ops\n'), ((4181, 4291), 'tensorflow.contrib.summary.gen_summary_ops.create_summary_file_writer', 'gen_summary_ops.create_summary_file_writer', (['self._resource', 'logdir', 'max_queue', 'flush_secs', 'filename_suffix'], {}), '(self._resource, logdir,\n max_queue, flush_secs, filename_suffix)\n', (4223, 4291), False, 'from tensorflow.contrib.summary import gen_summary_ops\n'), ((4386, 4487), 'tensorflow.python.ops.resource_variable_ops.EagerResourceDeleter', 'resource_variable_ops.EagerResourceDeleter', ([], {'handle': 'self._resource', 'handle_device': 'self._CPU_DEVICE'}), '(handle=self._resource,\n handle_device=self._CPU_DEVICE)\n', (4428, 4487), False, 'from tensorflow.python.ops import resource_variable_ops\n'), ((4973, 5005), 'tensorflow.python.eager.context.device', 'context.device', (['self._CPU_DEVICE'], {}), '(self._CPU_DEVICE)\n', (4987, 5005), False, 'from tensorflow.python.eager import context\n'), ((5725, 5757), 'tensorflow.python.eager.context.device', 'context.device', (['self._CPU_DEVICE'], {}), '(self._CPU_DEVICE)\n', (5739, 5757), False, 'from tensorflow.python.eager import context\n'), ((6596, 6628), 'tensorflow.python.eager.context.device', 'context.device', (['self._CPU_DEVICE'], {}), '(self._CPU_DEVICE)\n', (6610, 6628), False, 'from tensorflow.python.eager import context\n'), ((7372, 7404), 'tensorflow.python.eager.context.device', 'context.device', (['self._CPU_DEVICE'], {}), '(self._CPU_DEVICE)\n', (7386, 7404), False, 'from tensorflow.python.eager import context\n'), ((7787, 7819), 'tensorflow.python.eager.context.device', 'context.device', (['self._CPU_DEVICE'], {}), '(self._CPU_DEVICE)\n', (7801, 7819), False, 'from tensorflow.python.eager import context\n'), ((8965, 8997), 'tensorflow.python.eager.context.device', 'context.device', (['self._CPU_DEVICE'], {}), '(self._CPU_DEVICE)\n', (8979, 8997), False, 'from tensorflow.python.eager import context\n'), ((3806, 3818), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3816, 3818), False, 'import uuid\n'), ((5096, 5157), 'tensorflow.python.ops.state_ops.assign', 'state_ops.assign', (['self._global_step_tensor', 'self._global_step'], {}), '(self._global_step_tensor, self._global_step)\n', (5112, 5157), False, 'from tensorflow.python.ops import state_ops\n'), ((5770, 5830), 'tensorflow.python.ops.summary_op_util.summary_scope', 'summary_op_util.summary_scope', (['name', 'family'], {'values': '[tensor]'}), '(name, family, values=[tensor])\n', (5799, 5830), False, 'from tensorflow.python.ops import summary_op_util\n'), ((6641, 6701), 'tensorflow.python.ops.summary_op_util.summary_scope', 'summary_op_util.summary_scope', (['name', 'family'], {'values': '[tensor]'}), '(name, family, values=[tensor])\n', (6670, 6701), False, 'from tensorflow.python.ops import summary_op_util\n'), ((7417, 7477), 'tensorflow.python.ops.summary_op_util.summary_scope', 'summary_op_util.summary_scope', (['name', 'family'], {'values': '[tensor]'}), '(name, family, values=[tensor])\n', (7446, 7477), False, 'from tensorflow.python.ops import summary_op_util\n'), ((7870, 7928), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[255, 0, 0, 255]'], {'dtype': 'dtypes.uint8'}), '([255, 0, 0, 255], dtype=dtypes.uint8)\n', (7890, 7928), False, 'from tensorflow.python.framework import constant_op\n'), ((7940, 8000), 'tensorflow.python.ops.summary_op_util.summary_scope', 'summary_op_util.summary_scope', (['name', 'family'], {'values': '[tensor]'}), '(name, family, values=[tensor])\n', (7969, 8000), False, 'from tensorflow.python.ops import summary_op_util\n'), ((9010, 9070), 'tensorflow.python.ops.summary_op_util.summary_scope', 'summary_op_util.summary_scope', (['name', 'family'], {'values': '[tensor]'}), '(name, family, values=[tensor])\n', (9039, 9070), False, 'from tensorflow.python.ops import summary_op_util\n'), ((4029, 4057), 'tensorflow.python.ops.init_ops.zeros_initializer', 'init_ops.zeros_initializer', ([], {}), '()\n', (4055, 4057), False, 'from tensorflow.python.ops import init_ops\n')]
|
from web3 import Web3
import json
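# Connect to a local Ethereum JSON-RPC node at 127.0.0.1:8545 (e.g. a Ganache/geth dev node)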
w3 = Web3(Web3.HTTPProvider("HTTP://127.0.0.1:8545"))
with open('../conf/contract/build/ERC721Metadata.abi', 'r') as myfile:
abi = myfile.read()
with open('../conf/contract/build/ERC721Metadata.bin', 'r') as myfile:
binfile = myfile.read()
account = w3.eth.accounts[0]
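# Deploy the ERC721Metadata contract ("Sofie Access Token", symbol "SAT") from the first account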
PDSContract = w3.eth.contract(abi=abi, bytecode=binfile)
tx_hash = PDSContract.constructor("Sofie Access Token", "SAT").transact({'from': account})
tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)
address = tx_receipt.contractAddress
print(address)
PDSContract_instance = w3.eth.contract(abi=abi, address=address)
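# Mint token id 1234 with a metadata URI to a test address, then read the URI back via getTokenURI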
tx_hash = PDSContract_instance.functions.mint('0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1', 1234, "metadata").transact({'from': account})
w3.eth.waitForTransactionReceipt(tx_hash)
metadata = PDSContract_instance.functions.getTokenURI(1234).call()
print(metadata)
|
[
"web3.Web3.HTTPProvider"
] |
[((45, 87), 'web3.Web3.HTTPProvider', 'Web3.HTTPProvider', (['"""HTTP://127.0.0.1:8545"""'], {}), "('HTTP://127.0.0.1:8545')\n", (62, 87), False, 'from web3 import Web3\n')]
|
# Copyright (C) 2016 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# ==============================================================================
# Based on original Work Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common image reprocess functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
try:
import cv2
has_cv2 = True
except ImportError:
has_cv2 = False
try:
from skimage import transform
has_skimage = True
except ImportError:
has_skimage = False
# FIXME these decentralized flags get REALLY annoying with module import conflicts, in common spot for now
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('image_size', 299,
"""Provide square images of this size.""")
tf.app.flags.DEFINE_float('image_aspect', 0.0,
"""Aspect ratio based sizing, square image_size*image_size if 0""")
tf.app.flags.DEFINE_string('image_norm', 'default',
"""Either 'caffe' BGR [0,255], 'caffe_rgb' [0, 255],
'frame' per-frame standardize, 'global' standardize, 'default' [-1, 1].""")
tf.app.flags.DEFINE_string('image_fmt', 'jpg',
"""Either 'jpg', 'png', or 'gif'""")
IMAGENET_MEAN_CAFFE = [103.939, 116.779, 123.68]
IMAGENET_MEAN_STD = [
[0.485, 0.456, 0.406], # mean
[0.229, 0.224, 0.225], # std
]
def image_normalize(
image,
method='global',
global_mean_std=IMAGENET_MEAN_STD,
caffe_mean=IMAGENET_MEAN_CAFFE):
"""
Args:
        image: 3-D float image Tensor with values in [0, 1).
        method: normalization method; one of 'caffe'/'caffe_bgr', 'caffe_rgb', 'frame', 'global', or 'default'.
        global_mean_std: per-channel RGB [mean, std] pair used by the 'global' method.
        caffe_mean: per-channel BGR mean used by the 'caffe' methods.
    Returns:
        Normalized image Tensor.
"""
if method == 'caffe' or method == 'caffe_bgr':
print('Caffe BGR normalize', image.get_shape())
# Rescale to [0, 255]
image = tf.mul(image, 255.0)
# Convert RGB to BGR
red, green, blue = tf.split(2, 3, image)
image = tf.concat(2, [blue, green, red])
tf.sub(image, caffe_mean)
elif method == 'caffe_rgb':
print('Caffe RGB normalize', image.get_shape())
# Rescale to [0, 255]
image = tf.mul(image, 255.0)
caffe_mean_rgb = tf.gather(caffe_mean, [2, 1, 0])
image = tf.sub(image, caffe_mean_rgb)
elif method == 'frame':
print("Per-frame standardize", image.get_shape())
mean, var = tf.nn.moments(image, axes=[0, 1], shift=0.3)
std = tf.sqrt(tf.add(var, .001))
image = tf.sub(image, mean)
image = tf.div(image, std)
elif method == 'global':
print('Global standardize', image.get_shape())
image = tf.sub(image, global_mean_std[0])
image = tf.div(image, global_mean_std[1])
else:
assert method == 'default'
print('Default normalize [-1, 1]', image.get_shape())
# Rescale to [-1,1] instead of [0, 1)
image = tf.sub(image, 0.5)
image = tf.mul(image, 2.0)
return image
def decode_compressed_image(image_buffer, image_fmt='jpg', depth=3, scope=None):
"""Decode a JPEG string into one 3-D float image Tensor.
Args:
image_buffer: scalar string Tensor.
scope: Optional scope for op_scope.
Returns:
3-D float Tensor with values ranging from [0, 1).
"""
with tf.name_scope(scope, 'decode_image', [image_buffer]):
# Decode the string as an RGB JPEG.
# Note that the resulting image contains an unknown height and width
# that is set dynamically by decode_jpeg. In other words, the height
# and width of image is unknown at compile-time.
image_fmt = str.lower(image_fmt)
if image_fmt == 'png':
image = tf.image.decode_png(image_buffer, channels=depth)
elif image_fmt == 'gif':
assert depth == 3
image = tf.image.decode_gif(image_buffer)
else:
assert image_fmt == 'jpg' or image_fmt == 'jpeg'
image = tf.image.decode_jpeg(image_buffer, channels=depth)
# After this point, all image pixels reside in [0,1)
# until the very end, when they're rescaled to (-1, 1). The various
# adjust_* ops all require this range for dtype float.
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
return image
def distort_color(image, hue_delta=0.2, thread_id=0, scope=None):
"""Distort the color of the image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather then adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: Tensor containing single image.
hue_delta: range for random hue shift
thread_id: preprocessing thread ID.
scope: Optional scope for op_scope.
Returns:
color-distorted image
"""
with tf.name_scope(scope, 'distort_color', [image]):
color_ordering = thread_id % 2
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=hue_delta)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=hue_delta)
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image
def distort_affine_cv2(image, alpha_affine=10, random_state=None):
if random_state is None:
random_state = np.random.RandomState(None)
shape = image.shape
shape_size = shape[:2]
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32([
center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size])
pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
M = cv2.getAffineTransform(pts1, pts2)
distorted_image = cv2.warpAffine(
image, M, shape_size[::-1], borderMode=cv2.BORDER_REPLICATE) #cv2.BORDER_REFLECT_101)
return distorted_image
def distort_affine_skimage(image, rotation=10.0, shear=5.0, random_state=None):
if random_state is None:
random_state = np.random.RandomState(None)
rot = np.deg2rad(np.random.uniform(-rotation, rotation))
sheer = np.deg2rad(np.random.uniform(-shear, shear))
shape = image.shape
shape_size = shape[:2]
center = np.float32(shape_size) / 2. - 0.5
pre = transform.SimilarityTransform(translation=-center)
affine = transform.AffineTransform(rotation=rot, shear=sheer, translation=center)
tform = pre + affine
distorted_image = transform.warp(image, tform.params, mode='reflect')
return distorted_image.astype(np.float32)
def distort_elastic_cv2(image, alpha=80, sigma=20, random_state=None):
"""Elastic deformation of images as per [Simard2003].
"""
if random_state is None:
random_state = np.random.RandomState(None)
shape_size = image.shape[:2]
# Downscaling the random grid and then upsizing post filter
# improves performance. Approx 3x for scale of 4, diminishing returns after.
grid_scale = 4
alpha //= grid_scale # Does scaling these make sense? seems to provide
sigma //= grid_scale # more similar end result when scaling grid used.
grid_shape = (shape_size[0]//grid_scale, shape_size[1]//grid_scale)
blur_size = int(4 * sigma) | 1
rand_x = cv2.GaussianBlur(
(random_state.rand(*grid_shape) * 2 - 1).astype(np.float32),
ksize=(blur_size, blur_size), sigmaX=sigma) * alpha
rand_y = cv2.GaussianBlur(
(random_state.rand(*grid_shape) * 2 - 1).astype(np.float32),
ksize=(blur_size, blur_size), sigmaX=sigma) * alpha
if grid_scale > 1:
rand_x = cv2.resize(rand_x, shape_size[::-1])
rand_y = cv2.resize(rand_y, shape_size[::-1])
grid_x, grid_y = np.meshgrid(np.arange(shape_size[1]), np.arange(shape_size[0]))
grid_x = (grid_x + rand_x).astype(np.float32)
grid_y = (grid_y + rand_y).astype(np.float32)
distorted_img = cv2.remap(image, grid_x, grid_y,
borderMode=cv2.BORDER_REFLECT_101, interpolation=cv2.INTER_LINEAR)
return distorted_img
distort_params_default = {
'h_flip': True,
'v_flip': False,
'elastic_distortion': False,
'affine_distortion': True,
'aspect_ratio_range': [0.67, 1.33],
'area_range': [0.1, 1.0],
'min_object_covered': 0.1,
'hue_delta': 0.2,
'rotation_range': 10.0,
'shear_range': 5.0,
}
def process_for_train(
image,
height,
width,
bbox=None,
params=distort_params_default,
thread_id=0,
summary_suffix='',
scope=None):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
    of the image that do not affect the label.
Args:
image: 3-D float Tensor of image
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
thread_id: integer indicating the preprocessing thread.
      params: distortion parameters dictionary for configuring distortions
scope: Optional scope for op_scope.
Returns:
3-D float Tensor of distorted image used for training.
"""
with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# Display the bounding box in the first thread only.
if not thread_id:
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox)
tf.summary.image('image_with_bounding_boxes%s' % summary_suffix, image_with_box)
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an allowed
# range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=params['min_object_covered'],
aspect_ratio_range=params['aspect_ratio_range'],
area_range=params['area_range'],
max_attempts=100,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
if not thread_id:
image_with_distorted_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), distort_bbox)
tf.summary.image('images_with_distorted_bounding_box%s' % summary_suffix, image_with_distorted_box)
if params['affine_distortion']:
rotation_range = params['rotation_range']
shear_range = params['shear_range']
if has_skimage:
image = tf.py_func(distort_affine_skimage, [image, rotation_range, shear_range], [tf.float32])[0]
#elif has_cv2:
# image = tf.py_func(distort_affine_cv2, [image, angle_range], [tf.float32])[0]
else:
print('Affine image distortion disabled, no cv2 or skimage module present.')
image.set_shape([height, width, 3])
# Crop the image to the specified bounding box.
distorted_image = tf.slice(image, bbox_begin, bbox_size)
# This resizing operation may distort the images because the aspect
# ratio is not respected.
resize_method = tf.image.ResizeMethod.BILINEAR
distorted_image = tf.image.resize_images(distorted_image, [height, width], resize_method)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([height, width, 3])
if not thread_id:
tf.summary.image('cropped_resized_image%s' % summary_suffix, tf.expand_dims(distorted_image, 0))
if params['elastic_distortion']:
if has_cv2:
distorted_image = tf.py_func(distort_elastic_cv2, [distorted_image], [tf.float32])[0]
else:
print('Elastic image distortion disabled, no cv2 module present.')
distorted_image.set_shape([height, width, 3])
# Randomly flip the image horizontally.
if params['h_flip']:
distorted_image = tf.image.random_flip_left_right(distorted_image)
if params['v_flip']:
distorted_image = tf.image.random_flip_up_down(distorted_image)
# Randomly distort the colors.
distorted_image = distort_color(distorted_image, hue_delta=params['hue_delta'], thread_id=thread_id)
if not thread_id:
tf.summary.image('final_distorted_image%s' % summary_suffix, tf.expand_dims(distorted_image, 0))
return distorted_image
eval_params_default = {
'central_crop_fraction': 0.95,
}
def process_for_eval(image, height, width, params=eval_params_default, scope=None):
"""Prepare one image for evaluation.
Args:
image: 3-D float Tensor
height: integer
width: integer
scope: Optional scope for op_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.name_scope(scope, 'eval_image', [image, height, width]):
# Crop the central region of the image
if params['central_crop_fraction'] != 1.0:
image = tf.image.central_crop(image, central_fraction=params['central_crop_fraction'])
# Resize the image to the network height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
image = tf.squeeze(image, [0])
return image
|
[
"tensorflow.app.flags.DEFINE_float",
"tensorflow.clip_by_value",
"skimage.transform.SimilarityTransform",
"cv2.remap",
"cv2.warpAffine",
"tensorflow.image.resize_bilinear",
"tensorflow.image.decode_png",
"numpy.arange",
"tensorflow.split",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.nn.moments",
"tensorflow.gather",
"tensorflow.image.random_contrast",
"tensorflow.concat",
"numpy.random.RandomState",
"tensorflow.div",
"tensorflow.image.central_crop",
"tensorflow.squeeze",
"skimage.transform.AffineTransform",
"tensorflow.name_scope",
"tensorflow.image.decode_gif",
"cv2.resize",
"tensorflow.mul",
"tensorflow.image.resize_images",
"tensorflow.summary.image",
"tensorflow.add",
"tensorflow.image.random_hue",
"tensorflow.image.random_flip_left_right",
"skimage.transform.warp",
"tensorflow.image.random_saturation",
"tensorflow.image.random_brightness",
"tensorflow.expand_dims",
"numpy.random.uniform",
"tensorflow.py_func",
"tensorflow.image.random_flip_up_down",
"numpy.float32",
"tensorflow.shape",
"cv2.getAffineTransform",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.slice",
"tensorflow.sub",
"tensorflow.image.decode_jpeg",
"tensorflow.image.convert_image_dtype"
] |
[((1574, 1663), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""image_size"""', '(299)', '"""Provide square images of this size."""'], {}), "('image_size', 299,\n 'Provide square images of this size.')\n", (1601, 1663), True, 'import tensorflow as tf\n'), ((1692, 1806), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""image_aspect"""', '(0.0)', '"""Aspect ratio based sizing, square image_size*image_size if 0"""'], {}), "('image_aspect', 0.0,\n 'Aspect ratio based sizing, square image_size*image_size if 0')\n", (1717, 1806), True, 'import tensorflow as tf\n'), ((1833, 2049), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""image_norm"""', '"""default"""', '"""Either \'caffe\' BGR [0,255], \'caffe_rgb\' [0, 255],\n \'frame\' per-frame standardize, \'global\' standardize, \'default\' [-1, 1]."""'], {}), '(\'image_norm\', \'default\',\n """Either \'caffe\' BGR [0,255], \'caffe_rgb\' [0, 255],\n \'frame\' per-frame standardize, \'global\' standardize, \'default\' [-1, 1]."""\n )\n', (1859, 2049), True, 'import tensorflow as tf\n'), ((2068, 2147), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""image_fmt"""', '"""jpg"""', '"""Either \'jpg\', \'png\', or \'gif\'"""'], {}), '(\'image_fmt\', \'jpg\', "Either \'jpg\', \'png\', or \'gif\'")\n', (2094, 2147), True, 'import tensorflow as tf\n'), ((7005, 7146), 'numpy.float32', 'np.float32', (['[center_square + square_size, [center_square[0] + square_size, \n center_square[1] - square_size], center_square - square_size]'], {}), '([center_square + square_size, [center_square[0] + square_size, \n center_square[1] - square_size], center_square - square_size])\n', (7015, 7146), True, 'import numpy as np\n'), ((7280, 7314), 'cv2.getAffineTransform', 'cv2.getAffineTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (7302, 7314), False, 'import cv2\n'), ((7337, 7412), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', 'shape_size[::-1]'], {'borderMode': 'cv2.BORDER_REPLICATE'}), '(image, M, shape_size[::-1], borderMode=cv2.BORDER_REPLICATE)\n', (7351, 7412), False, 'import cv2\n'), ((7866, 7916), 'skimage.transform.SimilarityTransform', 'transform.SimilarityTransform', ([], {'translation': '(-center)'}), '(translation=-center)\n', (7895, 7916), False, 'from skimage import transform\n'), ((7930, 8002), 'skimage.transform.AffineTransform', 'transform.AffineTransform', ([], {'rotation': 'rot', 'shear': 'sheer', 'translation': 'center'}), '(rotation=rot, shear=sheer, translation=center)\n', (7955, 8002), False, 'from skimage import transform\n'), ((8051, 8102), 'skimage.transform.warp', 'transform.warp', (['image', 'tform.params'], {'mode': '"""reflect"""'}), "(image, tform.params, mode='reflect')\n", (8065, 8102), False, 'from skimage import transform\n'), ((9486, 9589), 'cv2.remap', 'cv2.remap', (['image', 'grid_x', 'grid_y'], {'borderMode': 'cv2.BORDER_REFLECT_101', 'interpolation': 'cv2.INTER_LINEAR'}), '(image, grid_x, grid_y, borderMode=cv2.BORDER_REFLECT_101,\n interpolation=cv2.INTER_LINEAR)\n', (9495, 9589), False, 'import cv2\n'), ((2741, 2761), 'tensorflow.mul', 'tf.mul', (['image', '(255.0)'], {}), '(image, 255.0)\n', (2747, 2761), True, 'import tensorflow as tf\n'), ((2818, 2839), 'tensorflow.split', 'tf.split', (['(2)', '(3)', 'image'], {}), '(2, 3, image)\n', (2826, 2839), True, 'import tensorflow as tf\n'), ((2856, 2888), 'tensorflow.concat', 'tf.concat', (['(2)', '[blue, green, red]'], {}), '(2, [blue, green, red])\n', (2865, 2888), True, 
'import tensorflow as tf\n'), ((2897, 2922), 'tensorflow.sub', 'tf.sub', (['image', 'caffe_mean'], {}), '(image, caffe_mean)\n', (2903, 2922), True, 'import tensorflow as tf\n'), ((4194, 4246), 'tensorflow.name_scope', 'tf.name_scope', (['scope', '"""decode_image"""', '[image_buffer]'], {}), "(scope, 'decode_image', [image_buffer])\n", (4207, 4246), True, 'import tensorflow as tf\n'), ((5126, 5179), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['image'], {'dtype': 'tf.float32'}), '(image, dtype=tf.float32)\n', (5154, 5179), True, 'import tensorflow as tf\n'), ((5838, 5884), 'tensorflow.name_scope', 'tf.name_scope', (['scope', '"""distort_color"""', '[image]'], {}), "(scope, 'distort_color', [image])\n", (5851, 5884), True, 'import tensorflow as tf\n'), ((6650, 6683), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['image', '(0.0)', '(1.0)'], {}), '(image, 0.0, 1.0)\n', (6666, 6683), True, 'import tensorflow as tf\n'), ((6826, 6853), 'numpy.random.RandomState', 'np.random.RandomState', (['None'], {}), '(None)\n', (6847, 6853), True, 'import numpy as np\n'), ((6927, 6949), 'numpy.float32', 'np.float32', (['shape_size'], {}), '(shape_size)\n', (6937, 6949), True, 'import numpy as np\n'), ((7609, 7636), 'numpy.random.RandomState', 'np.random.RandomState', (['None'], {}), '(None)\n', (7630, 7636), True, 'import numpy as np\n'), ((7659, 7697), 'numpy.random.uniform', 'np.random.uniform', (['(-rotation)', 'rotation'], {}), '(-rotation, rotation)\n', (7676, 7697), True, 'import numpy as np\n'), ((7722, 7754), 'numpy.random.uniform', 'np.random.uniform', (['(-shear)', 'shear'], {}), '(-shear, shear)\n', (7739, 7754), True, 'import numpy as np\n'), ((8341, 8368), 'numpy.random.RandomState', 'np.random.RandomState', (['None'], {}), '(None)\n', (8362, 8368), True, 'import numpy as np\n'), ((9188, 9224), 'cv2.resize', 'cv2.resize', (['rand_x', 'shape_size[::-1]'], {}), '(rand_x, shape_size[::-1])\n', (9198, 9224), False, 'import cv2\n'), ((9242, 9278), 'cv2.resize', 'cv2.resize', (['rand_y', 'shape_size[::-1]'], {}), '(rand_y, shape_size[::-1])\n', (9252, 9278), False, 'import cv2\n'), ((9313, 9337), 'numpy.arange', 'np.arange', (['shape_size[1]'], {}), '(shape_size[1])\n', (9322, 9337), True, 'import numpy as np\n'), ((9339, 9363), 'numpy.arange', 'np.arange', (['shape_size[0]'], {}), '(shape_size[0])\n', (9348, 9363), True, 'import numpy as np\n'), ((10928, 10995), 'tensorflow.name_scope', 'tf.name_scope', (['scope', '"""distort_image"""', '[image, height, width, bbox]'], {}), "(scope, 'distort_image', [image, height, width, bbox])\n", (10941, 10995), True, 'import tensorflow as tf\n'), ((13284, 13322), 'tensorflow.slice', 'tf.slice', (['image', 'bbox_begin', 'bbox_size'], {}), '(image, bbox_begin, bbox_size)\n', (13292, 13322), True, 'import tensorflow as tf\n'), ((13515, 13586), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['distorted_image', '[height, width]', 'resize_method'], {}), '(distorted_image, [height, width], resize_method)\n', (13537, 13586), True, 'import tensorflow as tf\n'), ((15185, 15243), 'tensorflow.name_scope', 'tf.name_scope', (['scope', '"""eval_image"""', '[image, height, width]'], {}), "(scope, 'eval_image', [image, height, width])\n", (15198, 15243), True, 'import tensorflow as tf\n'), ((15519, 15543), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (15533, 15543), True, 'import tensorflow as tf\n'), ((15560, 15629), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['image', 
'[height, width]'], {'align_corners': '(False)'}), '(image, [height, width], align_corners=False)\n', (15584, 15629), True, 'import tensorflow as tf\n'), ((15646, 15668), 'tensorflow.squeeze', 'tf.squeeze', (['image', '[0]'], {}), '(image, [0])\n', (15656, 15668), True, 'import tensorflow as tf\n'), ((3057, 3077), 'tensorflow.mul', 'tf.mul', (['image', '(255.0)'], {}), '(image, 255.0)\n', (3063, 3077), True, 'import tensorflow as tf\n'), ((3103, 3135), 'tensorflow.gather', 'tf.gather', (['caffe_mean', '[2, 1, 0]'], {}), '(caffe_mean, [2, 1, 0])\n', (3112, 3135), True, 'import tensorflow as tf\n'), ((3152, 3181), 'tensorflow.sub', 'tf.sub', (['image', 'caffe_mean_rgb'], {}), '(image, caffe_mean_rgb)\n', (3158, 3181), True, 'import tensorflow as tf\n'), ((4595, 4644), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['image_buffer'], {'channels': 'depth'}), '(image_buffer, channels=depth)\n', (4614, 4644), True, 'import tensorflow as tf\n'), ((5978, 6035), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['image'], {'max_delta': '(32.0 / 255.0)'}), '(image, max_delta=32.0 / 255.0)\n', (6004, 6035), True, 'import tensorflow as tf\n'), ((6054, 6109), 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['image'], {'lower': '(0.5)', 'upper': '(1.5)'}), '(image, lower=0.5, upper=1.5)\n', (6080, 6109), True, 'import tensorflow as tf\n'), ((6130, 6177), 'tensorflow.image.random_hue', 'tf.image.random_hue', (['image'], {'max_delta': 'hue_delta'}), '(image, max_delta=hue_delta)\n', (6149, 6177), True, 'import tensorflow as tf\n'), ((6198, 6251), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['image'], {'lower': '(0.5)', 'upper': '(1.5)'}), '(image, lower=0.5, upper=1.5)\n', (6222, 6251), True, 'import tensorflow as tf\n'), ((7821, 7843), 'numpy.float32', 'np.float32', (['shape_size'], {}), '(shape_size)\n', (7831, 7843), True, 'import numpy as np\n'), ((11320, 11405), 'tensorflow.summary.image', 'tf.summary.image', (["('image_with_bounding_boxes%s' % summary_suffix)", 'image_with_box'], {}), "('image_with_bounding_boxes%s' % summary_suffix, image_with_box\n )\n", (11336, 11405), True, 'import tensorflow as tf\n'), ((12016, 12031), 'tensorflow.shape', 'tf.shape', (['image'], {}), '(image)\n', (12024, 12031), True, 'import tensorflow as tf\n'), ((12535, 12638), 'tensorflow.summary.image', 'tf.summary.image', (["('images_with_distorted_bounding_box%s' % summary_suffix)", 'image_with_distorted_box'], {}), "('images_with_distorted_bounding_box%s' % summary_suffix,\n image_with_distorted_box)\n", (12551, 12638), True, 'import tensorflow as tf\n'), ((14326, 14374), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['distorted_image'], {}), '(distorted_image)\n', (14357, 14374), True, 'import tensorflow as tf\n'), ((14435, 14480), 'tensorflow.image.random_flip_up_down', 'tf.image.random_flip_up_down', (['distorted_image'], {}), '(distorted_image)\n', (14463, 14480), True, 'import tensorflow as tf\n'), ((15363, 15441), 'tensorflow.image.central_crop', 'tf.image.central_crop', (['image'], {'central_fraction': "params['central_crop_fraction']"}), "(image, central_fraction=params['central_crop_fraction'])\n", (15384, 15441), True, 'import tensorflow as tf\n'), ((3288, 3332), 'tensorflow.nn.moments', 'tf.nn.moments', (['image'], {'axes': '[0, 1]', 'shift': '(0.3)'}), '(image, axes=[0, 1], shift=0.3)\n', (3301, 3332), True, 'import tensorflow as tf\n'), ((3390, 3409), 'tensorflow.sub', 'tf.sub', (['image', 'mean'], {}), 
'(image, mean)\n', (3396, 3409), True, 'import tensorflow as tf\n'), ((3426, 3444), 'tensorflow.div', 'tf.div', (['image', 'std'], {}), '(image, std)\n', (3432, 3444), True, 'import tensorflow as tf\n'), ((4728, 4761), 'tensorflow.image.decode_gif', 'tf.image.decode_gif', (['image_buffer'], {}), '(image_buffer)\n', (4747, 4761), True, 'import tensorflow as tf\n'), ((4857, 4907), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image_buffer'], {'channels': 'depth'}), '(image_buffer, channels=depth)\n', (4877, 4907), True, 'import tensorflow as tf\n'), ((6306, 6363), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['image'], {'max_delta': '(32.0 / 255.0)'}), '(image, max_delta=32.0 / 255.0)\n', (6332, 6363), True, 'import tensorflow as tf\n'), ((6382, 6435), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['image'], {'lower': '(0.5)', 'upper': '(1.5)'}), '(image, lower=0.5, upper=1.5)\n', (6406, 6435), True, 'import tensorflow as tf\n'), ((6456, 6511), 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['image'], {'lower': '(0.5)', 'upper': '(1.5)'}), '(image, lower=0.5, upper=1.5)\n', (6482, 6511), True, 'import tensorflow as tf\n'), ((6532, 6579), 'tensorflow.image.random_hue', 'tf.image.random_hue', (['image'], {'max_delta': 'hue_delta'}), '(image, max_delta=hue_delta)\n', (6551, 6579), True, 'import tensorflow as tf\n'), ((11276, 11300), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (11290, 11300), True, 'import tensorflow as tf\n'), ((12483, 12507), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (12497, 12507), True, 'import tensorflow as tf\n'), ((13855, 13889), 'tensorflow.expand_dims', 'tf.expand_dims', (['distorted_image', '(0)'], {}), '(distorted_image, 0)\n', (13869, 13889), True, 'import tensorflow as tf\n'), ((14730, 14764), 'tensorflow.expand_dims', 'tf.expand_dims', (['distorted_image', '(0)'], {}), '(distorted_image, 0)\n', (14744, 14764), True, 'import tensorflow as tf\n'), ((3355, 3373), 'tensorflow.add', 'tf.add', (['var', '(0.001)'], {}), '(var, 0.001)\n', (3361, 3373), True, 'import tensorflow as tf\n'), ((3545, 3578), 'tensorflow.sub', 'tf.sub', (['image', 'global_mean_std[0]'], {}), '(image, global_mean_std[0])\n', (3551, 3578), True, 'import tensorflow as tf\n'), ((3595, 3628), 'tensorflow.div', 'tf.div', (['image', 'global_mean_std[1]'], {}), '(image, global_mean_std[1])\n', (3601, 3628), True, 'import tensorflow as tf\n'), ((3798, 3816), 'tensorflow.sub', 'tf.sub', (['image', '(0.5)'], {}), '(image, 0.5)\n', (3804, 3816), True, 'import tensorflow as tf\n'), ((3833, 3851), 'tensorflow.mul', 'tf.mul', (['image', '(2.0)'], {}), '(image, 2.0)\n', (3839, 3851), True, 'import tensorflow as tf\n'), ((12830, 12921), 'tensorflow.py_func', 'tf.py_func', (['distort_affine_skimage', '[image, rotation_range, shear_range]', '[tf.float32]'], {}), '(distort_affine_skimage, [image, rotation_range, shear_range], [\n tf.float32])\n', (12840, 12921), True, 'import tensorflow as tf\n'), ((13991, 14055), 'tensorflow.py_func', 'tf.py_func', (['distort_elastic_cv2', '[distorted_image]', '[tf.float32]'], {}), '(distort_elastic_cv2, [distorted_image], [tf.float32])\n', (14001, 14055), True, 'import tensorflow as tf\n')]
|
"""
Module for evaluation of machine learning models.
"""
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, r2_score
from scipy.stats import spearmanr
def print_metrics(y_true, y_pred):
print("================================================")
print("Spearman's correlation coef: " + str(spearmanr(y_true, y_pred)[0]))
print("================================================")
print("-----------")
print("R^2 = " + str(r2_score(y_true, y_pred)))
print("R = " + str(np.sqrt(r2_score(y_true, y_pred))))
print("-----------")
|
[
"sklearn.metrics.r2_score",
"scipy.stats.spearmanr"
] |
[((492, 516), 'sklearn.metrics.r2_score', 'r2_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (500, 516), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, r2_score\n'), ((344, 369), 'scipy.stats.spearmanr', 'spearmanr', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (353, 369), False, 'from scipy.stats import spearmanr\n'), ((550, 574), 'sklearn.metrics.r2_score', 'r2_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (558, 574), False, 'from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, r2_score\n')]
|
from django.conf.urls import path
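# Note: path is normally imported from django.urls; django.conf.urls does not provide it in Django 2.0+.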
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('',views.home,name='home'),
path('image/', views.image_upload,name='upload'),
path('profile/', views.profile_info,name='profile'),
path('edit/',views.profile_edit,name='edit'),
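    # Note: path() matches route strings literally; the regex-style routes below would need re_path() and raw strings to work as intended.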
path('^new_comment/(\d+)/$' ,views.add_comment,name='newComment'),
path('^comment/(\d+)/$' ,views.comments,name='comments'),
path('likes/(\d+)/$' , views.like_images, name='likes'),
path('user/',views.search_user,name='search_user')
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
[
"django.conf.urls.static.static",
"django.conf.urls.path"
] |
[((156, 189), 'django.conf.urls.path', 'path', (['""""""', 'views.home'], {'name': '"""home"""'}), "('', views.home, name='home')\n", (160, 189), False, 'from django.conf.urls import path\n'), ((193, 242), 'django.conf.urls.path', 'path', (['"""image/"""', 'views.image_upload'], {'name': '"""upload"""'}), "('image/', views.image_upload, name='upload')\n", (197, 242), False, 'from django.conf.urls import path\n'), ((247, 299), 'django.conf.urls.path', 'path', (['"""profile/"""', 'views.profile_info'], {'name': '"""profile"""'}), "('profile/', views.profile_info, name='profile')\n", (251, 299), False, 'from django.conf.urls import path\n'), ((304, 350), 'django.conf.urls.path', 'path', (['"""edit/"""', 'views.profile_edit'], {'name': '"""edit"""'}), "('edit/', views.profile_edit, name='edit')\n", (308, 350), False, 'from django.conf.urls import path\n'), ((354, 421), 'django.conf.urls.path', 'path', (['"""^new_comment/(\\\\d+)/$"""', 'views.add_comment'], {'name': '"""newComment"""'}), "('^new_comment/(\\\\d+)/$', views.add_comment, name='newComment')\n", (358, 421), False, 'from django.conf.urls import path\n'), ((425, 483), 'django.conf.urls.path', 'path', (['"""^comment/(\\\\d+)/$"""', 'views.comments'], {'name': '"""comments"""'}), "('^comment/(\\\\d+)/$', views.comments, name='comments')\n", (429, 483), False, 'from django.conf.urls import path\n'), ((487, 542), 'django.conf.urls.path', 'path', (['"""likes/(\\\\d+)/$"""', 'views.like_images'], {'name': '"""likes"""'}), "('likes/(\\\\d+)/$', views.like_images, name='likes')\n", (491, 542), False, 'from django.conf.urls import path\n'), ((548, 600), 'django.conf.urls.path', 'path', (['"""user/"""', 'views.search_user'], {'name': '"""search_user"""'}), "('user/', views.search_user, name='search_user')\n", (552, 600), False, 'from django.conf.urls import path\n'), ((639, 700), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (645, 700), False, 'from django.conf.urls.static import static\n')]
|
import os
from typing import Tuple
from werkzeug.utils import secure_filename
from flask import Flask, flash, request, redirect, url_for, render_template, session
from ms_package.startup import DATA_DIR
from ms_package.reader import Reader
from ms_package.peptide_prediction import PeptideSearch
from ms_package.protein_prediction import ProteinSearch
UPLOAD_FOLDER = os.path.join(DATA_DIR, 'uploads')
ALLOWED_EXTENSIONS = {"mzml", "mzxml", "fasta"}
app = Flask(__name__)
app.secret_key = "someSecretKey"
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_PATH'] = 16 * 1024 * 1024
@app.route("/", methods=['GET', 'POST'])
def home():
if request.method == 'POST': # User choosing file type
file_type = request.form['file_type']
import_message, import_check = toggle_file_type(file_type)
if import_check: # Both File(s) imported
rel_vals = get_values(session['ms_files'])
hits, peptide_hits = get_peptide_hits(session['fasta_file'], session['ms_files'])
prtns = get_proteins(hits)
return render_template('template.html', values=rel_vals, hits=peptide_hits, protein=prtns, import_msg=import_message)
else:
return render_template('template.html', import_msg=import_message)
return render_template('template.html')
@app.route("/upload")
def upload():
return render_template('upload.html')
@app.route("/home", methods=['GET', 'POST'])
def homepage():
return render_template('template.html')
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/uploader', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
if request.form["button"] == "Upload":
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
files = request.files.getlist('file')
# if user does not select file, browser also
# submit an empty part without filename
for file in files:
if file.filename == '':
flash('No file selected')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
if check_uploads():
return render_template('template.html', success_msg="File(s) uploaded successfully!")
else:
return render_template('upload.html', success_msg="Please upload the other file!")
return redirect(url_for('home'))
elif request.form['button'] == "Clear Contents":
files = os.listdir(UPLOAD_FOLDER)
for file in files:
os.remove(os.path.join(UPLOAD_FOLDER, file))
return render_template('upload.html', success_msg="Uploaded files successfully removed!")
return redirect(url_for('home'))
def toggle_file_type(file_type: str) -> Tuple[str, bool]:
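    # Locates the uploaded MS and FASTA files, stores their paths in the session,
    # and returns a (status message, success flag) tuple.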
ext = ".mzML" if file_type == "mzml" else ".mzxml"
ms_files = [
os.path.join(app.config['UPLOAD_FOLDER'], f)
for f in os.listdir(app.config['UPLOAD_FOLDER'])
if f.endswith(ext)
]
fasta_file = [
os.path.join(app.config['UPLOAD_FOLDER'], f)
for f in os.listdir(app.config['UPLOAD_FOLDER'])
if f.endswith(".fasta")
]
if len(ms_files) == 0 and len(fasta_file) == 0:
return "No files uploaded. Please upload files.", False
elif file_type == "mzml" and len(ms_files) != 1:
return "Error importing mzml file. Please clear uploaded contents and upload a single mzml file.", False
elif file_type == "mzxml" and len(ms_files) != 1:
return "Error importing mzxml file. Please clear uploaded contents and upload a single mzxml file.", False
else:
session['file_type'] = file_type
session['ms_files'] = ms_files[0]
session['fasta_file'] = fasta_file[0]
return "File(s) imported successfully!", True
def check_uploads():
n_files = len(os.listdir(app.config['UPLOAD_FOLDER']))
    return n_files == 2
def get_values(file):
relevant_values = Reader(file)
values = relevant_values.analyse_spectrum()
rel_vals = values.to_html(header="true", table_id="table", index=False, justify="justify-all")
return rel_vals
def get_peptide_hits(fasta_file, ms_file):
peptides = PeptideSearch(fasta_file, ms_file)
hits = peptides.peptide_wrapper()[0]
peptide_hits = hits.to_html(header="true", table_id="table", index=False)
return hits, peptide_hits
def get_proteins(hits):
peptide_list = hits['Peptide hit sequence']
pro_search = ProteinSearch(peptide_list)
pro_search.get_proteins()
ans_with_seq = pro_search.ans_df
ans_without_seq = ans_with_seq.drop('Sequence', axis=1)
proteins = ans_without_seq.to_html(header="true", table_id="table", index=False)
return proteins
if __name__ == '__main__':
    flask_port = os.environ.get('FLASK_PORT')
    if flask_port:
        port = int(flask_port)
    else:
        port = 5000
app.run(debug=True, host='0.0.0.0', port=port)
|
[
"flask.flash",
"os.makedirs",
"flask.redirect",
"flask.request.files.getlist",
"ms_package.peptide_prediction.PeptideSearch",
"flask.Flask",
"werkzeug.utils.secure_filename",
"os.environ.get",
"ms_package.protein_prediction.ProteinSearch",
"flask.url_for",
"flask.render_template",
"os.path.join",
"os.listdir",
"ms_package.reader.Reader"
] |
[((372, 405), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""uploads"""'], {}), "(DATA_DIR, 'uploads')\n", (384, 405), False, 'import os\n'), ((461, 476), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (466, 476), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, session\n'), ((511, 552), 'os.makedirs', 'os.makedirs', (['UPLOAD_FOLDER'], {'exist_ok': '(True)'}), '(UPLOAD_FOLDER, exist_ok=True)\n', (522, 552), False, 'import os\n'), ((1348, 1380), 'flask.render_template', 'render_template', (['"""template.html"""'], {}), "('template.html')\n", (1363, 1380), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, session\n'), ((1430, 1460), 'flask.render_template', 'render_template', (['"""upload.html"""'], {}), "('upload.html')\n", (1445, 1460), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, session\n'), ((1535, 1567), 'flask.render_template', 'render_template', (['"""template.html"""'], {}), "('template.html')\n", (1550, 1567), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, session\n'), ((4455, 4467), 'ms_package.reader.Reader', 'Reader', (['file'], {}), '(file)\n', (4461, 4467), False, 'from ms_package.reader import Reader\n'), ((4695, 4729), 'ms_package.peptide_prediction.PeptideSearch', 'PeptideSearch', (['fasta_file', 'ms_file'], {}), '(fasta_file, ms_file)\n', (4708, 4729), False, 'from ms_package.peptide_prediction import PeptideSearch\n'), ((4970, 4997), 'ms_package.protein_prediction.ProteinSearch', 'ProteinSearch', (['peptide_list'], {}), '(peptide_list)\n', (4983, 4997), False, 'from ms_package.protein_prediction import ProteinSearch\n'), ((3335, 3379), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'f'], {}), "(app.config['UPLOAD_FOLDER'], f)\n", (3347, 3379), False, 'import os\n'), ((3497, 3541), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'f'], {}), "(app.config['UPLOAD_FOLDER'], f)\n", (3509, 3541), False, 'import os\n'), ((4325, 4364), 'os.listdir', 'os.listdir', (["app.config['UPLOAD_FOLDER']"], {}), "(app.config['UPLOAD_FOLDER'])\n", (4335, 4364), False, 'import os\n'), ((5280, 5308), 'os.environ.get', 'os.environ.get', (['"""FLASK_PORT"""'], {}), "('FLASK_PORT')\n", (5294, 5308), False, 'import os\n'), ((1132, 1246), 'flask.render_template', 'render_template', (['"""template.html"""'], {'values': 'rel_vals', 'hits': 'peptide_hits', 'protein': 'prtns', 'import_msg': 'import_message'}), "('template.html', values=rel_vals, hits=peptide_hits,\n protein=prtns, import_msg=import_message)\n", (1147, 1246), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, session\n'), ((1276, 1335), 'flask.render_template', 'render_template', (['"""template.html"""'], {'import_msg': 'import_message'}), "('template.html', import_msg=import_message)\n", (1291, 1335), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, session\n'), ((2054, 2083), 'flask.request.files.getlist', 'request.files.getlist', (['"""file"""'], {}), "('file')\n", (2075, 2083), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, session\n'), ((3178, 3193), 'flask.url_for', 'url_for', (['"""home"""'], {}), "('home')\n", (3185, 3193), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, session\n'), ((3397, 3436), 'os.listdir', 'os.listdir', (["app.config['UPLOAD_FOLDER']"], {}), 
"(app.config['UPLOAD_FOLDER'])\n", (3407, 3436), False, 'import os\n'), ((3559, 3598), 'os.listdir', 'os.listdir', (["app.config['UPLOAD_FOLDER']"], {}), "(app.config['UPLOAD_FOLDER'])\n", (3569, 3598), False, 'import os\n'), ((1967, 1988), 'flask.flash', 'flash', (['"""No file part"""'], {}), "('No file part')\n", (1972, 1988), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, session\n'), ((2012, 2033), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (2020, 2033), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, session\n'), ((2616, 2694), 'flask.render_template', 'render_template', (['"""template.html"""'], {'success_msg': '"""File(s) uploaded successfully!"""'}), "('template.html', success_msg='File(s) uploaded successfully!')\n", (2631, 2694), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, session\n'), ((2736, 2811), 'flask.render_template', 'render_template', (['"""upload.html"""'], {'success_msg': '"""Please upload the other file!"""'}), "('upload.html', success_msg='Please upload the other file!')\n", (2751, 2811), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, session\n'), ((2840, 2855), 'flask.url_for', 'url_for', (['"""home"""'], {}), "('home')\n", (2847, 2855), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, session\n'), ((2934, 2959), 'os.listdir', 'os.listdir', (['UPLOAD_FOLDER'], {}), '(UPLOAD_FOLDER)\n', (2944, 2959), False, 'import os\n'), ((3071, 3158), 'flask.render_template', 'render_template', (['"""upload.html"""'], {'success_msg': '"""Uploaded files successfully removed!"""'}), "('upload.html', success_msg=\n 'Uploaded files successfully removed!')\n", (3086, 3158), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, session\n'), ((2284, 2309), 'flask.flash', 'flash', (['"""No file selected"""'], {}), "('No file selected')\n", (2289, 2309), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, session\n'), ((2337, 2358), 'flask.redirect', 'redirect', (['request.url'], {}), '(request.url)\n', (2345, 2358), False, 'from flask import Flask, flash, request, redirect, url_for, render_template, session\n'), ((2447, 2477), 'werkzeug.utils.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (2462, 2477), False, 'from werkzeug.utils import secure_filename\n'), ((2508, 2559), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'filename'], {}), "(app.config['UPLOAD_FOLDER'], filename)\n", (2520, 2559), False, 'import os\n'), ((3017, 3050), 'os.path.join', 'os.path.join', (['UPLOAD_FOLDER', 'file'], {}), '(UPLOAD_FOLDER, file)\n', (3029, 3050), False, 'import os\n')]
|
# Copyright (c) 2016, Universal Robots A/S,
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Universal Robots A/S nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL UNIVERSAL ROBOTS A/S BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import struct
class ControlHeader(object):
__slots__ = ['command', 'size',]
@staticmethod
def unpack(buf):
rmd = ControlHeader()
(rmd.size, rmd.command) = struct.unpack_from('>HB', buf)
return rmd
class ControlVersion(object):
__slots__ = ['major', 'minor', 'bugfix', 'build']
@staticmethod
def unpack(buf):
rmd = ControlVersion()
(rmd.major, rmd.minor, rmd.bugfix, rmd.build) = struct.unpack_from('>IIII', buf)
return rmd
class ReturnValue(object):
__slots__ = ['success']
@staticmethod
def unpack(buf):
rmd = ReturnValue()
rmd.success = bool(struct.unpack_from('>B', buf)[0])
return rmd
class Message(object):
__slots__ = ['level', 'message', 'source']
EXCEPTION_MESSAGE = 0
ERROR_MESSAGE = 1
WARNING_MESSAGE = 2
INFO_MESSAGE = 3
@staticmethod
def unpack(buf):
rmd = Message()
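        # Wire format: 1-byte message length, message bytes, 1-byte source length, source bytes, 1-byte level.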
offset = 0
msg_length = struct.unpack_from(">B", buf, offset)[0]
offset = offset + 1
rmd.message = buf[offset:offset+msg_length]
offset = offset + msg_length
src_length = struct.unpack_from(">B", buf, offset)[0]
offset = offset + 1
rmd.source = buf[offset:offset+src_length]
offset = offset + src_length
rmd.level = struct.unpack_from(">B", buf, offset)[0]
return rmd
def get_item_size(data_type):
if data_type.startswith('VECTOR6'):
return 6
elif data_type.startswith('VECTOR3'):
return 3
return 1
def unpack_field(data, offset, data_type):
size = get_item_size(data_type)
if(data_type == 'VECTOR6D' or
data_type == 'VECTOR3D'):
return [float(data[offset+i]) for i in range(size)]
elif(data_type == 'VECTOR6UINT32'):
return [int(data[offset+i]) for i in range(size)]
elif(data_type == 'DOUBLE'):
return float(data[offset])
elif(data_type == 'UINT32' or
data_type == 'UINT64'):
return int(data[offset])
elif(data_type == 'VECTOR6INT32'):
return [int(data[offset+i]) for i in range(size)]
elif(data_type == 'INT32' or
data_type == 'UINT8'):
return int(data[offset])
raise ValueError('unpack_field: unknown data type: ' + data_type)
class DataObject(object):
recipe_id = None
def pack(self, names, types):
if len(names) != len(types):
raise ValueError('List sizes are not identical.')
l = []
if(self.recipe_id is not None):
l.append(self.recipe_id)
for i in range(len(names)):
if self.__dict__[names[i]] is None:
raise ValueError('Uninitialized parameter: ' + names[i])
if types[i].startswith('VECTOR'):
l.extend(self.__dict__[names[i]])
else:
l.append(self.__dict__[names[i]])
return l
@staticmethod
def unpack(data, names, types):
if len(names) != len(types):
raise ValueError('List sizes are not identical.')
obj = DataObject()
offset = 0
obj.recipe_id = data[0]
for i in range(len(names)):
obj.__dict__[names[i]] = unpack_field(data[1:], offset, types[i])
offset += get_item_size(types[i])
return obj
@staticmethod
def create_empty(names, recipe_id):
obj = DataObject()
for i in range(len(names)):
obj.__dict__[names[i]] = None
obj.recipe_id = recipe_id
return obj
class DataConfig(object):
__slots__ = ['id', 'names', 'types', 'fmt']
@staticmethod
def unpack_recipe(buf):
rmd = DataConfig()
rmd.id = struct.unpack_from('>B', buf)[0]
rmd.types = [type.decode() for type in buf[1:].split(b',')]
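        # Note: 'names' is not populated here; the caller is expected to assign it before pack()/unpack() are used.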
rmd.fmt = '>B'
for i in rmd.types:
if i=='INT32':
rmd.fmt += 'i'
elif i=='UINT32':
rmd.fmt += 'I'
elif i=='VECTOR6D':
rmd.fmt += 'd'*6
elif i=='VECTOR3D':
rmd.fmt += 'd'*3
elif i=='VECTOR6INT32':
rmd.fmt += 'i'*6
elif i=='VECTOR6UINT32':
rmd.fmt += 'I'*6
elif i=='DOUBLE':
rmd.fmt += 'd'
elif i=='UINT64':
rmd.fmt += 'Q'
elif i=='UINT8':
rmd.fmt += 'B'
elif i=='IN_USE':
raise ValueError('An input parameter is already in use.')
else:
raise ValueError('Unknown data type: ' + i)
return rmd
def pack(self, state):
l = state.pack(self.names, self.types)
return struct.pack(self.fmt, *l)
def unpack(self, data):
li = struct.unpack_from(self.fmt, data)
return DataObject.unpack(li, self.names, self.types)
|
[
"struct.pack",
"struct.unpack_from"
] |
[((1741, 1771), 'struct.unpack_from', 'struct.unpack_from', (['""">HB"""', 'buf'], {}), "('>HB', buf)\n", (1759, 1771), False, 'import struct\n'), ((2008, 2040), 'struct.unpack_from', 'struct.unpack_from', (['""">IIII"""', 'buf'], {}), "('>IIII', buf)\n", (2026, 2040), False, 'import struct\n'), ((6290, 6315), 'struct.pack', 'struct.pack', (['self.fmt', '*l'], {}), '(self.fmt, *l)\n', (6301, 6315), False, 'import struct\n'), ((6358, 6392), 'struct.unpack_from', 'struct.unpack_from', (['self.fmt', 'data'], {}), '(self.fmt, data)\n', (6376, 6392), False, 'import struct\n'), ((2542, 2579), 'struct.unpack_from', 'struct.unpack_from', (['""">B"""', 'buf', 'offset'], {}), "('>B', buf, offset)\n", (2560, 2579), False, 'import struct\n'), ((2722, 2759), 'struct.unpack_from', 'struct.unpack_from', (['""">B"""', 'buf', 'offset'], {}), "('>B', buf, offset)\n", (2740, 2759), False, 'import struct\n'), ((2899, 2936), 'struct.unpack_from', 'struct.unpack_from', (['""">B"""', 'buf', 'offset'], {}), "('>B', buf, offset)\n", (2917, 2936), False, 'import struct\n'), ((5269, 5298), 'struct.unpack_from', 'struct.unpack_from', (['""">B"""', 'buf'], {}), "('>B', buf)\n", (5287, 5298), False, 'import struct\n'), ((2216, 2245), 'struct.unpack_from', 'struct.unpack_from', (['""">B"""', 'buf'], {}), "('>B', buf)\n", (2234, 2245), False, 'import struct\n')]
|
import sys
from collections import defaultdict
sys.path.insert(0, '../10')
from knot_hash import knot_hash_str
BIT_COUNT = {
'0': 0, # 0000
'1': 1, # 0001
'2': 1, # 0010
'3': 2, # 0011
'4': 1, # 0100
'5': 2, # 0101
'6': 2, # 0110
'7': 3, # 0111
'8': 1, # 1000
'9': 2, # 1001
'a': 2, # 1010
'b': 3, # 1011
'c': 2, # 1100
'd': 3, # 1101
'e': 3, # 1110
'f': 4, # 1111
}
BINARY_SEQ = {
'0': '0000',
'1': '0001',
'2': '0010',
'3': '0011',
'4': '0100',
'5': '0101',
'6': '0110',
'7': '0111',
'8': '1000',
'9': '1001',
'a': '1010',
'b': '1011',
'c': '1100',
'd': '1101',
'e': '1110',
'f': '1111',
}
knots = dict()
def get_knot(key: str) -> str:
if key not in knots:
knots[key] = knot_hash_str(key)
return knots[key]
def bit_counter(hex_str: str) -> int:
res = 0
for digit in hex_str:
res += BIT_COUNT[digit]
return res
def disk_defragmentation(inp, pass1=True):
res = 0
if pass1:
for c in range(128):
res += bit_counter(get_knot(f'{inp}-{c}'))
return res
else:
grid = defaultdict(lambda: 0)
for c in range(128):
y = c * 1j
knot = get_knot(f'{inp}-{c}')
for x, ch in enumerate(knot):
nibble_start = x * 4 + y
b = BINARY_SEQ[ch]
for bit in range(4):
grid[nibble_start + bit] = int(b[bit])
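        # Sanity check: the total number of set bits should equal the part 1 answer for this puzzle input.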
assert sum(grid.values()) == 8074
# print_grid(grid)
res = region_count(grid)
return res
def print_grid(grid):
for c in range(128):
y = c * 1j
s = ''
for x in range(128):
s += '#' if grid[x + y] == 1 else '.'
print(s)
def region_count(grid):
res = 0
for c in range(128):
y = c * 1j
for x in range(128):
pos = x + y
if grid[pos] == 1:
clear_region(grid, pos)
res += 1
return res
def clear_region(grid, pos):
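    # Recursive flood fill; note that a very large region could exceed Python's default recursion limit.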
if grid[pos] == 1:
grid[pos] = 0
clear_region(grid, pos - 1j)
clear_region(grid, pos - 1)
clear_region(grid, pos + 1j)
clear_region(grid, pos + 1)
if __name__ == '__main__':
input_value = 'jzgqcdpd'
print(f'Day 14, part 1: {disk_defragmentation(input_value)}')
print(f'Day 14, part 2: {disk_defragmentation(input_value, False)}')
# Day 14, part 1: 8074
# Day 14, part 2: 1212
|
[
"collections.defaultdict",
"knot_hash.knot_hash_str",
"sys.path.insert"
] |
[((48, 75), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../10"""'], {}), "(0, '../10')\n", (63, 75), False, 'import sys\n'), ((833, 851), 'knot_hash.knot_hash_str', 'knot_hash_str', (['key'], {}), '(key)\n', (846, 851), False, 'from knot_hash import knot_hash_str\n'), ((1198, 1221), 'collections.defaultdict', 'defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (1209, 1221), False, 'from collections import defaultdict\n')]
|
# Copyright (c) 2016-2017 Adobe Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Set up for testing
"""
import logging
import pytest
from six import StringIO
mock_connection_params = {
"org_id": "N/A",
"auth": "N/A",
"user_management_endpoint": 'https://test/',
"logger": None,
"retry_max_attempts": 3,
"retry_first_delay": 1,
"retry_random_delay": 2,
}
class MockResponse:
def __init__(self, status=200, body=None, headers=None, text=None):
self.status_code = status
self.body = body if body is not None else {}
self.headers = headers if headers else {}
self.text = text if text else ""
def json(self):
return self.body
# py.test doesn't divert string logging, so use it
@pytest.fixture
def log_stream():
stream = StringIO()
handler = logging.StreamHandler(stream)
logger = logging.getLogger('test_logger')
logger.setLevel(logging.WARNING)
logger.addHandler(handler)
yield stream, logger
handler.close()
|
[
"logging.StreamHandler",
"logging.getLogger",
"six.StringIO"
] |
[((1842, 1852), 'six.StringIO', 'StringIO', ([], {}), '()\n', (1850, 1852), False, 'from six import StringIO\n'), ((1867, 1896), 'logging.StreamHandler', 'logging.StreamHandler', (['stream'], {}), '(stream)\n', (1888, 1896), False, 'import logging\n'), ((1910, 1942), 'logging.getLogger', 'logging.getLogger', (['"""test_logger"""'], {}), "('test_logger')\n", (1927, 1942), False, 'import logging\n')]
|
from clinter.rules import BaseRule, BaseTest, RuleExecutionError
from clinter.NodeParenter import NodeParenter
from pycparser.c_ast import Node, BinaryOp, UnaryOp, ID, StructRef
from ConditionAnalzer import ConditionAnalyzer
class RuleBrokenNullCheck(BaseRule):
def __init__(self, reporter=None):
BaseRule.__init__(self, "static", reporter)
self.__parenter : NodeParenter = None
@staticmethod
def __is_null_check(node : Node):
"""Verify if the given node is a NULL check
If the node is actually a NULL check, return a tuple containing:
- The node encompassing the reference checked against NULL (an ID or a StructRef)
- The operator used to check ('==' or '!=')
:param node: The node to check
:return: A tuple (Node, str)
"""
if isinstance(node, BinaryOp) and node.op not in ['==', '!=']:
return None, None
if isinstance(node, UnaryOp) and node.op != "!":
return None, None
# Detect broken NULL check of type `if(x)` or `if(x->y)`
if isinstance(node, (ID, StructRef)):
return node, "!="
# Detect broken NULL check of type `x == NULL` or `x->y == NULL` or `NULL == x` (or != equivalents)
if isinstance(node, BinaryOp) and node.op in ['==', '!=']:
if isinstance(node.left, (ID, StructRef)) and isinstance(node.right, ID) and node.right.name == "NULL":
return node.left, node.op
if isinstance(node.right, (ID, StructRef)) and isinstance(node.left, ID) and node.left.name == "NULL":
return node.right, node.op
# Detect broken NULL check of type `!x` or `!x->y`
if isinstance(node, UnaryOp):
if isinstance(node.expr, (ID, StructRef)):
return node.expr, "!="
return None, None
"""def visit_BinaryOp(self, node):
null_checks = []
if node.op not in ('&&', '||', '!=', '=='):
return
if node.op in ('==', '!='):
null_check1 = self.__is_null_check(node)
if null_check1[0] is not None:
null_checks += [null_check1]
for c in node:
if isinstance(c, (ID, StructRef)):
null_check1 = self.__is_null_check(c)
if null_check1[0] is not None:
null_checks += [null_check1]
else:
self.generic_visit(node)
if len(null_checks) == 0:
return
parent_node = self.__parenter.get_first_parent_of_type(node, ['If', 'For', 'While', 'DoWhile'])
print()
print(parent_node.cond)
for (null_check, _) in null_checks:
print("THE NEXT ARE:")
print(self.__parenter.get_all_next_siblings(parent_node.cond, null_check, deep_search=True))
def visit_UnaryOp(self, node):
null_checks = []
if node.op == "!":
null_check1 = self.__is_null_check(node)
if null_check1[0] is not None:
null_checks += [null_check1]
else:
self.generic_visit(node)
if len(null_checks) == 0:
return
parent_node = self.__parenter.get_first_parent_of_type(node, ['If', 'For', 'While', 'DoWhile'])
print()
print(node)
for (null_check, _) in null_checks:
print("THE NEXT ARE:")
print(self.__parenter.get_all_next_siblings(parent_node.cond, null_check, deep_search=True))
"""
def visit_If(self, node):
analyzer = ConditionAnalyzer(node.cond)
analyzer.analyze()
print(analyzer)
def visit_FileAST(self, node):
self.__parenter = NodeParenter.get_instance(node)
self.generic_visit(node)
class TestBrokenNullCheck(BaseTest):
def setUp(self):
self._rule_instance = RuleBrokenNullCheck(self._reporter)
def test1(self):
self._tested_code = """
typedef struct{
int x;
} my_type;
int main(){
my_type *a;
if( b || a->x->y && a && (a->y==1 || toto) || func(a,b)==1 || tab[1][2] == 10) ;
}
"""
self._run_rule()
self.expect_no_error("Expression used as condition OK")
# if !a && a->x
# if a==NULL
|
[
"clinter.rules.BaseRule.__init__",
"ConditionAnalzer.ConditionAnalyzer",
"clinter.NodeParenter.NodeParenter.get_instance"
] |
[((312, 355), 'clinter.rules.BaseRule.__init__', 'BaseRule.__init__', (['self', '"""static"""', 'reporter'], {}), "(self, 'static', reporter)\n", (329, 355), False, 'from clinter.rules import BaseRule, BaseTest, RuleExecutionError\n'), ((3560, 3588), 'ConditionAnalzer.ConditionAnalyzer', 'ConditionAnalyzer', (['node.cond'], {}), '(node.cond)\n', (3577, 3588), False, 'from ConditionAnalzer import ConditionAnalyzer\n'), ((3703, 3734), 'clinter.NodeParenter.NodeParenter.get_instance', 'NodeParenter.get_instance', (['node'], {}), '(node)\n', (3728, 3734), False, 'from clinter.NodeParenter import NodeParenter\n')]
|
from tbraille import tbraille
def test_rendering():
# test with default raster font
expected_result = '⢠⡄⠀⠀⠀⠀⠀⠀⠀⢠⡄\n⢹⡏⣁⣾⠽⠆⠺⢯⣅⢹⡏⣁\n⠀⠉⠁⠈⠉⠁⠉⠉⠁⠀⠉⠁\n\n⣰⣆⡀⢀⣀⠀⢀⣀⡀⣰⣆⡀\n⠸⣇⡤⢿⣚⡃⣙⣳⡦⠸⣇⡤' # noqa
result = tbraille(fontname=None, size=None, text='test\ntest')
print(result)
assert result == expected_result
|
[
"tbraille.tbraille"
] |
[((205, 258), 'tbraille.tbraille', 'tbraille', ([], {'fontname': 'None', 'size': 'None', 'text': '"""test\ntest"""'}), "(fontname=None, size=None, text='test\\ntest')\n", (213, 258), False, 'from tbraille import tbraille\n')]
|
# (C) Copyright 1996- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
import json
import os
import pytest
import yaml
from pyaviso import logger, user_config
from pyaviso.authentication import auth
from pyaviso.engine import engine_factory as ef
from pyaviso.event_listeners import event_listener_factory as elf
@pytest.fixture()
def conf() -> user_config.UserConfig: # this automatically configure the logging
c = user_config.UserConfig(conf_path="tests/config.yaml")
return c
@pytest.fixture()
def schema(conf):
# Load test schema
with open("tests/unit/fixtures/listener_schema.json") as schema:
return json.load(schema)
def test_empty_file(conf: user_config.UserConfig, schema):
logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
# create the notification listener factory
authenticator = auth.Auth.get_auth(conf)
engine_factory: ef.EngineFactory = ef.EngineFactory(conf.notification_engine, authenticator)
listener_factory = elf.EventListenerFactory(engine_factory, schema)
# open the listener yaml file
with open("tests/unit/fixtures/bad_listeners/empty.yaml", "r") as f:
listeners_dict = yaml.safe_load(f.read())
# parse it
try:
listener_factory.create_listeners(listeners_dict)
except AssertionError as e:
assert e.args[0] == "Event listeners definition cannot be empty"
def test_no_listeners(conf: user_config.UserConfig, schema):
logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
# create the notification listener factory
authenticator = auth.Auth.get_auth(conf)
engine_factory: ef.EngineFactory = ef.EngineFactory(conf.notification_engine, authenticator)
listener_factory = elf.EventListenerFactory(engine_factory, schema)
# open the listener yaml file
with open("tests/unit/fixtures/bad_listeners/noListeners.yaml", "r") as f:
listeners_dict = yaml.safe_load(f.read())
# parse it
try:
listener_factory.create_listeners(listeners_dict)
except AssertionError as e:
assert e.args[0] == "Event listeners definition must start with the keyword 'listeners'"
def test_bad_tree_structure(conf: user_config.UserConfig, schema):
logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
# create the notification listener factory
authenticator = auth.Auth.get_auth(conf)
engine_factory: ef.EngineFactory = ef.EngineFactory(conf.notification_engine, authenticator)
listener_factory = elf.EventListenerFactory(engine_factory, schema)
# open the listener yaml file
with open("tests/unit/fixtures/bad_listeners/badTree.yaml", "r") as f:
listeners_dict = yaml.safe_load(f.read())
# parse it
try:
listener_factory.create_listeners(listeners_dict)
except AssertionError as e:
assert e.args[0] == "Wrong file structure"
def test_bad_attribute(conf: user_config.UserConfig, schema):
logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
# create the notification listener factory
authenticator = auth.Auth.get_auth(conf)
engine_factory: ef.EngineFactory = ef.EngineFactory(conf.notification_engine, authenticator)
listener_factory = elf.EventListenerFactory(engine_factory, schema)
# open the listener yaml file
with open("tests/unit/fixtures/bad_listeners/badAttribute.yaml", "r") as f:
listeners_dict = yaml.safe_load(f.read())
# parse it
try:
listener_factory.create_listeners(listeners_dict)
except AssertionError as e:
assert e.args[0] == "Key day is not allowed"
def test_bad_format(conf: user_config.UserConfig, schema):
logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
# create the notification listener factory
authenticator = auth.Auth.get_auth(conf)
engine_factory: ef.EngineFactory = ef.EngineFactory(conf.notification_engine, authenticator)
listener_factory = elf.EventListenerFactory(engine_factory, schema)
# open the listener yaml file
with open("tests/unit/fixtures/bad_listeners/badFormat.yaml", "r") as f:
listeners_dict = yaml.safe_load(f.read())
# parse it
try:
listener_factory.create_listeners(listeners_dict)
except ValueError as e:
assert e.args[0] == "Value 2021-01-01 is not valid for key date"
def test_no_trigger(conf: user_config.UserConfig, schema):
logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
# create the notification listener factory
authenticator = auth.Auth.get_auth(conf)
engine_factory: ef.EngineFactory = ef.EngineFactory(conf.notification_engine, authenticator)
listener_factory = elf.EventListenerFactory(engine_factory, schema)
# open the listener yaml file
with open("tests/unit/fixtures/bad_listeners/noTrigger.yaml", "r") as f:
listeners_dict = yaml.safe_load(f.read())
# parse it
try:
listener_factory.create_listeners(listeners_dict)
except AssertionError as e:
assert e.args[0] == "At least one trigger must be defined"
def test_bad_trigger_type(conf: user_config.UserConfig, schema):
logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
# create the notification listener factory
authenticator = auth.Auth.get_auth(conf)
engine_factory: ef.EngineFactory = ef.EngineFactory(conf.notification_engine, authenticator)
listener_factory = elf.EventListenerFactory(engine_factory, schema)
# open the listener yaml file
with open("tests/unit/fixtures/bad_listeners/badTriggerType.yaml", "r") as f:
listeners_dict = yaml.safe_load(f.read())
# parse it
try:
listener_factory.create_listeners(listeners_dict)
except KeyError as e:
assert e.args[0] == "Trigger type logger not recognised"
def test_bad_trigger(conf: user_config.UserConfig, schema):
logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
# create the notification listener factory
authenticator = auth.Auth.get_auth(conf)
engine_factory: ef.EngineFactory = ef.EngineFactory(conf.notification_engine, authenticator)
listener_factory = elf.EventListenerFactory(engine_factory, schema)
# open the listener yaml file
with open("tests/unit/fixtures/bad_listeners/badTrigger.yaml", "r") as f:
listeners_dict = yaml.safe_load(f.read())
# parse it
try:
listener_factory.create_listeners(listeners_dict)
except AssertionError as e:
assert e.args[0] == "'type' is a mandatory field in trigger"
def test_single_listener_complete(conf: user_config.UserConfig, schema):
logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
# create the notification listener factory
authenticator = auth.Auth.get_auth(conf)
engine_factory: ef.EngineFactory = ef.EngineFactory(conf.notification_engine, authenticator)
listener_factory = elf.EventListenerFactory(engine_factory, schema)
# open the listener yaml file
with open("tests/unit/fixtures/good_listeners/complete_flight_listener.yaml", "r") as f:
listeners_dict = yaml.safe_load(f.read())
# parse it
listeners: list = listener_factory.create_listeners(listeners_dict)
assert listeners.__len__() == 1
listener = listeners.pop()
assert listener.keys is not None
assert listener.keys[0] # this will fail if the path was an empty string
def test_single_listener(conf: user_config.UserConfig, schema):
logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
# create the notification listener factory
authenticator = auth.Auth.get_auth(conf)
engine_factory: ef.EngineFactory = ef.EngineFactory(conf.notification_engine, authenticator)
listener_factory = elf.EventListenerFactory(engine_factory, schema)
# open the listener yaml file
with open("tests/unit/fixtures/good_listeners/basic_flight_listener.yaml", "r") as f:
listeners_dict = yaml.safe_load(f.read())
# parse it
listeners: list = listener_factory.create_listeners(listeners_dict)
assert listeners.__len__() == 1
listener = listeners.pop()
assert len(listener.keys) == 2
assert listener.keys[0] == "/tmp/aviso/flight/Italy/"
def test_multiple_listener(conf: user_config.UserConfig, schema):
logger.debug(os.environ.get("PYTEST_CURRENT_TEST").split(":")[-1].split(" ")[0])
# create the notification listener factory
authenticator = auth.Auth.get_auth(conf)
engine_factory: ef.EngineFactory = ef.EngineFactory(conf.notification_engine, authenticator)
listener_factory = elf.EventListenerFactory(engine_factory, schema)
# open the listener yaml file
with open("tests/unit/fixtures/good_listeners/multiple_flight_listeners.yaml", "r") as f:
listeners_dict = yaml.safe_load(f.read())
# parse it
listeners: list = listener_factory.create_listeners(listeners_dict)
assert listeners.__len__() == 3
for listener in listeners:
assert listener.keys is not None
assert listener.keys[0] # this will fail if the path was an empty string
|
[
"pyaviso.authentication.auth.Auth.get_auth",
"pyaviso.event_listeners.event_listener_factory.EventListenerFactory",
"json.load",
"pytest.fixture",
"os.environ.get",
"pyaviso.user_config.UserConfig",
"pyaviso.engine.engine_factory.EngineFactory"
] |
[((626, 642), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (640, 642), False, 'import pytest\n'), ((803, 819), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (817, 819), False, 'import pytest\n'), ((733, 786), 'pyaviso.user_config.UserConfig', 'user_config.UserConfig', ([], {'conf_path': '"""tests/config.yaml"""'}), "(conf_path='tests/config.yaml')\n", (755, 786), False, 'from pyaviso import logger, user_config\n'), ((1176, 1200), 'pyaviso.authentication.auth.Auth.get_auth', 'auth.Auth.get_auth', (['conf'], {}), '(conf)\n', (1194, 1200), False, 'from pyaviso.authentication import auth\n'), ((1240, 1297), 'pyaviso.engine.engine_factory.EngineFactory', 'ef.EngineFactory', (['conf.notification_engine', 'authenticator'], {}), '(conf.notification_engine, authenticator)\n', (1256, 1297), True, 'from pyaviso.engine import engine_factory as ef\n'), ((1321, 1369), 'pyaviso.event_listeners.event_listener_factory.EventListenerFactory', 'elf.EventListenerFactory', (['engine_factory', 'schema'], {}), '(engine_factory, schema)\n', (1345, 1369), True, 'from pyaviso.event_listeners import event_listener_factory as elf\n'), ((1929, 1953), 'pyaviso.authentication.auth.Auth.get_auth', 'auth.Auth.get_auth', (['conf'], {}), '(conf)\n', (1947, 1953), False, 'from pyaviso.authentication import auth\n'), ((1993, 2050), 'pyaviso.engine.engine_factory.EngineFactory', 'ef.EngineFactory', (['conf.notification_engine', 'authenticator'], {}), '(conf.notification_engine, authenticator)\n', (2009, 2050), True, 'from pyaviso.engine import engine_factory as ef\n'), ((2074, 2122), 'pyaviso.event_listeners.event_listener_factory.EventListenerFactory', 'elf.EventListenerFactory', (['engine_factory', 'schema'], {}), '(engine_factory, schema)\n', (2098, 2122), True, 'from pyaviso.event_listeners import event_listener_factory as elf\n'), ((2718, 2742), 'pyaviso.authentication.auth.Auth.get_auth', 'auth.Auth.get_auth', (['conf'], {}), '(conf)\n', (2736, 2742), False, 'from pyaviso.authentication import auth\n'), ((2782, 2839), 'pyaviso.engine.engine_factory.EngineFactory', 'ef.EngineFactory', (['conf.notification_engine', 'authenticator'], {}), '(conf.notification_engine, authenticator)\n', (2798, 2839), True, 'from pyaviso.engine import engine_factory as ef\n'), ((2863, 2911), 'pyaviso.event_listeners.event_listener_factory.EventListenerFactory', 'elf.EventListenerFactory', (['engine_factory', 'schema'], {}), '(engine_factory, schema)\n', (2887, 2911), True, 'from pyaviso.event_listeners import event_listener_factory as elf\n'), ((3452, 3476), 'pyaviso.authentication.auth.Auth.get_auth', 'auth.Auth.get_auth', (['conf'], {}), '(conf)\n', (3470, 3476), False, 'from pyaviso.authentication import auth\n'), ((3516, 3573), 'pyaviso.engine.engine_factory.EngineFactory', 'ef.EngineFactory', (['conf.notification_engine', 'authenticator'], {}), '(conf.notification_engine, authenticator)\n', (3532, 3573), True, 'from pyaviso.engine import engine_factory as ef\n'), ((3597, 3645), 'pyaviso.event_listeners.event_listener_factory.EventListenerFactory', 'elf.EventListenerFactory', (['engine_factory', 'schema'], {}), '(engine_factory, schema)\n', (3621, 3645), True, 'from pyaviso.event_listeners import event_listener_factory as elf\n'), ((4190, 4214), 'pyaviso.authentication.auth.Auth.get_auth', 'auth.Auth.get_auth', (['conf'], {}), '(conf)\n', (4208, 4214), False, 'from pyaviso.authentication import auth\n'), ((4254, 4311), 'pyaviso.engine.engine_factory.EngineFactory', 'ef.EngineFactory', (['conf.notification_engine', 
'authenticator'], {}), '(conf.notification_engine, authenticator)\n', (4270, 4311), True, 'from pyaviso.engine import engine_factory as ef\n'), ((4335, 4383), 'pyaviso.event_listeners.event_listener_factory.EventListenerFactory', 'elf.EventListenerFactory', (['engine_factory', 'schema'], {}), '(engine_factory, schema)\n', (4359, 4383), True, 'from pyaviso.event_listeners import event_listener_factory as elf\n'), ((4941, 4965), 'pyaviso.authentication.auth.Auth.get_auth', 'auth.Auth.get_auth', (['conf'], {}), '(conf)\n', (4959, 4965), False, 'from pyaviso.authentication import auth\n'), ((5005, 5062), 'pyaviso.engine.engine_factory.EngineFactory', 'ef.EngineFactory', (['conf.notification_engine', 'authenticator'], {}), '(conf.notification_engine, authenticator)\n', (5021, 5062), True, 'from pyaviso.engine import engine_factory as ef\n'), ((5086, 5134), 'pyaviso.event_listeners.event_listener_factory.EventListenerFactory', 'elf.EventListenerFactory', (['engine_factory', 'schema'], {}), '(engine_factory, schema)\n', (5110, 5134), True, 'from pyaviso.event_listeners import event_listener_factory as elf\n'), ((5696, 5720), 'pyaviso.authentication.auth.Auth.get_auth', 'auth.Auth.get_auth', (['conf'], {}), '(conf)\n', (5714, 5720), False, 'from pyaviso.authentication import auth\n'), ((5760, 5817), 'pyaviso.engine.engine_factory.EngineFactory', 'ef.EngineFactory', (['conf.notification_engine', 'authenticator'], {}), '(conf.notification_engine, authenticator)\n', (5776, 5817), True, 'from pyaviso.engine import engine_factory as ef\n'), ((5841, 5889), 'pyaviso.event_listeners.event_listener_factory.EventListenerFactory', 'elf.EventListenerFactory', (['engine_factory', 'schema'], {}), '(engine_factory, schema)\n', (5865, 5889), True, 'from pyaviso.event_listeners import event_listener_factory as elf\n'), ((6443, 6467), 'pyaviso.authentication.auth.Auth.get_auth', 'auth.Auth.get_auth', (['conf'], {}), '(conf)\n', (6461, 6467), False, 'from pyaviso.authentication import auth\n'), ((6507, 6564), 'pyaviso.engine.engine_factory.EngineFactory', 'ef.EngineFactory', (['conf.notification_engine', 'authenticator'], {}), '(conf.notification_engine, authenticator)\n', (6523, 6564), True, 'from pyaviso.engine import engine_factory as ef\n'), ((6588, 6636), 'pyaviso.event_listeners.event_listener_factory.EventListenerFactory', 'elf.EventListenerFactory', (['engine_factory', 'schema'], {}), '(engine_factory, schema)\n', (6612, 6636), True, 'from pyaviso.event_listeners import event_listener_factory as elf\n'), ((7209, 7233), 'pyaviso.authentication.auth.Auth.get_auth', 'auth.Auth.get_auth', (['conf'], {}), '(conf)\n', (7227, 7233), False, 'from pyaviso.authentication import auth\n'), ((7273, 7330), 'pyaviso.engine.engine_factory.EngineFactory', 'ef.EngineFactory', (['conf.notification_engine', 'authenticator'], {}), '(conf.notification_engine, authenticator)\n', (7289, 7330), True, 'from pyaviso.engine import engine_factory as ef\n'), ((7354, 7402), 'pyaviso.event_listeners.event_listener_factory.EventListenerFactory', 'elf.EventListenerFactory', (['engine_factory', 'schema'], {}), '(engine_factory, schema)\n', (7378, 7402), True, 'from pyaviso.event_listeners import event_listener_factory as elf\n'), ((8067, 8091), 'pyaviso.authentication.auth.Auth.get_auth', 'auth.Auth.get_auth', (['conf'], {}), '(conf)\n', (8085, 8091), False, 'from pyaviso.authentication import auth\n'), ((8131, 8188), 'pyaviso.engine.engine_factory.EngineFactory', 'ef.EngineFactory', (['conf.notification_engine', 'authenticator'], {}), 
'(conf.notification_engine, authenticator)\n', (8147, 8188), True, 'from pyaviso.engine import engine_factory as ef\n'), ((8212, 8260), 'pyaviso.event_listeners.event_listener_factory.EventListenerFactory', 'elf.EventListenerFactory', (['engine_factory', 'schema'], {}), '(engine_factory, schema)\n', (8236, 8260), True, 'from pyaviso.event_listeners import event_listener_factory as elf\n'), ((8902, 8926), 'pyaviso.authentication.auth.Auth.get_auth', 'auth.Auth.get_auth', (['conf'], {}), '(conf)\n', (8920, 8926), False, 'from pyaviso.authentication import auth\n'), ((8966, 9023), 'pyaviso.engine.engine_factory.EngineFactory', 'ef.EngineFactory', (['conf.notification_engine', 'authenticator'], {}), '(conf.notification_engine, authenticator)\n', (8982, 9023), True, 'from pyaviso.engine import engine_factory as ef\n'), ((9047, 9095), 'pyaviso.event_listeners.event_listener_factory.EventListenerFactory', 'elf.EventListenerFactory', (['engine_factory', 'schema'], {}), '(engine_factory, schema)\n', (9071, 9095), True, 'from pyaviso.event_listeners import event_listener_factory as elf\n'), ((945, 962), 'json.load', 'json.load', (['schema'], {}), '(schema)\n', (954, 962), False, 'import json\n'), ((1041, 1078), 'os.environ.get', 'os.environ.get', (['"""PYTEST_CURRENT_TEST"""'], {}), "('PYTEST_CURRENT_TEST')\n", (1055, 1078), False, 'import os\n'), ((1794, 1831), 'os.environ.get', 'os.environ.get', (['"""PYTEST_CURRENT_TEST"""'], {}), "('PYTEST_CURRENT_TEST')\n", (1808, 1831), False, 'import os\n'), ((2583, 2620), 'os.environ.get', 'os.environ.get', (['"""PYTEST_CURRENT_TEST"""'], {}), "('PYTEST_CURRENT_TEST')\n", (2597, 2620), False, 'import os\n'), ((3317, 3354), 'os.environ.get', 'os.environ.get', (['"""PYTEST_CURRENT_TEST"""'], {}), "('PYTEST_CURRENT_TEST')\n", (3331, 3354), False, 'import os\n'), ((4055, 4092), 'os.environ.get', 'os.environ.get', (['"""PYTEST_CURRENT_TEST"""'], {}), "('PYTEST_CURRENT_TEST')\n", (4069, 4092), False, 'import os\n'), ((4806, 4843), 'os.environ.get', 'os.environ.get', (['"""PYTEST_CURRENT_TEST"""'], {}), "('PYTEST_CURRENT_TEST')\n", (4820, 4843), False, 'import os\n'), ((5561, 5598), 'os.environ.get', 'os.environ.get', (['"""PYTEST_CURRENT_TEST"""'], {}), "('PYTEST_CURRENT_TEST')\n", (5575, 5598), False, 'import os\n'), ((6308, 6345), 'os.environ.get', 'os.environ.get', (['"""PYTEST_CURRENT_TEST"""'], {}), "('PYTEST_CURRENT_TEST')\n", (6322, 6345), False, 'import os\n'), ((7074, 7111), 'os.environ.get', 'os.environ.get', (['"""PYTEST_CURRENT_TEST"""'], {}), "('PYTEST_CURRENT_TEST')\n", (7088, 7111), False, 'import os\n'), ((7932, 7969), 'os.environ.get', 'os.environ.get', (['"""PYTEST_CURRENT_TEST"""'], {}), "('PYTEST_CURRENT_TEST')\n", (7946, 7969), False, 'import os\n'), ((8767, 8804), 'os.environ.get', 'os.environ.get', (['"""PYTEST_CURRENT_TEST"""'], {}), "('PYTEST_CURRENT_TEST')\n", (8781, 8804), False, 'import os\n')]
|
#!/usr/bin/env python
"""
This script takes a FASTA file generated by QIIME's split_libraries_fastq.py
(with original FASTQ reads generated by the ART read simulator) and creates
an OTU map using the sequence IDs storing the read identifier and the parent
reference sequence.
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2015--, <NAME>
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import sys
import getopt
import hashlib
import re
from skbio.parse.sequences import parse_fasta
def parse_fasta_to_otumap(input_fasta_fp, output_otu_fp):
""" Parse input FASTA file (having QIIME's label format)
and output OTU map
"""
# dict to store OTU ids and corresponding reads
otu_map = {}
# parse seq_id and ref_id from FASTA file
with open(input_fasta_fp, 'U') as fasta_fp:
for label, seq in parse_fasta(fasta_fp):
seq_id = re.split(' ', label)[0]
ref_id = re.split('-| ', label)[1]
if ref_id in otu_map:
otu_map[ref_id].append(seq_id)
else:
otu_map[ref_id] = [seq_id]
# write the map to out file
with open(output_otu_fp, 'w') as otu_map_fp:
for key, value in otu_map.iteritems():
otu_map_fp.write(key + '\t')
for x in value:
otu_map_fp.write(x + '\t')
otu_map_fp.write('\n')
def main(argv):
    parse_fasta_to_otumap(input_fasta_fp=argv[0],
                          output_otu_fp=argv[1])
if __name__ == "__main__":
main(sys.argv[1:])
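# Illustrative sketch (editor's addition; the exact label layout below is an
# assumption, not something stated in the original script). The two re.split
# calls above expect headers of the form "<read_id>-<reference_id> <free text>":
#
#     label = 's1_101-ref42 orig_bc=ACGT'
#     re.split(' ', label)[0]    # -> 's1_101-ref42'  (seq_id)
#     re.split('-| ', label)[1]  # -> 'ref42'         (ref_id)
#
# which would add the tab-separated line "ref42\ts1_101-ref42" to the OTU map.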
|
[
"skbio.parse.sequences.parse_fasta",
"re.split"
] |
[((1055, 1076), 'skbio.parse.sequences.parse_fasta', 'parse_fasta', (['fasta_fp'], {}), '(fasta_fp)\n', (1066, 1076), False, 'from skbio.parse.sequences import parse_fasta\n'), ((1099, 1119), 're.split', 're.split', (['""" """', 'label'], {}), "(' ', label)\n", (1107, 1119), False, 'import re\n'), ((1144, 1166), 're.split', 're.split', (['"""-| """', 'label'], {}), "('-| ', label)\n", (1152, 1166), False, 'import re\n')]
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import (
ChatViewSet,
ChatMessageViewSet,
ChatMembersViewSet,
)
router = DefaultRouter()
router.register('', ChatViewSet, basename='chat')
router.register(r'(?P<chat_pk>\w+)/messages', ChatMessageViewSet, basename='message')
router.register(r'(?P<chat_pk>\w+)/members', ChatMembersViewSet, basename='member')
urlpatterns = [
path('', include(router.urls)),
]
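# Rough shape of the URLs produced by the registrations above (a sketch based on
# DefaultRouter defaults; the exact patterns depend on where this module is included):
#
#     /                       -> ChatViewSet        (list/create, plus /<pk>/ detail)
#     /<chat_pk>/messages/    -> ChatMessageViewSet
#     /<chat_pk>/members/     -> ChatMembersViewSet
#
# The nested viewsets can scope their querysets with self.kwargs['chat_pk'],
# which DRF fills from the named regex group in the route prefix.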
|
[
"rest_framework.routers.DefaultRouter",
"django.urls.include"
] |
[((186, 201), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (199, 201), False, 'from rest_framework.routers import DefaultRouter\n'), ((453, 473), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (460, 473), False, 'from django.urls import path, include\n')]
|
# coding: utf-8
import json
import os
import boto3
import pandas as pd
def main():
price_list = []
pricing = boto3.client('pricing', region_name='us-east-1')
paginator = pricing.get_paginator('get_products')
response_iterator = paginator.paginate(
ServiceCode='AmazonEC2',
Filters = [
{'Type': 'TERM_MATCH', 'Field': 'operatingSystem', 'Value': 'Linux'},
{'Type': 'TERM_MATCH', 'Field': 'termType', 'Value': 'OnDemand'},
{'Type': 'TERM_MATCH', 'Field': 'tenancy', 'Value': 'Shared'},
{'Type': 'TERM_MATCH', 'Field': 'preInstalledSw', 'Value': 'NA'},
{'Type': 'TERM_MATCH', 'Field': 'capacitystatus', 'Value': 'Used'},
]
)
for page in response_iterator:
for price in page['PriceList']:
price_list.append(json.loads(price))
price_summary_list = []
df = pd.DataFrame()
for price in price_list:
attributes = dict()
for k,v in price['product']['attributes'].items():
attributes[k] = v
attributes['sku'] = price['product']['sku']
for k1,v1 in price['terms']['OnDemand'].items():
for k2,v2 in v1['priceDimensions'].items():
attributes['OnDemandPrices'] = v2['pricePerUnit']['USD']
attributes['OnDemandPricesUnit'] = v2['unit']
attributes['OnDemandPriceDescription'] = v2['description']
price_summary_list.append(attributes)
ds = pd.Series(attributes)
df = df.append(ds, ignore_index=True)
df.to_csv('ec2-price.csv')
if __name__=='__main__':
main()
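# Optional refinement (editor's sketch, not part of the original run): the Price
# List API also accepts a 'location' TERM_MATCH filter, which keeps the response
# (and the resulting CSV) much smaller when only one region is of interest, e.g.:
#
#     {'Type': 'TERM_MATCH', 'Field': 'location', 'Value': 'US East (N. Virginia)'}
#
# added to the Filters list above. The pricing endpoint itself is only served
# from a few regions, which is why the client is pinned to us-east-1.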
|
[
"pandas.DataFrame",
"pandas.Series",
"boto3.client",
"json.loads"
] |
[((120, 168), 'boto3.client', 'boto3.client', (['"""pricing"""'], {'region_name': '"""us-east-1"""'}), "('pricing', region_name='us-east-1')\n", (132, 168), False, 'import boto3\n'), ((892, 906), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (904, 906), True, 'import pandas as pd\n'), ((1488, 1509), 'pandas.Series', 'pd.Series', (['attributes'], {}), '(attributes)\n', (1497, 1509), True, 'import pandas as pd\n'), ((835, 852), 'json.loads', 'json.loads', (['price'], {}), '(price)\n', (845, 852), False, 'import json\n')]
|
from PyQt5.QtWidgets import QTreeView, QHeaderView, QStyledItemDelegate, QStyleOptionViewItem
from PyQt5.QtGui import QStandardItemModel, QStandardItem, QImage
from PyQt5.QtCore import Qt
class StandardImageItem(QStandardItem):
path_visible = './icons/visibility/visible.png'
path_invisible = './icons/visibility/invisible.png'
def __init__(self, visible=True):
super().__init__()
self.visible = visible
self.set_image()
self.setEditable(False)
def set_image(self):
if self.visible:
image = QImage(self.path_visible)
else:
image = QImage(self.path_invisible)
self.setData(image, Qt.DecorationRole)
def toggle_visibility(self):
self.visible = not self.visible
self.set_image()
class IconCenterDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super(IconCenterDelegate, self).initStyleOption(option, index)
option.decorationAlignment = (Qt.AlignHCenter | Qt.AlignBottom)
option.decorationPosition = QStyleOptionViewItem.Top
class ShapesTree(QTreeView):
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.model = QStandardItemModel()
self.setModel(self.model)
self.headers = ['Shape', 'Visible', 'Unique ID', 'Parent ID', 'Primal Shape ID', 'Axis System']
self.model.setHorizontalHeaderLabels(self.headers)
self.header().setDefaultAlignment(Qt.AlignCenter)
self.header().setStretchLastSection(False)
self.header().setSectionResizeMode(0, QHeaderView.Stretch)
self.setColumnWidth(1, 60)
self.hideColumn(2)
self.hideColumn(3)
self.hideColumn(4)
self.hideColumn(5)
self.expandAll()
delegate = IconCenterDelegate(self)
self.setItemDelegateForColumn(1, delegate)
self.doubleClicked.connect(self.handle_double_click)
def get_selected_row(self):
data = {}
self.setColumnHidden(2, False)
self.setColumnHidden(3, False)
indices = self.selectedIndexes()
for i, index in enumerate(indices):
item = self.model.itemFromIndex(index)
data[self.headers[i]] = item.data(0)
self.hideColumn(2)
self.hideColumn(3)
return data
def get_item_by_value(self, column, value):
def check_children(parent):
for i in range(parent.rowCount()):
child = parent.child(i, 0)
if parent.child(i, column).data(0) == str(value):
return child
else:
item = check_children(child)
if item:
return item
root = self.model.invisibleRootItem()
return check_children(root)
def create_new_uid(self):
def check_children(parent):
nonlocal uid
for i in range(parent.rowCount()):
child = parent.child(i, 0)
child_uid = int(parent.child(i, 2).data(0))
if child_uid > uid:
uid = child_uid
check_children(child)
root = self.model.invisibleRootItem()
uid = 0
check_children(root)
return uid + 1
def get_item_by_id(self, uid):
return self.get_item_by_value(2, uid)
def append_child_to_selected_row(self, data):
unique_id = self.create_new_uid()
data["Unique ID"] = str(unique_id)
indices = self.selectedIndexes()
parent = self.model.itemFromIndex(indices[0])
pid_index = indices[0].siblingAtColumn(2)
parent_id = self.model.itemFromIndex(pid_index).data(0)
data["Parent ID"] = str(parent_id)
self.append_data(parent, data)
def append_new_data(self, parent_id, data, axis_system=False, primal_child=0):
data["Parent ID"] = str(parent_id)
unique_id = self.create_new_uid()
data["Unique ID"] = str(unique_id)
if parent_id != 0:
parent = self.get_item_by_id(parent_id)
else:
parent = self.model.invisibleRootItem()
data["Primal ID"] = unique_id + primal_child
data["Axis System"] = axis_system
self.append_data(parent, data)
return unique_id
@staticmethod
def append_data(parent, data):
parent.appendRow([
QStandardItem(data['Shape']),
StandardImageItem(data['Visible']),
QStandardItem(str(data['Unique ID'])),
QStandardItem(str(data['Parent ID'])),
QStandardItem(str(data['Primal ID'])),
QStandardItem(str(data['Axis System']))
])
def append_shape(self, shape, axis_data=None):
unique_ids = []
if shape == "point":
unique_ids.append(self.append_new_data(0, {'Shape': 'Point', 'Visible': True}))
elif shape == "line":
unique_ids.append(self.append_new_data(0, {'Shape': 'Line', 'Visible': True}))
elif shape == "polyline":
unique_ids.append(self.append_new_data(0, {'Shape': 'Polyline', 'Visible': True}))
elif shape == "bezier":
self.append_new_data(0, {'Shape': 'Bezier', 'Visible': True}, primal_child=2)
last_id = self.create_new_uid() - 1
unique_ids.append(self.append_new_data(last_id, {'Shape': 'Control Polyline', 'Visible': True}))
unique_ids.append(self.append_new_data(last_id, {'Shape': 'Spline', 'Visible': True}))
elif shape == "comp_quad_bezier":
self.append_new_data(0, {'Shape': 'Quadratic Bezier', 'Visible': True}, primal_child=2)
last_id = self.create_new_uid() - 1
unique_ids.append(self.append_new_data(last_id, {'Shape': 'Control Polyline', 'Visible': True}))
unique_ids.append(self.append_new_data(last_id, {'Shape': 'Spline', 'Visible': True}))
elif shape == "comp_cub_bezier":
self.append_new_data(0, {'Shape': 'Cubic Bezier', 'Visible': True}, primal_child=2)
last_id = self.create_new_uid() - 1
unique_ids.append(self.append_new_data(last_id, {'Shape': 'Control Polyline', 'Visible': True}))
unique_ids.append(self.append_new_data(last_id, {'Shape': 'Spline', 'Visible': True}))
elif shape == "axis_system":
num_of_points = axis_data["points"]
num_of_levels = axis_data["levels"]
unique_ids.append(self.append_new_data(0, {'Shape': 'Axis System', 'Visible': True}, axis_system=True))
last_id = self.create_new_uid() - 1
for i in range(num_of_points):
unique_ids.append(self.append_new_data(last_id, {'Shape': 'Point {}'.format(str(i)), 'Visible': True}))
for i in range(num_of_levels // 2):
unique_ids.append(self.append_new_data(last_id, {'Shape': 'Level X{}'.format(str(i)), 'Visible': True}))
for i in range(num_of_levels // 2):
unique_ids.append(self.append_new_data(last_id, {'Shape': 'Level Y{}'.format(str(i)), 'Visible': True}))
return unique_ids
def handle_double_click(self, item_index):
item_class = type(item_index.model().itemFromIndex(item_index)).__name__
if item_class == "StandardImageItem":
item_index.model().itemFromIndex(item_index).toggle_visibility()
children_count = self.model.itemFromIndex(self.currentIndex().siblingAtColumn(0)).rowCount()
if children_count == 0:
tree_id = int(self.model.itemFromIndex(self.currentIndex().siblingAtColumn(2)).data(0))
self.parent.canvas.toggle_visibility(tree_id)
for i in range(children_count):
tree_id = int(self.model.itemFromIndex(self.currentIndex().siblingAtColumn(0)).child(i, 2).data(0))
self.parent.canvas.toggle_visibility(tree_id)
def get_shapes(self):
root = self.model.invisibleRootItem()
return [{'Name': root.child(i, 0).data(0),
'id': int(root.child(i, 4).data(0))} for i in range(root.rowCount()) if
root.child(i, 5).data(0) == 'False']
def get_axis_systems(self):
root = self.model.invisibleRootItem()
return [{'Name': root.child(i, 0).data(0),
'id': int(root.child(i, 2).data(0))} for i in range(root.rowCount()) if
root.child(i, 5).data(0) == 'True']
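# Minimal usage sketch (editor's addition; assumes `parent` is a widget exposing
# a `canvas` object with a `toggle_visibility(tree_id)` method, which is what
# handle_double_click above relies on):
#
#     tree = ShapesTree(parent)
#     ids = tree.append_shape("bezier")   # -> [control polyline id, spline id]
#     item = tree.get_item_by_id(ids[0])  # look the row up again by unique id
#     shapes = tree.get_shapes()          # top-level rows that are not axis systems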
|
[
"PyQt5.QtGui.QStandardItem",
"PyQt5.QtGui.QStandardItemModel",
"PyQt5.QtGui.QImage"
] |
[((1244, 1264), 'PyQt5.QtGui.QStandardItemModel', 'QStandardItemModel', ([], {}), '()\n', (1262, 1264), False, 'from PyQt5.QtGui import QStandardItemModel, QStandardItem, QImage\n'), ((563, 588), 'PyQt5.QtGui.QImage', 'QImage', (['self.path_visible'], {}), '(self.path_visible)\n', (569, 588), False, 'from PyQt5.QtGui import QStandardItemModel, QStandardItem, QImage\n'), ((623, 650), 'PyQt5.QtGui.QImage', 'QImage', (['self.path_invisible'], {}), '(self.path_invisible)\n', (629, 650), False, 'from PyQt5.QtGui import QStandardItemModel, QStandardItem, QImage\n'), ((4438, 4466), 'PyQt5.QtGui.QStandardItem', 'QStandardItem', (["data['Shape']"], {}), "(data['Shape'])\n", (4451, 4466), False, 'from PyQt5.QtGui import QStandardItemModel, QStandardItem, QImage\n')]
|
# Copyright 2022 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import unittest
from keras import Input, Model
from keras.layers import Conv2D, Conv2DTranspose
import model_compression_toolkit as mct
from model_compression_toolkit import QuantizationConfig, QuantizationErrorMethod
from model_compression_toolkit import CoreConfig
from model_compression_toolkit.core.common.bias_correction.compute_bias_correction_of_graph import \
compute_bias_correction_of_graph
from model_compression_toolkit.core.common.constants import RANGE_MIN, RANGE_MAX
from model_compression_toolkit.core.common.mixed_precision.bit_width_setter import set_bit_widths
from model_compression_toolkit.core.common.post_training_quantization import _quantize_fixed_bit_widths_graph
from model_compression_toolkit.core.common.quantization.quantization_analyzer import analyzer_graph
from model_compression_toolkit.core.common.quantization.quantization_params_generation.qparams_computation import \
calculate_quantization_params
from model_compression_toolkit.core.common.quantization.set_node_quantization_config import \
set_quantization_configuration_to_graph
from model_compression_toolkit.core.common.model_collector import ModelCollector
from model_compression_toolkit.core.tpc_models.keras_tp_models.keras_default import generate_keras_default_tpc
from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
def get_random_weights(kernel, in_channels, out_channels):
return np.random.normal(size=[kernel, kernel, in_channels, out_channels])
def create_network():
num_conv_channels = 4
kernel = 3
conv_w1 = get_random_weights(kernel, num_conv_channels, num_conv_channels)
conv_w2 = get_random_weights(kernel, num_conv_channels, num_conv_channels)
inputs = Input(shape=(16, 16, num_conv_channels))
x = Conv2D(num_conv_channels, kernel, use_bias=False)(inputs)
outputs = Conv2DTranspose(num_conv_channels, kernel, use_bias=False)(x)
model = Model(inputs=inputs, outputs=outputs)
model.layers[1].set_weights([conv_w1])
model.layers[2].set_weights([conv_w2])
return model
class TestUniformRangeSelectionWeights(unittest.TestCase):
def test_per_channel_weights_uniform_range_selection_no_clipping(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.NOCLIPPING)
def test_weights_uniform_range_selection_no_clipping(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.NOCLIPPING, per_channel=False)
def test_per_channel_weights_uniform_range_selection_mse(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.MSE)
def test_weights_uniform_range_selection_mse(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.MSE, per_channel=False)
def test_per_channel_weights_uniform_range_selection_mae(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.MAE)
def test_weights_uniform_range_selection_mae(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.MAE, per_channel=False)
def test_per_channel_weights_uniform_range_selection_lp(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.LP)
def test_weights_uniform_range_selection_lp(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.LP, per_channel=False)
def test_per_channel_weights_uniform_range_selection_kl(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.KL)
def test_weights_uniform_range_selection_kl(self):
self.run_test_for_threshold_method(QuantizationErrorMethod.KL, per_channel=False)
def run_test_for_threshold_method(self, threshold_method, per_channel=True):
qc = QuantizationConfig(weights_error_method=threshold_method,
weights_per_channel_threshold=per_channel)
core_config = CoreConfig(n_iter=1, quantization_config=qc)
tp = generate_test_tp_model({
'weights_quantization_method': mct.target_platform.QuantizationMethod.UNIFORM})
tpc = generate_keras_default_tpc(name="uniform_range_selection_test", tp_model=tp)
fw_info = DEFAULT_KERAS_INFO
in_model = create_network()
keras_impl = KerasImplementation()
graph = keras_impl.model_reader(in_model, None) # model reading
graph.set_tpc(tpc)
graph.set_fw_info(fw_info)
graph = set_quantization_configuration_to_graph(graph=graph,
quant_config=core_config.quantization_config,
mixed_precision_enable=core_config.mixed_precision_enable)
for node in graph.nodes:
node.prior_info = keras_impl.get_node_prior_info(node=node,
fw_info=fw_info,
graph=graph)
analyzer_graph(keras_impl.attach_sc_to_node,
graph,
fw_info)
mi = ModelCollector(graph,
fw_info=DEFAULT_KERAS_INFO,
fw_impl=keras_impl)
for i in range(10):
mi.infer([np.random.randn(1, 16, 16, 4)])
calculate_quantization_params(graph,
fw_info,
fw_impl=keras_impl)
tg = compute_bias_correction_of_graph(graph,
fw_info,
keras_impl)
tg = set_bit_widths(core_config.mixed_precision_enable,
tg,
None)
quantized_model, user_info = _quantize_fixed_bit_widths_graph(False,
fw_info,
None,
lambda: [np.random.randn(1, 16, 16, 4)],
None,
tg,
keras_impl)
nodes_list = list(graph.nodes)
conv1_min = nodes_list[0].candidates_quantization_cfg[0].weights_quantization_cfg.weights_quantization_params[RANGE_MIN].flatten()
conv2_min = nodes_list[1].candidates_quantization_cfg[0].weights_quantization_cfg.weights_quantization_params[RANGE_MIN].flatten()
conv1_max = nodes_list[0].candidates_quantization_cfg[0].weights_quantization_cfg.weights_quantization_params[RANGE_MAX].flatten()
conv2_max = nodes_list[1].candidates_quantization_cfg[0].weights_quantization_cfg.weights_quantization_params[RANGE_MAX].flatten()
for range_min, range_max in list(zip(conv1_min, conv1_max)):
self.assertTrue(range_min <= 0 <= range_max,
msg=f"First conv layer quantization range ({range_min}, {range_max}) does not include 0")
for range_min, range_max in list(zip(conv2_min, conv2_max)):
self.assertTrue(range_min <= 0 <= range_max,
msg=f"First conv layer quantization range ({range_min}, {range_max}) does not include 0")
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"keras.Input",
"model_compression_toolkit.core.tpc_models.keras_tp_models.keras_default.generate_keras_default_tpc",
"keras.Model",
"model_compression_toolkit.core.common.quantization.quantization_analyzer.analyzer_graph",
"model_compression_toolkit.core.common.mixed_precision.bit_width_setter.set_bit_widths",
"numpy.random.randn",
"model_compression_toolkit.core.keras.keras_implementation.KerasImplementation",
"model_compression_toolkit.core.common.quantization.quantization_params_generation.qparams_computation.calculate_quantization_params",
"model_compression_toolkit.QuantizationConfig",
"keras.layers.Conv2DTranspose",
"model_compression_toolkit.CoreConfig",
"model_compression_toolkit.core.common.quantization.set_node_quantization_config.set_quantization_configuration_to_graph",
"model_compression_toolkit.core.common.model_collector.ModelCollector",
"model_compression_toolkit.core.common.bias_correction.compute_bias_correction_of_graph.compute_bias_correction_of_graph",
"keras.layers.Conv2D",
"numpy.random.normal",
"tests.common_tests.helpers.generate_test_tp_model.generate_test_tp_model"
] |
[((2331, 2397), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[kernel, kernel, in_channels, out_channels]'}), '(size=[kernel, kernel, in_channels, out_channels])\n', (2347, 2397), True, 'import numpy as np\n'), ((2635, 2675), 'keras.Input', 'Input', ([], {'shape': '(16, 16, num_conv_channels)'}), '(shape=(16, 16, num_conv_channels))\n', (2640, 2675), False, 'from keras import Input, Model\n'), ((2830, 2867), 'keras.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), '(inputs=inputs, outputs=outputs)\n', (2835, 2867), False, 'from keras import Input, Model\n'), ((8292, 8307), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8305, 8307), False, 'import unittest\n'), ((2684, 2733), 'keras.layers.Conv2D', 'Conv2D', (['num_conv_channels', 'kernel'], {'use_bias': '(False)'}), '(num_conv_channels, kernel, use_bias=False)\n', (2690, 2733), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((2756, 2814), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['num_conv_channels', 'kernel'], {'use_bias': '(False)'}), '(num_conv_channels, kernel, use_bias=False)\n', (2771, 2814), False, 'from keras.layers import Conv2D, Conv2DTranspose\n'), ((4595, 4699), 'model_compression_toolkit.QuantizationConfig', 'QuantizationConfig', ([], {'weights_error_method': 'threshold_method', 'weights_per_channel_threshold': 'per_channel'}), '(weights_error_method=threshold_method,\n weights_per_channel_threshold=per_channel)\n', (4613, 4699), False, 'from model_compression_toolkit import QuantizationConfig, QuantizationErrorMethod\n'), ((4750, 4794), 'model_compression_toolkit.CoreConfig', 'CoreConfig', ([], {'n_iter': '(1)', 'quantization_config': 'qc'}), '(n_iter=1, quantization_config=qc)\n', (4760, 4794), False, 'from model_compression_toolkit import CoreConfig\n'), ((4809, 4917), 'tests.common_tests.helpers.generate_test_tp_model.generate_test_tp_model', 'generate_test_tp_model', (["{'weights_quantization_method': mct.target_platform.QuantizationMethod.UNIFORM}"], {}), "({'weights_quantization_method': mct.target_platform.\n QuantizationMethod.UNIFORM})\n", (4831, 4917), False, 'from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model\n'), ((4940, 5016), 'model_compression_toolkit.core.tpc_models.keras_tp_models.keras_default.generate_keras_default_tpc', 'generate_keras_default_tpc', ([], {'name': '"""uniform_range_selection_test"""', 'tp_model': 'tp'}), "(name='uniform_range_selection_test', tp_model=tp)\n", (4966, 5016), False, 'from model_compression_toolkit.core.tpc_models.keras_tp_models.keras_default import generate_keras_default_tpc\n'), ((5112, 5133), 'model_compression_toolkit.core.keras.keras_implementation.KerasImplementation', 'KerasImplementation', ([], {}), '()\n', (5131, 5133), False, 'from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation\n'), ((5285, 5452), 'model_compression_toolkit.core.common.quantization.set_node_quantization_config.set_quantization_configuration_to_graph', 'set_quantization_configuration_to_graph', ([], {'graph': 'graph', 'quant_config': 'core_config.quantization_config', 'mixed_precision_enable': 'core_config.mixed_precision_enable'}), '(graph=graph, quant_config=\n core_config.quantization_config, mixed_precision_enable=core_config.\n mixed_precision_enable)\n', (5324, 5452), False, 'from model_compression_toolkit.core.common.quantization.set_node_quantization_config import set_quantization_configuration_to_graph\n'), ((5820, 5880), 
'model_compression_toolkit.core.common.quantization.quantization_analyzer.analyzer_graph', 'analyzer_graph', (['keras_impl.attach_sc_to_node', 'graph', 'fw_info'], {}), '(keras_impl.attach_sc_to_node, graph, fw_info)\n', (5834, 5880), False, 'from model_compression_toolkit.core.common.quantization.quantization_analyzer import analyzer_graph\n'), ((5941, 6010), 'model_compression_toolkit.core.common.model_collector.ModelCollector', 'ModelCollector', (['graph'], {'fw_info': 'DEFAULT_KERAS_INFO', 'fw_impl': 'keras_impl'}), '(graph, fw_info=DEFAULT_KERAS_INFO, fw_impl=keras_impl)\n', (5955, 6010), False, 'from model_compression_toolkit.core.common.model_collector import ModelCollector\n'), ((6159, 6224), 'model_compression_toolkit.core.common.quantization.quantization_params_generation.qparams_computation.calculate_quantization_params', 'calculate_quantization_params', (['graph', 'fw_info'], {'fw_impl': 'keras_impl'}), '(graph, fw_info, fw_impl=keras_impl)\n', (6188, 6224), False, 'from model_compression_toolkit.core.common.quantization.quantization_params_generation.qparams_computation import calculate_quantization_params\n'), ((6315, 6375), 'model_compression_toolkit.core.common.bias_correction.compute_bias_correction_of_graph.compute_bias_correction_of_graph', 'compute_bias_correction_of_graph', (['graph', 'fw_info', 'keras_impl'], {}), '(graph, fw_info, keras_impl)\n', (6347, 6375), False, 'from model_compression_toolkit.core.common.bias_correction.compute_bias_correction_of_graph import compute_bias_correction_of_graph\n'), ((6481, 6541), 'model_compression_toolkit.core.common.mixed_precision.bit_width_setter.set_bit_widths', 'set_bit_widths', (['core_config.mixed_precision_enable', 'tg', 'None'], {}), '(core_config.mixed_precision_enable, tg, None)\n', (6495, 6541), False, 'from model_compression_toolkit.core.common.mixed_precision.bit_width_setter import set_bit_widths\n'), ((6118, 6147), 'numpy.random.randn', 'np.random.randn', (['(1)', '(16)', '(16)', '(4)'], {}), '(1, 16, 16, 4)\n', (6133, 6147), True, 'import numpy as np\n'), ((6910, 6939), 'numpy.random.randn', 'np.random.randn', (['(1)', '(16)', '(16)', '(4)'], {}), '(1, 16, 16, 4)\n', (6925, 6939), True, 'import numpy as np\n')]
|
from bluebottle.bluebottle_drf2.permissions import IsAuthorOrReadOnly
from bluebottle.bluebottle_drf2.views import RetrieveUpdateDeleteAPIView
from bluebottle.bluebottle_utils.utils import get_client_ip
from apps.projects.permissions import IsProjectOwnerOrReadOnly
from apps.tasks.models import Task, TaskMember, TaskFile, Skill
from apps.tasks.permissions import IsTaskAuthorOrReadOnly
from apps.tasks.serializers import TaskSerializer, TaskMemberSerializer, TaskWallPostSerializer, TaskFileSerializer, TaskPreviewSerializer, SkillSerializer
from apps.wallposts.models import WallPost
from django.contrib.contenttypes.models import ContentType
from django.db.models.query_utils import Q
from rest_framework import generics
from rest_framework.generics import ListCreateAPIView
from rest_framework.permissions import IsAuthenticatedOrReadOnly
class TaskPreviewList(generics.ListAPIView):
model = Task
serializer_class = TaskPreviewSerializer
paginate_by = 8
filter_fields = ('status', 'skill', )
def get_queryset(self):
qs = super(TaskPreviewList, self).get_queryset()
project_slug = self.request.QUERY_PARAMS.get('project', None)
if project_slug:
qs = qs.filter(project__slug=project_slug)
text = self.request.QUERY_PARAMS.get('text', None)
if text:
qs = qs.filter(Q(title__icontains=text) |
Q(description__icontains=text) |
Q(end_goal__icontains=text))
ordering = self.request.QUERY_PARAMS.get('ordering', None)
if ordering == 'newest':
qs = qs.order_by('-created')
elif ordering == 'deadline':
qs = qs.order_by('deadline')
qs = qs.exclude(status=Task.TaskStatuses.closed)
return qs
class TaskList(generics.ListCreateAPIView):
model = Task
serializer_class = TaskSerializer
paginate_by = 8
permission_classes = (IsProjectOwnerOrReadOnly,)
filter_fields = ('status', 'expertise', )
def get_queryset(self):
qs = super(TaskList, self).get_queryset()
project_slug = self.request.QUERY_PARAMS.get('project', None)
if project_slug:
qs = qs.filter(project__slug=project_slug)
text = self.request.QUERY_PARAMS.get('text', None)
if text:
qs = qs.filter(Q(title__icontains=text) |
Q(description__icontains=text) |
Q(end_goal__icontains=text))
ordering = self.request.QUERY_PARAMS.get('ordering', None)
if ordering == 'newest':
qs = qs.order_by('-created')
elif ordering == 'deadline':
qs = qs.order_by('deadline')
qs = qs.exclude(status=Task.TaskStatuses.closed)
return qs
def pre_save(self, obj):
obj.author = self.request.user
class TaskDetail(generics.RetrieveUpdateAPIView):
model = Task
permission_classes = (IsAuthorOrReadOnly, )
serializer_class = TaskSerializer
class TaskWallPostMixin(object):
def get_queryset(self):
queryset = super(TaskWallPostMixin, self).get_queryset()
task_type = ContentType.objects.get_for_model(Task)
queryset = queryset.filter(content_type=task_type)
task_id = self.request.QUERY_PARAMS.get('task', None)
if task_id:
queryset = queryset.filter(object_id=task_id)
queryset = queryset.order_by("-created")
return queryset
def pre_save(self, obj):
# task_id = self.request.QUERY_PARAMS.get('task', None)
# task = Task.objects.get(pk=task_id)
# obj.content_object = task
task_type = ContentType.objects.get_for_model(Task)
obj.content_type_id = task_type.id
if not obj.author:
obj.author = self.request.user
else:
obj.editor = self.request.user
obj.ip_address = get_client_ip(self.request)
class TaskWallPostList(TaskWallPostMixin, ListCreateAPIView):
model = WallPost
serializer_class = TaskWallPostSerializer
paginate_by = 4
class TaskWallPostDetail(TaskWallPostMixin, RetrieveUpdateDeleteAPIView):
model = WallPost
serializer_class = TaskWallPostSerializer
permission_classes = (IsAuthorOrReadOnly,)
class TaskMemberList(generics.ListCreateAPIView):
model = TaskMember
serializer_class = TaskMemberSerializer
paginate_by = 50
filter_fields = ('task', )
permission_classes = (IsAuthenticatedOrReadOnly, )
def pre_save(self, obj):
# When creating a task member it should always be by the request.user and have status 'applied'
obj.member = self.request.user
obj.status = TaskMember.TaskMemberStatuses.applied
class TaskMemberDetail(generics.RetrieveUpdateAPIView):
model = TaskMember
serializer_class = TaskMemberSerializer
permission_classes = (IsTaskAuthorOrReadOnly, )
class TaskFileList(generics.ListCreateAPIView):
model = TaskFile
serializer_class = TaskFileSerializer
paginate_by = 50
filter_fields = ('task', )
permission_classes = (IsAuthenticatedOrReadOnly, )
def pre_save(self, obj):
# When creating a task file the author should always be by the request.user
obj.author = self.request.user
class TaskFileDetail(generics.RetrieveUpdateAPIView):
model = TaskFile
serializer_class = TaskFileSerializer
permission_classes = (IsAuthorOrReadOnly, )
class SkillList(generics.ListAPIView):
model = Skill
serializer_class = SkillSerializer
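# Editor's note: TaskPreviewList.get_queryset and TaskList.get_queryset are
# verbatim copies. A possible (untested) refactor is a shared mixin, e.g.:
#
#     class TaskSearchMixin(object):
#         def get_queryset(self):
#             qs = super(TaskSearchMixin, self).get_queryset()
#             # ... same project / text / ordering filtering as above ...
#             return qs.exclude(status=Task.TaskStatuses.closed)
#
# with both list views inheriting from it ahead of their generics base class.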
|
[
"django.db.models.query_utils.Q",
"bluebottle.bluebottle_utils.utils.get_client_ip",
"django.contrib.contenttypes.models.ContentType.objects.get_for_model"
] |
[((3166, 3205), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['Task'], {}), '(Task)\n', (3199, 3205), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((3675, 3714), 'django.contrib.contenttypes.models.ContentType.objects.get_for_model', 'ContentType.objects.get_for_model', (['Task'], {}), '(Task)\n', (3708, 3714), False, 'from django.contrib.contenttypes.models import ContentType\n'), ((3911, 3938), 'bluebottle.bluebottle_utils.utils.get_client_ip', 'get_client_ip', (['self.request'], {}), '(self.request)\n', (3924, 3938), False, 'from bluebottle.bluebottle_utils.utils import get_client_ip\n'), ((1471, 1498), 'django.db.models.query_utils.Q', 'Q', ([], {'end_goal__icontains': 'text'}), '(end_goal__icontains=text)\n', (1472, 1498), False, 'from django.db.models.query_utils import Q\n'), ((2466, 2493), 'django.db.models.query_utils.Q', 'Q', ([], {'end_goal__icontains': 'text'}), '(end_goal__icontains=text)\n', (2467, 2493), False, 'from django.db.models.query_utils import Q\n'), ((1357, 1381), 'django.db.models.query_utils.Q', 'Q', ([], {'title__icontains': 'text'}), '(title__icontains=text)\n', (1358, 1381), False, 'from django.db.models.query_utils import Q\n'), ((1411, 1441), 'django.db.models.query_utils.Q', 'Q', ([], {'description__icontains': 'text'}), '(description__icontains=text)\n', (1412, 1441), False, 'from django.db.models.query_utils import Q\n'), ((2352, 2376), 'django.db.models.query_utils.Q', 'Q', ([], {'title__icontains': 'text'}), '(title__icontains=text)\n', (2353, 2376), False, 'from django.db.models.query_utils import Q\n'), ((2406, 2436), 'django.db.models.query_utils.Q', 'Q', ([], {'description__icontains': 'text'}), '(description__icontains=text)\n', (2407, 2436), False, 'from django.db.models.query_utils import Q\n')]
|
import os
import queue
from multiprocessing import Process, cpu_count
def search(paths, query_q, result_q):
lines = []
for path in paths:
lines.extend(l.strip() for l in open(path))
query = query_q.get()
while query:
        result_q.put([l for l in lines if query in l])
print(result_q.qsize())
query = query_q.get()
if __name__ == '__main__':
cpus = cpu_count()
pathnames = [f for f in os.listdir('.') if os.path.isfile(f)]
paths = [pathnames[i::cpus] for i in range(cpus)]
query_queue = [queue.Queue(maxsize=10) for p in range(cpus)]
result_queue = queue.Queue(maxsize=10)
search_procs = [
Process(target=search, args=(p, q, result_queue)) for p, q in zip(paths, query_queue)
]
for proc in search_procs:
proc.start()
for q in query_queue:
q.put('def')
        # send the stop sentinel to every worker so each search loop can exit
        q.put(None)
print(result_queue.qsize())
print(result_queue.qsize())
for r in range(cpus):
for match in result_queue.get():
print(match)
for proc in search_procs:
proc.join()
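# Editor's note (a sketch, not part of the original): queue.Queue is only
# thread-safe; it is not shared across process boundaries, so items put by the
# worker processes above never reach the parent. For the fan-out/fan-in to work
# with multiprocessing.Process, the queues would need to come from the
# multiprocessing module instead, e.g.:
#
#     from multiprocessing import Queue
#     query_queue = [Queue(maxsize=10) for _ in range(cpus)]
#     result_queue = Queue(maxsize=10)
#
# with the rest of the orchestration unchanged (note that qsize() on a
# multiprocessing.Queue is not implemented on every platform, e.g. macOS).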
|
[
"queue.Queue",
"os.path.isfile",
"multiprocessing.Process",
"os.listdir",
"multiprocessing.cpu_count"
] |
[((401, 412), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (410, 412), False, 'from multiprocessing import Process, cpu_count\n'), ((618, 641), 'queue.Queue', 'queue.Queue', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (629, 641), False, 'import queue\n'), ((553, 576), 'queue.Queue', 'queue.Queue', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (564, 576), False, 'import queue\n'), ((672, 721), 'multiprocessing.Process', 'Process', ([], {'target': 'search', 'args': '(p, q, result_queue)'}), '(target=search, args=(p, q, result_queue))\n', (679, 721), False, 'from multiprocessing import Process, cpu_count\n'), ((441, 456), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (451, 456), False, 'import os\n'), ((460, 477), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (474, 477), False, 'import os\n')]
|
'''
kivybooth
=========
A photobooth application, intended to consist of a touch screen, a libgphoto2
compatible camera, and a printer connected via USB.
See README.md for details.
'''
import log, logging
# Libraries
from time import time
from kivy.app import App
from kivy.properties import NumericProperty, StringProperty, BooleanProperty,\
ListProperty
from kivy.clock import Clock
from kivy.animation import Animation
from kivy.uix.screenmanager import Screen
import kivysome
from kivy.config import Config
import os.path
# Modules
import camera
from res.fontawesome import *
logger = logging.getLogger("kb.main")
class KivyBoothApp(App):
index = NumericProperty(-1)
current_title = StringProperty()
time = NumericProperty(0)
show_sourcecode = BooleanProperty(False)
sourcecode = StringProperty()
screen_names = ListProperty([])
hierarchy = ListProperty([])
last_photo = "res/background.png"
def build(self):
# Late loading of application modules - otherwise, they would not find the app instance
import screens
import layout
self.title = 'KivyBooth'
self.screens = screens.load()
self.camera = camera.Camera()
self.root = layout.root
self.go_to_screen("idle")
Clock.schedule_interval(self._update_clock, 1 / 60.)
logger.info("Application started")
def on_pause(self):
return True
def on_resume(self):
pass
def go_to_screen(self, name, direction='left'):
logger.info("Going to screen '%s'", name)
sm = self.root.ids.sm
sm.switch_to(self.screens[name], direction=direction)
self.current_title = self.screens[name].name
def go_hierarchy_previous(self):
ahr = self.hierarchy
if len(ahr) == 1:
return
if ahr:
ahr.pop()
if ahr:
idx = ahr.pop()
            self.go_to_screen(idx)
def _update_clock(self, dt):
self.time = time()
if __name__ == '__main__':
KivyBoothApp().run()
|
[
"kivy.properties.ListProperty",
"kivy.properties.BooleanProperty",
"kivy.properties.StringProperty",
"camera.Camera",
"time.time",
"screens.load",
"kivy.clock.Clock.schedule_interval",
"kivy.properties.NumericProperty",
"logging.getLogger"
] |
[((602, 630), 'logging.getLogger', 'logging.getLogger', (['"""kb.main"""'], {}), "('kb.main')\n", (619, 630), False, 'import log, logging\n'), ((671, 690), 'kivy.properties.NumericProperty', 'NumericProperty', (['(-1)'], {}), '(-1)\n', (686, 690), False, 'from kivy.properties import NumericProperty, StringProperty, BooleanProperty, ListProperty\n'), ((711, 727), 'kivy.properties.StringProperty', 'StringProperty', ([], {}), '()\n', (725, 727), False, 'from kivy.properties import NumericProperty, StringProperty, BooleanProperty, ListProperty\n'), ((739, 757), 'kivy.properties.NumericProperty', 'NumericProperty', (['(0)'], {}), '(0)\n', (754, 757), False, 'from kivy.properties import NumericProperty, StringProperty, BooleanProperty, ListProperty\n'), ((780, 802), 'kivy.properties.BooleanProperty', 'BooleanProperty', (['(False)'], {}), '(False)\n', (795, 802), False, 'from kivy.properties import NumericProperty, StringProperty, BooleanProperty, ListProperty\n'), ((820, 836), 'kivy.properties.StringProperty', 'StringProperty', ([], {}), '()\n', (834, 836), False, 'from kivy.properties import NumericProperty, StringProperty, BooleanProperty, ListProperty\n'), ((856, 872), 'kivy.properties.ListProperty', 'ListProperty', (['[]'], {}), '([])\n', (868, 872), False, 'from kivy.properties import NumericProperty, StringProperty, BooleanProperty, ListProperty\n'), ((889, 905), 'kivy.properties.ListProperty', 'ListProperty', (['[]'], {}), '([])\n', (901, 905), False, 'from kivy.properties import NumericProperty, StringProperty, BooleanProperty, ListProperty\n'), ((1163, 1177), 'screens.load', 'screens.load', ([], {}), '()\n', (1175, 1177), False, 'import screens\n'), ((1200, 1215), 'camera.Camera', 'camera.Camera', ([], {}), '()\n', (1213, 1215), False, 'import camera\n'), ((1290, 1343), 'kivy.clock.Clock.schedule_interval', 'Clock.schedule_interval', (['self._update_clock', '(1 / 60.0)'], {}), '(self._update_clock, 1 / 60.0)\n', (1313, 1343), False, 'from kivy.clock import Clock\n'), ((1998, 2004), 'time.time', 'time', ([], {}), '()\n', (2002, 2004), False, 'from time import time\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from ambari_agent import main
main.MEMORY_LEAK_DEBUG_FILEPATH = "/tmp/memory_leak_debug.out"
import os
import unittest
import tempfile
from mock.mock import patch, MagicMock, call
from ambari_agent.AmbariConfig import AmbariConfig
from ambari_commons import shell
from ambari_commons.shell import shellRunner
from sys import platform as _platform
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
import subprocess, time
@not_for_platform(PLATFORM_WINDOWS)
class TestShell(unittest.TestCase):
@patch("os.setuid")
def test_changeUid(self, os_setUIDMock):
shell.threadLocal.uid = 9999
shell.changeUid()
self.assertTrue(os_setUIDMock.called)
@patch("pwd.getpwnam")
def test_shellRunner_run(self, getpwnamMock):
sh = shellRunner()
result = sh.run(['echo'])
self.assertEquals(result['exitCode'], 0)
self.assertEquals(result['error'], '')
getpwnamMock.return_value = [os.getuid(), os.getuid(), os.getuid()]
result = sh.run(['echo'], 'non_exist_user_name')
self.assertEquals(result['exitCode'], 0)
self.assertEquals(result['error'], '')
def test_kill_process_with_children(self):
if _platform == "linux" or _platform == "linux2": # Test is Linux-specific
sleep_cmd = "sleep 314"
test_cmd = """ (({0}) & ({0} & {0})) """.format(sleep_cmd)
# Starting process tree (multiple process groups)
test_process = subprocess.Popen(test_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
time.sleep(0.3) # Delay to allow subprocess to start
# Check if processes are running
ps_cmd = """ps aux """
ps_process = subprocess.Popen(ps_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
(out, err) = ps_process.communicate()
self.assertTrue(sleep_cmd in out)
# Kill test process
shell.kill_process_with_children(test_process.pid)
test_process.communicate()
# Now test process should not be running
ps_process = subprocess.Popen(ps_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
(out, err) = ps_process.communicate()
self.assertFalse(sleep_cmd in out)
else:
# Do not run under other systems
pass
|
[
"subprocess.Popen",
"mock.mock.patch",
"ambari_commons.shell.changeUid",
"only_for_platform.not_for_platform",
"time.sleep",
"ambari_commons.shell.kill_process_with_children",
"os.getuid",
"ambari_commons.shell.shellRunner"
] |
[((1250, 1284), 'only_for_platform.not_for_platform', 'not_for_platform', (['PLATFORM_WINDOWS'], {}), '(PLATFORM_WINDOWS)\n', (1266, 1284), False, 'from only_for_platform import not_for_platform, PLATFORM_WINDOWS\n'), ((1326, 1344), 'mock.mock.patch', 'patch', (['"""os.setuid"""'], {}), "('os.setuid')\n", (1331, 1344), False, 'from mock.mock import patch, MagicMock, call\n'), ((1490, 1511), 'mock.mock.patch', 'patch', (['"""pwd.getpwnam"""'], {}), "('pwd.getpwnam')\n", (1495, 1511), False, 'from mock.mock import patch, MagicMock, call\n'), ((1425, 1442), 'ambari_commons.shell.changeUid', 'shell.changeUid', ([], {}), '()\n', (1440, 1442), False, 'from ambari_commons import shell\n'), ((1569, 1582), 'ambari_commons.shell.shellRunner', 'shellRunner', ([], {}), '()\n', (1580, 1582), False, 'from ambari_commons.shell import shellRunner\n'), ((1735, 1746), 'os.getuid', 'os.getuid', ([], {}), '()\n', (1744, 1746), False, 'import os\n'), ((1748, 1759), 'os.getuid', 'os.getuid', ([], {}), '()\n', (1757, 1759), False, 'import os\n'), ((1761, 1772), 'os.getuid', 'os.getuid', ([], {}), '()\n', (1770, 1772), False, 'import os\n'), ((2212, 2302), 'subprocess.Popen', 'subprocess.Popen', (['test_cmd'], {'stderr': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'shell': '(True)'}), '(test_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE,\n shell=True)\n', (2228, 2302), False, 'import subprocess, time\n'), ((2305, 2320), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (2315, 2320), False, 'import subprocess, time\n'), ((2445, 2533), 'subprocess.Popen', 'subprocess.Popen', (['ps_cmd'], {'stderr': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'shell': '(True)'}), '(ps_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE,\n shell=True)\n', (2461, 2533), False, 'import subprocess, time\n'), ((2646, 2696), 'ambari_commons.shell.kill_process_with_children', 'shell.kill_process_with_children', (['test_process.pid'], {}), '(test_process.pid)\n', (2678, 2696), False, 'from ambari_commons import shell\n'), ((2796, 2884), 'subprocess.Popen', 'subprocess.Popen', (['ps_cmd'], {'stderr': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'shell': '(True)'}), '(ps_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE,\n shell=True)\n', (2812, 2884), False, 'import subprocess, time\n')]
|
import numpy as np
import copy
from pommerman import constants
from pommerman import utility
STEP_COUNT_POS = 0
DONE_POS = 1
AMMO_POS = 0
BLAST_STRENGTH_POS = 1
CAN_KICK_POS = 2
ALIVE_POS = 3
ROW_POS = 4
COL_POS = 5
class EnvSimulator:
@staticmethod
def get_initial_game_data(obs, my_id, max_steps=1000):
board_size = len(obs['board'])
game_data = EnvSimulator.get_board(board_size, obs['board'])
agent_0_pos = EnvSimulator.get_position(game_data, 0, True)
agent_1_pos = EnvSimulator.get_position(game_data, 1, True)
game_info = np.zeros((1, board_size)).astype(np.uint16)
game_info[0, STEP_COUNT_POS] = int(obs['step_count'])
game_info[0, DONE_POS] = 0
player1row = np.zeros((1, board_size)).astype(np.uint16)
player1row[0, AMMO_POS] = int(obs['ammo'])
player1row[0, BLAST_STRENGTH_POS] = int(obs['blast_strength'])
player1row[0, CAN_KICK_POS] = int(obs['can_kick'])
player1row[0, ALIVE_POS] = 1
player1row[0, ROW_POS] = agent_0_pos[0]
player1row[0, COL_POS] = agent_0_pos[1]
player2row = np.zeros((1, board_size)).astype(np.uint16)
player2row[0, AMMO_POS] = 1
player2row[0, BLAST_STRENGTH_POS] = constants.DEFAULT_BLAST_STRENGTH
player2row[0, CAN_KICK_POS] = False
player2row[0, ALIVE_POS] = 1
player2row[0, ROW_POS] = agent_1_pos[0]
player2row[0, COL_POS] = agent_1_pos[1]
bomb = np.zeros((1, board_size)).astype(np.uint16)
game_data = np.vstack([game_data, game_info, player1row, player2row])
return game_data
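    # Layout note (editor's addition, inferred from the stacking above): game_data
    # holds four blocks of board_size columns each -
    #   rows 0 .. board_size-1 : the board itself,
    #   row  board_size        : game info (step count, done flag),
    #   row  board_size + 1    : agent 0 stats (ammo, blast strength, can_kick, alive, row, col),
    #   row  board_size + 2    : agent 1 stats (same columns).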
@staticmethod
def update(game_data, obs, my_id):
enemy_id = 0
if my_id == 0:
enemy_id = 1
step_count = EnvSimulator._get_game_value(game_data, STEP_COUNT_POS)
if game_data.shape[1] != len(obs['board']):
raise ValueError('Invalid update: boardsize different!')
if step_count + 1 != int(obs['step_count']) and (step_count != 0 or int(obs['step_count']) != 0):
raise ValueError('Invalid update: missed step count!')
EnvSimulator._set_game_value(game_data, STEP_COUNT_POS, obs['step_count'])
new_board = EnvSimulator._get_game_data_from_obs(obs)
new_board = EnvSimulator.get_board(game_data.shape[1], obs['board'])
new_bomb_life = EnvSimulator.get_board(game_data.shape[1], obs['bomb_life'], 0)
# get actions
actions = {}
for agent_id in [10, 11]:
old_pos = EnvSimulator.get_position(game_data, agent_id, True)
new_pos = EnvSimulator.get_position(new_board, agent_id + 10, True)
if old_pos != new_pos:
actions[agent_id] = EnvSimulator.get_direction(old_pos, new_pos).value
elif new_bomb_life[new_pos] == constants.DEFAULT_BOMB_LIFE:
actions[agent_id] = constants.Action.Bomb.value
else:
actions[agent_id] = constants.Action.Stop.value
EnvSimulator.act(game_data, actions)
reset = False
# compare boards
if not EnvSimulator.boards_equal(EnvSimulator.get_game_data_board(game_data), new_board, True):
a1bomb, a2bomb, kick, flame = EnvSimulator.get_boards_differences(
EnvSimulator.get_game_data_board(game_data), new_board)
#print(a1bomb, a2bomb, kick, flame)
if a1bomb and my_id != 0:
ammo = EnvSimulator._get_agent_value(game_data, 0, AMMO_POS)
EnvSimulator._set_agent_value(game_data, 0, AMMO_POS, ammo+1)
elif a2bomb and my_id != 1:
ammo = EnvSimulator._get_agent_value(game_data, 1, AMMO_POS)
EnvSimulator._set_agent_value(game_data, 1, AMMO_POS, ammo + 1)
elif kick and EnvSimulator._get_agent_value(game_data, my_id, CAN_KICK_POS) == int(obs['can_kick']):
EnvSimulator._set_agent_value(game_data, enemy_id, CAN_KICK_POS, 1)
elif flame and EnvSimulator._get_agent_value(game_data, my_id, BLAST_STRENGTH_POS) == int(obs['blast_strength']):
blast = EnvSimulator._get_agent_value(game_data, enemy_id, BLAST_STRENGTH_POS)
EnvSimulator._set_agent_value(game_data, enemy_id, BLAST_STRENGTH_POS, blast+1)
reset = True
EnvSimulator._set_agent_value(game_data, enemy_id, AMMO_POS, int(obs['ammo']))
EnvSimulator._set_agent_value(game_data, enemy_id, BLAST_STRENGTH_POS, int(obs['blast_strength']))
EnvSimulator._set_agent_value(game_data, enemy_id, CAN_KICK_POS, int(obs['can_kick']))
# update board because of items
game_data[0:game_data.shape[1], 0:game_data.shape[1]] = new_board
return game_data, actions, reset
@staticmethod
def _get_game_data_from_obs(obs):
board_size = len(obs['board'])
board = EnvSimulator.get_board(board_size, obs['board'])
blast_strength = obs['bomb_blast_strength']
bomb_life = obs['bomb_life']
for row in range(len(board)):
for col in range(len(board[0])):
if (board[row, col] == 10 or board[row, col] == 11) and blast_strength[row, col] > 0.0:
# agent over bomb
value = 10000 + (board[row, col]-7)*1000 + int(blast_strength[row, col])*10 + int(bomb_life[row, col])
board[row, col] = value
                if board[row, col] == 3:  # bare bomb with no agent standing on it
                    # NOTE: unfinished in the original code - the packed value below
                    # is computed but never written back to the board, and the
                    # (board[row, col] - 7) owner term is meaningless for a plain
                    # bomb cell (value 3).
                    agent_id = 0
                    value = 10000 + (board[row, col]-7)*1000 + int(blast_strength[row, col])*10 + int(bomb_life[row, col])
        return board
@staticmethod
def get_game_data_board(game_data):
return game_data[0:game_data.shape[1], 0:game_data.shape[1]]
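    # Editor's note on the packed bomb encoding used below (inferred from this
    # file, so treat it as an assumption rather than documented behaviour): a
    # cell value >= 10000 encodes a bomb as
    #
    #     10000 + owner * 1000 + direction * 100 + blast_strength * 10 + life
    #
    # where owner is 1 or 2 for agent 0/1 (3 or 4 while an agent is standing on
    # the bomb), direction is the constants.Action value the bomb is currently
    # moving in (0 = stationary), and life counts down to detonation. Example: a
    # bomb just laid by agent 0 with blast strength 2 is stored as
    # 10000 + 3*1000 + 0*100 + 2*10 + 9 = 13029.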
@staticmethod
def act(game_data, actions):
MIN_FIRE = 20
AGENT_0 = 10
AGENT_1 = 11
if EnvSimulator.get_done(game_data):
return
#print(game_data, actions)
# move objects
pos_agent0_prev = None
pos_agent0 = None
pos_agent1_prev = None
pos_agent1 = None
pos_bomb_prev = []
for row in range(game_data.shape[1]):
for col in range(game_data.shape[1]):
if EnvSimulator._is_fire(game_data, (row, col)):
game_data[row, col] -= 1
if game_data[row, col] == MIN_FIRE:
game_data[row, col] = 0
elif game_data[row, col] == AGENT_1 or game_data[row, col] >= 14000:
pos_agent1_prev = (row, col)
pos_agent1 = EnvSimulator.handle_agent_move(game_data, 1, row, col, actions[1])
elif game_data[row, col] == AGENT_0 or game_data[row, col] >= 13000:
pos_agent0_prev = (row, col)
pos_agent0 = EnvSimulator.handle_agent_move(game_data, 0, row, col, actions[0])
if game_data[row, col] >= 10000:
pos_bomb_prev.append((row, col))
if pos_agent0 == pos_agent1:
pos_agent0 = pos_agent0_prev
pos_agent1 = pos_agent1_prev
# move bombs
pos_bomb = []
change = False
invalid_values = [constants.Item.Rigid.value, constants.Item.Wood.value, constants.Item.Kick,
constants.Item.IncrRange, constants.Item.ExtraBomb]
for bomb_pos in pos_bomb_prev:
bomb = game_data[bomb_pos]
direction = int((bomb % 1000) / 100)
if direction == 0 and bomb_pos == pos_agent0:
if pos_agent0 != pos_agent0_prev: # kick bomb
direction = EnvSimulator.get_direction(pos_agent0_prev, pos_agent0).value
elif int((bomb % 10000) / 1000) != 1 and int((bomb % 10000) / 1000) != 3:
raise ValueError("Fatal Error")
elif direction == 0 and bomb_pos == pos_agent1:
if pos_agent1 != pos_agent1_prev: # kick bomb
direction = EnvSimulator.get_direction(pos_agent1_prev, pos_agent1).value
elif int((bomb % 10000) / 1000) != 2 and int((bomb % 10000) / 1000) != 4:
raise ValueError("Fatal Error")
new_bomb_pos = bomb_pos
if direction > 0:
change = True
row, col = bomb_pos
if EnvSimulator._is_valid_direction(game_data, row, col, direction, invalid_values):
new_bomb_pos = utility.get_next_position(bomb_pos, constants.Action(direction))
if (row, col) == pos_agent0 or (row, col) == pos_agent1:
new_bomb_pos = bomb_pos
pos_bomb.append(new_bomb_pos)
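        # iteratively undo illegal moves: bombs that collide with each other or with an agent bounce back, which can push agents back as well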
while change:
change = False
# bomb <-> bomb
for i in range(len(pos_bomb)):
pos = pos_bomb[i]
for j in range(len(pos_bomb)):
if i != j and pos == pos_bomb[j]:
pos_bomb[i] = pos_bomb_prev[i]
pos_bomb[j] = pos_bomb_prev[j]
change = True
if pos_bomb[i] == pos_agent0 and (pos_bomb[i] != pos_bomb_prev[i] or pos_agent0 != pos_agent0_prev):
pos_agent0 = pos_agent0_prev
pos_bomb[i] = pos_bomb_prev[i]
change = True
elif pos_bomb[i] == pos_agent1 and (pos_bomb[i] != pos_bomb_prev[i] or pos_agent1 != pos_agent1_prev):
pos_agent1 = pos_agent1_prev
pos_bomb[i] = pos_bomb_prev[i]
change = True
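        # write each bomb back at its final position: life ticks down, strength and owner are kept, travel direction is recorded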
for i in range(len(pos_bomb)):
cur_value = game_data[pos_bomb_prev[i]]
life = int(cur_value % 10) - 1
if 20 < game_data[pos_bomb[i]] < 30:
life = 0
strength = int((cur_value % 100) / 10)
direction = EnvSimulator.get_direction(pos_bomb[i], pos_bomb_prev[i]).value
player = int((cur_value % 10000) / 1000)
if player > 2:
player -= 2
if pos_agent0 == pos_bomb[i] or pos_agent1 == pos_bomb[i]:
player += 2
game_data[pos_bomb_prev[i]] = 0
game_data[pos_bomb[i]] = 10000 + player * 1000 + direction * 100 + strength * 10 + life
# set agent
#print(pos_agent0, pos_agent1)
EnvSimulator._agent_collect(game_data, 0, pos_agent0)
EnvSimulator._agent_collect(game_data, 1, pos_agent1)
if pos_agent0_prev != pos_agent0:
if game_data[pos_agent0_prev] < 10000:
game_data[pos_agent0_prev] = 0
if EnvSimulator._is_fire(game_data, pos_agent0):
EnvSimulator._agent_died(game_data, 0)
else:
game_data[pos_agent0] = AGENT_0
if pos_agent1_prev != pos_agent1:
if game_data[pos_agent1_prev] < 10000:
game_data[pos_agent1_prev] = 0
if EnvSimulator._is_fire(game_data, pos_agent1):
EnvSimulator._agent_died(game_data, 1)
else:
game_data[pos_agent1] = AGENT_1
# fire bombs
fire = True
while fire:
fire = False
for bomb in pos_bomb:
bomb_value = game_data[bomb]
if int(bomb_value % 10) == 0:
strength = int((bomb_value % 100) / 10)
EnvSimulator._set_fire(game_data, bomb[0], bomb[1], True)
EnvSimulator._fire_bomb(game_data, bomb[0], bomb[1], 0, 1, strength - 1) # right
EnvSimulator._fire_bomb(game_data, bomb[0], bomb[1], 0, -1, strength - 1) # left
EnvSimulator._fire_bomb(game_data, bomb[0], bomb[1], 1, 0, strength - 1) # down
EnvSimulator._fire_bomb(game_data, bomb[0], bomb[1], -1, 0, strength - 1) # up
fire = True
#print('result: ', game_data)
@staticmethod
def handle_agent_move(game_data, agent_id, row, col, action):
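        # resolve a single agent action: Stop keeps the position, Bomb drops a bomb under the agent if ammo allows, moves are validated against rigid and wooden walls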
if action == constants.Action.Stop.value:
return row, col
elif action == constants.Action.Bomb.value:
ammo = EnvSimulator._get_agent_value(game_data, agent_id, AMMO_POS)
if game_data[row, col] < 10000 and ammo > 0:
game_data[row, col] = 10009 + (agent_id + 3) * 1000 + EnvSimulator._get_agent_value(game_data, agent_id, BLAST_STRENGTH_POS) * 10
EnvSimulator._set_agent_value(game_data, agent_id, AMMO_POS, ammo-1)
return row, col
else:
invalid_values = [constants.Item.Rigid.value, constants.Item.Wood.value]
if EnvSimulator._is_valid_direction(game_data, row, col, action, invalid_values):
return utility.get_next_position((row, col), constants.Action(action))
else:
return row, col
@staticmethod
def _agent_collect(game_data, agent_id, pos):
item = game_data[pos]
if item == constants.Item.Kick.value:
EnvSimulator._set_agent_value(game_data, agent_id, CAN_KICK_POS, 1)
elif item == constants.Item.ExtraBomb.value:
cur_ammo = EnvSimulator._get_agent_value(game_data, agent_id, AMMO_POS)
EnvSimulator._set_agent_value(game_data, agent_id, AMMO_POS, cur_ammo + 1)
elif item == constants.Item.IncrRange.value:
cur_range = EnvSimulator._get_agent_value(game_data, agent_id, BLAST_STRENGTH_POS)
EnvSimulator._set_agent_value(game_data, agent_id, BLAST_STRENGTH_POS, cur_range + 1)
@staticmethod
def _position_on_board(game_data, row, col):
return all([game_data.shape[1] > row, game_data.shape[1] > col, row >= 0, col >= 0])
@staticmethod
def _is_fire(game_data, pos):
return 20 < game_data[pos] < 30
@staticmethod
def _fire_bomb(game_data, row, col, row_off, col_off, strength):
if strength <= 0:
return
next_row = row + row_off
next_col = col + col_off
if not EnvSimulator._position_on_board(game_data, next_row, next_col):
return
if utility.position_in_items(game_data, (next_row, next_col), [constants.Item.Rigid, constants.Item.Wood]):
return
EnvSimulator._set_fire(game_data, next_row, next_col, False)
EnvSimulator._fire_bomb(game_data, next_row, next_col, row_off, col_off, strength - 1)
@staticmethod
def _set_fire(game_data, row, col, first):
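        # burn one cell: any agent occupying it dies; when the detonating bomb itself is consumed (first=True) its owner gets one ammo back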
prev_value = game_data[row, col]
if prev_value > 14000 or prev_value == 11:
EnvSimulator._agent_died(game_data, 1)
if prev_value > 13000 or prev_value == 10:
EnvSimulator._agent_died(game_data, 0)
        if not first and prev_value > 10000:
            # zero the bomb's life so the chain-explosion loop in act() detonates it next
            game_data[row, col] = prev_value - int(prev_value % 10)
else:
if first and prev_value > 10000:
# increase ammo
player = int((prev_value % 10000) / 1000)
if player == 1 or player == 3:
player = 0
else:
player = 1
ammo = EnvSimulator._get_agent_value(game_data, player, AMMO_POS)
EnvSimulator._set_agent_value(game_data, player, AMMO_POS, ammo+1)
game_data[row, col] = 22
@staticmethod
def _agent_died(game_data, agent_id):
EnvSimulator._set_agent_value(game_data, agent_id, ALIVE_POS, 0)
EnvSimulator._set_game_value(game_data, DONE_POS, 1)
@staticmethod
def _is_valid_direction(board, row, col, direction, invalid_values=None):
if invalid_values is None:
invalid_values = [item.value for item in [constants.Item.Rigid, constants.Item.Wood]]
if constants.Action(direction) == constants.Action.Stop:
return True
if constants.Action(direction) == constants.Action.Up:
return row - 1 >= 0 and board[row - 1][col] not in invalid_values
if constants.Action(direction) == constants.Action.Down:
return row + 1 < len(board) and board[row + 1][col] not in invalid_values
if constants.Action(direction) == constants.Action.Left:
return col - 1 >= 0 and board[row][col - 1] not in invalid_values
if constants.Action(direction) == constants.Action.Right:
return col + 1 < len(board[0]) and board[row][col + 1] not in invalid_values
raise constants.InvalidAction("We did not receive a valid direction: ", direction)
@staticmethod
def get_direction(position, next_position):
if position == next_position:
return constants.Action.Stop
x, y = position
next_x, next_y = next_position
if x == next_x:
if y < next_y:
return constants.Action.Right
else:
return constants.Action.Left
elif y == next_y:
if x < next_x:
return constants.Action.Down
else:
return constants.Action.Up
raise constants.InvalidAction(
"We did not receive a valid position transition.")
@staticmethod
def _get_agent_value(game_data, agent_id, value):
return game_data[game_data.shape[0] - 2 + agent_id, value]
@staticmethod
def _set_agent_value(game_data, agent_id, value, val):
game_data[game_data.shape[0] - 2 + agent_id, value] = val
@staticmethod
def _get_game_value(game_data, value):
return game_data[game_data.shape[0] - 3, value]
@staticmethod
def _set_game_value(game_data, value, val):
game_data[game_data.shape[0] - 3, value] = val
@staticmethod
def get_done(game_data):
return bool(EnvSimulator._get_game_value(game_data, DONE_POS))
@staticmethod
def get_alive(game_data):
alive = {0: bool(game_data[game_data.shape[0] - 2, ALIVE_POS]),
1: bool(game_data[game_data.shape[0] - 1, ALIVE_POS])}
return alive
@staticmethod
def get_board(board_size, board_array, init_value=constants.Item.Passage.value):
board = np.ones((board_size, board_size)).astype(np.uint16)
board *= init_value
for x in range(board_size):
for y in range(board_size):
board[x, y] = board_array[x][y]
return board
@staticmethod
def get_position(board, item, is_single_pos):
pos = np.where(board == item)
pos = list(zip(pos[0], pos[1]))
if is_single_pos:
if len(pos) != 1:
raise ValueError("Invalid pos count!")
return pos[0]
else:
return pos
@staticmethod
def get_valid_actions(board, flames, bombs, agent, actions):
return actions
@staticmethod
def boards_equal(board1, board2, ignore_items):
if ignore_items:
board1 = copy.deepcopy(board1)
board2 = copy.deepcopy(board2)
board1[board1 == constants.Item.ExtraBomb.value] = constants.Item.Passage.value
board1[board1 == constants.Item.IncrRange.value] = constants.Item.Passage.value
board1[board1 == constants.Item.Kick.value] = constants.Item.Passage.value
board2[board2 == constants.Item.ExtraBomb.value] = constants.Item.Passage.value
board2[board2 == constants.Item.IncrRange.value] = constants.Item.Passage.value
board2[board2 == constants.Item.Kick.value] = constants.Item.Passage.value
comparison = (board1 == board2)
return comparison.all()
@staticmethod
def get_boards_differences(board1, board2):
board1 = copy.deepcopy(board1)
board2 = copy.deepcopy(board2)
board1[board1 == constants.Item.ExtraBomb.value] = constants.Item.Passage.value
board1[board1 == constants.Item.IncrRange.value] = constants.Item.Passage.value
board1[board1 == constants.Item.Kick.value] = constants.Item.Passage.value
board2[board2 == constants.Item.ExtraBomb.value] = constants.Item.Passage.value
board2[board2 == constants.Item.IncrRange.value] = constants.Item.Passage.value
board2[board2 == constants.Item.Kick.value] = constants.Item.Passage.value
a1bomb = a2bomb = kick = flame = False
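        # classify each differing cell: an agent replaced by a bomb, a bomb pushed onto a passage (kick), or fresh flames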
comparison = (board1 == board2)
        diffs = np.where(comparison == False)
if len(diffs) >= 2:
diffs = list(zip(diffs[0], diffs[1]))
for diff in diffs:
prev_item = board1[diff]
new_item = board2[diff]
                if prev_item == constants.Item.Agent0.value and new_item == constants.Item.Bomb.value:
                    a1bomb = True
                elif prev_item == constants.Item.Agent1.value and new_item == constants.Item.Bomb.value:
                    a2bomb = True
                elif prev_item == constants.Item.Passage.value and new_item == constants.Item.Bomb.value:
                    kick = True
                elif new_item == constants.Item.Flames.value:
                    flame = True
else:
raise ValueError('Invalid difference between maps.')
else:
print(comparison, "diffs: ", diffs)
return a1bomb, a2bomb, kick, flame
@staticmethod
def get_game_state(game_data):
return game_data, EnvSimulator.get_done(game_data)
@staticmethod
def get_game_data(game_state):
return copy.deepcopy(game_state)
|
[
"copy.deepcopy",
"numpy.zeros",
"numpy.ones",
"numpy.where",
"pommerman.constants.InvalidAction",
"pommerman.constants.Action",
"pommerman.utility.position_in_items",
"numpy.vstack"
] |
[((1538, 1595), 'numpy.vstack', 'np.vstack', (['[game_data, game_info, player1row, player2row]'], {}), '([game_data, game_info, player1row, player2row])\n', (1547, 1595), True, 'import numpy as np\n'), ((14201, 14309), 'pommerman.utility.position_in_items', 'utility.position_in_items', (['game_data', '(next_row, next_col)', '[constants.Item.Rigid, constants.Item.Wood]'], {}), '(game_data, (next_row, next_col), [constants.Item.\n Rigid, constants.Item.Wood])\n', (14226, 14309), False, 'from pommerman import utility\n'), ((16500, 16576), 'pommerman.constants.InvalidAction', 'constants.InvalidAction', (['"""We did not receive a valid direction: """', 'direction'], {}), "('We did not receive a valid direction: ', direction)\n", (16523, 16576), False, 'from pommerman import constants\n'), ((17120, 17194), 'pommerman.constants.InvalidAction', 'constants.InvalidAction', (['"""We did not receive a valid position transition."""'], {}), "('We did not receive a valid position transition.')\n", (17143, 17194), False, 'from pommerman import constants\n'), ((18493, 18516), 'numpy.where', 'np.where', (['(board == item)'], {}), '(board == item)\n', (18501, 18516), True, 'import numpy as np\n'), ((19719, 19740), 'copy.deepcopy', 'copy.deepcopy', (['board1'], {}), '(board1)\n', (19732, 19740), False, 'import copy\n'), ((19758, 19779), 'copy.deepcopy', 'copy.deepcopy', (['board2'], {}), '(board2)\n', (19771, 19779), False, 'import copy\n'), ((20402, 20431), 'numpy.where', 'np.where', (['(comparison is False)'], {}), '(comparison is False)\n', (20410, 20431), True, 'import numpy as np\n'), ((21472, 21497), 'copy.deepcopy', 'copy.deepcopy', (['game_state'], {}), '(game_state)\n', (21485, 21497), False, 'import copy\n'), ((15813, 15840), 'pommerman.constants.Action', 'constants.Action', (['direction'], {}), '(direction)\n', (15829, 15840), False, 'from pommerman import constants\n'), ((15903, 15930), 'pommerman.constants.Action', 'constants.Action', (['direction'], {}), '(direction)\n', (15919, 15930), False, 'from pommerman import constants\n'), ((16045, 16072), 'pommerman.constants.Action', 'constants.Action', (['direction'], {}), '(direction)\n', (16061, 16072), False, 'from pommerman import constants\n'), ((16197, 16224), 'pommerman.constants.Action', 'constants.Action', (['direction'], {}), '(direction)\n', (16213, 16224), False, 'from pommerman import constants\n'), ((16341, 16368), 'pommerman.constants.Action', 'constants.Action', (['direction'], {}), '(direction)\n', (16357, 16368), False, 'from pommerman import constants\n'), ((18955, 18976), 'copy.deepcopy', 'copy.deepcopy', (['board1'], {}), '(board1)\n', (18968, 18976), False, 'import copy\n'), ((18998, 19019), 'copy.deepcopy', 'copy.deepcopy', (['board2'], {}), '(board2)\n', (19011, 19019), False, 'import copy\n'), ((584, 609), 'numpy.zeros', 'np.zeros', (['(1, board_size)'], {}), '((1, board_size))\n', (592, 609), True, 'import numpy as np\n'), ((746, 771), 'numpy.zeros', 'np.zeros', (['(1, board_size)'], {}), '((1, board_size))\n', (754, 771), True, 'import numpy as np\n'), ((1125, 1150), 'numpy.zeros', 'np.zeros', (['(1, board_size)'], {}), '((1, board_size))\n', (1133, 1150), True, 'import numpy as np\n'), ((1474, 1499), 'numpy.zeros', 'np.zeros', (['(1, board_size)'], {}), '((1, board_size))\n', (1482, 1499), True, 'import numpy as np\n'), ((18185, 18218), 'numpy.ones', 'np.ones', (['(board_size, board_size)'], {}), '((board_size, board_size))\n', (18192, 18218), True, 'import numpy as np\n'), ((8550, 8577), 'pommerman.constants.Action', 
'constants.Action', (['direction'], {}), '(direction)\n', (8566, 8577), False, 'from pommerman import constants\n'), ((12868, 12892), 'pommerman.constants.Action', 'constants.Action', (['action'], {}), '(action)\n', (12884, 12892), False, 'from pommerman import constants\n')]
|
#!/usr/bin/env python3
# Test HTTP requests
import requests
import urllib
import httplib2
from yvhai.demo.base import YHDemo
headers = {
'User-Agent': ''
}
class HttpRequester(YHDemo):
@staticmethod
def get(url, params):
res1 = HttpRequester.get_by_r(url, params)
res2 = HttpRequester.get_by_urllib(url, params)
res3 = HttpRequester.get_by_httplib2(url, params)
return res2
    # Call requests; params is a json/dict
@staticmethod
def get_by_r(url, params):
resp = requests.get(url, params=params)
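        # unpack the response into a (headers, body) tuple so all three helpers return the same shape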
res = (headers, content) = (resp.headers, resp.content)
return res
    # Call urllib; params are converted to a query string internally
@staticmethod
def get_by_urllib(url, params):
queryStr = urllib.parse.urlencode(params)
resp = urllib.request.urlopen(url + '?' + queryStr)
res = (headers, content) = (resp.headers, resp.read())
return res
    # Call httplib2
@staticmethod
def get_by_httplib2(url, params):
        hObj = httplib2.Http()
res = (headers, content) = hObj.request(url, 'GET')
return res
@staticmethod
def demo(args=[]):
res = HttpRequester.get('https://baidu.com', {'ie': 'utf-8', 'tn': 'baidu', 'wd': 'hallo'})
print(res)
if __name__ == "__main__":
HttpRequester.demo()
|
[
"httplib2.Http",
"urllib.parse.urlencode",
"urllib.request.urlopen",
"requests.get"
] |
[((514, 546), 'requests.get', 'requests.get', (['url'], {'params': 'params'}), '(url, params=params)\n', (526, 546), False, 'import requests\n'), ((737, 767), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['params'], {}), '(params)\n', (759, 767), False, 'import urllib\n'), ((783, 827), 'urllib.request.urlopen', 'urllib.request.urlopen', (["(url + '?' + queryStr)"], {}), "(url + '?' + queryStr)\n", (805, 827), False, 'import urllib\n'), ((999, 1017), 'httplib2.Http', 'httplib2.Http', (['url'], {}), '(url)\n', (1012, 1017), False, 'import httplib2\n')]
|
#!/usr/bin/env python
import sys
import django
from django.conf import settings
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"bread",
]
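# minimal in-memory settings so the "bread" app can be exercised without a full project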
settings.configure(
DEBUG=True,
USE_TZ=True,
USE_I18N=True,
DATABASES={"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}},
MIDDLEWARE_CLASSES=(),
SITE_ID=1,
INSTALLED_APPS=INSTALLED_APPS,
)
django.setup()
if __name__ == "__main__":
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"django.conf.settings.configure",
"django.setup",
"django.core.management.execute_from_command_line"
] |
[((208, 426), 'django.conf.settings.configure', 'settings.configure', ([], {'DEBUG': '(True)', 'USE_TZ': '(True)', 'USE_I18N': '(True)', 'DATABASES': "{'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'}}", 'MIDDLEWARE_CLASSES': '()', 'SITE_ID': '(1)', 'INSTALLED_APPS': 'INSTALLED_APPS'}), "(DEBUG=True, USE_TZ=True, USE_I18N=True, DATABASES={\n 'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'}\n }, MIDDLEWARE_CLASSES=(), SITE_ID=1, INSTALLED_APPS=INSTALLED_APPS)\n", (226, 426), False, 'from django.conf import settings\n'), ((449, 463), 'django.setup', 'django.setup', ([], {}), '()\n', (461, 463), False, 'import django\n'), ((562, 597), 'django.core.management.execute_from_command_line', 'execute_from_command_line', (['sys.argv'], {}), '(sys.argv)\n', (587, 597), False, 'from django.core.management import execute_from_command_line\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 30 16:33:05 2018
@author: lhe39759
"""
import keras
import os
import PIL
import numpy as np
import tensorflow as tf
import sys
sys.path.append(r'C:\Users\lhe39759\Documents\GitHub/')
from SliceOPy import NetSlice, DataSlice
from model.losses import bce_dice_loss, bce_dice_loss_jake, dice_loss, weighted_bce_dice_loss, weighted_dice_loss, dice_coeff
def loadImg():
path = "Patterns/"
patOptions = ["annealing_twins","Brass bronze","Ductile_Cast_Iron","Grey_Cast_Iron","hypoeutectoid_steel","malleable_cast_iron","superalloy"]
image_array = []
for folder in patOptions:
folder_array = []
for filename in os.listdir(path+folder+"/"):
if filename.endswith(".png"):
insertImage1 = np.asarray(PIL.Image.open(path+folder+"/"+filename).convert('L'))
insertImage1.setflags(write=1)
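                # pad symmetrically, then keep the top-left 256x256 crop so every sample shares one shape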
insertImage1 = np.pad(insertImage1, (300,300), 'symmetric')
folder_array.append(np.array(insertImage1[:256,:256]))
image_array.append(np.array(folder_array))
return (np.array(image_array))
def generateData():
images = loadImg()
features = []
labels = []
for folder in range(0,images.shape[0]):
for image in images[folder]:
features.append(image)
labels.append(folder)
return np.array(features),np.array(labels).reshape(len(labels),1)
features, labels = generateData()
#%%
#model = keras.Sequential()
#model.add(keras.layers.Conv2D(64, (3, 3), input_shape=(256,256,1),padding="same",data_format= keras.backend.image_data_format()))
#model.add(keras.layers.Activation('relu'))
#model.add(keras.layers.MaxPooling2D(pool_size=(2, 2),data_format= keras.backend.image_data_format()))
##
#model.add(keras.layers.Conv2D(32, (3, 3),padding="same",data_format= keras.backend.image_data_format()))
#
###
## model.add(keras.layers.Conv2D(64, (2, 2),data_format= K.image_data_format()))
## model.add(keras.layers.Activation('relu'))
## model.add(keras.layers.MaxPooling2D(pool_size=(2, 2),data_format= K.image_data_format()))
#model.add(keras.layers.Flatten()) # this converts our 3D feature maps to 1D feature vectors
#model.add(keras.layers.Dense(200))
#model.add(keras.layers.Activation('relu'))
## model.add(keras.layers.Dense(64))
## model.add(keras.layers.Activation('relu'))
## model.add(keras.layers.Dropout(0.25))
#model.add(keras.layers.Dense(7))
#model.add(keras.layers.Activation('sigmoid'))
#model.add(keras.layers.Softmax())
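# small CNN classifier: stacked conv layers, then dense layers ending in a 7-way softmax over the pattern classes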
model = keras.Sequential([
keras.layers.Conv2D(32,(3, 3),input_shape=(256,256,1), activation='relu'),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Conv2D(64, (2,2), activation='relu'),
# keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(32, activation=tf.nn.sigmoid),
keras.layers.Dense(16, activation=tf.nn.sigmoid),
keras.layers.Dense(7, activation=tf.nn.softmax)
])
print(features.shape,labels.shape)
data = DataSlice(Features = features, Labels = labels,Shuffle=True,Split_Ratio = 0.7,Channel_Features= (256,256))
data.oneHot(7)
print(data)
model = NetSlice(model,'keras', Data_Slice=data)
#model.loadModel('pattern_model',customObject={'dice_coeff':dice_coeff})
model.compileModel(keras.optimizers.Adam(lr=0.001), 'categorical_crossentropy', ['accuracy'])
model.trainModel(Epochs = 100,Batch_size=100, Verbose = 2)
model.saveModel("pattern_model")
|
[
"sys.path.append",
"numpy.pad",
"keras.layers.MaxPooling2D",
"keras.optimizers.Adam",
"keras.layers.Flatten",
"PIL.Image.open",
"keras.layers.Dense",
"numpy.array",
"keras.layers.Conv2D",
"SliceOPy.NetSlice",
"SliceOPy.DataSlice",
"os.listdir"
] |
[((176, 234), 'sys.path.append', 'sys.path.append', (['"""C:\\\\Users\\\\lhe39759\\\\Documents\\\\GitHub/"""'], {}), "('C:\\\\Users\\\\lhe39759\\\\Documents\\\\GitHub/')\n", (191, 234), False, 'import sys\n'), ((3161, 3268), 'SliceOPy.DataSlice', 'DataSlice', ([], {'Features': 'features', 'Labels': 'labels', 'Shuffle': '(True)', 'Split_Ratio': '(0.7)', 'Channel_Features': '(256, 256)'}), '(Features=features, Labels=labels, Shuffle=True, Split_Ratio=0.7,\n Channel_Features=(256, 256))\n', (3170, 3268), False, 'from SliceOPy import NetSlice, DataSlice\n'), ((3303, 3344), 'SliceOPy.NetSlice', 'NetSlice', (['model', '"""keras"""'], {'Data_Slice': 'data'}), "(model, 'keras', Data_Slice=data)\n", (3311, 3344), False, 'from SliceOPy import NetSlice, DataSlice\n'), ((1126, 1147), 'numpy.array', 'np.array', (['image_array'], {}), '(image_array)\n', (1134, 1147), True, 'import numpy as np\n'), ((3436, 3467), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (3457, 3467), False, 'import keras\n'), ((700, 731), 'os.listdir', 'os.listdir', (["(path + folder + '/')"], {}), "(path + folder + '/')\n", (710, 731), False, 'import os\n'), ((1407, 1425), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (1415, 1425), True, 'import numpy as np\n'), ((2649, 2726), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(32)', '(3, 3)'], {'input_shape': '(256, 256, 1)', 'activation': '"""relu"""'}), "(32, (3, 3), input_shape=(256, 256, 1), activation='relu')\n", (2668, 2726), False, 'import keras\n'), ((2737, 2780), 'keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2762, 2780), False, 'import keras\n'), ((2795, 2845), 'keras.layers.Conv2D', 'keras.layers.Conv2D', (['(64)', '(2, 2)'], {'activation': '"""relu"""'}), "(64, (2, 2), activation='relu')\n", (2814, 2845), False, 'import keras\n'), ((2918, 2940), 'keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (2938, 2940), False, 'import keras\n'), ((2950, 2998), 'keras.layers.Dense', 'keras.layers.Dense', (['(32)'], {'activation': 'tf.nn.sigmoid'}), '(32, activation=tf.nn.sigmoid)\n', (2968, 2998), False, 'import keras\n'), ((3008, 3056), 'keras.layers.Dense', 'keras.layers.Dense', (['(16)'], {'activation': 'tf.nn.sigmoid'}), '(16, activation=tf.nn.sigmoid)\n', (3026, 3056), False, 'import keras\n'), ((3067, 3114), 'keras.layers.Dense', 'keras.layers.Dense', (['(7)'], {'activation': 'tf.nn.softmax'}), '(7, activation=tf.nn.softmax)\n', (3085, 3114), False, 'import keras\n'), ((1089, 1111), 'numpy.array', 'np.array', (['folder_array'], {}), '(folder_array)\n', (1097, 1111), True, 'import numpy as np\n'), ((946, 991), 'numpy.pad', 'np.pad', (['insertImage1', '(300, 300)', '"""symmetric"""'], {}), "(insertImage1, (300, 300), 'symmetric')\n", (952, 991), True, 'import numpy as np\n'), ((1426, 1442), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1434, 1442), True, 'import numpy as np\n'), ((1027, 1061), 'numpy.array', 'np.array', (['insertImage1[:256, :256]'], {}), '(insertImage1[:256, :256])\n', (1035, 1061), True, 'import numpy as np\n'), ((813, 859), 'PIL.Image.open', 'PIL.Image.open', (["(path + folder + '/' + filename)"], {}), "(path + folder + '/' + filename)\n", (827, 859), False, 'import PIL\n')]
|
import sklearn, re, nltk, base64, json, urllib2, os
import numpy as np
import cPickle as pickle
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
import os
MIN_RESULTS = 30 # Minimum number of results needed for valid user input
BASE_SEARCH_URL = 'https://api.twitter.com/1.1/search/tweets.json?'
class TweetMining(object):
def __init__(self, method = 'tf_idf_old'):
nltk.data.path.append('nltk_data/')
self.method = method
self.setup()
# Sets up Twitter API connection
def setup(self):
if os.path.isfile("config.py"):
config = {}
execfile("config.py", config)
consumer_key = config["consumer_key"]
consumer_secret = config["consumer_secret"]
elif os.path.isfile("project_template/config.py"):
config = {}
execfile("project_template/config.py", config)
consumer_key = config["consumer_key"]
consumer_secret = config["consumer_secret"]
else:
consumer_key = os.getenv('CONSUMER_KEY')
consumer_secret = os.getenv('CONSUMER_SECRET')
bearer_token = '%s:%s' % (consumer_key, consumer_secret)
bearer_token_64 = base64.b64encode(bearer_token)
token_request = urllib2.Request('https://api.twitter.com/oauth2/token')
token_request.add_header('Content-Type', 'application/x-www-form-urlencoded;charset=UTF-8')
token_request.add_header('Authorization', 'Basic %s' % bearer_token_64)
token_request.data = 'grant_type=client_credentials'
token_response = urllib2.urlopen(token_request)
token_contents = token_response.read()
token_data = json.loads(token_contents)
self.access_token = token_data['access_token']
if os.path.isfile("smaller_pho_dict.p"):
with open('smaller_pho_dict.p', 'rb') as handle:
self.dict = pickle.load(handle)
else:
with open('project_template/smaller_pho_dict.p', 'rb') as handle:
self.dict = pickle.load(handle)
if self.method == 'tf_idf_new':
if os.path.isfile("idf.pickle"):
with open('idf.pickle', 'rb') as handle:
self.idf = pickle.load(handle)
else:
with open('project_template/idf.pickle', 'rb') as handle:
self.idf = pickle.load(handle)
# Returns list of at most num_words topical words for the given hashtag_set
def get_topical_words(self, hashtag_set, num_words = 30):
hashtag_set = self.cleanup_tags(hashtag_set)
if self.method == 'tf_idf_old':
statuses = [t['text'] for t in self.get_tweets(hashtag_set, 100)]
if len(statuses) < MIN_RESULTS:
return []
self.process_tweets(statuses)
vect = TfidfVectorizer(min_df = 2, stop_words = 'english', strip_accents = 'ascii')
matrix = vect.fit_transform(statuses)
top_indices = np.argsort(vect.idf_)[::-1]
features = vect.get_feature_names()
return [features[i] for i in top_indices[:num_words]]
elif self.method == 'tf_idf_new':
statuses = [t['text'] for t in self.get_tweets(hashtag_set, 200 * len(hashtag_set))]
if len(statuses) < MIN_RESULTS:
return [], []
self.process_tweets(statuses, nouns_only = False)
getIDF = lambda word : self.idf[word] if word in self.idf else 0
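            # manual tf-idf: term counts over the pooled tweets weighted by log(1.6M / (1 + value from the precomputed idf table))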
vect = CountVectorizer(stop_words = 'english', strip_accents = 'ascii')
tf = vect.fit_transform([' '.join(statuses)]).toarray()
features = vect.get_feature_names()
idf_vals = np.array([np.log(1600000.0 / (1 + getIDF(word))) for word in features])
tfidf = np.multiply(tf, idf_vals)
top_indices = np.argsort(tfidf[0])[::-1]
max_tfidf = tfidf[0][top_indices[0]]
frequencies = [(features[i], 80 * (tfidf[0][i] / max_tfidf)) for i in top_indices[:40]]
top_words = [(word, max_tfidf * 1.01) for word in hashtag_set if word.upper() in self.dict and word not in features]
for i in top_indices:
word = features[i]
if not any(word in pair for pair in top_words) and word.upper() in self.dict:
top_words.append((word, tfidf[0][i]))
if len(top_words) == num_words:
break
return top_words, frequencies
else:
raise Exception('Error: Invalid method specified')
# Helper function for get_topical_words
# Cleans up hashtag list input by stripping hashtags if they exist
def cleanup_tags(self, hashtags):
return [h.strip(',').strip('#').strip() for h in hashtags]
# Helper function for get_topical_words
# Returns list of dicts; access "text" key to get status text
# hashtag_set is a list of hashtags to search for (don't include #)
def get_tweets(self, hashtag_set, num_tweets = 500):
num_queries = num_tweets / 100
extra_tweets = num_tweets % 100
base_query = BASE_SEARCH_URL + 'q='
for i in range(len(hashtag_set)):
base_query += '%23' + hashtag_set[i]
if i < len(hashtag_set) - 1:
base_query += '%20OR%20'
base_query += '&lang=en&result_type=recent&count=100'
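        # page through the search API 100 tweets at a time, using max_id to continue where the previous page ended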
def callAPI(query_url):
request = urllib2.Request(query_url)
request.add_header('Authorization', 'Bearer %s' % self.access_token)
response = urllib2.urlopen(request)
contents = response.read()
return json.loads(contents)
result = []
query = base_query
for q in range(num_queries):
statuses = callAPI(query)['statuses']
if statuses == []:
return []
result.extend(statuses)
minID = min([status['id'] for status in statuses])
query = base_query + '&max_id=' + str(minID)
        if extra_tweets > 0:
query = re.sub(r'&count=\d+', '', query) + '&count=' + str(extra_tweets)
result.extend(callAPI(query)['statuses'])
return result
# Helper method for get_topical_words
# Processes statuses in-place by removing irrelevant components
def process_tweets(self, statuses, nouns_only = True):
for i in range(len(statuses)):
statuses[i] = re.sub(r'\S*/\S*', '', statuses[i]) # Links
statuses[i] = re.sub(r'htt\S*', '', statuses[i]) # Hanging https
statuses[i] = re.sub(r'#\S*', '', statuses[i]) # Hashtag symbols
statuses[i] = re.sub(r'(RT)*( )?@\S*', '', statuses[i]) # RT, @user
statuses[i] = re.sub(r'(RT |rt[^a-z])', '', statuses[i]) # RT/rt
statuses[i] = re.sub(r'\S*\d+\S*', '', statuses[i]) # Numerical
statuses[i] = re.sub(r"\w+'[^s ]+", '', statuses[i]) # Contractions
statuses[i] = re.sub(r'&\S+;', '', statuses[i]) # HTML entities
if nouns_only:
pos_info = nltk.pos_tag(nltk.word_tokenize(statuses[i]))
statuses[i] = ' '.join([word[0] for word in pos_info if 'NN' in word[1]])
|
[
"os.getenv",
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.multiply",
"json.loads",
"sklearn.feature_extraction.text.TfidfVectorizer",
"urllib2.Request",
"nltk.data.path.append",
"cPickle.load",
"nltk.word_tokenize",
"numpy.argsort",
"os.path.isfile",
"base64.b64encode",
"re.sub",
"urllib2.urlopen"
] |
[((451, 486), 'nltk.data.path.append', 'nltk.data.path.append', (['"""nltk_data/"""'], {}), "('nltk_data/')\n", (472, 486), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((607, 634), 'os.path.isfile', 'os.path.isfile', (['"""config.py"""'], {}), "('config.py')\n", (621, 634), False, 'import os\n'), ((1274, 1304), 'base64.b64encode', 'base64.b64encode', (['bearer_token'], {}), '(bearer_token)\n', (1290, 1304), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((1330, 1385), 'urllib2.Request', 'urllib2.Request', (['"""https://api.twitter.com/oauth2/token"""'], {}), "('https://api.twitter.com/oauth2/token')\n", (1345, 1385), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((1653, 1683), 'urllib2.urlopen', 'urllib2.urlopen', (['token_request'], {}), '(token_request)\n', (1668, 1683), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((1752, 1778), 'json.loads', 'json.loads', (['token_contents'], {}), '(token_contents)\n', (1762, 1778), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((1846, 1882), 'os.path.isfile', 'os.path.isfile', (['"""smaller_pho_dict.p"""'], {}), "('smaller_pho_dict.p')\n", (1860, 1882), False, 'import os\n'), ((821, 865), 'os.path.isfile', 'os.path.isfile', (['"""project_template/config.py"""'], {}), "('project_template/config.py')\n", (835, 865), False, 'import os\n'), ((2190, 2218), 'os.path.isfile', 'os.path.isfile', (['"""idf.pickle"""'], {}), "('idf.pickle')\n", (2204, 2218), False, 'import os\n'), ((2917, 2987), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'min_df': '(2)', 'stop_words': '"""english"""', 'strip_accents': '"""ascii"""'}), "(min_df=2, stop_words='english', strip_accents='ascii')\n", (2932, 2987), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((5533, 5559), 'urllib2.Request', 'urllib2.Request', (['query_url'], {}), '(query_url)\n', (5548, 5559), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((5664, 5688), 'urllib2.urlopen', 'urllib2.urlopen', (['request'], {}), '(request)\n', (5679, 5688), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((5747, 5767), 'json.loads', 'json.loads', (['contents'], {}), '(contents)\n', (5757, 5767), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((6565, 6601), 're.sub', 're.sub', (['"""\\\\S*/\\\\S*"""', '""""""', 'statuses[i]'], {}), "('\\\\S*/\\\\S*', '', statuses[i])\n", (6571, 6601), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((6635, 6669), 're.sub', 're.sub', (['"""htt\\\\S*"""', '""""""', 'statuses[i]'], {}), "('htt\\\\S*', '', statuses[i])\n", (6641, 6669), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((6712, 6744), 're.sub', 're.sub', (['"""#\\\\S*"""', '""""""', 'statuses[i]'], {}), "('#\\\\S*', '', statuses[i])\n", (6718, 6744), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((6789, 6830), 're.sub', 're.sub', (['"""(RT)*( )?@\\\\S*"""', '""""""', 'statuses[i]'], {}), "('(RT)*( )?@\\\\S*', '', statuses[i])\n", (6795, 6830), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((6869, 6910), 're.sub', 're.sub', (['"""(RT |rt[^a-z])"""', '""""""', 'statuses[i]'], {}), "('(RT |rt[^a-z])', '', statuses[i])\n", (6875, 6910), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((6946, 6985), 're.sub', 're.sub', (['"""\\\\S*\\\\d+\\\\S*"""', '""""""', 'statuses[i]'], {}), "('\\\\S*\\\\d+\\\\S*', '', statuses[i])\n", (6952, 6985), False, 'import 
sklearn, re, nltk, base64, json, urllib2, os\n'), ((7022, 7060), 're.sub', 're.sub', (['"""\\\\w+\'[^s ]+"""', '""""""', 'statuses[i]'], {}), '("\\\\w+\'[^s ]+", \'\', statuses[i])\n', (7028, 7060), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((7102, 7135), 're.sub', 're.sub', (['"""&\\\\S+;"""', '""""""', 'statuses[i]'], {}), "('&\\\\S+;', '', statuses[i])\n", (7108, 7135), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((1097, 1122), 'os.getenv', 'os.getenv', (['"""CONSUMER_KEY"""'], {}), "('CONSUMER_KEY')\n", (1106, 1122), False, 'import os\n'), ((1153, 1181), 'os.getenv', 'os.getenv', (['"""CONSUMER_SECRET"""'], {}), "('CONSUMER_SECRET')\n", (1162, 1181), False, 'import os\n'), ((1973, 1992), 'cPickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1984, 1992), True, 'import cPickle as pickle\n'), ((2113, 2132), 'cPickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (2124, 2132), True, 'import cPickle as pickle\n'), ((3070, 3091), 'numpy.argsort', 'np.argsort', (['vect.idf_'], {}), '(vect.idf_)\n', (3080, 3091), True, 'import numpy as np\n'), ((3586, 3646), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'stop_words': '"""english"""', 'strip_accents': '"""ascii"""'}), "(stop_words='english', strip_accents='ascii')\n", (3601, 3646), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((3883, 3908), 'numpy.multiply', 'np.multiply', (['tf', 'idf_vals'], {}), '(tf, idf_vals)\n', (3894, 3908), True, 'import numpy as np\n'), ((2308, 2327), 'cPickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (2319, 2327), True, 'import cPickle as pickle\n'), ((2451, 2470), 'cPickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (2462, 2470), True, 'import cPickle as pickle\n'), ((3936, 3956), 'numpy.argsort', 'np.argsort', (['tfidf[0]'], {}), '(tfidf[0])\n', (3946, 3956), True, 'import numpy as np\n'), ((6188, 6220), 're.sub', 're.sub', (['"""&count=\\\\d+"""', '""""""', 'query'], {}), "('&count=\\\\d+', '', query)\n", (6194, 6220), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n'), ((7220, 7251), 'nltk.word_tokenize', 'nltk.word_tokenize', (['statuses[i]'], {}), '(statuses[i])\n', (7238, 7251), False, 'import sklearn, re, nltk, base64, json, urllib2, os\n')]
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.deletion import CASCADE
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=CASCADE, null=True, blank=True)
supervisor = models.ForeignKey(User, on_delete=CASCADE, null=True, blank=True, related_name='subordinates')
canDistributeInventory = models.BooleanField(default=False, blank=True)
canApproveInventory = models.BooleanField(default=False, blank=True)
canApproveLeave = models.BooleanField(default=False, blank=True)
canManageAsset = models.BooleanField(default=False, blank=True)
def __str__(self):
return self.user.get_full_name()
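    # keep a Profile row attached to every User via post_save signals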
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
try:
profile = instance.profile
except:
profile = Profile.objects.create(user=instance)
profile.save()
def as_json(self):
if self.user is not None:
return dict(
id=self.user.id,
username=self.user.username,
first_name=self.user.first_name,
last_name=self.user.last_name,
email=self.user.email,
manager_id=self.supervisor.id if self.supervisor is not None else None,
can_distribute_inventory=self.canDistributeInventory,
can_approve_inventory=self.canApproveInventory,
can_approve_leave=self.canApproveLeave,
can_manage_asset=self.canManageAsset,)
else:
return None
|
[
"django.db.models.ForeignKey",
"django.db.models.OneToOneField",
"django.dispatch.receiver",
"django.db.models.BooleanField"
] |
[((244, 312), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'CASCADE', 'null': '(True)', 'blank': '(True)'}), '(User, on_delete=CASCADE, null=True, blank=True)\n', (264, 312), False, 'from django.db import models\n'), ((330, 428), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'CASCADE', 'null': '(True)', 'blank': '(True)', 'related_name': '"""subordinates"""'}), "(User, on_delete=CASCADE, null=True, blank=True,\n related_name='subordinates')\n", (347, 428), False, 'from django.db import models\n'), ((454, 500), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'blank': '(True)'}), '(default=False, blank=True)\n', (473, 500), False, 'from django.db import models\n'), ((527, 573), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'blank': '(True)'}), '(default=False, blank=True)\n', (546, 573), False, 'from django.db import models\n'), ((596, 642), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'blank': '(True)'}), '(default=False, blank=True)\n', (615, 642), False, 'from django.db import models\n'), ((664, 710), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'blank': '(True)'}), '(default=False, blank=True)\n', (683, 710), False, 'from django.db import models\n'), ((782, 814), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'User'}), '(post_save, sender=User)\n', (790, 814), False, 'from django.dispatch import receiver\n'), ((952, 984), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'User'}), '(post_save, sender=User)\n', (960, 984), False, 'from django.dispatch import receiver\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2015-2019 CNRS-UM LIRMM, CNRS-AIST JRL
# Copyright 2020 ANYbotics AG
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import collections
import json
import os
import re
from PySide2 import QtCore, QtGui, QtWidgets
from signal_logger import Silo
from signal_logger import find_log
import signal_logger_ui.ui as ui
from signal_logger_ui.signal_logger_tab import SignalLoggerTab
from signal_logger_ui.signal_logger_types import LineStyle, TextWithFontSize, GraphLabels
UserPlot = collections.namedtuple('UserPlot', [
'title', 'x', 'y1', 'y1d', 'y2', 'y2d', 'grid1', 'grid2', 'style',
'style2', 'graph_labels', 'extra'])
def load_user_plots(fpath):
if not os.path.exists(fpath):
return []
with open(fpath) as f:
user_plot_list = [UserPlot(*x) for x in json.load(f)]
for i, plt in enumerate(user_plot_list):
for y in plt.style:
plt.style[y] = LineStyle(**plt.style[y])
for y in plt.style2:
plt.style2[y] = LineStyle(**plt.style2[y])
if not isinstance(plt.graph_labels, GraphLabels):
for key, value in plt.graph_labels.items():
plt.graph_labels[key] = TextWithFontSize(
**plt.graph_labels[key])
user_plot_list[i] = plt._replace(graph_labels=GraphLabels(
**plt.graph_labels))
return user_plot_list
class CommonStyleDialog(QtWidgets.QDialog):
def __init__(self, parent, name, canvas, style):
super(CommonStyleDialog, self).__init__(parent)
self.name = name
self.canvas = canvas
self.style = style
self.setWindowTitle('Edit {} grid style'.format(name))
self.setModal(True)
self.layout = QtWidgets.QFormLayout(self)
self.linestyle = QtWidgets.QComboBox()
styles = ['-', ':', '--', '-.']
for s in styles:
self.linestyle.addItem(s)
self.linestyle.setCurrentIndex(styles.index(style.linestyle))
self.layout.addRow("Style", self.linestyle)
self.linewidth = QtWidgets.QLineEdit(str(style.linewidth))
self.linewidth.setValidator(QtGui.QDoubleValidator(0.01, 1e6, 2))
self.layout.addRow("Width", self.linewidth)
self.color = QtGui.QColor(style.color)
self.colorButton = QtWidgets.QPushButton("#")
self.colorButton.setStyleSheet(
"background-color: {}; color: {}".format(
self.style.color, self.style.color))
self.colorButton.released.connect(self.selectColor)
self.layout.addRow("Color", self.colorButton)
confirmLayout = QtWidgets.QHBoxLayout()
okButton = QtWidgets.QPushButton("Ok", self)
confirmLayout.addWidget(okButton)
okButton.clicked.connect(self.accept)
applyButton = QtWidgets.QPushButton("Apply", self)
confirmLayout.addWidget(applyButton)
applyButton.clicked.connect(self.apply)
cancelButton = QtWidgets.QPushButton("Cancel", self)
confirmLayout.addWidget(cancelButton)
cancelButton.clicked.connect(self.reject)
self.layout.addRow(confirmLayout)
def selectColor(self):
color = QtWidgets.QColorDialog.getColor(self.color)
if color.isValid():
self.color = color
self.colorButton.setStyleSheet(
"background-color: {}; color: {}".format(
self.color.name(), self.color.name()))
def apply(self):
self.style.linestyle = self.linestyle.currentText()
self.style.linewidth = float(self.linewidth.text())
self.style.color = self.color.name()
def accept(self):
super(CommonStyleDialog, self).accept()
self.apply()
class GridStyleDialog(CommonStyleDialog):
def __init__(self, parent, name, canvas, style):
super(GridStyleDialog, self).__init__(parent, name, canvas, style)
self.enabled = QtWidgets.QCheckBox()
self.enabled.setChecked(style.visible)
self.layout.insertRow(0, "Visible", self.enabled)
self.save = QtWidgets.QCheckBox()
self.layout.insertRow(
self.layout.rowCount() - 2, "Save as default", self.save)
def apply(self):
super(GridStyleDialog, self).apply()
self.style.visible = self.enabled.isChecked()
self.canvas.draw()
if self.save.isChecked():
self.parent().gridStyles[self.name] = self.style
with open(self.parent().gridStyleFile, 'w') as f:
json.dump(
self.parent().gridStyles, f, default=lambda o: o.__dict__)
class LineStyleDialog(CommonStyleDialog):
def __init__(self, parent, name, canvas, style, set_style_fn):
super(LineStyleDialog, self).__init__(parent, name, canvas, style)
self.set_style = set_style_fn
self.labelInput = QtWidgets.QLineEdit(style.label)
self.layout.insertRow(0, "Label", self.labelInput)
def apply(self):
super(LineStyleDialog, self).apply()
self.style.label = self.labelInput.text()
self.set_style(self.name, self.style)
self.canvas.draw()
class AllLineStyleDialog(QtWidgets.QDialog):
def __init__(self, parent, name, canvas, plots, style_fn):
super(AllLineStyleDialog, self).__init__(parent)
self.name = name
self.canvas = canvas
self.plots = plots
self.style = style_fn
self.setWindowTitle('Edit {} graph line style'.format(name))
self.setModal(True)
self.layout = QtWidgets.QGridLayout(self)
row = 0
[
self.layout.addWidget(QtWidgets.QLabel(txt), row, i)
for i, txt in enumerate(["Label", "Style", "Width", "Color"])
]
row += 1
self.plotWidgets = {}
def makeLineStyleComboBox(style):
ret = QtWidgets.QComboBox()
[ret.addItem(s) for s in ['-', ':', '--', '-.']]
ret.setCurrentIndex(['-', ':', '--', '-.'].index(style.linestyle))
return ret
def makeLineWidthEdit(style):
ret = QtWidgets.QLineEdit(str(style.linewidth))
ret.setValidator(QtGui.QDoubleValidator(0.01, 1e6, 2))
return ret
def makeColorButton(self, style):
ret = QtWidgets.QPushButton("#")
ret.color = QtGui.QColor(style.color)
ret.setStyleSheet(
"background-color: {color}; color: {color}".format(
color=style.color))
ret.released.connect(lambda bt=ret: self.selectColor(bt))
return ret
def add_plot(self, plot, style):
self.plotWidgets[plot] = [
QtWidgets.QLineEdit(style.label),
makeLineStyleComboBox(style),
makeLineWidthEdit(style),
makeColorButton(self, style)
]
[
self.layout.addWidget(w, row, i)
for i, w in enumerate(self.plotWidgets[plot])
]
for p in self.plots:
add_plot(self, p, self.style(p))
row += 1
hlayout = QtWidgets.QHBoxLayout()
okButton = QtWidgets.QPushButton("Ok", self)
okButton.clicked.connect(self.accept)
cancelButton = QtWidgets.QPushButton("Cancel", self)
cancelButton.clicked.connect(self.reject)
applyButton = QtWidgets.QPushButton("Apply", self)
applyButton.clicked.connect(self.apply)
hlayout.addWidget(okButton)
hlayout.addWidget(cancelButton)
hlayout.addWidget(applyButton)
self.layout.addLayout(hlayout, row, 1, 1, 3)
def selectColor(self, button):
color = QtWidgets.QColorDialog.getColor(button.color)
if color.isValid():
button.color = color
button.setStyleSheet(
"background-color: {color}; color: {color}".format(
color=color.name()))
def apply(self):
for y, widgets in self.plotWidgets.items():
label = widgets[0].text()
linestyle = widgets[1].currentText()
linewidth = float(widgets[2].text())
color = widgets[3].color.name()
st = LineStyle(
label=label, linestyle=linestyle, linewidth=linewidth,
color=color)
self.style(y, st)
self.canvas.draw()
def accept(self):
super(AllLineStyleDialog, self).accept()
self.apply()
class LabelsTitleEditDialog(QtWidgets.QDialog):
def __init__(self, parent, canvas):
super(LabelsTitleEditDialog, self).__init__(parent)
self.canvas = canvas
self.setWindowTitle('Edit graph title')
self.setModal(True)
self.layout = QtWidgets.QGridLayout(self)
row = 0
self.titleEdit = QtWidgets.QLineEdit(canvas.title())
self.titleFontsizeEdit = QtWidgets.QLineEdit(str(canvas.title_fontsize()))
self.titleFontsizeEdit.setValidator(QtGui.QDoubleValidator(1, 1e6, 1))
self.layout.addWidget(QtWidgets.QLabel("Title"), row, 0)
self.layout.addWidget(self.titleEdit, row, 1)
self.layout.addWidget(self.titleFontsizeEdit, row, 2)
row += 1
self.xLabelEdit = QtWidgets.QLineEdit(canvas.x_label())
self.xLabelFontsizeEdit = QtWidgets.QLineEdit(
str(canvas.x_label_fontsize()))
self.xLabelFontsizeEdit.setValidator(QtGui.QDoubleValidator(1, 1e6, 1))
self.layout.addWidget(QtWidgets.QLabel("X label"), row, 0)
self.layout.addWidget(self.xLabelEdit, row, 1)
self.layout.addWidget(self.xLabelFontsizeEdit, row, 2)
row += 1
self.y1LabelEdit = QtWidgets.QLineEdit(canvas.y1_label())
self.y1LabelFontsizeEdit = QtWidgets.QLineEdit(
str(canvas.y1_label_fontsize()))
self.y1LabelFontsizeEdit.setValidator(
QtGui.QDoubleValidator(1, 1e6, 1))
self.layout.addWidget(QtWidgets.QLabel("Y1 label"), row, 0)
self.layout.addWidget(self.y1LabelEdit, row, 1)
self.layout.addWidget(self.y1LabelFontsizeEdit, row, 2)
row += 1
self.y2LabelEdit = QtWidgets.QLineEdit(canvas.y2_label())
self.y2LabelFontsizeEdit = QtWidgets.QLineEdit(
str(canvas.y2_label_fontsize()))
self.y2LabelFontsizeEdit.setValidator(
QtGui.QDoubleValidator(1, 1e6, 1))
self.layout.addWidget(QtWidgets.QLabel("Y2 label"), row, 0)
self.layout.addWidget(self.y2LabelEdit, row, 1)
self.layout.addWidget(self.y2LabelFontsizeEdit, row, 2)
row += 1
self.extraLayout = QtWidgets.QGridLayout()
extraRow = 0
self.extraLayout.addWidget(QtWidgets.QLabel("Tick size"), extraRow, 0)
self.extraLayout.addWidget(QtWidgets.QLabel("Label padding"), extraRow, 1)
self.extraLayout.addWidget(QtWidgets.QLabel("Top offset"), extraRow, 2)
self.extraLayout.addWidget(QtWidgets.QLabel("Bottom offset"), extraRow, 3)
extraRow += 1
self.tickSizeEdit = QtWidgets.QLineEdit(str(canvas.tick_fontsize()))
self.tickSizeEdit.setValidator(QtGui.QDoubleValidator(1, 1e6, 1))
self.labelPaddingEdit = QtWidgets.QLineEdit(str(canvas.labelpad()))
self.labelPaddingEdit.setValidator(QtGui.QDoubleValidator(1, 1e6, 1))
self.topOffsetEdit = QtWidgets.QLineEdit(str(canvas.top_offset()))
self.topOffsetEdit.setValidator(QtGui.QDoubleValidator(0, 1, 3))
self.bottomOffsetEdit = QtWidgets.QLineEdit(str(canvas.bottom_offset()))
self.bottomOffsetEdit.setValidator(QtGui.QDoubleValidator(0, 1, 3))
self.extraLayout.addWidget(self.tickSizeEdit, extraRow, 0)
self.extraLayout.addWidget(self.labelPaddingEdit, extraRow, 1)
self.extraLayout.addWidget(self.topOffsetEdit, extraRow, 2)
self.extraLayout.addWidget(self.bottomOffsetEdit, extraRow, 3)
extraRow += 1
self.extraLayout.addWidget(
QtWidgets.QLabel("Legend size"), extraRow, 0)
self.extraLayout.addWidget(
QtWidgets.QLabel("Legend Y1 columns"), extraRow, 1, 1, 2)
self.extraLayout.addWidget(
QtWidgets.QLabel("Legend Y2 columns"), extraRow, 3, 1, 2)
extraRow += 1
self.legendSizeEdit = QtWidgets.QLineEdit(str(canvas.legend_fontsize()))
self.legendSizeEdit.setValidator(QtGui.QDoubleValidator(1, 1e6, 1))
self.y1LegendNColEdit = QtWidgets.QLineEdit(str(canvas.y1_legend_ncol()))
self.y1LegendNColEdit.setValidator(QtGui.QIntValidator(1, 100))
self.y2LegendNColEdit = QtWidgets.QLineEdit(str(canvas.y2_legend_ncol()))
self.y2LegendNColEdit.setValidator(QtGui.QIntValidator(1, 100))
self.extraLayout.addWidget(self.legendSizeEdit, extraRow, 0)
self.extraLayout.addWidget(self.y1LegendNColEdit, extraRow, 1, 1, 2)
self.extraLayout.addWidget(self.y2LegendNColEdit, extraRow, 3, 1, 2)
extraRow += 1
self.layout.addLayout(self.extraLayout, row, 0, extraRow, 3)
row += extraRow
hlayout = QtWidgets.QHBoxLayout()
Ok = QtWidgets.QPushButton("Ok")
Ok.clicked.connect(self.accept)
hlayout.addWidget(Ok)
Cancel = QtWidgets.QPushButton("Cancel")
Cancel.clicked.connect(self.reject)
hlayout.addWidget(Cancel)
Apply = QtWidgets.QPushButton("Apply")
Apply.clicked.connect(self.apply)
hlayout.addWidget(Apply)
self.layout.addLayout(hlayout, row, 0, 1, 3)
def apply(self):
self.canvas.title(self.titleEdit.text())
self.canvas.title_fontsize(float(self.titleFontsizeEdit.text()))
self.canvas.x_label(self.xLabelEdit.text())
self.canvas.x_label_fontsize(self.xLabelFontsizeEdit.text())
self.canvas.y1_label(self.y1LabelEdit.text())
self.canvas.y1_label_fontsize(self.y1LabelFontsizeEdit.text())
self.canvas.y2_label(self.y2LabelEdit.text())
self.canvas.y2_label_fontsize(self.y2LabelFontsizeEdit.text())
self.canvas.tick_fontsize(float(self.tickSizeEdit.text()))
self.canvas.legend_fontsize(float(self.legendSizeEdit.text()))
self.canvas.labelpad(float(self.labelPaddingEdit.text()))
self.canvas.top_offset(float(self.topOffsetEdit.text()))
self.canvas.bottom_offset(float(self.bottomOffsetEdit.text()))
self.canvas.y1_legend_ncol(int(self.y1LegendNColEdit.text()))
self.canvas.y2_legend_ncol(int(self.y2LegendNColEdit.text()))
self.canvas.draw()
def accept(self):
super(LabelsTitleEditDialog, self).accept()
self.apply()
class SignalLoggerUI(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(SignalLoggerUI, self).__init__(parent)
self.ui = ui.MainWindow()
self.ui.setupUi(self)
self.tab_re = re.compile('^Plot [0-9]+$')
self.data = {}
self.gridStyles = {
'left': LineStyle(),
'right': LineStyle(linestyle=':')}
self.gridStyleFile = \
os.path.expanduser("~") + "/.config/signal_logger/grid_style.json"
if os.path.exists(self.gridStyleFile):
with open(self.gridStyleFile) as f:
data = json.load(f)
for k in self.gridStyles.keys():
if k in data:
self.gridStyles[k] = LineStyle(**data[k])
UserPlot.__new__.__defaults__ = (
self.gridStyles['left'], self.gridStyles['right'], {}, {},
GraphLabels(), {})
self.user_plot_file = os.path.expanduser("~") + "/.config/signal_logger/custom_plots.json"
self.reload_user_plots()
self.styleMenu = QtWidgets.QMenu("Style", self.ui.menubar)
# Line style menu
self.lineStyleMenu = QtWidgets.QMenu("Lines", self.styleMenu)
def fillLineStyleMenu(self):
self.lineStyleMenu.clear()
canvas = self.getCanvas()
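            # rebuild the menu from whatever is currently plotted on the left and right axes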
def makePlotMenu(self, name, plots, style_fn):
if len(plots) < 1:
return
menu = QtWidgets.QMenu(name, self.lineStyleMenu)
group = QtWidgets.QActionGroup(menu)
action = QtWidgets.QAction("All", group)
action.triggered.connect(
lambda: AllLineStyleDialog(
self, name, self.getCanvas(), plots, style_fn).exec_())
group.addAction(action)
sep = QtWidgets.QAction(group)
sep.setSeparator(True)
group.addAction(sep)
for y in plots:
style = style_fn(y)
action = QtWidgets.QAction(style.label, group)
action.triggered.connect(
lambda yin=y, stylein=style:
LineStyleDialog(
self, yin, self.getCanvas(), stylein,
style_fn).exec_())
group.addAction(action)
menu.addActions(group.actions())
self.lineStyleMenu.addMenu(menu)
makePlotMenu(
self, "Left", canvas.axes_plots.keys(), canvas.style_left)
makePlotMenu(
self, "Right", canvas.axes2_plots.keys(), canvas.style_right)
self.lineStyleMenu.aboutToShow.connect(lambda: fillLineStyleMenu(self))
self.styleMenu.addMenu(self.lineStyleMenu)
# Grid style menu
self.gridStyleMenu = QtWidgets.QMenu("Grids", self.styleMenu)
self.gridDisplayActionGroup = QtWidgets.QActionGroup(self.gridStyleMenu)
self.gridDisplayActionGroup.setExclusive(True)
self.leftGridAction = QtWidgets.QAction(
"Left", self.gridDisplayActionGroup)
self.leftGridAction.triggered.connect(
lambda: GridStyleDialog(
self, "left", self.getCanvas(), self.getCanvas().grid).exec_())
self.gridDisplayActionGroup.addAction(self.leftGridAction)
self.rightGridAction = QtWidgets.QAction(
"Right", self.gridDisplayActionGroup)
self.rightGridAction.triggered.connect(
lambda: GridStyleDialog(
self, "right", self.getCanvas(),
self.getCanvas().grid2).exec_())
self.gridDisplayActionGroup.addAction(self.rightGridAction)
self.gridStyleMenu.addActions(self.gridDisplayActionGroup.actions())
self.styleMenu.addMenu(self.gridStyleMenu)
# Labels
self.titleAction = QtWidgets.QAction(
"Title, labels and fonts", self.styleMenu)
self.titleAction.triggered.connect(
lambda: LabelsTitleEditDialog(self, self.getCanvas()).exec_())
self.styleMenu.addAction(self.titleAction)
self.ui.menubar.addMenu(self.styleMenu)
self.addApplicationShortcut(
QtCore.Qt.CTRL + QtCore.Qt.Key_O, self.shortcutOpenFile)
self.addApplicationShortcut(
QtCore.Qt.CTRL + QtCore.Qt.Key_W, self.shortcutCloseTab)
self.addApplicationShortcut(
QtCore.Qt.CTRL + QtCore.Qt.Key_PageDown, self.shortcutNextTab)
self.addApplicationShortcut(
QtCore.Qt.CTRL + QtCore.Qt.Key_PageUp, self.shortcutPreviousTab)
self.addApplicationShortcut(
QtCore.Qt.CTRL + QtCore.Qt.Key_T, self.shortcutNewTab)
self.addApplicationShortcut(
QtCore.Qt.CTRL + QtCore.Qt.Key_S, self.save_user_plot)
self.addApplicationShortcut(
QtCore.Qt.CTRL + QtCore.Qt.Key_A, self.shortcutAxesDialog)
def reload_user_plots(self):
self.user_plots = load_user_plots(self.user_plot_file)
self.update_user_plot_menu()
def save_user_plots(self):
# encoder used when serializing objects to JSON under Python 3
# (needed because in Python 3, filter/map return iterators, not lists)
def encode(o):
if isinstance(o, filter) or isinstance(o, map):
return list(o)
return vars(o)
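# Illustrative behaviour of the encoder above (not executed here): on Python 3,
#   json.dumps(map(str, [1, 2]))                   # raises TypeError
#   json.dumps(map(str, [1, 2]), default=encode)   # -> '["1", "2"]'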
conf_dir = os.path.dirname(self.user_plot_file)
if not os.path.exists(conf_dir):
os.makedirs(conf_dir)
with open(self.user_plot_file, 'w') as f:
json.dump(
self.user_plots, f, default=encode, indent=2,
separators=(',', ': '))
self.update_user_plot_menu()
def addApplicationShortcut(self, key, callback):
shortcut = QtWidgets.QShortcut(self)
shortcut.setKey(key)
shortcut.setContext(QtCore.Qt.ShortcutContext.ApplicationShortcut)
shortcut.activated.connect(lambda: callback())
def update_user_plot_menu(self):
self.ui.menuUserPlots.clear()
def _gen_closure(fnc, arg):
"""Utility to generate a closure; discards the state passed by the triggered Qt signal."""
return lambda s: fnc(arg)
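# Without this indirection, connecting `lambda s: self.plot_user_plot(p)`
# directly inside the loops below would late-bind `p`, so every menu entry
# would act on the last user plot; binding the argument as a parameter here
# (or via a default argument, e.g. `lambda s, a=p: self.plot_user_plot(a)`)
# captures the intended value for each iteration.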
for p in self.user_plots:
act = QtWidgets.QAction(p.title, self.ui.menuUserPlots)
act.triggered.connect(_gen_closure(self.plot_user_plot, p))
self.ui.menuUserPlots.addAction(act)
self.ui.menuUserPlots.addSeparator()
act = QtWidgets.QAction("Reload custom plots", self.ui.menuUserPlots)
act.triggered.connect(self.reload_user_plots)
self.ui.menuUserPlots.addAction(act)
if len(self.user_plots) > 0:
rmUserPlotMenu = QtWidgets.QMenu(
"Remove plot", self.ui.menuUserPlots)
for p in self.user_plots:
act = QtWidgets.QAction(p.title, self.ui.menuUserPlots)
act.triggered.connect(
_gen_closure(self.remove_user_plot, p))
rmUserPlotMenu.addAction(act)
self.ui.menuUserPlots.addMenu(rmUserPlotMenu)
act = QtWidgets.QAction("Save current plot", self.ui.menuUserPlots)
act.triggered.connect(self.save_user_plot)
self.ui.menuUserPlots.addAction(act)
def save_user_plot(self):
tab = self.ui.tabWidget.currentWidget()
canvas = tab.ui.canvas
valid = len(canvas.axes_plots) != 0 or len(canvas.axes2_plots) != 0
if not valid:
err_diag = QtWidgets.QMessageBox(self)
err_diag.setModal(True)
err_diag.setText("Cannot save custom plot if nothing is shown")
err_diag.exec_()
return
defaultTitle = self.ui.tabWidget.tabText(
self.ui.tabWidget.currentIndex())
if defaultTitle.startswith("Plot"):
defaultTitle = ""
title, ok = QtWidgets.QInputDialog.getText(
self, "Custom plot", "Title:", text=defaultTitle)
if ok:
y1 = list(filter(
lambda k: k in self.data.keys(),
canvas.axes_plots.keys()))
y2 = list(filter(
lambda k: k in self.data.keys(),
canvas.axes2_plots.keys()))
y1d = list(map(
lambda sp: "{}_{}".format(sp.name, sp.id),
filter(lambda sp: sp.idx == 0, tab.specials.values())))
y2d = list(map(
lambda sp: "{}_{}".format(sp.name, sp.id),
filter(lambda sp: sp.idx == 1, tab.specials.values())))
style = {
y: canvas.style_left(y)
for y in canvas.axes_plots.keys()}
style2 = {
y: canvas.style_right(y)
for y in canvas.axes2_plots.keys()}
found = False
extra = {
p: getattr(self.getCanvas(), p)()
for p in [
"tick_fontsize", "legend_fontsize", "labelpad",
"top_offset", "bottom_offset", "y1_legend_ncol",
"y2_legend_ncol"]}
user_plot = UserPlot(
title, tab.x_data, y1, y1d, y2, y2d, self.getCanvas().grid,
self.getCanvas().grid2, style, style2, GraphLabels(
title=TextWithFontSize(
canvas.title(), canvas.title_fontsize()),
x_label=TextWithFontSize(
canvas.x_label(), canvas.x_label_fontsize()),
y1_label=TextWithFontSize(
canvas.y1_label(), canvas.y1_label_fontsize()),
y2_label=TextWithFontSize(
canvas.y2_label(), canvas.y2_label_fontsize())),
extra)
for i in range(len(self.user_plots)):
if self.user_plots[i].title == title:
self.user_plots[i] = user_plot
found = True
break
if not found:
self.user_plots.append(user_plot)
self.save_user_plots()
def plot_user_plot(self, p):
valid = p.x in self.data.keys() and all([
y in self.data.keys() for x in [p.y1, p.y2] for y in x])
if not valid:
missing_entries = ""
if p.x not in self.data.keys():
missing_entries += "- {}\n".format(p.x)
for x in [p.y1, p.y1d, p.y2, p.y2d]:
for y in x:
if y not in self.data.keys():
missing_entries += "- {}\n".format(y)
missing_entries = missing_entries[:-1]
err_diag = QtWidgets.QMessageBox(self)
err_diag.setModal(True)
err_diag.setText(
"Plot {} is not valid for this log file, some data is "
"missing\nMissing entries:\n{}".format(
p.title, missing_entries))
err_diag.exec_()
return
plotW = SignalLoggerTab.UserPlot(self, p)
self.ui.tabWidget.insertTab(
self.ui.tabWidget.count() - 1, plotW, p.title)
self.ui.tabWidget.setCurrentIndex(self.ui.tabWidget.count() - 2)
self.updateClosable()
def remove_user_plot(self, p_in):
for p in self.user_plots:
if p.title == p_in.title:
self.user_plots.remove(p)
break
self.save_user_plots()
def getCanvas(self):
return self.ui.tabWidget.currentWidget().ui.canvas
@QtCore.Slot()
def on_actionLoad_triggered(self):
fpath = QtWidgets.QFileDialog.getOpenFileName(self, "Log file")[0]
if len(fpath):
self.load_log(fpath)
@QtCore.Slot()
def on_actionExit_triggered(self):
QtWidgets.QApplication.quit()
@QtCore.Slot(int)
def on_tabWidget_currentChanged(self, idx):
if idx == self.ui.tabWidget.count() - 1:
plotW = SignalLoggerTab(self)
plotW.setData(self.data)
plotW.setGridStyles(self.gridStyles)
j = 1
for i in range(self.ui.tabWidget.count() - 1):
if self.tab_re.match(self.ui.tabWidget.tabText(i)):
j += 1
self.ui.tabWidget.insertTab(
self.ui.tabWidget.count() - 1, plotW, "Plot {}".format(j))
self.ui.tabWidget.setCurrentIndex(self.ui.tabWidget.count() - 2)
self.updateClosable()
@QtCore.Slot(int)
def on_tabWidget_tabCloseRequested(self, idx):
if self.ui.tabWidget.currentIndex() == idx:
self.ui.tabWidget.setCurrentIndex(abs(idx - 1))
self.ui.tabWidget.removeTab(idx)
j = 1
for i in range(self.ui.tabWidget.count() - 1):
if self.tab_re.match(self.ui.tabWidget.tabText(i)):
self.ui.tabWidget.setTabText(i, "Plot {}".format(j))
j += 1
self.updateClosable()
def updateClosable(self):
has_closable = self.ui.tabWidget.count() > 2
self.ui.tabWidget.setTabsClosable(has_closable)
if has_closable:
self.ui.tabWidget.tabBar().tabButton(
self.ui.tabWidget.count() - 1, QtWidgets.QTabBar.RightSide).hide()
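# The trailing tab acts as a "+" (new plot) tab: selecting it creates a fresh
# plot tab in on_tabWidget_currentChanged, so its close button is hidden above
# and tabs only become closable once more than two tabs exist.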
def shortcutOpenFile(self):
self.ui.actionLoad.triggered.emit()
def shortcutCloseTab(self):
if self.ui.tabWidget.tabsClosable():
self.ui.tabWidget.tabCloseRequested.emit(
self.ui.tabWidget.currentIndex())
def shortcutPreviousTab(self):
if self.ui.tabWidget.currentIndex() > 0:
self.ui.tabWidget.setCurrentIndex(
self.ui.tabWidget.currentIndex() - 1)
def shortcutNextTab(self):
if self.ui.tabWidget.currentIndex() < self.ui.tabWidget.count() - 2:
self.ui.tabWidget.setCurrentIndex(
self.ui.tabWidget.currentIndex() + 1)
def shortcutNewTab(self):
self.ui.tabWidget.setCurrentIndex(self.ui.tabWidget.count() - 1)
def shortcutAxesDialog(self):
self.ui.tabWidget.currentWidget().ui.canvas.axesDialog()
def load_log(self, fpath, logdir=None):
fpath = find_log(fpath, logdir=logdir)
self.silo = Silo(fpath, logdir, print_log_file_path=True)
self.data = self.silo.data
self.ui.consoleWidget.start(self.silo, self.ui)
for i in range(self.ui.tabWidget.count() - 1):
tab = self.ui.tabWidget.widget(i)
assert isinstance(tab, SignalLoggerTab)
tab.setData(self.data)
tab.setGridStyles(self.gridStyles)
self.setWindowTitle("Signal Logger - {}".format(
os.path.basename(fpath)))
|
[
"PySide2.QtWidgets.QFileDialog.getOpenFileName",
"PySide2.QtGui.QColor",
"PySide2.QtWidgets.QMessageBox",
"PySide2.QtWidgets.QActionGroup",
"PySide2.QtWidgets.QPushButton",
"PySide2.QtWidgets.QMenu",
"PySide2.QtWidgets.QFormLayout",
"PySide2.QtWidgets.QHBoxLayout",
"PySide2.QtCore.Slot",
"os.path.dirname",
"os.path.exists",
"PySide2.QtWidgets.QLabel",
"PySide2.QtWidgets.QCheckBox",
"PySide2.QtWidgets.QShortcut",
"signal_logger_ui.signal_logger_tab.SignalLoggerTab.UserPlot",
"PySide2.QtWidgets.QAction",
"signal_logger_ui.ui.MainWindow",
"PySide2.QtWidgets.QInputDialog.getText",
"os.path.basename",
"PySide2.QtWidgets.QApplication.quit",
"PySide2.QtWidgets.QComboBox",
"PySide2.QtWidgets.QLineEdit",
"PySide2.QtGui.QIntValidator",
"re.compile",
"json.load",
"os.makedirs",
"PySide2.QtWidgets.QGridLayout",
"PySide2.QtWidgets.QColorDialog.getColor",
"signal_logger.Silo",
"signal_logger_ui.signal_logger_types.LineStyle",
"signal_logger_ui.signal_logger_types.GraphLabels",
"signal_logger_ui.signal_logger_tab.SignalLoggerTab",
"collections.namedtuple",
"signal_logger_ui.signal_logger_types.TextWithFontSize",
"signal_logger.find_log",
"PySide2.QtGui.QDoubleValidator",
"os.path.expanduser"
] |
"""
GSMA data import unit tests.
Copyright (c) 2018 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from os import path
import zipfile
import pytest
from click.testing import CliRunner
from dirbs.importer.gsma_data_importer import GSMADataImporter
from dirbs.cli.importer import cli as dirbs_import_cli
from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents
from _fixtures import * # noqa: F403, F401
from _importer_params import GSMADataParams
def test_extract(db_conn, metadata_db_conn, mocked_config, tmpdir, logger, mocked_statsd):
"""Test Depot not available because it is not available on the command-line .
Verify that a zipped txt file can be imported.
"""
fn = 'gsma_simple_extraction_anonymized.txt'
abs_fn = path.join(path.abspath(path.dirname(__file__) + '/unittest_data/gsma'), fn)
zip_name = path.join(str(tmpdir), path.split(fn)[1][:-3] + 'zip')
with zipfile.ZipFile(zip_name, mode='w') as zf:
zf.write(abs_fn, arcname=path.split(fn)[1])
with get_importer(GSMADataImporter,
db_conn,
metadata_db_conn,
mocked_config.db_config,
tmpdir,
logger,
mocked_statsd,
GSMADataParams(filename=zip_name,
extract=True)) as imp:
imp.import_data()
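# Note: extract=True is what exercises the zip-extraction path here; the other
# GSMADataParams fixtures in this module pass extract=False (or omit it) and
# feed the importer a plain text file.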
def test_cli_gsma_importer(postgres, db_conn, tmpdir, mocked_config, logger):
"""Test Depot not available yet.
Verify that the CLI import command for GSMA is working properly.
"""
here = path.abspath(path.dirname(__file__))
data_dir = path.join(here, 'unittest_data/gsma')
valid_csv_gsma_data_file_name = 'sample_gsma_import_list_anonymized.txt'
valid_csv_gsma_data_file = path.join(data_dir, valid_csv_gsma_data_file_name)
# create a zip file inside a temp dir
valid_zip_gsma_data_file_path = str(tmpdir.join('sample_gsma_import_list_anonymized.zip'))
with zipfile.ZipFile(valid_zip_gsma_data_file_path, 'w') as valid_csv_operator_data_file_zfile:
# zipfile write() method supports an extra argument (arcname) which is
# the archive name to be stored in the zip file.
valid_csv_operator_data_file_zfile.write(valid_csv_gsma_data_file,
valid_csv_gsma_data_file_name)
# Run dirbs-report using db args from the temp postgres instance
runner = CliRunner() # noqa
result = runner.invoke(dirbs_import_cli, ['gsma_tac', valid_zip_gsma_data_file_path],
obj={'APP_CONFIG': mocked_config})
with db_conn.cursor() as cursor:
cursor.execute('SELECT tac FROM gsma_data ORDER BY tac')
result_list = [res.tac for res in cursor]
assert result.exit_code == 0
assert result_list == ['01234401', '01234402', '01234403', '01234404', '01234405', '01234406', '01234407']
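# The invocation above corresponds roughly to the shell command (assumed from
# the CLI wiring and the 'dirbs-import' command name recorded in job_metadata
# further below):
#   dirbs-import gsma_tac <path-to-gsma-zip>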
def test_row_count_stats(postgres, db_conn, tmpdir, mocked_config, logger):
"""Test Depot not available yet.
Verify output stats for CLI import command.
"""
# Part 1) populate the gsma table and verify the pre-import row count
# Part 2) import gsma file containing one duplicate (same row) and verify that staging_row_count value includes
# duplicates and import_table_new_row_count doesn't.
with db_conn, db_conn.cursor() as cursor:
cursor.execute("""INSERT INTO historic_gsma_data(tac,
manufacturer,
bands,
allocation_date,
model_name,
device_type,
start_date,
end_date)
VALUES('01234410',
'AManufacturer',
'd3bdf1170bf4b026e6e29b15a0d66a5ca83f1944',
NOW(),
'AMODEL',
'Handheld',
NOW(),
NULL),
('01234411',
'AManufacturer',
'd3bdf1170bf4b026e6e29b15a0d66a5ca83f1944',
NOW(),
'AMODEL',
'Handheld',
NOW(),
NULL)
""")
assert cursor.rowcount == 2
cursor.execute("""REFRESH MATERIALIZED VIEW CONCURRENTLY gsma_data""")
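# gsma_data appears to be a materialized view over historic_gsma_data, hence
# the refresh above; without it the 'Rows in table prior to import: 2' check
# further below would not see the two rows just inserted.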
here = path.abspath(path.dirname(__file__))
data_dir = path.join(here, 'unittest_data/gsma')
valid_csv_gsma_data_file_name = 'sample_gsma_import_list_dupl_anonymized.txt'
valid_csv_gsma_data_file = path.join(data_dir, valid_csv_gsma_data_file_name)
# create a zip file inside a temp dir
valid_zip_gsma_data_file_path = str(tmpdir.join('sample_gsma_import_list_dupl_anonymized.zip'))
with zipfile.ZipFile(valid_zip_gsma_data_file_path, 'w') as valid_csv_operator_data_file_zfile:
# zipfile write() method supports an extra argument (arcname) which is
# the archive name to be stored in the zip file.
valid_csv_operator_data_file_zfile.write(valid_csv_gsma_data_file,
valid_csv_gsma_data_file_name)
runner = CliRunner()
result = runner.invoke(dirbs_import_cli, ['gsma_tac', valid_zip_gsma_data_file_path],
obj={'APP_CONFIG': mocked_config})
with db_conn, db_conn.cursor() as cursor:
cursor.execute('SELECT tac FROM gsma_data ORDER BY tac')
result_list = [res.tac for res in cursor]
assert result.exit_code == 0
assert result_list == ['01234401', '01234402', '01234403', '01234404', '01234405', '01234406', '01234407']
assert len(result_list) == 7
# Test Part 1)
assert 'Rows in table prior to import: 2' in logger_stream_contents(logger)
# Test Part 2) - self.staging_row_count
assert 'Rows supplied in full input file: 8' in logger_stream_contents(logger)
# Test Part 2) - import_table_new_row_count=rows_before + rows_inserted - rows_deleted
assert 'Rows in table after import: 7' in logger_stream_contents(logger)
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_white_spaces.txt')],
indirect=True)
def test_preprocess_trim(gsma_tac_db_importer, logger, db_conn):
"""Test Depot ID not known yet.
Verify that a white space at the start or the end of a field can be handled and imported successfully.
The expected behaviour is that the white space is stripped out.
"""
expect_success(gsma_tac_db_importer, 2, db_conn, logger)
with db_conn, db_conn.cursor() as cursor:
cursor.execute('SELECT tac, manufacturer, bands, model_name FROM gsma_data ORDER BY tac')
result = [(x.tac, x.manufacturer, x.bands, x.model_name) for x in cursor.fetchall()]
assert result == [('21782434', None, 'a0a0db6e9eccb4a8c3a85452b79db6c793398d6a',
'927824c30540c400f59b6c02aeb0a30d5033eb1a'),
('38245933', '326d9e7920b30b698f189a83d2be6f4384496ebc',
'6cc923523f 690fe51b51efc747451bfbbe1994d9',
'cff96c002766bde09400d9030ad2d055e62b7a45')]
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_doublequotes.txt')],
indirect=True)
def test_preprocess_quoted(gsma_tac_db_importer, logger, db_conn):
"""Test Depot ID 96571/2.
Verify that a double quote at the start of a field can be handled and imported successfully.
The expected behaviour is that the double quote is stripped out.
"""
expect_success(gsma_tac_db_importer, 3, db_conn, logger)
with db_conn, db_conn.cursor() as cursor:
cursor.execute("""SELECT optional_fields->'marketing_name' AS mn
FROM gsma_data
WHERE tac = \'38245933\'""") # noqa Q444
assert cursor.fetchone().mn == 'Test Marketing Name'
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_escaped_quotes.txt')],
indirect=True)
def test_preprocess_escaped_quotes(gsma_tac_db_importer, logger, db_conn):
"""Test Depot ID 96767/2.
Verify that if a field is enclosed in double quotes, these are simply stripped out.
"""
expect_success(gsma_tac_db_importer, 1, db_conn, logger)
with db_conn, db_conn.cursor() as cursor:
cursor.execute("""SELECT optional_fields->'marketing_name' AS mn FROM gsma_data ORDER BY tac""") # noqa Q444
assert cursor.fetchone().mn == 'A Marketing name'
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_uneven_columns.txt')],
indirect=True)
def test_preprocess_uneven_rows(gsma_tac_db_importer, logger):
"""Test Depot ID 96695/18.
Verify that the gsma data file is rejected and not
imported if it contains an inconsistent number of fields per row.
"""
expect_failure(gsma_tac_db_importer, exc_message='Inconsistent number of fields per row')
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_missing_headers_july_2016.txt')],
indirect=True)
def test_preprocess_missing_headers(gsma_tac_db_importer, logger):
"""Test Depot ID 96573/4.
Verify that the gsma data file is rejected and not imported if a header column is missing.
Test file with no extra fields and missing headers.
"""
expect_failure(gsma_tac_db_importer, exc_message='Missing mandatory field')
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_small_july_2016.txt')],
indirect=True)
def test_preprocess_no_extra(gsma_tac_db_importer, logger, db_conn):
"""Test Depot ID 96670/10.
Verify that data import of GSMA Data is successful
when no extra fields are added to the data.
"""
expect_success(gsma_tac_db_importer, 3, db_conn, logger)
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_extracolumns_July_2016.txt')],
indirect=True)
def test_preprocess_extra(db_conn, gsma_tac_db_importer):
"""Test Depot ID 96581/12.
Verify that data import of GSMA Data is
successful when the extra fields are added to the data.
"""
gsma_tac_db_importer.import_data()
with db_conn, db_conn.cursor() as cursor:
cursor.execute("""SELECT optional_fields->'marketing_name' AS mn FROM gsma_data ORDER BY tac""") # noqa Q444
assert cursor.fetchone().mn == 'test'
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_small_july_2016.txt')],
indirect=True)
def test_repeat_data_upload(gsma_tac_db_importer, mocked_config, logger, mocked_statsd, db_conn,
metadata_db_conn, tmpdir):
"""Test Depot ID 96579/10.
Verify that valid GSMA Data can be successfully imported into the database
when repeating the import of the same file.
"""
expect_success(gsma_tac_db_importer, 3, db_conn, logger)
with get_importer(GSMADataImporter,
db_conn,
metadata_db_conn,
mocked_config.db_config,
tmpdir,
logger,
mocked_statsd,
GSMADataParams(filename='gsma_dump_small_july_2016.txt')) as imp:
expect_success(imp, 3, db_conn, logger)
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(
filename='gsma_dump_duplicaterecord_2016_large.txt')],
indirect=True)
def test_duplicate_tac_count(gsma_tac_db_importer):
"""Test duplicates."""
# gsma entries: 38245933(2 entries), 38245932(4 entries), 38245931(2 entries)
# expect the duplicates to be 1+3+1=5
expect_failure(gsma_tac_db_importer, exc_message='Conflicting rows check failed '
'(5 rows with same primary key and conflicting data)')
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_partialduplicaterecord_2016.txt')],
indirect=True)
def test_duplicate_tac_mismatch(gsma_tac_db_importer):
"""Test Depot ID not known yet.
Verify that partial duplicate entries (same TAC and at least 1 identical column value) in
another row are marked as duplicates and are not imported into the DB.
"""
expect_failure(gsma_tac_db_importer, exc_message='Conflicting rows check failed '
'(1 rows with same primary key and conflicting data)')
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_BlankTac_july_2016.txt')],
indirect=True)
def test_invalid_column_data_one(gsma_tac_db_importer, logger, db_conn, tmpdir):
"""Test Depot ID 96570/10.
Verify that GSMA data is pre-checked for invalid column specific information
and is not inserted into the DB.
"""
expect_failure(gsma_tac_db_importer,
exc_message='length(8) fails for line: 1, column: TAC, value: "BlankTac123"\\nFAIL')
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_InvalidTac_july_2016.txt')],
indirect=True)
def test_invalid_column_data_two(gsma_tac_db_importer, logger, db_conn, tmpdir):
"""Test Depot ID 96570/10.
Verify that GSMA data is pre-checked for invalid column specific information
and is not inserted into the DB.
"""
expect_failure(gsma_tac_db_importer,
exc_message='length(8) fails for line: 1, column: TAC, value: "9113177"\\nFAIL')
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_InvalidDate_july_2016.txt')],
indirect=True)
def test_invalid_column_data_three(gsma_tac_db_importer, logger, db_conn, tmpdir):
"""Test Depot ID 96570/10.
Verify that GSMA data is pre-checked for invalid column specific information
and is not inserted into the DB.
"""
expect_failure(gsma_tac_db_importer,
exc_message='fails for line: 1, column: Allocation_Date, value: "2011-23-Sep"\\nFAIL')
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_emptynontac_july_2016.txt')],
indirect=True)
def test_empty_values(gsma_tac_db_importer, logger, db_conn):
"""Test Depot ID 96576/7.
Verify that GSMA Data with null non-TAC values successfully passes
validation and is imported into the database.
"""
expect_success(gsma_tac_db_importer, 5, db_conn, logger)
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(
filename='gsma_dump_doublequotes.txt')],
indirect=True)
def test_dubious_quoting(gsma_tac_db_importer, logger, db_conn):
"""Test Depot ID 96571/2.
Verify that a file containing a row with a single double quote is successfully imported.
The following test does not conform to RFC-4180, but is accepted
by both the validator, Postgres and Python CSV. We keep this test
to ensure it keeps passing.
"""
expect_success(gsma_tac_db_importer, 3, db_conn, logger)
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(
filename='gsma_start_field_mandatory_quote.txt')],
indirect=True)
def test_start_mandatory_field_quote(gsma_tac_db_importer, logger, db_conn):
"""Test Depot ID 96768/2.
Verify that GSMA data can successfully be imported if
mandatory fields start with quotes.
"""
expect_success(gsma_tac_db_importer, 1, db_conn, logger)
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_commadelimited_2016.txt')],
indirect=True)
def test_incorrect_delimiter(gsma_tac_db_importer, logger):
"""Test Depot ID 96572/3.
Verify that the GSMA data file is rejected and not
imported if it is not "|" delimited.
"""
expect_failure(gsma_tac_db_importer, exc_message='Missing mandatory field')
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_mixedcases_2016.txt')],
indirect=True)
def test_headers_mixed_cases(gsma_tac_db_importer, db_conn, logger):
"""Test Depot ID 96574/5.
Verify that the GSMA data file is accepted and imported if
the headers have mixed cases.
"""
expect_success(gsma_tac_db_importer, 3, db_conn, logger)
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_malformeddate_2016.txt')],
indirect=True)
def test_malformed_date(gsma_tac_db_importer, db_conn, logger):
"""Test Depot ID 96575/6.
Verify that the GSMA data file is rejected if
the file contains a malformed date.
"""
expect_failure(gsma_tac_db_importer,
exc_message='fails for line: 3, column: Allocation_Date, value: "Sep-23-2011"\\nFAIL')
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_emptynontac_july_2016.txt')],
indirect=True)
def test_null_non_tac_entries(gsma_tac_db_importer, db_conn, logger):
"""Test Depot ID 96576/7.
Verify that GSMA Data with null non-TAC values successfully
passes validation and is imported into the database.
"""
expect_success(gsma_tac_db_importer, 5, db_conn, logger)
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_requiredfields_reordered_july_2016.txt')],
indirect=True)
def test_reordered_columns(gsma_tac_db_importer, db_conn, logger):
"""Test Depot ID 96580/11.
Verify that Data Import of GSMA Data is
successful when the columns are re-ordered.
"""
expect_success(gsma_tac_db_importer, 3, db_conn, logger)
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_extracolumns_July_2016.txt')],
indirect=True)
def test_data_new_columns(gsma_tac_db_importer, db_conn, logger):
"""Test Depot ID 96581/12.
Verify that data import of GSMA Data is
successful when the new columns are added to the data.
"""
expect_success(gsma_tac_db_importer, 3, db_conn, logger)
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_large_july_2016.txt',
extract=False)],
indirect=True)
def test_historical_check_fails(gsma_tac_db_importer, mocked_config, logger, mocked_statsd, db_conn,
metadata_db_conn, tmpdir):
"""Test Depot ID 96582/13.
Verify that data is not imported if the historical check fails.
"""
expect_success(gsma_tac_db_importer, 24727, db_conn, logger)
# Try a small import
with get_importer(GSMADataImporter,
db_conn,
metadata_db_conn,
mocked_config.db_config,
tmpdir,
logger,
mocked_statsd,
GSMADataParams(filename='gsma_dump_small_july_2016.txt',
import_size_variation_percent=mocked_config.gsma_threshold_config.
import_size_variation_percent,
import_size_variation_absolute=mocked_config.gsma_threshold_config.
import_size_variation_absolute,
extract=False)) as gsma_small_importer:
expect_failure(gsma_small_importer, exc_message='Failed import size historic check')
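# The failure is expected here: the second import (3 rows) appears to shrink
# the dataset far beyond what the configured import_size_variation_percent /
# import_size_variation_absolute thresholds allow relative to the previous
# 24727-row import.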
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_small_july_2016.txt',
extract=False)],
indirect=True)
def test_historical_check_passes(gsma_tac_db_importer, mocked_config, logger, mocked_statsd, db_conn,
metadata_db_conn, tmpdir):
"""Test Depot ID 96583/14.
Verify that data is successfully imported if the historical check passes.
"""
expect_success(gsma_tac_db_importer, 3, db_conn, logger)
with get_importer(GSMADataImporter,
db_conn,
metadata_db_conn,
mocked_config.db_config,
tmpdir,
logger,
mocked_statsd,
GSMADataParams(filename='gsma_dump_large_july_2016.txt',
import_size_variation_percent=mocked_config.gsma_threshold_config.
import_size_variation_percent,
import_size_variation_absolute=mocked_config.gsma_threshold_config.
import_size_variation_absolute,
extract=False)) as imp:
expect_success(imp, 24727, db_conn, logger)
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_rat_computation_check.txt')],
indirect=True)
def test_rat_bitmask_computation(gsma_tac_db_importer, logger, db_conn):
"""Test Depot ID unknown.
Verify that the RAT bitmask is computed correctly based on band capability.
"""
expect_success(gsma_tac_db_importer, 9, db_conn, logger)
with db_conn.cursor() as cursor:
# Test GSM only model
cursor.execute('SELECT rat_bitmask FROM gsma_data WHERE tac = \'01132222\'')
result = cursor.fetchone()
assert result[0] == int('00000000000000000000000001000000', 2)
# Test LTE only model
cursor.execute('SELECT rat_bitmask FROM gsma_data WHERE tac = \'41233333\'')
result = cursor.fetchone()
assert result[0] == int('00000000000000000001000000000000', 2)
# Test GSM + WCDMA model
cursor.execute('SELECT rat_bitmask FROM gsma_data WHERE tac = \'41255555\'')
result = cursor.fetchone()
assert result[0] == int('00000000000000000000001001000000', 2)
# Test WCDMA only model
cursor.execute('SELECT rat_bitmask FROM gsma_data WHERE tac = \'41266666\'')
result = cursor.fetchone()
assert result[0] == int('00000000000000000000001000000000', 2)
# Test GSM + WCDMA + LTE model
cursor.execute('SELECT rat_bitmask FROM gsma_data WHERE tac = \'41277777\'')
result = cursor.fetchone()
assert result[0] == int('00000000000000000001001001000000', 2)
# Test GSM + LTE model
cursor.execute('SELECT rat_bitmask FROM gsma_data WHERE tac = \'41288888\'')
result = cursor.fetchone()
assert result[0] == int('00000000000000000001000001000000', 2)
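# Taken together, the assertions above imply that the importer maps GSM to
# bit 6, WCDMA to bit 9 and LTE to bit 12 of the RAT bitmask, and that
# multi-RAT models receive the bitwise OR of the individual bits, e.g.
# GSM + LTE == (1 << 6) | (1 << 12) == 0b1000001000000.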
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_large_july_2016.txt',
extract=False)],
indirect=True)
def test_validation_check_override(gsma_tac_db_importer, mocked_config, logger, mocked_statsd, db_conn,
metadata_db_conn, tmpdir):
"""Test Depot ID 96586/17.
Verify that the user can override historical checks when importing GSMA Data files.
"""
expect_success(gsma_tac_db_importer, 24727, db_conn, logger)
with get_importer(GSMADataImporter,
db_conn,
metadata_db_conn,
mocked_config.db_config,
tmpdir,
logger,
mocked_statsd,
GSMADataParams(filename='gsma_dump_small_july_2016.txt',
perform_historic_check=False,
extract=False)) as imp:
expect_success(imp, 3, db_conn, logger)
def test_historic_threshold_config_cli(postgres, db_conn, tmpdir, mocked_config, monkeypatch):
"""Test Depot not available yet. Verify that the historic threshold is configurable via YAML."""
monkeypatch.setattr(mocked_config.gsma_threshold_config, 'import_size_variation_absolute', 0.3, raising=False)
monkeypatch.setattr(mocked_config.gsma_threshold_config, 'import_size_variation_percent', 0.3, raising=False)
here = path.abspath(path.dirname(__file__))
data_dir = path.join(here, 'unittest_data/gsma')
valid_csv_gsma_data_file_name = 'sample_gsma_import_list_anonymized.txt'
valid_csv_gsma_data_file = path.join(data_dir, valid_csv_gsma_data_file_name)
# create a zip file inside a temp dir
valid_zip_gsma_data_file_path = str(tmpdir.join('sample_gsma_import_list_anonymized.zip'))
with zipfile.ZipFile(valid_zip_gsma_data_file_path, 'w') as valid_csv_operator_data_file_zfile:
# zipfile write() method supports an extra argument (arcname) which is
# the archive name to be stored in the zip file.
valid_csv_operator_data_file_zfile.write(valid_csv_gsma_data_file,
valid_csv_gsma_data_file_name)
# Run dirbs-report using db args from the temp postgres instance
runner = CliRunner() # noqa
runner.invoke(dirbs_import_cli, ['gsma_tac', valid_zip_gsma_data_file_path],
obj={'APP_CONFIG': mocked_config})
with db_conn, db_conn.cursor() as cur:
cur.execute("""SELECT extra_metadata->'historic_size_variation_max_pct' AS historic_size_variation_max_pct,
extra_metadata->'historic_size_variation_max_abs' AS historic_size_variation_max_abs
FROM job_metadata
WHERE command = \'dirbs-import\'""") # noqa Q444
res = cur.fetchall()
assert len(res) == 1
assert res[0].historic_size_variation_max_pct == 0.3
assert res[0].historic_size_variation_max_abs == 0.3
@pytest.mark.parametrize('gsma_tac_db_importer',
[GSMADataParams(filename='gsma_dump_small_july_2016.txt',
extract=False)],
indirect=True)
def test_same_import_twice(gsma_tac_db_importer, mocked_config, logger, mocked_statsd, db_conn,
metadata_db_conn, tmpdir):
"""Test Depot not known yet.
Verify that if we import the same file twice, identical entries are ignored and not added to the historic table.
"""
expect_success(gsma_tac_db_importer, 3, db_conn, logger)
with db_conn.cursor() as cursor:
cursor.execute('SELECT * FROM historic_gsma_data')
first_import = cursor.rowcount
with get_importer(GSMADataImporter,
db_conn,
metadata_db_conn,
mocked_config.db_config,
tmpdir,
logger,
mocked_statsd,
GSMADataParams(filename='gsma_dump_small_july_2016.txt',
extract=False)) as imp:
expect_success(imp, 3, db_conn, logger)
with db_conn.cursor() as cursor:
cursor.execute('SELECT * FROM historic_gsma_data')
second_import = cursor.rowcount
assert first_import == second_import == 3
|
[
"_importer_params.GSMADataParams",
"zipfile.ZipFile",
"_helpers.expect_failure",
"_helpers.logger_stream_contents",
"os.path.dirname",
"_helpers.expect_success",
"click.testing.CliRunner",
"os.path.split",
"os.path.join"
] |
[((3289, 3326), 'os.path.join', 'path.join', (['here', '"""unittest_data/gsma"""'], {}), "(here, 'unittest_data/gsma')\n", (3298, 3326), False, 'from os import path\n'), ((3435, 3485), 'os.path.join', 'path.join', (['data_dir', 'valid_csv_gsma_data_file_name'], {}), '(data_dir, valid_csv_gsma_data_file_name)\n', (3444, 3485), False, 'from os import path\n'), ((4098, 4109), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (4107, 4109), False, 'from click.testing import CliRunner\n'), ((6654, 6691), 'os.path.join', 'path.join', (['here', '"""unittest_data/gsma"""'], {}), "(here, 'unittest_data/gsma')\n", (6663, 6691), False, 'from os import path\n'), ((6805, 6855), 'os.path.join', 'path.join', (['data_dir', 'valid_csv_gsma_data_file_name'], {}), '(data_dir, valid_csv_gsma_data_file_name)\n', (6814, 6855), False, 'from os import path\n'), ((7402, 7413), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (7411, 7413), False, 'from click.testing import CliRunner\n'), ((8763, 8819), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(2)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 2, db_conn, logger)\n', (8777, 8819), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((9895, 9951), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(3)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 3, db_conn, logger)\n', (9909, 9951), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((10622, 10678), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(1)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 1, db_conn, logger)\n', (10636, 10678), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((11293, 11387), '_helpers.expect_failure', 'expect_failure', (['gsma_tac_db_importer'], {'exc_message': '"""Inconsistent number of fields per row"""'}), "(gsma_tac_db_importer, exc_message=\n 'Inconsistent number of fields per row')\n", (11307, 11387), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((11830, 11905), '_helpers.expect_failure', 'expect_failure', (['gsma_tac_db_importer'], {'exc_message': '"""Missing mandatory field"""'}), "(gsma_tac_db_importer, exc_message='Missing mandatory field')\n", (11844, 11905), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((12298, 12354), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(3)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 3, db_conn, logger)\n', (12312, 12354), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((13488, 13544), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(3)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 3, db_conn, logger)\n', (13502, 13544), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((14360, 14503), '_helpers.expect_failure', 'expect_failure', (['gsma_tac_db_importer'], {'exc_message': '"""Conflicting rows check failed (5 rows with same primary key and conflicting data)"""'}), "(gsma_tac_db_importer, exc_message=\n 'Conflicting rows check failed (5 rows with same primary key and conflicting data)'\n )\n", (14374, 14503), False, 'from _helpers import get_importer, expect_success, expect_failure, 
logger_stream_contents\n'), ((15007, 15150), '_helpers.expect_failure', 'expect_failure', (['gsma_tac_db_importer'], {'exc_message': '"""Conflicting rows check failed (1 rows with same primary key and conflicting data)"""'}), "(gsma_tac_db_importer, exc_message=\n 'Conflicting rows check failed (1 rows with same primary key and conflicting data)'\n )\n", (15021, 15150), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((15619, 15745), '_helpers.expect_failure', 'expect_failure', (['gsma_tac_db_importer'], {'exc_message': '"""length(8) fails for line: 1, column: TAC, value: "BlankTac123"\\\\nFAIL"""'}), '(gsma_tac_db_importer, exc_message=\n \'length(8) fails for line: 1, column: TAC, value: "BlankTac123"\\\\nFAIL\')\n', (15633, 15745), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((16184, 16306), '_helpers.expect_failure', 'expect_failure', (['gsma_tac_db_importer'], {'exc_message': '"""length(8) fails for line: 1, column: TAC, value: "9113177"\\\\nFAIL"""'}), '(gsma_tac_db_importer, exc_message=\n \'length(8) fails for line: 1, column: TAC, value: "9113177"\\\\nFAIL\')\n', (16198, 16306), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((16748, 16876), '_helpers.expect_failure', 'expect_failure', (['gsma_tac_db_importer'], {'exc_message': '"""fails for line: 1, column: Allocation_Date, value: "2011-23-Sep"\\\\nFAIL"""'}), '(gsma_tac_db_importer, exc_message=\n \'fails for line: 1, column: Allocation_Date, value: "2011-23-Sep"\\\\nFAIL\')\n', (16762, 16876), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((17299, 17355), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(5)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 5, db_conn, logger)\n', (17313, 17355), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((17931, 17987), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(3)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 3, db_conn, logger)\n', (17945, 17987), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((18422, 18478), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(1)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 1, db_conn, logger)\n', (18436, 18478), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((18858, 18933), '_helpers.expect_failure', 'expect_failure', (['gsma_tac_db_importer'], {'exc_message': '"""Missing mandatory field"""'}), "(gsma_tac_db_importer, exc_message='Missing mandatory field')\n", (18872, 18933), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((19319, 19375), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(3)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 3, db_conn, logger)\n', (19333, 19375), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((19752, 19880), '_helpers.expect_failure', 'expect_failure', (['gsma_tac_db_importer'], {'exc_message': '"""fails for line: 3, column: Allocation_Date, value: "Sep-23-2011"\\\\nFAIL"""'}), '(gsma_tac_db_importer, exc_message=\n \'fails for line: 3, column: Allocation_Date, value: "Sep-23-2011"\\\\nFAIL\')\n', (19766, 
19880), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((20311, 20367), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(5)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 5, db_conn, logger)\n', (20325, 20367), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((20766, 20822), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(3)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 3, db_conn, logger)\n', (20780, 20822), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((21219, 21275), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(3)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 3, db_conn, logger)\n', (21233, 21275), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((21780, 21840), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(24727)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 24727, db_conn, logger)\n', (21794, 21840), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((23233, 23289), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(3)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 3, db_conn, logger)\n', (23247, 23289), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((24461, 24517), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(9)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 9, db_conn, logger)\n', (24475, 24517), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((26431, 26491), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(24727)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 24727, db_conn, logger)\n', (26445, 26491), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((27488, 27525), 'os.path.join', 'path.join', (['here', '"""unittest_data/gsma"""'], {}), "(here, 'unittest_data/gsma')\n", (27497, 27525), False, 'from os import path\n'), ((27634, 27684), 'os.path.join', 'path.join', (['data_dir', 'valid_csv_gsma_data_file_name'], {}), '(data_dir, valid_csv_gsma_data_file_name)\n', (27643, 27684), False, 'from os import path\n'), ((28297, 28308), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (28306, 28308), False, 'from click.testing import CliRunner\n'), ((29549, 29605), '_helpers.expect_success', 'expect_success', (['gsma_tac_db_importer', '(3)', 'db_conn', 'logger'], {}), '(gsma_tac_db_importer, 3, db_conn, logger)\n', (29563, 29605), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((2539, 2574), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_name'], {'mode': '"""w"""'}), "(zip_name, mode='w')\n", (2554, 2574), False, 'import zipfile\n'), ((3250, 3272), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (3262, 3272), False, 'from os import path\n'), ((3633, 3684), 'zipfile.ZipFile', 'zipfile.ZipFile', (['valid_zip_gsma_data_file_path', '"""w"""'], {}), "(valid_zip_gsma_data_file_path, 'w')\n", (3648, 3684), False, 'import zipfile\n'), ((6615, 6637), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (6627, 6637), False, 'from os import 
path\n'), ((7007, 7058), 'zipfile.ZipFile', 'zipfile.ZipFile', (['valid_zip_gsma_data_file_path', '"""w"""'], {}), "(valid_zip_gsma_data_file_path, 'w')\n", (7022, 7058), False, 'import zipfile\n'), ((7973, 8003), '_helpers.logger_stream_contents', 'logger_stream_contents', (['logger'], {}), '(logger)\n', (7995, 8003), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((8101, 8131), '_helpers.logger_stream_contents', 'logger_stream_contents', (['logger'], {}), '(logger)\n', (8123, 8131), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((8270, 8300), '_helpers.logger_stream_contents', 'logger_stream_contents', (['logger'], {}), '(logger)\n', (8292, 8300), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((8378, 8431), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_white_spaces.txt"""'}), "(filename='gsma_dump_white_spaces.txt')\n", (8392, 8431), False, 'from _importer_params import GSMADataParams\n'), ((9523, 9576), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_doublequotes.txt"""'}), "(filename='gsma_dump_doublequotes.txt')\n", (9537, 9576), False, 'from _importer_params import GSMADataParams\n'), ((10321, 10371), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_escaped_quotes.txt"""'}), "(filename='gsma_escaped_quotes.txt')\n", (10335, 10371), False, 'from _importer_params import GSMADataParams\n'), ((10978, 11033), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_uneven_columns.txt"""'}), "(filename='gsma_dump_uneven_columns.txt')\n", (10992, 11033), False, 'from _importer_params import GSMADataParams\n'), ((11460, 11526), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_missing_headers_july_2016.txt"""'}), "(filename='gsma_dump_missing_headers_july_2016.txt')\n", (11474, 11526), False, 'from _importer_params import GSMADataParams\n'), ((11983, 12039), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_small_july_2016.txt"""'}), "(filename='gsma_dump_small_july_2016.txt')\n", (11997, 12039), False, 'from _importer_params import GSMADataParams\n'), ((12432, 12495), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_extracolumns_July_2016.txt"""'}), "(filename='gsma_dump_extracolumns_July_2016.txt')\n", (12446, 12495), False, 'from _importer_params import GSMADataParams\n'), ((13896, 13935), '_helpers.expect_success', 'expect_success', (['imp', '(3)', 'db_conn', 'logger'], {}), '(imp, 3, db_conn, logger)\n', (13910, 13935), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((13066, 13122), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_small_july_2016.txt"""'}), "(filename='gsma_dump_small_july_2016.txt')\n", (13080, 13122), False, 'from _importer_params import GSMADataParams\n'), ((14013, 14080), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_duplicaterecord_2016_large.txt"""'}), "(filename='gsma_dump_duplicaterecord_2016_large.txt')\n", (14027, 14080), False, 'from _importer_params import GSMADataParams\n'), ((14627, 14695), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_partialduplicaterecord_2016.txt"""'}), 
"(filename='gsma_dump_partialduplicaterecord_2016.txt')\n", (14641, 14695), False, 'from _importer_params import GSMADataParams\n'), ((15274, 15333), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_BlankTac_july_2016.txt"""'}), "(filename='gsma_dump_BlankTac_july_2016.txt')\n", (15288, 15333), False, 'from _importer_params import GSMADataParams\n'), ((15837, 15898), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_InvalidTac_july_2016.txt"""'}), "(filename='gsma_dump_InvalidTac_july_2016.txt')\n", (15851, 15898), False, 'from _importer_params import GSMADataParams\n'), ((16398, 16460), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_InvalidDate_july_2016.txt"""'}), "(filename='gsma_dump_InvalidDate_july_2016.txt')\n", (16412, 16460), False, 'from _importer_params import GSMADataParams\n'), ((16968, 17030), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_emptynontac_july_2016.txt"""'}), "(filename='gsma_dump_emptynontac_july_2016.txt')\n", (16982, 17030), False, 'from _importer_params import GSMADataParams\n'), ((17433, 17486), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_doublequotes.txt"""'}), "(filename='gsma_dump_doublequotes.txt')\n", (17447, 17486), False, 'from _importer_params import GSMADataParams\n'), ((18065, 18128), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_start_field_mandatory_quote.txt"""'}), "(filename='gsma_start_field_mandatory_quote.txt')\n", (18079, 18128), False, 'from _importer_params import GSMADataParams\n'), ((18556, 18616), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_commadelimited_2016.txt"""'}), "(filename='gsma_dump_commadelimited_2016.txt')\n", (18570, 18616), False, 'from _importer_params import GSMADataParams\n'), ((19011, 19067), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_mixedcases_2016.txt"""'}), "(filename='gsma_dump_mixedcases_2016.txt')\n", (19025, 19067), False, 'from _importer_params import GSMADataParams\n'), ((19453, 19512), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_malformeddate_2016.txt"""'}), "(filename='gsma_dump_malformeddate_2016.txt')\n", (19467, 19512), False, 'from _importer_params import GSMADataParams\n'), ((19972, 20034), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_emptynontac_july_2016.txt"""'}), "(filename='gsma_dump_emptynontac_july_2016.txt')\n", (19986, 20034), False, 'from _importer_params import GSMADataParams\n'), ((20445, 20520), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_requiredfields_reordered_july_2016.txt"""'}), "(filename='gsma_dump_requiredfields_reordered_july_2016.txt')\n", (20459, 20520), False, 'from _importer_params import GSMADataParams\n'), ((20900, 20963), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_extracolumns_July_2016.txt"""'}), "(filename='gsma_dump_extracolumns_July_2016.txt')\n", (20914, 20963), False, 'from _importer_params import GSMADataParams\n'), ((22632, 22721), '_helpers.expect_failure', 'expect_failure', (['gsma_small_importer'], {'exc_message': '"""Failed import size historic check"""'}), "(gsma_small_importer, exc_message=\n 'Failed import size historic check')\n", (22646, 22721), False, 'from _helpers import get_importer, 
expect_success, expect_failure, logger_stream_contents\n'), ((21353, 21424), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_large_july_2016.txt"""', 'extract': '(False)'}), "(filename='gsma_dump_large_july_2016.txt', extract=False)\n", (21367, 21424), False, 'from _importer_params import GSMADataParams\n'), ((24039, 24082), '_helpers.expect_success', 'expect_success', (['imp', '(24727)', 'db_conn', 'logger'], {}), '(imp, 24727, db_conn, logger)\n', (24053, 24082), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((22794, 22865), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_small_july_2016.txt"""', 'extract': '(False)'}), "(filename='gsma_dump_small_july_2016.txt', extract=False)\n", (22808, 22865), False, 'from _importer_params import GSMADataParams\n'), ((24160, 24222), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_rat_computation_check.txt"""'}), "(filename='gsma_dump_rat_computation_check.txt')\n", (24174, 24222), False, 'from _importer_params import GSMADataParams\n'), ((26962, 27001), '_helpers.expect_success', 'expect_success', (['imp', '(3)', 'db_conn', 'logger'], {}), '(imp, 3, db_conn, logger)\n', (26976, 27001), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((25978, 26049), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_large_july_2016.txt"""', 'extract': '(False)'}), "(filename='gsma_dump_large_july_2016.txt', extract=False)\n", (25992, 26049), False, 'from _importer_params import GSMADataParams\n'), ((27449, 27471), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (27461, 27471), False, 'from os import path\n'), ((27832, 27883), 'zipfile.ZipFile', 'zipfile.ZipFile', (['valid_zip_gsma_data_file_path', '"""w"""'], {}), "(valid_zip_gsma_data_file_path, 'w')\n", (27847, 27883), False, 'import zipfile\n'), ((30145, 30184), '_helpers.expect_success', 'expect_success', (['imp', '(3)', 'db_conn', 'logger'], {}), '(imp, 3, db_conn, logger)\n', (30159, 30184), False, 'from _helpers import get_importer, expect_success, expect_failure, logger_stream_contents\n'), ((29086, 29157), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_small_july_2016.txt"""', 'extract': '(False)'}), "(filename='gsma_dump_small_july_2016.txt', extract=False)\n", (29100, 29157), False, 'from _importer_params import GSMADataParams\n'), ((2911, 2958), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': 'zip_name', 'extract': '(True)'}), '(filename=zip_name, extract=True)\n', (2925, 2958), False, 'from _importer_params import GSMADataParams\n'), ((13822, 13878), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_small_july_2016.txt"""'}), "(filename='gsma_dump_small_july_2016.txt')\n", (13836, 13878), False, 'from _importer_params import GSMADataParams\n'), ((22144, 22429), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_small_july_2016.txt"""', 'import_size_variation_percent': 'mocked_config.gsma_threshold_config.import_size_variation_percent', 'import_size_variation_absolute': 'mocked_config.gsma_threshold_config.import_size_variation_absolute', 'extract': '(False)'}), "(filename='gsma_dump_small_july_2016.txt',\n import_size_variation_percent=mocked_config.gsma_threshold_config.\n 
import_size_variation_percent, import_size_variation_absolute=\n mocked_config.gsma_threshold_config.import_size_variation_absolute,\n extract=False)\n", (22158, 22429), False, 'from _importer_params import GSMADataParams\n'), ((23567, 23852), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_large_july_2016.txt"""', 'import_size_variation_percent': 'mocked_config.gsma_threshold_config.import_size_variation_percent', 'import_size_variation_absolute': 'mocked_config.gsma_threshold_config.import_size_variation_absolute', 'extract': '(False)'}), "(filename='gsma_dump_large_july_2016.txt',\n import_size_variation_percent=mocked_config.gsma_threshold_config.\n import_size_variation_percent, import_size_variation_absolute=\n mocked_config.gsma_threshold_config.import_size_variation_absolute,\n extract=False)\n", (23581, 23852), False, 'from _importer_params import GSMADataParams\n'), ((26769, 26874), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_small_july_2016.txt"""', 'perform_historic_check': '(False)', 'extract': '(False)'}), "(filename='gsma_dump_small_july_2016.txt',\n perform_historic_check=False, extract=False)\n", (26783, 26874), False, 'from _importer_params import GSMADataParams\n'), ((30019, 30090), '_importer_params.GSMADataParams', 'GSMADataParams', ([], {'filename': '"""gsma_dump_small_july_2016.txt"""', 'extract': '(False)'}), "(filename='gsma_dump_small_july_2016.txt', extract=False)\n", (30033, 30090), False, 'from _importer_params import GSMADataParams\n'), ((2407, 2429), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (2419, 2429), False, 'from os import path\n'), ((2498, 2512), 'os.path.split', 'path.split', (['fn'], {}), '(fn)\n', (2508, 2512), False, 'from os import path\n'), ((2615, 2629), 'os.path.split', 'path.split', (['fn'], {}), '(fn)\n', (2625, 2629), False, 'from os import path\n')]
|
from os.path import exists
from sys import argv
from GTFSProcessor import Routes
EXACT_ARGS_NUM = 3
USAGE_STR = '''Usage:
routes_at_stop.py [database-Path] [stop_id]'''
if __name__ == '__main__':
if len(argv) != EXACT_ARGS_NUM:
print("Invalid arguments. %s" % USAGE_STR)
exit()
if not argv[1].endswith('.sqlite'):
print("1st Argument must be a .sqlite database. %s"
% USAGE_STR)
exit()
if not exists(argv[1]):
print("Database %s not found." % argv[1])
exit()
if Routes.check_if_stop_exists(argv[1],argv[2]):
route_set = Routes.get_route_ids_passing_through_stop(argv[1], argv[2])
        print('Stop ID : %s' % argv[2])
        stop_name = Routes.get_stop_name(argv[1], argv[2])
        print('Stop Name : %s' % stop_name)
        if len(route_set) == 0:
            print('No routes found stopping at ID %s' % argv[2])
else:
print('Routes Stopping:')
for route_id in route_set:
route_short_name = Routes.get_route_short_name(
argv[1], route_id)
route_long_name = Routes.get_route_long_name(
argv[1], route_id)
earliest_dep = Routes.get_earliest_service_for_stop_on_trip(
argv[1], route_id, argv[2])
latest_dep = Routes.get_latest_service_for_stop_on_trip(
argv[1], route_id, argv[2])
print("%s - %s (earliest %s; latest %s)" % (route_short_name,
route_long_name, earliest_dep, latest_dep))
else:
print('Stop %s not found in database.' % argv[2])
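# Hedged usage sketch (the database file name and stop id below are made up,
# not taken from any real feed):
#
#     python routes_at_stop.py gtfs_feed.sqlite 12345
#
# prints the stop name followed by every route serving that stop, each with its
# earliest and latest scheduled departures.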
|
[
"GTFSProcessor.Routes.get_route_long_name",
"GTFSProcessor.Routes.get_latest_service_for_stop_on_trip",
"GTFSProcessor.Routes.get_stop_name",
"GTFSProcessor.Routes.get_route_short_name",
"os.path.exists",
"GTFSProcessor.Routes.get_earliest_service_for_stop_on_trip",
"GTFSProcessor.Routes.check_if_stop_exists",
"GTFSProcessor.Routes.get_route_ids_passing_through_stop"
] |
[((617, 662), 'GTFSProcessor.Routes.check_if_stop_exists', 'Routes.check_if_stop_exists', (['argv[1]', 'argv[2]'], {}), '(argv[1], argv[2])\n', (644, 662), False, 'from GTFSProcessor import Routes\n'), ((528, 543), 'os.path.exists', 'exists', (['argv[1]'], {}), '(argv[1])\n', (534, 543), False, 'from os.path import exists\n'), ((683, 742), 'GTFSProcessor.Routes.get_route_ids_passing_through_stop', 'Routes.get_route_ids_passing_through_stop', (['argv[1]', 'argv[2]'], {}), '(argv[1], argv[2])\n', (724, 742), False, 'from GTFSProcessor import Routes\n'), ((803, 841), 'GTFSProcessor.Routes.get_stop_name', 'Routes.get_stop_name', (['argv[1]', 'argv[2]'], {}), '(argv[1], argv[2])\n', (823, 841), False, 'from GTFSProcessor import Routes\n'), ((1107, 1153), 'GTFSProcessor.Routes.get_route_short_name', 'Routes.get_route_short_name', (['argv[1]', 'route_id'], {}), '(argv[1], route_id)\n', (1134, 1153), False, 'from GTFSProcessor import Routes\n'), ((1213, 1258), 'GTFSProcessor.Routes.get_route_long_name', 'Routes.get_route_long_name', (['argv[1]', 'route_id'], {}), '(argv[1], route_id)\n', (1239, 1258), False, 'from GTFSProcessor import Routes\n'), ((1315, 1387), 'GTFSProcessor.Routes.get_earliest_service_for_stop_on_trip', 'Routes.get_earliest_service_for_stop_on_trip', (['argv[1]', 'route_id', 'argv[2]'], {}), '(argv[1], route_id, argv[2])\n', (1359, 1387), False, 'from GTFSProcessor import Routes\n'), ((1442, 1512), 'GTFSProcessor.Routes.get_latest_service_for_stop_on_trip', 'Routes.get_latest_service_for_stop_on_trip', (['argv[1]', 'route_id', 'argv[2]'], {}), '(argv[1], route_id, argv[2])\n', (1484, 1512), False, 'from GTFSProcessor import Routes\n')]
|
import streamlit as st
import plotly.express as px
import math
from datetime import datetime
from pyspark.sql import types
schema = types.StructType([
types.StructField('id', types.IntegerType()),
types.StructField('reputation', types.IntegerType()),
types.StructField('display_name', types.StringType()),
types.StructField('location', types.StringType()),
types.StructField('last_access_date', types.TimestampType()),
types.StructField('title', types.StringType()),
types.StructField('tags', types.ArrayType(types.StringType())),
types.StructField('answered', types.IntegerType()),
])
def form_tags(x):
tags = []
for i in x.split(','):
if(i != ''):
tags.append(i.strip())
return tags
def app(args, spark, model):
data = args[0]
prob = args[1]
score = args[2]
st.header('Will the user answer')
option = st.selectbox(
'What is the user id?',
tuple(data['id'].unique()))
user_id = option.item()
reputation = st.text_input("Please enter reputation")
if(reputation != ''):
reputation = int(reputation)
else:
reputation = 0
tag = st.text_input("Please enter tags")
tags = form_tags(tag)
st.write(tags)
title = st.text_input('Please enter the question title').strip()
'''
creating the user tuple to input to the model
'''
user = (user_id, reputation, 'test-name', '', datetime.strptime('2021-12-03 22:03:04', '%Y-%m-%d %H:%M:%S'), title, tags, 1)
input_data = spark.createDataFrame([user], schema)
predictions = model.transform(input_data).select('prediction').first()['prediction']
probability_to_answer = prob[(prob['id'] == user_id) & prob['tag'].isin(tags)]['percent'].mean()
    '''
    Combine the model prediction with the user's historical answering activity
    into a weighted probability. The historical activity gets the higher weight
    because the model tends to overfit.
    '''
weighted_prediction = 0.4 * predictions
weighted_prob = 0
if(not math.isnan(probability_to_answer)):
weighted_prob = 0.6 * (probability_to_answer / 100)
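    # Worked example of the blend above (numbers are illustrative, not from the
    # dataset): a model prediction of 1 with a 75% historical answer rate gives
    # 0.4 * 1 + 0.6 * 0.75 = 0.85, which rounds to 1; a prediction of 0 with a
    # 40% history gives 0.4 * 0 + 0.6 * 0.40 = 0.24, which rounds to 0.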
if(title != ''):
st.write('Prediction : {}'.format(round(weighted_prediction + weighted_prob)))
with st.expander('Classification confusion Matrix'):
fig = px.density_heatmap(data, x="answered", y="prediction")#, height=400)
st.plotly_chart(fig)
st.write('Accuracy: {}'.format(score))
with st.expander("View data"):
st.dataframe(data)
|
[
"math.isnan",
"streamlit.text_input",
"streamlit.plotly_chart",
"streamlit.dataframe",
"pyspark.sql.types.StringType",
"streamlit.header",
"streamlit.expander",
"streamlit.write",
"plotly.express.density_heatmap",
"datetime.datetime.strptime",
"pyspark.sql.types.TimestampType",
"pyspark.sql.types.IntegerType"
] |
[((845, 878), 'streamlit.header', 'st.header', (['"""Will the user answer"""'], {}), "('Will the user answer')\n", (854, 878), True, 'import streamlit as st\n'), ((1019, 1059), 'streamlit.text_input', 'st.text_input', (['"""Please enter reputation"""'], {}), "('Please enter reputation')\n", (1032, 1059), True, 'import streamlit as st\n'), ((1166, 1200), 'streamlit.text_input', 'st.text_input', (['"""Please enter tags"""'], {}), "('Please enter tags')\n", (1179, 1200), True, 'import streamlit as st\n'), ((1231, 1245), 'streamlit.write', 'st.write', (['tags'], {}), '(tags)\n', (1239, 1245), True, 'import streamlit as st\n'), ((1438, 1499), 'datetime.datetime.strptime', 'datetime.strptime', (['"""2021-12-03 22:03:04"""', '"""%Y-%m-%d %H:%M:%S"""'], {}), "('2021-12-03 22:03:04', '%Y-%m-%d %H:%M:%S')\n", (1455, 1499), False, 'from datetime import datetime\n'), ((2074, 2107), 'math.isnan', 'math.isnan', (['probability_to_answer'], {}), '(probability_to_answer)\n', (2084, 2107), False, 'import math\n'), ((2288, 2334), 'streamlit.expander', 'st.expander', (['"""Classification confusion Matrix"""'], {}), "('Classification confusion Matrix')\n", (2299, 2334), True, 'import streamlit as st\n'), ((2350, 2404), 'plotly.express.density_heatmap', 'px.density_heatmap', (['data'], {'x': '"""answered"""', 'y': '"""prediction"""'}), "(data, x='answered', y='prediction')\n", (2368, 2404), True, 'import plotly.express as px\n'), ((2427, 2447), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {}), '(fig)\n', (2442, 2447), True, 'import streamlit as st\n'), ((2504, 2528), 'streamlit.expander', 'st.expander', (['"""View data"""'], {}), "('View data')\n", (2515, 2528), True, 'import streamlit as st\n'), ((2538, 2556), 'streamlit.dataframe', 'st.dataframe', (['data'], {}), '(data)\n', (2550, 2556), True, 'import streamlit as st\n'), ((181, 200), 'pyspark.sql.types.IntegerType', 'types.IntegerType', ([], {}), '()\n', (198, 200), False, 'from pyspark.sql import types\n'), ((239, 258), 'pyspark.sql.types.IntegerType', 'types.IntegerType', ([], {}), '()\n', (256, 258), False, 'from pyspark.sql import types\n'), ((299, 317), 'pyspark.sql.types.StringType', 'types.StringType', ([], {}), '()\n', (315, 317), False, 'from pyspark.sql import types\n'), ((354, 372), 'pyspark.sql.types.StringType', 'types.StringType', ([], {}), '()\n', (370, 372), False, 'from pyspark.sql import types\n'), ((417, 438), 'pyspark.sql.types.TimestampType', 'types.TimestampType', ([], {}), '()\n', (436, 438), False, 'from pyspark.sql import types\n'), ((472, 490), 'pyspark.sql.types.StringType', 'types.StringType', ([], {}), '()\n', (488, 490), False, 'from pyspark.sql import types\n'), ((595, 614), 'pyspark.sql.types.IntegerType', 'types.IntegerType', ([], {}), '()\n', (612, 614), False, 'from pyspark.sql import types\n'), ((1258, 1306), 'streamlit.text_input', 'st.text_input', (['"""Please enter the question title"""'], {}), "('Please enter the question title')\n", (1271, 1306), True, 'import streamlit as st\n'), ((539, 557), 'pyspark.sql.types.StringType', 'types.StringType', ([], {}), '()\n', (555, 557), False, 'from pyspark.sql import types\n')]
|
"""
Data feeding function for train and test.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
# from src.data_utils import Dataset
from data_utils import Dataset
def input_fn(training, params):
"""
Simple input_fn for our 3D U-Net estimator, handling train and test data
preparation.
Args:
training (bool): Whether we are training or testing.
params (dict): Params for setting up the data. Expected keys are:
max_scans (int): Maximum number of scans we see in any patient.
train_img_size (int): Width and height of resized training images.
            batch_size (int): Number of patients in each batch for training.
num_classes (int): Number of mutually exclusive output classes.
train_dataset_path (str): Path to pickled
:class:`src.data_utils.Dataset` object.
test_dataset_path (str): Path to pickled
:class:`src.data_utils.Dataset` object.
Returns:
        tuple: A ``(features, labels)`` pair. ``features`` is a dict with keys
            ``'x'`` and ``'y'`` (the labels are duplicated there so the
            estimator can return them at prediction time) and ``labels`` is the
            label tensor.
"""
print('os.path.abspath(__file__) in input_fn.py: ', os.path.abspath(__file__))
package_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# for training we use a batch number and pad each 3D scan to have equal
# depth, width and height have already been set to 128 in preprocessing
max_s = params['max_scans']
w = h = params['train_img_size']
if training:
dataset = Dataset.load_dataset(
os.path.join(package_root, params['train_dataset_path'])
).create_tf_dataset().shuffle(
# we have 70 train examples, this will provide good shuffling
buffer_size=70
).repeat().padded_batch(
batch_size=params['batch_size'],
padded_shapes=(
[max_s, w, h, 1], [max_s, w, h, params['num_classes']]
)
)
# for testing we use the unscaled images with their original dims,
# we still pad the depth dimension to max_s though
else:
# predicting a resized dataset, i.e. all have same width height?
resized = 'resized' in params['test_dataset_path']
dataset = Dataset.load_dataset(
os.path.join(package_root, params['test_dataset_path'])
).create_tf_dataset(
resized=resized
).padded_batch(
# we have different sized test scans so we need batch 1
batch_size=1,
padded_shapes=(
[max_s, None, None, 1],
[max_s, None, None, params['num_classes']]
)
)
# Commented by NJ https://stackoverflow.com/questions/60665717/module-tensorflow-core-api-v2-data-has-no-attribute-iterator
# tf.compat.v1.disable_eager_execution()
print(tf.__version__)
# The GitHub which I am trying to convert is pretty old
# iterator = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes)
iterator = tf.compat.v1.data.Iterator.from_structure(tf.compat.v1.data.get_output_types(dataset),tf.compat.v1.data.get_output_shapes(dataset))
dataset_init_op = iterator.make_initializer(dataset)
# Modified by NJ
# tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, dataset_init_op)
tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.TABLE_INITIALIZERS, dataset_init_op)
next_element = iterator.get_next()
# extremely hack way of getting tf.estimator to return labels at pred time
# see https://github.com/tensorflow/tensorflow/issues/17824
features = {'x': next_element[0], 'y': next_element[1]}
return features, next_element[1]
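# A minimal sketch of the params dict this input_fn expects (all values are
# illustrative assumptions, not taken from any real training config):
#
#     params = {
#         'max_scans': 128,
#         'train_img_size': 128,
#         'batch_size': 2,
#         'num_classes': 3,
#         'train_dataset_path': 'data/train_dataset.pckl',
#         'test_dataset_path': 'data/test_dataset_resized.pckl',
#     }
#     features, labels = input_fn(training=True, params=params)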
|
[
"os.path.abspath",
"tensorflow.compat.v1.add_to_collection",
"tensorflow.compat.v1.data.get_output_shapes",
"tensorflow.compat.v1.data.get_output_types",
"os.path.join"
] |
[((3387, 3481), 'tensorflow.compat.v1.add_to_collection', 'tf.compat.v1.add_to_collection', (['tf.compat.v1.GraphKeys.TABLE_INITIALIZERS', 'dataset_init_op'], {}), '(tf.compat.v1.GraphKeys.TABLE_INITIALIZERS,\n dataset_init_op)\n', (3417, 3481), True, 'import tensorflow as tf\n'), ((1221, 1246), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1236, 1246), False, 'import os\n'), ((3136, 3179), 'tensorflow.compat.v1.data.get_output_types', 'tf.compat.v1.data.get_output_types', (['dataset'], {}), '(dataset)\n', (3170, 3179), True, 'import tensorflow as tf\n'), ((3180, 3224), 'tensorflow.compat.v1.data.get_output_shapes', 'tf.compat.v1.data.get_output_shapes', (['dataset'], {}), '(dataset)\n', (3215, 3224), True, 'import tensorflow as tf\n'), ((1299, 1324), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1314, 1324), False, 'import os\n'), ((2342, 2397), 'os.path.join', 'os.path.join', (['package_root', "params['test_dataset_path']"], {}), "(package_root, params['test_dataset_path'])\n", (2354, 2397), False, 'import os\n'), ((1622, 1678), 'os.path.join', 'os.path.join', (['package_root', "params['train_dataset_path']"], {}), "(package_root, params['train_dataset_path'])\n", (1634, 1678), False, 'import os\n')]
|
import re
import inspect
from werkzeug.wrappers import Response as BaseResponse
from flask.views import View as FlaskView
from ..utils import json
from flask import request, current_app, session
from werkzeug.datastructures import Headers
from ..core.exc import ImproperlyConfigured
from ..http import exc, status, Payload, Response
from ..utils.decorators import cached_property, export
from ..http.status import is_http_status_code
from ..helpers import uzi
from .. import helpers
from .options import ViewOptions as BaseViewOptions, viewoption
__all__ = [
'View',
]
def has_own_attr(obj, name):
return name in obj.__dict__
http_method_funcs = frozenset(['get', 'post', 'head', 'options', 'delete', 'put', 'trace', 'patch'])
def declared_http_methods(cls):
""" declared
Returns a list of methods that can be routed to.
This a monkey patch for flask_classy.get_interesting_members.
to allow definition of non-routable methods.
"""
for name, fn in inspect.getmembers(cls, predicate=inspect.isfunction):
if name in http_method_funcs and is_instance_method(fn) and not inspect.ismethod(fn):
yield name
# for name, fn in ((n, getattr(cls, n)) for n in dir(cls)):
# if inspect.ismethod(fn) and name in http_method_funcs:
# yield name
def is_instance_method(method):
if inspect.isfunction(method):
argspec = inspect.getfullargspec(method)
args = argspec[0]
return args and args[0] == 'self'
return False
@export
class ViewType(type):
def __new__(mcls, name, bases, dct):
dct.setdefault('endpoint', None)
cls = super(ViewType, mcls).__new__(mcls, name, bases, dct)
if 'methods' not in dct:
methods = set(cls.methods or [])
for key in dct:
if key in http_method_funcs:
methods.add(key.upper())
# If we have no method at all in there we don't want to
# add a method list. (This is for instance the case for
# the base class or another subclass of a base method view
# that does not introduce new methods).
if methods:
cls.methods = list(sorted(methods))
decorators = []
for c in cls.mro():
if hasattr(c, 'decorators') and isinstance(c.decorators, (list, tuple)):
for d in reversed(c.decorators):
if d not in decorators:
decorators.append(d)
cls.decorators = decorators
cls.declared_methods = set(declared_http_methods(cls))
cls._meta = cls._create_options()
cls._meta._prepare()
return cls
def _create_options(cls):
opts_cls = cls._get_options_cls()
meta = getattr(cls, 'Meta', None)
base = getattr(cls, '_meta', None)
return opts_cls(cls, meta, base)
def _get_options_cls(cls):
bases = []
for c in cls.mro():
oc = getattr(cls, 'OPTIONS_CLASS', None)
if oc and not list(filter(lambda x: issubclass(x, oc), bases)):
bases.append(oc)
return type('%sOptions' % cls.__name__, tuple(bases), {})
@export
class ViewOptions(BaseViewOptions):
declared_methods = viewoption(lambda o,*a: set(declared_http_methods(o.view)))
@viewoption(default=Void)
def methods(self, value, base_value=None):
"""List of declared Http methods.
"""
if value is Void:
value = set(base_value or [])
for key, val in self.view.__dict__.items():
if key in http_method_funcs and is_instance_method(val):
value.add(key.upper())
# If we have no method at all in there we don't want to
# add a method list. (This is for instance the case for
# the base class or another subclass of a base method view
# that does not introduce new methods).
return value and list(sorted(value)) or None
@viewoption
def decorators(self, value, bv=None):
value = list(reversed(value or ()))
for c in self.view.mro():
if c is self.view or not isinstance(c, ViewType):
continue
for d in c._meta.decorators:
if d not in value:
value.append(d)
return value
class View(FlaskView, metaclass=ViewType):
#: A list of methods this view can handle.
OPTIONS_CLASS = ViewOptions
methods = None
decorators = ()
endpoint = None
payload_class = Payload
mimetype = None
default_status = 200
default_response_headers = None
@property
def app(self):
return current_app._get_current_object()
@property
def session(self):
return session._get_current_object()
@cached_property
def payload(self):
return self.created_payload()
@property
def headers(self):
return self.payload.headers
@classmethod
def _get_default_view(cls):
if not hasattr(cls, '_default_view'):
def view(*args, **kwargs):
self = view.view_class()
response = self(*args, **kwargs)
return response
name = cls.endpoint or uzi.snake(cls.__name__)
if cls.decorators:
view.__name__ = name
view.__module__ = cls.__module__
for decorator in cls.decorators:
view = decorator(view)
# We attach the view class to the view function for two reasons:
# first of all it allows us to easily figure out what class-based
# view this thing came from, secondly it's also used for instantiating
# the view class so you can actually replace it with something else
# for testing purposes and debugging.
view.view_class = cls
view.__name__ = name
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
view.methods = cls.methods
cls._default_view = view
return cls._default_view
@classmethod
def as_view(cls, name=None, *class_args, **class_kwargs):
"""Converts the class into an actual view function that can be used
with the routing system. Internally this generates a function on the
fly which will instantiate the :class:`View` on each request and call
the :meth:`dispatch_request` method on it.
The arguments passed to :meth:`as_view` are forwarded to the
constructor of the class.
"""
if name is None:
if class_args or class_kwargs:
raise TypeError(
                    'View name is required when class_args or class_kwargs '
                    'are provided.')
else:
return cls._get_default_view()
def view(*args, **kwargs):
self = view.view_class(*class_args, **class_kwargs)
response = self(*args, **kwargs)
return response
if cls.decorators:
view.__name__ = name
view.__module__ = cls.__module__
for decorator in cls.decorators:
view = decorator(view)
# We attach the view class to the view function for two reasons:
# first of all it allows us to easily figure out what class-based
# view this thing came from, secondly it's also used for instantiating
# the view class so you can actually replace it with something else
# for testing purposes and debugging.
view.view_class = cls
view.__name__ = name
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
view.methods = cls.methods
return view
def created_payload(self):
cls = self.payload_class
return cls(
status=self.default_status,
mimetype=self.mimetype,
headers=self.default_response_headers,
data=self.create_payload_data_store(),
errors=self.create_payload_error_store(),
context=self.get_payload_context()
)
def create_payload_data_store(self):
return None
def create_payload_error_store(self):
return None
def get_payload_context(self):
return {}
def abort(self, status, *args, **kwargs):
if status and not is_http_status_code(status):
raise ValueError('%s is not a valid HTTP status code.' % (status,))
kwargs.setdefault('payload', self.payload)
return exc.abort(status, *args, **kwargs)
def dispatch(self):
return self.build_response()
def build_response(self):
return self.payload.to_response()
def http_method_not_allowed(self, *args, **kwargs):
"""
If `request.method` does not correspond to a handler method,
determine what kind of exception to raise.
"""
return self.abort(status.HTTP_405_METHOD_NOT_ALLOWED)
def initial(self, request, *args, **kwargs):
"""Runs anything that needs to occur prior to calling the method handler.
"""
# Ensure that the incoming request is permitted
# self.perform_authentication(request)
# self.check_permissions(request)
# self.check_throttles(request)
pass
def finalize_response(self, request, response, *args, **kwargs):
"""
Returns the final response object.
"""
# Make the error obvious if a proper response is not returned
assert isinstance(response, BaseResponse), (
'Expected a `Response` object '
'to be returned from the view, but received a `%s`'
% type(response)
)
return response
def handle_exception(self, e):
"""
Handle any exception that occurs, by returning an appropriate response,
or re-raising the error.
"""
# if isinstance(exc, (exc.NotAuthenticated, exc.AuthenticationFailed)):
# # WWW-Authenticate header for 401 responses, else coerce to 403
# auth_header = self.get_authenticate_header(self.request)
# if auth_header:
# exc.auth_header = auth_header
# else:
# exc.status_code = status.HTTP_403_FORBIDDEN
self.raise_uncaught_exception(e)
def raise_uncaught_exception(self, e):
raise
# Note: Views are made CSRF exempt from within `as_view` as to prevent
# accidental removal of this exemption in cases where `handle` needs to
# be overridden.
def __call__(self, *args, **kwargs):
"""
`.dispatch()` is pretty much the same as Django's regular dispatch,
but with extra hooks for startup, finalize, and exception handling.
"""
self.args = args
self.kwargs = kwargs
self.request = request
try:
self.initial(request, *args, **kwargs)
# Get the appropriate handler method
if request.method.lower() in self.declared_methods:
handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
response = handler(*args, **kwargs)
if response is None:
response = self.dispatch()
except Exception as e:
response = self.handle_exception(e)
return self.finalize_response(request, response, *args, **kwargs)
|
[
"inspect.ismethod",
"inspect.getfullargspec",
"flask.request.method.lower",
"flask.current_app._get_current_object",
"inspect.isfunction",
"flask.session._get_current_object",
"inspect.getmembers"
] |
[((967, 1020), 'inspect.getmembers', 'inspect.getmembers', (['cls'], {'predicate': 'inspect.isfunction'}), '(cls, predicate=inspect.isfunction)\n', (985, 1020), False, 'import inspect\n'), ((1298, 1324), 'inspect.isfunction', 'inspect.isfunction', (['method'], {}), '(method)\n', (1316, 1324), False, 'import inspect\n'), ((1338, 1368), 'inspect.getfullargspec', 'inspect.getfullargspec', (['method'], {}), '(method)\n', (1360, 1368), False, 'import inspect\n'), ((4117, 4150), 'flask.current_app._get_current_object', 'current_app._get_current_object', ([], {}), '()\n', (4148, 4150), False, 'from flask import request, current_app, session\n'), ((4192, 4221), 'flask.session._get_current_object', 'session._get_current_object', ([], {}), '()\n', (4219, 4221), False, 'from flask import request, current_app, session\n'), ((1088, 1108), 'inspect.ismethod', 'inspect.ismethod', (['fn'], {}), '(fn)\n', (1104, 1108), False, 'import inspect\n'), ((9475, 9497), 'flask.request.method.lower', 'request.method.lower', ([], {}), '()\n', (9495, 9497), False, 'from flask import request, current_app, session\n'), ((9552, 9574), 'flask.request.method.lower', 'request.method.lower', ([], {}), '()\n', (9572, 9574), False, 'from flask import request, current_app, session\n')]
|
# -*- coding:utf-8 -*-
"""
Stock technical indicator interface
Created on 2018/05/26
@author: <NAME>
@group : **
@contact: <EMAIL>
"""
def ma(data, n=10, val_name="close"):
import numpy as np
'''
        Moving Average (MA)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
          n:int
                      moving-average window length, time unit follows data
          val_name:string
                      name of the column to compute on, default "close"
        return
        -------
          list
              moving average values
'''
values = []
MA = []
for index, row in data.iterrows():
values.append(row[val_name])
if len(values) == n:
del values[0]
MA.append(np.average(values))
return np.asarray(MA)
def md(data, n=10, val_name="close"):
import numpy as np
'''
        Moving Standard Deviation (MD)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
          n:int
                      window length, time unit follows data
          val_name:string
                      name of the column to compute on, default "close"
        return
        -------
          list
              moving standard deviation values
'''
values = []
MD = []
for index, row in data.iterrows():
values.append(row[val_name])
if len(values) == n:
del values[0]
MD.append(np.std(values))
return np.asarray(MD)
def _get_day_ema(prices, n):
a = 1 - 2 / (n + 1)
day_ema = 0
for index, price in enumerate(reversed(prices)):
day_ema += a ** index * price
return day_ema
def ema(data, n=12, val_name="close"):
import numpy as np
'''
        Exponential Moving Average (EMA)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
          n:int
                      smoothing window length, time unit follows data
          val_name:string
                      name of the column to compute on, default "close"
        return
        -------
          EMA:numpy.ndarray<numpy.float64>
              exponential moving average values
'''
prices = []
EMA = []
for index, row in data.iterrows():
if index == 0:
past_ema = row[val_name]
EMA.append(row[val_name])
else:
            # Y = [2*X + (N-1)*Y'] / (N+1)
today_ema = (2 * row[val_name] + (n - 1) * past_ema) / (n + 1)
past_ema = today_ema
EMA.append(today_ema)
return np.asarray(EMA)
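# Worked example of the EMA recursion above (illustrative numbers): with n = 12,
# a previous EMA of 10 and a new close of 11, today's EMA is
# (2 * 11 + (12 - 1) * 10) / (12 + 1) = 132 / 13 ≈ 10.15.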
def macd(data, quick_n=12, slow_n=26, dem_n=9, val_name="close"):
import numpy as np
'''
        Moving Average Convergence Divergence (MACD)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
          quick_n:int
                      fast EMA period for the DIFF line
          slow_n:int
                      slow EMA period for the DIFF line
          dem_n:int
                      EMA period of the DEM (signal) line
          val_name:string
                      name of the column to compute on, default "close"
        return
        -------
          OSC:numpy.ndarray<numpy.float64>
              MACD bar / OSC histogram, DIFF - DEM
          DIFF:numpy.ndarray<numpy.float64>
              difference line (fast EMA minus slow EMA)
          DEM:numpy.ndarray<numpy.float64>
              signal line
'''
ema_quick = np.asarray(ema(data, quick_n, val_name))
ema_slow = np.asarray(ema(data, slow_n, val_name))
DIFF = ema_quick - ema_slow
data["diff"] = DIFF
DEM = ema(data, dem_n, "diff")
OSC = DIFF - DEM
return OSC, DIFF, DEM
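# Minimal usage sketch for macd (the price series below is made up; any
# DataFrame with a "close" column and a default RangeIndex starting at 0 works):
#
#     import pandas as pd
#     df = pd.DataFrame({"close": [10.0, 10.5, 10.2, 10.8, 11.0, 10.9]})
#     osc, diff, dem = macd(df, quick_n=12, slow_n=26, dem_n=9)
#
# Note that macd() adds a "diff" column to the DataFrame as a side effect.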
def kdj(data):
import numpy as np
'''
        Stochastic oscillator (KDJ)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
        return
        -------
          K:numpy.ndarray<numpy.float64>
              K line
          D:numpy.ndarray<numpy.float64>
              D line
          J:numpy.ndarray<numpy.float64>
              J line
'''
K, D, J = [], [], []
last_k, last_d = None, None
for index, row in data.iterrows():
if last_k is None or last_d is None:
last_k = 50
last_d = 50
c, l, h = row["close"], row["low"], row["high"]
rsv = (c - l) / (h - l) * 100
k = (2 / 3) * last_k + (1 / 3) * rsv
d = (2 / 3) * last_d + (1 / 3) * k
j = 3 * k - 2 * d
K.append(k)
D.append(d)
J.append(j)
last_k, last_d = k, d
return np.asarray(K), np.asarray(D), np.asarray(J)
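# Worked example of one KDJ step (illustrative bar): with close = 11, low = 8,
# high = 12 and previous K = D = 50, RSV = (11 - 8) / (12 - 8) * 100 = 75,
# K = 2/3 * 50 + 1/3 * 75 ≈ 58.33, D = 2/3 * 50 + 1/3 * 58.33 ≈ 52.78 and
# J = 3 * K - 2 * D ≈ 69.44.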
def rsi(data, n=6, val_name="close"):
import numpy as np
'''
        Relative Strength Index (RSI)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
          n:int
                      window length, time unit follows data
        return
        -------
          RSI:numpy.ndarray<numpy.float64>
              RSI line
'''
RSI = []
UP = []
DOWN = []
for index, row in data.iterrows():
if index == 0:
past_value = row[val_name]
RSI.append(0)
else:
diff = row[val_name] - past_value
if diff > 0:
UP.append(diff)
DOWN.append(0)
else:
UP.append(0)
DOWN.append(diff)
if len(UP) == n:
del UP[0]
if len(DOWN) == n:
del DOWN[0]
past_value = row[val_name]
rsi = np.sum(UP) / (-np.sum(DOWN) + np.sum(UP)) * 100
RSI.append(rsi)
return np.asarray(RSI)
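# Worked example of the RSI formula above (illustrative): if the recent gains
# sum to 3 and the recent losses sum to -1, RSI = 3 / (3 + 1) * 100 = 75.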
def boll(data, n=10, val_name="close", k=2):
'''
        Bollinger Bands (BOLL)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
          n:int
                      window length, time unit follows data
          val_name:string
                      name of the column to compute on, default "close"
          k:int
                      band width in moving standard deviations, default 2
        return
        -------
          BOLL:numpy.ndarray<numpy.float64>
              middle band (moving average)
          UPPER:numpy.ndarray<numpy.float64>
              upper band
          LOWER:numpy.ndarray<numpy.float64>
              lower band
'''
BOLL = ma(data, n, val_name)
MD = md(data, n, val_name)
UPPER = BOLL + k * MD
LOWER = BOLL - k * MD
return BOLL, UPPER, LOWER
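# Worked example of the band construction above (illustrative): with a middle
# band (moving average) of 10, a moving standard deviation of 0.5 and k = 2,
# UPPER = 10 + 2 * 0.5 = 11 and LOWER = 10 - 2 * 0.5 = 9.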
def wnr(data, n=14):
'''
        Williams %R (W&R)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
          n:int
                      window length, time unit follows data
        return
        -------
          WNR:numpy.ndarray<numpy.float64>
              Williams %R values
'''
high_prices = []
low_prices = []
WNR = []
for index, row in data.iterrows():
high_prices.append(row["high"])
if len(high_prices) == n:
del high_prices[0]
low_prices.append(row["low"])
if len(low_prices) == n:
del low_prices[0]
highest = max(high_prices)
lowest = min(low_prices)
wnr = (highest - row["close"]) / (highest - lowest) * 100
WNR.append(wnr)
return WNR
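# Worked example of the Williams %R formula above (illustrative): with an
# n-period high of 12, low of 8 and a close of 10,
# WNR = (12 - 10) / (12 - 8) * 100 = 50; a close of 11.5 near the high gives 12.5.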
def _get_any_ma(arr, n):
import numpy as np
MA = []
values = []
for val in arr:
values.append(val)
if len(values) == n:
del values[0]
MA.append(np.average(values))
return np.asarray(MA)
def dmi(data, n=14, m=14, k=6):
import numpy as np
'''
        Directional Movement Index (DMI)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
          n:int
                      +-DI(n): window length for DI, default 14
          m:int
                      ADX(m): window length for ADX, default 14
          k:int
                      ADXR(k): ADXR looks back k periods, default 6
        return
        -------
          P_DI:numpy.ndarray<numpy.float64>
              +DI values
          M_DI:numpy.ndarray<numpy.float64>
              -DI values
          ADX:numpy.ndarray<numpy.float64>
              ADX values
          ADXR:numpy.ndarray<numpy.float64>
              ADXR values
ref.
-------
https://www.mk-mode.com/octopress/2012/03/03/03002038/
'''
    # upward directional movement (+DM)
    P_DM = [0.]
    # downward directional movement (-DM)
    M_DM = [0.]
    # true range (TR)
    TR = [0.]
    # directional index (DX)
DX = [0.]
P_DI = [0.]
M_DI = [0.]
for index, row in data.iterrows():
if index == 0:
past_row = row
else:
p_dm = row["high"] - past_row["high"]
m_dm = past_row["low"] - row["low"]
if (p_dm < 0 and m_dm < 0) or (np.isclose(p_dm, m_dm)):
p_dm = 0
m_dm = 0
if p_dm > m_dm:
m_dm = 0
if m_dm > p_dm:
p_dm = 0
P_DM.append(p_dm)
M_DM.append(m_dm)
tr = max(row["high"] - past_row["low"], row["high"] - past_row["close"], past_row["close"] - row["low"])
TR.append(tr)
if len(P_DM) == n:
del P_DM[0]
if len(M_DM) == n:
del M_DM[0]
if len(TR) == n:
del TR[0]
            # plus directional indicator (+DI)
            p_di = (np.average(P_DM) / np.average(TR)) * 100
            P_DI.append(p_di)
            # minus directional indicator (-DI)
            m_di = (np.average(M_DM) / np.average(TR)) * 100
            M_DI.append(m_di)
            # single-day +DI and -DI (kept for reference):
            # p_day_di = (p_dm / tr) * 100
            # m_day_di = (m_dm / tr) * 100
            # directional index DX:
            # dx = (di_dif / di_sum) * 100
            # di_dif is the absolute difference between the +DI and -DI values
            # di_sum is the sum of the +DI and -DI values
            # ADX is the n-period moving average of DX.
if (p_di + m_di) == 0:
dx = 0
else:
dx = (abs(p_di - m_di) / (p_di + m_di)) * 100
DX.append(dx)
past_row = row
ADX = _get_any_ma(DX, m)
#
    # ADXR: average of the current ADX and the ADX from k periods ago
ADXR = []
for index, adx in enumerate(ADX):
if index >= k:
adxr = (adx + ADX[index - k]) / 2
ADXR.append(adxr)
else:
ADXR.append(0)
return P_DI, M_DI, ADX, ADXR
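# Worked example of the DX/ADXR steps above (illustrative): with +DI = 25 and
# -DI = 15, DX = |25 - 15| / (25 + 15) * 100 = 25; if today's ADX is 25 and the
# ADX from k = 6 periods ago was 35, ADXR = (25 + 35) / 2 = 30.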
def bias(data, n=5):
import numpy as np
'''
        Bias ratio (BIAS)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
          n:int
                      window length, default 5
        return
        -------
          BIAS:numpy.ndarray<numpy.float64>
              bias ratio values
'''
MA = ma(data, n)
CLOSES = data["close"]
BIAS = (np.true_divide((CLOSES - MA), MA)) * (100 / 100)
return BIAS
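# Worked example of the formula above (illustrative): with a close of 11 and an
# n-period moving average of 10, (11 - 10) / 10 = 0.10. Note that the
# (100 / 100) factor equals 1, so this implementation returns a fraction; the
# conventional BIAS quotes the same value as a percentage (10%).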
def asi(data, n=5):
import numpy as np
'''
        Accumulation Swing Index (ASI)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
          n:int
                      window length, default 5
        return
        -------
          ASI:numpy.ndarray<numpy.float64>
              accumulation swing index values
'''
SI = []
for index, row in data.iterrows():
if index == 0:
last_row = row
SI.append(0.)
else:
a = abs(row["close"] - last_row["close"])
b = abs(row["low"] - last_row["close"])
c = abs(row["high"] - last_row["close"])
d = abs(last_row["close"] - last_row["open"])
if b > a and b > c:
r = b + (1 / 2) * a + (1 / 4) * d
elif c > a and c > b:
r = c + (1 / 4) * d
else:
r = 0
e = row["close"] - last_row["close"]
f = row["close"] - last_row["open"]
g = last_row["close"] - last_row["open"]
x = e + (1 / 2) * f + g
k = max(a, b)
l = 3
if np.isclose(r, 0) or np.isclose(l, 0):
si = 0
else:
si = 50 * (x / r) * (k / l)
SI.append(si)
ASI = _get_any_ma(SI, n)
return ASI
def vr(data, n=26):
import numpy as np
'''
        Volatility Volume Ratio (VR)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
          n:int
                      window length, default 26
        return
        -------
          VR:numpy.ndarray<numpy.float64>
              volume ratio values
'''
VR = []
AV_volumes, BV_volumes, CV_volumes = [], [], []
for index, row in data.iterrows():
if row["close"] > row["open"]:
AV_volumes.append(row["volume"])
elif row["close"] < row["open"]:
BV_volumes.append(row["volume"])
else:
CV_volumes.append(row["volume"])
if len(AV_volumes) == n:
del AV_volumes[0]
if len(BV_volumes) == n:
del BV_volumes[0]
if len(CV_volumes) == n:
del CV_volumes[0]
avs = sum(AV_volumes)
bvs = sum(BV_volumes)
cvs = sum(CV_volumes)
if (bvs + (1 / 2) * cvs) != 0:
vr = (avs + (1 / 2) * cvs) / (bvs + (1 / 2) * cvs)
else:
vr = 0
VR.append(vr)
return np.asarray(VR)
def arbr(data, n=26):
import numpy as np
'''
        AR and BR sentiment indicators
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
          n:int
                      window length, default 26
        return
        -------
          AR:numpy.ndarray<numpy.float64>
              AR values
          BR:numpy.ndarray<numpy.float64>
              BR values
'''
H, L, O, PC = np.array([0]), np.array([0]), np.array([0]), np.array([0])
AR, BR = np.array([0]), np.array([0])
for index, row in data.iterrows():
if index == 0:
last_row = row
else:
h = row["high"]
H = np.append(H, [h])
if len(H) == n:
H = np.delete(H, 0)
l = row["low"]
L = np.append(L, [l])
if len(L) == n:
L = np.delete(L, 0)
o = row["open"]
O = np.append(O, [o])
if len(O) == n:
O = np.delete(O, 0)
pc = last_row["close"]
PC = np.append(PC, [pc])
if len(PC) == n:
PC = np.delete(PC, 0)
ar = (np.sum(np.asarray(H) - np.asarray(O)) / sum(np.asarray(O) - np.asarray(L))) * 100
AR = np.append(AR, [ar])
br = (np.sum(np.asarray(H) - np.asarray(PC)) / sum(np.asarray(PC) - np.asarray(L))) * 100
BR = np.append(BR, [br])
last_row = row
return np.asarray(AR), np.asarray(BR)
def dpo(data, n=20, m=6):
'''
        Detrended Price Oscillator (DPO)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
          n:int
                      window length, default 20
          m:int
                      window length M for MADPO, default 6
        return
        -------
          DPO:numpy.ndarray<numpy.float64>
              DPO values
          MADPO:numpy.ndarray<numpy.float64>
              MADPO values
'''
CLOSES = data["close"]
DPO = CLOSES - ma(data, int(n / 2 + 1))
MADPO = _get_any_ma(DPO, m)
return DPO, MADPO
def trix(data, n=12, m=20):
import numpy as np
'''
        Triple Exponentially Smoothed Average (TRIX)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
          n:int
                      window length, default 12
          m:int
                      window length M for TRMA, default 20
        return
        -------
          TRIX:numpy.ndarray<numpy.float64>
              TRIX values
          TRMA:numpy.ndarray<numpy.float64>
              TRMA values
'''
CLOSES = []
TRIX = []
for index, row in data.iterrows():
CLOSES.append(row["close"])
if len(CLOSES) == n:
del CLOSES[0]
tr = np.average(CLOSES)
if index == 0:
past_tr = tr
TRIX.append(0)
else:
trix = (tr - past_tr) / past_tr * 100
TRIX.append(trix)
TRMA = _get_any_ma(TRIX, m)
return TRIX, TRMA
def bbi(data):
import numpy as np
'''
        Bull and Bear Index (BBI)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
        return
        -------
          BBI:numpy.ndarray<numpy.float64>
              bull and bear index values
'''
CS = []
BBI = []
for index, row in data.iterrows():
CS.append(row["close"])
if len(CS) < 24:
BBI.append(row["close"])
else:
bbi = np.average([np.average(CS[-3:]), np.average(CS[-6:]), np.average(CS[-12:]), np.average(CS[-24:])])
BBI.append(bbi)
return np.asarray(BBI)
def mtm(data, n=6):
import numpy as np
'''
        Momentum Index (MTM)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
          n:int
                      window length, default 6
        return
        -------
          MTM:numpy.ndarray<numpy.float64>
              momentum values
'''
MTM = []
CN = []
for index, row in data.iterrows():
if index < n - 1:
MTM.append(0.)
else:
mtm = row["close"] - CN[index - n]
MTM.append(mtm)
CN.append(row["close"])
return np.asarray(MTM)
def obv(data):
import numpy as np
'''
        On Balance Volume (OBV)
        Parameters
        ------
          data:pandas.DataFrame
                      stock data obtained via get_h_data
        return
        -------
          OBV:numpy.ndarray<numpy.float64>
              on balance volume values
'''
tmp = np.true_divide(((data["close"] - data["low"]) - (data["high"] - data["close"])), (data["high"] - data["low"]))
OBV = tmp * data["volume"]
return OBV
def sar(data, n=4):
raise Exception("Not implemented yet")
def plot_all(data, is_show=True, output=None):
import matplotlib.pyplot as plt
from pylab import rcParams
import numpy as np
rcParams['figure.figsize'] = 18, 50
plt.figure()
    # closing price
plt.subplot(20, 1, 1)
plt.plot(data["date"], data["close"], label="close")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # moving average (MA)
plt.subplot(20, 1, 2)
MA = ma(data, n=10)
plt.plot(data["date"], MA, label="MA(n=10)")
plt.plot(data["date"], data["close"], label="CLOSE PRICE")
plt.title("MA")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # moving standard deviation (MD)
n = 10
plt.subplot(20, 1, 3)
MD = md(data, n)
plt.plot(data["date"], MD, label="MD(n=10)")
plt.title("MD")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # exponential moving average (EMA)
plt.subplot(20, 1, 4)
EMA = ema(data, n)
plt.plot(data["date"], EMA, label="EMA(n=12)")
plt.title("EMA")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # moving average convergence divergence (MACD)
plt.subplot(20, 1, 5)
OSC, DIFF, DEM = macd(data, n)
plt.plot(data["date"], OSC, label="OSC")
plt.plot(data["date"], DIFF, label="DIFF")
plt.plot(data["date"], DEM, label="DEM")
plt.title("MACD")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # Stochastic oscillator (KDJ)
plt.subplot(20, 1, 6)
K, D, J = kdj(data)
plt.plot(data["date"], K, label="K")
plt.plot(data["date"], D, label="D")
plt.plot(data["date"], J, label="J")
plt.title("KDJ")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # Relative strength index (RSI)
plt.subplot(20, 1, 7)
RSI6 = rsi(data, 6)
RSI12 = rsi(data, 12)
RSI24 = rsi(data, 24)
plt.plot(data["date"], RSI6, label="RSI(n=6)")
plt.plot(data["date"], RSI12, label="RSI(n=12)")
plt.plot(data["date"], RSI24, label="RSI(n=24)")
plt.title("RSI")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # Bollinger Bands (BOLL)
plt.subplot(20, 1, 8)
BOLL, UPPER, LOWER = boll(data)
plt.plot(data["date"], BOLL, label="BOLL(n=10)")
plt.plot(data["date"], UPPER, label="UPPER(n=10)")
plt.plot(data["date"], LOWER, label="LOWER(n=10)")
plt.plot(data["date"], data["close"], label="CLOSE PRICE")
plt.title("BOLL")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # Williams %R (W&R)
plt.subplot(20, 1, 9)
WNR = wnr(data, n=14)
plt.plot(data["date"], WNR, label="WNR(n=14)")
plt.title("WNR")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # Directional movement index (DMI)
plt.subplot(20, 1, 10)
P_DI, M_DI, ADX, ADXR = dmi(data)
plt.plot(data["date"], P_DI, label="+DI(n=14)")
plt.plot(data["date"], M_DI, label="-DI(n=14)")
plt.plot(data["date"], ADX, label="ADX(m=14)")
plt.plot(data["date"], ADXR, label="ADXR(k=6)")
plt.title("DMI")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # Bias ratio (BIAS)
plt.subplot(20, 1, 11)
BIAS = bias(data, n=5)
plt.plot(data["date"], BIAS, label="BIAS(n=5)")
plt.title("BIAS")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # Accumulation swing index (ASI)
plt.subplot(20, 1, 12)
ASI = asi(data, n=5)
plt.plot(data["date"], ASI, label="ASI(n=5)")
plt.title("ASI")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # Volume ratio (VR)
plt.subplot(20, 1, 13)
VR = vr(data, n=26)
plt.plot(data["date"], VR, label="VR(n=26)")
plt.title("VR")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # AR/BR sentiment indicators (ARBR)
plt.subplot(20, 1, 14)
AR, BR = arbr(data, n=26)
plt.plot(data["date"], AR, label="AR(n=26)")
plt.plot(data["date"], BR, label="BR(n=26)")
plt.title("ARBR")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # Detrended price oscillator (DPO)
plt.subplot(20, 1, 15)
DPO, MADPO = dpo(data, n=20, m=6)
plt.plot(data["date"], DPO, label="DPO(n=20)")
plt.plot(data["date"], MADPO, label="MADPO(m=6)")
plt.title("DPO")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # Triple exponentially smoothed moving average (TRIX)
plt.subplot(20, 1, 16)
TRIX, TRMA = trix(data, n=12, m=20)
plt.plot(data["date"], TRIX, label="DPO(n=12)")
plt.plot(data["date"], TRMA, label="MADPO(m=20)")
plt.title("TRIX")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # Bull and bear index (BBI)
plt.subplot(20, 1, 17)
BBI = bbi(data)
plt.plot(data["date"], BBI, label="BBI(3,6,12,24)")
plt.title("BBI")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # Momentum (MTM)
plt.subplot(20, 1, 18)
MTM = mtm(data, n=6)
plt.plot(data["date"], MTM, label="MTM(n=6)")
plt.title("MTM")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
    # On balance volume (OBV)
plt.subplot(20, 1, 19)
OBV = obv(data)
plt.plot(data["date"], OBV, label="OBV")
plt.title("OBV")
plt.xlabel('date')
plt.ylabel('value')
plt.legend()
plt.xticks(rotation=90)
plt.tight_layout()
if is_show:
plt.show()
if output is not None:
plt.savefig(output)
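# A hedged usage sketch, added for illustration only: render every indicator panel for a
# DataFrame `df` with "date", "open", "high", "low", "close" and "volume" columns and save
# the figure to disk instead of opening a window. The output file name is an assumption.
def _plot_all_example(df):
    plot_all(df, is_show=False, output="indicators.png")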
|
[
"matplotlib.pyplot.title",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.isclose",
"matplotlib.pyplot.tight_layout",
"numpy.true_divide",
"numpy.std",
"numpy.append",
"matplotlib.pyplot.xticks",
"numpy.average",
"matplotlib.pyplot.show",
"numpy.asarray",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"numpy.delete",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((685, 699), 'numpy.asarray', 'np.asarray', (['MA'], {}), '(MA)\n', (695, 699), True, 'import numpy as np\n'), ((1257, 1271), 'numpy.asarray', 'np.asarray', (['MD'], {}), '(MD)\n', (1267, 1271), True, 'import numpy as np\n'), ((2290, 2305), 'numpy.asarray', 'np.asarray', (['EMA'], {}), '(EMA)\n', (2300, 2305), True, 'import numpy as np\n'), ((5271, 5286), 'numpy.asarray', 'np.asarray', (['RSI'], {}), '(RSI)\n', (5281, 5286), True, 'import numpy as np\n'), ((6907, 6921), 'numpy.asarray', 'np.asarray', (['MA'], {}), '(MA)\n', (6917, 6921), True, 'import numpy as np\n'), ((12531, 12545), 'numpy.asarray', 'np.asarray', (['VR'], {}), '(VR)\n', (12541, 12545), True, 'import numpy as np\n'), ((16063, 16078), 'numpy.asarray', 'np.asarray', (['BBI'], {}), '(BBI)\n', (16073, 16078), True, 'import numpy as np\n'), ((16664, 16679), 'numpy.asarray', 'np.asarray', (['MTM'], {}), '(MTM)\n', (16674, 16679), True, 'import numpy as np\n'), ((16988, 17096), 'numpy.true_divide', 'np.true_divide', (["(data['close'] - data['low'] - (data['high'] - data['close']))", "(data['high'] - data['low'])"], {}), "(data['close'] - data['low'] - (data['high'] - data['close']),\n data['high'] - data['low'])\n", (17002, 17096), True, 'import numpy as np\n'), ((17394, 17406), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17404, 17406), True, 'import matplotlib.pyplot as plt\n'), ((17421, 17442), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(1)'], {}), '(20, 1, 1)\n', (17432, 17442), True, 'import matplotlib.pyplot as plt\n'), ((17447, 17499), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", "data['close']"], {'label': '"""close"""'}), "(data['date'], data['close'], label='close')\n", (17455, 17499), True, 'import matplotlib.pyplot as plt\n'), ((17504, 17522), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (17514, 17522), True, 'import matplotlib.pyplot as plt\n'), ((17527, 17546), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (17537, 17546), True, 'import matplotlib.pyplot as plt\n'), ((17551, 17563), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (17561, 17563), True, 'import matplotlib.pyplot as plt\n'), ((17568, 17591), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (17578, 17591), True, 'import matplotlib.pyplot as plt\n'), ((17609, 17630), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(2)'], {}), '(20, 1, 2)\n', (17620, 17630), True, 'import matplotlib.pyplot as plt\n'), ((17659, 17703), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'MA'], {'label': '"""MA(n=10)"""'}), "(data['date'], MA, label='MA(n=10)')\n", (17667, 17703), True, 'import matplotlib.pyplot as plt\n'), ((17708, 17766), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", "data['close']"], {'label': '"""CLOSE PRICE"""'}), "(data['date'], data['close'], label='CLOSE PRICE')\n", (17716, 17766), True, 'import matplotlib.pyplot as plt\n'), ((17771, 17786), 'matplotlib.pyplot.title', 'plt.title', (['"""MA"""'], {}), "('MA')\n", (17780, 17786), True, 'import matplotlib.pyplot as plt\n'), ((17791, 17809), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (17801, 17809), True, 'import matplotlib.pyplot as plt\n'), ((17814, 17833), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (17824, 17833), True, 'import matplotlib.pyplot as plt\n'), ((17838, 17850), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', 
(17848, 17850), True, 'import matplotlib.pyplot as plt\n'), ((17855, 17878), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (17865, 17878), True, 'import matplotlib.pyplot as plt\n'), ((17907, 17928), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(3)'], {}), '(20, 1, 3)\n', (17918, 17928), True, 'import matplotlib.pyplot as plt\n'), ((17954, 17998), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'MD'], {'label': '"""MD(n=10)"""'}), "(data['date'], MD, label='MD(n=10)')\n", (17962, 17998), True, 'import matplotlib.pyplot as plt\n'), ((18003, 18018), 'matplotlib.pyplot.title', 'plt.title', (['"""MD"""'], {}), "('MD')\n", (18012, 18018), True, 'import matplotlib.pyplot as plt\n'), ((18023, 18041), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (18033, 18041), True, 'import matplotlib.pyplot as plt\n'), ((18046, 18065), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (18056, 18065), True, 'import matplotlib.pyplot as plt\n'), ((18070, 18082), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18080, 18082), True, 'import matplotlib.pyplot as plt\n'), ((18087, 18110), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (18097, 18110), True, 'import matplotlib.pyplot as plt\n'), ((18130, 18151), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(4)'], {}), '(20, 1, 4)\n', (18141, 18151), True, 'import matplotlib.pyplot as plt\n'), ((18179, 18225), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'EMA'], {'label': '"""EMA(n=12)"""'}), "(data['date'], EMA, label='EMA(n=12)')\n", (18187, 18225), True, 'import matplotlib.pyplot as plt\n'), ((18230, 18246), 'matplotlib.pyplot.title', 'plt.title', (['"""EMA"""'], {}), "('EMA')\n", (18239, 18246), True, 'import matplotlib.pyplot as plt\n'), ((18251, 18269), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (18261, 18269), True, 'import matplotlib.pyplot as plt\n'), ((18274, 18293), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (18284, 18293), True, 'import matplotlib.pyplot as plt\n'), ((18298, 18310), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18308, 18310), True, 'import matplotlib.pyplot as plt\n'), ((18315, 18338), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (18325, 18338), True, 'import matplotlib.pyplot as plt\n'), ((18405, 18426), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(5)'], {}), '(20, 1, 5)\n', (18416, 18426), True, 'import matplotlib.pyplot as plt\n'), ((18466, 18506), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'OSC'], {'label': '"""OSC"""'}), "(data['date'], OSC, label='OSC')\n", (18474, 18506), True, 'import matplotlib.pyplot as plt\n'), ((18511, 18553), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'DIFF'], {'label': '"""DIFF"""'}), "(data['date'], DIFF, label='DIFF')\n", (18519, 18553), True, 'import matplotlib.pyplot as plt\n'), ((18558, 18598), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'DEM'], {'label': '"""DEM"""'}), "(data['date'], DEM, label='DEM')\n", (18566, 18598), True, 'import matplotlib.pyplot as plt\n'), ((18603, 18620), 'matplotlib.pyplot.title', 'plt.title', (['"""MACD"""'], {}), "('MACD')\n", (18612, 18620), True, 'import matplotlib.pyplot as plt\n'), ((18625, 18643), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", 
(18635, 18643), True, 'import matplotlib.pyplot as plt\n'), ((18648, 18667), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (18658, 18667), True, 'import matplotlib.pyplot as plt\n'), ((18672, 18684), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18682, 18684), True, 'import matplotlib.pyplot as plt\n'), ((18689, 18712), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (18699, 18712), True, 'import matplotlib.pyplot as plt\n'), ((18729, 18750), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(6)'], {}), '(20, 1, 6)\n', (18740, 18750), True, 'import matplotlib.pyplot as plt\n'), ((18779, 18815), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'K'], {'label': '"""K"""'}), "(data['date'], K, label='K')\n", (18787, 18815), True, 'import matplotlib.pyplot as plt\n'), ((18820, 18856), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'D'], {'label': '"""D"""'}), "(data['date'], D, label='D')\n", (18828, 18856), True, 'import matplotlib.pyplot as plt\n'), ((18861, 18897), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'J'], {'label': '"""J"""'}), "(data['date'], J, label='J')\n", (18869, 18897), True, 'import matplotlib.pyplot as plt\n'), ((18902, 18918), 'matplotlib.pyplot.title', 'plt.title', (['"""KDJ"""'], {}), "('KDJ')\n", (18911, 18918), True, 'import matplotlib.pyplot as plt\n'), ((18923, 18941), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (18933, 18941), True, 'import matplotlib.pyplot as plt\n'), ((18946, 18965), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (18956, 18965), True, 'import matplotlib.pyplot as plt\n'), ((18970, 18982), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (18980, 18982), True, 'import matplotlib.pyplot as plt\n'), ((18987, 19010), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (18997, 19010), True, 'import matplotlib.pyplot as plt\n'), ((19029, 19050), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(7)'], {}), '(20, 1, 7)\n', (19040, 19050), True, 'import matplotlib.pyplot as plt\n'), ((19131, 19177), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'RSI6'], {'label': '"""RSI(n=6)"""'}), "(data['date'], RSI6, label='RSI(n=6)')\n", (19139, 19177), True, 'import matplotlib.pyplot as plt\n'), ((19182, 19230), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'RSI12'], {'label': '"""RSI(n=12)"""'}), "(data['date'], RSI12, label='RSI(n=12)')\n", (19190, 19230), True, 'import matplotlib.pyplot as plt\n'), ((19235, 19283), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'RSI24'], {'label': '"""RSI(n=24)"""'}), "(data['date'], RSI24, label='RSI(n=24)')\n", (19243, 19283), True, 'import matplotlib.pyplot as plt\n'), ((19288, 19304), 'matplotlib.pyplot.title', 'plt.title', (['"""RSI"""'], {}), "('RSI')\n", (19297, 19304), True, 'import matplotlib.pyplot as plt\n'), ((19309, 19327), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (19319, 19327), True, 'import matplotlib.pyplot as plt\n'), ((19332, 19351), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (19342, 19351), True, 'import matplotlib.pyplot as plt\n'), ((19356, 19368), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (19366, 19368), True, 'import matplotlib.pyplot as plt\n'), ((19373, 19396), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), 
'(rotation=90)\n', (19383, 19396), True, 'import matplotlib.pyplot as plt\n'), ((19418, 19439), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(8)'], {}), '(20, 1, 8)\n', (19429, 19439), True, 'import matplotlib.pyplot as plt\n'), ((19480, 19528), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'BOLL'], {'label': '"""BOLL(n=10)"""'}), "(data['date'], BOLL, label='BOLL(n=10)')\n", (19488, 19528), True, 'import matplotlib.pyplot as plt\n'), ((19533, 19583), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'UPPER'], {'label': '"""UPPER(n=10)"""'}), "(data['date'], UPPER, label='UPPER(n=10)')\n", (19541, 19583), True, 'import matplotlib.pyplot as plt\n'), ((19588, 19638), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'LOWER'], {'label': '"""LOWER(n=10)"""'}), "(data['date'], LOWER, label='LOWER(n=10)')\n", (19596, 19638), True, 'import matplotlib.pyplot as plt\n'), ((19643, 19701), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", "data['close']"], {'label': '"""CLOSE PRICE"""'}), "(data['date'], data['close'], label='CLOSE PRICE')\n", (19651, 19701), True, 'import matplotlib.pyplot as plt\n'), ((19706, 19723), 'matplotlib.pyplot.title', 'plt.title', (['"""BOLL"""'], {}), "('BOLL')\n", (19715, 19723), True, 'import matplotlib.pyplot as plt\n'), ((19728, 19746), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (19738, 19746), True, 'import matplotlib.pyplot as plt\n'), ((19751, 19770), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (19761, 19770), True, 'import matplotlib.pyplot as plt\n'), ((19775, 19787), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (19785, 19787), True, 'import matplotlib.pyplot as plt\n'), ((19792, 19815), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (19802, 19815), True, 'import matplotlib.pyplot as plt\n'), ((19836, 19857), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(9)'], {}), '(20, 1, 9)\n', (19847, 19857), True, 'import matplotlib.pyplot as plt\n'), ((19888, 19934), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'WNR'], {'label': '"""WNR(n=14)"""'}), "(data['date'], WNR, label='WNR(n=14)')\n", (19896, 19934), True, 'import matplotlib.pyplot as plt\n'), ((19939, 19955), 'matplotlib.pyplot.title', 'plt.title', (['"""WNR"""'], {}), "('WNR')\n", (19948, 19955), True, 'import matplotlib.pyplot as plt\n'), ((19960, 19978), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (19970, 19978), True, 'import matplotlib.pyplot as plt\n'), ((19983, 20002), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (19993, 20002), True, 'import matplotlib.pyplot as plt\n'), ((20007, 20019), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (20017, 20019), True, 'import matplotlib.pyplot as plt\n'), ((20024, 20047), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (20034, 20047), True, 'import matplotlib.pyplot as plt\n'), ((20067, 20089), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(10)'], {}), '(20, 1, 10)\n', (20078, 20089), True, 'import matplotlib.pyplot as plt\n'), ((20132, 20179), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'P_DI'], {'label': '"""+DI(n=14)"""'}), "(data['date'], P_DI, label='+DI(n=14)')\n", (20140, 20179), True, 'import matplotlib.pyplot as plt\n'), ((20184, 20231), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'M_DI'], {'label': 
'"""-DI(n=14)"""'}), "(data['date'], M_DI, label='-DI(n=14)')\n", (20192, 20231), True, 'import matplotlib.pyplot as plt\n'), ((20236, 20282), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'ADX'], {'label': '"""ADX(m=14)"""'}), "(data['date'], ADX, label='ADX(m=14)')\n", (20244, 20282), True, 'import matplotlib.pyplot as plt\n'), ((20287, 20334), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'ADXR'], {'label': '"""ADXR(k=6)"""'}), "(data['date'], ADXR, label='ADXR(k=6)')\n", (20295, 20334), True, 'import matplotlib.pyplot as plt\n'), ((20339, 20355), 'matplotlib.pyplot.title', 'plt.title', (['"""DMI"""'], {}), "('DMI')\n", (20348, 20355), True, 'import matplotlib.pyplot as plt\n'), ((20360, 20378), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (20370, 20378), True, 'import matplotlib.pyplot as plt\n'), ((20383, 20402), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (20393, 20402), True, 'import matplotlib.pyplot as plt\n'), ((20407, 20419), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (20417, 20419), True, 'import matplotlib.pyplot as plt\n'), ((20424, 20447), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (20434, 20447), True, 'import matplotlib.pyplot as plt\n'), ((20463, 20485), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(11)'], {}), '(20, 1, 11)\n', (20474, 20485), True, 'import matplotlib.pyplot as plt\n'), ((20517, 20564), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'BIAS'], {'label': '"""BIAS(n=5)"""'}), "(data['date'], BIAS, label='BIAS(n=5)')\n", (20525, 20564), True, 'import matplotlib.pyplot as plt\n'), ((20569, 20586), 'matplotlib.pyplot.title', 'plt.title', (['"""BIAS"""'], {}), "('BIAS')\n", (20578, 20586), True, 'import matplotlib.pyplot as plt\n'), ((20591, 20609), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (20601, 20609), True, 'import matplotlib.pyplot as plt\n'), ((20614, 20633), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (20624, 20633), True, 'import matplotlib.pyplot as plt\n'), ((20638, 20650), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (20648, 20650), True, 'import matplotlib.pyplot as plt\n'), ((20655, 20678), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (20665, 20678), True, 'import matplotlib.pyplot as plt\n'), ((20697, 20719), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(12)'], {}), '(20, 1, 12)\n', (20708, 20719), True, 'import matplotlib.pyplot as plt\n'), ((20749, 20794), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'ASI'], {'label': '"""ASI(n=5)"""'}), "(data['date'], ASI, label='ASI(n=5)')\n", (20757, 20794), True, 'import matplotlib.pyplot as plt\n'), ((20799, 20815), 'matplotlib.pyplot.title', 'plt.title', (['"""ASI"""'], {}), "('ASI')\n", (20808, 20815), True, 'import matplotlib.pyplot as plt\n'), ((20820, 20838), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (20830, 20838), True, 'import matplotlib.pyplot as plt\n'), ((20843, 20862), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (20853, 20862), True, 'import matplotlib.pyplot as plt\n'), ((20867, 20879), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (20877, 20879), True, 'import matplotlib.pyplot as plt\n'), ((20884, 20907), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), 
'(rotation=90)\n', (20894, 20907), True, 'import matplotlib.pyplot as plt\n'), ((20926, 20948), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(13)'], {}), '(20, 1, 13)\n', (20937, 20948), True, 'import matplotlib.pyplot as plt\n'), ((20977, 21021), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'VR'], {'label': '"""VR(n=26)"""'}), "(data['date'], VR, label='VR(n=26)')\n", (20985, 21021), True, 'import matplotlib.pyplot as plt\n'), ((21026, 21041), 'matplotlib.pyplot.title', 'plt.title', (['"""VR"""'], {}), "('VR')\n", (21035, 21041), True, 'import matplotlib.pyplot as plt\n'), ((21046, 21064), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (21056, 21064), True, 'import matplotlib.pyplot as plt\n'), ((21069, 21088), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (21079, 21088), True, 'import matplotlib.pyplot as plt\n'), ((21093, 21105), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (21103, 21105), True, 'import matplotlib.pyplot as plt\n'), ((21110, 21133), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (21120, 21133), True, 'import matplotlib.pyplot as plt\n'), ((21152, 21174), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(14)'], {}), '(20, 1, 14)\n', (21163, 21174), True, 'import matplotlib.pyplot as plt\n'), ((21209, 21253), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'AR'], {'label': '"""AR(n=26)"""'}), "(data['date'], AR, label='AR(n=26)')\n", (21217, 21253), True, 'import matplotlib.pyplot as plt\n'), ((21258, 21302), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'BR'], {'label': '"""BR(n=26)"""'}), "(data['date'], BR, label='BR(n=26)')\n", (21266, 21302), True, 'import matplotlib.pyplot as plt\n'), ((21307, 21324), 'matplotlib.pyplot.title', 'plt.title', (['"""ARBR"""'], {}), "('ARBR')\n", (21316, 21324), True, 'import matplotlib.pyplot as plt\n'), ((21329, 21347), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (21339, 21347), True, 'import matplotlib.pyplot as plt\n'), ((21352, 21371), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (21362, 21371), True, 'import matplotlib.pyplot as plt\n'), ((21376, 21388), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (21386, 21388), True, 'import matplotlib.pyplot as plt\n'), ((21393, 21416), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (21403, 21416), True, 'import matplotlib.pyplot as plt\n'), ((21434, 21456), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(15)'], {}), '(20, 1, 15)\n', (21445, 21456), True, 'import matplotlib.pyplot as plt\n'), ((21499, 21545), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'DPO'], {'label': '"""DPO(n=20)"""'}), "(data['date'], DPO, label='DPO(n=20)')\n", (21507, 21545), True, 'import matplotlib.pyplot as plt\n'), ((21550, 21599), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'MADPO'], {'label': '"""MADPO(m=6)"""'}), "(data['date'], MADPO, label='MADPO(m=6)')\n", (21558, 21599), True, 'import matplotlib.pyplot as plt\n'), ((21604, 21620), 'matplotlib.pyplot.title', 'plt.title', (['"""DPO"""'], {}), "('DPO')\n", (21613, 21620), True, 'import matplotlib.pyplot as plt\n'), ((21625, 21643), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (21635, 21643), True, 'import matplotlib.pyplot as plt\n'), ((21648, 21667), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""value"""'], {}), "('value')\n", (21658, 21667), True, 'import matplotlib.pyplot as plt\n'), ((21672, 21684), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (21682, 21684), True, 'import matplotlib.pyplot as plt\n'), ((21689, 21712), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (21699, 21712), True, 'import matplotlib.pyplot as plt\n'), ((21734, 21756), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(16)'], {}), '(20, 1, 16)\n', (21745, 21756), True, 'import matplotlib.pyplot as plt\n'), ((21801, 21848), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'TRIX'], {'label': '"""DPO(n=12)"""'}), "(data['date'], TRIX, label='DPO(n=12)')\n", (21809, 21848), True, 'import matplotlib.pyplot as plt\n'), ((21853, 21902), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'TRMA'], {'label': '"""MADPO(m=20)"""'}), "(data['date'], TRMA, label='MADPO(m=20)')\n", (21861, 21902), True, 'import matplotlib.pyplot as plt\n'), ((21907, 21924), 'matplotlib.pyplot.title', 'plt.title', (['"""TRIX"""'], {}), "('TRIX')\n", (21916, 21924), True, 'import matplotlib.pyplot as plt\n'), ((21929, 21947), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (21939, 21947), True, 'import matplotlib.pyplot as plt\n'), ((21952, 21971), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (21962, 21971), True, 'import matplotlib.pyplot as plt\n'), ((21976, 21988), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (21986, 21988), True, 'import matplotlib.pyplot as plt\n'), ((21993, 22016), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (22003, 22016), True, 'import matplotlib.pyplot as plt\n'), ((22033, 22055), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(17)'], {}), '(20, 1, 17)\n', (22044, 22055), True, 'import matplotlib.pyplot as plt\n'), ((22080, 22131), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'BBI'], {'label': '"""BBI(3,6,12,24)"""'}), "(data['date'], BBI, label='BBI(3,6,12,24)')\n", (22088, 22131), True, 'import matplotlib.pyplot as plt\n'), ((22136, 22152), 'matplotlib.pyplot.title', 'plt.title', (['"""BBI"""'], {}), "('BBI')\n", (22145, 22152), True, 'import matplotlib.pyplot as plt\n'), ((22157, 22175), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (22167, 22175), True, 'import matplotlib.pyplot as plt\n'), ((22180, 22199), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (22190, 22199), True, 'import matplotlib.pyplot as plt\n'), ((22204, 22216), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (22214, 22216), True, 'import matplotlib.pyplot as plt\n'), ((22221, 22244), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (22231, 22244), True, 'import matplotlib.pyplot as plt\n'), ((22261, 22283), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(18)'], {}), '(20, 1, 18)\n', (22272, 22283), True, 'import matplotlib.pyplot as plt\n'), ((22313, 22358), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'MTM'], {'label': '"""MTM(n=6)"""'}), "(data['date'], MTM, label='MTM(n=6)')\n", (22321, 22358), True, 'import matplotlib.pyplot as plt\n'), ((22363, 22379), 'matplotlib.pyplot.title', 'plt.title', (['"""MTM"""'], {}), "('MTM')\n", (22372, 22379), True, 'import matplotlib.pyplot as plt\n'), ((22384, 22402), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""date"""'], {}), "('date')\n", (22394, 22402), True, 'import matplotlib.pyplot as plt\n'), ((22407, 22426), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (22417, 22426), True, 'import matplotlib.pyplot as plt\n'), ((22431, 22443), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (22441, 22443), True, 'import matplotlib.pyplot as plt\n'), ((22448, 22471), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (22458, 22471), True, 'import matplotlib.pyplot as plt\n'), ((22488, 22510), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(20)', '(1)', '(19)'], {}), '(20, 1, 19)\n', (22499, 22510), True, 'import matplotlib.pyplot as plt\n'), ((22535, 22575), 'matplotlib.pyplot.plot', 'plt.plot', (["data['date']", 'OBV'], {'label': '"""OBV"""'}), "(data['date'], OBV, label='OBV')\n", (22543, 22575), True, 'import matplotlib.pyplot as plt\n'), ((22580, 22596), 'matplotlib.pyplot.title', 'plt.title', (['"""OBV"""'], {}), "('OBV')\n", (22589, 22596), True, 'import matplotlib.pyplot as plt\n'), ((22601, 22619), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (22611, 22619), True, 'import matplotlib.pyplot as plt\n'), ((22624, 22643), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""value"""'], {}), "('value')\n", (22634, 22643), True, 'import matplotlib.pyplot as plt\n'), ((22648, 22660), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (22658, 22660), True, 'import matplotlib.pyplot as plt\n'), ((22665, 22688), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (22675, 22688), True, 'import matplotlib.pyplot as plt\n'), ((22694, 22712), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22710, 22712), True, 'import matplotlib.pyplot as plt\n'), ((4209, 4222), 'numpy.asarray', 'np.asarray', (['K'], {}), '(K)\n', (4219, 4222), True, 'import numpy as np\n'), ((4224, 4237), 'numpy.asarray', 'np.asarray', (['D'], {}), '(D)\n', (4234, 4237), True, 'import numpy as np\n'), ((4239, 4252), 'numpy.asarray', 'np.asarray', (['J'], {}), '(J)\n', (4249, 4252), True, 'import numpy as np\n'), ((10014, 10045), 'numpy.true_divide', 'np.true_divide', (['(CLOSES - MA)', 'MA'], {}), '(CLOSES - MA, MA)\n', (10028, 10045), True, 'import numpy as np\n'), ((12952, 12965), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (12960, 12965), True, 'import numpy as np\n'), ((12967, 12980), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (12975, 12980), True, 'import numpy as np\n'), ((12982, 12995), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (12990, 12995), True, 'import numpy as np\n'), ((12997, 13010), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (13005, 13010), True, 'import numpy as np\n'), ((13025, 13038), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (13033, 13038), True, 'import numpy as np\n'), ((13040, 13053), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (13048, 13053), True, 'import numpy as np\n'), ((13993, 14007), 'numpy.asarray', 'np.asarray', (['AR'], {}), '(AR)\n', (14003, 14007), True, 'import numpy as np\n'), ((14009, 14023), 'numpy.asarray', 'np.asarray', (['BR'], {}), '(BR)\n', (14019, 14023), True, 'import numpy as np\n'), ((15192, 15210), 'numpy.average', 'np.average', (['CLOSES'], {}), '(CLOSES)\n', (15202, 15210), True, 'import numpy as np\n'), ((22738, 22748), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22746, 22748), True, 'import matplotlib.pyplot as plt\n'), ((22785, 
22804), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output'], {}), '(output)\n', (22796, 22804), True, 'import matplotlib.pyplot as plt\n'), ((653, 671), 'numpy.average', 'np.average', (['values'], {}), '(values)\n', (663, 671), True, 'import numpy as np\n'), ((1229, 1243), 'numpy.std', 'np.std', (['values'], {}), '(values)\n', (1235, 1243), True, 'import numpy as np\n'), ((6876, 6894), 'numpy.average', 'np.average', (['values'], {}), '(values)\n', (6886, 6894), True, 'import numpy as np\n'), ((13204, 13221), 'numpy.append', 'np.append', (['H', '[h]'], {}), '(H, [h])\n', (13213, 13221), True, 'import numpy as np\n'), ((13329, 13346), 'numpy.append', 'np.append', (['L', '[l]'], {}), '(L, [l])\n', (13338, 13346), True, 'import numpy as np\n'), ((13455, 13472), 'numpy.append', 'np.append', (['O', '[o]'], {}), '(O, [o])\n', (13464, 13472), True, 'import numpy as np\n'), ((13589, 13608), 'numpy.append', 'np.append', (['PC', '[pc]'], {}), '(PC, [pc])\n', (13598, 13608), True, 'import numpy as np\n'), ((13794, 13813), 'numpy.append', 'np.append', (['AR', '[ar]'], {}), '(AR, [ar])\n', (13803, 13813), True, 'import numpy as np\n'), ((13933, 13952), 'numpy.append', 'np.append', (['BR', '[br]'], {}), '(BR, [br])\n', (13942, 13952), True, 'import numpy as np\n'), ((8078, 8100), 'numpy.isclose', 'np.isclose', (['p_dm', 'm_dm'], {}), '(p_dm, m_dm)\n', (8088, 8100), True, 'import numpy as np\n'), ((11205, 11221), 'numpy.isclose', 'np.isclose', (['r', '(0)'], {}), '(r, 0)\n', (11215, 11221), True, 'import numpy as np\n'), ((11225, 11241), 'numpy.isclose', 'np.isclose', (['l', '(0)'], {}), '(l, 0)\n', (11235, 11241), True, 'import numpy as np\n'), ((13270, 13285), 'numpy.delete', 'np.delete', (['H', '(0)'], {}), '(H, 0)\n', (13279, 13285), True, 'import numpy as np\n'), ((13395, 13410), 'numpy.delete', 'np.delete', (['L', '(0)'], {}), '(L, 0)\n', (13404, 13410), True, 'import numpy as np\n'), ((13521, 13536), 'numpy.delete', 'np.delete', (['O', '(0)'], {}), '(O, 0)\n', (13530, 13536), True, 'import numpy as np\n'), ((13659, 13675), 'numpy.delete', 'np.delete', (['PC', '(0)'], {}), '(PC, 0)\n', (13668, 13675), True, 'import numpy as np\n'), ((5183, 5193), 'numpy.sum', 'np.sum', (['UP'], {}), '(UP)\n', (5189, 5193), True, 'import numpy as np\n'), ((8684, 8700), 'numpy.average', 'np.average', (['P_DM'], {}), '(P_DM)\n', (8694, 8700), True, 'import numpy as np\n'), ((8703, 8717), 'numpy.average', 'np.average', (['TR'], {}), '(TR)\n', (8713, 8717), True, 'import numpy as np\n'), ((8801, 8817), 'numpy.average', 'np.average', (['M_DM'], {}), '(M_DM)\n', (8811, 8817), True, 'import numpy as np\n'), ((8820, 8834), 'numpy.average', 'np.average', (['TR'], {}), '(TR)\n', (8830, 8834), True, 'import numpy as np\n'), ((15936, 15955), 'numpy.average', 'np.average', (['CS[-3:]'], {}), '(CS[-3:])\n', (15946, 15955), True, 'import numpy as np\n'), ((15957, 15976), 'numpy.average', 'np.average', (['CS[-6:]'], {}), '(CS[-6:])\n', (15967, 15976), True, 'import numpy as np\n'), ((15978, 15998), 'numpy.average', 'np.average', (['CS[-12:]'], {}), '(CS[-12:])\n', (15988, 15998), True, 'import numpy as np\n'), ((16000, 16020), 'numpy.average', 'np.average', (['CS[-24:]'], {}), '(CS[-24:])\n', (16010, 16020), True, 'import numpy as np\n'), ((5213, 5223), 'numpy.sum', 'np.sum', (['UP'], {}), '(UP)\n', (5219, 5223), True, 'import numpy as np\n'), ((5198, 5210), 'numpy.sum', 'np.sum', (['DOWN'], {}), '(DOWN)\n', (5204, 5210), True, 'import numpy as np\n'), ((13702, 13715), 'numpy.asarray', 'np.asarray', (['H'], {}), '(H)\n', (13712, 
13715), True, 'import numpy as np\n'), ((13718, 13731), 'numpy.asarray', 'np.asarray', (['O'], {}), '(O)\n', (13728, 13731), True, 'import numpy as np\n'), ((13739, 13752), 'numpy.asarray', 'np.asarray', (['O'], {}), '(O)\n', (13749, 13752), True, 'import numpy as np\n'), ((13755, 13768), 'numpy.asarray', 'np.asarray', (['L'], {}), '(L)\n', (13765, 13768), True, 'import numpy as np\n'), ((13839, 13852), 'numpy.asarray', 'np.asarray', (['H'], {}), '(H)\n', (13849, 13852), True, 'import numpy as np\n'), ((13855, 13869), 'numpy.asarray', 'np.asarray', (['PC'], {}), '(PC)\n', (13865, 13869), True, 'import numpy as np\n'), ((13877, 13891), 'numpy.asarray', 'np.asarray', (['PC'], {}), '(PC)\n', (13887, 13891), True, 'import numpy as np\n'), ((13894, 13907), 'numpy.asarray', 'np.asarray', (['L'], {}), '(L)\n', (13904, 13907), True, 'import numpy as np\n')]
|
import mxnet as mx
import mxnet.ndarray as nd
import mxnet.gluon as gluon
import gluonnlp
from mxnet.io import NDArrayIter
from tqdm import tqdm
import json
import argparse
import pandas as pd
import os
import sys
import numpy as np
from gensim.corpora import Dictionary
from sklearn.metrics import recall_score, confusion_matrix
def load_data(trainFile, dct, ctx = mx.cpu(0)):
labels = []
num_lines = sum(1 for line in open(trainFile))
array = nd.ones((num_lines, SEQ_LENGTH), dtype='float32', ctx = ctx)
print("Loading data: ")
pbar = tqdm(total = num_lines)
with open(trainFile) as f:
for i, line in enumerate(f):
l = json.loads(line)
text = l['tokenized_text']
label = l['type']
labels.append(label)
array[i] = tokens_to_idx(text, dct)
pbar.update(1)
pbar.close()
return array, label_binarize(labels, ctx)
def tokens_to_idx(tokens, dct, ctx = mx.cpu(0)):
array = [dct.token2id[token] if token in dct.token2id else -1 for token in tokens]
if len(array) > SEQ_LENGTH:
array = array[0:SEQ_LENGTH]
else:
array.extend([-1 for i in range(0, SEQ_LENGTH - len(array))])
return nd.array(array, ctx = ctx)
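# Illustrative sketch, added for clarity (not part of the original script): tokens_to_idx maps
# each token through the gensim Dictionary, truncates to SEQ_LENGTH, and uses -1 both for padding
# and for out-of-vocabulary tokens. The toy dictionary below is an assumption, and SEQ_LENGTH
# must already be set (in this script it comes from the CLI arguments parsed in __main__).
def _tokens_to_idx_example():
    toy_dct = Dictionary([['fake', 'news']])
    return tokens_to_idx(['fake', 'story'], toy_dct)  # NDArray of length SEQ_LENGTH, OOV/padding = -1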
def label_binarize(labels, ctx = mx.cpu(0)):
lab = nd.zeros(len(labels), ctx = ctx)
for i, label in enumerate(labels):
if label == 'fake':
lab[i] = 1
return lab
def recall(y, y_hat):
y = y.asnumpy()
y_hat = y_hat.asnumpy()
return recall_score(y, y_hat), confusion_matrix(y, y_hat).ravel()
class LSTM(gluon.Block):
def __init__(self, vocab_size, num_embed, num_hidden, num_layers, dropout, **kwargs):
super(LSTM, self).__init__(**kwargs)
with self.name_scope():
self.encoder = gluon.nn.Embedding(vocab_size, num_embed)
self.LSTM1 = gluon.rnn.LSTM(num_embed, num_layers, layout = 'NTC', bidirectional = True)
self.dropout = gluon.nn.Dropout(dropout)
self.fc1 = gluon.nn.Dense(1, activation='sigmoid')
def forward(self, inputs, hidden):
emb = self.encoder(inputs)
output, hidden = self.LSTM1(emb, hidden)
output = self.dropout(output)
output = self.fc1(output)
return output, hidden
def begin_state(self, *args, **kwargs):
return self.LSTM1.begin_state(*args, **kwargs)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Arguments for LSTM model')
parser.add_argument('--test', nargs='+', type=str, help = "Validation set file", required=True)
parser.add_argument('--input', type=str, help = "Input directory for the model files")
parser.add_argument('--dictFile', type=str, help = "Path to the dictionary file")
    parser.add_argument('--SEQ_LENGTH', type = int, help = "Fixed length to pad or shrink text to")
    parser.add_argument('--EMBEDDING_DIM', type = int, help = "Size of the embedding dimension")
    parser.add_argument('--HIDDEN', type = int, help = "Size of the hidden layer")
    parser.add_argument('--LAYERS', type = int, help = "Number of hidden layers")
    parser.add_argument('--DROPOUT', type = float, help = "Dropout rate")
parser.add_argument('--BATCH_SIZE', type = int, help = "Batch size")
parser.add_argument('--utils', type=str, help = "Helper directory")
parser.add_argument('--db', type=str, help = "DB name", required=True)
parser.add_argument('--collection', type=str, help = "DB collection")
parser.add_argument('--host', type=str, help = "DB host")
parser.add_argument('--port', type=int, help = "Port number of db")
args = parser.parse_args()
sys.path.append(args.utils)
from register_experiment import Register
testFiles = args.test
SEQ_LENGTH = args.SEQ_LENGTH
EMBEDDING_DIM = args.EMBEDDING_DIM
HIDDEN = args.HIDDEN
LAYERS = args.LAYERS
DROPOUT = args.DROPOUT
BATCH_SIZE = args.BATCH_SIZE
ctx = mx.gpu(1)
files = []
# r=root, d=directories, f = files
for r, d, f in os.walk(args.input):
for file in f:
files.append(os.path.join(r, file))
files.sort()
r = Register(args.host, args.port, args.db, args.collection)
print(r.getLastExperiment())
pbar = tqdm(len(testFiles))
for i, test_file in enumerate(testFiles):
dct = Dictionary.load(args.dictFile)
array, labels = load_data(test_file, dct)
acc = mx.metric.Accuracy()
accuracy = []
for j, model in enumerate(files):
recall_list = []
cfMatrix = []
net = LSTM(len(dct), EMBEDDING_DIM, HIDDEN, LAYERS, DROPOUT)
net.load_parameters(model, ctx=ctx)
hidden = net.begin_state(func=mx.nd.zeros, batch_size=BATCH_SIZE, ctx = ctx)
nd_iter = NDArrayIter(data={'data':array},
label={'softmax_label':labels},
batch_size=BATCH_SIZE)
for batch in nd_iter:
output, _ = net(batch.data[0].copyto(ctx), hidden)
pred = output > 0.5
y = batch.label[0]
acc.update(y, pred)
rec, mat = recall(y, pred)
recall_list.append(rec)
cfMatrix.append(mat)
accuracy.append(acc.get()[1])
r.addEpochs(j, {'accuracy' : acc.get()[1], 'recall' : np.mean(recall_list), 'Confusion Matrix' : list(map(int, sum(cfMatrix)))}, r.getLastExperiment() + 1, 'valid')
pbar.update(1)
pbar.close()
r.closeExperiment(r.getLastExperiment() + 1)
|
[
"mxnet.ndarray.array",
"argparse.ArgumentParser",
"os.walk",
"mxnet.gluon.nn.Dropout",
"numpy.mean",
"os.path.join",
"sys.path.append",
"json.loads",
"gensim.corpora.Dictionary.load",
"mxnet.io.NDArrayIter",
"mxnet.gpu",
"tqdm.tqdm",
"mxnet.gluon.rnn.LSTM",
"register_experiment.Register",
"mxnet.metric.Accuracy",
"sklearn.metrics.recall_score",
"mxnet.cpu",
"mxnet.ndarray.ones",
"mxnet.gluon.nn.Dense",
"sklearn.metrics.confusion_matrix",
"mxnet.gluon.nn.Embedding"
] |
[((368, 377), 'mxnet.cpu', 'mx.cpu', (['(0)'], {}), '(0)\n', (374, 377), True, 'import mxnet as mx\n'), ((450, 508), 'mxnet.ndarray.ones', 'nd.ones', (['(num_lines, SEQ_LENGTH)'], {'dtype': '"""float32"""', 'ctx': 'ctx'}), "((num_lines, SEQ_LENGTH), dtype='float32', ctx=ctx)\n", (457, 508), True, 'import mxnet.ndarray as nd\n'), ((544, 565), 'tqdm.tqdm', 'tqdm', ([], {'total': 'num_lines'}), '(total=num_lines)\n', (548, 565), False, 'from tqdm import tqdm\n'), ((878, 887), 'mxnet.cpu', 'mx.cpu', (['(0)'], {}), '(0)\n', (884, 887), True, 'import mxnet as mx\n'), ((1136, 1160), 'mxnet.ndarray.array', 'nd.array', (['array'], {'ctx': 'ctx'}), '(array, ctx=ctx)\n', (1144, 1160), True, 'import mxnet.ndarray as nd\n'), ((1197, 1206), 'mxnet.cpu', 'mx.cpu', (['(0)'], {}), '(0)\n', (1203, 1206), True, 'import mxnet as mx\n'), ((2220, 2283), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Arguments for LSTM model"""'}), "(description='Arguments for LSTM model')\n", (2243, 2283), False, 'import argparse\n'), ((3429, 3456), 'sys.path.append', 'sys.path.append', (['args.utils'], {}), '(args.utils)\n', (3444, 3456), False, 'import sys\n'), ((3696, 3705), 'mxnet.gpu', 'mx.gpu', (['(1)'], {}), '(1)\n', (3702, 3705), True, 'import mxnet as mx\n'), ((3771, 3790), 'os.walk', 'os.walk', (['args.input'], {}), '(args.input)\n', (3778, 3790), False, 'import os\n'), ((3868, 3924), 'register_experiment.Register', 'Register', (['args.host', 'args.port', 'args.db', 'args.collection'], {}), '(args.host, args.port, args.db, args.collection)\n', (3876, 3924), False, 'from register_experiment import Register\n'), ((1406, 1428), 'sklearn.metrics.recall_score', 'recall_score', (['y', 'y_hat'], {}), '(y, y_hat)\n', (1418, 1428), False, 'from sklearn.metrics import recall_score, confusion_matrix\n'), ((4036, 4066), 'gensim.corpora.Dictionary.load', 'Dictionary.load', (['args.dictFile'], {}), '(args.dictFile)\n', (4051, 4066), False, 'from gensim.corpora import Dictionary\n'), ((4119, 4139), 'mxnet.metric.Accuracy', 'mx.metric.Accuracy', ([], {}), '()\n', (4137, 4139), True, 'import mxnet as mx\n'), ((634, 650), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (644, 650), False, 'import json\n'), ((1662, 1703), 'mxnet.gluon.nn.Embedding', 'gluon.nn.Embedding', (['vocab_size', 'num_embed'], {}), '(vocab_size, num_embed)\n', (1680, 1703), True, 'import mxnet.gluon as gluon\n'), ((1720, 1791), 'mxnet.gluon.rnn.LSTM', 'gluon.rnn.LSTM', (['num_embed', 'num_layers'], {'layout': '"""NTC"""', 'bidirectional': '(True)'}), "(num_embed, num_layers, layout='NTC', bidirectional=True)\n", (1734, 1791), True, 'import mxnet.gluon as gluon\n'), ((1814, 1839), 'mxnet.gluon.nn.Dropout', 'gluon.nn.Dropout', (['dropout'], {}), '(dropout)\n', (1830, 1839), True, 'import mxnet.gluon as gluon\n'), ((1854, 1893), 'mxnet.gluon.nn.Dense', 'gluon.nn.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1868, 1893), True, 'import mxnet.gluon as gluon\n'), ((4425, 4518), 'mxnet.io.NDArrayIter', 'NDArrayIter', ([], {'data': "{'data': array}", 'label': "{'softmax_label': labels}", 'batch_size': 'BATCH_SIZE'}), "(data={'data': array}, label={'softmax_label': labels},\n batch_size=BATCH_SIZE)\n", (4436, 4518), False, 'from mxnet.io import NDArrayIter\n'), ((1430, 1456), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y', 'y_hat'], {}), '(y, y_hat)\n', (1446, 1456), False, 'from sklearn.metrics import recall_score, confusion_matrix\n'), ((3825, 3846), 'os.path.join', 
'os.path.join', (['r', 'file'], {}), '(r, file)\n', (3837, 3846), False, 'import os\n'), ((4856, 4876), 'numpy.mean', 'np.mean', (['recall_list'], {}), '(recall_list)\n', (4863, 4876), True, 'import numpy as np\n')]
|
# coding: utf-8
# In[1]:
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
import tensorflow as tf
import numpy as np
import pandas as pd
import time
from numpy import genfromtxt
from scipy import stats
# In[2]:
start_time = time.time()
# In[3]:
def read_data(file_name):
df = pd.read_csv(file_name, sep='\t', header=None)
return df
# In[4]:
df = read_data('lezhin_public_dataset_training.tsv')
# In[5]:
# df.iloc[:, :20]
del df[7], df[8], df[16], df[18]
# In[6]:
df.describe()
# In[7]:
features = df.iloc[:, 1:].values
labels = df.iloc[:, :1].values
print(stats.describe(features).variance)
print(features.shape, labels.shape)
# In[8]:
rnd_indices = np.random.rand(len(features)) < 0.70
train_x = features[rnd_indices]
train_y = labels[rnd_indices]
test_x = features[~rnd_indices]
test_y = labels[~rnd_indices]
print("train row count : %d, test row count : %d" % (train_x.shape[0], test_x.shape[0]))
feature_count = train_x.shape[1]
label_count = train_y.shape[1]
print(feature_count, label_count)
# In[9]:
training_epochs = 90
learning_rate = 0.01
cost_history = np.empty(shape=[1],dtype=float)
nb_classes = 2
X = tf.placeholder(tf.float32,[None,feature_count])
Y = tf.placeholder(tf.int32,[None,label_count])
Y_one_hot = tf.one_hot(Y, nb_classes) # one hot
print("one_hot", Y_one_hot)
Y_one_hot = tf.reshape(Y_one_hot, [-1, nb_classes])
print("reshape", Y_one_hot)
# In[10]:
def init_weights(shape):
return tf.Variable(tf.random_normal(shape, stddev=0.1)), tf.Variable(tf.random_normal([shape[1]]))
def make_hidden_layer(previous_h, weight, bias, p_keep_hidden, is_dropout=True):
h = tf.nn.relu(tf.matmul(previous_h, weight) + bias)
if is_dropout:
h = tf.nn.dropout(h, p_keep_hidden)
return h
def model(X, p_keep_hidden):
s_1 = feature_count + 2
s_2 = feature_count + 2
s_3 = feature_count
w_h, b = init_weights([feature_count, s_1])
w_h2, b2 = init_weights([s_1, s_2])
w_h3, b3 = init_weights([s_2, s_3])
w_o, b_o = init_weights([s_3, nb_classes])
h = make_hidden_layer(X, w_h, b, p_keep_hidden)
h2 = make_hidden_layer(h, w_h2, b2, p_keep_hidden)
h3 = make_hidden_layer(h2, w_h3, b3, p_keep_hidden, False)
return tf.matmul(h3, w_o) + b_o
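# Illustrative note, added for clarity: with the sizes above this is a three-hidden-layer MLP
# with ReLU activations, dropout on the first two hidden layers, and a linear output producing
# nb_classes logits. As a hypothetical example, if feature_count were 20 the shapes would be
#   X:[None, 20] -> h:[None, 22] -> h2:[None, 22] -> h3:[None, 20] -> logits:[None, 2]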
# In[11]:
p_keep_hidden = tf.placeholder("float")
h0 = model(X, p_keep_hidden)
# In[12]:
# Cross entropy cost/loss
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=h0, labels=Y_one_hot))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# In[13]:
prediction = tf.argmax(h0, 1)
correct_prediction = tf.equal(prediction, tf.argmax(Y_one_hot, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# In[14]:
print(train_x.shape, train_y.shape)
print(test_x.shape, test_y.shape)
print(X.shape, Y.shape)
training_dropout_h = 0.95
batch_size = 2000
batch_length = int(train_x.shape[0] / batch_size)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(training_epochs + 1):
for batch_num in range(batch_length):
start_idx = batch_num * batch_size
end_idx = (train_x.shape[0] - 1) if batch_num == batch_length - 1 else (batch_num + 1) * batch_size
if batch_num % 200 == 0 or batch_num == batch_length - 1:
print("batch num : %d / %d, index: %d ~ %d" % (batch_num, batch_length - 1, start_idx, end_idx))
sess.run(optimizer, feed_dict={X: train_x[start_idx:end_idx], Y: train_y[start_idx:end_idx], p_keep_hidden: training_dropout_h})
loss, acc = sess.run([cost, accuracy], feed_dict={
X: train_x, Y: train_y, p_keep_hidden: training_dropout_h})
cost_history = np.append(cost_history, acc)
if step % 4 == 0:
print("Step: {:5}\tLoss: {:.3f}\tAcc: {:.2%}".format(
step, loss, acc))
# Test model and check accuracy
pre = tf.argmax(h0, 1)
test_yy = np.transpose(test_y.ravel())
print(test_yy.shape)
correct_prediction = tf.equal(pre, test_yy)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Test Accuracy:', sess.run(accuracy, feed_dict={X: test_x, p_keep_hidden: 1.0}))
# In[15]:
print(cost_history.shape)
plt.plot(range(len(cost_history)),cost_history)
plt.axis([0,training_epochs,0,1])
plt.show()
# In[16]:
sess.close()
end_time = time.time()
print("processing time : %d seconds" % (end_time - start_time,))
|
[
"pandas.read_csv",
"numpy.empty",
"tensorflow.reshape",
"tensorflow.matmul",
"scipy.stats.describe",
"tensorflow.one_hot",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.placeholder",
"tensorflow.cast",
"numpy.append",
"tensorflow.equal",
"matplotlib.pyplot.show",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.random_normal",
"tensorflow.argmax",
"matplotlib.pyplot.axis",
"time.time",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.dropout"
] |
[((255, 266), 'time.time', 'time.time', ([], {}), '()\n', (264, 266), False, 'import time\n'), ((1126, 1158), 'numpy.empty', 'np.empty', ([], {'shape': '[1]', 'dtype': 'float'}), '(shape=[1], dtype=float)\n', (1134, 1158), True, 'import numpy as np\n'), ((1178, 1227), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, feature_count]'], {}), '(tf.float32, [None, feature_count])\n', (1192, 1227), True, 'import tensorflow as tf\n'), ((1230, 1275), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, label_count]'], {}), '(tf.int32, [None, label_count])\n', (1244, 1275), True, 'import tensorflow as tf\n'), ((1286, 1311), 'tensorflow.one_hot', 'tf.one_hot', (['Y', 'nb_classes'], {}), '(Y, nb_classes)\n', (1296, 1311), True, 'import tensorflow as tf\n'), ((1363, 1402), 'tensorflow.reshape', 'tf.reshape', (['Y_one_hot', '[-1, nb_classes]'], {}), '(Y_one_hot, [-1, nb_classes])\n', (1373, 1402), True, 'import tensorflow as tf\n'), ((2322, 2345), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {}), "('float')\n", (2336, 2345), True, 'import tensorflow as tf\n'), ((2598, 2614), 'tensorflow.argmax', 'tf.argmax', (['h0', '(1)'], {}), '(h0, 1)\n', (2607, 2614), True, 'import tensorflow as tf\n'), ((4403, 4439), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, training_epochs, 0, 1]'], {}), '([0, training_epochs, 0, 1])\n', (4411, 4439), True, 'import matplotlib.pyplot as plt\n'), ((4437, 4447), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4445, 4447), True, 'import matplotlib.pyplot as plt\n'), ((4485, 4496), 'time.time', 'time.time', ([], {}), '()\n', (4494, 4496), False, 'import time\n'), ((314, 359), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'sep': '"""\t"""', 'header': 'None'}), "(file_name, sep='\\t', header=None)\n", (325, 359), True, 'import pandas as pd\n'), ((2437, 2505), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'h0', 'labels': 'Y_one_hot'}), '(logits=h0, labels=Y_one_hot)\n', (2476, 2505), True, 'import tensorflow as tf\n'), ((2657, 2680), 'tensorflow.argmax', 'tf.argmax', (['Y_one_hot', '(1)'], {}), '(Y_one_hot, 1)\n', (2666, 2680), True, 'import tensorflow as tf\n'), ((2708, 2747), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (2715, 2747), True, 'import tensorflow as tf\n'), ((2957, 2969), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2967, 2969), True, 'import tensorflow as tf\n'), ((4021, 4037), 'tensorflow.argmax', 'tf.argmax', (['h0', '(1)'], {}), '(h0, 1)\n', (4030, 4037), True, 'import tensorflow as tf\n'), ((4131, 4153), 'tensorflow.equal', 'tf.equal', (['pre', 'test_yy'], {}), '(pre, test_yy)\n', (4139, 4153), True, 'import tensorflow as tf\n'), ((610, 634), 'scipy.stats.describe', 'stats.describe', (['features'], {}), '(features)\n', (624, 634), False, 'from scipy import stats\n'), ((1742, 1773), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h', 'p_keep_hidden'], {}), '(h, p_keep_hidden)\n', (1755, 1773), True, 'import tensorflow as tf\n'), ((2268, 2286), 'tensorflow.matmul', 'tf.matmul', (['h3', 'w_o'], {}), '(h3, w_o)\n', (2277, 2286), True, 'import tensorflow as tf\n'), ((2519, 2556), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (2541, 2556), True, 'import tensorflow as tf\n'), ((2992, 3025), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3023, 3025), True, 'import 
tensorflow as tf\n'), ((3807, 3835), 'numpy.append', 'np.append', (['cost_history', 'acc'], {}), '(cost_history, acc)\n', (3816, 3835), True, 'import numpy as np\n'), ((4184, 4223), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (4191, 4223), True, 'import tensorflow as tf\n'), ((1492, 1527), 'tensorflow.random_normal', 'tf.random_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (1508, 1527), True, 'import tensorflow as tf\n'), ((1542, 1570), 'tensorflow.random_normal', 'tf.random_normal', (['[shape[1]]'], {}), '([shape[1]])\n', (1558, 1570), True, 'import tensorflow as tf\n'), ((1673, 1702), 'tensorflow.matmul', 'tf.matmul', (['previous_h', 'weight'], {}), '(previous_h, weight)\n', (1682, 1702), True, 'import tensorflow as tf\n')]
|
from typing import Tuple
import numpy as np
import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset
def split_tabular_normal_only_train(
df: pd.DataFrame,
y_label: str = "label",
train_ratio: float = 0.7,
val_ratio: float = 0.7,
shuffle: bool = False,
) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""make dataset: train(normal), valid(normal), test(normal, abnormal)
Parameters
----------
    df : pd.DataFrame
        input dataframe containing the features and the label column
    y_label : str, optional
        name of the binary label column (0 = normal, 1 = abnormal), by default 'label'
    train_ratio : float, optional
        fraction of normal samples used for training, by default 0.7
val_ratio : float, optional
ratio between normal in valid and normal in test, by default 0.7
shuffle : bool, optional
shuffle when split the dataset, by default False
Returns
-------
Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]
normal_train, normal_val, normal_abnormal_test
"""
normal = df.loc[df[y_label] == 0, :].reset_index(drop=True)
abnormal_test = df.loc[df[y_label] == 1, :].reset_index(drop=True)
normal_train, normal_val = train_test_split(
normal, train_size=train_ratio, shuffle=shuffle
)
normal_val, normal_test = train_test_split(
normal_val, train_size=val_ratio, shuffle=shuffle
)
normal_val = normal_val.reset_index(drop=True)
normal_abnormal_test = pd.concat([normal_test, abnormal_test], axis=0).reset_index(
drop=True
)
return normal_train, normal_val, normal_abnormal_test
class tabularDataset(Dataset):
def __init__(self, x: np.ndarray, y: np.ndarray):
"""make Dataset (type cast to torch.float32)
Parameters
----------
        x : np.ndarray
            feature array
        y : np.ndarray
            label array
"""
super().__init__()
self.x = torch.tensor(x, dtype=torch.float32)
self.y = torch.tensor(y, dtype=torch.float32)
def __len__(self) -> int:
return self.x.shape[0]
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
return self.x[idx, :], self.y[idx]
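# A minimal end-to-end sketch, added for illustration (not part of the original module). The
# column name "label" and the batch size are assumptions; the sketch splits a frame into
# normal-only train/val sets plus a mixed test set and wraps the training split in a DataLoader.
def _example_pipeline(df: pd.DataFrame):
    from torch.utils.data import DataLoader

    normal_train, _val, _test = split_tabular_normal_only_train(df, y_label="label")
    train_ds = tabularDataset(
        normal_train.drop(columns=["label"]).values,
        normal_train["label"].values,
    )
    return DataLoader(train_ds, batch_size=32, shuffle=True)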
|
[
"sklearn.model_selection.train_test_split",
"pandas.concat",
"torch.tensor"
] |
[((1184, 1249), 'sklearn.model_selection.train_test_split', 'train_test_split', (['normal'], {'train_size': 'train_ratio', 'shuffle': 'shuffle'}), '(normal, train_size=train_ratio, shuffle=shuffle)\n', (1200, 1249), False, 'from sklearn.model_selection import train_test_split\n'), ((1294, 1361), 'sklearn.model_selection.train_test_split', 'train_test_split', (['normal_val'], {'train_size': 'val_ratio', 'shuffle': 'shuffle'}), '(normal_val, train_size=val_ratio, shuffle=shuffle)\n', (1310, 1361), False, 'from sklearn.model_selection import train_test_split\n'), ((1931, 1967), 'torch.tensor', 'torch.tensor', (['x'], {'dtype': 'torch.float32'}), '(x, dtype=torch.float32)\n', (1943, 1967), False, 'import torch\n'), ((1985, 2021), 'torch.tensor', 'torch.tensor', (['y'], {'dtype': 'torch.float32'}), '(y, dtype=torch.float32)\n', (1997, 2021), False, 'import torch\n'), ((1454, 1501), 'pandas.concat', 'pd.concat', (['[normal_test, abnormal_test]'], {'axis': '(0)'}), '([normal_test, abnormal_test], axis=0)\n', (1463, 1501), True, 'import pandas as pd\n')]
|
from flask import Flask, request, redirect, url_for, render_template
from ghost_jukebox import app, basic_auth, common, conf
from ghost_jukebox.models import info, card
import math
import os
from PIL import Image, ImageDraw, ImageFont
"""
Cards
Cards are the backbone of the Ghost Jukebox. They are the analog of Records, CDs, and Tapes
Cards are PVC ID Cards, probably, that have a cover image on one side and a QR Code on the back.
A Card, much like a Record, CD, or Tape, contains the essence of Music. In this case, the QR Code
references an index in the <card> table, which in turn references something playable from Spotify
(a track, album, artist, or playlist) or an online radio station.
Specifically, the QR Code is a link in the pattern of https://<host>/s//QR#### (I hope I don't
get more than 9999 of these). The link is meant to be read by the Ghost Jukebox itself, but will
also just bring up the info page on the referenced music item if scanned by a usual QR Code Reader.
This view handles the creation and editing of cards, as well as handling calls that might be gotten
from a scan of the QR code on the back of the card.
"""
# Types (Maybe I'll add to these!)
SPOTIFY_TRACK = 0
SPOTIFY_ALBUM = 1
SPOTIFY_ARTIST = 2
SPOTIFY_PLAYLIST = 3
ONLINE_RADIO = 4
SPOTIFY_TYPES = [SPOTIFY_TRACK, SPOTIFY_ALBUM, SPOTIFY_ARTIST, SPOTIFY_PLAYLIST]
TYPE_NAMES = {
SPOTIFY_TRACK: 'track',
SPOTIFY_ALBUM: 'album',
SPOTIFY_ARTIST: 'artist',
SPOTIFY_PLAYLIST: 'playlist',
ONLINE_RADIO:'radio'
}
FONT_PATH = '/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf'
# The images we generate are meant to fit on a standard CR-80 PVC ID Card
# Which has these dimensions in portrait
CARD_WIDTH = 638
CARD_HEIGHT = 1012
PATTERN_WIDTH = CARD_WIDTH * 2 # I write on _both_ sides of the ID Card
PATTERN_HEIGHT = CARD_HEIGHT
PATTERN_MARGIN = 300 # Put this margin in between patterns on the paper
PAPER_WIDTH = 85 * 30
PAPER_HEIGHT = 110 * 30
# These are the locations of the cards spread over the paper. Each sheet of 8.5 x 11 paper can hold only 4 cards, front and back
PATTERN_LOCATIONS = [
( int((PAPER_HEIGHT - PATTERN_MARGIN)/2 - PATTERN_WIDTH), int((PAPER_WIDTH - PATTERN_MARGIN)/2 - PATTERN_HEIGHT) ),
( int((PAPER_HEIGHT + PATTERN_MARGIN)/2), int((PAPER_WIDTH - PATTERN_MARGIN)/2 - PATTERN_HEIGHT) ),
( int((PAPER_HEIGHT - PATTERN_MARGIN)/2 - PATTERN_WIDTH), int((PAPER_WIDTH + PATTERN_MARGIN)/2) ),
( int((PAPER_HEIGHT + PATTERN_MARGIN)/2), int((PAPER_WIDTH + PATTERN_MARGIN)/2) )
]
# Helper Funcs: the files pertinent to the individual cards are stored in a directory given by the code:
def static_dir(code=None, code_num=None):
if code_num:
code = card.strify(code_num)
return "cards/qr{}".format(code)
def full_dir(code=None, code_num=None):
return "/home/pi/server/ghost_jukebox/static/{}".format(static_dir(code, code_num))
@app.route('/s//QR<code>')
@basic_auth.login_required
def get_qr_info(code):
card_info = card.get_card_info(code)
if not card_info:
return 'blah'
if card_info.card_type == SPOTIFY_TRACK:
return redirect(url_for('track_info', track_id=card_info.item_id))
elif card_info.card_type == SPOTIFY_ALBUM:
return redirect(url_for('album_info', album_id=card_info.item_id))
elif card_info.card_type == SPOTIFY_ARTIST:
return redirect(url_for('artist_info', artist_id=card_info.item_id))
elif card_info.card_type == SPOTIFY_PLAYLIST:
return redirect(url_for('playlist_info', playlist_id=card_info.item_id))
elif card_info.card_type == ONLINE_RADIO:
# PENDING REDESIGN
return 'blah'
@app.route('/s//play/QR<code>')
@basic_auth.login_required
def play_qr(code):
return 'HAH!'
@app.route('/s//radio/QR<code>')
@basic_auth.login_required
def generate_radio(code):
return 'HAH'
@app.route('/s//card/edit')
@basic_auth.login_required
def edit_card_view():
return edit_card(
code = request.args.get('code'),
card_type = request.args.get('card_type'),
item_id = request.args.get('item_id'),
text = request.args.get('text')
)
def edit_card(errors=[], code=None, card_type=None, item_id=None, text=None):
if all([i is not None for i in [code, card_type, item_id, text]]):
return render_template(
'card_edit.html',
errors = errors,
editing = True,
code = code,
card_type = card_type,
item_id = item_id,
text = text,
card_img = url_for('static', filename="{}/final.jpg".format(static_dir(code))),
cache_breaker = common.random_string(5)
)
return render_template(
'card_edit.html',
errors = errors,
editing = False,
code = '',
card_type = 0,
item_id = '',
text = '',
cache_breaker = common.random_string(5)
)
@app.route('/s//QR<code>/view')
@basic_auth.login_required
def view_card(code):
info = card.get_card_info(code)
if info:
return edit_card(
code=code,
card_type=info.card_type,
item_id=info.item_id,
text=info.title
)
else:
return edit_card()
# This does the heavy lifting of actually saving a given card
@app.route('/s//card/save', methods=['GET', 'POST'])
@basic_auth.login_required
def save_card():
if request.method == 'GET':
card_type = request.args.get('card_type')
item_id = request.args.get('item_id')
text = request.args.get('text')
specific_code = request.args.get('code')
image_url = request.args.get('image_url')
elif request.method == 'POST':
card_type = request.form.get('card_type')
item_id = request.form.get('item_id')
text = request.form.get('text')
specific_code = request.form.get('code')
image_url = request.form.get('image_url')
if not card_type or not item_id or not text:
return edit_card(errors='Fully specify the form!')
code = specific_code if specific_code else card.get_next_code()
carddir = full_dir(code)
# try first to get an image upload
app.logger.info(request.files)
uploaded_file = common.save_file(request, 'image_upload', carddir)
if not uploaded_file:
# otherwise, see if the image url is specified
if image_url:
uploaded_file = common.download_image(image_url, carddir)
if not uploaded_file:
return edit_card(errors='Failed to download image. Is that the correct file?')
cover_img = Image.open(os.path.join(carddir, uploaded_file))
qr_image = get_qr_image(code)
create_card_pattern(qr_image, cover_img, code, text)
if specific_code:
card.update(card.CardInfo(code, card_type, item_id, text))
else:
card.insert(card.CardInfo(code, card_type, item_id, text))
return redirect(url_for('view_card', code=code))
"""
So: it turns out that the Raspberry Pi is pretty bad at the qr code generation (it takes foreeeevver)
I pregenerated 1000 qr codes using this code (will hopefully remember to do it again when I run out):
from qrcode.main import make, QRCode
from qrcode.image.styledpil import *
from qrcode.image.styles.colormasks import *
from qrcode.image.styles.moduledrawers import *
from qrcode.constants import ERROR_CORRECT_Q
def make_code(code):
qr = QRCode(error_correction=ERROR_CORRECT_Q)
qr.add_data("https://<HOST>/s//QR{0:04d}".format(code))
qr_image = qr.make_image(
image_factory=StyledPilImage,
module_drawer=RoundedModuleDrawer(),
color_mask=RadialGradiantColorMask(center_color = PURPLE_RGBs, edge_color = (0,0,0)),
image_path="/Users/nhawkins/Downloads/ghost2.png"
)
d = '../ghost-jukebox/ghost_jukebox/static/qr_codes/qr{0:04d}'.format(code)
os.mkdir(d)
qr_image.save("{}/qr_code.jpg".format(d))
return qr_image
for i in range(1, 1000):
    make_code(i)
Oh, and! I wrote the code to allow QR Codes with Fancy Styles, that is in another of my repos and in a PR
to python-qrcode
"""
def get_qr_image(code):
filename = full_dir(code) + "/qr_code.jpg"
qr_image = Image.open(filename)
return qr_image
# Arrange an individual card image, like
# +----------+----------+
# | | id |
# |xxxxxxxxxx| QR x x |
# |xxcoverxxx| x xx |
# |xxxxxxxxxx| xx xx |
# |xxxxxxxxxx| x x x |
# | title | title |
# | | |
# +----------+----------+
def create_card_pattern(qr_code_img, cover_img, code, text):
card = Image.new("RGB", (PATTERN_WIDTH, PATTERN_HEIGHT), (255,255,255))
# put the cover image on the left side, taking up the full width and the proportionate height
cover_width, cover_height = cover_img.size
cover_img = cover_img.resize((CARD_WIDTH, int(cover_height * CARD_WIDTH / cover_width)), Image.LANCZOS)
cover_width, cover_height = cover_img.size
card.paste(cover_img, (0, int((CARD_HEIGHT - cover_height) / 2)))
cover_img_bottom = int((CARD_HEIGHT + cover_height) / 2)
# put the QR Code on the right side, taking up the full width and the proportionate height (but the QR code
# has margin built in, so it won't actually take the full width)
qr_code_width, qr_code_height = qr_code_img.size
qr_code_img = qr_code_img.resize((CARD_WIDTH, int(qr_code_height * CARD_WIDTH / qr_code_width)), Image.LANCZOS)
qr_code_width, qr_code_height = qr_code_img.size
qr_code_top = int((CARD_HEIGHT - qr_code_height) / 2)
qr_code_bottom = int((CARD_HEIGHT + qr_code_height) / 2)
card.paste(qr_code_img, (CARD_WIDTH, qr_code_top))
# Draw the rectangles, so that I know where to cut out
carddraw = ImageDraw.Draw(card)
carddraw.rectangle((0,0,CARD_WIDTH - 1, CARD_HEIGHT - 1), outline=(127,127,127))
carddraw.rectangle((CARD_WIDTH,0,CARD_WIDTH*2 - 1, CARD_HEIGHT - 1), outline=(127,127,127))
# Draw the text "QR####" on top of the qr code
text_box(
'QR{}'.format(code),
carddraw,
font(FONT_PATH, 16),
(CARD_WIDTH + 60, qr_code_top - 20, CARD_WIDTH - 120, 20),
horizontal_allignment = ALLIGNMENT_CENTER,
vertical_allignment = ALLIGNMENT_BOTTOM,
fill=(0,0,0)
)
# Draw the text for the given card under the Cover Image
text_box(
text,
carddraw,
font(FONT_PATH, 32),
(60, cover_img_bottom + 30, CARD_WIDTH - 120, 20),
horizontal_allignment = ALLIGNMENT_CENTER,
vertical_allignment = ALLIGNMENT_TOP,
fill=(0,0,0)
)
# Draw the text for the given card under the qr code
text_box(
text,
carddraw,
font(FONT_PATH, 32),
(CARD_WIDTH + 60, qr_code_bottom, CARD_WIDTH - 120, 20),
horizontal_allignment = ALLIGNMENT_CENTER,
vertical_allignment = ALLIGNMENT_TOP,
fill=(0,0,0)
)
# save the image!
card.save(full_dir(code) + "/final.jpg")
@app.route('/s//QRCards')
@basic_auth.login_required
def all_cards():
card_infos = card.get_all_sorted()
return render_template(
'all_cards.html',
cards=card_infos,
type_names=TYPE_NAMES
)
# This method generates a PDF for printing out multiple cards at a time
# it takes in an optional first and last GET parameter, with which to limit the cards displayed,
# and puts the cards 4-to-a-page into a PDF. The call to card.get_all_sorted ensures that
# the cards are displayed sorted primarily by type (track, album, artist, playlist, radio)
# and secondarily alphabetically by the "title"
@app.route('/s//QRCards.pdf')
@basic_auth.login_required
def make_and_view_pdf():
largest = card.get_largest_code()
first = request.args.get('first')
try:
first = min(max(int(first),1), largest - 1)
except:
first = 1
last = request.args.get('last')
try:
last = max(min(int(last), largest), 1)
except:
last = largest
app.logger.info('Making PDF of {}-{} when the largest QR Codes is {}'.format(first, last, largest))
card_infos = card.get_all_sorted(first, last)
CARDS_PER_PAGE = 4
pages = split_list(list([info.code for info in card_infos]), CARDS_PER_PAGE)
static_file = 'QRCards{}-{}.pdf'.format(first, last)
file = '/home/pi/server/ghost_jukebox/static/{}'.format(static_file)
for i, page in enumerate(pages):
make_pdf_page(file, page, i == 0)
return redirect(url_for('static', filename=static_file))
# This function just splits liszt into lists of no more than max_size length
def split_list(liszt, max_size):
return [
liszt[i*max_size : i*max_size + max_size]
for i in range(int(math.ceil(float(len(liszt))/max_size)))
]
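# For example (illustrative): split_list([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]];
# each chunk holds at most max_size items and only the last chunk may be shorter.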
# Arranges an individual page of cards, like:
# +-------------+
# | +--+ +--+ |
# | +b-+ +d-+ |
# | +--+ +--+ |
# | |
# | +--+ +--+ |
# | +a-+ +c-+ |
# | +--+ +--+ |
# +-------------+
def make_pdf_page(file, page, first):
image_paths = ['{}/final.jpg'.format(full_dir(code)) for code in page]
images = [Image.open(path) for path in image_paths]
pdf_page = Image.new("RGB", (PAPER_HEIGHT, PAPER_WIDTH), (255,255,255))
for i in range(len(images)):
pdf_page.paste(images[i], PATTERN_LOCATIONS[i])
pdf_page = pdf_page.transpose(Image.ROTATE_90)
pdf_page.save(
file,
resolution = 300,
title = 'QR Cards',
author = '<NAME>',
append = not first # Append this image to the PDF file if this is not the first, otherwise create the file
)
"""
The Text Box
Probably deserves its own place but I think it will only get used by this file
Oh Well
...
Draws the text <text> on the ImageDraw <image_draw> in the box (specified as a 4-ple of [x,y,width,height])
with the font <font> and the alignments as given. Passes other arguments to the ImageDraw.text function
(for example, fill is a good one to use here).
Can be used to center text horizontally and vertically, as well as right-align and bottom-align (although it defaults to
left- and top-alignment). Nothing is done to prevent overflow, but the y and height values from the box will be used for vertical
alignment
Example usage:
img = Image.new("RGB", (300,300), (255,255,255))
img_draw = ImageDraw.Draw(img)
text_box(
"this is a text\n that respects linebreaks and will also break on spaces",
img_draw,
font("/Library/Fonts/Times New Roman Bold Italic.ttf", 16),
(20, 20, 260,260),
ALLIGNMENT_RIGHT,
ALLIGNMENT_CENTER,
fill=(255,0,255)
)
img.show()
"""
# The various alignments.
# horizontal_allignment can take ALLIGNMENT_LEFT, ALLIGNMENT_CENTER, and ALLIGNMENT_RIGHT
# vertical_allignment can take ALLIGNMENT_TOP, ALLIGNMENT_CENTER, and ALLIGNMENT_BOTTOM
ALLIGNMENT_LEFT = 0
ALLIGNMENT_CENTER = 1
ALLIGNMENT_RIGHT = 2
ALLIGNMENT_TOP = 3
ALLIGNMENT_BOTTOM = 4
def text_box(text, image_draw, font, box, horizontal_allignment = ALLIGNMENT_LEFT, vertical_allignment = ALLIGNMENT_TOP, **kwargs):
x = box[0]
y = box[1]
width = box[2]
height = box[3]
lines = text.split('\n')
true_lines = []
for line in lines:
if font.getsize(line)[0] <= width:
true_lines.append(line)
else:
current_line = ''
for word in line.split(' '):
if font.getsize(current_line + word)[0] <= width:
current_line += ' ' + word
else:
true_lines.append(current_line)
current_line = word
true_lines.append(current_line)
x_offset = y_offset = 0
lineheight = font.getsize(true_lines[0])[1] * 1.2 # Give a margin of 0.2x the font height
if vertical_allignment == ALLIGNMENT_CENTER:
y = int(y + height / 2)
y_offset = - (len(true_lines) * lineheight) / 2
elif vertical_allignment == ALLIGNMENT_BOTTOM:
y = int(y + height)
y_offset = - (len(true_lines) * lineheight)
for line in true_lines:
linewidth = font.getsize(line)[0]
if horizontal_allignment == ALLIGNMENT_CENTER:
x_offset = (width - linewidth) / 2
elif horizontal_allignment == ALLIGNMENT_RIGHT:
x_offset = width - linewidth
image_draw.text(
(int(x + x_offset), int(y + y_offset)),
line,
font=font,
**kwargs
)
y_offset += lineheight
# helper function for fonts
def font(font_path, size=12):
return ImageFont.truetype(font_path, size=size, encoding="unic")
|
[
"PIL.Image.new",
"flask.request.form.get",
"ghost_jukebox.app.route",
"ghost_jukebox.models.card.get_largest_code",
"ghost_jukebox.models.card.get_all_sorted",
"flask.url_for",
"ghost_jukebox.common.random_string",
"os.path.join",
"flask.request.args.get",
"ghost_jukebox.common.save_file",
"ghost_jukebox.app.logger.info",
"ghost_jukebox.common.download_image",
"flask.render_template",
"ghost_jukebox.models.card.get_card_info",
"PIL.ImageDraw.Draw",
"ghost_jukebox.models.card.get_next_code",
"ghost_jukebox.models.card.paste",
"ghost_jukebox.models.card.CardInfo",
"PIL.Image.open",
"PIL.ImageFont.truetype",
"ghost_jukebox.models.card.strify"
] |
[((2979, 3004), 'ghost_jukebox.app.route', 'app.route', (['"""/s//QR<code>"""'], {}), "('/s//QR<code>')\n", (2988, 3004), False, 'from ghost_jukebox import app, basic_auth, common, conf\n'), ((3736, 3766), 'ghost_jukebox.app.route', 'app.route', (['"""/s//play/QR<code>"""'], {}), "('/s//play/QR<code>')\n", (3745, 3766), False, 'from ghost_jukebox import app, basic_auth, common, conf\n'), ((3833, 3864), 'ghost_jukebox.app.route', 'app.route', (['"""/s//radio/QR<code>"""'], {}), "('/s//radio/QR<code>')\n", (3842, 3864), False, 'from ghost_jukebox import app, basic_auth, common, conf\n'), ((3938, 3964), 'ghost_jukebox.app.route', 'app.route', (['"""/s//card/edit"""'], {}), "('/s//card/edit')\n", (3947, 3964), False, 'from ghost_jukebox import app, basic_auth, common, conf\n'), ((5050, 5080), 'ghost_jukebox.app.route', 'app.route', (['"""/s//QR<code>/view"""'], {}), "('/s//QR<code>/view')\n", (5059, 5080), False, 'from ghost_jukebox import app, basic_auth, common, conf\n'), ((5442, 5493), 'ghost_jukebox.app.route', 'app.route', (['"""/s//card/save"""'], {'methods': "['GET', 'POST']"}), "('/s//card/save', methods=['GET', 'POST'])\n", (5451, 5493), False, 'from ghost_jukebox import app, basic_auth, common, conf\n'), ((11357, 11381), 'ghost_jukebox.app.route', 'app.route', (['"""/s//QRCards"""'], {}), "('/s//QRCards')\n", (11366, 11381), False, 'from ghost_jukebox import app, basic_auth, common, conf\n'), ((11983, 12011), 'ghost_jukebox.app.route', 'app.route', (['"""/s//QRCards.pdf"""'], {}), "('/s//QRCards.pdf')\n", (11992, 12011), False, 'from ghost_jukebox import app, basic_auth, common, conf\n'), ((3071, 3095), 'ghost_jukebox.models.card.get_card_info', 'card.get_card_info', (['code'], {}), '(code)\n', (3089, 3095), False, 'from ghost_jukebox.models import info, card\n'), ((5140, 5164), 'ghost_jukebox.models.card.get_card_info', 'card.get_card_info', (['code'], {}), '(code)\n', (5158, 5164), False, 'from ghost_jukebox.models import info, card\n'), ((6375, 6405), 'ghost_jukebox.app.logger.info', 'app.logger.info', (['request.files'], {}), '(request.files)\n', (6390, 6405), False, 'from ghost_jukebox import app, basic_auth, common, conf\n'), ((6426, 6476), 'ghost_jukebox.common.save_file', 'common.save_file', (['request', '"""image_upload"""', 'carddir'], {}), "(request, 'image_upload', carddir)\n", (6442, 6476), False, 'from ghost_jukebox import app, basic_auth, common, conf\n'), ((8480, 8500), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (8490, 8500), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((8943, 9009), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(PATTERN_WIDTH, PATTERN_HEIGHT)', '(255, 255, 255)'], {}), "('RGB', (PATTERN_WIDTH, PATTERN_HEIGHT), (255, 255, 255))\n", (8952, 9009), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((9971, 10021), 'ghost_jukebox.models.card.paste', 'card.paste', (['qr_code_img', '(CARD_WIDTH, qr_code_top)'], {}), '(qr_code_img, (CARD_WIDTH, qr_code_top))\n', (9981, 10021), False, 'from ghost_jukebox.models import info, card\n'), ((10101, 10121), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['card'], {}), '(card)\n', (10115, 10121), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((11443, 11464), 'ghost_jukebox.models.card.get_all_sorted', 'card.get_all_sorted', ([], {}), '()\n', (11462, 11464), False, 'from ghost_jukebox.models import info, card\n'), ((11477, 11551), 'flask.render_template', 'render_template', (['"""all_cards.html"""'], {'cards': 'card_infos', 'type_names': 'TYPE_NAMES'}), 
"('all_cards.html', cards=card_infos, type_names=TYPE_NAMES)\n", (11492, 11551), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((12078, 12101), 'ghost_jukebox.models.card.get_largest_code', 'card.get_largest_code', ([], {}), '()\n', (12099, 12101), False, 'from ghost_jukebox.models import info, card\n'), ((12115, 12140), 'flask.request.args.get', 'request.args.get', (['"""first"""'], {}), "('first')\n", (12131, 12140), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((12244, 12268), 'flask.request.args.get', 'request.args.get', (['"""last"""'], {}), "('last')\n", (12260, 12268), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((12482, 12514), 'ghost_jukebox.models.card.get_all_sorted', 'card.get_all_sorted', (['first', 'last'], {}), '(first, last)\n', (12501, 12514), False, 'from ghost_jukebox.models import info, card\n'), ((13649, 13711), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(PAPER_HEIGHT, PAPER_WIDTH)', '(255, 255, 255)'], {}), "('RGB', (PAPER_HEIGHT, PAPER_WIDTH), (255, 255, 255))\n", (13658, 13711), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((17098, 17155), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['font_path'], {'size': 'size', 'encoding': '"""unic"""'}), "(font_path, size=size, encoding='unic')\n", (17116, 17155), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2787, 2808), 'ghost_jukebox.models.card.strify', 'card.strify', (['code_num'], {}), '(code_num)\n', (2798, 2808), False, 'from ghost_jukebox.models import info, card\n'), ((5594, 5623), 'flask.request.args.get', 'request.args.get', (['"""card_type"""'], {}), "('card_type')\n", (5610, 5623), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((5648, 5675), 'flask.request.args.get', 'request.args.get', (['"""item_id"""'], {}), "('item_id')\n", (5664, 5675), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((5700, 5724), 'flask.request.args.get', 'request.args.get', (['"""text"""'], {}), "('text')\n", (5716, 5724), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((5749, 5773), 'flask.request.args.get', 'request.args.get', (['"""code"""'], {}), "('code')\n", (5765, 5773), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((5798, 5827), 'flask.request.args.get', 'request.args.get', (['"""image_url"""'], {}), "('image_url')\n", (5814, 5827), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((6281, 6301), 'ghost_jukebox.models.card.get_next_code', 'card.get_next_code', ([], {}), '()\n', (6299, 6301), False, 'from ghost_jukebox.models import info, card\n'), ((6799, 6835), 'os.path.join', 'os.path.join', (['carddir', 'uploaded_file'], {}), '(carddir, uploaded_file)\n', (6811, 6835), False, 'import os\n'), ((7118, 7149), 'flask.url_for', 'url_for', (['"""view_card"""'], {'code': 'code'}), "('view_card', code=code)\n", (7125, 7149), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((12865, 12904), 'flask.url_for', 'url_for', (['"""static"""'], {'filename': 'static_file'}), "('static', filename=static_file)\n", (12872, 12904), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((13591, 13607), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (13601, 13607), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3210, 3259), 'flask.url_for', 'url_for', 
(['"""track_info"""'], {'track_id': 'card_info.item_id'}), "('track_info', track_id=card_info.item_id)\n", (3217, 3259), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((4056, 4080), 'flask.request.args.get', 'request.args.get', (['"""code"""'], {}), "('code')\n", (4072, 4080), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((4102, 4131), 'flask.request.args.get', 'request.args.get', (['"""card_type"""'], {}), "('card_type')\n", (4118, 4131), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((4153, 4180), 'flask.request.args.get', 'request.args.get', (['"""item_id"""'], {}), "('item_id')\n", (4169, 4180), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((4202, 4226), 'flask.request.args.get', 'request.args.get', (['"""text"""'], {}), "('text')\n", (4218, 4226), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((5018, 5041), 'ghost_jukebox.common.random_string', 'common.random_string', (['(5)'], {}), '(5)\n', (5038, 5041), False, 'from ghost_jukebox import app, basic_auth, common, conf\n'), ((5887, 5916), 'flask.request.form.get', 'request.form.get', (['"""card_type"""'], {}), "('card_type')\n", (5903, 5916), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((5941, 5968), 'flask.request.form.get', 'request.form.get', (['"""item_id"""'], {}), "('item_id')\n", (5957, 5968), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((5993, 6017), 'flask.request.form.get', 'request.form.get', (['"""text"""'], {}), "('text')\n", (6009, 6017), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((6042, 6066), 'flask.request.form.get', 'request.form.get', (['"""code"""'], {}), "('code')\n", (6058, 6066), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((6091, 6120), 'flask.request.form.get', 'request.form.get', (['"""image_url"""'], {}), "('image_url')\n", (6107, 6120), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((6608, 6649), 'ghost_jukebox.common.download_image', 'common.download_image', (['image_url', 'carddir'], {}), '(image_url, carddir)\n', (6629, 6649), False, 'from ghost_jukebox import app, basic_auth, common, conf\n'), ((6973, 7018), 'ghost_jukebox.models.card.CardInfo', 'card.CardInfo', (['code', 'card_type', 'item_id', 'text'], {}), '(code, card_type, item_id, text)\n', (6986, 7018), False, 'from ghost_jukebox.models import info, card\n'), ((7050, 7095), 'ghost_jukebox.models.card.CardInfo', 'card.CardInfo', (['code', 'card_type', 'item_id', 'text'], {}), '(code, card_type, item_id, text)\n', (7063, 7095), False, 'from ghost_jukebox.models import info, card\n'), ((3332, 3381), 'flask.url_for', 'url_for', (['"""album_info"""'], {'album_id': 'card_info.item_id'}), "('album_info', album_id=card_info.item_id)\n", (3339, 3381), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((4756, 4779), 'ghost_jukebox.common.random_string', 'common.random_string', (['(5)'], {}), '(5)\n', (4776, 4779), False, 'from ghost_jukebox import app, basic_auth, common, conf\n'), ((3455, 3506), 'flask.url_for', 'url_for', (['"""artist_info"""'], {'artist_id': 'card_info.item_id'}), "('artist_info', artist_id=card_info.item_id)\n", (3462, 3506), False, 'from flask import Flask, request, redirect, url_for, render_template\n'), ((3582, 3637), 'flask.url_for', 
'url_for', (['"""playlist_info"""'], {'playlist_id': 'card_info.item_id'}), "('playlist_info', playlist_id=card_info.item_id)\n", (3589, 3637), False, 'from flask import Flask, request, redirect, url_for, render_template\n')]
|
from __future__ import division
from itertools import combinations
from collections import Counter
class Omega(object):
def __init__(self, comms1, comms2):
self.nodes1 = self.get_node_assignment(comms1)
self.nodes2 = self.get_node_assignment(comms2)
self.nodes = list(set().union([node for i, com in comms2.items() for node in com],
[node for i, com in comms1.items() for node in com]))
J, K, N, obs, tuples1, tuples2 = self.__observed()
exp = self.__expected(J, K, N, tuples1, tuples2)
self.omega_score = self.__calc_omega(obs, exp)
@staticmethod
def get_node_assignment(comms):
"""
        returns a dictionary with node-cluster assignments of the form {node_id: [cluster_1, cluster_3]}
:param comms:
:return:
"""
nodes = {}
for i, com in comms.items():
for node in com:
try:
nodes[node].append(i)
except KeyError:
nodes[node] = [i]
return nodes
@staticmethod
def num_of_common_clusters(u, v, nodes_dict):
"""
        return the number of clusters in which the pair (u, v) appears together
:param u:
:param v:
:param nodes_dict:
:return:
"""
try:
_sum = len(set(nodes_dict[u]) & set(nodes_dict[v]))
except KeyError:
_sum = 0
return _sum
def __observed(self):
N = 0
tuples1 = {}
J = 0
for u, v in combinations(self.nodes, 2):
N += 1
n = self.num_of_common_clusters(u, v, self.nodes1)
tuples1[(u, v)] = self.num_of_common_clusters(u, v, self.nodes1)
J = n if n > J else J
tuples2 = {}
K = 0
for u, v in combinations(self.nodes, 2):
n = self.num_of_common_clusters(u, v, self.nodes2)
tuples2[(u, v)] = self.num_of_common_clusters(u, v, self.nodes2)
K = n if n > K else K
A = {j: 0 for j in range(min(J, K)+1)}
for (u, v), n in tuples1.items():
try:
if n == tuples2[(u, v)]:
A[n] += 1
except KeyError:
pass
obs = sum(A[j]/N for j in range(min(J, K)+1))
return J, K, N, obs, tuples1, tuples2
@staticmethod
def __expected(J, K, N, tuples1, tuples2):
N1 = Counter(tuples1.values())
N2 = Counter(tuples2.values())
exp = sum((N1[j]*N2[j])/(N**2) for j in range(min(J, K)+1))
return exp
@staticmethod
def __calc_omega(obs, exp):
if exp == obs == 1:
return 1.0
else:
return (obs-exp)/(1-exp)
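# --- Usage sketch (illustrative only) ---
# Each cover maps a community id to a list of node ids; a node may appear in
# more than one community. The omega score equals 1.0 when the two covers agree
# on every node pair, and larger values indicate closer agreement beyond chance.
if __name__ == "__main__":
    cover_a = {0: ['a', 'b', 'c'], 1: ['c', 'd']}
    cover_b = {0: ['a', 'b'], 1: ['c', 'd']}
    print(Omega(cover_a, cover_b).omega_score)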
|
[
"itertools.combinations"
] |
[((1576, 1603), 'itertools.combinations', 'combinations', (['self.nodes', '(2)'], {}), '(self.nodes, 2)\n', (1588, 1603), False, 'from itertools import combinations\n'), ((1853, 1880), 'itertools.combinations', 'combinations', (['self.nodes', '(2)'], {}), '(self.nodes, 2)\n', (1865, 1880), False, 'from itertools import combinations\n')]
|
import os
import sys
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '../..'))
from utils.general_class import ModelPlugin
from utils.ortools_op import SolveMaxMatching
from utils.visual_op import matrix_image2big_image
from utils.writer_op import write_pkl, write_gif
from utils.tqdm_op import tqdm_range
from utils.eval_op import DisentanglemetricFactorMask, DisentanglemetricFactorJointMask
from utils.np_op import np_softmax
from tfops.transform_op import apply_tf_op, apply_tf_op_multi_output, apply_tf_op_multi_input
from tfops.train_op import get_train_op_v2
from tfops.lr_op import DECAY_DICT, DECAY_PARAMS_DICT
from tfops.nets import encoder1_64, decoder1_64
from tfops.loss import sigmoid_cross_entropy_without_mean, vae_kl_cost_weight
import tensorflow as tf
import numpy as np
class Model(ModelPlugin):
def __init__(self, dataset, logfilepath, args):
super().__init__(dataset, logfilepath, args)
self.build()
def build(self):
self.logger.info("Model building starts")
tf.reset_default_graph()
tf.set_random_seed(self.args.rseed)
self.input1 = tf.placeholder(tf.float32, shape = [self.args.nbatch, self.height, self.width, self.nchannel])
self.istrain = tf.placeholder(tf.bool, shape= [])
self.generate_sess()
# Encoding
self.encoder_net = encoder1_64
self.decoder_net = decoder1_64
# Encoder
self.mean_total, self.stddev_total = tf.split(self.encoder_net(self.input1, output_dim=2*self.args.nconti, scope='encoder', reuse=False)['output'], num_or_size_splits=2, axis=1)
self.stddev_total = tf.nn.softplus(self.stddev_total)
self.z_sample = tf.add(self.mean_total, tf.multiply(self.stddev_total, tf.random_normal([self.args.nbatch, self.args.nconti])))
self.dec_output = self.decoder_net(z=self.z_sample, output_channel=self.nchannel, scope="decoder", reuse=False)['output']
# Unary vector
self.rec_cost_vector = sigmoid_cross_entropy_without_mean(labels=self.input1, logits=self.dec_output)
self.rec_cost = tf.reduce_mean(self.rec_cost_vector)
self.loss_list = list()
for idx in range(self.args.nconti):
weight = tf.constant(np.array((idx+1)*[self.args.beta_min] + (self.args.nconti-idx-1)*[self.args.beta_max]), dtype=tf.float32)
kl_cost = vae_kl_cost_weight(mean=self.mean_total, stddev=self.stddev_total, weight=weight)
self.loss_list.append(self.rec_cost+kl_cost+tf.losses.get_regularization_loss())
# Decode
self.latent_ph = tf.placeholder(tf.float32, shape = [self.args.nbatch, self.args.nconti])
self.dec_output_ph = tf.nn.sigmoid(self.decoder_net(z=self.latent_ph, output_channel=self.nchannel, scope="decoder", reuse=True)['output'])
self.logger.info("Model building ends")
def decode(self, latent_input):
return apply_tf_op(inputs=latent_input, session=self.sess, input_gate=self.latent_ph, output_gate=self.dec_output_ph, batch_size=self.args.nbatch)
def set_up_train(self):
self.logger.info("Model setting up train starts")
if not hasattr(self, 'start_iter'): self.start_iter = 0
self.logger.info("Start iter: {}".format(self.start_iter))
decay_func = DECAY_DICT[self.args.dtype]
decay_params = DECAY_PARAMS_DICT[self.args.dtype][self.args.nbatch][self.args.dptype].copy()
decay_params['initial_step'] = self.start_iter
self.lr, update_step_op = decay_func(**decay_params)
self.update_step_op = [update_step_op]
var_list = [v for v in tf.trainable_variables() if 'encoder' in v.name] + [v for v in tf.trainable_variables() if 'decoder' in v.name]
with tf.control_dependencies(tf.get_collection("update_ops")):
self.train_op_list = [get_train_op_v2(tf.train.AdamOptimizer(learning_rate=self.lr, beta1=0.9, beta2=0.999), loss=self.loss_list[v], var_list=var_list) for v in range(self.args.nconti)]
self.logger.info("Model setting up train ends")
def run_batch(self, train_idx):
feed_dict = dict()
feed_dict[self.input1] = self.dataset.next_batch(batch_size=self.args.nbatch)[0]
feed_dict[self.istrain] = True
idx = min(train_idx, self.args.nconti-1)
self.sess.run([self.train_op_list[idx]], feed_dict=feed_dict)
def train(self, niter, piter, siter, save_dir=None, asset_dir=None):
self.logger.info("Model training starts")
final_iter = self.start_iter+niter
max_accuracy = -1
for iter_ in tqdm_range(self.start_iter, final_iter):
train_idx = (iter_ - self.start_iter)//piter
self.run_batch(train_idx)
if (iter_+1)%siter==0 or iter_+1==final_iter:
accuracy = self.evaluate()
self.latent_traversal_gif(path=asset_dir+'{}.gif'.format(iter_+1))
if max_accuracy==-1 or max_accuracy<accuracy:
self.save(iter_, save_dir)
self.logger.info("Save process")
max_accuracy = accuracy
self.logger.info("Model training ends")
def evaluate(self, print_option=False, eps=1e-8, nsample=1024):
total_mean, total_std = self.get_mean_std()
return DisentanglemetricFactorMask(mean=total_mean, std=total_std, nclasses=self.dataset.latents_sizes, sampler=self.dataset.next_batch_latent_fix_idx, print_option=print_option)
def get_mean_std(self):
total_mean, total_std = apply_tf_op_multi_output(inputs=self.image, session=self.sess, input_gate=self.input1, output_gate_list=[self.mean_total, self.stddev_total], batch_size=self.args.nbatch, train_gate=self.istrain)
return total_mean, total_std
def latent_traversal_gif(self, path, nimage=50, nmin=-1.0, nmax=1.0):
gif = list()
for i in range(nimage):
value = nmin + (nmax - nmin)*i/nimage
latent_conti = value*np.eye(self.args.nconti)
gif.append(matrix_image2big_image(np.expand_dims(self.decode(latent_input=latent_conti), axis=0)))
write_gif(content=gif, path=path)
|
[
"tensorflow.trainable_variables",
"tensorflow.get_collection",
"tensorflow.reset_default_graph",
"utils.eval_op.DisentanglemetricFactorMask",
"os.path.dirname",
"tensorflow.set_random_seed",
"tfops.transform_op.apply_tf_op_multi_output",
"tensorflow.placeholder",
"tensorflow.losses.get_regularization_loss",
"tfops.transform_op.apply_tf_op",
"tensorflow.reduce_mean",
"utils.tqdm_op.tqdm_range",
"tensorflow.random_normal",
"utils.writer_op.write_gif",
"tfops.loss.vae_kl_cost_weight",
"tfops.loss.sigmoid_cross_entropy_without_mean",
"numpy.array",
"tensorflow.nn.softplus",
"numpy.eye",
"tensorflow.train.AdamOptimizer"
] |
[((1050, 1074), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1072, 1074), True, 'import tensorflow as tf\n'), ((1083, 1118), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['self.args.rseed'], {}), '(self.args.rseed)\n', (1101, 1118), True, 'import tensorflow as tf\n'), ((1142, 1238), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[self.args.nbatch, self.height, self.width, self.nchannel]'}), '(tf.float32, shape=[self.args.nbatch, self.height, self.width,\n self.nchannel])\n', (1156, 1238), True, 'import tensorflow as tf\n'), ((1260, 1293), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'shape': '[]'}), '(tf.bool, shape=[])\n', (1274, 1293), True, 'import tensorflow as tf\n'), ((1655, 1688), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['self.stddev_total'], {}), '(self.stddev_total)\n', (1669, 1688), True, 'import tensorflow as tf\n'), ((2011, 2089), 'tfops.loss.sigmoid_cross_entropy_without_mean', 'sigmoid_cross_entropy_without_mean', ([], {'labels': 'self.input1', 'logits': 'self.dec_output'}), '(labels=self.input1, logits=self.dec_output)\n', (2045, 2089), False, 'from tfops.loss import sigmoid_cross_entropy_without_mean, vae_kl_cost_weight\n'), ((2114, 2150), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.rec_cost_vector'], {}), '(self.rec_cost_vector)\n', (2128, 2150), True, 'import tensorflow as tf\n'), ((2607, 2677), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[self.args.nbatch, self.args.nconti]'}), '(tf.float32, shape=[self.args.nbatch, self.args.nconti])\n', (2621, 2677), True, 'import tensorflow as tf\n'), ((2929, 3073), 'tfops.transform_op.apply_tf_op', 'apply_tf_op', ([], {'inputs': 'latent_input', 'session': 'self.sess', 'input_gate': 'self.latent_ph', 'output_gate': 'self.dec_output_ph', 'batch_size': 'self.args.nbatch'}), '(inputs=latent_input, session=self.sess, input_gate=self.\n latent_ph, output_gate=self.dec_output_ph, batch_size=self.args.nbatch)\n', (2940, 3073), False, 'from tfops.transform_op import apply_tf_op, apply_tf_op_multi_output, apply_tf_op_multi_input\n'), ((4602, 4641), 'utils.tqdm_op.tqdm_range', 'tqdm_range', (['self.start_iter', 'final_iter'], {}), '(self.start_iter, final_iter)\n', (4612, 4641), False, 'from utils.tqdm_op import tqdm_range\n'), ((5314, 5494), 'utils.eval_op.DisentanglemetricFactorMask', 'DisentanglemetricFactorMask', ([], {'mean': 'total_mean', 'std': 'total_std', 'nclasses': 'self.dataset.latents_sizes', 'sampler': 'self.dataset.next_batch_latent_fix_idx', 'print_option': 'print_option'}), '(mean=total_mean, std=total_std, nclasses=self.\n dataset.latents_sizes, sampler=self.dataset.next_batch_latent_fix_idx,\n print_option=print_option)\n', (5341, 5494), False, 'from utils.eval_op import DisentanglemetricFactorMask, DisentanglemetricFactorJointMask\n'), ((5547, 5751), 'tfops.transform_op.apply_tf_op_multi_output', 'apply_tf_op_multi_output', ([], {'inputs': 'self.image', 'session': 'self.sess', 'input_gate': 'self.input1', 'output_gate_list': '[self.mean_total, self.stddev_total]', 'batch_size': 'self.args.nbatch', 'train_gate': 'self.istrain'}), '(inputs=self.image, session=self.sess, input_gate=\n self.input1, output_gate_list=[self.mean_total, self.stddev_total],\n batch_size=self.args.nbatch, train_gate=self.istrain)\n', (5571, 5751), False, 'from tfops.transform_op import apply_tf_op, apply_tf_op_multi_output, apply_tf_op_multi_input\n'), ((6135, 6168), 'utils.writer_op.write_gif', 'write_gif', ([], {'content': 'gif', 
'path': 'path'}), '(content=gif, path=path)\n', (6144, 6168), False, 'from utils.writer_op import write_pkl, write_gif\n'), ((66, 91), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (81, 91), False, 'import os\n'), ((2389, 2475), 'tfops.loss.vae_kl_cost_weight', 'vae_kl_cost_weight', ([], {'mean': 'self.mean_total', 'stddev': 'self.stddev_total', 'weight': 'weight'}), '(mean=self.mean_total, stddev=self.stddev_total, weight=\n weight)\n', (2407, 2475), False, 'from tfops.loss import sigmoid_cross_entropy_without_mean, vae_kl_cost_weight\n'), ((1769, 1823), 'tensorflow.random_normal', 'tf.random_normal', (['[self.args.nbatch, self.args.nconti]'], {}), '([self.args.nbatch, self.args.nconti])\n', (1785, 1823), True, 'import tensorflow as tf\n'), ((2261, 2361), 'numpy.array', 'np.array', (['((idx + 1) * [self.args.beta_min] + (self.args.nconti - idx - 1) * [self.\n args.beta_max])'], {}), '((idx + 1) * [self.args.beta_min] + (self.args.nconti - idx - 1) *\n [self.args.beta_max])\n', (2269, 2361), True, 'import numpy as np\n'), ((3787, 3818), 'tensorflow.get_collection', 'tf.get_collection', (['"""update_ops"""'], {}), "('update_ops')\n", (3804, 3818), True, 'import tensorflow as tf\n'), ((5991, 6015), 'numpy.eye', 'np.eye', (['self.args.nconti'], {}), '(self.args.nconti)\n', (5997, 6015), True, 'import numpy as np\n'), ((2527, 2562), 'tensorflow.losses.get_regularization_loss', 'tf.losses.get_regularization_loss', ([], {}), '()\n', (2560, 2562), True, 'import tensorflow as tf\n'), ((3636, 3660), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (3658, 3660), True, 'import tensorflow as tf\n'), ((3699, 3723), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (3721, 3723), True, 'import tensorflow as tf\n'), ((3871, 3940), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.lr', 'beta1': '(0.9)', 'beta2': '(0.999)'}), '(learning_rate=self.lr, beta1=0.9, beta2=0.999)\n', (3893, 3940), True, 'import tensorflow as tf\n')]
|
import unittest
from scrapy.item import Item, Field
import six
class ItemTest(unittest.TestCase):
def assertSortedEqual(self, first, second, msg=None):
return self.assertEqual(sorted(first), sorted(second), msg)
def test_simple(self):
class TestItem(Item):
name = Field()
i = TestItem()
i['name'] = u'name'
self.assertEqual(i['name'], u'name')
def test_init(self):
class TestItem(Item):
name = Field()
i = TestItem()
self.assertRaises(KeyError, i.__getitem__, 'name')
i2 = TestItem(name=u'<NAME>')
self.assertEqual(i2['name'], u'<NAME>')
i3 = TestItem({'name': u'<NAME>'})
self.assertEqual(i3['name'], u'<NAME>')
i4 = TestItem(i3)
self.assertEqual(i4['name'], u'<NAME>')
self.assertRaises(KeyError, TestItem, {'name': u'<NAME>',
'other': u'foo'})
def test_invalid_field(self):
class TestItem(Item):
pass
i = TestItem()
self.assertRaises(KeyError, i.__setitem__, 'field', 'text')
self.assertRaises(KeyError, i.__getitem__, 'field')
def test_repr(self):
class TestItem(Item):
name = Field()
number = Field()
i = TestItem()
i['name'] = u'<NAME>'
i['number'] = 123
itemrepr = repr(i)
if six.PY2:
self.assertEqual(itemrepr,
"{'name': u'<NAME>', 'number': 123}")
else:
self.assertEqual(itemrepr,
"{'name': '<NAME>', 'number': 123}")
i2 = eval(itemrepr)
self.assertEqual(i2['name'], '<NAME>')
self.assertEqual(i2['number'], 123)
def test_private_attr(self):
class TestItem(Item):
name = Field()
i = TestItem()
i._private = 'test'
self.assertEqual(i._private, 'test')
def test_raise_getattr(self):
class TestItem(Item):
name = Field()
i = TestItem()
self.assertRaises(AttributeError, getattr, i, 'name')
def test_raise_setattr(self):
class TestItem(Item):
name = Field()
i = TestItem()
self.assertRaises(AttributeError, setattr, i, 'name', 'john')
def test_custom_methods(self):
class TestItem(Item):
name = Field()
def get_name(self):
return self['name']
def change_name(self, name):
self['name'] = name
i = TestItem()
self.assertRaises(KeyError, i.get_name)
i['name'] = u'lala'
self.assertEqual(i.get_name(), u'lala')
i.change_name(u'other')
self.assertEqual(i.get_name(), 'other')
def test_metaclass(self):
class TestItem(Item):
name = Field()
keys = Field()
values = Field()
i = TestItem()
i['name'] = u'John'
self.assertEqual(list(i.keys()), ['name'])
self.assertEqual(list(i.values()), ['John'])
i['keys'] = u'Keys'
i['values'] = u'Values'
self.assertSortedEqual(list(i.keys()), ['keys', 'values', 'name'])
self.assertSortedEqual(list(i.values()), [u'Keys', u'Values', u'John'])
def test_metaclass_inheritance(self):
class BaseItem(Item):
name = Field()
keys = Field()
values = Field()
class TestItem(BaseItem):
keys = Field()
i = TestItem()
i['keys'] = 3
self.assertEqual(list(i.keys()), ['keys'])
self.assertEqual(list(i.values()), [3])
def test_to_dict(self):
class TestItem(Item):
name = Field()
i = TestItem()
i['name'] = u'John'
self.assertEqual(dict(i), {'name': u'John'})
def test_copy(self):
class TestItem(Item):
name = Field()
item = TestItem({'name':'lower'})
copied_item = item.copy()
self.assertNotEqual(id(item), id(copied_item))
copied_item['name'] = copied_item['name'].upper()
self.assertNotEqual(item['name'], copied_item['name'])
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"scrapy.item.Field"
] |
[((4239, 4254), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4252, 4254), False, 'import unittest\n'), ((305, 312), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (310, 312), False, 'from scrapy.item import Item, Field\n'), ((485, 492), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (490, 492), False, 'from scrapy.item import Item, Field\n'), ((1271, 1278), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (1276, 1278), False, 'from scrapy.item import Item, Field\n'), ((1300, 1307), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (1305, 1307), False, 'from scrapy.item import Item, Field\n'), ((1864, 1871), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (1869, 1871), False, 'from scrapy.item import Item, Field\n'), ((2053, 2060), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (2058, 2060), False, 'from scrapy.item import Item, Field\n'), ((2231, 2238), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (2236, 2238), False, 'from scrapy.item import Item, Field\n'), ((2418, 2425), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (2423, 2425), False, 'from scrapy.item import Item, Field\n'), ((2881, 2888), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (2886, 2888), False, 'from scrapy.item import Item, Field\n'), ((2908, 2915), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (2913, 2915), False, 'from scrapy.item import Item, Field\n'), ((2937, 2944), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (2942, 2944), False, 'from scrapy.item import Item, Field\n'), ((3409, 3416), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (3414, 3416), False, 'from scrapy.item import Item, Field\n'), ((3436, 3443), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (3441, 3443), False, 'from scrapy.item import Item, Field\n'), ((3465, 3472), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (3470, 3472), False, 'from scrapy.item import Item, Field\n'), ((3527, 3534), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (3532, 3534), False, 'from scrapy.item import Item, Field\n'), ((3758, 3765), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (3763, 3765), False, 'from scrapy.item import Item, Field\n'), ((3946, 3953), 'scrapy.item.Field', 'Field', ([], {}), '()\n', (3951, 3953), False, 'from scrapy.item import Item, Field\n')]
|
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""" Context manager openers for various fileobject types
"""
from os.path import splitext
import gzip
import bz2
class Opener(object):
""" Class to accept, maybe open, and context-manage file-likes / filenames
Provides context manager to close files that the constructor opened for you.
Parameters
----------
fileish : str or file-like
if str, then open with suitable opening method. If file-like, accept as
is
\*args : positional arguments
passed to opening method when `fileish` is str. ``mode``, if not
specified, is `rb`. ``compresslevel``, if relevant, and not specified,
is set from class variable ``default_compresslevel``
\*\*kwargs : keyword arguments
passed to opening method when `fileish` is str. Change of defaults as
for \*args
"""
gz_def = (gzip.open, ('mode', 'compresslevel'))
bz2_def = (bz2.BZ2File, ('mode', 'buffering', 'compresslevel'))
compress_ext_map = {
'.gz': gz_def,
'.bz2': bz2_def,
None: (open, ('mode', 'buffering'))
}
#: default compression level when writing gz and bz2 files
default_compresslevel = 1
def __init__(self, fileish, *args, **kwargs):
if self._is_fileobj(fileish):
self.fobj = fileish
self.me_opened = False
self._name = None
return
_, ext = splitext(fileish)
if ext in self.compress_ext_map:
is_compressor = True
opener, arg_names = self.compress_ext_map[ext]
else:
is_compressor = False
opener, arg_names = self.compress_ext_map[None]
# Get full arguments to check for mode and compresslevel
full_kwargs = kwargs.copy()
n_args = len(args)
full_kwargs.update(dict(zip(arg_names[:n_args], args)))
# Set default mode
if not 'mode' in full_kwargs:
kwargs['mode'] = 'rb'
if is_compressor and not 'compresslevel' in kwargs:
kwargs['compresslevel'] = self.default_compresslevel
self.fobj = opener(fileish, *args, **kwargs)
self._name = fileish
self.me_opened = True
def _is_fileobj(self, obj):
""" Is `obj` a file-like object?
"""
return hasattr(obj, 'read') and hasattr(obj, 'write')
@property
def closed(self):
return self.fobj.closed
@property
def name(self):
""" Return ``self.fobj.name`` or self._name if not present
self._name will be None if object was created with a fileobj, otherwise
it will be the filename.
"""
try:
return self.fobj.name
except AttributeError:
return self._name
@property
def mode(self):
return self.fobj.mode
def read(self, *args, **kwargs):
return self.fobj.read(*args, **kwargs)
def write(self, *args, **kwargs):
return self.fobj.write(*args, **kwargs)
def seek(self, *args, **kwargs):
return self.fobj.seek(*args, **kwargs)
def tell(self, *args, **kwargs):
return self.fobj.tell(*args, **kwargs)
def close(self, *args, **kwargs):
return self.fobj.close(*args, **kwargs)
def close_if_mine(self):
""" Close ``self.fobj`` iff we opened it in the constructor
"""
if self.me_opened:
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close_if_mine()
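# --- Usage sketch (illustrative only) ---
# The opening function is chosen from the filename extension (.gz -> gzip.open,
# .bz2 -> bz2.BZ2File, anything else -> plain open); ``mode`` defaults to 'rb'
# and files opened by the constructor are closed again on context exit.
if __name__ == '__main__':
    import os
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), 'example.gz')
    with Opener(path, 'wb') as fobj:
        fobj.write(b'hello world')
    with Opener(path) as fobj:
        print(fobj.read())  # b'hello world'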
|
[
"os.path.splitext"
] |
[((1786, 1803), 'os.path.splitext', 'splitext', (['fileish'], {}), '(fileish)\n', (1794, 1803), False, 'from os.path import splitext\n')]
|