| code (string, 22 to 1.05M chars) | apis (list, 1 to 3.31k items) | extract_api (string, 75 to 3.25M chars) |
|---|---|---|
# import the flask library
from flask import Flask
from config import Config
# create an instance of class Flask, passing __name__
app = Flask(__name__)
app.config.from_object(Config)
# here 'app' refers to the app package, not the Flask instance variable defined above
from app import routes
|
[
"flask.Flask"
] |
[((135, 150), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (140, 150), False, 'from flask import Flask\n')]
|
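The Flask sample above imports a Config object from a config module that is not part of the row. A minimal sketch of what such a module could look like (the attribute names below are illustrative assumptions, not the author's actual config.py):

import os

class Config(object):
    # any Flask configuration attribute can live on this class; SECRET_KEY is a common one
    SECRET_KEY = os.environ.get("SECRET_KEY", "change-me")
    DEBUG = False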
# -*- coding: utf-8 -*-
r""" Manipulate posteriors of Bernoulli/Beta experiments, for discounted Bayesian policies (:class:`Policies.DiscountedBayesianIndexPolicy`).
"""
from __future__ import division, print_function # Python 2 compatibility
__author__ = "<NAME>"
__version__ = "0.9"
# Local imports
try:
from .Beta import Beta, bernoulliBinarization
from .with_proba import with_proba
except (ImportError, SystemError):
from Beta import Beta, bernoulliBinarization
from with_proba import with_proba
try:
from numpy.random import beta as betavariate # Faster! Yes!
except ImportError:
from random import betavariate
from scipy.special import btdtri
# --- Constants
#: Default value for the discount factor :math:`\gamma\in(0,1)`.
#: ``0.95`` is empirically a reasonable value for short-term non-stationary experiments.
GAMMA = 0.95
# --- Class
class DiscountedBeta(Beta):
r""" Manipulate posteriors of Bernoulli/Beta experiments, for discounted Bayesian policies (:class:`Policies.DiscountedBayesianIndexPolicy`).
- It keeps :math:`\tilde{S}(t)` and :math:`\tilde{F}(t)` the *discounted* counts of successes and failures (S and F).
"""
def __init__(self, gamma=GAMMA, a=1, b=1):
r""" Create a Beta posterior :math:`\mathrm{Beta}(\alpha, \beta)` with no observation, i.e., :math:`\alpha = 1` and :math:`\beta = 1` by default."""
assert a >= 0, "Error: parameter 'a' for Beta posterior has to be >= 0." # DEBUG
self._a = a
assert b >= 0, "Error: parameter 'b' for Beta posterior has to be >= 0." # DEBUG
self._b = b
self.N = [0, 0] #: List of two parameters [a, b]
assert 0 < gamma <= 1, "Error: for a DiscountedBayesianIndexPolicy policy, the discount factor has to be in (0,1], but it was {}.".format(gamma) # DEBUG
if gamma == 1:
print("Warning: gamma = 1 is stupid, just use a regular Beta posterior!") # DEBUG
self.gamma = gamma #: Discount factor :math:`\gamma\in(0,1)`.
def __str__(self):
return r"DiscountedBeta(\alpha={:.3g}, \beta={:.3g})".format(self.N[1], self.N[0])
def reset(self, a=None, b=None):
"""Reset alpha and beta, both to 0 as when creating a new default DiscountedBeta."""
if a is None:
a = self._a
if b is None:
b = self._b
self.N = [0, 0]
def sample(self):
"""Get a random sample from the DiscountedBeta posterior (using :func:`numpy.random.betavariate`).
- Used only by :class:`Thompson` Sampling and :class:`AdBandits` so far.
"""
return betavariate(self._a + self.N[1], self._b + self.N[0])
def quantile(self, p):
"""Return the p quantile of the DiscountedBeta posterior (using :func:`scipy.stats.btdtri`).
- Used only by :class:`BayesUCB` and :class:`AdBandits` so far.
"""
return btdtri(self._a + self.N[1], self._b + self.N[0], p)
# Bug: do not call btdtri with (0.5,0.5,0.5) in scipy version < 0.9 (old)
def forget(self, obs):
"""Forget the last observation, and undiscount the count of observations."""
# print("Info: calling DiscountedBeta.forget() with obs = {}, self.N = {} and self.gamma = {} ...".format(obs, self.N, self.gamma)) # DEBUG
# FIXED update this code, to accept obs that are FLOAT in [0, 1] and not just in {0, 1}...
binaryObs = bernoulliBinarization(obs)
self.N[binaryObs] = (self.N[binaryObs] - 1) / self.gamma
otherObs = 1 - binaryObs
self.N[otherObs] = self.N[otherObs] / self.gamma
def update(self, obs):
r""" Add an observation, and discount the previous observations.
- If obs is 1, update :math:`\alpha` the count of positive observations,
- If it is 0, update :math:`\beta` the count of negative observations.
- But instead of using :math:`\tilde{S}(t) = S(t)` and :math:`\tilde{N}(t) = N(t)`, they are updated at each time step using the discount factor :math:`\gamma`:
.. math::
\tilde{S}(t+1) &= \gamma \tilde{S}(t) + r(t),
\tilde{F}(t+1) &= \gamma \tilde{F}(t) + (1 - r(t)).
.. note:: Otherwise, a trick with :func:`bernoulliBinarization` has to be used.
"""
# print("Info: calling DiscountedBeta.update() with obs = {}, self.N = {} and self.gamma = {} ...".format(obs, self.N, self.gamma)) # DEBUG
# FIXED update this code, to accept obs that are FLOAT in [0, 1] and not just in {0, 1}...
binaryObs = bernoulliBinarization(obs)
self.N[binaryObs] = self.gamma * self.N[binaryObs] + 1
otherObs = 1 - binaryObs
self.N[otherObs] = self.gamma * self.N[otherObs]
def discount(self):
r""" Simply discount the old observation, when no observation is given at this time.
.. math::
\tilde{S}(t+1) &= \gamma \tilde{S}(t),
\tilde{F}(t+1) &= \gamma \tilde{F}(t).
"""
# print("Info: calling DiscountedBeta.discount() self.N = {} and self.gamma = {} ...".format(self.N, self.gamma)) # DEBUG
self.N[0] = max(0, self.gamma * self.N[0])
self.N[1] = max(0, self.gamma * self.N[1])
def undiscount(self):
r""" Simply cancel the discount on the old observation, when no observation is given at this time.
.. math::
\tilde{S}(t+1) &= \frac{1}{\gamma} \tilde{S}(t),
\tilde{F}(t+1) &= \frac{1}{\gamma} \tilde{F}(t).
"""
# print("Info: calling DiscountedBeta.undiscount() self.N = {} and self.gamma = {} ...".format(self.N, self.gamma)) # DEBUG
self.N[0] = max(0, self.N[0] / self.gamma)
self.N[1] = max(0, self.N[1] / self.gamma)
|
[
"scipy.special.btdtri",
"Beta.bernoulliBinarization",
"random.betavariate"
] |
[((2623, 2676), 'random.betavariate', 'betavariate', (['(self._a + self.N[1])', '(self._b + self.N[0])'], {}), '(self._a + self.N[1], self._b + self.N[0])\n', (2634, 2676), False, 'from random import betavariate\n'), ((2906, 2957), 'scipy.special.btdtri', 'btdtri', (['(self._a + self.N[1])', '(self._b + self.N[0])', 'p'], {}), '(self._a + self.N[1], self._b + self.N[0], p)\n', (2912, 2957), False, 'from scipy.special import btdtri\n'), ((3421, 3447), 'Beta.bernoulliBinarization', 'bernoulliBinarization', (['obs'], {}), '(obs)\n', (3442, 3447), False, 'from Beta import Beta, bernoulliBinarization\n'), ((4545, 4571), 'Beta.bernoulliBinarization', 'bernoulliBinarization', (['obs'], {}), '(obs)\n', (4566, 4571), False, 'from Beta import Beta, bernoulliBinarization\n')]
|
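The update and discount docstrings above describe the recursions :math:`\tilde{S}(t+1) = \gamma \tilde{S}(t) + r(t)` and :math:`\tilde{F}(t+1) = \gamma \tilde{F}(t) + (1 - r(t))`. A short usage sketch of the class exactly as defined in the row (the reward sequence is invented for illustration):

posterior = DiscountedBeta(gamma=0.95)
for reward in [1, 1, 0, 1, 0]:
    posterior.update(reward)   # discount the old counts, then add the new observation
print(posterior)               # shows the discounted pseudo-counts alpha and beta
theta = posterior.sample()     # Thompson-sampling style draw from Beta(a + discounted S, b + discounted F)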
import turtle as player
wn = player.Screen()
wn.title("disney level animation")
wn.bgcolor("Black")
#create new shapes
wn.register_shape("invader.gif")
wn.register_shape("invader2.gif")
player.shape("invader.gif")
player.frame = 0
#copying the video
player.frames = ["invader.gif", "invader2.gif"]
def player_animate():
player.frame += 1
if player.frame >= len(player.frames):
player.frame = 0
player.shape(player.frames[player.frame])
#TIMER STARTO
wn.ontimer(player_animate, 500)
player_animate()
while True:
    wn.update()
|
[
"turtle.shape",
"turtle.Screen"
] |
[((33, 48), 'turtle.Screen', 'player.Screen', ([], {}), '()\n', (46, 48), True, 'import turtle as player\n'), ((193, 220), 'turtle.shape', 'player.shape', (['"""invader.gif"""'], {}), "('invader.gif')\n", (205, 220), True, 'import turtle as player\n'), ((417, 458), 'turtle.shape', 'player.shape', (['player.frames[player.frame]'], {}), '(player.frames[player.frame])\n', (429, 458), True, 'import turtle as player\n')]
|
# Mockup of a plan:
#
# Keyboard buttons:
# get my id
# if in family -> leave family
# if in family and is_family_creator -> invite a person by id | kick a person
# if not in family -> create a family | join a family
#
# Database
# user -> user_id | family_id
# family -> family_id | user_list | creator_id
# bills -> bill_id | family_id | user_id | price | message
from datetime import datetime
from math import ceil
import sqlite3
from tokenize import String
from aiogram import Bot, Dispatcher, executor, types
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.dispatcher import FSMContext
import user as usr
import markups as mk
import family as fam
import bill as b
import text_templates as tt
import state_handler as sh
conn = sqlite3.connect("data.db")
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS users (user_id INTEGER, family_id INTEGER, username TEXT)")
c.execute("CREATE TABLE IF NOT EXISTS families (family_id INTEGER, creator_id INTEGER)")
c.execute("CREATE TABLE IF NOT EXISTS invites (user_id INTEGER, family_id INTEGER)")
c.execute("CREATE TABLE IF NOT EXISTS bills (bill_id INTEGER, family_id INTEGER, user_id INTEGER, price REAL, message TEXT, date TEXT)")
conn.commit()
with open("token.txt", "r") as token_file:
token = token_file.readline().rstrip()
storage = MemoryStorage()
bot = Bot(token=token)
dp = Dispatcher(bot, storage=storage)
@dp.message_handler(commands=['start'])
async def welcome(message: types.Message):
user = usr.User(message.chat.id)
await bot.send_message(
chat_id=user.get_user_id(),
text=tt.greeting,
reply_markup=mk.get_markup_start(user),
)
@dp.message_handler()
async def msg(message: types.Message):
user = usr.User(message.chat.id)
if message.text == tt.get_my_id:
await bot.send_message(
chat_id=user.get_user_id(),
text=user.get_user_id(),
)
elif message.text == tt.change_my_name:
await bot.send_message(
chat_id=user.get_user_id(),
            text=f"Enter a new name or press the \"{tt.back}\" button",
reply_markup=mk.single_button(mk.btnCancelDelete)
)
await sh.changeName.name.set()
elif message.text == tt.create_family:
if not user.is_in_family():
fam.create_family(user.get_user_id())
await bot.send_message(
chat_id=user.get_user_id(),
                text=f"A family with ID {user.get_family().get_family_id()} has been created.",
reply_markup=mk.get_markup_start(user)
)
elif message.text == tt.leave_family:
if user.is_in_family():
family = user.get_family()
family.remove_user(user.get_user_id())
await bot.send_message(
chat_id=user.get_user_id(),
                text=f"You have left the family.",
reply_markup=mk.get_markup_start(user)
)
elif message.text == tt.my_invites:
if not user.is_in_family():
if user.get_invites():
text = tt.my_invites
markup = mk.get_markup_myInvites(user)
else:
text = tt.no_active_invites
markup = types.InlineKeyboardMarkup()
await bot.send_message(
chat_id=user.get_user_id(),
text=text,
reply_markup=markup
)
elif message.text == tt.invite_to_family:
await bot.send_message(
chat_id=user.get_user_id(),
            text=f"Enter the user ID or press the \"{tt.back}\" button.",
reply_markup=mk.single_button(mk.btnCancelDelete)
)
await sh.InviteToFamily.invited_id.set()
elif message.text == tt.kick_from_family:
if user.is_in_family():
if user.get_family().get_creator().get_user_id() == user.get_user_id():
await bot.send_message(
chat_id=user.get_user_id(),
                    text=f"Select the user you want to kick from the family: ",
reply_markup=mk.get_markup_kickFromFamily(user.get_family())
)
elif message.text in [tt.family_bills_last_30_days, tt.my_bills_last_30_days]:
if user.is_in_family():
text = f"{tt.line_separator}\n"
if message.text == tt.family_bills_last_30_days:
bill_list = user.get_family().get_bills_30_days()
own = False
else:
bill_list = user.get_family().get_bills_30_days(user.get_user_id())
own = True
markup = types.InlineKeyboardMarkup()
if len(bill_list) > 30:
markup = mk.get_markup_billsPage(pagenum=1, maxpages=ceil(len(bill_list) / 30), own=own)
bill_list = bill_list[:30]
for bill in bill_list[::-1]:
                text += f"{'{:.2f}'.format(bill.get_price())} RUB - \"{bill.get_message()}\"\nAdded by {bill.get_user().get_name()} {datetime.strftime(bill.get_date(), '%d-%m-%y at %H:%M')}\n{tt.line_separator}\n"
            text += f"{tt.family_bills_last_30_days if message.text == tt.family_bills_last_30_days else tt.my_bills_last_30_days}: {'{:.2f}'.format(user.get_family().get_total_30_days(None if message.text == tt.family_bills_last_30_days else user.get_user_id()))} RUB"
await bot.send_message(
chat_id=user.get_user_id(),
text=text,
reply_markup=markup
)
else:
if user.is_in_family():
try:
price = float(message.text.split(" - ")[0])
msg = message.text.split(" - ")[1]
if len(msg) > 120:
                    text = f"The message cannot be longer than 120 characters!"
else:
b.create_bill(user.get_family().get_family_id(), user.get_user_id(), price, msg)
                    text = f"A bill for {price} RUB was added with the message \"{msg}\"."
except:
text = tt.error
else:
            text = "Join a family to add expenses."
await bot.send_message(
chat_id=user.get_user_id(),
text=text,
)
@dp.callback_query_handler()
async def process_callback(callback_query: types.CallbackQuery):
user = usr.User(callback_query.message.chat.id)
call_data = callback_query.data
message_id = callback_query.message.message_id
if call_data.startswith("acceptFamily"):
if fam.family_exists(call_data[12:]):
family = fam.Family(call_data[12:])
if not user.is_in_family() and user.is_invited(family.get_family_id()):
family.add_user(user.get_user_id())
user.delete_invite(family.get_family_id())
                text = f"You have joined the family with ID {user.get_family().get_family_id()}."
markup = mk.get_markup_start(user)
else:
            text = f"The family with ID {call_data[12:]} no longer exists."
user.delete_invite(call_data[12:])
markup = mk.single_button(mk.btnBackMyInvites)
await bot.edit_message_text(
chat_id=user.get_user_id(),
message_id=message_id,
text=text,
reply_markup=markup
)
elif call_data == "myInvites":
if not user.is_in_family():
if user.get_invites():
text = tt.my_invites
markup = mk.get_markup_myInvites(user)
else:
text = tt.no_active_invites
markup = types.InlineKeyboardMarkup()
await bot.edit_message_text(
chat_id=user.get_user_id(),
message_id=message_id,
text=text,
reply_markup=markup
)
elif call_data.startswith("declineFamily"):
family = fam.Family(call_data[13:])
if not user.is_in_family() and user.is_invited(family.get_family_id()):
user.delete_invite(family.get_family_id())
if user.get_invites():
text = tt.my_invites
markup = mk.get_markup_myInvites(user)
else:
text = tt.no_active_invites
markup = types.InlineKeyboardMarkup()
await bot.edit_message_text(
chat_id=user.get_user_id(),
message_id=message_id,
text=text,
reply_markup=markup
)
elif call_data == "kickFromFamily":
if user.is_in_family():
if user.get_family().get_creator().get_user_id() == user.get_user_id():
await bot.edit_message_text(
chat_id=user.get_user_id(),
message_id=message_id,
                    text=f"Select the user you want to kick from the family: ",
reply_markup=mk.get_markup_kickFromFamily(user.get_family())
)
elif call_data.startswith("kickFromFamily"):
kicked_user = usr.User(call_data[14:])
family = kicked_user.get_family()
if user.get_user_id() == kicked_user.get_user_id():
            text = "You cannot kick yourself!"
elif user.is_in_family() and kicked_user.is_in_family():
if kicked_user.get_family().get_creator().get_user_id() == user.get_user_id() and kicked_user.get_family().get_family_id() == user.get_family().get_family_id():
await bot.send_message(
chat_id=kicked_user.get_user_id(),
                    text=f"You have been kicked from the family with ID {family.get_family_id()}.",
)
                text = f"User {kicked_user.get_name()} was kicked from the family."
family.remove_user(kicked_user.get_user_id())
await bot.edit_message_text(
chat_id=user.get_user_id(),
message_id=message_id,
text=text,
reply_markup=mk.single_button(mk.btnBackKickFromFamily)
)
elif call_data.startswith("billsPage") or call_data.startswith("ownbillsPage"):
if call_data.startswith("ownbillsPage"):
pagenum = int(call_data[12:])
bill_list = user.get_family().get_bills_30_days(user_id=user.get_user_id())
own = True
else:
pagenum = int(call_data[9:])
bill_list = user.get_family().get_bills_30_days()
own = False
bill_offset_start = 0 if pagenum == 1 else 30*(pagenum-1)
maxpages = ceil(len(bill_list) / 30)
bill_list = bill_list[bill_offset_start:bill_offset_start+30]
text = ""
for bill in bill_list[::-1]:
            text += f"{'{:.2f}'.format(bill.get_price())} RUB - \"{bill.get_message()}\"\nAdded by {bill.get_user().get_name()} {datetime.strftime(bill.get_date(), '%d-%m-%y at %H:%M')}\n{tt.line_separator}\n"
        text += f"{tt.family_bills_last_30_days if not own else tt.my_bills_last_30_days}: {'{:.2f}'.format(user.get_family().get_total_30_days(None if not own else user.get_user_id()))} RUB"
await bot.edit_message_text(
text=text,
chat_id=user.get_user_id(),
message_id=message_id,
reply_markup=mk.get_markup_billsPage(pagenum, maxpages=maxpages, own=own)
)
@dp.message_handler(state=sh.InviteToFamily.invited_id)
async def inviteToFamilySetInvitedID(message: types.Message, state: FSMContext):
user = usr.User(message.chat.id)
family = user.get_family()
if usr.user_exists(message.text):
invited_user = usr.User(message.text)
if invited_user.is_in_family():
            text = f"User {invited_user.get_name()} is already in a family."
elif invited_user.is_invited(family.get_family_id()):
            text = f"User {invited_user.get_name()} has already been invited to the family with ID {family.get_family_id()}."
else:
try:
invited_user.create_invite(user.get_family().get_family_id())
await bot.send_message(
chat_id=invited_user.get_user_id(),
                    text=f"You have been invited to the family with ID {user.get_family().get_family_id()}.",
reply_markup=mk.single_button(mk.btnMyInvites)
)
                text = f"User {invited_user.get_name()} has been invited to the family with ID {user.get_family().get_family_id()}."
except:
text = tt.error
else:
        text = f"There is no user with ID {message.text}."
await bot.send_message(
chat_id=user.get_user_id(),
text=text
)
await state.finish()
@dp.message_handler(state=sh.changeName.name)
async def changeNameSetName(message: types.Message, state: FSMContext):
user = usr.User(message.chat.id)
user.set_name(message.text)
await bot.send_message(
chat_id=message.chat.id,
        text=f"Your name has been changed to \"{message.text}\".",
)
await state.finish()
@dp.callback_query_handler(state='*')
async def cancelState(callback_query: types.CallbackQuery, state: FSMContext):
user = usr.User(callback_query.message.chat.id)
call_data = callback_query.data
if call_data == "cancelDelete":
try:
await bot.delete_message(
chat_id=user.get_user_id(),
message_id=callback_query.message.message_id
)
except:
pass # It's a shame I had to do this.
await state.finish()
if __name__ == '__main__':
executor.start_polling(dp, skip_updates=True)
|
[
"aiogram.executor.start_polling",
"markups.single_button",
"aiogram.types.InlineKeyboardMarkup",
"markups.get_markup_start",
"aiogram.contrib.fsm_storage.memory.MemoryStorage",
"state_handler.InviteToFamily.invited_id.set",
"aiogram.Dispatcher",
"state_handler.changeName.name.set",
"user.user_exists",
"aiogram.Bot",
"sqlite3.connect",
"family.family_exists",
"family.Family",
"markups.get_markup_myInvites",
"markups.get_markup_billsPage",
"user.User"
] |
[((825, 851), 'sqlite3.connect', 'sqlite3.connect', (['"""data.db"""'], {}), "('data.db')\n", (840, 851), False, 'import sqlite3\n'), ((1392, 1407), 'aiogram.contrib.fsm_storage.memory.MemoryStorage', 'MemoryStorage', ([], {}), '()\n', (1405, 1407), False, 'from aiogram.contrib.fsm_storage.memory import MemoryStorage\n'), ((1414, 1430), 'aiogram.Bot', 'Bot', ([], {'token': 'token'}), '(token=token)\n', (1417, 1430), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((1436, 1468), 'aiogram.Dispatcher', 'Dispatcher', (['bot'], {'storage': 'storage'}), '(bot, storage=storage)\n', (1446, 1468), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((1564, 1589), 'user.User', 'usr.User', (['message.chat.id'], {}), '(message.chat.id)\n', (1572, 1589), True, 'import user as usr\n'), ((1807, 1832), 'user.User', 'usr.User', (['message.chat.id'], {}), '(message.chat.id)\n', (1815, 1832), True, 'import user as usr\n'), ((6443, 6483), 'user.User', 'usr.User', (['callback_query.message.chat.id'], {}), '(callback_query.message.chat.id)\n', (6451, 6483), True, 'import user as usr\n'), ((11631, 11656), 'user.User', 'usr.User', (['message.chat.id'], {}), '(message.chat.id)\n', (11639, 11656), True, 'import user as usr\n'), ((11695, 11724), 'user.user_exists', 'usr.user_exists', (['message.text'], {}), '(message.text)\n', (11710, 11724), True, 'import user as usr\n'), ((12970, 12995), 'user.User', 'usr.User', (['message.chat.id'], {}), '(message.chat.id)\n', (12978, 12995), True, 'import user as usr\n'), ((13312, 13352), 'user.User', 'usr.User', (['callback_query.message.chat.id'], {}), '(callback_query.message.chat.id)\n', (13320, 13352), True, 'import user as usr\n'), ((13727, 13772), 'aiogram.executor.start_polling', 'executor.start_polling', (['dp'], {'skip_updates': '(True)'}), '(dp, skip_updates=True)\n', (13749, 13772), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((6628, 6661), 'family.family_exists', 'fam.family_exists', (['call_data[12:]'], {}), '(call_data[12:])\n', (6645, 6661), True, 'import family as fam\n'), ((11749, 11771), 'user.User', 'usr.User', (['message.text'], {}), '(message.text)\n', (11757, 11771), True, 'import user as usr\n'), ((6684, 6710), 'family.Family', 'fam.Family', (['call_data[12:]'], {}), '(call_data[12:])\n', (6694, 6710), True, 'import family as fam\n'), ((7200, 7237), 'markups.single_button', 'mk.single_button', (['mk.btnBackMyInvites'], {}), '(mk.btnBackMyInvites)\n', (7216, 7237), True, 'import markups as mk\n'), ((1701, 1726), 'markups.get_markup_start', 'mk.get_markup_start', (['user'], {}), '(user)\n', (1720, 1726), True, 'import markups as mk\n'), ((2266, 2290), 'state_handler.changeName.name.set', 'sh.changeName.name.set', ([], {}), '()\n', (2288, 2290), True, 'import state_handler as sh\n'), ((7020, 7045), 'markups.get_markup_start', 'mk.get_markup_start', (['user'], {}), '(user)\n', (7039, 7045), True, 'import markups as mk\n'), ((8015, 8041), 'family.Family', 'fam.Family', (['call_data[13:]'], {}), '(call_data[13:])\n', (8025, 8041), True, 'import family as fam\n'), ((7603, 7632), 'markups.get_markup_myInvites', 'mk.get_markup_myInvites', (['user'], {}), '(user)\n', (7626, 7632), True, 'import markups as mk\n'), ((7720, 7748), 'aiogram.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {}), '()\n', (7746, 7748), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((2205, 2241), 'markups.single_button', 'mk.single_button', (['mk.btnCancelDelete'], {}), '(mk.btnCancelDelete)\n', (2221, 
2241), True, 'import markups as mk\n'), ((8274, 8303), 'markups.get_markup_myInvites', 'mk.get_markup_myInvites', (['user'], {}), '(user)\n', (8297, 8303), True, 'import markups as mk\n'), ((8391, 8419), 'aiogram.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {}), '()\n', (8417, 8419), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((9170, 9194), 'user.User', 'usr.User', (['call_data[14:]'], {}), '(call_data[14:])\n', (9178, 9194), True, 'import user as usr\n'), ((2615, 2640), 'markups.get_markup_start', 'mk.get_markup_start', (['user'], {}), '(user)\n', (2634, 2640), True, 'import markups as mk\n'), ((3185, 3214), 'markups.get_markup_myInvites', 'mk.get_markup_myInvites', (['user'], {}), '(user)\n', (3208, 3214), True, 'import markups as mk\n'), ((3302, 3330), 'aiogram.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {}), '()\n', (3328, 3330), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((3754, 3788), 'state_handler.InviteToFamily.invited_id.set', 'sh.InviteToFamily.invited_id.set', ([], {}), '()\n', (3786, 3788), True, 'import state_handler as sh\n'), ((12412, 12445), 'markups.single_button', 'mk.single_button', (['mk.btnMyInvites'], {}), '(mk.btnMyInvites)\n', (12428, 12445), True, 'import markups as mk\n'), ((2972, 2997), 'markups.get_markup_start', 'mk.get_markup_start', (['user'], {}), '(user)\n', (2991, 2997), True, 'import markups as mk\n'), ((10120, 10162), 'markups.single_button', 'mk.single_button', (['mk.btnBackKickFromFamily'], {}), '(mk.btnBackKickFromFamily)\n', (10136, 10162), True, 'import markups as mk\n'), ((3693, 3729), 'markups.single_button', 'mk.single_button', (['mk.btnCancelDelete'], {}), '(mk.btnCancelDelete)\n', (3709, 3729), True, 'import markups as mk\n'), ((4690, 4718), 'aiogram.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {}), '()\n', (4716, 4718), False, 'from aiogram import Bot, Dispatcher, executor, types\n'), ((11410, 11470), 'markups.get_markup_billsPage', 'mk.get_markup_billsPage', (['pagenum'], {'maxpages': 'maxpages', 'own': 'own'}), '(pagenum, maxpages=maxpages, own=own)\n', (11433, 11470), True, 'import markups as mk\n')]
|
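The bot above relies on several local modules (user, family, bill, markups, text_templates, state_handler) that are not included in the row. A rough sketch of the kind of wrapper user.py presumably provides, inferred only from the calls the handlers make; the table layout follows the CREATE TABLE statements at the top of the row, everything else is an assumption:

import sqlite3

conn = sqlite3.connect("data.db")

def user_exists(user_id) -> bool:
    row = conn.execute("SELECT 1 FROM users WHERE user_id = ?", (int(user_id),)).fetchone()
    return row is not None

class User:
    def __init__(self, user_id):
        self.user_id = int(user_id)
        # register the user on first contact
        if not user_exists(self.user_id):
            conn.execute("INSERT INTO users (user_id, family_id, username) VALUES (?, NULL, NULL)", (self.user_id,))
            conn.commit()

    def get_user_id(self):
        return self.user_id

    def is_in_family(self) -> bool:
        row = conn.execute("SELECT family_id FROM users WHERE user_id = ?", (self.user_id,)).fetchone()
        return row is not None and row[0] is not None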
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import socket
from maro.communication import Proxy
def get_random_port():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as temp_socket:
temp_socket.bind(("", 0))
random_port = temp_socket.getsockname()[1]
return random_port
def proxy_generator(component_type, redis_port):
proxy_parameters = {
"group_name": "communication_unit_test",
"redis_address": ("localhost", redis_port),
"log_enable": False
}
component_type_expected_peers_map = {
"receiver": {"sender": 1},
"sender": {"receiver": 1},
"master": {"worker": 5},
"worker": {"master": 1}
}
proxy = Proxy(
component_type=component_type,
expected_peers=component_type_expected_peers_map[component_type],
**proxy_parameters
)
return proxy
|
[
"maro.communication.Proxy",
"socket.socket"
] |
[((743, 870), 'maro.communication.Proxy', 'Proxy', ([], {'component_type': 'component_type', 'expected_peers': 'component_type_expected_peers_map[component_type]'}), '(component_type=component_type, expected_peers=\n component_type_expected_peers_map[component_type], **proxy_parameters)\n', (748, 870), False, 'from maro.communication import Proxy\n'), ((160, 209), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (173, 209), False, 'import socket\n')]
|
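A usage sketch for the two helpers above; in the unit tests each proxy is normally created in its own process so that peer discovery over Redis can complete, and a Redis server must already be listening on the chosen port (both are assumptions of this sketch, not shown in the row):

redis_port = get_random_port()   # reserve a free port for the test Redis instance
# in one test process:
#     sender = proxy_generator("sender", redis_port)
# in another test process:
#     receiver = proxy_generator("receiver", redis_port)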
"""
This code validates the performance of VGG16 after L-OBS prunning
"""
import torch
import torch.backends.cudnn as cudnn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from models.vgg import vgg16_bn
from utils import validate, adjust_mean_var
import numpy as np
import os
from datetime import datetime
use_cuda = torch.cuda.is_available()
# -------------------------------------------- User Config ------------------------------------
# Specify parameters path
traindir = '/home/shangyu/imagenet-train'
valdir = '/home/shangyu/imagenet-val'
pruned_weight_root = './VGG16/pruned_weight'
pruned_parameter_root = './VGG16/pruned_param'
if not os.path.exists(pruned_parameter_root):
os.makedirs(pruned_parameter_root)
pretrain_model_path = './VGG16/vgg16_bn-6c64b313.pth'
n_validate_batch = 100 # Number of batches used for validation
validate_batch_size = 50 # Batch size of validation
adjust_batch_size = 128
n_adjust_batch = 500
# Prune data is retrieved from
# Learning both Weights and Connections for Efficient Neural Networks (Song Han, NIPS 2015)
layer_name_list = {
'features.0': 55,
'features.3': 20,
'features.7': 35,
'features.10': 35,
'features.14': 55,
'features.17': 25,
'features.20': 40,
'features.24': 30,
'features.27': 30,
'features.30': 35,
'features.34': 35,
'features.37': 30,
'features.40': 35,
'classifier.0': 5,
'classifier.3': 5,
'classifier.6': 25
}
# -------------------------------------------- User Config ------------------------------------
net = vgg16_bn()
# net.load_state_dict(torch.load(pretrain_model_path))
# param = net.state_dict()
param = torch.load(pretrain_model_path)
total_nnz = 0
total_nelements = 0
n_weight_used = 0
n_total_weight = len(os.listdir('%s/CR_5' %(pruned_weight_root))) # It should be 16 * 2 = 32
for layer_name, CR in layer_name_list.items():
# if not os.path.exists('%s/CR_%d/%s.npy' %(pruned_weight_root, CR, layer_name)):
# continue
pruned_weight = np.load('%s/CR_%d/%s.weight.npy' %(pruned_weight_root, CR, layer_name))
pruned_bias = np.load('%s/CR_%d/%s.bias.npy' %(pruned_weight_root, CR, layer_name))
# print pruned_weight
# raw_input()
# Calculate sparsity
this_sparsity = np.count_nonzero(pruned_weight) + np.count_nonzero(pruned_bias)
this_total = pruned_weight.size + pruned_bias.size
print ('%s CR: %f' %(layer_name, float(this_sparsity)/float(this_total)))
total_nnz += this_sparsity
total_nelements += this_total
param['%s.weight' %layer_name] = torch.FloatTensor(pruned_weight)
param['%s.bias' %layer_name] = torch.FloatTensor(pruned_bias)
n_weight_used += 2
# assert(n_weight_used == n_total_weight)
print ('Prune weights used: %d/%d' %(n_weight_used, n_total_weight))
overall_CR = float(total_nnz) / float(total_nelements)
print ('Overall compression rate (nnz/total): %f' %overall_CR)
net.load_state_dict(param)
torch.save(param, '%s/CR-%.3f.pth' % (pruned_parameter_root, overall_CR))
if use_cuda:
net.cuda()
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
# Load training dataset for mean/var adjust
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
trainDataset = datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
train_loader = torch.utils.data.DataLoader(trainDataset, batch_size = adjust_batch_size, shuffle=True)
print ('[%s] Begin adjust.' %(datetime.now()))
adjust_mean_var(net, train_loader, None, n_adjust_batch, use_cuda)
print ('[%s] Adjust finish. Now saving parameters' %(datetime.now()))
# Load validation dataset
print('==> Preparing data..')
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size = validate_batch_size, shuffle=True)
validate(net, val_loader, None, None, n_validate_batch, use_cuda)
|
[
"numpy.load",
"torch.cuda.device_count",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torch.load",
"os.path.exists",
"utils.adjust_mean_var",
"torch.FloatTensor",
"torchvision.transforms.CenterCrop",
"datetime.datetime.now",
"models.vgg.vgg16_bn",
"torchvision.transforms.RandomHorizontalFlip",
"torch.cuda.is_available",
"os.listdir",
"torchvision.transforms.Resize",
"numpy.count_nonzero",
"os.makedirs",
"utils.validate",
"torchvision.transforms.RandomResizedCrop",
"torchvision.transforms.ToTensor"
] |
[((361, 386), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (384, 386), False, 'import torch\n'), ((1590, 1600), 'models.vgg.vgg16_bn', 'vgg16_bn', ([], {}), '()\n', (1598, 1600), False, 'from models.vgg import vgg16_bn\n'), ((1691, 1722), 'torch.load', 'torch.load', (['pretrain_model_path'], {}), '(pretrain_model_path)\n', (1701, 1722), False, 'import torch\n'), ((3251, 3326), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (3271, 3326), True, 'import torchvision.transforms as transforms\n'), ((3527, 3616), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainDataset'], {'batch_size': 'adjust_batch_size', 'shuffle': '(True)'}), '(trainDataset, batch_size=adjust_batch_size,\n shuffle=True)\n', (3554, 3616), False, 'import torch\n'), ((3662, 3728), 'utils.adjust_mean_var', 'adjust_mean_var', (['net', 'train_loader', 'None', 'n_adjust_batch', 'use_cuda'], {}), '(net, train_loader, None, n_adjust_batch, use_cuda)\n', (3677, 3728), False, 'from utils import validate, adjust_mean_var\n'), ((4099, 4164), 'utils.validate', 'validate', (['net', 'val_loader', 'None', 'None', 'n_validate_batch', 'use_cuda'], {}), '(net, val_loader, None, None, n_validate_batch, use_cuda)\n', (4107, 4164), False, 'from utils import validate, adjust_mean_var\n'), ((688, 725), 'os.path.exists', 'os.path.exists', (['pruned_parameter_root'], {}), '(pruned_parameter_root)\n', (702, 725), False, 'import os\n'), ((731, 765), 'os.makedirs', 'os.makedirs', (['pruned_parameter_root'], {}), '(pruned_parameter_root)\n', (742, 765), False, 'import os\n'), ((1796, 1838), 'os.listdir', 'os.listdir', (["('%s/CR_5' % pruned_weight_root)"], {}), "('%s/CR_5' % pruned_weight_root)\n", (1806, 1838), False, 'import os\n'), ((2043, 2115), 'numpy.load', 'np.load', (["('%s/CR_%d/%s.weight.npy' % (pruned_weight_root, CR, layer_name))"], {}), "('%s/CR_%d/%s.weight.npy' % (pruned_weight_root, CR, layer_name))\n", (2050, 2115), True, 'import numpy as np\n'), ((2133, 2203), 'numpy.load', 'np.load', (["('%s/CR_%d/%s.bias.npy' % (pruned_weight_root, CR, layer_name))"], {}), "('%s/CR_%d/%s.bias.npy' % (pruned_weight_root, CR, layer_name))\n", (2140, 2203), True, 'import numpy as np\n'), ((2593, 2625), 'torch.FloatTensor', 'torch.FloatTensor', (['pruned_weight'], {}), '(pruned_weight)\n', (2610, 2625), False, 'import torch\n'), ((2661, 2691), 'torch.FloatTensor', 'torch.FloatTensor', (['pruned_bias'], {}), '(pruned_bias)\n', (2678, 2691), False, 'import torch\n'), ((2292, 2323), 'numpy.count_nonzero', 'np.count_nonzero', (['pruned_weight'], {}), '(pruned_weight)\n', (2308, 2323), True, 'import numpy as np\n'), ((2326, 2355), 'numpy.count_nonzero', 'np.count_nonzero', (['pruned_bias'], {}), '(pruned_bias)\n', (2342, 2355), True, 'import numpy as np\n'), ((3645, 3659), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3657, 3659), False, 'from datetime import datetime\n'), ((3782, 3796), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3794, 3796), False, 'from datetime import datetime\n'), ((3396, 3429), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {}), '(224)\n', (3424, 3429), True, 'import torchvision.transforms as transforms\n'), ((3433, 3466), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (3464, 3466), True, 'import torchvision.transforms as 
transforms\n'), ((3470, 3491), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3489, 3491), True, 'import torchvision.transforms as transforms\n'), ((3139, 3164), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3162, 3164), False, 'import torch\n'), ((3951, 3973), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (3968, 3973), True, 'import torchvision.transforms as transforms\n'), ((3977, 4003), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (3998, 4003), True, 'import torchvision.transforms as transforms\n'), ((4007, 4028), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4026, 4028), True, 'import torchvision.transforms as transforms\n')]
|
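The helpers validate and adjust_mean_var are imported from utils but not included in the row. adjust_mean_var re-estimates the BatchNorm running statistics after the pruned weights have been loaded, by forwarding a number of training batches in train mode; a rough sketch of that idea, assuming the real helper shares the (net, loader, criterion, n_batches, use_cuda) signature used above:

import torch

def adjust_mean_var(net, loader, criterion, n_batches, use_cuda):
    net.train()  # BatchNorm layers refresh their running mean/var only in train mode
    with torch.no_grad():
        for i, (inputs, _) in enumerate(loader):
            if i >= n_batches:
                break
            if use_cuda:
                inputs = inputs.cuda()
            net(inputs)  # forward pass only; the running statistics update as a side effect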
from flask import Flask, render_template, jsonify, request, url_for
from shapely.geometry import Point as Shapely_point, mapping
from geojson import Point as Geoj_point, Polygon as Geoj_polygon, Feature, FeatureCollection
from datetime import datetime
from sqlalchemy import *
import pandas as pd
import geopandas as gpd
import numpy as np
import psycopg2 as pg
import json
import leaflet as L
from elastic_app_search import Client
from elasticsearch import Elasticsearch
from elasticapm.contrib.flask import ElasticAPM
import matplotlib.colors as cl
import h3
import h3.api.basic_int as h3int
import h3pandas
import cmasher as cmr
import plotly
import plotly.express as px
from scipy.stats import percentileofscore
from scipy import stats
import plotly.graph_objects as go
import os
import datetime
from netCDF4 import Dataset
import shapely.wkt
import folium
import ftplib
from ftplib import FTP
from pathlib import Path
from os import path, walk
############ globals
outDir = '/home/sumer/my_project_dir/ncep/'
updated_data_available_file = '/home/sumer/weather/weather-forecast/updated_data_available.txt'
#outDir = '/root/ncep/data/'
#updated_data_available_file = '/root/ncep/scripts/updated_data_available.txt'
list_of_ncfiles = [x for x in os.listdir(outDir) if x.endswith('.nc')]
list_of_ncfiles.sort()
time_dim = len(list_of_ncfiles)
varDict = {'TMP_2maboveground': 'Air Temp [C] (2 m above surface)',
'TSOIL_0D1M0D4mbelowground':'Soil Temperature [C] - 0.1-0.4 m below ground',
'SOILW_0D1M0D4mbelowground':'Volumetric Soil Moisture Content [Fraction] - 0.1-0.4 m below ground',
'CRAIN_surface':'Rainfall Boolean [1/0]',
}
#varList = ['TMP_2maboveground','TSOIL_0D1M0D4mbelowground','SOILW_0D1M0D4mbelowground', 'CRAIN_surface']
varList = list(varDict.keys())
var_val3D = []
var_val4D = []
#NOTE: the variable are in opposite order var_val4D[lat, lon, forecast_time_index, 0/1/2/3, where 0=CRAIN, 1=SOILW... etc]
updatedDtStr = list_of_ncfiles[0].split('__')[0]
updatedDt = datetime.datetime.strptime(updatedDtStr,'%Y%m%d_%H%M%S')
updatedDtDisplay = datetime.datetime.strftime(updatedDt, '%Y-%m-%dT%H%M%S')
#get the forecast end dt
forecastEndDtStr = list_of_ncfiles[-1].split('__')[1].split('__')[0]
forecastEndDt = datetime.datetime.strptime(forecastEndDtStr,'%Y%m%d_%H%M%S')
forecastEndDtDisplay = datetime.datetime.strftime(forecastEndDt, '%Y-%m-%dT%H%M%S')
i=0
for varName in varList:
tm_arr = []
print('Reading data for :'+varName)
j=0
for f in list_of_ncfiles:
#f = '20211209_000000__20211212_210000__093___gfs.t00z.pgrb2.0p25.f093.grb2.nc'
ncin = Dataset(outDir+f, "r")
titleStr = varDict[varName]
var_mat = ncin.variables[varName][:]
if 'Temp' in titleStr:
var_val = var_mat.squeeze() - 273.15 #convert to DegC
else:
var_val = var_mat.squeeze()
lons = ncin.variables['longitude'][:]
lats = ncin.variables['latitude'][:]
tms = ncin.variables['time'][:]
#tmstmpStr = datetime.datetime.fromtimestamp(tm.data[0]).strftime('%Y%m%d%H%M%S')
if j>0:
var_val3D = np.dstack((var_val3D,var_val.data))
else:
var_val3D = var_val.data
tm_arr.append(tms.data[0])
ncin.close()
j=j+1
if i>0:
var_val3D_rshp = np.reshape(var_val3D , (720,1440,time_dim,1))
var_val4D = np.append( var_val3D_rshp , var_val4D , axis = 3)
else:
var_val4D = np.reshape(var_val3D , (720,1440,time_dim,1))
i=i+1
def getWeatherForecastVars():
weatherForecastVars = {}
weatherForecastVars['source'] = 'United States NOAA - NOMADS Global Forecast Model'
weatherForecastVars['variables'] = list(varDict.values())
weatherForecastVars['updated at time [UTC]'] = updatedDt
weatherForecastVars['forecast start time [UTC]'] = updatedDtDisplay
weatherForecastVars['forecast end time [UTC]'] = forecastEndDtDisplay
weatherForecastVars['forecast type'] = 'hourly'
weatherForecastVars['Number of time intervals'] = time_dim
return weatherForecastVars
def getWeatherForecast(lon, lat):
df = pd.DataFrame()
try:
lat = float(lat)
lon = float(lon)
varList = list(varDict.keys())
df = pd.DataFrame()
idx=0
updated_dtStr = list_of_ncfiles[0].split('__')[0]
updated_dt = datetime.datetime.strptime(updated_dtStr, '%Y%m%d_%H%M%S')
for f in list_of_ncfiles:
dtStr = f.split('__')[1]
forecast_dt = datetime.datetime.strptime(dtStr, '%Y%m%d_%H%M%S')
#print([f,updated_dt, forecast_dt])
ncin = Dataset(outDir+f, "r")
#valList = list(ncin.variables.keys())
#extract the variable of interest from the list
for varName in varList:
titleStr = varDict[varName]
var_mat = ncin.variables[varName][:]
if 'Temp' in titleStr:
var_val = var_mat.squeeze() - 273.15 #convert to DegC
else:
var_val = var_mat.squeeze()
lons = ncin.variables['longitude'][:]
lats = ncin.variables['latitude'][:]
lon_ind = [i for i,v in enumerate(lons.data) if v >= lon][0]
lat_ind = [i for i,v in enumerate(lats.data) if v >= lat][0]
vv = var_val[lat_ind, lon_ind]
df.loc[idx,'UPDATED_DATE_UTC']=updated_dt
df.loc[idx,'FORECAST_DATE_UTC']=forecast_dt
df.loc[idx,'MEASURE']=titleStr
df.loc[idx,'lon']=lon
df.loc[idx,'lat']=lat
df.loc[idx,'VALUE']=vv
idx=idx+1
ncin.close()
except Exception as e:
print(e)
return df
def get4DWeatherForecast(lon, lat):
df_all = pd.DataFrame()
try:
lat = float(lat)
lon = float(lon)
idx=3
updated_dtStr = list_of_ncfiles[0].split('__')[0]
updated_dt = datetime.datetime.strptime(updated_dtStr, '%Y%m%d_%H%M%S')
df_all = pd.DataFrame()
updated_dts = [updated_dt for x in range(0,len(tm_arr))]
forecast_dts = [datetime.datetime.utcfromtimestamp(int(x)) for x in tm_arr]
df_all['UPDATED_DATE_UTC']=updated_dts
df_all['FORECAST_DATE_UTC']=forecast_dts
for varName in varList:
df = pd.DataFrame()
print(varName)
#try:
titleStr = varDict[varName]
lon_ind = [i for i,v in enumerate(lons.data) if v >= lon][0]
lat_ind = [i for i,v in enumerate(lats.data) if v >= lat][0]
vv = var_val4D[lat_ind, lon_ind,:,idx]
df[titleStr]=vv
df_all = pd.concat([df_all, df],axis=1)
idx=idx-1
except Exception as e:
print(e)
return df_all
############
#create the app
app = Flask(__name__)
app.config['JSON_SORT_KEYS']=False
error_res = {}
#rendering the entry using any of these routes:
@app.route('/')
@app.route('/index')
@app.route('/home')
def index():
return render_template('index.html')
#global weather forecast implementation
@app.route('/weatherForecastVariables')
def weatherForecastVariables():
    try:
        weatherForecastVars = getWeatherForecastVars()
    except ValueError:
        error_res['db function call error'] = 'function call failed for getWeatherForecastVars'
        return jsonify(error_res)
    return jsonify(weatherForecastVars)
#global weather forecast implementation
@app.route('/weatherForecast')
def weatherForecast():
lat = request.args.get('lat')
lon = request.args.get('lon')
    try:
        weatherForecast_df = get4DWeatherForecast(lon, lat)
    except ValueError:
        error_res['db function call error'] = 'DB function call failed for getWeatherForecast'
        error_res['value given'] = 'lat='+str(lat)+', lon='+(str(lon))
        return jsonify(error_res)
    if len(weatherForecast_df) > 0:
        res = jsonify(weatherForecast_df.to_dict(orient='records'))
    else:
        res = "{'Error': 'WeatherForecast function returned no data'}"
    return res
#main to run the app
if __name__ == '__main__':
extra_files = [updated_data_available_file,]
"""
#For auto-reload if anyhing changes in the entire directory do the following:
extra_dirs = [outDir,]
extra_files = extra_dirs[:]
for extra_dir in extra_dirs:
for dirname, dirs, files in walk(extra_dir):
for filename in files:
filename = path.join(dirname, filename)
if path.isfile(filename):
extra_files.append(filename)
"""
app.run(host='0.0.0.0' , port=5000, debug=True, extra_files=extra_files)
|
[
"datetime.datetime.strftime",
"pandas.DataFrame",
"netCDF4.Dataset",
"numpy.dstack",
"flask.request.args.get",
"flask.Flask",
"numpy.append",
"datetime.datetime.strptime",
"flask.jsonify",
"numpy.reshape",
"flask.render_template",
"pandas.concat",
"os.listdir"
] |
[((2025, 2082), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['updatedDtStr', '"""%Y%m%d_%H%M%S"""'], {}), "(updatedDtStr, '%Y%m%d_%H%M%S')\n", (2051, 2082), False, 'import datetime\n'), ((2101, 2157), 'datetime.datetime.strftime', 'datetime.datetime.strftime', (['updatedDt', '"""%Y-%m-%dT%H%M%S"""'], {}), "(updatedDt, '%Y-%m-%dT%H%M%S')\n", (2127, 2157), False, 'import datetime\n'), ((2269, 2330), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['forecastEndDtStr', '"""%Y%m%d_%H%M%S"""'], {}), "(forecastEndDtStr, '%Y%m%d_%H%M%S')\n", (2295, 2330), False, 'import datetime\n'), ((2353, 2413), 'datetime.datetime.strftime', 'datetime.datetime.strftime', (['forecastEndDt', '"""%Y-%m-%dT%H%M%S"""'], {}), "(forecastEndDt, '%Y-%m-%dT%H%M%S')\n", (2379, 2413), False, 'import datetime\n'), ((6224, 6239), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (6229, 6239), False, 'from flask import Flask, render_template, jsonify, request, url_for\n'), ((3975, 3989), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3987, 3989), True, 'import pandas as pd\n'), ((5338, 5352), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5350, 5352), True, 'import pandas as pd\n'), ((6418, 6447), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (6433, 6447), False, 'from flask import Flask, render_template, jsonify, request, url_for\n'), ((6769, 6796), 'flask.jsonify', 'jsonify', (['weatherForcastVars'], {}), '(weatherForcastVars)\n', (6776, 6796), False, 'from flask import Flask, render_template, jsonify, request, url_for\n'), ((6899, 6922), 'flask.request.args.get', 'request.args.get', (['"""lat"""'], {}), "('lat')\n", (6915, 6922), False, 'from flask import Flask, render_template, jsonify, request, url_for\n'), ((6930, 6953), 'flask.request.args.get', 'request.args.get', (['"""lon"""'], {}), "('lon')\n", (6946, 6953), False, 'from flask import Flask, render_template, jsonify, request, url_for\n'), ((1266, 1284), 'os.listdir', 'os.listdir', (['outDir'], {}), '(outDir)\n', (1276, 1284), False, 'import os\n'), ((2619, 2643), 'netCDF4.Dataset', 'Dataset', (['(outDir + f)', '"""r"""'], {}), "(outDir + f, 'r')\n", (2626, 2643), False, 'from netCDF4 import Dataset\n'), ((3210, 3257), 'numpy.reshape', 'np.reshape', (['var_val3D', '(720, 1440, time_dim, 1)'], {}), '(var_val3D, (720, 1440, time_dim, 1))\n', (3220, 3257), True, 'import numpy as np\n'), ((3270, 3314), 'numpy.append', 'np.append', (['var_val3D_rshp', 'var_val4D'], {'axis': '(3)'}), '(var_val3D_rshp, var_val4D, axis=3)\n', (3279, 3314), True, 'import numpy as np\n'), ((3341, 3388), 'numpy.reshape', 'np.reshape', (['var_val3D', '(720, 1440, time_dim, 1)'], {}), '(var_val3D, (720, 1440, time_dim, 1))\n', (3351, 3388), True, 'import numpy as np\n'), ((4078, 4092), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4090, 4092), True, 'import pandas as pd\n'), ((4168, 4226), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['updated_dtStr', '"""%Y%m%d_%H%M%S"""'], {}), "(updated_dtStr, '%Y%m%d_%H%M%S')\n", (4194, 4226), False, 'import datetime\n'), ((5473, 5531), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['updated_dtStr', '"""%Y%m%d_%H%M%S"""'], {}), "(updated_dtStr, '%Y%m%d_%H%M%S')\n", (5499, 5531), False, 'import datetime\n'), ((5544, 5558), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5556, 5558), True, 'import pandas as pd\n'), ((3057, 3093), 'numpy.dstack', 'np.dstack', (['(var_val3D, var_val.data)'], {}), '((var_val3D, 
var_val.data))\n', (3066, 3093), True, 'import numpy as np\n'), ((4300, 4350), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['dtStr', '"""%Y%m%d_%H%M%S"""'], {}), "(dtStr, '%Y%m%d_%H%M%S')\n", (4326, 4350), False, 'import datetime\n'), ((4405, 4429), 'netCDF4.Dataset', 'Dataset', (['(outDir + f)', '"""r"""'], {}), "(outDir + f, 'r')\n", (4412, 4429), False, 'from netCDF4 import Dataset\n'), ((5815, 5829), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5827, 5829), True, 'import pandas as pd\n'), ((6091, 6122), 'pandas.concat', 'pd.concat', (['[df_all, df]'], {'axis': '(1)'}), '([df_all, df], axis=1)\n', (6100, 6122), True, 'import pandas as pd\n'), ((6741, 6759), 'flask.jsonify', 'jsonify', (['error_res'], {}), '(error_res)\n', (6748, 6759), False, 'from flask import Flask, render_template, jsonify, request, url_for\n'), ((7202, 7220), 'flask.jsonify', 'jsonify', (['error_res'], {}), '(error_res)\n', (7209, 7220), False, 'from flask import Flask, render_template, jsonify, request, url_for\n')]
|
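With the app running, the forecast endpoint takes plain lat/lon query parameters; a minimal client sketch (host and port match the app.run call above, the coordinates are arbitrary):

import requests

resp = requests.get("http://localhost:5000/weatherForecast", params={"lat": 28.6, "lon": 77.2})
print(resp.json()[:2])   # first two hourly records, one dict per forecast timestamp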
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pandas as pd
from pandapower.shortcircuit.idx_brch import IKSS_F, IKSS_T, IP_F, IP_T, ITH_F, ITH_T
from pandapower.shortcircuit.idx_bus import IKSS1, IP, ITH, IKSS2, R_EQUIV_OHM, X_EQUIV_OHM, SKSS
from pandapower.pypower.idx_bus import BUS_TYPE, BASE_KV
BRANCH_RESULTS_KEYS = ("branch_ikss_f", "branch_ikss_t",
"branch_ip_f", "branch_ip_t",
"branch_ith_f", "branch_ith_t")
def _copy_result_to_ppci_orig(ppci_orig, ppci, ppci_bus, calc_options):
if ppci_orig is ppci:
return
ppci_orig["bus"][ppci_bus, :] = ppci["bus"][ppci_bus, :]
if calc_options["branch_results"]:
if calc_options["return_all_currents"]:
ppci_orig["internal"]["br_res_ks_ppci_bus"] =\
ppci_bus if "br_res_ks_ppci_bus" not in ppci_orig["internal"]\
else np.r_[ppci_orig["internal"]["br_res_ks_ppci_bus"], ppci_bus]
for res_key in BRANCH_RESULTS_KEYS:
# Skip not required data points
if res_key not in ppci["internal"]:
continue
if res_key not in ppci_orig["internal"]:
ppci_orig["internal"][res_key] = ppci["internal"][res_key]
else:
ppci_orig["internal"][res_key] = np.c_[ppci_orig["internal"][res_key],
ppci["internal"][res_key]]
else:
case = calc_options["case"]
branch_results_cols = [IKSS_F, IKSS_T, IP_F, IP_T, ITH_F, ITH_T]
if case == "max":
ppci_orig["branch"][:, branch_results_cols] =\
np.maximum(np.nan_to_num(ppci["branch"][:, branch_results_cols]),
np.nan_to_num(ppci_orig["branch"][:, branch_results_cols]))
else:
ppci_orig["branch"][:, branch_results_cols] =\
np.minimum(np.nan_to_num(ppci["branch"][:, branch_results_cols], nan=1e10),
np.nan_to_num(ppci_orig["branch"][:, branch_results_cols], nan=1e10))
def _get_bus_ppc_idx_for_br_all_results(net, ppc, bus):
bus_lookup = net._pd2ppc_lookups["bus"]
if bus is None:
bus = net.bus.index
ppc_index = bus_lookup[bus]
ppc_index[ppc["bus"][ppc_index, BUS_TYPE] == 4] = -1
return bus, ppc_index
def _extract_results(net, ppc, ppc_0, bus):
_get_bus_results(net, ppc, ppc_0, bus)
if net._options["branch_results"]:
if net._options['return_all_currents']:
_get_line_all_results(net, ppc, bus)
_get_trafo_all_results(net, ppc, bus)
_get_trafo3w_all_results(net, ppc, bus)
else:
_get_line_results(net, ppc)
_get_trafo_results(net, ppc)
_get_trafo3w_results(net, ppc)
def _get_bus_results(net, ppc, ppc_0, bus):
bus_lookup = net._pd2ppc_lookups["bus"]
ppc_index = bus_lookup[net.bus.index]
if net["_options"]["fault"] == "1ph":
net.res_bus_sc["ikss_ka"] = ppc_0["bus"][ppc_index, IKSS1] + ppc["bus"][ppc_index, IKSS2]
net.res_bus_sc["rk0_ohm"] = ppc_0["bus"][ppc_index, R_EQUIV_OHM]
net.res_bus_sc["xk0_ohm"] = ppc_0["bus"][ppc_index, X_EQUIV_OHM]
# in trafo3w, we add very high numbers (1e10) as impedances to block current
# here, we need to replace such high values by np.inf
baseZ = ppc_0["bus"][ppc_index, BASE_KV] ** 2 / ppc_0["baseMVA"]
net.res_bus_sc["xk0_ohm"].loc[net.res_bus_sc["xk0_ohm"]/baseZ > 1e9] = np.inf
net.res_bus_sc["rk0_ohm"].loc[net.res_bus_sc["rk0_ohm"]/baseZ > 1e9] = np.inf
else:
net.res_bus_sc["ikss_ka"] = ppc["bus"][ppc_index, IKSS1] + ppc["bus"][ppc_index, IKSS2]
net.res_bus_sc["skss_mw"] = ppc["bus"][ppc_index, SKSS]
if net._options["ip"]:
net.res_bus_sc["ip_ka"] = ppc["bus"][ppc_index, IP]
if net._options["ith"]:
net.res_bus_sc["ith_ka"] = ppc["bus"][ppc_index, ITH]
# Export also equivalent rk, xk on the calculated bus
net.res_bus_sc["rk_ohm"] = ppc["bus"][ppc_index, R_EQUIV_OHM]
net.res_bus_sc["xk_ohm"] = ppc["bus"][ppc_index, X_EQUIV_OHM]
# if for some reason (e.g. contribution of ext_grid set close to 0) we used very high values for rk, xk, we replace them by np.inf
baseZ = ppc["bus"][ppc_index, BASE_KV] ** 2 / ppc["baseMVA"]
net.res_bus_sc["rk_ohm"].loc[net.res_bus_sc["rk_ohm"] / baseZ > 1e9] = np.inf
net.res_bus_sc["xk_ohm"].loc[net.res_bus_sc["xk_ohm"] / baseZ > 1e9] = np.inf
net.res_bus_sc = net.res_bus_sc.loc[bus, :]
def _get_line_results(net, ppc):
branch_lookup = net._pd2ppc_lookups["branch"]
case = net._options["case"]
if "line" in branch_lookup:
f, t = branch_lookup["line"]
minmax = np.max if case == "max" else np.min
net.res_line_sc["ikss_ka"] = minmax(ppc["branch"][f:t, [IKSS_F, IKSS_T]].real, axis=1)
if net._options["ip"]:
net.res_line_sc["ip_ka"] = minmax(ppc["branch"][f:t, [IP_F, IP_T]].real, axis=1)
if net._options["ith"]:
net.res_line_sc["ith_ka"] = minmax(ppc["branch"][f:t, [ITH_F, ITH_T]].real, axis=1)
def _get_line_all_results(net, ppc, bus):
case = net._options["case"]
bus, ppc_index = _get_bus_ppc_idx_for_br_all_results(net, ppc, bus)
branch_lookup = net._pd2ppc_lookups["branch"]
multindex = pd.MultiIndex.from_product([net.res_line_sc.index, bus], names=['line','bus'])
net.res_line_sc = net.res_line_sc.reindex(multindex)
if "line" in branch_lookup:
f, t = branch_lookup["line"]
minmax = np.maximum if case == "max" else np.minimum
net.res_line_sc["ikss_ka"] = minmax(ppc["internal"]["branch_ikss_f"].iloc[f:t,:].loc[:, ppc_index].values.real.reshape(-1, 1),
ppc["internal"]["branch_ikss_t"].iloc[f:t,:].loc[:, ppc_index].values.real.reshape(-1, 1))
if net._options["ip"]:
net.res_line_sc["ip_ka"] = minmax(ppc["internal"]["branch_ip_f"].iloc[f:t,:].loc[:, ppc_index].values.real.reshape(-1, 1),
ppc["internal"]["branch_ip_t"].iloc[f:t,:].loc[:, ppc_index].values.real.reshape(-1, 1))
if net._options["ith"]:
net.res_line_sc["ith_ka"] = minmax(ppc["internal"]["branch_ith_f"].iloc[f:t,:].loc[:, ppc_index].values.real.reshape(-1, 1),
ppc["internal"]["branch_ith_t"].iloc[f:t,:].loc[:, ppc_index].values.real.reshape(-1, 1))
def _get_trafo_results(net, ppc):
branch_lookup = net._pd2ppc_lookups["branch"]
if "trafo" in branch_lookup:
f, t = branch_lookup["trafo"]
net.res_trafo_sc["ikss_hv_ka"] = ppc["branch"][f:t, IKSS_F].real
net.res_trafo_sc["ikss_lv_ka"] = ppc["branch"][f:t, IKSS_T].real
def _get_trafo_all_results(net, ppc, bus):
bus, ppc_index = _get_bus_ppc_idx_for_br_all_results(net, ppc, bus)
branch_lookup = net._pd2ppc_lookups["branch"]
multindex = pd.MultiIndex.from_product([net.res_trafo_sc.index, bus], names=['trafo', 'bus'])
net.res_trafo_sc = net.res_trafo_sc.reindex(multindex)
if "trafo" in branch_lookup:
f, t = branch_lookup["trafo"]
net.res_trafo_sc["ikss_hv_ka"] = ppc["internal"]["branch_ikss_f"].iloc[f:t,:].loc[:, ppc_index].values.real.reshape(-1, 1)
net.res_trafo_sc["ikss_lv_ka"] = ppc["internal"]["branch_ikss_t"].iloc[f:t,:].loc[:, ppc_index].values.real.reshape(-1, 1)
def _get_trafo3w_results(net, ppc):
branch_lookup = net._pd2ppc_lookups["branch"]
if "trafo3w" in branch_lookup:
f, t = net._pd2ppc_lookups["branch"]["trafo3w"]
hv = int(f + (t - f) / 3)
mv = int(f + 2 * (t - f) / 3)
lv = t
net.res_trafo3w_sc["ikss_hv_ka"] = ppc["branch"][f:hv, IKSS_F].real
net.res_trafo3w_sc["ikss_mv_ka"] = ppc["branch"][hv:mv, IKSS_T].real
net.res_trafo3w_sc["ikss_lv_ka"] = ppc["branch"][mv:lv, IKSS_T].real
def _get_trafo3w_all_results(net, ppc, bus):
bus, ppc_index = _get_bus_ppc_idx_for_br_all_results(net, ppc, bus)
branch_lookup = net._pd2ppc_lookups["branch"]
multindex = pd.MultiIndex.from_product([net.res_trafo3w_sc.index, bus], names=['trafo3w', 'bus'])
net.res_trafo3w_sc = net.res_trafo3w_sc.reindex(multindex)
if "trafo3w" in branch_lookup:
f, t = branch_lookup["trafo3w"]
hv = int(f + (t - f) / 3)
mv = int(f + 2 * (t - f) / 3)
lv = t
net.res_trafo3w_sc["ikss_hv_ka"] = ppc["internal"]["branch_ikss_f"].iloc[f:hv,:].loc[:, ppc_index].values.real.reshape(-1, 1)
net.res_trafo3w_sc["ikss_mv_ka"] = ppc["internal"]["branch_ikss_t"].iloc[hv:mv, :].loc[:, ppc_index].values.real.reshape(-1, 1)
net.res_trafo3w_sc["ikss_lv_ka"] = ppc["internal"]["branch_ikss_t"].iloc[mv:lv, :].loc[:, ppc_index].values.real.reshape(-1, 1)
|
[
"pandas.MultiIndex.from_product",
"numpy.nan_to_num"
] |
[((5733, 5812), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[net.res_line_sc.index, bus]'], {'names': "['line', 'bus']"}), "([net.res_line_sc.index, bus], names=['line', 'bus'])\n", (5759, 5812), True, 'import pandas as pd\n'), ((7392, 7477), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[net.res_trafo_sc.index, bus]'], {'names': "['trafo', 'bus']"}), "([net.res_trafo_sc.index, bus], names=['trafo',\n 'bus'])\n", (7418, 7477), True, 'import pandas as pd\n'), ((8574, 8664), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[net.res_trafo3w_sc.index, bus]'], {'names': "['trafo3w', 'bus']"}), "([net.res_trafo3w_sc.index, bus], names=[\n 'trafo3w', 'bus'])\n", (8600, 8664), True, 'import pandas as pd\n'), ((1932, 1985), 'numpy.nan_to_num', 'np.nan_to_num', (["ppci['branch'][:, branch_results_cols]"], {}), "(ppci['branch'][:, branch_results_cols])\n", (1945, 1985), True, 'import numpy as np\n'), ((2019, 2077), 'numpy.nan_to_num', 'np.nan_to_num', (["ppci_orig['branch'][:, branch_results_cols]"], {}), "(ppci_orig['branch'][:, branch_results_cols])\n", (2032, 2077), True, 'import numpy as np\n'), ((2194, 2266), 'numpy.nan_to_num', 'np.nan_to_num', (["ppci['branch'][:, branch_results_cols]"], {'nan': '(10000000000.0)'}), "(ppci['branch'][:, branch_results_cols], nan=10000000000.0)\n", (2207, 2266), True, 'import numpy as np\n'), ((2291, 2368), 'numpy.nan_to_num', 'np.nan_to_num', (["ppci_orig['branch'][:, branch_results_cols]"], {'nan': '(10000000000.0)'}), "(ppci_orig['branch'][:, branch_results_cols], nan=10000000000.0)\n", (2304, 2368), True, 'import numpy as np\n')]
|
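The extraction helpers above fill net.res_bus_sc and the per-branch result tables after a short-circuit calculation. A minimal end-to-end sketch of how they get exercised, assuming a recent pandapower release (the standard line type name comes from the pandapower standard-type library):

import pandapower as pp
import pandapower.shortcircuit as sc

net = pp.create_empty_network()
b1 = pp.create_bus(net, vn_kv=20.0)
b2 = pp.create_bus(net, vn_kv=20.0)
pp.create_ext_grid(net, b1, s_sc_max_mva=100.0, rx_max=0.1)
pp.create_line(net, b1, b2, length_km=1.0, std_type="NA2XS2Y 1x240 RM/25 12/20 kV")

sc.calc_sc(net, case="max", ip=True, branch_results=True)
print(net.res_bus_sc)    # ikss_ka, skss_mw, ip_ka plus the equivalent rk/xk in ohm
print(net.res_line_sc)   # per-line currents filled by _get_line_results above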
from django.db import models
from app.models.user import User
class Vehicle(models.Model):
user = models.ForeignKey(User, null=True)
plate = models.CharField(max_length=255, blank=False)
brand = models.CharField(max_length=255, blank=False)
name = models.CharField(max_length=255, blank=False)
color = models.CharField(max_length=255, blank=True)
class Meta:
ordering = ('user', 'brand',)
|
[
"django.db.models.ForeignKey",
"django.db.models.CharField"
] |
[((104, 138), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'null': '(True)'}), '(User, null=True)\n', (121, 138), False, 'from django.db import models\n'), ((152, 197), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(False)'}), '(max_length=255, blank=False)\n', (168, 197), False, 'from django.db import models\n'), ((210, 255), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(False)'}), '(max_length=255, blank=False)\n', (226, 255), False, 'from django.db import models\n'), ((267, 312), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(False)'}), '(max_length=255, blank=False)\n', (283, 312), False, 'from django.db import models\n'), ((325, 369), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (341, 369), False, 'from django.db import models\n')]
|
import threading
import DobotDllType as dType
CON_STR = {
dType.DobotConnect.DobotConnect_NoError: "DobotConnect_NoError",
dType.DobotConnect.DobotConnect_NotFound: "DobotConnect_NotFound",
dType.DobotConnect.DobotConnect_Occupied: "DobotConnect_Occupied"}
#Load Dll
api = dType.load()
#Connect Dobot
state = dType.ConnectDobot(api, "", 115200)[0]
print("Connect status:",CON_STR[state])
if (state == dType.DobotConnect.DobotConnect_NoError):
#Clean Command Queued
dType.SetQueuedCmdClear(api)
#Async Motion Params Setting
dType.SetHOMEParams(api, 200, 200, 200, 200, isQueued = 1)
dType.SetPTPJointParams(api, 200, 200, 200, 200, 200, 200, 200, 200, isQueued = 1)
dType.SetPTPCommonParams(api, 100, 100, isQueued = 1)
#Async Home
dType.SetHOMECmd(api, temp = 0, isQueued = 1)
#Async PTP Motion
for i in range(0, 5):
if i % 2 == 0:
offset = 50
else:
offset = -50
lastIndex = dType.SetPTPCmd(api, dType.PTPMode.PTPMOVLXYZMode, 200 + offset, offset, offset, offset, isQueued = 1)[0]
#Start to Execute Command Queued
dType.SetQueuedCmdStartExec(api)
#Wait for Executing Last Command
while lastIndex > dType.GetQueuedCmdCurrentIndex(api)[0]:
dType.dSleep(100)
#Stop to Execute Command Queued
dType.SetQueuedCmdStopExec(api)
#Disconnect Dobot
dType.DisconnectDobot(api)
|
[
"DobotDllType.ConnectDobot",
"DobotDllType.SetHOMECmd",
"DobotDllType.SetPTPCommonParams",
"DobotDllType.dSleep",
"DobotDllType.SetPTPJointParams",
"DobotDllType.GetQueuedCmdCurrentIndex",
"DobotDllType.SetPTPCmd",
"DobotDllType.SetQueuedCmdStartExec",
"DobotDllType.SetQueuedCmdStopExec",
"DobotDllType.DisconnectDobot",
"DobotDllType.SetQueuedCmdClear",
"DobotDllType.SetHOMEParams",
"DobotDllType.load"
] |
[((297, 309), 'DobotDllType.load', 'dType.load', ([], {}), '()\n', (307, 309), True, 'import DobotDllType as dType\n'), ((1431, 1457), 'DobotDllType.DisconnectDobot', 'dType.DisconnectDobot', (['api'], {}), '(api)\n', (1452, 1457), True, 'import DobotDllType as dType\n'), ((337, 372), 'DobotDllType.ConnectDobot', 'dType.ConnectDobot', (['api', '""""""', '(115200)'], {}), "(api, '', 115200)\n", (355, 372), True, 'import DobotDllType as dType\n'), ((509, 537), 'DobotDllType.SetQueuedCmdClear', 'dType.SetQueuedCmdClear', (['api'], {}), '(api)\n', (532, 537), True, 'import DobotDllType as dType\n'), ((579, 635), 'DobotDllType.SetHOMEParams', 'dType.SetHOMEParams', (['api', '(200)', '(200)', '(200)', '(200)'], {'isQueued': '(1)'}), '(api, 200, 200, 200, 200, isQueued=1)\n', (598, 635), True, 'import DobotDllType as dType\n'), ((643, 728), 'DobotDllType.SetPTPJointParams', 'dType.SetPTPJointParams', (['api', '(200)', '(200)', '(200)', '(200)', '(200)', '(200)', '(200)', '(200)'], {'isQueued': '(1)'}), '(api, 200, 200, 200, 200, 200, 200, 200, 200, isQueued=1\n )\n', (666, 728), True, 'import DobotDllType as dType\n'), ((731, 782), 'DobotDllType.SetPTPCommonParams', 'dType.SetPTPCommonParams', (['api', '(100)', '(100)'], {'isQueued': '(1)'}), '(api, 100, 100, isQueued=1)\n', (755, 782), True, 'import DobotDllType as dType\n'), ((809, 850), 'DobotDllType.SetHOMECmd', 'dType.SetHOMECmd', (['api'], {'temp': '(0)', 'isQueued': '(1)'}), '(api, temp=0, isQueued=1)\n', (825, 850), True, 'import DobotDllType as dType\n'), ((1169, 1201), 'DobotDllType.SetQueuedCmdStartExec', 'dType.SetQueuedCmdStartExec', (['api'], {}), '(api)\n', (1196, 1201), True, 'import DobotDllType as dType\n'), ((1377, 1408), 'DobotDllType.SetQueuedCmdStopExec', 'dType.SetQueuedCmdStopExec', (['api'], {}), '(api)\n', (1403, 1408), True, 'import DobotDllType as dType\n'), ((1315, 1332), 'DobotDllType.dSleep', 'dType.dSleep', (['(100)'], {}), '(100)\n', (1327, 1332), True, 'import DobotDllType as dType\n'), ((1018, 1122), 'DobotDllType.SetPTPCmd', 'dType.SetPTPCmd', (['api', 'dType.PTPMode.PTPMOVLXYZMode', '(200 + offset)', 'offset', 'offset', 'offset'], {'isQueued': '(1)'}), '(api, dType.PTPMode.PTPMOVLXYZMode, 200 + offset, offset,\n offset, offset, isQueued=1)\n', (1033, 1122), True, 'import DobotDllType as dType\n'), ((1266, 1301), 'DobotDllType.GetQueuedCmdCurrentIndex', 'dType.GetQueuedCmdCurrentIndex', (['api'], {}), '(api)\n', (1296, 1301), True, 'import DobotDllType as dType\n')]
|
from django.shortcuts import render
from libs.http import render_json
from vip.models import Vip
def info(request):
vip_info = []
for vip in Vip.objects.exclude(level=0).order_by('level'):
v_info = vip.to_dict()
v_info['perms'] = []
for perm in vip.perms:
v_info['perms'].append(perm.to_dict())
vip_info.append(v_info)
return render_json(data=vip_info)
|
[
"vip.models.Vip.objects.exclude",
"libs.http.render_json"
] |
[((389, 415), 'libs.http.render_json', 'render_json', ([], {'data': 'vip_info'}), '(data=vip_info)\n', (400, 415), False, 'from libs.http import render_json\n'), ((153, 181), 'vip.models.Vip.objects.exclude', 'Vip.objects.exclude', ([], {'level': '(0)'}), '(level=0)\n', (172, 181), False, 'from vip.models import Vip\n')]
|
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pants.backend.native.config.environment import HostLibcDev
from pants.backend.native.subsystems.utils.parse_search_dirs import ParseSearchDirs
from pants.base.hash_utils import hash_file
from pants.option.custom_types import dir_option
from pants.subsystem.subsystem import Subsystem
from pants.util.memo import memoized_property
class LibcDev(Subsystem):
"""Subsystem to detect and provide the host's installed version of a libc "dev" package.
A libc "dev" package is provided on most Linux systems by default, but may not be located at any
standardized path. We define a libc dev package as one which provides crti.o, an object file which
is part of any libc implementation and is required to create executables (more information
available at https://wiki.osdev.org/Creating_a_C_Library).
NB: This is currently unused except in CI, because we have no plans to support creating native
executables from C or C++ sources yet (PRs welcome!). It is used to provide an "end-to-end" test
of the compilation and linking toolchain in CI by creating and invoking a "hello world"
executable.
"""
options_scope = 'libc'
class HostLibcDevResolutionError(Exception): pass
@classmethod
def subsystem_dependencies(cls):
return super(LibcDev, cls).subsystem_dependencies() + (ParseSearchDirs.scoped(cls),)
@memoized_property
def _parse_search_dirs(self):
return ParseSearchDirs.scoped_instance(self)
@classmethod
def register_options(cls, register):
super(LibcDev, cls).register_options(register)
register('--enable-libc-search', type=bool, default=False, fingerprint=True, advanced=True,
help="Whether to search for the host's libc installation. Set to False if the host "
"does not have a libc install with crti.o -- this file is necessary to create "
"executables on Linux hosts.")
register('--libc-dir', type=dir_option, default=None, fingerprint=True, advanced=True,
help='A directory containing a host-specific crti.o from libc.')
register('--host-compiler', type=str, default='gcc', fingerprint=True, advanced=True,
help='The host compiler to invoke with -print-search-dirs to find the host libc.')
# NB: crti.o is required to create executables on Linux. Our provided gcc and clang can find it if
# the containing directory is within the LIBRARY_PATH environment variable when we invoke the
# compiler.
_LIBC_INIT_OBJECT_FILE = 'crti.o'
def _get_host_libc_from_host_compiler(self):
"""Locate the host's libc-dev installation using a specified host compiler's search dirs."""
compiler_exe = self.get_options().host_compiler
# Implicitly, we are passing in the environment of the executing pants process to
# `get_compiler_library_dirs()`.
# These directories are checked to exist!
library_dirs = self._parse_search_dirs.get_compiler_library_dirs(compiler_exe)
libc_crti_object_file = None
for libc_dir_candidate in library_dirs:
maybe_libc_crti = os.path.join(libc_dir_candidate, self._LIBC_INIT_OBJECT_FILE)
if os.path.isfile(maybe_libc_crti):
libc_crti_object_file = maybe_libc_crti
break
if not libc_crti_object_file:
raise self.HostLibcDevResolutionError(
"Could not locate {fname} in library search dirs {dirs} from compiler: {compiler!r}. "
"You may need to install a libc dev package for the current system. "
"For many operating systems, this package is named 'libc-dev' or 'libc6-dev'."
.format(fname=self._LIBC_INIT_OBJECT_FILE, dirs=library_dirs, compiler=compiler_exe))
return HostLibcDev(crti_object=libc_crti_object_file,
fingerprint=hash_file(libc_crti_object_file))
@memoized_property
def _host_libc(self):
"""Use the --libc-dir option if provided, otherwise invoke a host compiler to find libc dev."""
libc_dir_option = self.get_options().libc_dir
if libc_dir_option:
maybe_libc_crti = os.path.join(libc_dir_option, self._LIBC_INIT_OBJECT_FILE)
if os.path.isfile(maybe_libc_crti):
return HostLibcDev(crti_object=maybe_libc_crti,
fingerprint=hash_file(maybe_libc_crti))
raise self.HostLibcDevResolutionError(
"Could not locate {} in directory {} provided by the --libc-dir option."
.format(self._LIBC_INIT_OBJECT_FILE, libc_dir_option))
return self._get_host_libc_from_host_compiler()
def get_libc_dirs(self, platform):
if not self.get_options().enable_libc_search:
return []
return platform.resolve_platform_specific({
'darwin': lambda: [],
'linux': lambda: [self._host_libc.get_lib_dir()],
})
|
[
"pants.backend.native.subsystems.utils.parse_search_dirs.ParseSearchDirs.scoped",
"pants.backend.native.subsystems.utils.parse_search_dirs.ParseSearchDirs.scoped_instance",
"os.path.isfile",
"pants.base.hash_utils.hash_file",
"os.path.join"
] |
[((1642, 1679), 'pants.backend.native.subsystems.utils.parse_search_dirs.ParseSearchDirs.scoped_instance', 'ParseSearchDirs.scoped_instance', (['self'], {}), '(self)\n', (1673, 1679), False, 'from pants.backend.native.subsystems.utils.parse_search_dirs import ParseSearchDirs\n'), ((3283, 3344), 'os.path.join', 'os.path.join', (['libc_dir_candidate', 'self._LIBC_INIT_OBJECT_FILE'], {}), '(libc_dir_candidate, self._LIBC_INIT_OBJECT_FILE)\n', (3295, 3344), False, 'import os\n'), ((3354, 3385), 'os.path.isfile', 'os.path.isfile', (['maybe_libc_crti'], {}), '(maybe_libc_crti)\n', (3368, 3385), False, 'import os\n'), ((4255, 4313), 'os.path.join', 'os.path.join', (['libc_dir_option', 'self._LIBC_INIT_OBJECT_FILE'], {}), '(libc_dir_option, self._LIBC_INIT_OBJECT_FILE)\n', (4267, 4313), False, 'import os\n'), ((4323, 4354), 'os.path.isfile', 'os.path.isfile', (['maybe_libc_crti'], {}), '(maybe_libc_crti)\n', (4337, 4354), False, 'import os\n'), ((1547, 1574), 'pants.backend.native.subsystems.utils.parse_search_dirs.ParseSearchDirs.scoped', 'ParseSearchDirs.scoped', (['cls'], {}), '(cls)\n', (1569, 1574), False, 'from pants.backend.native.subsystems.utils.parse_search_dirs import ParseSearchDirs\n'), ((3977, 4009), 'pants.base.hash_utils.hash_file', 'hash_file', (['libc_crti_object_file'], {}), '(libc_crti_object_file)\n', (3986, 4009), False, 'from pants.base.hash_utils import hash_file\n'), ((4451, 4477), 'pants.base.hash_utils.hash_file', 'hash_file', (['maybe_libc_crti'], {}), '(maybe_libc_crti)\n', (4460, 4477), False, 'from pants.base.hash_utils import hash_file\n')]
|
import numpy as np
import ipyvolume as ipv
import h5py
import os
import matplotlib.pyplot as plt
import sys
from tqdm import tqdm
kinect_dir = '../dataset/kinect/'
dir = '../dataset/data/'
kinect_files = os.listdir(kinect_dir)
missing_file_count = 0
def get_vibe_dir(x):
x1 = x[16,:] - x[0,:]
x2 = x[17,:] - x[0,:]
return np.cross(x1,x2)
def get_kinect_dir(x):
x1 = x[8,:] - x[0,:]
x2 = x[4,:] - x[0,:]
return np.cross(x1, x2)
def get_kinect_person(i):
# this function returns Kinect skeleton given the index
f = i + '.skeleton.npy'
p = np.zeros((300,2,25,3))
if f not in kinect_files:
# print(f)
pass
else:
kp = np.load(os.path.join(kinect_dir, f))
if kp.shape[0] != 0:
if kp.shape[0] == 1:
p[:,0,:,:] = kp[0,:,:,:]
else:
p[:,0,:,:] = kp[0,:,:,:]
p[:,1,:,:] = kp[1,:,:,:]
return p[:256,:,:,:]
def order_root(kinect_person, vibe):
vibe = vibe.reshape((256, 2, 24, 3))
# kinect_person = kinect_person[::4,:,:,:]
person = vibe[:,:,:]
left = kinect_person[:,0,0,:].reshape((256,1,3))
right = kinect_person[:,1,0,:].reshape((256,1,3))
person1 = person[:,0,:,:] + left
person2 = person[:,1,:,:] + right
v1 = get_vibe_dir(person[0,0,:,:])
v2 = get_vibe_dir(person[0,1,:,:])
v_cross = np.cross(v1, v2)
k1 = get_kinect_dir(kinect_person[0,0,:,:])
k2 = get_kinect_dir(kinect_person[0,1,:,:])
k_cross = np.cross(k1,k2)
dot_prod = np.sum(v_cross*k_cross)
# print(dot_prod)
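    # if the orientation normals of the VIBE and Kinect skeletons agree (positive dot
    # product), both list the two persons in the same order; otherwise swap them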
if dot_prod > 0:
# right direction
return left, right
elif dot_prod < 0:
# Wrong Direction
return right, left
else:
# one person missing
return left, right
def get_root(x, y, train_file_names):
count = 0
root_list = []
# person_2_cls = [50,51,52,53,54,55,56,57,58,59,60]
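    # NTU RGB+D action classes that involve two persons; only these need a second root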
person_2_cls = [50,51,52,53,54,55,56,57,58,59,60,106, 107, 108, 109
,110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120]
for i in tqdm(range(train_file_names.shape[0])):
file_name = train_file_names[i]
if len(file_name) >= 2 and len(file_name) != 20:
file_name = file_name[0]
if str(file_name)[0] == '[':
file_name = file_name[0]
root = np.zeros((256, 2, 3))
if y[i] in person_2_cls:
            p = get_kinect_person(file_name)
left, right = order_root(p, x[i])
if y[i] == 60:
root[:,0:1,:] = right
root[:,1:,:] = left
else:
root[:,0:1,:] = left
root[:,1:,:] = right
root_list.append(root)
return np.array(root_list)
if __name__ == '__main__':
f = h5py.File(os.path.join(dir, 'NTU_VIBE_CSet_120.h5'), 'r')
# train data
x = f['x'][:]
y = f['y'][:]
train_file_names = np.load(os.path.join(dir, 'Train_File_order.npy'), allow_pickle=True)
# print(x.shape)
train_root = get_root(x, y, train_file_names)
print(train_root.shape)
np.save(dir + 'Train_root.npy', train_root)
# test data
# test_x = f['test_x'][:]
# test_y = f['test_y'][:]
# test_file_names = np.load(os.path.join(dir, 'Test_File_order.npy'), allow_pickle=True)
# test_root = get_root(test_x, test_y, test_file_names)
# print(test_root.shape)
# np.save(dir + 'Test_root.npy', test_root)
|
[
"numpy.save",
"numpy.sum",
"numpy.zeros",
"numpy.cross",
"numpy.array",
"os.path.join",
"os.listdir"
] |
[((206, 228), 'os.listdir', 'os.listdir', (['kinect_dir'], {}), '(kinect_dir)\n', (216, 228), False, 'import os\n'), ((337, 353), 'numpy.cross', 'np.cross', (['x1', 'x2'], {}), '(x1, x2)\n', (345, 353), True, 'import numpy as np\n'), ((439, 455), 'numpy.cross', 'np.cross', (['x1', 'x2'], {}), '(x1, x2)\n', (447, 455), True, 'import numpy as np\n'), ((578, 603), 'numpy.zeros', 'np.zeros', (['(300, 2, 25, 3)'], {}), '((300, 2, 25, 3))\n', (586, 603), True, 'import numpy as np\n'), ((1383, 1399), 'numpy.cross', 'np.cross', (['v1', 'v2'], {}), '(v1, v2)\n', (1391, 1399), True, 'import numpy as np\n'), ((1511, 1527), 'numpy.cross', 'np.cross', (['k1', 'k2'], {}), '(k1, k2)\n', (1519, 1527), True, 'import numpy as np\n'), ((1547, 1572), 'numpy.sum', 'np.sum', (['(v_cross * k_cross)'], {}), '(v_cross * k_cross)\n', (1553, 1572), True, 'import numpy as np\n'), ((2769, 2788), 'numpy.array', 'np.array', (['root_list'], {}), '(root_list)\n', (2777, 2788), True, 'import numpy as np\n'), ((3107, 3150), 'numpy.save', 'np.save', (["(dir + 'Train_root.npy')", 'train_root'], {}), "(dir + 'Train_root.npy', train_root)\n", (3114, 3150), True, 'import numpy as np\n'), ((2367, 2388), 'numpy.zeros', 'np.zeros', (['(256, 2, 3)'], {}), '((256, 2, 3))\n', (2375, 2388), True, 'import numpy as np\n'), ((2833, 2874), 'os.path.join', 'os.path.join', (['dir', '"""NTU_VIBE_CSet_120.h5"""'], {}), "(dir, 'NTU_VIBE_CSet_120.h5')\n", (2845, 2874), False, 'import os\n'), ((2954, 2995), 'os.path.join', 'os.path.join', (['dir', '"""Train_File_order.npy"""'], {}), "(dir, 'Train_File_order.npy')\n", (2966, 2995), False, 'import os\n'), ((694, 721), 'os.path.join', 'os.path.join', (['kinect_dir', 'f'], {}), '(kinect_dir, f)\n', (706, 721), False, 'import os\n')]
|
# %% [markdown]
# #
import os
import pickle
import warnings
from operator import itemgetter
from pathlib import Path
from timeit import default_timer as timer
import colorcet as cc
import community as cm
import matplotlib.colors as mplc
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from joblib import Parallel, delayed
from matplotlib.cm import ScalarMappable
from sklearn.model_selection import ParameterGrid
from graspy.embed import AdjacencySpectralEmbed, LaplacianSpectralEmbed
from graspy.plot import gridplot, heatmap, pairplot
from graspy.utils import symmetrize
from src.data import load_everything, load_metagraph, load_networkx
from src.embed import lse, preprocess_graph
from src.graph import MetaGraph, preprocess
from src.hierarchy import signal_flow
from src.io import savefig, saveobj, saveskels, savecsv
from src.utils import get_blockmodel_df, get_sbm_prob
from src.visualization import (
CLASS_COLOR_DICT,
CLASS_IND_DICT,
barplot_text,
bartreeplot,
draw_networkx_nice,
get_color_dict,
get_colors,
palplot,
probplot,
sankey,
screeplot,
stacked_barplot,
random_names,
)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
# %% [markdown]
# # Parameters
BRAIN_VERSION = "2020-03-02"
BLIND = True
SAVEFIGS = False
SAVESKELS = False
SAVEOBJS = True
np.random.seed(9812343)
sns.set_context("talk")
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
plt.close()
def stashcsv(df, name, **kws):
savecsv(df, name, foldername=FNAME, save_on=True, **kws)
def stashskel(name, ids, labels, colors=None, palette=None, **kws):
saveskels(
name,
ids,
labels,
colors=colors,
palette=None,
foldername=FNAME,
save_on=SAVESKELS,
**kws,
)
def stashobj(obj, name, **kws):
saveobj(obj, name, foldername=FNAME, save_on=SAVEOBJS, **kws)
graph_type = "G"
threshold = 3
binarize = True
# load and preprocess the data
mg = load_metagraph(graph_type, version=BRAIN_VERSION)
mg = preprocess(
mg, threshold=threshold, sym_threshold=True, remove_pdiff=True, binarize=binarize
)
#%%
import leidenalg as la
import igraph as ig
def _process_metagraph(mg, temp_loc):
adj = mg.adj
adj = symmetrize(adj, method="avg")
mg = MetaGraph(adj, mg.meta)
nx.write_graphml(mg.g, temp_loc)
def run_leiden(
mg,
temp_loc=None,
implementation="igraph",
partition_type=la.CPMVertexPartition,
**kws,
):
if temp_loc is None:
temp_loc = f"maggot_models/data/interim/temp-{np.random.randint(1e8)}.graphml"
else:
temp_loc = f"maggot_models/data/interim/{temp_loc}.graphml"
_process_metagraph(mg, temp_loc)
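    # round-trip through GraphML so the symmetrized networkx graph can be read by igraph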
g = ig.Graph.Read_GraphML(temp_loc)
os.remove(temp_loc)
nodes = [int(v["id"]) for v in g.vs]
if implementation == "igraph":
vert_part = g.community_leiden(**kws)
elif implementation == "leidenalg":
vert_part = la.find_partition(g, partition_type, **kws)
labels = vert_part.membership
partition = pd.Series(data=labels, index=nodes)
return partition, vert_part.modularity
# %% [markdown]
# #
temp_loc = f"maggot_models/data/interim/temp-{np.random.randint(1e8)}.graphml"
_process_metagraph(mg, temp_loc)
g = ig.Graph.Read_GraphML(temp_loc)
os.remove(temp_loc)
nodes = [int(v["id"]) for v in g.vs]
vert_part = g.community_multilevel()
labels = vert_part.membership
partition = pd.Series(data=labels, index=nodes)
# %% [markdown]
# #
partition, modularity = run_leiden(
mg,
implementation="igraph",
resolution_parameter=0.1,
beta=0.1,
partition_type=la.CPMVertexPartition,
weights="weight",
n_iterations=-1,
)
print(partition.nunique())
# %% [markdown]
# #
pred_labels = partition
pred_labels = pred_labels[pred_labels.index.isin(mg.meta.index)]
partition = pred_labels.astype(int)
class_labels = mg["Merge Class"]
lineage_labels = mg["lineage"]
basename = ""
title = ""
def augment_classes(class_labels, lineage_labels, fill_unk=True):
if fill_unk:
classlin_labels = class_labels.copy()
fill_inds = np.where(class_labels == "unk")[0]
classlin_labels[fill_inds] = lineage_labels[fill_inds]
used_inds = np.array(list(CLASS_IND_DICT.values()))
unused_inds = np.setdiff1d(range(len(cc.glasbey_light)), used_inds)
lineage_color_dict = dict(
zip(np.unique(lineage_labels), np.array(cc.glasbey_light)[unused_inds])
)
color_dict = {**CLASS_COLOR_DICT, **lineage_color_dict}
hatch_dict = {}
for key, val in color_dict.items():
if key[0] == "~":
hatch_dict[key] = "//"
else:
hatch_dict[key] = ""
else:
color_dict = "class"
hatch_dict = None
return classlin_labels, color_dict, hatch_dict
lineage_labels = np.vectorize(lambda x: "~" + x)(lineage_labels)
classlin_labels, color_dict, hatch_dict = augment_classes(class_labels, lineage_labels)
# TODO then sort all of them by proportion of sensory/motor
# barplot by merge class and lineage
_, _, order = barplot_text(
partition,
classlin_labels,
color_dict=color_dict,
plot_proportions=False,
norm_bar_width=True,
figsize=(24, 18),
title=title,
hatch_dict=hatch_dict,
return_order=True,
)
stashfig(basename + "barplot-mergeclasslin-props")
plt.close()
category_order = np.unique(partition)[order]
fig, axs = barplot_text(
partition,
class_labels,
color_dict=color_dict,
plot_proportions=False,
norm_bar_width=True,
figsize=(24, 18),
title=title,
hatch_dict=None,
category_order=category_order,
)
stashfig(basename + "barplot-mergeclass-props")
fig, axs = barplot_text(
partition,
class_labels,
color_dict=color_dict,
plot_proportions=False,
norm_bar_width=False,
figsize=(24, 18),
title=title,
hatch_dict=None,
category_order=category_order,
)
stashfig(basename + "barplot-mergeclass-counts")
plt.close()
# TODO add gridmap
counts = False
weights = False
prob_df = get_blockmodel_df(
mg.adj, partition, return_counts=counts, use_weights=weights
)
prob_df = prob_df.reindex(category_order, axis=0)
prob_df = prob_df.reindex(category_order, axis=1)
probplot(100 * prob_df, fmt="2.0f", figsize=(20, 20), title=title, font_scale=0.7)
stashfig(basename + f"probplot-counts{counts}-weights{weights}")
plt.close()
|
[
"os.remove",
"numpy.random.seed",
"src.io.savefig",
"src.io.saveskels",
"src.graph.MetaGraph",
"igraph.Graph.Read_GraphML",
"numpy.random.randint",
"numpy.unique",
"src.graph.preprocess",
"matplotlib.pyplot.close",
"seaborn.set_context",
"numpy.vectorize",
"os.path.basename",
"src.visualization.barplot_text",
"pandas.Series",
"src.visualization.probplot",
"graspy.utils.symmetrize",
"src.io.saveobj",
"src.utils.get_blockmodel_df",
"src.data.load_metagraph",
"leidenalg.find_partition",
"numpy.where",
"numpy.array",
"src.io.savecsv",
"src.visualization.CLASS_IND_DICT.values",
"networkx.write_graphml"
] |
[((1392, 1415), 'numpy.random.seed', 'np.random.seed', (['(9812343)'], {}), '(9812343)\n', (1406, 1415), True, 'import numpy as np\n'), ((1416, 1439), 'seaborn.set_context', 'sns.set_context', (['"""talk"""'], {}), "('talk')\n", (1431, 1439), True, 'import seaborn as sns\n'), ((2069, 2118), 'src.data.load_metagraph', 'load_metagraph', (['graph_type'], {'version': 'BRAIN_VERSION'}), '(graph_type, version=BRAIN_VERSION)\n', (2083, 2118), False, 'from src.data import load_everything, load_metagraph, load_networkx\n'), ((2124, 2221), 'src.graph.preprocess', 'preprocess', (['mg'], {'threshold': 'threshold', 'sym_threshold': '(True)', 'remove_pdiff': '(True)', 'binarize': 'binarize'}), '(mg, threshold=threshold, sym_threshold=True, remove_pdiff=True,\n binarize=binarize)\n', (2134, 2221), False, 'from src.graph import MetaGraph, preprocess\n'), ((3354, 3385), 'igraph.Graph.Read_GraphML', 'ig.Graph.Read_GraphML', (['temp_loc'], {}), '(temp_loc)\n', (3375, 3385), True, 'import igraph as ig\n'), ((3386, 3405), 'os.remove', 'os.remove', (['temp_loc'], {}), '(temp_loc)\n', (3395, 3405), False, 'import os\n'), ((3522, 3557), 'pandas.Series', 'pd.Series', ([], {'data': 'labels', 'index': 'nodes'}), '(data=labels, index=nodes)\n', (3531, 3557), True, 'import pandas as pd\n'), ((5202, 5392), 'src.visualization.barplot_text', 'barplot_text', (['partition', 'classlin_labels'], {'color_dict': 'color_dict', 'plot_proportions': '(False)', 'norm_bar_width': '(True)', 'figsize': '(24, 18)', 'title': 'title', 'hatch_dict': 'hatch_dict', 'return_order': '(True)'}), '(partition, classlin_labels, color_dict=color_dict,\n plot_proportions=False, norm_bar_width=True, figsize=(24, 18), title=\n title, hatch_dict=hatch_dict, return_order=True)\n', (5214, 5392), False, 'from src.visualization import CLASS_COLOR_DICT, CLASS_IND_DICT, barplot_text, bartreeplot, draw_networkx_nice, get_color_dict, get_colors, palplot, probplot, sankey, screeplot, stacked_barplot, random_names\n'), ((5474, 5485), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5483, 5485), True, 'import matplotlib.pyplot as plt\n'), ((5543, 5736), 'src.visualization.barplot_text', 'barplot_text', (['partition', 'class_labels'], {'color_dict': 'color_dict', 'plot_proportions': '(False)', 'norm_bar_width': '(True)', 'figsize': '(24, 18)', 'title': 'title', 'hatch_dict': 'None', 'category_order': 'category_order'}), '(partition, class_labels, color_dict=color_dict,\n plot_proportions=False, norm_bar_width=True, figsize=(24, 18), title=\n title, hatch_dict=None, category_order=category_order)\n', (5555, 5736), False, 'from src.visualization import CLASS_COLOR_DICT, CLASS_IND_DICT, barplot_text, bartreeplot, draw_networkx_nice, get_color_dict, get_colors, palplot, probplot, sankey, screeplot, stacked_barplot, random_names\n'), ((5827, 6021), 'src.visualization.barplot_text', 'barplot_text', (['partition', 'class_labels'], {'color_dict': 'color_dict', 'plot_proportions': '(False)', 'norm_bar_width': '(False)', 'figsize': '(24, 18)', 'title': 'title', 'hatch_dict': 'None', 'category_order': 'category_order'}), '(partition, class_labels, color_dict=color_dict,\n plot_proportions=False, norm_bar_width=False, figsize=(24, 18), title=\n title, hatch_dict=None, category_order=category_order)\n', (5839, 6021), False, 'from src.visualization import CLASS_COLOR_DICT, CLASS_IND_DICT, barplot_text, bartreeplot, draw_networkx_nice, get_color_dict, get_colors, palplot, probplot, sankey, screeplot, stacked_barplot, random_names\n'), ((6101, 6112), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6110, 6112), True, 'import matplotlib.pyplot as plt\n'), ((6175, 6254), 'src.utils.get_blockmodel_df', 'get_blockmodel_df', (['mg.adj', 'partition'], {'return_counts': 'counts', 'use_weights': 'weights'}), '(mg.adj, partition, return_counts=counts, use_weights=weights)\n', (6192, 6254), False, 'from src.utils import get_blockmodel_df, get_sbm_prob\n'), ((6361, 6447), 'src.visualization.probplot', 'probplot', (['(100 * prob_df)'], {'fmt': '"""2.0f"""', 'figsize': '(20, 20)', 'title': 'title', 'font_scale': '(0.7)'}), "(100 * prob_df, fmt='2.0f', figsize=(20, 20), title=title,\n font_scale=0.7)\n", (6369, 6447), False, 'from src.visualization import CLASS_COLOR_DICT, CLASS_IND_DICT, barplot_text, bartreeplot, draw_networkx_nice, get_color_dict, get_colors, palplot, probplot, sankey, screeplot, stacked_barplot, random_names\n'), ((6509, 6520), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6518, 6520), True, 'import matplotlib.pyplot as plt\n'), ((1220, 1246), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1236, 1246), False, 'import os\n'), ((1473, 1525), 'src.io.savefig', 'savefig', (['name'], {'foldername': 'FNAME', 'save_on': '(True)'}), '(name, foldername=FNAME, save_on=True, **kws)\n', (1480, 1525), False, 'from src.io import savefig, saveobj, saveskels, savecsv\n'), ((1530, 1541), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1539, 1541), True, 'import matplotlib.pyplot as plt\n'), ((1579, 1635), 'src.io.savecsv', 'savecsv', (['df', 'name'], {'foldername': 'FNAME', 'save_on': '(True)'}), '(df, name, foldername=FNAME, save_on=True, **kws)\n', (1586, 1635), False, 'from src.io import savefig, saveobj, saveskels, savecsv\n'), ((1710, 1815), 'src.io.saveskels', 'saveskels', (['name', 'ids', 'labels'], {'colors': 'colors', 'palette': 'None', 'foldername': 'FNAME', 'save_on': 'SAVESKELS'}), '(name, ids, labels, colors=colors, palette=None, foldername=FNAME,\n save_on=SAVESKELS, **kws)\n', (1719, 1815), False, 'from src.io import savefig, saveobj, saveskels, savecsv\n'), ((1921, 1982), 'src.io.saveobj', 'saveobj', (['obj', 'name'], {'foldername': 'FNAME', 'save_on': 'SAVEOBJS'}), '(obj, name, foldername=FNAME, save_on=SAVEOBJS, **kws)\n', (1928, 1982), False, 'from src.io import savefig, saveobj, saveskels, savecsv\n'), ((2340, 2369), 'graspy.utils.symmetrize', 'symmetrize', (['adj'], {'method': '"""avg"""'}), "(adj, method='avg')\n", (2350, 2369), False, 'from graspy.utils import symmetrize\n'), ((2379, 2402), 'src.graph.MetaGraph', 'MetaGraph', (['adj', 'mg.meta'], {}), '(adj, mg.meta)\n', (2388, 2402), False, 'from src.graph import MetaGraph, preprocess\n'), ((2407, 2439), 'networkx.write_graphml', 'nx.write_graphml', (['mg.g', 'temp_loc'], {}), '(mg.g, temp_loc)\n', (2423, 2439), True, 'import networkx as nx\n'), ((2805, 2836), 'igraph.Graph.Read_GraphML', 'ig.Graph.Read_GraphML', (['temp_loc'], {}), '(temp_loc)\n', (2826, 2836), True, 'import igraph as ig\n'), ((2841, 2860), 'os.remove', 'os.remove', (['temp_loc'], {}), '(temp_loc)\n', (2850, 2860), False, 'import os\n'), ((3137, 3172), 'pandas.Series', 'pd.Series', ([], {'data': 'labels', 'index': 'nodes'}), '(data=labels, index=nodes)\n', (3146, 3172), True, 'import pandas as pd\n'), ((4954, 4985), 'numpy.vectorize', 'np.vectorize', (["(lambda x: '~' + x)"], {}), "(lambda x: '~' + x)\n", (4966, 4985), True, 'import numpy as np\n'), ((5503, 5523), 'numpy.unique', 'np.unique', (['partition'], {}), '(partition)\n', (5512, 
5523), True, 'import numpy as np\n'), ((3284, 3314), 'numpy.random.randint', 'np.random.randint', (['(100000000.0)'], {}), '(100000000.0)\n', (3301, 3314), True, 'import numpy as np\n'), ((3043, 3086), 'leidenalg.find_partition', 'la.find_partition', (['g', 'partition_type'], {}), '(g, partition_type, **kws)\n', (3060, 3086), True, 'import leidenalg as la\n'), ((4200, 4231), 'numpy.where', 'np.where', (["(class_labels == 'unk')"], {}), "(class_labels == 'unk')\n", (4208, 4231), True, 'import numpy as np\n'), ((2649, 2679), 'numpy.random.randint', 'np.random.randint', (['(100000000.0)'], {}), '(100000000.0)\n', (2666, 2679), True, 'import numpy as np\n'), ((4332, 4355), 'src.visualization.CLASS_IND_DICT.values', 'CLASS_IND_DICT.values', ([], {}), '()\n', (4353, 4355), False, 'from src.visualization import CLASS_COLOR_DICT, CLASS_IND_DICT, barplot_text, bartreeplot, draw_networkx_nice, get_color_dict, get_colors, palplot, probplot, sankey, screeplot, stacked_barplot, random_names\n'), ((4485, 4510), 'numpy.unique', 'np.unique', (['lineage_labels'], {}), '(lineage_labels)\n', (4494, 4510), True, 'import numpy as np\n'), ((4512, 4538), 'numpy.array', 'np.array', (['cc.glasbey_light'], {}), '(cc.glasbey_light)\n', (4520, 4538), True, 'import numpy as np\n')]
|
import os
import shutil
import argparse
import datetime
import tensorflow as tf
import model
from get_dataset import get_dataset
from visualizer import Visualizer
tf.enable_eager_execution()
parser = argparse.ArgumentParser(description='Stochastic Gradient Langevin Dynamics')
parser.add_argument('--hparams', type=str, default=None,
help='The name of a file containing comma separated list of "name=value" pairs.')
args = parser.parse_args()
tf.set_random_seed(1202)
def main():
    # train logistic regression with stochastic gradient Langevin dynamics
if not os.path.isdir("log/"):
os.makedirs("log/")
now = datetime.datetime.today()
logdir = "log/log%s/" % now.strftime("%Y%m%d-%H%M")
os.makedirs(logdir)
# tensorboard
writer = tf.contrib.summary.create_file_writer(logdir)
global_step=tf.train.get_or_create_global_step()
writer.set_as_default()
# read hyperparameters from file
hparams = tf.contrib.training.HParams(
lr=0.1,
model="SGLD_LR",
epoch=10,
batch_size=10)
if args.hparams:
shutil.copyfile(args.hparams, logdir + "params")
hparams_from_file = ""
with open(args.hparams, "r") as f:
for l in f.readlines():
hparams_from_file += l
hparams.parse(hparams_from_file)
# choose model
if hparams.model == "SGLD_LR":
nn = model.SGLD_LR(hparams)
train_dataset, train_dataset_size = get_dataset(hparams.model, "train")
val_dataset, val_dataset_size = get_dataset(hparams.model, "validation")
else:
raise "Invalid parameter for hparams.model"
visualizer = Visualizer()
# train
epsilon_ = hparams.lr
step = 0
for epoch in range(hparams.epoch):
train_dataset_iter = train_dataset.shuffle(train_dataset_size).batch(hparams.batch_size)
for batch, data in enumerate(train_dataset_iter):
global_step.assign_add(1)
step += 1
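            # decay the Langevin step size as training progresses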
epsilon_ = hparams.lr / (1 + 0.05 * step)
epsilon = tf.convert_to_tensor(epsilon_, tf.float32)
loss = nn.loss(data["data"], data["label"]).numpy()
accuracy = nn.accuracy(data["data"], data["label"]).numpy()
visualizer.store_results(nn)
nn.update(data["data"], data["label"], epsilon, train_dataset_size)
with tf.contrib.summary.record_summaries_every_n_global_steps(10):
tf.contrib.summary.scalar('loss', loss)
tf.contrib.summary.scalar('accuracy', accuracy)
tf.contrib.summary.scalar('epsilon', epsilon)
grads_vars = nn.grads_variances()
for i in range(len(grads_vars)):
tf.contrib.summary.scalar('grads_var%d' % (i+1), grads_vars[i])
print("epoch %3d\tbatch %4d\tloss %.4f\taccuracy %.4f" % (epoch+1, batch+1, loss, accuracy))
for l_epoch in range(100):
print("langevin epoch %3d" % (l_epoch+1))
train_dataset_iter = train_dataset.shuffle(train_dataset_size).batch(hparams.batch_size)
for batch, data in enumerate(train_dataset_iter):
visualizer.store_results(nn)
nn.update(data["data"], data["label"], epsilon, train_dataset_size)
# visualize
visualizer.save_results(logdir, train_dataset)
if __name__ == "__main__":
main()
|
[
"tensorflow.contrib.summary.scalar",
"tensorflow.contrib.training.HParams",
"datetime.datetime.today",
"os.makedirs",
"argparse.ArgumentParser",
"os.path.isdir",
"tensorflow.convert_to_tensor",
"tensorflow.train.get_or_create_global_step",
"model.SGLD_LR",
"tensorflow.set_random_seed",
"tensorflow.contrib.summary.create_file_writer",
"visualizer.Visualizer",
"get_dataset.get_dataset",
"tensorflow.enable_eager_execution",
"shutil.copyfile",
"tensorflow.contrib.summary.record_summaries_every_n_global_steps"
] |
[((166, 193), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (191, 193), True, 'import tensorflow as tf\n'), ((204, 280), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Stochastic Gradient Langevin Dynamics"""'}), "(description='Stochastic Gradient Langevin Dynamics')\n", (227, 280), False, 'import argparse\n'), ((468, 492), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1202)'], {}), '(1202)\n', (486, 492), True, 'import tensorflow as tf\n'), ((653, 678), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (676, 678), False, 'import datetime\n'), ((739, 758), 'os.makedirs', 'os.makedirs', (['logdir'], {}), '(logdir)\n', (750, 758), False, 'import os\n'), ((791, 836), 'tensorflow.contrib.summary.create_file_writer', 'tf.contrib.summary.create_file_writer', (['logdir'], {}), '(logdir)\n', (828, 836), True, 'import tensorflow as tf\n'), ((853, 889), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (887, 889), True, 'import tensorflow as tf\n'), ((970, 1047), 'tensorflow.contrib.training.HParams', 'tf.contrib.training.HParams', ([], {'lr': '(0.1)', 'model': '"""SGLD_LR"""', 'epoch': '(10)', 'batch_size': '(10)'}), "(lr=0.1, model='SGLD_LR', epoch=10, batch_size=10)\n", (997, 1047), True, 'import tensorflow as tf\n'), ((1711, 1723), 'visualizer.Visualizer', 'Visualizer', ([], {}), '()\n', (1721, 1723), False, 'from visualizer import Visualizer\n'), ((592, 613), 'os.path.isdir', 'os.path.isdir', (['"""log/"""'], {}), "('log/')\n", (605, 613), False, 'import os\n'), ((623, 642), 'os.makedirs', 'os.makedirs', (['"""log/"""'], {}), "('log/')\n", (634, 642), False, 'import os\n'), ((1143, 1191), 'shutil.copyfile', 'shutil.copyfile', (['args.hparams', "(logdir + 'params')"], {}), "(args.hparams, logdir + 'params')\n", (1158, 1191), False, 'import shutil\n'), ((1447, 1469), 'model.SGLD_LR', 'model.SGLD_LR', (['hparams'], {}), '(hparams)\n', (1460, 1469), False, 'import model\n'), ((1514, 1549), 'get_dataset.get_dataset', 'get_dataset', (['hparams.model', '"""train"""'], {}), "(hparams.model, 'train')\n", (1525, 1549), False, 'from get_dataset import get_dataset\n'), ((1590, 1630), 'get_dataset.get_dataset', 'get_dataset', (['hparams.model', '"""validation"""'], {}), "(hparams.model, 'validation')\n", (1601, 1630), False, 'from get_dataset import get_dataset\n'), ((2108, 2150), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['epsilon_', 'tf.float32'], {}), '(epsilon_, tf.float32)\n', (2128, 2150), True, 'import tensorflow as tf\n'), ((2429, 2489), 'tensorflow.contrib.summary.record_summaries_every_n_global_steps', 'tf.contrib.summary.record_summaries_every_n_global_steps', (['(10)'], {}), '(10)\n', (2485, 2489), True, 'import tensorflow as tf\n'), ((2507, 2546), 'tensorflow.contrib.summary.scalar', 'tf.contrib.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (2532, 2546), True, 'import tensorflow as tf\n'), ((2563, 2610), 'tensorflow.contrib.summary.scalar', 'tf.contrib.summary.scalar', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (2588, 2610), True, 'import tensorflow as tf\n'), ((2627, 2672), 'tensorflow.contrib.summary.scalar', 'tf.contrib.summary.scalar', (['"""epsilon"""', 'epsilon'], {}), "('epsilon', epsilon)\n", (2652, 2672), True, 'import tensorflow as tf\n'), ((2793, 2858), 'tensorflow.contrib.summary.scalar', 'tf.contrib.summary.scalar', (["('grads_var%d' % (i + 1))", 'grads_vars[i]'], {}), 
"('grads_var%d' % (i + 1), grads_vars[i])\n", (2818, 2858), True, 'import tensorflow as tf\n')]
|
# Written by <NAME>, Seoul National University (<EMAIL>)
# Some parts of the code were referenced from or inspired by below
# - <NAME>'s code (https://github.com/tbepler/protein-sequence-embedding-iclr2019)
# PLUS
""" MLP model classes and functions """
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP(nn.Module):
def __init__(self, cfg, per_seq=False):
""" MLP model for fine-tuning prediction tasks """
super(MLP, self).__init__()
self.drop = nn.Dropout(cfg.dropout)
self.relu = nn.ReLU()
self.per_seq = per_seq
if self.per_seq:
self.attention = nn.Linear(cfg.input_dim, 1)
self.hidden = nn.Linear(cfg.input_dim, cfg.hidden_dim)
self.output = nn.Linear(cfg.hidden_dim, cfg.num_classes)
def forward(self, X):
logits = []
for x in X:
if self.per_seq:
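                # pool the per-position embeddings into a single sequence-level vector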
att = self.attention(x)
x = torch.sum(x * F.softmax(att, 1).expand_as(x), 0)
x = self.drop(self.relu(self.hidden(x)))
x = self.output(x)
if self.per_seq: logits.append(x)
else: logits.append(x.unsqueeze(0))
return logits
def load_weights(self, pretrained_model):
# load pretrained_model weights
state_dict = {}
for key, value in torch.load(pretrained_model, map_location=torch.device('cpu')).items():
if key.startswith("module"): state_dict[key[7:]] = value
else: state_dict[key] = value
self.load_state_dict(state_dict)
|
[
"torch.nn.Dropout",
"torch.nn.ReLU",
"torch.nn.functional.softmax",
"torch.nn.Linear",
"torch.device"
] |
[((506, 529), 'torch.nn.Dropout', 'nn.Dropout', (['cfg.dropout'], {}), '(cfg.dropout)\n', (516, 529), True, 'import torch.nn as nn\n'), ((550, 559), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (557, 559), True, 'import torch.nn as nn\n'), ((695, 735), 'torch.nn.Linear', 'nn.Linear', (['cfg.input_dim', 'cfg.hidden_dim'], {}), '(cfg.input_dim, cfg.hidden_dim)\n', (704, 735), True, 'import torch.nn as nn\n'), ((758, 800), 'torch.nn.Linear', 'nn.Linear', (['cfg.hidden_dim', 'cfg.num_classes'], {}), '(cfg.hidden_dim, cfg.num_classes)\n', (767, 800), True, 'import torch.nn as nn\n'), ((645, 672), 'torch.nn.Linear', 'nn.Linear', (['cfg.input_dim', '(1)'], {}), '(cfg.input_dim, 1)\n', (654, 672), True, 'import torch.nn as nn\n'), ((1396, 1415), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1408, 1415), False, 'import torch\n'), ((971, 988), 'torch.nn.functional.softmax', 'F.softmax', (['att', '(1)'], {}), '(att, 1)\n', (980, 988), True, 'import torch.nn.functional as F\n')]
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Preprocess text data and save binary Dataset objects along with tokenizers to a directory.
Should NOT be used for class-incremental scenario, because of arbitrary vocabulary order.
"""
import os
import sys
import logging
import argparse
from functools import reduce
from os.path import join as path_join
from random import shuffle
import toml
import torch
import pandas as pd
import transformers
import new_semantic_parsing as nsp
from new_semantic_parsing import utils
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger(os.path.basename(__file__))
os.environ["TOKENIZERS_PARALLELISM"] = "false"
def parse_args(args=None):
parser = argparse.ArgumentParser()
# fmt: off
parser.add_argument("--data", required=True,
help="path to TOP dataset directory")
parser.add_argument("--text-tokenizer", required=True,
help="pratrained tokenizer name or path to a saved tokenizer")
parser.add_argument("--output-dir", required=True,
help="directory to save preprocessed data")
parser.add_argument("--seed", default=34)
# splitting parameters
parser.add_argument("--split-class", default=None,
help="remove --split-ratio of the class from the training dataset and make a finetune_data; "
"do not perform split by default")
parser.add_argument("--split-amount", default=None, type=float,
help="0 < --split-amount < 1, amount of data to remove from the training dataset")
# fmt: on
args = parser.parse_args(args)
if args.split_amount is not None:
if not 0.0 < args.split_amount < 1.0:
raise ValueError("--split-amount should be between 0. and 1.")
if args.split_class is not None:
if args.split_amount is None:
raise ValueError("--split-amount should be specified if --split-class is provided")
return args
def train_finetune_split(train_data, schema_vocab, split_amount, split_class=None):
"""Split train_data into train and finetune parts with ratio split_amount.
    Train part should contain all classes from the original train_data.
    If split_class is provided, split across examples containing this class.
    I.e. split_amount of data with split_class goes to the finetune set.
Args:
train_data: pd.DataFrame
schema_vocab: set of tokens
split_amount: float
split_class: if provided, split across the specified class
"""
# Get a small set of examples that contains all classes from schema_vocab
required_example_ids = utils.get_required_example_ids(schema_vocab, train_data)
ids = set(range(len(train_data)))
if split_class is not None:
ids = set(train_data.index[train_data.schema.str.contains(split_class)])
logger.info(f"Moving {100 * split_amount}% of {split_class} into a finetuning subset")
_take = int(len(ids) * split_amount)
_leave = len(ids) - _take
logger.info(
f"Take {_take} class examples to finetuning set and leave {_leave} class examles in"
" training set."
)
if len(ids) == 0:
raise RuntimeError(f"Cannot find specified class {split_class} in the data.")
split_ids = list(ids - required_example_ids)
take = int(len(split_ids) * split_amount)
leave = len(train_data) - take
assert take > 0
logger.info(f"Taking {take} examples and leaving {leave} examples")
shuffle(split_ids)
subset_ids = split_ids[:take]
subset_ids_set = set(subset_ids)
all_ids = set(range(len(train_data)))
assert len(subset_ids_set.intersection(required_example_ids)) == 0
train_data_ids = list(all_ids - subset_ids_set | required_example_ids)
finetune_data = train_data.iloc[subset_ids]
train_data = train_data.iloc[train_data_ids]
return train_data, finetune_data
def main(args):
utils.set_seed(args.seed)
if os.path.exists(args.output_dir):
raise ValueError(f"output_dir {args.output_dir} already exists")
# File structure:
# that's text\tthat 's text\t[IN:UNSUPPORTED that 's text]
train_path = path_join(path_join(args.data, "train.tsv"))
train_data = pd.read_table(train_path, names=["text", "tokens", "schema"])
full_train_data_size = len(train_data) # used to check the train/finetune split
finetune_data, finetune_path = None, None
# NOTE: Do not use this for class-incremental scenario, where vocab order is important
schema_vocab = list(reduce(set.union, map(utils.get_vocab_top_schema, train_data.schema)))
if args.split_amount is not None:
# finetune part is not used by train script, but used by retrain script
logger.info("Splitting the training dataset")
train_data, finetune_data = train_finetune_split(
train_data, schema_vocab, args.split_amount, args.split_class
)
os.makedirs(args.output_dir)
finetune_path = path_join(args.output_dir, "finetune.tsv")
logger.info(f"Saving the finetune_data to {finetune_path}")
finetune_data.to_csv(finetune_path, sep="\t", index=False, header=False)
train_path = path_join(args.output_dir, "train.tsv")
logger.info(f"Saving the modified training set to {train_path}")
train_data.to_csv(train_path, sep="\t", index=False, header=False)
logger.info("Getting schema vocabulary")
if args.split_amount is not None:
# NOTE: Do not use this for class-incremental scenario, where vocab order is important
finetune_schema_vocab = list(reduce(
set.union, map(utils.get_vocab_top_schema, finetune_data.schema)
))
vocab_delta = set(finetune_schema_vocab) - set(schema_vocab)
if len(vocab_delta) > 0:
logger.warning(
f"Finetuning subset contains vocabulary elements not from the training subset"
)
logger.warning(f"New elements: {', '.join(vocab_delta)}")
logger.info(f"Schema vocabulary size: {len(schema_vocab)}")
logger.info("Building tokenizers")
text_tokenizer = transformers.AutoTokenizer.from_pretrained(args.text_tokenizer, use_fast=True)
schema_tokenizer = nsp.TopSchemaTokenizer(schema_vocab, text_tokenizer)
logger.info("Tokenizing train dataset")
train_dataset = nsp.data.make_dataset(train_path, schema_tokenizer)
logger.info("Tokenizing validation and test datasets")
valid_dataset = nsp.data.make_dataset(path_join(args.data, "eval.tsv"), schema_tokenizer)
test_dataset = nsp.data.make_dataset(path_join(args.data, "test.tsv"), schema_tokenizer)
finetune_dataset = None
if args.split_amount is not None:
logger.info("Tokenizing finetune set")
finetune_dataset = nsp.data.make_dataset(finetune_path, schema_tokenizer)
logger.info(f"Original train set size: {full_train_data_size}")
logger.info(f"Reduced train set size: {len(train_dataset)}")
logger.info(f"Finetune set size: {len(finetune_dataset)}")
train_finetune_data_size = len(train_dataset) + len(finetune_dataset)
if train_finetune_data_size != full_train_data_size:
raise RuntimeError(f"{train_finetune_data_size} != {full_train_data_size}")
logger.info(f"Saving config, data and tokenizer to {args.output_dir}")
os.makedirs(args.output_dir, exist_ok=True)
with open(path_join(args.output_dir, "args.toml"), "w") as f:
args_dict = {"version": nsp.SAVE_FORMAT_VERSION, **vars(args)}
toml.dump(args_dict, f)
# text tokenizer is saved along with schema_tokenizer
model_type = None
if not os.path.exists(args.text_tokenizer):
model_type = utils.get_model_type(args.text_tokenizer)
schema_tokenizer.save(path_join(args.output_dir, "tokenizer"), encoder_model_type=model_type)
data_state = {
"train_dataset": train_dataset,
"valid_dataset": valid_dataset,
"test_dataset": test_dataset,
"finetune_dataset": finetune_dataset,
"version": nsp.SAVE_FORMAT_VERSION,
}
torch.save(data_state, path_join(args.output_dir, "data.pkl"))
if __name__ == "__main__":
args = parse_args()
main(args)
|
[
"new_semantic_parsing.utils.set_seed",
"new_semantic_parsing.utils.get_required_example_ids",
"argparse.ArgumentParser",
"logging.basicConfig",
"os.path.basename",
"os.makedirs",
"random.shuffle",
"new_semantic_parsing.data.make_dataset",
"os.path.exists",
"new_semantic_parsing.utils.get_model_type",
"transformers.AutoTokenizer.from_pretrained",
"new_semantic_parsing.TopSchemaTokenizer",
"pandas.read_table",
"os.path.join",
"toml.dump"
] |
[((1135, 1295), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s | %(levelname)s | %(name)s | %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""', 'level': 'logging.INFO', 'stream': 'sys.stdout'}), "(format=\n '%(asctime)s | %(levelname)s | %(name)s | %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S', level=logging.INFO, stream=sys.stdout)\n", (1154, 1295), False, 'import logging\n'), ((1332, 1358), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1348, 1358), False, 'import os\n'), ((1450, 1475), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1473, 1475), False, 'import argparse\n'), ((3429, 3485), 'new_semantic_parsing.utils.get_required_example_ids', 'utils.get_required_example_ids', (['schema_vocab', 'train_data'], {}), '(schema_vocab, train_data)\n', (3459, 3485), False, 'from new_semantic_parsing import utils\n'), ((4317, 4335), 'random.shuffle', 'shuffle', (['split_ids'], {}), '(split_ids)\n', (4324, 4335), False, 'from random import shuffle\n'), ((4755, 4780), 'new_semantic_parsing.utils.set_seed', 'utils.set_seed', (['args.seed'], {}), '(args.seed)\n', (4769, 4780), False, 'from new_semantic_parsing import utils\n'), ((4789, 4820), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (4803, 4820), False, 'import os\n'), ((5060, 5121), 'pandas.read_table', 'pd.read_table', (['train_path'], {'names': "['text', 'tokens', 'schema']"}), "(train_path, names=['text', 'tokens', 'schema'])\n", (5073, 5121), True, 'import pandas as pd\n'), ((6968, 7046), 'transformers.AutoTokenizer.from_pretrained', 'transformers.AutoTokenizer.from_pretrained', (['args.text_tokenizer'], {'use_fast': '(True)'}), '(args.text_tokenizer, use_fast=True)\n', (7010, 7046), False, 'import transformers\n'), ((7070, 7122), 'new_semantic_parsing.TopSchemaTokenizer', 'nsp.TopSchemaTokenizer', (['schema_vocab', 'text_tokenizer'], {}), '(schema_vocab, text_tokenizer)\n', (7092, 7122), True, 'import new_semantic_parsing as nsp\n'), ((7188, 7239), 'new_semantic_parsing.data.make_dataset', 'nsp.data.make_dataset', (['train_path', 'schema_tokenizer'], {}), '(train_path, schema_tokenizer)\n', (7209, 7239), True, 'import new_semantic_parsing as nsp\n'), ((8207, 8250), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (8218, 8250), False, 'import os\n'), ((5008, 5041), 'os.path.join', 'path_join', (['args.data', '"""train.tsv"""'], {}), "(args.data, 'train.tsv')\n", (5017, 5041), True, 'from os.path import join as path_join\n'), ((5764, 5792), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {}), '(args.output_dir)\n', (5775, 5792), False, 'import os\n'), ((5818, 5860), 'os.path.join', 'path_join', (['args.output_dir', '"""finetune.tsv"""'], {}), "(args.output_dir, 'finetune.tsv')\n", (5827, 5860), True, 'from os.path import join as path_join\n'), ((6032, 6071), 'os.path.join', 'path_join', (['args.output_dir', '"""train.tsv"""'], {}), "(args.output_dir, 'train.tsv')\n", (6041, 6071), True, 'from os.path import join as path_join\n'), ((7342, 7374), 'os.path.join', 'path_join', (['args.data', '"""eval.tsv"""'], {}), "(args.data, 'eval.tsv')\n", (7351, 7374), True, 'from os.path import join as path_join\n'), ((7435, 7467), 'os.path.join', 'path_join', (['args.data', '"""test.tsv"""'], {}), "(args.data, 'test.tsv')\n", (7444, 7467), True, 'from os.path import join as path_join\n'), ((7628, 7682), 'new_semantic_parsing.data.make_dataset', 
'nsp.data.make_dataset', (['finetune_path', 'schema_tokenizer'], {}), '(finetune_path, schema_tokenizer)\n', (7649, 7682), True, 'import new_semantic_parsing as nsp\n'), ((8397, 8420), 'toml.dump', 'toml.dump', (['args_dict', 'f'], {}), '(args_dict, f)\n', (8406, 8420), False, 'import toml\n'), ((8513, 8548), 'os.path.exists', 'os.path.exists', (['args.text_tokenizer'], {}), '(args.text_tokenizer)\n', (8527, 8548), False, 'import os\n'), ((8571, 8612), 'new_semantic_parsing.utils.get_model_type', 'utils.get_model_type', (['args.text_tokenizer'], {}), '(args.text_tokenizer)\n', (8591, 8612), False, 'from new_semantic_parsing import utils\n'), ((8640, 8679), 'os.path.join', 'path_join', (['args.output_dir', '"""tokenizer"""'], {}), "(args.output_dir, 'tokenizer')\n", (8649, 8679), True, 'from os.path import join as path_join\n'), ((8974, 9012), 'os.path.join', 'path_join', (['args.output_dir', '"""data.pkl"""'], {}), "(args.output_dir, 'data.pkl')\n", (8983, 9012), True, 'from os.path import join as path_join\n'), ((8266, 8305), 'os.path.join', 'path_join', (['args.output_dir', '"""args.toml"""'], {}), "(args.output_dir, 'args.toml')\n", (8275, 8305), True, 'from os.path import join as path_join\n')]
|
import numpy as np
from Redbox_v2 import file_manager as fm
import pandas as pd
from scipy.fftpack import rfft, rfftfreq
import matplotlib.pyplot as plt
import os
import math
def rms_time_dom(signal):
N = len(signal)
return math.sqrt(np.sum(np.power(signal,2))/N)
def rms_freq_dom(amplitude):
return math.sqrt(2*np.sum(np.power(amplitude, 2)))/2
def n_minutes_max(signal, dt, n=5):
""""
:param signal (np.array or list)
:param n: n-minutes range to obtain maximum (int)
:param dt: sample space or time between samples
this functions does not return the real time of the occuring
return: numpy array with
"""
maximums = np.zeros(1)
samples = int((n*60)/dt)
start = 0
end = samples
while start < len(signal):
selection = signal[start:end]
maximums = np.append(maximums, [np.amax(selection),np.amin(selection)])
start = end
end = min(len(signal), start + samples)
maximums = np.delete(maximums,0)
return maximums
def n_seconds_min_max(data, dt, n):
"""
:param data = 2D array with t and velocity in one direction
:param dt = sample space or time between samples
:param n = in seconds for which interval maximum value is determined
collect minimum and maximum values of the data over an interval
"""
samples = int(1/dt * n)
start = 0
end = samples
min_max_array = np.zeros([1, 2]) # col 1 = time [s], col 2 = min and max of direction
while start < data.shape[0]:
index_max = start + np.argmax(data[start:end, 1])
index_min = start + np.argmin(data[start:end, 1])
x = np.array([[data[index_max,0], data[index_max,1]], [data[index_min,0], data[index_min,1]]])
min_max_array = np.concatenate((min_max_array, x), axis=0)
start = end
end += samples
min_max_array = np.delete(min_max_array,0,0)
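    # sort by time so the per-interval extremes appear in chronological order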
min_max_array = min_max_array[min_max_array[:,0].argsort()]
return min_max_array
def FFT(signal,dT):
"""
:param signal: [array]
:param dT: sample space [float]
"""
ampl = np.abs(rfft(signal)) * 2.0 / len(signal)
freq = rfftfreq(len(ampl),d=dT)
return ampl, freq
def FFT_amplitude(signal):
"""
:param signal: [array]
"""
ampl = np.abs(rfft(signal)) * 2.0 / len(signal)
return ampl
def OneThird_octave(low, high):
""""
:param low: lowest required frequency band
:param high: highest required frequency band
this function starts at the highest band and
"""
one_third_octave = 2**(1/3)
last_band = high
first_band = last_band
N = 0
while first_band > low:
first_band = first_band/one_third_octave
N += 1
first_band = first_band * one_third_octave
return first_band * np.logspace(0, N, endpoint=False, num=N, base=one_third_octave)
def FFT_to_OneThird_Octave(amplitude, df, low, high):
"""
:param amplitude: amplitudes of the FFT [array]
:param frequency: frequencies of the FFT [array]
"""
one_third_octave = 2 ** (1 / 3)
spectrum = OneThird_octave(low, high)
rms_amplitude = np.empty(len(spectrum))
#check if the maximum available frequency exceeds the upper bound
if (df*len(amplitude))*one_third_octave**0.5 > high:
lower_bound = spectrum[0] / one_third_octave ** 0.5
upper_bound = spectrum[0] * one_third_octave ** 0.5
for n in range(rms_amplitude.size):
rms_amplitude[n] = rms_freq_dom(amplitude[int(lower_bound // df)*2:int(upper_bound // df)*2])
lower_bound = lower_bound * one_third_octave
upper_bound = upper_bound * one_third_octave
return rms_amplitude, spectrum
else:
print("ERROR frequency range is not large enough")
return
def FFT_to_OneThird_Octave2(amplitude, df, spectrum):
"""
    :param amplitude: amplitudes of the FFT [array]
    :param df: frequency resolution of the FFT [float]
    :param spectrum: band centre frequencies of the one-third-octave bands [array]
    """
one_third_octave = 2 ** (1 / 3)
spectrum = spectrum
rms_amplitude = np.empty(len(spectrum))
high = spectrum[-1]
#check if the maximum available frequency exceeds the upper bound
if (df*len(amplitude))*one_third_octave**0.5 > high:
lower_bound = spectrum[0] / one_third_octave ** 0.5
upper_bound = spectrum[0] * one_third_octave ** 0.5
for n in range(rms_amplitude.size):
rms_amplitude[n] = rms_freq_dom(amplitude[int(lower_bound // df)*2:int(upper_bound // df)*2])
lower_bound = lower_bound * one_third_octave
upper_bound = upper_bound * one_third_octave
return rms_amplitude
else:
print("ERROR frequency range is not large enough")
return
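# Illustrative helper (a sketch added for clarity, not part of the original API): it
# shows how FFT, OneThird_octave and FFT_to_OneThird_Octave2 above are meant to be
# chained for a trace `signal` with sample spacing `dt`; the band limits 1.0-80.0 Hz
# are assumed example values.
def _example_third_octave_spectrum(signal, dt, low=1.0, high=80.0):
    ampl, freq = FFT(signal, dt)
    df = freq[1] - freq[0]
    bands = OneThird_octave(low, high)
    return FFT_to_OneThird_Octave2(ampl, df, bands), bands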
"""
integration and differentiation
"""
def integ_to_disp(vel,dt):
"""
    :param vel: velocity obtained from data (np.array)
    :param dt: sample spacing or time between samples
    :return: (np.array) (displacement)
    """
    disp = np.zeros(len(vel))
    disp = disp[:-1]
    for i in range(1, len(disp)):
        # cumulative (rectangular-rule) integration of the velocity signal
        disp[i] = disp[i - 1] + vel[i] * dt
return disp
def diff_to_acc(vel,dt):
"""
    :param vel: velocity obtained from data (np.array)
    :param dt: sample spacing or time between samples
    :return: (np.array) (acceleration)
"""
acc = np.zeros(len(vel))
acc = acc[:-1]
for i in range(0, len(acc)):
acc[i] = (vel[i + 1] - vel[i]) / dt
return acc
def select_part(start, stop, to_select, dt):
    """
    TODO check whether this function is really useful
    :param start: start time of selection (flt/int)
    :param stop: end time of selection (flt/int)
    :param to_select: signal to select from (np.array or list)
    :param dt: sample spacing or time between samples
    :return: (tpl) (time values, selected samples)
    """
    i = int(start / dt)
    j = int(stop / dt)
    lst = []
    for k in range(i,j):
        lst.append(to_select[k])
    lst_t = np.linspace(start, stop, len(lst), endpoint=False)
return lst_t, lst
""" SBR methods"""
def compute_veff_sbr(v,T,Ts=0.125, a=8):
"""
:param =df = vels (mm/s)
:param = T = sample space (s)
:param a = each a'th sample is used
"""
l = int(np.log2(v.size)+1) #nth-power
N_org = v.size
N = 2**l
t = np.linspace(0,N*T,N,endpoint=False)
v = np.pad(v,(0,N-v.size),'constant')
vibrations_fft = np.fft.fft(v)
f = np.linspace(0, 1 / T, N, endpoint=False)
f_mod=f
f_mod[f<1.0]=0.1
weight = 1 / np.sqrt(1 + (5.6 / f_mod) ** 2)
vibrations_fft_w = weight * vibrations_fft
vibrations_w = np.fft.ifft(vibrations_fft_w).real
t_sel = t[:N_org:a]
vibrations_w = vibrations_w[:N_org:a]
v_sqrd_w = vibrations_w ** 2
v_eff = np.zeros(t_sel.size)
dt = t_sel[1] - t_sel[0]
print('compute v_eff')
for i in range(t_sel.size - 1):
g_xi = np.exp(-t_sel[:i + 1][::-1] / Ts)
v_eff[i] = np.sqrt(1 / Ts * np.trapz(g_xi * v_sqrd_w[:i + 1], dx=dt))
fm.progress(i,t_sel.size-1,"processing %s of %s" % (i + 1, t_sel.size))
idx = np.argmax(v_eff)
return v_eff[idx], t_sel, vibrations_w, v_eff
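# Illustrative helper (a sketch, not part of the original module): how compute_veff_sbr
# and plot_SBR_B below are meant to be combined for a velocity trace `v` with sample
# spacing `T`; the save path is a placeholder and the raw signal plot is skipped.
def _example_sbr_b_evaluation(v, T, save_to_path="veff.{}"):
    v_eff_max, t_sel, v_weighted, v_eff = compute_veff_sbr(v, T)
    plot_SBR_B(save_to_path, None, v_weighted, v_eff, t_sel)
    return v_eff_max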
def plot_SBR_B(save_to_path,vibrations, vibrations_w,v_eff,t_sel):
"""
    vibrations, vibrations_w and v_eff are optional arguments (pass None to skip them)
    """
    plt.figure(figsize=(10, 6))
    if vibrations is not None:
        plt.plot(t_sel, vibrations, label="signal")
    if vibrations_w is not None:
        plt.plot(t_sel, vibrations_w, label="weighted_signal")
    if v_eff is not None:
        plt.plot(t_sel, v_eff, label="v_eff")
        idx = np.argmax(v_eff)
        plt.text(t_sel[idx], v_eff[idx], "max v_eff: {}".format(round(v_eff[idx], 3)), color="r")
plt.xlabel("t [s]")
plt.ylabel("v [mm/s]")
plt.title("velocity")
plt.legend()
plt.savefig(save_to_path.format("png"))
plt.show()
def plot_SBR_B_xyz(save_to_path,vibrations, vibrations_w,v_eff,t_sel):
"""
TODO check use of pandas plotting wrapper
    vibrations, vibrations_w and v_eff are optional arguments, each a tuple with the x, y and z signals
"""
fig = plt.figure(figsize=(10, 18))
ax1 = fig.add_subplot(3,1,1)
ax2 = fig.add_subplot(3,1,2)
ax3 = fig.add_subplot(3,1,3)
if vibrations:
ax1.plot(t_sel, vibrations[0], label="signal")
ax2.plot(t_sel, vibrations[1], label="signal")
ax3.plot(t_sel, vibrations[2], label="signal")
if vibrations_w:
ax1.plot(t_sel, vibrations_w[0], label="weighted_signal")
ax2.plot(t_sel, vibrations_w[1], label="weighted_signal")
ax3.plot(t_sel, vibrations_w[2], label="weighted_signal")
    if v_eff:
        idx = [np.argmax(v_eff[x]) for x in range(len(v_eff))]
        ax1.plot(t_sel, v_eff[0], label="v_eff")
        ax1.text(t_sel[idx[0]], v_eff[0][idx[0]], "max v_eff: {}".format(round(v_eff[0][idx[0]], 3)), color="r")
        ax2.plot(t_sel, v_eff[1], label="v_eff")
        ax2.text(t_sel[idx[1]], v_eff[1][idx[1]], "max v_eff: {}".format(round(v_eff[1][idx[1]], 3)), color="r")
        ax3.plot(t_sel, v_eff[2], label="v_eff")
        ax3.text(t_sel[idx[2]], v_eff[2][idx[2]], "max v_eff: {}".format(round(v_eff[2][idx[2]], 3)), color="r")
plt.xlabel("t [s]")
plt.ylabel("v [mm/s]")
plt.title("velocity")
plt.legend()
plt.savefig(save_to_path.format("png"))
plt.show()
|
[
"matplotlib.pyplot.title",
"scipy.fftpack.rfft",
"numpy.amin",
"numpy.argmax",
"numpy.logspace",
"numpy.argmin",
"matplotlib.pyplot.figure",
"numpy.exp",
"numpy.pad",
"numpy.fft.fft",
"numpy.power",
"numpy.linspace",
"numpy.fft.ifft",
"numpy.trapz",
"matplotlib.pyplot.show",
"numpy.log2",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"numpy.delete",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"Redbox_v2.file_manager.progress",
"numpy.zeros",
"numpy.amax",
"numpy.array",
"matplotlib.pyplot.xlabel",
"numpy.sqrt"
] |
[((701, 712), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (709, 712), True, 'import numpy as np\n'), ((1015, 1037), 'numpy.delete', 'np.delete', (['maximums', '(0)'], {}), '(maximums, 0)\n', (1024, 1037), True, 'import numpy as np\n'), ((1467, 1483), 'numpy.zeros', 'np.zeros', (['[1, 2]'], {}), '([1, 2])\n', (1475, 1483), True, 'import numpy as np\n'), ((1933, 1963), 'numpy.delete', 'np.delete', (['min_max_array', '(0)', '(0)'], {}), '(min_max_array, 0, 0)\n', (1942, 1963), True, 'import numpy as np\n'), ((5924, 5952), 'numpy.linspace', 'np.linspace', (['start', 'dt', 'stop'], {}), '(start, dt, stop)\n', (5935, 5952), True, 'import numpy as np\n'), ((6255, 6295), 'numpy.linspace', 'np.linspace', (['(0)', '(N * T)', 'N'], {'endpoint': '(False)'}), '(0, N * T, N, endpoint=False)\n', (6266, 6295), True, 'import numpy as np\n'), ((6302, 6340), 'numpy.pad', 'np.pad', (['v', '(0, N - v.size)', '"""constant"""'], {}), "(v, (0, N - v.size), 'constant')\n", (6308, 6340), True, 'import numpy as np\n'), ((6358, 6371), 'numpy.fft.fft', 'np.fft.fft', (['v'], {}), '(v)\n', (6368, 6371), True, 'import numpy as np\n'), ((6383, 6423), 'numpy.linspace', 'np.linspace', (['(0)', '(1 / T)', 'N'], {'endpoint': '(False)'}), '(0, 1 / T, N, endpoint=False)\n', (6394, 6423), True, 'import numpy as np\n'), ((6735, 6755), 'numpy.zeros', 'np.zeros', (['t_sel.size'], {}), '(t_sel.size)\n', (6743, 6755), True, 'import numpy as np\n'), ((7074, 7090), 'numpy.argmax', 'np.argmax', (['v_eff'], {}), '(v_eff)\n', (7083, 7090), True, 'import numpy as np\n'), ((7296, 7323), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (7306, 7323), True, 'import matplotlib.pyplot as plt\n'), ((7645, 7664), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t [s]"""'], {}), "('t [s]')\n", (7655, 7664), True, 'import matplotlib.pyplot as plt\n'), ((7670, 7692), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""v [mm/s]"""'], {}), "('v [mm/s]')\n", (7680, 7692), True, 'import matplotlib.pyplot as plt\n'), ((7698, 7719), 'matplotlib.pyplot.title', 'plt.title', (['"""velocity"""'], {}), "('velocity')\n", (7707, 7719), True, 'import matplotlib.pyplot as plt\n'), ((7725, 7737), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7735, 7737), True, 'import matplotlib.pyplot as plt\n'), ((7788, 7798), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7796, 7798), True, 'import matplotlib.pyplot as plt\n'), ((8016, 8044), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 18)'}), '(figsize=(10, 18))\n', (8026, 8044), True, 'import matplotlib.pyplot as plt\n'), ((9095, 9114), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t [s]"""'], {}), "('t [s]')\n", (9105, 9114), True, 'import matplotlib.pyplot as plt\n'), ((9120, 9142), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""v [mm/s]"""'], {}), "('v [mm/s]')\n", (9130, 9142), True, 'import matplotlib.pyplot as plt\n'), ((9148, 9169), 'matplotlib.pyplot.title', 'plt.title', (['"""velocity"""'], {}), "('velocity')\n", (9157, 9169), True, 'import matplotlib.pyplot as plt\n'), ((9175, 9187), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9185, 9187), True, 'import matplotlib.pyplot as plt\n'), ((9238, 9248), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9246, 9248), True, 'import matplotlib.pyplot as plt\n'), ((1704, 1802), 'numpy.array', 'np.array', (['[[data[index_max, 0], data[index_max, 1]], [data[index_min, 0], data[\n index_min, 1]]]'], {}), '([[data[index_max, 0], data[index_max, 1]], 
[data[index_min, 0],\n data[index_min, 1]]])\n', (1712, 1802), True, 'import numpy as np\n'), ((1820, 1862), 'numpy.concatenate', 'np.concatenate', (['(min_max_array, x)'], {'axis': '(0)'}), '((min_max_array, x), axis=0)\n', (1834, 1862), True, 'import numpy as np\n'), ((2892, 2955), 'numpy.logspace', 'np.logspace', (['(0)', 'N'], {'endpoint': '(False)', 'num': 'N', 'base': 'one_third_octave'}), '(0, N, endpoint=False, num=N, base=one_third_octave)\n', (2903, 2955), True, 'import numpy as np\n'), ((6481, 6512), 'numpy.sqrt', 'np.sqrt', (['(1 + (5.6 / f_mod) ** 2)'], {}), '(1 + (5.6 / f_mod) ** 2)\n', (6488, 6512), True, 'import numpy as np\n'), ((6581, 6610), 'numpy.fft.ifft', 'np.fft.ifft', (['vibrations_fft_w'], {}), '(vibrations_fft_w)\n', (6592, 6610), True, 'import numpy as np\n'), ((6867, 6900), 'numpy.exp', 'np.exp', (['(-t_sel[:i + 1][::-1] / Ts)'], {}), '(-t_sel[:i + 1][::-1] / Ts)\n', (6873, 6900), True, 'import numpy as np\n'), ((6989, 7064), 'Redbox_v2.file_manager.progress', 'fm.progress', (['i', '(t_sel.size - 1)', "('processing %s of %s' % (i + 1, t_sel.size))"], {}), "(i, t_sel.size - 1, 'processing %s of %s' % (i + 1, t_sel.size))\n", (7000, 7064), True, 'from Redbox_v2 import file_manager as fm\n'), ((7353, 7396), 'matplotlib.pyplot.plot', 'plt.plot', (['t_sel', 'vibrations'], {'label': '"""signal"""'}), "(t_sel, vibrations, label='signal')\n", (7361, 7396), True, 'import matplotlib.pyplot as plt\n'), ((7428, 7482), 'matplotlib.pyplot.plot', 'plt.plot', (['t_sel', 'vibrations_w'], {'label': '"""weighted_signal"""'}), "(t_sel, vibrations_w, label='weighted_signal')\n", (7436, 7482), True, 'import matplotlib.pyplot as plt\n'), ((7507, 7544), 'matplotlib.pyplot.plot', 'plt.plot', (['t_sel', 'v_eff'], {'label': '"""v_eff"""'}), "(t_sel, v_eff, label='v_eff')\n", (7515, 7544), True, 'import matplotlib.pyplot as plt\n'), ((1602, 1631), 'numpy.argmax', 'np.argmax', (['data[start:end, 1]'], {}), '(data[start:end, 1])\n', (1611, 1631), True, 'import numpy as np\n'), ((1661, 1690), 'numpy.argmin', 'np.argmin', (['data[start:end, 1]'], {}), '(data[start:end, 1])\n', (1670, 1690), True, 'import numpy as np\n'), ((6182, 6197), 'numpy.log2', 'np.log2', (['v.size'], {}), '(v.size)\n', (6189, 6197), True, 'import numpy as np\n'), ((8589, 8608), 'numpy.argmax', 'np.argmax', (['v_eff[x]'], {}), '(v_eff[x])\n', (8598, 8608), True, 'import numpy as np\n'), ((264, 283), 'numpy.power', 'np.power', (['signal', '(2)'], {}), '(signal, 2)\n', (272, 283), True, 'import numpy as np\n'), ((889, 907), 'numpy.amax', 'np.amax', (['selection'], {}), '(selection)\n', (896, 907), True, 'import numpy as np\n'), ((908, 926), 'numpy.amin', 'np.amin', (['selection'], {}), '(selection)\n', (915, 926), True, 'import numpy as np\n'), ((2180, 2192), 'scipy.fftpack.rfft', 'rfft', (['signal'], {}), '(signal)\n', (2184, 2192), False, 'from scipy.fftpack import rfft, rfftfreq\n'), ((2371, 2383), 'scipy.fftpack.rfft', 'rfft', (['signal'], {}), '(signal)\n', (2375, 2383), False, 'from scipy.fftpack import rfft, rfftfreq\n'), ((6938, 6978), 'numpy.trapz', 'np.trapz', (['(g_xi * v_sqrd_w[:i + 1])'], {'dx': 'dt'}), '(g_xi * v_sqrd_w[:i + 1], dx=dt)\n', (6946, 6978), True, 'import numpy as np\n'), ((352, 374), 'numpy.power', 'np.power', (['amplitude', '(2)'], {}), '(amplitude, 2)\n', (360, 374), True, 'import numpy as np\n')]
|
# Copyright 2021 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
The FastEstimator implementation of SimCLR with ResNet9 on ciFAIR-10.
This code takes its reference from the Google implementation (https://github.com/google-research/simclr).
Note that we use the ciFAIR10 dataset instead (https://cvjena.github.io/cifair/).
"""
import tempfile
import tensorflow as tf
from tensorflow.keras import layers
import fastestimator as fe
from fastestimator.dataset.data.cifair10 import load_data
from fastestimator.op.numpyop.meta import Sometimes
from fastestimator.op.numpyop.multivariate import HorizontalFlip, PadIfNeeded, RandomCrop
from fastestimator.op.numpyop.univariate import ColorJitter, GaussianBlur, ToFloat, ToGray
from fastestimator.op.tensorop import LambdaOp, TensorOp
from fastestimator.op.tensorop.loss import CrossEntropy
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
from fastestimator.trace.io import ModelSaver
from fastestimator.trace.metric import Accuracy
def ResNet9(input_size=(32, 32, 3), head_len=128, classes=10):
"""A small 9-layer ResNet Tensorflow model for cifar10 image classification.
The model architecture is from https://github.com/davidcpage/cifar10-fast
Args:
        input_size: The size of the input tensor (height, width, channels).
        head_len: The output length of the contrastive projection head.
        classes: The number of outputs the model should generate.
    Raises:
        ValueError: Length of `input_size` is not 3.
        ValueError: `input_size`[0] or `input_size`[1] is not a multiple of 16.
    Returns:
        A tuple of two TensorFlow models: the contrastive model ending in the
        projection head and the fine-tuning model ending in a softmax classifier.
"""
# prep layers
inp = layers.Input(shape=input_size)
x = layers.Conv2D(64, 3, padding='same')(inp)
x = layers.BatchNormalization(momentum=0.8)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
# layer1
x = layers.Conv2D(128, 3, padding='same')(x)
x = layers.MaxPool2D()(x)
x = layers.BatchNormalization(momentum=0.8)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
x = layers.Add()([x, residual(x, 128)])
# layer2
x = layers.Conv2D(256, 3, padding='same')(x)
x = layers.MaxPool2D()(x)
x = layers.BatchNormalization(momentum=0.8)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
# layer3
x = layers.Conv2D(512, 3, padding='same')(x)
x = layers.MaxPool2D()(x)
x = layers.BatchNormalization(momentum=0.8)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
x = layers.Add()([x, residual(x, 512)])
# layers4
x = layers.GlobalMaxPool2D()(x)
code = layers.Flatten()(x)
p_head = layers.Dense(head_len)(code)
model_con = tf.keras.Model(inputs=inp, outputs=p_head)
s_head = layers.Dense(classes)(code)
s_head = layers.Activation('softmax', dtype='float32')(s_head)
model_finetune = tf.keras.Model(inputs=inp, outputs=s_head)
return model_con, model_finetune
def residual(x, num_channel):
"""A ResNet unit for ResNet9.
Args:
x: Input Keras tensor.
num_channel: The number of layer channel.
Return:
Output Keras tensor.
"""
x = layers.Conv2D(num_channel, 3, padding='same')(x)
x = layers.BatchNormalization(momentum=0.8)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
x = layers.Conv2D(num_channel, 3, padding='same')(x)
x = layers.BatchNormalization(momentum=0.8)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
return x
class NTXentOp(TensorOp):
def __init__(self, arg1, arg2, outputs, temperature=1.0, mode=None):
super().__init__(inputs=(arg1, arg2), outputs=outputs, mode=mode)
self.temperature = temperature
def forward(self, data, state):
arg1, arg2 = data
loss = NTXent(arg1, arg2, self.temperature)
return loss
def NTXent(A, B, temperature):
large_number = 1e9
batch_size = tf.shape(A)[0]
A = tf.math.l2_normalize(A, -1)
B = tf.math.l2_normalize(B, -1)
mask = tf.one_hot(tf.range(batch_size), batch_size)
labels = tf.one_hot(tf.range(batch_size), 2 * batch_size)
aa = tf.matmul(A, A, transpose_b=True) / temperature
aa = aa - mask * large_number
ab = tf.matmul(A, B, transpose_b=True) / temperature
bb = tf.matmul(B, B, transpose_b=True) / temperature
bb = bb - mask * large_number
ba = tf.matmul(B, A, transpose_b=True) / temperature
loss_a = tf.nn.softmax_cross_entropy_with_logits(labels, tf.concat([ab, aa], 1))
loss_b = tf.nn.softmax_cross_entropy_with_logits(labels, tf.concat([ba, bb], 1))
loss = tf.reduce_mean(loss_a + loss_b)
return loss, ab, labels
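# Illustrative note (a sketch, not part of the original script): NTXent above is the
# normalized temperature-scaled cross-entropy loss of SimCLR, computed between the
# projections of two augmented views. The helper below only shows the expected call
# shape with assumed random projections and an assumed temperature; it is never called.
def _example_ntxent_call(batch_size=4, head_len=128, temperature=0.1):
    z1 = tf.random.normal((batch_size, head_len))
    z2 = tf.random.normal((batch_size, head_len))
    loss, logits, labels = NTXent(z1, z2, temperature)
    return loss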
def pretrain_model(epochs, batch_size, train_steps_per_epoch, save_dir):
# step 1: prepare dataset
train_data, test_data = load_data()
pipeline = fe.Pipeline(
train_data=train_data,
batch_size=batch_size,
ops=[
PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x"),
# augmentation 1
RandomCrop(32, 32, image_in="x", image_out="x_aug"),
Sometimes(HorizontalFlip(image_in="x_aug", image_out="x_aug"), prob=0.5),
Sometimes(
ColorJitter(inputs="x_aug", outputs="x_aug", brightness=0.8, contrast=0.8, saturation=0.8, hue=0.2),
prob=0.8),
Sometimes(ToGray(inputs="x_aug", outputs="x_aug"), prob=0.2),
Sometimes(GaussianBlur(inputs="x_aug", outputs="x_aug", blur_limit=(3, 3), sigma_limit=(0.1, 2.0)),
prob=0.5),
ToFloat(inputs="x_aug", outputs="x_aug"),
# augmentation 2
RandomCrop(32, 32, image_in="x", image_out="x_aug2"),
Sometimes(HorizontalFlip(image_in="x_aug2", image_out="x_aug2"), prob=0.5),
Sometimes(
ColorJitter(inputs="x_aug2", outputs="x_aug2", brightness=0.8, contrast=0.8, saturation=0.8, hue=0.2),
prob=0.8),
Sometimes(ToGray(inputs="x_aug2", outputs="x_aug2"), prob=0.2),
Sometimes(GaussianBlur(inputs="x_aug2", outputs="x_aug2", blur_limit=(3, 3), sigma_limit=(0.1, 2.0)),
prob=0.5),
ToFloat(inputs="x_aug2", outputs="x_aug2")
])
# step 2: prepare network
model_con, model_finetune = fe.build(model_fn=ResNet9, optimizer_fn=["adam", "adam"])
network = fe.Network(ops=[
LambdaOp(lambda x, y: tf.concat([x, y], axis=0), inputs=["x_aug", "x_aug2"], outputs="x_com"),
ModelOp(model=model_con, inputs="x_com", outputs="y_com"),
LambdaOp(lambda x: tf.split(x, 2, axis=0), inputs="y_com", outputs=["y_pred", "y_pred2"]),
NTXentOp(arg1="y_pred", arg2="y_pred2", outputs=["NTXent", "logit", "label"]),
UpdateOp(model=model_con, loss_name="NTXent")
])
# step 3: prepare estimator
traces = [
Accuracy(true_key="label", pred_key="logit", mode="train", output_name="contrastive_accuracy"),
ModelSaver(model=model_con, save_dir=save_dir),
]
estimator = fe.Estimator(pipeline=pipeline,
network=network,
epochs=epochs,
traces=traces,
train_steps_per_epoch=train_steps_per_epoch)
estimator.fit()
return model_con, model_finetune
def finetune_model(model, epochs, batch_size, train_steps_per_epoch, save_dir):
train_data, test_data = load_data()
train_data = train_data.split(0.1)
pipeline = fe.Pipeline(train_data=train_data,
eval_data=test_data,
batch_size=batch_size,
ops=[
ToFloat(inputs="x", outputs="x"),
])
network = fe.Network(ops=[
ModelOp(model=model, inputs="x", outputs="y_pred"),
CrossEntropy(inputs=["y_pred", "y"], outputs="ce"),
UpdateOp(model=model, loss_name="ce")
])
traces = [
Accuracy(true_key="y", pred_key="y_pred"),
]
estimator = fe.Estimator(pipeline=pipeline,
network=network,
epochs=epochs,
traces=traces,
train_steps_per_epoch=train_steps_per_epoch)
estimator.fit()
def fastestimator_run(epochs_pretrain=50,
epochs_finetune=10,
batch_size=512,
train_steps_per_epoch=None,
save_dir=tempfile.mkdtemp()):
model_con, model_finetune = pretrain_model(epochs_pretrain, batch_size, train_steps_per_epoch, save_dir)
finetune_model(model_finetune, epochs_finetune, batch_size, train_steps_per_epoch, save_dir)
if __name__ == "__main__":
fastestimator_run()
|
[
"fastestimator.op.numpyop.univariate.ColorJitter",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.matmul",
"fastestimator.dataset.data.cifair10.load_data",
"tensorflow.keras.layers.MaxPool2D",
"fastestimator.trace.io.ModelSaver",
"fastestimator.build",
"tensorflow.split",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.concat",
"fastestimator.op.tensorop.loss.CrossEntropy",
"fastestimator.op.numpyop.multivariate.PadIfNeeded",
"tempfile.mkdtemp",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Input",
"fastestimator.trace.metric.Accuracy",
"fastestimator.op.numpyop.univariate.GaussianBlur",
"tensorflow.keras.layers.GlobalMaxPool2D",
"tensorflow.range",
"tensorflow.reduce_mean",
"tensorflow.keras.Model",
"fastestimator.op.numpyop.multivariate.HorizontalFlip",
"tensorflow.math.l2_normalize",
"fastestimator.op.numpyop.univariate.ToGray",
"fastestimator.op.numpyop.univariate.ToFloat",
"tensorflow.keras.layers.Conv2D",
"fastestimator.op.tensorop.model.UpdateOp",
"fastestimator.Estimator",
"fastestimator.op.numpyop.multivariate.RandomCrop",
"fastestimator.op.tensorop.model.ModelOp",
"tensorflow.shape",
"tensorflow.keras.layers.Add"
] |
[((2222, 2252), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'input_size'}), '(shape=input_size)\n', (2234, 2252), False, 'from tensorflow.keras import layers\n'), ((3167, 3209), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inp', 'outputs': 'p_head'}), '(inputs=inp, outputs=p_head)\n', (3181, 3209), True, 'import tensorflow as tf\n'), ((3340, 3382), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inp', 'outputs': 's_head'}), '(inputs=inp, outputs=s_head)\n', (3354, 3382), True, 'import tensorflow as tf\n'), ((4381, 4408), 'tensorflow.math.l2_normalize', 'tf.math.l2_normalize', (['A', '(-1)'], {}), '(A, -1)\n', (4401, 4408), True, 'import tensorflow as tf\n'), ((4417, 4444), 'tensorflow.math.l2_normalize', 'tf.math.l2_normalize', (['B', '(-1)'], {}), '(B, -1)\n', (4437, 4444), True, 'import tensorflow as tf\n'), ((5042, 5073), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(loss_a + loss_b)'], {}), '(loss_a + loss_b)\n', (5056, 5073), True, 'import tensorflow as tf\n'), ((5236, 5247), 'fastestimator.dataset.data.cifair10.load_data', 'load_data', ([], {}), '()\n', (5245, 5247), False, 'from fastestimator.dataset.data.cifair10 import load_data\n'), ((6761, 6818), 'fastestimator.build', 'fe.build', ([], {'model_fn': 'ResNet9', 'optimizer_fn': "['adam', 'adam']"}), "(model_fn=ResNet9, optimizer_fn=['adam', 'adam'])\n", (6769, 6818), True, 'import fastestimator as fe\n'), ((7497, 7625), 'fastestimator.Estimator', 'fe.Estimator', ([], {'pipeline': 'pipeline', 'network': 'network', 'epochs': 'epochs', 'traces': 'traces', 'train_steps_per_epoch': 'train_steps_per_epoch'}), '(pipeline=pipeline, network=network, epochs=epochs, traces=\n traces, train_steps_per_epoch=train_steps_per_epoch)\n', (7509, 7625), True, 'import fastestimator as fe\n'), ((7905, 7916), 'fastestimator.dataset.data.cifair10.load_data', 'load_data', ([], {}), '()\n', (7914, 7916), False, 'from fastestimator.dataset.data.cifair10 import load_data\n'), ((8526, 8654), 'fastestimator.Estimator', 'fe.Estimator', ([], {'pipeline': 'pipeline', 'network': 'network', 'epochs': 'epochs', 'traces': 'traces', 'train_steps_per_epoch': 'train_steps_per_epoch'}), '(pipeline=pipeline, network=network, epochs=epochs, traces=\n traces, train_steps_per_epoch=train_steps_per_epoch)\n', (8538, 8654), True, 'import fastestimator as fe\n'), ((8991, 9009), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (9007, 9009), False, 'import tempfile\n'), ((2261, 2297), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(3)'], {'padding': '"""same"""'}), "(64, 3, padding='same')\n", (2274, 2297), False, 'from tensorflow.keras import layers\n'), ((2311, 2350), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (2336, 2350), False, 'from tensorflow.keras import layers\n'), ((2362, 2389), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (2378, 2389), False, 'from tensorflow.keras import layers\n'), ((2414, 2451), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(128)', '(3)'], {'padding': '"""same"""'}), "(128, 3, padding='same')\n", (2427, 2451), False, 'from tensorflow.keras import layers\n'), ((2463, 2481), 'tensorflow.keras.layers.MaxPool2D', 'layers.MaxPool2D', ([], {}), '()\n', (2479, 2481), False, 'from tensorflow.keras import layers\n'), ((2493, 2532), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {'momentum': '(0.8)'}), 
'(momentum=0.8)\n', (2518, 2532), False, 'from tensorflow.keras import layers\n'), ((2544, 2571), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (2560, 2571), False, 'from tensorflow.keras import layers\n'), ((2583, 2595), 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), '()\n', (2593, 2595), False, 'from tensorflow.keras import layers\n'), ((2640, 2677), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(256)', '(3)'], {'padding': '"""same"""'}), "(256, 3, padding='same')\n", (2653, 2677), False, 'from tensorflow.keras import layers\n'), ((2689, 2707), 'tensorflow.keras.layers.MaxPool2D', 'layers.MaxPool2D', ([], {}), '()\n', (2705, 2707), False, 'from tensorflow.keras import layers\n'), ((2719, 2758), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (2744, 2758), False, 'from tensorflow.keras import layers\n'), ((2770, 2797), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (2786, 2797), False, 'from tensorflow.keras import layers\n'), ((2822, 2859), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(512)', '(3)'], {'padding': '"""same"""'}), "(512, 3, padding='same')\n", (2835, 2859), False, 'from tensorflow.keras import layers\n'), ((2871, 2889), 'tensorflow.keras.layers.MaxPool2D', 'layers.MaxPool2D', ([], {}), '()\n', (2887, 2889), False, 'from tensorflow.keras import layers\n'), ((2901, 2940), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (2926, 2940), False, 'from tensorflow.keras import layers\n'), ((2952, 2979), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (2968, 2979), False, 'from tensorflow.keras import layers\n'), ((2991, 3003), 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), '()\n', (3001, 3003), False, 'from tensorflow.keras import layers\n'), ((3049, 3073), 'tensorflow.keras.layers.GlobalMaxPool2D', 'layers.GlobalMaxPool2D', ([], {}), '()\n', (3071, 3073), False, 'from tensorflow.keras import layers\n'), ((3088, 3104), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (3102, 3104), False, 'from tensorflow.keras import layers\n'), ((3122, 3144), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['head_len'], {}), '(head_len)\n', (3134, 3144), False, 'from tensorflow.keras import layers\n'), ((3224, 3245), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['classes'], {}), '(classes)\n', (3236, 3245), False, 'from tensorflow.keras import layers\n'), ((3265, 3310), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""softmax"""'], {'dtype': '"""float32"""'}), "('softmax', dtype='float32')\n", (3282, 3310), False, 'from tensorflow.keras import layers\n'), ((3637, 3682), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['num_channel', '(3)'], {'padding': '"""same"""'}), "(num_channel, 3, padding='same')\n", (3650, 3682), False, 'from tensorflow.keras import layers\n'), ((3694, 3733), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (3719, 3733), False, 'from tensorflow.keras import layers\n'), ((3745, 3772), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (3761, 3772), False, 'from tensorflow.keras import layers\n'), ((3784, 3829), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', 
(['num_channel', '(3)'], {'padding': '"""same"""'}), "(num_channel, 3, padding='same')\n", (3797, 3829), False, 'from tensorflow.keras import layers\n'), ((3841, 3880), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (3866, 3880), False, 'from tensorflow.keras import layers\n'), ((3892, 3919), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (3908, 3919), False, 'from tensorflow.keras import layers\n'), ((4358, 4369), 'tensorflow.shape', 'tf.shape', (['A'], {}), '(A)\n', (4366, 4369), True, 'import tensorflow as tf\n'), ((4468, 4488), 'tensorflow.range', 'tf.range', (['batch_size'], {}), '(batch_size)\n', (4476, 4488), True, 'import tensorflow as tf\n'), ((4526, 4546), 'tensorflow.range', 'tf.range', (['batch_size'], {}), '(batch_size)\n', (4534, 4546), True, 'import tensorflow as tf\n'), ((4574, 4607), 'tensorflow.matmul', 'tf.matmul', (['A', 'A'], {'transpose_b': '(True)'}), '(A, A, transpose_b=True)\n', (4583, 4607), True, 'import tensorflow as tf\n'), ((4665, 4698), 'tensorflow.matmul', 'tf.matmul', (['A', 'B'], {'transpose_b': '(True)'}), '(A, B, transpose_b=True)\n', (4674, 4698), True, 'import tensorflow as tf\n'), ((4722, 4755), 'tensorflow.matmul', 'tf.matmul', (['B', 'B'], {'transpose_b': '(True)'}), '(B, B, transpose_b=True)\n', (4731, 4755), True, 'import tensorflow as tf\n'), ((4813, 4846), 'tensorflow.matmul', 'tf.matmul', (['B', 'A'], {'transpose_b': '(True)'}), '(B, A, transpose_b=True)\n', (4822, 4846), True, 'import tensorflow as tf\n'), ((4922, 4944), 'tensorflow.concat', 'tf.concat', (['[ab, aa]', '(1)'], {}), '([ab, aa], 1)\n', (4931, 4944), True, 'import tensorflow as tf\n'), ((5007, 5029), 'tensorflow.concat', 'tf.concat', (['[ba, bb]', '(1)'], {}), '([ba, bb], 1)\n', (5016, 5029), True, 'import tensorflow as tf\n'), ((7323, 7422), 'fastestimator.trace.metric.Accuracy', 'Accuracy', ([], {'true_key': '"""label"""', 'pred_key': '"""logit"""', 'mode': '"""train"""', 'output_name': '"""contrastive_accuracy"""'}), "(true_key='label', pred_key='logit', mode='train', output_name=\n 'contrastive_accuracy')\n", (7331, 7422), False, 'from fastestimator.trace.metric import Accuracy\n'), ((7427, 7473), 'fastestimator.trace.io.ModelSaver', 'ModelSaver', ([], {'model': 'model_con', 'save_dir': 'save_dir'}), '(model=model_con, save_dir=save_dir)\n', (7437, 7473), False, 'from fastestimator.trace.io import ModelSaver\n'), ((8461, 8502), 'fastestimator.trace.metric.Accuracy', 'Accuracy', ([], {'true_key': '"""y"""', 'pred_key': '"""y_pred"""'}), "(true_key='y', pred_key='y_pred')\n", (8469, 8502), False, 'from fastestimator.trace.metric import Accuracy\n'), ((5364, 5433), 'fastestimator.op.numpyop.multivariate.PadIfNeeded', 'PadIfNeeded', ([], {'min_height': '(40)', 'min_width': '(40)', 'image_in': '"""x"""', 'image_out': '"""x"""'}), "(min_height=40, min_width=40, image_in='x', image_out='x')\n", (5375, 5433), False, 'from fastestimator.op.numpyop.multivariate import HorizontalFlip, PadIfNeeded, RandomCrop\n'), ((5477, 5528), 'fastestimator.op.numpyop.multivariate.RandomCrop', 'RandomCrop', (['(32)', '(32)'], {'image_in': '"""x"""', 'image_out': '"""x_aug"""'}), "(32, 32, image_in='x', image_out='x_aug')\n", (5487, 5528), False, 'from fastestimator.op.numpyop.multivariate import HorizontalFlip, PadIfNeeded, RandomCrop\n'), ((6014, 6054), 'fastestimator.op.numpyop.univariate.ToFloat', 'ToFloat', ([], {'inputs': '"""x_aug"""', 'outputs': '"""x_aug"""'}), 
"(inputs='x_aug', outputs='x_aug')\n", (6021, 6054), False, 'from fastestimator.op.numpyop.univariate import ColorJitter, GaussianBlur, ToFloat, ToGray\n'), ((6098, 6150), 'fastestimator.op.numpyop.multivariate.RandomCrop', 'RandomCrop', (['(32)', '(32)'], {'image_in': '"""x"""', 'image_out': '"""x_aug2"""'}), "(32, 32, image_in='x', image_out='x_aug2')\n", (6108, 6150), False, 'from fastestimator.op.numpyop.multivariate import HorizontalFlip, PadIfNeeded, RandomCrop\n'), ((6644, 6686), 'fastestimator.op.numpyop.univariate.ToFloat', 'ToFloat', ([], {'inputs': '"""x_aug2"""', 'outputs': '"""x_aug2"""'}), "(inputs='x_aug2', outputs='x_aug2')\n", (6651, 6686), False, 'from fastestimator.op.numpyop.univariate import ColorJitter, GaussianBlur, ToFloat, ToGray\n'), ((6961, 7018), 'fastestimator.op.tensorop.model.ModelOp', 'ModelOp', ([], {'model': 'model_con', 'inputs': '"""x_com"""', 'outputs': '"""y_com"""'}), "(model=model_con, inputs='x_com', outputs='y_com')\n", (6968, 7018), False, 'from fastestimator.op.tensorop.model import ModelOp, UpdateOp\n'), ((7214, 7259), 'fastestimator.op.tensorop.model.UpdateOp', 'UpdateOp', ([], {'model': 'model_con', 'loss_name': '"""NTXent"""'}), "(model=model_con, loss_name='NTXent')\n", (7222, 7259), False, 'from fastestimator.op.tensorop.model import ModelOp, UpdateOp\n'), ((8168, 8200), 'fastestimator.op.numpyop.univariate.ToFloat', 'ToFloat', ([], {'inputs': '"""x"""', 'outputs': '"""x"""'}), "(inputs='x', outputs='x')\n", (8175, 8200), False, 'from fastestimator.op.numpyop.univariate import ColorJitter, GaussianBlur, ToFloat, ToGray\n'), ((8272, 8322), 'fastestimator.op.tensorop.model.ModelOp', 'ModelOp', ([], {'model': 'model', 'inputs': '"""x"""', 'outputs': '"""y_pred"""'}), "(model=model, inputs='x', outputs='y_pred')\n", (8279, 8322), False, 'from fastestimator.op.tensorop.model import ModelOp, UpdateOp\n'), ((8332, 8382), 'fastestimator.op.tensorop.loss.CrossEntropy', 'CrossEntropy', ([], {'inputs': "['y_pred', 'y']", 'outputs': '"""ce"""'}), "(inputs=['y_pred', 'y'], outputs='ce')\n", (8344, 8382), False, 'from fastestimator.op.tensorop.loss import CrossEntropy\n'), ((8392, 8429), 'fastestimator.op.tensorop.model.UpdateOp', 'UpdateOp', ([], {'model': 'model', 'loss_name': '"""ce"""'}), "(model=model, loss_name='ce')\n", (8400, 8429), False, 'from fastestimator.op.tensorop.model import ModelOp, UpdateOp\n'), ((5552, 5603), 'fastestimator.op.numpyop.multivariate.HorizontalFlip', 'HorizontalFlip', ([], {'image_in': '"""x_aug"""', 'image_out': '"""x_aug"""'}), "(image_in='x_aug', image_out='x_aug')\n", (5566, 5603), False, 'from fastestimator.op.numpyop.multivariate import HorizontalFlip, PadIfNeeded, RandomCrop\n'), ((5655, 5758), 'fastestimator.op.numpyop.univariate.ColorJitter', 'ColorJitter', ([], {'inputs': '"""x_aug"""', 'outputs': '"""x_aug"""', 'brightness': '(0.8)', 'contrast': '(0.8)', 'saturation': '(0.8)', 'hue': '(0.2)'}), "(inputs='x_aug', outputs='x_aug', brightness=0.8, contrast=0.8,\n saturation=0.8, hue=0.2)\n", (5666, 5758), False, 'from fastestimator.op.numpyop.univariate import ColorJitter, GaussianBlur, ToFloat, ToGray\n'), ((5805, 5844), 'fastestimator.op.numpyop.univariate.ToGray', 'ToGray', ([], {'inputs': '"""x_aug"""', 'outputs': '"""x_aug"""'}), "(inputs='x_aug', outputs='x_aug')\n", (5811, 5844), False, 'from fastestimator.op.numpyop.univariate import ColorJitter, GaussianBlur, ToFloat, ToGray\n'), ((5879, 5971), 'fastestimator.op.numpyop.univariate.GaussianBlur', 'GaussianBlur', ([], {'inputs': '"""x_aug"""', 'outputs': 
'"""x_aug"""', 'blur_limit': '(3, 3)', 'sigma_limit': '(0.1, 2.0)'}), "(inputs='x_aug', outputs='x_aug', blur_limit=(3, 3),\n sigma_limit=(0.1, 2.0))\n", (5891, 5971), False, 'from fastestimator.op.numpyop.univariate import ColorJitter, GaussianBlur, ToFloat, ToGray\n'), ((6174, 6227), 'fastestimator.op.numpyop.multivariate.HorizontalFlip', 'HorizontalFlip', ([], {'image_in': '"""x_aug2"""', 'image_out': '"""x_aug2"""'}), "(image_in='x_aug2', image_out='x_aug2')\n", (6188, 6227), False, 'from fastestimator.op.numpyop.multivariate import HorizontalFlip, PadIfNeeded, RandomCrop\n'), ((6279, 6384), 'fastestimator.op.numpyop.univariate.ColorJitter', 'ColorJitter', ([], {'inputs': '"""x_aug2"""', 'outputs': '"""x_aug2"""', 'brightness': '(0.8)', 'contrast': '(0.8)', 'saturation': '(0.8)', 'hue': '(0.2)'}), "(inputs='x_aug2', outputs='x_aug2', brightness=0.8, contrast=0.8,\n saturation=0.8, hue=0.2)\n", (6290, 6384), False, 'from fastestimator.op.numpyop.univariate import ColorJitter, GaussianBlur, ToFloat, ToGray\n'), ((6431, 6472), 'fastestimator.op.numpyop.univariate.ToGray', 'ToGray', ([], {'inputs': '"""x_aug2"""', 'outputs': '"""x_aug2"""'}), "(inputs='x_aug2', outputs='x_aug2')\n", (6437, 6472), False, 'from fastestimator.op.numpyop.univariate import ColorJitter, GaussianBlur, ToFloat, ToGray\n'), ((6507, 6601), 'fastestimator.op.numpyop.univariate.GaussianBlur', 'GaussianBlur', ([], {'inputs': '"""x_aug2"""', 'outputs': '"""x_aug2"""', 'blur_limit': '(3, 3)', 'sigma_limit': '(0.1, 2.0)'}), "(inputs='x_aug2', outputs='x_aug2', blur_limit=(3, 3),\n sigma_limit=(0.1, 2.0))\n", (6519, 6601), False, 'from fastestimator.op.numpyop.univariate import ColorJitter, GaussianBlur, ToFloat, ToGray\n'), ((6880, 6905), 'tensorflow.concat', 'tf.concat', (['[x, y]'], {'axis': '(0)'}), '([x, y], axis=0)\n', (6889, 6905), True, 'import tensorflow as tf\n'), ((7047, 7069), 'tensorflow.split', 'tf.split', (['x', '(2)'], {'axis': '(0)'}), '(x, 2, axis=0)\n', (7055, 7069), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import argparse
from basic_modules.workflow import Workflow
from utils import logger
from CHiC.tool.run_chicago import ChicagoTool
#################################################
class process_run_chicago(Workflow):
"""
Function for processing capture Hi-C fastq files. Files are aligned,
    filtered and analysed for Capture Hi-C peaks.
"""
def __init__(self, configuration=None):
"""
initiate the class
Parameters:
-----------
Configuration: dict
            dictionary with parameters for different tools, indicating
how to run each of them
"""
logger.info("Initiating process_runChicago")
if configuration is None:
configuration = {}
self.configuration.update(configuration)
def run(self, input_files, metadata, output_files):
"""
        This is the main function that runs the CHiCAGO pipeline with the runChicago.R wrapper
Parameters
----------
input_files: dict
location with the .chinput files.
chinput_file: str in case there is one input file
chinput_file: comma separated list in case there
is more than one input file.
metadata: dict
Input metadata, str
output: dict
output file locations
Returns
-------
output_files : dict
Folder location with the output files
output_metadata: dict
Output metadata for the associated files in output_files
"""
try:
chicago_caller = ChicagoTool(self.configuration)
output_files_generated, output_metadata = chicago_caller.run(
input_files, metadata, output_files)
return output_files_generated, output_metadata
except IOError:
logger.info("chicago failed to generate output files =(")
################################################################
def main_json(config, in_metadata, out_metadata):
"""
Alternative main function
This function launches the app using configuration written in
two json files: config.json and metadata.json
"""
# 1. Instantiate and launch the App
print("1. Instantiate and launch the App")
from apps.jsonapp import JSONApp
app = JSONApp()
results = app.launch(process_run_chicago,
config,
in_metadata,
out_metadata)
# 2. The App has finished
print("2. Execution finished; see " + out_metadata)
print(results)
return results
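# Illustrative invocation sketch (the script and json file names below are
# placeholders, not files shipped with this module):
#   python process_run_chicago.py --config config.json \
#       --in_metadata in_metadata.json --out_metadata out_metadata.json --local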
###############################################################
if __name__ == "__main__":
#set up the command line parameters
PARSER = argparse.ArgumentParser(
description="Chicago algorithm for capture Hi-C peak detection")
PARSER.add_argument("--config", help="Configuration file")
PARSER.add_argument(
"--in_metadata", help="Location of metadata file")
PARSER.add_argument(
"--out_metadata", help="Location of output metadata file")
PARSER.add_argument(
"--local", action="store_const", const=True, default=False)
#Get matching parameters from the command line
ARGS = PARSER.parse_args()
CONFIG = ARGS.config
IN_METADATA = ARGS.in_metadata
OUT_METADATA = ARGS.out_metadata
LOCAL = ARGS.local
if LOCAL:
import sys
sys._run_from_cmdl = True # pylint: disable=protected-access
RESULTS = main_json(CONFIG, IN_METADATA, OUT_METADATA)
print(RESULTS)
|
[
"apps.jsonapp.JSONApp",
"utils.logger.info",
"argparse.ArgumentParser",
"CHiC.tool.run_chicago.ChicagoTool"
] |
[((3062, 3071), 'apps.jsonapp.JSONApp', 'JSONApp', ([], {}), '()\n', (3069, 3071), False, 'from apps.jsonapp import JSONApp\n'), ((3501, 3594), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Chicago algorithm for capture Hi-C peak detection"""'}), "(description=\n 'Chicago algorithm for capture Hi-C peak detection')\n", (3524, 3594), False, 'import argparse\n'), ((1367, 1411), 'utils.logger.info', 'logger.info', (['"""Initiating process_runChicago"""'], {}), "('Initiating process_runChicago')\n", (1378, 1411), False, 'from utils import logger\n'), ((2331, 2362), 'CHiC.tool.run_chicago.ChicagoTool', 'ChicagoTool', (['self.configuration'], {}), '(self.configuration)\n', (2342, 2362), False, 'from CHiC.tool.run_chicago import ChicagoTool\n'), ((2588, 2645), 'utils.logger.info', 'logger.info', (['"""chicago failed to generate output files =("""'], {}), "('chicago failed to generate output files =(')\n", (2599, 2645), False, 'from utils import logger\n')]
|
import asyncio
import beneath
import psycopg2
import json
import yaml
from datetime import datetime
from schemas import get_schema, check_for_and_encode_ts
with open(".development.yaml", "r") as ymlfile:
config = yaml.safe_load(ymlfile)
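# Illustrative sketch of the expected .development.yaml layout, inferred from the
# keys read throughout this script; all values below are placeholders:
#   postgres:
#     database: mydb
#     username: replicator
#     password: secret
#     host: localhost
#     replication_slot: my_slot
#     tables: ["public.table1", "public.table2"]
#   beneath:
#     username: my_user
#     project: my_project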
POLLING_INTERVAL = 5
SCHEMA = """
type Change @schema {
table: String! @key
timestamp: Timestamp! @key
operation: String! @key
value: String!
}
"""
def connect_to_source_db():
conn = psycopg2.connect(
database=config["postgres"]["database"],
user=config["postgres"]["username"],
password=config["postgres"]["password"],
host=config["postgres"]["host"],
port="5432",
)
conn.autocommit = True
cursor = conn.cursor()
return cursor
cursor = connect_to_source_db()
async def get_all_changes(p):
while True:
cursor.execute(
f"""
SELECT data FROM pg_logical_slot_get_changes('{config['postgres']['replication_slot']}', NULL, NULL,
'include-lsn', 'True', 'include-timestamp', 'True', 'add-tables', '{','.join(config['postgres']['tables'])}');
"""
)
txns = cursor.fetchall()
for txn in txns:
txn_json = json.loads(txn[0])
changes = txn_json["change"]
for change in changes:
yield {
"table": change["table"],
"timestamp": datetime.strptime(
f"{txn_json['timestamp']}00", "%Y-%m-%d %H:%M:%S.%f%z"
),
"operation": change["kind"],
"value": json.dumps(
{
k: change.get(k)
for k in (
"columnnames",
"columntypes",
"columnvalues",
"oldkeys",
)
}
),
}
# TODO: consider checkpointing the LSN (but pg_logical_slot_get_changes() doesn't let us choose LSN position)
# p.checkpoints.set("nextlsn", txn_json["nextlsn"])
await asyncio.sleep(POLLING_INTERVAL)
def filter_for_table(table):
async def filter(in_record):
if in_record["table"] == table:
# construct out_record
value_blob = json.loads(in_record["value"])
if in_record["operation"] in ["insert", "update"]:
# TODO: test more types, might have to do more type conversions
out_record = {
col: check_for_and_encode_ts(
value_blob["columntypes"][i], value_blob["columnvalues"][i]
)
for (i, col) in enumerate(value_blob["columnnames"])
}
out_record["_updated_at"] = in_record["timestamp"]
if in_record["operation"] == "delete":
# TODO: Handle required non-key columns.
# - Option1: Set Replica Identity to FULL for all tables
# -- not ideal, since a) requires more user setup and b) passes more data
# - Option2: Just generate synthetic data here
# -- the row is getting deleted anyways, so doesn't really matter what the values are
# print(value_blob)
out_record = dict(
zip(
value_blob["oldkeys"]["keynames"],
value_blob["oldkeys"]["keyvalues"],
)
)
out_record["_updated_at"] = in_record["timestamp"]
out_record["_deleted_at"] = in_record["timestamp"]
yield out_record
return filter
def fan_out(p, all_changes, list_of_tables):
# list_of_tables: ["schemaA.table1", "schemaA.table2", "schemaB.table1", ...]
for schema_table in list_of_tables:
schema = schema_table.split(".")[0] # a Postgres "schema" (a namespace)
table = schema_table.split(".")[1]
table_changes = p.apply(all_changes, filter_for_table(table))
p.write_table(
table_changes,
f"{config['beneath']['username']}/{config['beneath']['project']}/{config['postgres']['database']}-{schema}-{table}",
schema=get_schema(cursor, table), # a Beneath "schema" (type info)
description=f"{table} table replicated from Postgres",
)
if __name__ == "__main__":
p = beneath.Pipeline(parse_args=True, disable_checkpoints=True)
p.description = "Postgres CDC"
all_changes = p.generate(get_all_changes)
p.write_table(
all_changes,
f"{config['beneath']['username']}/{config['beneath']['project']}/{config['postgres']['database']}-cdc",
schema=SCHEMA,
description="Raw data captured from a Postgres CDC service",
)
fan_out(p, all_changes, config["postgres"]["tables"])
p.main()
|
[
"schemas.check_for_and_encode_ts",
"json.loads",
"asyncio.sleep",
"beneath.Pipeline",
"schemas.get_schema",
"datetime.datetime.strptime",
"yaml.safe_load",
"psycopg2.connect"
] |
[((218, 241), 'yaml.safe_load', 'yaml.safe_load', (['ymlfile'], {}), '(ymlfile)\n', (232, 241), False, 'import yaml\n'), ((448, 639), 'psycopg2.connect', 'psycopg2.connect', ([], {'database': "config['postgres']['database']", 'user': "config['postgres']['username']", 'password': "config['postgres']['password']", 'host': "config['postgres']['host']", 'port': '"""5432"""'}), "(database=config['postgres']['database'], user=config[\n 'postgres']['username'], password=config['postgres']['password'], host=\n config['postgres']['host'], port='5432')\n", (464, 639), False, 'import psycopg2\n'), ((4522, 4581), 'beneath.Pipeline', 'beneath.Pipeline', ([], {'parse_args': '(True)', 'disable_checkpoints': '(True)'}), '(parse_args=True, disable_checkpoints=True)\n', (4538, 4581), False, 'import beneath\n'), ((1203, 1221), 'json.loads', 'json.loads', (['txn[0]'], {}), '(txn[0])\n', (1213, 1221), False, 'import json\n'), ((2196, 2227), 'asyncio.sleep', 'asyncio.sleep', (['POLLING_INTERVAL'], {}), '(POLLING_INTERVAL)\n', (2209, 2227), False, 'import asyncio\n'), ((2393, 2423), 'json.loads', 'json.loads', (["in_record['value']"], {}), "(in_record['value'])\n", (2403, 2423), False, 'import json\n'), ((4347, 4372), 'schemas.get_schema', 'get_schema', (['cursor', 'table'], {}), '(cursor, table)\n', (4357, 4372), False, 'from schemas import get_schema, check_for_and_encode_ts\n'), ((2623, 2712), 'schemas.check_for_and_encode_ts', 'check_for_and_encode_ts', (["value_blob['columntypes'][i]", "value_blob['columnvalues'][i]"], {}), "(value_blob['columntypes'][i], value_blob[\n 'columnvalues'][i])\n", (2646, 2712), False, 'from schemas import get_schema, check_for_and_encode_ts\n'), ((1401, 1474), 'datetime.datetime.strptime', 'datetime.strptime', (['f"""{txn_json[\'timestamp\']}00"""', '"""%Y-%m-%d %H:%M:%S.%f%z"""'], {}), '(f"{txn_json[\'timestamp\']}00", \'%Y-%m-%d %H:%M:%S.%f%z\')\n', (1418, 1474), False, 'from datetime import datetime\n')]
|
from warnings import warn
warn("pytools.log was moved to https://github.com/illinois-ceesd/logpyle/. "
"I will try to import that for you. If the import fails, say "
"'pip install logpyle', and change your imports from 'pytools.log' "
"to 'logpyle'.", DeprecationWarning)
from logpyle import * # noqa # pylint: disable=import-error
|
[
"warnings.warn"
] |
[((27, 273), 'warnings.warn', 'warn', (['"""pytools.log was moved to https://github.com/illinois-ceesd/logpyle/. I will try to import that for you. If the import fails, say \'pip install logpyle\', and change your imports from \'pytools.log\' to \'logpyle\'."""', 'DeprecationWarning'], {}), '(\n "pytools.log was moved to https://github.com/illinois-ceesd/logpyle/. I will try to import that for you. If the import fails, say \'pip install logpyle\', and change your imports from \'pytools.log\' to \'logpyle\'."\n , DeprecationWarning)\n', (31, 273), False, 'from warnings import warn\n')]
|
# main imports
import sys, os, argparse
import numpy as np
import random
import time
import json
# image processing imports
from PIL import Image
from ipfml.processing import transform, segmentation
from ipfml import utils
# modules imports
sys.path.insert(0, '') # trick to enable import of main folder module
import custom_config as cfg
from modules.utils import data as dt
from data_attributes import get_image_features
# getting configuration information
zone_folder = cfg.zone_folder
min_max_filename = cfg.min_max_filename_extension
# define all scenes values
scenes_list = cfg.scenes_names
scenes_indexes = cfg.scenes_indices
choices = cfg.normalization_choices
zones = cfg.zones_indices
seuil_expe_filename = cfg.seuil_expe_filename
features_choices = cfg.features_choices_labels
output_data_folder = cfg.output_data_folder
data_augmented_filename = cfg.data_augmented_filename
generic_output_file_svd = '_random.csv'
def generate_data_svd(data_type, mode, path):
"""
@brief Method which generates all .csv files from scenes
@param data_type, feature choice
@param mode, normalization choice
@param path, data augmented path
@return nothing
"""
scenes = os.listdir(path)
    # remove min max and previously generated .csv files from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s and generic_output_file_svd not in s]
# keep in memory min and max data found from data_type
min_val_found = sys.maxsize
max_val_found = 0
data_min_max_filename = os.path.join(path, data_type + min_max_filename)
data_filename = os.path.join(path, data_augmented_filename)
# getting output filename
output_svd_filename = data_type + "_" + mode + generic_output_file_svd
current_file = open(os.path.join(path, output_svd_filename), 'w')
with open(data_filename, 'r') as f:
lines = f.readlines()
number_of_images = len(lines)
for index, line in enumerate(lines):
data = line.split(';')
scene_name = data[0]
number_of_samples = data[2]
label_img = data[3]
img_path = data[4].replace('\n', '')
block = Image.open(os.path.join(path, img_path))
###########################
# feature computation part #
###########################
data = get_image_features(data_type, block)
##################
# Data mode part #
##################
# modify data depending mode
if mode == 'svdne':
# getting max and min information from min_max_filename
with open(data_min_max_filename, 'r') as f:
min_val = float(f.readline())
max_val = float(f.readline())
data = utils.normalize_arr_with_range(data, min_val, max_val)
if mode == 'svdn':
data = utils.normalize_arr(data)
# save min and max found from dataset in order to normalize data using whole data known
if mode == 'svd':
current_min = data.min()
current_max = data.max()
if current_min < min_val_found:
min_val_found = current_min
if current_max > max_val_found:
max_val_found = current_max
# add of index
current_file.write(scene_name + ';' + number_of_samples + ';' + label_img + ';')
for val in data:
current_file.write(str(val) + ";")
print(data_type + "_" + mode + " - " + "{0:.2f}".format((index + 1) / number_of_images * 100.) + "%")
sys.stdout.write("\033[F")
current_file.write('\n')
print('\n')
# save current information about min file found
if mode == 'svd':
with open(data_min_max_filename, 'w') as f:
f.write(str(min_val_found) + '\n')
f.write(str(max_val_found) + '\n')
print("%s_%s : end of data generation\n" % (data_type, mode))
def main():
parser = argparse.ArgumentParser(description="Compute and prepare data of feature of all scenes (keep in memory min and max value found)")
parser.add_argument('--feature', type=str,
help="feature choice in order to compute data (use 'all' if all features are needed)")
parser.add_argument('--folder', type=str, help="folder which contains the whole dataset")
args = parser.parse_args()
p_feature = args.feature
p_folder = args.folder
# generate all or specific feature data
if p_feature == 'all':
for m in features_choices:
generate_data_svd(m, 'svd', p_folder)
generate_data_svd(m, 'svdn', p_folder)
generate_data_svd(m, 'svdne', p_folder)
else:
if p_feature not in features_choices:
raise ValueError('Unknown feature choice : ', features_choices)
generate_data_svd(p_feature, 'svd', p_folder)
generate_data_svd(p_feature, 'svdn', p_folder)
generate_data_svd(p_feature, 'svdne', p_folder)
if __name__ == "__main__":
main()
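# Illustrative invocation sketch (the script name and folder path are placeholders;
# the feature name must be one of cfg.features_choices_labels or 'all'):
#   python generate_data_svd.py --feature all --folder data/augmented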
|
[
"sys.stdout.write",
"argparse.ArgumentParser",
"ipfml.utils.normalize_arr",
"sys.path.insert",
"data_attributes.get_image_features",
"ipfml.utils.normalize_arr_with_range",
"os.path.join",
"os.listdir"
] |
[((244, 266), 'sys.path.insert', 'sys.path.insert', (['(0)', '""""""'], {}), "(0, '')\n", (259, 266), False, 'import sys, os, argparse\n'), ((1296, 1312), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1306, 1312), False, 'import sys, os, argparse\n'), ((1591, 1639), 'os.path.join', 'os.path.join', (['path', '(data_type + min_max_filename)'], {}), '(path, data_type + min_max_filename)\n', (1603, 1639), False, 'import sys, os, argparse\n'), ((1660, 1703), 'os.path.join', 'os.path.join', (['path', 'data_augmented_filename'], {}), '(path, data_augmented_filename)\n', (1672, 1703), False, 'import sys, os, argparse\n'), ((4198, 4337), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute and prepare data of feature of all scenes (keep in memory min and max value found)"""'}), "(description=\n 'Compute and prepare data of feature of all scenes (keep in memory min and max value found)'\n )\n", (4221, 4337), False, 'import sys, os, argparse\n'), ((1835, 1874), 'os.path.join', 'os.path.join', (['path', 'output_svd_filename'], {}), '(path, output_svd_filename)\n', (1847, 1874), False, 'import sys, os, argparse\n'), ((2461, 2497), 'data_attributes.get_image_features', 'get_image_features', (['data_type', 'block'], {}), '(data_type, block)\n', (2479, 2497), False, 'from data_attributes import get_image_features\n'), ((3796, 3822), 'sys.stdout.write', 'sys.stdout.write', (['"""\x1b[F"""'], {}), "('\\x1b[F')\n", (3812, 3822), False, 'import sys, os, argparse\n'), ((2280, 2308), 'os.path.join', 'os.path.join', (['path', 'img_path'], {}), '(path, img_path)\n', (2292, 2308), False, 'import sys, os, argparse\n'), ((2923, 2977), 'ipfml.utils.normalize_arr_with_range', 'utils.normalize_arr_with_range', (['data', 'min_val', 'max_val'], {}), '(data, min_val, max_val)\n', (2953, 2977), False, 'from ipfml import utils\n'), ((3033, 3058), 'ipfml.utils.normalize_arr', 'utils.normalize_arr', (['data'], {}), '(data)\n', (3052, 3058), False, 'from ipfml import utils\n')]
|
# The MIT License (MIT)
#
# Copyright (c) 2020 ETH Zurich
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import exputil
import time
try:
from .run_list import *
except (ImportError, SystemError):
from run_list import *
local_shell = exputil.LocalShell()
max_num_processes = 4
# Check that no screen is running
if local_shell.count_screens() != 0:
print("There is a screen already running. "
"Please kill all screens before running this analysis script (killall screen).")
exit(1)
# Generate the commands
commands_to_run = []
for run in get_tcp_run_list():
logs_ns3_dir = "temp/runs/" + run["name"] + "/logs_ns3"
local_shell.remove_force_recursive(logs_ns3_dir)
local_shell.make_full_dir(logs_ns3_dir)
commands_to_run.append(
"cd ../../ns3-sat-sim/simulator; "
"./waf --run=\"main_satnet "
"--run_dir='../../integration_tests/test_manila_dalian_over_kuiper/temp/runs/" + run["name"] + "'\" "
"2>&1 | "
"tee '../../integration_tests/test_manila_dalian_over_kuiper/" + logs_ns3_dir + "/console.txt'"
)
# Run the commands
print("Running commands (at most %d in parallel)..." % max_num_processes)
for i in range(len(commands_to_run)):
print("Starting command %d out of %d: %s" % (i + 1, len(commands_to_run), commands_to_run[i]))
local_shell.detached_exec(commands_to_run[i])
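    # Throttle: wait for a detached screen session to finish before launching the next command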
while local_shell.count_screens() >= max_num_processes:
time.sleep(2)
# Awaiting final completion before exiting
print("Waiting completion of the last %d..." % max_num_processes)
while local_shell.count_screens() > 0:
time.sleep(2)
print("Finished.")
|
[
"exputil.LocalShell",
"time.sleep"
] |
[((1254, 1274), 'exputil.LocalShell', 'exputil.LocalShell', ([], {}), '()\n', (1272, 1274), False, 'import exputil\n'), ((2616, 2629), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2626, 2629), False, 'import time\n'), ((2449, 2462), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2459, 2462), False, 'import time\n')]
|
#!/usr/bin/env python3
import boto3
class EmailSender():
def __init__(self, from_address, session=None):
self._from_address = from_address
self._client = boto3.client('ses', region_name='us-east-1')
def send_email(self, to_address, subject, message):
# type checking
if isinstance(to_address, str):
# check if commas are contained
if "," in to_address:
# if commas contained split into multiple address
addresses_to_pass = to_address.split(",")
else:
# if there is no comma, its one address
addresses_to_pass = [to_address]
elif isinstance(to_address, list):
addresses_to_pass = to_address
else:
            raise ValueError("to_address must be a str or a list of address strings")
res = self._client.send_email(
Source=self._from_address,
Destination={
'ToAddresses': addresses_to_pass,
'CcAddresses': [],
'BccAddresses': []
},
Message={
'Subject': {
'Data': subject,
'Charset': 'utf-8'
},
'Body': {
'Text': {
'Data': message,
'Charset': 'utf-8'
},
# TODO: check if you can have both
'Html': {
'Data': message,
'Charset': 'utf-8'
}
}
}
)
return res
|
[
"boto3.client"
] |
[((178, 222), 'boto3.client', 'boto3.client', (['"""ses"""'], {'region_name': '"""us-east-1"""'}), "('ses', region_name='us-east-1')\n", (190, 222), False, 'import boto3\n')]
|
import socket
import subprocess
from chatutils import utils
from chatutils.chatio2 import ChatIO
configs = utils.JSONLoader()
HEADER_LEN = configs.dict["system"]["headerLen"]
def commands(client_socket):
# while True:
# breakpoint()
ChatIO().pack_n_send(client_socket, "C", b"<<cmd:#>>")
# client_socket.send(b"<<cmd:#>> ")
cmd_buffer = ChatIO.unpack_data(client_socket)
response = run_cmd(cmd_buffer)
client_socket.send(response)
def run_cmd(command) -> bytes:
# Trim the \n char.
command = command.rstrip().decode()
try:
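        # Run the command through the shell, capturing stdout and stderr together as bytes.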
output = subprocess.check_output(command,
stderr=subprocess.STDOUT,
shell=True)
except:
output = f"Command not found: {command} \r\n"
output = output.encode()
return output
|
[
"chatutils.chatio2.ChatIO.unpack_data",
"chatutils.utils.JSONLoader",
"chatutils.chatio2.ChatIO",
"subprocess.check_output"
] |
[((109, 127), 'chatutils.utils.JSONLoader', 'utils.JSONLoader', ([], {}), '()\n', (125, 127), False, 'from chatutils import utils\n'), ((361, 394), 'chatutils.chatio2.ChatIO.unpack_data', 'ChatIO.unpack_data', (['client_socket'], {}), '(client_socket)\n', (379, 394), False, 'from chatutils.chatio2 import ChatIO\n'), ((586, 656), 'subprocess.check_output', 'subprocess.check_output', (['command'], {'stderr': 'subprocess.STDOUT', 'shell': '(True)'}), '(command, stderr=subprocess.STDOUT, shell=True)\n', (609, 656), False, 'import subprocess\n'), ((249, 257), 'chatutils.chatio2.ChatIO', 'ChatIO', ([], {}), '()\n', (255, 257), False, 'from chatutils.chatio2 import ChatIO\n')]
|
import json
import os
import shlex
import subprocess
import pytest
@pytest.fixture(scope='session')
def example_dir(tests_dir):
return tests_dir / 'example'
@pytest.fixture(scope='session')
def anisble_inventory(example_dir):
return open(example_dir / 'ansible.json', 'r')
@pytest.fixture(scope='session')
def terraform_config(example_dir):
return open(example_dir / 'terraform.tf.json', 'r')
def test_ansible_inventory(tests_dir, example_dir, anisble_inventory):
project_dir = tests_dir.parent
inventory_exe = example_dir / 'ansible_hosts.py'
result = subprocess.run(
shlex.split(str(inventory_exe)),
stdout=subprocess.PIPE,
check=True,
env=dict(os.environ, PYTHONPATH='{}:{}'.format(project_dir, example_dir)),
).stdout
assert json.loads(result.decode()) == json.load(anisble_inventory)
def test_terraform_vars(tests_dir, example_dir, terraform_config):
project_dir = tests_dir.parent
inventory_exe = example_dir / 'terraform_vars.py'
subprocess.run(
shlex.split(str(inventory_exe)),
check=True,
env=dict(os.environ, PYTHONPATH='{}:{}'.format(project_dir, example_dir)),
)
result_path = example_dir / 'terraform_result.tf.json'
result = open(result_path, 'r')
assert json.load(result) == json.load(terraform_config)
result_path.unlink()
|
[
"json.load",
"pytest.fixture"
] |
[((71, 102), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (85, 102), False, 'import pytest\n'), ((167, 198), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (181, 198), False, 'import pytest\n'), ((289, 320), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (303, 320), False, 'import pytest\n'), ((835, 863), 'json.load', 'json.load', (['anisble_inventory'], {}), '(anisble_inventory)\n', (844, 863), False, 'import json\n'), ((1301, 1318), 'json.load', 'json.load', (['result'], {}), '(result)\n', (1310, 1318), False, 'import json\n'), ((1322, 1349), 'json.load', 'json.load', (['terraform_config'], {}), '(terraform_config)\n', (1331, 1349), False, 'import json\n')]
|
import os
import pickle
import numpy as np
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ms2ldaviz.settings")
import django
django.setup()
import jsonpickle
from basicviz.models import Experiment,Document
if __name__ == '__main__':
experiment_name = sys.argv[1]
experiment = Experiment.objects.get(name = experiment_name)
documents = Document.objects.filter(experiment = experiment)
for document in documents:
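        # Mirror the 'm/z' metadata field into 'parentmass' (when present), then re-encode and save.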
md = jsonpickle.decode(document.metadata)
if 'm/z' in md:
md['parentmass'] = float(md['m/z'])
document.metadata = jsonpickle.encode(md)
document.save()
|
[
"jsonpickle.encode",
"os.environ.setdefault",
"django.setup",
"jsonpickle.decode",
"basicviz.models.Document.objects.filter",
"basicviz.models.Experiment.objects.get"
] |
[((54, 123), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""ms2ldaviz.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'ms2ldaviz.settings')\n", (75, 123), False, 'import os\n'), ((139, 153), 'django.setup', 'django.setup', ([], {}), '()\n', (151, 153), False, 'import django\n'), ((295, 339), 'basicviz.models.Experiment.objects.get', 'Experiment.objects.get', ([], {'name': 'experiment_name'}), '(name=experiment_name)\n', (317, 339), False, 'from basicviz.models import Experiment, Document\n'), ((355, 401), 'basicviz.models.Document.objects.filter', 'Document.objects.filter', ([], {'experiment': 'experiment'}), '(experiment=experiment)\n', (378, 401), False, 'from basicviz.models import Experiment, Document\n'), ((439, 475), 'jsonpickle.decode', 'jsonpickle.decode', (['document.metadata'], {}), '(document.metadata)\n', (456, 475), False, 'import jsonpickle\n'), ((555, 576), 'jsonpickle.encode', 'jsonpickle.encode', (['md'], {}), '(md)\n', (572, 576), False, 'import jsonpickle\n')]
|
import logging
import inspect
import collections
import random
import torch
logger = logging.getLogger(__name__)
def get_tensors(object_):
""" Get all tensors associated with ``object_``
Args:
object_ (any): Any object to look for tensors.
Returns:
(list of torch.tensor): List of tensors that are associated with ``object_``.
"""
if torch.is_tensor(object_):
return [object_]
elif isinstance(object_, (str, float, int)):
return []
tensors = set()
if isinstance(object_, collections.abc.Mapping):
for value in object_.values():
tensors.update(get_tensors(value))
elif isinstance(object_, collections.abc.Iterable):
for value in object_:
tensors.update(get_tensors(value))
else:
members = [
value for key, value in inspect.getmembers(object_)
if not isinstance(value, (collections.abc.Callable, type(None)))
]
tensors.update(get_tensors(members))
return tensors
def sampler_to_iterator(dataset, sampler):
""" Given a batch sampler or sampler returns examples instead of indices
Args:
dataset (torch.utils.data.Dataset): Dataset to sample from.
sampler (torch.utils.data.sampler.Sampler): Sampler over the dataset.
Returns:
generator over dataset examples
"""
for sample in sampler:
if isinstance(sample, (list, tuple)):
# yield a batch
yield [dataset[i] for i in sample]
else:
# yield a single example
yield dataset[sample]
def datasets_iterator(*datasets):
"""
Args:
*datasets (:class:`list` of :class:`torch.utils.data.Dataset`)
Returns:
generator over rows in ``*datasets``
"""
for dataset in datasets:
for row in dataset:
yield row
def flatten_parameters(model):
""" ``flatten_parameters`` of a RNN model loaded from disk. """
model.apply(lambda m: m.flatten_parameters() if hasattr(m, 'flatten_parameters') else None)
def shuffle(list_, random_seed=123):
""" Shuffle list deterministically based on ``random_seed``.
**Reference:**
https://stackoverflow.com/questions/19306976/python-shuffling-with-a-parameter-to-get-the-same-result
Example:
>>> a = [1, 2, 3, 4, 5]
>>> b = [1, 2, 3, 4, 5]
>>> shuffle(a, random_seed=456)
>>> shuffle(b, random_seed=456)
>>> a == b
True
>>> a, b
([1, 3, 2, 5, 4], [1, 3, 2, 5, 4])
Args:
list_ (list): List to be shuffled.
random_seed (int): Random seed used to shuffle.
Returns:
None:
"""
random.Random(random_seed).shuffle(list_)
def resplit_datasets(dataset, other_dataset, random_seed=None, split=None):
"""Deterministic shuffle and split algorithm.
Given the same two datasets and the same ``random_seed``, the split happens the same exact way
every call.
Args:
dataset (lib.datasets.Dataset): First dataset.
other_dataset (lib.datasets.Dataset): Another dataset.
random_seed (int, optional): Seed to control the shuffle of both datasets.
split (float, optional): If defined it is the percentage of rows that first dataset gets
after split otherwise the original proportions are kept.
Returns:
:class:`lib.datasets.Dataset`, :class:`lib.datasets.Dataset`: Resplit datasets.
"""
# Prevent circular dependency
from torchnlp.datasets import Dataset
concat = dataset.rows + other_dataset.rows
shuffle(concat, random_seed=random_seed)
if split is None:
return Dataset(concat[:len(dataset)]), Dataset(concat[len(dataset):])
else:
split = max(min(round(len(concat) * split), len(concat)), 0)
return Dataset(concat[:split]), Dataset(concat[split:])
def torch_equals_ignore_index(tensor, tensor_other, ignore_index=None):
"""
Compute ``torch.equal`` with the optional mask parameter.
Args:
ignore_index (int, optional): Specifies a ``tensor`` index that is ignored.
Returns:
(bool) Returns ``True`` if target and prediction are equal.
"""
if ignore_index is not None:
assert tensor.size() == tensor_other.size()
mask_arr = tensor.ne(ignore_index)
tensor = tensor.masked_select(mask_arr)
tensor_other = tensor_other.masked_select(mask_arr)
return torch.equal(tensor, tensor_other)
def is_namedtuple(object_):
return hasattr(object_, '_asdict') and isinstance(object_, tuple)
def lengths_to_mask(*lengths, **kwargs):
""" Given a list of lengths, create a batch mask.
Example:
>>> lengths_to_mask([1, 2, 3])
tensor([[1, 0, 0],
[1, 1, 0],
[1, 1, 1]], dtype=torch.uint8)
>>> lengths_to_mask([1, 2, 2], [1, 2, 2])
tensor([[[1, 0],
[0, 0]],
<BLANKLINE>
[[1, 1],
[1, 1]],
<BLANKLINE>
[[1, 1],
[1, 1]]], dtype=torch.uint8)
Args:
*lengths (list of int or torch.Tensor)
**kwargs: Keyword arguments passed to ``torch.zeros`` upon initially creating the returned
tensor.
Returns:
torch.ByteTensor
"""
# Squeeze to deal with random additional dimensions
lengths = [l.squeeze().tolist() if torch.is_tensor(l) else l for l in lengths]
# For cases where length is a scalar, this needs to convert it to a list.
lengths = [l if isinstance(l, list) else [l] for l in lengths]
assert all(len(l) == len(lengths[0]) for l in lengths)
batch_size = len(lengths[0])
other_dimensions = tuple([int(max(l)) for l in lengths])
mask = torch.zeros(batch_size, *other_dimensions, **kwargs)
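    # For each batch element, set the leading `length` positions along every dimension to 1 (valid).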
for i, length in enumerate(zip(*tuple(lengths))):
mask[i][[slice(int(l)) for l in length]].fill_(1)
return mask.byte()
def collate_tensors(batch, stack_tensors=torch.stack):
""" Collate a list of type ``k`` (dict, namedtuple, list, etc.) with tensors.
Inspired by:
https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31
Args:
batch (list of k): List of rows of type ``k``.
stack_tensors (callable): Function to stack tensors into a batch.
Returns:
k: Collated batch of type ``k``.
Example use case:
This is useful with ``torch.utils.data.dataloader.DataLoader`` which requires a collate
function. Typically, when collating sequences you'd set
``collate_fn=partial(collate_tensors, stack_tensors=encoders.text.stack_and_pad_tensors)``.
Example:
>>> import torch
>>> batch = [
... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
... ]
>>> collated = collate_tensors(batch)
>>> {k: t.size() for (k, t) in collated.items()}
{'column_a': torch.Size([2, 5]), 'column_b': torch.Size([2, 5])}
"""
if all([torch.is_tensor(b) for b in batch]):
return stack_tensors(batch)
if (all([isinstance(b, dict) for b in batch]) and
all([b.keys() == batch[0].keys() for b in batch])):
return {key: collate_tensors([d[key] for d in batch], stack_tensors) for key in batch[0]}
elif all([is_namedtuple(b) for b in batch]): # Handle ``namedtuple``
return batch[0].__class__(**collate_tensors([b._asdict() for b in batch], stack_tensors))
elif all([isinstance(b, list) for b in batch]):
# Handle list of lists such each list has some column to be batched, similar to:
# [['a', 'b'], ['a', 'b']] → [['a', 'a'], ['b', 'b']]
transposed = zip(*batch)
return [collate_tensors(samples, stack_tensors) for samples in transposed]
else:
return batch
def tensors_to(tensors, *args, **kwargs):
""" Apply ``torch.Tensor.to`` to tensors in a generic data structure.
Inspired by:
https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31
Args:
tensors (tensor, dict, list, namedtuple or tuple): Data structure with tensor values to
move.
*args: Arguments passed to ``torch.Tensor.to``.
**kwargs: Keyword arguments passed to ``torch.Tensor.to``.
Example use case:
This is useful as a complementary function to ``collate_tensors``. Following collating,
it's important to move your tensors to the appropriate device.
Returns:
The inputted ``tensors`` with ``torch.Tensor.to`` applied.
Example:
>>> import torch
>>> batch = [
... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
... ]
>>> tensors_to(batch, torch.device('cpu')) # doctest: +ELLIPSIS
[{'column_a': tensor(...}]
"""
if torch.is_tensor(tensors):
return tensors.to(*args, **kwargs)
elif isinstance(tensors, dict):
return {k: tensors_to(v, *args, **kwargs) for k, v in tensors.items()}
elif hasattr(tensors, '_asdict') and isinstance(tensors, tuple): # Handle ``namedtuple``
return tensors.__class__(**tensors_to(tensors._asdict(), *args, **kwargs))
elif isinstance(tensors, list):
return [tensors_to(t, *args, **kwargs) for t in tensors]
elif isinstance(tensors, tuple):
return tuple([tensors_to(t, *args, **kwargs) for t in tensors])
else:
return tensors
|
[
"torchnlp.datasets.Dataset",
"random.Random",
"torch.equal",
"torch.zeros",
"torch.is_tensor",
"logging.getLogger",
"inspect.getmembers"
] |
[((87, 114), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (104, 114), False, 'import logging\n'), ((376, 400), 'torch.is_tensor', 'torch.is_tensor', (['object_'], {}), '(object_)\n', (391, 400), False, 'import torch\n'), ((4468, 4501), 'torch.equal', 'torch.equal', (['tensor', 'tensor_other'], {}), '(tensor, tensor_other)\n', (4479, 4501), False, 'import torch\n'), ((5787, 5839), 'torch.zeros', 'torch.zeros', (['batch_size', '*other_dimensions'], {}), '(batch_size, *other_dimensions, **kwargs)\n', (5798, 5839), False, 'import torch\n'), ((9023, 9047), 'torch.is_tensor', 'torch.is_tensor', (['tensors'], {}), '(tensors)\n', (9038, 9047), False, 'import torch\n'), ((2705, 2731), 'random.Random', 'random.Random', (['random_seed'], {}), '(random_seed)\n', (2718, 2731), False, 'import random\n'), ((3842, 3865), 'torchnlp.datasets.Dataset', 'Dataset', (['concat[:split]'], {}), '(concat[:split])\n', (3849, 3865), False, 'from torchnlp.datasets import Dataset\n'), ((3867, 3890), 'torchnlp.datasets.Dataset', 'Dataset', (['concat[split:]'], {}), '(concat[split:])\n', (3874, 3890), False, 'from torchnlp.datasets import Dataset\n'), ((5433, 5451), 'torch.is_tensor', 'torch.is_tensor', (['l'], {}), '(l)\n', (5448, 5451), False, 'import torch\n'), ((7120, 7138), 'torch.is_tensor', 'torch.is_tensor', (['b'], {}), '(b)\n', (7135, 7138), False, 'import torch\n'), ((854, 881), 'inspect.getmembers', 'inspect.getmembers', (['object_'], {}), '(object_)\n', (872, 881), False, 'import inspect\n')]
|
import warnings
import numpy as np
import scipy.linalg as scplin
import scipy.optimize as scpop
import scipy.sparse as scpsp
dfail = {}
try:
import sksparse as sksp
except Exception as err:
sksp = False
dfail['sksparse'] = "For cholesk factorizations"
try:
import scikits.umfpack as skumf
except Exception as err:
skumf = False
dfail['umfpack'] = "For faster sparse matrices"
if len(dfail) > 0:
lstr = [f"\t- {k0}: {v0}" for k0, v0 in dfail.items()]
msg = (
"Consider installing the following for faster inversions:\n"
+ "\n".join(lstr)
)
warnings.warn(msg)
# #############################################################################
# #############################################################################
# Basic routines - augmented tikhonov
# #############################################################################
def inv_linear_augTikho_dense(
Tn=None,
TTn=None,
Tyn=None,
R=None,
yn=None,
sol0=None,
nchan=None,
nbs=None,
mu0=None,
conv_crit=None,
a0bis=None,
b0=None,
a1bis=None,
b1=None,
d=None,
conv_reg=True,
verb=None,
verb2head=None,
**kwdargs,
):
"""
Linear algorithm for Phillips-Tikhonov regularisation
Called "Augmented Tikhonov", dense matrix version
"""
conv = 0. # convergence variable
niter = 0 # number of iterations
mu1 = 0. # regularisation param
# verb
if verb >= 2:
chi2n = np.sum((Tn.dot(sol0) - yn)**2) / nchan
reg = sol0.dot(R.dot(sol0))
temp = f"{nchan} * {chi2n:.3e} + {mu0:.3e} * {reg:.3e}"
print(
f"{verb2head}\n\t\t\t {temp} = {nchan*chi2n + mu0*reg:.3e}",
end='\n',
)
# loop
# Continue until convergence criterion, and at least 2 iterations
while niter < 2 or conv > conv_crit:
# call solver
sol = scplin.solve(
TTn + mu0*R, Tyn,
assume_a='pos', # faster than 'sym'
overwrite_a=True, # no significant gain
overwrite_b=False, # True is faster, but a copy of Tyn is needed
check_finite=False, # small speed gain compared to True
transposed=False,
) # 3
# compute residu, regularity...
res2 = np.sum((Tn.dot(sol)-yn)**2) # residu**2
reg = sol.dot(R.dot(sol)) # regularity term
# update lamb, tau
lamb = a0bis/(0.5*reg + b0) # Update reg. param. estimate
tau = a1bis/(0.5*res2 + b1) # Update noise coef. estimate
mu1 = (lamb/tau) * (2*a1bis/res2)**d # rescale mu with noise estimate
# Compute convergence variable
if conv_reg:
conv = np.abs(mu1 - mu0) / mu1
else:
sol2 = sol**2
sol2max = np.max(sol2)
sol2[sol2 < 0.001*sol2max] = 0.001*sol2max
conv = np.sqrt(np.sum((sol - sol0)**2 / sol2) / nbs)
# verb
if verb >= 2:
temp1 = f"{nchan} * {res2/nchan:.3e} + {mu1:.3e} * {reg:.3e}"
temp2 = f"{res2 + mu1*reg:.3e}"
temp = f"{temp1} = {temp2}"
print(f"\t\t{niter} \t {temp} {tau:.3e} {conv:.3e}")
# update sol0, mu0 for next iteration
sol0[:] = sol[:]
mu0 = mu1
niter += 1
return sol, mu1, res2/nchan, reg, niter, [tau, lamb]
def inv_linear_augTikho_sparse(
Tn=None,
TTn=None,
Tyn=None,
R=None,
yn=None,
sol0=None,
nchan=None,
nbs=None,
mu0=None,
conv_crit=None,
a0bis=None,
b0=None,
a1bis=None,
b1=None,
d=None,
conv_reg=True,
verb=None,
verb2head=None,
maxiter=None,
tol=None,
precond=None, # test
**kwdargs,
):
"""
Linear algorithm for Phillips-Tikhonov regularisation
    Called "Augmented Tikhonov", sparse matrix version
see InvLin_AugTikho_V1.__doc__ for details
"""
conv = 0. # convergence variable
niter = 0 # number of iterations
mu1 = 0. # regularisation param
# verb
if verb >= 2:
chi2n = np.sum((Tn.dot(sol0) - yn)**2) / nchan
reg = sol0.dot(R.dot(sol0))
temp = f"{nchan} * {chi2n:.3e} + {mu0:.3e} * {reg:.3e}"
print(
f"{verb2head}\n\t\t\t {temp} = {nchan*chi2n + mu0*reg:.3e}",
end='\n',
)
# loop
# Continue until convergence criterion, and at least 2 iterations
while niter < 2 or conv > conv_crit:
# sol = scpsp.linalg.spsolve(
# TTn + mu0*R, Tyn,
# permc_spec=None,
# use_umfpack=True,
# )
# seems faster
sol, itconv = scpsp.linalg.cg(
TTn + mu0*R, Tyn,
x0=sol0,
tol=tol,
maxiter=maxiter,
M=precond,
)
res2 = np.sum((Tn.dot(sol)-yn)**2) # residu**2
reg = sol.dot(R.dot(sol)) # regularity term
lamb = a0bis/(0.5*reg + b0) # Update reg. param. estimate
tau = a1bis/(0.5*res2 + b1) # Update noise coef. estimate
mu1 = (lamb/tau) * (2*a1bis/res2)**d # rescale mu with noise estimate
# Compute convergence variable
if conv_reg:
conv = np.abs(mu1 - mu0) / mu1
else:
sol2 = sol**2
sol2max = np.max(sol2)
sol2[sol2 < 0.001*sol2max] = 0.001*sol2max
conv = np.sqrt(np.sum((sol - sol0)**2 / sol2) / nbs)
# verb
if verb >= 2:
temp1 = f"{nchan} * {res2/nchan:.3e} + {mu1:.3e} * {reg:.3e}"
temp2 = f"{res2 + mu1*reg:.3e}"
temp = f"{temp1} = {temp2}"
print(
f"\t\t{niter} \t {temp} {tau:.3e} {conv:.3e}"
)
sol0[:] = sol[:] # Update reference solution
niter += 1 # Update number of iterations
mu0 = mu1
return sol, mu1, res2/nchan, reg, niter, [tau, lamb]
def inv_linear_augTikho_chol_dense(
Tn=None,
TTn=None,
Tyn=None,
R=None,
yn=None,
sol0=None,
nchan=None,
nbs=None,
mu0=None,
conv_crit=None,
a0bis=None,
b0=None,
a1bis=None,
b1=None,
d=None,
conv_reg=True,
verb=None,
verb2head=None,
**kwdargs,
):
"""
"""
conv = 0. # convergence variable
niter = 0 # number of iterations
mu1 = 0. # regularisation param
# verb
if verb >= 2:
chi2n = np.sum((Tn.dot(sol0) - yn)**2) / nchan
reg = sol0.dot(R.dot(sol0))
temp = f"{nchan} * {chi2n:.3e} + {mu0:.3e} * {reg:.3e}"
print(
f"{verb2head}\n\t\t\t {temp} = {nchan*chi2n + mu0*reg:.3e}",
end='\n',
)
# loop
# Continue until convergence criterion, and at least 2 iterations
while niter < 2 or conv > conv_crit:
try:
# choleski decomposition requires det(TT + mu0*LL) != 0
# (chol(A).T * chol(A) = A
chol = scplin.cholesky(
TTn + mu0*R,
lower=False,
check_finite=False,
overwrite_a=False,
)
# Use np.linalg.lstsq for double-solving the equation
sol = scplin.cho_solve(
(chol, False), Tyn,
overwrite_b=None,
check_finite=True,
)
except Exception as err:
# call solver
sol = scplin.solve(
TTn + mu0*R, Tyn,
assume_a='sym', # chol failed => not 'pos'
overwrite_a=True, # no significant gain
overwrite_b=False, # True faster, but a copy of Tyn needed
check_finite=False, # small speed gain compared to True
transposed=False,
) # 3
# compute residu, regularity...
res2 = np.sum((Tn.dot(sol)-yn)**2) # residu**2
reg = sol.dot(R.dot(sol)) # regularity term
# update lamb, tau
lamb = a0bis/(0.5*reg + b0) # Update reg. param. estimate
tau = a1bis/(0.5*res2 + b1) # Update noise coef. estimate
mu1 = (lamb/tau) * (2*a1bis/res2)**d # mu rescale with noise estimate
# Compute convergence variable
if conv_reg:
conv = np.abs(mu1 - mu0) / mu1
else:
sol2 = sol**2
sol2max = np.max(sol2)
sol2[sol2 < 0.001*sol2max] = 0.001*sol2max
conv = np.sqrt(np.sum((sol - sol0)**2 / sol2) / nbs)
# verb
if verb >= 2:
temp1 = f"{nchan} * {res2/nchan:.3e} + {mu1:.3e} * {reg:.3e}"
temp2 = f"{res2 + mu1*reg:.3e}"
temp = f"{temp1} = {temp2}"
print(f"\t\t{niter} \t {temp} {tau:.3e} {conv:.3e}")
# update sol0, mu0 for next iteration
sol0[:] = sol[:]
mu0 = mu1
niter += 1
return sol, mu1, res2/nchan, reg, niter, [tau, lamb]
def inv_linear_augTikho_chol_sparse(
Tn=None,
TTn=None,
Tyn=None,
R=None,
yn=None,
sol0=None,
nchan=None,
nbs=None,
mu0=None,
conv_crit=None,
a0bis=None,
b0=None,
a1bis=None,
b1=None,
d=None,
conv_reg=True,
verb=None,
verb2head=None,
**kwdargs,
):
"""
Linear algorithm for Phillips-Tikhonov regularisation
Called "Augmented Tikhonov"
Augmented in the sense that bayesian statistics are combined
with standard Tikhonov regularisation
Determines both noise (common multiplicative coefficient) and
regularisation parameter automatically
We assume here that all arrays are scaled (noise, conditioning...)
    Sparse matrices are also preferred to speed up the computation
In this method:
tau is an approximation of the inverse of the noise coefficient
lamb is an approximation of the regularisation parameter
N.B.: The noise and reg. param. have probability densities of the form:
f(x) = x^(a-1) * exp(-bx)
This function's maximum is in x = (a-1)/b, so a = b+1 gives a maximum at 1.
(a0, b0) for the reg. param.
(a1, b1) for the noise estimate
Ref:
[1] <NAME>., <NAME>., Inverse Problems, vol.25, nb.2, 025001, 2009
[2] http://www.math.uni-bremen.de/zetem/cms/media.php/250/nov14talk_jin%20bangti.pdf
[3] <NAME>, <NAME>, <NAME>,
"A New Choice Rule for Regularization Parameters in Tikhonov
Regularization", Research report, University of Hong Kong, 2008
"""
conv = 0. # convergence variable
niter = 0 # number of iterations
mu1 = 0. # regularisation param
# verb
if verb >= 2:
chi2n = np.sum((Tn.dot(sol0) - yn)**2) / nchan
reg = sol0.dot(R.dot(sol0))
temp = f"{nchan} * {chi2n:.3e} + {mu0:.3e} * {reg:.3e}"
print(
f"{verb2head}\n\t\t\t {temp} = {nchan*chi2n + mu0*reg:.3e}",
end='\n',
)
# loop
# Continue until convergence criterion, and at least 2 iterations
factor = None
while niter < 2 or conv > conv_crit:
try:
# choleski decomposition requires det(TT + mu0*LL) != 0
# A = (chol(A).T * chol(A)
# optimal if matrix is csc
if sksp is False:
factor = scpsp.linalg.factorized(TTn + mu0*R)
sol = factor(Tyn)
else:
if factor is None:
factor = sksp.cholmod.cholesky(
TTn + mu0*R,
beta=0,
mode='auto',
ordering_method='default',
use_long=False,
)
else:
# re-use same factor
factor.cholesky_inplace(TTn + mu0*R, beta=0)
sol = factor.solve_A(Tyn)
except Exception as err:
# call solver
sol = scpsp.linalg.spsolve(
TTn + mu0*R, Tyn,
permc_spec=None,
use_umfpack=True,
)
# compute residu, regularity...
res2 = np.sum((Tn.dot(sol)-yn)**2) # residu**2
reg = sol.dot(R.dot(sol)) # regularity term
# update lamb, tau
lamb = a0bis/(0.5*reg + b0) # Update reg. param. estimate
tau = a1bis/(0.5*res2 + b1) # Update noise coef. estimate
mu1 = (lamb/tau) * (2*a1bis/res2)**d # Update reg. param. rescaling
# Compute convergence variable
if conv_reg:
conv = np.abs(mu1 - mu0) / mu1
else:
sol2 = sol**2
sol2max = np.max(sol2)
sol2[sol2 < 0.001*sol2max] = 0.001*sol2max
conv = np.sqrt(np.sum((sol - sol0)**2 / sol2) / nbs)
# verb
if verb >= 2:
temp1 = f"{nchan} * {res2/nchan:.3e} + {mu1:.3e} * {reg:.3e}"
temp2 = f"{res2 + mu1*reg:.3e}"
temp = f"{temp1} = {temp2}"
print(f"\t\t{niter} \t {temp} {tau:.3e} {conv:.3e}")
# update sol0, mu0 for next iteration
sol0[:] = sol[:]
mu0 = mu1
niter += 1
return sol, mu1, res2/nchan, reg, niter, [tau, lamb]
def inv_linear_augTikho_pos_dense(
Tn=None,
TTn=None,
Tyn=None,
R=None,
yn=None,
sol0=None,
nchan=None,
nbs=None,
mu0=None,
conv_crit=None,
a0bis=None,
b0=None,
a1bis=None,
b1=None,
d=None,
conv_reg=True,
verb=None,
verb2head=None,
# specific
method=None,
options=None,
bounds=None,
func_val=None,
func_jac=None,
func_hess=None,
**kwdargs,
):
"""
Quadratic algorithm for Phillips-Tikhonov regularisation
Alternative to the linear version with positivity constraint
see TFI.InvLin_AugTikho_V1.__doc__ for details
"""
conv = 0. # convergence variable
niter = 0 # number of iterations
mu1 = 0. # regularisation param
# verb
if verb >= 2:
chi2n = np.sum((Tn.dot(sol0) - yn)**2) / nchan
reg = sol0.dot(R.dot(sol0))
temp = f"{nchan} * {chi2n:.3e} + {mu0:.3e} * {reg:.3e}"
print(
f"{verb2head}\n\t\t\t {temp} = {nchan*chi2n + mu0*reg:.3e}",
end='\n',
)
while niter < 2 or conv > conv_crit:
# quadratic method for positivity constraint
sol = scpop.minimize(
func_val, sol0,
args=(mu0, Tn, yn, TTn, Tyn),
jac=func_jac,
hess=func_hess,
method=method,
bounds=bounds,
options=options,
).x
# compute residu, regularity...
res2 = np.sum((Tn.dot(sol)-yn)**2) # residu**2
reg = sol.dot(R.dot(sol)) # regularity term
# update lamb, tau
lamb = a0bis/(0.5*reg + b0) # Update reg. param. estimate
tau = a1bis/(0.5*res2 + b1) # Update noise coef. estimate
mu1 = (lamb/tau) * (2*a1bis/res2)**d # Update reg. param. rescaling
# Compute convergence variable
if conv_reg:
conv = np.abs(mu1 - mu0) / mu1
else:
sol2 = sol**2
sol2max = np.max(sol2)
sol2[sol2 < 0.001*sol2max] = 0.001*sol2max
conv = np.sqrt(np.sum((sol - sol0)**2 / sol2) / nbs)
# verb
if verb >= 2:
temp1 = f"{nchan} * {res2/nchan:.3e} + {mu1:.3e} * {reg:.3e}"
temp2 = f"{res2 + mu1*reg:.3e}"
temp = f"{temp1} = {temp2}"
print(f"\t\t{niter} \t {temp} {tau:.3e} {conv:.3e}")
# update sol0, mu0 for next iteration
sol0[:] = sol[:]
mu0 = mu1
niter += 1
return sol, mu1, res2/nchan, reg, niter, [tau, lamb]
# #############################################################################
# #############################################################################
# Basic routines - discrepancy principle
# #############################################################################
def inv_linear_DisPrinc_sparse(
Tn=None,
TTn=None,
Tyn=None,
R=None,
yn=None,
sol0=None,
nchan=None,
mu0=None,
precond=None,
verb=None,
verb2head=None,
# specific
chi2n_tol=None,
chi2n_obj=None,
maxiter=None,
tol=None,
**kwdargs,
):
"""
Discrepancy principle: find mu such that chi2n = 1 +/- tol
"""
niter = 0
lchi2n = np.array([np.sum((Tn.dot(sol0) - yn)**2) / nchan])
lmu = np.array([mu0])
chi2n_obj_log = np.log(chi2n_obj)
# verb
if verb >= 2:
reg = sol0.dot(R.dot(sol0))
temp = f"{nchan} * {lchi2n[0]:.3e} + {mu0:.3e} * {reg:.3e}"
print(
f"{verb2head}\n\t\t\t {temp} = {nchan*lchi2n[0] + mu0*reg:.3e}",
end='\n',
)
while niter == 0 or np.abs(lchi2n[-1] - chi2n_obj) > chi2n_tol:
sol, itconv = scpsp.linalg.cg(
TTn + lmu[-1]*R, Tyn,
x0=sol0,
tol=tol,
maxiter=maxiter,
M=precond,
)
lchi2n = np.append(lchi2n, np.sum((Tn.dot(sol) - yn)**2) / nchan)
if niter == 0:
if lchi2n[-1] >= chi2n_obj + chi2n_tol:
lmu = np.append(lmu, lmu[-1] / 50.)
elif lchi2n[-1] <= chi2n_obj - chi2n_tol:
lmu = np.append(lmu, lmu[-1] * 50.)
else:
lmu = np.append(lmu, lmu[-1])
elif niter == 1 or (
np.all(lchi2n >= chi2n_obj + chi2n_tol)
or np.all(lchi2n <= chi2n_obj - chi2n_tol)
):
if lchi2n[-1] >= chi2n_obj + chi2n_tol:
lmu = np.append(lmu, lmu[-1] / 50.)
else:
lmu = np.append(lmu, lmu[-1] * 50.)
else:
if lmu[-2] == lmu[-1]:
# if the algo is stuck => break to avoid infinite loop
ind = np.argmin(lchi2n[1:] - chi2n_obj)
lmu[-1] = lmu[ind]
lchi2n[-1] = lchi2n[ind]
sol, itconv = scpsp.linalg.cg(
TTn + lmu[-1]*R, Tyn,
x0=sol0,
tol=tol,
maxiter=maxiter,
M=precond,
)
break
else:
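                # Interpolate log(mu) against log(chi2n) to target chi2n_obj for the next iteration.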
indsort = np.argsort(lchi2n[1:])
lmu = np.append(lmu, np.exp(np.interp(
chi2n_obj_log,
np.log(lchi2n[1:])[indsort],
np.log(lmu)[indsort]
)))
# verb
if verb >= 2:
reg = sol.dot(R.dot(sol))
res2 = np.sum((Tn.dot(sol)-yn)**2)
temp1 = f"{nchan} * {lchi2n[-1]:.3e} + {lmu[-1]:.3e} * {reg:.3e}"
temp2 = f"{res2 + lmu[-1]*reg:.3e}"
temp = f"{temp1} = {temp2}"
print(f"\t\t{niter} \t {temp}")
sol0[:] = sol
niter += 1
reg = sol.dot(R.dot(sol)) # regularity term
return sol, lmu[-1], lchi2n[-1], reg, niter, None
|
[
"scipy.linalg.solve",
"scipy.optimize.minimize",
"numpy.abs",
"numpy.log",
"numpy.sum",
"scipy.linalg.cholesky",
"scipy.sparse.linalg.cg",
"scipy.linalg.cho_solve",
"scipy.sparse.linalg.factorized",
"sksparse.cholmod.cholesky",
"numpy.argmin",
"numpy.argsort",
"numpy.append",
"numpy.max",
"numpy.array",
"scipy.sparse.linalg.spsolve",
"warnings.warn",
"numpy.all"
] |
[((603, 621), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (616, 621), False, 'import warnings\n'), ((16794, 16809), 'numpy.array', 'np.array', (['[mu0]'], {}), '([mu0])\n', (16802, 16809), True, 'import numpy as np\n'), ((16830, 16847), 'numpy.log', 'np.log', (['chi2n_obj'], {}), '(chi2n_obj)\n', (16836, 16847), True, 'import numpy as np\n'), ((1978, 2105), 'scipy.linalg.solve', 'scplin.solve', (['(TTn + mu0 * R)', 'Tyn'], {'assume_a': '"""pos"""', 'overwrite_a': '(True)', 'overwrite_b': '(False)', 'check_finite': '(False)', 'transposed': '(False)'}), "(TTn + mu0 * R, Tyn, assume_a='pos', overwrite_a=True,\n overwrite_b=False, check_finite=False, transposed=False)\n", (1990, 2105), True, 'import scipy.linalg as scplin\n'), ((4792, 4878), 'scipy.sparse.linalg.cg', 'scpsp.linalg.cg', (['(TTn + mu0 * R)', 'Tyn'], {'x0': 'sol0', 'tol': 'tol', 'maxiter': 'maxiter', 'M': 'precond'}), '(TTn + mu0 * R, Tyn, x0=sol0, tol=tol, maxiter=maxiter, M=\n precond)\n', (4807, 4878), True, 'import scipy.sparse as scpsp\n'), ((17197, 17286), 'scipy.sparse.linalg.cg', 'scpsp.linalg.cg', (['(TTn + lmu[-1] * R)', 'Tyn'], {'x0': 'sol0', 'tol': 'tol', 'maxiter': 'maxiter', 'M': 'precond'}), '(TTn + lmu[-1] * R, Tyn, x0=sol0, tol=tol, maxiter=maxiter,\n M=precond)\n', (17212, 17286), True, 'import scipy.sparse as scpsp\n'), ((2907, 2919), 'numpy.max', 'np.max', (['sol2'], {}), '(sol2)\n', (2913, 2919), True, 'import numpy as np\n'), ((5464, 5476), 'numpy.max', 'np.max', (['sol2'], {}), '(sol2)\n', (5470, 5476), True, 'import numpy as np\n'), ((7148, 7235), 'scipy.linalg.cholesky', 'scplin.cholesky', (['(TTn + mu0 * R)'], {'lower': '(False)', 'check_finite': '(False)', 'overwrite_a': '(False)'}), '(TTn + mu0 * R, lower=False, check_finite=False, overwrite_a\n =False)\n', (7163, 7235), True, 'import scipy.linalg as scplin\n'), ((7392, 7465), 'scipy.linalg.cho_solve', 'scplin.cho_solve', (['(chol, False)', 'Tyn'], {'overwrite_b': 'None', 'check_finite': '(True)'}), '((chol, False), Tyn, overwrite_b=None, check_finite=True)\n', (7408, 7465), True, 'import scipy.linalg as scplin\n'), ((8576, 8588), 'numpy.max', 'np.max', (['sol2'], {}), '(sol2)\n', (8582, 8588), True, 'import numpy as np\n'), ((12875, 12887), 'numpy.max', 'np.max', (['sol2'], {}), '(sol2)\n', (12881, 12887), True, 'import numpy as np\n'), ((14638, 14779), 'scipy.optimize.minimize', 'scpop.minimize', (['func_val', 'sol0'], {'args': '(mu0, Tn, yn, TTn, Tyn)', 'jac': 'func_jac', 'hess': 'func_hess', 'method': 'method', 'bounds': 'bounds', 'options': 'options'}), '(func_val, sol0, args=(mu0, Tn, yn, TTn, Tyn), jac=func_jac,\n hess=func_hess, method=method, bounds=bounds, options=options)\n', (14652, 14779), True, 'import scipy.optimize as scpop\n'), ((15461, 15473), 'numpy.max', 'np.max', (['sol2'], {}), '(sol2)\n', (15467, 15473), True, 'import numpy as np\n'), ((17131, 17161), 'numpy.abs', 'np.abs', (['(lchi2n[-1] - chi2n_obj)'], {}), '(lchi2n[-1] - chi2n_obj)\n', (17137, 17161), True, 'import numpy as np\n'), ((2821, 2838), 'numpy.abs', 'np.abs', (['(mu1 - mu0)'], {}), '(mu1 - mu0)\n', (2827, 2838), True, 'import numpy as np\n'), ((5378, 5395), 'numpy.abs', 'np.abs', (['(mu1 - mu0)'], {}), '(mu1 - mu0)\n', (5384, 5395), True, 'import numpy as np\n'), ((7606, 7733), 'scipy.linalg.solve', 'scplin.solve', (['(TTn + mu0 * R)', 'Tyn'], {'assume_a': '"""sym"""', 'overwrite_a': '(True)', 'overwrite_b': '(False)', 'check_finite': '(False)', 'transposed': '(False)'}), "(TTn + mu0 * R, Tyn, assume_a='sym', overwrite_a=True,\n overwrite_b=False, 
check_finite=False, transposed=False)\n", (7618, 7733), True, 'import scipy.linalg as scplin\n'), ((8490, 8507), 'numpy.abs', 'np.abs', (['(mu1 - mu0)'], {}), '(mu1 - mu0)\n', (8496, 8507), True, 'import numpy as np\n'), ((11508, 11546), 'scipy.sparse.linalg.factorized', 'scpsp.linalg.factorized', (['(TTn + mu0 * R)'], {}), '(TTn + mu0 * R)\n', (11531, 11546), True, 'import scipy.sparse as scpsp\n'), ((12150, 12225), 'scipy.sparse.linalg.spsolve', 'scpsp.linalg.spsolve', (['(TTn + mu0 * R)', 'Tyn'], {'permc_spec': 'None', 'use_umfpack': '(True)'}), '(TTn + mu0 * R, Tyn, permc_spec=None, use_umfpack=True)\n', (12170, 12225), True, 'import scipy.sparse as scpsp\n'), ((12789, 12806), 'numpy.abs', 'np.abs', (['(mu1 - mu0)'], {}), '(mu1 - mu0)\n', (12795, 12806), True, 'import numpy as np\n'), ((15375, 15392), 'numpy.abs', 'np.abs', (['(mu1 - mu0)'], {}), '(mu1 - mu0)\n', (15381, 15392), True, 'import numpy as np\n'), ((17525, 17555), 'numpy.append', 'np.append', (['lmu', '(lmu[-1] / 50.0)'], {}), '(lmu, lmu[-1] / 50.0)\n', (17534, 17555), True, 'import numpy as np\n'), ((3002, 3034), 'numpy.sum', 'np.sum', (['((sol - sol0) ** 2 / sol2)'], {}), '((sol - sol0) ** 2 / sol2)\n', (3008, 3034), True, 'import numpy as np\n'), ((5559, 5591), 'numpy.sum', 'np.sum', (['((sol - sol0) ** 2 / sol2)'], {}), '((sol - sol0) ** 2 / sol2)\n', (5565, 5591), True, 'import numpy as np\n'), ((8671, 8703), 'numpy.sum', 'np.sum', (['((sol - sol0) ** 2 / sol2)'], {}), '((sol - sol0) ** 2 / sol2)\n', (8677, 8703), True, 'import numpy as np\n'), ((11661, 11766), 'sksparse.cholmod.cholesky', 'sksp.cholmod.cholesky', (['(TTn + mu0 * R)'], {'beta': '(0)', 'mode': '"""auto"""', 'ordering_method': '"""default"""', 'use_long': '(False)'}), "(TTn + mu0 * R, beta=0, mode='auto', ordering_method=\n 'default', use_long=False)\n", (11682, 11766), True, 'import sksparse as sksp\n'), ((12970, 13002), 'numpy.sum', 'np.sum', (['((sol - sol0) ** 2 / sol2)'], {}), '((sol - sol0) ** 2 / sol2)\n', (12976, 13002), True, 'import numpy as np\n'), ((15556, 15588), 'numpy.sum', 'np.sum', (['((sol - sol0) ** 2 / sol2)'], {}), '((sol - sol0) ** 2 / sol2)\n', (15562, 15588), True, 'import numpy as np\n'), ((17631, 17661), 'numpy.append', 'np.append', (['lmu', '(lmu[-1] * 50.0)'], {}), '(lmu, lmu[-1] * 50.0)\n', (17640, 17661), True, 'import numpy as np\n'), ((17701, 17724), 'numpy.append', 'np.append', (['lmu', 'lmu[-1]'], {}), '(lmu, lmu[-1])\n', (17710, 17724), True, 'import numpy as np\n'), ((17766, 17805), 'numpy.all', 'np.all', (['(lchi2n >= chi2n_obj + chi2n_tol)'], {}), '(lchi2n >= chi2n_obj + chi2n_tol)\n', (17772, 17805), True, 'import numpy as np\n'), ((17821, 17860), 'numpy.all', 'np.all', (['(lchi2n <= chi2n_obj - chi2n_tol)'], {}), '(lchi2n <= chi2n_obj - chi2n_tol)\n', (17827, 17860), True, 'import numpy as np\n'), ((17946, 17976), 'numpy.append', 'np.append', (['lmu', '(lmu[-1] / 50.0)'], {}), '(lmu, lmu[-1] / 50.0)\n', (17955, 17976), True, 'import numpy as np\n'), ((18016, 18046), 'numpy.append', 'np.append', (['lmu', '(lmu[-1] * 50.0)'], {}), '(lmu, lmu[-1] * 50.0)\n', (18025, 18046), True, 'import numpy as np\n'), ((18188, 18221), 'numpy.argmin', 'np.argmin', (['(lchi2n[1:] - chi2n_obj)'], {}), '(lchi2n[1:] - chi2n_obj)\n', (18197, 18221), True, 'import numpy as np\n'), ((18328, 18417), 'scipy.sparse.linalg.cg', 'scpsp.linalg.cg', (['(TTn + lmu[-1] * R)', 'Tyn'], {'x0': 'sol0', 'tol': 'tol', 'maxiter': 'maxiter', 'M': 'precond'}), '(TTn + lmu[-1] * R, Tyn, x0=sol0, tol=tol, maxiter=maxiter,\n M=precond)\n', (18343, 18417), 
True, 'import scipy.sparse as scpsp\n'), ((18597, 18619), 'numpy.argsort', 'np.argsort', (['lchi2n[1:]'], {}), '(lchi2n[1:])\n', (18607, 18619), True, 'import numpy as np\n'), ((18730, 18748), 'numpy.log', 'np.log', (['lchi2n[1:]'], {}), '(lchi2n[1:])\n', (18736, 18748), True, 'import numpy as np\n'), ((18779, 18790), 'numpy.log', 'np.log', (['lmu'], {}), '(lmu)\n', (18785, 18790), True, 'import numpy as np\n')]
|
#
# SPDX-License-Identifier: Apache-2.0
#
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from dataclasses import dataclass, field
from gettext import translation
from typing import TYPE_CHECKING
from thespiae.exception import ThespiaeError, generate___str__
if TYPE_CHECKING:
from typing import Sequence, Collection, Mapping, Any
from .data import AppEntry, ConfigPath
t = translation('thespiae', fallback=True)
_ = t.gettext
@generate___str__
@dataclass(frozen=True)
class AppEntryMixin:
app_entry: AppEntry = field(metadata={'format': _('configuration entry for: {0.ref}')})
@generate___str__
@dataclass(frozen=True)
class _ConfigPathsMixin:
config_paths: Sequence[ConfigPath] = field(metadata={'format': _('corresponding paths: {}')})
@generate___str__
@dataclass(frozen=True)
class _ConfigPathMixin:
config_path: Sequence[ConfigPath] = field(metadata={'format': _('corresponding path: {}')})
@generate___str__
@dataclass(frozen=True)
class _ExpectedValueTypeMixin:
expected: type = field(metadata={'format': _('expected value type: {0.__name__}')})
received: type = field(metadata={'format': _('received value type: {0.__name__}')})
@generate___str__
@dataclass(frozen=True)
class _FieldMixin:
field_name: str = field(metadata={'format': _('config field name: {}')})
field_index: str = field(metadata={'format': _('collection index: {}')})
@generate___str__(_('Required field is missing'))
@dataclass(frozen=True)
class AppDataMissingFieldError(_FieldMixin, _ConfigPathsMixin, ThespiaeError):
pass
@generate___str__(_('Unexpected config value'))
@dataclass(frozen=True)
class AppDataFieldValueTypeError(_ExpectedValueTypeMixin, _FieldMixin, _ConfigPathsMixin, ThespiaeError):
pass
@generate___str__(_('Circular field reference'))
@dataclass(frozen=True)
class AppDataCircularReferenceError(_FieldMixin, _ConfigPathsMixin, ThespiaeError):
pass
@generate___str__(_('Invalid configuration field value'))
@dataclass(frozen=True)
class ConfigElementTypeError(_ExpectedValueTypeMixin, _ConfigPathMixin, ThespiaeError):
pass
@generate___str__(_('Excessive configuration attribute found'))
@dataclass(frozen=True)
class ConfigExcessiveAttributeError(_ConfigPathMixin, ThespiaeError):
pass
@generate___str__(_('Required config attributes not found'))
@dataclass(frozen=True)
class ConfigRequiredAttributesNotFoundError(_ConfigPathMixin, ThespiaeError):
attributes: Collection[str] = field(metadata={'format': _('missing field names: {}')})
@generate___str__(_('Another configuration entry with the same identity has been found'))
@dataclass(frozen=True)
class ConfigDuplicatedEntryIdentityError(_ConfigPathsMixin, ThespiaeError):
another_paths: Sequence[ConfigPath] = field(metadata={'format': _('another paths: {}')})
identity_values: Mapping[str, Any] = field(metadata={'format': _('identity field values: {}')})
@generate___str__(_('Unable to complete config branches with required identity attributes'))
@dataclass(frozen=True)
class ConfigIncompleteBranchesError(_ConfigPathsMixin, ThespiaeError):
attributes: Collection[str] = field(metadata={'format': _('missing field names: {}')})
class _CircularReferenceError(ThespiaeError):
pass
|
[
"gettext.translation",
"dataclasses.dataclass"
] |
[((947, 985), 'gettext.translation', 'translation', (['"""thespiae"""'], {'fallback': '(True)'}), "('thespiae', fallback=True)\n", (958, 985), False, 'from gettext import translation\n'), ((1021, 1043), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (1030, 1043), False, 'from dataclasses import dataclass, field\n'), ((1178, 1200), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (1187, 1200), False, 'from dataclasses import dataclass, field\n'), ((1345, 1367), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (1354, 1367), False, 'from dataclasses import dataclass, field\n'), ((1509, 1531), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (1518, 1531), False, 'from dataclasses import dataclass, field\n'), ((1760, 1782), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (1769, 1782), False, 'from dataclasses import dataclass, field\n'), ((2009, 2031), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (2018, 2031), False, 'from dataclasses import dataclass, field\n'), ((2171, 2193), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (2180, 2193), False, 'from dataclasses import dataclass, field\n'), ((2361, 2383), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (2370, 2383), False, 'from dataclasses import dataclass, field\n'), ((2538, 2560), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (2547, 2560), False, 'from dataclasses import dataclass, field\n'), ((2725, 2747), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (2734, 2747), False, 'from dataclasses import dataclass, field\n'), ((2891, 2913), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (2900, 2913), False, 'from dataclasses import dataclass, field\n'), ((3176, 3198), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (3185, 3198), False, 'from dataclasses import dataclass, field\n'), ((3564, 3586), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (3573, 3586), False, 'from dataclasses import dataclass, field\n')]
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField
from wtforms.validators import DataRequired, ValidationError
from app.models import Book, Author, Category
class AddBook(FlaskForm):
title = StringField("Tytuł książki", validators=[DataRequired()])
author = StringField(
"Autor/Autorzy",
validators=[DataRequired()],
description='Np. "<NAME>, <NAME>"',
)
category = StringField(
"Kategoria",
validators=[DataRequired()],
description="Przynajmniej jedna kategoria.",
)
description = TextAreaField("Opis", validators=[DataRequired()])
submit = SubmitField("Dodaj do biblioteki!")
def validate_title(self, title):
"""Basic validator. Assumption/simplification: two books with one title do not exist."""
# In a commercial project a better solution will be making validation based on
# books' ISBN number.
book = Book.query.filter_by(title=self.title.data).first()
if book is not None:
raise ValidationError("Książka o takim tytule już znajduje się w bazie.")
class ImportBooks(FlaskForm):
intitle = StringField("Tytuł książki")
inauthor = StringField("Autor")
inpublisher = StringField("Wydawca")
subject = StringField("Kategoria")
isbn = StringField("ISBN")
submit = SubmitField("Zatwierdź dane.")
class FilterBooks(FlaskForm):
filter_a = StringField("Autor")
filter_c = StringField("Kategoria")
submit = SubmitField("Filtruj")
|
[
"app.models.Book.query.filter_by",
"wtforms.SubmitField",
"wtforms.StringField",
"wtforms.validators.DataRequired",
"wtforms.validators.ValidationError"
] |
[((663, 698), 'wtforms.SubmitField', 'SubmitField', (['"""Dodaj do biblioteki!"""'], {}), "('Dodaj do biblioteki!')\n", (674, 698), False, 'from wtforms import StringField, SubmitField, TextAreaField\n'), ((1179, 1207), 'wtforms.StringField', 'StringField', (['"""Tytuł książki"""'], {}), "('Tytuł książki')\n", (1190, 1207), False, 'from wtforms import StringField, SubmitField, TextAreaField\n'), ((1223, 1243), 'wtforms.StringField', 'StringField', (['"""Autor"""'], {}), "('Autor')\n", (1234, 1243), False, 'from wtforms import StringField, SubmitField, TextAreaField\n'), ((1262, 1284), 'wtforms.StringField', 'StringField', (['"""Wydawca"""'], {}), "('Wydawca')\n", (1273, 1284), False, 'from wtforms import StringField, SubmitField, TextAreaField\n'), ((1299, 1323), 'wtforms.StringField', 'StringField', (['"""Kategoria"""'], {}), "('Kategoria')\n", (1310, 1323), False, 'from wtforms import StringField, SubmitField, TextAreaField\n'), ((1335, 1354), 'wtforms.StringField', 'StringField', (['"""ISBN"""'], {}), "('ISBN')\n", (1346, 1354), False, 'from wtforms import StringField, SubmitField, TextAreaField\n'), ((1368, 1398), 'wtforms.SubmitField', 'SubmitField', (['"""Zatwierdź dane."""'], {}), "('Zatwierdź dane.')\n", (1379, 1398), False, 'from wtforms import StringField, SubmitField, TextAreaField\n'), ((1446, 1466), 'wtforms.StringField', 'StringField', (['"""Autor"""'], {}), "('Autor')\n", (1457, 1466), False, 'from wtforms import StringField, SubmitField, TextAreaField\n'), ((1482, 1506), 'wtforms.StringField', 'StringField', (['"""Kategoria"""'], {}), "('Kategoria')\n", (1493, 1506), False, 'from wtforms import StringField, SubmitField, TextAreaField\n'), ((1520, 1542), 'wtforms.SubmitField', 'SubmitField', (['"""Filtruj"""'], {}), "('Filtruj')\n", (1531, 1542), False, 'from wtforms import StringField, SubmitField, TextAreaField\n'), ((1065, 1132), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Książka o takim tytule już znajduje się w bazie."""'], {}), "('Książka o takim tytule już znajduje się w bazie.')\n", (1080, 1132), False, 'from wtforms.validators import DataRequired, ValidationError\n'), ((284, 298), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (296, 298), False, 'from wtforms.validators import DataRequired, ValidationError\n'), ((369, 383), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (381, 383), False, 'from wtforms.validators import DataRequired, ValidationError\n'), ((505, 519), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (517, 519), False, 'from wtforms.validators import DataRequired, ValidationError\n'), ((633, 647), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (645, 647), False, 'from wtforms.validators import DataRequired, ValidationError\n'), ((966, 1009), 'app.models.Book.query.filter_by', 'Book.query.filter_by', ([], {'title': 'self.title.data'}), '(title=self.title.data)\n', (986, 1009), False, 'from app.models import Book, Author, Category\n')]
|
#!/usr/bin/env python3
#############
# Libraries #
#############
import random
import math
###########
# Classes #
###########
class person:
"""A person"""
instances = []
def __init__(self):
a = random.random()
self.prefs = [0.5, 0.5]
self.data = [a, 1 - a, 0]
self.utilityUpdate()
person.instances.append(self)
def trade(self, market):
self.marginalUtilityUpdate()
self.goalUpdate(market)
goods = [0, 1]
for good in goods:
toBuy = self.goal[good] - self.data[good]
price = market.price[good]
if (
self.data[good] + toBuy >= 0 and
self.data[2] - toBuy * price >= 0 and
market.data[good] - toBuy >= 0 and
market.data[2] + toBuy * price >= 0
):
market.data[good] -= toBuy
market.data[2] += toBuy * price
self.data[good] += toBuy
self.data[2] -= toBuy * price
self.utilityUpdate()
def goalUpdate(self, market):
self.goal = [self.data[0], self.data[1]]
if (self.marginalUtility[0]/market.price[0] > self.marginalUtility[1]/market.price[1]):
self.goal[0] +=0.01
self.goal[1] -=0.01
elif (self.marginalUtility[0]/market.price[0] < self.marginalUtility[1]/market.price[1]):
self.goal[0] -=0.01
self.goal[1] +=0.01
def utilityUpdate(self):
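        # Cobb-Douglas utility: U = x0**p0 * x1**p1, with exponents taken from self.prefs.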
self.utility = (self.data[0] ** self.prefs[0]) * (self.data[1] ** self.prefs[1])
def marginalUtilityUpdate(self):
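        # Partial derivatives of the Cobb-Douglas utility with respect to each good.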
self.marginalUtility = [
self.prefs[0] * (self.data[0] ** (self.prefs[0] -1)) * (self.data[1] ** self.prefs[1]),
self.prefs[1] * (self.data[0] ** self.prefs[0] ) * (self.data[1] ** (self.prefs[1] - 1))
]
class market:
"""A amarket"""
instances = []
def __init__(self):
self.data = [0, 0, 100]
self.lastStock = self.data[:]
self.price = [10.0, 10.0]
market.instances.append(self)
def updatePrices(self):
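        # Nudge a good's price up if its stock fell since the last update (net demand), down otherwise.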
goods = [0, 1]
for good in goods:
if self.data[good] < self.lastStock[good]:
self.price[good] += 0.001
else:
self.price[good] -= 0.001
self.lastStock = self.data[:]
#############
# Functions #
#############
def printStatus(entity):
print ("Printing")
for instance in entity.instances:
print("Data: ", instance.data)
#try:
# print("Utility: ", instance.utility)
#except:
# continue
print ()
#################
# Configuration #
#################
population = 3
##############
# Initialise #
##############
print ("Intialising...")
print ("--------------")
for i in range (population):
person()
auctioneer = market()
#######
# Run #
#######
printStatus(person)
printStatus(market)
var = ""
i = 0
while (var != "q"):
print ("STARTING")
print (i, ".......")
for instance in person.instances:
instance.trade(auctioneer)
auctioneer.updatePrices()
var = input("Please enter something: ")
print ("you entered", var)
printStatus(person)
printStatus(market)
print (auctioneer.price)
i+=1
|
[
"random.random"
] |
[((230, 245), 'random.random', 'random.random', ([], {}), '()\n', (243, 245), False, 'import random\n')]
|
import hpat
def count_array_REPs():
from hpat.distributed import Distribution
vals = hpat.distributed.dist_analysis.array_dists.values()
return sum([v == Distribution.REP for v in vals])
def count_parfor_REPs():
from hpat.distributed import Distribution
vals = hpat.distributed.dist_analysis.parfor_dists.values()
return sum([v == Distribution.REP for v in vals])
def count_parfor_OneDs():
from hpat.distributed import Distribution
vals = hpat.distributed.dist_analysis.parfor_dists.values()
return sum([v == Distribution.OneD for v in vals])
def count_array_OneDs():
from hpat.distributed import Distribution
vals = hpat.distributed.dist_analysis.array_dists.values()
return sum([v == Distribution.OneD for v in vals])
def count_parfor_OneD_Vars():
from hpat.distributed import Distribution
vals = hpat.distributed.dist_analysis.parfor_dists.values()
return sum([v == Distribution.OneD_Var for v in vals])
def count_array_OneD_Vars():
from hpat.distributed import Distribution
vals = hpat.distributed.dist_analysis.array_dists.values()
return sum([v == Distribution.OneD_Var for v in vals])
def dist_IR_contains(*args):
return sum([(s in hpat.distributed.fir_text) for s in args])
@hpat.jit
def get_rank():
return hpat.distributed_api.get_rank()
@hpat.jit
def get_start_end(n):
rank = hpat.distributed_api.get_rank()
n_pes = hpat.distributed_api.get_size()
start = hpat.distributed_api.get_start(n, n_pes, rank)
end = hpat.distributed_api.get_end(n, n_pes, rank)
return start, end
|
[
"hpat.distributed.dist_analysis.array_dists.values",
"hpat.distributed_api.get_rank",
"hpat.distributed_api.get_start",
"hpat.distributed.dist_analysis.parfor_dists.values",
"hpat.distributed_api.get_size",
"hpat.distributed_api.get_end"
] |
[((95, 146), 'hpat.distributed.dist_analysis.array_dists.values', 'hpat.distributed.dist_analysis.array_dists.values', ([], {}), '()\n', (144, 146), False, 'import hpat\n'), ((285, 337), 'hpat.distributed.dist_analysis.parfor_dists.values', 'hpat.distributed.dist_analysis.parfor_dists.values', ([], {}), '()\n', (335, 337), False, 'import hpat\n'), ((477, 529), 'hpat.distributed.dist_analysis.parfor_dists.values', 'hpat.distributed.dist_analysis.parfor_dists.values', ([], {}), '()\n', (527, 529), False, 'import hpat\n'), ((669, 720), 'hpat.distributed.dist_analysis.array_dists.values', 'hpat.distributed.dist_analysis.array_dists.values', ([], {}), '()\n', (718, 720), False, 'import hpat\n'), ((865, 917), 'hpat.distributed.dist_analysis.parfor_dists.values', 'hpat.distributed.dist_analysis.parfor_dists.values', ([], {}), '()\n', (915, 917), False, 'import hpat\n'), ((1065, 1116), 'hpat.distributed.dist_analysis.array_dists.values', 'hpat.distributed.dist_analysis.array_dists.values', ([], {}), '()\n', (1114, 1116), False, 'import hpat\n'), ((1311, 1342), 'hpat.distributed_api.get_rank', 'hpat.distributed_api.get_rank', ([], {}), '()\n', (1340, 1342), False, 'import hpat\n'), ((1388, 1419), 'hpat.distributed_api.get_rank', 'hpat.distributed_api.get_rank', ([], {}), '()\n', (1417, 1419), False, 'import hpat\n'), ((1432, 1463), 'hpat.distributed_api.get_size', 'hpat.distributed_api.get_size', ([], {}), '()\n', (1461, 1463), False, 'import hpat\n'), ((1476, 1522), 'hpat.distributed_api.get_start', 'hpat.distributed_api.get_start', (['n', 'n_pes', 'rank'], {}), '(n, n_pes, rank)\n', (1506, 1522), False, 'import hpat\n'), ((1533, 1577), 'hpat.distributed_api.get_end', 'hpat.distributed_api.get_end', (['n', 'n_pes', 'rank'], {}), '(n, n_pes, rank)\n', (1561, 1577), False, 'import hpat\n')]
|
from pathlib import Path
from typing import Dict, List, Mapping, Optional, Set
from kolga.utils.general import get_environment_vars_by_prefix, get_project_secret_var
from kolga.utils.models import HelmValues
class Service:
"""
    A service is a piece of software deployable with Helm.
A service takes care of storing the configuration needed
to deploy a service to Kubernetes. It also stores metadata
about the service so that it can be shared with other services
if need be.
"""
def __init__(
self,
name: str,
track: str,
values: Optional[HelmValues] = None,
artifact_name: Optional[str] = None,
values_files: Optional[List[Path]] = None,
chart: str = "",
chart_path: Optional[Path] = None,
chart_version: Optional[str] = None,
depends_on: Optional[Set["Service"]] = None,
) -> None:
self.name = name
self.track = track
self.values = values or {}
self.artifact_name = artifact_name
self.values_files: List[Path] = values_files or []
self.chart = chart
self.chart_path = chart_path
self.chart_version = chart_version
self.depends_on: Set["Service"] = depends_on or set()
self._prerequisite_of: Set["Service"] = set()
self._validate_chart()
self.service_specific_values = self._get_service_variables()
def _validate_chart(self) -> None:
if not self.chart and not self.chart_path:
raise ValueError("Either chart or chart_name must be defined")
def _get_service_variables(self) -> Dict[str, str]:
return get_environment_vars_by_prefix(f"K8S_SERVICE_{self.name.upper()}_")
def add_dependency(self, service: "Service") -> None:
self.depends_on.add(service)
service.add_prerequisite(self)
def add_prerequisite(self, service: "Service") -> None:
self._prerequisite_of.add(service)
if self not in service.depends_on:
service.add_dependency(self)
def setup_prerequisites(self) -> None:
pass
def get_artifacts(self) -> Mapping[str, str]:
return {}
def get_service_secret_artifact_name(self, service: "Service") -> str:
if not self.artifact_name:
raise ValueError(f"No artifact name set for the service {self.name}")
return get_project_secret_var(
project_name=service.name, value=self.artifact_name
)
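if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: the service names and
    # chart references below are hypothetical. It shows that add_dependency()
    # keeps both sides of the relationship (depends_on / _prerequisite_of) in sync.
    database = Service(name="db", track="stable", chart="bitnami/postgresql")
    api = Service(name="api", track="stable", chart="example/api-chart")
    api.add_dependency(database)
    assert database in api.depends_on
    assert api in database._prerequisite_of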
|
[
"kolga.utils.general.get_project_secret_var"
] |
[((2367, 2442), 'kolga.utils.general.get_project_secret_var', 'get_project_secret_var', ([], {'project_name': 'service.name', 'value': 'self.artifact_name'}), '(project_name=service.name, value=self.artifact_name)\n', (2389, 2442), False, 'from kolga.utils.general import get_environment_vars_by_prefix, get_project_secret_var\n')]
|
"""
Departures
http://doc.navitia.io/#departures
Also known as /departures service.
This endpoint retrieves a list of departures from a specific datetime of a selected object. Departures are ordered chronologically in ascending order as:
url Result
/coverage/{region_id}/{resource_path}/departures List of the next departures, multi-route oriented, only time sorted (no grouped by stop_point/route here)
/coverage/{lon;lat}/coords/{lon;lat}/departures List of the next departures, multi-route oriented, only time sorted (no grouped by stop_point/route here)
"""
import os
def departures(client, collection_name=None, object_id=None, coords=None, region=None, extra_params=None, verbose=False):
# Construct url
if coords and region:
        raise ValueError(
            "Cannot specify both coords and region, you must choose one.")
if coords:
# TODO: check coords format
# /coverage/{lon;lat}/coords/{lon;lat}/departures
url = os.path.join("coverage", coords, "coords",
coords, "departures")
else:
# /coverage/{region_id}/{resource_path}/departures
# First choose region
if not region and not hasattr(client, 'region'):
            raise ValueError(
                "You must specify coords or region, either here or in client")
elif region:
if isinstance(region, str):
# region argument overrides client specified region
used_region = region
else:
raise ValueError("Region must be a string")
elif not region and hasattr(client, 'region'):
# Takes already specified region
used_region = client.region
else:
# shouldn't be possible
raise ValueError("Weird error, caused by region")
# /coverage/{region_id}/{collection_name}
if not object_id or not collection_name:
raise ValueError("of correct type")
url = os.path.join("coverage", used_region,
collection_name, object_id, "departures")
return client._get(url=url, extra_params=extra_params, verbose=verbose)
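if __name__ == "__main__":
    # Illustrative only, not from the original module: the coords, region and
    # object id below are hypothetical; this simply prints the URLs that the
    # departures() helper above would request for each of the two forms.
    print(os.path.join("coverage", "2.33;48.87", "coords", "2.33;48.87", "departures"))
    # -> coverage/2.33;48.87/coords/2.33;48.87/departures
    print(os.path.join("coverage", "fr-idf", "stop_areas", "stop_area:OIF:SA:8768666", "departures"))
    # -> coverage/fr-idf/stop_areas/stop_area:OIF:SA:8768666/departures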
|
[
"os.path.join"
] |
[((1027, 1091), 'os.path.join', 'os.path.join', (['"""coverage"""', 'coords', '"""coords"""', 'coords', '"""departures"""'], {}), "('coverage', coords, 'coords', coords, 'departures')\n", (1039, 1091), False, 'import os\n'), ((2042, 2121), 'os.path.join', 'os.path.join', (['"""coverage"""', 'used_region', 'collection_name', 'object_id', '"""departures"""'], {}), "('coverage', used_region, collection_name, object_id, 'departures')\n", (2054, 2121), False, 'import os\n')]
|
"""Vera tests."""
from unittest.mock import MagicMock
import pytest
import pyvera as pv
from requests.exceptions import RequestException
from homeassistant.components.vera import (
CONF_CONTROLLER,
CONF_EXCLUDE,
CONF_LIGHTS,
DOMAIN,
)
from homeassistant.config_entries import ENTRY_STATE_NOT_LOADED
from homeassistant.core import HomeAssistant
from .common import ComponentFactory, ConfigSource, new_simple_controller_config
from tests.common import MockConfigEntry, mock_registry
async def test_init(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_device1 = MagicMock(spec=pv.VeraBinarySensor) # type: pv.VeraBinarySensor
vera_device1.device_id = 1
vera_device1.vera_device_id = vera_device1.device_id
vera_device1.name = "first_dev"
vera_device1.is_tripped = False
entity1_id = "binary_sensor.first_dev_1"
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
config={CONF_CONTROLLER: "http://127.0.0.1:111"},
config_source=ConfigSource.CONFIG_FLOW,
serial_number="first_serial",
devices=(vera_device1,),
),
)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry1 = entity_registry.async_get(entity1_id)
assert entry1
assert entry1.unique_id == "vera_first_serial_1"
async def test_init_from_file(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_device1 = MagicMock(spec=pv.VeraBinarySensor) # type: pv.VeraBinarySensor
vera_device1.device_id = 1
vera_device1.vera_device_id = vera_device1.device_id
vera_device1.name = "first_dev"
vera_device1.is_tripped = False
entity1_id = "binary_sensor.first_dev_1"
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
config={CONF_CONTROLLER: "http://1172.16.17.32:111"},
config_source=ConfigSource.FILE,
serial_number="first_serial",
devices=(vera_device1,),
),
)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry1 = entity_registry.async_get(entity1_id)
assert entry1
assert entry1.unique_id == "vera_first_serial_1"
async def test_multiple_controllers_with_legacy_one(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test multiple controllers with one legacy controller."""
vera_device1 = MagicMock(spec=pv.VeraBinarySensor) # type: pv.VeraBinarySensor
vera_device1.device_id = 1
vera_device1.vera_device_id = vera_device1.device_id
vera_device1.name = "first_dev"
vera_device1.is_tripped = False
entity1_id = "binary_sensor.first_dev_1"
vera_device2 = MagicMock(spec=pv.VeraBinarySensor) # type: pv.VeraBinarySensor
vera_device2.device_id = 2
vera_device2.vera_device_id = vera_device2.device_id
vera_device2.name = "second_dev"
vera_device2.is_tripped = False
entity2_id = "binary_sensor.second_dev_2"
# Add existing entity registry entry from previous setup.
entity_registry = mock_registry(hass)
entity_registry.async_get_or_create(
domain="switch", platform=DOMAIN, unique_id="12"
)
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
config={CONF_CONTROLLER: "http://127.0.0.1:111"},
config_source=ConfigSource.FILE,
serial_number="first_serial",
devices=(vera_device1,),
),
)
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
config={CONF_CONTROLLER: "http://127.0.0.1:222"},
config_source=ConfigSource.CONFIG_FLOW,
serial_number="second_serial",
devices=(vera_device2,),
),
)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry1 = entity_registry.async_get(entity1_id)
assert entry1
assert entry1.unique_id == "1"
entry2 = entity_registry.async_get(entity2_id)
assert entry2
assert entry2.unique_id == "vera_second_serial_2"
async def test_unload(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_device1 = MagicMock(spec=pv.VeraBinarySensor) # type: pv.VeraBinarySensor
vera_device1.device_id = 1
vera_device1.vera_device_id = vera_device1.device_id
vera_device1.name = "first_dev"
vera_device1.is_tripped = False
await vera_component_factory.configure_component(
hass=hass, controller_config=new_simple_controller_config()
)
entries = hass.config_entries.async_entries(DOMAIN)
assert entries
for config_entry in entries:
assert await hass.config_entries.async_unload(config_entry.entry_id)
assert config_entry.state == ENTRY_STATE_NOT_LOADED
async def test_async_setup_entry_error(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
def setup_callback(controller: pv.VeraController) -> None:
controller.get_devices.side_effect = RequestException()
controller.get_scenes.side_effect = RequestException()
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(setup_callback=setup_callback),
)
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_CONTROLLER: "http://127.0.0.1"},
options={},
unique_id="12345",
)
entry.add_to_hass(hass)
assert not await hass.config_entries.async_setup(entry.entry_id)
@pytest.mark.parametrize(
["options"],
[
[{CONF_LIGHTS: [4, 10, 12, "AAA"], CONF_EXCLUDE: [1, "BBB"]}],
[{CONF_LIGHTS: ["4", "10", "12", "AAA"], CONF_EXCLUDE: ["1", "BBB"]}],
],
)
async def test_exclude_and_light_ids(
hass: HomeAssistant, vera_component_factory: ComponentFactory, options
) -> None:
"""Test device exclusion, marking switches as lights and fixing the data type."""
vera_device1 = MagicMock(spec=pv.VeraBinarySensor) # type: pv.VeraBinarySensor
vera_device1.device_id = 1
vera_device1.vera_device_id = 1
vera_device1.name = "dev1"
vera_device1.is_tripped = False
entity_id1 = "binary_sensor.dev1_1"
vera_device2 = MagicMock(spec=pv.VeraBinarySensor) # type: pv.VeraBinarySensor
vera_device2.device_id = 2
vera_device2.vera_device_id = 2
vera_device2.name = "dev2"
vera_device2.is_tripped = False
entity_id2 = "binary_sensor.dev2_2"
vera_device3 = MagicMock(spec=pv.VeraSwitch) # type: pv.VeraSwitch
vera_device3.device_id = 3
vera_device3.vera_device_id = 3
vera_device3.name = "dev3"
vera_device3.category = pv.CATEGORY_SWITCH
vera_device3.is_switched_on = MagicMock(return_value=False)
entity_id3 = "switch.dev3_3"
vera_device4 = MagicMock(spec=pv.VeraSwitch) # type: pv.VeraSwitch
vera_device4.device_id = 4
vera_device4.vera_device_id = 4
vera_device4.name = "dev4"
vera_device4.category = pv.CATEGORY_SWITCH
vera_device4.is_switched_on = MagicMock(return_value=False)
vera_device4.get_brightness = MagicMock(return_value=0)
vera_device4.get_color = MagicMock(return_value=[0, 0, 0])
vera_device4.is_dimmable = True
entity_id4 = "light.dev4_4"
component_data = await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(
config_source=ConfigSource.CONFIG_ENTRY,
devices=(vera_device1, vera_device2, vera_device3, vera_device4),
config={**{CONF_CONTROLLER: "http://127.0.0.1:123"}, **options},
),
)
# Assert the entries were setup correctly.
config_entry = next(iter(hass.config_entries.async_entries(DOMAIN)))
assert config_entry.options[CONF_LIGHTS] == [4, 10, 12]
assert config_entry.options[CONF_EXCLUDE] == [1]
update_callback = component_data.controller_data[0].update_callback
update_callback(vera_device1)
update_callback(vera_device2)
update_callback(vera_device3)
update_callback(vera_device4)
await hass.async_block_till_done()
assert hass.states.get(entity_id1) is None
assert hass.states.get(entity_id2) is not None
assert hass.states.get(entity_id3) is not None
assert hass.states.get(entity_id4) is not None
|
[
"unittest.mock.MagicMock",
"tests.common.MockConfigEntry",
"requests.exceptions.RequestException",
"tests.common.mock_registry",
"pytest.mark.parametrize"
] |
[((5894, 6074), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["['options']", "[[{CONF_LIGHTS: [4, 10, 12, 'AAA'], CONF_EXCLUDE: [1, 'BBB']}], [{\n CONF_LIGHTS: ['4', '10', '12', 'AAA'], CONF_EXCLUDE: ['1', 'BBB']}]]"], {}), "(['options'], [[{CONF_LIGHTS: [4, 10, 12, 'AAA'],\n CONF_EXCLUDE: [1, 'BBB']}], [{CONF_LIGHTS: ['4', '10', '12', 'AAA'],\n CONF_EXCLUDE: ['1', 'BBB']}]])\n", (5917, 6074), False, 'import pytest\n'), ((645, 680), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'pv.VeraBinarySensor'}), '(spec=pv.VeraBinarySensor)\n', (654, 680), False, 'from unittest.mock import MagicMock\n'), ((1610, 1645), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'pv.VeraBinarySensor'}), '(spec=pv.VeraBinarySensor)\n', (1619, 1645), False, 'from unittest.mock import MagicMock\n'), ((2633, 2668), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'pv.VeraBinarySensor'}), '(spec=pv.VeraBinarySensor)\n', (2642, 2668), False, 'from unittest.mock import MagicMock\n'), ((2923, 2958), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'pv.VeraBinarySensor'}), '(spec=pv.VeraBinarySensor)\n', (2932, 2958), False, 'from unittest.mock import MagicMock\n'), ((3280, 3299), 'tests.common.mock_registry', 'mock_registry', (['hass'], {}), '(hass)\n', (3293, 3299), False, 'from tests.common import MockConfigEntry, mock_registry\n'), ((4532, 4567), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'pv.VeraBinarySensor'}), '(spec=pv.VeraBinarySensor)\n', (4541, 4567), False, 'from unittest.mock import MagicMock\n'), ((5648, 5757), 'tests.common.MockConfigEntry', 'MockConfigEntry', ([], {'domain': 'DOMAIN', 'data': "{CONF_CONTROLLER: 'http://127.0.0.1'}", 'options': '{}', 'unique_id': '"""12345"""'}), "(domain=DOMAIN, data={CONF_CONTROLLER: 'http://127.0.0.1'},\n options={}, unique_id='12345')\n", (5663, 5757), False, 'from tests.common import MockConfigEntry, mock_registry\n'), ((6330, 6365), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'pv.VeraBinarySensor'}), '(spec=pv.VeraBinarySensor)\n', (6339, 6365), False, 'from unittest.mock import MagicMock\n'), ((6589, 6624), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'pv.VeraBinarySensor'}), '(spec=pv.VeraBinarySensor)\n', (6598, 6624), False, 'from unittest.mock import MagicMock\n'), ((6848, 6877), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'pv.VeraSwitch'}), '(spec=pv.VeraSwitch)\n', (6857, 6877), False, 'from unittest.mock import MagicMock\n'), ((7080, 7109), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(False)'}), '(return_value=False)\n', (7089, 7109), False, 'from unittest.mock import MagicMock\n'), ((7164, 7193), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'pv.VeraSwitch'}), '(spec=pv.VeraSwitch)\n', (7173, 7193), False, 'from unittest.mock import MagicMock\n'), ((7396, 7425), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(False)'}), '(return_value=False)\n', (7405, 7425), False, 'from unittest.mock import MagicMock\n'), ((7460, 7485), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(0)'}), '(return_value=0)\n', (7469, 7485), False, 'from unittest.mock import MagicMock\n'), ((7515, 7548), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '[0, 0, 0]'}), '(return_value=[0, 0, 0])\n', (7524, 7548), False, 'from unittest.mock import MagicMock\n'), ((5386, 5404), 'requests.exceptions.RequestException', 'RequestException', ([], {}), '()\n', (5402, 5404), False, 'from requests.exceptions import RequestException\n'), ((5449, 5467), 
'requests.exceptions.RequestException', 'RequestException', ([], {}), '()\n', (5465, 5467), False, 'from requests.exceptions import RequestException\n')]
|
import unittest
from frozendict import frozendict
from src.helpers.music.queue import Queue, QueueIsEmptyError, RemoveOutOfIndexError
class QueueTest(unittest.TestCase):
queue1 = [
frozendict({ 'name' : 'song1' }),
frozendict({ 'name' : 'song2' }),
frozendict({ 'name' : 'song3' })
]
queue2 = {
frozendict({ 'name' : 'song1' }),
frozendict({ 'name' : 'song2' }),
frozendict({ 'name' : 'song3' })
}
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self.q1 = Queue(QueueTest.queue1)
self.q2 = Queue(QueueTest.queue2)
self.q3 = Queue()
def tearDown(self):
pass
def test_create_queue(self):
q = Queue()
self.assertEqual(q._queue, [])
def test_is_empty(self):
self.assertFalse(self.q1.is_empty)
self.assertTrue(self.q3.is_empty)
def test_first_track(self):
self.assertEqual(self.q1.first_track['name'], QueueTest.queue1[0]['name'])
with self.assertRaises(QueueIsEmptyError):
self.q3.first_track
def test_current_track(self):
self.assertEqual(self.q1.current_track['name'], QueueTest.queue1[0]['name'])
self.q1.get_next_track()
self.assertEqual(self.q1.current_track['name'], QueueTest.queue1[1]['name'])
self.assertNotEqual(self.q1.current_track['name'], QueueTest.queue1[0]['name'])
with self.assertRaises(QueueIsEmptyError):
self.q3.current_track
def test_upcoming(self):
self.assertEqual(self.q1.upcoming[0]['name'], QueueTest.queue1[1]['name'])
self.assertEqual(self.q1.upcoming[1]['name'], QueueTest.queue1[2]['name'])
self.q1.get_next_track()
self.assertEqual(self.q1.upcoming[0]['name'], QueueTest.queue1[2]['name'])
self.assertNotEqual(self.q1.upcoming[0]['name'], QueueTest.queue1[1]['name'])
with self.assertRaises(QueueIsEmptyError):
self.q3.upcoming
def test_empty(self):
self.q1.empty()
self.assertEqual(self.q1.length, 0)
self.assertEqual(self.q1.position, 0)
def test_length(self):
self.assertEqual(self.q1.length, 3)
self.assertEqual(self.q3.length, 0)
def test_repeat_mode_all(self):
self.q1.set_repeat_mode('ALL')
self.q1.position = len(self.q1._queue)
next_track = self.q1.get_next_track()
self.assertEqual(next_track['name'], QueueTest.queue1[0]['name'])
def test_get_next_track(self):
with self.assertRaises(QueueIsEmptyError):
self.q3.get_next_track()
next_track = self.q1.get_next_track()
self.assertEqual(self.q1.position, 1)
self.q1.position = len(self.q1._queue)
next_track = self.q1.get_next_track()
self.assertIsNone(next_track)
def test_remove(self):
with self.assertRaises(RemoveOutOfIndexError):
self.q1.remove(255)
with self.assertRaises(RemoveOutOfIndexError):
self.q1.remove(-1)
self.q1.remove(1)
self.assertEqual(self.q1._queue[1]['name'], QueueTest.queue1[2]['name'])
|
[
"src.helpers.music.queue.Queue",
"frozendict.frozendict"
] |
[((196, 225), 'frozendict.frozendict', 'frozendict', (["{'name': 'song1'}"], {}), "({'name': 'song1'})\n", (206, 225), False, 'from frozendict import frozendict\n'), ((238, 267), 'frozendict.frozendict', 'frozendict', (["{'name': 'song2'}"], {}), "({'name': 'song2'})\n", (248, 267), False, 'from frozendict import frozendict\n'), ((280, 309), 'frozendict.frozendict', 'frozendict', (["{'name': 'song3'}"], {}), "({'name': 'song3'})\n", (290, 309), False, 'from frozendict import frozendict\n'), ((343, 372), 'frozendict.frozendict', 'frozendict', (["{'name': 'song1'}"], {}), "({'name': 'song1'})\n", (353, 372), False, 'from frozendict import frozendict\n'), ((385, 414), 'frozendict.frozendict', 'frozendict', (["{'name': 'song2'}"], {}), "({'name': 'song2'})\n", (395, 414), False, 'from frozendict import frozendict\n'), ((427, 456), 'frozendict.frozendict', 'frozendict', (["{'name': 'song3'}"], {}), "({'name': 'song3'})\n", (437, 456), False, 'from frozendict import frozendict\n'), ((625, 648), 'src.helpers.music.queue.Queue', 'Queue', (['QueueTest.queue1'], {}), '(QueueTest.queue1)\n', (630, 648), False, 'from src.helpers.music.queue import Queue, QueueIsEmptyError, RemoveOutOfIndexError\n'), ((667, 690), 'src.helpers.music.queue.Queue', 'Queue', (['QueueTest.queue2'], {}), '(QueueTest.queue2)\n', (672, 690), False, 'from src.helpers.music.queue import Queue, QueueIsEmptyError, RemoveOutOfIndexError\n'), ((709, 716), 'src.helpers.music.queue.Queue', 'Queue', ([], {}), '()\n', (714, 716), False, 'from src.helpers.music.queue import Queue, QueueIsEmptyError, RemoveOutOfIndexError\n'), ((805, 812), 'src.helpers.music.queue.Queue', 'Queue', ([], {}), '()\n', (810, 812), False, 'from src.helpers.music.queue import Queue, QueueIsEmptyError, RemoveOutOfIndexError\n')]
|
from pyvisa import ResourceManager, VisaIOError
from pylabnet.hardware.awg.dio_breakout import Driver
from pylabnet.utils.helper_methods import get_ip, load_device_config
from pylabnet.network.client_server.dio_breakout import Service, Client
from pylabnet.network.core.generic_server import GenericServer
def launch(**kwargs):
""" Connects to DIO breakout and instantiates server
:param kwargs: (dict) containing relevant kwargs
:logger: instance of LogClient for logging purposes
:port: (int) port number for the DIO breakout server
    :config: (str) name of config file to use
"""
device_config = load_device_config('dio_breakout', kwargs['config'], logger=kwargs['logger'])
# Try to load settings
if 'resource_name' in device_config:
addr = device_config['resource_name']
else:
addr = device_config['device_id']
# Try to connect
try:
dio = Driver(address=addr, logger=kwargs['logger'])
# If it fails, prompt the user to enter GPIB address from resource list
except VisaIOError:
kwargs['logger'].error(f'Failed to connect to device at address {addr}')
raise
# Instantiate Service and server
dio_service = Service()
dio_service.assign_module(module=dio)
dio_service.assign_logger(logger=kwargs['logger'])
dio_server = GenericServer(
service=dio_service,
host=get_ip(),
port=kwargs['port']
)
dio_server.start()
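# Illustrative call with hypothetical values, not part of the original module:
#   launch(logger=log_client, port=5123, config='dio_breakout_1')
# where, per the code above, the named config file provides either a VISA
# 'resource_name' or a 'device_id' entry used as the instrument address.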
|
[
"pylabnet.network.client_server.dio_breakout.Service",
"pylabnet.utils.helper_methods.load_device_config",
"pylabnet.utils.helper_methods.get_ip",
"pylabnet.hardware.awg.dio_breakout.Driver"
] |
[((640, 717), 'pylabnet.utils.helper_methods.load_device_config', 'load_device_config', (['"""dio_breakout"""', "kwargs['config']"], {'logger': "kwargs['logger']"}), "('dio_breakout', kwargs['config'], logger=kwargs['logger'])\n", (658, 717), False, 'from pylabnet.utils.helper_methods import get_ip, load_device_config\n'), ((1228, 1237), 'pylabnet.network.client_server.dio_breakout.Service', 'Service', ([], {}), '()\n', (1235, 1237), False, 'from pylabnet.network.client_server.dio_breakout import Service, Client\n'), ((930, 975), 'pylabnet.hardware.awg.dio_breakout.Driver', 'Driver', ([], {'address': 'addr', 'logger': "kwargs['logger']"}), "(address=addr, logger=kwargs['logger'])\n", (936, 975), False, 'from pylabnet.hardware.awg.dio_breakout import Driver\n'), ((1409, 1417), 'pylabnet.utils.helper_methods.get_ip', 'get_ip', ([], {}), '()\n', (1415, 1417), False, 'from pylabnet.utils.helper_methods import get_ip, load_device_config\n')]
|
# Standard Library
import datetime
# Third Party Code
from dateutil.tz import tzutc
# Supercell Code
from supercell.breezometer.pollen.models.pollen_index import PollenIndex
from supercell.breezometer.pollen.models.pollen_index_forecast import (
PollenIndexForecast,
)
from supercell.breezometer.pollen.models.pollen_type import PollenType
def test_model():
timestamp = datetime.datetime(2020, 1, 1, 0, 0, 0, tzinfo=tzutc())
assert (
'{"timestamp": "2020-01-01T00:00:00+00:00", "display_name": '
'"BreezoMeter Pollen Index", "short_name": "bpi", "pollen_type_count": 3, '
'"plant_count": 3}'
== str(
PollenIndexForecast(
timestamp=timestamp,
short_name="bpi",
display_name="BreezoMeter Pollen Index",
pollen_types=[
PollenType(
short_name="grass",
display_name="Grass",
in_season=True,
data_available=True,
index=PollenIndex(value=4, category="High", color="#FF8C00"),
timestamp=timestamp,
),
PollenType(
short_name="tree",
display_name="Tree",
in_season=True,
data_available=True,
index=PollenIndex(value=0, category="None", color=None),
timestamp=timestamp,
),
PollenType(
short_name="weed",
display_name="Weed",
in_season=True,
data_available=True,
index=PollenIndex(
value=3, category="Moderate", color="#FFFF00"
),
timestamp=timestamp,
),
],
plants=[
PollenType(
short_name="graminales",
display_name="Graminales",
in_season=True,
data_available=True,
index=PollenIndex(value=4, category="High", color="#FF8C00"),
timestamp=timestamp,
),
PollenType(
short_name="juniper",
display_name="Juniper",
in_season=False,
data_available=False,
index=PollenIndex(value=None, category=None, color=None),
timestamp=timestamp,
),
PollenType(
short_name="elm",
display_name="Elm",
in_season=True,
data_available=True,
index=PollenIndex(value=0, category="None", color=None),
timestamp=timestamp,
),
PollenType(
short_name="oak",
display_name="Oak",
in_season=False,
data_available=False,
index=PollenIndex(value=None, category=None, color=None),
timestamp=timestamp,
),
PollenType(
short_name="alder",
display_name="Alder",
in_season=False,
data_available=False,
index=PollenIndex(value=None, category=None, color=None),
timestamp=timestamp,
),
PollenType(
short_name="pine",
display_name="Pine",
in_season=False,
data_available=False,
index=PollenIndex(value=None, category=None, color=None),
timestamp=timestamp,
),
PollenType(
short_name="cottonwood",
display_name="Cottonwood",
in_season=False,
data_available=False,
index=PollenIndex(value=None, category=None, color=None),
timestamp=timestamp,
),
PollenType(
short_name="ragweed",
display_name="Ragweed",
in_season=True,
data_available=True,
index=PollenIndex(
value=3, category="Moderate", color="#FFFF00"
),
timestamp=timestamp,
),
PollenType(
short_name="birch",
display_name="Birch",
in_season=False,
data_available=False,
index=PollenIndex(value=None, category=None, color=None),
timestamp=timestamp,
),
PollenType(
short_name="ash",
display_name="Ash",
in_season=False,
data_available=False,
index=PollenIndex(value=None, category=None, color=None),
timestamp=timestamp,
),
PollenType(
short_name="maple",
display_name="Maple",
in_season=False,
data_available=False,
index=PollenIndex(value=None, category=None, color=None),
timestamp=timestamp,
),
],
)
)
)
def test_initialize_with_dictionary():
assert (
'{"timestamp": "2020-09-06T00:00:00+00:00", "display_name": '
'"BreezoMeter Pollen Index", "short_name": "bpi", "pollen_type_count": 3, '
'"plant_count": 3}'
== str(
PollenIndexForecast.initialize_from_dictionary(
response_dictionary={
"date": "2020-09-06",
"index_id": "bpi",
"index_display_name": "BreezoMeter Pollen Index",
"types": {
"grass": {
"display_name": "Grass",
"in_season": True,
"data_available": True,
"index": {
"value": 4,
"category": "High",
"color": "#FF8C00",
},
},
"tree": {
"display_name": "Tree",
"in_season": True,
"data_available": True,
"index": {"value": 0, "category": "None", "color": None},
},
"weed": {
"display_name": "Weed",
"in_season": True,
"data_available": True,
"index": {
"value": 3,
"category": "Moderate",
"color": "#FFFF00",
},
},
},
"plants": {
"graminales": {
"display_name": "Graminales",
"in_season": True,
"data_available": True,
"index": {
"value": 4,
"category": "High",
"color": "#FF8C00",
},
},
"juniper": {
"display_name": "Juniper",
"in_season": False,
"data_available": False,
"index": {"value": None, "category": None, "color": None},
},
"elm": {
"display_name": "Elm",
"in_season": True,
"data_available": True,
"index": {"value": 0, "category": "None", "color": None},
},
"oak": {
"display_name": "Oak",
"in_season": False,
"data_available": False,
"index": {"value": None, "category": None, "color": None},
},
"alder": {
"display_name": "Alder",
"in_season": False,
"data_available": False,
"index": {"value": None, "category": None, "color": None},
},
"pine": {
"display_name": "Pine",
"in_season": False,
"data_available": False,
"index": {"value": None, "category": None, "color": None},
},
"cottonwood": {
"display_name": "Cottonwood",
"in_season": False,
"data_available": False,
"index": {"value": None, "category": None, "color": None},
},
"ragweed": {
"display_name": "Ragweed",
"in_season": True,
"data_available": True,
"index": {
"value": 3,
"category": "Moderate",
"color": "#FFFF00",
},
},
"birch": {
"display_name": "Birch",
"in_season": False,
"data_available": False,
"index": {"value": None, "category": None, "color": None},
},
"ash": {
"display_name": "Ash",
"in_season": False,
"data_available": False,
"index": {"value": None, "category": None, "color": None},
},
"maple": {
"display_name": "Maple",
"in_season": False,
"data_available": False,
"index": {"value": None, "category": None, "color": None},
},
},
}
)
)
)
|
[
"dateutil.tz.tzutc",
"supercell.breezometer.pollen.models.pollen_index.PollenIndex",
"supercell.breezometer.pollen.models.pollen_index_forecast.PollenIndexForecast.initialize_from_dictionary"
] |
[((428, 435), 'dateutil.tz.tzutc', 'tzutc', ([], {}), '()\n', (433, 435), False, 'from dateutil.tz import tzutc\n'), ((6351, 8641), 'supercell.breezometer.pollen.models.pollen_index_forecast.PollenIndexForecast.initialize_from_dictionary', 'PollenIndexForecast.initialize_from_dictionary', ([], {'response_dictionary': "{'date': '2020-09-06', 'index_id': 'bpi', 'index_display_name':\n 'BreezoMeter Pollen Index', 'types': {'grass': {'display_name': 'Grass',\n 'in_season': True, 'data_available': True, 'index': {'value': 4,\n 'category': 'High', 'color': '#FF8C00'}}, 'tree': {'display_name':\n 'Tree', 'in_season': True, 'data_available': True, 'index': {'value': 0,\n 'category': 'None', 'color': None}}, 'weed': {'display_name': 'Weed',\n 'in_season': True, 'data_available': True, 'index': {'value': 3,\n 'category': 'Moderate', 'color': '#FFFF00'}}}, 'plants': {'graminales':\n {'display_name': 'Graminales', 'in_season': True, 'data_available': \n True, 'index': {'value': 4, 'category': 'High', 'color': '#FF8C00'}},\n 'juniper': {'display_name': 'Juniper', 'in_season': False,\n 'data_available': False, 'index': {'value': None, 'category': None,\n 'color': None}}, 'elm': {'display_name': 'Elm', 'in_season': True,\n 'data_available': True, 'index': {'value': 0, 'category': 'None',\n 'color': None}}, 'oak': {'display_name': 'Oak', 'in_season': False,\n 'data_available': False, 'index': {'value': None, 'category': None,\n 'color': None}}, 'alder': {'display_name': 'Alder', 'in_season': False,\n 'data_available': False, 'index': {'value': None, 'category': None,\n 'color': None}}, 'pine': {'display_name': 'Pine', 'in_season': False,\n 'data_available': False, 'index': {'value': None, 'category': None,\n 'color': None}}, 'cottonwood': {'display_name': 'Cottonwood',\n 'in_season': False, 'data_available': False, 'index': {'value': None,\n 'category': None, 'color': None}}, 'ragweed': {'display_name':\n 'Ragweed', 'in_season': True, 'data_available': True, 'index': {'value':\n 3, 'category': 'Moderate', 'color': '#FFFF00'}}, 'birch': {\n 'display_name': 'Birch', 'in_season': False, 'data_available': False,\n 'index': {'value': None, 'category': None, 'color': None}}, 'ash': {\n 'display_name': 'Ash', 'in_season': False, 'data_available': False,\n 'index': {'value': None, 'category': None, 'color': None}}, 'maple': {\n 'display_name': 'Maple', 'in_season': False, 'data_available': False,\n 'index': {'value': None, 'category': None, 'color': None}}}}"}), "(response_dictionary={'date':\n '2020-09-06', 'index_id': 'bpi', 'index_display_name':\n 'BreezoMeter Pollen Index', 'types': {'grass': {'display_name': 'Grass',\n 'in_season': True, 'data_available': True, 'index': {'value': 4,\n 'category': 'High', 'color': '#FF8C00'}}, 'tree': {'display_name':\n 'Tree', 'in_season': True, 'data_available': True, 'index': {'value': 0,\n 'category': 'None', 'color': None}}, 'weed': {'display_name': 'Weed',\n 'in_season': True, 'data_available': True, 'index': {'value': 3,\n 'category': 'Moderate', 'color': '#FFFF00'}}}, 'plants': {'graminales':\n {'display_name': 'Graminales', 'in_season': True, 'data_available': \n True, 'index': {'value': 4, 'category': 'High', 'color': '#FF8C00'}},\n 'juniper': {'display_name': 'Juniper', 'in_season': False,\n 'data_available': False, 'index': {'value': None, 'category': None,\n 'color': None}}, 'elm': {'display_name': 'Elm', 'in_season': True,\n 'data_available': True, 'index': {'value': 0, 'category': 'None',\n 'color': None}}, 'oak': {'display_name': 'Oak', 'in_season': False,\n 
'data_available': False, 'index': {'value': None, 'category': None,\n 'color': None}}, 'alder': {'display_name': 'Alder', 'in_season': False,\n 'data_available': False, 'index': {'value': None, 'category': None,\n 'color': None}}, 'pine': {'display_name': 'Pine', 'in_season': False,\n 'data_available': False, 'index': {'value': None, 'category': None,\n 'color': None}}, 'cottonwood': {'display_name': 'Cottonwood',\n 'in_season': False, 'data_available': False, 'index': {'value': None,\n 'category': None, 'color': None}}, 'ragweed': {'display_name':\n 'Ragweed', 'in_season': True, 'data_available': True, 'index': {'value':\n 3, 'category': 'Moderate', 'color': '#FFFF00'}}, 'birch': {\n 'display_name': 'Birch', 'in_season': False, 'data_available': False,\n 'index': {'value': None, 'category': None, 'color': None}}, 'ash': {\n 'display_name': 'Ash', 'in_season': False, 'data_available': False,\n 'index': {'value': None, 'category': None, 'color': None}}, 'maple': {\n 'display_name': 'Maple', 'in_season': False, 'data_available': False,\n 'index': {'value': None, 'category': None, 'color': None}}}})\n", (6397, 8641), False, 'from supercell.breezometer.pollen.models.pollen_index_forecast import PollenIndexForecast\n'), ((1077, 1131), 'supercell.breezometer.pollen.models.pollen_index.PollenIndex', 'PollenIndex', ([], {'value': '(4)', 'category': '"""High"""', 'color': '"""#FF8C00"""'}), "(value=4, category='High', color='#FF8C00')\n", (1088, 1131), False, 'from supercell.breezometer.pollen.models.pollen_index import PollenIndex\n'), ((1436, 1485), 'supercell.breezometer.pollen.models.pollen_index.PollenIndex', 'PollenIndex', ([], {'value': '(0)', 'category': '"""None"""', 'color': 'None'}), "(value=0, category='None', color=None)\n", (1447, 1485), False, 'from supercell.breezometer.pollen.models.pollen_index import PollenIndex\n'), ((1790, 1848), 'supercell.breezometer.pollen.models.pollen_index.PollenIndex', 'PollenIndex', ([], {'value': '(3)', 'category': '"""Moderate"""', 'color': '"""#FFFF00"""'}), "(value=3, category='Moderate', color='#FFFF00')\n", (1801, 1848), False, 'from supercell.breezometer.pollen.models.pollen_index import PollenIndex\n'), ((2263, 2317), 'supercell.breezometer.pollen.models.pollen_index.PollenIndex', 'PollenIndex', ([], {'value': '(4)', 'category': '"""High"""', 'color': '"""#FF8C00"""'}), "(value=4, category='High', color='#FF8C00')\n", (2274, 2317), False, 'from supercell.breezometer.pollen.models.pollen_index import PollenIndex\n'), ((2630, 2680), 'supercell.breezometer.pollen.models.pollen_index.PollenIndex', 'PollenIndex', ([], {'value': 'None', 'category': 'None', 'color': 'None'}), '(value=None, category=None, color=None)\n', (2641, 2680), False, 'from supercell.breezometer.pollen.models.pollen_index import PollenIndex\n'), ((2983, 3032), 'supercell.breezometer.pollen.models.pollen_index.PollenIndex', 'PollenIndex', ([], {'value': '(0)', 'category': '"""None"""', 'color': 'None'}), "(value=0, category='None', color=None)\n", (2994, 3032), False, 'from supercell.breezometer.pollen.models.pollen_index import PollenIndex\n'), ((3337, 3387), 'supercell.breezometer.pollen.models.pollen_index.PollenIndex', 'PollenIndex', ([], {'value': 'None', 'category': 'None', 'color': 'None'}), '(value=None, category=None, color=None)\n', (3348, 3387), False, 'from supercell.breezometer.pollen.models.pollen_index import PollenIndex\n'), ((3696, 3746), 'supercell.breezometer.pollen.models.pollen_index.PollenIndex', 'PollenIndex', ([], {'value': 'None', 'category': 'None', 
'color': 'None'}), '(value=None, category=None, color=None)\n', (3707, 3746), False, 'from supercell.breezometer.pollen.models.pollen_index import PollenIndex\n'), ((4053, 4103), 'supercell.breezometer.pollen.models.pollen_index.PollenIndex', 'PollenIndex', ([], {'value': 'None', 'category': 'None', 'color': 'None'}), '(value=None, category=None, color=None)\n', (4064, 4103), False, 'from supercell.breezometer.pollen.models.pollen_index import PollenIndex\n'), ((4422, 4472), 'supercell.breezometer.pollen.models.pollen_index.PollenIndex', 'PollenIndex', ([], {'value': 'None', 'category': 'None', 'color': 'None'}), '(value=None, category=None, color=None)\n', (4433, 4472), False, 'from supercell.breezometer.pollen.models.pollen_index import PollenIndex\n'), ((4783, 4841), 'supercell.breezometer.pollen.models.pollen_index.PollenIndex', 'PollenIndex', ([], {'value': '(3)', 'category': '"""Moderate"""', 'color': '"""#FFFF00"""'}), "(value=3, category='Moderate', color='#FFFF00')\n", (4794, 4841), False, 'from supercell.breezometer.pollen.models.pollen_index import PollenIndex\n'), ((5204, 5254), 'supercell.breezometer.pollen.models.pollen_index.PollenIndex', 'PollenIndex', ([], {'value': 'None', 'category': 'None', 'color': 'None'}), '(value=None, category=None, color=None)\n', (5215, 5254), False, 'from supercell.breezometer.pollen.models.pollen_index import PollenIndex\n'), ((5559, 5609), 'supercell.breezometer.pollen.models.pollen_index.PollenIndex', 'PollenIndex', ([], {'value': 'None', 'category': 'None', 'color': 'None'}), '(value=None, category=None, color=None)\n', (5570, 5609), False, 'from supercell.breezometer.pollen.models.pollen_index import PollenIndex\n'), ((5918, 5968), 'supercell.breezometer.pollen.models.pollen_index.PollenIndex', 'PollenIndex', ([], {'value': 'None', 'category': 'None', 'color': 'None'}), '(value=None, category=None, color=None)\n', (5929, 5968), False, 'from supercell.breezometer.pollen.models.pollen_index import PollenIndex\n')]
|
#!/usr/bin/env python
# Cloudeebus
#
# Copyright 2012 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
import argparse, dbus, json, sys
from twisted.internet import glib2reactor
# Configure the twisted mainloop to be run inside the glib mainloop.
# This must be done before importing the other twisted modules
glib2reactor.install()
from twisted.internet import reactor
from autobahn.websocket import listenWS
from autobahn.wamp import WampServerFactory, WampCraServerProtocol
from dbus.mainloop.glib import DBusGMainLoop
import gobject
gobject.threads_init()
from dbus import glib
glib.init_threads()
# enable debug log
from twisted.python import log
###############################################################################
from cloudeebusengine import VERSION, SERVICELIST, CloudeebusService, cache
import cloudeebusengine
OPENDOOR = False
CREDENTIALS = {}
WHITELIST = []
NETMASK = []
###############################################################################
def ipV4ToHex(mask):
## Convert an ip or an IP mask (such as ip/24 or ip/255.255.255.0) in hex value (32bits)
maskHex = 0
byte = 0
if mask.rfind(".") == -1:
if (int(mask) < 32):
maskHex = (2**(int(mask))-1)
maskHex = maskHex << (32-int(mask))
else:
raise Exception("Illegal mask (larger than 32 bits) " + mask)
else:
maskField = mask.split(".")
# Check if mask has four fields (byte)
if len(maskField) != 4:
raise Exception("Illegal ip address / mask (should be 4 bytes) " + mask)
for maskQuartet in maskField:
byte = int(maskQuartet)
# Check if each field is really a byte
if byte > 255:
raise Exception("Illegal ip address / mask (digit larger than a byte) " + mask)
maskHex += byte
maskHex = maskHex << 8
maskHex = maskHex >> 8
return maskHex
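# Illustrative values, not part of the original file: a CIDR prefix and the
# equivalent dotted mask produce the same 32-bit integer, e.g.
#   ipV4ToHex("24")            == 0xFFFFFF00
#   ipV4ToHex("255.255.255.0") == 0xFFFFFF00
#   ipV4ToHex("192.168.2.0")   == 0xC0A80200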
###############################################################################
class CloudeebusServerProtocol(WampCraServerProtocol):
'''
    connection and session authentication management
'''
def onSessionOpen(self):
# CRA authentication options
self.clientAuthTimeout = 0
self.clientAuthAllowAnonymous = OPENDOOR
# CRA authentication init
WampCraServerProtocol.onSessionOpen(self)
def getAuthPermissions(self, key, extra):
return {'permissions': extra.get("permissions", None),
'authextra': extra.get("authextra", None),
'services': extra.get("services", None)}
def getAuthSecret(self, key):
secret = CREDENTIALS.get(key, None)
if secret is None:
return None
# secret must be of str type to be hashed
return str(secret)
def onAuthenticated(self, key, permissions):
if not OPENDOOR:
# check net filter
if NETMASK != []:
ipAllowed = False
for netfilter in NETMASK:
ipHex=ipV4ToHex(self.peer.host)
ipAllowed = (ipHex & netfilter['mask']) == netfilter['ipAllowed'] & netfilter['mask']
if ipAllowed:
break
if not ipAllowed:
raise Exception("host " + self.peer.host + " is not allowed!")
# check authentication key
if key is None:
raise Exception("Authentication failed")
# check permissions, array.index throws exception
if (permissions['permissions'] != None):
for req in permissions['permissions']:
WHITELIST.index(req);
# check allowed service creation, array.index throws exception
if (permissions['services'] != None):
for req in permissions['services']:
SERVICELIST.index(req);
# create cloudeebus service instance
self.cloudeebusService = CloudeebusService(permissions)
# register it for RPC
self.registerForRpc(self.cloudeebusService)
# register for Publish / Subscribe
self.registerForPubSub("", True)
def connectionLost(self, reason):
WampCraServerProtocol.connectionLost(self, reason)
if factory.getConnectionCount() == 0:
cache.reset()
###############################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Javascript DBus bridge.')
parser.add_argument('-v', '--version', action='store_true',
help='print version and exit')
parser.add_argument('-d', '--debug', action='store_true',
help='log debug info on standard output')
parser.add_argument('-o', '--opendoor', action='store_true',
help='allow anonymous access to all services')
parser.add_argument('-p', '--port', default='9000',
help='port number')
parser.add_argument('-c', '--credentials',
help='path to credentials file')
parser.add_argument('-w', '--whitelist',
help='path to whitelist file (DBus services to use)')
parser.add_argument('-s', '--servicelist',
help='path to servicelist file (DBus services to export)')
parser.add_argument('-n', '--netmask',
help='netmask,IP filter (comma separated.) eg. : -n 127.0.0.1,192.168.2.0/24,10.12.16.0/255.255.255.0')
args = parser.parse_args(sys.argv[1:])
if args.version:
print("Cloudeebus version " + VERSION)
exit(0)
if args.debug:
log.startLogging(sys.stdout)
OPENDOOR = args.opendoor
if args.credentials:
jfile = open(args.credentials)
CREDENTIALS = json.load(jfile)
jfile.close()
if args.whitelist:
jfile = open(args.whitelist)
WHITELIST.extend(json.load(jfile))
jfile.close()
if args.servicelist:
jfile = open(args.servicelist)
SERVICELIST.extend(json.load(jfile))
jfile.close()
if args.netmask:
iplist = args.netmask.split(",")
for ip in iplist:
if ip.rfind("/") != -1:
ip=ip.split("/")
ipAllowed = ip[0]
mask = ip[1]
else:
ipAllowed = ip
mask = "255.255.255.255"
NETMASK.append( {'ipAllowed': ipV4ToHex(ipAllowed), 'mask' : ipV4ToHex(mask)} )
uri = "ws://localhost:" + args.port
factory = WampServerFactory(uri, debugWamp = args.debug)
factory.protocol = CloudeebusServerProtocol
factory.setProtocolOptions(allowHixie76 = True)
# Configure cloudeebus engine for WAMP.
cloudeebusengine.factory = factory
cloudeebusengine.OPENDOOR = OPENDOOR
listenWS(factory)
DBusGMainLoop(set_as_default=True)
reactor.run()
|
[
"json.load",
"twisted.python.log.startLogging",
"argparse.ArgumentParser",
"autobahn.wamp.WampCraServerProtocol.connectionLost",
"cloudeebusengine.SERVICELIST.index",
"dbus.glib.init_threads",
"autobahn.wamp.WampCraServerProtocol.onSessionOpen",
"twisted.internet.glib2reactor.install",
"gobject.threads_init",
"autobahn.websocket.listenWS",
"twisted.internet.reactor.run",
"cloudeebusengine.cache.reset",
"dbus.mainloop.glib.DBusGMainLoop",
"cloudeebusengine.CloudeebusService",
"autobahn.wamp.WampServerFactory"
] |
[((887, 909), 'twisted.internet.glib2reactor.install', 'glib2reactor.install', ([], {}), '()\n', (907, 909), False, 'from twisted.internet import glib2reactor\n'), ((1117, 1139), 'gobject.threads_init', 'gobject.threads_init', ([], {}), '()\n', (1137, 1139), False, 'import gobject\n'), ((1163, 1182), 'dbus.glib.init_threads', 'glib.init_threads', ([], {}), '()\n', (1180, 1182), False, 'from dbus import glib\n'), ((5111, 5173), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Javascript DBus bridge."""'}), "(description='Javascript DBus bridge.')\n", (5134, 5173), False, 'import argparse, dbus, json, sys\n'), ((7159, 7203), 'autobahn.wamp.WampServerFactory', 'WampServerFactory', (['uri'], {'debugWamp': 'args.debug'}), '(uri, debugWamp=args.debug)\n', (7176, 7203), False, 'from autobahn.wamp import WampServerFactory, WampCraServerProtocol\n'), ((7436, 7453), 'autobahn.websocket.listenWS', 'listenWS', (['factory'], {}), '(factory)\n', (7444, 7453), False, 'from autobahn.websocket import listenWS\n'), ((7463, 7497), 'dbus.mainloop.glib.DBusGMainLoop', 'DBusGMainLoop', ([], {'set_as_default': '(True)'}), '(set_as_default=True)\n', (7476, 7497), False, 'from dbus.mainloop.glib import DBusGMainLoop\n'), ((7507, 7520), 'twisted.internet.reactor.run', 'reactor.run', ([], {}), '()\n', (7518, 7520), False, 'from twisted.internet import reactor\n'), ((2926, 2967), 'autobahn.wamp.WampCraServerProtocol.onSessionOpen', 'WampCraServerProtocol.onSessionOpen', (['self'], {}), '(self)\n', (2961, 2967), False, 'from autobahn.wamp import WampServerFactory, WampCraServerProtocol\n'), ((4611, 4641), 'cloudeebusengine.CloudeebusService', 'CloudeebusService', (['permissions'], {}), '(permissions)\n', (4628, 4641), False, 'from cloudeebusengine import VERSION, SERVICELIST, CloudeebusService, cache\n'), ((4864, 4914), 'autobahn.wamp.WampCraServerProtocol.connectionLost', 'WampCraServerProtocol.connectionLost', (['self', 'reason'], {}), '(self, reason)\n', (4900, 4914), False, 'from autobahn.wamp import WampServerFactory, WampCraServerProtocol\n'), ((6224, 6252), 'twisted.python.log.startLogging', 'log.startLogging', (['sys.stdout'], {}), '(sys.stdout)\n', (6240, 6252), False, 'from twisted.python import log\n'), ((6378, 6394), 'json.load', 'json.load', (['jfile'], {}), '(jfile)\n', (6387, 6394), False, 'import argparse, dbus, json, sys\n'), ((4973, 4986), 'cloudeebusengine.cache.reset', 'cache.reset', ([], {}), '()\n', (4984, 4986), False, 'from cloudeebusengine import VERSION, SERVICELIST, CloudeebusService, cache\n'), ((6507, 6523), 'json.load', 'json.load', (['jfile'], {}), '(jfile)\n', (6516, 6523), False, 'import argparse, dbus, json, sys\n'), ((6647, 6663), 'json.load', 'json.load', (['jfile'], {}), '(jfile)\n', (6656, 6663), False, 'import argparse, dbus, json, sys\n'), ((4509, 4531), 'cloudeebusengine.SERVICELIST.index', 'SERVICELIST.index', (['req'], {}), '(req)\n', (4526, 4531), False, 'from cloudeebusengine import VERSION, SERVICELIST, CloudeebusService, cache\n')]
|
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from rest_framework.documentation import include_docs_urls
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from rest_framework import permissions
API_TITLE = 'Location API'
API_DESCRIPTION = 'A Web API for list of available Locations'
schema_view = get_schema_view(
openapi.Info(
title="Locaion API",
default_version='v1',
description="A Web API for list of available Locations"
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
path('admin/', admin.site.urls),
path('api/v1/', include('api.urls')),
path('api/auth/', include('authentication.urls')),
# path('doc', include_docs_urls(title=API_TITLE, description=API_DESCRIPTION, permission_classes=(permissions.AllowAny,))),
path('', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
path('redoc', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
[
"drf_yasg.openapi.Info",
"django.conf.urls.static.static",
"django.urls.path",
"django.urls.include"
] |
[((455, 572), 'drf_yasg.openapi.Info', 'openapi.Info', ([], {'title': '"""Locaion API"""', 'default_version': '"""v1"""', 'description': '"""A Web API for list of available Locations"""'}), "(title='Locaion API', default_version='v1', description=\n 'A Web API for list of available Locations')\n", (467, 572), False, 'from drf_yasg import openapi\n'), ((1143, 1206), 'django.conf.urls.static.static', 'static', (['settings.STATIC_URL'], {'document_root': 'settings.STATIC_ROOT'}), '(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n', (1149, 1206), False, 'from django.conf.urls.static import static\n'), ((699, 730), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (703, 730), False, 'from django.urls import path, include\n'), ((753, 772), 'django.urls.include', 'include', (['"""api.urls"""'], {}), "('api.urls')\n", (760, 772), False, 'from django.urls import path, include\n'), ((798, 828), 'django.urls.include', 'include', (['"""authentication.urls"""'], {}), "('authentication.urls')\n", (805, 828), False, 'from django.urls import path, include\n')]
|
from pathlib import Path
from datetime import datetime
import time
from subprocess import call
import os
from logzero import logger
import picamera
# Video file path
VIDEO_PATH = str(Path().resolve()) + "/videos/"
IMAGE_PATH = str(Path().resolve()) + "/images/"
class CameraController:
def __init__(self) -> None:
self.camera = picamera.PiCamera()
self.camera.resolution = (1296, 730)
self.video_file_path = VIDEO_PATH + str(datetime.now().date())
self.image_file_path = IMAGE_PATH + str(time.time())
def start_record(self):
""" Starts to record video. """
try:
# Delete possible video that was taken earlier.
logger.info("Deleting previous video if exists.")
self.delete_previous_video()
logger.info("Starting to record video.")
# Start recording video.
self.camera.start_recording(self.video_file_path + ".h264")
except Exception as ex:
logger.warning("Error happened while recording video.")
logger.error(ex)
def stop_record(self):
""" Stops video recording and calls converter function. """
try:
logger.info("Stopping video recording.")
# Stop recording.
self.camera.stop_recording()
# Take a picture of the plant.
self.capture_image()
# Convert video to mp4 and return result.
return self.convert_recording_to_mp4()
except Exception as ex:
logger.warning("Error happened ending video recording.")
logger.error(ex)
def capture_image(self):
""" Captures single image for later use. """
logger.info("Capturing image.")
self.camera.start_preview()
# Camera warm-up time
time.sleep(2)
# Capture image.
self.camera.capture(self.image_file_path + ".png")
logger.info("Image captured.")
def convert_recording_to_mp4(self):
""" Converts .h264 to mp4 file. """
# Define file names of original and converted versions.
orig_file = self.video_file_path + ".h264"
converted_file = self.video_file_path + ".mp4"
# Try to convert video with shell command.
try:
command = "MP4Box -add " + orig_file + " " + converted_file
logger.info("Converting video to mp4.")
# Execute command to convert h246 to mp4.
call([command], shell=True)
logger.info("Video successfully converted.")
self.delete_original_format()
return True
except:
logger.error("Error when converting video to mp4.")
return False
def delete_original_format(self):
""" Deletes the H246 format file after conversion to mp4. """
orig_file = Path(self.video_file_path + '.h264')
# Check if file exists.
if orig_file.is_file():
# Remove file.
os.remove(self.video_file_path + ".h264")
def delete_previous_video(self):
""" Deletes possibly existing mp4 video with the same date. """
converted_file = Path(self.video_file_path + '.mp4')
# Check if file exists.
if converted_file.is_file():
# Remove file.
os.remove(self.video_file_path + ".mp4")
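# Illustrative usage, not part of the original module; it assumes a Raspberry Pi
# with the camera enabled and MP4Box (from the gpac package) installed:
#   controller = CameraController()
#   controller.start_record()   # begins recording to videos/<date>.h264
#   ...                          # record for as long as needed
#   controller.stop_record()    # stops, captures a still image, converts to .mp4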
|
[
"os.remove",
"logzero.logger.info",
"time.sleep",
"time.time",
"pathlib.Path",
"subprocess.call",
"logzero.logger.warning",
"logzero.logger.error",
"datetime.datetime.now",
"picamera.PiCamera"
] |
[((345, 364), 'picamera.PiCamera', 'picamera.PiCamera', ([], {}), '()\n', (362, 364), False, 'import picamera\n'), ((1716, 1747), 'logzero.logger.info', 'logger.info', (['"""Capturing image."""'], {}), "('Capturing image.')\n", (1727, 1747), False, 'from logzero import logger\n'), ((1822, 1835), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1832, 1835), False, 'import time\n'), ((1928, 1958), 'logzero.logger.info', 'logger.info', (['"""Image captured."""'], {}), "('Image captured.')\n", (1939, 1958), False, 'from logzero import logger\n'), ((2855, 2891), 'pathlib.Path', 'Path', (["(self.video_file_path + '.h264')"], {}), "(self.video_file_path + '.h264')\n", (2859, 2891), False, 'from pathlib import Path\n'), ((3173, 3208), 'pathlib.Path', 'Path', (["(self.video_file_path + '.mp4')"], {}), "(self.video_file_path + '.mp4')\n", (3177, 3208), False, 'from pathlib import Path\n'), ((697, 746), 'logzero.logger.info', 'logger.info', (['"""Deleting previous video if exists."""'], {}), "('Deleting previous video if exists.')\n", (708, 746), False, 'from logzero import logger\n'), ((800, 840), 'logzero.logger.info', 'logger.info', (['"""Starting to record video."""'], {}), "('Starting to record video.')\n", (811, 840), False, 'from logzero import logger\n'), ((1201, 1241), 'logzero.logger.info', 'logger.info', (['"""Stopping video recording."""'], {}), "('Stopping video recording.')\n", (1212, 1241), False, 'from logzero import logger\n'), ((2363, 2402), 'logzero.logger.info', 'logger.info', (['"""Converting video to mp4."""'], {}), "('Converting video to mp4.')\n", (2374, 2402), False, 'from logzero import logger\n'), ((2469, 2496), 'subprocess.call', 'call', (['[command]'], {'shell': '(True)'}), '([command], shell=True)\n', (2473, 2496), False, 'from subprocess import call\n'), ((2509, 2553), 'logzero.logger.info', 'logger.info', (['"""Video successfully converted."""'], {}), "('Video successfully converted.')\n", (2520, 2553), False, 'from logzero import logger\n'), ((2995, 3036), 'os.remove', 'os.remove', (["(self.video_file_path + '.h264')"], {}), "(self.video_file_path + '.h264')\n", (3004, 3036), False, 'import os\n'), ((3317, 3357), 'os.remove', 'os.remove', (["(self.video_file_path + '.mp4')"], {}), "(self.video_file_path + '.mp4')\n", (3326, 3357), False, 'import os\n'), ((185, 191), 'pathlib.Path', 'Path', ([], {}), '()\n', (189, 191), False, 'from pathlib import Path\n'), ((233, 239), 'pathlib.Path', 'Path', ([], {}), '()\n', (237, 239), False, 'from pathlib import Path\n'), ((529, 540), 'time.time', 'time.time', ([], {}), '()\n', (538, 540), False, 'import time\n'), ((994, 1049), 'logzero.logger.warning', 'logger.warning', (['"""Error happened while recording video."""'], {}), "('Error happened while recording video.')\n", (1008, 1049), False, 'from logzero import logger\n'), ((1062, 1078), 'logzero.logger.error', 'logger.error', (['ex'], {}), '(ex)\n', (1074, 1078), False, 'from logzero import logger\n'), ((1538, 1594), 'logzero.logger.warning', 'logger.warning', (['"""Error happened ending video recording."""'], {}), "('Error happened ending video recording.')\n", (1552, 1594), False, 'from logzero import logger\n'), ((1607, 1623), 'logzero.logger.error', 'logger.error', (['ex'], {}), '(ex)\n', (1619, 1623), False, 'from logzero import logger\n'), ((2648, 2699), 'logzero.logger.error', 'logger.error', (['"""Error when converting video to mp4."""'], {}), "('Error when converting video to mp4.')\n", (2660, 2699), False, 'from logzero import logger\n'), ((458, 472), 
'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (470, 472), False, 'from datetime import datetime\n')]
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Organization,OrganizationImages
from evelist.models import Event,EventImages
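# Registration form for an organization account, built on Django's UserCreationForm
# with additional organization-specific fields.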
class OrganizationRegisterForm(UserCreationForm):
name=forms.CharField(required=True, label="Organization Name")
email = forms.EmailField()
vision = forms.CharField(max_length=200, widget=forms.TextInput({}),label="Vision")
mission = forms.CharField(max_length=200, widget=forms.TextInput({}),label="Mission")
link=forms.CharField(required=True, label="Link")
class Meta:
model = User
fields = ['username','email','<PASSWORD>','<PASSWORD>','name','vision','mission','link']
class OrganizationUpdate(forms.ModelForm):
class Meta:
model=Organization
fields = ['name']
'''
class CreateEventForm(forms.Form):
name=forms.CharField(required=True, label="Event Name")
description = forms.CharField(max_length=200, widget=forms.TextInput({}),label="description")
venue=forms.CharField(required=True, label="Venue")
date=forms.DateField(widget=forms.SelectDateWidget())
'''
class CreateEventForm(forms.ModelForm):
class Meta:
model=Event
labels={"name":"Event Name","description":"Add Description","eventprofileImage":"Add Event Image","category":"category"}
fields=['name','description','venue','date','eventprofileImage','category']
exclude=['organizer']
class AddOrgImage(forms.ModelForm):
class Meta:
model=OrganizationImages
fields=['image']
exclude=['organization']
class AddImageForm(forms.ModelForm):
class Meta:
model=EventImages
fields=['i_event','image']
|
[
"django.forms.TextInput",
"django.forms.CharField",
"django.forms.EmailField"
] |
[((279, 336), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(True)', 'label': '"""Organization Name"""'}), "(required=True, label='Organization Name')\n", (294, 336), False, 'from django import forms\n'), ((346, 364), 'django.forms.EmailField', 'forms.EmailField', ([], {}), '()\n', (362, 364), False, 'from django import forms\n'), ((543, 587), 'django.forms.CharField', 'forms.CharField', ([], {'required': '(True)', 'label': '"""Link"""'}), "(required=True, label='Link')\n", (558, 587), False, 'from django import forms\n'), ((414, 433), 'django.forms.TextInput', 'forms.TextInput', (['{}'], {}), '({})\n', (429, 433), False, 'from django import forms\n'), ((500, 519), 'django.forms.TextInput', 'forms.TextInput', (['{}'], {}), '({})\n', (515, 519), False, 'from django import forms\n')]
|
"""
Boto S3 Router install script
"""
from setuptools import setup, find_packages
from pathlib import Path
import os
NAME = "boto-s3-router"
this_directory = Path(__file__).parent
LONG_DESCRIPTION = (this_directory / "README.md").read_text()
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = [
"boto3",
"fnmatch2",
]
setup(
name=NAME,
version=os.getenv('VERSION', '0.0.1'),
description="Provides a Boto3-like client routing requests to multiple S3 clients",
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author="Treeverse",
author_email="<EMAIL>",
url="https://github.com/treeverse/boto-s3-router",
keywords=["boto", "boto3", "lakeFS", "minio", "AWS", "s3", "router"],
python_requires=">=3.6",
install_requires=REQUIRES,
packages=find_packages(exclude="tests"),
include_package_data=True,
)
|
[
"pathlib.Path",
"os.getenv",
"setuptools.find_packages"
] |
[((164, 178), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (168, 178), False, 'from pathlib import Path\n'), ((470, 499), 'os.getenv', 'os.getenv', (['"""VERSION"""', '"""0.0.1"""'], {}), "('VERSION', '0.0.1')\n", (479, 499), False, 'import os\n'), ((933, 963), 'setuptools.find_packages', 'find_packages', ([], {'exclude': '"""tests"""'}), "(exclude='tests')\n", (946, 963), False, 'from setuptools import setup, find_packages\n')]
|
# SJTU EE208
import threading
import queue
import time
def get_page(page):
print('downloading page %s' % page)
time.sleep(0.5)
return g.get(page, [])
def get_all_links(content):
return content
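# Worker: each thread repeatedly takes a page off the shared queue, fetches it,
# enqueues its out-links, and records the result in graph/crawled under a lock.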
def working():
while True:
print("getting","left:",q.qsize())
page = q.get()
# if varLock.acquire():
if page not in crawled:
# varLock.release()
# else:
# varLock.release()
content = get_page(page)
outlinks = get_all_links(content)
for link in outlinks:
q.put(link)
if varLock.acquire():
graph[page] = outlinks
crawled.append(page)
varLock.release()
print(q.qsize())
q.task_done()
g = {'A': ['B', 'C', 'D'],
'B': ['E', 'F'],
'C': ['1', '2'],
'1': ['3', '4'],
'D': ['G', 'H'],
'E': ['I', 'J'],
'G': ['K', 'L'],
}
start = time.time()
NUM = 4
crawled = []
graph = {}
varLock = threading.Lock()
q = queue.Queue()
q.put('A')
for i in range(NUM):
t = threading.Thread(target=working)
t.setDaemon(True)
t.start()
q.join()
end = time.time()
print(end - start)
|
[
"threading.Thread",
"time.sleep",
"threading.Lock",
"time.time",
"queue.Queue"
] |
[((982, 993), 'time.time', 'time.time', ([], {}), '()\n', (991, 993), False, 'import time\n'), ((1036, 1052), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1050, 1052), False, 'import threading\n'), ((1057, 1070), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (1068, 1070), False, 'import queue\n'), ((1195, 1206), 'time.time', 'time.time', ([], {}), '()\n', (1204, 1206), False, 'import time\n'), ((122, 137), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (132, 137), False, 'import time\n'), ((1111, 1143), 'threading.Thread', 'threading.Thread', ([], {'target': 'working'}), '(target=working)\n', (1127, 1143), False, 'import threading\n')]
|
"""Test runway.cfngin.hooks.awslambda.models.responses."""
# pylint: disable=no-self-use,protected-access
from __future__ import annotations
import pytest
from pydantic import ValidationError
from awslambda.models.responses import AwsLambdaHookDeployResponse
class TestAwsLambdaHookDeployResponse:
"""Test AwsLambdaHookDeployResponse."""
def test_extra(self) -> None:
"""Test extra fields."""
with pytest.raises(ValidationError) as excinfo:
AwsLambdaHookDeployResponse(
bucket_name="test-bucket",
code_sha256="sha256",
invalid=True, # type: ignore
object_key="key",
runtime="test",
)
errors = excinfo.value.errors()
assert len(errors) == 1
assert errors[0]["loc"] == ("invalid",)
assert errors[0]["msg"] == "extra fields not permitted"
|
[
"pytest.raises",
"awslambda.models.responses.AwsLambdaHookDeployResponse"
] |
[((427, 457), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (440, 457), False, 'import pytest\n'), ((482, 610), 'awslambda.models.responses.AwsLambdaHookDeployResponse', 'AwsLambdaHookDeployResponse', ([], {'bucket_name': '"""test-bucket"""', 'code_sha256': '"""sha256"""', 'invalid': '(True)', 'object_key': '"""key"""', 'runtime': '"""test"""'}), "(bucket_name='test-bucket', code_sha256='sha256',\n invalid=True, object_key='key', runtime='test')\n", (509, 610), False, 'from awslambda.models.responses import AwsLambdaHookDeployResponse\n')]
|
from sana_pchr.reporting.recommender import *
from datetime import timedelta, date
from dateutil import rrule
import csv
calcs = [DIABETES_CALCULATOR, HYPERTENSION_CALCULATOR, DYSLIPIDEMIA_CALCULATOR]
clinics = [clinic for clinic in Clinic.objects.all() if "Test" not in clinic.name ]
start_date = date(2016,2,14)
end_date = date(2016,10,30)
#Parses the JSON-like format into flat rows that can be written to a CSV file
def parse_out(y, prefix):
out = {}
#recursive function that can take care of dicts or lists
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
i = 0
for a in x:
flatten(a, name + str(i) + '_')
i += 1
else:
out[name[:-1]] = x
for a in y:
flatten(a[1], prefix + '_' + a[0] + '_')
return out
#runs the analysis
def run():
for clinic in clinics:
last_date = date(2016,2,7)
with open(clinic.name + ".csv", mode="w") as outfile:
row = 1
for dt in rrule.rrule(rrule.WEEKLY, dtstart=start_date, until=end_date):
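                # each weekly iteration summarises the window from last_date to dt,
                # producing one CSV row per week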
summary_out = parse_out(RiskLevelCalculator.get_summary(clinic, last_date, dt), 'All')
calc_outs = [parse_out(calc.calculate(clinic, last_date, dt),calc.name) for calc in calcs]
combd = {}
for calc_out in calc_outs:
combd.update(calc_out)
combd.update(summary_out)
if row == 1:
fields = ["week_starting" , 'ASCVD Level_<10%', 'ASCVD Level_10-20%', 'ASCVD Level_20-30%', 'ASCVD Level_30-40%','ASCVD Level_>40%'] + sorted(combd)
writer = csv.DictWriter(outfile, fieldnames=fields)
writer.writeheader()
row = 2
#Need this workaround with ASCVD calc b/c grouping is only by ones present, need all for header
combd.update(parse_out(ASCVD_CALCULATOR.calculate(clinic, last_date, dt), 'ASCVD Level'))
combd.update({'week_starting': last_date.strftime("%Y-%m-%d")})
writer.writerow(combd)
last_date = dt
|
[
"csv.DictWriter",
"datetime.date",
"dateutil.rrule.rrule"
] |
[((300, 317), 'datetime.date', 'date', (['(2016)', '(2)', '(14)'], {}), '(2016, 2, 14)\n', (304, 317), False, 'from datetime import timedelta, date\n'), ((327, 345), 'datetime.date', 'date', (['(2016)', '(10)', '(30)'], {}), '(2016, 10, 30)\n', (331, 345), False, 'from datetime import timedelta, date\n'), ((998, 1014), 'datetime.date', 'date', (['(2016)', '(2)', '(7)'], {}), '(2016, 2, 7)\n', (1002, 1014), False, 'from datetime import timedelta, date\n'), ((1117, 1178), 'dateutil.rrule.rrule', 'rrule.rrule', (['rrule.WEEKLY'], {'dtstart': 'start_date', 'until': 'end_date'}), '(rrule.WEEKLY, dtstart=start_date, until=end_date)\n', (1128, 1178), False, 'from dateutil import rrule\n'), ((1773, 1815), 'csv.DictWriter', 'csv.DictWriter', (['outfile'], {'fieldnames': 'fields'}), '(outfile, fieldnames=fields)\n', (1787, 1815), False, 'import csv\n')]
|
from time import sleep
from org.jointheleague.ecolban.rpirobot import SimpleIRobot, Sonar
robot = SimpleIRobot()
sonar = Sonar()
def setup():
# Initialization code here
pass
def loop():
# Repeating code here
return True
def shutdown():
robot.reset()
robot.stop()
robot.closeConnection()
setup()
while loop():
pass
shutdown()
|
[
"org.jointheleague.ecolban.rpirobot.SimpleIRobot",
"org.jointheleague.ecolban.rpirobot.Sonar"
] |
[((99, 113), 'org.jointheleague.ecolban.rpirobot.SimpleIRobot', 'SimpleIRobot', ([], {}), '()\n', (111, 113), False, 'from org.jointheleague.ecolban.rpirobot import SimpleIRobot, Sonar\n'), ((122, 129), 'org.jointheleague.ecolban.rpirobot.Sonar', 'Sonar', ([], {}), '()\n', (127, 129), False, 'from org.jointheleague.ecolban.rpirobot import SimpleIRobot, Sonar\n')]
|
# 0702.py
import cv2
import numpy as np
src = cv2.imread('./data/rect.jpg')
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 100)
lines = cv2.HoughLines(edges, rho = 1, theta = np.pi/180.0, threshold = 100)
print('lines.shape = ', lines.shape)
for line in lines:
rho, theta = line[0]
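    # convert the line's polar parameters (rho, theta) into two distant endpoints so cv2.line can draw it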
c = np.cos(theta)
s = np.sin(theta)
x0 = c * rho
y0 = s * rho
x1 = int(x0 + 1000 * (-s))
y1 = int(y0 + 1000 * (c))
x2 = int(x0 - 1000 * (-s))
y2 = int(y0 - 1000 * (c))
cv2.line(src, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imshow('edges', edges)
cv2.imshow('src', src)
cv2.waitKey()
cv2.destroyAllWindows()
|
[
"cv2.line",
"cv2.Canny",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imread",
"numpy.sin",
"cv2.HoughLines",
"numpy.cos",
"cv2.imshow"
] |
[((53, 82), 'cv2.imread', 'cv2.imread', (['"""./data/rect.jpg"""'], {}), "('./data/rect.jpg')\n", (63, 82), False, 'import cv2\n'), ((91, 128), 'cv2.cvtColor', 'cv2.cvtColor', (['src', 'cv2.COLOR_BGR2GRAY'], {}), '(src, cv2.COLOR_BGR2GRAY)\n', (103, 128), False, 'import cv2\n'), ((138, 162), 'cv2.Canny', 'cv2.Canny', (['gray', '(50)', '(100)'], {}), '(gray, 50, 100)\n', (147, 162), False, 'import cv2\n'), ((172, 236), 'cv2.HoughLines', 'cv2.HoughLines', (['edges'], {'rho': '(1)', 'theta': '(np.pi / 180.0)', 'threshold': '(100)'}), '(edges, rho=1, theta=np.pi / 180.0, threshold=100)\n', (186, 236), False, 'import cv2\n'), ((593, 619), 'cv2.imshow', 'cv2.imshow', (['"""edges"""', 'edges'], {}), "('edges', edges)\n", (603, 619), False, 'import cv2\n'), ((621, 643), 'cv2.imshow', 'cv2.imshow', (['"""src"""', 'src'], {}), "('src', src)\n", (631, 643), False, 'import cv2\n'), ((645, 658), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (656, 658), False, 'import cv2\n'), ((660, 683), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (681, 683), False, 'import cv2\n'), ((336, 349), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (342, 349), True, 'import numpy as np\n'), ((359, 372), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (365, 372), True, 'import numpy as np\n'), ((540, 589), 'cv2.line', 'cv2.line', (['src', '(x1, y1)', '(x2, y2)', '(0, 0, 255)', '(2)'], {}), '(src, (x1, y1), (x2, y2), (0, 0, 255), 2)\n', (548, 589), False, 'import cv2\n')]
|
"""
CubicWeb instance request (user mode)
=====================================
Credit: <NAME>
pycaravel is a Python package that enables you to parse various sources of data.
In this tutorial you will learn how to parse and search in a CubicWeb instance.
First checks
------------
In order to test if the pycaravel package is installed on your machine, you can
check the package version.
"""
import caravel
print(caravel.__version__)
#############################################################################
# Now you can run the configuration info function to see if all the
# dependencies are installed properly:
print(caravel.info())
#############################################################################
# Create a parser for your project
# --------------------------------
#
# The package provides a common interface to parse a CubicWeb instance. The
# parsing rules are defined per project in the module, so we will need to
# specify the project name you are working on. For the moment it is not
# possible to specify these rules via the API.
parser = caravel.get_parser(
project="herby",
layoutdir="/neurospin/tmp/pycaravel/layout")
#############################################################################
# You can now list the available configurations for your project, and the
# pre-generated layout representations. Note that these
# representations are sorted by date, and that the latest one will be used.
from pprint import pprint
pprint(parser.conf)
pprint(parser.representation)
#############################################################################
# You can export the whole 'sourcedata' layout in a pandas DataFrame.
print(parser.export_layout("sourcedata"))
#############################################################################
# It is also possible to filter this dataset. You first need to list all the
# available filtering keys, then list all the available values for the
# filtering key(s) of interest, and finally filter your dataset.
print(parser.list_keys("sourcedata"))
print(parser.list_values("sourcedata", "modality"))
print(parser.list_values("sourcedata", "center"))
search1 = parser.filter_layout(
"sourcedata", modality="T1w|T2w", extension="NIFTI", session="V04",
center="igr.fr")
print(search1)
#############################################################################
# Finally you may want to ask the system to load the filtered data. Only a
# couple of file extensions are supported. If no loader is found, the
# filename is returned. Using the shopping cart mechanism you have downloaded
# your data to a custom folder. You need to specify this server-to-local-machine
# mapping by setting the 'replace' parameter.
data1 = parser.load_data(
"sourcedata", search1,
replace=("/neurospin/radiomics_pub", "/neurospin/radiomics_pub"))
pprint(data1)
#############################################################################
# And for the phenotype
# ---------------------
#
# We can do the same for the phenotype
print(parser.list_keys("phenotype"))
print(parser.list_values("phenotype", "questionnaire"))
print(parser.list_values("phenotype", "subject"))
search2 = parser.filter_layout("phenotype", questionnaire="mcld",
subject="175643|278350")
print(search2)
data2 = parser.load_data("phenotype", search2)
pprint(data2)
|
[
"caravel.info",
"caravel.get_parser",
"pprint.pprint"
] |
[((1080, 1165), 'caravel.get_parser', 'caravel.get_parser', ([], {'project': '"""herby"""', 'layoutdir': '"""/neurospin/tmp/pycaravel/layout"""'}), "(project='herby', layoutdir='/neurospin/tmp/pycaravel/layout'\n )\n", (1098, 1165), False, 'import caravel\n'), ((1494, 1513), 'pprint.pprint', 'pprint', (['parser.conf'], {}), '(parser.conf)\n', (1500, 1513), False, 'from pprint import pprint\n'), ((1514, 1543), 'pprint.pprint', 'pprint', (['parser.representation'], {}), '(parser.representation)\n', (1520, 1543), False, 'from pprint import pprint\n'), ((2863, 2876), 'pprint.pprint', 'pprint', (['data1'], {}), '(data1)\n', (2869, 2876), False, 'from pprint import pprint\n'), ((3373, 3386), 'pprint.pprint', 'pprint', (['data2'], {}), '(data2)\n', (3379, 3386), False, 'from pprint import pprint\n'), ((633, 647), 'caravel.info', 'caravel.info', ([], {}), '()\n', (645, 647), False, 'import caravel\n')]
|
# coding = utf-8
import copy
import json
import gatlin.infra.commonUtils as util
import gatlin.infra.print as pt
import gatlin.nodes.parserSelector as ps
# 读取需要进行测试的flow全集
def launch_flows_config(location):
flow_json_file = location
with open(flow_json_file) as fl:
flows_config = json.loads(fl.read())
return flows_config
def parse_one_flowX(flow_name, nodes, environ, init_param):
pt.print_green('*' * 45 + ('PARSING %s' % flow_name) + ' BEGIN' + '*' * 45)
context = {}
context['environ'] = copy.deepcopy(environ) # environ抽到全局main-flow
context['request'] = {}
context['response'] = {}
context['session'] = init_param
context['misc'] = {'canProceed': True}
for node in nodes:
util.inject_all(context['environ'], node)
context['environ']['skip'] = False
node_parser = ps.fetch_parser(node['nodeName'])(context)
node_parser.lock_and_load()
if not node_parser.can_proceed():
pt.print_yellow("DUE TO [==%s==] THE FLOW HAS TO STOP." % context['misc']['reason'])
pt.print_yellow('#' * 35 + "NODE %s CANNOT PROCEED" % node['nodeName'] + '#' * 35)
pt.print_red('*' * 45 + ('PARSING %s' % flow_name) + ' ABORTED' + '*' * 45)
break
context['environ'] = copy.deepcopy(environ) # 每次清洗environ防止前后的污染,而session由node来管理
context['request'] = {}
context['response'] = {}
pt.print_green('*' * 45 + ('PARSING %s' % flow_name) + ' ENDED' + '*' * 45)
if __name__ == '__main__':
print(launch_flows_config("../input/flows.json"))
|
[
"copy.deepcopy",
"gatlin.infra.commonUtils.inject_all",
"gatlin.infra.print.print_yellow",
"gatlin.nodes.parserSelector.fetch_parser",
"gatlin.infra.print.print_green",
"gatlin.infra.print.print_red"
] |
[((412, 485), 'gatlin.infra.print.print_green', 'pt.print_green', (["('*' * 45 + 'PARSING %s' % flow_name + ' BEGIN' + '*' * 45)"], {}), "('*' * 45 + 'PARSING %s' % flow_name + ' BEGIN' + '*' * 45)\n", (426, 485), True, 'import gatlin.infra.print as pt\n'), ((530, 552), 'copy.deepcopy', 'copy.deepcopy', (['environ'], {}), '(environ)\n', (543, 552), False, 'import copy\n'), ((1431, 1504), 'gatlin.infra.print.print_green', 'pt.print_green', (["('*' * 45 + 'PARSING %s' % flow_name + ' ENDED' + '*' * 45)"], {}), "('*' * 45 + 'PARSING %s' % flow_name + ' ENDED' + '*' * 45)\n", (1445, 1504), True, 'import gatlin.infra.print as pt\n'), ((744, 785), 'gatlin.infra.commonUtils.inject_all', 'util.inject_all', (["context['environ']", 'node'], {}), "(context['environ'], node)\n", (759, 785), True, 'import gatlin.infra.commonUtils as util\n'), ((1300, 1322), 'copy.deepcopy', 'copy.deepcopy', (['environ'], {}), '(environ)\n', (1313, 1322), False, 'import copy\n'), ((851, 884), 'gatlin.nodes.parserSelector.fetch_parser', 'ps.fetch_parser', (["node['nodeName']"], {}), "(node['nodeName'])\n", (866, 884), True, 'import gatlin.nodes.parserSelector as ps\n'), ((984, 1073), 'gatlin.infra.print.print_yellow', 'pt.print_yellow', (["('DUE TO [==%s==] THE FLOW HAS TO STOP.' % context['misc']['reason'])"], {}), "('DUE TO [==%s==] THE FLOW HAS TO STOP.' % context['misc'][\n 'reason'])\n", (999, 1073), True, 'import gatlin.infra.print as pt\n'), ((1081, 1168), 'gatlin.infra.print.print_yellow', 'pt.print_yellow', (["('#' * 35 + 'NODE %s CANNOT PROCEED' % node['nodeName'] + '#' * 35)"], {}), "('#' * 35 + 'NODE %s CANNOT PROCEED' % node['nodeName'] + \n '#' * 35)\n", (1096, 1168), True, 'import gatlin.infra.print as pt\n'), ((1177, 1250), 'gatlin.infra.print.print_red', 'pt.print_red', (["('*' * 45 + 'PARSING %s' % flow_name + ' ABORTED' + '*' * 45)"], {}), "('*' * 45 + 'PARSING %s' % flow_name + ' ABORTED' + '*' * 45)\n", (1189, 1250), True, 'import gatlin.infra.print as pt\n')]
|
import os
import pathlib
import string
import subprocess
from elftools.elf.constants import SH_FLAGS
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
from fuzzware_harness.util import bytes2int
from fuzzware_pipeline.logging_handler import logging_handler
logger = logging_handler().get_logger("pipeline")
PRINTABLE_ASCIIVALS = frozenset(map(ord, string.printable))
OBJCOPY_UTIL = "arm-none-eabi-objcopy"
DYNAMICALLY_ADDED_REGION_NAME_PREFIX = "dynamically_added_crash_region_"
# From cortexm_memory.yml
DEFAULT_MEM_MAP = {
"ram": {"base_addr": 0x20000000, "size": 0x00100000, "permissions": "rw-"},
"mmio": {"base_addr": 0x40000000, "size": 0x20000000, "permissions": "rw-"},
"nvic": {"base_addr": 0xe0000000, "size": 0x10000000, "permissions": "rw-"},
"irq_ret": {"base_addr": 0xfffff000, "size": 0x1000, "permissions": "--x"}
}
# Some padded size after raw ROM contents
DEFAULT_ADD_TEXT_SIZE = 0x10000
ELF_MAGIC = b"\x7f\x45\x4c\x46"
def is_elf(path):
with open(path, "rb") as f:
magic = f.read(len(ELF_MAGIC))
from binascii import hexlify
logger.info(f"looking at file contents: {hexlify(magic)} == {hexlify(ELF_MAGIC)}")
return magic == ELF_MAGIC
def extract_elf(in_path, out_path):
assert is_elf(in_path)
subprocess.check_call([OBJCOPY_UTIL, "-O", "binary", in_path, out_path])
def collect_pointers(binary_contents):
pointers = []
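    # On Cortex-M, the image begins with the vector table: word 0 holds the initial
    # stack pointer and word 1 the reset handler address.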
initial_sp, reset_vector_addr = bytes2int(binary_contents[:4]), bytes2int(binary_contents[4:8])
logger.info(f"Got reset vector: 0x{reset_vector_addr:08x}")
min_rom_ptr, max_rom_ptr = reset_vector_addr, reset_vector_addr
def is_rom_ptr(addr, curr_min, curr_max):
if addr < 8:
return False
# Check range
outer_edge_size = len(binary_contents)-(curr_max - curr_min)
return curr_min-outer_edge_size <= addr <= curr_max + outer_edge_size
for i in range(8, len(binary_contents), 4):
val = bytes2int(binary_contents[i:i+4])
if is_rom_ptr(val, min_rom_ptr, max_rom_ptr):
if val < min_rom_ptr:
min_rom_ptr = val
elif val > max_rom_ptr:
max_rom_ptr = val
pointers.append(val)
return initial_sp, reset_vector_addr, pointers
def has_ascii_at_offset(binary_contents, offset, min_len=8):
if len(binary_contents) < offset + min_len:
return False
res = all(map(lambda ind: binary_contents[offset+ind] in PRINTABLE_ASCIIVALS, range(min_len)))
return res
THUMB_OPC_PUSH = 0xB5
THUMB_OPC_STMFD1 = 0x2D
THUMB_OPC_STMFD2 = 0xE9
THUMB_OPC_INFLOOP = 0xE7FE
FN_PROLOGUE_OPCODES = (THUMB_OPC_PUSH, THUMB_OPC_STMFD1, THUMB_OPC_STMFD2)
def has_fn_prologue_at_offset(binary_contents, binary_offset):
if binary_offset & 1 != 1:
return False
if len(binary_contents) <= binary_offset:
return False
# Remove thumb bit
binary_offset &= ~1
res = binary_contents[binary_offset+1] in FN_PROLOGUE_OPCODES
if not res:
res = THUMB_OPC_INFLOOP == bytes2int(binary_contents[binary_offset:binary_offset+2])
if res:
logger.info(f"Found function prologue at offset {binary_offset:x}")
return res
def can_be_good_offset(binary_contents, ptr, base_offset):
binary_offset = ptr - base_offset
if binary_offset < 0 or binary_offset > len(binary_contents):
return False
# We are pointing inside the image, let's see now
# 1. Is string?
if has_ascii_at_offset(binary_contents, binary_offset):
logger.info(f"Found ascii! (ptr 0x{ptr:08x}, offset: {base_offset:x}")
return True
# 2. Is function pointer?
if has_fn_prologue_at_offset(binary_contents, binary_offset):
return True
return False
PAGE_SIZE = 0x1000
PAGE_MASK = PAGE_SIZE - 1
def find_text_mapping(binary_path):
# Find by raw binary
# We do this via FirmXRay's algorithm:
# 1. scan for pointer values
# 2. Guess values based on found pointers and check whether pointers point to functions/strings
# 3. Choose base address with most matches
with open(binary_path, "rb") as f:
binary_contents = f.read()
aligned_contents_len = len(binary_contents)
if aligned_contents_len & PAGE_MASK:
aligned_contents_len = (aligned_contents_len & (~PAGE_MASK)) + PAGE_SIZE
initial_sp, reset_vector, pointers = collect_pointers(binary_contents)
pointers = sorted(set(pointers))
min_ptr, max_ptr = pointers[0], pointers[-1]
_, _, aligned_reset_vector = min_ptr & (~PAGE_MASK), max_ptr & (~PAGE_MASK), reset_vector & (~PAGE_MASK)
first_offset_candidate = -aligned_contents_len # -min(aligned_contents_len, aligned_min_ptr)
#print("first oc {:x}".format(first_offset_candidate))
#print("reset_vector {:x}".format(reset_vector))
if (reset_vector - first_offset_candidate) < 0: #sanity check, necessary for certain boards
first_offset_candidate = 0
#print("first oc {:x}".format(first_offset_candidate))
last_offset_candidate = aligned_contents_len
matches_per_offset = {}
for offset_candidate in range(first_offset_candidate, last_offset_candidate, PAGE_SIZE):
logger.info(f"Checking offset candidate: {offset_candidate}")
matches_per_offset[offset_candidate] = sum(map(lambda ptr: can_be_good_offset(binary_contents, ptr-aligned_reset_vector, offset_candidate), pointers))
best_candidates = sorted(matches_per_offset.items(), key=lambda entry: matches_per_offset[entry[0]])
best_candidate_offset = best_candidates[-1][0]
base_addr = aligned_reset_vector + best_candidate_offset
logger.info(f"Got base address: 0x{base_addr:08x} with {matches_per_offset[best_candidate_offset]} plausible address matches (second best: {best_candidates[-2][1]}).")
return initial_sp, base_addr, os.stat(binary_path).st_size + DEFAULT_ADD_TEXT_SIZE
def merge_adjacent_regions(memregion_config):
"""
Merge scattered memory regions into consecutive regions
"""
region_ends = {
entry["base_addr"]+entry["size"]: region_name for region_name, entry in memregion_config.items()
}
removed_region_names = []
for region_name in memregion_config:
start, size = memregion_config[region_name]['base_addr'], memregion_config[region_name]['size']
# Is our region the start of another region?
if start in region_ends:
adjacent_region_name = region_ends.pop(start)
memregion_config[adjacent_region_name]["size"] += size
region_ends[start+size] = adjacent_region_name
# Remove now merged_in fragment
memregion_config[region_name] = None
removed_region_names.append(region_name)
for rname in removed_region_names:
del memregion_config[rname]
def add_missing_regions(existing_mem_config, add_entries):
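    # Merge each candidate region into the existing memory map: grow overlapping
    # regions backwards/forwards as needed, otherwise add the region as a new entry.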
for rname, entry in add_entries.items():
start = entry['base_addr']
end = start + entry['size']
should_add = True
logger.info(f"Looking at region to add: {rname} ({start:x}-{end:x})")
consumed_region_names = set()
sorted_other_regions = sorted(existing_mem_config, key=lambda k: existing_mem_config[k]['base_addr'])
for i, other_rname in enumerate(sorted_other_regions):
if other_rname in consumed_region_names:
continue
other_entry = existing_mem_config[other_rname]
other_start = other_entry['base_addr']
other_end = other_start + other_entry['size']
print(f"comparing to {other_rname} ({other_start:x}-{other_end:x})")
# Need to extend next region backwards?
if start < other_start <= end:
logger.info(f"Setting start of region {other_rname} ({other_start:x}-{other_end:x}) to {start:x}")
prepend_size = other_start - start
other_start = start
other_entry['base_addr'] = other_start
other_entry['size'] += prepend_size
# Do we also need to extend other region forward?
if other_start <= start <= other_end < end:
# If we need to extend the other region forward, make sure not to clash with the region following that
if i+1 < len(sorted_other_regions):
next_region_name = sorted_other_regions[i+1]
next_start = existing_mem_config[next_region_name]['base_addr']
if end > next_start:
# We got a collision. Is it dynamically added?
if DYNAMICALLY_ADDED_REGION_NAME_PREFIX in next_region_name:
logger.warn(f"While extending forward, collided with dynamically added region {next_region_name}, consuming it")
consumed_region_names.add(next_region_name)
next_end = next_start + existing_mem_config[next_region_name]['size']
end = max(end, next_end)
del existing_mem_config[next_region_name]
else:
logger.warn("While extending forward, collided with next region, setting end to other region's start")
end = next_start
append_size = end - other_end
other_end = end
logger.info(f"Extending end of region {other_rname} ({other_start:x}-{other_start+other_entry['size']:x}) to {other_end:x}")
other_entry['size'] += append_size
# Fully contained? Then we added it or it was already included
if other_start <= start <= other_end and other_start <= end <= other_end:
logger.info(f"Region {rname} ({start:x}-{end:x}) fully contained in region {other_rname}")
should_add = False
break
# We did not find an overlap, so add the section
if should_add:
logger.info(f"Adding memory region {rname} ({start:#10x}-{end:#10x}) to config")
while rname in existing_mem_config:
rname = "_" + rname
existing_mem_config[rname] = {**entry}
def align_mem_map_to_pages(mem_config):
"""
Given an already non-colliding memory map, we make
sure that two regions are not on the same page boundary.
"""
sorted_region_names = sorted(mem_config, key=lambda reg_name: mem_config[reg_name]['base_addr'])
region_indices_to_eliminate = []
for i, region_name in enumerate(sorted_region_names):
if i == len(sorted_region_names):
break
if i in region_indices_to_eliminate:
continue
cur_start = mem_config[region_name]['base_addr']
cur_size = mem_config[region_name]['size']
cur_end = cur_start + cur_size
logger.info(f"[align_mem_map_to_pages] looking at region '{region_name}', base: {cur_start:#010x}, size: {cur_size:#x}")
# If we are aligned, there is no need to shift anything
if cur_end & PAGE_MASK == 0:
continue
next_start = mem_config[sorted_region_names[i+1]]['base_addr']
if cur_end & ~PAGE_MASK == next_start & ~PAGE_MASK:
logger.warning(f"Regions {region_name} and {sorted_region_names[i+1]} end/start on the same page, unaligned.")
cur_size += PAGE_SIZE - (cur_end % PAGE_SIZE)
next_shrink_size = PAGE_SIZE - (next_start % PAGE_SIZE)
next_start += next_shrink_size
logger.warning(f"Adjusting {region_name} size to {cur_size:08x}.")
logger.warning(f"Adjusting {sorted_region_names[i+1]} start to {next_start:#010x}.")
mem_config[sorted_region_names[i+1]]['base_addr'] = next_start
if next_shrink_size <= mem_config[sorted_region_names[i+1]]['size']:
mem_config[sorted_region_names[i+1]]['size'] -= next_shrink_size
else:
logger.warning(f"Fully removing region {sorted_region_names[i+1]} which spanned less than a page")
region_indices_to_eliminate.append(i+1)
mem_config[region_name]['size'] = cur_size
# TODO: We might have different permissions here. But if they differed,
# that would not have worked on most architectures anyways.
# What we could do instead is create a single-page region with merged permissions
for i in region_indices_to_eliminate:
del mem_config[sorted_region_names[i]]
def collect_and_merge_elf_segments(elf_path):
res = load_elf_segment_mem_regions(elf_path)
merge_adjacent_regions(res)
return res
def add_mem_map(config_basedir, config_map, binary_path, elf_path, ivt_offset):
if "memory_map" not in config_map:
config_map["memory_map"] = {}
mem_cfg = config_map["memory_map"]
# Fill from default memory map
for memregion_name, memregion_config in DEFAULT_MEM_MAP.items():
if memregion_name not in mem_cfg:
mem_cfg[memregion_name] = memregion_config
binary_already_mapped = False
abs_binpath = os.path.abspath(binary_path)
for region_config in mem_cfg.values():
f = region_config.get("file")
if f and f == abs_binpath:
binary_already_mapped = True
break
if not binary_already_mapped:
# We will register the binary image as "text", make sure it is not taken
assert "text" not in mem_cfg
_, text_base, text_size = find_text_mapping(binary_path)
mem_cfg["text"] = {
"base_addr": text_base,
"size": text_size,
"ivt_offset" : ivt_offset,
# get the relative path
"file": str(pathlib.Path(binary_path).relative_to(pathlib.Path(config_basedir))),
"permissions": "r-x"
}
if elf_path:
elf_memory_regions = collect_and_merge_elf_segments(elf_path)
logger.info(f"collected ELF memory regions: {elf_memory_regions}")
add_missing_regions(config_map['memory_map'], elf_memory_regions)
align_mem_map_to_pages(config_map['memory_map'])
def gen_syms(elf_path):
# Based on https://github.com/eliben/pyelftools/blob/master/scripts/readelf.py
res = {}
with open(elf_path, "rb") as f:
elffile = ELFFile(f)
symbol_tables = [(idx, s) for idx, s in enumerate(elffile.iter_sections())
if isinstance(s, SymbolTableSection)]
if not symbol_tables and elffile.num_sections() == 0:
logger.warning("No symbol sections...")
return res
for _, section in symbol_tables:
if section['sh_entsize'] == 0:
logger.warning("section['sh_entsize'] == 0")
# Symbol table has no entries
continue
for _, symbol in enumerate(section.iter_symbols()):
if symbol.name and "$" not in symbol.name:
res[symbol['st_value']] = symbol.name
return res
def load_elf_segment_mem_regions(elf_path):
# Based on https://github.com/eliben/pyelftools/blob/master/scripts/readelf.py
res = {}
with open(elf_path, "rb") as f:
elffile = ELFFile(f)
if elffile.num_sections() == 0:
return res
for section in elffile.iter_sections():
if (section['sh_flags'] & SH_FLAGS.SHF_ALLOC) == 0:
logger.debug(f"Section {section.name} does not have alloc flag set, skipping")
continue
if section['sh_size'] == 0:
logger.debug(f"Section {section.name} has 0 size, skipping")
continue
res[section.name] = {
'base_addr': section['sh_addr'],
'size': section['sh_size'],
'permissions': ('r'
+ ("w" if section['sh_flags'] & SH_FLAGS.SHF_WRITE else "-")
+ ("x" if section['sh_flags'] & SH_FLAGS.SHF_EXECINSTR else "-")
)
}
return res
def gen_configs(config_basedir, config_map, binary_path, elf_path, ivt_offset=0, ti_flag=False):
#check for proprietary header in binary file
#check_for_header(binary_path)
add_mem_map(config_basedir, config_map, binary_path, elf_path, ivt_offset)
if elf_path and 'symbols' not in config_map:
logger.info("Generating symbols")
config_map['symbols'] = gen_syms(elf_path)
if 'interrupt_triggers' not in config_map:
config_map['interrupt_triggers'] = {
"trigger": {
"fuzz_mode": "round_robin",
"every_nth_tick": 1000
}
}
#necessary actions for some texas instruments samples
if ti_flag:
#add rom region
config_map['memory_map']['ti_rom'] = {
"base_addr": 0x10000000,
"file": ti_flag,
"size": 0x20000,
"permissions": "r-x"
}
print(config_map)
#change ram size to 0x400000
config_map['memory_map']['ram']['size'] = 0x400000
#add is_entry = True to text
config_map['memory_map']['text']['is_entry'] = True
NUM_CRASH_MAPPED_AROUND_PAGES = 5
def add_region_for_crashing_addr(config_map, crash_addr):
page_start = crash_addr & ~PAGE_MASK
mapping_distance = NUM_CRASH_MAPPED_AROUND_PAGES * PAGE_SIZE
new_region_entry = {
f'{DYNAMICALLY_ADDED_REGION_NAME_PREFIX}{crash_addr:08x}': {
'base_addr': max(page_start - mapping_distance, 0),
'size': 2 * mapping_distance,
'permissions': 'rw-'
}
}
logger.info(f"Adding region for crash address 0x{crash_addr:x}: {new_region_entry}")
logger.warning("If you suspect this region to be an mmio-region, manually preface it with 'mmio' to make sure that it is detected by fuzzware")
add_missing_regions(config_map['memory_map'], new_region_entry)
align_mem_map_to_pages(config_map['memory_map'])
|
[
"os.path.abspath",
"os.stat",
"binascii.hexlify",
"elftools.elf.elffile.ELFFile",
"pathlib.Path",
"fuzzware_pipeline.logging_handler.logging_handler",
"fuzzware_harness.util.bytes2int",
"subprocess.check_call"
] |
[((1308, 1380), 'subprocess.check_call', 'subprocess.check_call', (["[OBJCOPY_UTIL, '-O', 'binary', in_path, out_path]"], {}), "([OBJCOPY_UTIL, '-O', 'binary', in_path, out_path])\n", (1329, 1380), False, 'import subprocess\n'), ((13170, 13198), 'os.path.abspath', 'os.path.abspath', (['binary_path'], {}), '(binary_path)\n', (13185, 13198), False, 'import os\n'), ((312, 329), 'fuzzware_pipeline.logging_handler.logging_handler', 'logging_handler', ([], {}), '()\n', (327, 329), False, 'from fuzzware_pipeline.logging_handler import logging_handler\n'), ((1478, 1508), 'fuzzware_harness.util.bytes2int', 'bytes2int', (['binary_contents[:4]'], {}), '(binary_contents[:4])\n', (1487, 1508), False, 'from fuzzware_harness.util import bytes2int\n'), ((1510, 1541), 'fuzzware_harness.util.bytes2int', 'bytes2int', (['binary_contents[4:8]'], {}), '(binary_contents[4:8])\n', (1519, 1541), False, 'from fuzzware_harness.util import bytes2int\n'), ((2001, 2036), 'fuzzware_harness.util.bytes2int', 'bytes2int', (['binary_contents[i:i + 4]'], {}), '(binary_contents[i:i + 4])\n', (2010, 2036), False, 'from fuzzware_harness.util import bytes2int\n'), ((14370, 14380), 'elftools.elf.elffile.ELFFile', 'ELFFile', (['f'], {}), '(f)\n', (14377, 14380), False, 'from elftools.elf.elffile import ELFFile\n'), ((15279, 15289), 'elftools.elf.elffile.ELFFile', 'ELFFile', (['f'], {}), '(f)\n', (15286, 15289), False, 'from elftools.elf.elffile import ELFFile\n'), ((3078, 3137), 'fuzzware_harness.util.bytes2int', 'bytes2int', (['binary_contents[binary_offset:binary_offset + 2]'], {}), '(binary_contents[binary_offset:binary_offset + 2])\n', (3087, 3137), False, 'from fuzzware_harness.util import bytes2int\n'), ((1167, 1181), 'binascii.hexlify', 'hexlify', (['magic'], {}), '(magic)\n', (1174, 1181), False, 'from binascii import hexlify\n'), ((1187, 1205), 'binascii.hexlify', 'hexlify', (['ELF_MAGIC'], {}), '(ELF_MAGIC)\n', (1194, 1205), False, 'from binascii import hexlify\n'), ((5866, 5886), 'os.stat', 'os.stat', (['binary_path'], {}), '(binary_path)\n', (5873, 5886), False, 'import os\n'), ((13825, 13853), 'pathlib.Path', 'pathlib.Path', (['config_basedir'], {}), '(config_basedir)\n', (13837, 13853), False, 'import pathlib\n'), ((13787, 13812), 'pathlib.Path', 'pathlib.Path', (['binary_path'], {}), '(binary_path)\n', (13799, 13812), False, 'import pathlib\n')]
|
import unittest
import os
import tempfile
import uuid
from studio import model
from model_test import get_test_experiment
# We are not currently working with HTTP providers.
@unittest.skip
class HTTPProviderHostedTest(unittest.TestCase):
def get_db_provider(self, config_name):
config_file = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
config_name)
return model.get_db_provider(model.get_config(config_file))
def test_add_get_delete_experiment(self):
with self.get_db_provider('test_config_http_client.yaml') as hp:
experiment_tuple = get_test_experiment()
hp.add_experiment(experiment_tuple[0])
experiment = hp.get_experiment(experiment_tuple[0].key)
self.assertEquals(experiment.key, experiment_tuple[0].key)
self.assertEquals(
experiment.filename,
experiment_tuple[0].filename)
self.assertEquals(experiment.args, experiment_tuple[0].args)
hp.delete_experiment(experiment_tuple[1])
self.assertTrue(hp.get_experiment(experiment_tuple[1]) is None)
def test_start_experiment(self):
with self.get_db_provider('test_config_http_client.yaml') as hp:
experiment_tuple = get_test_experiment()
hp.add_experiment(experiment_tuple[0])
hp.start_experiment(experiment_tuple[0])
experiment = hp.get_experiment(experiment_tuple[1])
self.assertTrue(experiment.status == 'running')
self.assertEquals(experiment.key, experiment_tuple[0].key)
self.assertEquals(
experiment.filename,
experiment_tuple[0].filename)
self.assertEquals(experiment.args, experiment_tuple[0].args)
hp.finish_experiment(experiment_tuple[0])
hp.delete_experiment(experiment_tuple[1])
def test_add_get_experiment_artifacts(self):
experiment_tuple = get_test_experiment()
e_experiment = experiment_tuple[0]
e_artifacts = e_experiment.artifacts
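        # attach one immutable ('a1') and one mutable ('a2') artifact backed by temp files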
a1_filename = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
a2_filename = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
with open(a1_filename, 'w') as f:
f.write('hello world')
e_artifacts['a1'] = {
'local': a1_filename,
'mutable': False
}
e_artifacts['a2'] = {
'local': a2_filename,
'mutable': True
}
with self.get_db_provider('test_config_http_client.yaml') as db:
db.add_experiment(e_experiment)
experiment = db.get_experiment(e_experiment.key)
self.assertEquals(experiment.key, e_experiment.key)
self.assertEquals(experiment.filename, e_experiment.filename)
self.assertEquals(experiment.args, e_experiment.args)
db.delete_experiment(e_experiment.key)
os.remove(a1_filename)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"os.remove",
"uuid.uuid4",
"model_test.get_test_experiment",
"os.path.realpath",
"tempfile.gettempdir",
"studio.model.get_config"
] |
[((3064, 3079), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3077, 3079), False, 'import unittest\n'), ((2011, 2032), 'model_test.get_test_experiment', 'get_test_experiment', ([], {}), '()\n', (2030, 2032), False, 'from model_test import get_test_experiment\n'), ((457, 486), 'studio.model.get_config', 'model.get_config', (['config_file'], {}), '(config_file)\n', (473, 486), False, 'from studio import model\n'), ((640, 661), 'model_test.get_test_experiment', 'get_test_experiment', ([], {}), '()\n', (659, 661), False, 'from model_test import get_test_experiment\n'), ((1313, 1334), 'model_test.get_test_experiment', 'get_test_experiment', ([], {}), '()\n', (1332, 1334), False, 'from model_test import get_test_experiment\n'), ((2157, 2178), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (2176, 2178), False, 'import tempfile\n'), ((2234, 2255), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (2253, 2255), False, 'import tempfile\n'), ((3008, 3030), 'os.remove', 'os.remove', (['a1_filename'], {}), '(a1_filename)\n', (3017, 3030), False, 'import os\n'), ((366, 392), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (382, 392), False, 'import os\n'), ((2184, 2196), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2194, 2196), False, 'import uuid\n'), ((2261, 2273), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2271, 2273), False, 'import uuid\n')]
|
"""
##################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : builder.py
# Abstract :
# Current Version: 1.0.0
# Date : 2020-05-31
##################################################################################################
"""
import copy
import platform
from functools import partial
from torch.utils.data import DataLoader
from mmcv.utils import Registry
from mmcv.utils import build_from_cfg
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmdet.datasets import DATASETS
from mmdet.models.builder import build
from mmdet.datasets.builder import worker_init_fn
from mmdet.datasets.samplers import DistributedGroupSampler, GroupSampler, DistributedSampler
from .davar_dataset_wrappers import DavarConcatDataset
from .davar_multi_dataset import DavarMultiDataset
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
hard_limit = rlimit[1]
soft_limit = min(4096, hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
SAMPLER = Registry('sampler')
def build_sampler(cfg):
"""Build sampler
Args:
cfg(mmcv.Config): Sample cfg
Returns:
obj: sampler
"""
return build(cfg, SAMPLER)
def davar_build_dataloader(dataset,
samples_per_gpu=1,
workers_per_gpu=1,
sampler_type=None,
num_gpus=1,
dist=True,
shuffle=True,
seed=None,
**kwargs):
"""
Args:
dataset (Dataset): dataset
samples_per_gpu (int): image numbers on each gpu
workers_per_gpu (int): workers each gpu
sampler_type (optional | dict): sampler parameter
num_gpus (int): numbers of gpu
dist (boolean): whether to use distributed mode
shuffle (boolean): whether to shuffle the dataset
seed (int): seed number
**kwargs (None): back parameter
Returns:
the training data loader
"""
rank, world_size = get_dist_info()
if sampler_type is not None:
sampler = sampler_type
else:
sampler = kwargs.pop('sampler', None)
# if choose distributed sampler
if dist:
# whether to shuffle data
if shuffle:
if sampler is None:
# Distributed Group Sampler
sampler = DistributedGroupSampler(dataset, samples_per_gpu, world_size, rank,)
else:
sampler['dataset'] = dataset
sampler['samples_per_gpu'] = samples_per_gpu
# build distributed sampler
sampler = build_sampler(sampler)
else:
# distributed sampler
sampler = DistributedSampler(dataset, world_size, rank, shuffle=False)
batch_size = samples_per_gpu
num_workers = workers_per_gpu
else:
if shuffle:
if sampler is None:
# Group Sampler
sampler = GroupSampler(dataset, samples_per_gpu)
else:
sampler['dataset'] = dataset
sampler['samples_per_gpu'] = samples_per_gpu
# build non-distributed sampler
sampler = build_sampler(sampler)
else:
sampler = None
batch_size = num_gpus * samples_per_gpu
num_workers = num_gpus * workers_per_gpu
    # initialize each dataloader worker with a deterministic seed when one is provided
init_fn = partial(worker_init_fn,
num_workers=num_workers,
rank=rank,
seed=seed) if seed is not None else None
# build data loader
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
pin_memory=False,
worker_init_fn=init_fn,
**kwargs)
return data_loader
def _concat_dataset(cfg, default_args=None):
"""
Args:
cfg (cfg): model config file
default_args (args): back parameter
Returns:
concat all the dataset in config file
"""
# dataset information, pipeline information, batch setting information
ann_files = cfg['ann_file']
img_prefixes = cfg.get('img_prefix', None)
seg_prefixes = cfg.get('seg_prefix', None)
proposal_files = cfg.get('proposal_file', None)
data_types = cfg.get('data_type', None)
pipeline = cfg.get('pipeline', None)
batch_ratios = cfg.get('batch_ratios', None)
# update the parameter of the config
datasets = []
num_dset = len(ann_files)
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
data_cfg['ann_file'] = ann_files[i]
if isinstance(img_prefixes, (list, tuple)):
data_cfg['img_prefix'] = img_prefixes[i]
if isinstance(seg_prefixes, (list, tuple)):
data_cfg['seg_prefix'] = seg_prefixes[i]
if isinstance(proposal_files, (list, tuple)):
data_cfg['proposal_file'] = proposal_files[i]
if isinstance(data_types, (list, tuple)):
data_cfg['data_type'] = data_types[i]
if isinstance(pipeline, (list, tuple)):
if isinstance(pipeline[0], (list, tuple)):
data_cfg['pipeline'] = pipeline[i]
if isinstance(batch_ratios, (list, tuple)):
data_cfg['batch_ratios'] = batch_ratios[i]
# build the dataset
datasets.append(davar_build_dataset(data_cfg, default_args))
return DavarConcatDataset(datasets)
def davar_build_dataset(cfg, default_args=None):
"""
Args:
cfg (cfg): model config file
default_args (args): back parameter
Returns:
build the dataset for training
"""
from mmdet.datasets.dataset_wrappers import (ConcatDataset, RepeatDataset,
ClassBalancedDataset)
from mmdet.datasets import build_dataset
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif cfg['type'] == 'ConcatDataset':
dataset = ConcatDataset(
[build_dataset(c, default_args) for c in cfg['datasets']],
cfg.get('separate_eval', True))
elif cfg['type'] == 'DavarMultiDataset':
align_parameters = parameter_align(cfg)
dataset = DavarMultiDataset(cfg["batch_ratios"],
[davar_build_dataset(c, default_args) for c in align_parameters])
elif cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
elif cfg['type'] == 'ClassBalancedDataset':
dataset = ClassBalancedDataset(
build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
elif isinstance(cfg.get('ann_file'), (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
def parameter_align(cfg):
""" pipeline parameter alignment
Args:
cfg (config): model pipeline config
Returns:
"""
align_para = list()
if isinstance(cfg["batch_ratios"], (float, int)):
batch_ratios = [cfg["batch_ratios"]]
elif isinstance(cfg["batch_ratios"], (tuple, list)):
batch_ratios = cfg["batch_ratios"]
else:
batch_ratios = list(map(float, cfg["batch_ratios"].split('|')))
if isinstance(cfg["dataset"]["ann_file"], str):
cfg["dataset"]["ann_file"] = cfg["dataset"]["ann_file"].split('|')
if isinstance(cfg["dataset"]["img_prefix"], str):
cfg["dataset"]["img_prefix"] = cfg["dataset"]["img_prefix"].split('|')
dataset_num = len(batch_ratios)
for key, item in cfg["dataset"].items():
if isinstance(item, list) and isinstance(item[0], list) and len(item) < dataset_num:
for _ in range(dataset_num - len(item)):
cfg["dataset"][key].append(item)
elif isinstance(item, list) and isinstance(item[0], dict):
temp = []
for _ in range(dataset_num):
temp.append(item)
cfg["dataset"][key] = temp
elif isinstance(item, list) and len(item) == dataset_num:
continue
elif isinstance(item, (int, float)):
temp = []
for _ in range(dataset_num):
temp.append(item)
cfg["dataset"][key] = temp
elif isinstance(item, str):
temp_ = []
for _ in range(dataset_num):
temp_.append(item)
cfg["dataset"][key] = temp_
else:
raise TypeError("parameter type error")
for i in range(dataset_num):
temp_dict = dict()
for key, item in cfg["dataset"].items():
temp_dict[key] = item[i]
align_para.append(temp_dict)
return align_para
|
[
"mmcv.runner.get_dist_info",
"functools.partial",
"copy.deepcopy",
"mmdet.datasets.samplers.DistributedGroupSampler",
"mmcv.utils.build_from_cfg",
"mmdet.datasets.build_dataset",
"resource.getrlimit",
"resource.setrlimit",
"mmcv.utils.Registry",
"mmdet.datasets.samplers.DistributedSampler",
"mmdet.datasets.samplers.GroupSampler",
"platform.system",
"mmdet.models.builder.build"
] |
[((1304, 1323), 'mmcv.utils.Registry', 'Registry', (['"""sampler"""'], {}), "('sampler')\n", (1312, 1323), False, 'from mmcv.utils import Registry\n'), ((993, 1010), 'platform.system', 'platform.system', ([], {}), '()\n', (1008, 1010), False, 'import platform\n'), ((1110, 1152), 'resource.getrlimit', 'resource.getrlimit', (['resource.RLIMIT_NOFILE'], {}), '(resource.RLIMIT_NOFILE)\n', (1128, 1152), False, 'import resource\n'), ((1223, 1291), 'resource.setrlimit', 'resource.setrlimit', (['resource.RLIMIT_NOFILE', '(soft_limit, hard_limit)'], {}), '(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n', (1241, 1291), False, 'import resource\n'), ((1473, 1492), 'mmdet.models.builder.build', 'build', (['cfg', 'SAMPLER'], {}), '(cfg, SAMPLER)\n', (1478, 1492), False, 'from mmdet.models.builder import build\n'), ((2385, 2400), 'mmcv.runner.get_dist_info', 'get_dist_info', ([], {}), '()\n', (2398, 2400), False, 'from mmcv.runner import get_dist_info\n'), ((3811, 3881), 'functools.partial', 'partial', (['worker_init_fn'], {'num_workers': 'num_workers', 'rank': 'rank', 'seed': 'seed'}), '(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)\n', (3818, 3881), False, 'from functools import partial\n'), ((5052, 5070), 'copy.deepcopy', 'copy.deepcopy', (['cfg'], {}), '(cfg)\n', (5065, 5070), False, 'import copy\n'), ((3085, 3145), 'mmdet.datasets.samplers.DistributedSampler', 'DistributedSampler', (['dataset', 'world_size', 'rank'], {'shuffle': '(False)'}), '(dataset, world_size, rank, shuffle=False)\n', (3103, 3145), False, 'from mmdet.datasets.samplers import DistributedGroupSampler, GroupSampler, DistributedSampler\n'), ((4158, 4207), 'functools.partial', 'partial', (['collate'], {'samples_per_gpu': 'samples_per_gpu'}), '(collate, samples_per_gpu=samples_per_gpu)\n', (4165, 4207), False, 'from functools import partial\n'), ((2728, 2795), 'mmdet.datasets.samplers.DistributedGroupSampler', 'DistributedGroupSampler', (['dataset', 'samples_per_gpu', 'world_size', 'rank'], {}), '(dataset, samples_per_gpu, world_size, rank)\n', (2751, 2795), False, 'from mmdet.datasets.samplers import DistributedGroupSampler, GroupSampler, DistributedSampler\n'), ((3342, 3380), 'mmdet.datasets.samplers.GroupSampler', 'GroupSampler', (['dataset', 'samples_per_gpu'], {}), '(dataset, samples_per_gpu)\n', (3354, 3380), False, 'from mmdet.datasets.samplers import DistributedGroupSampler, GroupSampler, DistributedSampler\n'), ((6417, 6447), 'mmdet.datasets.build_dataset', 'build_dataset', (['c', 'default_args'], {}), '(c, default_args)\n', (6430, 6447), False, 'from mmdet.datasets import build_dataset\n'), ((6550, 6580), 'mmdet.datasets.build_dataset', 'build_dataset', (['c', 'default_args'], {}), '(c, default_args)\n', (6563, 6580), False, 'from mmdet.datasets import build_dataset\n'), ((6990, 7033), 'mmdet.datasets.build_dataset', 'build_dataset', (["cfg['dataset']", 'default_args'], {}), "(cfg['dataset'], default_args)\n", (7003, 7033), False, 'from mmdet.datasets import build_dataset\n'), ((7149, 7192), 'mmdet.datasets.build_dataset', 'build_dataset', (["cfg['dataset']", 'default_args'], {}), "(cfg['dataset'], default_args)\n", (7162, 7192), False, 'from mmdet.datasets import build_dataset\n'), ((7355, 7398), 'mmcv.utils.build_from_cfg', 'build_from_cfg', (['cfg', 'DATASETS', 'default_args'], {}), '(cfg, DATASETS, default_args)\n', (7369, 7398), False, 'from mmcv.utils import build_from_cfg\n')]
|
import os
from typing import Dict, Union
import numpy as np
def lenient_makedirs(path: str) -> None:
"""Simple wrapper around makedirs that first checks for existence.
Args:
path (str): path to be created
"""
if not os.path.exists(path):
os.makedirs(path)
def tile_overlapped(image: np.ndarray,
tile_size: Union[tuple, int] = 256,
channels_first: bool = False,
tile_rows: int = None,
tile_cols: int = None) -> np.ndarray:
if len(image.shape) == 2:
axis = 0 if channels_first else -1
image = np.expand_dims(image, axis=axis)
if channels_first:
image = np.moveaxis(image, 0, -1)
# assume height, width, channels from now on
height, width, channels = image.shape
tile_h, tile_w = tile_size if isinstance(tile_size, tuple) else (tile_size, tile_size)
if height <= tile_h and width <= tile_w:
raise ValueError("Image is smaller than the required tile size")
# number of expected tiles, manually defined or inferred
exact = [height / float(tile_h), width / float(tile_w)]
outer = [int(np.ceil(v)) for v in exact]
# the required number of tiles is given by the ceiling
tile_count_h = tile_rows or outer[0]
tile_count_w = tile_cols or outer[1]
# compute total remainder for the expanded window
remainder_h = (tile_count_h * tile_h) - height
remainder_w = (tile_count_w * tile_w) - width
# divide remainders among tiles as overlap
overlap_h = int(np.floor(remainder_h / float(tile_count_h))) if tile_count_h > 1 else 0
overlap_w = int(np.floor(remainder_w / float(tile_count_w))) if tile_count_w > 1 else 0
# create the empty tensor to contain tiles
tiles = np.empty((tile_count_h, tile_count_w, tile_h, tile_w, channels), dtype=image.dtype)
stride_h = tile_h - overlap_h
stride_w = tile_w - overlap_w
# iterate over tiles and copy content from image windows
for row in range(tile_count_h):
for col in range(tile_count_w):
# get the starting indices, accounting for initial positions
# overlap is halved to distribute in left/right and top/bottom
x = max(row * stride_h - overlap_h // 2, 0)
y = max(col * stride_w - overlap_w // 2, 0)
# if it exceeds horizontally or vertically in the last rows or cols, increase overlap to fit
if (x + tile_h) >= height:
x -= abs(x + tile_h - height)
if (y + tile_w) >= width:
y -= abs(y + tile_w - width)
# assign tile to final tensor
tiles[row, col] = image[x:x + tile_h, y:y + tile_w, :]
return tiles
def convert_mask(image: np.ndarray, lut: Dict[tuple, int]) -> np.ndarray:
"""Converts a given RGB image containing labels in channels-last format (h, w, c)
into a greyscale mask where each index indicates a given class.
:param image: RGB input image with dimensions [height, width, channels]
:type image: np.ndarray
:param lut: look-up table containing the associations color -> index
:type lut: Dict[tuple, int]
:return: greyscale image with size [height, width] containing the mapped label indices
:rtype: np.ndarray
"""
result = np.zeros(image.shape[:2])
for color, index in lut.items():
result[np.all(image == color, axis=-1)] = index
return result.astype(np.uint8)
|
[
"numpy.moveaxis",
"os.makedirs",
"numpy.ceil",
"numpy.empty",
"numpy.zeros",
"numpy.expand_dims",
"os.path.exists",
"numpy.all"
] |
[((1779, 1867), 'numpy.empty', 'np.empty', (['(tile_count_h, tile_count_w, tile_h, tile_w, channels)'], {'dtype': 'image.dtype'}), '((tile_count_h, tile_count_w, tile_h, tile_w, channels), dtype=\n image.dtype)\n', (1787, 1867), True, 'import numpy as np\n'), ((3302, 3327), 'numpy.zeros', 'np.zeros', (['image.shape[:2]'], {}), '(image.shape[:2])\n', (3310, 3327), True, 'import numpy as np\n'), ((244, 264), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (258, 264), False, 'import os\n'), ((274, 291), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (285, 291), False, 'import os\n'), ((629, 661), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': 'axis'}), '(image, axis=axis)\n', (643, 661), True, 'import numpy as np\n'), ((701, 726), 'numpy.moveaxis', 'np.moveaxis', (['image', '(0)', '(-1)'], {}), '(image, 0, -1)\n', (712, 726), True, 'import numpy as np\n'), ((1165, 1175), 'numpy.ceil', 'np.ceil', (['v'], {}), '(v)\n', (1172, 1175), True, 'import numpy as np\n'), ((3380, 3411), 'numpy.all', 'np.all', (['(image == color)'], {'axis': '(-1)'}), '(image == color, axis=-1)\n', (3386, 3411), True, 'import numpy as np\n')]
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def detail_url(recipe_id):
"""return recipe url"""
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main Course'):
"""create a sample tag and return"""
return Tag.objects.create(
user=user,
name=name
)
def sample_ingredient(user, name='Cinnamon'):
    """create and return a sample ingredient"""
return Ingredient.objects.create(
user=user,
name=name
)
def sample_recipe(user, **param):
"""create and return a sample recipe"""
defaults = {
'title': 'sample recipe',
'time_minutes': 10,
'price': 5.00
}
defaults.update(param)
return Recipe.objects.create(
user=user,
**defaults
)
class PublicRecipeApiTests(TestCase):
"""test unauthenticated rest API"""
def setUp(self) -> None:
self.client = APIClient()
def test_auth_required(self):
"""test that authentication is required"""
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
"""test recipe can be retrieved from authenticated user"""
def setUp(self) -> None:
self.client = APIClient()
self.user = get_user_model().objects.create_user(
email="<EMAIL>",
password="<PASSWORD>"
)
self.client.force_authenticate(user=self.user)
    def test_retrieve_recipe(self):
"""test user is able to retrieve recipe"""
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
"""test that recipes are limited to authenticated user"""
user2 = get_user_model().objects.create_user(
email="<EMAIL>",
password='<PASSWORD>'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
"""test viewing recipe detail"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
"""test creating recipe"""
payload = {
'title': 'chocolate cake',
'time_minutes': 30,
'price': 5.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(
id=res.data['id']
)
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
"""test adding a recipe with tags"""
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Dessert')
payload = {
'title': 'Avocardo lime cheesecake',
'tags': [tag1.id, tag2.id],
'time_minutes': 20,
'price': 10.25
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(
id=res.data['id']
)
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredients(self):
"""test adding a recipe with ingredients"""
ingredient1 = sample_ingredient(user=self.user, name='Prawns')
ingredient2 = sample_ingredient(user=self.user, name='Ginger')
payload = {
'title': 'Thai prawns curry',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 20,
'price': 10
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(
id=res.data['id']
)
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
def test_partial_update_recipe(self):
"""test updating a recipe patch"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user, name='curry')
payload = {
'title': 'Paneer tikka',
'tags': [new_tag.id]
}
url = detail_url(recipe.id)
self.client.patch(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
def test_full_update_recipe(self):
"""test updating a recipe full"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
'title': 'chai',
'time_minutes': 25,
'price': 15
}
url = detail_url(recipe.id)
self.client.put(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
|
[
"core.models.Recipe.objects.filter",
"core.models.Tag.objects.create",
"core.models.Recipe.objects.all",
"core.models.Recipe.objects.create",
"recipe.serializers.RecipeDetailSerializer",
"core.models.Recipe.objects.get",
"django.contrib.auth.get_user_model",
"django.urls.reverse",
"core.models.Ingredient.objects.create",
"recipe.serializers.RecipeSerializer",
"rest_framework.test.APIClient"
] |
[((326, 355), 'django.urls.reverse', 'reverse', (['"""recipe:recipe-list"""'], {}), "('recipe:recipe-list')\n", (333, 355), False, 'from django.urls import reverse\n'), ((424, 473), 'django.urls.reverse', 'reverse', (['"""recipe:recipe-detail"""'], {'args': '[recipe_id]'}), "('recipe:recipe-detail', args=[recipe_id])\n", (431, 473), False, 'from django.urls import reverse\n'), ((570, 610), 'core.models.Tag.objects.create', 'Tag.objects.create', ([], {'user': 'user', 'name': 'name'}), '(user=user, name=name)\n', (588, 610), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((749, 796), 'core.models.Ingredient.objects.create', 'Ingredient.objects.create', ([], {'user': 'user', 'name': 'name'}), '(user=user, name=name)\n', (774, 796), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((1044, 1088), 'core.models.Recipe.objects.create', 'Recipe.objects.create', ([], {'user': 'user'}), '(user=user, **defaults)\n', (1065, 1088), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((1243, 1254), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (1252, 1254), False, 'from rest_framework.test import APIClient\n'), ((1612, 1623), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (1621, 1623), False, 'from rest_framework.test import APIClient\n'), ((2093, 2129), 'recipe.serializers.RecipeSerializer', 'RecipeSerializer', (['recipes'], {'many': '(True)'}), '(recipes, many=True)\n', (2109, 2129), False, 'from recipe.serializers import RecipeSerializer, RecipeDetailSerializer\n'), ((2615, 2652), 'core.models.Recipe.objects.filter', 'Recipe.objects.filter', ([], {'user': 'self.user'}), '(user=self.user)\n', (2636, 2652), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((2674, 2710), 'recipe.serializers.RecipeSerializer', 'RecipeSerializer', (['recipes'], {'many': '(True)'}), '(recipes, many=True)\n', (2690, 2710), False, 'from recipe.serializers import RecipeSerializer, RecipeDetailSerializer\n'), ((3206, 3236), 'recipe.serializers.RecipeDetailSerializer', 'RecipeDetailSerializer', (['recipe'], {}), '(recipe)\n', (3228, 3236), False, 'from recipe.serializers import RecipeSerializer, RecipeDetailSerializer\n'), ((3629, 3666), 'core.models.Recipe.objects.get', 'Recipe.objects.get', ([], {'id': "res.data['id']"}), "(id=res.data['id'])\n", (3647, 3666), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((4308, 4345), 'core.models.Recipe.objects.get', 'Recipe.objects.get', ([], {'id': "res.data['id']"}), "(id=res.data['id'])\n", (4326, 4345), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((5083, 5120), 'core.models.Recipe.objects.get', 'Recipe.objects.get', ([], {'id': "res.data['id']"}), "(id=res.data['id'])\n", (5101, 5120), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((2035, 2055), 'core.models.Recipe.objects.all', 'Recipe.objects.all', ([], {}), '()\n', (2053, 2055), False, 'from core.models import Recipe, Tag, Ingredient\n'), ((1644, 1660), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1658, 1660), False, 'from django.contrib.auth import get_user_model\n'), ((2371, 2387), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (2385, 2387), False, 'from django.contrib.auth import get_user_model\n')]
|
import json
import torch
import numpy as np
import random
import torch.nn.functional as F
import functools
def cmp_time(a, b):
a_num = int(a.split('_')[1])
b_num = int(b.split('_')[1])
return a_num - b_num
def pad_tensor(vec, pad):
"""
pad tensor to fixed length
:parameter
vec: tensor to pad
pad: the size to pad to
:return
a new tensor padded to 'pad'
"""
padded = torch.cat([vec, torch.zeros((pad - len(vec),20), dtype=torch.float)], dim=0).data.numpy()
return padded
def padding_all(vec, max_len):
"""
vec: [n, len, feat]
"""
n = vec.shape[0]
vec_len = vec.shape[1]
padded = torch.cat([vec, torch.zeros((n,max_len-vec_len,20), dtype=torch.double)], dim=1).data
return padded
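# Minimal sketch of the two padding helpers above (hypothetical shapes; the feature
# width 20 is hard-coded in both functions); guarded so it only runs when executed directly.
if __name__ == "__main__":
    _seq = torch.ones((3, 20), dtype=torch.float)
    print(pad_tensor(_seq, 5).shape)         # (5, 20) numpy array
    _batch = torch.ones((2, 3, 20), dtype=torch.double)
    print(padding_all(_batch, 5).shape)      # torch.Size([2, 5, 20])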
def load_info_data(path):
ori_data = np.load(path)
    protein_tensor = torch.tensor(ori_data['pssm_arr'], dtype=torch.float)  # [n_p, 220]
    drug_tensor = torch.tensor(ori_data['drug_arr'], dtype=torch.float)  # [n_d, 881]
protein_num = protein_tensor.shape[0]
drug_num = drug_tensor.shape[0]
node_num = protein_num + drug_num
return protein_tensor, drug_tensor, node_num, protein_num
def load_pre_process(preprocess_path):
with open(preprocess_path, 'r') as f:
a = json.load(f)
adj = torch.FloatTensor(a['adj'])
dti_inter_mat = torch.FloatTensor(a['dti_inter_mat'])
train_interact_pos = torch.tensor(a['train_interact_pos'])
val_interact_pos = torch.tensor(a['val_interact_pos'])
return adj, dti_inter_mat, train_interact_pos, val_interact_pos
|
[
"numpy.load",
"json.load",
"torch.FloatTensor",
"torch.zeros",
"torch.tensor"
] |
[((824, 837), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (831, 837), True, 'import numpy as np\n'), ((859, 912), 'torch.tensor', 'torch.tensor', (["ori_data['pssm_arr']"], {'dtype': 'torch.float'}), "(ori_data['pssm_arr'], dtype=torch.float)\n", (871, 912), False, 'import torch\n'), ((945, 998), 'torch.tensor', 'torch.tensor', (["ori_data['drug_arr']"], {'dtype': 'torch.float'}), "(ori_data['drug_arr'], dtype=torch.float)\n", (957, 998), False, 'import torch\n'), ((1285, 1297), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1294, 1297), False, 'import json\n'), ((1312, 1339), 'torch.FloatTensor', 'torch.FloatTensor', (["a['adj']"], {}), "(a['adj'])\n", (1329, 1339), False, 'import torch\n'), ((1364, 1401), 'torch.FloatTensor', 'torch.FloatTensor', (["a['dti_inter_mat']"], {}), "(a['dti_inter_mat'])\n", (1381, 1401), False, 'import torch\n'), ((1431, 1468), 'torch.tensor', 'torch.tensor', (["a['train_interact_pos']"], {}), "(a['train_interact_pos'])\n", (1443, 1468), False, 'import torch\n'), ((1496, 1531), 'torch.tensor', 'torch.tensor', (["a['val_interact_pos']"], {}), "(a['val_interact_pos'])\n", (1508, 1531), False, 'import torch\n'), ((688, 747), 'torch.zeros', 'torch.zeros', (['(n, max_len - vec_len, 20)'], {'dtype': 'torch.double'}), '((n, max_len - vec_len, 20), dtype=torch.double)\n', (699, 747), False, 'import torch\n')]
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import fileSplit
fileSplit.合并()  # 合并 = "merge"
|
[
"fileSplit.合并"
] |
[((55, 69), 'fileSplit.合并', 'fileSplit.合并', ([], {}), '()\n', (67, 69), False, 'import fileSplit\n')]
|
from django.db import models
class Segment(models.Model):
from_stop = models.IntegerField()
to_stop = models.IntegerField()
distance = models.DecimalField(max_digits=6, decimal_places=2)
class Route(models.Model):
segments = models.ManyToManyField(Segment, through='RouteSegment')
class RouteSegment(models.Model):
route = models.ForeignKey(Route)
segment = models.ForeignKey(Segment)
sequence = models.PositiveSmallIntegerField(db_index=True)
class Meta:
ordering = ('route', 'sequence', )
class Ride(models.Model):
from_stop = models.IntegerField()
to_stop = models.IntegerField()
route = models.ForeignKey(Route)
class Ticket(models.Model):
ride = models.ForeignKey(Ride)
from_stop = models.IntegerField()
to_stop = models.IntegerField()
date = models.DateField()
description = models.TextField()
    transaction_hash = models.CharField(max_length=64)
price = models.DecimalField(max_digits=5, decimal_places=2)
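# Hedged usage sketch (hypothetical data; requires a configured Django project and DB).
# Because Route.segments uses a through model, links are created via RouteSegment so
# each link can carry its position in the route:
#   s1 = Segment.objects.create(from_stop=1, to_stop=2, distance='3.50')
#   s2 = Segment.objects.create(from_stop=2, to_stop=3, distance='1.25')
#   route = Route.objects.create()
#   RouteSegment.objects.create(route=route, segment=s1, sequence=1)
#   RouteSegment.objects.create(route=route, segment=s2, sequence=2)
#   RouteSegment.objects.filter(route=route)  # ordered by ('route', 'sequence')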
|
[
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.DecimalField",
"django.db.models.IntegerField",
"django.db.models.DateField"
] |
[((76, 97), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (95, 97), False, 'from django.db import models\n'), ((112, 133), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (131, 133), False, 'from django.db import models\n'), ((149, 200), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(6)', 'decimal_places': '(2)'}), '(max_digits=6, decimal_places=2)\n', (168, 200), False, 'from django.db import models\n'), ((245, 300), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Segment'], {'through': '"""RouteSegment"""'}), "(Segment, through='RouteSegment')\n", (267, 300), False, 'from django.db import models\n'), ((349, 373), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Route'], {}), '(Route)\n', (366, 373), False, 'from django.db import models\n'), ((388, 414), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Segment'], {}), '(Segment)\n', (405, 414), False, 'from django.db import models\n'), ((430, 477), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'db_index': '(True)'}), '(db_index=True)\n', (462, 477), False, 'from django.db import models\n'), ((582, 603), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (601, 603), False, 'from django.db import models\n'), ((618, 639), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (637, 639), False, 'from django.db import models\n'), ((652, 676), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Route'], {}), '(Route)\n', (669, 676), False, 'from django.db import models\n'), ((718, 741), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Ride'], {}), '(Ride)\n', (735, 741), False, 'from django.db import models\n'), ((758, 779), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (777, 779), False, 'from django.db import models\n'), ((794, 815), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (813, 815), False, 'from django.db import models\n'), ((827, 845), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (843, 845), False, 'from django.db import models\n'), ((864, 882), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (880, 882), False, 'from django.db import models\n'), ((905, 936), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (921, 936), False, 'from django.db import models\n'), ((949, 1000), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(5)', 'decimal_places': '(2)'}), '(max_digits=5, decimal_places=2)\n', (968, 1000), False, 'from django.db import models\n')]
|
import sys
import numpy as np
import math
from JMLUtils import dist2, eprint
from StructureXYZ import StructXYZ
from typing import Sequence
TRIANGLE_TOL = 1E-4
Y_DENOM = 1.0 / math.sqrt(3)
def water(infile: str = 'QM_REF.xyz', delta=4.0):
delta = float(delta)
xyzfi = StructXYZ(infile)
assert len(xyzfi.probe_indices) == 0
assert xyzfi.n_atoms == 3
assert xyzfi.atom_names[0].startswith("O")
place_triangle(xyzfi, delta)
def place_triangle(xyzfi: StructXYZ, delta: float = 4.0, outname: str = "WATER_PROBE.xyz", center: int = 0,
flank1: int = 1, flank2: int = 2):
if center >= xyzfi.n_atoms or center < 0:
raise ValueError(f"Central atom index {center} out-of-bounds 0-{xyzfi.n_atoms}")
if flank1 >= xyzfi.n_atoms or flank1 < 0:
raise ValueError(f"Flank1 atom index {flank1} out-of-bounds 0-{xyzfi.n_atoms}")
if flank2 >= xyzfi.n_atoms or flank2 < 0:
raise ValueError(f"Flank2 atom index {flank2} out-of-bounds 0-{xyzfi.n_atoms}")
if center == flank1 or center == flank2 or flank1 == flank2:
raise ValueError(f"All three atoms must have distinct indices: received {center},{flank1},{flank2}")
triangle_center = xyzfi.coords[center] + xyzfi.coords[flank1] + xyzfi.coords[flank2]
triangle_center *= 0.5
place_vec = triangle_center - xyzfi.coords[center]
mag_pv = math.sqrt(np.dot(place_vec, place_vec))
bisector_vector = 0.5 * (xyzfi.coords[flank2] - xyzfi.coords[flank1])
# Only used as square, so don't bother w/ square root
half_bisector = np.dot(bisector_vector, bisector_vector)
from_bisector = math.sqrt((delta * delta) - half_bisector)
out_xyz = (place_vec * (from_bisector / mag_pv)) + triangle_center
eprint(f"Placing probe at {out_xyz}")
xyzfi.append_atom(xyzfi.get_default_probetype()[0], out_xyz)
xyzfi.write_out(outname)
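# Worked example of the bisector arithmetic above (illustrative numbers): with
# delta = 4.0 and the two flank atoms 2.0 apart, half_bisector = 1.0 and
# from_bisector = sqrt(4.0**2 - 1.0) ~= 3.873, i.e. the probe lands ~3.873 from the
# flank midpoint and, assuming the placement direction is perpendicular to the
# flank-flank axis (true for a symmetric water), exactly delta from each flank atom:
# sqrt(1.0**2 + 3.873**2) = 4.0.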
|
[
"numpy.dot",
"StructureXYZ.StructXYZ",
"JMLUtils.eprint",
"math.sqrt"
] |
[((179, 191), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (188, 191), False, 'import math\n'), ((282, 299), 'StructureXYZ.StructXYZ', 'StructXYZ', (['infile'], {}), '(infile)\n', (291, 299), False, 'from StructureXYZ import StructXYZ\n'), ((1575, 1615), 'numpy.dot', 'np.dot', (['bisector_vector', 'bisector_vector'], {}), '(bisector_vector, bisector_vector)\n', (1581, 1615), True, 'import numpy as np\n'), ((1637, 1677), 'math.sqrt', 'math.sqrt', (['(delta * delta - half_bisector)'], {}), '(delta * delta - half_bisector)\n', (1646, 1677), False, 'import math\n'), ((1755, 1792), 'JMLUtils.eprint', 'eprint', (['f"""Placing probe at {out_xyz}"""'], {}), "(f'Placing probe at {out_xyz}')\n", (1761, 1792), False, 'from JMLUtils import dist2, eprint\n'), ((1392, 1420), 'numpy.dot', 'np.dot', (['place_vec', 'place_vec'], {}), '(place_vec, place_vec)\n', (1398, 1420), True, 'import numpy as np\n')]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os, sys, logging, argparse
from itertools import ifilter as filter
import muz
import muz.frontend
import muz.vfs as vfs
import muz.beatmap as beatmap
import muz.game as game
import muz.util
from muz import _config as config
NAME = u"μz"
VERSION = "0.01-prepreprealpha"
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
userdir = os.path.abspath(os.path.join(os.path.expanduser("~"), ".muz"))
globalArgs = None
frontend = None
log = logging.getLogger(__name__)
def initUserDir():
if not os.path.exists(userdir):
os.makedirs(userdir)
def initvfs():
vfs.root.clear()
if globalArgs.no_vfs:
return
vfs.applySettings()
def initroot(root=vfs.root):
root.loadDataDirs(basedir, userdir, *globalArgs.extradirs)
for pack in globalArgs.extrapacks:
root.loadPack(pack)
return root
vfs.root = vfs.LazyNode(initroot)
def initArgParser(desc=None, prog=None):
if desc is None:
desc = "%s: a mania-style rhythm game" % NAME
if prog is None:
if os.path.split(sys.argv[0])[-1] == "__main__.py":
prog = "muz"
return argparse.ArgumentParser(description=desc, prog=prog, add_help=False, conflict_handler='resolve')
def handleGeneralArgs(parser, argv, namespace):
global globalArgs, userdir, basedir
g = parser.add_argument_group(title="general options")
g.add_argument('--basedir', action='store', default=basedir,
help="set the location of base game assets (default: %(default)s)")
g.add_argument('--userdir', action="store", default=userdir,
help="set the location of user-supplied game data (e.g. beatmaps) (default: %(default)s)")
g.add_argument('--no-vfs', action='store_true', default=False,
help="do not initialize the virtual filesystem")
g.add_argument('-d', '--dir', metavar='DIR', dest='extradirs', action='append', default=[],
help="add a directory to search for game data in (including beatmaps), can be specified multiple times")
g.add_argument('-p', '--pack', metavar='PACK', dest='extrapacks', action='append', default=[],
help="add a pack to search for game data in (including beatmaps), can be specified multiple times")
g.add_argument('-c', '--config', action="store", type=argparse.FileType('r'), default=None,
help="load an alternative configuration file (default: $userdir/config.json)")
g.add_argument('-l', '--list-beatmaps', dest="listbeatmaps", action="count", default=False,
help="list all beatmaps found in the virtual filesystem, specify twice to also list their 'nicer' names parsed from metadata (slow)")
g.add_argument('-L', '--list-vfs', dest="listvfspath", metavar="PATH", action="store", nargs='?', const='', default=None,
help="list the contents of a path in the virtual filesystem and exit")
g.add_argument('--log-level', dest="loglevel", metavar="LEVEL", choices=["critical", "error", "warning", "info", "debug"], default=None,
help="set the output verbosity level, overrides the config setting (default: warning)")
g.add_argument('--frontend', choices=tuple(muz.frontend.iter()), default="pygame",
help="set the subsystem used to render and display the game, handle input, play audio, etc. (default: %(default)s)")
g.add_argument('-v', '--version', action="version", version="%s %s" % (NAME, VERSION),
help="print the game version and exit")
g.add_argument('-h', '--help', action='store_true', #action="help",
help="print this rather unhelpful (I'm sorry) help message and exit")
n, a = parser.parse_known_args(argv, namespace=namespace)
globalArgs = n
basedir = os.path.abspath(n.basedir)
userdir = os.path.abspath(n.userdir)
if globalArgs.loglevel is not None:
muz.log.setLevel(muz.util.logLevelByName(globalArgs.loglevel))
if n.listvfspath is not None:
init()
l = vfs.locate(n.listvfspath)
for key in sorted(l.keys()):
print("%s%s" % (key, vfs.VPATH_SEP if l[key].isDir else ""))
exit(0)
if n.listbeatmaps:
init()
def getname(s):
if n.listbeatmaps < 2:
return s
try:
b = muz.beatmap.load(s, bare=True)
except Exception as e:
log.exception("failed to load beatmap %s: %s", s, e)
return s
else:
return "%s: %s" %(s, b.name)
for s in sorted(filter(None, (muz.beatmap.nameFromPath(path+obj) for path, obj, _ in vfs.root.walk()))):
print(getname(s))
exit(0)
return (n, a)
def handleGameArgs(parser, argv, namespace, beatmapOption=True):
g = parser.add_argument_group(title="game options")
if beatmapOption:
g.add_argument('beatmap', type=str, nargs=1,
help='run the game with the specified beatmap')
g.add_argument('--importer-options', action='store', default=None,
help='pass an option string to the beatmap importer')
g.add_argument('--start-from', dest='startfrom', metavar='TIME', type=int, action='store', default=0,
help='start playing from an arbitrary position, in milliseconds (default: 0)')
g.add_argument('--loop', metavar='TIME', type=int, action='store', default=0,
help='if >0, the song will automatically restart after being played for this much milliseconds (default: 0)')
g.add_argument('-o', '--beatmap-offset', metavar='TIME', type=int, action='store', default=None,
help='offset timing of all notes on the beatmap by this value, in milliseconds (overrides the config setting)')
g.add_argument('-f', '--fc-run', dest='fcrun', action='store_true', default=False,
help='automatically restart the game when the combo is broken')
g.add_argument('-p', '--perfect-run', dest='perfectrun', action='store_true', default=False,
help='automatically restart the game when anything less than Perfect is scored, implies --fc-run')
g.add_argument('-r', '--random', action='store_true', default=False,
help='randomize note positions on the beatmap')
g.add_argument('--shuffle-bands', action='store_true', default=False,
help='shuffle band positions')
g.add_argument('--mirror-bands', action='store_true', default=False,
help='mirror band positions')
g.add_argument('--no-holds', action='store_true', default=False,
help='replace each hold note with two hit notes')
g.add_argument('--holdify', action='store_true', default=False,
help='group all notes into holds where possible')
g.add_argument('-i', '--insane', action='store_true', default=False,
help='add lots of extra notes')
g.add_argument('-a', '--autoplay', action='store_true', default=False,
help='play automatically without user interaction (overrides the config setting)')
g.add_argument('-b', '--num-bands', action='store', type=int, default=0,
help='forces a specific amount of bands instead of reading it from the beatmap')
if beatmapOption and len(argv) < 1:
parser.print_help()
exit(1)
n, a = parser.parse_known_args(argv, namespace=namespace)
return (n, a)
def handleRemainingArgs(parser, argv, namespace):
if namespace.help:
parser.print_help()
exit(1)
if argv:
sys.stderr.write("error: unhandled arguments: %s\n\n" % ', '.join(repr(a) for a in argv))
parser.print_usage()
sys.stderr.write("\ntry %s -h for help\n" % parser.prog)
exit(1)
return (namespace, argv)
def loadConfig(requireLogLevel=logging.CRITICAL):
if globalArgs.config is not None:
cfg = globalArgs.config
else:
cfg = os.path.join(userdir, "config.json")
defCfg = os.path.join(userdir, "config.default.json")
try:
with open(defCfg, 'w') as f:
muz.config.dump(f)
except Exception:
log.exception("couldn't write the default configuration file")
else:
log.info("wrote the default configuration to %s", repr(defCfg))
try:
with open(cfg) as f:
muz.config.load(f)
except Exception:
log.exception("couldn't load the configuration file %s", repr(cfg))
else:
log.info("loaded configuration from %s", repr(cfg))
if globalArgs.loglevel is None:
muz.log.setLevel(min(requireLogLevel, muz.util.logLevelByName(muz._config["log"]["level"])))
def playBeatmap(bmap):
frontend.gameLoop(game.Game(bmap, frontend))
def initFrontend(args, namespace):
global frontend
frontend = muz.frontend.get(globalArgs.frontend, frontendArgs=args, frontendArgsNamespace=namespace)
def init(requireLogLevel=logging.CRITICAL):
reload(sys)
sys.setdefaultencoding("utf-8")
initUserDir()
loadConfig(requireLogLevel=requireLogLevel)
initvfs()
if frontend is not None:
frontend.postInit()
def bareInit(argv=None, requireFrontend=False):
p = initArgParser()
n = None
if argv is None:
argv = []
n, argv = handleGeneralArgs(p, argv, n)
n, argv = handleGameArgs(p, argv, n, beatmapOption=False)
if requireFrontend:
initFrontend(argv, n)
n, argv = handleRemainingArgs(p, argv, n)
init()
@muz.util.entrypoint
def run(*argv):
argv = argv[1:]
p = initArgParser()
n = None
n, argv = handleGeneralArgs(p, argv, n)
n, argv = handleGameArgs(p, argv, n)
initFrontend(argv, n)
n, argv = handleRemainingArgs(p, argv, n)
init()
try:
playBeatmap(beatmap.load(n.beatmap[0], options=n.importer_options))
finally:
frontend.shutdown()
@muz.util.entrypoint
def runUI(*argv):
argv = argv[1:]
p = initArgParser()
n = None
n, argv = handleGeneralArgs(p, argv, n)
initFrontend(argv, n)
init()
try:
frontend.main()
finally:
frontend.shutdown()
if __name__ == "__main__":
run(*sys.argv)
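# Hedged usage sketch (illustrative invocations built only from the flags defined above;
# the beatmap name is hypothetical and the package targets Python 2):
#   python2 -m muz --list-beatmaps            # list beatmaps found in the virtual filesystem
#   python2 -m muz -a some_beatmap            # play a beatmap with autoplay enabled
#   python2 -m muz -d ~/maps some_beatmap     # search an extra data directory first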
|
[
"argparse.ArgumentParser",
"muz.beatmap.load",
"muz.vfs.root.walk",
"muz.frontend.iter",
"muz.vfs.LazyNode",
"muz.vfs.root.clear",
"muz.vfs.applySettings",
"os.path.join",
"os.path.abspath",
"os.path.dirname",
"os.path.exists",
"muz.util.logLevelByName",
"sys.setdefaultencoding",
"argparse.FileType",
"muz.frontend.get",
"muz.game.Game",
"os.makedirs",
"muz.beatmap.nameFromPath",
"muz.vfs.locate",
"muz.config.dump",
"sys.stderr.write",
"os.path.split",
"os.path.expanduser",
"logging.getLogger",
"muz.config.load"
] |
[((666, 693), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (683, 693), False, 'import os, sys, logging, argparse\n'), ((799, 815), 'muz.vfs.root.clear', 'vfs.root.clear', ([], {}), '()\n', (813, 815), True, 'import muz.vfs as vfs\n'), ((863, 882), 'muz.vfs.applySettings', 'vfs.applySettings', ([], {}), '()\n', (880, 882), True, 'import muz.vfs as vfs\n'), ((1097, 1119), 'muz.vfs.LazyNode', 'vfs.LazyNode', (['initroot'], {}), '(initroot)\n', (1109, 1119), True, 'import muz.vfs as vfs\n'), ((1356, 1456), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc', 'prog': 'prog', 'add_help': '(False)', 'conflict_handler': '"""resolve"""'}), "(description=desc, prog=prog, add_help=False,\n conflict_handler='resolve')\n", (1379, 1456), False, 'import os, sys, logging, argparse\n'), ((4059, 4085), 'os.path.abspath', 'os.path.abspath', (['n.basedir'], {}), '(n.basedir)\n', (4074, 4085), False, 'import os, sys, logging, argparse\n'), ((4100, 4126), 'os.path.abspath', 'os.path.abspath', (['n.userdir'], {}), '(n.userdir)\n', (4115, 4126), False, 'import os, sys, logging, argparse\n'), ((8350, 8394), 'os.path.join', 'os.path.join', (['userdir', '"""config.default.json"""'], {}), "(userdir, 'config.default.json')\n", (8362, 8394), False, 'import os, sys, logging, argparse\n'), ((9168, 9261), 'muz.frontend.get', 'muz.frontend.get', (['globalArgs.frontend'], {'frontendArgs': 'args', 'frontendArgsNamespace': 'namespace'}), '(globalArgs.frontend, frontendArgs=args,\n frontendArgsNamespace=namespace)\n', (9184, 9261), False, 'import muz\n'), ((9323, 9354), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (9345, 9354), False, 'import os, sys, logging, argparse\n'), ((517, 542), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (532, 542), False, 'import os, sys, logging, argparse\n'), ((592, 615), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (610, 615), False, 'import os, sys, logging, argparse\n'), ((725, 748), 'os.path.exists', 'os.path.exists', (['userdir'], {}), '(userdir)\n', (739, 748), False, 'import os, sys, logging, argparse\n'), ((758, 778), 'os.makedirs', 'os.makedirs', (['userdir'], {}), '(userdir)\n', (769, 778), False, 'import os, sys, logging, argparse\n'), ((4302, 4327), 'muz.vfs.locate', 'vfs.locate', (['n.listvfspath'], {}), '(n.listvfspath)\n', (4312, 4327), True, 'import muz.vfs as vfs\n'), ((8052, 8110), 'sys.stderr.write', 'sys.stderr.write', (['("""\ntry %s -h for help\n""" % parser.prog)'], {}), '("""\ntry %s -h for help\n""" % parser.prog)\n', (8068, 8110), False, 'import os, sys, logging, argparse\n'), ((8300, 8336), 'os.path.join', 'os.path.join', (['userdir', '"""config.json"""'], {}), "(userdir, 'config.json')\n", (8312, 8336), False, 'import os, sys, logging, argparse\n'), ((9070, 9095), 'muz.game.Game', 'game.Game', (['bmap', 'frontend'], {}), '(bmap, frontend)\n', (9079, 9095), True, 'import muz.game as game\n'), ((2569, 2591), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (2586, 2591), False, 'import os, sys, logging, argparse\n'), ((4193, 4237), 'muz.util.logLevelByName', 'muz.util.logLevelByName', (['globalArgs.loglevel'], {}), '(globalArgs.loglevel)\n', (4216, 4237), False, 'import muz\n'), ((8454, 8472), 'muz.config.dump', 'muz.config.dump', (['f'], {}), '(f)\n', (8469, 8472), False, 'import muz\n'), ((8699, 8717), 'muz.config.load', 'muz.config.load', (['f'], {}), '(f)\n', (8714, 8717), False, 
'import muz\n'), ((10131, 10185), 'muz.beatmap.load', 'beatmap.load', (['n.beatmap[0]'], {'options': 'n.importer_options'}), '(n.beatmap[0], options=n.importer_options)\n', (10143, 10185), True, 'import muz.beatmap as beatmap\n'), ((1270, 1296), 'os.path.split', 'os.path.split', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (1283, 1296), False, 'import os, sys, logging, argparse\n'), ((3473, 3492), 'muz.frontend.iter', 'muz.frontend.iter', ([], {}), '()\n', (3490, 3492), False, 'import muz\n'), ((4616, 4646), 'muz.beatmap.load', 'muz.beatmap.load', (['s'], {'bare': '(True)'}), '(s, bare=True)\n', (4632, 4646), False, 'import muz\n'), ((8969, 9021), 'muz.util.logLevelByName', 'muz.util.logLevelByName', (["muz._config['log']['level']"], {}), "(muz._config['log']['level'])\n", (8992, 9021), False, 'import muz\n'), ((4878, 4914), 'muz.beatmap.nameFromPath', 'muz.beatmap.nameFromPath', (['(path + obj)'], {}), '(path + obj)\n', (4902, 4914), False, 'import muz\n'), ((4933, 4948), 'muz.vfs.root.walk', 'vfs.root.walk', ([], {}), '()\n', (4946, 4948), True, 'import muz.vfs as vfs\n')]
|
#!/usr/bin/env python
# Copyright 2017-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright 2017-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Appends a file to a zip archive with copying the resulting zip to a new place """
import argparse
import shutil
import sys
from zipfile import ZipFile
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input")
parser.add_argument("--output")
parser.add_argument(
"files-to-append",
nargs="+",
help="Pairs of new zip entry name and file with entry content",
)
options = parser.parse_args()
copy_and_append_to_zip_file(options.input, options.output, options.files_to_append)
def copy_and_append_to_zip_file(input_zip_file, output_zip_file, files_to_append):
shutil.copy(input_zip_file, output_zip_file)
append_files_to_zip(output_zip_file, files_to_append)
def append_files_to_zip(zip_file_name, files_to_append):
with ZipFile(zip_file_name, "a") as zip_file:
for i in range(0, len(files_to_append) - 1, 2):
entry_name = files_to_append[i]
entry_content = __read_file(files_to_append[i + 1])
zip_file.writestr(entry_name, entry_content)
def __read_file(file_path):
with open(file_path) as new_file:
return new_file.read()
if __name__ == "__main__":
copy_and_append_to_zip_file(sys.argv[1], sys.argv[2], sys.argv[3:])
|
[
"zipfile.ZipFile",
"argparse.ArgumentParser",
"shutil.copy"
] |
[((1380, 1405), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1403, 1405), False, 'import argparse\n'), ((1837, 1881), 'shutil.copy', 'shutil.copy', (['input_zip_file', 'output_zip_file'], {}), '(input_zip_file, output_zip_file)\n', (1848, 1881), False, 'import shutil\n'), ((2008, 2035), 'zipfile.ZipFile', 'ZipFile', (['zip_file_name', '"""a"""'], {}), "(zip_file_name, 'a')\n", (2015, 2035), False, 'from zipfile import ZipFile\n')]
|
#!/usr/bin/env python
# Google Code Jam
# Google Code Jam 2017
# Round 1A 2017
# Problem A. Alphabet Cake
# Solve all test sets
from __future__ import print_function
def make_cake(r, c, cake):
assert isinstance(cake, list)
filled_cake = []
for row in cake:
first_cell = '?'
last_cell = '?'
new_row = ''
for cell in row:
if cell != '?':
last_cell = cell
if first_cell == '?':
first_cell = cell
new_row += last_cell
new_row = new_row.replace('?', first_cell)
filled_cake.append(new_row)
filled_cake2 = []
last_row = '?' * c
first_row = '?' * c
for row in filled_cake:
if not '?' in row:
last_row = row
if '?' in first_row:
first_row = row
new_row = last_row
filled_cake2.append(new_row)
filled_cake2 = [first_row if '?' in row else row for row in filled_cake2]
return filled_cake2
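# Worked example (traced by hand) for the first sample below:
#   make_cake(3, 3, ['G??', '?C?', '??J'])
#   row pass    -> ['GGG', 'CCC', 'JJJ']   (each '?' is filled from the letters in its row)
#   column pass -> ['GGG', 'CCC', 'JJJ']   (no all-'?' rows remain, so nothing changes)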
if __name__ == '__main__':
import os
samples = [
(3, 3, ['G??', '?C?', '??J']),
(3, 4, ['CODE', '????', '?JAM']),
(2, 2, ['CA', 'KE'])
]
for sample in samples:
print(make_cake(*sample))
data_files = ['A-small-practice', 'A-large-practice']
for f in data_files:
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.in'.format(f)), 'r') as input_file:
lines = input_file.readlines()
input_count = int(lines[0].replace('\n' ,''))
inputs = [line.replace('\n', '') for line in lines[1:]]
test_cases = []
j = 0
for _ in range(input_count):
cake = []
r, c = tuple([int(_) for _ in inputs[j].split(' ')])
j += 1
for _ in range(r):
row = inputs[j]
cake.append(row)
j += 1
test_cases.append((r, c, cake))
i = 1
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.out'.format(f)), 'w') as output_file:
for test_case in test_cases:
cake = make_cake(*test_case)
output_file.write('Case #{0}:\n'.format(i))
for row in cake:
output_file.write(row)
output_file.write('\n')
i += 1
|
[
"os.path.realpath"
] |
[((1378, 1404), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1394, 1404), False, 'import os\n'), ((2035, 2061), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2051, 2061), False, 'import os\n')]
|
from script.data_handler.DatasetPackLoader import DatasetPackLoader
from script.model.sklearn_like_model.AE.AE import AE
data_pack = DatasetPackLoader().load_dataset("MNIST")
dataset = data_pack['train']
Xs, Ys = dataset.full_batch(['Xs', 'Ys'])
sample_X = Xs[:2]
sample_Y = Ys[:2]
def AE_total_execute(model):
model.train(Xs, epoch=1)
metric = model.metric(sample_X)
print(metric)
code = model.code(sample_X)
print(code)
recon = model.recon(sample_X)
print(recon)
path = model.save()
class_ = model.__class__
del model
model = class_().load_meta(path)
print('model reloaded')
for i in range(2):
model.train(Xs, epoch=1)
metric = model.metric(sample_X)
print(metric)
metric = model.metric(sample_X)
print(metric)
code = model.code(sample_X)
print(code)
recon = model.recon(sample_X)
print(recon)
model.save()
def test_AE():
model = AE()
AE_total_execute(model)
def test_AE_with_noise():
model = AE(with_noise=True)
AE_total_execute(model)
|
[
"script.model.sklearn_like_model.AE.AE.AE",
"script.data_handler.DatasetPackLoader.DatasetPackLoader"
] |
[((998, 1002), 'script.model.sklearn_like_model.AE.AE.AE', 'AE', ([], {}), '()\n', (1000, 1002), False, 'from script.model.sklearn_like_model.AE.AE import AE\n'), ((1076, 1095), 'script.model.sklearn_like_model.AE.AE.AE', 'AE', ([], {'with_noise': '(True)'}), '(with_noise=True)\n', (1078, 1095), False, 'from script.model.sklearn_like_model.AE.AE import AE\n'), ((137, 156), 'script.data_handler.DatasetPackLoader.DatasetPackLoader', 'DatasetPackLoader', ([], {}), '()\n', (154, 156), False, 'from script.data_handler.DatasetPackLoader import DatasetPackLoader\n')]
|
#encoding:utf-8
'''
Created on 2015-8-27
Picture viewer window (图片查看窗口)
@author: user
'''
from PyQt4 import QtGui, QtCore, uic
from PyQt4.Qt import pyqtSlot
from PyQt4.QtGui import QMessageBox
from shhicparking.server import TSStub
from shhicparking.util import dateutil
import base64
class PictureViewerDlg(QtGui.QDialog):
def __init__(self,parent):
super( PictureViewerDlg, self ).__init__(parent=parent)
uic.loadUi( "shhicparking/ui/uires/pictureViewerDlg.ui", self )
def show(self,photo,title=None):
QtGui.QDialog.show(self)
self.label.setPixmap(photo)
        self.setWindowTitle(u"图片查看:" + (title if title is not None else ""))
@pyqtSlot()
def on_saveBtn_clicked(self):
fn = QtGui.QFileDialog.getSaveFileName(self,u"保存图片","/", u"图片文件 (*.jpg);;所有文件(*.*)")
if fn != '':
if self.label.pixmap().save(fn):
QMessageBox.information(self, u"保存图片",u"图片保存完成")
else:
                QMessageBox.warning(self, u"保存图片", u"图片保存失败")
@pyqtSlot()
def on_closeBtn_clicked(self):
self.label.clear()
self.hide()
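# Hedged usage sketch (hypothetical pixmap and title; needs a running QApplication):
#   dlg = PictureViewerDlg(parent=main_window)
#   dlg.show(QtGui.QPixmap('snapshot.jpg'), title=u'entrance camera')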
|
[
"PyQt4.QtGui.QMessageBox.information",
"PyQt4.QtGui.QDialog.show",
"PyQt4.uic.loadUi",
"PyQt4.QtGui.QFileDialog.getSaveFileName",
"PyQt4.QtGui.QMessageBox.warning",
"PyQt4.Qt.pyqtSlot"
] |
[((670, 680), 'PyQt4.Qt.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (678, 680), False, 'from PyQt4.Qt import pyqtSlot\n'), ((1028, 1038), 'PyQt4.Qt.pyqtSlot', 'pyqtSlot', ([], {}), '()\n', (1036, 1038), False, 'from PyQt4.Qt import pyqtSlot\n'), ((404, 465), 'PyQt4.uic.loadUi', 'uic.loadUi', (['"""shhicparking/ui/uires/pictureViewerDlg.ui"""', 'self'], {}), "('shhicparking/ui/uires/pictureViewerDlg.ui', self)\n", (414, 465), False, 'from PyQt4 import QtGui, QtCore, uic\n'), ((522, 546), 'PyQt4.QtGui.QDialog.show', 'QtGui.QDialog.show', (['self'], {}), '(self)\n', (540, 546), False, 'from PyQt4 import QtGui, QtCore, uic\n'), ((728, 813), 'PyQt4.QtGui.QFileDialog.getSaveFileName', 'QtGui.QFileDialog.getSaveFileName', (['self', 'u"""保存图片"""', '"""/"""', 'u"""图片文件 (*.jpg);;所有文件(*.*)"""'], {}), "(self, u'保存图片', '/',\n u'图片文件 (*.jpg);;所有文件(*.*)')\n", (761, 813), False, 'from PyQt4 import QtGui, QtCore, uic\n'), ((890, 939), 'PyQt4.QtGui.QMessageBox.information', 'QMessageBox.information', (['self', 'u"""保存图片"""', 'u"""图片保存完成"""'], {}), "(self, u'保存图片', u'图片保存完成')\n", (913, 939), False, 'from PyQt4.QtGui import QMessageBox\n'), ((973, 1018), 'PyQt4.QtGui.QMessageBox.warning', 'QMessageBox.warning', (['self', 'u"""保存图片"""', 'u"""图片保存完成"""'], {}), "(self, u'保存图片', u'图片保存完成')\n", (992, 1018), False, 'from PyQt4.QtGui import QMessageBox\n')]
|
# Objective: learn a Doc2Vec model
import logging
import multiprocessing
import random
from time import time
import numpy as np
from gensim.models import doc2vec
from benchmark_utils import load_benchmarked_app_ids, print_ranking
from sentence_models import print_most_similar_sentences
from universal_sentence_encoder import perform_knn_search_with_vectors_as_input
from universal_sentence_encoder import prepare_knn_search, transform_matches_to_app_ids
from utils import load_tokens, load_game_names, get_doc_model_file_name
from word_model import compute_similarity_using_word2vec_model
def get_tag_prefix():
return 'appID_'
def read_corpus(steam_tokens, game_tags=None, include_app_ids=True):
for app_id, tokens in steam_tokens.items():
doc_tag = []
if include_app_ids:
doc_tag += [get_tag_prefix() + str(app_id)]
try:
# Reference: https://medium.com/scaleabout/a-gentle-introduction-to-doc2vec-db3e8c0cce5e
doc_tag += game_tags[app_id]
except KeyError:
print('AppID = {} cannot be found in tag dictionary.'.format(app_id))
except TypeError:
pass
yield doc2vec.TaggedDocument(tokens, doc_tag)
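# Illustrative sketch (hypothetical tokens and tags): every document is tagged with its
# 'appID_<id>' tag plus any store tags, so both kinds of keys can be queried later
# through model.docvecs:
#   steam_tokens = {'620': ['portal', 'puzzle'], '70': ['half', 'life']}
#   game_tags = {'620': ['Puzzle'], '70': ['FPS']}
#   list(read_corpus(steam_tokens, game_tags))
#   yields TaggedDocument(['portal', 'puzzle'], ['appID_620', 'Puzzle']) and
#          TaggedDocument(['half', 'life'], ['appID_70', 'FPS'])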
def reformat_similarity_scores_for_doc2vec(similarity_scores_as_tuples, game_names=None):
if game_names is None:
game_names, _ = load_game_names()
dummy_app_ids = []
similarity_scores = dict()
for app_id, similarity_value in similarity_scores_as_tuples:
if app_id.startswith(get_tag_prefix()):
app_id = app_id[len(get_tag_prefix()):]
similarity_scores[str(app_id)] = similarity_value
if str(app_id) not in game_names:
dummy_app_ids.append(app_id)
if len(dummy_app_ids) > 0:
print('Dummy appIDs: {}'.format(dummy_app_ids))
return similarity_scores
def train_doc_model_on_steam_tokens(model=None, steam_tokens=None, num_epochs=10):
    # You do not want to perform training this way, because training already happened when initializing the model
# with Doc2Vec(documents). Moreover, calling train() several times messes with decay of learning rate alpha!
if steam_tokens is None:
steam_tokens = load_tokens()
documents = list(read_corpus(steam_tokens))
if model is None:
model = doc2vec.Doc2Vec(documents) # training happens with 5 epochs (default) here
start = time()
model.train(documents, total_examples=len(documents), epochs=num_epochs)
    print('Elapsed time: %.2f' % (time() - start))
model.save(get_doc_model_file_name())
return model
def compute_similarity_using_doc2vec_model(query_app_id, steam_tokens=None, model=None,
verbose=False,
enforce_training=False, avoid_inference=False, num_items_displayed=10):
if steam_tokens is None:
steam_tokens = load_tokens()
if model is None:
try:
print('Loading Doc2Vec model.')
model = doc2vec.Doc2Vec.load(get_doc_model_file_name())
if enforce_training:
model = train_doc_model_on_steam_tokens(model=model, steam_tokens=steam_tokens)
except FileNotFoundError:
print('Training Doc2Vec model from scratch.')
model = train_doc_model_on_steam_tokens(model=None, steam_tokens=steam_tokens)
if avoid_inference:
if verbose:
print('Finding most similar documents based on the query appID.')
# For games which are part of the training corpus, we do not need to call model.infer_vector()
similarity_scores_as_tuples = model.docvecs.most_similar(positive=get_tag_prefix() + str(query_app_id),
topn=num_items_displayed)
else:
if verbose:
print('Finding most similar documents based on an inferred vector, which represents the query document.')
query = steam_tokens[query_app_id]
# Caveat: « Subsequent calls to this function may infer different representations for the same document. »
# Reference: https://radimrehurek.com/gensim/models/doc2vec.html#gensim.models.doc2vec.Doc2Vec.infer_vector
inferred_vector = model.infer_vector(query)
similarity_scores_as_tuples = model.docvecs.most_similar([inferred_vector])
similarity_scores = reformat_similarity_scores_for_doc2vec(similarity_scores_as_tuples)
print_most_similar_sentences(similarity_scores, num_items_displayed=num_items_displayed)
return similarity_scores
def check_analogy(model, pos, neg, num_items_displayed=10):
similarity_scores_as_tuples = model.docvecs.most_similar(positive=[get_tag_prefix() + p for p in pos],
negative=[get_tag_prefix() + n for n in neg],
topn=num_items_displayed)
similarity_scores = reformat_similarity_scores_for_doc2vec(similarity_scores_as_tuples)
print_most_similar_sentences(similarity_scores, num_items_displayed)
return
def apply_pipeline(train_from_scratch=True, avoid_inference=False, shuffle_corpus=True,
include_genres=False, include_categories=True, include_app_ids=True,
verbose=False):
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
game_names, game_tags = load_game_names(include_genres, include_categories)
steam_tokens = load_tokens()
documents = list(read_corpus(steam_tokens, game_tags, include_app_ids))
if shuffle_corpus:
# « Only if the training data has some existing clumping – like all the examples with certain words/topics are
# stuck together at the top or bottom of the ordering – is native ordering likely to cause training problems.
# And in that case, a single shuffle, before any training, should be enough to remove the clumping. »
# Reference: https://stackoverflow.com/a/48080869
random.shuffle(documents)
if train_from_scratch:
print('Creating a new Doc2Vec model from scratch.')
model = doc2vec.Doc2Vec(documents,
vector_size=100,
window=5,
min_count=5,
epochs=20,
workers=multiprocessing.cpu_count())
# NB: Do not follow the piece of advice given in https://rare-technologies.com/doc2vec-tutorial/
# « I have obtained better results by iterating over the data several times and either:
# 1. randomizing the order of input sentences, or
# 2. manually controlling the learning rate over the course of several iterations. »
# Indeed, in my experience, this leads to buggy results. Moreover, this approach is not recommended according to
# https://stackoverflow.com/a/48080869
model.save(get_doc_model_file_name())
else:
print('Loading previous Doc2Vec model.')
model = doc2vec.Doc2Vec.load(get_doc_model_file_name())
# Test doc2vec
if verbose:
try:
# Spelunky + (Slay the Spire) - (Dream Quest)
check_analogy(model, pos=['239350', '646570'], neg=['557410'])
except TypeError:
pass
try:
# Half-Life + (Witcher 2) - (Witcher)
check_analogy(model, pos=['70', '20920'], neg=['20900'])
except TypeError:
pass
query_app_ids = ['620', '364470', '504230', '583950', '646570', '863550', '794600']
for query_app_id in query_app_ids:
print('Query appID: {} ({})'.format(query_app_id, game_names[query_app_id]))
compute_similarity_using_doc2vec_model(query_app_id, steam_tokens, model,
avoid_inference=avoid_inference,
num_items_displayed=10)
# Check the relevance of the corresponding word2vec
for query_word in ['anime', 'fun', 'violent']:
compute_similarity_using_word2vec_model(query_word, steam_tokens, model)
entity = get_doc_model_entity(model)
tag_entity = set(tag for tag in entity if 'appID_' not in tag)
print(tag_entity)
query_tags = ['In-App Purchases', 'Free to Play', 'Violent', 'Early Access']
for query_tag in tag_entity.intersection(query_tags):
for query_app_id in query_app_ids:
try:
sim = model.docvecs.similarity(get_tag_prefix() + query_app_id, query_tag)
print('Similarity = {:.0%} for tag {} vs. appID {} ({})'.format(sim, query_tag, query_app_id,
game_names[query_app_id]))
except KeyError:
pass
num_items_displayed = 3
for query_tag in tag_entity:
print('\nTag: {}'.format(query_tag))
similarity_scores_as_tuples = model.docvecs.most_similar(positive=query_tag, topn=num_items_displayed)
similarity_scores = reformat_similarity_scores_for_doc2vec(similarity_scores_as_tuples)
print_most_similar_sentences(similarity_scores, num_items_displayed=num_items_displayed)
# Top 100
query_app_ids = load_benchmarked_app_ids(append_hard_coded_app_ids=True)
num_neighbors = 10
only_print_banners = True
use_cosine_similarity = True
label_database = np.array(model.docvecs.vectors_docs)
doc_tags = list(model.docvecs.doctags.keys())
init_indices = np.array(range(len(doc_tags)))
bool_indices_to_remove = list(map(lambda x: not x.startswith(get_tag_prefix()), doc_tags))
indices_to_remove = init_indices[bool_indices_to_remove]
label_database = np.delete(label_database, indices_to_remove, axis=0)
app_ids = [int(doc_tag[len(get_tag_prefix()):]) for doc_tag in doc_tags
if doc_tag.startswith(get_tag_prefix())]
knn = prepare_knn_search(label_database, use_cosine_similarity=use_cosine_similarity)
query_des = None
for query_app_id in query_app_ids:
if avoid_inference:
inferred_vector = label_database[app_ids.index(query_app_id)]
else:
# From query appID to query feature vector
query = steam_tokens[str(query_app_id)]
# Caveat: « Subsequent calls to this function may infer different representations for the same document. »
# Reference: https://radimrehurek.com/gensim/models/doc2vec.html#gensim.models.doc2vec.Doc2Vec.infer_vector
inferred_vector = model.infer_vector(query)
if query_des is None:
query_des = inferred_vector
else:
query_des = np.vstack((query_des, inferred_vector))
# Matching of feature vectors
matches = perform_knn_search_with_vectors_as_input(query_des, knn, num_neighbors)
# From feature matches to appID matches
matches_as_app_ids = transform_matches_to_app_ids(matches, app_ids)
print_ranking(query_app_ids,
matches_as_app_ids,
num_elements_displayed=num_neighbors,
only_print_banners=only_print_banners)
return
def get_doc_model_entity(model):
# The equivalent of a vocabulary for a word model
index2entity_set = set(model.docvecs.index2entity)
return index2entity_set
if __name__ == '__main__':
apply_pipeline(train_from_scratch=True, avoid_inference=False, shuffle_corpus=True,
include_genres=False, include_categories=False, include_app_ids=True)
|
[
"benchmark_utils.load_benchmarked_app_ids",
"random.shuffle",
"word_model.compute_similarity_using_word2vec_model",
"sentence_models.print_most_similar_sentences",
"multiprocessing.cpu_count",
"universal_sentence_encoder.prepare_knn_search",
"universal_sentence_encoder.perform_knn_search_with_vectors_as_input",
"utils.get_doc_model_file_name",
"utils.load_game_names",
"benchmark_utils.print_ranking",
"utils.load_tokens",
"numpy.delete",
"numpy.vstack",
"gensim.models.doc2vec.TaggedDocument",
"logging.basicConfig",
"universal_sentence_encoder.transform_matches_to_app_ids",
"time.time",
"numpy.array",
"gensim.models.doc2vec.Doc2Vec"
] |
[((2425, 2431), 'time.time', 'time', ([], {}), '()\n', (2429, 2431), False, 'from time import time\n'), ((4499, 4592), 'sentence_models.print_most_similar_sentences', 'print_most_similar_sentences', (['similarity_scores'], {'num_items_displayed': 'num_items_displayed'}), '(similarity_scores, num_items_displayed=\n num_items_displayed)\n', (4527, 4592), False, 'from sentence_models import print_most_similar_sentences\n'), ((5078, 5146), 'sentence_models.print_most_similar_sentences', 'print_most_similar_sentences', (['similarity_scores', 'num_items_displayed'], {}), '(similarity_scores, num_items_displayed)\n', (5106, 5146), False, 'from sentence_models import print_most_similar_sentences\n'), ((5376, 5471), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.INFO)\n", (5395, 5471), False, 'import logging\n'), ((5497, 5548), 'utils.load_game_names', 'load_game_names', (['include_genres', 'include_categories'], {}), '(include_genres, include_categories)\n', (5512, 5548), False, 'from utils import load_tokens, load_game_names, get_doc_model_file_name\n'), ((5569, 5582), 'utils.load_tokens', 'load_tokens', ([], {}), '()\n', (5580, 5582), False, 'from utils import load_tokens, load_game_names, get_doc_model_file_name\n'), ((9486, 9542), 'benchmark_utils.load_benchmarked_app_ids', 'load_benchmarked_app_ids', ([], {'append_hard_coded_app_ids': '(True)'}), '(append_hard_coded_app_ids=True)\n', (9510, 9542), False, 'from benchmark_utils import load_benchmarked_app_ids, print_ranking\n'), ((9652, 9688), 'numpy.array', 'np.array', (['model.docvecs.vectors_docs'], {}), '(model.docvecs.vectors_docs)\n', (9660, 9688), True, 'import numpy as np\n'), ((9967, 10019), 'numpy.delete', 'np.delete', (['label_database', 'indices_to_remove'], {'axis': '(0)'}), '(label_database, indices_to_remove, axis=0)\n', (9976, 10019), True, 'import numpy as np\n'), ((10164, 10243), 'universal_sentence_encoder.prepare_knn_search', 'prepare_knn_search', (['label_database'], {'use_cosine_similarity': 'use_cosine_similarity'}), '(label_database, use_cosine_similarity=use_cosine_similarity)\n', (10182, 10243), False, 'from universal_sentence_encoder import prepare_knn_search, transform_matches_to_app_ids\n'), ((11021, 11092), 'universal_sentence_encoder.perform_knn_search_with_vectors_as_input', 'perform_knn_search_with_vectors_as_input', (['query_des', 'knn', 'num_neighbors'], {}), '(query_des, knn, num_neighbors)\n', (11061, 11092), False, 'from universal_sentence_encoder import perform_knn_search_with_vectors_as_input\n'), ((11163, 11209), 'universal_sentence_encoder.transform_matches_to_app_ids', 'transform_matches_to_app_ids', (['matches', 'app_ids'], {}), '(matches, app_ids)\n', (11191, 11209), False, 'from universal_sentence_encoder import prepare_knn_search, transform_matches_to_app_ids\n'), ((11215, 11345), 'benchmark_utils.print_ranking', 'print_ranking', (['query_app_ids', 'matches_as_app_ids'], {'num_elements_displayed': 'num_neighbors', 'only_print_banners': 'only_print_banners'}), '(query_app_ids, matches_as_app_ids, num_elements_displayed=\n num_neighbors, only_print_banners=only_print_banners)\n', (11228, 11345), False, 'from benchmark_utils import load_benchmarked_app_ids, print_ranking\n'), ((1367, 1384), 'utils.load_game_names', 'load_game_names', ([], {}), '()\n', (1382, 1384), False, 'from utils import load_tokens, load_game_names, 
get_doc_model_file_name\n'), ((2234, 2247), 'utils.load_tokens', 'load_tokens', ([], {}), '()\n', (2245, 2247), False, 'from utils import load_tokens, load_game_names, get_doc_model_file_name\n'), ((2336, 2362), 'gensim.models.doc2vec.Doc2Vec', 'doc2vec.Doc2Vec', (['documents'], {}), '(documents)\n', (2351, 2362), False, 'from gensim.models import doc2vec\n'), ((2578, 2603), 'utils.get_doc_model_file_name', 'get_doc_model_file_name', ([], {}), '()\n', (2601, 2603), False, 'from utils import load_tokens, load_game_names, get_doc_model_file_name\n'), ((2938, 2951), 'utils.load_tokens', 'load_tokens', ([], {}), '()\n', (2949, 2951), False, 'from utils import load_tokens, load_game_names, get_doc_model_file_name\n'), ((6097, 6122), 'random.shuffle', 'random.shuffle', (['documents'], {}), '(documents)\n', (6111, 6122), False, 'import random\n'), ((1184, 1223), 'gensim.models.doc2vec.TaggedDocument', 'doc2vec.TaggedDocument', (['tokens', 'doc_tag'], {}), '(tokens, doc_tag)\n', (1206, 1223), False, 'from gensim.models import doc2vec\n'), ((7051, 7076), 'utils.get_doc_model_file_name', 'get_doc_model_file_name', ([], {}), '()\n', (7074, 7076), False, 'from utils import load_tokens, load_game_names, get_doc_model_file_name\n'), ((7174, 7199), 'utils.get_doc_model_file_name', 'get_doc_model_file_name', ([], {}), '()\n', (7197, 7199), False, 'from utils import load_tokens, load_game_names, get_doc_model_file_name\n'), ((8203, 8275), 'word_model.compute_similarity_using_word2vec_model', 'compute_similarity_using_word2vec_model', (['query_word', 'steam_tokens', 'model'], {}), '(query_word, steam_tokens, model)\n', (8242, 8275), False, 'from word_model import compute_similarity_using_word2vec_model\n'), ((9361, 9454), 'sentence_models.print_most_similar_sentences', 'print_most_similar_sentences', (['similarity_scores'], {'num_items_displayed': 'num_items_displayed'}), '(similarity_scores, num_items_displayed=\n num_items_displayed)\n', (9389, 9454), False, 'from sentence_models import print_most_similar_sentences\n'), ((10932, 10971), 'numpy.vstack', 'np.vstack', (['(query_des, inferred_vector)'], {}), '((query_des, inferred_vector))\n', (10941, 10971), True, 'import numpy as np\n'), ((2545, 2551), 'time.time', 'time', ([], {}), '()\n', (2549, 2551), False, 'from time import time\n'), ((3073, 3098), 'utils.get_doc_model_file_name', 'get_doc_model_file_name', ([], {}), '()\n', (3096, 3098), False, 'from utils import load_tokens, load_game_names, get_doc_model_file_name\n'), ((6473, 6500), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (6498, 6500), False, 'import multiprocessing\n')]
|
import os
import sys
import glob
import zipfile
import pandas as pd
import numpy as np
from .context import get_dataset_folder
from .results import *
from automlk.worker import get_search_rounds
from .print import *
import jinja2
import subprocess
jinja_globals = {'print_list': print_list,
'print_score': print_score,
'print_score_std': print_score_std,
'print_value': print_value,
'print_duration': print_duration,
'print_params': print_params,
'print_other_metrics': print_other_metrics,
'print_title': print_title,
}
def render(template, fileout, **kwargs):
"""
generates output from template into the fileout file
:param template: jinja2 template to be used (in folder /template)
:param fileout: file to store the results
:param kwargs: args to render the template
:return:
"""
t = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath="../automlk/templates/")).get_template(template)
with open(fileout, 'w') as f:
f.write(t.render({**kwargs, **jinja_globals}))
def gener_doc(dataset):
"""
generate the documentation of this dataset
:param dataset: dataset object
:return:
"""
# check or create doc folder
folder = get_dataset_folder(dataset.dataset_id) + '/docs'
if not os.path.exists(folder):
os.makedirs(folder)
os.makedirs(folder + '/_build')
os.makedirs(folder + '/_static')
os.makedirs(folder + '/_templates')
# generate conf.py
render('conf.txt', folder + '/conf.py', dataset=dataset)
render('make.bat', folder + '/make.bat', dataset=dataset)
render('makefile.txt', folder + '/Makefile', dataset=dataset)
# generate index
render('index.rst', folder + '/index.rst', dataset=dataset)
# dataset data and features
search = get_search_rounds(dataset.dataset_id)
if len(search) > 0:
best = get_best_models(dataset.dataset_id)
best_pp = get_best_pp(dataset.dataset_id)
# separate models (level 0) from ensembles (level 1)
best1 = [b for b in best if b['level'] == 1]
best2 = [b for b in best if b['level'] == 2]
print(len(best1), len(best2))
print(best1[:2])
render('dataset.rst', folder + '/dataset.rst', dataset=dataset, best1=best1, best2=best2, best_pp=best_pp,
n_searches1=len(search[search.level == 1]),
n_searches2=len(search[search.level == 2]))
# then for the best rounds
N_ROUNDS = 5
for round_id in list([b['round_id'] for b in best1[:N_ROUNDS]]) + list([b['round_id'] for b in best2[:N_ROUNDS]]):
round = search[search.round_id == int(round_id)].to_dict(orient='records')[0]
pipeline = [s for s in round['pipeline'] if s[0] not in ['NO-SCALE', 'PASS']]
params = get_round_params(search, round_id)
features = get_feature_importance(dataset.dataset_id, round_id)
render('round.rst', folder + '/round_%s.rst' % round_id, dataset=dataset, round=round,
pipeline=pipeline, features=features, params=params, cols=params.keys())
else:
# return render_template('dataset.html', dataset=dataset, n_searches1=0)
render('dataset.rst', folder + '/dataset.rst', dataset=dataset, n_searches1=0)
# then generate html and pdf with make
if sys.platform == 'linux':
subprocess.call(['sh', '../scripts/gen_doc.sh', os.path.abspath(get_dataset_folder(dataset.dataset_id)+'/docs')])
else:
os.system('call ../scripts/gen_doc ' + os.path.abspath(get_dataset_folder(dataset.dataset_id)+'/docs'))
# generate zip file of the html site
with zipfile.ZipFile(get_dataset_folder(dataset.dataset_id) + '/doc.zip', 'w') as z:
root = get_dataset_folder(dataset.dataset_id) + '/docs/_build/html/'
for dir in ['', '_static/', '_images/', '_sources/']:
for f in glob.glob(root + dir + '*.*'):
z.write(f, dataset.dataset_id + '/' + dir + os.path.basename(f))
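# A hedged usage sketch (not part of the original module): the `dataset` object is
# assumed to come from automlk's dataset store; the accessor name below is an
# assumption, not verified against the automlk API.
#
#     from automlk.dataset import get_dataset   # hypothetical accessor
#     gener_doc(get_dataset('42'))
#
# This renders the Sphinx sources under <dataset_folder>/docs, builds the HTML/PDF
# site via scripts/gen_doc, and writes doc.zip next to the docs folder.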
|
[
"os.makedirs",
"os.path.basename",
"os.path.exists",
"jinja2.FileSystemLoader",
"glob.glob",
"automlk.worker.get_search_rounds"
] |
[((1924, 1961), 'automlk.worker.get_search_rounds', 'get_search_rounds', (['dataset.dataset_id'], {}), '(dataset.dataset_id)\n', (1941, 1961), False, 'from automlk.worker import get_search_rounds\n'), ((1402, 1424), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (1416, 1424), False, 'import os\n'), ((1434, 1453), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (1445, 1453), False, 'import os\n'), ((1462, 1493), 'os.makedirs', 'os.makedirs', (["(folder + '/_build')"], {}), "(folder + '/_build')\n", (1473, 1493), False, 'import os\n'), ((1502, 1534), 'os.makedirs', 'os.makedirs', (["(folder + '/_static')"], {}), "(folder + '/_static')\n", (1513, 1534), False, 'import os\n'), ((1543, 1578), 'os.makedirs', 'os.makedirs', (["(folder + '/_templates')"], {}), "(folder + '/_templates')\n", (1554, 1578), False, 'import os\n'), ((4022, 4051), 'glob.glob', 'glob.glob', (["(root + dir + '*.*')"], {}), "(root + dir + '*.*')\n", (4031, 4051), False, 'import glob\n'), ((985, 1044), 'jinja2.FileSystemLoader', 'jinja2.FileSystemLoader', ([], {'searchpath': '"""../automlk/templates/"""'}), "(searchpath='../automlk/templates/')\n", (1008, 1044), False, 'import jinja2\n'), ((4113, 4132), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (4129, 4132), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
Copyright 2021, Gradient Zero
All rights reserved
"""
import logging
from dq0.sdk.errors.errors import fatal_error
from dq0.sdk.pipeline import pipeline_config
import pandas as pd
from sklearn import pipeline
logger = logging.getLogger(__name__)
class Pipeline():
def __init__(self, steps=None, config_path=None, transformers_root_dir='.', log_key_string='', **kwargs):
"""
        Initialize with steps directly (standalone mode) or with a config file. Exactly one of the two must be given.
        params:
            steps: List of (name, transform) tuples (implementing fit/transform) that are chained, in the order in which they are chained.
            config_path: path to the config file where the pipeline steps are given.
"""
self.log_key_string = log_key_string
if (steps is not None) and (config_path is None):
self.pipeline = pipeline.Pipeline(steps)
elif (steps is None) and (config_path is not None):
pp_config = pipeline_config.PipelineConfig(config_path=config_path)
steps = pp_config.get_steps_from_config(root_dir=transformers_root_dir, log_key_string=self.log_key_string)
self.pipeline = pipeline.Pipeline(steps)
else:
fatal_error("Both steps and config_path are given. Only one should be given.")
def fit(self, X, y=None, **fit_params):
if hasattr(X, 'columns'):
self.col_names = X.columns
else:
self.col_names = None
self.pipeline = self.pipeline.fit(X=X, y=y, **fit_params)
def fit_transform(self, X, y=None, **fit_params):
if hasattr(X, 'columns'):
self.col_names = X.columns
else:
self.col_names = None
X_t = self.pipeline.fit_transform(X=X, y=y, **fit_params)
if self.col_names is not None:
X_t = pd.DataFrame(X_t, columns=self.col_names)
return X_t
def get_params(self, deep=True):
return self.pipeline.get_params(deep=deep)
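# A minimal usage sketch of the standalone mode (steps passed directly); it assumes
# scikit-learn and pandas are available, which the imports above already require.
if __name__ == '__main__':
    from sklearn.preprocessing import StandardScaler
    df = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [10.0, 20.0, 30.0]})
    pipe = Pipeline(steps=[('scale', StandardScaler())])
    # fit_transform returns a DataFrame with the original column names restored
    print(pipe.fit_transform(df))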
|
[
"pandas.DataFrame",
"dq0.sdk.errors.errors.fatal_error",
"sklearn.pipeline.Pipeline",
"dq0.sdk.pipeline.pipeline_config.PipelineConfig",
"logging.getLogger"
] |
[((251, 278), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (268, 278), False, 'import logging\n'), ((902, 926), 'sklearn.pipeline.Pipeline', 'pipeline.Pipeline', (['steps'], {}), '(steps)\n', (919, 926), False, 'from sklearn import pipeline\n'), ((1876, 1917), 'pandas.DataFrame', 'pd.DataFrame', (['X_t'], {'columns': 'self.col_names'}), '(X_t, columns=self.col_names)\n', (1888, 1917), True, 'import pandas as pd\n'), ((1011, 1066), 'dq0.sdk.pipeline.pipeline_config.PipelineConfig', 'pipeline_config.PipelineConfig', ([], {'config_path': 'config_path'}), '(config_path=config_path)\n', (1041, 1066), False, 'from dq0.sdk.pipeline import pipeline_config\n'), ((1215, 1239), 'sklearn.pipeline.Pipeline', 'pipeline.Pipeline', (['steps'], {}), '(steps)\n', (1232, 1239), False, 'from sklearn import pipeline\n'), ((1266, 1344), 'dq0.sdk.errors.errors.fatal_error', 'fatal_error', (['"""Both steps and config_path are given. Only one should be given."""'], {}), "('Both steps and config_path are given. Only one should be given.')\n", (1277, 1344), False, 'from dq0.sdk.errors.errors import fatal_error\n')]
|
import json
import boto3
class IamHelper:
def __init__(self):
self.client = boto3.client("iam")
self.ssm_client = boto3.client('ssm')
def create_or_get_ecs_role(self) -> str:
self.role_name = "spot-bot-ecs-service-role"
print("Check role exist")
try:
response = self.client.get_role(
RoleName=self.role_name,
)
print("<<< Role exist and role ARN is " + response["Role"]["Arn"])
return response["Role"]["Arn"]
except Exception as e:
print(type(e).__name__) # NoSuchEntityException for the first time call.
if type(e).__name__ == "NoSuchEntityException":
return self._create_ecs_role()
def _create_ecs_role(self) -> str:
assume_role_policy_document = json.dumps({
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "ecs-tasks.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
})
response = self.client.create_role(
RoleName=self.role_name,
AssumeRolePolicyDocument=assume_role_policy_document
)
print("<<< created ecs role: " + str(response))
return response["Role"]["Arn"]
@staticmethod
def get_account_id():
return boto3.client('sts').get_caller_identity().get('Account')
@staticmethod
def get_region():
region_name = boto3.session.Session().region_name
print("Current Region is - ", region_name)
return region_name
@staticmethod
def get_partition():
"""
Auto switch the region partition for arn.
"""
if boto3.session.Session().region_name in ("cn-northwest-1", "cn-north-1"):
print("China region")
return "aws-cn"
else:
return "aws"
|
[
"boto3.session.Session",
"boto3.client",
"json.dumps"
] |
[((92, 111), 'boto3.client', 'boto3.client', (['"""iam"""'], {}), "('iam')\n", (104, 111), False, 'import boto3\n'), ((138, 157), 'boto3.client', 'boto3.client', (['"""ssm"""'], {}), "('ssm')\n", (150, 157), False, 'import boto3\n'), ((829, 1002), 'json.dumps', 'json.dumps', (["{'Version': '2012-10-17', 'Statement': [{'Sid': '', 'Effect': 'Allow',\n 'Principal': {'Service': 'ecs-tasks.amazonaws.com'}, 'Action':\n 'sts:AssumeRole'}]}"], {}), "({'Version': '2012-10-17', 'Statement': [{'Sid': '', 'Effect':\n 'Allow', 'Principal': {'Service': 'ecs-tasks.amazonaws.com'}, 'Action':\n 'sts:AssumeRole'}]})\n", (839, 1002), False, 'import json\n'), ((1635, 1658), 'boto3.session.Session', 'boto3.session.Session', ([], {}), '()\n', (1656, 1658), False, 'import boto3\n'), ((1878, 1901), 'boto3.session.Session', 'boto3.session.Session', ([], {}), '()\n', (1899, 1901), False, 'import boto3\n'), ((1515, 1534), 'boto3.client', 'boto3.client', (['"""sts"""'], {}), "('sts')\n", (1527, 1534), False, 'import boto3\n')]
|
# Copyright 2021-present, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from datetime import datetime
import sys
from time import time
from typing import Union
def progress_bar(i: int, max_iter: int, epoch: Union[int, str],
task_number: int, loss: float) -> None:
"""
Prints out the progress bar on the stderr file.
:param i: the current iteration
    :param max_iter: the maximum number of iterations
:param epoch: the epoch
:param task_number: the task index
:param loss: the current value of the loss function
"""
if not (i + 1) % 10 or (i + 1) == max_iter:
progress = min(float((i + 1) / max_iter), 1)
progress_bar = ('█' * int(50 * progress)) + ('┈' * (50 - int(50 * progress)))
print('\r[ {} ] Task {} | epoch {}: |{}| loss: {}'.format(
datetime.now().strftime("%m-%d | %H:%M"),
task_number + 1 if isinstance(task_number, int) else task_number,
epoch,
progress_bar,
round(loss / (i + 1), 8)
), file=sys.stderr, end='', flush=True)
class ProgressBar():
def __init__(self):
self.old_time = 0
self.running_sum = 0
def __call__(self, i: int, max_iter: int, epoch: Union[int, str],
task_number: int, loss: float) -> None:
"""
Prints out the progress bar on the stderr file.
:param i: the current iteration
        :param max_iter: the maximum number of iterations
:param epoch: the epoch
:param task_number: the task index
:param loss: the current value of the loss function
"""
if i == 0:
self.old_time = time()
self.running_sum = 0
else:
self.running_sum = self.running_sum + (time() - self.old_time)
self.old_time = time()
if i:
progress = min(float((i + 1) / max_iter), 1)
progress_bar = ('█' * int(50 * progress)) + ('┈' * (50 - int(50 * progress)))
print('\r[ {} ] Task {} | epoch {}: |{}| {} ep/h | loss: {} |'.format(
datetime.now().strftime("%m-%d | %H:%M"),
task_number + 1 if isinstance(task_number, int) else task_number,
epoch,
progress_bar,
round(3600 / (self.running_sum / i * max_iter), 2),
round(loss, 8)
), file=sys.stderr, end='', flush=True)
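# A minimal sketch of driving the stateful ProgressBar over a fake training loop;
# the sleep and loss values are placeholders for real training work.
if __name__ == '__main__':
    from time import sleep
    bar = ProgressBar()
    max_iter = 50
    for i in range(max_iter):
        sleep(0.01)  # stand-in for an actual training step
        bar(i, max_iter, epoch=1, task_number=0, loss=1.0 / (i + 1))
    print(file=sys.stderr)  # finish the carriage-return line with a newline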
|
[
"datetime.datetime.now",
"time.time"
] |
[((1810, 1816), 'time.time', 'time', ([], {}), '()\n', (1814, 1816), False, 'from time import time\n'), ((1967, 1973), 'time.time', 'time', ([], {}), '()\n', (1971, 1973), False, 'from time import time\n'), ((1915, 1921), 'time.time', 'time', ([], {}), '()\n', (1919, 1921), False, 'from time import time\n'), ((967, 981), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (979, 981), False, 'from datetime import datetime\n'), ((2234, 2248), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2246, 2248), False, 'from datetime import datetime\n')]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Chatham House Data
------------------
Collects input data for Chatham House.
"""
import logging
from hdx.data.dataset import Dataset
from hdx.data.resource import Resource
from hdx.data.showcase import Showcase
from hdx.utilities.dictandlist import integer_value_convert
from hdx.location.country import Country
from slugify import slugify
logger = logging.getLogger(__name__)
def append_value(countrydict, iso3, tier_or_type, name, value):
tiers_or_types = countrydict.get(iso3)
if tiers_or_types is None:
tiers_or_types = dict()
countrydict[iso3] = tiers_or_types
camps = tiers_or_types.get(tier_or_type)
if camps is None:
camps = dict()
tiers_or_types[tier_or_type] = camps
existing_pop = camps.get(name)
if existing_pop is None:
existing_pop = 0
camps[name] = existing_pop + value
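# Worked example (illustrative comment, not part of the original module):
#   d = {}
#   append_value(d, 'SDN', 'camp', 'Camp A', 100)
#   append_value(d, 'SDN', 'camp', 'Camp A', 50)
#   # d == {'SDN': {'camp': {'Camp A': 150}}} -- repeated entries accumulate.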
def check_name_dispersed(name):
lowername = name.lower()
if 'dispersed' in lowername and ('country' in name.lower() or 'territory' in name.lower()):
return True
return False
def get_iso3(name):
iso3, match = Country.get_iso3_country_code_fuzzy(name, exception=ValueError)
if not match:
logger.info('Country %s matched to ISO3: %s!' % (name, iso3))
return iso3
def get_camp_non_camp_populations(noncamp_types, camp_types, camp_overrides, datasets, downloader):
noncamp_types = noncamp_types.split(',')
camp_types = camp_types.split(',')
dataset_unhcr = None
latest_date = None
for dataset in datasets:
if 'displacement' in dataset['title'].lower():
date = dataset.get_dataset_date_as_datetime()
if latest_date is None or date > latest_date:
dataset_unhcr = dataset
latest_date = date
if dataset_unhcr is None:
raise ValueError('No UNHCR dataset found!')
url = dataset_unhcr.get_resources()[0]['url']
country_ind = 0 # assume first column contains country
iso3 = None
row = None
prev_row = None
all_camps_per_country = dict()
unhcr_non_camp = dict()
unhcr_camp = dict()
unhcr_camp_excluded = dict()
rowiter = downloader.get_tabular_rows(url, sheet='Tab15')
for row in rowiter:
country = row[country_ind]
iso3 = Country.get_iso3_country_code(country)
if iso3 is not None:
break
prev_row = row
accommodation_ind = None
location_ind = None
population_ind = None
population = None
for i, text in enumerate(prev_row):
header = text.lower()
value = row[i]
if 'accommodation' in header:
accommodation_ind = i
elif 'location' in header and len(value) > 1:
location_ind = i
else:
try:
population = int(value)
population_ind = i
break
except ValueError:
pass
campname = row[location_ind]
def get_accommodation_type(name):
accom_type = camp_overrides['Accommodation Type'].get(name)
if accom_type is None:
accom_type = row[accommodation_ind]
else:
logger.info('Overriding accommodation type to %s for %s' % (accom_type, name))
return accom_type.lower()
accommodation_type = get_accommodation_type(campname)
def match_camp_types(name, accom_type, pop, iso):
if check_name_dispersed(name):
accom_type = noncamp_types[0]
found_camp_type = None
for camp_type in camp_types:
if camp_type in accom_type:
found_camp_type = camp_type
unhcr_camp[name] = pop, iso, found_camp_type
break
for noncamp_type in noncamp_types:
if noncamp_type in accom_type:
found_camp_type = noncamp_type
append_value(unhcr_non_camp, iso, found_camp_type, name, pop)
break
if found_camp_type is None:
append_value(unhcr_camp_excluded, iso, accom_type, name, pop)
append_value(all_camps_per_country, iso, accom_type, name, pop)
else:
append_value(all_camps_per_country, iso, found_camp_type, name, pop)
match_camp_types(campname, accommodation_type, population, iso3)
for row in rowiter:
country = row[country_ind]
if not country:
continue
if 'NOTES' in country.upper():
break
iso3, match = Country.get_iso3_country_code_fuzzy(country)
if iso3 is None:
logger.warning('Country %s could not be matched to ISO3 code!' % country)
continue
else:
if match is False:
logger.info('Matched %s to ISO3: %s!' % (country, iso3))
campname = row[location_ind]
accommodation_type = get_accommodation_type(campname)
population = int(row[population_ind])
match_camp_types(campname, accommodation_type, population, iso3)
for campname in sorted(camp_overrides['Population']):
if campname in unhcr_camp:
continue
iso3 = camp_overrides['Country'][campname]
accommodation_type = camp_overrides['Accommodation Type'][campname].lower()
population = camp_overrides['Population'][campname]
logger.info('Adding camp from override: %s (%s, %s): %d' % (campname, iso3, accommodation_type, population))
match_camp_types(campname, accommodation_type, population, iso3)
return all_camps_per_country, unhcr_non_camp, unhcr_camp, unhcr_camp_excluded
def get_camptypes(url, downloader):
camptypes = downloader.download_tabular_rows_as_dicts(url)
for key in camptypes:
camptypes[key] = integer_value_convert(camptypes[key])
return camptypes
def get_camptypes_fallbacks(url, downloader, keyfn=lambda x: x):
camptypes = downloader.download_tabular_rows_as_dicts(url)
camptypes_offgrid = dict()
camptypes_solid = dict()
for key in camptypes:
new_key = keyfn(key)
camptypes_offgrid[new_key] = dict()
camptypes_solid[new_key] = dict()
for tier in camptypes[key]:
try:
typeval = int(camptypes[key][tier])
if 'Lighting OffGrid' in tier:
camptypes_offgrid[new_key][tier.replace('Lighting OffGrid ', '')] = typeval
else:
camptypes_solid[new_key][tier.replace('Cooking Solid ', '')] = typeval
except ValueError:
pass
return camptypes_offgrid, camptypes_solid
def get_worldbank_series(json_url, downloader):
response = downloader.download(json_url)
json = response.json()
data = dict()
for countrydata in json[1]:
iso3 = Country.get_iso3_from_iso2(countrydata['country']['id'])
if iso3 is not None:
value = countrydata.get('value')
if value:
data[iso3] = float(value) / 100.0
return data
def get_slumratios(url, downloader):
stream = downloader.get_tabular_stream(url, headers=1, format='csv', compression='zip')
years = set()
for header in stream.headers:
try:
int(header)
years.add(header)
except ValueError:
pass
years = sorted(years, reverse=True)
slumratios = dict()
for row in stream.iter(keyed=True):
if not row:
break
iso3 = Country.get_iso3_from_m49(int(row['CountryCode']))
if iso3 is None:
continue
for year in years:
value = row.get(year)
if value and value != ' ':
slumratios[iso3] = float(value) / 100.0
return slumratios
def generate_dataset_resources_and_showcase(pop_types, today):
title = 'Energy consumption of refugees and displaced people'
slugified_name = slugify(title.lower())
dataset = Dataset({
'name': slugified_name,
'title': title,
})
dataset.set_maintainer('196196be-6037-4488-8b71-d786adf4c081')
dataset.set_organization('0c6bf79f-504c-4ba5-9fdf-c8cc893c8b2f')
dataset.set_dataset_date_from_datetime(today)
dataset.set_expected_update_frequency('Every month')
dataset.add_other_location('world')
tags = ['HXL', 'energy', 'refugees', 'internally displaced persons - idp']
dataset.add_tags(tags)
resources = list()
for pop_type in pop_types:
resource_data = {
'name': '%s_consumption.csv' % pop_type.lower().replace(' ', '_'),
'description': '%s %s' % (pop_type, title.lower()),
'format': 'csv'
}
resources.append(Resource(resource_data))
resource_data = {
'name': 'population.csv',
'description': 'UNHCR displaced population totals',
'format': 'csv'
}
resources.append(Resource(resource_data))
resource_data = {
'name': 'keyfigures_disagg.csv',
'description': 'Disaggregated MEI Key Figures',
'format': 'csv'
}
resources.append(Resource(resource_data))
resource_data = {
'name': 'keyfigures.csv',
'description': 'MEI Key Figures',
'format': 'csv'
}
resources.append(Resource(resource_data))
showcase = Showcase({
'name': '%s-showcase' % slugified_name,
'title': 'Energy services for refugees and displaced people',
'notes': 'Click the image on the right to go to the energy services model',
'url': 'http://www.sciencedirect.com/science/article/pii/S2211467X16300396',
'image_url': 'https://ars.els-cdn.com/content/image/X2211467X.jpg'
})
showcase.add_tags(tags)
return dataset, resources, showcase
|
[
"hdx.data.showcase.Showcase",
"hdx.data.dataset.Dataset",
"hdx.location.country.Country.get_iso3_country_code_fuzzy",
"hdx.utilities.dictandlist.integer_value_convert",
"hdx.location.country.Country.get_iso3_from_iso2",
"hdx.data.resource.Resource",
"logging.getLogger",
"hdx.location.country.Country.get_iso3_country_code"
] |
[((400, 427), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (417, 427), False, 'import logging\n'), ((1142, 1205), 'hdx.location.country.Country.get_iso3_country_code_fuzzy', 'Country.get_iso3_country_code_fuzzy', (['name'], {'exception': 'ValueError'}), '(name, exception=ValueError)\n', (1177, 1205), False, 'from hdx.location.country import Country\n'), ((7930, 7979), 'hdx.data.dataset.Dataset', 'Dataset', (["{'name': slugified_name, 'title': title}"], {}), "({'name': slugified_name, 'title': title})\n", (7937, 7979), False, 'from hdx.data.dataset import Dataset\n'), ((9285, 9638), 'hdx.data.showcase.Showcase', 'Showcase', (["{'name': '%s-showcase' % slugified_name, 'title':\n 'Energy services for refugees and displaced people', 'notes':\n 'Click the image on the right to go to the energy services model',\n 'url':\n 'http://www.sciencedirect.com/science/article/pii/S2211467X16300396',\n 'image_url': 'https://ars.els-cdn.com/content/image/X2211467X.jpg'}"], {}), "({'name': '%s-showcase' % slugified_name, 'title':\n 'Energy services for refugees and displaced people', 'notes':\n 'Click the image on the right to go to the energy services model',\n 'url':\n 'http://www.sciencedirect.com/science/article/pii/S2211467X16300396',\n 'image_url': 'https://ars.els-cdn.com/content/image/X2211467X.jpg'})\n", (9293, 9638), False, 'from hdx.data.showcase import Showcase\n'), ((2318, 2356), 'hdx.location.country.Country.get_iso3_country_code', 'Country.get_iso3_country_code', (['country'], {}), '(country)\n', (2347, 2356), False, 'from hdx.location.country import Country\n'), ((4511, 4555), 'hdx.location.country.Country.get_iso3_country_code_fuzzy', 'Country.get_iso3_country_code_fuzzy', (['country'], {}), '(country)\n', (4546, 4555), False, 'from hdx.location.country import Country\n'), ((5759, 5796), 'hdx.utilities.dictandlist.integer_value_convert', 'integer_value_convert', (['camptypes[key]'], {}), '(camptypes[key])\n', (5780, 5796), False, 'from hdx.utilities.dictandlist import integer_value_convert\n'), ((6795, 6851), 'hdx.location.country.Country.get_iso3_from_iso2', 'Country.get_iso3_from_iso2', (["countrydata['country']['id']"], {}), "(countrydata['country']['id'])\n", (6821, 6851), False, 'from hdx.location.country import Country\n'), ((8873, 8896), 'hdx.data.resource.Resource', 'Resource', (['resource_data'], {}), '(resource_data)\n', (8881, 8896), False, 'from hdx.data.resource import Resource\n'), ((9069, 9092), 'hdx.data.resource.Resource', 'Resource', (['resource_data'], {}), '(resource_data)\n', (9077, 9092), False, 'from hdx.data.resource import Resource\n'), ((9244, 9267), 'hdx.data.resource.Resource', 'Resource', (['resource_data'], {}), '(resource_data)\n', (9252, 9267), False, 'from hdx.data.resource import Resource\n'), ((8680, 8703), 'hdx.data.resource.Resource', 'Resource', (['resource_data'], {}), '(resource_data)\n', (8688, 8703), False, 'from hdx.data.resource import Resource\n')]
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import json
from tornado import gen
from http_client import AAsyncHTTPClient, get_url
class CASHelper(AAsyncHTTPClient):
CA_CERT_PATH = 'XXXXX'
@classmethod
def _base_url(cls):
'''cas server url prefix'''
return 'https://cas.test.change.it:8443'
@classmethod
def get_login_url(cls, redirect_uri, **kwargs):
kwargs['redirect_uri'] = redirect_uri
return get_url(cls._base_url(), '/oauth2/authorize', data=kwargs)
@classmethod
def get_logout_url(cls, ):
return get_url(cls._base_url(), '/logout')
@classmethod
@gen.coroutine
def async_access_token(cls, redirect_uri, code):
result = yield cls.async_get('/oauth2/accessToken',
ca_certs=cls.CA_CERT_PATH,
data={'code': code, 'redirect_uri': redirect_uri})
raise gen.Return(result)
@classmethod
def unpack_access_token(cls, result):
access_token, expires = None, None
if result:
result_array = result.split('&')
if len(result_array) == 2:
access_token_array = result_array[0].split('=')
expires_array = result_array[1].split('=')
if len(access_token_array) == 2 and access_token_array[0] == 'access_token' \
and len(expires_array) == 2 and expires_array[0] == 'expires':
access_token = access_token_array[1]
expires = float(expires_array[1])
return access_token, expires
@classmethod
@gen.coroutine
def async_profile(cls, access_token):
result = yield cls.async_get('/oauth2/profile',
ca_certs=cls.CA_CERT_PATH,
data={'access_token': access_token})
raise gen.Return(result)
@classmethod
def unpack_profile(cls, profile):
result = json.loads(profile)
profile_id = result.get('id')
return profile_id
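# A minimal sketch of the pure parsing helpers (no CAS server needed); the raw
# strings below are illustrative examples of the formats these methods expect.
if __name__ == '__main__':
    token, expires = CASHelper.unpack_access_token('access_token=AT-1234&expires=7200')
    print(token, expires)  # -> AT-1234 7200.0
    print(CASHelper.unpack_profile('{"id": "user-1", "attributes": {}}'))  # -> user-1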
|
[
"tornado.gen.Return",
"json.loads"
] |
[((962, 980), 'tornado.gen.Return', 'gen.Return', (['result'], {}), '(result)\n', (972, 980), False, 'from tornado import gen\n'), ((1948, 1966), 'tornado.gen.Return', 'gen.Return', (['result'], {}), '(result)\n', (1958, 1966), False, 'from tornado import gen\n'), ((2044, 2063), 'json.loads', 'json.loads', (['profile'], {}), '(profile)\n', (2054, 2063), False, 'import json\n')]
|
# coding: utf-8
"""
Snøskredvarsel API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v5.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AvalancheProblem(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'avalanche_problem_id': 'int',
'avalanche_ext_id': 'int',
'avalanche_ext_name': 'str',
'aval_cause_id': 'int',
'aval_cause_name': 'str',
'aval_probability_id': 'int',
'aval_probability_name': 'str',
'aval_trigger_simple_id': 'int',
'aval_trigger_simple_name': 'str',
'destructive_size_ext_id': 'int',
'destructive_size_ext_name': 'str',
'aval_propagation_id': 'int',
'aval_propagation_name': 'str',
'avalanche_type_id': 'int',
'avalanche_type_name': 'str',
'avalanche_problem_type_id': 'int',
'avalanche_problem_type_name': 'str',
'valid_expositions': 'str',
'exposed_height1': 'int',
'exposed_height2': 'int',
'exposed_height_fill': 'int'
}
attribute_map = {
'avalanche_problem_id': 'AvalancheProblemId',
'avalanche_ext_id': 'AvalancheExtId',
'avalanche_ext_name': 'AvalancheExtName',
'aval_cause_id': 'AvalCauseId',
'aval_cause_name': 'AvalCauseName',
'aval_probability_id': 'AvalProbabilityId',
'aval_probability_name': 'AvalProbabilityName',
'aval_trigger_simple_id': 'AvalTriggerSimpleId',
'aval_trigger_simple_name': 'AvalTriggerSimpleName',
'destructive_size_ext_id': 'DestructiveSizeExtId',
'destructive_size_ext_name': 'DestructiveSizeExtName',
'aval_propagation_id': 'AvalPropagationId',
'aval_propagation_name': 'AvalPropagationName',
'avalanche_type_id': 'AvalancheTypeId',
'avalanche_type_name': 'AvalancheTypeName',
'avalanche_problem_type_id': 'AvalancheProblemTypeId',
'avalanche_problem_type_name': 'AvalancheProblemTypeName',
'valid_expositions': 'ValidExpositions',
'exposed_height1': 'ExposedHeight1',
'exposed_height2': 'ExposedHeight2',
'exposed_height_fill': 'ExposedHeightFill'
}
def __init__(self, avalanche_problem_id=None, avalanche_ext_id=None, avalanche_ext_name=None, aval_cause_id=None, aval_cause_name=None, aval_probability_id=None, aval_probability_name=None, aval_trigger_simple_id=None, aval_trigger_simple_name=None, destructive_size_ext_id=None, destructive_size_ext_name=None, aval_propagation_id=None, aval_propagation_name=None, avalanche_type_id=None, avalanche_type_name=None, avalanche_problem_type_id=None, avalanche_problem_type_name=None, valid_expositions=None, exposed_height1=None, exposed_height2=None, exposed_height_fill=None): # noqa: E501
"""AvalancheProblem - a model defined in Swagger""" # noqa: E501
self._avalanche_problem_id = None
self._avalanche_ext_id = None
self._avalanche_ext_name = None
self._aval_cause_id = None
self._aval_cause_name = None
self._aval_probability_id = None
self._aval_probability_name = None
self._aval_trigger_simple_id = None
self._aval_trigger_simple_name = None
self._destructive_size_ext_id = None
self._destructive_size_ext_name = None
self._aval_propagation_id = None
self._aval_propagation_name = None
self._avalanche_type_id = None
self._avalanche_type_name = None
self._avalanche_problem_type_id = None
self._avalanche_problem_type_name = None
self._valid_expositions = None
self._exposed_height1 = None
self._exposed_height2 = None
self._exposed_height_fill = None
self.discriminator = None
if avalanche_problem_id is not None:
self.avalanche_problem_id = avalanche_problem_id
if avalanche_ext_id is not None:
self.avalanche_ext_id = avalanche_ext_id
if avalanche_ext_name is not None:
self.avalanche_ext_name = avalanche_ext_name
if aval_cause_id is not None:
self.aval_cause_id = aval_cause_id
if aval_cause_name is not None:
self.aval_cause_name = aval_cause_name
if aval_probability_id is not None:
self.aval_probability_id = aval_probability_id
if aval_probability_name is not None:
self.aval_probability_name = aval_probability_name
if aval_trigger_simple_id is not None:
self.aval_trigger_simple_id = aval_trigger_simple_id
if aval_trigger_simple_name is not None:
self.aval_trigger_simple_name = aval_trigger_simple_name
if destructive_size_ext_id is not None:
self.destructive_size_ext_id = destructive_size_ext_id
if destructive_size_ext_name is not None:
self.destructive_size_ext_name = destructive_size_ext_name
if aval_propagation_id is not None:
self.aval_propagation_id = aval_propagation_id
if aval_propagation_name is not None:
self.aval_propagation_name = aval_propagation_name
if avalanche_type_id is not None:
self.avalanche_type_id = avalanche_type_id
if avalanche_type_name is not None:
self.avalanche_type_name = avalanche_type_name
if avalanche_problem_type_id is not None:
self.avalanche_problem_type_id = avalanche_problem_type_id
if avalanche_problem_type_name is not None:
self.avalanche_problem_type_name = avalanche_problem_type_name
if valid_expositions is not None:
self.valid_expositions = valid_expositions
if exposed_height1 is not None:
self.exposed_height1 = exposed_height1
if exposed_height2 is not None:
self.exposed_height2 = exposed_height2
if exposed_height_fill is not None:
self.exposed_height_fill = exposed_height_fill
@property
def avalanche_problem_id(self):
"""Gets the avalanche_problem_id of this AvalancheProblem. # noqa: E501
:return: The avalanche_problem_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._avalanche_problem_id
@avalanche_problem_id.setter
def avalanche_problem_id(self, avalanche_problem_id):
"""Sets the avalanche_problem_id of this AvalancheProblem.
:param avalanche_problem_id: The avalanche_problem_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._avalanche_problem_id = avalanche_problem_id
@property
def avalanche_ext_id(self):
"""Gets the avalanche_ext_id of this AvalancheProblem. # noqa: E501
:return: The avalanche_ext_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._avalanche_ext_id
@avalanche_ext_id.setter
def avalanche_ext_id(self, avalanche_ext_id):
"""Sets the avalanche_ext_id of this AvalancheProblem.
:param avalanche_ext_id: The avalanche_ext_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._avalanche_ext_id = avalanche_ext_id
@property
def avalanche_ext_name(self):
"""Gets the avalanche_ext_name of this AvalancheProblem. # noqa: E501
:return: The avalanche_ext_name of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._avalanche_ext_name
@avalanche_ext_name.setter
def avalanche_ext_name(self, avalanche_ext_name):
"""Sets the avalanche_ext_name of this AvalancheProblem.
:param avalanche_ext_name: The avalanche_ext_name of this AvalancheProblem. # noqa: E501
:type: str
"""
self._avalanche_ext_name = avalanche_ext_name
@property
def aval_cause_id(self):
"""Gets the aval_cause_id of this AvalancheProblem. # noqa: E501
:return: The aval_cause_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._aval_cause_id
@aval_cause_id.setter
def aval_cause_id(self, aval_cause_id):
"""Sets the aval_cause_id of this AvalancheProblem.
:param aval_cause_id: The aval_cause_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._aval_cause_id = aval_cause_id
@property
def aval_cause_name(self):
"""Gets the aval_cause_name of this AvalancheProblem. # noqa: E501
:return: The aval_cause_name of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._aval_cause_name
@aval_cause_name.setter
def aval_cause_name(self, aval_cause_name):
"""Sets the aval_cause_name of this AvalancheProblem.
:param aval_cause_name: The aval_cause_name of this AvalancheProblem. # noqa: E501
:type: str
"""
self._aval_cause_name = aval_cause_name
@property
def aval_probability_id(self):
"""Gets the aval_probability_id of this AvalancheProblem. # noqa: E501
:return: The aval_probability_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._aval_probability_id
@aval_probability_id.setter
def aval_probability_id(self, aval_probability_id):
"""Sets the aval_probability_id of this AvalancheProblem.
:param aval_probability_id: The aval_probability_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._aval_probability_id = aval_probability_id
@property
def aval_probability_name(self):
"""Gets the aval_probability_name of this AvalancheProblem. # noqa: E501
:return: The aval_probability_name of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._aval_probability_name
@aval_probability_name.setter
def aval_probability_name(self, aval_probability_name):
"""Sets the aval_probability_name of this AvalancheProblem.
:param aval_probability_name: The aval_probability_name of this AvalancheProblem. # noqa: E501
:type: str
"""
self._aval_probability_name = aval_probability_name
@property
def aval_trigger_simple_id(self):
"""Gets the aval_trigger_simple_id of this AvalancheProblem. # noqa: E501
:return: The aval_trigger_simple_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._aval_trigger_simple_id
@aval_trigger_simple_id.setter
def aval_trigger_simple_id(self, aval_trigger_simple_id):
"""Sets the aval_trigger_simple_id of this AvalancheProblem.
:param aval_trigger_simple_id: The aval_trigger_simple_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._aval_trigger_simple_id = aval_trigger_simple_id
@property
def aval_trigger_simple_name(self):
"""Gets the aval_trigger_simple_name of this AvalancheProblem. # noqa: E501
:return: The aval_trigger_simple_name of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._aval_trigger_simple_name
@aval_trigger_simple_name.setter
def aval_trigger_simple_name(self, aval_trigger_simple_name):
"""Sets the aval_trigger_simple_name of this AvalancheProblem.
:param aval_trigger_simple_name: The aval_trigger_simple_name of this AvalancheProblem. # noqa: E501
:type: str
"""
self._aval_trigger_simple_name = aval_trigger_simple_name
@property
def destructive_size_ext_id(self):
"""Gets the destructive_size_ext_id of this AvalancheProblem. # noqa: E501
:return: The destructive_size_ext_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._destructive_size_ext_id
@destructive_size_ext_id.setter
def destructive_size_ext_id(self, destructive_size_ext_id):
"""Sets the destructive_size_ext_id of this AvalancheProblem.
:param destructive_size_ext_id: The destructive_size_ext_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._destructive_size_ext_id = destructive_size_ext_id
@property
def destructive_size_ext_name(self):
"""Gets the destructive_size_ext_name of this AvalancheProblem. # noqa: E501
:return: The destructive_size_ext_name of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._destructive_size_ext_name
@destructive_size_ext_name.setter
def destructive_size_ext_name(self, destructive_size_ext_name):
"""Sets the destructive_size_ext_name of this AvalancheProblem.
:param destructive_size_ext_name: The destructive_size_ext_name of this AvalancheProblem. # noqa: E501
:type: str
"""
self._destructive_size_ext_name = destructive_size_ext_name
@property
def aval_propagation_id(self):
"""Gets the aval_propagation_id of this AvalancheProblem. # noqa: E501
:return: The aval_propagation_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._aval_propagation_id
@aval_propagation_id.setter
def aval_propagation_id(self, aval_propagation_id):
"""Sets the aval_propagation_id of this AvalancheProblem.
:param aval_propagation_id: The aval_propagation_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._aval_propagation_id = aval_propagation_id
@property
def aval_propagation_name(self):
"""Gets the aval_propagation_name of this AvalancheProblem. # noqa: E501
:return: The aval_propagation_name of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._aval_propagation_name
@aval_propagation_name.setter
def aval_propagation_name(self, aval_propagation_name):
"""Sets the aval_propagation_name of this AvalancheProblem.
:param aval_propagation_name: The aval_propagation_name of this AvalancheProblem. # noqa: E501
:type: str
"""
self._aval_propagation_name = aval_propagation_name
@property
def avalanche_type_id(self):
"""Gets the avalanche_type_id of this AvalancheProblem. # noqa: E501
:return: The avalanche_type_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._avalanche_type_id
@avalanche_type_id.setter
def avalanche_type_id(self, avalanche_type_id):
"""Sets the avalanche_type_id of this AvalancheProblem.
:param avalanche_type_id: The avalanche_type_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._avalanche_type_id = avalanche_type_id
@property
def avalanche_type_name(self):
"""Gets the avalanche_type_name of this AvalancheProblem. # noqa: E501
:return: The avalanche_type_name of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._avalanche_type_name
@avalanche_type_name.setter
def avalanche_type_name(self, avalanche_type_name):
"""Sets the avalanche_type_name of this AvalancheProblem.
:param avalanche_type_name: The avalanche_type_name of this AvalancheProblem. # noqa: E501
:type: str
"""
self._avalanche_type_name = avalanche_type_name
@property
def avalanche_problem_type_id(self):
"""Gets the avalanche_problem_type_id of this AvalancheProblem. # noqa: E501
:return: The avalanche_problem_type_id of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._avalanche_problem_type_id
@avalanche_problem_type_id.setter
def avalanche_problem_type_id(self, avalanche_problem_type_id):
"""Sets the avalanche_problem_type_id of this AvalancheProblem.
:param avalanche_problem_type_id: The avalanche_problem_type_id of this AvalancheProblem. # noqa: E501
:type: int
"""
self._avalanche_problem_type_id = avalanche_problem_type_id
@property
def avalanche_problem_type_name(self):
"""Gets the avalanche_problem_type_name of this AvalancheProblem. # noqa: E501
:return: The avalanche_problem_type_name of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._avalanche_problem_type_name
@avalanche_problem_type_name.setter
def avalanche_problem_type_name(self, avalanche_problem_type_name):
"""Sets the avalanche_problem_type_name of this AvalancheProblem.
:param avalanche_problem_type_name: The avalanche_problem_type_name of this AvalancheProblem. # noqa: E501
:type: str
"""
self._avalanche_problem_type_name = avalanche_problem_type_name
@property
def valid_expositions(self):
"""Gets the valid_expositions of this AvalancheProblem. # noqa: E501
:return: The valid_expositions of this AvalancheProblem. # noqa: E501
:rtype: str
"""
return self._valid_expositions
@valid_expositions.setter
def valid_expositions(self, valid_expositions):
"""Sets the valid_expositions of this AvalancheProblem.
:param valid_expositions: The valid_expositions of this AvalancheProblem. # noqa: E501
:type: str
"""
self._valid_expositions = valid_expositions
@property
def exposed_height1(self):
"""Gets the exposed_height1 of this AvalancheProblem. # noqa: E501
:return: The exposed_height1 of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._exposed_height1
@exposed_height1.setter
def exposed_height1(self, exposed_height1):
"""Sets the exposed_height1 of this AvalancheProblem.
:param exposed_height1: The exposed_height1 of this AvalancheProblem. # noqa: E501
:type: int
"""
self._exposed_height1 = exposed_height1
@property
def exposed_height2(self):
"""Gets the exposed_height2 of this AvalancheProblem. # noqa: E501
:return: The exposed_height2 of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._exposed_height2
@exposed_height2.setter
def exposed_height2(self, exposed_height2):
"""Sets the exposed_height2 of this AvalancheProblem.
:param exposed_height2: The exposed_height2 of this AvalancheProblem. # noqa: E501
:type: int
"""
self._exposed_height2 = exposed_height2
@property
def exposed_height_fill(self):
"""Gets the exposed_height_fill of this AvalancheProblem. # noqa: E501
:return: The exposed_height_fill of this AvalancheProblem. # noqa: E501
:rtype: int
"""
return self._exposed_height_fill
@exposed_height_fill.setter
def exposed_height_fill(self, exposed_height_fill):
"""Sets the exposed_height_fill of this AvalancheProblem.
:param exposed_height_fill: The exposed_height_fill of this AvalancheProblem. # noqa: E501
:type: int
"""
self._exposed_height_fill = exposed_height_fill
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AvalancheProblem, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AvalancheProblem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
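# A minimal usage sketch with hypothetical field values: construct a problem
# instance and round-trip it through to_dict()/to_str().
if __name__ == '__main__':
    problem = AvalancheProblem(avalanche_problem_id=1,
                               avalanche_problem_type_name='Wind drifted snow',
                               valid_expositions='11110000',
                               exposed_height1=600)
    print(problem.to_str())  # pretty-printed dict of all swagger attributes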
|
[
"six.iteritems"
] |
[((20009, 20042), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (20022, 20042), False, 'import six\n')]
|
import random
def solution(x, y):
    # Work backwards from (x, y) toward (1, 1), batching steps with floor division.
    m, f, gen = int(x), int(y), 0
    while True:
        if m == 1 and f == 1:
            return str(gen)
        elif m < 1 or f < 1 or m == f:
            return "impossible"
        elif m == 1 or f == 1:
            return str(gen + f * m - 1)
        elif m > f:
            # use floor division: float division overflows / loses precision for big ints
            q = m // f
            gen += q
            m -= f * q
        elif m < f:
            q = f // m
            gen += q
            f -= m * q
print(solution(random.getrandbits(1280),random.getrandbits(1281)))
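# Worked examples: (1,1) -> (1,2) -> (1,3) -> (4,3) -> (4,7) takes 4 cycles,
# while (2,4) is unreachable because both counts share a factor of 2.
print(solution('4', '7'))   # -> 4
print(solution('2', '4'))   # -> impossible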
|
[
"random.getrandbits"
] |
[((465, 489), 'random.getrandbits', 'random.getrandbits', (['(1280)'], {}), '(1280)\n', (483, 489), False, 'import random\n'), ((490, 514), 'random.getrandbits', 'random.getrandbits', (['(1281)'], {}), '(1281)\n', (508, 514), False, 'import random\n')]
|
from flask import jsonify
from api import api
import socket
@api.route('/v1/node/who/i/am', methods=['GET'])
def node_who_i_am():
return jsonify({'node_name':str(socket.gethostname())})
@api.route('/v1/node/sync', methods=['POST'])
def node_sync():
return jsonify({'node_name':str(socket.gethostname())})
@api.route('/v1/node/cluster/join', methods=['POST'])
def node_cluster_join():
return jsonify({'node_name':str(socket.gethostname())})
|
[
"socket.gethostname",
"api.api.route"
] |
[((62, 109), 'api.api.route', 'api.route', (['"""/v1/node/who/i/am"""'], {'methods': "['GET']"}), "('/v1/node/who/i/am', methods=['GET'])\n", (71, 109), False, 'from api import api\n'), ((193, 237), 'api.api.route', 'api.route', (['"""/v1/node/sync"""'], {'methods': "['POST']"}), "('/v1/node/sync', methods=['POST'])\n", (202, 237), False, 'from api import api\n'), ((317, 369), 'api.api.route', 'api.route', (['"""/v1/node/cluster/join"""'], {'methods': "['POST']"}), "('/v1/node/cluster/join', methods=['POST'])\n", (326, 369), False, 'from api import api\n'), ((167, 187), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (185, 187), False, 'import socket\n'), ((291, 311), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (309, 311), False, 'import socket\n'), ((431, 451), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (449, 451), False, 'import socket\n')]
|
#!/usr/bin/env python
# coding: utf-8
# # Navigation
#
# ---
#
# You are welcome to use this coding environment to train your agent for the project. Follow the instructions below to get started!
#
# ### 1. Start the Environment
#
# Run the next code cell to install a few packages. This line will take a few minutes to run!
# In[ ]:
get_ipython().system('pip -q install ./python')
# The environment is already saved in the Workspace and can be accessed at the file path provided below. Please run the next code cell without making any changes.
# In[ ]:
from unityagents import UnityEnvironment
import numpy as np
# please do not modify the line below
env = UnityEnvironment(file_name="/data/Banana_Linux_NoVis/Banana.x86_64")
# Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.
# In[ ]:
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# ### 2. Examine the State and Action Spaces
#
# Run the code cell below to print some information about the environment.
# In[ ]:
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents in the environment
print('Number of agents:', len(env_info.agents))
# number of actions
action_size = brain.vector_action_space_size
print('Number of actions:', action_size)
# examine the state space
state = env_info.vector_observations[0]
print('States look like:', state)
state_size = len(state)
print('States have length:', state_size)
# ### 3. Take Random Actions in the Environment
#
# In the next code cell, you will learn how to use the Python API to control the agent and receive feedback from the environment.
#
# Note that **in this coding environment, you will not be able to watch the agent while it is training**, and you should set `train_mode=True` to restart the environment.
# In[ ]:
env_info = env.reset(train_mode=True)[brain_name] # reset the environment
state = env_info.vector_observations[0] # get the current state
score = 0 # initialize the score
while True:
action = np.random.randint(action_size) # select an action
env_info = env.step(action)[brain_name] # send the action to the environment
next_state = env_info.vector_observations[0] # get the next state
reward = env_info.rewards[0] # get the reward
done = env_info.local_done[0] # see if episode has finished
score += reward # update the score
state = next_state # roll over the state to next time step
if done: # exit loop if episode finished
break
print("Score: {}".format(score))
# When finished, you can close the environment.
# In[ ]:
env.close()
# ### 4. It's Your Turn!
#
# Now it's your turn to train your own agent to solve the environment! A few **important notes**:
# - When training the environment, set `train_mode=True`, so that the line for resetting the environment looks like the following:
# ```python
# env_info = env.reset(train_mode=True)[brain_name]
# ```
# - To structure your work, you're welcome to work directly in this Jupyter notebook, or you might like to start over with a new file! You can see the list of files in the workspace by clicking on **_Jupyter_** in the top left corner of the notebook.
# - In this coding environment, you will not be able to watch the agent while it is training. However, **_after training the agent_**, you can download the saved model weights to watch the agent on your own machine!
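# A hedged sketch of the training loop described above. The `agent` object is
# hypothetical (e.g. a DQN agent exposing act() and step()) and is not defined in
# this notebook, so the cell is left commented out.
# In[ ]:
# for episode in range(1, 1001):
#     env_info = env.reset(train_mode=True)[brain_name]
#     state = env_info.vector_observations[0]
#     score = 0
#     while True:
#         action = agent.act(state)                            # choose an action
#         env_info = env.step(action)[brain_name]
#         next_state = env_info.vector_observations[0]
#         reward = env_info.rewards[0]
#         done = env_info.local_done[0]
#         agent.step(state, action, reward, next_state, done)  # learn from the transition
#         state, score = next_state, score + reward
#         if done:
#             break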
|
[
"numpy.random.randint",
"unityagents.UnityEnvironment"
] |
[((674, 742), 'unityagents.UnityEnvironment', 'UnityEnvironment', ([], {'file_name': '"""/data/Banana_Linux_NoVis/Banana.x86_64"""'}), "(file_name='/data/Banana_Linux_NoVis/Banana.x86_64')\n", (690, 742), False, 'from unityagents import UnityEnvironment\n'), ((2258, 2288), 'numpy.random.randint', 'np.random.randint', (['action_size'], {}), '(action_size)\n', (2275, 2288), True, 'import numpy as np\n')]
|
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_widsprofile
short_description: Configure wireless intrusion detection system (WIDS) profiles.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- <NAME> (@chillancezen)
- <NAME> (@JieX19)
- <NAME> (@fshen01)
- <NAME> (@fgtdev-hblu)
notes:
    - Running in workspace locking mode is supported in this FortiManager module; the top
      level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
    - To create or update an object, use state present directive.
    - To delete an object, use state absent directive.
    - Normally, running one module can fail when a non-zero rc is returned. You can also override
      the conditions to fail or succeed with parameters rc_failed and rc_succeeded.
options:
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overriden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
widsprofile:
description: the top level parameters set
required: false
type: dict
suboptions:
ap-auto-suppress:
type: str
description: 'Enable/disable on-wire rogue AP auto-suppression (default = disable).'
choices:
- 'disable'
- 'enable'
ap-bgscan-disable-day:
description: no description
type: list
choices:
- sunday
- monday
- tuesday
- wednesday
- thursday
- friday
- saturday
ap-bgscan-disable-end:
type: str
description: 'End time, using a 24-hour clock in the format of hh:mm, for disabling background scanning (default = 00:00).'
ap-bgscan-disable-start:
type: str
description: 'Start time, using a 24-hour clock in the format of hh:mm, for disabling background scanning (default = 00:00).'
ap-bgscan-duration:
type: int
description: 'Listening time on a scanning channel (10 - 1000 msec, default = 20).'
ap-bgscan-idle:
type: int
description: 'Waiting time for channel inactivity before scanning this channel (0 - 1000 msec, default = 0).'
ap-bgscan-intv:
type: int
description: 'Period of time between scanning two channels (1 - 600 sec, default = 1).'
ap-bgscan-period:
type: int
description: 'Period of time between background scans (60 - 3600 sec, default = 600).'
ap-bgscan-report-intv:
type: int
description: 'Period of time between background scan reports (15 - 600 sec, default = 30).'
ap-fgscan-report-intv:
type: int
description: 'Period of time between foreground scan reports (15 - 600 sec, default = 15).'
ap-scan:
type: str
description: 'Enable/disable rogue AP detection.'
choices:
- 'disable'
- 'enable'
ap-scan-passive:
type: str
description: 'Enable/disable passive scanning. Enable means do not send probe request on any channels (default = disable).'
choices:
- 'disable'
- 'enable'
asleap-attack:
type: str
description: 'Enable/disable asleap attack detection (default = disable).'
choices:
- 'disable'
- 'enable'
assoc-flood-thresh:
type: int
description: 'The threshold value for association frame flooding.'
assoc-flood-time:
type: int
description: 'Number of seconds after which a station is considered not connected.'
assoc-frame-flood:
type: str
description: 'Enable/disable association frame flooding detection (default = disable).'
choices:
- 'disable'
- 'enable'
auth-flood-thresh:
type: int
description: 'The threshold value for authentication frame flooding.'
auth-flood-time:
type: int
description: 'Number of seconds after which a station is considered not connected.'
auth-frame-flood:
type: str
description: 'Enable/disable authentication frame flooding detection (default = disable).'
choices:
- 'disable'
- 'enable'
comment:
type: str
description: 'Comment.'
deauth-broadcast:
type: str
description: 'Enable/disable broadcasting de-authentication detection (default = disable).'
choices:
- 'disable'
- 'enable'
deauth-unknown-src-thresh:
type: int
description: 'Threshold value per second to deauth unknown src for DoS attack (0: no limit).'
eapol-fail-flood:
type: str
description: 'Enable/disable EAPOL-Failure flooding (to AP) detection (default = disable).'
choices:
- 'disable'
- 'enable'
eapol-fail-intv:
type: int
description: 'The detection interval for EAPOL-Failure flooding (1 - 3600 sec).'
eapol-fail-thresh:
type: int
description: 'The threshold value for EAPOL-Failure flooding in specified interval.'
eapol-logoff-flood:
type: str
description: 'Enable/disable EAPOL-Logoff flooding (to AP) detection (default = disable).'
choices:
- 'disable'
- 'enable'
eapol-logoff-intv:
type: int
description: 'The detection interval for EAPOL-Logoff flooding (1 - 3600 sec).'
eapol-logoff-thresh:
type: int
description: 'The threshold value for EAPOL-Logoff flooding in specified interval.'
eapol-pre-fail-flood:
type: str
description: 'Enable/disable premature EAPOL-Failure flooding (to STA) detection (default = disable).'
choices:
- 'disable'
- 'enable'
eapol-pre-fail-intv:
type: int
description: 'The detection interval for premature EAPOL-Failure flooding (1 - 3600 sec).'
eapol-pre-fail-thresh:
type: int
description: 'The threshold value for premature EAPOL-Failure flooding in specified interval.'
eapol-pre-succ-flood:
type: str
description: 'Enable/disable premature EAPOL-Success flooding (to STA) detection (default = disable).'
choices:
- 'disable'
- 'enable'
eapol-pre-succ-intv:
type: int
description: 'The detection interval for premature EAPOL-Success flooding (1 - 3600 sec).'
eapol-pre-succ-thresh:
type: int
description: 'The threshold value for premature EAPOL-Success flooding in specified interval.'
eapol-start-flood:
type: str
description: 'Enable/disable EAPOL-Start flooding (to AP) detection (default = disable).'
choices:
- 'disable'
- 'enable'
eapol-start-intv:
type: int
description: 'The detection interval for EAPOL-Start flooding (1 - 3600 sec).'
eapol-start-thresh:
type: int
description: 'The threshold value for EAPOL-Start flooding in specified interval.'
eapol-succ-flood:
type: str
description: 'Enable/disable EAPOL-Success flooding (to AP) detection (default = disable).'
choices:
- 'disable'
- 'enable'
eapol-succ-intv:
type: int
description: 'The detection interval for EAPOL-Success flooding (1 - 3600 sec).'
eapol-succ-thresh:
type: int
description: 'The threshold value for EAPOL-Success flooding in specified interval.'
invalid-mac-oui:
type: str
description: 'Enable/disable invalid MAC OUI detection.'
choices:
- 'disable'
- 'enable'
long-duration-attack:
type: str
description: 'Enable/disable long duration attack detection based on user configured threshold (default = disable).'
choices:
- 'disable'
- 'enable'
long-duration-thresh:
type: int
description: 'Threshold value for long duration attack detection (1000 - 32767 usec, default = 8200).'
name:
type: str
description: 'WIDS profile name.'
null-ssid-probe-resp:
type: str
description: 'Enable/disable null SSID probe response detection (default = disable).'
choices:
- 'disable'
- 'enable'
sensor-mode:
type: str
                description: 'Scan nearby WiFi stations (default = disable).'
choices:
- 'disable'
- 'foreign'
- 'both'
spoofed-deauth:
type: str
description: 'Enable/disable spoofed de-authentication attack detection (default = disable).'
choices:
- 'disable'
- 'enable'
weak-wep-iv:
type: str
description: 'Enable/disable weak WEP IV (Initialization Vector) detection (default = disable).'
choices:
- 'disable'
- 'enable'
wireless-bridge:
type: str
description: 'Enable/disable wireless bridge detection (default = disable).'
choices:
- 'disable'
- 'enable'
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Configure wireless intrusion detection system (WIDS) profiles.
fmgr_widsprofile:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
state: <value in [present, absent]>
widsprofile:
ap-auto-suppress: <value in [disable, enable]>
ap-bgscan-disable-day:
- sunday
- monday
- tuesday
- wednesday
- thursday
- friday
- saturday
ap-bgscan-disable-end: <value of string>
ap-bgscan-disable-start: <value of string>
ap-bgscan-duration: <value of integer>
ap-bgscan-idle: <value of integer>
ap-bgscan-intv: <value of integer>
ap-bgscan-period: <value of integer>
ap-bgscan-report-intv: <value of integer>
ap-fgscan-report-intv: <value of integer>
ap-scan: <value in [disable, enable]>
ap-scan-passive: <value in [disable, enable]>
asleap-attack: <value in [disable, enable]>
assoc-flood-thresh: <value of integer>
assoc-flood-time: <value of integer>
assoc-frame-flood: <value in [disable, enable]>
auth-flood-thresh: <value of integer>
auth-flood-time: <value of integer>
auth-frame-flood: <value in [disable, enable]>
comment: <value of string>
deauth-broadcast: <value in [disable, enable]>
deauth-unknown-src-thresh: <value of integer>
eapol-fail-flood: <value in [disable, enable]>
eapol-fail-intv: <value of integer>
eapol-fail-thresh: <value of integer>
eapol-logoff-flood: <value in [disable, enable]>
eapol-logoff-intv: <value of integer>
eapol-logoff-thresh: <value of integer>
eapol-pre-fail-flood: <value in [disable, enable]>
eapol-pre-fail-intv: <value of integer>
eapol-pre-fail-thresh: <value of integer>
eapol-pre-succ-flood: <value in [disable, enable]>
eapol-pre-succ-intv: <value of integer>
eapol-pre-succ-thresh: <value of integer>
eapol-start-flood: <value in [disable, enable]>
eapol-start-intv: <value of integer>
eapol-start-thresh: <value of integer>
eapol-succ-flood: <value in [disable, enable]>
eapol-succ-intv: <value of integer>
eapol-succ-thresh: <value of integer>
invalid-mac-oui: <value in [disable, enable]>
long-duration-attack: <value in [disable, enable]>
long-duration-thresh: <value of integer>
name: <value of string>
null-ssid-probe-resp: <value in [disable, enable]>
sensor-mode: <value in [disable, foreign, both]>
spoofed-deauth: <value in [disable, enable]>
weak-wep-iv: <value in [disable, enable]>
wireless-bridge: <value in [disable, enable]>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/obj/wireless-controller/wids-profile',
'/pm/config/global/obj/wireless-controller/wids-profile'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/obj/wireless-controller/wids-profile/{wids-profile}',
'/pm/config/global/obj/wireless-controller/wids-profile/{wids-profile}'
]
url_params = ['adom']
module_primary_key = 'name'
module_arg_spec = {
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'adom': {
'required': True,
'type': 'str'
},
'widsprofile': {
'required': False,
'type': 'dict',
'options': {
'ap-auto-suppress': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ap-bgscan-disable-day': {
'required': False,
'type': 'list',
'choices': [
'sunday',
'monday',
'tuesday',
'wednesday',
'thursday',
'friday',
'saturday'
]
},
'ap-bgscan-disable-end': {
'required': False,
'type': 'str'
},
'ap-bgscan-disable-start': {
'required': False,
'type': 'str'
},
'ap-bgscan-duration': {
'required': False,
'type': 'int'
},
'ap-bgscan-idle': {
'required': False,
'type': 'int'
},
'ap-bgscan-intv': {
'required': False,
'type': 'int'
},
'ap-bgscan-period': {
'required': False,
'type': 'int'
},
'ap-bgscan-report-intv': {
'required': False,
'type': 'int'
},
'ap-fgscan-report-intv': {
'required': False,
'type': 'int'
},
'ap-scan': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ap-scan-passive': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'asleap-attack': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'assoc-flood-thresh': {
'required': False,
'type': 'int'
},
'assoc-flood-time': {
'required': False,
'type': 'int'
},
'assoc-frame-flood': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'auth-flood-thresh': {
'required': False,
'type': 'int'
},
'auth-flood-time': {
'required': False,
'type': 'int'
},
'auth-frame-flood': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'comment': {
'required': False,
'type': 'str'
},
'deauth-broadcast': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'deauth-unknown-src-thresh': {
'required': False,
'type': 'int'
},
'eapol-fail-flood': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'eapol-fail-intv': {
'required': False,
'type': 'int'
},
'eapol-fail-thresh': {
'required': False,
'type': 'int'
},
'eapol-logoff-flood': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'eapol-logoff-intv': {
'required': False,
'type': 'int'
},
'eapol-logoff-thresh': {
'required': False,
'type': 'int'
},
'eapol-pre-fail-flood': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'eapol-pre-fail-intv': {
'required': False,
'type': 'int'
},
'eapol-pre-fail-thresh': {
'required': False,
'type': 'int'
},
'eapol-pre-succ-flood': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'eapol-pre-succ-intv': {
'required': False,
'type': 'int'
},
'eapol-pre-succ-thresh': {
'required': False,
'type': 'int'
},
'eapol-start-flood': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'eapol-start-intv': {
'required': False,
'type': 'int'
},
'eapol-start-thresh': {
'required': False,
'type': 'int'
},
'eapol-succ-flood': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'eapol-succ-intv': {
'required': False,
'type': 'int'
},
'eapol-succ-thresh': {
'required': False,
'type': 'int'
},
'invalid-mac-oui': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'long-duration-attack': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'long-duration-thresh': {
'required': False,
'type': 'int'
},
'name': {
'required': True,
'type': 'str'
},
'null-ssid-probe-resp': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'sensor-mode': {
'required': False,
'choices': [
'disable',
'foreign',
'both'
],
'type': 'str'
},
'spoofed-deauth': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'weak-wep-iv': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'wireless-bridge': {
'required': False,
'choices': [
'disable',
'enable'
],
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'widsprofile'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd()
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
|
[
"ansible_collections.fortinet.fortimanager.plugins.module_utils.napi.check_parameter_bypass",
"ansible_collections.fortinet.fortimanager.plugins.module_utils.napi.check_galaxy_version",
"ansible.module_utils.connection.Connection",
"ansible_collections.fortinet.fortimanager.plugins.module_utils.napi.NAPIManager"
] |
[((27910, 27947), 'ansible_collections.fortinet.fortimanager.plugins.module_utils.napi.check_galaxy_version', 'check_galaxy_version', (['module_arg_spec'], {}), '(module_arg_spec)\n', (27930, 27947), False, 'from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version\n'), ((28165, 28196), 'ansible.module_utils.connection.Connection', 'Connection', (['module._socket_path'], {}), '(module._socket_path)\n', (28175, 28196), False, 'from ansible.module_utils.connection import Connection\n'), ((28212, 28341), 'ansible_collections.fortinet.fortimanager.plugins.module_utils.napi.NAPIManager', 'NAPIManager', (['jrpc_urls', 'perobject_jrpc_urls', 'module_primary_key', 'url_params', 'module', 'connection'], {'top_level_schema_name': '"""data"""'}), "(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params,\n module, connection, top_level_schema_name='data')\n", (28223, 28341), False, 'from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager\n'), ((27989, 28043), 'ansible_collections.fortinet.fortimanager.plugins.module_utils.napi.check_parameter_bypass', 'check_parameter_bypass', (['module_arg_spec', '"""widsprofile"""'], {}), "(module_arg_spec, 'widsprofile')\n", (28011, 28043), False, 'from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass\n')]
|
# Intervals Between Identical Elements
from typing import List
from collections import defaultdict
class Solution:
def getDistances(self, arr: List[int]) -> List[int]:
indices = defaultdict(list)
for index, n in enumerate(arr):
indices[n].append(index)
ans = [0] * len(arr)
for _, inds in indices.items():
ps = [0] * len(inds)
for ith, ind in enumerate(inds):
if ith == 0:
ps[0] = ind
else:
ps[ith] = ps[ith - 1] + ind
for ith, ind in enumerate(inds):
ans[ind] = abs((ith + 1) * ind - ps[ith]) + abs((len(inds) - 1 - ith) * ind - (ps[len(inds) - 1] - ps[ith]))
return ans
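# Worked example (added, not part of the original solution): for
# arr = [10, 5, 10, 10], the value 10 occurs at indices inds = [0, 2, 3] with
# prefix sums ps = [0, 2, 5].  For ind = 2 (ith = 1):
#   left  = abs((ith + 1) * ind - ps[ith])                        = abs(4 - 2) = 2
#   right = abs((len(inds) - 1 - ith) * ind - (ps[-1] - ps[ith])) = abs(2 - 3) = 1
# so ans[2] = 3 = |2 - 0| + |2 - 3|.  The lone value 5 keeps ans[1] = 0,
# giving [5, 0, 3, 4] overall.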
if __name__ == "__main__":
sol = Solution()
    arr = [2, 1, 3, 1, 2, 3, 3]  # first example (overridden by the next line)
    arr = [10, 5, 10, 10]
print(sol.getDistances(arr))
|
[
"collections.defaultdict"
] |
[((192, 209), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (203, 209), False, 'from collections import defaultdict\n')]
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import base64
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.kinesis import exceptions
from boto.compat import json
class KinesisConnection(AWSQueryConnection):
"""
Amazon Kinesis Service API Reference
Amazon Kinesis is a managed service that scales elastically for
real time processing of streaming big data.
"""
APIVersion = "2013-12-02"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "kinesis.us-east-1.amazonaws.com"
ServiceName = "Kinesis"
TargetPrefix = "Kinesis_20131202"
ResponseError = JSONResponseError
_faults = {
"ProvisionedThroughputExceededException": exceptions.ProvisionedThroughputExceededException,
"LimitExceededException": exceptions.LimitExceededException,
"ExpiredIteratorException": exceptions.ExpiredIteratorException,
"ResourceInUseException": exceptions.ResourceInUseException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InvalidArgumentException": exceptions.InvalidArgumentException,
"SubscriptionRequiredException": exceptions.SubscriptionRequiredException
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(KinesisConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def create_stream(self, stream_name, shard_count):
"""
This operation adds a new Amazon Kinesis stream to your AWS
account. A stream captures and transports data records that
are continuously emitted from different data sources or
        producers. Scale-out within an Amazon Kinesis stream is
explicitly supported by means of shards, which are uniquely
identified groups of data records in an Amazon Kinesis stream.
You specify and control the number of shards that a stream is
composed of. Each shard can support up to 5 read transactions
per second up to a maximum total of 2 MB of data read per
second. Each shard can support up to 1000 write transactions
per second up to a maximum total of 1 MB data written per
second. You can add shards to a stream if the amount of data
input increases and you can remove shards if the amount of
data input decreases.
The stream name identifies the stream. The name is scoped to
the AWS account used by the application. It is also scoped by
region. That is, two streams in two different accounts can
have the same name, and two streams in the same account, but
in two different regions, can have the same name.
`CreateStream` is an asynchronous operation. Upon receiving a
`CreateStream` request, Amazon Kinesis immediately returns and
sets the stream status to CREATING. After the stream is
created, Amazon Kinesis sets the stream status to ACTIVE. You
should perform read and write operations only on an ACTIVE
stream.
You receive a `LimitExceededException` when making a
`CreateStream` request if you try to do one of the following:
+ Have more than five streams in the CREATING state at any
point in time.
+ Create more shards than are authorized for your account.
**Note:** The default limit for an AWS account is two shards
per stream. If you need to create a stream with more than two
shards, contact AWS Support to increase the limit on your
account.
You can use the `DescribeStream` operation to check the stream
status, which is returned in `StreamStatus`.
`CreateStream` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: A name to identify the stream. The stream name is
scoped to the AWS account used by the application that creates the
stream. It is also scoped by region. That is, two streams in two
different AWS accounts can have the same name, and two streams in
the same AWS account, but in two different regions, can have the
same name.
:type shard_count: integer
:param shard_count: The number of shards that the stream will use. The
throughput of the stream is a function of the number of shards;
more shards are required for greater provisioned throughput.
**Note:** The default limit for an AWS account is two shards per
stream. If you need to create a stream with more than two shards,
contact AWS Support to increase the limit on your account.
"""
params = {
'StreamName': stream_name,
'ShardCount': shard_count,
}
return self.make_request(action='CreateStream',
body=json.dumps(params))
def delete_stream(self, stream_name):
"""
This operation deletes a stream and all of its shards and
data. You must shut down any applications that are operating
on the stream before you delete the stream. If an application
attempts to operate on a deleted stream, it will receive the
exception `ResourceNotFoundException`.
If the stream is in the ACTIVE state, you can delete it. After
a `DeleteStream` request, the specified stream is in the
DELETING state until Amazon Kinesis completes the deletion.
**Note:** Amazon Kinesis might continue to accept data read
and write operations, such as PutRecord and GetRecords, on a
stream in the DELETING state until the stream deletion is
complete.
When you delete a stream, any shards in that stream are also
deleted.
You can use the DescribeStream operation to check the state of
the stream, which is returned in `StreamStatus`.
`DeleteStream` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream to delete.
"""
params = {'StreamName': stream_name, }
return self.make_request(action='DeleteStream',
body=json.dumps(params))
def describe_stream(self, stream_name, limit=None,
exclusive_start_shard_id=None):
"""
This operation returns the following information about the
stream: the current status of the stream, the stream Amazon
Resource Name (ARN), and an array of shard objects that
comprise the stream. For each shard object there is
information about the hash key and sequence number ranges that
the shard spans, and the IDs of any earlier shards that played
        a role in a MergeShards or SplitShard operation that
created the shard. A sequence number is the identifier
associated with every record ingested in the Amazon Kinesis
stream. The sequence number is assigned by the Amazon Kinesis
service when a record is put into the stream.
You can limit the number of returned shards using the `Limit`
parameter. The number of shards in a stream may be too large
to return from a single call to `DescribeStream`. You can
detect this by using the `HasMoreShards` flag in the returned
output. `HasMoreShards` is set to `True` when there is more
data available.
If there are more shards available, you can request more
shards by using the shard ID of the last shard returned by the
`DescribeStream` request, in the `ExclusiveStartShardId`
parameter in a subsequent request to `DescribeStream`.
`DescribeStream` is a paginated operation.
`DescribeStream` has a limit of 10 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream to describe.
:type limit: integer
:param limit: The maximum number of shards to return.
:type exclusive_start_shard_id: string
:param exclusive_start_shard_id: The shard ID of the shard to start
with for the stream description.
"""
params = {'StreamName': stream_name, }
if limit is not None:
params['Limit'] = limit
if exclusive_start_shard_id is not None:
params['ExclusiveStartShardId'] = exclusive_start_shard_id
return self.make_request(action='DescribeStream',
body=json.dumps(params))
def get_records(self, shard_iterator, limit=None, b64_decode=True):
"""
This operation returns one or more data records from a shard.
A `GetRecords` operation request can retrieve up to 10 MB of
data.
You specify a shard iterator for the shard that you want to
read data from in the `ShardIterator` parameter. The shard
iterator specifies the position in the shard from which you
want to start reading data records sequentially. A shard
iterator specifies this position using the sequence number of
a data record in the shard. For more information about the
shard iterator, see GetShardIterator.
`GetRecords` may return a partial result if the response size
limit is exceeded. You will get an error, but not a partial
result if the shard's provisioned throughput is exceeded, the
shard iterator has expired, or an internal processing failure
has occurred. Clients can request a smaller amount of data by
specifying a maximum number of returned records using the
`Limit` parameter. The `Limit` parameter can be set to an
integer value of up to 10,000. If you set the value to an
integer greater than 10,000, you will receive
`InvalidArgumentException`.
A new shard iterator is returned by every `GetRecords` request
in `NextShardIterator`, which you use in the `ShardIterator`
parameter of the next `GetRecords` request. When you
repeatedly read from an Amazon Kinesis stream use a
GetShardIterator request to get the first shard iterator to
use in your first `GetRecords` request and then use the shard
iterator returned in `NextShardIterator` for subsequent reads.
`GetRecords` can return `null` for the `NextShardIterator` to
reflect that the shard has been closed and that the requested
shard iterator would never have returned more data.
If no items can be processed because of insufficient
provisioned throughput on the shard involved in the request,
`GetRecords` throws `ProvisionedThroughputExceededException`.
:type shard_iterator: string
:param shard_iterator: The position in the shard from which you want to
start sequentially reading data records.
:type limit: integer
:param limit: The maximum number of records to return, which can be set
to a value of up to 10,000.
:type b64_decode: boolean
:param b64_decode: Decode the Base64-encoded ``Data`` field of records.
"""
params = {'ShardIterator': shard_iterator, }
if limit is not None:
params['Limit'] = limit
response = self.make_request(action='GetRecords',
body=json.dumps(params))
# Base64 decode the data
if b64_decode:
for record in response.get('Records', []):
record['Data'] = base64.b64decode(
record['Data'].encode('utf-8')).decode('utf-8')
return response
def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type,
starting_sequence_number=None):
"""
This operation returns a shard iterator in `ShardIterator`.
The shard iterator specifies the position in the shard from
which you want to start reading data records sequentially. A
shard iterator specifies this position using the sequence
number of a data record in a shard. A sequence number is the
identifier associated with every record ingested in the Amazon
Kinesis stream. The sequence number is assigned by the Amazon
Kinesis service when a record is put into the stream.
You must specify the shard iterator type in the
`GetShardIterator` request. For example, you can set the
`ShardIteratorType` parameter to read exactly from the
position denoted by a specific sequence number by using the
AT_SEQUENCE_NUMBER shard iterator type, or right after the
sequence number by using the AFTER_SEQUENCE_NUMBER shard
iterator type, using sequence numbers returned by earlier
PutRecord, GetRecords or DescribeStream requests. You can
specify the shard iterator type TRIM_HORIZON in the request to
cause `ShardIterator` to point to the last untrimmed record in
the shard in the system, which is the oldest data record in
the shard. Or you can point to just after the most recent
record in the shard, by using the shard iterator type LATEST,
so that you always read the most recent data in the shard.
**Note:** Each shard iterator expires five minutes after it is
returned to the requester.
When you repeatedly read from an Amazon Kinesis stream use a
        GetShardIterator request to get the first shard iterator to
use in your first `GetRecords` request and then use the shard
iterator returned by the `GetRecords` request in
`NextShardIterator` for subsequent reads. A new shard iterator
is returned by every `GetRecords` request in
`NextShardIterator`, which you use in the `ShardIterator`
parameter of the next `GetRecords` request.
If a `GetShardIterator` request is made too often, you will
receive a `ProvisionedThroughputExceededException`. For more
information about throughput limits, see the `Amazon Kinesis
Developer Guide`_.
`GetShardIterator` can return `null` for its `ShardIterator`
to indicate that the shard has been closed and that the
requested iterator will return no more data. A shard can be
closed by a SplitShard or MergeShards operation.
`GetShardIterator` has a limit of 5 transactions per second
per account per shard.
:type stream_name: string
:param stream_name: The name of the stream.
:type shard_id: string
:param shard_id: The shard ID of the shard to get the iterator for.
:type shard_iterator_type: string
:param shard_iterator_type:
Determines how the shard iterator is used to start reading data records
from the shard.
The following are the valid shard iterator types:
+ AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted
by a specific sequence number.
+ AFTER_SEQUENCE_NUMBER - Start reading right after the position
denoted by a specific sequence number.
+ TRIM_HORIZON - Start reading at the last untrimmed record in the
shard in the system, which is the oldest data record in the shard.
+ LATEST - Start reading just after the most recent record in the
shard, so that you always read the most recent data in the shard.
:type starting_sequence_number: string
:param starting_sequence_number: The sequence number of the data record
            in the shard from which to start reading.
"""
params = {
'StreamName': stream_name,
'ShardId': shard_id,
'ShardIteratorType': shard_iterator_type,
}
if starting_sequence_number is not None:
params['StartingSequenceNumber'] = starting_sequence_number
return self.make_request(action='GetShardIterator',
body=json.dumps(params))
def list_streams(self, limit=None, exclusive_start_stream_name=None):
"""
This operation returns an array of the names of all the
streams that are associated with the AWS account making the
`ListStreams` request. A given AWS account can have many
streams active at one time.
The number of streams may be too large to return from a single
call to `ListStreams`. You can limit the number of returned
streams using the `Limit` parameter. If you do not specify a
value for the `Limit` parameter, Amazon Kinesis uses the
default limit, which is currently 10.
You can detect if there are more streams available to list by
using the `HasMoreStreams` flag from the returned output. If
there are more streams available, you can request more streams
by using the name of the last stream returned by the
`ListStreams` request in the `ExclusiveStartStreamName`
parameter in a subsequent request to `ListStreams`. The group
of stream names returned by the subsequent request is then
added to the list. You can continue this process until all the
stream names have been collected in the list.
`ListStreams` has a limit of 5 transactions per second per
account.
:type limit: integer
:param limit: The maximum number of streams to list.
:type exclusive_start_stream_name: string
:param exclusive_start_stream_name: The name of the stream to start the
list with.
"""
params = {}
if limit is not None:
params['Limit'] = limit
if exclusive_start_stream_name is not None:
params['ExclusiveStartStreamName'] = exclusive_start_stream_name
return self.make_request(action='ListStreams',
body=json.dumps(params))
def merge_shards(self, stream_name, shard_to_merge,
adjacent_shard_to_merge):
"""
This operation merges two adjacent shards in a stream and
combines them into a single shard to reduce the stream's
capacity to ingest and transport data. Two shards are
considered adjacent if the union of the hash key ranges for
the two shards form a contiguous set with no gaps. For
example, if you have two shards, one with a hash key range of
276...381 and the other with a hash key range of 382...454,
then you could merge these two shards into a single shard that
would have a hash key range of 276...454. After the merge, the
single child shard receives data for all hash key values
covered by the two parent shards.
`MergeShards` is called when there is a need to reduce the
overall capacity of a stream because of excess capacity that
is not being used. The operation requires that you specify the
shard to be merged and the adjacent shard for a given stream.
For more information about merging shards, see the `Amazon
Kinesis Developer Guide`_.
If the stream is in the ACTIVE state, you can call
`MergeShards`. If a stream is in CREATING or UPDATING or
DELETING states, then Amazon Kinesis returns a
`ResourceInUseException`. If the specified stream does not
exist, Amazon Kinesis returns a `ResourceNotFoundException`.
You can use the DescribeStream operation to check the state of
the stream, which is returned in `StreamStatus`.
`MergeShards` is an asynchronous operation. Upon receiving a
`MergeShards` request, Amazon Kinesis immediately returns a
response and sets the `StreamStatus` to UPDATING. After the
operation is completed, Amazon Kinesis sets the `StreamStatus`
to ACTIVE. Read and write operations continue to work while
the stream is in the UPDATING state.
You use the DescribeStream operation to determine the shard
IDs that are specified in the `MergeShards` request.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, `MergeShards` or SplitShard, you
will receive a `LimitExceededException`.
        `MergeShards` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream for the merge.
:type shard_to_merge: string
:param shard_to_merge: The shard ID of the shard to combine with the
adjacent shard for the merge.
:type adjacent_shard_to_merge: string
:param adjacent_shard_to_merge: The shard ID of the adjacent shard for
the merge.
"""
params = {
'StreamName': stream_name,
'ShardToMerge': shard_to_merge,
'AdjacentShardToMerge': adjacent_shard_to_merge,
}
return self.make_request(action='MergeShards',
body=json.dumps(params))
def put_record(self, stream_name, data, partition_key,
explicit_hash_key=None,
sequence_number_for_ordering=None,
exclusive_minimum_sequence_number=None,
b64_encode=True):
"""
This operation puts a data record into an Amazon Kinesis
stream from a producer. This operation must be called to send
data from the producer into the Amazon Kinesis stream for
real-time ingestion and subsequent processing. The `PutRecord`
operation requires the name of the stream that captures,
stores, and transports the data; a partition key; and the data
blob itself. The data blob could be a segment from a log file,
geographic/location data, website clickstream data, or any
other data type.
The partition key is used to distribute data across shards.
Amazon Kinesis segregates the data records that belong to a
data stream into multiple shards, using the partition key
associated with each data record to determine which shard a
given data record belongs to.
Partition keys are Unicode strings, with a maximum length
limit of 256 bytes. An MD5 hash function is used to map
partition keys to 128-bit integer values and to map associated
data records to shards using the hash key ranges of the
shards. You can override hashing the partition key to
determine the shard by explicitly specifying a hash value
using the `ExplicitHashKey` parameter. For more information,
see the `Amazon Kinesis Developer Guide`_.
`PutRecord` returns the shard ID of where the data record was
placed and the sequence number that was assigned to the data
record.
Sequence numbers generally increase over time. To guarantee
strictly increasing ordering, use the
`SequenceNumberForOrdering` parameter. For more information,
see the `Amazon Kinesis Developer Guide`_.
If a `PutRecord` request cannot be processed because of
insufficient provisioned throughput on the shard involved in
the request, `PutRecord` throws
`ProvisionedThroughputExceededException`.
Data records are accessible for only 24 hours from the time
that they are added to an Amazon Kinesis stream.
:type stream_name: string
:param stream_name: The name of the stream to put the data record into.
:type data: blob
:param data: The data blob to put into the record, which is
Base64-encoded when the blob is serialized.
The maximum size of the data blob (the payload after
            Base64-decoding) is 50 kilobytes (KB).
Set `b64_encode` to disable automatic Base64 encoding.
:type partition_key: string
:param partition_key: Determines which shard in the stream the data
record is assigned to. Partition keys are Unicode strings with a
maximum length limit of 256 bytes. Amazon Kinesis uses the
partition key as input to a hash function that maps the partition
key and associated data to a specific shard. Specifically, an MD5
hash function is used to map partition keys to 128-bit integer
values and to map associated data records to shards. As a result of
this hashing mechanism, all data records with the same partition
key will map to the same shard within the stream.
:type explicit_hash_key: string
:param explicit_hash_key: The hash value used to explicitly determine
the shard the data record is assigned to by overriding the
partition key hash.
:type sequence_number_for_ordering: string
:param sequence_number_for_ordering: Guarantees strictly increasing
sequence numbers, for puts from the same client and to the same
partition key. Usage: set the `SequenceNumberForOrdering` of record
n to the sequence number of record n-1 (as returned in the
PutRecordResult when putting record n-1 ). If this parameter is not
set, records will be coarsely ordered based on arrival time.
:type b64_encode: boolean
:param b64_encode: Whether to Base64 encode `data`. Can be set to
``False`` if `data` is already encoded to prevent double encoding.
"""
params = {
'StreamName': stream_name,
'Data': data,
'PartitionKey': partition_key,
}
if explicit_hash_key is not None:
params['ExplicitHashKey'] = explicit_hash_key
if sequence_number_for_ordering is not None:
params['SequenceNumberForOrdering'] = sequence_number_for_ordering
if b64_encode:
params['Data'] = base64.b64encode(
params['Data'].encode('utf-8')).decode('utf-8')
return self.make_request(action='PutRecord',
body=json.dumps(params))
def split_shard(self, stream_name, shard_to_split, new_starting_hash_key):
"""
This operation splits a shard into two new shards in the
stream, to increase the stream's capacity to ingest and
transport data. `SplitShard` is called when there is a need to
increase the overall capacity of stream because of an expected
increase in the volume of data records being ingested.
`SplitShard` can also be used when a given shard appears to be
approaching its maximum utilization, for example, when the set
of producers sending data into the specific shard are suddenly
sending more than previously anticipated. You can also call
the `SplitShard` operation to increase stream capacity, so
that more Amazon Kinesis applications can simultaneously read
data from the stream for real-time processing.
The `SplitShard` operation requires that you specify the shard
to be split and the new hash key, which is the position in the
shard where the shard gets split in two. In many cases, the
new hash key might simply be the average of the beginning and
ending hash key, but it can be any hash key value in the range
being mapped into the shard. For more information about
splitting shards, see the `Amazon Kinesis Developer Guide`_.
You can use the DescribeStream operation to determine the
shard ID and hash key values for the `ShardToSplit` and
`NewStartingHashKey` parameters that are specified in the
`SplitShard` request.
`SplitShard` is an asynchronous operation. Upon receiving a
`SplitShard` request, Amazon Kinesis immediately returns a
response and sets the stream status to UPDATING. After the
operation is completed, Amazon Kinesis sets the stream status
to ACTIVE. Read and write operations continue to work while
the stream is in the UPDATING state.
You can use `DescribeStream` to check the status of the
stream, which is returned in `StreamStatus`. If the stream is
in the ACTIVE state, you can call `SplitShard`. If a stream is
in CREATING or UPDATING or DELETING states, then Amazon
Kinesis returns a `ResourceInUseException`.
If the specified stream does not exist, Amazon Kinesis returns
a `ResourceNotFoundException`. If you try to create more
shards than are authorized for your account, you receive a
`LimitExceededException`.
**Note:** The default limit for an AWS account is two shards
per stream. If you need to create a stream with more than two
shards, contact AWS Support to increase the limit on your
account.
If you try to operate on too many streams in parallel using
CreateStream, DeleteStream, MergeShards or SplitShard, you
will receive a `LimitExceededException`.
        `SplitShard` has a limit of 5 transactions per second per
account.
:type stream_name: string
:param stream_name: The name of the stream for the shard split.
:type shard_to_split: string
:param shard_to_split: The shard ID of the shard to split.
:type new_starting_hash_key: string
:param new_starting_hash_key: A hash key value for the starting hash
key of one of the child shards created by the split. The hash key
range for a given shard constitutes a set of ordered contiguous
positive integers. The value for `NewStartingHashKey` must be in
the range of hash keys being mapped into the shard. The
`NewStartingHashKey` hash key value and all higher hash key values
in hash key range are distributed to one of the child shards. All
the lower hash key values in the range are distributed to the other
child shard.
"""
params = {
'StreamName': stream_name,
'ShardToSplit': shard_to_split,
'NewStartingHashKey': new_starting_hash_key,
}
return self.make_request(action='SplitShard',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response.getheaders())
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
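# A minimal usage sketch (illustrative only): the stream name, shard count and
# shard ID below are assumptions, not values defined in this module, and the
# response keys follow the Kinesis API as I understand it. KinesisConnection()
# picks up AWS credentials in the usual boto way.
#
#   conn = KinesisConnection()
#   conn.create_stream('my-test-stream', shard_count=2)
#   conn.put_record('my-test-stream', data='hello', partition_key='key-1')
#   desc = conn.describe_stream('my-test-stream')
#   shard_id = desc['StreamDescription']['Shards'][0]['ShardId']
#   it = conn.get_shard_iterator('my-test-stream', shard_id, 'TRIM_HORIZON')
#   records = conn.get_records(it['ShardIterator'])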
|
[
"boto.compat.json.dumps",
"boto.regioninfo.RegionInfo",
"boto.log.debug",
"boto.compat.json.loads"
] |
[((32741, 32770), 'boto.log.debug', 'boto.log.debug', (['response_body'], {}), '(response_body)\n', (32755, 32770), False, 'import boto\n'), ((2488, 2556), 'boto.regioninfo.RegionInfo', 'RegionInfo', (['self', 'self.DefaultRegionName', 'self.DefaultRegionEndpoint'], {}), '(self, self.DefaultRegionName, self.DefaultRegionEndpoint)\n', (2498, 2556), False, 'from boto.regioninfo import RegionInfo\n'), ((32923, 32948), 'boto.compat.json.loads', 'json.loads', (['response_body'], {}), '(response_body)\n', (32933, 32948), False, 'from boto.compat import json\n'), ((6371, 6389), 'boto.compat.json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (6381, 6389), False, 'from boto.compat import json\n'), ((7747, 7765), 'boto.compat.json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (7757, 7765), False, 'from boto.compat import json\n'), ((10086, 10104), 'boto.compat.json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (10096, 10104), False, 'from boto.compat import json\n'), ((12975, 12993), 'boto.compat.json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (12985, 12993), False, 'from boto.compat import json\n'), ((17660, 17678), 'boto.compat.json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (17670, 17678), False, 'from boto.compat import json\n'), ((19569, 19587), 'boto.compat.json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (19579, 19587), False, 'from boto.compat import json\n'), ((22717, 22735), 'boto.compat.json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (22727, 22735), False, 'from boto.compat import json\n'), ((27820, 27838), 'boto.compat.json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (27830, 27838), False, 'from boto.compat import json\n'), ((32057, 32075), 'boto.compat.json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (32067, 32075), False, 'from boto.compat import json\n'), ((32859, 32884), 'boto.compat.json.loads', 'json.loads', (['response_body'], {}), '(response_body)\n', (32869, 32884), False, 'from boto.compat import json\n')]
|
import numpy as np
import hierarchy as hrcy
def get_stationary_distribution(capacities, r, lmbda, mu):
assert capacities[-1] == 1
matrix = hrcy.transitions.get_transition_matrix(
capacities=capacities, r=r, lmbda=lmbda, mu=mu
)
dimension = matrix.shape[0]
M = np.vstack((matrix.transpose(), np.ones(dimension)))
b = np.vstack((np.zeros((dimension, 1)), [1]))
return np.linalg.lstsq(M, b)[0].transpose()[0]
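# Explanatory note (added): assuming `matrix` is a transition-rate / generator
# matrix Q, the stationary distribution pi satisfies pi @ Q = 0 together with
# sum(pi) == 1.  Stacking Q^T on top of a row of ones and setting the
# right-hand side to (0, ..., 0, 1) turns this into an overdetermined linear
# system, which np.linalg.lstsq solves in the least-squares sense.
#
# Minimal sanity check (the argument values below are hypothetical):
#   pi = get_stationary_distribution(capacities=[2, 1], r=0.5, lmbda=1.0, mu=2.0)
#   assert np.isclose(pi.sum(), 1.0)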
|
[
"numpy.linalg.lstsq",
"hierarchy.transitions.get_transition_matrix",
"numpy.zeros",
"numpy.ones"
] |
[((150, 241), 'hierarchy.transitions.get_transition_matrix', 'hrcy.transitions.get_transition_matrix', ([], {'capacities': 'capacities', 'r': 'r', 'lmbda': 'lmbda', 'mu': 'mu'}), '(capacities=capacities, r=r, lmbda=\n lmbda, mu=mu)\n', (188, 241), True, 'import hierarchy as hrcy\n'), ((323, 341), 'numpy.ones', 'np.ones', (['dimension'], {}), '(dimension)\n', (330, 341), True, 'import numpy as np\n'), ((363, 387), 'numpy.zeros', 'np.zeros', (['(dimension, 1)'], {}), '((dimension, 1))\n', (371, 387), True, 'import numpy as np\n'), ((407, 428), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['M', 'b'], {}), '(M, b)\n', (422, 428), True, 'import numpy as np\n')]
|
import requests
from bs4 import BeautifulSoup as bs
url = "https://mdn.github.io/beginner-html-site/"
response = requests.get(url)  # fetch the page, much like typing the URL into a browser
print(response.status_code)  # 200 means success; 404 would mean the page was not found
print(response.text[:500])  # first 500 characters of the raw HTML
# I could parse the HTML by hand, but Beautiful Soup does it for me:
# parse the raw HTML text into a soup object
soup = bs(response.text, "html.parser")  # "html.parser" is built in; "lxml" is an optional, often faster parser
print(soup.title)
print(type(soup))
headline = soup.find("h1") # there should only be one h1 tag in the html
print(headline)
print(headline.text) # gets you text
images = soup.find_all("img") # get all images tags
print(images)
print(len(images))
# we would want some logic here to test length of images
if len(images) > 0:
first_image = images[0]
print(first_image.attrs)
for key, value in first_image.attrs.items(): # attrs is a dictionary
print("attribute", key, "value:", value)
|
[
"bs4.BeautifulSoup",
"requests.get"
] |
[((115, 132), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (127, 132), False, 'import requests\n'), ((421, 453), 'bs4.BeautifulSoup', 'bs', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (423, 453), True, 'from bs4 import BeautifulSoup as bs\n')]
|
import numpy as np
arr1 = np.array([1, 2, 3])
arr2 = np.array([1, 2, 3])
newarr = np.sum([arr1, arr2])
print(newarr)
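# Note (added): np.sum over the list reduces *all* elements to a single scalar,
# so this prints 12.  For the element-wise sum [2 4 6], use np.add(arr1, arr2)
# or np.sum([arr1, arr2], axis=0).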
|
[
"numpy.array",
"numpy.sum"
] |
[((27, 46), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (35, 46), True, 'import numpy as np\n'), ((54, 73), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (62, 73), True, 'import numpy as np\n'), ((84, 104), 'numpy.sum', 'np.sum', (['[arr1, arr2]'], {}), '([arr1, arr2])\n', (90, 104), True, 'import numpy as np\n')]
|
'''
for i in predictions/test/*; do python visualize_predictions.py $i\/doc.json $i/pred_weights.npy prediction_dir/$i ; done;
'''
import argparse
import numpy as np
import bipartite_utils
import json
import os
import subprocess
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('document')
parser.add_argument('predictions')
parser.add_argument('output')
    parser.add_argument('--n_to_show', default=5, type=int)  # type=int so a CLI-supplied value is numeric
return parser.parse_args()
def call(x):
subprocess.call(x, shell=True)
def main():
args = parse_args()
pred_adj = np.load(args.predictions)
with open(args.document) as f:
data = json.loads(f.read())
images, text = data[0], data[1]
solve_fn = bipartite_utils.generate_fast_hungarian_solving_function()
sol = solve_fn(pred_adj, args.n_to_show)
scores = pred_adj[sol[:,0], sol[:,1]]
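    # `sol` is assumed (from how it is indexed below) to hold one matched
    # (sentence_index, image_index) pair per row from the Hungarian solver;
    # `scores` are the predicted affinities of those matched pairs.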
true_adj = np.zeros((len(text), len(images)))
for text_idx, t in enumerate(text):
if t[1] == -1: continue
true_adj[text_idx, t[1]] = 1
for image_idx, t in enumerate(images):
if t[1] == -1: continue
true_adj[t[1], image_idx] = 1
auc = 100 * roc_auc_score(true_adj.flatten(),
pred_adj.flatten())
print('AUC: {:.2f} {}'.format(auc,
data[-1]))
ordered_images, ordered_sentences = [], []
for img_idx, sent_idx, sc in sorted(
zip(sol[:,1], sol[:,0], scores), key=lambda x:-x[-1])[:args.n_to_show]:
ordered_images.append(img_idx)
ordered_sentences.append(sent_idx)
print(sc)
pred_adj_subgraph = pred_adj[np.array(ordered_sentences),:][:,np.array(ordered_images)]
true_adj_subgraph = true_adj[np.array(ordered_sentences),:][:,np.array(ordered_images)]
selected_images = [images[img_idx][0] for img_idx in ordered_images]
selected_sentences = [text[sent_idx][0] for sent_idx in ordered_sentences]
# normalize predicted sims to have max 1 and min 0
# first, clip out negative values
pred_adj_subgraph = np.clip(pred_adj_subgraph, 0, 1.0)
pred_adj_subgraph -= np.min(pred_adj_subgraph.flatten())
pred_adj_subgraph /= np.max(pred_adj_subgraph.flatten())
assert np.min(pred_adj_subgraph.flatten()) == 0.0
assert np.max(pred_adj_subgraph.flatten()) == 1.0
print(pred_adj_subgraph.shape)
print(ordered_images)
print(ordered_sentences)
print(selected_images)
print(selected_sentences)
# each line has ((x1, y1, x2, y2), strength, correctness)
# images go above text
lines_to_plot = []
image_text_gap = 2
same_mode_gap = 2
offdiag_alpha_mul = .5
def cosine_to_width(cos, exp=2.0, maxwidth=8.0):
return cos**exp * maxwidth
def cosine_to_alpha(cos, exp=1/2., maxalpha=1.0):
return cos**exp * maxalpha
correct_color, incorrect_color = '#1b7837', '#762a83'
lines_to_plot = []
for text_idx in range(args.n_to_show):
for image_idx in range(args.n_to_show):
coords = (text_idx*same_mode_gap, 0, image_idx*same_mode_gap, image_text_gap)
strength = max(pred_adj_subgraph[text_idx, image_idx], 0)
correctness = true_adj_subgraph[text_idx, image_idx] == 1
lines_to_plot.append((coords, strength, correctness))
plt.figure(figsize=(args.n_to_show*same_mode_gap, image_text_gap))
for (x1, y1, x2, y2), strength, correct in sorted(lines_to_plot,
key=lambda x: x[1]):
if x1 == x2: continue
plt.plot([x1, x2], [y1, y2],
linewidth=cosine_to_width(strength),
alpha=cosine_to_alpha(strength) * offdiag_alpha_mul,
color=correct_color if correct else incorrect_color)
for (x1, y1, x2, y2), strength, correct in sorted(lines_to_plot,
key=lambda x: x[1]):
if x1 != x2: continue
plt.plot([x1, x2], [y1, y2],
linewidth=cosine_to_width(strength),
color=correct_color if correct else incorrect_color)
plt.axis('off')
plt.tight_layout()
if not os.path.exists(args.output):
os.makedirs(args.output)
with open(args.output + '/sentences.txt', 'w') as f:
f.write('\n'.join([' '.join(s.split()) for s in selected_sentences]))
with open(args.output + '/images.txt', 'w') as f:
f.write('\n'.join(selected_images))
with open(args.output + '/all_sentences.txt', 'w') as f:
f.write('\n'.join([' '.join(s[0].split()) for s in text]))
with open(args.output + '/all_images.txt', 'w') as f:
f.write('\n'.join([x[0] for x in images]))
with open(args.output + '/auc.txt', 'w') as f:
f.write('{:.4f}'.format(auc))
plt.savefig(args.output + '/graph.png', dpi=300)
call('convert {} -trim {}'.format(args.output + '/graph.png',
args.output + '/graph_cropped.png'))
if __name__ == '__main__':
main()
|
[
"numpy.load",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.exists",
"matplotlib.pyplot.axis",
"numpy.clip",
"matplotlib.pyplot.figure",
"subprocess.call",
"numpy.array",
"bipartite_utils.generate_fast_hungarian_solving_function",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig"
] |
[((336, 361), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (359, 361), False, 'import argparse\n'), ((571, 601), 'subprocess.call', 'subprocess.call', (['x'], {'shell': '(True)'}), '(x, shell=True)\n', (586, 601), False, 'import subprocess\n'), ((655, 680), 'numpy.load', 'np.load', (['args.predictions'], {}), '(args.predictions)\n', (662, 680), True, 'import numpy as np\n'), ((805, 863), 'bipartite_utils.generate_fast_hungarian_solving_function', 'bipartite_utils.generate_fast_hungarian_solving_function', ([], {}), '()\n', (861, 863), False, 'import bipartite_utils\n'), ((2141, 2175), 'numpy.clip', 'np.clip', (['pred_adj_subgraph', '(0)', '(1.0)'], {}), '(pred_adj_subgraph, 0, 1.0)\n', (2148, 2175), True, 'import numpy as np\n'), ((3395, 3463), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(args.n_to_show * same_mode_gap, image_text_gap)'}), '(figsize=(args.n_to_show * same_mode_gap, image_text_gap))\n', (3405, 3463), True, 'import matplotlib.pyplot as plt\n'), ((4223, 4238), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4231, 4238), True, 'import matplotlib.pyplot as plt\n'), ((4243, 4261), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4259, 4261), True, 'import matplotlib.pyplot as plt\n'), ((4903, 4951), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(args.output + '/graph.png')"], {'dpi': '(300)'}), "(args.output + '/graph.png', dpi=300)\n", (4914, 4951), True, 'import matplotlib.pyplot as plt\n'), ((4278, 4305), 'os.path.exists', 'os.path.exists', (['args.output'], {}), '(args.output)\n', (4292, 4305), False, 'import os\n'), ((4315, 4339), 'os.makedirs', 'os.makedirs', (['args.output'], {}), '(args.output)\n', (4326, 4339), False, 'import os\n'), ((1753, 1777), 'numpy.array', 'np.array', (['ordered_images'], {}), '(ordered_images)\n', (1761, 1777), True, 'import numpy as np\n'), ((1845, 1869), 'numpy.array', 'np.array', (['ordered_images'], {}), '(ordered_images)\n', (1853, 1869), True, 'import numpy as np\n'), ((1720, 1747), 'numpy.array', 'np.array', (['ordered_sentences'], {}), '(ordered_sentences)\n', (1728, 1747), True, 'import numpy as np\n'), ((1812, 1839), 'numpy.array', 'np.array', (['ordered_sentences'], {}), '(ordered_sentences)\n', (1820, 1839), True, 'import numpy as np\n')]
|
from process import Process
from process import State
class Simulation:
def __init__(self, data, cpu, scheduling):
self.data = data
self.cpu = cpu
self.scheduling = scheduling
self.processes = []
### Initiate all processes at the same time since they all arrive at t = 0
for i, processSimulationData in enumerate(data):
process = Process(i + 1, processSimulationData)
self.scheduling.addProcess(process)
self.processes.append(process)
self.time = 0
self.totalRunningTime = 0
# Check if all processes terminated
def terminated(self):
for process in self.processes:
if process.state != State.TERMINATED:
return False
return True
### Simulation Loop
def run(self):
# Stop only if all processes terminated
while not self.terminated():
# Update process state
for process in self.processes:
process.update()
# Update scheduling state
self.scheduling.update()
### Print formatted data
if (self.cpu.contextSwitch or self.terminated()):
print('Time: ' + str(self.time))
print('{:20}'.format('Process ID'), end='')
print('{:20}'.format('Burst Type'), end='')
                print('{:20}'.format('Burst Time'), end='')
print('{:20}'.format('Remaining Time'), end='')
print('{:20}'.format('State'), end='')
print()
for process in self.processes:
process.print()
self.cpu.contextSwitch = False
self.scheduling.printQueues()
print('----------------------------------------------------------------------------------------------')
# Track simulation result
for process in self.processes:
### Track waiting time
if process.state == State.READY:
process.totalWaitingTime += 1
### Track turnaround time
if process.state != State.TERMINATED:
process.turnaroundTime += 1
### Track response time
if process.previousState == State.NEW and process.state == State.READY:
process.responseTime += 1
# print('-----------------------------------------')
### Track total running time
if self.cpu.currentProcess:
if self.cpu.currentProcess.state == State.RUNNING:
self.totalRunningTime += 1
self.time += 1
### Compute results after simulation is finished
print('Results:')
waitingTimeSum = 0
turnaroundTimeSum = 0
responseTimeSum = 0
### Print formatted data
print('{:20}'.format('Process ID'), end='')
print('{:20}'.format('Tw (Waiting)'), end='')
print('{:20}'.format('Ttr (Turnaround)'), end='')
print('{:20}'.format('Tr (Response)'), end='')
print()
for process in self.processes:
print('{:20}'.format('P' + str(process.pid)), end='')
print('{:20}'.format(str(process.totalWaitingTime)), end='')
print('{:20}'.format(str(process.turnaroundTime)), end='')
print('{:20}'.format(str(process.responseTime)), end='')
print()
waitingTimeSum += process.totalWaitingTime
turnaroundTimeSum += process.turnaroundTime
responseTimeSum += process.responseTime
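        # Note: the divisor 8 assumes the input data always contains exactly
        # eight processes; len(self.processes) would make the averages general.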
print('{:20}'.format('Average'), end='')
print('{:20}'.format(str(waitingTimeSum / 8)), end='')
print('{:20}'.format(str(turnaroundTimeSum / 8)), end='')
print('{:20}'.format(str(responseTimeSum / 8)), end='')
print()
print()
print('CPU Utilization: ', end='')
        print(self.totalRunningTime / (self.time - 1) * 100)
        print('Total time to finish all processes: ', end='')
        print(self.time - 1)
|
[
"process.Process"
] |
[((397, 434), 'process.Process', 'Process', (['(i + 1)', 'processSimulationData'], {}), '(i + 1, processSimulationData)\n', (404, 434), False, 'from process import Process\n')]
|
import concat.level1.execute
import unittest
import ast
from typing import Dict
class TestExecute(unittest.TestCase):
names = ['to_int', 'to_bool', 'to_complex', 'len', 'getitem', 'to_float',
'decode_bytes', 'to_tuple', 'to_bytes', 'to_list', 'to_bytearray',
'to_set', 'add_to_set', 'to_frozenset', 'to_dict',
'user_defined_function', 'method', 'with_async', 'for_async',
'coroutine', 'math', 'import_module', 'import_advanced',
'custom_class', 'instance', 'open', 'popen', 'fdopen', 'curry',
'call', 'drop', 'drop_2', 'drop_3', 'nip', 'nip_2', 'dup',
'dup_2', 'swap', 'dup_3', 'over', 'over_2', 'pick', 'to_slice',
'choose', 'if_then', 'if_not', 'case', 'loop']
def setUp(self) -> None:
pass
def test_execute_function(self) -> None:
module = ast.Module(body=[])
concat.level1.execute.execute('<test>', module, {})
# we passed if we get here
def test_preamble(self) -> None:
"""Test that the preamble adds correct names to the globals dict."""
module = ast.Module(body=[])
globals: Dict[str, object] = {}
concat.level1.execute.execute('<test>', module, globals)
for name in self.names:
with self.subTest(msg='presence of "{}"'.format(name), name=name):
message = 'preamble did not add "{}"'.format(name)
self.assertIn(name, globals, msg=message)
|
[
"ast.Module"
] |
[((879, 898), 'ast.Module', 'ast.Module', ([], {'body': '[]'}), '(body=[])\n', (889, 898), False, 'import ast\n'), ((1126, 1145), 'ast.Module', 'ast.Module', ([], {'body': '[]'}), '(body=[])\n', (1136, 1145), False, 'import ast\n')]
|
"""
Contains TarifpreispositionProOrt class
and corresponding marshmallow schema for de-/serialization
"""
from typing import List
import attr
from marshmallow import fields
from bo4e.com.com import COM, COMSchema
from bo4e.com.tarifpreisstaffelproort import TarifpreisstaffelProOrt, TarifpreisstaffelProOrtSchema
from bo4e.validators import check_list_length_at_least_one
# pylint: disable=too-few-public-methods
@attr.s(auto_attribs=True, kw_only=True)
class TarifpreispositionProOrt(COM):
"""
    This component can be used to represent tariff prices of different types
.. HINT::
`TarifpreispositionProOrt JSON Schema <https://json-schema.app/view/%23?url=https://raw.githubusercontent.com/Hochfrequenz/BO4E-python/main/json_schemas/com/TarifpreispositionProOrtSchema.json>`_
"""
# required attributes
    #: Postal code of the location for which the price applies
postleitzahl: str = attr.ib(validator=attr.validators.matches_re(r"^\d{5}$"))
    #: Location for which the price applies
ort: str = attr.ib(validator=attr.validators.instance_of(str))
    #: ene't network number of the grid in which the price applies
netznr: str = attr.ib(validator=attr.validators.instance_of(str))
    # The price tiers and their price information are defined here
preisstaffeln: List[TarifpreisstaffelProOrt] = attr.ib(
validator=[
attr.validators.deep_iterable(
member_validator=attr.validators.instance_of(TarifpreisstaffelProOrt),
iterable_validator=check_list_length_at_least_one,
),
]
)
# there are no optional attributes
class TarifpreispositionProOrtSchema(COMSchema):
"""
Schema for de-/serialization of TarifpreispositionProOrt.
"""
class_name = TarifpreispositionProOrt
# required attributes
postleitzahl = fields.Str()
ort = fields.Str()
netznr = fields.Str()
preisstaffeln = fields.List(fields.Nested(TarifpreisstaffelProOrtSchema))
|
[
"attr.validators.instance_of",
"attr.s",
"attr.validators.matches_re",
"marshmallow.fields.Str",
"marshmallow.fields.Nested"
] |
[((420, 459), 'attr.s', 'attr.s', ([], {'auto_attribs': '(True)', 'kw_only': '(True)'}), '(auto_attribs=True, kw_only=True)\n', (426, 459), False, 'import attr\n'), ((1834, 1846), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (1844, 1846), False, 'from marshmallow import fields\n'), ((1857, 1869), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (1867, 1869), False, 'from marshmallow import fields\n'), ((1883, 1895), 'marshmallow.fields.Str', 'fields.Str', ([], {}), '()\n', (1893, 1895), False, 'from marshmallow import fields\n'), ((1928, 1972), 'marshmallow.fields.Nested', 'fields.Nested', (['TarifpreisstaffelProOrtSchema'], {}), '(TarifpreisstaffelProOrtSchema)\n', (1941, 1972), False, 'from marshmallow import fields\n'), ((938, 976), 'attr.validators.matches_re', 'attr.validators.matches_re', (['"""^\\\\d{5}$"""'], {}), "('^\\\\d{5}$')\n", (964, 976), False, 'import attr\n'), ((1045, 1077), 'attr.validators.instance_of', 'attr.validators.instance_of', (['str'], {}), '(str)\n', (1072, 1077), False, 'import attr\n'), ((1172, 1204), 'attr.validators.instance_of', 'attr.validators.instance_of', (['str'], {}), '(str)\n', (1199, 1204), False, 'import attr\n'), ((1426, 1478), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TarifpreisstaffelProOrt'], {}), '(TarifpreisstaffelProOrt)\n', (1453, 1478), False, 'import attr\n')]
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Enables scheduling execution at a later time."""
import time
from grpc.framework.foundation import _timer_future
def later(delay, computation):
"""Schedules later execution of a callable.
Args:
delay: Any numeric value. Represents the minimum length of time in seconds
to allow to pass before beginning the computation. No guarantees are made
about the maximum length of time that will pass.
computation: A callable that accepts no arguments.
Returns:
A Future representing the scheduled computation.
"""
timer_future = _timer_future.TimerFuture(time.time() + delay, computation)
timer_future.start()
return timer_future
|
[
"time.time"
] |
[((2118, 2129), 'time.time', 'time.time', ([], {}), '()\n', (2127, 2129), False, 'import time\n')]
|
import collections
from collections import defaultdict
from typing import List, Dict
from pycrunch_trace.tracing.file_map import FileMap
class ClientTraceIntrospection:
total_events: int
def __init__(self):
self.total_events = 0
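        # event name -> hit count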
self.stats = defaultdict(int)
# file id -> hit count
self.top_files = defaultdict(int)
def save_events(self, events: List):
self.total_events += len(events)
for e in events:
self.stats[e.event_name] += 1
self.top_files[e.cursor.file] += 1
def print_to_console(self, files: Dict[str, int]):
print('TraceIntrospection:')
print(' stats:')
for (each, hit_count) in self.stats.items():
print(f' - {each}:{hit_count}')
print(' files:')
filemap = FileMap.from_reverse(files)
sorted_x = sorted(self.top_files.items(), reverse=True, key=lambda kv: kv[1])
sortir = collections.OrderedDict(sorted_x)
for (each, hit_count) in sortir.items():
print(f' - {hit_count} hits in {filemap.filename_by_id(each)}')
client_introspection = ClientTraceIntrospection()
|
[
"collections.defaultdict",
"pycrunch_trace.tracing.file_map.FileMap.from_reverse",
"collections.OrderedDict"
] |
[((270, 286), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (281, 286), False, 'from collections import defaultdict\n'), ((343, 359), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (354, 359), False, 'from collections import defaultdict\n'), ((818, 845), 'pycrunch_trace.tracing.file_map.FileMap.from_reverse', 'FileMap.from_reverse', (['files'], {}), '(files)\n', (838, 845), False, 'from pycrunch_trace.tracing.file_map import FileMap\n'), ((949, 982), 'collections.OrderedDict', 'collections.OrderedDict', (['sorted_x'], {}), '(sorted_x)\n', (972, 982), False, 'import collections\n')]
|
import sqlite3
class AI_DB(object):
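    """Lightweight helper around an SQLite database that stores a user table."""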
def __init__(self, db_file_name):
self.conn = sqlite3.connect(db_file_name)
self.cur = self.conn.cursor()
def read_1_data(self, user_id):
query = "SELECT * FROM user WHERE user_id="+str(user_id)
self.cur.execute(query)
row = self.cur.fetchall()
return row
def read_all_data(self):
query = "SELECT * FROM user"
self.cur.execute(query)
rows = self.cur.fetchall()
return rows
def updatePersonType(self, user_id, person_type):
query = "UPDATE user SET person_type = \""+str(person_type)+"\" WHERE user_id = "+str(user_id)
self.cur.execute(query)
def __del__(self):
print("DB class deleted.")
self.conn.commit()
self.conn.close()
'''
db = AI_DB("./chat2.db")
print(db.read_all_data())
print(db.read_1_data(1))
#db.updatePersonType(3, "G")
#print(db.read_all_data())
del db
#cur.execute("SELECT * FROM user")
#cur.execute("CREATE TABLE IF NOT EXISTS user(user_id integer primary key autoincrement, gender varchar(20), nickname varchar(20), type varchar(1))")
'''
|
[
"sqlite3.connect"
] |
[((95, 124), 'sqlite3.connect', 'sqlite3.connect', (['db_file_name'], {}), '(db_file_name)\n', (110, 124), False, 'import sqlite3\n')]
|
# Generated by Django 3.0.8 on 2020-08-05 20:30
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('CompanyManagement', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Form',
fields=[
('ID', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
('Field', django.contrib.postgres.fields.ArrayField(base_field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), size=None), size=None)),
('CompanyID', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='CompanyManagement.Company')),
],
),
]
|
[
"django.db.models.ForeignKey",
"django.db.models.UUIDField",
"django.db.models.CharField"
] |
[((430, 534), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)', 'unique': '(True)'}), '(default=uuid.uuid4, editable=False, primary_key=True,\n serialize=False, unique=True)\n', (446, 534), False, 'from django.db import migrations, models\n'), ((754, 863), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'default': '(0)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""CompanyManagement.Company"""'}), "(default=0, on_delete=django.db.models.deletion.CASCADE,\n to='CompanyManagement.Company')\n", (771, 863), False, 'from django.db import migrations, models\n'), ((665, 697), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (681, 697), False, 'from django.db import migrations, models\n')]
|
import webapp2
import json
from models.user import User
class UpdateUserHandler(webapp2.RequestHandler):
def get(self):
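        # Update the logged-in user's display name and car availability from the query parameters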
user = User.checkUser()
if not user:
return
update_name = self.request.get('name')
update_car = self.request.get('car')
needCar = False
if update_car == 'true':
needCar = True
if update_name and update_car:
update = User.updateInfo(user.email,update_name,needCar)
self.response.write(json.dumps({'status':'ok'}))
else:
self.response.write(json.dumps({'status':'error'}))
app = webapp2.WSGIApplication([
('/update_user', UpdateUserHandler)
], debug=True)
|
[
"models.user.User.updateInfo",
"models.user.User.checkUser",
"json.dumps",
"webapp2.WSGIApplication"
] |
[((566, 640), 'webapp2.WSGIApplication', 'webapp2.WSGIApplication', (["[('/update_user', UpdateUserHandler)]"], {'debug': '(True)'}), "([('/update_user', UpdateUserHandler)], debug=True)\n", (589, 640), False, 'import webapp2\n'), ((137, 153), 'models.user.User.checkUser', 'User.checkUser', ([], {}), '()\n', (151, 153), False, 'from models.user import User\n'), ((380, 429), 'models.user.User.updateInfo', 'User.updateInfo', (['user.email', 'update_name', 'needCar'], {}), '(user.email, update_name, needCar)\n', (395, 429), False, 'from models.user import User\n'), ((460, 488), 'json.dumps', 'json.dumps', (["{'status': 'ok'}"], {}), "({'status': 'ok'})\n", (470, 488), False, 'import json\n'), ((522, 553), 'json.dumps', 'json.dumps', (["{'status': 'error'}"], {}), "({'status': 'error'})\n", (532, 553), False, 'import json\n')]
|
# Generated by Django 3.2.7 on 2021-10-15 08:15
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('resource_tracker', '0004_alter_resourcepoolattributedefinition_resource_pool'),
]
operations = [
migrations.RenameField(
model_name='resourcegroupattributedefinition',
old_name='resource_group_definition',
new_name='resource_group',
),
migrations.RenameField(
model_name='resourcegrouptextattributedefinition',
old_name='resource_group_definition',
new_name='resource_group',
),
migrations.AlterUniqueTogether(
name='resourcegroupattributedefinition',
unique_together={('name', 'resource_group')},
),
migrations.AlterUniqueTogether(
name='resourcegrouptextattributedefinition',
unique_together={('name', 'resource_group')},
),
]
|
[
"django.db.migrations.AlterUniqueTogether",
"django.db.migrations.RenameField"
] |
[((269, 407), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""resourcegroupattributedefinition"""', 'old_name': '"""resource_group_definition"""', 'new_name': '"""resource_group"""'}), "(model_name='resourcegroupattributedefinition',\n old_name='resource_group_definition', new_name='resource_group')\n", (291, 407), False, 'from django.db import migrations\n'), ((460, 602), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""resourcegrouptextattributedefinition"""', 'old_name': '"""resource_group_definition"""', 'new_name': '"""resource_group"""'}), "(model_name='resourcegrouptextattributedefinition',\n old_name='resource_group_definition', new_name='resource_group')\n", (482, 602), False, 'from django.db import migrations\n'), ((655, 776), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""resourcegroupattributedefinition"""', 'unique_together': "{('name', 'resource_group')}"}), "(name='resourcegroupattributedefinition',\n unique_together={('name', 'resource_group')})\n", (685, 776), False, 'from django.db import migrations\n'), ((817, 942), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""resourcegrouptextattributedefinition"""', 'unique_together': "{('name', 'resource_group')}"}), "(name='resourcegrouptextattributedefinition',\n unique_together={('name', 'resource_group')})\n", (847, 942), False, 'from django.db import migrations\n')]
|
from setuptools import setup,find_packages
setup(
name='hechmsd',
version='1.0.0',
packages=find_packages(),
url='http://www.curwsl.org/',
license='',
author='hasitha',
author_email='<EMAIL>',
description='HecHms Distributed version',
include_package_data=True,
    install_requires=['Flask', 'Flask-Uploads', 'Flask-JSON', 'pandas', 'numpy', 'shapely', 'joblib', 'netCDF4', 'matplotlib', 'imageio', 'scipy', 'geopandas'],
zip_safe=False
)
|
[
"setuptools.find_packages"
] |
[((105, 120), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (118, 120), False, 'from setuptools import setup, find_packages\n')]
|
from datetime import datetime
from flask_login import UserMixin
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class User(UserMixin, db.Model):
id = db.Column(db.String(32), primary_key=True)
username = db.Column(db.String(128), index=True, unique=True, nullable=False)
email = db.Column(db.String(128), index=True, unique=True, nullable=False)
is_authorized = db.Column(db.Integer, default=0, nullable=False)
created = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
class OAuth(db.Model):
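    # OAuth token record linked to a User; the provider defaults to "google".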
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.String(32), db.ForeignKey(User.id), nullable=False)
token = db.Column(db.String(1024), nullable=False)
access_token = db.Column(db.String(256), nullable=False)
expires_in = db.Column(db.DateTime, nullable=False)
provider = db.Column(db.String(10), default="google", nullable=False)
expired = db.Column(db.Integer, default=0, nullable=False)
created = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
user = db.relationship(User)
|
[
"flask_sqlalchemy.SQLAlchemy"
] |
[((111, 123), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (121, 123), False, 'from flask_sqlalchemy import SQLAlchemy\n')]
|
"""A despicable Hanabi player.
Cheating Idiot never hints. He peeks at his cards. When he has a play, he
picks one randomly. When he doesn't, he discards randomly.
"""
import random
from hanabi_classes import *
from bot_utils import get_plays
class CheatingIdiotPlayer(AIPlayer):
@classmethod
def get_name(cls):
return 'idiot'
def __init__(self, *args):
"""Can be overridden to perform initialization, but must call super"""
super(CheatingIdiotPlayer, self).__init__(*args)
def play(self, r):
cards = r.h[r.whoseTurn].cards
progress = r.progress
playableCards = get_plays(cards, progress)
if playableCards == []:
return 'discard', random.choice(cards)
else:
return 'play', random.choice(playableCards)
def end_game_logging(self):
"""Can be overridden to perform logging at the end of the game"""
pass
|
[
"bot_utils.get_plays"
] |
[((621, 647), 'bot_utils.get_plays', 'get_plays', (['cards', 'progress'], {}), '(cards, progress)\n', (630, 647), False, 'from bot_utils import get_plays\n')]
|
import json
def permissions(app_id):
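    # Build the request form data; the nested 'f.req' value is JSON-encoded
    # with compact separators (no whitespace).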
form = {
'f.req': [[[
'xdSrCf',
f'[[null,["{app_id}",7],[]]]',
None,
'1'
]]]
}
form['f.req'] = json.dumps(form['f.req'], separators=(',', ':'))
return form
|
[
"json.dumps"
] |
[((211, 259), 'json.dumps', 'json.dumps', (["form['f.req']"], {'separators': "(',', ':')"}), "(form['f.req'], separators=(',', ':'))\n", (221, 259), False, 'import json\n')]
|
import os
import numpy as np
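# Each class below bundles matrices loaded from .npy files relative to this
# module; the file names suggest F is a fundamental matrix and P a projection matrix.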
class TupperwearD435_0:
F = np.load(os.path.join(os.path.dirname(__file__), 'v1_data/f_matrix.npy'))
P = np.load(os.path.join(os.path.dirname(__file__), 'v1_data/p_matrix_original.npy'))
class TupperwearD435:
F = np.load(os.path.join(os.path.dirname(__file__), 'v1_data/f_matrix.npy'))
P = np.load(os.path.join(os.path.dirname(__file__), 'v1_data/p_matrix.npy'))
|
[
"os.path.dirname"
] |
[((83, 108), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (98, 108), False, 'import os\n'), ((164, 189), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (179, 189), False, 'import os\n'), ((277, 302), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (292, 302), False, 'import os\n'), ((358, 383), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (373, 383), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""Evaluators.
========= =============================================
Name Reference
========= =============================================
rankbased :class:`pykeen.evaluation.RankBasedEvaluator`
sklearn :class:`pykeen.evaluation.SklearnEvaluator`
========= =============================================
.. note:: This table can be re-generated with ``pykeen ls evaluators -f rst``
========= =================================================
Name Reference
========= =================================================
rankbased :class:`pykeen.evaluation.RankBasedMetricResults`
sklearn :class:`pykeen.evaluation.SklearnMetricResults`
========= =================================================
.. note:: This table can be re-generated with ``pykeen ls metrics -f rst``
References
----------
.. [berrendorf2020] <NAME>, <NAME>, <NAME>, <NAME> (2020) `Interpretable and Fair
Comparison of Link Prediction or Entity Alignment Methods with Adjusted Mean Rank
<https://arxiv.org/abs/2002.06914>`_.
"""
import dataclasses
from typing import Mapping, Set, Type, Union
from .evaluator import Evaluator, MetricResults, evaluate
from .rank_based_evaluator import RankBasedEvaluator, RankBasedMetricResults
from .sklearn import SklearnEvaluator, SklearnMetricResults
from ..utils import get_cls, normalize_string
__all__ = [
'evaluate',
'Evaluator',
'MetricResults',
'RankBasedEvaluator',
'RankBasedMetricResults',
'SklearnEvaluator',
'SklearnMetricResults',
'metrics',
'evaluators',
'get_evaluator_cls',
'get_metric_list',
]
_EVALUATOR_SUFFIX = 'Evaluator'
_EVALUATORS: Set[Type[Evaluator]] = {
RankBasedEvaluator,
SklearnEvaluator,
}
#: A mapping of evaluators' names to their implementations
evaluators: Mapping[str, Type[Evaluator]] = {
normalize_string(cls.__name__, suffix=_EVALUATOR_SUFFIX): cls
for cls in _EVALUATORS
}
def get_evaluator_cls(query: Union[None, str, Type[Evaluator]]) -> Type[Evaluator]:
"""Get the evaluator class."""
return get_cls(
query,
base=Evaluator,
lookup_dict=evaluators,
default=RankBasedEvaluator,
suffix=_EVALUATOR_SUFFIX,
)
_METRICS_SUFFIX = 'MetricResults'
_METRICS: Set[Type[MetricResults]] = {
RankBasedMetricResults,
SklearnMetricResults,
}
#: A mapping of results' names to their implementations
metrics: Mapping[str, Type[MetricResults]] = {
normalize_string(cls.__name__, suffix=_METRICS_SUFFIX): cls
for cls in _METRICS
}
def get_metric_list():
"""Get info about all metrics across all evaluators."""
return [
(field, name, value)
for name, value in metrics.items()
for field in dataclasses.fields(value)
]
|
[
"dataclasses.fields"
] |
[((2749, 2774), 'dataclasses.fields', 'dataclasses.fields', (['value'], {}), '(value)\n', (2767, 2774), False, 'import dataclasses\n')]
|
from django.urls import reverse
from django.shortcuts import redirect, render
from django.views.generic import TemplateView
from guardian.mixins import LoginRequiredMixin
from tally_ho.libs.permissions import groups
from tally_ho.apps.tally.models import UserProfile
GROUP_URLS = {
groups.AUDIT_CLERK: "audit",
groups.AUDIT_SUPERVISOR: "audit",
groups.CLEARANCE_CLERK: "clearance",
groups.CLEARANCE_SUPERVISOR: "clearance",
groups.CORRECTIONS_CLERK: "corrections",
groups.DATA_ENTRY_1_CLERK: "data-entry",
groups.DATA_ENTRY_2_CLERK: "data-entry",
groups.INTAKE_CLERK: "intake",
groups.INTAKE_SUPERVISOR: "intake",
groups.QUALITY_CONTROL_CLERK: "quality-control",
groups.QUALITY_CONTROL_SUPERVISOR: "quality-control",
groups.SUPER_ADMINISTRATOR: "super-administrator-tallies",
groups.TALLY_MANAGER: "tally-manager",
}
def permission_denied(request):
return render(request, 'errors/403.html')
def not_found(request):
return render(request, 'errors/404.html')
def bad_request(request):
return render(request, 'errors/400.html')
def server_error(request):
return render(request, 'errors/500.html')
def suspicious_error(request):
error_message = request.session.get('error_message')
if error_message:
del request.session['error_message']
return render(request,
'errors/suspicious.html',
{'error_message': error_message})
def get_user_role_url(user):
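    """Return the role-specific URL for the user's first group (or the no-tally
    page if a tally is required but missing); None if the user has no group."""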
if user.groups.count():
user_group = user.groups.all()[0]
kwargs = {}
if user_group.name not in [groups.TALLY_MANAGER,
groups.SUPER_ADMINISTRATOR]:
userprofile = UserProfile.objects.get(id=user.id)
if not userprofile.tally:
return reverse('home-no-tally')
kwargs = {'tally_id': userprofile.tally.id}
return reverse(GROUP_URLS.get(user_group.name), kwargs=kwargs)
return None
class HomeView(LoginRequiredMixin, TemplateView):
template_name = "home.html"
def redirect_user_to_role_view(self):
user = self.request.user
redirect_url = get_user_role_url(user)
if redirect_url:
return redirect(redirect_url)
return None
def dispatch(self, request, *args, **kwargs):
self.request = request
redirect_response = self.redirect_user_to_role_view()
if redirect_response:
return redirect_response
return super(HomeView, self).dispatch(request, *args, **kwargs)
class LocaleView(TemplateView):
def get(self, *args, **kwargs):
get_data = self.request.GET
locale = get_data.get('locale')
if locale:
self.request.session['locale'] = locale
self.request.session['django_language'] = locale
next_url = get_data.get('next', 'home')
if not len(next_url) or next_url.startswith('locale'):
next_url = 'home'
return redirect(next_url)
class NoTallyView(LoginRequiredMixin, TemplateView):
template_name = "no_tally_assigned.html"
|
[
"django.shortcuts.render",
"tally_ho.apps.tally.models.UserProfile.objects.get",
"django.shortcuts.redirect",
"django.urls.reverse"
] |
[((918, 952), 'django.shortcuts.render', 'render', (['request', '"""errors/403.html"""'], {}), "(request, 'errors/403.html')\n", (924, 952), False, 'from django.shortcuts import redirect, render\n'), ((990, 1024), 'django.shortcuts.render', 'render', (['request', '"""errors/404.html"""'], {}), "(request, 'errors/404.html')\n", (996, 1024), False, 'from django.shortcuts import redirect, render\n'), ((1064, 1098), 'django.shortcuts.render', 'render', (['request', '"""errors/400.html"""'], {}), "(request, 'errors/400.html')\n", (1070, 1098), False, 'from django.shortcuts import redirect, render\n'), ((1139, 1173), 'django.shortcuts.render', 'render', (['request', '"""errors/500.html"""'], {}), "(request, 'errors/500.html')\n", (1145, 1173), False, 'from django.shortcuts import redirect, render\n'), ((1344, 1419), 'django.shortcuts.render', 'render', (['request', '"""errors/suspicious.html"""', "{'error_message': error_message}"], {}), "(request, 'errors/suspicious.html', {'error_message': error_message})\n", (1350, 1419), False, 'from django.shortcuts import redirect, render\n'), ((3011, 3029), 'django.shortcuts.redirect', 'redirect', (['next_url'], {}), '(next_url)\n', (3019, 3029), False, 'from django.shortcuts import redirect, render\n'), ((1725, 1760), 'tally_ho.apps.tally.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'id': 'user.id'}), '(id=user.id)\n', (1748, 1760), False, 'from tally_ho.apps.tally.models import UserProfile\n'), ((2244, 2266), 'django.shortcuts.redirect', 'redirect', (['redirect_url'], {}), '(redirect_url)\n', (2252, 2266), False, 'from django.shortcuts import redirect, render\n'), ((1822, 1846), 'django.urls.reverse', 'reverse', (['"""home-no-tally"""'], {}), "('home-no-tally')\n", (1829, 1846), False, 'from django.urls import reverse\n')]
|