code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import numpy as np
import random as rand
import matplotlib.pyplot as plt
class component:
def __init__(self,num_node):
self.num_node = num_node
self.parent = [i for i in range(num_node)]
self.weight = [0 for i in range(num_node)]
self.size = [1 for i in range(num_node)]
def find(self,root):
if root==self.parent[root]:
return root
else:
self.parent[root]=self.find(self.parent[root])
return self.parent[root]
def get_size(self,u):
return self.size[u]
def get_dif(self,u,v,k):
return min(self.weight[u]+k/self.size[u],self.weight[v]+k/self.size[v])
def merge(self,u,v,w):
self.parent[v]=u
self.size[u]+=self.size[v]
self.weight[u]=w
self.weight[v]=w
def image(self,shape):
# img = np.zeros(shape)
img = np.ones(shape)
cnt = 0
color_map = {}
c = lambda: [rand.random() , rand.random() , rand.random()]
for i in range(self.num_node):
root = self.find(i)
if root not in color_map.keys():
color_map[root] = c()
cnt+=1
img[i//shape[1],i%shape[1]] = color_map[root]
return img
def show_images(self,src_img):
shape = src_img.shape
m = {}
cnt = 0
for i in range(self.num_node):
root = self.find(i)
if root not in m.keys():
m[root]= set()
cnt+=1
m[root].add((i//shape[1],i%shape[1]))
img = src_img
cnt+=1
row = int(round(cnt/5+0.5))
plt.subplot(row,5, 1)
plt.imshow(img)
index = 1
for key, value in m.items():
index += 1
sub_img = np.ones(img.shape, dtype=int) * 255
for item in value:
sub_img[item[0], item[1]] = img[item[0], item[1]]
plt.subplot(row,5, index)
plt.imshow(sub_img)
plt.show() | segmentation/Component.py | import numpy as np
import random as rand
import matplotlib.pyplot as plt
class component:
def __init__(self,num_node):
self.num_node = num_node
self.parent = [i for i in range(num_node)]
self.weight = [0 for i in range(num_node)]
self.size = [1 for i in range(num_node)]
def find(self,root):
if root==self.parent[root]:
return root
else:
self.parent[root]=self.find(self.parent[root])
return self.parent[root]
def get_size(self,u):
return self.size[u]
def get_dif(self,u,v,k):
return min(self.weight[u]+k/self.size[u],self.weight[v]+k/self.size[v])
def merge(self,u,v,w):
self.parent[v]=u
self.size[u]+=self.size[v]
self.weight[u]=w
self.weight[v]=w
def image(self,shape):
# img = np.zeros(shape)
img = np.ones(shape)
cnt = 0
color_map = {}
c = lambda: [rand.random() , rand.random() , rand.random()]
for i in range(self.num_node):
root = self.find(i)
if root not in color_map.keys():
color_map[root] = c()
cnt+=1
img[i//shape[1],i%shape[1]] = color_map[root]
return img
def show_images(self,src_img):
shape = src_img.shape
m = {}
cnt = 0
for i in range(self.num_node):
root = self.find(i)
if root not in m.keys():
m[root]= set()
cnt+=1
m[root].add((i//shape[1],i%shape[1]))
img = src_img
cnt+=1
row = int(round(cnt/5+0.5))
plt.subplot(row,5, 1)
plt.imshow(img)
index = 1
for key, value in m.items():
index += 1
sub_img = np.ones(img.shape, dtype=int) * 255
for item in value:
sub_img[item[0], item[1]] = img[item[0], item[1]]
plt.subplot(row,5, index)
plt.imshow(sub_img)
plt.show() | 0.101233 | 0.418103 |
from typing import TypeVar, Any
from abc import ABC, abstractmethod
import eagerpy as ep
T = TypeVar("T")
class Criterion(ABC):
"""Abstract base class to implement new criteria."""
@abstractmethod
def __repr__(self) -> str:
...
@abstractmethod
def __call__(self, perturbed: T, outputs: T) -> T:
"""Returns a boolean tensor indicating which perturbed inputs are adversarial.
Args:
perturbed: Tensor with perturbed inputs ``(batch, ...)``.
outputs: Tensor with model outputs for the perturbed inputs ``(batch, ...)``.
Returns:
A boolean tensor indicating which perturbed inputs are adversarial ``(batch,)``.
"""
...
def __and__(self, other: "Criterion") -> "Criterion":
return _And(self, other)
class _And(Criterion):
def __init__(self, a: Criterion, b: Criterion):
super().__init__()
self.a = a
self.b = b
def __repr__(self) -> str:
return f"{self.a!r} & {self.b!r}"
def __call__(self, perturbed: T, outputs: T) -> T:
args, restore_type = ep.astensors_(perturbed, outputs)
a = self.a(*args)
b = self.b(*args)
is_adv = ep.logical_and(a, b)
return restore_type(is_adv)
class Misclassification(Criterion):
"""Considers those perturbed inputs adversarial whose predicted class
differs from the label.
Args:
labels: Tensor with labels of the unperturbed inputs ``(batch,)``.
"""
def __init__(self, labels: Any):
super().__init__()
self.labels: ep.Tensor = ep.astensor(labels)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.labels!r})"
def __call__(self, perturbed: T, outputs: T) -> T:
outputs_, restore_type = ep.astensor_(outputs)
del perturbed, outputs
classes = outputs_.argmax(axis=-1)
assert classes.shape == self.labels.shape
is_adv = classes != self.labels
return restore_type(is_adv)
class TargetedMisclassification(Criterion):
"""Considers those perturbed inputs adversarial whose predicted class
matches the target class.
Args:
target_classes: Tensor with target classes ``(batch,)``.
"""
def __init__(self, target_classes: Any):
super().__init__()
self.target_classes: ep.Tensor = ep.astensor(target_classes)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.target_classes!r})"
def __call__(self, perturbed: T, outputs: T) -> T:
outputs_, restore_type = ep.astensor_(outputs)
del perturbed, outputs
classes = outputs_.argmax(axis=-1)
assert classes.shape == self.target_classes.shape
is_adv = classes == self.target_classes
return restore_type(is_adv) | foolbox/criteria.py | from typing import TypeVar, Any
from abc import ABC, abstractmethod
import eagerpy as ep
T = TypeVar("T")
class Criterion(ABC):
"""Abstract base class to implement new criteria."""
@abstractmethod
def __repr__(self) -> str:
...
@abstractmethod
def __call__(self, perturbed: T, outputs: T) -> T:
"""Returns a boolean tensor indicating which perturbed inputs are adversarial.
Args:
perturbed: Tensor with perturbed inputs ``(batch, ...)``.
outputs: Tensor with model outputs for the perturbed inputs ``(batch, ...)``.
Returns:
A boolean tensor indicating which perturbed inputs are adversarial ``(batch,)``.
"""
...
def __and__(self, other: "Criterion") -> "Criterion":
return _And(self, other)
class _And(Criterion):
def __init__(self, a: Criterion, b: Criterion):
super().__init__()
self.a = a
self.b = b
def __repr__(self) -> str:
return f"{self.a!r} & {self.b!r}"
def __call__(self, perturbed: T, outputs: T) -> T:
args, restore_type = ep.astensors_(perturbed, outputs)
a = self.a(*args)
b = self.b(*args)
is_adv = ep.logical_and(a, b)
return restore_type(is_adv)
class Misclassification(Criterion):
"""Considers those perturbed inputs adversarial whose predicted class
differs from the label.
Args:
labels: Tensor with labels of the unperturbed inputs ``(batch,)``.
"""
def __init__(self, labels: Any):
super().__init__()
self.labels: ep.Tensor = ep.astensor(labels)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.labels!r})"
def __call__(self, perturbed: T, outputs: T) -> T:
outputs_, restore_type = ep.astensor_(outputs)
del perturbed, outputs
classes = outputs_.argmax(axis=-1)
assert classes.shape == self.labels.shape
is_adv = classes != self.labels
return restore_type(is_adv)
class TargetedMisclassification(Criterion):
"""Considers those perturbed inputs adversarial whose predicted class
matches the target class.
Args:
target_classes: Tensor with target classes ``(batch,)``.
"""
def __init__(self, target_classes: Any):
super().__init__()
self.target_classes: ep.Tensor = ep.astensor(target_classes)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.target_classes!r})"
def __call__(self, perturbed: T, outputs: T) -> T:
outputs_, restore_type = ep.astensor_(outputs)
del perturbed, outputs
classes = outputs_.argmax(axis=-1)
assert classes.shape == self.target_classes.shape
is_adv = classes == self.target_classes
return restore_type(is_adv) | 0.963618 | 0.40869 |
from __future__ import unicode_literals
from binascii import crc32
from wxpy.utils import start_new_thread
emojis = \
'😀😁😂🤣😃😄😅😆😉😊😋😎😍😘😗😙😚🙂🤗🤔😐😑😶🙄😏😣😥😮🤐😯' \
'😪😫😴😌🤓😛😜😝🤤😒😓😔😕🙃🤑😲😇🤠🤡🤥😺😸😹😻😼😽🙀😿😾🙈' \
'🙉🙊🌱🌲🌳🌴🌵🌾🌿🍀🍁🍂🍃🍇🍈🍉🍊🍋🍌🍍🍏🍐🍑🍒🍓🥝🍅🥑🍆🥔' \
'🥕🌽🥒🍄🥜🌰🍞🥐🥖🥞🧀🍖🍗🥓🍔🍟🍕🌭🌮🌯🥙🥚🍳🥘🍲🥗🍿🍱🍘🍙' \
'🍚🍛🍜🍝🍠🍢🍣🍤🍥🍡🍦🍧🍨🍩🍪🎂🍰🍫🍬🍭🍮🍯🍼🥛☕🍵🍶🍾🍷🍸' \
'🍹🍺🍻🥂🥃🍴🥄🔪🏺🌍🌎🌏🌐🗾🌋🗻🏠🏡🏢🏣🏤🏥🏦🏨🏩🏪🏫🏬🏭🏯' \
'🏰💒🗼🗽⛪🕌🕍🕋⛲⛺🌁🌃🌄🌅🌆🌇🌉🌌🎠🎡🎢💈🎪🎭🎨🎰🚂🚃🚄🚅' \
'🚆🚇🚈🚉🚊🚝🚞🚋🚌🚍🚎🚐🚑🚒🚓🚔🚕🚖🚗🚘🚙🚚🚛🚜🚲🛴🛵🚏⛽🚨' \
'🚥🚦🚧⚓⛵🛶🚤🚢🛫🛬💺🚁🚟🚠🚡🚀🚪🛌🚽🚿🛀🛁⌛⏳⌚⏰🌑🌒🌓🌔' \
'🌕🌖🌗🌘🌙🌚🌛🌜🌝🌞⭐🌟🌠⛅🌀🌈🌂☔⚡⛄🔥💧🌊🎃🎄🎆🎇✨🎈🎉' \
'🎊🎋🎍🎎🎏🎐🎑🎁🎫🏆🏅🥇🥈🥉⚽⚾🏀🏐🏈🏉🎾🎱🎳🏏🏑🏒🏓🏸🥊🥋' \
'🥅🎯⛳🎣🎽🎿🎮🎲🃏🎴🔇🔈🔉🔊📢📣📯🔔🔕🎼🎵🎶🎤🎧📻🎷🎸🎹🎺🎻' \
'🥁📱📲📞📟📠🔋🔌💻💽💾💿📀🎥🎬📺📷📸📹📼🔍🔎🔬🔭📡💡🔦📔📕📖' \
'📗📘📙📚📓📒📃📜📄📰📑🔖💰💴💵💶💷💸💳💱💲📧📨📩📤📥📦📫📪📬' \
'📭📮📝💼📁📂📅📆📇📋📌📍📎📏📐🔒🔓🔏🔐🔑🔨🔫🏹🔧🔩🔗🚬🗿🔮🛒'
def assign_emoji(chat):
n = crc32(str(chat.wxid or chat.nickname).encode()) & 0xffffffff
return emojis[n % len(emojis)]
def forward_prefix(user):
# represent for avatar
avatar_repr = assign_emoji(user)
return '{} · {}'.format(avatar_repr, user.name)
def sync_message_in_groups(
msg, groups, prefix=None, suffix=None,
raise_for_unsupported=False, run_async=True
):
"""
将消息同步到多个微信群中
支持以下消息类型
* 文本 (`TEXT`)
* 视频(`VIDEO`)
* 文件 (`ATTACHMENT`)
* 图片/自定义表情 (`PICTURE`)
* 但不支持表情商店中的表情
* 名片 (`CARD`)
* 仅支持公众号名片,以及自己发出的个人号名片
* 分享 (`SHARING`)
* 会被转化为 `标题 + 链接` 形式的纯文本
* 语音 (`RECORDING`)
* 会以文件方式发送
* 地图 (`MAP`)
* 会转化为 `位置名称 + 地图链接` 形式的文本消息
:param Message msg: 需同步的消息对象
:param Group groups: 需同步的群列表
:param str prefix:
* 转发时的 **前缀** 文本,原消息为文本时会自动换行
* 若不设定,则使用默认前缀作为提示
:param str suffix:
* 转发时的 **后缀** 文本,原消息为文本时会自动换行
* 默认为空
:param bool raise_for_unsupported:
| 为 True 时,将为不支持的消息类型抛出 `NotImplementedError` 异常
:param bool run_async: 是否异步执行,为 True 时不阻塞线程
::
my_groups = [group1, group2, group3 ...]
@bot.register(my_groups, except_self=False)
def sync_my_groups(msg):
sync_message_in_groups(msg, my_groups)
"""
def process():
for group in groups:
if group == msg.chat:
continue
msg.forward(
chat=group, prefix=prefix, suffix=suffix,
raise_for_unsupported=raise_for_unsupported
)
if not prefix:
prefix = forward_prefix(msg.member)
if run_async:
start_new_thread(process, use_caller_name=True)
else:
process() | wxpy/ext/sync_message_in_groups.py | from __future__ import unicode_literals
from binascii import crc32
from wxpy.utils import start_new_thread
emojis = \
'😀😁😂🤣😃😄😅😆😉😊😋😎😍😘😗😙😚🙂🤗🤔😐😑😶🙄😏😣😥😮🤐😯' \
'😪😫😴😌🤓😛😜😝🤤😒😓😔😕🙃🤑😲😇🤠🤡🤥😺😸😹😻😼😽🙀😿😾🙈' \
'🙉🙊🌱🌲🌳🌴🌵🌾🌿🍀🍁🍂🍃🍇🍈🍉🍊🍋🍌🍍🍏🍐🍑🍒🍓🥝🍅🥑🍆🥔' \
'🥕🌽🥒🍄🥜🌰🍞🥐🥖🥞🧀🍖🍗🥓🍔🍟🍕🌭🌮🌯🥙🥚🍳🥘🍲🥗🍿🍱🍘🍙' \
'🍚🍛🍜🍝🍠🍢🍣🍤🍥🍡🍦🍧🍨🍩🍪🎂🍰🍫🍬🍭🍮🍯🍼🥛☕🍵🍶🍾🍷🍸' \
'🍹🍺🍻🥂🥃🍴🥄🔪🏺🌍🌎🌏🌐🗾🌋🗻🏠🏡🏢🏣🏤🏥🏦🏨🏩🏪🏫🏬🏭🏯' \
'🏰💒🗼🗽⛪🕌🕍🕋⛲⛺🌁🌃🌄🌅🌆🌇🌉🌌🎠🎡🎢💈🎪🎭🎨🎰🚂🚃🚄🚅' \
'🚆🚇🚈🚉🚊🚝🚞🚋🚌🚍🚎🚐🚑🚒🚓🚔🚕🚖🚗🚘🚙🚚🚛🚜🚲🛴🛵🚏⛽🚨' \
'🚥🚦🚧⚓⛵🛶🚤🚢🛫🛬💺🚁🚟🚠🚡🚀🚪🛌🚽🚿🛀🛁⌛⏳⌚⏰🌑🌒🌓🌔' \
'🌕🌖🌗🌘🌙🌚🌛🌜🌝🌞⭐🌟🌠⛅🌀🌈🌂☔⚡⛄🔥💧🌊🎃🎄🎆🎇✨🎈🎉' \
'🎊🎋🎍🎎🎏🎐🎑🎁🎫🏆🏅🥇🥈🥉⚽⚾🏀🏐🏈🏉🎾🎱🎳🏏🏑🏒🏓🏸🥊🥋' \
'🥅🎯⛳🎣🎽🎿🎮🎲🃏🎴🔇🔈🔉🔊📢📣📯🔔🔕🎼🎵🎶🎤🎧📻🎷🎸🎹🎺🎻' \
'🥁📱📲📞📟📠🔋🔌💻💽💾💿📀🎥🎬📺📷📸📹📼🔍🔎🔬🔭📡💡🔦📔📕📖' \
'📗📘📙📚📓📒📃📜📄📰📑🔖💰💴💵💶💷💸💳💱💲📧📨📩📤📥📦📫📪📬' \
'📭📮📝💼📁📂📅📆📇📋📌📍📎📏📐🔒🔓🔏🔐🔑🔨🔫🏹🔧🔩🔗🚬🗿🔮🛒'
def assign_emoji(chat):
n = crc32(str(chat.wxid or chat.nickname).encode()) & 0xffffffff
return emojis[n % len(emojis)]
def forward_prefix(user):
# represent for avatar
avatar_repr = assign_emoji(user)
return '{} · {}'.format(avatar_repr, user.name)
def sync_message_in_groups(
msg, groups, prefix=None, suffix=None,
raise_for_unsupported=False, run_async=True
):
"""
将消息同步到多个微信群中
支持以下消息类型
* 文本 (`TEXT`)
* 视频(`VIDEO`)
* 文件 (`ATTACHMENT`)
* 图片/自定义表情 (`PICTURE`)
* 但不支持表情商店中的表情
* 名片 (`CARD`)
* 仅支持公众号名片,以及自己发出的个人号名片
* 分享 (`SHARING`)
* 会被转化为 `标题 + 链接` 形式的纯文本
* 语音 (`RECORDING`)
* 会以文件方式发送
* 地图 (`MAP`)
* 会转化为 `位置名称 + 地图链接` 形式的文本消息
:param Message msg: 需同步的消息对象
:param Group groups: 需同步的群列表
:param str prefix:
* 转发时的 **前缀** 文本,原消息为文本时会自动换行
* 若不设定,则使用默认前缀作为提示
:param str suffix:
* 转发时的 **后缀** 文本,原消息为文本时会自动换行
* 默认为空
:param bool raise_for_unsupported:
| 为 True 时,将为不支持的消息类型抛出 `NotImplementedError` 异常
:param bool run_async: 是否异步执行,为 True 时不阻塞线程
::
my_groups = [group1, group2, group3 ...]
@bot.register(my_groups, except_self=False)
def sync_my_groups(msg):
sync_message_in_groups(msg, my_groups)
"""
def process():
for group in groups:
if group == msg.chat:
continue
msg.forward(
chat=group, prefix=prefix, suffix=suffix,
raise_for_unsupported=raise_for_unsupported
)
if not prefix:
prefix = forward_prefix(msg.member)
if run_async:
start_new_thread(process, use_caller_name=True)
else:
process() | 0.407687 | 0.363675 |
import nextcord as discord
from nextcord.ext import tasks, commands
from nextcord.utils import utcnow
from datetime import timedelta
import sys
import random
from random import choice
import asyncio
import time
import datetime
import pymongo
sys.path.append("..")
from functions import functions as funs
import config
client = funs.mongo_c()
db = client.bot
servers = db.servers
settings = db.settings
class mod(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(usage = '(@member) [reason]', description = 'Забанить пользователя.', help = 'Модерация', aliases = ['бан'])
async def ban(self, ctx, member: discord.Member, *, arg="Причина не указана"):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
if ctx.author.guild_permissions.ban_members == True:
await member.send(f'Вы были забанены на сервере `{ctx.guild.name}` по причине: `{arg}`\nСоздатель сервера: `{ctx.guild.owner}`')
ban = f"{ctx.author}({ctx.author.id}) - {arg}"
await member.ban(reason=ban)
msg = [
"Забаненый уже никогда не будет тем, кто был раньше...",
"Бан это и плохо и хорошо, смотря с какой стороны смотреть...",
"Тот ли человек после бана кем он был раньше?",
]
server = servers.find_one({"server": ctx.guild.id})
await ctx.send(embed = discord.Embed(color=server['embed_color']).add_field(
name="Бан",
value=f"Забанен: {member.mention}\n"
f"Забанил: {ctx.author.mention}\n"
f"Причина: {arg}\n"
f"Банов: {int(len(await ctx.guild.bans()))-1} +1"
).set_thumbnail(
url= "https://ia.wampi.ru/2020/08/09/1452967606_anime-sword-art-online-lisbeth-anime-gifki-2775271.gif").set_footer(
icon_url=ctx.author.avatar.url,
text=random.choice(msg)))
@commands.command(usage = '(member_id)', description = 'Разбанить пользователя на сервере.', help = 'Модерация', aliases = ['разбанить'])
async def unban(self, ctx, member_id:int):
user = await self.bot.fetch_user(member_id)
await ctx.guild.unban(user)
await user.send(f'Вы были разбанены на сервере `{ctx.guild.name}`\nСоздатель сервера: `{ctx.guild.owner}`')
await ctx.send(f"Пользователь {user} был разбанен.")
@commands.command(usage = '(@member) [reason]', description = 'Кикнуть пользователя.', help = 'Модерация', aliases = ['кик'])
async def kick(self, ctx, member: discord.Member, arg="Причина не указана"):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
if ctx.author.guild_permissions.kick_members == True:
await member.send(f'Вы были выгнаны с сервере `{ctx.guild.name}` по причине: `{arg}`\nСоздатель сервера: `{ctx.guild.owner}`')
k = f"{ctx.author}({ctx.author.id}) - {arg}"
await member.kick(reason=k)
server = servers.find_one({"server": ctx.guild.id})
await ctx.send(embed = discord.Embed(color=server['embed_color']).add_field(
name="Кик",
value=f"Кикнут: {member.mention}\n"
f"Кикнул: {ctx.author.mention}\n"
f"Причина: {arg}\n"
).set_thumbnail(
url= "https://pa1.narvii.com/6392/9b4dd5ba812d32198cbd5465e0d10b46153c2208_hq.gif"))
@commands.command(usage = '-', description = 'Задержка бота.', help = 'Бот', aliases = ['пинг'])
async def ping(self, ctx):
ping = self.bot.latency
ping_emoji = "🟩🔳🔳🔳🔳"
ping_list = [
{"ping": 0.10000000000000000, "emoji": "🟧🟩🔳🔳🔳"},
{"ping": 0.15000000000000000, "emoji": "🟥🟧🟩🔳🔳"},
{"ping": 0.20000000000000000, "emoji": "🟥🟥🟧🟩🔳"},
{"ping": 0.25000000000000000, "emoji": "🟥🟥🟥🟧🟩"},
{"ping": 0.30000000000000000, "emoji": "🟥🟥🟥🟥🟧"},
{"ping": 0.35000000000000000, "emoji": "🟥🟥🟥🟥🟥"}]
for ping_one in ping_list:
if ping > ping_one["ping"]:
ping_emoji = ping_one["emoji"]
message = await ctx.send("Пожалуйста, подождите. . .")
await message.edit(content = f"Понг! {ping_emoji} `{ping * 1000:.0f}ms` :ping_pong:")
@commands.command(usage = '(@member) (time) [reason]', description = 'Замьютить пользователя на сервере.', help = 'Мьюты', aliases = ['мьют'])
async def mute(self, ctx, member: discord.Member = None, timem = None, *, reason = None):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
server = servers.find_one({"server": ctx.guild.id})
if member is None:
await ctx.send("Вы не указали пользователя!")
elif timem is None:
await ctx.send("Вы не указали время!\nФормат: 10m (s/m/h/d/w)")
else:
if server['mod']['muterole'] is not None:
role = ctx.guild.get_role(server['mod']['muterole']) #id роли
if role != None:
await member.add_roles(role)
if reason == None:
reason = 'Не указана'
try:
ttime = int(timem[:-1])
except:
await ctx.send(f"Укажите число!")
return
if member.id == ctx.guild.owner.id:
return
if timem.endswith("s"):
a = server['mute_members'].copy()
a.update({str(member.id): time.time() + ttime })
servers.update_one({"server": ctx.guild.id}, {"$set": {"mute_members": a}})
times = funs.time_end(ttime)
embs = discord.Embed(title = "Мьют", description = f"Пользователю {member.name}, был выдан мьют на {times}\nПричина: {reason}", color=server['embed_color'])
await ctx.send(embed = embs)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = ttime))
except:
pass
elif timem.endswith("m"):
a = server['mute_members'].copy()
a.update({str(member.id): time.time() + ttime*60 })
servers.update_one({"server": ctx.guild.id}, {"$set": {"mute_members": a}})
times = funs.time_end(ttime*60)
embm = discord.Embed(title = "Мьют", description = f"Пользователю {member.name}, был выдан мьют на {times}\n\n**Причина: {reason}**", color=server['embed_color'])
await ctx.send(embed = embm)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = ttime*60))
except:
pass
elif timem.endswith("h"):
a = server['mute_members'].copy()
a.update({str(member.id): time.time() + ttime*3600 })
servers.update_one({"server": ctx.guild.id}, {"$set": {"mute_members": a}})
times = funs.time_end(ttime*3600)
embh = discord.Embed(title = "Мьют", description = f"Пользователю {member.name}, был выдан мьют на {times}\n\n**Причина: {reason}**", color=server['embed_color'])
await ctx.send(embed = embh)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = ttime*3600))
except:
pass
elif timem.endswith("d"):
a = server['mute_members'].copy()
a.update({str(member.id): time.time() + ttime*86400 })
servers.update_one({"server": ctx.guild.id}, {"$set": {"mute_members": a}})
times = funs.time_end(ttime*86400)
embd = discord.Embed(title = "Мьют", description = f"Пользователю {member.name}, был выдан мьют на {times}\n\n**Причина: {reason}**", color=server['embed_color'])
await ctx.send(embed = embd)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = ttime*86400))
except:
pass
elif timem.endswith("w"):
a = server['mute_members'].copy()
a.update({str(member.id): time.time() + ttime*604800 })
servers.update_one({"server": ctx.guild.id}, {"$set": {"mute_members": a}})
times = funs.time_end(ttime*604800)
embd = discord.Embed(title = "Мьют", description = f"Пользователю {member.name}, был выдан мьют на {times}\n\n**Причина: {reason}**", color=server['embed_color'])
await ctx.send(embed = embd)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = ttime*604800))
except:
pass
else:
await ctx.send('Ошибка указания времени.')
return
@commands.command(usage = '(@member) [reason]', description = 'Выдать варн пользователю.', help = 'Варны', aliases = ['варн'])
async def warn(self, ctx, user:discord.Member, *,reason = None):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
await funs.warn(ctx, user, reason, ctx.author)
@commands.command(usage = '(@member)', description = 'Просмотреть варны пользователя.', help = 'Варны', aliases = ['варны'])
async def warns(self, ctx, user:discord.Member = None):
if user == None:
user = ctx.author
server = servers.find_one({'server':ctx.guild.id})
text = ""
if user == None:
await ctx.send("Вы не указали пользователя")
return
try:
print(server['mod']['warns'][str(user.id)].items())
except Exception:
embd = discord.Embed(title = f"Варны: {user.name}", description = f"Варны отсутсвуют", color=server['embed_color'])
await ctx.send(embed = embd)
return
else:
for i in server['mod']['warns'][str(user.id)].items():
if i[1]['reason'] == None:
reason = "Не указано"
else:
reason = i[1]['reason']
text = text + f"**#{i[0]}** **{i[1]['time']}: **{reason}\nВыдал: <@{i[1]['author']}>\n\n"
embd = discord.Embed(title = f"Варны: {user.name}", description = f"{text}", color=server['embed_color'])
await ctx.send(embed = embd)
@commands.command(usage = '(@member) [warn_id]', description = 'Снять варн с пользователя.', help = 'Варны', aliases = ['разварнить'])
async def unwarn(self, ctx, member:discord.Member, num:int = 1):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
server = servers.find_one({'server':ctx.guild.id})
try:
server['mod']['warns'][str(member.id)]
except Exception:
await ctx.send("У этого пользователя нету такого варна.")
return
m = server['mod']
w = m['warns']
w.copy()
try:
w[str(member.id)].pop(str(num))
servers.update_one({"server": ctx.guild.id}, {"$set": {'mod': m}})
except Exception:
await ctx.send(f"У данного пользователя нет варна #{num}")
return
embd = discord.Embed(title = "Сброс", description = f"Варн #{num}, пользователя {member.mention} был сброшен", color=server['embed_color'])
await ctx.send(embed = embd)
@commands.command(usage = '(@member)', description = 'Размьютить пользователя.', help = 'Мьюты', aliases = ['размьютить'])
async def unmute(self, ctx, member:discord.Member):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
server = servers.find_one({'server':ctx.guild.id})
try:
server['mute_members'][str(member.id)]
except Exception:
await ctx.send("Этот пользователь не в мьюте.")
return
a = server['mute_members'].copy()
a.pop(str(member.id))
servers.update_one({'server':server['server']},{'$set': {'mute_members':a}})
try:
await self.bot.get_guild(ctx.guild.id).get_member(member.id).remove_roles(self.bot.get_guild(ctx.guild.id).get_role(server['mod']['muterole']))
except Exception:
await ctx.send("У бота не достаточно прав на снятие или роль мьюта сброшена")
return
embd = discord.Embed(title = "Сброс", description = f"Мьют с пользователя {member.mention} был снят.", color=server['embed_color'])
await ctx.send(embed = embd)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = 0))
except:
pass
@commands.command(usage = '-', description = 'Просмотреть всех замьюченых пользователей.', help = 'Мьюты', aliases = ['мьюты'])
async def mutes(self,ctx):
server = servers.find_one({'server':ctx.guild.id})
text = ''
for memid in server['mute_members']:
try:
member = ctx.guild.get_member(int(memid))
text = text + f"{member.mention}, осталось: {funs.time_end(server['mute_members'][str(memid)]-time.time())}\n"
except Exception:
a = server['mute_members'].copy()
a.pop(memid)
servers.update_one({'server':server['server']},{'$set':{'mute_members':a}})
await ctx.send(embed = discord.Embed(title="Мьюты", description=text, color=server['embed_color']))
@commands.command(usage = '(number max100)', description = 'Очистить чат.', help = 'Модерация', aliases = ['очистить'])
async def clear(self, ctx, number:int):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
if number <= 100:
deleted = await ctx.channel.purge(limit=number)
message = await ctx.send('Удалено {} сообщений(я)'.format(len(deleted)))
await asyncio.sleep(3)
await message.delete()
else:
await ctx.send('Не возможно удалить более 100-та сообщений за раз!')
@commands.command(hidden=True)
async def global_warn(self,ctx, id:int, *, reason:str = "None"):
s = settings.find_one({"sid": 1})
if ctx.author.id not in s['moderators']:
await ctx.send("У вас нет прав модератора бота!")
return
try:
s['bl global chat'][str(id)]
nw = len(s['bl global chat'][str(id)].keys())
if nw < 3:
s['bl global chat'][str(id)].update({str(nw+1):{'reason':reason,"time":time.time() + 2628000}})
settings.update_one({"sid": 1},{'$set': {'bl global chat':s['bl global chat']}})
await ctx.send(f"Пользователь c id `{id}` получил варн #{nw+1}")
else:
s['bl global chat'][str(id)].update({'ban':f'{reason} | auto ban due to 3 warns'})
settings.update_one({"sid": 1},{'$set': {'bl global chat':s['bl global chat']}})
await ctx.send(f"Пользователь c id `{id}` был автоматически забанен за х3 предупреждения")
except Exception:
s['bl global chat'].update({str(id):{'1':{'reason':reason,"time":time.time() + 2628000}}})
settings.update_one({"sid": 1},{'$set': {'bl global chat':s['bl global chat']}})
await ctx.send(f"Пользователь c id `{id}` получил варн #1")
@commands.command(hidden=True)
async def global_ban(self,ctx, id:int, *, reason:str = "None"):
s = settings.find_one({"sid": 1})
if ctx.author.id not in s['moderators']:
await ctx.send("У вас нет прав модератора бота!")
return
try:
s['bl global chat'][str(id)].update({'ban':f'ban: {reason}'})
except Exception:
s['bl global chat'].update({str(id):{} })
s['bl global chat'][str(id)].update({'ban':f'ban: {reason}'})
settings.update_one({"sid": 1},{'$set': {'bl global chat':s['bl global chat']}})
await ctx.send(f"Пользователь c id `{id}` был забанен в межсерверном чате.")
@commands.command(usage = '[#channel]', description = 'Очистить голос. канал\каналы от пользователей.', help = 'Модерация', aliases = ['очистить_войс'])
async def voice_clean(self, ctx, channel:discord.VoiceChannel = None):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
if channel != None:
for i in channel.members:
await i.move_to(channel=None)
await ctx.send("Голосовой канал был очищен от пользователей!")
else:
ch = []
for i in ctx.guild.channels:
if type(i) == discord.channel.VoiceChannel:
if len(i.members) > 0:
ch.append(i)
for c in ch:
for i in c.members:
await i.move_to(channel=None)
await ctx.send("Голосовой канал был очищен от пользователей!")
def setup(bot):
bot.add_cog(mod(bot)) | Cog/moderation.py | import nextcord as discord
from nextcord.ext import tasks, commands
from nextcord.utils import utcnow
from datetime import timedelta
import sys
import random
from random import choice
import asyncio
import time
import datetime
import pymongo
sys.path.append("..")
from functions import functions as funs
import config
client = funs.mongo_c()
db = client.bot
servers = db.servers
settings = db.settings
class mod(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(usage = '(@member) [reason]', description = 'Забанить пользователя.', help = 'Модерация', aliases = ['бан'])
async def ban(self, ctx, member: discord.Member, *, arg="Причина не указана"):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
if ctx.author.guild_permissions.ban_members == True:
await member.send(f'Вы были забанены на сервере `{ctx.guild.name}` по причине: `{arg}`\nСоздатель сервера: `{ctx.guild.owner}`')
ban = f"{ctx.author}({ctx.author.id}) - {arg}"
await member.ban(reason=ban)
msg = [
"Забаненый уже никогда не будет тем, кто был раньше...",
"Бан это и плохо и хорошо, смотря с какой стороны смотреть...",
"Тот ли человек после бана кем он был раньше?",
]
server = servers.find_one({"server": ctx.guild.id})
await ctx.send(embed = discord.Embed(color=server['embed_color']).add_field(
name="Бан",
value=f"Забанен: {member.mention}\n"
f"Забанил: {ctx.author.mention}\n"
f"Причина: {arg}\n"
f"Банов: {int(len(await ctx.guild.bans()))-1} +1"
).set_thumbnail(
url= "https://ia.wampi.ru/2020/08/09/1452967606_anime-sword-art-online-lisbeth-anime-gifki-2775271.gif").set_footer(
icon_url=ctx.author.avatar.url,
text=random.choice(msg)))
@commands.command(usage = '(member_id)', description = 'Разбанить пользователя на сервере.', help = 'Модерация', aliases = ['разбанить'])
async def unban(self, ctx, member_id:int):
user = await self.bot.fetch_user(member_id)
await ctx.guild.unban(user)
await user.send(f'Вы были разбанены на сервере `{ctx.guild.name}`\nСоздатель сервера: `{ctx.guild.owner}`')
await ctx.send(f"Пользователь {user} был разбанен.")
@commands.command(usage = '(@member) [reason]', description = 'Кикнуть пользователя.', help = 'Модерация', aliases = ['кик'])
async def kick(self, ctx, member: discord.Member, arg="Причина не указана"):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
if ctx.author.guild_permissions.kick_members == True:
await member.send(f'Вы были выгнаны с сервере `{ctx.guild.name}` по причине: `{arg}`\nСоздатель сервера: `{ctx.guild.owner}`')
k = f"{ctx.author}({ctx.author.id}) - {arg}"
await member.kick(reason=k)
server = servers.find_one({"server": ctx.guild.id})
await ctx.send(embed = discord.Embed(color=server['embed_color']).add_field(
name="Кик",
value=f"Кикнут: {member.mention}\n"
f"Кикнул: {ctx.author.mention}\n"
f"Причина: {arg}\n"
).set_thumbnail(
url= "https://pa1.narvii.com/6392/9b4dd5ba812d32198cbd5465e0d10b46153c2208_hq.gif"))
@commands.command(usage = '-', description = 'Задержка бота.', help = 'Бот', aliases = ['пинг'])
async def ping(self, ctx):
ping = self.bot.latency
ping_emoji = "🟩🔳🔳🔳🔳"
ping_list = [
{"ping": 0.10000000000000000, "emoji": "🟧🟩🔳🔳🔳"},
{"ping": 0.15000000000000000, "emoji": "🟥🟧🟩🔳🔳"},
{"ping": 0.20000000000000000, "emoji": "🟥🟥🟧🟩🔳"},
{"ping": 0.25000000000000000, "emoji": "🟥🟥🟥🟧🟩"},
{"ping": 0.30000000000000000, "emoji": "🟥🟥🟥🟥🟧"},
{"ping": 0.35000000000000000, "emoji": "🟥🟥🟥🟥🟥"}]
for ping_one in ping_list:
if ping > ping_one["ping"]:
ping_emoji = ping_one["emoji"]
message = await ctx.send("Пожалуйста, подождите. . .")
await message.edit(content = f"Понг! {ping_emoji} `{ping * 1000:.0f}ms` :ping_pong:")
@commands.command(usage = '(@member) (time) [reason]', description = 'Замьютить пользователя на сервере.', help = 'Мьюты', aliases = ['мьют'])
async def mute(self, ctx, member: discord.Member = None, timem = None, *, reason = None):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
server = servers.find_one({"server": ctx.guild.id})
if member is None:
await ctx.send("Вы не указали пользователя!")
elif timem is None:
await ctx.send("Вы не указали время!\nФормат: 10m (s/m/h/d/w)")
else:
if server['mod']['muterole'] is not None:
role = ctx.guild.get_role(server['mod']['muterole']) #id роли
if role != None:
await member.add_roles(role)
if reason == None:
reason = 'Не указана'
try:
ttime = int(timem[:-1])
except:
await ctx.send(f"Укажите число!")
return
if member.id == ctx.guild.owner.id:
return
if timem.endswith("s"):
a = server['mute_members'].copy()
a.update({str(member.id): time.time() + ttime })
servers.update_one({"server": ctx.guild.id}, {"$set": {"mute_members": a}})
times = funs.time_end(ttime)
embs = discord.Embed(title = "Мьют", description = f"Пользователю {member.name}, был выдан мьют на {times}\nПричина: {reason}", color=server['embed_color'])
await ctx.send(embed = embs)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = ttime))
except:
pass
elif timem.endswith("m"):
a = server['mute_members'].copy()
a.update({str(member.id): time.time() + ttime*60 })
servers.update_one({"server": ctx.guild.id}, {"$set": {"mute_members": a}})
times = funs.time_end(ttime*60)
embm = discord.Embed(title = "Мьют", description = f"Пользователю {member.name}, был выдан мьют на {times}\n\n**Причина: {reason}**", color=server['embed_color'])
await ctx.send(embed = embm)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = ttime*60))
except:
pass
elif timem.endswith("h"):
a = server['mute_members'].copy()
a.update({str(member.id): time.time() + ttime*3600 })
servers.update_one({"server": ctx.guild.id}, {"$set": {"mute_members": a}})
times = funs.time_end(ttime*3600)
embh = discord.Embed(title = "Мьют", description = f"Пользователю {member.name}, был выдан мьют на {times}\n\n**Причина: {reason}**", color=server['embed_color'])
await ctx.send(embed = embh)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = ttime*3600))
except:
pass
elif timem.endswith("d"):
a = server['mute_members'].copy()
a.update({str(member.id): time.time() + ttime*86400 })
servers.update_one({"server": ctx.guild.id}, {"$set": {"mute_members": a}})
times = funs.time_end(ttime*86400)
embd = discord.Embed(title = "Мьют", description = f"Пользователю {member.name}, был выдан мьют на {times}\n\n**Причина: {reason}**", color=server['embed_color'])
await ctx.send(embed = embd)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = ttime*86400))
except:
pass
elif timem.endswith("w"):
a = server['mute_members'].copy()
a.update({str(member.id): time.time() + ttime*604800 })
servers.update_one({"server": ctx.guild.id}, {"$set": {"mute_members": a}})
times = funs.time_end(ttime*604800)
embd = discord.Embed(title = "Мьют", description = f"Пользователю {member.name}, был выдан мьют на {times}\n\n**Причина: {reason}**", color=server['embed_color'])
await ctx.send(embed = embd)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = ttime*604800))
except:
pass
else:
await ctx.send('Ошибка указания времени.')
return
@commands.command(usage = '(@member) [reason]', description = 'Выдать варн пользователю.', help = 'Варны', aliases = ['варн'])
async def warn(self, ctx, user:discord.Member, *,reason = None):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
await funs.warn(ctx, user, reason, ctx.author)
@commands.command(usage = '(@member)', description = 'Просмотреть варны пользователя.', help = 'Варны', aliases = ['варны'])
async def warns(self, ctx, user:discord.Member = None):
if user == None:
user = ctx.author
server = servers.find_one({'server':ctx.guild.id})
text = ""
if user == None:
await ctx.send("Вы не указали пользователя")
return
try:
print(server['mod']['warns'][str(user.id)].items())
except Exception:
embd = discord.Embed(title = f"Варны: {user.name}", description = f"Варны отсутсвуют", color=server['embed_color'])
await ctx.send(embed = embd)
return
else:
for i in server['mod']['warns'][str(user.id)].items():
if i[1]['reason'] == None:
reason = "Не указано"
else:
reason = i[1]['reason']
text = text + f"**#{i[0]}** **{i[1]['time']}: **{reason}\nВыдал: <@{i[1]['author']}>\n\n"
embd = discord.Embed(title = f"Варны: {user.name}", description = f"{text}", color=server['embed_color'])
await ctx.send(embed = embd)
@commands.command(usage = '(@member) [warn_id]', description = 'Снять варн с пользователя.', help = 'Варны', aliases = ['разварнить'])
async def unwarn(self, ctx, member:discord.Member, num:int = 1):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
server = servers.find_one({'server':ctx.guild.id})
try:
server['mod']['warns'][str(member.id)]
except Exception:
await ctx.send("У этого пользователя нету такого варна.")
return
m = server['mod']
w = m['warns']
w.copy()
try:
w[str(member.id)].pop(str(num))
servers.update_one({"server": ctx.guild.id}, {"$set": {'mod': m}})
except Exception:
await ctx.send(f"У данного пользователя нет варна #{num}")
return
embd = discord.Embed(title = "Сброс", description = f"Варн #{num}, пользователя {member.mention} был сброшен", color=server['embed_color'])
await ctx.send(embed = embd)
@commands.command(usage = '(@member)', description = 'Размьютить пользователя.', help = 'Мьюты', aliases = ['размьютить'])
async def unmute(self, ctx, member:discord.Member):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
server = servers.find_one({'server':ctx.guild.id})
try:
server['mute_members'][str(member.id)]
except Exception:
await ctx.send("Этот пользователь не в мьюте.")
return
a = server['mute_members'].copy()
a.pop(str(member.id))
servers.update_one({'server':server['server']},{'$set': {'mute_members':a}})
try:
await self.bot.get_guild(ctx.guild.id).get_member(member.id).remove_roles(self.bot.get_guild(ctx.guild.id).get_role(server['mod']['muterole']))
except Exception:
await ctx.send("У бота не достаточно прав на снятие или роль мьюта сброшена")
return
embd = discord.Embed(title = "Сброс", description = f"Мьют с пользователя {member.mention} был снят.", color=server['embed_color'])
await ctx.send(embed = embd)
try:
await member.edit(timeout=utcnow() + timedelta(seconds = 0))
except:
pass
@commands.command(usage = '-', description = 'Просмотреть всех замьюченых пользователей.', help = 'Мьюты', aliases = ['мьюты'])
async def mutes(self,ctx):
server = servers.find_one({'server':ctx.guild.id})
text = ''
for memid in server['mute_members']:
try:
member = ctx.guild.get_member(int(memid))
text = text + f"{member.mention}, осталось: {funs.time_end(server['mute_members'][str(memid)]-time.time())}\n"
except Exception:
a = server['mute_members'].copy()
a.pop(memid)
servers.update_one({'server':server['server']},{'$set':{'mute_members':a}})
await ctx.send(embed = discord.Embed(title="Мьюты", description=text, color=server['embed_color']))
@commands.command(usage = '(number max100)', description = 'Очистить чат.', help = 'Модерация', aliases = ['очистить'])
async def clear(self, ctx, number:int):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
if number <= 100:
deleted = await ctx.channel.purge(limit=number)
message = await ctx.send('Удалено {} сообщений(я)'.format(len(deleted)))
await asyncio.sleep(3)
await message.delete()
else:
await ctx.send('Не возможно удалить более 100-та сообщений за раз!')
@commands.command(hidden=True)
async def global_warn(self,ctx, id:int, *, reason:str = "None"):
s = settings.find_one({"sid": 1})
if ctx.author.id not in s['moderators']:
await ctx.send("У вас нет прав модератора бота!")
return
try:
s['bl global chat'][str(id)]
nw = len(s['bl global chat'][str(id)].keys())
if nw < 3:
s['bl global chat'][str(id)].update({str(nw+1):{'reason':reason,"time":time.time() + 2628000}})
settings.update_one({"sid": 1},{'$set': {'bl global chat':s['bl global chat']}})
await ctx.send(f"Пользователь c id `{id}` получил варн #{nw+1}")
else:
s['bl global chat'][str(id)].update({'ban':f'{reason} | auto ban due to 3 warns'})
settings.update_one({"sid": 1},{'$set': {'bl global chat':s['bl global chat']}})
await ctx.send(f"Пользователь c id `{id}` был автоматически забанен за х3 предупреждения")
except Exception:
s['bl global chat'].update({str(id):{'1':{'reason':reason,"time":time.time() + 2628000}}})
settings.update_one({"sid": 1},{'$set': {'bl global chat':s['bl global chat']}})
await ctx.send(f"Пользователь c id `{id}` получил варн #1")
@commands.command(hidden=True)
async def global_ban(self,ctx, id:int, *, reason:str = "None"):
s = settings.find_one({"sid": 1})
if ctx.author.id not in s['moderators']:
await ctx.send("У вас нет прав модератора бота!")
return
try:
s['bl global chat'][str(id)].update({'ban':f'ban: {reason}'})
except Exception:
s['bl global chat'].update({str(id):{} })
s['bl global chat'][str(id)].update({'ban':f'ban: {reason}'})
settings.update_one({"sid": 1},{'$set': {'bl global chat':s['bl global chat']}})
await ctx.send(f"Пользователь c id `{id}` был забанен в межсерверном чате.")
@commands.command(usage = '[#channel]', description = 'Очистить голос. канал\каналы от пользователей.', help = 'Модерация', aliases = ['очистить_войс'])
async def voice_clean(self, ctx, channel:discord.VoiceChannel = None):
if funs.roles_check(ctx.author, ctx.guild.id) == False:
await ctx.send("У вас недостаточно прав для использования этой команды!")
return
if channel != None:
for i in channel.members:
await i.move_to(channel=None)
await ctx.send("Голосовой канал был очищен от пользователей!")
else:
ch = []
for i in ctx.guild.channels:
if type(i) == discord.channel.VoiceChannel:
if len(i.members) > 0:
ch.append(i)
for c in ch:
for i in c.members:
await i.move_to(channel=None)
await ctx.send("Голосовой канал был очищен от пользователей!")
def setup(bot):
bot.add_cog(mod(bot)) | 0.253122 | 0.147371 |
import csv
import os
import re
import sqlite3
import sys
class Asset():
def __init__(self, filename, md5, bytes):
self.filename = filename
self.md5 = md5
self.bytes = int(bytes)
def found(self, cursor):
fb_query = """SELECT * FROM files
WHERE filename=? and bytes=?;"""
f_query = """SELECT * FROM files
WHERE filename=?;"""
fmb_query = """SELECT * FROM files
WHERE filename=? and md5=? and bytes=?;"""
data = (self.filename, self.md5, self.bytes)
result = cursor.execute(fmb_query, data).fetchall()
if len(result) > 0:
return True
else:
return False
class DirList():
def __init__(self, path):
self.path = path
self.filename = os.path.basename(path)
self.lines = self.read()
self.assets = []
self.assets_found = 0
self.reported_bytes = None
def bytes(self):
return sum([asset.bytes for asset in self.assets])
def read(self):
encodings = ['utf-8', 'latin1', 'macroman']
for encoding in encodings:
try:
with open(self.path, encoding=encoding) as handle:
self.lines = [line for line in handle.read().split('\n')]
return
except ValueError:
continue
sys.exit(f"Cannot decode file at {self.path}")
def parse(self):
csv_lines = []
self.summary = {}
for line in self.lines:
if line == '':
continue
elif line.startswith('Extension'):
marker, ext, count = line.split(' - ')
self.summary[ext] = int(count.replace(',', ''))
elif line.startswith('Total file size:'):
match = re.match(r'Total file size: ([0-9,]+) bytes.', line)
if match:
self.reported_bytes = int(match.group(1).replace(',', ''))
else:
csv_lines.append(line)
for row in csv.DictReader(csv_lines, delimiter="\t"):
if row['Type'] == "File":
asset = Asset(row['File Name'], row['MD5'], row['File Size'])
self.assets.append(asset)
def display(self):
print(self.filename.upper())
print(f" LINES: {len(self.lines)}")
print(f"ASSETS: {len(self.assets)}")
print(f" FOUND: {self.assets_found}")
print(f" BYTES: {self.bytes()}")
print(f"RBYTES: {self.reported_bytes}")
def main():
dbpath = ('/Users/westgard/Box Sync/'
'AWSMigration/aws-migration-data/'
'restored.db'
)
conn = sqlite3.connect(dbpath)
cursor = conn.cursor()
outputfile = open(sys.argv[2], 'w')
for dir, subdirs, files in os.walk(sys.argv[1]):
for file in files:
if file.endswith(".txt"):
path = os.path.join(dir, file)
print(f"\nProcessing {file}...")
dirlist = DirList(path)
dirlist.read()
dirlist.parse()
for asset in dirlist.assets:
if asset.found(cursor):
dirlist.assets_found += 1
dirlist.display()
row = [dirlist.filename,
len(dirlist.lines),
len(dirlist.assets),
dirlist.assets_found,
dirlist.bytes(),
dirlist.path
]
outputfile.write(','.join([str(i) for i in row]) + '\n')
outputfile.close()
if __name__ == "__main__":
main() | verifier/prange.py |
import csv
import os
import re
import sqlite3
import sys
class Asset():
def __init__(self, filename, md5, bytes):
self.filename = filename
self.md5 = md5
self.bytes = int(bytes)
def found(self, cursor):
fb_query = """SELECT * FROM files
WHERE filename=? and bytes=?;"""
f_query = """SELECT * FROM files
WHERE filename=?;"""
fmb_query = """SELECT * FROM files
WHERE filename=? and md5=? and bytes=?;"""
data = (self.filename, self.md5, self.bytes)
result = cursor.execute(fmb_query, data).fetchall()
if len(result) > 0:
return True
else:
return False
class DirList():
def __init__(self, path):
self.path = path
self.filename = os.path.basename(path)
self.lines = self.read()
self.assets = []
self.assets_found = 0
self.reported_bytes = None
def bytes(self):
return sum([asset.bytes for asset in self.assets])
def read(self):
encodings = ['utf-8', 'latin1', 'macroman']
for encoding in encodings:
try:
with open(self.path, encoding=encoding) as handle:
self.lines = [line for line in handle.read().split('\n')]
return
except ValueError:
continue
sys.exit(f"Cannot decode file at {self.path}")
def parse(self):
csv_lines = []
self.summary = {}
for line in self.lines:
if line == '':
continue
elif line.startswith('Extension'):
marker, ext, count = line.split(' - ')
self.summary[ext] = int(count.replace(',', ''))
elif line.startswith('Total file size:'):
match = re.match(r'Total file size: ([0-9,]+) bytes.', line)
if match:
self.reported_bytes = int(match.group(1).replace(',', ''))
else:
csv_lines.append(line)
for row in csv.DictReader(csv_lines, delimiter="\t"):
if row['Type'] == "File":
asset = Asset(row['File Name'], row['MD5'], row['File Size'])
self.assets.append(asset)
def display(self):
print(self.filename.upper())
print(f" LINES: {len(self.lines)}")
print(f"ASSETS: {len(self.assets)}")
print(f" FOUND: {self.assets_found}")
print(f" BYTES: {self.bytes()}")
print(f"RBYTES: {self.reported_bytes}")
def main():
dbpath = ('/Users/westgard/Box Sync/'
'AWSMigration/aws-migration-data/'
'restored.db'
)
conn = sqlite3.connect(dbpath)
cursor = conn.cursor()
outputfile = open(sys.argv[2], 'w')
for dir, subdirs, files in os.walk(sys.argv[1]):
for file in files:
if file.endswith(".txt"):
path = os.path.join(dir, file)
print(f"\nProcessing {file}...")
dirlist = DirList(path)
dirlist.read()
dirlist.parse()
for asset in dirlist.assets:
if asset.found(cursor):
dirlist.assets_found += 1
dirlist.display()
row = [dirlist.filename,
len(dirlist.lines),
len(dirlist.assets),
dirlist.assets_found,
dirlist.bytes(),
dirlist.path
]
outputfile.write(','.join([str(i) for i in row]) + '\n')
outputfile.close()
if __name__ == "__main__":
main() | 0.23467 | 0.119537 |
from keras import backend as K
from keras.applications.vgg16 import VGG16
from keras.models import Model
def vgg16_feature_model(flayers, weights='imagenet'):
"""
Feature exctraction VGG16 model.
# Arguments
flayers: list of strings with names of layers to get the features for.
The length of `flayers` should be > 1, otherwise the output shape
is one axis less.
weights: ether "imagenet" or path to the file with weights.
# Returns
features_model: keras.models.Model instance to extract the features.
# Raises
AssertionError: in case of `flayers` is not a list.
AssertionError: in case of length of 'flayers' < 2.
"""
assert isinstance(flayers,list), "First argument 'flayers' must be a list"
assert len(flayers) > 1, "Length of 'flayers' must be > 1."
base_model = VGG16(include_top=False, weights=weights)
vgg16_outputs = [base_model.get_layer(flayers[i]).output for i in range(len(flayers))]
features_model = Model(inputs=[base_model.input], outputs=vgg16_outputs, name='vgg16_features')
features_model.trainable = False
features_model.compile(loss='mse', optimizer='adam')
return features_model
# Losses:
# -------
def total_loss(mask, vgg16_weights='imagenet'):
"""
Total loss defined in Eq 7 of Liu et al 2018 with:
y_true = I_gt,
y_pred = I_out,
y_comp = I_comp.
"""
vgg16_lnames = ['block1_pool', 'block2_pool', 'block3_pool']
vgg_model = vgg16_feature_model(vgg16_lnames, weights=vgg16_weights)
def loss(y_true, y_pred):
mask_inv = 1 - mask
y_comp = mask * y_true + mask_inv * y_pred
vgg_out = vgg_model(y_pred)
vgg_gt = vgg_model(y_true)
vgg_comp = vgg_model(y_comp)
l_valid = loss_per_pixel(y_true, y_pred, mask)
l_hole = loss_per_pixel(y_true, y_pred, mask_inv)
l_perc = loss_perc(vgg_out, vgg_gt, vgg_comp)
l_style = loss_style(vgg_out, vgg_gt, vgg_comp)
l_tv = loss_tv(y_comp, mask_inv)
return l_valid + 6.*l_hole + 0.05*l_perc + 120.*l_style + 0.1*l_tv
return loss
def loss_l1(y_true, y_pred):
"""
Size-averaged L1 loss used in all the losses.
If size_average is True, the l1 losses are means,
If size_average is False, the l1 losses are sums divided by norm (should be specified),
only have effect if y_true.ndim = 4.
"""
if K.ndim(y_true) == 4:
# images and vgg features
return K.mean(K.abs(y_pred - y_true), axis=[1,2,3])
elif K.ndim(y_true) == 3:
# gram matrices
return K.mean(K.abs(y_pred - y_true), axis=[1,2])
else:
raise NotImplementedError("Calculating L1 loss on 1D tensors? should not occur for this network")
def gram_matrix(x):
"""Gram matrix used in the style losses."""
assert K.ndim(x) == 4, 'Input tensor should be 4D (B, H, W, C).'
assert K.image_data_format() == 'channels_last', "Use channels-last format."
# Permute channels and get resulting shape
x = K.permute_dimensions(x, (0, 3, 1, 2))
shape = K.shape(x)
B, C, H, W = shape[0], shape[1], shape[2], shape[3]
# Reshape x and do batch dot product
features = K.reshape(x, K.stack([B, C, H*W]))
gram = K.batch_dot(features, features, axes=2)
# Normalize with channels, height and width
gram /= K.cast(C * H * W, x.dtype)
return gram
def loss_per_pixel(y_true, y_pred, mask):
"""
Per pixel loss for selected pixels.
Note, we don't use loss_l1 for l_valid and l_hole to prevent unnecessary
mask multiplication, "mask * y_pred - mask *y_true)" instead of
"mask * (y_pred - y_true)".
"""
assert K.ndim(y_true) == 4, 'Input tensor should be 4D (B, H, W, C).'
return K.mean(K.abs(mask * (y_pred - y_true)), axis=[1,2,3])
def loss_perc(vgg_out, vgg_gt, vgg_comp):
"""Perceptual loss."""
l = 0.
for o, g, c in zip(vgg_out, vgg_gt, vgg_comp):
l += loss_l1(o, g) + loss_l1(c, g)
return l
def loss_style(vgg_out, vgg_gt, vgg_comp):
"""Style loss consisting of two terms: out and comp."""
l = 0.
for o, g, c in zip(vgg_out, vgg_gt, vgg_comp):
gram_gt = gram_matrix(g)
l += loss_l1(gram_matrix(o), gram_gt) + loss_l1(gram_matrix(c), gram_gt)
return l
def loss_tv(y_comp, mask_inv):
"""Total variation (TV) loss, smoothing penalty on the hole region."""
assert K.ndim(y_comp) == 4 and K.ndim(mask_inv) == 4, 'Input tensors should be 4D (B, H, W, C).'
# Create dilated hole region using a 3x3 kernel of all 1s.
kernel = K.ones(shape=(3, 3, mask_inv.shape[3], mask_inv.shape[3]))
dilated_mask = K.conv2d(mask_inv, kernel, data_format='channels_last', padding='same')
dilated_mask = K.clip(dilated_mask, 0., 1.)
# It seems difficult to get the right value using the backend functions only.
# Our TV loss is higher than the one in the paper due to redundant 1-pixel margins
# and the way we implemented the TV loss via the conv2D operation. The offset depends
# on the mask used and is constant for a given mask during optimization. This
# additional constant will add noise to the loss values printed but should not affect
# the gradients. The TV loss part based on the inpainted regions should be scaled
# well with the weight 0.1 from the paper.
# Compute dilated hole region of y_comp
P = dilated_mask * y_comp
return loss_l1(P[:,:-1,:,:], P[:,1:,:,:]) + loss_l1(P[:,:,:-1,:], P[:,:,1:,:]) | inpainter_utils/pconv2d_loss.py | from keras import backend as K
from keras.applications.vgg16 import VGG16
from keras.models import Model
def vgg16_feature_model(flayers, weights='imagenet'):
"""
Feature exctraction VGG16 model.
# Arguments
flayers: list of strings with names of layers to get the features for.
The length of `flayers` should be > 1, otherwise the output shape
is one axis less.
weights: ether "imagenet" or path to the file with weights.
# Returns
features_model: keras.models.Model instance to extract the features.
# Raises
AssertionError: in case of `flayers` is not a list.
AssertionError: in case of length of 'flayers' < 2.
"""
assert isinstance(flayers,list), "First argument 'flayers' must be a list"
assert len(flayers) > 1, "Length of 'flayers' must be > 1."
base_model = VGG16(include_top=False, weights=weights)
vgg16_outputs = [base_model.get_layer(flayers[i]).output for i in range(len(flayers))]
features_model = Model(inputs=[base_model.input], outputs=vgg16_outputs, name='vgg16_features')
features_model.trainable = False
features_model.compile(loss='mse', optimizer='adam')
return features_model
# Losses:
# -------
def total_loss(mask, vgg16_weights='imagenet'):
"""
Total loss defined in Eq 7 of Liu et al 2018 with:
y_true = I_gt,
y_pred = I_out,
y_comp = I_comp.
"""
vgg16_lnames = ['block1_pool', 'block2_pool', 'block3_pool']
vgg_model = vgg16_feature_model(vgg16_lnames, weights=vgg16_weights)
def loss(y_true, y_pred):
mask_inv = 1 - mask
y_comp = mask * y_true + mask_inv * y_pred
vgg_out = vgg_model(y_pred)
vgg_gt = vgg_model(y_true)
vgg_comp = vgg_model(y_comp)
l_valid = loss_per_pixel(y_true, y_pred, mask)
l_hole = loss_per_pixel(y_true, y_pred, mask_inv)
l_perc = loss_perc(vgg_out, vgg_gt, vgg_comp)
l_style = loss_style(vgg_out, vgg_gt, vgg_comp)
l_tv = loss_tv(y_comp, mask_inv)
return l_valid + 6.*l_hole + 0.05*l_perc + 120.*l_style + 0.1*l_tv
return loss
def loss_l1(y_true, y_pred):
"""
Size-averaged L1 loss used in all the losses.
If size_average is True, the l1 losses are means,
If size_average is False, the l1 losses are sums divided by norm (should be specified),
only have effect if y_true.ndim = 4.
"""
if K.ndim(y_true) == 4:
# images and vgg features
return K.mean(K.abs(y_pred - y_true), axis=[1,2,3])
elif K.ndim(y_true) == 3:
# gram matrices
return K.mean(K.abs(y_pred - y_true), axis=[1,2])
else:
raise NotImplementedError("Calculating L1 loss on 1D tensors? should not occur for this network")
def gram_matrix(x):
"""Gram matrix used in the style losses."""
assert K.ndim(x) == 4, 'Input tensor should be 4D (B, H, W, C).'
assert K.image_data_format() == 'channels_last', "Use channels-last format."
# Permute channels and get resulting shape
x = K.permute_dimensions(x, (0, 3, 1, 2))
shape = K.shape(x)
B, C, H, W = shape[0], shape[1], shape[2], shape[3]
# Reshape x and do batch dot product
features = K.reshape(x, K.stack([B, C, H*W]))
gram = K.batch_dot(features, features, axes=2)
# Normalize with channels, height and width
gram /= K.cast(C * H * W, x.dtype)
return gram
def loss_per_pixel(y_true, y_pred, mask):
"""
Per pixel loss for selected pixels.
Note, we don't use loss_l1 for l_valid and l_hole to prevent unnecessary
mask multiplication, "mask * y_pred - mask *y_true)" instead of
"mask * (y_pred - y_true)".
"""
assert K.ndim(y_true) == 4, 'Input tensor should be 4D (B, H, W, C).'
return K.mean(K.abs(mask * (y_pred - y_true)), axis=[1,2,3])
def loss_perc(vgg_out, vgg_gt, vgg_comp):
"""Perceptual loss."""
l = 0.
for o, g, c in zip(vgg_out, vgg_gt, vgg_comp):
l += loss_l1(o, g) + loss_l1(c, g)
return l
def loss_style(vgg_out, vgg_gt, vgg_comp):
"""Style loss consisting of two terms: out and comp."""
l = 0.
for o, g, c in zip(vgg_out, vgg_gt, vgg_comp):
gram_gt = gram_matrix(g)
l += loss_l1(gram_matrix(o), gram_gt) + loss_l1(gram_matrix(c), gram_gt)
return l
def loss_tv(y_comp, mask_inv):
"""Total variation (TV) loss, smoothing penalty on the hole region."""
assert K.ndim(y_comp) == 4 and K.ndim(mask_inv) == 4, 'Input tensors should be 4D (B, H, W, C).'
# Create dilated hole region using a 3x3 kernel of all 1s.
kernel = K.ones(shape=(3, 3, mask_inv.shape[3], mask_inv.shape[3]))
dilated_mask = K.conv2d(mask_inv, kernel, data_format='channels_last', padding='same')
dilated_mask = K.clip(dilated_mask, 0., 1.)
# It seems difficult to get the right value using the backend functions only.
# Our TV loss is higher than the one in the paper due to redundant 1-pixel margins
# and the way we implemented the TV loss via the conv2D operation. The offset depends
# on the mask used and is constant for a given mask during optimization. This
# additional constant will add noise to the loss values printed but should not affect
# the gradients. The TV loss part based on the inpainted regions should be scaled
# well with the weight 0.1 from the paper.
# Compute dilated hole region of y_comp
P = dilated_mask * y_comp
return loss_l1(P[:,:-1,:,:], P[:,1:,:,:]) + loss_l1(P[:,:,:-1,:], P[:,:,1:,:]) | 0.947854 | 0.545104 |
def read_input(path: str):
"""
Read game board file from path.
Return list of str.
>>> read_input("check.txt")
['***21**', '452453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***']
"""
with open(path, "r", encoding="utf-8") as file:
lst = file.readlines()
for i in range(len(lst)):
if lst[i][-1] == "\n":
lst[i] = lst[i][:-1]
return lst
def left_to_right_check(input_line: str, pivot: str):
"""
Check row-wise visibility from left to right.
Return True if number of building from the left-most hint is visible looking to the right,
False otherwise.
input_line - representing board row.
pivot - number on the left-most hint of the input_line.
>>> left_to_right_check("412453*", 4)
True
>>> left_to_right_check("412453*", 5)
False
"""
if input_line[0] == "*":
return True
else:
pivot = int(pivot)
lst = list(input_line)[1:]
if pivot > lst.index("5")+1:
return False
elif pivot < lst.index("5")+1:
lost = lst.index("5")+1 - pivot
counter = 0
for i in range(lst.index("5")-1):
try:
if (int(lst[i+1]) - int(lst[i]) < 0
or int(lst[i+1]) - int(lst[i-1])):
counter +=1
except:
continue
if counter == lost:
return True
return False
else:
for i in range(lst.index("5")-1):
if int(lst[i+1]) - int(lst[i]) < 0:
return False
return True
def check_not_finished_board(board: list):
"""
Check if skyscraper board is not finished, i.e., '?' present on the game board.
Return True if finished, False otherwise.
>>> check_not_finished_board(['***21**', '4?????*', '4?????*', '*?????5', '*?????*', '*?????*', '*2*1***'])
False
>>> check_not_finished_board(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
True
>>> check_not_finished_board(['***21**', '412453*', '423145*', '*5?3215', '*35214*', '*41532*', '*2*1***'])
False
"""
lst = list(" ".join(board))
if "?" in lst:
return False
return True
def check_uniqueness_in_rows(board: list):
"""
Check buildings of unique height in each row.
Return True if buildings in a row have unique length, False otherwise.
>>> check_uniqueness_in_rows(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
True
>>> check_uniqueness_in_rows(['***21**', '452453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
>>> check_uniqueness_in_rows(['***21**', '412453*', '423145*', '*553215', '*35214*', '*41532*', '*2*1***'])
False
"""
for i in range(1,len(board)-1):
lst = list(board[i])
for j in range(1,len(lst)-1):
if lst[j] != "*" and lst[1:-1].count(lst[j]) != 1:
return False
return True
def check_horizontal_visibility(board: list):
"""
Check row-wise visibility (left-right and vice versa)
Return True if all horizontal hints are satisfiable,
i.e., for line 412453* , hint is 4, and 1245 are the four buildings
that could be observed from the hint looking to the right.
>>> check_horizontal_visibility(['***21**', '412453*', '412354*', '*543215', '*35214*', '*41532*', '*2*1***'])
True
>>> check_horizontal_visibility(['***21**', '452453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
>>> check_horizontal_visibility(['***21**', '452413*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
"""
for i in range(1,len(board)-1):
if ((not left_to_right_check(board[i], board[i][0]))
or (not left_to_right_check(board[i][::-1], board[i][-1]))):
return False
return True
def check_columns(board: list):
"""
Check column-wise compliance of the board for uniqueness (buildings of unique height) and visibility (top-bottom and vice versa).
Same as for horizontal cases, but aggregated in one function for vertical case, i.e. columns.
>>> check_columns(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
True
>>> check_columns(['***21**', '412153*', '423445*', '*543215', '*35214*', '*41232*', '*2*1***'])
False
>>> check_columns(['***21**', '412553*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
"""
lst = []
for i in range(len(board)):
lst.extend(list(board[i]))
i = 0
while i < 7:
line = ''
for j in range(i,len(lst),7):
line+=lst[j]
board[i] = line
i +=1
if check_uniqueness_in_rows(board):
return check_horizontal_visibility(board)
return False
def check_skyscrapers(input_path: str):
"""
Main function to check the status of skyscraper game board.
Return True if the board status is compliant with the rules,
False otherwise.
>>> check_skyscrapers("check.txt")
False
"""
board = read_input(input_path)
if (check_not_finished_board(board) and check_uniqueness_in_rows(board)
and check_horizontal_visibility(board) and check_columns(board)):
return True
return False
if __name__ == "__main__":
print(check_skyscrapers("check.txt")) | skyscraper.py | def read_input(path: str):
"""
Read game board file from path.
Return list of str.
>>> read_input("check.txt")
['***21**', '452453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***']
"""
with open(path, "r", encoding="utf-8") as file:
lst = file.readlines()
for i in range(len(lst)):
if lst[i][-1] == "\n":
lst[i] = lst[i][:-1]
return lst
def left_to_right_check(input_line: str, pivot: str):
"""
Check row-wise visibility from left to right.
Return True if number of building from the left-most hint is visible looking to the right,
False otherwise.
input_line - representing board row.
pivot - number on the left-most hint of the input_line.
>>> left_to_right_check("412453*", 4)
True
>>> left_to_right_check("412453*", 5)
False
"""
if input_line[0] == "*":
return True
else:
pivot = int(pivot)
lst = list(input_line)[1:]
if pivot > lst.index("5")+1:
return False
elif pivot < lst.index("5")+1:
lost = lst.index("5")+1 - pivot
counter = 0
for i in range(lst.index("5")-1):
try:
if (int(lst[i+1]) - int(lst[i]) < 0
or int(lst[i+1]) - int(lst[i-1])):
counter +=1
except:
continue
if counter == lost:
return True
return False
else:
for i in range(lst.index("5")-1):
if int(lst[i+1]) - int(lst[i]) < 0:
return False
return True
def check_not_finished_board(board: list):
"""
Check if skyscraper board is not finished, i.e., '?' present on the game board.
Return True if finished, False otherwise.
>>> check_not_finished_board(['***21**', '4?????*', '4?????*', '*?????5', '*?????*', '*?????*', '*2*1***'])
False
>>> check_not_finished_board(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
True
>>> check_not_finished_board(['***21**', '412453*', '423145*', '*5?3215', '*35214*', '*41532*', '*2*1***'])
False
"""
lst = list(" ".join(board))
if "?" in lst:
return False
return True
def check_uniqueness_in_rows(board: list):
"""
Check buildings of unique height in each row.
Return True if buildings in a row have unique length, False otherwise.
>>> check_uniqueness_in_rows(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
True
>>> check_uniqueness_in_rows(['***21**', '452453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
>>> check_uniqueness_in_rows(['***21**', '412453*', '423145*', '*553215', '*35214*', '*41532*', '*2*1***'])
False
"""
for i in range(1,len(board)-1):
lst = list(board[i])
for j in range(1,len(lst)-1):
if lst[j] != "*" and lst[1:-1].count(lst[j]) != 1:
return False
return True
def check_horizontal_visibility(board: list):
"""
Check row-wise visibility (left-right and vice versa)
Return True if all horizontal hints are satisfiable,
i.e., for line 412453* , hint is 4, and 1245 are the four buildings
that could be observed from the hint looking to the right.
>>> check_horizontal_visibility(['***21**', '412453*', '412354*', '*543215', '*35214*', '*41532*', '*2*1***'])
True
>>> check_horizontal_visibility(['***21**', '452453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
>>> check_horizontal_visibility(['***21**', '452413*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
"""
for i in range(1,len(board)-1):
if ((not left_to_right_check(board[i], board[i][0]))
or (not left_to_right_check(board[i][::-1], board[i][-1]))):
return False
return True
def check_columns(board: list):
"""
Check column-wise compliance of the board for uniqueness (buildings of unique height) and visibility (top-bottom and vice versa).
Same as for horizontal cases, but aggregated in one function for vertical case, i.e. columns.
>>> check_columns(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
True
>>> check_columns(['***21**', '412153*', '423445*', '*543215', '*35214*', '*41232*', '*2*1***'])
False
>>> check_columns(['***21**', '412553*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
False
"""
lst = []
for i in range(len(board)):
lst.extend(list(board[i]))
i = 0
while i < 7:
line = ''
for j in range(i,len(lst),7):
line+=lst[j]
board[i] = line
i +=1
if check_uniqueness_in_rows(board):
return check_horizontal_visibility(board)
return False
def check_skyscrapers(input_path: str):
"""
Main function to check the status of skyscraper game board.
Return True if the board status is compliant with the rules,
False otherwise.
>>> check_skyscrapers("check.txt")
False
"""
board = read_input(input_path)
if (check_not_finished_board(board) and check_uniqueness_in_rows(board)
and check_horizontal_visibility(board) and check_columns(board)):
return True
return False
if __name__ == "__main__":
print(check_skyscrapers("check.txt")) | 0.717705 | 0.405272 |
from os import getcwd
import argparse
import json
from typing import Dict, List, Union, MutableMapping
import sys
from idol.generator import GeneratorParams
from idol.functional import OrderedObj
from idol.__idol__ import Map
from idol.py.schema.module import Module
class CliConfig:
    """
    Declarative argparse-based CLI configuration.

    ``args`` maps option name -> help text for multi-value options
    (``--name one two ...``); ``flags`` maps option name -> help text for
    boolean switches (``--name``).  A ``-input_json`` argument is always
    added for the non-piped invocation mode.
    """

    flags: Dict[str, str]
    args: Dict[str, str]
    argparse: argparse.ArgumentParser

    def __init__(
        self,
        flags: Dict[str, str] = None,
        args: Dict[str, str] = None,
        description: str = "Codegenerator built on python",
    ):
        # Fix: the previous `flags={}` / `args={}` defaults were mutable
        # default arguments shared by every CliConfig instance.
        self.flags = {} if flags is None else flags
        self.args = {} if args is None else args
        self.argparse = argparse.ArgumentParser(description=description)
        for arg, desc in self.args.items():
            self.argparse.add_argument("--" + arg, help=desc, nargs="*")
        for arg, desc in self.flags.items():
            self.argparse.add_argument("--" + arg, help=desc, action="store_true")
        self.argparse.add_argument(
            "-input_json",
            help="ignored when stdin is piped into this program, otherwise should be a json file containing the output of an idol run.",
        )
def start(config: CliConfig):
    """
    Parse CLI arguments for ``config`` and hand the resulting options and
    input payload to ``prepare_generator_params``.

    The payload is read from stdin when it is piped into this program,
    otherwise from the file named by the ``-input_json`` argument.
    """
    args: argparse.Namespace = config.argparse.parse_args()
    if sys.stdin.isatty():
        # Fix: close the input file deterministically instead of leaking
        # the handle opened by a bare open(...).read().
        with open(args.input_json, "r") as input_file:
            data = input_file.read()
    else:
        data = sys.stdin.read()
    return prepare_generator_params(vars(args), data)
def prepare_generator_params(
    options: Dict[str, Union[List[str], bool]], data: str
) -> GeneratorParams:
    """
    Validate the raw idol JSON payload and assemble GeneratorParams.

    :param options: parsed CLI options.  NOTE: argparse stores ``None``
        (not a missing key) for list options that were not given on the
        command line, so list values are normalised with ``or``.
    :param data: JSON string mapping module names to Module payloads.
    :return: GeneratorParams covering all modules and types plus the
        scaffold types of the requested ``target`` modules.
    """
    modules = json.loads(data)
    Map.of(Module, {}).validate(modules)
    modules: MutableMapping[Module] = Map.of(Module, {}).wrap(modules)
    all_modules: OrderedObj[Module] = OrderedObj(modules)
    all_types = OrderedObj.from_iterable(m.types_as_ordered_obj() for m in all_modules.values())
    # Fix: options.get("target", []) returned None (and crashed the loop)
    # when --target was absent, because the key exists with value None.
    targets = options.get("target") or []
    scaffold_types = OrderedObj.from_iterable(
        modules[target].types_as_ordered_obj() for target in targets
    )
    # Same None-guard for --output; default to the current directory.
    output = (options.get("output") or [getcwd()])[0]
    return GeneratorParams(all_modules, all_types, scaffold_types, output, options)
import argparse
import json
from typing import Dict, List, Union, MutableMapping
import sys
from idol.generator import GeneratorParams
from idol.functional import OrderedObj
from idol.__idol__ import Map
from idol.py.schema.module import Module
class CliConfig:
flags: Dict[str, str]
args: Dict[str, str]
argparse: argparse.ArgumentParser
def __init__(
self,
flags: Dict[str, str] = {},
args: Dict[str, str] = {},
description: str = "Codegenerator built on python",
):
self.flags = flags
self.args = args
self.argparse = argparse.ArgumentParser(description=description)
for arg, desc in args.items():
self.argparse.add_argument("--" + arg, help=desc, nargs="*")
for arg, desc in flags.items():
self.argparse.add_argument("--" + arg, help=desc, action="store_true")
self.argparse.add_argument(
"-input_json",
help="ignored when stdin is piped into this program, otherwise should be a json file containing the output of an idol run.",
)
def start(config: CliConfig):
args: argparse.Namespace = config.argparse.parse_args()
if sys.stdin.isatty():
data = open(args.input_json, "r").read()
else:
data = sys.stdin.read()
return prepare_generator_params(vars(args), data)
def prepare_generator_params(
options: Dict[str, Union[List[str], bool]], data: str
) -> GeneratorParams:
modules = json.loads(data)
Map.of(Module, {}).validate(modules)
modules: MutableMapping[Module] = Map.of(Module, {}).wrap(modules)
all_modules: OrderedObj[Module] = OrderedObj(modules)
all_types = OrderedObj.from_iterable(m.types_as_ordered_obj() for m in all_modules.values())
targets = options.get("target", [])
scaffold_types = OrderedObj.from_iterable(
modules[target].types_as_ordered_obj() for target in targets
)
return GeneratorParams(
all_modules, all_types, scaffold_types, options.get("output", [getcwd()])[0], options
) | 0.526343 | 0.156105 |
import asyncio
import motor.motor_asyncio
try:
import ujson as json
except ImportError: # pragma no cover
import json
from urllib.parse import urlparse
from .model import APITest
from .exceptions import ApitestConnectionError
async def _do_mongodb_query(col, query: dict):  # pragma no cover
    """
    Do a query in a MongoDB and return the most recently inserted match.

    :param col: Motor (or compatible) async collection object.
    :param query: the query in MongoDB format.
    :return: list with at most one document (the last inserted match).
    :rtype: list
    """
    # Fix: `query` was accepted but silently ignored.  Passing it to
    # find() keeps existing behaviour (find({}) == find()) while making
    # the parameter meaningful for other callers.
    cursor = col.find(query).sort("$natural", -1).limit(1)
    # Get the result
    return await cursor.to_list(length=1)
def _load_from_mongo(mongo_uri: str):
    """
    Load API Test information from a MongoDB.

    Collection used to store API Test information will be named: **apitest**

    >>> _load_from_mongo("mongodb://127.0.0.1:27017")
    <type 'APITest'>
    >>> _load_from_mongo("mongodb://user:pass@mongo.example.com:27017/database")
    <type 'APITest'>

    :param mongo_uri: MongoDB connection string
    :type mongo_uri: str
    :return: Return a APITest object instance
    :rtype: APITest
    :raise ApitestConnectionError: If some error occurs when try to connect to MongoDB
    """
    try:
        # Make connection
        client = motor.motor_asyncio.AsyncIOMotorClient(mongo_uri)
        # Fix: urlparse().path keeps the leading "/" ("/database"), which
        # is not a valid database name -- strip it before use.
        db = urlparse(mongo_uri).path.lstrip("/")
        if not db:
            db = "apitest"
        # Get database -> collection
        col = client[db]["apitest"]
        # Run the async query to completion on the current event loop.
        loop = asyncio.get_event_loop()
        ret = loop.run_until_complete(_do_mongodb_query(col, {}))
        if ret:
            return ret[0]
        return {}
    except Exception as e:
        raise ApitestConnectionError from e
def _load_from_file(file_path: str):
    """
    Load API Test information from a JSON file.

    :param file_path: plain path or ``file://`` URI of the JSON file.
    :return: the parsed JSON content.
    """
    assert isinstance(file_path, str)
    # Fix: str.replace("file://", "") removed the scheme *anywhere* in the
    # string, mangling paths that merely contain "file://".  Strip it only
    # when it is actually a prefix.
    if file_path.startswith("file://"):
        path = file_path[len("file://"):]
    else:
        path = file_path
    with open(path, "r") as f:
        return json.loads(f.read())
def load_data(connection_string: str):
    """
    Load API Test data from one of the supported sources.

    Supported connection strings:

    * a plain file path: ``"/path/to/my/file.json"``
    * a file URI: ``"file:///path/to/my/file.json"``
    * a MongoDB URI: ``"mongodb://mongo.example.com:27017"`` (replica-set
      style URIs such as
      ``"mongodb://db1.example.net,db2.example.net:2500/?replicaSet=test"``
      work as well)

    :param connection_string: location of the API Test data.
    :return: an APITest instance built from the loaded payload.
    """
    assert isinstance(connection_string, str)
    if connection_string.startswith("mongodb://"):
        raw = _load_from_mongo(connection_string)
    else:
        # Normalise a bare path to a file:// URI before delegating.
        uri = (
            connection_string
            if connection_string.startswith("file://")
            else "file://{}".format(connection_string)
        )
        raw = _load_from_file(uri)
    return APITest(**raw)
__all__ = ("load_data",) | refactor/old/apitest/core/loaders.py | import asyncio
import motor.motor_asyncio
try:
import ujson as json
except ImportError: # pragma no cover
import json
from urllib.parse import urlparse
from .model import APITest
from .exceptions import ApitestConnectionError
async def _do_mongodb_query(col, query: dict): # pragma no cover
"""
Do a query in a MongoDB and return the result
:param query: the query in MongoDB format
:type query: dict
:return: MongoDB Returned information
:rtype: dict
"""
# Get last inserted record
cursor = col.find().sort("$natural", -1).limit(1)
# Get the result
return await cursor.to_list(length=1)
def _load_from_mongo(mongo_uri: str):
"""
Load API Test information from a MongoDB.
Collection used to store API Test information will be named: **apitest**
>>> load_from_mongo("mongodb://127.0.0.1:27017")
<type 'APITest'>
>>> _load_from_mongo("mongodb://user:pass@mongo.example.com:27017/database")
<type 'APITest'>
:param mongo_uri: MongoDB connection string
:type mongo_uri: str
:return: Return a APITest object instance
:rtype: APITest
:raise ApitestConnectionError: If some error occurs when try to connect to MongoDB
"""
try:
# Make connection
client = motor.motor_asyncio.AsyncIOMotorClient(mongo_uri)
# Get database form connectionstring
db = urlparse(mongo_uri).path
if not db:
db = "apitest"
# Get database -> collection
col = client[db]["apitest"]
# Do the query
loop = asyncio.get_event_loop()
ret = loop.run_until_complete(_do_mongodb_query(col, {}))
if ret:
return ret[0]
else:
return {}
except Exception as e:
raise ApitestConnectionError from e
def _load_from_file(file_path: str):
assert isinstance(file_path, str)
# Get path
path = file_path.replace("file://", "")
with open(path, "r") as f:
return json.loads(f.read())
def load_data(connection_string: str):
"""
Load data from a source. Source could be:
- A JSON File
- A MongoDB
Load data from a file
---------------------
If you want to load data from a File, you must to provide this connection string:
>>> connection_string = "/path/to/my/file.json"
or using URI format:
>>> connection_string = "file:///path/to/my/file.json"
Load file from a MongoDB
------------------------
If you want to load data from a MongoDB database, you must to provide a connection string like:
>>> connection_string = "mongodb://mongo.example.com:27017"
Or event more complicate:
>>> connection_string = "mongodb://db1.example.net,db2.example.net:2500/?replicaSet=test"
:param connection_string:
:type connection_string:
:return:
:rtype:
"""
assert isinstance(connection_string, str)
if connection_string.startswith("mongodb://"):
data = _load_from_mongo(connection_string)
elif connection_string.startswith("file://"):
data = _load_from_file(connection_string)
else:
data = _load_from_file("file://{}".format(connection_string))
# Load JSON info
return APITest(**data)
__all__ = ("load_data",) | 0.655005 | 0.301677 |
CHARS = {
'sp':0x20,
'apb':0x08, # active position back
'apf':0x09, # active position forward
'apd':0x0a, # active position down
'apu':0x0b, # active position up
'cs':0x0c, # clear screen
'apr':0x0d, # active position return
'si':0x0e, # shift in
'so':0x0f, # shift out
'con':0x11, # cursor on
'rpt':0x12, # repeat last character
'cof':0x14, # cursor off
'can':0x18, # cancel (fills the rest off the line with spaces)
'ss2':0x19, # single shift 2 (g2 set, some legents say that this is the magic "yes" character)
'esc':0x1b, # escape
'ss3':0x1d, # single shift 3 (g3 set)
'aph':0x1e, # active position home
'us':0x1f, # unit seperator (also known as apa)
# c0 data link control tokens (single byte constants)
'nul':0x00, # null
'soh':0x01, # start of heading
'stx':0x02, # start text
'etx':0x03, # end text
'eot':0x04, # end of transmission
'enq':0x05, # enquiry
'ack':0x06, # acknowledge
'itb':0x07, # end intermediate block
'dle':0x10, # data link escape
'nak':0x15, # negative acknowledge
'syn':0x16, # syncronize
'etb':0x17, # end textblock
# note: the data link control tokens are not mentiond in ets_300_072
# c0 propritay-btx tokens (single byte constants)
'ini':0x13, # initiator (*)
'ter':0x1c, # terminator (#)
'dct':0x1a, # propritary btx (makes the terminal talking)
# c1s/c1p control functions set (single byte constants)
'fsh':0x88, # flash
'std':0x89, # steady
'ebx':0x8a, # end box
'sbx':0x8b, # start box
'nsz':0x8c, # normal-size
'dbh':0x8d, # double-height
'dbw':0x8e, # double-width
'dbs':0x8f, # double-size
'cdy':0x98, # conceal display
'spl':0x99, # stop lining
'stl':0x9a, # start lining
'csi':0x9b, # control sequence introducer
# c1s control functions set (single byte constants)
'abk':0x80, # alpha black
'anr':0x81, # alpha red
'ang':0x82, # alpha green
'any':0x83, # alpha yellow
'anb':0x84, # alpha blue
'anm':0x85, # alpha mageta
'anc':0x86, # alpha cyan
'anw':0x87, # alpha white
'mbk':0x90, # mosaic black
'msr':0x91, # mosaic red
'msg':0x92, # mosaic green
'msy':0x93, # mosaic yellow
'msb':0x94, # mosaic blue
'msm':0x95, # mosaic magenta
'msc':0x96, # mosaic cyan
'msw':0x97, # mosaic white
'bbd':0x9c, # black background
'nbd':0x9d, # new background
'hms':0x9e, # hold mosaic
'rms':0x9f, # release mosaic
# c1p control functions set (single byte constants)
'bkf':0x80, # black foreground
'rdf':0x81, # red foreground
'grf':0x82, # green foreground
'ylf':0x83, # yellow foreground
'blf':0x84, # blue foreground
'mgf':0x85, # magenta foreground
'cnf':0x86, # cyan foreground
'whf':0x87, # white foreground
'bkb':0x90, # black background
'rdb':0x91, # red background
'grb':0x92, # green background
'ylb':0x93, # yellow background
'blb':0x94, # blue background
'mgb':0x95, # magenta background
'cnb':0x96, # cyan background
'whb':0x97, # white background
'npo':0x9c, # normal polarity
'ipo':0x9d, # inverted polarity
'trb':0x9e, # transparent background
'stc':0x9f, # stop conceal
# fe full screen atributes (single byte constants)
'fbkb':0x50, # black background
'frdb':0x51, # red background
'fgrb':0x52, # green background
'fylb':0x53, # yellow background
'fblb':0x54, # blue background
'fmgb':0x55, # magenta background
'fcnb':0x56, # cyan background
'fwhb':0x57, # white background
'ftrb':0x5e # transparent background
} | cept.py |
CHARS = {
'sp':0x20,
'apb':0x08, # active position back
'apf':0x09, # active position forward
'apd':0x0a, # active position down
'apu':0x0b, # active position up
'cs':0x0c, # clear screen
'apr':0x0d, # active position return
'si':0x0e, # shift in
'so':0x0f, # shift out
'con':0x11, # cursor on
'rpt':0x12, # repeat last character
'cof':0x14, # cursor off
'can':0x18, # cancel (fills the rest off the line with spaces)
'ss2':0x19, # single shift 2 (g2 set, some legents say that this is the magic "yes" character)
'esc':0x1b, # escape
'ss3':0x1d, # single shift 3 (g3 set)
'aph':0x1e, # active position home
'us':0x1f, # unit seperator (also known as apa)
# c0 data link control tokens (single byte constants)
'nul':0x00, # null
'soh':0x01, # start of heading
'stx':0x02, # start text
'etx':0x03, # end text
'eot':0x04, # end of transmission
'enq':0x05, # enquiry
'ack':0x06, # acknowledge
'itb':0x07, # end intermediate block
'dle':0x10, # data link escape
'nak':0x15, # negative acknowledge
'syn':0x16, # syncronize
'etb':0x17, # end textblock
# note: the data link control tokens are not mentiond in ets_300_072
# c0 propritay-btx tokens (single byte constants)
'ini':0x13, # initiator (*)
'ter':0x1c, # terminator (#)
'dct':0x1a, # propritary btx (makes the terminal talking)
# c1s/c1p control functions set (single byte constants)
'fsh':0x88, # flash
'std':0x89, # steady
'ebx':0x8a, # end box
'sbx':0x8b, # start box
'nsz':0x8c, # normal-size
'dbh':0x8d, # double-height
'dbw':0x8e, # double-width
'dbs':0x8f, # double-size
'cdy':0x98, # conceal display
'spl':0x99, # stop lining
'stl':0x9a, # start lining
'csi':0x9b, # control sequence introducer
# c1s control functions set (single byte constants)
'abk':0x80, # alpha black
'anr':0x81, # alpha red
'ang':0x82, # alpha green
'any':0x83, # alpha yellow
'anb':0x84, # alpha blue
'anm':0x85, # alpha mageta
'anc':0x86, # alpha cyan
'anw':0x87, # alpha white
'mbk':0x90, # mosaic black
'msr':0x91, # mosaic red
'msg':0x92, # mosaic green
'msy':0x93, # mosaic yellow
'msb':0x94, # mosaic blue
'msm':0x95, # mosaic magenta
'msc':0x96, # mosaic cyan
'msw':0x97, # mosaic white
'bbd':0x9c, # black background
'nbd':0x9d, # new background
'hms':0x9e, # hold mosaic
'rms':0x9f, # release mosaic
# c1p control functions set (single byte constants)
'bkf':0x80, # black foreground
'rdf':0x81, # red foreground
'grf':0x82, # green foreground
'ylf':0x83, # yellow foreground
'blf':0x84, # blue foreground
'mgf':0x85, # magenta foreground
'cnf':0x86, # cyan foreground
'whf':0x87, # white foreground
'bkb':0x90, # black background
'rdb':0x91, # red background
'grb':0x92, # green background
'ylb':0x93, # yellow background
'blb':0x94, # blue background
'mgb':0x95, # magenta background
'cnb':0x96, # cyan background
'whb':0x97, # white background
'npo':0x9c, # normal polarity
'ipo':0x9d, # inverted polarity
'trb':0x9e, # transparent background
'stc':0x9f, # stop conceal
# fe full screen atributes (single byte constants)
'fbkb':0x50, # black background
'frdb':0x51, # red background
'fgrb':0x52, # green background
'fylb':0x53, # yellow background
'fblb':0x54, # blue background
'fmgb':0x55, # magenta background
'fcnb':0x56, # cyan background
'fwhb':0x57, # white background
'ftrb':0x5e # transparent background
} | 0.35488 | 0.109658 |
from slepc4py import SLEPc
from petsc4py import PETSc
import unittest
# --------------------------------------------------------------------
class BaseTestObject(object):
    """Shared lifecycle tests for SLEPc objects.

    Subclasses set ``CLASS`` to the slepc4py type under test; ``FACTORY``
    (with ``TARGS``/``KARGS``) names the factory method used in setUp.
    """

    # CLASS: slepc4py type under test; FACTORY: factory method name.
    CLASS, FACTORY = None, 'create'
    # Positional/keyword arguments for the factory call.
    TARGS, KARGS = (), {}
    BUILD = None

    def setUp(self):
        # Instantiate and initialise the object under test.
        self.obj = self.CLASS()
        getattr(self.obj,self.FACTORY)(*self.TARGS, **self.KARGS)
        if not self.obj: self.obj.create()

    def tearDown(self):
        self.obj = None

    def testTypeRegistry(self):
        """The object's classid must map back to its Python class."""
        type_reg = PETSc.__type_registry__
        classid = self.obj.getClassId()
        typeobj = self.CLASS
        # DMDA registers under its DM base class.
        if isinstance(self.obj, PETSc.DMDA):
            typeobj = PETSc.DM
        self.assertTrue(type_reg[classid] is typeobj )

    def testLogClass(self):
        name = self.CLASS.__name__
        logcls = PETSc.Log.Class(name)
        classid = self.obj.getClassId()
        self.assertEqual(logcls.id, classid)

    def testClass(self):
        self.assertTrue(isinstance(self.obj, self.CLASS))
        self.assertTrue(type(self.obj) is self.CLASS)

    def testNonZero(self):
        self.assertTrue(bool(self.obj))

    def testDestroy(self):
        # After destroy() the wrapper must evaluate falsy.
        self.assertTrue(bool(self.obj))
        self.obj.destroy()
        self.assertFalse(bool(self.obj))
        ## self.assertRaises(PETSc.Error, self.obj.destroy)
        ## self.assertTrue(self.obj.this is this)

    def testOptions(self):
        # Options prefix is empty by default and set/get round-trips.
        self.assertFalse(self.obj.getOptionsPrefix())
        prefix1 = 'my_'
        self.obj.setOptionsPrefix(prefix1)
        self.assertEqual(self.obj.getOptionsPrefix(), prefix1)
        prefix2 = 'opt_'
        self.obj.setOptionsPrefix(prefix2)
        self.assertEqual(self.obj.getOptionsPrefix(), prefix2)
        ## self.obj.appendOptionsPrefix(prefix1)
        ## self.assertEqual(self.obj.getOptionsPrefix(),
        ##                  prefix2 + prefix1)
        ## self.obj.prependOptionsPrefix(prefix1)
        ## self.assertEqual(self.obj.getOptionsPrefix(),
        ##                  prefix1 + prefix2 + prefix1)
        self.obj.setFromOptions()

    def testName(self):
        oldname = self.obj.getName()
        newname = '%s-%s' %(oldname, oldname)
        self.obj.setName(newname)
        self.assertEqual(self.obj.getName(), newname)
        self.obj.setName(oldname)
        self.assertEqual(self.obj.getName(), oldname)

    def testComm(self):
        comm = self.obj.getComm()
        self.assertTrue(isinstance(comm, PETSc.Comm))
        self.assertTrue(comm in [PETSc.COMM_SELF, PETSc.COMM_WORLD])

    def testRefCount(self):
        # incRef/decRef must be symmetric; dropping to zero destroys.
        self.assertEqual(self.obj.getRefCount(), 1)
        self.obj.incRef()
        self.assertEqual(self.obj.getRefCount(), 2)
        self.obj.incRef()
        self.assertEqual(self.obj.getRefCount(), 3)
        self.obj.decRef()
        self.assertEqual(self.obj.getRefCount(), 2)
        self.obj.decRef()
        self.assertEqual(self.obj.getRefCount(), 1)
        self.obj.decRef()
        self.assertFalse(bool(self.obj))

    def testHandle(self):
        self.assertTrue(self.obj.handle)
        self.assertTrue(self.obj.fortran)
        h, f = self.obj.handle, self.obj.fortran
        # Handles only comparable when both carry the same sign.
        if (h>0 and f>0) or (h<0 and f<0):
            self.assertEqual(h, f)
        self.obj.destroy()
        self.assertFalse(self.obj.handle)
        self.assertFalse(self.obj.fortran)

    def testComposeQuery(self):
        # compose() attaches a reference (refcount +1); composing None
        # detaches it again.
        self.assertEqual(self.obj.getRefCount(), 1)
        self.obj.compose('myobj', self.obj)
        self.assertTrue(type(self.obj.query('myobj')) is self.CLASS)
        self.assertEqual(self.obj.query('myobj'), self.obj)
        self.assertEqual(self.obj.getRefCount(), 2)
        self.obj.compose('myobj', None)
        self.assertEqual(self.obj.getRefCount(), 1)
        self.assertEqual(self.obj.query('myobj'), None)

    def testProperties(self):
        # Property accessors must agree with their getter counterparts.
        self.assertEqual(self.obj.getClassId(), self.obj.classid)
        self.assertEqual(self.obj.getClassName(), self.obj.klass)
        self.assertEqual(self.obj.getType(), self.obj.type)
        self.assertEqual(self.obj.getName(), self.obj.name)
        self.assertEqual(self.obj.getComm(), self.obj.comm)
        self.assertEqual(self.obj.getRefCount(), self.obj.refcount)

    def testShallowCopy(self):
        # A shallow copy shares the underlying object (refcount +1).
        import copy
        rc = self.obj.getRefCount()
        obj = copy.copy(self.obj)
        self.assertTrue(obj is not self.obj)
        self.assertTrue(obj == self.obj)
        self.assertTrue(type(obj) is type(self.obj))
        self.assertEqual(obj.getRefCount(), rc+1)
        del obj
        self.assertEqual(self.obj.getRefCount(), rc)

    def testDeepCopy(self):
        # A deep copy is an independent object with its own refcount.
        # Types that do not implement duplication raise
        # NotImplementedError, in which case the test is a no-op.
        self.obj.setFromOptions()
        import copy
        rc = self.obj.getRefCount()
        try:
            obj = copy.deepcopy(self.obj)
        except NotImplementedError:
            return
        self.assertTrue(obj is not self.obj)
        self.assertTrue(obj != self.obj)
        self.assertTrue(type(obj) is type(self.obj))
        self.assertEqual(self.obj.getRefCount(), rc)
        self.assertEqual(obj.getRefCount(), 1)
        del obj
# --------------------------------------------------------------------
# Concrete parametrizations of BaseTestObject, one per SLEPc object type.
class TestObjectST(BaseTestObject, unittest.TestCase):
    CLASS = SLEPc.ST

class TestObjectBV(BaseTestObject, unittest.TestCase):
    CLASS = SLEPc.BV
    # Deep-copy test deliberately disabled for BV (reason not stated here;
    # presumably unsupported -- confirm against slepc4py).
    def testDeepCopy(self): pass

class TestObjectEPS(BaseTestObject, unittest.TestCase):
    CLASS = SLEPc.EPS

class TestObjectSVD(BaseTestObject, unittest.TestCase):
    CLASS = SLEPc.SVD

class TestObjectPEP(BaseTestObject, unittest.TestCase):
    CLASS = SLEPc.PEP

class TestObjectNEP(BaseTestObject, unittest.TestCase):
    CLASS = SLEPc.NEP

class TestObjectMFN(BaseTestObject, unittest.TestCase):
    CLASS = SLEPc.MFN
# --------------------------------------------------------------------
if __name__ == '__main__':
unittest.main() | test/test_object.py | from slepc4py import SLEPc
from petsc4py import PETSc
import unittest
# --------------------------------------------------------------------
class BaseTestObject(object):
CLASS, FACTORY = None, 'create'
TARGS, KARGS = (), {}
BUILD = None
def setUp(self):
self.obj = self.CLASS()
getattr(self.obj,self.FACTORY)(*self.TARGS, **self.KARGS)
if not self.obj: self.obj.create()
def tearDown(self):
self.obj = None
def testTypeRegistry(self):
type_reg = PETSc.__type_registry__
classid = self.obj.getClassId()
typeobj = self.CLASS
if isinstance(self.obj, PETSc.DMDA):
typeobj = PETSc.DM
self.assertTrue(type_reg[classid] is typeobj )
def testLogClass(self):
name = self.CLASS.__name__
logcls = PETSc.Log.Class(name)
classid = self.obj.getClassId()
self.assertEqual(logcls.id, classid)
def testClass(self):
self.assertTrue(isinstance(self.obj, self.CLASS))
self.assertTrue(type(self.obj) is self.CLASS)
def testNonZero(self):
self.assertTrue(bool(self.obj))
def testDestroy(self):
self.assertTrue(bool(self.obj))
self.obj.destroy()
self.assertFalse(bool(self.obj))
## self.assertRaises(PETSc.Error, self.obj.destroy)
## self.assertTrue(self.obj.this is this)
def testOptions(self):
self.assertFalse(self.obj.getOptionsPrefix())
prefix1 = 'my_'
self.obj.setOptionsPrefix(prefix1)
self.assertEqual(self.obj.getOptionsPrefix(), prefix1)
prefix2 = 'opt_'
self.obj.setOptionsPrefix(prefix2)
self.assertEqual(self.obj.getOptionsPrefix(), prefix2)
## self.obj.appendOptionsPrefix(prefix1)
## self.assertEqual(self.obj.getOptionsPrefix(),
## prefix2 + prefix1)
## self.obj.prependOptionsPrefix(prefix1)
## self.assertEqual(self.obj.getOptionsPrefix(),
## prefix1 + prefix2 + prefix1)
self.obj.setFromOptions()
def testName(self):
oldname = self.obj.getName()
newname = '%s-%s' %(oldname, oldname)
self.obj.setName(newname)
self.assertEqual(self.obj.getName(), newname)
self.obj.setName(oldname)
self.assertEqual(self.obj.getName(), oldname)
def testComm(self):
comm = self.obj.getComm()
self.assertTrue(isinstance(comm, PETSc.Comm))
self.assertTrue(comm in [PETSc.COMM_SELF, PETSc.COMM_WORLD])
def testRefCount(self):
self.assertEqual(self.obj.getRefCount(), 1)
self.obj.incRef()
self.assertEqual(self.obj.getRefCount(), 2)
self.obj.incRef()
self.assertEqual(self.obj.getRefCount(), 3)
self.obj.decRef()
self.assertEqual(self.obj.getRefCount(), 2)
self.obj.decRef()
self.assertEqual(self.obj.getRefCount(), 1)
self.obj.decRef()
self.assertFalse(bool(self.obj))
def testHandle(self):
self.assertTrue(self.obj.handle)
self.assertTrue(self.obj.fortran)
h, f = self.obj.handle, self.obj.fortran
if (h>0 and f>0) or (h<0 and f<0):
self.assertEqual(h, f)
self.obj.destroy()
self.assertFalse(self.obj.handle)
self.assertFalse(self.obj.fortran)
def testComposeQuery(self):
self.assertEqual(self.obj.getRefCount(), 1)
self.obj.compose('myobj', self.obj)
self.assertTrue(type(self.obj.query('myobj')) is self.CLASS)
self.assertEqual(self.obj.query('myobj'), self.obj)
self.assertEqual(self.obj.getRefCount(), 2)
self.obj.compose('myobj', None)
self.assertEqual(self.obj.getRefCount(), 1)
self.assertEqual(self.obj.query('myobj'), None)
def testProperties(self):
self.assertEqual(self.obj.getClassId(), self.obj.classid)
self.assertEqual(self.obj.getClassName(), self.obj.klass)
self.assertEqual(self.obj.getType(), self.obj.type)
self.assertEqual(self.obj.getName(), self.obj.name)
self.assertEqual(self.obj.getComm(), self.obj.comm)
self.assertEqual(self.obj.getRefCount(), self.obj.refcount)
def testShallowCopy(self):
import copy
rc = self.obj.getRefCount()
obj = copy.copy(self.obj)
self.assertTrue(obj is not self.obj)
self.assertTrue(obj == self.obj)
self.assertTrue(type(obj) is type(self.obj))
self.assertEqual(obj.getRefCount(), rc+1)
del obj
self.assertEqual(self.obj.getRefCount(), rc)
def testDeepCopy(self):
self.obj.setFromOptions()
import copy
rc = self.obj.getRefCount()
try:
obj = copy.deepcopy(self.obj)
except NotImplementedError:
return
self.assertTrue(obj is not self.obj)
self.assertTrue(obj != self.obj)
self.assertTrue(type(obj) is type(self.obj))
self.assertEqual(self.obj.getRefCount(), rc)
self.assertEqual(obj.getRefCount(), 1)
del obj
# --------------------------------------------------------------------
class TestObjectST(BaseTestObject, unittest.TestCase):
CLASS = SLEPc.ST
class TestObjectBV(BaseTestObject, unittest.TestCase):
CLASS = SLEPc.BV
def testDeepCopy(self): pass
class TestObjectEPS(BaseTestObject, unittest.TestCase):
CLASS = SLEPc.EPS
class TestObjectSVD(BaseTestObject, unittest.TestCase):
CLASS = SLEPc.SVD
class TestObjectPEP(BaseTestObject, unittest.TestCase):
CLASS = SLEPc.PEP
class TestObjectNEP(BaseTestObject, unittest.TestCase):
CLASS = SLEPc.NEP
class TestObjectMFN(BaseTestObject, unittest.TestCase):
CLASS = SLEPc.MFN
# --------------------------------------------------------------------
if __name__ == '__main__':
unittest.main() | 0.453988 | 0.394667 |
import copy
import json
from django.utils.translation import ugettext_lazy as _
from apps.utils.db import array_group
from apps.exceptions import ValidationError
from apps.utils.log import logger
from apps.log_databus.constants import EtlConfig
from apps.log_databus.handlers.etl_storage import EtlStorage
from apps.log_databus.exceptions import EtlDelimiterParseException
from apps.log_databus.handlers.etl_storage.utils.transfer import preview
from apps.log_databus.constants import (
ETL_DELIMITER_END,
ETL_DELIMITER_DELETE,
ETL_DELIMITER_IGNORE,
FIELD_TEMPLATE,
)
class BkLogDelimiterEtlStorage(EtlStorage):
    """ETL storage strategy that splits raw log lines on a fixed delimiter."""

    etl_config = EtlConfig.BK_LOG_DELIMITER

    def etl_preview(self, data, etl_params=None) -> list:
        """
        Preview field extraction for a raw log line.

        :param data: raw log content
        :param etl_params: extraction parameters; must contain "separator"
        :return: list of {"field_index", "field_name", "value"} dicts
        """
        if not etl_params.get("separator"):
            raise ValidationError(_("分隔符不能为空"))
        # Local split only determines how many columns to ask the SDK for;
        # the SDK performs the authoritative split so the preview matches
        # real ingestion behaviour.  (A dead first pass that built and then
        # discarded a result list has been removed.)
        values = data.split(etl_params["separator"])
        separator_field_list = [f"key{index + 1}" for index in range(len(values))]
        etl_params["separator_field_list"] = separator_field_list
        preview_fields = preview("delimiter", data, etl_only=True, **etl_params)
        return [
            {"field_index": index + 1, "field_name": "", "value": preview_fields.get(key, "")}
            for index, key in enumerate(separator_field_list)
        ]

    def get_result_table_config(self, fields, etl_params, built_in_config, es_version="5.X"):
        """
        Build the cleaning/storage (result table) config; must support both
        creating and editing a collector.
        """
        option = {
            "retain_original_text": etl_params.get("retain_original_text", False),
            "separator_node_source": "data",
            "separator_node_action": "delimiter",
            "separator_node_name": self.separator_node_name,
            "separator": etl_params["separator"],
        }
        if built_in_config.get("option") and isinstance(built_in_config["option"], dict):
            option = dict(built_in_config["option"], **option)
        # Build separator_field_list from the user's field list.
        # 1. Index user fields by field_index and find the largest index.
        user_fields = {}
        max_index = 0
        for field in fields:
            field_index = int(field["field_index"])
            user_fields[str(field_index)] = field
            if field_index > max_index:
                max_index = field_index
        # 2. Emit one entry per column position: the field name, a delete
        #    placeholder for removed fields, an ignore placeholder for
        #    gaps, then the end marker.
        separator_field_list = []
        for i in range(max_index):
            user_field = user_fields.get(str(i + 1))
            if not user_field:
                separator_field_list.append(ETL_DELIMITER_IGNORE)
            else:
                separator_field_list.append(
                    user_field["field_name"] if not user_field["is_delete"] else ETL_DELIMITER_DELETE
                )
        separator_field_list.append(ETL_DELIMITER_END)
        # Oversize lists are only logged for troubleshooting -- presumably a
        # 256-char downstream limit; behaviour is unchanged.
        if len(json.dumps(separator_field_list)) >= 256:
            logger.error(f"[etl][delimiter]separator_field_list => {separator_field_list}")
        option["separator_field_list"] = separator_field_list
        result_table_fields = self.get_result_table_fields(fields, etl_params, built_in_config, es_version=es_version)
        return {
            "option": option,
            "field_list": result_table_fields["fields"],
            "time_alias_name": result_table_fields["time_field"]["alias_name"],
            "time_option": result_table_fields["time_field"]["option"],
        }

    @classmethod
    def parse_result_table_config(cls, result_table_config, result_table_storage=None):
        """
        Inverse of get_result_table_config: rebuild the collector field list
        (with field_index positions) from a stored result table config.

        :raise EtlDelimiterParseException: when separator_field_list is absent.
        """
        if not result_table_config["option"].get("separator_field_list"):
            raise EtlDelimiterParseException()
        collector_config = super().parse_result_table_config(result_table_config, result_table_storage)
        collector_fields = array_group(
            [field for field in collector_config["fields"] if not field["is_built_in"]], "field_name", 1
        )
        fields = []
        for index, key in enumerate(result_table_config["option"]["separator_field_list"]):
            if key in collector_fields:
                field_info = collector_fields[key]
                field_info["field_index"] = index + 1
                fields.append(field_info)
            elif key == ETL_DELIMITER_DELETE:
                # Deleted columns keep their position via a blank template.
                field_info = copy.deepcopy(FIELD_TEMPLATE)
                field_info["field_index"] = index + 1
                fields.append(field_info)
        # Re-append the built-in fields after the user-defined ones.
        fields += [field for field in collector_config["fields"] if field["is_built_in"]]
        collector_config["fields"] = fields
        return collector_config
import json
from django.utils.translation import ugettext_lazy as _
from apps.utils.db import array_group
from apps.exceptions import ValidationError
from apps.utils.log import logger
from apps.log_databus.constants import EtlConfig
from apps.log_databus.handlers.etl_storage import EtlStorage
from apps.log_databus.exceptions import EtlDelimiterParseException
from apps.log_databus.handlers.etl_storage.utils.transfer import preview
from apps.log_databus.constants import (
ETL_DELIMITER_END,
ETL_DELIMITER_DELETE,
ETL_DELIMITER_IGNORE,
FIELD_TEMPLATE,
)
class BkLogDelimiterEtlStorage(EtlStorage):
etl_config = EtlConfig.BK_LOG_DELIMITER
def etl_preview(self, data, etl_params=None) -> list:
"""
字段提取预览
:param data: 日志原文
:param etl_params: 字段提取参数
:return: 字段列表 list
"""
if not etl_params.get("separator"):
raise ValidationError(_("分隔符不能为空"))
values = data.split(etl_params["separator"])
result = []
separator_field_list = []
for index, key in enumerate(values):
field_index = index + 1
result.append({"field_index": field_index, "field_name": "", "value": values[index]})
separator_field_list.append(f"key{field_index}")
# 调用SDK
etl_params["separator_field_list"] = separator_field_list
preview_fields = preview("delimiter", data, etl_only=True, **etl_params)
result = []
for index, key in enumerate(separator_field_list):
result.append({"field_index": index + 1, "field_name": "", "value": preview_fields.get(key, "")})
return result
def get_result_table_config(self, fields, etl_params, built_in_config, es_version="5.X"):
"""
配置清洗入库策略,需兼容新增、编辑
"""
# option
option = {
"retain_original_text": etl_params.get("retain_original_text", False),
"separator_node_source": "data",
"separator_node_action": "delimiter",
"separator_node_name": self.separator_node_name,
"separator": etl_params["separator"],
}
if built_in_config.get("option") and isinstance(built_in_config["option"], dict):
option = dict(built_in_config["option"], **option)
# 根据字段列表生成separator_field_list
# 1. 找到最大的field_index
user_fields = {}
max_index = 0
for field in fields:
field_index = int(field["field_index"])
user_fields[str(field_index)] = field
if field_index > max_index:
max_index = field_index
# 2. 生成分隔符字段列表
separator_field_list = []
for i in range(max_index):
user_field = user_fields.get(str(i + 1))
if not user_field:
separator_field_list.append(ETL_DELIMITER_IGNORE)
else:
separator_field_list.append(
user_field["field_name"] if not user_field["is_delete"] else ETL_DELIMITER_DELETE
)
separator_field_list.append(ETL_DELIMITER_END)
if len(json.dumps(separator_field_list)) >= 256:
logger.error(f"[etl][delimiter]separator_field_list => {separator_field_list}")
option["separator_field_list"] = separator_field_list
result_table_fields = self.get_result_table_fields(fields, etl_params, built_in_config, es_version=es_version)
return {
"option": option,
"field_list": result_table_fields["fields"],
"time_alias_name": result_table_fields["time_field"]["alias_name"],
"time_option": result_table_fields["time_field"]["option"],
}
    @classmethod
    def parse_result_table_config(cls, result_table_config, result_table_storage=None):
        """
        Rebuild the user-facing field list from a stored result-table config.
        Walks ``option.separator_field_list`` so each user field regains its
        1-based ``field_index``; deleted columns become placeholder entries so
        positions are preserved; built-in fields are appended at the end.
        :raises EtlDelimiterParseException: if the config carries no
            ``separator_field_list`` (i.e. it is not a delimiter config)
        """
        if not result_table_config["option"].get("separator_field_list"):
            raise EtlDelimiterParseException()
        collector_config = super().parse_result_table_config(result_table_config, result_table_storage)
        # Index the non-built-in fields by name
        # (presumably array_group(..., 1) keeps a single item per key — verify)
        collector_fields = array_group(
            [field for field in collector_config["fields"] if not field["is_built_in"]], "field_name", 1
        )
        fields = []
        for index, key in enumerate(result_table_config["option"]["separator_field_list"]):
            if key in collector_fields:
                field_info = collector_fields[key]
                field_info["field_index"] = index + 1
                fields.append(field_info)
            elif key == ETL_DELIMITER_DELETE:
                # Deleted column: emit a template placeholder to keep its slot
                field_info = copy.deepcopy(FIELD_TEMPLATE)
                field_info["field_index"] = index + 1
                fields.append(field_info)
        # Append the built-in fields after the user columns
        fields += [field for field in collector_config["fields"] if field["is_built_in"]]
        collector_config["fields"] = fields
return collector_config | 0.274935 | 0.151467 |
import asyncio
import pytest
import time
from asynctest import MagicMock
from aiohttp import ClientConnectionError
from kin_base.keypair import Keypair
from kin_base.operation import *
from kin_base.horizon import Horizon
from kin_base.exceptions import HorizonRequestError
from kin_base.transaction import Transaction
from kin_base.transaction_envelope import TransactionEnvelope as Te
async def make_envelope(network, horizon, address, seed, *args, **kwargs):
    """Build, sign and serialise a transaction envelope for *address*.
    Positional *args* are the operations to include; *kwargs* override the
    default transaction options (next sequence number, 100-stroop fee per op).
    Returns the XDR-encoded envelope.
    """
    account = await horizon.account(address)
    opts = {
        'sequence': int(account['sequence']) + 1,
        'fee': 100 * len(args),
    }
    opts.update(kwargs)
    tx = Transaction(address, **opts)
    for operation in args:
        tx.add_operation(operation)
    envelope = Te(tx, network_id=network)
    envelope.sign(Keypair.from_seed(seed))
    return envelope.xdr()
@pytest.mark.asyncio
async def test_submit(setup, helpers, aio_session):
    """End-to-end: build, sign and submit a payment; the reply carries a 'hash'."""
    kp = Keypair.random()
    address = kp.address().decode()
    seed = kp.seed()
    # Fund the freshly generated account so it exists on the network.
    await helpers.fund_account(setup, address, aio_session)
    async with Horizon(setup.horizon_endpoint_uri) as horizon:
        # Self-payment of 0.1618 of the native asset.
        envelope_xdr = await make_envelope(setup.network, horizon, address, seed,
                                           Payment(
                                               destination=address,
                                               asset=Asset.native(),
                                               amount="0.1618"))
        response = await horizon.submit(envelope_xdr.decode())
    assert 'hash' in response
@pytest.mark.asyncio
async def test_sse(setup, helpers, aio_session):
    """Funding an account produces exactly one SSE transaction event.
    Starts a background SSE listener, funds a fresh account, then checks
    one event arrived within the grace period.
    """
    kp = Keypair.random()
    address = kp.address().decode()
    events = []

    async def sse_handler(events):
        async with Horizon(setup.horizon_endpoint_uri) as horizon:
            # Collect only the first event, then stop listening.
            async for event in await horizon.account_transactions('GA3FLH3EVYHZUHTPQZU63JPX7ECJQL<KEY>',
                                                                  sse=True):
                events.append(event)
                break

    handler = asyncio.ensure_future(sse_handler(events))
    try:
        await helpers.fund_account(setup, address, aio_session)
        await asyncio.sleep(5)
        # Surface any error raised inside the listener instead of letting it
        # die silently with "Task exception was never retrieved".
        if handler.done() and handler.exception() is not None:
            raise handler.exception()
        assert len(events) == 1
    finally:
        # Don't leak a pending task if no event ever arrived; cancelling a
        # finished task is a no-op.
        handler.cancel()
@pytest.mark.asyncio
async def test_sse_event_timeout(setup, helpers, aio_session):
    """An SSE stream with sse_timeout raises TimeoutError once events stop."""
    kp = Keypair.random()
    address = kp.address().decode()
    events = []

    async def sse_handler(events):
        async with Horizon(setup.horizon_endpoint_uri) as horizon:
            # 15s allowed between events; after the single funding event the
            # stream goes quiet, so the generator should raise.
            async for event in await horizon.account_transactions(
                    '<KEY>',
                    sse=True, sse_timeout=15):
                events.append(event)

    handler = asyncio.ensure_future(sse_handler(events))
    await helpers.fund_account(setup, address, aio_session)
    await asyncio.sleep(5)
    assert len(events) == 1
    # Sleep past the 15s sse_timeout so the listener task finishes with an error.
    await asyncio.sleep(20)
    # Make sure that the sse generator raised timeout error
    with pytest.raises(asyncio.TimeoutError):
        raise handler.exception()
@pytest.mark.asyncio
async def test_horizon_retry(setup):
    """Connection errors are retried (with delays) before HorizonRequestError."""
    async with Horizon(setup.horizon_endpoint_uri) as horizon:
        # Every GET fails with a connection error, exhausting all retries.
        horizon._session.get = MagicMock(side_effect=ClientConnectionError)
        horizon.num_retries = 3
        expected_time = 1.5 # 0 + 0.5 + 1: delays between the three attempts
        start = time.time()
        with pytest.raises(HorizonRequestError):
            await horizon.account('<KEY>')
        elapsed = time.time() - start
        # One GET per attempt, and the inter-attempt delays must have elapsed.
        assert horizon._session.get.call_count == horizon.num_retries
        assert elapsed >= expected_time
@pytest.mark.asyncio
async def test_horizon_retry_successes(setup):
    """A request that fails once succeeds on the retry without raising."""
    # Minimal async context manager returning a canned response, mimicking
    # how `session.get(...)` is used in an `async with` block.
    class MockedGet:
        def __init__(self, return_value):
            self.return_value = return_value
        async def __aenter__(self):
            return self.return_value
        async def __aexit__(self, exc_type, exc_val, exc_tb):
            pass
    async with Horizon(setup.horizon_endpoint_uri) as horizon:
        # Fetch a real response first so the mocked retry can hand it back.
        real_resp = await horizon._session.get(setup.horizon_endpoint_uri + "/accounts/<KEY>")
        # Attempt 1 raises, attempt 2 returns the canned response.
        horizon._session.get = MagicMock(side_effect=[ClientConnectionError, MockedGet(real_resp)])
        horizon.num_retries = 3
        res = await horizon.account('<KEY>')
        assert horizon._session.get.call_count == 2
assert res | tests/test_horizon.py | import asyncio
import pytest
import time
from asynctest import MagicMock
from aiohttp import ClientConnectionError
from kin_base.keypair import Keypair
from kin_base.operation import *
from kin_base.horizon import Horizon
from kin_base.exceptions import HorizonRequestError
from kin_base.transaction import Transaction
from kin_base.transaction_envelope import TransactionEnvelope as Te
async def make_envelope(network, horizon, address, seed, *args, **kwargs):
opts = {
'sequence': int((await horizon.account(address))['sequence']) + 1,
'fee': 100 * len(args)
}
for opt, value in kwargs.items():
opts[opt] = value
tx = Transaction(address, **opts)
for count, op in enumerate(args):
tx.add_operation(op)
envelope = Te(tx, network_id=network)
signer = Keypair.from_seed(seed)
envelope.sign(signer)
envelope_xdr = envelope.xdr()
return envelope_xdr
@pytest.mark.asyncio
async def test_submit(setup, helpers, aio_session):
kp = Keypair.random()
address = kp.address().decode()
seed = kp.seed()
await helpers.fund_account(setup, address, aio_session)
async with Horizon(setup.horizon_endpoint_uri) as horizon:
envelope_xdr = await make_envelope(setup.network, horizon, address, seed,
Payment(
destination=address,
asset=Asset.native(),
amount="0.1618"))
response = await horizon.submit(envelope_xdr.decode())
assert 'hash' in response
@pytest.mark.asyncio
async def test_sse(setup, helpers, aio_session):
kp = Keypair.random()
address = kp.address().decode()
events = []
async def sse_handler(events):
async with Horizon(setup.horizon_endpoint_uri) as horizon:
async for event in await horizon.account_transactions('GA3FLH3EVYHZUHTPQZU63JPX7ECJQL<KEY>',
sse=True):
events.append(event)
break
handler = asyncio.ensure_future(sse_handler(events))
await helpers.fund_account(setup, address, aio_session)
await asyncio.sleep(5)
assert len(events) == 1
@pytest.mark.asyncio
async def test_sse_event_timeout(setup, helpers, aio_session):
kp = Keypair.random()
address = kp.address().decode()
events = []
async def sse_handler(events):
async with Horizon(setup.horizon_endpoint_uri) as horizon:
async for event in await horizon.account_transactions(
'<KEY>',
sse=True, sse_timeout=15):
events.append(event)
handler = asyncio.ensure_future(sse_handler(events))
await helpers.fund_account(setup, address, aio_session)
await asyncio.sleep(5)
assert len(events) == 1
await asyncio.sleep(20)
# Make sure that the sse generator raised timeout error
with pytest.raises(asyncio.TimeoutError):
raise handler.exception()
@pytest.mark.asyncio
async def test_horizon_retry(setup):
async with Horizon(setup.horizon_endpoint_uri) as horizon:
horizon._session.get = MagicMock(side_effect=ClientConnectionError)
horizon.num_retries = 3
expected_time = 1.5 # 0 + 0.5 + 1
start = time.time()
with pytest.raises(HorizonRequestError):
await horizon.account('<KEY>')
elapsed = time.time() - start
assert horizon._session.get.call_count == horizon.num_retries
assert elapsed >= expected_time
@pytest.mark.asyncio
async def test_horizon_retry_successes(setup):
class MockedGet:
def __init__(self, return_value):
self.return_value = return_value
async def __aenter__(self):
return self.return_value
async def __aexit__(self, exc_type, exc_val, exc_tb):
pass
async with Horizon(setup.horizon_endpoint_uri) as horizon:
real_resp = await horizon._session.get(setup.horizon_endpoint_uri + "/accounts/<KEY>")
horizon._session.get = MagicMock(side_effect=[ClientConnectionError, MockedGet(real_resp)])
horizon.num_retries = 3
res = await horizon.account('<KEY>')
assert horizon._session.get.call_count == 2
assert res | 0.651909 | 0.296463 |
from hachoir_parser import Parser
from hachoir_core.field import FieldSet, UInt8, UInt16, Enum, RawBytes
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_parser.image.common import PaletteRGB
class Line(FieldSet):
    """A row of `width` 8-bit pixel values."""
    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        width = self["/width"].value
        self._size = width * self["/bpp"].value
    def createFields(self):
        width = self["/width"].value
        for index in xrange(width):
            yield UInt8(self, "pixel[]")
class Pixels(FieldSet):
    """All image lines; row order depends on the "options" header field."""
    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        width = self["/width"].value
        height = self["/height"].value
        self._size = width * height * self["/bpp"].value
    def createFields(self):
        height = self["/height"].value
        # options == 0 is described as "vertical mirror" in the header, so
        # emit the rows bottom-up; otherwise top-down.
        if self["/options"].value == 0:
            rows = xrange(height - 1, -1, -1)
        else:
            rows = xrange(height)
        for y in rows:
            yield Line(self, "line[%u]" % y)
class TargaFile(Parser):
    """Parser for Truevision Targa (TGA) image files (8- or 24-bit)."""
    PARSER_TAGS = {
        "id": "targa",
        "category": "image",
        "file_ext": ("tga",),
        "mime": (u"image/targa", u"image/tga", u"image/x-tga"),
        "min_size": 18*8,
        "description": u"Truevision Targa Graphic (TGA)"
    }
    # Supported image-type codes from the TGA header.
    CODEC_NAME = {
        1: u"8-bit uncompressed",
        2: u"24-bit uncompressed",
        9: u"8-bit RLE",
        10: u"24-bit RLE",
    }
    endian = LITTLE_ENDIAN
    def validate(self):
        """Return True if the header looks like a supported TGA, else a reason string."""
        if self["version"].value != 1:
            return "Unknown version"
        if self["codec"].value not in self.CODEC_NAME:
            return "Unknown codec"
        if self["x_min"].value != 0 or self["y_min"].value != 0:
            return "(x_min, y_min) is not (0,0)"
        if self["bpp"].value not in (8, 24):
            return "Unknown bits/pixel value"
        return True
    def createFields(self):
        """Yield the fixed 18-byte header, optional palette, then pixel data."""
        yield UInt8(self, "hdr_size", "Header size in bytes")
        yield UInt8(self, "version", "Targa version (always one)")
        yield Enum(UInt8(self, "codec", "Pixels encoding"), self.CODEC_NAME)
        yield UInt16(self, "palette_ofs", "Palette absolute file offset")
        yield UInt16(self, "nb_color", "Number of color")
        yield UInt8(self, "color_map_size", "Color map entry size")
        yield UInt16(self, "x_min")
        yield UInt16(self, "y_min")
        yield UInt16(self, "width")
        yield UInt16(self, "height")
        yield UInt8(self, "bpp", "Bits per pixel")
        yield UInt8(self, "options", "Options (0: vertical mirror)")
        # 8-bit images carry a 256-entry RGB palette after the header.
        if self["bpp"].value == 8:
            yield PaletteRGB(self, "palette", 256)
        if self["codec"].value == 1:
            # Codec 1: uncompressed 8-bit — parse per-line pixel fields.
            yield Pixels(self, "pixels")
        else:
            # Other codecs (RLE / 24-bit): keep the remaining bytes raw.
            size = (self.size - self.current_size) // 8
            if size:
yield RawBytes(self, "raw_pixels", size) | .modules/.metagoofil/hachoir_parser/image/tga.py | from hachoir_parser import Parser
from hachoir_core.field import FieldSet, UInt8, UInt16, Enum, RawBytes
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_parser.image.common import PaletteRGB
class Line(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["/width"].value * self["/bpp"].value
def createFields(self):
for x in xrange(self["/width"].value):
yield UInt8(self, "pixel[]")
class Pixels(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
self._size = self["/width"].value * self["/height"].value * self["/bpp"].value
def createFields(self):
if self["/options"].value == 0:
RANGE = xrange(self["/height"].value-1,-1,-1)
else:
RANGE = xrange(self["/height"].value)
for y in RANGE:
yield Line(self, "line[%u]" % y)
class TargaFile(Parser):
PARSER_TAGS = {
"id": "targa",
"category": "image",
"file_ext": ("tga",),
"mime": (u"image/targa", u"image/tga", u"image/x-tga"),
"min_size": 18*8,
"description": u"Truevision Targa Graphic (TGA)"
}
CODEC_NAME = {
1: u"8-bit uncompressed",
2: u"24-bit uncompressed",
9: u"8-bit RLE",
10: u"24-bit RLE",
}
endian = LITTLE_ENDIAN
def validate(self):
if self["version"].value != 1:
return "Unknown version"
if self["codec"].value not in self.CODEC_NAME:
return "Unknown codec"
if self["x_min"].value != 0 or self["y_min"].value != 0:
return "(x_min, y_min) is not (0,0)"
if self["bpp"].value not in (8, 24):
return "Unknown bits/pixel value"
return True
def createFields(self):
yield UInt8(self, "hdr_size", "Header size in bytes")
yield UInt8(self, "version", "Targa version (always one)")
yield Enum(UInt8(self, "codec", "Pixels encoding"), self.CODEC_NAME)
yield UInt16(self, "palette_ofs", "Palette absolute file offset")
yield UInt16(self, "nb_color", "Number of color")
yield UInt8(self, "color_map_size", "Color map entry size")
yield UInt16(self, "x_min")
yield UInt16(self, "y_min")
yield UInt16(self, "width")
yield UInt16(self, "height")
yield UInt8(self, "bpp", "Bits per pixel")
yield UInt8(self, "options", "Options (0: vertical mirror)")
if self["bpp"].value == 8:
yield PaletteRGB(self, "palette", 256)
if self["codec"].value == 1:
yield Pixels(self, "pixels")
else:
size = (self.size - self.current_size) // 8
if size:
yield RawBytes(self, "raw_pixels", size) | 0.605566 | 0.15444 |
# CDED (Canadian Digital Elevation Data) header record layout: ordered
# fixed-width fields. "length" is the field width in characters; when
# "description" is a list it appears to map coded integer values (by index)
# to their meaning (None = unused code) — confirm against the record reader;
# "ignore": True marks a field skipped during parsing.
CDED = [
{"name":"fileName", "length":40, "description":None},
{"name":"responsabilityCenter", "length":60, "description":None},
{"name":"filler", "length":9, "description":None},
{"name":"seGeographicCorner", "length":26, "description":None},
{"name":"softwareUsed", "length":1, "description":[None, None, None, None, None, None, None, None, "ANUDEM", "FME for Linux"]},
{"name":"filler", "length":1, "description":None},
{"name":"sectionalIndicator", "length":3, "description":None},
{"name":"originCode", "length":4, "description":None},
{"name":"demLevelCode", "length":6, "description":None},
{"name":"elevationPattern", "length":6, "description":[None, 'Regular', 'Random']},
{"name":"planimetricReferenceSystem", "length":6, "description":['Geographic', 'UTM', 'State Plane']},
{"name":"zoneReferenceSystem", "length":6, "description":None},
{"name":"mapProjectionParameters", "length":360,"description":None, "ignore":True},
{"name":"uomGround", "length":6, "description":['Radians', 'Meters', 'Feet', 'Arc Seconds']},
{"name":"uomElevation", "length":6, "description":[None, 'Feet', 'Meters']},
{"name":"quadrangleSideCount", "length":6, "description":None},
{"name":"quadrangleCoordinatesArray", "length":192,"description":None},
{"name":"minMaxElevations", "length":48, "description":None},
{"name":"counterclockwiseAngle", "length":24, "description":None},
{"name":"elevationAccuracy", "length":6, "description":['Accuracy Unknown', 'Accuracy given in Record C']},
{"name":"xyzResolution", "length":36, "description":None},
{"name":"collumnCount", "length":6, "description":None},
{"name":"rowCount", "length":6, "description":None},
{"name":"largestPrimaryContourInterval", "length":5, "description":None},
{"name":"counterIntervalUnits", "length":1, "description":['N/A', 'Feet', 'Meters']},
{"name":"smallestPrimaryContourInterval", "length":5, "description":None},
{"name":"counterIntervalUnits", "length":1, "description":['N/A', 'Feet', 'Meters']},
{"name":"dataSourceDate", "length":4, "description":None},
{"name":"dataRevisionDate", "length":4, "description":None},
{"name":"inspectionFlag", "length":1, "description":None},
{"name":"validationFlag", "length":1, "description":['Not Validated', 'Validated', 'Validated', 'Validated', 'Validated', 'Validated']},
{"name":"suspectVoidAreasFlag", "length":2, "description":['No Suspect/Void Areas', 'Suspect Areas', 'Void Areas', 'Both Suspect and Void Areas']},
{"name":"verticalDatum", "length":2, "description":[None, 'Local Mean Sea Level', 'NGVD 29', 'NAVD 88']},
{"name":"horizontalDatum", "length":2, "description":[None, 'North American Datum 1927', 'World Geodetic System 1972', 'WGS 84', 'NAD 83', 'Old Hawaii Datum', 'Puerto Rico Datum']},
{"name":"dataEdition", "length":4, "description":None},
{"name":"percentVoid", "length":4, "description":None},
{"name":"edgeMatchFlag", "length":8, "description":None},
{"name":"verticalDatumShift", "length":7, "description":None},
] | cdedtools/translationtables.py | CDED = [
{"name":"fileName", "length":40, "description":None},
{"name":"responsabilityCenter", "length":60, "description":None},
{"name":"filler", "length":9, "description":None},
{"name":"seGeographicCorner", "length":26, "description":None},
{"name":"softwareUsed", "length":1, "description":[None, None, None, None, None, None, None, None, "ANUDEM", "FME for Linux"]},
{"name":"filler", "length":1, "description":None},
{"name":"sectionalIndicator", "length":3, "description":None},
{"name":"originCode", "length":4, "description":None},
{"name":"demLevelCode", "length":6, "description":None},
{"name":"elevationPattern", "length":6, "description":[None, 'Regular', 'Random']},
{"name":"planimetricReferenceSystem", "length":6, "description":['Geographic', 'UTM', 'State Plane']},
{"name":"zoneReferenceSystem", "length":6, "description":None},
{"name":"mapProjectionParameters", "length":360,"description":None, "ignore":True},
{"name":"uomGround", "length":6, "description":['Radians', 'Meters', 'Feet', 'Arc Seconds']},
{"name":"uomElevation", "length":6, "description":[None, 'Feet', 'Meters']},
{"name":"quadrangleSideCount", "length":6, "description":None},
{"name":"quadrangleCoordinatesArray", "length":192,"description":None},
{"name":"minMaxElevations", "length":48, "description":None},
{"name":"counterclockwiseAngle", "length":24, "description":None},
{"name":"elevationAccuracy", "length":6, "description":['Accuracy Unknown', 'Accuracy given in Record C']},
{"name":"xyzResolution", "length":36, "description":None},
{"name":"collumnCount", "length":6, "description":None},
{"name":"rowCount", "length":6, "description":None},
{"name":"largestPrimaryContourInterval", "length":5, "description":None},
{"name":"counterIntervalUnits", "length":1, "description":['N/A', 'Feet', 'Meters']},
{"name":"smallestPrimaryContourInterval", "length":5, "description":None},
{"name":"counterIntervalUnits", "length":1, "description":['N/A', 'Feet', 'Meters']},
{"name":"dataSourceDate", "length":4, "description":None},
{"name":"dataRevisionDate", "length":4, "description":None},
{"name":"inspectionFlag", "length":1, "description":None},
{"name":"validationFlag", "length":1, "description":['Not Validated', 'Validated', 'Validated', 'Validated', 'Validated', 'Validated']},
{"name":"suspectVoidAreasFlag", "length":2, "description":['No Suspect/Void Areas', 'Suspect Areas', 'Void Areas', 'Both Suspect and Void Areas']},
{"name":"verticalDatum", "length":2, "description":[None, 'Local Mean Sea Level', 'NGVD 29', 'NAVD 88']},
{"name":"horizontalDatum", "length":2, "description":[None, 'North American Datum 1927', 'World Geodetic System 1972', 'WGS 84', 'NAD 83', 'Old Hawaii Datum', 'Puerto Rico Datum']},
{"name":"dataEdition", "length":4, "description":None},
{"name":"percentVoid", "length":4, "description":None},
{"name":"edgeMatchFlag", "length":8, "description":None},
{"name":"verticalDatumShift", "length":7, "description":None},
] | 0.53437 | 0.473779 |
""" Tests of DifferenceBetweenAdjacentGridSquares plugin."""
import unittest
import iris
import numpy as np
from iris.coords import CellMethod
from iris.cube import Cube
from iris.tests import IrisTest
from numpy import ma
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
from improver.utilities.spatial import DifferenceBetweenAdjacentGridSquares
class Test_create_difference_cube(IrisTest):
    """Tests for the create_difference_cube method."""

    def setUp(self):
        """Create a plugin instance, an input cube and a y-difference array."""
        values = np.array([[1, 2, 3], [2, 4, 6], [5, 10, 15]])
        self.diff_in_y_array = np.array([[1, 2, 3], [3, 6, 9]])
        self.cube = set_up_variable_cube(values, "wind_speed", "m s-1", "equalarea")
        self.plugin = DifferenceBetweenAdjacentGridSquares()

    def test_y_dimension(self):
        """Differences along y: coordinate points sit midway between inputs."""
        y_points = self.cube.coord(axis="y").points
        midpoints = 0.5 * (y_points[:-1] + y_points[1:])
        result = self.plugin.create_difference_cube(
            self.cube, "projection_y_coordinate", self.diff_in_y_array
        )
        self.assertIsInstance(result, Cube)
        self.assertArrayAlmostEqual(result.coord(axis="y").points, midpoints)
        self.assertArrayEqual(result.data, self.diff_in_y_array)

    def test_x_dimension(self):
        """Differences along x: coordinate points sit midway between inputs."""
        x_diffs = np.array([[1, 1], [2, 2], [5, 5]])
        x_points = self.cube.coord(axis="x").points
        midpoints = 0.5 * (x_points[:-1] + x_points[1:])
        result = self.plugin.create_difference_cube(
            self.cube, "projection_x_coordinate", x_diffs
        )
        self.assertIsInstance(result, Cube)
        self.assertArrayAlmostEqual(result.coord(axis="x").points, midpoints)
        self.assertArrayEqual(result.data, x_diffs)

    def test_othercoords(self):
        """Coordinates not being differenced are carried across unchanged."""
        expected_time = self.cube.coord("time")
        expected_x = self.cube.coord(axis="x")
        result = self.plugin.create_difference_cube(
            self.cube, "projection_y_coordinate", self.diff_in_y_array
        )
        self.assertEqual(result.coord(axis="x"), expected_x)
        self.assertEqual(result.coord("time"), expected_time)
class Test_calculate_difference(IrisTest):
    """Tests for the calculate_difference method."""

    def setUp(self):
        """Create a plugin instance and an equal-area input cube."""
        values = np.array([[1, 2, 3], [2, 4, 6], [5, 10, 15]])
        self.cube = set_up_variable_cube(values, "wind_speed", "m s-1", "equalarea")
        self.plugin = DifferenceBetweenAdjacentGridSquares()

    def _difference_along(self, axis):
        """Run calculate_difference over the named axis of self.cube."""
        coord_name = self.cube.coord(axis=axis).name()
        return self.plugin.calculate_difference(self.cube, coord_name)

    def test_x_dimension(self):
        """Adjacent differences along the x axis."""
        expected = np.array([[1, 1], [2, 2], [5, 5]])
        result = self._difference_along("x")
        self.assertIsInstance(result, np.ndarray)
        self.assertArrayEqual(result, expected)

    def test_y_dimension(self):
        """Adjacent differences along the y axis."""
        expected = np.array([[1, 2, 3], [3, 6, 9]])
        result = self._difference_along("y")
        self.assertIsInstance(result, np.ndarray)
        self.assertArrayEqual(result, expected)

    def test_missing_data(self):
        """NaNs in the input propagate into the differences that use them."""
        self.cube.data = np.array(
            [[1, 2, 3], [np.nan, 4, 6], [5, 10, 15]], dtype=np.float32
        )
        expected = np.array([[np.nan, 2, 3], [np.nan, 6, 9]])
        result = self._difference_along("y")
        self.assertIsInstance(result, np.ndarray)
        self.assertArrayAlmostEqual(result, expected)

    def test_masked_data(self):
        """Masked input points mask every difference that uses them."""
        self.cube.data = ma.array(
            [[1, 2, 3], [2, 4, 6], [5, 10, 15]], mask=[[0, 0, 0], [1, 0, 0], [0, 0, 0]]
        )
        expected = ma.array([[1, 2, 3], [3, 6, 9]], mask=[[1, 0, 0], [1, 0, 0]])
        result = self._difference_along("y")
        self.assertIsInstance(result, np.ndarray)
        self.assertArrayEqual(result, expected)
        self.assertArrayEqual(result.mask, expected.mask)
class Test_process(IrisTest):
    """Tests for the process method."""

    def setUp(self):
        """Create a plugin instance and a two-realization input cube."""
        values = np.array([[1, 2, 3], [2, 4, 6], [5, 10, 15]])
        self.cube = set_up_variable_cube(
            values, "wind_speed", "m s-1", "equalarea", realizations=np.array([1, 2])
        )
        self.plugin = DifferenceBetweenAdjacentGridSquares()

    def test_basic(self):
        """x- and y-differences are returned as two separate cubes."""
        expected_x = np.array([[1, 1], [2, 2], [5, 5]])
        expected_y = np.array([[1, 2, 3], [3, 6, 9]])
        result = self.plugin.process(self.cube)
        for diff_cube, expected in zip(result, (expected_x, expected_y)):
            self.assertIsInstance(diff_cube, Cube)
            self.assertArrayEqual(diff_cube.data, expected)

    def test_metadata(self):
        """Output cubes carry the expected cell method, attribute and name."""
        expected_cell_methods = [
            CellMethod("difference", coords=[axis_name], intervals="1 grid length")
            for axis_name in ("projection_x_coordinate", "projection_y_coordinate")
        ]
        result = self.plugin.process(self.cube)
        for cube, cell_method in zip(result, expected_cell_methods):
            self.assertEqual(cube.cell_methods[0], cell_method)
            self.assertEqual(
                cube.attributes["form_of_difference"], "forward_difference"
            )
            self.assertEqual(cube.name(), "difference_of_wind_speed")

    def test_3d_cube(self):
        """A 3d input also yields separate x- and y-difference cubes."""
        values = np.array(
            [[[1, 2, 3], [2, 4, 6], [5, 10, 15]], [[1, 2, 3], [2, 2, 6], [5, 10, 20]]]
        )
        expected_x = np.array([[[1, 1], [2, 2], [5, 5]], [[1, 1], [0, 4], [5, 10]]])
        expected_y = np.array([[[1, 2, 3], [3, 6, 9]], [[1, 0, 3], [3, 8, 14]]])
        cube = set_up_variable_cube(
            values, "wind_speed", "m s-1", "equalarea", realizations=np.array([1, 2])
        )
        result = self.plugin.process(cube)
        for diff_cube, expected in zip(result, (expected_x, expected_y)):
            self.assertIsInstance(diff_cube, iris.cube.Cube)
            self.assertArrayEqual(diff_cube.data, expected)
if __name__ == "__main__":
unittest.main() | improver_tests/utilities/test_DifferenceBetweenAdjacentGridSquares.py | """ Tests of DifferenceBetweenAdjacentGridSquares plugin."""
import unittest
import iris
import numpy as np
from iris.coords import CellMethod
from iris.cube import Cube
from iris.tests import IrisTest
from numpy import ma
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
from improver.utilities.spatial import DifferenceBetweenAdjacentGridSquares
class Test_create_difference_cube(IrisTest):
"""Test the create_difference_cube method."""
def setUp(self):
"""Set up cube."""
data = np.array([[1, 2, 3], [2, 4, 6], [5, 10, 15]])
self.diff_in_y_array = np.array([[1, 2, 3], [3, 6, 9]])
self.cube = set_up_variable_cube(data, "wind_speed", "m s-1", "equalarea",)
self.plugin = DifferenceBetweenAdjacentGridSquares()
def test_y_dimension(self):
"""Test differences calculated along the y dimension."""
points = self.cube.coord(axis="y").points
expected_y = (points[1:] + points[:-1]) / 2
result = self.plugin.create_difference_cube(
self.cube, "projection_y_coordinate", self.diff_in_y_array
)
self.assertIsInstance(result, Cube)
self.assertArrayAlmostEqual(result.coord(axis="y").points, expected_y)
self.assertArrayEqual(result.data, self.diff_in_y_array)
def test_x_dimension(self):
"""Test differences calculated along the x dimension."""
diff_array = np.array([[1, 1], [2, 2], [5, 5]])
points = self.cube.coord(axis="x").points
expected_x = (points[1:] + points[:-1]) / 2
result = self.plugin.create_difference_cube(
self.cube, "projection_x_coordinate", diff_array
)
self.assertIsInstance(result, Cube)
self.assertArrayAlmostEqual(result.coord(axis="x").points, expected_x)
self.assertArrayEqual(result.data, diff_array)
def test_othercoords(self):
"""Test that other coords are transferred properly"""
time_coord = self.cube.coord("time")
proj_x_coord = self.cube.coord(axis="x")
result = self.plugin.create_difference_cube(
self.cube, "projection_y_coordinate", self.diff_in_y_array
)
self.assertEqual(result.coord(axis="x"), proj_x_coord)
self.assertEqual(result.coord("time"), time_coord)
class Test_calculate_difference(IrisTest):
"""Test the calculate_difference method."""
def setUp(self):
"""Set up cube."""
data = np.array([[1, 2, 3], [2, 4, 6], [5, 10, 15]])
self.cube = set_up_variable_cube(data, "wind_speed", "m s-1", "equalarea",)
self.plugin = DifferenceBetweenAdjacentGridSquares()
def test_x_dimension(self):
"""Test differences calculated along the x dimension."""
expected = np.array([[1, 1], [2, 2], [5, 5]])
result = self.plugin.calculate_difference(
self.cube, self.cube.coord(axis="x").name()
)
self.assertIsInstance(result, np.ndarray)
self.assertArrayEqual(result, expected)
def test_y_dimension(self):
"""Test differences calculated along the y dimension."""
expected = np.array([[1, 2, 3], [3, 6, 9]])
result = self.plugin.calculate_difference(
self.cube, self.cube.coord(axis="y").name()
)
self.assertIsInstance(result, np.ndarray)
self.assertArrayEqual(result, expected)
def test_missing_data(self):
"""Test that the result is as expected when data is missing."""
data = np.array([[1, 2, 3], [np.nan, 4, 6], [5, 10, 15]], dtype=np.float32)
self.cube.data = data
expected = np.array([[np.nan, 2, 3], [np.nan, 6, 9]])
result = self.plugin.calculate_difference(
self.cube, self.cube.coord(axis="y").name()
)
self.assertIsInstance(result, np.ndarray)
self.assertArrayAlmostEqual(result, expected)
def test_masked_data(self):
"""Test that the result is as expected when data is masked."""
data = ma.array(
[[1, 2, 3], [2, 4, 6], [5, 10, 15]], mask=[[0, 0, 0], [1, 0, 0], [0, 0, 0]]
)
self.cube.data = data
expected = ma.array([[1, 2, 3], [3, 6, 9]], mask=[[1, 0, 0], [1, 0, 0]])
result = self.plugin.calculate_difference(
self.cube, self.cube.coord(axis="y").name()
)
self.assertIsInstance(result, np.ndarray)
self.assertArrayEqual(result, expected)
self.assertArrayEqual(result.mask, expected.mask)
class Test_process(IrisTest):
"""Test the process method."""
def setUp(self):
"""Set up cube."""
data = np.array([[1, 2, 3], [2, 4, 6], [5, 10, 15]])
self.cube = set_up_variable_cube(
data, "wind_speed", "m s-1", "equalarea", realizations=np.array([1, 2]),
)
self.plugin = DifferenceBetweenAdjacentGridSquares()
def test_basic(self):
"""Test that differences are calculated along both the x and
y dimensions and returned as separate cubes."""
expected_x = np.array([[1, 1], [2, 2], [5, 5]])
expected_y = np.array([[1, 2, 3], [3, 6, 9]])
result = self.plugin.process(self.cube)
self.assertIsInstance(result[0], Cube)
self.assertArrayEqual(result[0].data, expected_x)
self.assertIsInstance(result[1], Cube)
self.assertArrayEqual(result[1].data, expected_y)
def test_metadata(self):
"""Test the resulting metadata is correct."""
cell_method_x = CellMethod(
"difference", coords=["projection_x_coordinate"], intervals="1 grid length"
)
cell_method_y = CellMethod(
"difference", coords=["projection_y_coordinate"], intervals="1 grid length"
)
result = self.plugin.process(self.cube)
for cube, cm in zip(result, [cell_method_x, cell_method_y]):
self.assertEqual(cube.cell_methods[0], cm)
self.assertEqual(
cube.attributes["form_of_difference"], "forward_difference"
)
self.assertEqual(cube.name(), "difference_of_wind_speed")
def test_3d_cube(self):
"""Test the differences are calculated along both the x and
y dimensions and returned as separate cubes when a 3d cube is input."""
data = np.array(
[[[1, 2, 3], [2, 4, 6], [5, 10, 15]], [[1, 2, 3], [2, 2, 6], [5, 10, 20]]]
)
expected_x = np.array([[[1, 1], [2, 2], [5, 5]], [[1, 1], [0, 4], [5, 10]]])
expected_y = np.array([[[1, 2, 3], [3, 6, 9]], [[1, 0, 3], [3, 8, 14]]])
cube = set_up_variable_cube(
data, "wind_speed", "m s-1", "equalarea", realizations=np.array([1, 2]),
)
result = self.plugin.process(cube)
self.assertIsInstance(result[0], iris.cube.Cube)
self.assertArrayEqual(result[0].data, expected_x)
self.assertIsInstance(result[1], iris.cube.Cube)
self.assertArrayEqual(result[1].data, expected_y)
if __name__ == "__main__":
unittest.main() | 0.88382 | 0.826572 |
from telegram import Update, ForceReply, ReplyKeyboardMarkup
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, ConversationHandler
import logging
from RoutePlanner import RoutePlanner
from WeatherForecaster import WeatherForecaster
from RouteWeatherEvaluator import RouteWeatherEvaluator
from pprint import pprint
START_LOCATION, TARGET_LOCATION = range(2)
logger = logging.getLogger(__name__)
class TelegramBot:
    def __init__(self, telegram_token, openrouteservice_apikey, openweathermap_apikey):
        """Wire up the service clients and register all Telegram handlers.

        Args:
            telegram_token: Bot token passed to the telegram Updater.
            openrouteservice_apikey: API key for the route planner.
            openweathermap_apikey: API key for the weather forecaster.
        """
        self.routeplanner = RoutePlanner(apikey=openrouteservice_apikey)
        self.weatherforecaster = WeatherForecaster(apikey=openweathermap_apikey)
        self.routeweatherevaluator = RouteWeatherEvaluator()
        # NOTE(review): conversation state is kept on the bot instance, so it is
        # shared across all chats — concurrent users would overwrite each other.
        # context.user_data would be the per-user alternative; confirm intent.
        self.start_location_coords = None
        self.target_location_coords = None
        # todo: scores = routeweatherevaluator.calculate_route_scores(waypoints, self.weatherforecaster)
        self.updater = Updater(telegram_token)
        self.dispatcher = self.updater.dispatcher
        self.dispatcher.add_handler(CommandHandler("start", self.start_handler))
        self.dispatcher.add_handler(CommandHandler("help", self.help_handler))
        # todo: folium map https://github.com/GIScience/openrouteservice-py/blob/master/examples/basic_example.ipynb
        # todo: weather conversation
        # Two-step conversation: /weather (or German /wetter) -> ask for start
        # location -> ask for target location; each step accepts either a geo
        # location or free text to geocode.
        routeweather_conversation_handler = ConversationHandler(
            entry_points=[
                CommandHandler('weather', self.routeweather_conversation_start_handler),
                CommandHandler('wetter', self.routeweather_conversation_start_handler),
            ],
            states={
                START_LOCATION: [
                    MessageHandler(Filters.location, self.routeweather_conversation_startloc_geo_handler),
                    MessageHandler(Filters.text & ~Filters.command, self.routeweather_conversation_startloc_str_handler),
                ],
                TARGET_LOCATION: [
                    MessageHandler(Filters.location, self.routeweather_conversation_targetloc_geo_handler),
                    MessageHandler(Filters.text & ~Filters.command, self.routeweather_conversation_targetloc_str_handler),
                ]
            },
            fallbacks=[CommandHandler('cancel', self.cancel)],
        )
        # todo: weather chat (/weather -> start -> end -> reply/picture)
        # Registration order matters: the conversation handler must be added
        # before the catch-all text handler so it gets first chance to match.
        self.dispatcher.add_handler(routeweather_conversation_handler)
        self.dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, self.message_handler))
def run_bot(self):
self.updater.start_polling()
self.updater.idle()
def start_handler(self, update: Update, context: CallbackContext):
"""Answer to /start"""
update.message.reply_text(
'Hi! My name is DroptopWeather Bot. I will tell you the best time to start your drive. '
'Send /weather to start my calculations.'
)
def help_handler(self, update: Update, context: CallbackContext):
"""Answer to /help"""
update.message.reply_text(
"Hi, I'm DroptopWeather Bot. I will tell you the best time to start your drive."
"Send /weather to start the conversation."
)
def routeweather_conversation_start_handler(self, update: Update, context: CallbackContext):
"""Answer to /start"""
update.message.reply_text(
"I will tell you the best time to start your drive. "
"Send /cancel to stop talking to me.\n\n"
"Where do you want do start your drive?"
)
return START_LOCATION
def routeweather_conversation_startloc_geo_handler(self, update: Update, context: CallbackContext):
location = update.message.location
logger.info("User sent the start location %f, %f", location.latitude, location.longitude)
self.start_location_coords = location.longitude, location.latitude
update.message.reply_text("Selected this location as starting point.")
update.message.reply_text("What is the target location of your drive?")
return TARGET_LOCATION
def routeweather_conversation_startloc_str_handler(self, update: Update, context: CallbackContext):
message_text = update.message.text
logger.info("User sent the start message %s", message_text)
geocode = self.routeplanner.location_to_coords(message_text)
geocode_label = geocode["label"]
self.start_location_coords = geocode["coords"]
if self.start_location_coords is not None:
update.message.reply_text(
"Selected \"{}\" as starting point.".format(geocode_label)
)
update.message.reply_text("What is the target location of your drive?")
return TARGET_LOCATION
else:
update.message.reply_text("Could not find a corresponding location. Please try another search term or try to send a geo location.")
return START_LOCATION
def routeweather_conversation_targetloc_geo_handler(self, update: Update, context: CallbackContext):
location = update.message.location
logger.info("User sent the target location %f, %f", location.latitude, location.longitude)
self.target_location_coords = location.longitude, location.latitude
update.message.reply_text("Selected this location as target.")
self.send_routeweather(update, context)
return ConversationHandler.END
def routeweather_conversation_targetloc_str_handler(self, update: Update, context: CallbackContext):
message_text = update.message.text
logger.info("User sent the target message %s", message_text)
geocode = self.routeplanner.location_to_coords(message_text)
geocode_label = geocode["label"]
self.target_location_coords = geocode["coords"]
if self.target_location_coords is not None:
update.message.reply_text(
"Selected \"{}\" as target.".format(geocode_label)
)
self.send_routeweather(update, context)
return ConversationHandler.END
else:
update.message.reply_text("Could not find a corresponding location. Please try another search term or try to send a geo location.")
return TARGET_LOCATION
def send_routeweather(self, update: Update, context: CallbackContext):
print("Send Routeweather")
print(self.start_location_coords)
print(self.target_location_coords)
distance_between_waypoints_m = 10000
number_of_waypoints = 5
route = self.routeplanner.calculate_route(start_coords=self.start_location_coords, end_coords=self.target_location_coords)
waypoints = self.routeplanner.waypoints = self.routeplanner.calculate_waypoints(route, distance=distance_between_waypoints_m, max_number=number_of_waypoints)
scores = self.routeweatherevaluator.calculate_route_scores(waypoints, self.weatherforecaster)
pprint(scores)
update.message.reply_text("Here are the Results.")
good_news = []
for starttime, waypoint_scores in scores.items():
avg_waypoint_score = sum(waypoint_scores)/len(waypoint_scores)
if avg_waypoint_score > 0:
good_news.append(starttime.strftime("%d.%m.%Y %H:%M Uhr") + " " + str(round(avg_waypoint_score, 1)))
if len(good_news):
for good_news_msg in good_news:
update.message.reply_text(good_news_msg)
else:
update.message.reply_text("No good timeframe found in the next 48 hours.")
def cancel(self, update: Update, context: CallbackContext):
"""Cancels the conversation."""
self.start_location_coords = None
self.target_location_coords = None
logger.info("conversation cancelled")
update.message.reply_text(
'Goodbye.'
)
return ConversationHandler.END
def message_handler(self, update: Update, context: CallbackContext):
"""Answer to every other message from user"""
update.message.reply_text("I did not understand. Please try again. \n\nYou can use the commad \"/help\" to learn how to use the bot.") | src/Bot.py | from telegram import Update, ForceReply, ReplyKeyboardMarkup
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext, ConversationHandler
import logging
from RoutePlanner import RoutePlanner
from WeatherForecaster import WeatherForecaster
from RouteWeatherEvaluator import RouteWeatherEvaluator
from pprint import pprint
START_LOCATION, TARGET_LOCATION = range(2)
logger = logging.getLogger(__name__)
class TelegramBot:
def __init__(self, telegram_token, openrouteservice_apikey, openweathermap_apikey):
self.routeplanner = RoutePlanner(apikey=openrouteservice_apikey)
self.weatherforecaster = WeatherForecaster(apikey=openweathermap_apikey)
self.routeweatherevaluator = RouteWeatherEvaluator()
self.start_location_coords = None
self.target_location_coords = None
# todo: scores = routeweatherevaluator.calculate_route_scores(waypoints, self.weatherforecaster)
self.updater = Updater(telegram_token)
self.dispatcher = self.updater.dispatcher
self.dispatcher.add_handler(CommandHandler("start", self.start_handler))
self.dispatcher.add_handler(CommandHandler("help", self.help_handler))
# todo: folium map https://github.com/GIScience/openrouteservice-py/blob/master/examples/basic_example.ipynb
# todo: weather conversation
routeweather_conversation_handler = ConversationHandler(
entry_points=[
CommandHandler('weather', self.routeweather_conversation_start_handler),
CommandHandler('wetter', self.routeweather_conversation_start_handler),
],
states={
START_LOCATION: [
MessageHandler(Filters.location, self.routeweather_conversation_startloc_geo_handler),
MessageHandler(Filters.text & ~Filters.command, self.routeweather_conversation_startloc_str_handler),
],
TARGET_LOCATION: [
MessageHandler(Filters.location, self.routeweather_conversation_targetloc_geo_handler),
MessageHandler(Filters.text & ~Filters.command, self.routeweather_conversation_targetloc_str_handler),
]
},
fallbacks=[CommandHandler('cancel', self.cancel)],
)
# todo: weather chat (/weather -> start -> end -> antwort/bild)
self.dispatcher.add_handler(routeweather_conversation_handler)
self.dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, self.message_handler))
def run_bot(self):
self.updater.start_polling()
self.updater.idle()
def start_handler(self, update: Update, context: CallbackContext):
"""Answer to /start"""
update.message.reply_text(
'Hi! My name is DroptopWeather Bot. I will tell you the best time to start your drive. '
'Send /weather to start my calculations.'
)
def help_handler(self, update: Update, context: CallbackContext):
"""Answer to /help"""
update.message.reply_text(
"Hi, I'm DroptopWeather Bot. I will tell you the best time to start your drive."
"Send /weather to start the conversation."
)
def routeweather_conversation_start_handler(self, update: Update, context: CallbackContext):
"""Answer to /start"""
update.message.reply_text(
"I will tell you the best time to start your drive. "
"Send /cancel to stop talking to me.\n\n"
"Where do you want do start your drive?"
)
return START_LOCATION
def routeweather_conversation_startloc_geo_handler(self, update: Update, context: CallbackContext):
location = update.message.location
logger.info("User sent the start location %f, %f", location.latitude, location.longitude)
self.start_location_coords = location.longitude, location.latitude
update.message.reply_text("Selected this location as starting point.")
update.message.reply_text("What is the target location of your drive?")
return TARGET_LOCATION
def routeweather_conversation_startloc_str_handler(self, update: Update, context: CallbackContext):
message_text = update.message.text
logger.info("User sent the start message %s", message_text)
geocode = self.routeplanner.location_to_coords(message_text)
geocode_label = geocode["label"]
self.start_location_coords = geocode["coords"]
if self.start_location_coords is not None:
update.message.reply_text(
"Selected \"{}\" as starting point.".format(geocode_label)
)
update.message.reply_text("What is the target location of your drive?")
return TARGET_LOCATION
else:
update.message.reply_text("Could not find a corresponding location. Please try another search term or try to send a geo location.")
return START_LOCATION
def routeweather_conversation_targetloc_geo_handler(self, update: Update, context: CallbackContext):
location = update.message.location
logger.info("User sent the target location %f, %f", location.latitude, location.longitude)
self.target_location_coords = location.longitude, location.latitude
update.message.reply_text("Selected this location as target.")
self.send_routeweather(update, context)
return ConversationHandler.END
def routeweather_conversation_targetloc_str_handler(self, update: Update, context: CallbackContext):
message_text = update.message.text
logger.info("User sent the target message %s", message_text)
geocode = self.routeplanner.location_to_coords(message_text)
geocode_label = geocode["label"]
self.target_location_coords = geocode["coords"]
if self.target_location_coords is not None:
update.message.reply_text(
"Selected \"{}\" as target.".format(geocode_label)
)
self.send_routeweather(update, context)
return ConversationHandler.END
else:
update.message.reply_text("Could not find a corresponding location. Please try another search term or try to send a geo location.")
return TARGET_LOCATION
def send_routeweather(self, update: Update, context: CallbackContext):
print("Send Routeweather")
print(self.start_location_coords)
print(self.target_location_coords)
distance_between_waypoints_m = 10000
number_of_waypoints = 5
route = self.routeplanner.calculate_route(start_coords=self.start_location_coords, end_coords=self.target_location_coords)
waypoints = self.routeplanner.waypoints = self.routeplanner.calculate_waypoints(route, distance=distance_between_waypoints_m, max_number=number_of_waypoints)
scores = self.routeweatherevaluator.calculate_route_scores(waypoints, self.weatherforecaster)
pprint(scores)
update.message.reply_text("Here are the Results.")
good_news = []
for starttime, waypoint_scores in scores.items():
avg_waypoint_score = sum(waypoint_scores)/len(waypoint_scores)
if avg_waypoint_score > 0:
good_news.append(starttime.strftime("%d.%m.%Y %H:%M Uhr") + " " + str(round(avg_waypoint_score, 1)))
if len(good_news):
for good_news_msg in good_news:
update.message.reply_text(good_news_msg)
else:
update.message.reply_text("No good timeframe found in the next 48 hours.")
def cancel(self, update: Update, context: CallbackContext):
"""Cancels the conversation."""
self.start_location_coords = None
self.target_location_coords = None
logger.info("conversation cancelled")
update.message.reply_text(
'Goodbye.'
)
return ConversationHandler.END
def message_handler(self, update: Update, context: CallbackContext):
"""Answer to every other message from user"""
update.message.reply_text("I did not understand. Please try again. \n\nYou can use the commad \"/help\" to learn how to use the bot.") | 0.436142 | 0.084191 |
from pandas import DataFrame
from .atr import atr
from ..overlap.hlc3 import hlc3
from ..overlap.sma import sma
from ..utils import get_offset, non_zero_range, verify_series
def aberration(high, low, close, length=None, atr_length=None, offset=None, **kwargs):
    """Indicator: Aberration (ABER)"""
    # Validate arguments
    high = verify_series(high)
    low = verify_series(low)
    close = verify_series(close)
    length = int(length) if length and length > 0 else 5
    atr_length = int(atr_length) if atr_length and atr_length > 0 else 15
    offset = get_offset(offset)

    # Calculate Result: midline (zg) is an SMA of the typical price (HLC3);
    # the bands (sg/xg) sit one ATR above/below it.
    atr_ = atr(high=high, low=low, close=close, length=atr_length)
    zg = sma(hlc3(high=high, low=low, close=close), length)
    sg = zg + atr_
    xg = zg - atr_

    # Offset
    if offset != 0:
        zg, sg, xg, atr_ = (s.shift(offset) for s in (zg, sg, xg, atr_))

    # Handle fills
    for series in (zg, sg, xg, atr_):
        if "fillna" in kwargs:
            series.fillna(kwargs["fillna"], inplace=True)
        if "fill_method" in kwargs:
            series.fillna(method=kwargs["fill_method"], inplace=True)

    # Name and Categorize it
    _props = f"_{length}_{atr_length}"
    for series, tag in zip((zg, sg, xg, atr_), ("ZG", "SG", "XG", "ATR")):
        series.name = f"ABER_{tag}{_props}"
        series.category = "volatility"

    # Prepare DataFrame to return (column order: ZG, SG, XG, ATR)
    aberdf = DataFrame({s.name: s for s in (zg, sg, xg, atr_)})
    aberdf.name = f"ABER{_props}"
    aberdf.category = "volatility"
    return aberdf
aberration.__doc__ = \
"""Aberration
A volatility indicator similar to Keltner Channels.
Sources:
Few internet resources on definitive definition.
Request by Github user homily, issue #46
Calculation:
Default Inputs:
length=5, atr_length=15
ATR = Average True Range
SMA = Simple Moving Average
ATR = ATR(length=atr_length)
JG = TP = HLC3(high, low, close)
ZG = SMA(JG, length)
SG = ZG + ATR
XG = ZG - ATR
Args:
high (pd.Series): Series of 'high's
low (pd.Series): Series of 'low's
close (pd.Series): Series of 'close's
length (int): The short period. Default: 5
    atr_length (int): The ATR period. Default: 15
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.DataFrame: zg, sg, xg, atr columns.
""" | pandas_ta/volatility/aberration.py | from pandas import DataFrame
from .atr import atr
from ..overlap.hlc3 import hlc3
from ..overlap.sma import sma
from ..utils import get_offset, non_zero_range, verify_series
def aberration(high, low, close, length=None, atr_length=None, offset=None, **kwargs):
"""Indicator: Aberration (ABER)"""
# Validate arguments
high = verify_series(high)
low = verify_series(low)
close = verify_series(close)
length = int(length) if length and length > 0 else 5
atr_length = int(atr_length) if atr_length and atr_length > 0 else 15
offset = get_offset(offset)
# Calculate Result
atr_ = atr(high=high, low=low, close=close, length=atr_length)
jg = hlc3(high=high, low=low, close=close)
zg = sma(jg, length)
sg = zg + atr_
xg = zg - atr_
# Offset
if offset != 0:
zg = zg.shift(offset)
sg = sg.shift(offset)
xg = xg.shift(offset)
atr_ = atr_.shift(offset)
# Handle fills
if "fillna" in kwargs:
zg.fillna(kwargs["fillna"], inplace=True)
sg.fillna(kwargs["fillna"], inplace=True)
xg.fillna(kwargs["fillna"], inplace=True)
atr_.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
zg.fillna(method=kwargs["fill_method"], inplace=True)
sg.fillna(method=kwargs["fill_method"], inplace=True)
xg.fillna(method=kwargs["fill_method"], inplace=True)
atr_.fillna(method=kwargs["fill_method"], inplace=True)
# Name and Categorize it
_props = f"_{length}_{atr_length}"
zg.name = f"ABER_ZG{_props}"
sg.name = f"ABER_SG{_props}"
xg.name = f"ABER_XG{_props}"
atr_.name = f"ABER_ATR{_props}"
zg.category = sg.category = "volatility"
xg.category = atr_.category = zg.category
# Prepare DataFrame to return
data = {
zg.name: zg,
sg.name: sg,
xg.name: xg,
atr_.name: atr_
}
aberdf = DataFrame(data)
aberdf.name = f"ABER{_props}"
aberdf.category = zg.category
return aberdf
aberration.__doc__ = \
"""Aberration
A volatility indicator similar to Keltner Channels.
Sources:
Few internet resources on definitive definition.
Request by Github user homily, issue #46
Calculation:
Default Inputs:
length=5, atr_length=15
ATR = Average True Range
SMA = Simple Moving Average
ATR = ATR(length=atr_length)
JG = TP = HLC3(high, low, close)
ZG = SMA(JG, length)
SG = ZG + ATR
XG = ZG - ATR
Args:
high (pd.Series): Series of 'high's
low (pd.Series): Series of 'low's
close (pd.Series): Series of 'close's
length (int): The short period. Default: 5
atr_length (int): The short period. Default: 15
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.DataFrame: zg, sg, xg, atr columns.
""" | 0.592667 | 0.385404 |
import os
import logging
import logging.config
from functools import wraps
def setup_logging(root: str) -> logging.Logger:
    """Configure logging and return the configured logger.

    DEBUG-level records go to the console only; INFO and above are also
    written to a rotating file under ``<root>/logs``; WARNING and above
    additionally trigger an email notification via SMTP.

    Args:
        root: Project root directory; the ``logs`` subdirectory is created
            there if it does not exist.

    Returns:
        The configured ``logging.Logger`` named ``'logger'``.
    """
    # makedirs(exist_ok=True) avoids the check-then-create race that the
    # previous os.path.exists()/os.mkdir() pair had.
    log_directory = os.path.join(root, 'logs')
    os.makedirs(log_directory, exist_ok=True)
    # Create a dictionary for the logging configuration.
    logging_config = {
        'version': 1,
        'disable_existing_loggers': True,
        'filters': {},
        'formatters': {
            'verbose': {
                # The two adjacent literals are implicitly concatenated; the
                # trailing space after 'at ' is the fix — previously the
                # output read "...%(module)s atlineno:...".
                'format': (
                    ' %(asctime)s - %(levelname)s - script name:%(module)s at '
                    'lineno:%(lineno)d - %(message)s'
                )
            },
        },
        'handlers': {
            'file_handler': {
                'level': 'INFO',
                'formatter': 'verbose',
                'class': 'logging.handlers.RotatingFileHandler',
                'filename': os.path.join(log_directory, 'log' + '.log'),
                'mode': 'a',
                'maxBytes': 10*1_024*1_024,  # rotate at 10 MiB
                'backupCount': 3,
            },
            'email_handler': {
                'level': 'WARNING',
                'formatter': 'verbose',
                'class': 'logging.handlers.SMTPHandler',
                'mailhost': 'mail.dnr.wa.gov',
                'fromaddr': '<EMAIL>',
                'toaddrs': ['<EMAIL>'],
                'subject': 'Web Scraping Script Update',
            },
            'console_handler': {
                'level': 'DEBUG',
                'formatter': 'verbose',
                'class': 'logging.StreamHandler'
            },
        },
        'loggers': {
            'logger': {
                'handlers': [
                    'file_handler',
                    'email_handler',
                    'console_handler',
                ],
                'level': 'DEBUG',
                'propagate': False,
            },
        },
    }
    # Pass config to the logger
    logging.config.dictConfig(logging_config)
    # Create and return the logging object
    logger = logging.getLogger('logger')
    return logger
def timer(logger):
    """Decorator factory: log how long the wrapped callable takes.

    Args:
        logger: logger used to report the elapsed time at INFO level.
    """
    def decorator(fn):
        # Imported here so the module has no hard dependency until used.
        from time import perf_counter
        @wraps(fn)
        def inner(*args, **kwargs):
            start = perf_counter()
            result = fn(*args, **kwargs)
            end = perf_counter()
            elapsed = end - start
            # Split elapsed seconds into hours / minutes / seconds.
            # NOTE(review): divmod on a float keeps h and m as floats, so the
            # message reads e.g. "0.0 hours, 1.0 minutes" — int() would be tidier.
            m, s = divmod(elapsed, 60)
            h, m = divmod(m, 60)
            message = (
                f'{fn.__name__} took {h} hours, {m} minutes,'
                f' {s} seconds to complete.'
            )
            logger.info(message)
            return result
        return inner
return decorator | utilities.py | import os
import logging
import logging.config
from functools import wraps
def setup_logging(root: str) -> object:
"""configure logging protocol.
Defines the configuration for the python logging objects. At level debug
it will only log to the console. At info level it will log to the
projects log file. At warning level an email notification is sent.
"""
# Create log directory if it does not exist
log_directory = os.path.join(root, 'logs')
if not os.path.exists(log_directory):
os.mkdir(log_directory)
# Create a dictionary for the logging configuration.
logging_config = {
'version': 1,
'disable_existing_loggers': True,
'filters': {},
'formatters': {
'verbose': {
'format': (
' %(asctime)s - %(levelname)s - script name:%(module)s at'
'lineno:%(lineno)d - %(message)s'
)
},
},
'handlers': {
'file_handler': {
'level': 'INFO',
'formatter': 'verbose',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(log_directory, 'log' + '.log'),
'mode': 'a',
'maxBytes': 10*1_024*1_024,
'backupCount': 3,
},
'email_handler': {
'level': 'WARNING',
'formatter': 'verbose',
'class': 'logging.handlers.SMTPHandler',
'mailhost': 'mail.dnr.wa.gov',
'fromaddr': '<EMAIL>',
'toaddrs': ['<EMAIL>'],
'subject': 'Web Scraping Script Update',
},
'console_handler': {
'level': 'DEBUG',
'formatter': 'verbose',
'class': 'logging.StreamHandler'
},
},
'loggers': {
'logger': {
'handlers': [
'file_handler',
'email_handler',
'console_handler',
],
'level': 'DEBUG',
'propagate': False,
},
},
}
# Pass config to the logger
logging.config.dictConfig(logging_config)
# Create and return the logging object
logger = logging.getLogger('logger')
return logger
def timer(logger):
"Simple decorator to time how long processes take"
def decorator(fn):
from time import perf_counter
@wraps(fn)
def inner(*args, **kwargs):
start = perf_counter()
result = fn(*args, **kwargs)
end = perf_counter()
elapsed = end - start
m, s = divmod(elapsed, 60)
h, m = divmod(m, 60)
message = (
f'{fn.__name__} took {h} hours, {m} minutes,'
f' {s} seconds to complete.'
)
logger.info(message)
return result
return inner
return decorator | 0.419172 | 0.115187 |
import unittest
from unittest import mock
import pytest
import string
import dbt.exceptions
import dbt.graph.selector as graph_selector
import dbt.graph.cli as graph_cli
from dbt.node_types import NodeType
import networkx as nx
def _get_graph():
    """Build a balanced binary tree relabelled with 'm.<package>.<letter>' ids."""
    tree = nx.balanced_tree(2, 2, nx.DiGraph())
    # Even integer nodes become package X, odd ones package Y.
    mapping = {}
    for index, letter in enumerate(string.ascii_lowercase):
        package = 'X' if index % 2 == 0 else 'Y'
        mapping[index] = 'm.' + package + '.' + letter
    # Edges: [(X.a, Y.b), (X.a, X.c), (Y.b, Y.d), (Y.b, X.e), (X.c, Y.f), (X.c, X.g)]
    return graph_selector.Graph(nx.relabel_nodes(tree, mapping))
def _get_manifest(graph):
    """Build a mock manifest with one mock model node per graph node."""
    # Explicit tag assignments for the seven tree nodes; everything else
    # keeps the default empty tag list.
    tag_map = {
        'm.X.a': ['abc'],
        'm.Y.b': ['abc', 'bcef'],
        'm.X.c': ['abc', 'bcef'],
        'm.Y.d': [],
        'm.X.e': ['efg', 'bcef'],
        'm.Y.f': ['efg', 'bcef'],
        'm.X.g': ['efg'],
    }
    nodes = {}
    for unique_id in graph:
        fqn = unique_id.split('.')
        nodes[unique_id] = mock.MagicMock(
            unique_id=unique_id,
            fqn=fqn,
            package_name=fqn[0],
            tags=[],
            resource_type=NodeType.Model,
            empty=False,
            config=mock.MagicMock(enabled=True),
        )
    for unique_id, tags in tag_map.items():
        nodes[unique_id].tags = tags
    return mock.MagicMock(nodes=nodes)
@pytest.fixture
def graph():
    """Graph fixture for selection tests.

    Fix: `_get_graph()` already returns a `graph_selector.Graph`, so the old
    `graph_selector.Graph(_get_graph())` double-wrapped it — unlike
    `test_run_specs`, which uses `_get_graph()` directly.
    """
    return _get_graph()
@pytest.fixture
def manifest(graph):
    """Mock manifest fixture derived from the graph fixture."""
    return _get_manifest(graph)
def id_macro(arg):
    """Render a parametrize id: strings pass through, iterables of strings
    are joined with '_', anything else is returned unchanged."""
    if isinstance(arg, str):
        return arg
    try:
        joined = '_'.join(arg)
    except TypeError:
        # Not iterable (or contains non-strings) — fall back to the raw value.
        return arg
    return joined
run_specs = [
# include by fqn
(['X.a'], [], {'m.X.a'}),
# include by tag
(['tag:abc'], [], {'m.X.a', 'm.Y.b', 'm.X.c'}),
# exclude by tag
(['*'], ['tag:abc'], {'m.Y.d', 'm.X.e', 'm.Y.f', 'm.X.g'}),
# tag + fqn
(['tag:abc', 'a'], [], {'m.X.a', 'm.Y.b', 'm.X.c'}),
(['tag:abc', 'd'], [], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.Y.d'}),
# multiple node selection across packages
(['X.a', 'b'], [], {'m.X.a', 'm.Y.b'}),
(['X.a+'], ['b'], {'m.X.a','m.X.c', 'm.Y.d','m.X.e','m.Y.f','m.X.g'}),
# children
(['X.c+'], [], {'m.X.c', 'm.Y.f', 'm.X.g'}),
(['X.a+1'], [], {'m.X.a', 'm.Y.b', 'm.X.c'}),
(['X.a+'], ['tag:efg'], {'m.X.a','m.Y.b','m.X.c', 'm.Y.d'}),
# parents
(['+Y.f'], [], {'m.X.c', 'm.Y.f', 'm.X.a'}),
(['1+Y.f'], [], {'m.X.c', 'm.Y.f'}),
# childrens parents
(['@X.c'], [], {'m.X.a', 'm.X.c', 'm.Y.f', 'm.X.g'}),
# multiple selection/exclusion
(['tag:abc', 'tag:bcef'], [], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.X.e', 'm.Y.f'}),
(['tag:abc', 'tag:bcef'], ['tag:efg'], {'m.X.a', 'm.Y.b', 'm.X.c'}),
(['tag:abc', 'tag:bcef'], ['tag:efg', 'a'], {'m.Y.b', 'm.X.c'}),
# intersections
(['a,a'], [], {'m.X.a'}),
(['+c,c+'], [], {'m.X.c'}),
(['a,b'], [], set()),
(['tag:abc,tag:bcef'], [], {'m.Y.b', 'm.X.c'}),
(['*,tag:abc,a'], [], {'m.X.a'}),
(['a,tag:abc,*'], [], {'m.X.a'}),
(['tag:abc,tag:bcef'], ['c'], {'m.Y.b'}),
(['tag:bcef,tag:efg'], ['tag:bcef,@b'], {'m.Y.f'}),
(['tag:bcef,tag:efg'], ['tag:bcef,@a'], set()),
(['*,@a,+b'], ['*,tag:abc,tag:bcef'], {'m.X.a'}),
(['tag:bcef,tag:efg', '*,tag:abc'], [], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.X.e', 'm.Y.f'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['e'], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.Y.f'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['e'], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.Y.f'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['e', 'f'], {'m.X.a', 'm.Y.b', 'm.X.c'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['tag:abc,tag:bcef'], {'m.X.a', 'm.X.e', 'm.Y.f'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['tag:abc,tag:bcef', 'tag:abc,a'], {'m.X.e', 'm.Y.f'})
]
@pytest.mark.parametrize('include,exclude,expected', run_specs, ids=id_macro)
def test_run_specs(include, exclude, expected):
    """End-to-end selection: parse include/exclude and check the node set."""
    graph = _get_graph()
    selector = graph_selector.NodeSelector(graph, _get_manifest(graph))
    spec = graph_cli.parse_difference(include, exclude)
    assert selector.select_nodes(spec) == expected
param_specs = [
('a', False, None, False, None, 'fqn', 'a', False),
('+a', True, None, False, None, 'fqn', 'a', False),
('256+a', True, 256, False, None, 'fqn', 'a', False),
('a+', False, None, True, None, 'fqn', 'a', False),
('a+256', False, None, True, 256, 'fqn', 'a', False),
('+a+', True, None, True, None, 'fqn', 'a', False),
('16+a+32', True, 16, True, 32, 'fqn', 'a', False),
('@a', False, None, False, None, 'fqn', 'a', True),
('a.b', False, None, False, None, 'fqn', 'a.b', False),
('+a.b', True, None, False, None, 'fqn', 'a.b', False),
('256+a.b', True, 256, False, None, 'fqn', 'a.b', False),
('a.b+', False, None, True, None, 'fqn', 'a.b', False),
('a.b+256', False, None, True, 256, 'fqn', 'a.b', False),
('+a.b+', True, None, True, None, 'fqn', 'a.b', False),
('16+a.b+32', True, 16, True, 32, 'fqn', 'a.b', False),
('@a.b', False, None, False, None, 'fqn', 'a.b', True),
('a.b.*', False, None, False, None, 'fqn', 'a.b.*', False),
('+a.b.*', True, None, False, None, 'fqn', 'a.b.*', False),
('256+a.b.*', True, 256, False, None, 'fqn', 'a.b.*', False),
('a.b.*+', False, None, True, None, 'fqn', 'a.b.*', False),
('a.b.*+256', False, None, True, 256, 'fqn', 'a.b.*', False),
('+a.b.*+', True, None, True, None, 'fqn', 'a.b.*', False),
('16+a.b.*+32', True, 16, True, 32, 'fqn', 'a.b.*', False),
('@a.b.*', False, None, False, None, 'fqn', 'a.b.*', True),
('tag:a', False, None, False, None, 'tag', 'a', False),
('+tag:a', True, None, False, None, 'tag', 'a', False),
('256+tag:a', True, 256, False, None, 'tag', 'a', False),
('tag:a+', False, None, True, None, 'tag', 'a', False),
('tag:a+256', False, None, True, 256, 'tag', 'a', False),
('+tag:a+', True, None, True, None, 'tag', 'a', False),
('16+tag:a+32', True, 16, True, 32, 'tag', 'a', False),
('@tag:a', False, None, False, None, 'tag', 'a', True),
('source:a', False, None, False, None, 'source', 'a', False),
('source:a+', False, None, True, None, 'source', 'a', False),
('source:a+1', False, None, True, 1, 'source', 'a', False),
('source:a+32', False, None, True, 32, 'source', 'a', False),
('@source:a', False, None, False, None, 'source', 'a', True),
]
@pytest.mark.parametrize(
    'spec,parents,parents_depth,children,children_depth,filter_type,filter_value,childrens_parents',
    param_specs,
    ids=id_macro
)
def test_parse_specs(spec, parents, parents_depth, children, children_depth, filter_type, filter_value, childrens_parents):
    """Each spec string parses into the expected SelectionCriteria fields."""
    parsed = graph_selector.SelectionCriteria.from_single_spec(spec)
    actual = (
        parsed.parents, parsed.parents_depth,
        parsed.children, parsed.children_depth,
        parsed.method, parsed.value, parsed.childrens_parents,
    )
    expected = (
        parents, parents_depth, children, children_depth,
        filter_type, filter_value, childrens_parents,
    )
    assert actual == expected
# Spec strings combining '@' (childrens' parents) with a trailing '+'
# (children): test_invalid_specs expects each of these to raise
# dbt.exceptions.RuntimeException when parsed.
invalid_specs = [
    '@a+',
    '@a.b+',
    '@a.b*+',
    '@tag:a+',
    '@source:a+',
]
@pytest.mark.parametrize('invalid', invalid_specs, ids=lambda k: str(k))
def test_invalid_specs(invalid):
with pytest.raises(dbt.exceptions.RuntimeException):
graph_selector.SelectionCriteria.from_single_spec(invalid) | test/unit/test_graph_selection.py | import unittest
from unittest import mock
import pytest
import string
import dbt.exceptions
import dbt.graph.selector as graph_selector
import dbt.graph.cli as graph_cli
from dbt.node_types import NodeType
import networkx as nx
def _get_graph():
integer_graph = nx.balanced_tree(2, 2, nx.DiGraph())
package_mapping = {
i: 'm.' + ('X' if i % 2 == 0 else 'Y') + '.' + letter
for (i, letter) in enumerate(string.ascii_lowercase)
}
# Edges: [(X.a, Y.b), (X.a, X.c), (Y.b, Y.d), (Y.b, X.e), (X.c, Y.f), (X.c, X.g)]
return graph_selector.Graph(nx.relabel_nodes(integer_graph, package_mapping))
def _get_manifest(graph):
nodes = {}
for unique_id in graph:
fqn = unique_id.split('.')
node = mock.MagicMock(
unique_id=unique_id,
fqn=fqn,
package_name=fqn[0],
tags=[],
resource_type=NodeType.Model,
empty=False,
config=mock.MagicMock(enabled=True),
)
nodes[unique_id] = node
nodes['m.X.a'].tags = ['abc']
nodes['m.Y.b'].tags = ['abc', 'bcef']
nodes['m.X.c'].tags = ['abc', 'bcef']
nodes['m.Y.d'].tags = []
nodes['m.X.e'].tags = ['efg', 'bcef']
nodes['m.Y.f'].tags = ['efg', 'bcef']
nodes['m.X.g'].tags = ['efg']
return mock.MagicMock(nodes=nodes)
@pytest.fixture
def graph():
return graph_selector.Graph(_get_graph())
@pytest.fixture
def manifest(graph):
return _get_manifest(graph)
def id_macro(arg):
if isinstance(arg, str):
return arg
try:
return '_'.join(arg)
except TypeError:
return arg
run_specs = [
# include by fqn
(['X.a'], [], {'m.X.a'}),
# include by tag
(['tag:abc'], [], {'m.X.a', 'm.Y.b', 'm.X.c'}),
# exclude by tag
(['*'], ['tag:abc'], {'m.Y.d', 'm.X.e', 'm.Y.f', 'm.X.g'}),
# tag + fqn
(['tag:abc', 'a'], [], {'m.X.a', 'm.Y.b', 'm.X.c'}),
(['tag:abc', 'd'], [], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.Y.d'}),
# multiple node selection across packages
(['X.a', 'b'], [], {'m.X.a', 'm.Y.b'}),
(['X.a+'], ['b'], {'m.X.a','m.X.c', 'm.Y.d','m.X.e','m.Y.f','m.X.g'}),
# children
(['X.c+'], [], {'m.X.c', 'm.Y.f', 'm.X.g'}),
(['X.a+1'], [], {'m.X.a', 'm.Y.b', 'm.X.c'}),
(['X.a+'], ['tag:efg'], {'m.X.a','m.Y.b','m.X.c', 'm.Y.d'}),
# parents
(['+Y.f'], [], {'m.X.c', 'm.Y.f', 'm.X.a'}),
(['1+Y.f'], [], {'m.X.c', 'm.Y.f'}),
# childrens parents
(['@X.c'], [], {'m.X.a', 'm.X.c', 'm.Y.f', 'm.X.g'}),
# multiple selection/exclusion
(['tag:abc', 'tag:bcef'], [], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.X.e', 'm.Y.f'}),
(['tag:abc', 'tag:bcef'], ['tag:efg'], {'m.X.a', 'm.Y.b', 'm.X.c'}),
(['tag:abc', 'tag:bcef'], ['tag:efg', 'a'], {'m.Y.b', 'm.X.c'}),
# intersections
(['a,a'], [], {'m.X.a'}),
(['+c,c+'], [], {'m.X.c'}),
(['a,b'], [], set()),
(['tag:abc,tag:bcef'], [], {'m.Y.b', 'm.X.c'}),
(['*,tag:abc,a'], [], {'m.X.a'}),
(['a,tag:abc,*'], [], {'m.X.a'}),
(['tag:abc,tag:bcef'], ['c'], {'m.Y.b'}),
(['tag:bcef,tag:efg'], ['tag:bcef,@b'], {'m.Y.f'}),
(['tag:bcef,tag:efg'], ['tag:bcef,@a'], set()),
(['*,@a,+b'], ['*,tag:abc,tag:bcef'], {'m.X.a'}),
(['tag:bcef,tag:efg', '*,tag:abc'], [], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.X.e', 'm.Y.f'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['e'], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.Y.f'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['e'], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.Y.f'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['e', 'f'], {'m.X.a', 'm.Y.b', 'm.X.c'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['tag:abc,tag:bcef'], {'m.X.a', 'm.X.e', 'm.Y.f'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['tag:abc,tag:bcef', 'tag:abc,a'], {'m.X.e', 'm.Y.f'})
]
@pytest.mark.parametrize('include,exclude,expected', run_specs, ids=id_macro)
def test_run_specs(include, exclude, expected):
    """Selecting with the given include/exclude specs yields *expected* nodes."""
    linker_graph = _get_graph()
    node_selector = graph_selector.NodeSelector(linker_graph, _get_manifest(linker_graph))
    difference_spec = graph_cli.parse_difference(include, exclude)
    assert node_selector.select_nodes(difference_spec) == expected
# Parametrized cases for test_parse_specs. Each tuple is
# (spec, parents, parents_depth, children, children_depth,
#  filter_type, filter_value, childrens_parents) -- i.e. the raw selector
# string followed by every field expected on the parsed SelectionCriteria.
param_specs = [
    ('a', False, None, False, None, 'fqn', 'a', False),
    ('+a', True, None, False, None, 'fqn', 'a', False),
    ('256+a', True, 256, False, None, 'fqn', 'a', False),
    ('a+', False, None, True, None, 'fqn', 'a', False),
    ('a+256', False, None, True, 256, 'fqn', 'a', False),
    ('+a+', True, None, True, None, 'fqn', 'a', False),
    ('16+a+32', True, 16, True, 32, 'fqn', 'a', False),
    ('@a', False, None, False, None, 'fqn', 'a', True),
    ('a.b', False, None, False, None, 'fqn', 'a.b', False),
    ('+a.b', True, None, False, None, 'fqn', 'a.b', False),
    ('256+a.b', True, 256, False, None, 'fqn', 'a.b', False),
    ('a.b+', False, None, True, None, 'fqn', 'a.b', False),
    ('a.b+256', False, None, True, 256, 'fqn', 'a.b', False),
    ('+a.b+', True, None, True, None, 'fqn', 'a.b', False),
    ('16+a.b+32', True, 16, True, 32, 'fqn', 'a.b', False),
    ('@a.b', False, None, False, None, 'fqn', 'a.b', True),
    ('a.b.*', False, None, False, None, 'fqn', 'a.b.*', False),
    ('+a.b.*', True, None, False, None, 'fqn', 'a.b.*', False),
    ('256+a.b.*', True, 256, False, None, 'fqn', 'a.b.*', False),
    ('a.b.*+', False, None, True, None, 'fqn', 'a.b.*', False),
    ('a.b.*+256', False, None, True, 256, 'fqn', 'a.b.*', False),
    ('+a.b.*+', True, None, True, None, 'fqn', 'a.b.*', False),
    ('16+a.b.*+32', True, 16, True, 32, 'fqn', 'a.b.*', False),
    ('@a.b.*', False, None, False, None, 'fqn', 'a.b.*', True),
    ('tag:a', False, None, False, None, 'tag', 'a', False),
    ('+tag:a', True, None, False, None, 'tag', 'a', False),
    ('256+tag:a', True, 256, False, None, 'tag', 'a', False),
    ('tag:a+', False, None, True, None, 'tag', 'a', False),
    ('tag:a+256', False, None, True, 256, 'tag', 'a', False),
    ('+tag:a+', True, None, True, None, 'tag', 'a', False),
    ('16+tag:a+32', True, 16, True, 32, 'tag', 'a', False),
    ('@tag:a', False, None, False, None, 'tag', 'a', True),
    ('source:a', False, None, False, None, 'source', 'a', False),
    ('source:a+', False, None, True, None, 'source', 'a', False),
    ('source:a+1', False, None, True, 1, 'source', 'a', False),
    ('source:a+32', False, None, True, 32, 'source', 'a', False),
    ('@source:a', False, None, False, None, 'source', 'a', True),
]
@pytest.mark.parametrize(
    'spec,parents,parents_depth,children,children_depth,filter_type,filter_value,childrens_parents',
    param_specs,
    ids=id_macro
)
def test_parse_specs(spec, parents, parents_depth, children, children_depth, filter_type, filter_value, childrens_parents):
    """Parsing a single selector string fills every SelectionCriteria field."""
    criteria = graph_selector.SelectionCriteria.from_single_spec(spec)
    checks = [
        (criteria.parents, parents),
        (criteria.parents_depth, parents_depth),
        (criteria.children, children),
        (criteria.children_depth, children_depth),
        (criteria.method, filter_type),
        (criteria.value, filter_value),
        (criteria.childrens_parents, childrens_parents),
    ]
    for actual, expected in checks:
        assert actual == expected
# Selector strings that must be rejected at parse time (see test_invalid_specs):
# '@' (children's parents) combined with a trailing '+' suffix.
invalid_specs = [
    '@a+',
    '@a.b+',
    '@a.b*+',
    '@tag:a+',
    '@source:a+',
]
@pytest.mark.parametrize('invalid', invalid_specs, ids=lambda k: str(k))
def test_invalid_specs(invalid):
    """Malformed selector strings raise RuntimeException while parsing."""
    parse_spec = graph_selector.SelectionCriteria.from_single_spec
    with pytest.raises(dbt.exceptions.RuntimeException):
        parse_spec(invalid)
import distiller
from .ranked_structures_pruner import *
class SensitivityPruner(object):
    """Magnitude pruner whose threshold is sensitivity * std(layer weights).

    Implements the criterion from Han et al., "Learning both Weights and
    Connections for Efficient Neural Networks"
    (https://arxiv.org/pdf/1506.02626v3.pdf): a weight is pruned when its
    magnitude falls below a quality parameter -- called "sensitivity" here,
    taken from a prior sensitivity analysis -- multiplied by the standard
    deviation of the layer's weights.

    Unlike the growing threshold Song Han describes in his PhD dissertation
    (which needs n+1 hyper-parameters for n pruning iterations), the threshold
    here is set only once: as pruning pulls more weights toward zero, a fixed
    threshold naturally "traps" more of them, achieving the same result with
    fewer hyper-parameters.
    """

    def __init__(self, name, sensitivities, **kwargs):
        # 'sensitivities' maps parameter names (or the '*' wildcard) to the
        # per-layer quality parameter.
        self.name = name
        self.sensitivities = sensitivities

    def set_param_mask(self, param, param_name, zeros_mask_dict, meta):
        """Install a sensitivity-based mask for *param_name*, if configured."""
        try:
            sensitivity = self.sensitivities[param_name]
        except KeyError:
            if '*' not in self.sensitivities:
                # No explicit entry and no wildcard: leave this param untouched.
                return
            sensitivity = self.sensitivities['*']
        zeros_mask_dict[param_name].mask = distiller.create_mask_sensitivity_criterion(param, sensitivity)
class L1RankedStructureParameterPruner_SP(SensitivityPruner):
    """Sensitivity-driven pruner backed by an L1 ranked-structure pruner.

    NOTE(review): ``self.pruner`` is constructed here but ``set_param_mask``
    is not overridden to delegate to it in this class -- confirm the
    base-class masking is the intended behavior.
    """

    def __init__(self, name, sensitivities, group_type, weights, group_dependency=None, kwargs=None):
        super().__init__(name, sensitivities)
        # desired_sparsity=0: the wrapped pruner's own sparsity target is unused.
        self.pruner = L1RankedStructureParameterPruner(
            name,
            group_type,
            desired_sparsity=0,
            weights=weights,
            group_dependency=group_dependency,
            kwargs=kwargs,
        )
import distiller
from .ranked_structures_pruner import *
class SensitivityPruner(object):
    """Use algorithm from "Learning both Weights and Connections for Efficient
    Neural Networks" - https://arxiv.org/pdf/1506.02626v3.pdf
    I.e.: "The pruning threshold is chosen as a quality parameter multiplied
    by the standard deviation of a layers weights."
    In this code, the "quality parameter" is referred to as "sensitivity" and
    is based on the values learned from performing sensitivity analysis.
    Note that this implementation deviates slightly from the algorithm Song Han
    describes in his PhD dissertation, in that the threshold value is set only
    once. In his PhD dissertation, Song Han describes a growing threshold, at
    each iteration. This requires n+1 hyper-parameters (n being the number of
    pruning iterations we use): the threshold and the threshold increase (delta)
    at each pruning iteration.
    The implementation that follows, takes advantage of the fact that as pruning
    progresses, more weights are pulled toward zero, and therefore the threshold
    "traps" more weights. Thus, we can use less hyper-parameters and achieve the
    same results.
    """
    def __init__(self, name, sensitivities, **kwargs):
        # sensitivities: mapping of parameter name (or the '*' wildcard) to the
        # quality parameter scaling the per-layer weight std-dev threshold.
        self.name = name
        self.sensitivities = sensitivities
    def set_param_mask(self, param, param_name, zeros_mask_dict, meta):
        # Resolve the sensitivity for this parameter: explicit entry first,
        # then the '*' wildcard; do nothing when neither is configured.
        if param_name not in self.sensitivities:
            if '*' not in self.sensitivities:
                return
            else:
                sensitivity = self.sensitivities['*']
        else:
            sensitivity = self.sensitivities[param_name]
        zeros_mask_dict[param_name].mask = distiller.create_mask_sensitivity_criterion(param, sensitivity)
class L1RankedStructureParameterPruner_SP(SensitivityPruner):
    """Sensitivity pruner paired with an L1 ranked-structure pruner instance."""
    def __init__(self, name, sensitivities, group_type, weights, group_dependency=None, kwargs=None):
        super().__init__(name, sensitivities)
        # desired_sparsity=0: the wrapped pruner's own sparsity target is unused here.
        self.pruner = L1RankedStructureParameterPruner(name, group_type, desired_sparsity=0, weights=weights,
                                                      group_dependency=group_dependency, kwargs=kwargs)
import os
from rlbot.agents.base_agent import BOT_NAME_KEY, BOT_CONFIG_LOADOUT_HEADER, BOT_CONFIG_MODULE_HEADER
from rlbot.gui.presets import AgentPreset, LoadoutPreset
from rlbot.parsing.agent_config_parser import PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_CONFIG_KEY, \
PARTICIPANT_BOT_SKILL_KEY, PARTICIPANT_TYPE_KEY, PARTICIPANT_TEAM, PARTICIPANT_LOADOUT_CONFIG_KEY, BotConfigBundle
class GUIAgent:
    """
    GUI-side wrapper for one bot entry: bundles its loadout preset, agent
    preset and the per-bot values stored in the shared overall config.
    """
    # Shared config object; assigned externally before the config-backed
    # accessors below are used.
    overall_config = None

    def __init__(self, overall_index: int, loadout_preset: LoadoutPreset=None, agent_preset: AgentPreset=None):
        self.overall_index = overall_index
        self.loadout_preset = loadout_preset
        self.agent_preset = agent_preset
        # Seed the in-game name from the loadout preset when one is supplied.
        self.ingame_name = None
        if loadout_preset is not None:
            self.ingame_name = loadout_preset.config.get(BOT_CONFIG_LOADOUT_HEADER, BOT_NAME_KEY)

    # Below here the getters and setters
    def get_configs(self):
        """
        :return: overall index, agent config, loadout config in that order
        """
        loadout_copy = self.loadout_preset.config.copy()
        preset_path = self.agent_preset.config_path
        # bundle_dir stays None when the preset was never saved to disk.
        bundle_dir = os.path.dirname(preset_path) if preset_path is not None else None
        agent_config = self.agent_preset.config.copy()
        agent_config.set_value(BOT_CONFIG_MODULE_HEADER, BOT_NAME_KEY, self.ingame_name)
        return self.overall_index, BotConfigBundle(bundle_dir, agent_config), loadout_copy

    def set_name(self, name):
        self.ingame_name = name

    def get_name(self):
        return self.ingame_name

    def set_loadout_preset(self, loadout_preset: LoadoutPreset):
        # Keep the overall config's loadout path in sync with the new preset.
        self.loadout_preset = loadout_preset
        self.set_loadout_config_path(loadout_preset.config_path)

    def get_loadout_preset(self):
        return self.loadout_preset

    def set_agent_preset(self, agent_preset: AgentPreset):
        # Keep the overall config's agent path in sync with the new preset.
        self.agent_preset = agent_preset
        self.set_agent_config_path(agent_preset.config_path)

    def get_agent_preset(self):
        return self.agent_preset

    def get_agent_config_path(self):
        raw_path = self.overall_config.get(
            PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_CONFIG_KEY, self.overall_index)
        return os.path.realpath(raw_path)

    def set_agent_config_path(self, config_path: str):
        self.overall_config.set_value(
            PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_CONFIG_KEY, config_path, self.overall_index)

    def get_loadout_config_path(self):
        raw_path = self.overall_config.get(
            PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_LOADOUT_CONFIG_KEY, self.overall_index)
        return os.path.realpath(raw_path)

    def set_loadout_config_path(self, config_path: str):
        self.overall_config.set_value(
            PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_LOADOUT_CONFIG_KEY, config_path, self.overall_index)

    def get_participant_type(self):
        return self.overall_config.get(
            PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_TYPE_KEY, self.overall_index)

    def set_participant_type(self, participant_type: str):
        return self.overall_config.set_value(
            PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_TYPE_KEY, participant_type, self.overall_index)

    def get_bot_skill(self):
        return self.overall_config.getfloat(
            PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_BOT_SKILL_KEY, self.overall_index)

    def set_bot_skill(self, bot_skill: float):
        return self.overall_config.set_value(
            PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_BOT_SKILL_KEY, bot_skill, self.overall_index)

    def get_team(self):
        return self.overall_config.getint(
            PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_TEAM, self.overall_index)

    def set_team(self, team_i: int):
        return self.overall_config.set_value(
            PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_TEAM, team_i, self.overall_index)
from rlbot.agents.base_agent import BOT_NAME_KEY, BOT_CONFIG_LOADOUT_HEADER, BOT_CONFIG_MODULE_HEADER
from rlbot.gui.presets import AgentPreset, LoadoutPreset
from rlbot.parsing.agent_config_parser import PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_CONFIG_KEY, \
PARTICIPANT_BOT_SKILL_KEY, PARTICIPANT_TYPE_KEY, PARTICIPANT_TEAM, PARTICIPANT_LOADOUT_CONFIG_KEY, BotConfigBundle
class GUIAgent:
    """
    Holds all info for an agent stored in the GUI, also contains some methods to set values more easily
    """
    # Shared overall config; expected to be assigned externally before any of
    # the overall_config-backed accessors below are used.
    overall_config = None
    def __init__(self, overall_index: int, loadout_preset: LoadoutPreset=None, agent_preset: AgentPreset=None):
        self.overall_index = overall_index
        self.loadout_preset = loadout_preset
        self.agent_preset = agent_preset
        # Seed the in-game name from the loadout preset when one is supplied.
        if loadout_preset is not None:
            self.ingame_name = loadout_preset.config.get(BOT_CONFIG_LOADOUT_HEADER, BOT_NAME_KEY)
        else:
            self.ingame_name = None
    # Below here the getters and setters
    def get_configs(self):
        """
        :return: overall index, agent config, loadout config in that order
        """
        loadout_config = self.loadout_preset.config.copy()
        config_path = None
        if self.agent_preset.config_path is not None: # Might be none if preset was never saved to disk.
            config_path = os.path.dirname(self.agent_preset.config_path)
        # Copy the preset config so the stored preset is not mutated by the
        # name override below.
        config = self.agent_preset.config.copy()
        config.set_value(BOT_CONFIG_MODULE_HEADER, BOT_NAME_KEY, self.ingame_name)
        config_bundle = BotConfigBundle(config_path, config)
        return self.overall_index, config_bundle, loadout_config
    def set_name(self, name):
        self.ingame_name = name
    def get_name(self):
        return self.ingame_name
    def set_loadout_preset(self, loadout_preset: LoadoutPreset):
        # Also syncs the loadout path stored in the overall config.
        self.loadout_preset = loadout_preset
        self.set_loadout_config_path(loadout_preset.config_path)
    def get_loadout_preset(self):
        return self.loadout_preset
    def set_agent_preset(self, agent_preset: AgentPreset):
        # Also syncs the agent path stored in the overall config.
        self.agent_preset = agent_preset
        self.set_agent_config_path(agent_preset.config_path)
    def get_agent_preset(self):
        return self.agent_preset
    def get_agent_config_path(self):
        return os.path.realpath(self.overall_config.get(PARTICIPANT_CONFIGURATION_HEADER,
                                                        PARTICIPANT_CONFIG_KEY, self.overall_index))
    def set_agent_config_path(self, config_path: str):
        self.overall_config.set_value(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_CONFIG_KEY,
                                      config_path, self.overall_index)
    def get_loadout_config_path(self):
        return os.path.realpath(self.overall_config.get(PARTICIPANT_CONFIGURATION_HEADER,
                                                        PARTICIPANT_LOADOUT_CONFIG_KEY, self.overall_index))
    def set_loadout_config_path(self, config_path: str):
        self.overall_config.set_value(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_LOADOUT_CONFIG_KEY,
                                      config_path, self.overall_index)
    def get_participant_type(self):
        return self.overall_config.get(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_TYPE_KEY, self.overall_index)
    def set_participant_type(self, participant_type: str):
        return self.overall_config.set_value(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_TYPE_KEY,
                                             participant_type, self.overall_index)
    def get_bot_skill(self):
        return self.overall_config.getfloat(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_BOT_SKILL_KEY,
                                            self.overall_index)
    def set_bot_skill(self, bot_skill: float):
        return self.overall_config.set_value(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_BOT_SKILL_KEY,
                                             bot_skill, self.overall_index)
    def get_team(self):
        return self.overall_config.getint(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_TEAM, self.overall_index)
    def set_team(self, team_i: int):
        return self.overall_config.set_value(PARTICIPANT_CONFIGURATION_HEADER, PARTICIPANT_TEAM, team_i,
                                             self.overall_index)
import json
import logging
import boto3
from boto3.dynamodb.conditions import Key
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Queue that buffers invitation events produced elsewhere in the system.
q_name = "travel-invitation-mq"
user_table = boto3.resource('dynamodb').Table('userTable')
# NOTE(review): the sender address is a redacted placeholder and authUrl points
# at a localhost dev front-end -- both look like values that belong in config.
sender_email="<EMAIL>"
authUrl="http://localhost:8080/#/accept/schedule/"
def get_target_user(user_id):
    """Look up one user row by primary key.

    :return: (True, raw DynamoDB response) when the row exists and is
        non-empty, otherwise (False, API-Gateway-style error dict).
    """
    try:
        resp = user_table.get_item(Key={"userId": user_id})
        if "Item" in resp and len(resp["Item"]) != 0:
            return True, resp
        return False, {
            'statusCode': 400,
            'body': json.dumps({"code": 400, "msg": "user can not be revised"}),
        }
    except Exception:
        # Any DynamoDB/shape failure is reported as an unknown user id.
        return False, {
            'statusCode': 400,
            'body': json.dumps({"code": 400, "msg": "user id doesn't exist"}),
        }
def get_target_user_by_email(user_email):
    """Find users whose userEmail equals *user_email*.

    NOTE(review): scan() reads the whole table; a GSI on userEmail would
    allow a query instead -- worth confirming table size.
    :return: (True, response) when at least one row matched, otherwise
        (False, API-Gateway-style error dict).
    """
    try:
        resp = user_table.scan(FilterExpression=Key("userEmail").eq(user_email))
        if "Items" in resp and len(resp["Items"]) != 0:
            return True, resp
        return False, {
            'statusCode': 400,
            'body': json.dumps({"code": 400, "msg": "user can not be revised"}),
        }
    except Exception:
        return False, {
            'statusCode': 400,
            'body': json.dumps({"code": 400, "msg": "user email doesn't exist"}),
        }
def pollSQS():
    """Fetch at most one message from the invitation queue; None when empty."""
    queue = boto3.resource('sqs').get_queue_by_name(QueueName=q_name)
    messages = queue.receive_messages(MaxNumberOfMessages=1)
    if messages:
        return messages
    logger.info("no message")
    return None
def sendSMS(owner_user_name, invited_user_id, invited_user_email, schedule_id):
    """Send the invitation to *invited_user_email* through SES.

    NOTE(review): despite the name, this sends an email (SES), not an SMS.
    Returns an API-Gateway-style 200 response dict.
    """
    accept_link = authUrl + "?editorId=" + invited_user_id + "&scheduleId=" + schedule_id
    msg_text = (
        "Dear guest! \nYour friend %s send you an invitation.\n" % owner_user_name
        + "The schedule id for you to view is: %s\n\n" % schedule_id
        + "Please click here to accept the invitation:\n" + accept_link
    )
    logger.debug("SMS message:\n" + msg_text)
    ses_client = boto3.client('ses', region_name="us-east-1")
    ses_client.send_email(
        Destination={'ToAddresses': [invited_user_email]},
        Message={
            'Body': {'Text': {'Charset': "UTF-8", 'Data': msg_text}},
            'Subject': {'Charset': "UTF-8", 'Data': "Your invitation to a schedule."},
        },
        Source=sender_email,
    )
    return {
        "statusCode": 200,
        'headers': {
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Methods": "OPTIONS,POST,GET,PUT",
            "Access-Control-Allow-Headers": "Content-Type, Access-Control-Allow-Headers, Authorization, X-Requested-With"
        },
        # typo preserved deliberately: callers may match on this exact string
        'body': "add visitor info and send SMS succefully!",
    }
def lambda_handler(event, context):
    """Lambda entry point: drain one invitation message from SQS and email the
    invited user.

    Flow: poll queue -> resolve invited user by email and owner by id ->
    send the invitation email. Returns an API-Gateway-style response dict.
    """
    logger.debug(json.dumps(event, indent=2))
    SQS_response = pollSQS()
    if SQS_response is None:
        return {
            'statusCode': 400,
            'headers': {
                "Access-Control-Allow-Origin": "*",
                "Access-Control-Allow-Methods": "OPTIONS,POST,GET,PUT",
                "Access-Control-Allow-Headers": "Content-Type, Access-Control-Allow-Headers, Authorization, X-Requested-With"
            },
            'body': json.dumps({
                "code": 400,
                "msg": "SQS empty"
            })
        }
    for SQS_r in SQS_response:
        r_data = json.loads(SQS_r.body)
        # NOTE(review): the message is deleted before the email is sent, so a
        # failure below loses the invitation (at-most-once) -- confirm intended.
        SQS_r.delete()
        logger.debug("sending data: " + json.dumps(r_data))
        owner_user_id = r_data["ownerUserId"]
        invited_user_email = r_data["invitedUserEmail"]
        schedule_id = r_data["scheduleId"]
        succ1, response_invited = get_target_user_by_email(invited_user_email)
        if not succ1:
            # NOTE(review): lookup error responses carry no CORS headers,
            # unlike every other return path -- verify against the front-end.
            return response_invited
        invited_user_id = response_invited["Items"][0]["userId"]
        succ2, response_owner = get_target_user(owner_user_id)
        if not succ2:
            return response_owner
        owner_user_name = response_owner["Item"]["userName"]
        logger.debug("sending data to email: " + invited_user_email)
        # owner_user_email = response_owner["Item"]["userEmail"]
        sendSMS(owner_user_name, invited_user_id, invited_user_email, schedule_id)
    return {
        'statusCode': 200,
        'headers': {
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Methods": "OPTIONS,POST,GET,PUT",
            "Access-Control-Allow-Headers": "Content-Type, Access-Control-Allow-Headers, Authorization, X-Requested-With"
        },
        'body': json.dumps({
            "code": 200,
            "msg": "send email successfully"
        })
    }
import logging
import boto3
from boto3.dynamodb.conditions import Key
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Queue that buffers invitation events produced elsewhere in the system.
q_name = "travel-invitation-mq"
user_table = boto3.resource('dynamodb').Table('userTable')
# NOTE(review): placeholder sender address and localhost front-end URL --
# both look like values that belong in configuration.
sender_email="<EMAIL>"
authUrl="http://localhost:8080/#/accept/schedule/"
def get_target_user(user_id):
    """Look up one user row by primary key.

    Returns (True, raw DynamoDB response) when the row exists and is
    non-empty, otherwise (False, API-Gateway-style error dict).
    """
    try:
        response = user_table.get_item(
            Key={
                "userId": user_id
            }
        )
        if "Item" in response and len(response["Item"]) != 0:
            # logger.debug(json.dumps(response, indent=2))
            return True, response
        return False, {
            'statusCode': 400,
            'body': json.dumps({
                "code": 400,
                "msg": "user can not be revised"
            })
        }
    except Exception as e:
        # Any DynamoDB/shape failure is reported as an unknown user id.
        return False, {
            'statusCode': 400,
            'body': json.dumps({
                "code": 400,
                "msg": "user id doesn't exist"
            })
        }
def get_target_user_by_email(user_email):
    """Find users whose userEmail equals *user_email* via a full-table scan.

    NOTE(review): scan() reads the whole table; a GSI on userEmail would allow
    a query instead -- confirm table size. Returns (True, response) when at
    least one row matched, else (False, error dict).
    """
    try:
        response = user_table.scan(
            FilterExpression=Key("userEmail").eq(user_email)
        )
        if "Items" in response and len(response["Items"]) != 0:
            # logger.debug(json.dumps(response, indent=2))
            return True, response
        return False, {
            'statusCode': 400,
            'body': json.dumps({
                "code": 400,
                "msg": "user can not be revised"
            })
        }
    except Exception as e:
        return False, {
            'statusCode': 400,
            'body': json.dumps({
                "code": 400,
                "msg": "user email doesn't exist"
            })
        }
def pollSQS():
    """Fetch at most one message from the invitation queue; None when empty."""
    sqs = boto3.resource('sqs')
    queue = sqs.get_queue_by_name(QueueName=q_name)
    response = queue.receive_messages(MaxNumberOfMessages=1)
    if len(response) == 0:
        logger.info("no message")
        return None
    return response
def sendSMS(owner_user_name, invited_user_id, invited_user_email, schedule_id):
    """Send the invitation to *invited_user_email* through SES.

    NOTE(review): despite the name this sends an email, not an SMS.
    Returns an API-Gateway-style 200 response dict.
    """
    subject="Your invitation to a schedule."
    charset="UTF-8"
    msg_text = "Dear guest! \nYour friend %s send you an invitation.\n" % owner_user_name
    msg_text += "The schedule id for you to view is: %s\n\n" % schedule_id
    msg_text += "Please click here to accept the invitation:\n" + authUrl+"?editorId="+invited_user_id+"&scheduleId="+schedule_id
    logger.debug("SMS message:\n" + msg_text)
    client=boto3.client('ses',region_name="us-east-1")
    response = client.send_email(
        Destination={
            'ToAddresses': [
                invited_user_email,
            ],
        },
        Message={
            'Body': {
                'Text': {
                    'Charset': charset,
                    'Data': msg_text,
                },
            },
            'Subject': {
                'Charset': charset,
                'Data': subject,
            },
        },
        Source=sender_email
    )
    return {"statusCode": 200,
        'headers': {
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Methods": "OPTIONS,POST,GET,PUT",
            "Access-Control-Allow-Headers": "Content-Type, Access-Control-Allow-Headers, Authorization, X-Requested-With"
        },
        'body': "add visitor info and send SMS succefully!",
    }
def lambda_handler(event, context):
    """Lambda entry point: drain one invitation message from SQS and email the
    invited user; returns an API-Gateway-style response dict.
    """
    logger.debug(json.dumps(event, indent=2))
    SQS_response = pollSQS()
    if SQS_response is None:
        return {
            'statusCode': 400,
            'headers': {
                "Access-Control-Allow-Origin": "*",
                "Access-Control-Allow-Methods": "OPTIONS,POST,GET,PUT",
                "Access-Control-Allow-Headers": "Content-Type, Access-Control-Allow-Headers, Authorization, X-Requested-With"
            },
            'body': json.dumps({
                "code": 400,
                "msg": "SQS empty"
            })
        }
    for SQS_r in SQS_response:
        r_data = json.loads(SQS_r.body)
        # NOTE(review): the message is deleted before the email is sent, so a
        # failure below loses the invitation (at-most-once) -- confirm intended.
        SQS_r.delete()
        logger.debug("sending data: " + json.dumps(r_data))
        owner_user_id = r_data["ownerUserId"]
        invited_user_email = r_data["invitedUserEmail"]
        schedule_id = r_data["scheduleId"]
        succ1, response_invited = get_target_user_by_email(invited_user_email)
        if not succ1:
            # NOTE(review): lookup error responses carry no CORS headers,
            # unlike every other return path -- verify against the front-end.
            return response_invited
        invited_user_id = response_invited["Items"][0]["userId"]
        succ2, response_owner = get_target_user(owner_user_id)
        if not succ2:
            return response_owner
        owner_user_name = response_owner["Item"]["userName"]
        logger.debug("sending data to email: " + invited_user_email)
        # owner_user_email = response_owner["Item"]["userEmail"]
        sendSMS(owner_user_name, invited_user_id, invited_user_email, schedule_id)
    return {
        'statusCode': 200,
        'headers': {
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Methods": "OPTIONS,POST,GET,PUT",
            "Access-Control-Allow-Headers": "Content-Type, Access-Control-Allow-Headers, Authorization, X-Requested-With"
        },
        'body': json.dumps({
            "code": 200,
            "msg": "send email successfully"
        })
    }
import os
from tika import detector
from tika import parser
import subprocess
import re
import json
from tika.parser import _parse
from tika.tika import callServer, ServerEndpoint
# Path for directory containing files to be processed
path = "/Users/charanshampur/newAwsDump/testFiles2"
# Path for Google Scholar Api Program
googleScholarProgramPath="/Users/charanshampur/PycharmProjects/CSCI599/scholar.py"
# Python Interpreter to be used to run the Scholar API
pythonInterpreter="/Library/Frameworks/Python.framework/Versions/2.7/bin/python2.7"
# File containing metadata about related publication and geographic location
publicationFile = open("Publication.json","w")
# output file generated from TagRatioparser.py
measurementFile = open("Measurement.json","r")
measJson = json.load(measurementFile)
# NOTE(review): the absolute paths above are machine-specific and belong in
# configuration or CLI arguments; this module is Python 2 (see print below).
# Function for processing the metadata generated by running grobid journal parser
def processGorbid(metadata):
    """Pick the interesting Grobid/Tika fields out of *metadata*.

    Returns a new dict containing only the author/title/affiliation/address
    keys that are actually present in the input.
    """
    meta = {}
    # BUG FIX: the original guarded the affiliations copy with
    # "grobid:header_Affiliations" while reading
    # "grobid:header_FullAffiliations", raising KeyError whenever only the
    # short key was present; guard on the key that is actually read.
    wanted = ("Author", "grobid:header_Title", "grobid:header_Authors",
              "grobid:header_FullAffiliations", "grobid:header_Address", "title")
    for key in wanted:
        if key in metadata:
            meta[key] = metadata[key]
    return meta
# Function for preparing the author list and phrase for fetching related publications
def getRelatedPub(metadata):
    """Collect related publications for the document's authors and title.

    Queries Google Scholar once per author (Grobid header authors, then the
    plain Author field) and once for the Grobid title phrase; per author
    list, querying stops once more than 40 results have accumulated.
    """
    def search_authors(field_name):
        # Query Scholar for each comma-separated, non-blank author entry.
        collected = []
        for candidate in metadata[field_name].split(","):
            if re.match("\S", candidate):
                collected += googleScholarApi(True, candidate)
                if len(collected) > 40:
                    break
        return collected

    grobid_author_pubs = []
    title_pubs = []
    plain_author_pubs = []
    if "grobid:header_Authors" in metadata:
        grobid_author_pubs = search_authors("grobid:header_Authors")
    if "grobid:header_Title" in metadata:
        title_pubs = googleScholarApi(False, metadata["grobid:header_Title"])
    if "Author" in metadata:
        plain_author_pubs = search_authors("Author")
    return plain_author_pubs + grobid_author_pubs + title_pubs
# Function which calls the google scholar api
def googleScholarApi(authorFLag,data):
    """Run the external scholar.py script and parse its delimited output.

    authorFLag True -> author search ('-a'); False -> phrase search ('-p').
    Returns a list of "Title : ..., Year-Published : ..., URL : ..." strings.
    NOTE: Python 2 only -- .encode() below yields a str that is concatenated
    with str literals; under Python 3 it would be bytes and raise TypeError.
    """
    pubList=[]
    data=data.encode('ascii', 'ignore')
    if authorFLag:
        buffer=subprocess.check_output([pythonInterpreter, googleScholarProgramPath, '-a', "'"+data+"'", '-c 20', '--csv'])
    else:
        buffer=subprocess.check_output([pythonInterpreter, googleScholarProgramPath, '-p', "'"+data+"'", '-c 20', '--csv'])
    # Entries are separated by "..." and fields within an entry by "|".
    if(buffer!=None and buffer!=""):
        listOfPub=buffer.split("...")
        for publication in listOfPub:
            if(publication!="\n"):
                details = publication.split("|")
                if(len(details)>2):
                    pubStr = "Title : "+details[0].strip()+", Year-Published : "+details[2].strip()+", URL : "+details[1].strip()
                    pubList.append(pubStr)
    return pubList
# function for selecting the metadata fetched from geotopic parser
def getGeoTags(geoMetadata, metaData):
    """Copy entries whose key starts with 'Optional' or 'Geographic' from
    geoMetadata into metaData, and return metaData."""
    geo_key = re.compile(r'(Optional)+|(Geographic)+', re.M)
    for key, value in geoMetadata.items():
        if geo_key.match(key):
            metaData[key] = value
    return metaData
# Main function
publicationDict={}
for path,dirs,files in os.walk(path):
for file in files:
if file not in ".DS_Store":
parsedData=""
path_to_file = path+"/"+str(file)
print path_to_file
docType = detector.from_file(path_to_file)
metaData={}
buffer=""
if docType=="application/pdf":
parsedData=parser.from_file(path_to_file,"http://localhost:9090")
if parsedData["content"]!=None:
metaData=processGorbid(parsedData["metadata"])
buffer = parsedData["content"]
else:
if path_to_file not in measJson:
continue
metaDataNer = measJson[path_to_file]
if "NER_PERSON" in metaDataNer:
metaData["Author"]=''.join(metaDataNer["NER_PERSON"])
if "content" in metaDataNer:
buffer=metaDataNer["content"]
try:
metaData["title"]=parser.from_file(path_to_file,"http://localhost:9091")["metadata"]["title"]
except:
pass
metaDataPublication=getRelatedPub(metaData)
if (len(metaDataPublication)>0):
metaData["Publications"]=metaDataPublication
if (len(buffer)>0):
buffer=buffer.encode('ascii', 'ignore')
status, response = callServer('put', ServerEndpoint, '/rmeta', buffer,
{'Content-Type' : 'application/geotopic'}, False)
geoMetaData=_parse((status,response))
if "metadata" in geoMetaData:
metaData=getGeoTags(geoMetaData["metadata"],metaData)
if (len(metaData)>0):
publicationDict[path_to_file]=metaData
json.dump(publicationDict,publicationFile,indent=4)
measurementFile.close()
publicationFile.close() | 3.NER_GeoTopic/grobidParser.py | import os
from tika import detector
from tika import parser
import subprocess
import re
import json
from tika.parser import _parse
from tika.tika import callServer, ServerEndpoint
# Path for directory containing files to be processed
path = "/Users/charanshampur/newAwsDump/testFiles2"
# Path for Google Scholar Api Program
googleScholarProgramPath="/Users/charanshampur/PycharmProjects/CSCI599/scholar.py"
# Python Interpreter to be used to run the Scholar API
pythonInterpreter="/Library/Frameworks/Python.framework/Versions/2.7/bin/python2.7"
# File containing metadata about related publication and geographic location
publicationFile = open("Publication.json","w")
# output file generated from TagRatioparser.py
measurementFile = open("Measurement.json","r")
measJson = json.load(measurementFile)
# NOTE(review): the absolute paths above are machine-specific and belong in
# configuration or CLI arguments; this module is Python 2 (see print below).
# Function for processing the metadata generated by running grobid journal parser
def processGorbid(metadata):
    """Pick the interesting Grobid/Tika fields out of *metadata*.

    Returns a new dict containing only the author/title/affiliation/address
    keys that are actually present in the input.
    """
    meta = {}
    # BUG FIX: the original guarded the affiliations copy with
    # "grobid:header_Affiliations" while reading
    # "grobid:header_FullAffiliations", raising KeyError whenever only the
    # short key was present; guard on the key that is actually read.
    wanted = ("Author", "grobid:header_Title", "grobid:header_Authors",
              "grobid:header_FullAffiliations", "grobid:header_Address", "title")
    for key in wanted:
        if key in metadata:
            meta[key] = metadata[key]
    return meta
# Function for preparing the author list and phrase for fetching related publications
def getRelatedPub(metadata):
    """Collect related publications via Google Scholar.

    Queries once per author (Grobid header authors, then the plain Author
    field) and once for the Grobid title phrase; per author list, querying
    stops once more than 40 results have accumulated.
    """
    pubGrobidAuthor=[]
    pubGrobidHeader=[]
    pubAuthor=[]
    if "grobid:header_Authors" in metadata:
        authors=metadata["grobid:header_Authors"].split(",")
        if(len(authors)!=0):
            for author in authors:
                # Skip blank entries (author must start with a non-space char).
                if(re.match("\S",author)):
                    pubGrobidAuthor+=googleScholarApi(True,author)
                    if len(pubGrobidAuthor) > 40:
                        break
    if "grobid:header_Title" in metadata:
        phrase=metadata["grobid:header_Title"]
        pubGrobidHeader=googleScholarApi(False,phrase)
    if "Author" in metadata:
        authors=metadata["Author"].split(",")
        if(len(authors)!=0):
            for author in authors:
                if(re.match("\S",author)):
                    pubAuthor+=googleScholarApi(True,author)
                    if len(pubAuthor) > 40:
                        break
    # Plain-Author results first, then Grobid authors, then title matches.
    pubAuthor = pubAuthor + pubGrobidAuthor + pubGrobidHeader
    return pubAuthor
# Function which calls the google scholar api
def googleScholarApi(authorFLag,data):
    """Run the external scholar.py script and parse its delimited output.

    authorFLag True -> author search ('-a'); False -> phrase search ('-p').
    Returns a list of "Title : ..., Year-Published : ..., URL : ..." strings.
    NOTE: Python 2 only -- .encode() below yields a str that is concatenated
    with str literals; under Python 3 it would be bytes and raise TypeError.
    """
    pubList=[]
    data=data.encode('ascii', 'ignore')
    if authorFLag:
        buffer=subprocess.check_output([pythonInterpreter, googleScholarProgramPath, '-a', "'"+data+"'", '-c 20', '--csv'])
    else:
        buffer=subprocess.check_output([pythonInterpreter, googleScholarProgramPath, '-p', "'"+data+"'", '-c 20', '--csv'])
    # Entries are separated by "..." and fields within an entry by "|".
    if(buffer!=None and buffer!=""):
        listOfPub=buffer.split("...")
        for publication in listOfPub:
            if(publication!="\n"):
                details = publication.split("|")
                if(len(details)>2):
                    pubStr = "Title : "+details[0].strip()+", Year-Published : "+details[2].strip()+", URL : "+details[1].strip()
                    pubList.append(pubStr)
    return pubList
# function for selecting the metadata fetched from geotopic parser
def getGeoTags(geoMetadata,metaData):
    """Copy entries whose key starts with 'Optional' or 'Geographic' from
    geoMetadata into metaData, and return metaData."""
    for k,v in geoMetadata.items():
        if re.match(r'(Optional)+|(Geographic)+',k,re.M):
            metaData[k]=v
    return metaData
# Main function: walk the dump directory, extract metadata per file, enrich it
# with related publications and geographic tags, then write Publication.json.
# NOTE(review): `file not in ".DS_Store"` below is a substring test, not an
# equality test -- it also skips any file whose name is a substring of
# ".DS_Store" (e.g. "S", "Store"). Probably meant `file != ".DS_Store"`.
publicationDict={}
for path,dirs,files in os.walk(path):
    for file in files:
        if file not in ".DS_Store":
            parsedData=""
            # `path` is rebound by os.walk to the current directory (dirpath).
            path_to_file = path+"/"+str(file)
            print path_to_file
            docType = detector.from_file(path_to_file)
            metaData={}
            buffer=""
            if docType=="application/pdf":
                # PDFs go through the Grobid-enabled Tika server on :9090.
                parsedData=parser.from_file(path_to_file,"http://localhost:9090")
                if parsedData["content"]!=None:
                    metaData=processGorbid(parsedData["metadata"])
                    buffer = parsedData["content"]
            else:
                # Non-PDFs rely on the precomputed Measurement.json entry.
                if path_to_file not in measJson:
                    continue
                metaDataNer = measJson[path_to_file]
                if "NER_PERSON" in metaDataNer:
                    metaData["Author"]=''.join(metaDataNer["NER_PERSON"])
                if "content" in metaDataNer:
                    buffer=metaDataNer["content"]
                try:
                    metaData["title"]=parser.from_file(path_to_file,"http://localhost:9091")["metadata"]["title"]
                except:
                    pass
            metaDataPublication=getRelatedPub(metaData)
            if (len(metaDataPublication)>0):
                metaData["Publications"]=metaDataPublication
            if (len(buffer)>0):
                # Geotopic parsing wants plain ASCII bytes (Python 2 str).
                buffer=buffer.encode('ascii', 'ignore')
                status, response = callServer('put', ServerEndpoint, '/rmeta', buffer,
                                              {'Content-Type' : 'application/geotopic'}, False)
                geoMetaData=_parse((status,response))
                if "metadata" in geoMetaData:
                    metaData=getGeoTags(geoMetaData["metadata"],metaData)
            if (len(metaData)>0):
                publicationDict[path_to_file]=metaData
json.dump(publicationDict,publicationFile,indent=4)
measurementFile.close()
publicationFile.close()
from transbank.common.options import WebpayOptions
from transbank.common.request_service import RequestService
from transbank.common.api_constants import ApiConstants
from transbank.common.integration_commerce_codes import IntegrationCommerceCodes
from transbank.common.webpay_transaction import WebpayTransaction
from transbank.common.integration_api_keys import IntegrationApiKeys
from transbank.common.validation_util import ValidationUtil
from transbank.webpay.webpay_plus.schema import MallTransactionCreateRequestSchema, MallTransactionRefundRequestSchema, MallTransactionCaptureRequestSchema
from transbank.webpay.webpay_plus.request import MallTransactionCreateDetails, MallTransactionCreateRequest, MallTransactionRefundRequest, MallTransactionCaptureRequest
from transbank.error.transbank_error import TransbankError
from transbank.error.transaction_create_error import TransactionCreateError
from transbank.error.transaction_commit_error import TransactionCommitError
from transbank.error.transaction_status_error import TransactionStatusError
from transbank.error.transaction_refund_error import TransactionRefundError
from transbank.error.transaction_capture_error import TransactionCaptureError
class MallTransaction(WebpayTransaction):
CREATE_ENDPOINT = ApiConstants.WEBPAY_ENDPOINT + '/transactions/'
COMMIT_ENDPOINT = ApiConstants.WEBPAY_ENDPOINT + '/transactions/{}'
STATUS_ENDPOINT = ApiConstants.WEBPAY_ENDPOINT + '/transactions/{}'
REFUND_ENDPOINT = ApiConstants.WEBPAY_ENDPOINT + '/transactions/{}/refunds'
CAPTURE_ENDPOINT = ApiConstants.WEBPAY_ENDPOINT + '/transactions/{}/capture'
def __init__(self, options: WebpayOptions = None):
if options is None:
self.configure_for_testing()
else:
super().__init__(options)
def create(self, buy_order: str, session_id: str, return_url: str, details: MallTransactionCreateDetails):
ValidationUtil.has_text_with_max_length(buy_order, ApiConstants.BUY_ORDER_LENGTH, "buy_order")
ValidationUtil.has_text_with_max_length(session_id, ApiConstants.SESSION_ID_LENGTH, "session_id")
ValidationUtil.has_text_with_max_length(return_url, ApiConstants.RETURN_URL_LENGTH, "return_url")
ValidationUtil.has_elements(details.details, "details")
for item in details.details:
ValidationUtil.has_text_with_max_length(item.commerce_code, ApiConstants.COMMERCE_CODE_LENGTH, "details.commerce_code")
ValidationUtil.has_text_with_max_length(item.buy_order, ApiConstants.BUY_ORDER_LENGTH, "details.buy_order")
try:
endpoint = MallTransaction.CREATE_ENDPOINT
request = MallTransactionCreateRequest(buy_order, session_id, return_url, details.details)
return RequestService.post(endpoint, MallTransactionCreateRequestSchema().dumps(request).data, self.options)
except TransbankError as e:
raise TransactionCreateError(e.message, e.code)
def commit(self, token: str):
ValidationUtil.has_text_with_max_length(token, ApiConstants.TOKEN_LENGTH, "token")
try:
endpoint = MallTransaction.COMMIT_ENDPOINT.format(token)
return RequestService.put(endpoint, {}, self.options)
except TransbankError as e:
raise TransactionCommitError(e.message, e.code)
def status(self, token: str):
ValidationUtil.has_text_with_max_length(token, ApiConstants.TOKEN_LENGTH, "token")
try:
endpoint = MallTransaction.STATUS_ENDPOINT.format(token)
return RequestService.get(endpoint, self.options)
except TransbankError as e:
raise TransactionStatusError(e.message, e.code)
def refund(self, token: str, child_buy_order: str, child_commerce_code:str, amount: float):
ValidationUtil.has_text_with_max_length(token, ApiConstants.TOKEN_LENGTH, "token")
ValidationUtil.has_text_with_max_length(child_commerce_code, ApiConstants.COMMERCE_CODE_LENGTH, "child_commerce_code")
ValidationUtil.has_text_with_max_length(child_buy_order, ApiConstants.BUY_ORDER_LENGTH, "child_buy_order")
try:
endpoint = MallTransaction.REFUND_ENDPOINT.format(token)
request = MallTransactionRefundRequest(commerce_code=child_commerce_code, buy_order=child_buy_order, amount=amount)
return RequestService.post(endpoint, MallTransactionRefundRequestSchema().dumps(request).data, self.options)
except TransbankError as e:
raise TransactionRefundError(e.message, e.code)
def capture(self, child_commerce_code: str, token: str, buy_order: str, authorization_code: str, capture_amount: float):
ValidationUtil.has_text_with_max_length(token, ApiConstants.TOKEN_LENGTH, "token")
ValidationUtil.has_text_with_max_length(child_commerce_code, ApiConstants.COMMERCE_CODE_LENGTH, "child_commerce_code")
ValidationUtil.has_text_with_max_length(buy_order, ApiConstants.BUY_ORDER_LENGTH, "buy_order")
ValidationUtil.has_text_with_max_length(authorization_code, ApiConstants.AUTHORIZATION_CODE_LENGTH, "authorization_code")
try:
endpoint = MallTransaction.CAPTURE_ENDPOINT.format(token)
request = MallTransactionCaptureRequest(child_commerce_code, buy_order, authorization_code, capture_amount)
return RequestService.put(endpoint, MallTransactionCaptureRequestSchema().dumps(request).data, self.options)
except TransbankError as e:
raise TransactionCaptureError(e.message, e.code)
def configure_for_testing(self):
return self.configure_for_integration(IntegrationCommerceCodes.WEBPAY_PLUS_MALL, IntegrationApiKeys.WEBPAY)
def configure_for_testing_deferred(self):
return self.configure_for_integration(IntegrationCommerceCodes.WEBPAY_PLUS_MALL_DEFERRED, IntegrationApiKeys.WEBPAY) | transbank/webpay/webpay_plus/mall_transaction.py | from transbank.common.options import WebpayOptions
from transbank.common.request_service import RequestService
from transbank.common.api_constants import ApiConstants
from transbank.common.integration_commerce_codes import IntegrationCommerceCodes
from transbank.common.webpay_transaction import WebpayTransaction
from transbank.common.integration_api_keys import IntegrationApiKeys
from transbank.common.validation_util import ValidationUtil
from transbank.webpay.webpay_plus.schema import MallTransactionCreateRequestSchema, MallTransactionRefundRequestSchema, MallTransactionCaptureRequestSchema
from transbank.webpay.webpay_plus.request import MallTransactionCreateDetails, MallTransactionCreateRequest, MallTransactionRefundRequest, MallTransactionCaptureRequest
from transbank.error.transbank_error import TransbankError
from transbank.error.transaction_create_error import TransactionCreateError
from transbank.error.transaction_commit_error import TransactionCommitError
from transbank.error.transaction_status_error import TransactionStatusError
from transbank.error.transaction_refund_error import TransactionRefundError
from transbank.error.transaction_capture_error import TransactionCaptureError
class MallTransaction(WebpayTransaction):
CREATE_ENDPOINT = ApiConstants.WEBPAY_ENDPOINT + '/transactions/'
COMMIT_ENDPOINT = ApiConstants.WEBPAY_ENDPOINT + '/transactions/{}'
STATUS_ENDPOINT = ApiConstants.WEBPAY_ENDPOINT + '/transactions/{}'
REFUND_ENDPOINT = ApiConstants.WEBPAY_ENDPOINT + '/transactions/{}/refunds'
CAPTURE_ENDPOINT = ApiConstants.WEBPAY_ENDPOINT + '/transactions/{}/capture'
def __init__(self, options: WebpayOptions = None):
if options is None:
self.configure_for_testing()
else:
super().__init__(options)
def create(self, buy_order: str, session_id: str, return_url: str, details: MallTransactionCreateDetails):
ValidationUtil.has_text_with_max_length(buy_order, ApiConstants.BUY_ORDER_LENGTH, "buy_order")
ValidationUtil.has_text_with_max_length(session_id, ApiConstants.SESSION_ID_LENGTH, "session_id")
ValidationUtil.has_text_with_max_length(return_url, ApiConstants.RETURN_URL_LENGTH, "return_url")
ValidationUtil.has_elements(details.details, "details")
for item in details.details:
ValidationUtil.has_text_with_max_length(item.commerce_code, ApiConstants.COMMERCE_CODE_LENGTH, "details.commerce_code")
ValidationUtil.has_text_with_max_length(item.buy_order, ApiConstants.BUY_ORDER_LENGTH, "details.buy_order")
try:
endpoint = MallTransaction.CREATE_ENDPOINT
request = MallTransactionCreateRequest(buy_order, session_id, return_url, details.details)
return RequestService.post(endpoint, MallTransactionCreateRequestSchema().dumps(request).data, self.options)
except TransbankError as e:
raise TransactionCreateError(e.message, e.code)
def commit(self, token: str):
ValidationUtil.has_text_with_max_length(token, ApiConstants.TOKEN_LENGTH, "token")
try:
endpoint = MallTransaction.COMMIT_ENDPOINT.format(token)
return RequestService.put(endpoint, {}, self.options)
except TransbankError as e:
raise TransactionCommitError(e.message, e.code)
def status(self, token: str):
ValidationUtil.has_text_with_max_length(token, ApiConstants.TOKEN_LENGTH, "token")
try:
endpoint = MallTransaction.STATUS_ENDPOINT.format(token)
return RequestService.get(endpoint, self.options)
except TransbankError as e:
raise TransactionStatusError(e.message, e.code)
def refund(self, token: str, child_buy_order: str, child_commerce_code:str, amount: float):
ValidationUtil.has_text_with_max_length(token, ApiConstants.TOKEN_LENGTH, "token")
ValidationUtil.has_text_with_max_length(child_commerce_code, ApiConstants.COMMERCE_CODE_LENGTH, "child_commerce_code")
ValidationUtil.has_text_with_max_length(child_buy_order, ApiConstants.BUY_ORDER_LENGTH, "child_buy_order")
try:
endpoint = MallTransaction.REFUND_ENDPOINT.format(token)
request = MallTransactionRefundRequest(commerce_code=child_commerce_code, buy_order=child_buy_order, amount=amount)
return RequestService.post(endpoint, MallTransactionRefundRequestSchema().dumps(request).data, self.options)
except TransbankError as e:
raise TransactionRefundError(e.message, e.code)
def capture(self, child_commerce_code: str, token: str, buy_order: str, authorization_code: str, capture_amount: float):
ValidationUtil.has_text_with_max_length(token, ApiConstants.TOKEN_LENGTH, "token")
ValidationUtil.has_text_with_max_length(child_commerce_code, ApiConstants.COMMERCE_CODE_LENGTH, "child_commerce_code")
ValidationUtil.has_text_with_max_length(buy_order, ApiConstants.BUY_ORDER_LENGTH, "buy_order")
ValidationUtil.has_text_with_max_length(authorization_code, ApiConstants.AUTHORIZATION_CODE_LENGTH, "authorization_code")
try:
endpoint = MallTransaction.CAPTURE_ENDPOINT.format(token)
request = MallTransactionCaptureRequest(child_commerce_code, buy_order, authorization_code, capture_amount)
return RequestService.put(endpoint, MallTransactionCaptureRequestSchema().dumps(request).data, self.options)
except TransbankError as e:
raise TransactionCaptureError(e.message, e.code)
def configure_for_testing(self):
return self.configure_for_integration(IntegrationCommerceCodes.WEBPAY_PLUS_MALL, IntegrationApiKeys.WEBPAY)
def configure_for_testing_deferred(self):
return self.configure_for_integration(IntegrationCommerceCodes.WEBPAY_PLUS_MALL_DEFERRED, IntegrationApiKeys.WEBPAY) | 0.546012 | 0.103024 |
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: loop_protect
short_description: implements loop-protect rest api
version_added: "2.6"
description:
- "This configures loop protect on device over vlan or port"
extends_documentation_fragment:
- arubaoss_rest
options:
command:
description:
- Type of action to be taken.
required: true
port_disable_timer:
description:
- Set the number of seconds before disabled ports are
automatically re-enabled
required: false
trasmit_interval:
description:
- Set the number of seconds between loop detect packet transmissions.
required: false
mode:
description:
- Configures vlan or port mode
required: false
default: LPM_PORT
choices: LPM_PORT, LPM_VLAN
interface:
description:
- Interface id on which loop protect to be configured
required: false
receiver_action:
description:
- Set the action to take when a loop is detected.
is_loop_protection_enabled must be true to update the receiver_action.
required: false
default: LPRA_SEND_DISABLE
choices: LPRA_SEND_DISABLE, LPRA_NO_DISABLE, LPRA_SEND_RECV_DISABLE
vlan:
description:
- Vlan id on which loop protect is to be configured
required: false
extends_documentation_fragment:
- azure
author:
- <NAME> (@hpe)
'''
EXAMPLES = '''
- name: update loop
arubaoss_loop_protect:
command: update
trap: True
- name: enable loop-prtoect on port
arubaoss_loop_protect:
command: update_port
interface: 1
- name: disable loop-prtoect on port
arubaoss_loop_protect:
command: update_port
interface: 1
loop_protected: False
- name: change loop-protect mode to vlan
arubaoss_loop_protect:
command: update
mode: LPM_VLAN
- name: enable loop-prtoect on vlan
arubaoss_loop_protect:
command: update_vlan
vlan: 10
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.arubaoss.arubaoss import run_commands,get_config
from ansible.module_utils.network.arubaoss.arubaoss import arubaoss_argument_spec, arubaoss_required_if
import sys
def update(module):
params = module.params
url = "/loop_protect"
data = {
'port_disable_timer_in_senconds': params['port_disable_timer'],
'trasmit_interval_in_seconds': params['transmit_interval'],
'mode': params['mode'],
'is_trap_on_loop_detected_enabled': params['trap']
}
result = run_commands(module, url, data, 'PUT',check=url)
return result
def update_port(module):
params = module.params
url = '/loop_protect/ports/' + params['interface']
port_url = '/ports/' + str(params['interface'])
check_port = get_config(module,port_url)
if not check_port:
return {'msg': 'Port {} not present on device'.format(params['interface']),
'changed':False}
data = {
'port_id': params['interface'],
'is_loop_protection_enabled': params['loop_protected'],
'receiver_action': params['receiver_action']
}
result = run_commands(module, url, data, 'PUT',check=url)
return result
def update_vlan(module):
params = module.params
url = '/loop_protect/vlans/' + str(params['vlan'])
vlan_url = '/vlans/' + str(params['vlan'])
check_vlan = get_config(module,vlan_url)
if not check_vlan:
return {'msg': 'Vlan {} not configured'.format(params['vlan']),
'changed':False}
data = {
'vlan_id': params['vlan'],
'is_vlan_loop_protected': params['loop_protected'],
}
result = run_commands(module, url, data, 'PUT',check=url)
return result
def run_module():
module_args = dict(
command=dict(type='str', required=True,
choices=['update','update_port','update_vlan']),
port_disable_timer=dict(type='int', required=False, default=0),
transmit_interval=dict(type='int', required=False, default=5),
mode=dict(type='str', required=False, choices=['LPM_PORT','LPM_VLAN'],
default='LPM_PORT'),
trap=dict(type='bool', required=False, default=False),
interface=dict(type='str', required=False,),
loop_protected=dict(type='bool', required=False, default=True),
receiver_action=dict(type='str', required=False, default='LPRA_SEND_DISABLE',
choices=['LPRA_SEND_DISABLE','LPRA_NO_DISABLE','LPRA_SEND_RECV_DISABLE']),
vlan=dict(type='int', required=False),
)
module_args.update(arubaoss_argument_spec)
result = dict(changed=False,warnings='Not Supported')
module = AnsibleModule(
required_if=arubaoss_required_if,
argument_spec=module_args,
supports_check_mode=True
)
if module.check_mode:
module.exit_json(**result)
command = module.params['command']
try:
thismod = sys.modules[__name__]
method = getattr(thismod, command)
result = method(module)
except Exception as err:
return module.fail_json(msg=err)
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main() | aruba_module_installer/library/modules/network/arubaoss/arubaoss_loop_protect.py |
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: loop_protect
short_description: implements loop-protect rest api
version_added: "2.6"
description:
- "This configures loop protect on device over vlan or port"
extends_documentation_fragment:
- arubaoss_rest
options:
command:
description:
- Type of action to be taken.
required: true
port_disable_timer:
description:
- Set the number of seconds before disabled ports are
automatically re-enabled
required: false
trasmit_interval:
description:
- Set the number of seconds between loop detect packet transmissions.
required: false
mode:
description:
- Configures vlan or port mode
required: false
default: LPM_PORT
choices: LPM_PORT, LPM_VLAN
interface:
description:
- Interface id on which loop protect to be configured
required: false
receiver_action:
description:
- Set the action to take when a loop is detected.
is_loop_protection_enabled must be true to update the receiver_action.
required: false
default: LPRA_SEND_DISABLE
choices: LPRA_SEND_DISABLE, LPRA_NO_DISABLE, LPRA_SEND_RECV_DISABLE
vlan:
description:
- Vlan id on which loop protect is to be configured
required: false
extends_documentation_fragment:
- azure
author:
- <NAME> (@hpe)
'''
EXAMPLES = '''
- name: update loop
arubaoss_loop_protect:
command: update
trap: True
- name: enable loop-prtoect on port
arubaoss_loop_protect:
command: update_port
interface: 1
- name: disable loop-prtoect on port
arubaoss_loop_protect:
command: update_port
interface: 1
loop_protected: False
- name: change loop-protect mode to vlan
arubaoss_loop_protect:
command: update
mode: LPM_VLAN
- name: enable loop-prtoect on vlan
arubaoss_loop_protect:
command: update_vlan
vlan: 10
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.arubaoss.arubaoss import run_commands,get_config
from ansible.module_utils.network.arubaoss.arubaoss import arubaoss_argument_spec, arubaoss_required_if
import sys
def update(module):
params = module.params
url = "/loop_protect"
data = {
'port_disable_timer_in_senconds': params['port_disable_timer'],
'trasmit_interval_in_seconds': params['transmit_interval'],
'mode': params['mode'],
'is_trap_on_loop_detected_enabled': params['trap']
}
result = run_commands(module, url, data, 'PUT',check=url)
return result
def update_port(module):
params = module.params
url = '/loop_protect/ports/' + params['interface']
port_url = '/ports/' + str(params['interface'])
check_port = get_config(module,port_url)
if not check_port:
return {'msg': 'Port {} not present on device'.format(params['interface']),
'changed':False}
data = {
'port_id': params['interface'],
'is_loop_protection_enabled': params['loop_protected'],
'receiver_action': params['receiver_action']
}
result = run_commands(module, url, data, 'PUT',check=url)
return result
def update_vlan(module):
params = module.params
url = '/loop_protect/vlans/' + str(params['vlan'])
vlan_url = '/vlans/' + str(params['vlan'])
check_vlan = get_config(module,vlan_url)
if not check_vlan:
return {'msg': 'Vlan {} not configured'.format(params['vlan']),
'changed':False}
data = {
'vlan_id': params['vlan'],
'is_vlan_loop_protected': params['loop_protected'],
}
result = run_commands(module, url, data, 'PUT',check=url)
return result
def run_module():
module_args = dict(
command=dict(type='str', required=True,
choices=['update','update_port','update_vlan']),
port_disable_timer=dict(type='int', required=False, default=0),
transmit_interval=dict(type='int', required=False, default=5),
mode=dict(type='str', required=False, choices=['LPM_PORT','LPM_VLAN'],
default='LPM_PORT'),
trap=dict(type='bool', required=False, default=False),
interface=dict(type='str', required=False,),
loop_protected=dict(type='bool', required=False, default=True),
receiver_action=dict(type='str', required=False, default='LPRA_SEND_DISABLE',
choices=['LPRA_SEND_DISABLE','LPRA_NO_DISABLE','LPRA_SEND_RECV_DISABLE']),
vlan=dict(type='int', required=False),
)
module_args.update(arubaoss_argument_spec)
result = dict(changed=False,warnings='Not Supported')
module = AnsibleModule(
required_if=arubaoss_required_if,
argument_spec=module_args,
supports_check_mode=True
)
if module.check_mode:
module.exit_json(**result)
command = module.params['command']
try:
thismod = sys.modules[__name__]
method = getattr(thismod, command)
result = method(module)
except Exception as err:
return module.fail_json(msg=err)
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main() | 0.477798 | 0.257367 |
import os
from os.path import join, exists
import numpy as np
from pgl.utils.data.dataloader import Dataloader
from pahelix.utils.data_utils import save_data_list_to_npz, load_npz_to_data_list
__all__ = ['InMemoryDataset']
class InMemoryDataset(object):
"""
The InMemoryDataset manages :attr:`data_list` which is an array of data
and the data is a dict of numpy ndarray.
It works like a list: you can call `dataset[i]` to get the i-th element of
the :attr:`data_list` and call `len(dataset)` to get the length of :attr:`data_list`.
The :attr:`data_list` can be cached in npz forward by calling `dataset.save_data(data_path)`
and after that, call `InMemoryDataset(data_path)` to reload.
Args:
data_list(list of dict of numpy ndarray): a list of dict of numpy ndarray.
data_path(str): the path to the cached npz path.
"""
def __init__(self,
data_list=None,
npz_data_path=None):
super(InMemoryDataset, self).__init__()
assert (data_list is None) ^ (npz_data_path is None), \
"Only data_list or npz_data_path should be set."
self.data_list = data_list
self.npz_data_path = npz_data_path
if not npz_data_path is None:
self.data_list = self._load_npz_data(npz_data_path)
def _load_npz_data(self, data_path):
data_list = []
files = [file for file in os.listdir(data_path) if file.endswith('.npz')]
for file in files:
data_list += load_npz_to_data_list(join(data_path, file))
return data_list
def _save_npz_data(self, data_list, data_path, max_num_per_file=10000):
if not exists(data_path):
os.makedirs(data_path)
n = len(data_list)
for i in range(int((n - 1) / max_num_per_file) + 1):
file = 'part-%05d.npz' % i
sub_data_list = self.data_list[i * max_num_per_file: (i + 1) * max_num_per_file]
save_data_list_to_npz(join(data_path, file), sub_data_list)
def save_data(self, data_path):
"""
Save the :attr:`data_list` to the :attr:`data_path` in the disk with npz format.
After that, call `InMemoryDataset(data_path)` to reload the :attr:`data_list`.
Args:
data_path(str): the path to the cached npz path.
"""
self._save_npz_data(self.data_list, data_path)
def __getitem__(self, key):
if isinstance(key, slice):
start, stop, step = key.indices(len(self))
dataset = InMemoryDataset(
data_list=[self[i] for i in range(start, stop, step)])
return dataset
elif isinstance(key, int) or \
isinstance(key, np.int64) or \
isinstance(key, np.int32):
return self.data_list[key]
elif isinstance(key, list):
dataset = InMemoryDataset(
data_list=[self[i] for i in key])
return dataset
else:
raise TypeError('Invalid argument type: %s of %s' % (type(key), key))
def __len__(self):
return len(self.data_list)
def iter_batch(self, batch_size, num_workers=4, shuffle=False, collate_fn=None):
"""
It returns an batch iterator which yields a batch of data. Firstly, a sub-list of
`data` of size :attr:`batch_size` will be draw from the :attr:`data_list`, then
the function :attr:`collate_fn` will be applied to the sub-list to create a batch and
yield back. This process is accelerated by multiprocess.
Args:
batch_size(int): the batch_size.
num_workers(int): the number of workers used to generate batch data. Required by
multiprocess.
shuffle(bool): whether to shuffle the order of the :attr:`data_list`.
collate_fn(function): used to convert the sub-list of :attr:`data_list` to the
aggregated batch data.
"""
return Dataloader(self,
batch_size=batch_size,
num_workers=num_workers,
shuffle=shuffle,
collate_fn=collate_fn) | pahelix/datasets/inmemory_dataset.py | import os
from os.path import join, exists
import numpy as np
from pgl.utils.data.dataloader import Dataloader
from pahelix.utils.data_utils import save_data_list_to_npz, load_npz_to_data_list
__all__ = ['InMemoryDataset']
class InMemoryDataset(object):
"""
The InMemoryDataset manages :attr:`data_list` which is an array of data
and the data is a dict of numpy ndarray.
It works like a list: you can call `dataset[i]` to get the i-th element of
the :attr:`data_list` and call `len(dataset)` to get the length of :attr:`data_list`.
The :attr:`data_list` can be cached in npz forward by calling `dataset.save_data(data_path)`
and after that, call `InMemoryDataset(data_path)` to reload.
Args:
data_list(list of dict of numpy ndarray): a list of dict of numpy ndarray.
data_path(str): the path to the cached npz path.
"""
def __init__(self,
data_list=None,
npz_data_path=None):
super(InMemoryDataset, self).__init__()
assert (data_list is None) ^ (npz_data_path is None), \
"Only data_list or npz_data_path should be set."
self.data_list = data_list
self.npz_data_path = npz_data_path
if not npz_data_path is None:
self.data_list = self._load_npz_data(npz_data_path)
def _load_npz_data(self, data_path):
data_list = []
files = [file for file in os.listdir(data_path) if file.endswith('.npz')]
for file in files:
data_list += load_npz_to_data_list(join(data_path, file))
return data_list
def _save_npz_data(self, data_list, data_path, max_num_per_file=10000):
if not exists(data_path):
os.makedirs(data_path)
n = len(data_list)
for i in range(int((n - 1) / max_num_per_file) + 1):
file = 'part-%05d.npz' % i
sub_data_list = self.data_list[i * max_num_per_file: (i + 1) * max_num_per_file]
save_data_list_to_npz(join(data_path, file), sub_data_list)
def save_data(self, data_path):
"""
Save the :attr:`data_list` to the :attr:`data_path` in the disk with npz format.
After that, call `InMemoryDataset(data_path)` to reload the :attr:`data_list`.
Args:
data_path(str): the path to the cached npz path.
"""
self._save_npz_data(self.data_list, data_path)
def __getitem__(self, key):
if isinstance(key, slice):
start, stop, step = key.indices(len(self))
dataset = InMemoryDataset(
data_list=[self[i] for i in range(start, stop, step)])
return dataset
elif isinstance(key, int) or \
isinstance(key, np.int64) or \
isinstance(key, np.int32):
return self.data_list[key]
elif isinstance(key, list):
dataset = InMemoryDataset(
data_list=[self[i] for i in key])
return dataset
else:
raise TypeError('Invalid argument type: %s of %s' % (type(key), key))
def __len__(self):
return len(self.data_list)
def iter_batch(self, batch_size, num_workers=4, shuffle=False, collate_fn=None):
"""
It returns an batch iterator which yields a batch of data. Firstly, a sub-list of
`data` of size :attr:`batch_size` will be draw from the :attr:`data_list`, then
the function :attr:`collate_fn` will be applied to the sub-list to create a batch and
yield back. This process is accelerated by multiprocess.
Args:
batch_size(int): the batch_size.
num_workers(int): the number of workers used to generate batch data. Required by
multiprocess.
shuffle(bool): whether to shuffle the order of the :attr:`data_list`.
collate_fn(function): used to convert the sub-list of :attr:`data_list` to the
aggregated batch data.
"""
return Dataloader(self,
batch_size=batch_size,
num_workers=num_workers,
shuffle=shuffle,
collate_fn=collate_fn) | 0.730674 | 0.42316 |
import time
import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
@pytest.fixture(scope="session")
def driver(request):
options = webdriver.ChromeOptions()
options.add_argument("start-maximized")
wd = webdriver.Chrome(chrome_options=options)
request.addfinalizer(wd.quit)
return wd
def test_login(driver):
driver.get("http://localhost/litecart/admin/")
driver.find_element_by_name("username").send_keys("admin")
driver.find_element_by_name("password").send_keys("<PASSWORD>")
driver.find_element_by_name("login").click()
def test_click_items_menu(driver):
wait = WebDriverWait(driver, 10) # seconds
items = driver.find_elements_by_xpath(".//li[@id='app-']")
count_items = len(items)
print(count_items)
item = items[0]
for i in range(count_items):
print(str(i + 1) + " - " + item.text)
time.sleep(1)
item.click()
head_item = wait.until(EC.presence_of_element_located((By.XPATH, ".//h1")))
print(head_item.text)
subitems = driver.find_elements_by_xpath(".//li[@id='app-' and @class='selected']//li")
count_subitems = len(subitems)
if count_subitems:
for _ in range(1, count_subitems):
subitem = driver.find_element_by_xpath(".//ul[@class='docs']//li[@class='selected']/following-sibling::li")
actions = ActionChains(driver)
actions.move_to_element(subitem)
actions.perform()
time.sleep(1)
subitem.click()
head_subitem = wait.until(EC.presence_of_element_located((By.XPATH, ".//h1")))
print(head_subitem.text)
if (i + 1 != count_items):
item = driver.find_element_by_xpath(".//li[@id='app-' and @class='selected']/following-sibling::li") | task_7/main.py | import time
import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
@pytest.fixture(scope="session")
def driver(request):
options = webdriver.ChromeOptions()
options.add_argument("start-maximized")
wd = webdriver.Chrome(chrome_options=options)
request.addfinalizer(wd.quit)
return wd
def test_login(driver):
driver.get("http://localhost/litecart/admin/")
driver.find_element_by_name("username").send_keys("admin")
driver.find_element_by_name("password").send_keys("<PASSWORD>")
driver.find_element_by_name("login").click()
def test_click_items_menu(driver):
wait = WebDriverWait(driver, 10) # seconds
items = driver.find_elements_by_xpath(".//li[@id='app-']")
count_items = len(items)
print(count_items)
item = items[0]
for i in range(count_items):
print(str(i + 1) + " - " + item.text)
time.sleep(1)
item.click()
head_item = wait.until(EC.presence_of_element_located((By.XPATH, ".//h1")))
print(head_item.text)
subitems = driver.find_elements_by_xpath(".//li[@id='app-' and @class='selected']//li")
count_subitems = len(subitems)
if count_subitems:
for _ in range(1, count_subitems):
subitem = driver.find_element_by_xpath(".//ul[@class='docs']//li[@class='selected']/following-sibling::li")
actions = ActionChains(driver)
actions.move_to_element(subitem)
actions.perform()
time.sleep(1)
subitem.click()
head_subitem = wait.until(EC.presence_of_element_located((By.XPATH, ".//h1")))
print(head_subitem.text)
if (i + 1 != count_items):
item = driver.find_element_by_xpath(".//li[@id='app-' and @class='selected']/following-sibling::li") | 0.181263 | 0.06663 |
# (C) Copyright 2020 Hewlett Packard Enterprise Development LP.
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'
}
DOCUMENTATION = '''
---
module: aoscx_command
version_added: "2.9"
short_description: Logs in and executes CLI commands on AOS-CX device via SSH connection
description:
- This module allows execution of CLI commands on AOS-CX devices via SSH connection
author: <NAME> (@ArubaNetworks)
options:
commands:
description: List of commands to be executed in sequence on the switch. Every command
will attempt to be executed regardless of the success or failure of the previous
command in the list. To execute commands in the 'configure' context, you must include
the 'configure terminal' command or one of its variations before the configuration commands.
'Show' commands are valid and their output will be printed to the screen, returned by the
module, and optionally saved to a file. The default module timeout is 30 seconds. To change the
command timeout, set the variable 'ansible_command_timeout' to the desired time in seconds.
required: True
type: list
wait_for:
description: A list of conditions to wait to be satisfied before continuing execution.
Each condition must include a test of the 'result' variable, which contains the output
results of each already-executed command in the 'commands' list. 'result' is a list
such that result[0] contains the output from commands[0], results[1] contains the output
from commands[1], and so on.
required: False
type: list
aliases: ['waitfor']
match:
description: Specifies whether all conditions in 'wait_for' must be satisfied or if just
any one condition can be satisfied. To be used with 'wait_for'.
default: 'all'
choices: ['any', 'all']
required: False
type: str
retries:
description: Maximum number of retries to check for the expected prompt.
default: 10
required: False
type: int
interval:
description: Interval between retries, in seconds.
default: 1
required: False
type: int
output_file:
description: Full path of the local system file to which commands' results will be output.
The directory must exist, but if the file doesn't exist, it will be created.
required: False
type: str
output_file_format:
description: Format to output the file in, either JSON or plain text.
To be used with 'output_file'.
default: json
choices: ['json', 'plain-text']
required: False
type: str
provider:
description: A dict object containing connection details.
suboptions:
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode on the
remote device. If authorize is false, then this argument does nothing.
If the value is not specified in the task, the value of environment variable
ANSIBLE_NET_AUTH_PASS will be used instead.
type: str
authorize:
description:
- Instructs the module to enter privileged mode on the remote device before
sending any commands. If not specified, the device will attempt to execute
all commands in non-privileged mode. If the value is not specified in the
task, the value of environment variable ANSIBLE_NET_AUTHORIZE will be used instead.
type: bool
host:
description:
- Specifies the DNS host name or address for connecting to the remote device over the
specified transport. The value of host is used as the destination address for the transport.
required: True
type: str
password:
description:
- Specifies the password to use to authenticate the connection to the remote device.
This value is used to authenticate the SSH session. If the value is not specified
in the task, the value of environment variable ANSIBLE_NET_PASSWORD will be used instead.
type: str
port:
description:
- Specifies the port to use when building the connection to the remote device.
type: int
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to the remote device.
This value is the path to the key used to authenticate the SSH session. If the value
is not specified in the task, the value of environment variable ANSIBLE_NET_SSH_KEYFILE
will be used instead.
type: path
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device for either
connecting or sending commands. If the timeout is exceeded before the operation is completed,
the module will error.
type: int
username:
description:
- Configures the username to use to authenticate the connection to the remote device.
This value is used to authenticate the SSH session. If the value is not specified in the task,
the value of environment variable ANSIBLE_NET_USERNAME will be used instead.
type: str
type: dict
''' # NOQA
EXAMPLES = '''
- name: Execute show commands and configure commands, and output results to file in plaintext
aoscx_command:
commands: ['show run',
'show vsf',
'show interface 1/1/1',
'config',
'interface 1/1/2',
'no shut',
'ip address 10.10.10.10/24',
'routing',
'ip address 10.10.10.11/24',
'exit',
'vlan 2',
'end']
output_file: /users/Home/configure.cfg
output_file_format: plain-text
- name: Show running-config and show interface mgmt, and pass only if all (both) results match
aoscx_command:
commands:
- 'show run'
- 'show int mgmt'
wait_for:
- result[0] contains "vlan "
- result[1] contains "127.0.0.1"
match: all
retries: 5
interval: 5
- name: Show all available commands and output them to a file (as JSON)
aoscx_command:
commands: ['list']
output_file: /users/Home/config_list.cfg
- name: Run ping command with increased command timeout
vars:
- ansible_command_timeout: 60
aoscx_command:
commands:
- ping 10.80.2.120 vrf mgmt repetitions 100
''' # NOQA
RETURN = r'''
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
'''
import time
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.network.common.utils import to_lines, ComplexList
from ansible_collections.arubanetworks.aoscx.plugins.module_utils.aoscx import run_commands, aoscx_argument_spec
def transform_commands(module):
    '''
    Normalize the module's raw ``commands`` parameter into a list of
    command dicts via Ansible's ComplexList helper.

    Each entry is keyed on ``command`` and may also carry ``prompt``,
    ``answer``, ``newline``, ``sendonly`` and ``check_all`` options.
    '''
    spec = dict(
        command=dict(key=True),
        prompt=dict(type='list'),
        answer=dict(type='list'),
        newline=dict(type='bool', default=True),
        sendonly=dict(type='bool', default=False),
        check_all=dict(type='bool', default=False),
    )
    normalize = ComplexList(spec, module)
    return normalize(module.params['commands'])
def parse_commands(module, warnings):
    '''
    Return the normalized command list for *module*.

    ``warnings`` is accepted for signature parity with other Ansible
    network modules; it is not populated by this implementation.
    '''
    return transform_commands(module)
def main():
    '''
    Ansible module entry point.

    Runs the requested CLI commands on the remote AOS-CX device, retrying
    until the optional ``wait_for`` conditionals are satisfied or retries
    are exhausted; optionally writes the results to a local file (JSON or
    plain text) and returns the command output via ``exit_json``.
    '''
    argument_spec = dict(
        commands=dict(type='list', required=True),
        wait_for=dict(type='list', aliases=['waitfor']),
        match=dict(default='all', choices=['any', 'all']),
        retries=dict(default=10, type='int'),
        interval=dict(default=1, type='int'),
        output_file=dict(type='str', default=None),
        output_file_format=dict(type='str', default='json',
                                choices=['json', 'plain-text'])
    )
    argument_spec.update(aoscx_argument_spec)
    warnings = list()
    result = {'changed': False, 'warnings': warnings}
    module = AnsibleModule(
        argument_spec=argument_spec, supports_check_mode=True)
    commands = parse_commands(module, warnings)
    wait_for = module.params['wait_for'] or list()
    try:
        conditionals = [Conditional(c) for c in wait_for]
    except AttributeError as exc:
        module.fail_json(msg=to_text(exc))
    retries = module.params['retries']
    interval = module.params['interval']
    match = module.params['match']
    # Bug fix: initialize 'responses' so a user-supplied negative
    # 'retries' (loop never entered) cannot raise NameError below.
    responses = list()
    while retries >= 0:
        responses = run_commands(module, commands)
        for item in list(conditionals):
            if item(responses):
                if match == 'any':
                    # One satisfied condition is enough in 'any' mode.
                    conditionals = list()
                    break
                conditionals.remove(item)
        if not conditionals:
            break
        time.sleep(interval)
        retries -= 1
    if conditionals:
        # Retries exhausted with unsatisfied wait_for conditions.
        failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
        module.fail_json(msg=msg, failed_conditions=failed_conditions)
    commands_list = [command['command'] for command in commands]
    if module.params['output_file'] is not None:
        output_file = str(module.params['output_file'])
        output_file_format = str(module.params['output_file_format'])
        if output_file_format == 'json':
            # Pair each command with its response for structured output.
            output_list = [
                {'command': command, 'response': responses[i]}
                for i, command in enumerate(commands_list)
            ]
            with open(output_file, 'w') as output:
                json.dump(output_list, output, indent=4)
                output.write("\n")
        else:
            # Plain text: one delimited command/response section each.
            with open(output_file, 'w') as output:
                for i, command in enumerate(commands_list):
                    output.write("command: ")
                    output.write(command)
                    output.write("\n")
                    output.write("response: ")
                    output.write(str(responses[i]))
                    output.write("\n")
                    output.write("------------------------------------------")
                    output.write("\n")
    result.update({
        'stdout': responses,
        'stdout_lines': list(to_lines(responses))
    })
    module.exit_json(**result)
# Standard Ansible module boilerplate: run main() when executed directly.
if __name__ == '__main__':
    main()
# (C) Copyright 2020 Hewlett Packard Enterprise Development LP.
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'
}
DOCUMENTATION = '''
---
module: aoscx_command
version_added: "2.9"
short_description: Logs in and executes CLI commands on AOS-CX device via SSH connection
description:
- This module allows execution of CLI commands on AOS-CX devices via SSH connection
author: <NAME> (@ArubaNetworks)
options:
commands:
description: List of commands to be executed in sequence on the switch. Every command
will attempt to be executed regardless of the success or failure of the previous
command in the list. To execute commands in the 'configure' context, you must include
the 'configure terminal' command or one of its variations before the configuration commands.
'Show' commands are valid and their output will be printed to the screen, returned by the
module, and optionally saved to a file. The default module timeout is 30 seconds. To change the
command timeout, set the variable 'ansible_command_timeout' to the desired time in seconds.
required: True
type: list
wait_for:
description: A list of conditions to wait to be satisfied before continuing execution.
Each condition must include a test of the 'result' variable, which contains the output
results of each already-executed command in the 'commands' list. 'result' is a list
such that result[0] contains the output from commands[0], results[1] contains the output
from commands[1], and so on.
required: False
type: list
aliases: ['waitfor']
match:
description: Specifies whether all conditions in 'wait_for' must be satisfied or if just
any one condition can be satisfied. To be used with 'wait_for'.
default: 'all'
choices: ['any', 'all']
required: False
type: str
retries:
description: Maximum number of retries to check for the expected prompt.
default: 10
required: False
type: int
interval:
description: Interval between retries, in seconds.
default: 1
required: False
type: int
output_file:
description: Full path of the local system file to which commands' results will be output.
The directory must exist, but if the file doesn't exist, it will be created.
required: False
type: str
output_file_format:
description: Format to output the file in, either JSON or plain text.
To be used with 'output_file'.
default: json
choices: ['json', 'plain-text']
required: False
type: str
provider:
description: A dict object containing connection details.
suboptions:
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode on the
remote device. If authorize is false, then this argument does nothing.
If the value is not specified in the task, the value of environment variable
ANSIBLE_NET_AUTH_PASS will be used instead.
type: str
authorize:
description:
- Instructs the module to enter privileged mode on the remote device before
sending any commands. If not specified, the device will attempt to execute
all commands in non-privileged mode. If the value is not specified in the
task, the value of environment variable ANSIBLE_NET_AUTHORIZE will be used instead.
type: bool
host:
description:
- Specifies the DNS host name or address for connecting to the remote device over the
specified transport. The value of host is used as the destination address for the transport.
required: True
type: str
password:
description:
- Specifies the password to use to authenticate the connection to the remote device.
This value is used to authenticate the SSH session. If the value is not specified
in the task, the value of environment variable ANSIBLE_NET_PASSWORD will be used instead.
type: str
port:
description:
- Specifies the port to use when building the connection to the remote device.
type: int
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to the remote device.
This value is the path to the key used to authenticate the SSH session. If the value
is not specified in the task, the value of environment variable ANSIBLE_NET_SSH_KEYFILE
will be used instead.
type: path
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device for either
connecting or sending commands. If the timeout is exceeded before the operation is completed,
the module will error.
type: int
username:
description:
- Configures the username to use to authenticate the connection to the remote device.
This value is used to authenticate the SSH session. If the value is not specified in the task,
the value of environment variable ANSIBLE_NET_USERNAME will be used instead.
type: str
type: dict
''' # NOQA
EXAMPLES = '''
- name: Execute show commands and configure commands, and output results to file in plaintext
aoscx_command:
commands: ['show run',
'show vsf',
'show interface 1/1/1',
'config',
'interface 1/1/2',
'no shut',
'ip address 10.10.10.10/24',
'routing',
'ip address 10.10.10.11/24',
'exit',
'vlan 2',
'end']
output_file: /users/Home/configure.cfg
output_file_format: plain-text
- name: Show running-config and show interface mgmt, and pass only if all (both) results match
aoscx_command:
commands:
- 'show run'
- 'show int mgmt'
wait_for:
- result[0] contains "vlan "
- result[1] contains "127.0.0.1"
match: all
retries: 5
interval: 5
- name: Show all available commands and output them to a file (as JSON)
aoscx_command:
commands: ['list']
output_file: /users/Home/config_list.cfg
- name: Run ping command with increased command timeout
vars:
- ansible_command_timeout: 60
aoscx_command:
commands:
- ping 10.80.2.120 vrf mgmt repetitions 100
''' # NOQA
RETURN = r'''
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
'''
import time
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.network.common.utils import to_lines, ComplexList
from ansible_collections.arubanetworks.aoscx.plugins.module_utils.aoscx import run_commands, aoscx_argument_spec
def transform_commands(module):
    '''
    Normalize the module's raw ``commands`` parameter into a list of
    command dicts via Ansible's ComplexList helper; each entry is keyed
    on ``command`` and may carry prompt/answer/newline/sendonly/check_all
    options.
    '''
    transform = ComplexList(dict(
        command=dict(key=True),
        prompt=dict(type='list'),
        answer=dict(type='list'),
        newline=dict(type='bool', default=True),
        sendonly=dict(type='bool', default=False),
        check_all=dict(type='bool', default=False),
    ), module)
    return transform(module.params['commands'])
def parse_commands(module, warnings):
    '''
    Return the normalized command list for *module*.
    ``warnings`` is accepted for signature parity with other Ansible
    network modules; it is not populated here.
    '''
    commands = transform_commands(module)
    return commands
def main():
    '''
    Ansible module entry point: run the commands on the device, wait for
    optional ``wait_for`` conditionals, optionally write the results to a
    local file (JSON or plain text), and return output via ``exit_json``.
    '''
    argument_spec = dict(
        commands=dict(type='list', required=True),
        wait_for=dict(type='list', aliases=['waitfor']),
        match=dict(default='all', choices=['any', 'all']),
        retries=dict(default=10, type='int'),
        interval=dict(default=1, type='int'),
        output_file=dict(type='str', default=None),
        output_file_format=dict(type='str', default='json',
                                choices=['json', 'plain-text'])
    )
    argument_spec.update(aoscx_argument_spec)
    warnings = list()
    result = {'changed': False, 'warnings': warnings}
    module = AnsibleModule(
        argument_spec=argument_spec, supports_check_mode=True)
    commands = parse_commands(module, warnings)
    wait_for = module.params['wait_for'] or list()
    try:
        conditionals = [Conditional(c) for c in wait_for]
    except AttributeError as exc:
        module.fail_json(msg=to_text(exc))
    retries = module.params['retries']
    interval = module.params['interval']
    match = module.params['match']
    # Re-run the commands until all (or, in 'any' mode, one) of the
    # conditionals pass, or retries are exhausted.
    # NOTE(review): 'responses' is first bound inside this loop; a
    # negative 'retries' value would leave it undefined below — confirm
    # callers never pass retries < 0.
    while retries >= 0:
        responses = run_commands(module, commands)
        for item in list(conditionals):
            if item(responses):
                if match == 'any':
                    conditionals = list()
                    break
                conditionals.remove(item)
        if not conditionals:
            break
        time.sleep(interval)
        retries -= 1
    if conditionals:
        # Retries exhausted with unsatisfied wait_for conditions.
        failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
        module.fail_json(msg=msg, failed_conditions=failed_conditions)
    commands_list = []
    for command in commands:
        commands_list.append(command['command'])
    if module.params['output_file'] is not None:
        output_file_format = str(module.params['output_file_format'])
        if output_file_format == 'json':
            # Pair each command with its response for structured output.
            output_list = []
            for i, command in enumerate(commands_list):
                output_dict = {}
                output_dict['command'] = command
                output_dict['response'] = responses[i]
                output_list.append(output_dict)
            output_file = str(module.params['output_file'])
            with open(output_file, 'w') as output:
                json.dump(output_list, output, indent=4)
                output.write("\n")
        else:
            # Plain text: one delimited command/response section each.
            output_file = str(module.params['output_file'])
            with open(output_file, 'w') as output:
                for i, command in enumerate(commands_list):
                    output.write("command: ")
                    output.write(command)
                    output.write("\n")
                    output.write("response: ")
                    output.write(str(responses[i]))
                    output.write("\n")
                    output.write("------------------------------------------")
                    output.write("\n")
    result.update({
        'stdout': responses,
        'stdout_lines': list(to_lines(responses))
    })
    module.exit_json(**result)
# Standard Ansible module boilerplate: run main() when executed directly.
if __name__ == '__main__':
    main()
import unittest
import testing_config # Must be imported before the module under test.
import flask
import mock
import werkzeug.exceptions # Flask HTTP stuff.
from framework import csp
test_app = flask.Flask(__name__)
class CspTest(unittest.TestCase):
  """Tests for Content-Security-Policy generation in framework.csp."""

  def setUp(self):
    csp.ENABLED = True
    csp.REPORT_ONLY = False
    csp.REPORT_URI = 'test'
    self.test_policy = {
        'upgrade-insecure-requests': '',
        'default-src': ["'self'"],
        'base-uri': ["'none'"],
        'object-src': ["'none'"],
        'img-src': ["'self'", 'https:', 'data:'],
    }

  def test_get_nonce(self):
    """Many different nonce values are all different."""
    nonces = []
    for _ in range(1000):
      nonces.append(csp.get_nonce())
    self.assertEqual(len(nonces), len(set(nonces)))

  @mock.patch('framework.csp.USE_NONCE_ONLY_POLICY', False)
  def test_get_default_policy__strict(self):
    """We can get the regular strict policy."""
    policy = csp.get_default_policy(nonce='12345')
    self.assertCountEqual(list(csp.DEFAULT_POLICY.keys()), list(policy.keys()))
    self.assertIn('strict-dynamic', policy['script-src'])
    self.assertIn("'nonce-12345'", policy['script-src'])

  # Bug fix: this test was also named test_get_default_policy__strict,
  # which rebinds the class attribute so the test above never ran.
  @mock.patch('framework.csp.USE_NONCE_ONLY_POLICY', True)
  def test_get_default_policy__nonce_only(self):
    """We can get the even stricter nonce-only policy."""
    policy = csp.get_default_policy(nonce='12345')
    self.assertCountEqual(list(csp.NONCE_ONLY_POLICY.keys()), list(policy.keys()))
    self.assertNotIn('strict-dynamic', policy['script-src'])
    self.assertIn("'nonce-12345'", policy['script-src'])

  @mock.patch('framework.csp.REPORT_ONLY', False)
  def test_get_csp_header_key__enforced(self):
    """We can get the header used when the policy is enforced."""
    self.assertEqual(
        csp.HEADER_KEY_ENFORCE,
        csp.get_csp_header_key())

  # Bug fix: this test was also named test_get_csp_header_key__enforced,
  # shadowing the enforced-mode test above; renamed so both run.
  @mock.patch('framework.csp.REPORT_ONLY', True)
  def test_get_csp_header_key__report_only(self):
    """We can get the header used when only reporting violations."""
    self.assertEqual(
        csp.HEADER_KEY_REPORT_ONLY,
        csp.get_csp_header_key())

  def test_build_policy(self):
    """Each part of the CSP policy is in the header."""
    expected_directives = [
        'upgrade-insecure-requests', "default-src 'self'", "base-uri 'none'",
        "object-src 'none'", "img-src 'self' https: data:", 'report-uri test'
    ]
    result = csp.build_policy(self.test_policy)
    result_directives = [x.strip() for x in result.split(';')]
    self.assertCountEqual(expected_directives, result_directives)

  @mock.patch('framework.csp.REPORT_ONLY', True)
  def test_get_headers(self):
    """We can get a complete header dict."""
    actual = csp.get_headers('12345')
    self.assertIn('12345', actual[csp.HEADER_KEY_REPORT_ONLY])
class CspReporttest(unittest.TestCase):
  """Tests for the CSP violation report endpoint."""

  @mock.patch('logging.error')
  def test_report_handler(self, mock_error):
    """The report handler logs something for each request."""
    # Simulate a POST of a violation report to the /csp endpoint.
    with test_app.test_request_context('/csp', data='12345', method='POST'):
      response = csp.report_handler()
    self.assertEqual('', response)
    mock_error.assert_called_once()
import unittest
import testing_config # Must be imported before the module under test.
import flask
import mock
import werkzeug.exceptions # Flask HTTP stuff.
from framework import csp
test_app = flask.Flask(__name__)
class CspTest(unittest.TestCase):
  """Tests for Content-Security-Policy generation in framework.csp."""

  def setUp(self):
    csp.ENABLED = True
    csp.REPORT_ONLY = False
    csp.REPORT_URI = 'test'
    self.test_policy = {
        'upgrade-insecure-requests': '',
        'default-src': ["'self'"],
        'base-uri': ["'none'"],
        'object-src': ["'none'"],
        'img-src': ["'self'", 'https:', 'data:'],
    }

  def test_get_nonce(self):
    """Many different nonce values are all different."""
    nonces = []
    for _ in range(1000):
      nonces.append(csp.get_nonce())
    self.assertEqual(len(nonces), len(set(nonces)))

  @mock.patch('framework.csp.USE_NONCE_ONLY_POLICY', False)
  def test_get_default_policy__strict(self):
    """We can get the regular strict policy."""
    policy = csp.get_default_policy(nonce='12345')
    self.assertCountEqual(list(csp.DEFAULT_POLICY.keys()), list(policy.keys()))
    self.assertIn('strict-dynamic', policy['script-src'])
    self.assertIn("'nonce-12345'", policy['script-src'])

  # Bug fix: this test was also named test_get_default_policy__strict,
  # which rebinds the class attribute so the test above never ran.
  @mock.patch('framework.csp.USE_NONCE_ONLY_POLICY', True)
  def test_get_default_policy__nonce_only(self):
    """We can get the even stricter nonce-only policy."""
    policy = csp.get_default_policy(nonce='12345')
    self.assertCountEqual(list(csp.NONCE_ONLY_POLICY.keys()), list(policy.keys()))
    self.assertNotIn('strict-dynamic', policy['script-src'])
    self.assertIn("'nonce-12345'", policy['script-src'])

  @mock.patch('framework.csp.REPORT_ONLY', False)
  def test_get_csp_header_key__enforced(self):
    """We can get the header used when the policy is enforced."""
    self.assertEqual(
        csp.HEADER_KEY_ENFORCE,
        csp.get_csp_header_key())

  # Bug fix: this test was also named test_get_csp_header_key__enforced,
  # shadowing the enforced-mode test above; renamed so both run.
  @mock.patch('framework.csp.REPORT_ONLY', True)
  def test_get_csp_header_key__report_only(self):
    """We can get the header used when only reporting violations."""
    self.assertEqual(
        csp.HEADER_KEY_REPORT_ONLY,
        csp.get_csp_header_key())

  def test_build_policy(self):
    """Each part of the CSP policy is in the header."""
    expected_directives = [
        'upgrade-insecure-requests', "default-src 'self'", "base-uri 'none'",
        "object-src 'none'", "img-src 'self' https: data:", 'report-uri test'
    ]
    result = csp.build_policy(self.test_policy)
    result_directives = [x.strip() for x in result.split(';')]
    self.assertCountEqual(expected_directives, result_directives)

  @mock.patch('framework.csp.REPORT_ONLY', True)
  def test_get_headers(self):
    """We can get a complete header dict."""
    actual = csp.get_headers('12345')
    self.assertIn('12345', actual[csp.HEADER_KEY_REPORT_ONLY])
# NOTE(review): class name breaks CamelCase ('Reporttest'); kept as-is to
# avoid changing test discovery/reporting output.
class CspReporttest(unittest.TestCase):
  @mock.patch('logging.error')
  def test_report_handler(self, mock_error):
    """The report handler logs something for each request."""
    # Simulate a POST of a violation report to the /csp endpoint.
    with test_app.test_request_context('/csp', data='12345', method='POST'):
      actual = csp.report_handler()
      self.assertEqual('', actual)
      mock_error.assert_called_once()
from unittest import TestCase
from docutils import nodes
from mock import patch
from sphinx import jinja2glue
from sphinx_testing import TestApp
from hieroglyph.tests import util
from hieroglyph.builder import SlideBuilder
from hieroglyph.writer import (
SlideData,
BaseSlideTranslator,
SlideTranslator,
)
class SlideTranslationTests(TestCase):
    """Tests for BaseSlideTranslator/SlideTranslator: body-stack handling,
    SlideData lifecycle, and rendered slide HTML output."""
    def setUp(self):
        self.app = TestApp(
            buildername='slides',
            copy_srcdir_to_tmpdir=True,
            srcdir=util.test_root,
        )
        self.builder = SlideBuilder(self.app)
        self.document = util.make_document(
            'testing',
            """\
            Slide ``Title``
            ---------------
            * Bullet 1
            * Bullet 2
            """,
        )
        self.translator = BaseSlideTranslator(
            self.builder,
            self.document,
        )
        self.builder.init_templates()
    def test_push_body(self):
        """push_body stashes the current body on the stack and resets it."""
        self.translator.body = [1, 2, 3]
        self.translator.push_body()
        self.assertEqual(self.translator.body, [])
        self.assertEqual(self.translator.body_stack, [[1, 2, 3]])
        self.translator.body.append('foo')
        self.translator.push_body()
        self.assertEqual(
            self.translator.body_stack,
            [
                [1, 2, 3, ],
                ['foo', ],
            ],
        )
    def test_pop_body(self):
        """pop_body restores the most recently pushed body (LIFO)."""
        self.translator.body.append('a')
        self.translator.push_body()
        self.translator.body.append('1')
        self.translator.push_body()
        self.assertEqual(
            self.translator.body_stack,
            [
                ['a'],
                ['1'],
            ],
        )
        self.assertEqual(self.translator.body, [])
        self.translator.pop_body()
        self.assertEqual(
            self.translator.body_stack,
            [
                ['a'],
            ],
        )
        self.assertEqual(self.translator.body, ['1'])
    def test_visit_slide_creates_new_slide_data(self):
        # sanity checks
        self.assertIsNone(self.translator.current_slide)
        self.assertIsInstance(self.document[0], nodes.section)
        # visit the slide section
        self.translator.visit_slide(self.document[0])
        # verify the Slide was created
        self.assertIsNotNone(self.translator.current_slide)
        self.assertIsInstance(self.translator.current_slide, SlideData)
        self.assertEqual(
            self.translator.current_slide.level,
            self.document[0].attributes.get(
                'level',
                self.translator.section_level,
            ),
        )
    def test_section_classes_added_to_slidedata(self):
        self.document[0].set_class('fancy')
        # visit the slide section
        self.translator.visit_slide(self.document[0])
        self.assertEqual(
            self.translator.current_slide.classes,
            ['fancy'],
        )
    def test_prefixed_classes_added_to_slidedata_context_classes(self):
        # 'content-' prefixed classes are split out for the content element.
        self.document[0].set_class('fancy')
        self.document[0].set_class('content-inner')
        # visit the slide section
        self.translator.visit_slide(self.document[0])
        self.assertEqual(
            self.translator.current_slide.get_slide_context()['classes'],
            ['fancy', 'content-inner'],
        )
        self.assertEqual(
            self.translator.current_slide.get_slide_context()['slide_classes'],
            ['fancy', ],
        )
        self.assertEqual(
            self.translator.current_slide.get_slide_context()['content_classes'],
            ['inner'],
        )
    def test_depart_slide_clears_current_slide(self):
        # visit the slide section
        self.translator.visit_slide(self.document[0])
        self.assertIsNotNone(self.translator.current_slide)
        self.translator.depart_slide(self.document[0])
        self.assertIsNone(self.translator.current_slide)
    def test_visit_title_in_slide_sets_slide_title(self):
        # visit the slide section
        self.translator.visit_slide(self.document[0])
        # visit the title
        self.translator.visit_title(self.document[0][0])
        self.assertEqual(
            self.document[0][0].astext(),
            self.translator.current_slide.title,
        )
    def test_depart_slide_sets_slide_content(self):
        # TODO(review): unimplemented placeholder test.
        pass
    def test_slide_data_get_context(self):
        slide = SlideData(
            self.translator,
            title='My Pretty Slide',
            id='my-pretty-slide',
            level=1,
        )
        self.assertEqual(
            slide.get_slide_context(),
            {
                'title': 'My Pretty Slide',
                'level': 1,
                'content': '',
                'classes': [],
                'slide_classes': [],
                'content_classes': [],
                'id': 'my-pretty-slide',
                'slide_number': 0,
                'config': self.translator.builder.config,
            },
        )
    @patch.object(jinja2glue.BuiltinTemplateLoader, 'render')
    def test_depart_slide_calls_template_render(self, render_mock):
        self.translator.visit_slide(self.document[0])
        self.assertIsNotNone(self.translator.current_slide)
        current_slide = self.translator.current_slide
        self.translator.depart_slide(self.document[0])
        self.assertIsNone(self.translator.current_slide)
        render_mock.assert_called_once_with(
            'slide.html',
            current_slide.get_slide_context(),
        )
    @patch.object(
        jinja2glue.BuiltinTemplateLoader,
        'render',
        return_value='** SLIDE **',
    )
    def test_rendered_template_added_to_body(self, render_mock):
        self.translator.visit_slide(self.document[0])
        self.translator.depart_slide(self.document[0])
        self.assertIsNone(self.translator.current_slide)
        self.assertEqual(
            self.translator.body[-1],
            '** SLIDE **',
        )
    @patch.object(
        jinja2glue.BuiltinTemplateLoader,
        'render',
        return_value='** SLIDE **',
    )
    def test_only_rendered_template_added(self, render_mock):
        self.translator.visit_section = self.translator.visit_slide
        self.translator.depart_section = self.translator.depart_slide
        self.document.walkabout(self.translator)
        self.assertEqual(
            self.translator.body,
            ['** SLIDE **'],
        )
    def test_section_id_added_to_current_slide(self):
        self.document[0].set_class('fancy')
        # visit the slide section
        self.translator.visit_slide(self.document[0])
        self.assertEqual(
            self.translator.current_slide.id,
            'slide-title',
        )
    def test_inline_markup_in_title(self):
        # Inline literals in the section title survive as HTML; the exact
        # markup differs by Sphinx version.
        self.translator.visit_section = self.translator.visit_slide
        self.translator.depart_section = self.translator.depart_slide
        self.document.walkabout(self.translator)
        self.assertIn(
            self.translator.slide_data[-1].title,
            [
                # Sphinx 1.1, 1.2
                'Slide <tt class="docutils literal">'
                '<span class="pre">Title</span></tt>',
                # Sphinx 1.3
                'Slide <code class="docutils literal">'
                '<span class="pre">Title</span></code>'
            ],
        )
    def test_non_section_titles_rendered_normally(self):
        document = util.make_document(
            'testing',
            """\
            Section Title
            -------------
            Some Text
            .. note:: Take note!
            Another paragraph
            """,
        )
        translator = SlideTranslator(
            self.builder,
            document,
        )
        document.walkabout(translator)
        self.assertEqual(
            translator.body,
            [
                u'\n<article class="slide level-1" id="section-title">\n\n'
                '<h1>Section Title</h1>\n\n'
                '<p>Some Text</p>\n'
                '<div class="admonition note">\n'
                '<p class="first admonition-title">Note</p>\n'
                '<p class="last">Take note!</p>\n'
                '</div>\n'
                '<p>Another paragraph</p>'
                '\n\n\n\n\n</article>',
            ],
        )
    def test_slide_titles(self):
        document = util.make_document(
            'testing',
            """\
            .. slide:: Slide Title
            Slide Content
            """,
        )
        translator = SlideTranslator(
            self.builder,
            document,
        )
        document.walkabout(translator)
        self.assertEqual(
            translator.body,
            [
                u'\n<article class="admonition-slide-title slide level-1">\n\n'
                '<h1>Slide Title</h1>\n\n'
                '<p>Slide Content</p>\n\n\n\n\n</article>',
            ],
        )
class QuoteSlideTests(TestCase):
    """Tests for rendering reStructuredText block quotes as quote slides."""
    def setUp(self):
        self.app = TestApp(
            buildername='slides',
            copy_srcdir_to_tmpdir=True,
            srcdir=util.test_root,
        )
        self.builder = SlideBuilder(self.app)
    def test_rst_quote_makes_quote_slide(self):
        # An attributed RST quote becomes a <q> element with an author div.
        document = util.make_document(
            'quoted',
            """\
            .. slide:: Quotes
            :level: 2
            reStructuredText quotes are automatically converted
            -- The Sign Painter
            """,
        )
        translator = SlideTranslator(
            self.builder,
            document,
        )
        document.walkabout(translator)
        self.assertEqual(
            translator.body,
            [
                u'\n<article class="admonition-quotes slide level-2">\n\n'
                '<h2>Quotes</h2>\n\n'
                '<q>\n'
                'reStructuredText quotes are automatically converted</q>\n'
                '<div class="author">\n'
                'The Sign Painter</div>'
                '\n\n\n\n\n</article>',
            ],
        )
    def test_unattributed_rst_quote_makes_quote_slide(self):
        # Without an attribution line, no author div is emitted.
        document = util.make_document(
            'quoted',
            """\
            .. slide:: Quotes
            :level: 2
            reStructuredText quotes are automatically converted
            """,
        )
        translator = SlideTranslator(
            self.builder,
            document,
        )
        document.walkabout(translator)
        self.assertEqual(
            translator.body,
            [
                u'\n<article class="admonition-quotes slide level-2">\n\n'
                '<h2>Quotes</h2>\n\n'
                '<q>\n'
                'reStructuredText quotes are automatically converted</q>\n'
                '\n\n\n\n</article>',
            ],
        )
def test_rst_quote_processes_normally_with_extra_content(self):
document = util.make_document(
'quoted',
"""\
.. slide:: Indented RST
:level: 2
This text is over indented.
As is this text.
They look like quotes but they're not.
""",
)
translator = SlideTranslator(
self.builder,
document,
)
document.walkabout(translator)
self.assertEqual(
translator.body,
[
u'\n<article class="admonition-indented-rst slide level-2">\n\n'
'<h2>Indented RST</h2>\n\n'
'<blockquote>\n'
'<div><p>This text is over indented.</p>\n'
'<p>As is this text.</p>\n'
'<p>They look like quotes but they\'re not.</p>\n'
'</div></blockquote>\n'
'\n\n\n\n</article>',
],
) | v/lib/python2.7/site-packages/hieroglyph/tests/test_translator.py | from unittest import TestCase
from docutils import nodes
from mock import patch
from sphinx import jinja2glue
from sphinx_testing import TestApp
from hieroglyph.tests import util
from hieroglyph.builder import SlideBuilder
from hieroglyph.writer import (
SlideData,
BaseSlideTranslator,
SlideTranslator,
)
class SlideTranslationTests(TestCase):
    """Unit tests for BaseSlideTranslator / SlideTranslator behavior.

    Exercises the body stack, SlideData creation on visit_slide /
    depart_slide, template rendering via the builder's jinja loader and
    the byte-exact HTML produced for whole documents.

    NOTE(review): several rst literals in this class look
    whitespace-mangled by extraction (blank lines and in-string
    indentation appear lost) -- confirm against the upstream file.
    """
    def setUp(self):
        """Build a slides app, a small test document and a translator."""
        self.app = TestApp(
            buildername='slides',
            copy_srcdir_to_tmpdir=True,
            srcdir=util.test_root,
        )
        self.builder = SlideBuilder(self.app)
        self.document = util.make_document(
            'testing',
            """\
            Slide ``Title``
            ---------------
            * Bullet 1
            * Bullet 2
            """,
        )
        self.translator = BaseSlideTranslator(
            self.builder,
            self.document,
        )
        self.builder.init_templates()
    def test_push_body(self):
        """push_body stashes the current body on the stack and resets it."""
        self.translator.body = [1, 2, 3]
        self.translator.push_body()
        self.assertEqual(self.translator.body, [])
        self.assertEqual(self.translator.body_stack, [[1, 2, 3]])
        self.translator.body.append('foo')
        self.translator.push_body()
        self.assertEqual(
            self.translator.body_stack,
            [
                [1, 2, 3, ],
                ['foo', ],
            ],
        )
    def test_pop_body(self):
        """pop_body restores the most recently pushed body."""
        self.translator.body.append('a')
        self.translator.push_body()
        self.translator.body.append('1')
        self.translator.push_body()
        self.assertEqual(
            self.translator.body_stack,
            [
                ['a'],
                ['1'],
            ],
        )
        self.assertEqual(self.translator.body, [])
        self.translator.pop_body()
        self.assertEqual(
            self.translator.body_stack,
            [
                ['a'],
            ],
        )
        self.assertEqual(self.translator.body, ['1'])
    def test_visit_slide_creates_new_slide_data(self):
        """visit_slide creates a SlideData with the section's level."""
        # sanity checks
        self.assertIsNone(self.translator.current_slide)
        self.assertIsInstance(self.document[0], nodes.section)
        # visit the slide section
        self.translator.visit_slide(self.document[0])
        # verify the Slide was created
        self.assertIsNotNone(self.translator.current_slide)
        self.assertIsInstance(self.translator.current_slide, SlideData)
        self.assertEqual(
            self.translator.current_slide.level,
            self.document[0].attributes.get(
                'level',
                self.translator.section_level,
            ),
        )
    def test_section_classes_added_to_slidedata(self):
        """Classes set on the section propagate to SlideData.classes."""
        self.document[0].set_class('fancy')
        # visit the slide section
        self.translator.visit_slide(self.document[0])
        self.assertEqual(
            self.translator.current_slide.classes,
            ['fancy'],
        )
    def test_prefixed_classes_added_to_slidedata_context_classes(self):
        """'content-'-prefixed classes are split out into content_classes."""
        self.document[0].set_class('fancy')
        self.document[0].set_class('content-inner')
        # visit the slide section
        self.translator.visit_slide(self.document[0])
        self.assertEqual(
            self.translator.current_slide.get_slide_context()['classes'],
            ['fancy', 'content-inner'],
        )
        self.assertEqual(
            self.translator.current_slide.get_slide_context()['slide_classes'],
            ['fancy', ],
        )
        self.assertEqual(
            self.translator.current_slide.get_slide_context()['content_classes'],
            ['inner'],
        )
    def test_depart_slide_clears_current_slide(self):
        """depart_slide resets current_slide back to None."""
        # visit the slide section
        self.translator.visit_slide(self.document[0])
        self.assertIsNotNone(self.translator.current_slide)
        self.translator.depart_slide(self.document[0])
        self.assertIsNone(self.translator.current_slide)
    def test_visit_title_in_slide_sets_slide_title(self):
        """visit_title records the title text on the current slide."""
        # visit the slide section
        self.translator.visit_slide(self.document[0])
        # visit the title
        self.translator.visit_title(self.document[0][0])
        self.assertEqual(
            self.document[0][0].astext(),
            self.translator.current_slide.title,
        )
    def test_depart_slide_sets_slide_content(self):
        # NOTE(review): intentionally empty placeholder test.
        pass
    def test_slide_data_get_context(self):
        """get_slide_context returns the full template context dict."""
        slide = SlideData(
            self.translator,
            title='My Pretty Slide',
            id='my-pretty-slide',
            level=1,
        )
        self.assertEqual(
            slide.get_slide_context(),
            {
                'title': 'My Pretty Slide',
                'level': 1,
                'content': '',
                'classes': [],
                'slide_classes': [],
                'content_classes': [],
                'id': 'my-pretty-slide',
                'slide_number': 0,
                'config': self.translator.builder.config,
            },
        )
    @patch.object(jinja2glue.BuiltinTemplateLoader, 'render')
    def test_depart_slide_calls_template_render(self, render_mock):
        """depart_slide renders slide.html with the slide's context."""
        self.translator.visit_slide(self.document[0])
        self.assertIsNotNone(self.translator.current_slide)
        current_slide = self.translator.current_slide
        self.translator.depart_slide(self.document[0])
        self.assertIsNone(self.translator.current_slide)
        render_mock.assert_called_once_with(
            'slide.html',
            current_slide.get_slide_context(),
        )
    @patch.object(
        jinja2glue.BuiltinTemplateLoader,
        'render',
        return_value='** SLIDE **',
    )
    def test_rendered_template_added_to_body(self, render_mock):
        """The rendered template output is appended to the body."""
        self.translator.visit_slide(self.document[0])
        self.translator.depart_slide(self.document[0])
        self.assertIsNone(self.translator.current_slide)
        self.assertEqual(
            self.translator.body[-1],
            '** SLIDE **',
        )
    @patch.object(
        jinja2glue.BuiltinTemplateLoader,
        'render',
        return_value='** SLIDE **',
    )
    def test_only_rendered_template_added(self, render_mock):
        """Walking a whole document leaves only the rendered slide in body."""
        self.translator.visit_section = self.translator.visit_slide
        self.translator.depart_section = self.translator.depart_slide
        self.document.walkabout(self.translator)
        self.assertEqual(
            self.translator.body,
            ['** SLIDE **'],
        )
    def test_section_id_added_to_current_slide(self):
        """The section's docutils id is copied onto the SlideData."""
        self.document[0].set_class('fancy')
        # visit the slide section
        self.translator.visit_slide(self.document[0])
        self.assertEqual(
            self.translator.current_slide.id,
            'slide-title',
        )
    def test_inline_markup_in_title(self):
        """Inline literals in titles are rendered (Sphinx-version dependent tag)."""
        self.translator.visit_section = self.translator.visit_slide
        self.translator.depart_section = self.translator.depart_slide
        self.document.walkabout(self.translator)
        self.assertIn(
            self.translator.slide_data[-1].title,
            [
                # Sphinx 1.1, 1.2
                'Slide <tt class="docutils literal">'
                '<span class="pre">Title</span></tt>',
                # Sphinx 1.3
                'Slide <code class="docutils literal">'
                '<span class="pre">Title</span></code>'
            ],
        )
    def test_non_section_titles_rendered_normally(self):
        """Titles outside slide sections use the normal HTML translator path."""
        document = util.make_document(
            'testing',
            """\
            Section Title
            -------------
            Some Text
            .. note:: Take note!
            Another paragraph
            """,
        )
        translator = SlideTranslator(
            self.builder,
            document,
        )
        document.walkabout(translator)
        self.assertEqual(
            translator.body,
            [
                u'\n<article class="slide level-1" id="section-title">\n\n'
                '<h1>Section Title</h1>\n\n'
                '<p>Some Text</p>\n'
                '<div class="admonition note">\n'
                '<p class="first admonition-title">Note</p>\n'
                '<p class="last">Take note!</p>\n'
                '</div>\n'
                '<p>Another paragraph</p>'
                '\n\n\n\n\n</article>',
            ],
        )
    def test_slide_titles(self):
        """The slide directive's argument becomes the slide's <h1> title."""
        document = util.make_document(
            'testing',
            """\
            .. slide:: Slide Title
            Slide Content
            """,
        )
        translator = SlideTranslator(
            self.builder,
            document,
        )
        document.walkabout(translator)
        self.assertEqual(
            translator.body,
            [
                u'\n<article class="admonition-slide-title slide level-1">\n\n'
                '<h1>Slide Title</h1>\n\n'
                '<p>Slide Content</p>\n\n\n\n\n</article>',
            ],
        )
class QuoteSlideTests(TestCase):
    """Tests for converting rst block quotes into quote slides.

    NOTE(review): the rst literals in this class look whitespace-mangled
    by extraction (directive options/body need indentation and blank
    lines) -- confirm against the upstream file before editing.
    """
    def setUp(self):
        """Build a slides app and builder shared by the tests."""
        self.app = TestApp(
            buildername='slides',
            copy_srcdir_to_tmpdir=True,
            srcdir=util.test_root,
        )
        self.builder = SlideBuilder(self.app)
    def test_rst_quote_makes_quote_slide(self):
        """An attributed quote renders a <q> element plus an author div."""
        document = util.make_document(
            'quoted',
            """\
            .. slide:: Quotes
            :level: 2
            reStructuredText quotes are automatically converted
            -- The Sign Painter
            """,
        )
        translator = SlideTranslator(
            self.builder,
            document,
        )
        document.walkabout(translator)
        # The expected body pins the byte-exact HTML emitted by the writer.
        self.assertEqual(
            translator.body,
            [
                u'\n<article class="admonition-quotes slide level-2">\n\n'
                '<h2>Quotes</h2>\n\n'
                '<q>\n'
                'reStructuredText quotes are automatically converted</q>\n'
                '<div class="author">\n'
                'The Sign Painter</div>'
                '\n\n\n\n\n</article>',
            ],
        )
    def test_unattributed_rst_quote_makes_quote_slide(self):
        """A quote slide with no attribution renders only the <q> element."""
        document = util.make_document(
            'quoted',
            """\
            .. slide:: Quotes
            :level: 2
            reStructuredText quotes are automatically converted
            """,
        )
        translator = SlideTranslator(
            self.builder,
            document,
        )
        document.walkabout(translator)
        self.assertEqual(
            translator.body,
            [
                u'\n<article class="admonition-quotes slide level-2">\n\n'
                '<h2>Quotes</h2>\n\n'
                '<q>\n'
                'reStructuredText quotes are automatically converted</q>\n'
                '\n\n\n\n</article>',
            ],
        )
    def test_rst_quote_processes_normally_with_extra_content(self):
        """Indented rst with several paragraphs stays a <blockquote>."""
        document = util.make_document(
            'quoted',
            """\
            .. slide:: Indented RST
            :level: 2
            This text is over indented.
            As is this text.
            They look like quotes but they're not.
            """,
        )
        translator = SlideTranslator(
            self.builder,
            document,
        )
        document.walkabout(translator)
        self.assertEqual(
            translator.body,
            [
                u'\n<article class="admonition-indented-rst slide level-2">\n\n'
                '<h2>Indented RST</h2>\n\n'
                '<blockquote>\n'
                '<div><p>This text is over indented.</p>\n'
                '<p>As is this text.</p>\n'
                '<p>They look like quotes but they\'re not.</p>\n'
                '</div></blockquote>\n'
                '\n\n\n\n</article>',
            ],
        )
from __future__ import absolute_import
import unittest
from helpers import xroad
from main.maincontroller import MainController
from tests.xroad_client_registration_in_ss_221 import client_registration_in_ss
class XroadSecurityServerClientDeletion(unittest.TestCase):
    """
    MEMBER_14 Delete an X-Road Member's Subsystem
    MEMBER_52 Unregister a Security Server Client
    MEMBER_53 Delete a Security Server Client
    RIA URL: https://jira.ria.ee/browse/XT-366, https://jira.ria.ee/browse/XTKB-131
    RIA URL: https://jira.ria.ee/browse/XT-404, https://jira.ria.ee/browse/XTKB-164
    RIA URL: https://jira.ria.ee/browse/XT-405, https://jira.ria.ee/browse/XTKB-34, https://jira.ria.ee/browse/XTKB-124
    Depends on finishing other test(s): XroadSecurityServerClientRegistration, global group
    Requires helper scenarios: xroad_ss_client_certification_213
    X-Road version: 6.16.0
    """
    def test_client_deletion(self):
        """Assemble config-driven parameters and run the client removal scenario."""
        main = MainController(self)
        # Set test name and number
        main.test_number = 'MEMBER_14 / MEMBER_53'
        main.test_name = self.__class__.__name__
        # Central server UI credentials.
        main.url = main.config.get('cs.host')
        main.username = main.config.get('cs.user')
        main.password = main.config.get('cs.pass')
        main.reset_webdriver(url=main.url, username=main.username, password=main.password, close_previous=False,
                             init_new_webdriver=False)
        # SSH credentials for both security servers.
        ss1_ssh_host = main.config.get('ss1.ssh_host')
        ss1_ssh_username = main.config.get('ss1.ssh_user')
        ss1_ssh_password = main.config.get('ss1.ssh_pass')
        ss2_ssh_host = main.config.get('ss2.ssh_host')
        ss2_ssh_username = main.config.get('ss2.ssh_user')
        ss2_ssh_password = main.config.get('ss2.ssh_pass')
        main.management_services = xroad.split_xroad_subsystem(main.config.get('ss1.management_id'))
        # Client identifiers and display names for both servers.
        cs_member = xroad.split_xroad_id(main.config.get('ss1.client_id'))
        ss1_client = xroad.split_xroad_id(main.config.get('ss1.client_id'))
        ss1_client_2 = xroad.split_xroad_id(main.config.get('ss1.client2_id'))
        ss2_client = xroad.split_xroad_id(main.config.get('ss2.client_id'))
        ss2_client_2 = xroad.split_xroad_id(main.config.get('ss2.client2_id'))
        cs_member_name = main.config.get('ss1.client_name')
        ss1_client_name = main.config.get('ss1.client_name')
        ss1_client_2_name = main.config.get('ss1.client2_name')
        ss2_client_name = main.config.get('ss2.client_name')
        ss2_client_2_name = main.config.get('ss2.client2_name')
        test_func = client_registration_in_ss.test_remove(main.config.get('cs.host'),
                                                          main.config.get('cs.user'),
                                                          main.config.get('cs.pass'),
                                                          main.config.get('ss1.host'),
                                                          main.config.get('ss1.user'),
                                                          main.config.get('ss1.pass'),
                                                          main.config.get('ss2.host'),
                                                          main.config.get('ss2.user'),
                                                          main.config.get('ss2.pass'),
                                                          cs_new_member=cs_member, cs_member_name=cs_member_name,
                                                          ss1_client=ss1_client, ss1_client_name=ss1_client_name,
                                                          ss1_client_2=ss1_client_2,
                                                          ss1_client_2_name=ss1_client_2_name,
                                                          ss1_ssh_host=ss1_ssh_host,
                                                          ss1_ssh_username=ss1_ssh_username,
                                                          ss1_ssh_password=ss1_ssh_password,
                                                          ss2_ssh_host=ss2_ssh_host,
                                                          ss2_ssh_username=ss2_ssh_username,
                                                          # BUG FIX: was the syntax-invalid placeholder `<PASSWORD>`
                                                          # (anonymization artifact); pass the value read above.
                                                          ss2_ssh_password=ss2_ssh_password,
                                                          ss2_client=ss2_client, ss2_client_name=ss2_client_name,
                                                          ss2_client_2=ss2_client_2,
                                                          ss2_client_2_name=ss2_client_2_name,
                                                          ca_ssh_host=main.config.get('ca.ssh_host'),
                                                          ca_ssh_username=main.config.get('ca.ssh_user'),
                                                          ca_ssh_password=main.config.get('ca.ssh_pass'),
                                                          cs_ssh_host=main.config.get('cs.ssh_host'),
                                                          cs_ssh_username=main.config.get('cs.ssh_user'),
                                                          cs_ssh_password=main.config.get('cs.ssh_pass'),
                                                          global_group=main.config.get('cs.global_group_1')
                                                          )
        test_func(main)
        main.tearDown()
import unittest
from helpers import xroad
from main.maincontroller import MainController
from tests.xroad_client_registration_in_ss_221 import client_registration_in_ss
class XroadSecurityServerClientDeletion(unittest.TestCase):
    """
    MEMBER_14 Delete an X-Road Member's Subsystem
    MEMBER_52 Unregister a Security Server Client
    MEMBER_53 Delete a Security Server Client
    RIA URL: https://jira.ria.ee/browse/XT-366, https://jira.ria.ee/browse/XTKB-131
    RIA URL: https://jira.ria.ee/browse/XT-404, https://jira.ria.ee/browse/XTKB-164
    RIA URL: https://jira.ria.ee/browse/XT-405, https://jira.ria.ee/browse/XTKB-34, https://jira.ria.ee/browse/XTKB-124
    Depends on finishing other test(s): XroadSecurityServerClientRegistration, global group
    Requires helper scenarios: xroad_ss_client_certification_213
    X-Road version: 6.16.0
    """
    def test_client_deletion(self):
        """Assemble config-driven parameters and run the client removal scenario."""
        main = MainController(self)
        # Set test name and number
        main.test_number = 'MEMBER_14 / MEMBER_53'
        main.test_name = self.__class__.__name__
        # Central server UI credentials.
        main.url = main.config.get('cs.host')
        main.username = main.config.get('cs.user')
        main.password = main.config.get('cs.pass')
        main.reset_webdriver(url=main.url, username=main.username, password=main.password, close_previous=False,
                             init_new_webdriver=False)
        # SSH credentials for both security servers.
        ss1_ssh_host = main.config.get('ss1.ssh_host')
        ss1_ssh_username = main.config.get('ss1.ssh_user')
        ss1_ssh_password = main.config.get('ss1.ssh_pass')
        ss2_ssh_host = main.config.get('ss2.ssh_host')
        ss2_ssh_username = main.config.get('ss2.ssh_user')
        ss2_ssh_password = main.config.get('ss2.ssh_pass')
        main.management_services = xroad.split_xroad_subsystem(main.config.get('ss1.management_id'))
        # Client identifiers and display names for both servers.
        cs_member = xroad.split_xroad_id(main.config.get('ss1.client_id'))
        ss1_client = xroad.split_xroad_id(main.config.get('ss1.client_id'))
        ss1_client_2 = xroad.split_xroad_id(main.config.get('ss1.client2_id'))
        ss2_client = xroad.split_xroad_id(main.config.get('ss2.client_id'))
        ss2_client_2 = xroad.split_xroad_id(main.config.get('ss2.client2_id'))
        cs_member_name = main.config.get('ss1.client_name')
        ss1_client_name = main.config.get('ss1.client_name')
        ss1_client_2_name = main.config.get('ss1.client2_name')
        ss2_client_name = main.config.get('ss2.client_name')
        ss2_client_2_name = main.config.get('ss2.client2_name')
        test_func = client_registration_in_ss.test_remove(main.config.get('cs.host'),
                                                          main.config.get('cs.user'),
                                                          main.config.get('cs.pass'),
                                                          main.config.get('ss1.host'),
                                                          main.config.get('ss1.user'),
                                                          main.config.get('ss1.pass'),
                                                          main.config.get('ss2.host'),
                                                          main.config.get('ss2.user'),
                                                          main.config.get('ss2.pass'),
                                                          cs_new_member=cs_member, cs_member_name=cs_member_name,
                                                          ss1_client=ss1_client, ss1_client_name=ss1_client_name,
                                                          ss1_client_2=ss1_client_2,
                                                          ss1_client_2_name=ss1_client_2_name,
                                                          ss1_ssh_host=ss1_ssh_host,
                                                          ss1_ssh_username=ss1_ssh_username,
                                                          ss1_ssh_password=ss1_ssh_password,
                                                          ss2_ssh_host=ss2_ssh_host,
                                                          ss2_ssh_username=ss2_ssh_username,
                                                          # BUG FIX: was the syntax-invalid placeholder `<PASSWORD>`
                                                          # (anonymization artifact); pass the value read above.
                                                          ss2_ssh_password=ss2_ssh_password,
                                                          ss2_client=ss2_client, ss2_client_name=ss2_client_name,
                                                          ss2_client_2=ss2_client_2,
                                                          ss2_client_2_name=ss2_client_2_name,
                                                          ca_ssh_host=main.config.get('ca.ssh_host'),
                                                          ca_ssh_username=main.config.get('ca.ssh_user'),
                                                          ca_ssh_password=main.config.get('ca.ssh_pass'),
                                                          cs_ssh_host=main.config.get('cs.ssh_host'),
                                                          cs_ssh_username=main.config.get('cs.ssh_user'),
                                                          cs_ssh_password=main.config.get('cs.ssh_pass'),
                                                          global_group=main.config.get('cs.global_group_1')
                                                          )
        test_func(main)
        main.tearDown()
import numpy as np
def strain_to_stress(strain, mu, nu):
    """
    Convert strain tensors to stress tensors via isotropic linear
    elasticity:

        stress = 2 * mu * strain + lambda * Id(3) * trace(strain)

    Parameters
    ----------
    strain : {array-like}, shape (n_tensors, 6)
        The strain tensors ordered like (e_xx, e_yy, e_zz, e_xy, e_xz, e_yz)
    mu : float
        Shear modulus
    nu : float
        Poisson ratio

    Returns
    -------
    stress : np.ndarray, shape (n_tensors, 6)
        The stress tensors ordered like (s_xx, s_yy, s_zz, s_xy, s_xz, s_yz)
    """
    # First Lame parameter, derived from shear modulus and Poisson ratio.
    lame_lambda = 2 * mu * nu / (1 - 2 * nu)
    # Volumetric strain (trace) of every tensor in the batch.
    dilatation = strain[:, :3].sum(axis=1)
    out = np.empty_like(strain)
    # Normal components pick up the lambda * trace contribution...
    out[:, :3] = 2 * mu * strain[:, :3] + lame_lambda * dilatation[:, None]
    # ...while shear components are simply scaled by 2 * mu.
    out[:, 3:] = 2 * mu * strain[:, 3:]
    return out
def compute_normal_vectors(tri_pts) -> np.ndarray:
    """
    Compute the unit normal vector of each triangle.

    Parameters
    ----------
    tri_pts : {array-like}, shape (n_triangles, 3, 3)
        The vertices of the triangles.

    Returns
    -------
    normals : np.ndarray, shape (n_triangles, 3)
        The unit normal vectors, one per triangle.
    """
    # Two edge vectors from vertex 0 span each triangle's plane.
    edge_a = tri_pts[:, 1] - tri_pts[:, 0]
    edge_b = tri_pts[:, 2] - tri_pts[:, 0]
    # Their cross product is perpendicular to the triangle; this is one
    # axis of the TDCS.
    normals = np.cross(edge_a, edge_b, axis=1)
    # Scale to unit length.
    return normals / np.linalg.norm(normals, axis=1)[:, None]
def compute_projection_transforms(origins, transformer) -> np.ndarray:
    """
    Build, for every origin point, the 3x3 matrix that maps direction
    vectors from the source coordinate system of ``transformer`` into its
    target coordinate system.

    Unlike positions, vectors cannot be converted with a single pyproj
    call: for each input axis we project both the origin and a point
    offset by one unit along that axis, then difference the projected
    results. The matrices therefore include scaling as well as rotation
    (e.g. degrees of latitude -> meters yields a large scale factor).
    The inverse transformation is available either by inverting the
    matrix or by passing an inverse pyproj.Transformer.

    Parameters
    ----------
    origins : {array-like}, shape (N, 3)
        The points at which we will compute the matrices.
    transformer : pyproj.Transformer
        Performs the projection step; any object with a compatible
        ``transform(x, y, z)`` method works.

    Returns
    -------
    transform_mats : np.ndarray, shape (N, 3, 3)
        Column ``d`` holds the projected image of a unit step along
        input axis ``d``.
    """
    mats = np.empty((origins.shape[0], 3, 3), dtype=origins.dtype)
    for axis in range(3):
        step = 1.0
        # Offset every origin by one unit along the current input axis.
        shifted = origins.copy()
        shifted[:, axis] += step
        start = np.array(
            transformer.transform(origins[:, 0], origins[:, 1], origins[:, 2])
        ).T.copy()
        end = np.array(
            transformer.transform(shifted[:, 0], shifted[:, 1], shifted[:, 2])
        ).T.copy()
        # Finite difference: projected displacement per unit input step.
        mats[:, :, axis] = end - start
    return mats
def compute_efcs_to_tdcs_rotations(tri_pts) -> np.ndarray:
    """
    Build rotation matrices that convert from an Earth-fixed coordinate
    system (EFCS) to each triangle's dislocation coordinate system (TDCS).

    TDCS coordinates/vectors are separated into
    `(along-strike-distance, along-dip-distance, tensile-distance)`.
    Following the Nikkhoo and Walter 2015 / Okada convention, the dip
    vector points upwards -- opposite the standard geologic convention.

    Row 0 of each matrix is the strike vector, row 1 the dip vector and
    row 2 the normal vector. Because the matrices are orthogonal, the
    TDCS -> EFCS transform is simply the transpose:
    `np.transpose(rot_mats, (0, 2, 1))`.

    Parameters
    ----------
    tri_pts : {array-like}, shape (n_triangles, 3, 3)
        The vertices of the triangles.

    Returns
    -------
    rot_mats : np.ndarray, shape (n_triangles, 3, 3)
        The 3x3 EFCS -> TDCS rotation matrices.
    """
    # Unit normal of each triangle (same computation as
    # compute_normal_vectors, inlined so this function is self-contained).
    leg_a = tri_pts[:, 1] - tri_pts[:, 0]
    leg_b = tri_pts[:, 2] - tri_pts[:, 0]
    normal = np.cross(leg_a, leg_b, axis=1)
    normal = normal / np.linalg.norm(normal, axis=1)[:, None]
    e_y = np.array([0, 1, 0])
    e_z = np.array([0, 0, 1])
    # The strike vector is horizontal: orthogonal to both (0,0,1) and the
    # normal.
    strike = np.cross(e_z[None, :], normal, axis=1)
    strike_len = np.linalg.norm(strike, axis=1)
    # Degenerate case: a horizontal triangle (normal parallel to e_z)
    # yields a zero strike vector; fall back to (0, +/-1, 0).
    strike = np.where(
        strike_len[:, None] == 0, e_y[None, :] * normal[:, 2, None], strike
    )
    strike = strike / np.linalg.norm(strike, axis=1)[:, None]
    dip = np.cross(normal, strike, axis=1)
    # Rows: strike, dip, normal.
    return np.stack([strike, dip, normal], axis=1)
def strain_to_stress(strain, mu, nu):
    """
    Convert strain tensors to stress tensors via isotropic linear
    elasticity:

        stress = 2 * mu * strain + lambda * Id(3) * trace(strain)

    Parameters
    ----------
    strain : {array-like}, shape (n_tensors, 6)
        The strain tensors ordered like (e_xx, e_yy, e_zz, e_xy, e_xz, e_yz)
    mu : float
        Shear modulus
    nu : float
        Poisson ratio

    Returns
    -------
    stress : np.ndarray, shape (n_tensors, 6)
        The stress tensors ordered like (s_xx, s_yy, s_zz, s_xy, s_xz, s_yz)
    """
    # First Lame parameter, derived from shear modulus and Poisson ratio.
    lame_lambda = 2 * mu * nu / (1 - 2 * nu)
    # Volumetric strain (trace) of every tensor in the batch.
    dilatation = strain[:, :3].sum(axis=1)
    out = np.empty_like(strain)
    # Normal components pick up the lambda * trace contribution...
    out[:, :3] = 2 * mu * strain[:, :3] + lame_lambda * dilatation[:, None]
    # ...while shear components are simply scaled by 2 * mu.
    out[:, 3:] = 2 * mu * strain[:, 3:]
    return out
def compute_normal_vectors(tri_pts) -> np.ndarray:
    """
    Compute the unit normal vector of each triangle.

    Parameters
    ----------
    tri_pts : {array-like}, shape (n_triangles, 3, 3)
        The vertices of the triangles.

    Returns
    -------
    normals : np.ndarray, shape (n_triangles, 3)
        The unit normal vectors, one per triangle.
    """
    # Two edge vectors from vertex 0 span each triangle's plane.
    edge_a = tri_pts[:, 1] - tri_pts[:, 0]
    edge_b = tri_pts[:, 2] - tri_pts[:, 0]
    # Their cross product is perpendicular to the triangle; this is one
    # axis of the TDCS.
    normals = np.cross(edge_a, edge_b, axis=1)
    # Scale to unit length.
    return normals / np.linalg.norm(normals, axis=1)[:, None]
def compute_projection_transforms(origins, transformer) -> np.ndarray:
    """
    Build, for every origin point, the 3x3 matrix that maps direction
    vectors from the source coordinate system of ``transformer`` into its
    target coordinate system.

    Unlike positions, vectors cannot be converted with a single pyproj
    call: for each input axis we project both the origin and a point
    offset by one unit along that axis, then difference the projected
    results. The matrices therefore include scaling as well as rotation
    (e.g. degrees of latitude -> meters yields a large scale factor).
    The inverse transformation is available either by inverting the
    matrix or by passing an inverse pyproj.Transformer.

    Parameters
    ----------
    origins : {array-like}, shape (N, 3)
        The points at which we will compute the matrices.
    transformer : pyproj.Transformer
        Performs the projection step; any object with a compatible
        ``transform(x, y, z)`` method works.

    Returns
    -------
    transform_mats : np.ndarray, shape (N, 3, 3)
        Column ``d`` holds the projected image of a unit step along
        input axis ``d``.
    """
    mats = np.empty((origins.shape[0], 3, 3), dtype=origins.dtype)
    for axis in range(3):
        step = 1.0
        # Offset every origin by one unit along the current input axis.
        shifted = origins.copy()
        shifted[:, axis] += step
        start = np.array(
            transformer.transform(origins[:, 0], origins[:, 1], origins[:, 2])
        ).T.copy()
        end = np.array(
            transformer.transform(shifted[:, 0], shifted[:, 1], shifted[:, 2])
        ).T.copy()
        # Finite difference: projected displacement per unit input step.
        mats[:, :, axis] = end - start
    return mats
def compute_efcs_to_tdcs_rotations(tri_pts) -> np.ndarray:
    """
    Build rotation matrices that convert from an Earth-fixed coordinate
    system (EFCS) to each triangle's dislocation coordinate system (TDCS).

    TDCS coordinates/vectors are separated into
    `(along-strike-distance, along-dip-distance, tensile-distance)`.
    Following the Nikkhoo and Walter 2015 / Okada convention, the dip
    vector points upwards -- opposite the standard geologic convention.

    Row 0 of each matrix is the strike vector, row 1 the dip vector and
    row 2 the normal vector. Because the matrices are orthogonal, the
    TDCS -> EFCS transform is simply the transpose:
    `np.transpose(rot_mats, (0, 2, 1))`.

    Parameters
    ----------
    tri_pts : {array-like}, shape (n_triangles, 3, 3)
        The vertices of the triangles.

    Returns
    -------
    rot_mats : np.ndarray, shape (n_triangles, 3, 3)
        The 3x3 EFCS -> TDCS rotation matrices.
    """
    # Unit normal of each triangle (same computation as
    # compute_normal_vectors, inlined so this function is self-contained).
    leg_a = tri_pts[:, 1] - tri_pts[:, 0]
    leg_b = tri_pts[:, 2] - tri_pts[:, 0]
    normal = np.cross(leg_a, leg_b, axis=1)
    normal = normal / np.linalg.norm(normal, axis=1)[:, None]
    e_y = np.array([0, 1, 0])
    e_z = np.array([0, 0, 1])
    # The strike vector is horizontal: orthogonal to both (0,0,1) and the
    # normal.
    strike = np.cross(e_z[None, :], normal, axis=1)
    strike_len = np.linalg.norm(strike, axis=1)
    # Degenerate case: a horizontal triangle (normal parallel to e_z)
    # yields a zero strike vector; fall back to (0, +/-1, 0).
    strike = np.where(
        strike_len[:, None] == 0, e_y[None, :] * normal[:, 2, None], strike
    )
    strike = strike / np.linalg.norm(strike, axis=1)[:, None]
    dip = np.cross(normal, strike, axis=1)
    # Rows: strike, dip, normal.
    return np.stack([strike, dip, normal], axis=1)
import time
from kbcstorage.base import Endpoint
class Jobs(Endpoint):
    """
    Endpoint for the asynchronous job API.

    Jobs track potentially long-running actions such as loading table
    data, snapshotting and table structure modifications; they are
    created by actions on target resources.

    A job moves through four statuses:

    ``waiting``
        Queued, awaiting execution.

    ``processing``
        Being handled by a worker.

    ``success``
        Finished successfully.

    ``error``
        Finished with an error.
    """
    def __init__(self, root_url, token):
        """
        Create a Jobs endpoint.

        Args:
            root_url (:obj:`str`): The base url for the API.
            token (:obj:`str`): A storage API key.
        """
        super().__init__(root_url, 'jobs', token)

    def list(self):
        """
        List the details of every job.

        Returns:
            response_body: The json from the HTTP response.

        Raises:
            requests.HTTPError: If the API request fails.
        """
        return self._get(self.base_url)

    def detail(self, job_id):
        """
        Fetch the full record of a single job.

        Args:
            job_id (str or int): The id of the job.

        Raises:
            requests.HTTPError: If the API request fails.
        """
        job_url = '{}/{}'.format(self.base_url, job_id)
        return self._get(job_url)

    def status(self, job_id):
        """
        Fetch only the status string of a job.

        Args:
            job_id (str or int): The id of the job.

        Raises:
            requests.HTTPError: If the API request fails.
        """
        return self.detail(job_id)['status']

    def completed(self, job_id):
        """
        Report whether a job has finished (successfully or not).

        Args:
            job_id (str or int): The id of the job.

        Returns:
            completed (bool): True if the job has finished, else False.

        Raises:
            requests.HTTPError: If the API request fails.
        """
        return self.status(job_id) in ('error', 'success')

    def block_until_completed(self, job_id):
        """
        Poll the API until the job finishes, then return its record.

        Args:
            job_id (str): The id of the job

        Returns:
            response_body: The parsed json from the HTTP response
                containing a storage Job.

        Raises:
            requests.HTTPError: If any API request fails.
        """
        attempt = 1
        while True:
            job = self.detail(job_id)
            if job['status'] in ('error', 'success'):
                return job
            attempt += 1
            # Exponential backoff between polls, capped at 20 seconds.
            time.sleep(min(2 ** attempt, 20))

    def block_for_success(self, job_id):
        """
        Poll until the job finishes; report whether it succeeded.

        Args:
            job_id (str): The id of the job

        Returns:
            success (bool): True if the job status is success, else False.

        Raises:
            requests.HTTPError: If any API request fails.
        """
        return self.block_until_completed(job_id)['status'] == 'success'
from kbcstorage.base import Endpoint
class Jobs(Endpoint):
    """
    Endpoint for the asynchronous job API.

    Jobs track potentially long-running actions such as loading table
    data, snapshotting and table structure modifications; they are
    created by actions on target resources.

    A job moves through four statuses:

    ``waiting``
        Queued, awaiting execution.

    ``processing``
        Being handled by a worker.

    ``success``
        Finished successfully.

    ``error``
        Finished with an error.
    """
    def __init__(self, root_url, token):
        """
        Create a Jobs endpoint.

        Args:
            root_url (:obj:`str`): The base url for the API.
            token (:obj:`str`): A storage API key.
        """
        super().__init__(root_url, 'jobs', token)

    def list(self):
        """
        List the details of every job.

        Returns:
            response_body: The json from the HTTP response.

        Raises:
            requests.HTTPError: If the API request fails.
        """
        return self._get(self.base_url)

    def detail(self, job_id):
        """
        Fetch the full record of a single job.

        Args:
            job_id (str or int): The id of the job.

        Raises:
            requests.HTTPError: If the API request fails.
        """
        job_url = '{}/{}'.format(self.base_url, job_id)
        return self._get(job_url)

    def status(self, job_id):
        """
        Fetch only the status string of a job.

        Args:
            job_id (str or int): The id of the job.

        Raises:
            requests.HTTPError: If the API request fails.
        """
        return self.detail(job_id)['status']

    def completed(self, job_id):
        """
        Report whether a job has finished (successfully or not).

        Args:
            job_id (str or int): The id of the job.

        Returns:
            completed (bool): True if the job has finished, else False.

        Raises:
            requests.HTTPError: If the API request fails.
        """
        return self.status(job_id) in ('error', 'success')

    def block_until_completed(self, job_id):
        """
        Poll the API until the job finishes, then return its record.

        Args:
            job_id (str): The id of the job

        Returns:
            response_body: The parsed json from the HTTP response
                containing a storage Job.

        Raises:
            requests.HTTPError: If any API request fails.
        """
        attempt = 1
        while True:
            job = self.detail(job_id)
            if job['status'] in ('error', 'success'):
                return job
            attempt += 1
            # Exponential backoff between polls, capped at 20 seconds.
            time.sleep(min(2 ** attempt, 20))

    def block_for_success(self, job_id):
        """
        Poll until the job finishes; report whether it succeeded.

        Args:
            job_id (str): The id of the job

        Returns:
            success (bool): True if the job status is success, else False.

        Raises:
            requests.HTTPError: If any API request fails.
        """
        return self.block_until_completed(job_id)['status'] == 'success'
from types import FunctionType
from typing import Tuple
from axelrod._strategy_utils import thue_morse_generator
from axelrod.action import Action
from axelrod.player import Player
# Module-level shorthand for the two possible actions.
C, D = Action.C, Action.D
class SequencePlayer(Player):
    """Abstract base class for players that use a generated sequence to
    determine their plays.

    Names:

    - Sequence Player: Original name by <NAME>
    """

    def __init__(
        self, generator_function: FunctionType, generator_args: Tuple = ()
    ) -> None:
        """Store the generator factory and start the sequence.

        The factory (not the generator itself) is kept so the generator
        can be rebuilt on unpickling -- generator objects are not
        picklable.
        """
        super().__init__()
        # Initialize the sequence generator
        self.generator_function = generator_function
        self.generator_args = generator_args
        self.sequence_generator = self.generator_function(*self.generator_args)

    def meta_strategy(self, value: int) -> Action:
        # BUG FIX: return annotation was `-> None`, but this method
        # returns an Action (C or D).
        """Map a sequence value to an action.

        By default, treat values like python truth values: 0 -> defect,
        anything else -> cooperate. Override in child classes for
        alternate behaviors."""
        if value == 0:
            return D
        else:
            return C

    def strategy(self, opponent: Player) -> Action:
        # Pull the next sequence value and map it to an action. The
        # for/return idiom advances the generator exactly one step and
        # falls through (returning None) only if the sequence is
        # exhausted -- the intended generators are infinite.
        for s in self.sequence_generator:
            return self.meta_strategy(s)

    def __getstate__(self):
        # Generators cannot be pickled; drop it and rebuild on load.
        return_dict = self.__dict__.copy()
        del return_dict["sequence_generator"]
        return return_dict

    def __setstate__(self, state):
        self.__dict__.update(state)
        # Recreate the generator and fast-forward it past the moves
        # already played so the sequence resumes where it left off.
        self.__dict__["sequence_generator"] = self.generator_function(
            *self.generator_args
        )
        for turn in self.history:
            next(self.sequence_generator)
class ThueMorse(SequencePlayer):
    """
    A player who cooperates or defects according to the Thue-Morse sequence.
    The first few terms of the Thue-Morse sequence are:
    0 1 1 0 1 0 0 1 1 0 0 1 0 1 1 0 . . .
    Thue-Morse sequence: http://mathworld.wolfram.com/Thue-MorseSequence.html
    Names:
    - Thue Morse: Original name by <NAME>mer
    """

    name = "ThueMorse"
    # Strategy metadata consumed by the axelrod classifier machinery.
    classifier = {
        "memory_depth": float("inf"),
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def __init__(self) -> None:
        """Start the Thue-Morse generator at index 0."""
        super().__init__(thue_morse_generator, (0,))
class ThueMorseInverse(ThueMorse):
""" A player who plays the inverse of the Thue-Morse sequence.
Names:
- Inverse Thue Morse: Original name by <NAME>
"""
name = "ThueMorseInverse"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super(ThueMorse, self).__init__(thue_morse_generator, (0,))
def meta_strategy(self, value: int) -> Action:
# Switch the default cooperate and defect action on 0 or 1
if value == 0:
return C
else:
return D | axelrod/strategies/sequence_player.py | from types import FunctionType
from typing import Tuple
from axelrod._strategy_utils import thue_morse_generator
from axelrod.action import Action
from axelrod.player import Player
C, D = Action.C, Action.D
class SequencePlayer(Player):
"""Abstract base class for players that use a generated sequence to
determine their plays.
Names:
- Sequence Player: Original name by <NAME>
"""
def __init__(
self, generator_function: FunctionType, generator_args: Tuple = ()
) -> None:
super().__init__()
# Initialize the sequence generator
self.generator_function = generator_function
self.generator_args = generator_args
self.sequence_generator = self.generator_function(*self.generator_args)
def meta_strategy(self, value: int) -> None:
"""Determines how to map the sequence value to cooperate or defect.
By default, treat values like python truth values. Override in child
classes for alternate behaviors."""
if value == 0:
return D
else:
return C
def strategy(self, opponent: Player) -> Action:
# Iterate through the sequence and apply the meta strategy
for s in self.sequence_generator:
return self.meta_strategy(s)
def __getstate__(self):
return_dict = self.__dict__.copy()
del return_dict["sequence_generator"]
return return_dict
def __setstate__(self, state):
self.__dict__.update(state)
self.__dict__["sequence_generator"] = self.generator_function(
*self.generator_args
)
for turn in self.history:
next(self.sequence_generator)
class ThueMorse(SequencePlayer):
"""
A player who cooperates or defects according to the Thue-Morse sequence.
The first few terms of the Thue-Morse sequence are:
0 1 1 0 1 0 0 1 1 0 0 1 0 1 1 0 . . .
Thue-Morse sequence: http://mathworld.wolfram.com/Thue-MorseSequence.html
Names:
- Thue Morse: Original name by <NAME>mer
"""
name = "ThueMorse"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super().__init__(thue_morse_generator, (0,))
class ThueMorseInverse(ThueMorse):
""" A player who plays the inverse of the Thue-Morse sequence.
Names:
- Inverse Thue Morse: Original name by <NAME>
"""
name = "ThueMorseInverse"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super(ThueMorse, self).__init__(thue_morse_generator, (0,))
def meta_strategy(self, value: int) -> Action:
# Switch the default cooperate and defect action on 0 or 1
if value == 0:
return C
else:
return D | 0.927396 | 0.374991 |
from jina.peapods.runtimes.head import HeadRuntime
if False:
from argparse import Namespace
def pod(args: 'Namespace'):
"""
Start a Pod
:param args: arguments coming from the CLI.
"""
from jina.peapods.pods import Pod
try:
with Pod(args) as p:
p.join()
except KeyboardInterrupt:
pass
def pea(args: 'Namespace'):
"""
Start a Pea
:param args: arguments coming from the CLI.
"""
from jina.peapods.peas.factory import PeaFactory
try:
with PeaFactory.build_pea(args) as p:
p.join()
except KeyboardInterrupt:
pass
def executor_native(args: 'Namespace'):
"""
Starts an Executor in a WorkerRuntime
:param args: arguments coming from the CLI.
"""
from jina.peapods.runtimes.worker import WorkerRuntime
if args.runtime_cls == 'WorkerRuntime':
runtime_cls = WorkerRuntime
elif args.runtime_cls == 'HeadRuntime':
runtime_cls = HeadRuntime
else:
raise RuntimeError(
f' runtime_cls {args.runtime_cls} is not supported with `--native` argument. `WorkerRuntime` is supported'
)
with runtime_cls(args) as rt:
name = (
rt._data_request_handler._executor.metas.name
if hasattr(rt, '_data_request_handler')
else rt.name
)
rt.logger.success(f' Executor {name} started')
rt.run_forever()
def executor(args: 'Namespace'):
"""
Starts an Executor in any Runtime
:param args: arguments coming from the CLI.
:returns: return the same as `pea` or `worker_runtime`
"""
if args.native:
return executor_native(args)
else:
return pea(args)
def worker_runtime(args: 'Namespace'):
"""
Starts a WorkerRuntime
:param args: arguments coming from the CLI.
"""
from jina.peapods.runtimes.worker import WorkerRuntime
with WorkerRuntime(args) as runtime:
runtime.logger.success(
f' Executor {runtime._data_request_handler._executor.metas.name} started'
)
runtime.run_forever()
def gateway(args: 'Namespace'):
"""
Start a Gateway Pod
:param args: arguments coming from the CLI.
"""
from jina.enums import GatewayProtocolType
from jina.peapods.runtimes import get_runtime
gateway_runtime_dict = {
GatewayProtocolType.GRPC: 'GRPCGatewayRuntime',
GatewayProtocolType.WEBSOCKET: 'WebSocketGatewayRuntime',
GatewayProtocolType.HTTP: 'HTTPGatewayRuntime',
}
runtime_cls = get_runtime(gateway_runtime_dict[args.protocol])
with runtime_cls(args) as runtime:
runtime.logger.success(
f' Gateway with protocol {gateway_runtime_dict[args.protocol]} started'
)
runtime.run_forever()
def ping(args: 'Namespace'):
"""
Check the connectivity of a Pea
:param args: arguments coming from the CLI.
"""
from jina.checker import NetworkChecker
NetworkChecker(args)
def client(args: 'Namespace'):
"""
Start a client connects to the gateway
:param args: arguments coming from the CLI.
"""
from jina.clients import Client
Client(args)
def export_api(args: 'Namespace'):
"""
Export the API
:param args: arguments coming from the CLI.
"""
import json
from cli.export import api_to_dict
from jina.jaml import JAML
from jina import __version__
from jina.logging.predefined import default_logger
from jina.schemas import get_full_schema
if args.yaml_path:
dump_api = api_to_dict()
for yp in args.yaml_path:
f_name = (yp % __version__) if '%s' in yp else yp
with open(f_name, 'w', encoding='utf8') as fp:
JAML.dump(dump_api, fp)
default_logger.info(f'API is exported to {f_name}')
if args.json_path:
dump_api = api_to_dict()
for jp in args.json_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
if args.schema_path:
dump_api = get_full_schema()
for jp in args.schema_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
def hello(args: 'Namespace'):
"""
Run any of the hello world examples
:param args: arguments coming from the CLI.
"""
if args.hello == 'fashion':
from jina.helloworld.fashion.app import hello_world
hello_world(args)
elif args.hello == 'chatbot':
from jina.helloworld.chatbot.app import hello_world
hello_world(args)
elif args.hello == 'multimodal':
from jina.helloworld.multimodal.app import hello_world
hello_world(args)
elif args.hello == 'fork':
from jina.helloworld.fork import fork_hello
fork_hello(args)
else:
raise ValueError(f'must be one of [`fashion`, `chatbot`, `multimodal`, `fork`]')
def flow(args: 'Namespace'):
"""
Start a Flow from a YAML file or a docker image
:param args: arguments coming from the CLI.
"""
from jina import Flow
if args.uses:
f = Flow.load_config(args.uses)
with f:
f.block()
else:
raise ValueError('start a flow from CLI requires a valid `--uses`')
def optimizer(args: 'Namespace'):
"""
Start an optimization from a YAML file
:param args: arguments coming from the CLI.
"""
from jina.optimizers import run_optimizer_cli
run_optimizer_cli(args)
def hub(args: 'Namespace'):
"""
Start a hub builder for push, pull
:param args: arguments coming from the CLI.
"""
from jina.hubble.hubio import HubIO
getattr(HubIO(args), args.hub)()
def help(args: 'Namespace'):
"""
Lookup the usage of certain argument in Jina API.
:param args: arguments coming from the CLI.
"""
from cli.lookup import lookup_and_print
lookup_and_print(args.query.lower()) | cli/api.py | from jina.peapods.runtimes.head import HeadRuntime
if False:
from argparse import Namespace
def pod(args: 'Namespace'):
"""
Start a Pod
:param args: arguments coming from the CLI.
"""
from jina.peapods.pods import Pod
try:
with Pod(args) as p:
p.join()
except KeyboardInterrupt:
pass
def pea(args: 'Namespace'):
"""
Start a Pea
:param args: arguments coming from the CLI.
"""
from jina.peapods.peas.factory import PeaFactory
try:
with PeaFactory.build_pea(args) as p:
p.join()
except KeyboardInterrupt:
pass
def executor_native(args: 'Namespace'):
"""
Starts an Executor in a WorkerRuntime
:param args: arguments coming from the CLI.
"""
from jina.peapods.runtimes.worker import WorkerRuntime
if args.runtime_cls == 'WorkerRuntime':
runtime_cls = WorkerRuntime
elif args.runtime_cls == 'HeadRuntime':
runtime_cls = HeadRuntime
else:
raise RuntimeError(
f' runtime_cls {args.runtime_cls} is not supported with `--native` argument. `WorkerRuntime` is supported'
)
with runtime_cls(args) as rt:
name = (
rt._data_request_handler._executor.metas.name
if hasattr(rt, '_data_request_handler')
else rt.name
)
rt.logger.success(f' Executor {name} started')
rt.run_forever()
def executor(args: 'Namespace'):
"""
Starts an Executor in any Runtime
:param args: arguments coming from the CLI.
:returns: return the same as `pea` or `worker_runtime`
"""
if args.native:
return executor_native(args)
else:
return pea(args)
def worker_runtime(args: 'Namespace'):
"""
Starts a WorkerRuntime
:param args: arguments coming from the CLI.
"""
from jina.peapods.runtimes.worker import WorkerRuntime
with WorkerRuntime(args) as runtime:
runtime.logger.success(
f' Executor {runtime._data_request_handler._executor.metas.name} started'
)
runtime.run_forever()
def gateway(args: 'Namespace'):
"""
Start a Gateway Pod
:param args: arguments coming from the CLI.
"""
from jina.enums import GatewayProtocolType
from jina.peapods.runtimes import get_runtime
gateway_runtime_dict = {
GatewayProtocolType.GRPC: 'GRPCGatewayRuntime',
GatewayProtocolType.WEBSOCKET: 'WebSocketGatewayRuntime',
GatewayProtocolType.HTTP: 'HTTPGatewayRuntime',
}
runtime_cls = get_runtime(gateway_runtime_dict[args.protocol])
with runtime_cls(args) as runtime:
runtime.logger.success(
f' Gateway with protocol {gateway_runtime_dict[args.protocol]} started'
)
runtime.run_forever()
def ping(args: 'Namespace'):
"""
Check the connectivity of a Pea
:param args: arguments coming from the CLI.
"""
from jina.checker import NetworkChecker
NetworkChecker(args)
def client(args: 'Namespace'):
"""
Start a client connects to the gateway
:param args: arguments coming from the CLI.
"""
from jina.clients import Client
Client(args)
def export_api(args: 'Namespace'):
"""
Export the API
:param args: arguments coming from the CLI.
"""
import json
from cli.export import api_to_dict
from jina.jaml import JAML
from jina import __version__
from jina.logging.predefined import default_logger
from jina.schemas import get_full_schema
if args.yaml_path:
dump_api = api_to_dict()
for yp in args.yaml_path:
f_name = (yp % __version__) if '%s' in yp else yp
with open(f_name, 'w', encoding='utf8') as fp:
JAML.dump(dump_api, fp)
default_logger.info(f'API is exported to {f_name}')
if args.json_path:
dump_api = api_to_dict()
for jp in args.json_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
if args.schema_path:
dump_api = get_full_schema()
for jp in args.schema_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
def hello(args: 'Namespace'):
"""
Run any of the hello world examples
:param args: arguments coming from the CLI.
"""
if args.hello == 'fashion':
from jina.helloworld.fashion.app import hello_world
hello_world(args)
elif args.hello == 'chatbot':
from jina.helloworld.chatbot.app import hello_world
hello_world(args)
elif args.hello == 'multimodal':
from jina.helloworld.multimodal.app import hello_world
hello_world(args)
elif args.hello == 'fork':
from jina.helloworld.fork import fork_hello
fork_hello(args)
else:
raise ValueError(f'must be one of [`fashion`, `chatbot`, `multimodal`, `fork`]')
def flow(args: 'Namespace'):
"""
Start a Flow from a YAML file or a docker image
:param args: arguments coming from the CLI.
"""
from jina import Flow
if args.uses:
f = Flow.load_config(args.uses)
with f:
f.block()
else:
raise ValueError('start a flow from CLI requires a valid `--uses`')
def optimizer(args: 'Namespace'):
"""
Start an optimization from a YAML file
:param args: arguments coming from the CLI.
"""
from jina.optimizers import run_optimizer_cli
run_optimizer_cli(args)
def hub(args: 'Namespace'):
"""
Start a hub builder for push, pull
:param args: arguments coming from the CLI.
"""
from jina.hubble.hubio import HubIO
getattr(HubIO(args), args.hub)()
def help(args: 'Namespace'):
"""
Lookup the usage of certain argument in Jina API.
:param args: arguments coming from the CLI.
"""
from cli.lookup import lookup_and_print
lookup_and_print(args.query.lower()) | 0.72662 | 0.264905 |
from __future__ import print_function
import codecs
import os
import sys
import shutil
import re
from bs4 import BeautifulSoup
import commons
LS_URL = u'http://zgdwz.lifescience.com.cn/ashx/searchinfo.ashx?key={}'
LS_INFO_URL = u'http://zgdwz.lifescience.com.cn/info/{}'
BD_URL = u'https://baike.baidu.com/item/{}'
BD_HOST = u'https://baike.baidu.com'
HD_URL = u'http://www.baike.com/wiki/{}'
ZO_URL = u'http://zgdwz.lifescience.com.cn/search?key={}&t=2'
ZO_HOST = u'http://zgdwz.lifescience.com.cn'
CSDB_URL = u'http://www.zoology.csdb.cn/efauna/searchTaxon?search={}'
CSDB_HOST = u'http://www.zoology.csdb.cn/efauna/'
'''
curl 'http://zgdwz.lifescience.com.cn/ashx/autocomplete.ashx?q=%E9%A9%AC%E5%8F%A3&limit=20×tamp=1514366731531' -H 'DNT: 1' -H 'Accept-Encoding: gzip, deflate' -H 'Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6,de;q=0.5,fr;q=0.4,pl;q=0.3,pt;q=0.2,es;q=0.1' -H 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36' -H 'Accept: application/json, text/javascript, */*; q=0.01' -H 'Referer: http://zgdwz.lifescience.com.cn/search?key=%E9%A9%AC%E5%8F%A3%E9%B1%BC&t=2' -H 'X-Requested-With: XMLHttpRequest' -H 'Connection: keep-alive' --compressed
'''
def clean_text(text, minlen=300):
text = re.sub(r'\n{2,}', '\n', text)
text = re.sub(r' {2,}', ' ', text)
if len(text) < minlen:
return None
return text
def parse_ls_text(html):
soup = BeautifulSoup(html, "lxml")
for s in soup('script'):
s.decompose()
for s in soup('style'):
s.decompose()
for s in soup.find_all(class_='ipvip'):
s.decompose()
s = soup.find('div', id='centerdiv')
title = soup.title.get_text().strip()
content = s.get_text('\n') if s else None
return title, content
def parse_bd_text(html):
soup = BeautifulSoup(html, "lxml")
for s in soup('script'):
s.decompose()
for s in soup('style'):
s.decompose()
s = soup.find('div', class_='main-content')
return clean_text(s.get_text('\n')) if s else None
def parse_hd_text(html):
soup = BeautifulSoup(html, "lxml")
for s in soup('script'):
s.decompose()
for s in soup('style'):
s.decompose()
for s in soup.select('#index-footer'):
s.decompose()
for s in soup.find_all(class_='wap-citiao'):
s.decompose()
for s in soup.find_all(class_='bjbd'):
s.decompose()
s = soup.find(id='content')
return clean_text(s.get_text('\n')) if s else None
def parse_csdb_text(html):
soup = BeautifulSoup(html, "lxml")
for s in soup('script'):
s.decompose()
for s in soup('style'):
s.decompose()
return clean_text(soup.get_text('\n')) if s else None
def csdb_info_link(tag):
return not tag.has_attr('class') \
and tag.has_attr('href') \
and tag['href'].startswith('getTaxon') \
def get_csdb_url(name):
exclude = [u'属', u'科', u'目', u'蚊', u'蛾', u'虫']
r = commons.get(CSDB_URL.format(name), encoding='utf-8',
allow_redirects=False)
if r.status_code == 200:
soup = BeautifulSoup(r.text, "lxml")
result = None
for s in soup.find_all(csdb_info_link):
skip = False
for e in exclude:
if e in s.text:
skip = True
break
if skip:
continue
if name in s.text:
result = s['href']
break
if result:
return CSDB_HOST + result
def download_csdb_info(name, dst):
ofile = os.path.join(dst, u"{}_csdb.txt".format(name))
if os.path.exists(ofile):
print(u'CSDB Skip {}'.format(name))
return ofile
csdb_url = get_csdb_url(name)
csdb_text = None
if csdb_url:
r = commons.get(csdb_url, encoding='utf-8',
allow_redirects=False)
if r.status_code == 200:
csdb_text = parse_csdb_text(r.text)
if csdb_text:
with codecs.open(ofile, 'w', 'utf-8') as f:
print(u'CSDB Found {}'.format(name))
f.write(csdb_url)
f.write('\n\n')
f.write(csdb_text)
return ofile
def download_bd_info(name, dst):
ofile = os.path.join(dst, u"{}_bd.txt".format(name))
if os.path.exists(ofile):
print(u'Baidu Skip {}'.format(name))
return ofile
bd_url = BD_URL.format(name)
bd_text = None
r = commons.get(bd_url, encoding='utf-8',
allow_redirects=False)
if r.status_code == 200:
bd_text = parse_bd_text(r.text)
elif r.status_code == 302:
location = r.headers['Location']
if location and location.startswith('/item/'):
bd_url = "{}{}".format(BD_HOST, location)
r = commons.get(bd_url, encoding='utf-8',
allow_redirects=False)
if r.status_code == 200:
bd_text = parse_bd_text(r.text)
else:
if name.endswith(u'鱼'):
bd_url = BD_URL.format(name[:-1])
r = commons.get(bd_url, encoding='utf-8',
allow_redirects=False)
if r.status_code == 200:
bd_text = parse_bd_text(r.text)
if bd_text:
with codecs.open(ofile, 'w', 'utf-8') as f:
print(u'Baidu Found {}'.format(name))
f.write(bd_url)
f.write('\n\n')
f.write(bd_text)
return ofile
def download_hd_info(name, dst):
ofile = os.path.join(dst, u"{}_hd.txt".format(name))
if os.path.exists(ofile):
print(u'Hudong Skip {}'.format(name))
return ofile
hd_url = HD_URL.format(name)
hd_text = None
r = commons.get(hd_url, encoding='utf-8',
allow_redirects=False)
if r.status_code == 200:
hd_text = parse_hd_text(r.text)
if hd_text:
with codecs.open(ofile, 'w', 'utf-8') as f:
print(u'Hudong Found {}'.format(name))
f.write(hd_url)
f.write('\n\n')
f.write(hd_text)
return ofile
def download_info(name, dst):
print(u'Processing {}'.format(name))
a = download_csdb_info(name, dst)
b = download_bd_info(name, dst)
c = download_hd_info(name, dst)
return a or b or c
def download_fish_list(list_file, dst=None):
if not dst:
dst = os.path.dirname(list_file)
names = codecs.open(list_file, 'r', encoding='utf-8').read().splitlines()
for name in names:
url = LS_URL.format(name)
r = commons.get(url, encoding='utf-8', allow_redirects=False)
if r.status_code != 200 or not r.text:
print(u'No match {}'.format(name))
continue
url = LS_INFO_URL.format(r.text)
# print(url)
r = commons.get(url, encoding='utf-8',
allow_redirects=False)
if r.status_code != 200:
continue
title, content = parse_ls_text(r.text)
if title and content:
ofile = os.path.join(dst, u'{}.txt'.format(title))
if os.path.exists(ofile):
print(u'Skip {}'.format(title))
continue
with codecs.open(ofile, 'w', 'utf-8') as f:
print(u'Saved {}'.format(title))
f.write(content)
def main(list_file, dst=None):
if not dst:
dst = os.path.dirname(list_file)
nt_file = os.path.join(dst, "notfound.txt")
names = codecs.open(list_file, 'r', encoding='utf-8').read().splitlines()
nt_names = []
for name in names:
if not download_info(name, dst):
nt_names.append(name)
with codecs.open(nt_file, 'w', 'utf-8') as f:
f.write('\n'.join(nt_names))
if __name__ == '__main__':
if True:
download_fish_list(os.path.abspath(sys.argv[1]))
sys.exit(0)
if len(sys.argv) < 2:
print('Usage: {} list.txt'.format(sys.argv[0]))
sys.exit(1)
list_file = os.path.abspath(sys.argv[1])
if len(sys.argv) > 2:
dst = os.path.abspath(sys.argv[2])
else:
dst = os.path.dirname(list_file)
main(list_file, dst) | labs/fish_details_fetch.py | from __future__ import print_function
import codecs
import os
import sys
import shutil
import re
from bs4 import BeautifulSoup
import commons
LS_URL = u'http://zgdwz.lifescience.com.cn/ashx/searchinfo.ashx?key={}'
LS_INFO_URL = u'http://zgdwz.lifescience.com.cn/info/{}'
BD_URL = u'https://baike.baidu.com/item/{}'
BD_HOST = u'https://baike.baidu.com'
HD_URL = u'http://www.baike.com/wiki/{}'
ZO_URL = u'http://zgdwz.lifescience.com.cn/search?key={}&t=2'
ZO_HOST = u'http://zgdwz.lifescience.com.cn'
CSDB_URL = u'http://www.zoology.csdb.cn/efauna/searchTaxon?search={}'
CSDB_HOST = u'http://www.zoology.csdb.cn/efauna/'
'''
curl 'http://zgdwz.lifescience.com.cn/ashx/autocomplete.ashx?q=%E9%A9%AC%E5%8F%A3&limit=20×tamp=1514366731531' -H 'DNT: 1' -H 'Accept-Encoding: gzip, deflate' -H 'Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6,de;q=0.5,fr;q=0.4,pl;q=0.3,pt;q=0.2,es;q=0.1' -H 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36' -H 'Accept: application/json, text/javascript, */*; q=0.01' -H 'Referer: http://zgdwz.lifescience.com.cn/search?key=%E9%A9%AC%E5%8F%A3%E9%B1%BC&t=2' -H 'X-Requested-With: XMLHttpRequest' -H 'Connection: keep-alive' --compressed
'''
def clean_text(text, minlen=300):
text = re.sub(r'\n{2,}', '\n', text)
text = re.sub(r' {2,}', ' ', text)
if len(text) < minlen:
return None
return text
def parse_ls_text(html):
soup = BeautifulSoup(html, "lxml")
for s in soup('script'):
s.decompose()
for s in soup('style'):
s.decompose()
for s in soup.find_all(class_='ipvip'):
s.decompose()
s = soup.find('div', id='centerdiv')
title = soup.title.get_text().strip()
content = s.get_text('\n') if s else None
return title, content
def parse_bd_text(html):
soup = BeautifulSoup(html, "lxml")
for s in soup('script'):
s.decompose()
for s in soup('style'):
s.decompose()
s = soup.find('div', class_='main-content')
return clean_text(s.get_text('\n')) if s else None
def parse_hd_text(html):
soup = BeautifulSoup(html, "lxml")
for s in soup('script'):
s.decompose()
for s in soup('style'):
s.decompose()
for s in soup.select('#index-footer'):
s.decompose()
for s in soup.find_all(class_='wap-citiao'):
s.decompose()
for s in soup.find_all(class_='bjbd'):
s.decompose()
s = soup.find(id='content')
return clean_text(s.get_text('\n')) if s else None
def parse_csdb_text(html):
soup = BeautifulSoup(html, "lxml")
for s in soup('script'):
s.decompose()
for s in soup('style'):
s.decompose()
return clean_text(soup.get_text('\n')) if s else None
def csdb_info_link(tag):
return not tag.has_attr('class') \
and tag.has_attr('href') \
and tag['href'].startswith('getTaxon') \
def get_csdb_url(name):
exclude = [u'属', u'科', u'目', u'蚊', u'蛾', u'虫']
r = commons.get(CSDB_URL.format(name), encoding='utf-8',
allow_redirects=False)
if r.status_code == 200:
soup = BeautifulSoup(r.text, "lxml")
result = None
for s in soup.find_all(csdb_info_link):
skip = False
for e in exclude:
if e in s.text:
skip = True
break
if skip:
continue
if name in s.text:
result = s['href']
break
if result:
return CSDB_HOST + result
def download_csdb_info(name, dst):
ofile = os.path.join(dst, u"{}_csdb.txt".format(name))
if os.path.exists(ofile):
print(u'CSDB Skip {}'.format(name))
return ofile
csdb_url = get_csdb_url(name)
csdb_text = None
if csdb_url:
r = commons.get(csdb_url, encoding='utf-8',
allow_redirects=False)
if r.status_code == 200:
csdb_text = parse_csdb_text(r.text)
if csdb_text:
with codecs.open(ofile, 'w', 'utf-8') as f:
print(u'CSDB Found {}'.format(name))
f.write(csdb_url)
f.write('\n\n')
f.write(csdb_text)
return ofile
def download_bd_info(name, dst):
ofile = os.path.join(dst, u"{}_bd.txt".format(name))
if os.path.exists(ofile):
print(u'Baidu Skip {}'.format(name))
return ofile
bd_url = BD_URL.format(name)
bd_text = None
r = commons.get(bd_url, encoding='utf-8',
allow_redirects=False)
if r.status_code == 200:
bd_text = parse_bd_text(r.text)
elif r.status_code == 302:
location = r.headers['Location']
if location and location.startswith('/item/'):
bd_url = "{}{}".format(BD_HOST, location)
r = commons.get(bd_url, encoding='utf-8',
allow_redirects=False)
if r.status_code == 200:
bd_text = parse_bd_text(r.text)
else:
if name.endswith(u'鱼'):
bd_url = BD_URL.format(name[:-1])
r = commons.get(bd_url, encoding='utf-8',
allow_redirects=False)
if r.status_code == 200:
bd_text = parse_bd_text(r.text)
if bd_text:
with codecs.open(ofile, 'w', 'utf-8') as f:
print(u'Baidu Found {}'.format(name))
f.write(bd_url)
f.write('\n\n')
f.write(bd_text)
return ofile
def download_hd_info(name, dst):
ofile = os.path.join(dst, u"{}_hd.txt".format(name))
if os.path.exists(ofile):
print(u'Hudong Skip {}'.format(name))
return ofile
hd_url = HD_URL.format(name)
hd_text = None
r = commons.get(hd_url, encoding='utf-8',
allow_redirects=False)
if r.status_code == 200:
hd_text = parse_hd_text(r.text)
if hd_text:
with codecs.open(ofile, 'w', 'utf-8') as f:
print(u'Hudong Found {}'.format(name))
f.write(hd_url)
f.write('\n\n')
f.write(hd_text)
return ofile
def download_info(name, dst):
print(u'Processing {}'.format(name))
a = download_csdb_info(name, dst)
b = download_bd_info(name, dst)
c = download_hd_info(name, dst)
return a or b or c
def download_fish_list(list_file, dst=None):
if not dst:
dst = os.path.dirname(list_file)
names = codecs.open(list_file, 'r', encoding='utf-8').read().splitlines()
for name in names:
url = LS_URL.format(name)
r = commons.get(url, encoding='utf-8', allow_redirects=False)
if r.status_code != 200 or not r.text:
print(u'No match {}'.format(name))
continue
url = LS_INFO_URL.format(r.text)
# print(url)
r = commons.get(url, encoding='utf-8',
allow_redirects=False)
if r.status_code != 200:
continue
title, content = parse_ls_text(r.text)
if title and content:
ofile = os.path.join(dst, u'{}.txt'.format(title))
if os.path.exists(ofile):
print(u'Skip {}'.format(title))
continue
with codecs.open(ofile, 'w', 'utf-8') as f:
print(u'Saved {}'.format(title))
f.write(content)
def main(list_file, dst=None):
if not dst:
dst = os.path.dirname(list_file)
nt_file = os.path.join(dst, "notfound.txt")
names = codecs.open(list_file, 'r', encoding='utf-8').read().splitlines()
nt_names = []
for name in names:
if not download_info(name, dst):
nt_names.append(name)
with codecs.open(nt_file, 'w', 'utf-8') as f:
f.write('\n'.join(nt_names))
if __name__ == '__main__':
if True:
download_fish_list(os.path.abspath(sys.argv[1]))
sys.exit(0)
if len(sys.argv) < 2:
print('Usage: {} list.txt'.format(sys.argv[0]))
sys.exit(1)
list_file = os.path.abspath(sys.argv[1])
if len(sys.argv) > 2:
dst = os.path.abspath(sys.argv[2])
else:
dst = os.path.dirname(list_file)
main(list_file, dst) | 0.177526 | 0.062674 |
import sys
import cdsapi
year= int(sys.argv[1])
#month= sys.argv[2]
print('/home/smartmet/data/ec-sf_%s-%s_all-24h-euro.grib'%(year,year+2))
c = cdsapi.Client()
c.retrieve(
'seasonal-original-single-levels',
{
'format': 'grib',
'originating_centre': 'ecmwf',
'system': '5',
'variable': [
# '10m_u_component_of_wind', '10m_v_component_of_wind', '10m_wind_gust_since_previous_post_processing',
# '2m_dewpoint_temperature', '2m_temperature', 'eastward_turbulent_surface_stress',
# 'evaporation', 'maximum_2m_temperature_in_the_last_24_hours', 'mean_sea_level_pressure',
# 'minimum_2m_temperature_in_the_last_24_hours', 'northward_turbulent_surface_stress', 'runoff',
# 'sea_ice_cover', 'sea_surface_temperature', 'snow_density','snow_depth', 'snowfall',
# 'soil_temperature_level_1','soil_temperature_level_2','soil_temperature_level_3',
# '39.128', '40.128', '41.128','42.128',
# 'surface_latent_heat_flux', 'surface_net_solar_radiation', 'surface_net_thermal_radiation',
# 'surface_sensible_heat_flux', 'surface_solar_radiation_downwards', 'surface_thermal_radiation_downwards',
# 'top_net_solar_radiation', 'top_net_thermal_radiation', 'total_cloud_cover',
'total_precipitation'
],
'area' : '75/-30/25/50',
'year': [year,year+1,year+2],
'month': [1,2,3,4,5,6,7,8,9,10,11,12],
'day': '01',
'leadtime_hour': [
"24", "48", "72", "96", "120", "144", "168", "192", "216", "240", "264", "288", "312", "336", "360", "384", "408", "432", "456", "480",
"504", "528", "552", "576", "600", "624", "648", "672", "696", "720", "744", "768", "792", "816", "840", "864", "888", "912", "936", "960",
"984", "1008", "1032", "1056", "1080", "1104", "1128", "1152", "1176", "1200", "1224", "1248", "1272", "1296", "1320", "1344", "1368", "1392",
"1416", "1440", "1464", "1488", "1512", "1536", "1560", "1584", "1608", "1632", "1656", "1680", "1704", "1728", "1752", "1776", "1800", "1824",
"1848", "1872", "1896", "1920", "1944", "1968", "1992", "2016", "2040", "2064", "2088", "2112", "2136", "2160", "2184", "2208", "2232", "2256",
"2280", "2304", "2328", "2352", "2376", "2400", "2424", "2448", "2472", "2496", "2520", "2544", "2568", "2592", "2616", "2640", "2664", "2688",
"2712", "2736", "2760", "2784", "2808", "2832", "2856", "2880", "2904", "2928", "2952", "2976", "3000", "3024", "3048", "3072", "3096", "3120",
"3144", "3168", "3192", "3216", "3240", "3264", "3288", "3312", "3336", "3360", "3384", "3408", "3432", "3456", "3480", "3504", "3528", "3552",
"3576", "3600", "3624", "3648", "3672", "3696", "3720", "3744", "3768", "3792", "3816", "3840", "3864", "3888", "3912", "3936", "3960", "3984",
"4008", "4032", "4056", "4080", "4104", "4128", "4152", "4176", "4200", "4224", "4248", "4272", "4296", "4320", "4344", "4368", "4392", "4416",
"4440", "4464", "4488", "4512", "4536", "4560", "4584", "4608", "4632", "4656", "4680", "4704", "4728", "4752", "4776", "4800", "4824", "4848",
"4872", "4896", "4920", "4944", "4968", "4992", "5016", "5040", "5064", "5088", "5112", "5136", "5160"
],
},
'/mnt/data/ens/ec-sf_%s-%s_tp-24h-euro.grib'%(year,year+2)
) | bin/cds-sf-tp-24h-stats.py | import sys
import cdsapi
year= int(sys.argv[1])
#month= sys.argv[2]
print('/home/smartmet/data/ec-sf_%s-%s_all-24h-euro.grib'%(year,year+2))
c = cdsapi.Client()
c.retrieve(
'seasonal-original-single-levels',
{
'format': 'grib',
'originating_centre': 'ecmwf',
'system': '5',
'variable': [
# '10m_u_component_of_wind', '10m_v_component_of_wind', '10m_wind_gust_since_previous_post_processing',
# '2m_dewpoint_temperature', '2m_temperature', 'eastward_turbulent_surface_stress',
# 'evaporation', 'maximum_2m_temperature_in_the_last_24_hours', 'mean_sea_level_pressure',
# 'minimum_2m_temperature_in_the_last_24_hours', 'northward_turbulent_surface_stress', 'runoff',
# 'sea_ice_cover', 'sea_surface_temperature', 'snow_density','snow_depth', 'snowfall',
# 'soil_temperature_level_1','soil_temperature_level_2','soil_temperature_level_3',
# '39.128', '40.128', '41.128','42.128',
# 'surface_latent_heat_flux', 'surface_net_solar_radiation', 'surface_net_thermal_radiation',
# 'surface_sensible_heat_flux', 'surface_solar_radiation_downwards', 'surface_thermal_radiation_downwards',
# 'top_net_solar_radiation', 'top_net_thermal_radiation', 'total_cloud_cover',
'total_precipitation'
],
'area' : '75/-30/25/50',
'year': [year,year+1,year+2],
'month': [1,2,3,4,5,6,7,8,9,10,11,12],
'day': '01',
'leadtime_hour': [
"24", "48", "72", "96", "120", "144", "168", "192", "216", "240", "264", "288", "312", "336", "360", "384", "408", "432", "456", "480",
"504", "528", "552", "576", "600", "624", "648", "672", "696", "720", "744", "768", "792", "816", "840", "864", "888", "912", "936", "960",
"984", "1008", "1032", "1056", "1080", "1104", "1128", "1152", "1176", "1200", "1224", "1248", "1272", "1296", "1320", "1344", "1368", "1392",
"1416", "1440", "1464", "1488", "1512", "1536", "1560", "1584", "1608", "1632", "1656", "1680", "1704", "1728", "1752", "1776", "1800", "1824",
"1848", "1872", "1896", "1920", "1944", "1968", "1992", "2016", "2040", "2064", "2088", "2112", "2136", "2160", "2184", "2208", "2232", "2256",
"2280", "2304", "2328", "2352", "2376", "2400", "2424", "2448", "2472", "2496", "2520", "2544", "2568", "2592", "2616", "2640", "2664", "2688",
"2712", "2736", "2760", "2784", "2808", "2832", "2856", "2880", "2904", "2928", "2952", "2976", "3000", "3024", "3048", "3072", "3096", "3120",
"3144", "3168", "3192", "3216", "3240", "3264", "3288", "3312", "3336", "3360", "3384", "3408", "3432", "3456", "3480", "3504", "3528", "3552",
"3576", "3600", "3624", "3648", "3672", "3696", "3720", "3744", "3768", "3792", "3816", "3840", "3864", "3888", "3912", "3936", "3960", "3984",
"4008", "4032", "4056", "4080", "4104", "4128", "4152", "4176", "4200", "4224", "4248", "4272", "4296", "4320", "4344", "4368", "4392", "4416",
"4440", "4464", "4488", "4512", "4536", "4560", "4584", "4608", "4632", "4656", "4680", "4704", "4728", "4752", "4776", "4800", "4824", "4848",
"4872", "4896", "4920", "4944", "4968", "4992", "5016", "5040", "5064", "5088", "5112", "5136", "5160"
],
},
'/mnt/data/ens/ec-sf_%s-%s_tp-24h-euro.grib'%(year,year+2)
) | 0.121035 | 0.271729 |
import os
import dgl
import torch
import numpy as np
from dgl.data import DGLDataset
class Polar(DGLDataset):
    """Graph-classification dataset of fully-connected graphs.

    Each sample is a fully-connected graph of ``num_nodes`` nodes whose
    edge weights (an adjacency matrix) and node features are read from
    per-class CSV files laid out as ``<root>/graphs/<split>/<class>/*.csv``
    and ``<root>/node_features/<split>/<class>/*.csv``.
    """

    def __init__(self, root, split, num_nodes=189):
        # super(Polar, self).__init__()
        self.root = root
        self.split = split
        self.num_nodes = num_nodes
        self.graphs_base = os.path.join(self.root, "graphs", self.split)
        self.node_features_base = os.path.join(self.root, "node_features", self.split)
        self.items_list, self.label_names = self.get_matirx_list()

    def get_matirx_list(self):
        """Walk the graphs directory and collect one record per CSV file.

        Returns
        -------
        tuple(list[dict], list[str])
            One dict per sample (graph path, node-feature path, numeric
            label, label name) and the list of class-directory names.
        """
        items_list = []
        label_names = []
        counter = 0
        for root, dirs, files in os.walk(self.graphs_base, topdown=True):
            nftrs_root = os.path.join(self.node_features_base, os.path.split(root)[1])
            if counter == 0:
                # First iteration visits the split directory itself; its
                # sub-directories are the class names. Labels are assigned
                # by visit order of those sub-directories (counter - 1).
                label_names = dirs
            for file in files:
                if file.endswith(".csv"):
                    items_list.append(
                        {
                            "graph": os.path.join(root, file),
                            "node_features": os.path.join(nftrs_root, file),
                            "label": counter - 1,
                            "label_name": label_names[counter - 1],
                        }
                    )
            counter += 1
        return items_list, label_names

    def __getitem__(self, index):
        """Build and return ``(graph, label, label_name)`` for one sample."""
        graph_dir = self.items_list[index]["graph"]
        nftrs_dir = self.items_list[index]["node_features"]
        label = self.items_list[index]["label"]
        label_name = self.items_list[index]["label_name"]
        node_features = np.loadtxt(nftrs_dir, delimiter=",")
        edge_weights = np.loadtxt(graph_dir, delimiter=",")
        # Flatten the adjacency matrix row-major so entry (i, j) lines up
        # with the (src, dst) edge order built below.
        edge_weights = np.squeeze(edge_weights.reshape(1, -1))
        # Fully-connected graph: src = [0,0,...,1,1,...], dst = [0,1,2,...]
        # cycled. Vectorized replacement for the original O(n^2) Python
        # nested loops; produces the same int arrays.
        src = np.repeat(np.arange(self.num_nodes), self.num_nodes)
        det = np.tile(np.arange(self.num_nodes), self.num_nodes)
        u, v = (torch.tensor(src), torch.tensor(det))
        g = dgl.graph((u, v))
        # add node features and edge features
        g.ndata["node_features"] = torch.from_numpy(node_features)
        g.edata["edge_weights"] = torch.from_numpy(edge_weights)
        return g, label, label_name

    def __len__(self):
        """Number of CSV samples discovered for this split."""
        return len(self.items_list)
def main():
    """Smoke test: load the training split and print its first sample."""
    train_set = Polar("./dataset", "train")
    print(f"Size of train set: {len(train_set):d}")
    sample_graph, sample_label, sample_label_name = next(iter(train_set))
    print(sample_graph, sample_label, sample_label_name)
if __name__ == "__main__":
main() | dataloader.py | import os
import dgl
import torch
import numpy as np
from dgl.data import DGLDataset
class Polar(DGLDataset):
def __init__(self, root, split, num_nodes=189):
# super(Polar, self).__init__()
self.root = root
self.split = split
self.num_nodes = num_nodes
self.graphs_base = os.path.join(self.root, "graphs", self.split)
self.node_features_base = os.path.join(self.root, "node_features", self.split)
self.items_list, self.label_names = self.get_matirx_list()
def get_matirx_list(self):
items_list = []
label_names = []
counter = 0
for root, dirs, files in os.walk(self.graphs_base, topdown=True):
nftrs_root = os.path.join(self.node_features_base, os.path.split(root)[1])
if counter == 0:
label_names = dirs
for file in files:
if file.endswith(".csv"):
items_list.append(
{
"graph": os.path.join(root, file),
"node_features": os.path.join(nftrs_root, file),
"label": counter - 1,
"label_name": label_names[counter - 1]
}
)
counter += 1
return items_list, label_names
def __getitem__(self, index):
graph_dir = self.items_list[index]["graph"]
nftrs_dir = self.items_list[index]["node_features"]
label = self.items_list[index]["label"]
label_name = self.items_list[index]["label_name"]
node_features = np.loadtxt(nftrs_dir, delimiter=",")
edge_weights = np.loadtxt(graph_dir, delimiter=",")
edge_weights = np.squeeze(edge_weights.reshape(1, -1))
src = [[0 for i in range(self.num_nodes)] for j in range(self.num_nodes)]
for i in range(len(src)):
for j in range(len(src[i])):
src[i][j] = i
src = np.array(src).flatten()
det = [[i for i in range(self.num_nodes)] for j in range(self.num_nodes)]
det = np.array(det).flatten()
u, v = (torch.tensor(src), torch.tensor(det))
g = dgl.graph((u, v))
# add node features and edge features
g.ndata["node_features"] = torch.from_numpy(node_features)
g.edata["edge_weights"] = torch.from_numpy(edge_weights)
return g, label, label_name
def __len__(self):
return len(self.items_list)
def main():
dataset = Polar("./dataset", "train")
print(f"Size of train set: {len(dataset):d}")
dataiter = iter(dataset)
g, label, label_name = next(dataiter)
print(g, label, label_name)
if __name__ == "__main__":
main() | 0.346099 | 0.261128 |
import datetime as dt
import urllib.parse
from typing import Dict, Union
from utils.database import tibiaDatabase
from utils.general import get_local_timezone
from utils.tibia import get_tibia_time_zone
WIKI_ICON = "https://vignette.wikia.nocookie.net/tibia/images/b/bc/Wiki.png/revision/latest?path-prefix=en"
def get_article_url(title: str) -> str:
    """Build the TibiaWiki URL for an article title (URL-quoted)."""
    quoted_title = urllib.parse.quote(title)
    return "http://tibia.wikia.com/wiki/" + quoted_title
def get_monster(name):
    """Look up a creature by (partial) title.

    Returns the creature row (with a ``loot`` list attached) on an exact
    or unique match, a list of suggested titles on an ambiguous match, or
    ``None`` when nothing matches.
    """
    cursor = tibiaDatabase.cursor()
    cursor.execute("SELECT * FROM creatures WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%" + name + "%",))
    rows = cursor.fetchall()
    if not rows:
        return None
    if rows[0]["title"].lower() != name.lower() and len(rows) > 1:
        # Ambiguous: hand back the candidate titles as suggestions.
        return [row['title'] for row in rows]
    monster = rows[0]
    try:
        # Normalize unknown/invalid hitpoint values to None.
        if monster['hitpoints'] is None or monster['hitpoints'] < 1:
            monster['hitpoints'] = None
        cursor.execute("SELECT items.title as item, chance, min, max "
                       "FROM creatures_drops, items "
                       "WHERE items.id = creatures_drops.item_id AND creature_id = ? "
                       "ORDER BY chance DESC",
                       (monster["id"],))
        monster["loot"] = cursor.fetchall()
        return monster
    finally:
        cursor.close()
def get_bestiary_classes() -> Dict[str, int]:
    """Map every bestiary class name to the number of creatures in it."""
    rows = tibiaDatabase.execute("SELECT DISTINCT bestiary_class, count(*) as count "
                                 "FROM creatures WHERE bestiary_class not NUll "
                                 "GROUP BY bestiary_class ORDER BY bestiary_class")
    return {row["bestiary_class"]: row["count"] for row in rows}
def get_bestiary_creatures(_class: str) -> Dict[str, str]:
    """Map every creature title in a bestiary class to its difficulty,
    ordered Trivial < Easy < Medium < Hard."""
    rows = tibiaDatabase.execute("""
        SELECT title, bestiary_level
        FROM creatures
        WHERE bestiary_class LIKE ?
        ORDER BY
        CASE bestiary_level
        WHEN "Trivial" THEN 0
        WHEN "Easy" THEN 1
        WHEN "Medium" THEN 2
        WHEN "Hard" THEN 3
        END
        """, (_class,))
    return {row["title"]: row["bestiary_level"] for row in rows}
def get_item(name):
    """Returns a dictionary containing an item's info, if no exact match was found, it returns a list of suggestions.

    The dictionary has the following keys: name, look_text, npcs_sold*, value_sell, npcs_bought*, value_buy.
    *npcs_sold and npcs_bought are list, each element is a dictionary with the keys: name, city.
    Returns None when nothing matches the search string at all.
    """
    # Reading item database
    c = tibiaDatabase.cursor()
    # Search query: substring match, shortest titles first so exact hits rank first.
    c.execute("SELECT * FROM items WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%" + name + "%",))
    result = c.fetchall()
    if len(result) == 0:
        return None
    elif result[0]["title"].lower() == name.lower() or len(result) == 1:
        # Exact title match or a single hit: use it.
        item = result[0]
    else:
        # Ambiguous: return the candidate titles as suggestions.
        return [x['title'] for x in result]
    try:
        # NPCs that sell this item, cheapest first.
        c.execute("SELECT npc.name, npc.city, npcs_selling.value, currency.name as currency "
                  "FROM npcs_selling "
                  "LEFT JOIN npcs npc on npc.id = npc_id "
                  "LEFT JOIN items currency on currency.id = currency "
                  "WHERE item_id = ? "
                  "ORDER BY npcs_selling.value ASC", (item["id"],))
        item["sellers"] = c.fetchall()
        # NPCs that buy this item, best price first.
        c.execute("SELECT npc.name, npc.city, npcs_buying.value, currency.name as currency "
                  "FROM npcs_buying "
                  "LEFT JOIN npcs npc on npc.id = npc_id "
                  "LEFT JOIN items currency on currency.id = currency "
                  "WHERE item_id = ? "
                  "ORDER BY npcs_buying.value DESC", (item["id"],))
        item["buyers"] = c.fetchall()
        # Creatures that drop this item, most likely first.
        c.execute("SELECT creature.title as name, chance "
                  "FROM creatures_drops "
                  "LEFT JOIN creatures creature on creature.id = creature_id "
                  "WHERE item_id = ? "
                  "ORDER BY chance DESC ", (item["id"],))
        item["loot_from"] = c.fetchall()
        # Quests that reward this item.
        c.execute("SELECT quests.name "
                  "FROM quests_rewards "
                  "INNER JOIN quests ON quests.id = quests_rewards.quest_id "
                  "WHERE item_id = ? ", (item["id"],))
        item["quests_reward"] = c.fetchall()
        # Get item's properties:
        c.execute("SELECT * FROM items_attributes WHERE item_id = ?", (item["id"],))
        results = c.fetchall()
        item["attributes"] = {}
        for row in results:
            if row["attribute"] == "imbuement":
                # Imbuements are multi-valued: collect them into one list.
                temp = item["attributes"].get("imbuements", list())
                temp.append(row["value"])
                item["attributes"]["imbuements"] = temp
            else:
                item["attributes"][row["attribute"]] = row["value"]
        return item
    finally:
        c.close()
def get_imbuement(name):
    """Look up an imbuement by (partial) name.

    Returns the imbuement row (with its ``materials`` list attached) on an
    exact or unique match, a list of suggested names on an ambiguous match,
    or ``None`` when nothing matches.
    """
    cursor = tibiaDatabase.cursor()
    cursor.execute("SELECT * FROM imbuements WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15", ("%" + name + "%",))
    matches = cursor.fetchall()
    if not matches:
        return None
    if matches[0]["name"].lower() != name.lower() and len(matches) > 1:
        # Ambiguous: hand back the candidate names as suggestions.
        return [match['name'] for match in matches]
    imbuement = matches[0]
    try:
        cursor.execute("SELECT items.title as name, amount "
                       "FROM imbuements_materials "
                       "INNER JOIN items on items.id = imbuements_materials.item_id "
                       "WHERE imbuement_id = ?", (imbuement["id"],))
        imbuement["materials"] = cursor.fetchall()
        return imbuement
    finally:
        cursor.close()
def get_rashid_info() -> Dict[str, Union[str, int]]:
    """Return Rashid's position row for the current Tibia weekday.

    The row contains the name of the week day, the city and the x, y, z
    coordinates.
    """
    offset = get_tibia_time_zone() - get_local_timezone()
    # A new Tibia day starts at server save (10:00), hence the extra -10 hours.
    tibia_time = dt.datetime.now() + dt.timedelta(hours=offset - 10)
    cursor = tibiaDatabase.cursor()
    cursor.execute("SELECT * FROM rashid_positions WHERE day = ?", (tibia_time.weekday(),))
    position_row = cursor.fetchone()
    cursor.close()
    return position_row
def get_spell(name):
    """Returns a dictionary containing a spell's info, a list of possible matches or None"""
    c = tibiaDatabase.cursor()
    try:
        # First try an exact match against the spell words or the spell name.
        c.execute("SELECT * FROM spells WHERE words LIKE ? or name LIKE ?", (name,)*2)
        spell = c.fetchone()
        if spell is None:
            # Fall back to a substring search, shortest names first so an
            # exact hit ranks first.
            c.execute("SELECT * FROM spells WHERE words LIKE ? OR name LIKE ? ORDER BY LENGTH(name) LIMIT 15",
                      ("%" + name + "%",)*2)
            result = c.fetchall()
            if len(result) == 0:
                return None
            elif result[0]["name"].lower() == name.lower() or result[0]["words"].lower() == name.lower() or len(
                    result) == 1:
                spell = result[0]
            else:
                # Ambiguous: return "Name (words)" strings as suggestions.
                return ["{name} ({words})".format(**x) for x in result]
        # Attach the NPCs that teach this spell, with per-vocation flags.
        spell["npcs"] = []
        c.execute("""SELECT npcs.title as name, npcs.city, npcs_spells.knight, npcs_spells.paladin,
                  npcs_spells.sorcerer, npcs_spells.druid FROM npcs, npcs_spells
                  WHERE npcs_spells.spell_id = ? AND npcs_spells.npc_id = npcs.id""", (spell["id"],))
        result = c.fetchall()
        for npc in result:
            # Title-case the city name for display.
            npc["city"] = npc["city"].title()
            spell["npcs"].append(npc)
        return spell
    finally:
        c.close()
def get_npc(name):
    """Look up an NPC by (partial) title.

    Returns the NPC row augmented with "selling", "buying", "spells" and
    "destinations" lists on an exact or unique match, a list of suggested
    titles on an ambiguous match, or None when nothing matches.
    """
    c = tibiaDatabase.cursor()
    try:
        # search query: substring match, shortest titles first.
        c.execute("SELECT * FROM npcs WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%" + name + "%",))
        result = c.fetchall()
        if len(result) == 0:
            return None
        elif result[0]["title"].lower() == name.lower() or len(result) == 1:
            # BUG FIX: was `name.lower` (a bound method, never equal to a
            # string), which silently disabled the exact-match shortcut.
            npc = result[0]
        else:
            # Ambiguous: return the candidate titles as suggestions.
            return [x["title"] for x in result]
        # Items this NPC sells, most expensive first.
        c.execute("SELECT item.title as name, npcs_selling.value, currency.name as currency "
                  "FROM npcs_selling "
                  "LEFT JOIN items item on item.id = item_id "
                  "LEFT JOIN items currency on currency.id = currency "
                  "WHERE npc_id = ? "
                  "ORDER BY npcs_selling.value DESC", (npc["id"],))
        npc["selling"] = c.fetchall()
        # Items this NPC buys, best price first.
        c.execute("SELECT item.title as name, npcs_buying.value, currency.name as currency "
                  "FROM npcs_buying "
                  "LEFT JOIN items item on item.id = item_id "
                  "LEFT JOIN items currency on currency.id = currency "
                  "WHERE npc_id = ? "
                  "ORDER BY npcs_buying.value DESC", (npc["id"],))
        npc["buying"] = c.fetchall()
        # Spells this NPC teaches, with per-vocation flags.
        c.execute("SELECT spell.name, spell.price, npcs_spells.knight, npcs_spells.sorcerer, npcs_spells.paladin, "
                  "npcs_spells.druid "
                  "FROM npcs_spells "
                  "INNER JOIN spells spell ON spell.id = spell_id "
                  "WHERE npc_id = ? "
                  "ORDER BY price DESC", (npc["id"],))
        npc["spells"] = c.fetchall()
        # Travel destinations offered by this NPC.
        c.execute("SELECT destination as name, price, notes "
                  "FROM npcs_destinations "
                  "WHERE npc_id = ? "
                  "ORDER BY name ASC", (npc["id"],))
        npc["destinations"] = c.fetchall()
        return npc
    finally:
        c.close()
def get_key(number):
    """Return the key row (joined with its item image) for a key number,
    or None when no such key exists."""
    cursor = tibiaDatabase.cursor()
    try:
        cursor.execute("SELECT items_keys.*, item.image FROM items_keys "
                       "INNER JOIN items item ON item.id = items_keys.item_id "
                       "WHERE number = ? ", (number,))
        return cursor.fetchone()
    finally:
        cursor.close()
def search_key(terms):
    """Search keys by name, notes or origin (substring match, max 10).

    Returns None when nothing matches, the single row on a unique match,
    or the list of matching rows otherwise.
    """
    cursor = tibiaDatabase.cursor()
    try:
        cursor.execute("SELECT items_keys.*, item.image FROM items_keys "
                       "INNER JOIN items item ON item.id = items_keys.item_id "
                       "WHERE items_keys.name LIKE ? OR notes LIKE ? or origin LIKE ? LIMIT 10 ", ("%" + terms + "%",)*3)
        matches = cursor.fetchall()
        if not matches:
            return None
        if len(matches) == 1:
            return matches[0]
        return matches
    finally:
        cursor.close()
def get_achievement(name):
    """Look up an achievement by (partial) name.

    Returns the achievement row on an exact or unique match, a list of
    suggested names on an ambiguous match, or None when nothing matches.
    """
    cursor = tibiaDatabase.cursor()
    try:
        cursor.execute("SELECT * FROM achievements WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15",
                       ("%" + name + "%",))
        matches = cursor.fetchall()
        if not matches:
            return None
        if matches[0]["name"].lower() == name.lower() or len(matches) == 1:
            return matches[0]
        return [match['name'] for match in matches]
    finally:
        cursor.close()
def get_mapper_link(x, y, z):
    """Build a TibiaWiki Mapper URL for map coordinates (x, y, z)."""
    # Coordinates pack two bytes per axis; the mapper expects the display
    # form "<high byte>.<low byte>" for x and y, and the floor z as-is.
    def convert_pos(pos):
        return f"{(pos&0xFF00)>>8}.{pos&0x00FF}"
    return f"http://tibia.wikia.com/wiki/Mapper?coords={convert_pos(x)}-{convert_pos(y)}-{z}-4-1-1" | utils/tibiawiki.py | import datetime as dt
import urllib.parse
from typing import Dict, Union
from utils.database import tibiaDatabase
from utils.general import get_local_timezone
from utils.tibia import get_tibia_time_zone
WIKI_ICON = "https://vignette.wikia.nocookie.net/tibia/images/b/bc/Wiki.png/revision/latest?path-prefix=en"
def get_article_url(title: str) -> str:
return f"http://tibia.wikia.com/wiki/{urllib.parse.quote(title)}"
def get_monster(name):
"""Returns a dictionary with a monster's info, if no exact match was found, it returns a list of suggestions.
The dictionary has the following keys: name, id, hp, exp, maxdmg, elem_physical, elem_holy,
elem_death, elem_fire, elem_energy, elem_ice, elem_earth, elem_drown, elem_lifedrain, senseinvis,
arm, image."""
# Reading monster database
c = tibiaDatabase.cursor()
c.execute("SELECT * FROM creatures WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["title"].lower() == name.lower() or len(result) == 1:
monster = result[0]
else:
return [x['title'] for x in result]
try:
if monster['hitpoints'] is None or monster['hitpoints'] < 1:
monster['hitpoints'] = None
c.execute("SELECT items.title as item, chance, min, max "
"FROM creatures_drops, items "
"WHERE items.id = creatures_drops.item_id AND creature_id = ? "
"ORDER BY chance DESC",
(monster["id"],))
monster["loot"] = c.fetchall()
return monster
finally:
c.close()
def get_bestiary_classes() -> Dict[str, int]:
"""Gets all the bestiary classes
:return: The classes and how many creatures it has
:rtype: dict(str, int)
"""
rows = tibiaDatabase.execute("SELECT DISTINCT bestiary_class, count(*) as count "
"FROM creatures WHERE bestiary_class not NUll "
"GROUP BY bestiary_class ORDER BY bestiary_class")
classes = {}
for r in rows:
classes[r["bestiary_class"]] = r["count"]
return classes
def get_bestiary_creatures(_class: str) -> Dict[str, str]:
"""Gets the creatures that belong to a bestiary class
:param _class: The name of the class
:type _class: str
:return: The creatures in the class, with their difficulty level.
:rtype: dict(str, str)
"""
rows = tibiaDatabase.execute("""
SELECT title, bestiary_level
FROM creatures
WHERE bestiary_class LIKE ?
ORDER BY
CASE bestiary_level
WHEN "Trivial" THEN 0
WHEN "Easy" THEN 1
WHEN "Medium" THEN 2
WHEN "Hard" THEN 3
END
""", (_class,))
creatures = {}
for r in rows:
creatures[r["title"]] = r["bestiary_level"]
return creatures
def get_item(name):
"""Returns a dictionary containing an item's info, if no exact match was found, it returns a list of suggestions.
The dictionary has the following keys: name, look_text, npcs_sold*, value_sell, npcs_bought*, value_buy.
*npcs_sold and npcs_bought are list, each element is a dictionary with the keys: name, city."""
# Reading item database
c = tibiaDatabase.cursor()
# Search query
c.execute("SELECT * FROM items WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["title"].lower() == name.lower() or len(result) == 1:
item = result[0]
else:
return [x['title'] for x in result]
try:
c.execute("SELECT npc.name, npc.city, npcs_selling.value, currency.name as currency "
"FROM npcs_selling "
"LEFT JOIN npcs npc on npc.id = npc_id "
"LEFT JOIN items currency on currency.id = currency "
"WHERE item_id = ? "
"ORDER BY npcs_selling.value ASC", (item["id"],))
item["sellers"] = c.fetchall()
c.execute("SELECT npc.name, npc.city, npcs_buying.value, currency.name as currency "
"FROM npcs_buying "
"LEFT JOIN npcs npc on npc.id = npc_id "
"LEFT JOIN items currency on currency.id = currency "
"WHERE item_id = ? "
"ORDER BY npcs_buying.value DESC", (item["id"],))
item["buyers"] = c.fetchall()
c.execute("SELECT creature.title as name, chance "
"FROM creatures_drops "
"LEFT JOIN creatures creature on creature.id = creature_id "
"WHERE item_id = ? "
"ORDER BY chance DESC ", (item["id"],))
item["loot_from"] = c.fetchall()
c.execute("SELECT quests.name "
"FROM quests_rewards "
"INNER JOIN quests ON quests.id = quests_rewards.quest_id "
"WHERE item_id = ? ", (item["id"],))
item["quests_reward"] = c.fetchall()
# Get item's properties:
c.execute("SELECT * FROM items_attributes WHERE item_id = ?", (item["id"],))
results = c.fetchall()
item["attributes"] = {}
for row in results:
if row["attribute"] == "imbuement":
temp = item["attributes"].get("imbuements", list())
temp.append(row["value"])
item["attributes"]["imbuements"] = temp
else:
item["attributes"][row["attribute"]] = row["value"]
return item
finally:
c.close()
def get_imbuement(name):
"""Returns a dictionary containing an item's info, if no exact match was found, it returns a list of suggestions.
The dictionary has the following keys: name, look_text, npcs_sold*, value_sell, npcs_bought*, value_buy.
*npcs_sold and npcs_bought are list, each element is a dictionary with the keys: name, city."""
# Reading item database
c = tibiaDatabase.cursor()
# Search query
c.execute("SELECT * FROM imbuements WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15", ("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["name"].lower() == name.lower() or len(result) == 1:
imbuement = result[0]
else:
return [x['name'] for x in result]
try:
c.execute("SELECT items.title as name, amount "
"FROM imbuements_materials "
"INNER JOIN items on items.id = imbuements_materials.item_id "
"WHERE imbuement_id = ?", (imbuement["id"],))
imbuement["materials"] = c.fetchall()
return imbuement
finally:
c.close()
def get_rashid_info() -> Dict[str, Union[str, int]]:
"""Returns a dictionary with rashid's info
Dictionary contains: the name of the week, city and x,y,z, positions."""
offset = get_tibia_time_zone() - get_local_timezone()
# Server save is at 10am, so in tibia a new day starts at that hour
tibia_time = dt.datetime.now() + dt.timedelta(hours=offset - 10)
c = tibiaDatabase.cursor()
c.execute("SELECT * FROM rashid_positions WHERE day = ?", (tibia_time.weekday(),))
info = c.fetchone()
c.close()
return info
def get_spell(name):
"""Returns a dictionary containing a spell's info, a list of possible matches or None"""
c = tibiaDatabase.cursor()
try:
c.execute("SELECT * FROM spells WHERE words LIKE ? or name LIKE ?", (name,)*2)
spell = c.fetchone()
if spell is None:
c.execute("SELECT * FROM spells WHERE words LIKE ? OR name LIKE ? ORDER BY LENGTH(name) LIMIT 15",
("%" + name + "%",)*2)
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["name"].lower() == name.lower() or result[0]["words"].lower() == name.lower() or len(
result) == 1:
spell = result[0]
else:
return ["{name} ({words})".format(**x) for x in result]
spell["npcs"] = []
c.execute("""SELECT npcs.title as name, npcs.city, npcs_spells.knight, npcs_spells.paladin,
npcs_spells.sorcerer, npcs_spells.druid FROM npcs, npcs_spells
WHERE npcs_spells.spell_id = ? AND npcs_spells.npc_id = npcs.id""", (spell["id"],))
result = c.fetchall()
for npc in result:
npc["city"] = npc["city"].title()
spell["npcs"].append(npc)
return spell
finally:
c.close()
def get_npc(name):
"""Returns a dictionary containing a NPC's info, a list of possible matches or None"""
c = tibiaDatabase.cursor()
try:
# search query
c.execute("SELECT * FROM npcs WHERE title LIKE ? ORDER BY LENGTH(title) ASC LIMIT 15", ("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["title"].lower() == name.lower or len(result) == 1:
npc = result[0]
else:
return [x["title"] for x in result]
c.execute("SELECT item.title as name, npcs_selling.value, currency.name as currency "
"FROM npcs_selling "
"LEFT JOIN items item on item.id = item_id "
"LEFT JOIN items currency on currency.id = currency "
"WHERE npc_id = ? "
"ORDER BY npcs_selling.value DESC", (npc["id"],))
npc["selling"] = c.fetchall()
c.execute("SELECT item.title as name, npcs_buying.value, currency.name as currency "
"FROM npcs_buying "
"LEFT JOIN items item on item.id = item_id "
"LEFT JOIN items currency on currency.id = currency "
"WHERE npc_id = ? "
"ORDER BY npcs_buying.value DESC", (npc["id"],))
npc["buying"] = c.fetchall()
c.execute("SELECT spell.name, spell.price, npcs_spells.knight, npcs_spells.sorcerer, npcs_spells.paladin, "
"npcs_spells.druid "
"FROM npcs_spells "
"INNER JOIN spells spell ON spell.id = spell_id "
"WHERE npc_id = ? "
"ORDER BY price DESC", (npc["id"],))
npc["spells"] = c.fetchall()
c.execute("SELECT destination as name, price, notes "
"FROM npcs_destinations "
"WHERE npc_id = ? "
"ORDER BY name ASC", (npc["id"],))
npc["destinations"] = c.fetchall()
return npc
finally:
c.close()
def get_key(number):
"""Returns a dictionary containing a NPC's info, a list of possible matches or None"""
c = tibiaDatabase.cursor()
try:
# search query
c.execute("SELECT items_keys.*, item.image FROM items_keys "
"INNER JOIN items item ON item.id = items_keys.item_id "
"WHERE number = ? ", (number,))
result = c.fetchone()
return result
finally:
c.close()
def search_key(terms):
"""Returns a dictionary containing a NPC's info, a list of possible matches or None"""
c = tibiaDatabase.cursor()
try:
# search query
c.execute("SELECT items_keys.*, item.image FROM items_keys "
"INNER JOIN items item ON item.id = items_keys.item_id "
"WHERE items_keys.name LIKE ? OR notes LIKE ? or origin LIKE ? LIMIT 10 ", ("%" + terms + "%",)*3)
result = c.fetchall()
if len(result) == 0:
return None
elif len(result) == 1:
return result[0]
return result
finally:
c.close()
def get_achievement(name):
"""Returns an achievement (dictionary), a list of possible matches or none"""
c = tibiaDatabase.cursor()
try:
# Search query
c.execute("SELECT * FROM achievements WHERE name LIKE ? ORDER BY LENGTH(name) ASC LIMIT 15",
("%" + name + "%",))
result = c.fetchall()
if len(result) == 0:
return None
elif result[0]["name"].lower() == name.lower() or len(result) == 1:
return result[0]
else:
return [x['name'] for x in result]
finally:
c.close()
def get_mapper_link(x, y, z):
def convert_pos(pos):
return f"{(pos&0xFF00)>>8}.{pos&0x00FF}"
return f"http://tibia.wikia.com/wiki/Mapper?coords={convert_pos(x)}-{convert_pos(y)}-{z}-4-1-1" | 0.727589 | 0.254087 |
from dypac.embeddings import Embedding
from sklearn.preprocessing import OneHotEncoder
from nilearn.image import resample_to_img
from nilearn.input_data import NiftiMasker
class BaseMasker:
    """Shared projection/compression API for Dypac-like maskers.

    Subclasses must provide three attributes: ``masker_`` (a fitted
    nilearn masker), ``components_`` (one spatial component per column)
    and ``embedding_`` (a dypac ``Embedding`` built on those components).
    """

    def __init__(self):
        """Base class; subclasses populate masker_, components_ and embedding_."""

    def _check_components_(self):
        """Raise ValueError when components_ is missing (fit never ran)."""
        if not hasattr(self, "components_"):
            raise ValueError(
                "Object has no components_ attribute. "
                "This is probably because fit has not "
                "been called."
            )

    def load_img(self, img, confound=None):
        """Return *img* after the same preprocessing used at fit time.

        Parameters
        ----------
        img : Niimg-like object
            An fMRI dataset.
        confound : CSV file or 2D matrix, optional
            Confounds, passed to nilearn.signal.clean.

        Returns
        -------
        Niimg-like object
            The preprocessed 4D image.
        """
        self._check_components_()
        preprocessed = self.masker_.transform([img], [confound])
        return self.masker_.inverse_transform(preprocessed[0])

    def transform(self, img, confound=None):
        """Project a 4D dataset onto the component space.

        Parameters
        ----------
        img : Niimg-like object
            An fMRI dataset.
        confound : CSV file or 2D matrix, optional
            Confounds, passed to nilearn.signal.clean.

        Returns
        -------
        numpy array of shape [n_samples, n_states + 1]
            Component weights; the first coefficient is the intercept,
            not one of the parcels.
        """
        self._check_components_()
        projected = self.masker_.transform([img], [confound])
        del img
        return self.embedding_.transform(projected[0])

    def inverse_transform(self, weights):
        """Map component weights (first column = intercept) back to 4D.

        Parameters
        ----------
        weights : numpy array of shape [n_samples, n_states + 1]
            Component weights, as produced by ``transform``.

        Returns
        -------
        Niimg-like object
            The corresponding 4D fMRI dataset.
        """
        self._check_components_()
        recovered = self.embedding_.inverse_transform(weights)
        return self.masker_.inverse_transform(recovered)

    def compress(self, img, confound=None):
        """Return the 4D approximation of *img* after a round trip
        through the component space.

        Parameters
        ----------
        img : Niimg-like object
            An fMRI dataset.
        confound : CSV file or 2D matrix, optional
            Confounds, passed to nilearn.signal.clean.

        Returns
        -------
        Niimg-like object
            The input compressed in the parcel space.
        """
        self._check_components_()
        tseries = self.masker_.transform([img], [confound])
        del img
        return self.masker_.inverse_transform(self.embedding_.compress(tseries[0]))

    def score(self, img, confound=None):
        """Return a 3D R2 map of the compression quality.

        Per voxel, the map gives the fraction of time-series variance
        captured by the components: 1 is a perfect approximation, and
        negative values mean the approximation is worse than the mean
        of the signal.

        Parameters
        ----------
        img : Niimg-like object
            An fMRI dataset.
        confound : CSV file or 2D matrix, optional
            Confounds, passed to nilearn.signal.clean.

        Returns
        -------
        Niimg-like object
            3D map of R2 scores.
        """
        self._check_components_()
        tseries = self.masker_.transform([img], [confound])
        del img
        return self.masker_.inverse_transform(self.embedding_.score(tseries[0]))
class LabelsMasker(BaseMasker):
    """Dypac-like masker built from a parcellation (labels) volume."""

    def __init__(self, model, labels):
        """
        Parameters
        ----------
        model:
            A Dypac model (provides mask_img_ and a fitted masker_).
        labels:
            A brain volume of integer parcel labels.

        Attributes
        ----------
        components_:
            One one-hot encoded column per parcel.
        embedding_:
            See the class Embedding from Dypac.
        """
        # Nearest-neighbour resampling keeps the labels integer-valued.
        resampled_labels = resample_to_img(source_img=labels,
            target_img=model.mask_img_, interpolation="nearest")
        label_masker = NiftiMasker(
            mask_img=model.mask_img_,
            standardize=False,
            smoothing_fwhm=None,
            detrend=False,
            memory="nilearn_cache",
            memory_level=1,
        )
        masked_labels = label_masker.fit_transform(resampled_labels)
        # Reuse the model's fitted masker for the BaseMasker API.
        self.masker_ = model.masker_
        self.components_ = OneHotEncoder().fit_transform(masked_labels.transpose())
        self.embedding_ = Embedding(self.components_.todense().transpose())
class MapsMasker(BaseMasker):
    def __init__(self, model, maps):
        """
        Build a Dypac-like masker from a collection of brain maps.

        Parameters
        ----------
        model:
            a Dypac model.
        maps: 4D niimg-like object
            See http://nilearn.github.io/manipulating_images/input_output.html
            Set of continuous maps. One representative time course per map is
            extracted using least square regression.

        Attributes
        ----------
        components_:
            each column is brain map, after masking
        embedding_:
            see the class Embedding from Dypac.
        """
        # Continuous interpolation: the maps hold weights, not labels.
        maps_r = resample_to_img(source_img=maps,
            target_img=model.mask_img_, interpolation="continuous")
        # Plain masker (no standardization/smoothing/detrend) so map values
        # pass through unchanged.
        nifti_masker = NiftiMasker(
            mask_img=model.mask_img_,
            standardize=False,
            smoothing_fwhm=None,
            detrend=False,
            memory="nilearn_cache",
            memory_level=1,
        )
        maps_mask = nifti_masker.fit_transform(maps_r)
        # Reuse the model's fitted masker for the BaseMasker API; each column
        # of components_ is one masked map.
        self.masker_ = model.masker_
        self.components_ = maps_mask.transpose()
        self.embedding_ = Embedding(self.components_.transpose()) | cneuromod_embeddings/dypac_masker.py | from dypac.embeddings import Embedding
from sklearn.preprocessing import OneHotEncoder
from nilearn.image import resample_to_img
from nilearn.input_data import NiftiMasker
class BaseMasker:
    """Base class for Dypac-like maskers.

    Subclasses are expected to populate three attributes before any method
    is used: ``masker_`` (the fitted nilearn masker taken from a Dypac
    model), ``components_`` (one column per parcel/map, in masked voxel
    space) and ``embedding_`` (a dypac ``Embedding`` built from
    ``components_``).
    """
    def __init__(self):
        """Initialize the base masker.

        Note
        ----
        No attribute is set here; subclasses such as ``LabelsMasker`` and
        ``MapsMasker`` create ``masker_``, ``components_`` and
        ``embedding_`` in their own constructors.
        """
    def _check_components_(self):
        """Check for presence of estimated components."""
        if not hasattr(self, "components_"):
            raise ValueError(
                "Object has no components_ attribute. "
                "This is probably because fit has not "
                "been called."
            )
    def load_img(self, img, confound=None):
        """
        Load a 4D image using the same preprocessing as model fitting.
        Parameters
        ----------
        img : Niimg-like object.
            See http://nilearn.github.io/manipulating_images/input_output.html
            An fMRI dataset
        confound : CSV file or 2D matrix, optional.
            Confound parameters, to be passed to nilearn.signal.clean.
        Returns
        -------
        img_p : Niimg-like object.
            Same as input, after the preprocessing step used in the model have
            been applied.
        """
        self._check_components_()
        # A transform/inverse_transform round trip applies the same masking
        # and cleaning steps that were used when fitting the model.
        tseries = self.masker_.transform([img], [confound])
        return self.masker_.inverse_transform(tseries[0])
    def transform(self, img, confound=None):
        """
        Transform a 4D dataset in a component space.
        Parameters
        ----------
        img : Niimg-like object.
            See http://nilearn.github.io/manipulating_images/input_output.html
            An fMRI dataset
        confound : CSV file or 2D matrix, optional.
            Confound parameters, to be passed to nilearn.signal.clean.
        Returns
        -------
        weights : numpy array of shape [n_samples, n_states + 1]
            The fMRI tseries after projection in the parcellation
            space. Note that the first coefficient corresponds to the intercept,
            and not one of the parcels.
        """
        self._check_components_()
        tseries = self.masker_.transform([img], [confound])
        # Drop the reference to the (potentially large) image before the
        # projection to keep peak memory down.
        del img
        return self.embedding_.transform(tseries[0])
    def inverse_transform(self, weights):
        """
        Transform component weights as a 4D dataset.
        Parameters
        ----------
        weights : numpy array of shape [n_samples, n_states + 1]
            The fMRI tseries after projection in the parcellation
            space. Note that the first coefficient corresponds to the intercept,
            and not one of the parcels.
        Returns
        -------
        img : Niimg-like object.
            The 4D fMRI dataset corresponding to the weights.
        """
        self._check_components_()
        return self.masker_.inverse_transform(self.embedding_.inverse_transform(weights))
    def compress(self, img, confound=None):
        """
        Provide the approximation of a 4D dataset after projection in parcellation space.
        Parameters
        ----------
        img : Niimg-like object.
            See http://nilearn.github.io/manipulating_images/input_output.html
            An fMRI dataset
        confound : CSV file or 2D matrix, optional.
            Confound parameters, to be passed to nilearn.signal.clean.
        Returns
        -------
        img_c : Niimg-like object.
            The 4D fMRI dataset corresponding to the input, compressed in the parcel space.
        """
        self._check_components_()
        tseries = self.masker_.transform([img], [confound])
        del img
        # Project onto the parcels and back: the result is the best
        # approximation of the time series within the parcellation space.
        return self.masker_.inverse_transform(self.embedding_.compress(tseries[0]))
    def score(self, img, confound=None):
        """
        R2 map of the quality of the compression.
        Parameters
        ----------
        img : Niimg-like object.
            See http://nilearn.github.io/manipulating_images/input_output.html
            An fMRI dataset
        confound : CSV file or 2D matrix, optional.
            Confound parameters, to be passed to nilearn.signal.clean.
        Returns
        -------
        score : Niimg-like object.
            A 3D map of R2 score of the quality of the compression.
        Note
        ----
        The R2 score map is the fraction of the variance of fMRI time series captured
        by the parcels at each voxel. A score of 1 means perfect approximation.
        The score can be negative, in which case the parcellation approximation
        performs worst than the average of the signal.
        """
        self._check_components_()
        tseries = self.masker_.transform([img], [confound])
        del img
        return self.masker_.inverse_transform(self.embedding_.score(tseries[0]))
class LabelsMasker(BaseMasker):
    def __init__(self, model, labels):
        """
        Build a Dypac-like masker from labels.
        Parameters
        ----------
        model:
            a Dypac model.
        labels:
            a brain volume with parcels (labels).
        Attributes
        ----------
        components_:
            each column is a onehot encoder for one of the parcels.
        embedding_:
            see the class Embedding from Dypac.
        """
        # Nearest-neighbour interpolation keeps the (integer) label values
        # intact when resampling the parcellation onto the model's grid.
        labels_r = resample_to_img(source_img=labels,
            target_img=model.mask_img_, interpolation="nearest")
        # Plain masking only: no standardization, smoothing or detrending,
        # so label values pass through unchanged.
        nifti_masker = NiftiMasker(
            mask_img=model.mask_img_,
            standardize=False,
            smoothing_fwhm=None,
            detrend=False,
            memory="nilearn_cache",
            memory_level=1,
        )
        labels_mask = nifti_masker.fit_transform(labels_r)
        self.masker_ = model.masker_
        # One-hot encode each voxel's label: one sparse column per parcel.
        # NOTE(review): assumes labels_mask is 1 x n_voxels — TODO confirm.
        self.components_ = OneHotEncoder().fit_transform(labels_mask.transpose())
        # Embedding expects a dense array; densify the sparse one-hot matrix.
        self.embedding_ = Embedding(self.components_.todense().transpose())
class MapsMasker(BaseMasker):
    def __init__(self, model, maps):
        """
        Build a Dypac-like masker from a collection of brain maps.
        Parameters
        ----------
        model:
            a Dypac model.
        maps: 4D niimg-like object
            See http://nilearn.github.io/manipulating_images/input_output.html
            Set of continuous maps. One representative time course per map is
            extracted using least square regression.
        Attributes
        ----------
        components_:
            each column is brain map, after masking
        embedding_:
            see the class Embedding from Dypac.
        """
        # Continuous interpolation: maps carry real-valued weights, unlike
        # the integer labels handled by LabelsMasker.
        maps_r = resample_to_img(source_img=maps,
            target_img=model.mask_img_, interpolation="continuous")
        # Plain masking only: no standardization, smoothing or detrending.
        nifti_masker = NiftiMasker(
            mask_img=model.mask_img_,
            standardize=False,
            smoothing_fwhm=None,
            detrend=False,
            memory="nilearn_cache",
            memory_level=1,
        )
        maps_mask = nifti_masker.fit_transform(maps_r)
        self.masker_ = model.masker_
        self.components_ = maps_mask.transpose()
        self.embedding_ = Embedding(self.components_.transpose()) | 0.953242 | 0.644029
import unittest
import os
import opentimelineio as otio
from tests import baseline_reader
"""Unit tests for the schemadef plugin system."""
SCHEMADEF_NAME = "schemadef_example"
EXAMPLE_ARG = "exampleArg"
EXCLASS = "<class 'opentimelineio.schemadef.example_schemadef.exampleSchemaDef'>"
TEST_STRING = """
{
"OTIO_SCHEMA": "exampleSchemaDef.1",
"exampleArg": "foobar"
}
"""
def _clean_plugin_module():
    """Unload the example_schemadef module, if it was previously loaded.

    Keeps tests of autoload/explicit-load behavior independent of each
    other by clearing both the module attribute and the plugin's cached
    module.
    """
    # Drop the module attribute when present; its absence is fine.
    if hasattr(otio.schemadef, "example_schemadef"):
        del otio.schemadef.example_schemadef
    # Clear the cached module on the plugin itself, when the plugin exists.
    try:
        otio.schema.schemadef.from_name("example_schemadef")._module = None
    except otio.exceptions.NotSupportedError:
        pass
class TestPluginSchemadefs(unittest.TestCase):
    """Tests for loading schemadef plugins from a test manifest."""
    def setUp(self):
        """Point OTIO at the test manifest and reset plugin state."""
        # Save global state (restored in tearDown): the active manifest and
        # the manifest path environment variable.
        self.save_manifest = otio.plugins.manifest._MANIFEST
        self.save_manifest_path = os.environ.get('OTIO_PLUGIN_MANIFEST_PATH')
        # find the path to the baselines/schemadef_example.json
        self.manifest_path = baseline_reader.path_to_baseline(SCHEMADEF_NAME)
        os.environ['OTIO_PLUGIN_MANIFEST_PATH'] = self.manifest_path
        # Force the manifest to be re-read from the new path.
        otio.plugins.manifest.ActiveManifest(force_reload=True)
        _clean_plugin_module()
    def tearDown(self):
        # restore original state
        if self.save_manifest_path:
            os.environ['OTIO_PLUGIN_MANIFEST_PATH'] = self.save_manifest_path
        else:
            # The variable was not set before the test; remove it entirely.
            del os.environ['OTIO_PLUGIN_MANIFEST_PATH']
        otio.plugins.manifest._MANIFEST = self.save_manifest
        _clean_plugin_module()
    def test_autoloaded_plugin(self):
        """Reading a document with the schema should autoload the plugin."""
        # The module must not be loaded yet.
        with self.assertRaises(AttributeError):
            otio.schemadef.example_schemadef
        # should force an autoload
        thing = otio.adapters.read_from_string(TEST_STRING, "otio_json")
        self.assertEqual(thing.exampleArg, "foobar")
    def test_plugin_schemadef(self):
        """Explicitly loaded schemadefs work with instance_from_schema."""
        with self.assertRaises(AttributeError):
            otio.schemadef.example_schemadef
        # force loading the module
        otio.schema.schemadef.module_from_name("example_schemadef")
        # Our test manifest should have been loaded, including
        # the example_schemadef.
        # Try creating a schema object using the instance_from_schema method.
        peculiar_value = "something One-derful"
        example = otio.core.instance_from_schema("exampleSchemaDef", 1, {
            EXAMPLE_ARG: peculiar_value
        })
        self.assertEqual(str(type(example)), EXCLASS)
        self.assertEqual(example.exampleArg, peculiar_value)
    def test_plugin_schemadef_namespace(self):
        """Loaded schemadefs are exposed under the otio.schemadef namespace."""
        with self.assertRaises(AttributeError):
            otio.schemadef.example_schemadef
        # force loading the module
        plugin_module = otio.schema.schemadef.module_from_name(
            "example_schemadef"
        )
        # Try creating schema object with the direct class definition method:
        peculiar_value = "something Two-derful"
        example = otio.schemadef.example_schemadef.exampleSchemaDef(peculiar_value)
        self.assertEqual(plugin_module, otio.schemadef.example_schemadef)
        self.assertEqual(str(type(example)), EXCLASS)
        self.assertEqual(example.exampleArg, peculiar_value)
if __name__ == '__main__':
unittest.main() | tests/test_schemadef_plugin.py | import unittest
import os
import opentimelineio as otio
from tests import baseline_reader
"""Unit tests for the schemadef plugin system."""
SCHEMADEF_NAME = "schemadef_example"
EXAMPLE_ARG = "exampleArg"
EXCLASS = "<class 'opentimelineio.schemadef.example_schemadef.exampleSchemaDef'>"
TEST_STRING = """
{
"OTIO_SCHEMA": "exampleSchemaDef.1",
"exampleArg": "foobar"
}
"""
def _clean_plugin_module():
"""Remove the example_schemadef if its already been loaded to test
autoload/explicit load behavior.
"""
try:
del otio.schemadef.example_schemadef
except AttributeError:
pass
try:
plugin = otio.schema.schemadef.from_name("example_schemadef")
plugin._module = None
except otio.exceptions.NotSupportedError:
pass
class TestPluginSchemadefs(unittest.TestCase):
def setUp(self):
self.save_manifest = otio.plugins.manifest._MANIFEST
self.save_manifest_path = os.environ.get('OTIO_PLUGIN_MANIFEST_PATH')
# find the path to the baselines/schemadef_example.json
self.manifest_path = baseline_reader.path_to_baseline(SCHEMADEF_NAME)
os.environ['OTIO_PLUGIN_MANIFEST_PATH'] = self.manifest_path
otio.plugins.manifest.ActiveManifest(force_reload=True)
_clean_plugin_module()
def tearDown(self):
# restore original state
if self.save_manifest_path:
os.environ['OTIO_PLUGIN_MANIFEST_PATH'] = self.save_manifest_path
else:
del os.environ['OTIO_PLUGIN_MANIFEST_PATH']
otio.plugins.manifest._MANIFEST = self.save_manifest
_clean_plugin_module()
def test_autoloaded_plugin(self):
with self.assertRaises(AttributeError):
otio.schemadef.example_schemadef
# should force an autoload
thing = otio.adapters.read_from_string(TEST_STRING, "otio_json")
self.assertEqual(thing.exampleArg, "foobar")
def test_plugin_schemadef(self):
with self.assertRaises(AttributeError):
otio.schemadef.example_schemadef
# force loading the module
otio.schema.schemadef.module_from_name("example_schemadef")
# Our test manifest should have been loaded, including
# the example_schemadef.
# Try creating a schema object using the instance_from_schema method.
peculiar_value = "something One-derful"
example = otio.core.instance_from_schema("exampleSchemaDef", 1, {
EXAMPLE_ARG: peculiar_value
})
self.assertEqual(str(type(example)), EXCLASS)
self.assertEqual(example.exampleArg, peculiar_value)
def test_plugin_schemadef_namespace(self):
with self.assertRaises(AttributeError):
otio.schemadef.example_schemadef
# force loading the module
plugin_module = otio.schema.schemadef.module_from_name(
"example_schemadef"
)
# Try creating schema object with the direct class definition method:
peculiar_value = "something Two-derful"
example = otio.schemadef.example_schemadef.exampleSchemaDef(peculiar_value)
self.assertEqual(plugin_module, otio.schemadef.example_schemadef)
self.assertEqual(str(type(example)), EXCLASS)
self.assertEqual(example.exampleArg, peculiar_value)
if __name__ == '__main__':
unittest.main() | 0.563978 | 0.251523 |
import pandas as pd
from warnings import warn
from google.api_core.exceptions import NotFound
from carto.exceptions import CartoException
from ...clients.bigquery_client import BigQueryClient
from ....auth import Credentials, defaults
try:
from abc import ABC
except ImportError:
from abc import ABCMeta
ABC = ABCMeta('ABC', (object,), {'__slots__': ()})
_WORKING_PROJECT = 'carto-do-customers'
class CatalogEntity(ABC):
    """Base class for Data Observatory catalog entities.

    An entity wraps a ``data`` dict and exposes convenience accessors,
    dict-based equality, serialization helpers, and a download helper for
    purchased datasets.
    """
    id_field = 'id'
    entity_repo = None
    export_excluded_fields = ['summary_json']
    def __init__(self, data):
        self.data = data
    @property
    def id(self):
        """Return the entity identifier (the ``id`` field of ``data``)."""
        return self.data[self.id_field]
    @property
    def slug(self):
        """Return the entity slug, or ``None`` when not present."""
        return self.data.get('slug')
    @classmethod
    def get(cls, id_):
        """Fetch a single entity by id from the entity repository."""
        return cls.entity_repo.get_by_id(id_)
    @classmethod
    def get_all(cls, filters=None):
        """Fetch all entities, optionally restricted by ``filters``."""
        return cls.entity_repo.get_all(filters)
    @classmethod
    def get_list(cls, id_list):
        """Fetch several entities given a list of ids."""
        return cls.entity_repo.get_by_id_list(id_list)
    def to_series(self):
        """Return the entity data as a pandas Series."""
        return pd.Series(self.data)
    def to_dict(self):
        """Return the entity data as a dict, without export-excluded fields."""
        return {key: value for key, value in self.data.items() if key not in self.export_excluded_fields}
    def __eq__(self, other):
        # Comparing against an unrelated type must not raise AttributeError;
        # returning NotImplemented lets Python fall back to the other operand
        # (and ultimately to False).
        if not isinstance(other, CatalogEntity):
            return NotImplemented
        return self.data == other.data
    def __ne__(self, other):
        return not self == other
    def __str__(self):
        return '{classname}({data})'.format(classname=self.__class__.__name__, data=self.data.__str__())
    def __repr__(self):
        return "<{classname}('{entity_id}')>".format(classname=self.__class__.__name__, entity_id=self._get_print_id())
    def _get_print_id(self):
        # Prefer the human-readable slug when available.
        if 'slug' in self.data:
            return self.data['slug']
        return self.id
    def _download(self, credentials=None):
        """Download the entity's dataset to a local CSV file.

        Raises
        ------
        CartoException
            If the user has not purchased the dataset yet.
        """
        credentials = self._get_credentials(credentials)
        user_dataset = credentials.get_do_user_dataset()
        bq_client = _get_bigquery_client(_WORKING_PROJECT, credentials)
        project, dataset, table = self.id.split('.')
        # Purchased datasets are exposed as BigQuery views named after the
        # dataset and table (dashes are not valid in view names).
        view = 'view_{}_{}'.format(dataset.replace('-', '_'), table)
        try:
            file_path = bq_client.download_to_file(_WORKING_PROJECT, user_dataset, view)
        except NotFound:
            raise CartoException('You have not purchased the dataset `{}` yet'.format(self.id))
        warn('Data saved: {}.'.format(file_path))
        warn("To read it you can do: `pandas.read_csv('{}')`.".format(file_path))
        return file_path
    def _get_credentials(self, credentials=None):
        """Return valid ``Credentials``, falling back to the default ones."""
        _credentials = credentials or defaults.get_default_credentials()
        if not isinstance(_credentials, Credentials):
            raise ValueError('`credentials` must be a Credentials class instance')
        return _credentials
def _get_bigquery_client(project, credentials):
    """Return a BigQueryClient for `project` using the given credentials."""
    return BigQueryClient(project, credentials)
def is_slug_value(id_value):
    """Tell whether `id_value` is a slug, i.e. it contains no dots."""
    return '.' not in id_value
class CatalogList(list):
    """A list of catalog entities with convenience lookup helpers."""
    def __init__(self, data):
        super(CatalogList, self).__init__(data)
    def get(self, item_id):
        """Return the first item whose id or slug equals `item_id`, else None."""
        return next(iter(filter(lambda item: item.id == item_id or item.slug == item_id, self)), None)
    def to_dataframe(self):
        """Return a pandas DataFrame with one row per item's data dict."""
        return pd.DataFrame([item.data for item in self]) | cartoframes/data/observatory/catalog/entity.py | import pandas as pd
from warnings import warn
from google.api_core.exceptions import NotFound
from carto.exceptions import CartoException
from ...clients.bigquery_client import BigQueryClient
from ....auth import Credentials, defaults
try:
from abc import ABC
except ImportError:
from abc import ABCMeta
ABC = ABCMeta('ABC', (object,), {'__slots__': ()})
_WORKING_PROJECT = 'carto-do-customers'
class CatalogEntity(ABC):
id_field = 'id'
entity_repo = None
export_excluded_fields = ['summary_json']
def __init__(self, data):
self.data = data
@property
def id(self):
return self.data[self.id_field]
@property
def slug(self):
try:
return self.data['slug']
except KeyError:
return None
@classmethod
def get(cls, id_):
return cls.entity_repo.get_by_id(id_)
@classmethod
def get_all(cls, filters=None):
return cls.entity_repo.get_all(filters)
@classmethod
def get_list(cls, id_list):
return cls.entity_repo.get_by_id_list(id_list)
def to_series(self):
return pd.Series(self.data)
def to_dict(self):
return {key: value for key, value in self.data.items() if key not in self.export_excluded_fields}
def __eq__(self, other):
return self.data == other.data
def __ne__(self, other):
return not self == other
def __str__(self):
return '{classname}({data})'.format(classname=self.__class__.__name__, data=self.data.__str__())
def __repr__(self):
return "<{classname}('{entity_id}')>".format(classname=self.__class__.__name__, entity_id=self._get_print_id())
def _get_print_id(self):
if 'slug' in self.data.keys():
return self.data['slug']
return self.id
def _download(self, credentials=None):
credentials = self._get_credentials(credentials)
user_dataset = credentials.get_do_user_dataset()
bq_client = _get_bigquery_client(_WORKING_PROJECT, credentials)
project, dataset, table = self.id.split('.')
view = 'view_{}_{}'.format(dataset.replace('-', '_'), table)
try:
file_path = bq_client.download_to_file(_WORKING_PROJECT, user_dataset, view)
except NotFound:
raise CartoException('You have not purchased the dataset `{}` yet'.format(self.id))
warn('Data saved: {}.'.format(file_path))
warn("To read it you can do: `pandas.read_csv('{}')`.".format(file_path))
return file_path
def _get_credentials(self, credentials=None):
_credentials = credentials or defaults.get_default_credentials()
if not isinstance(_credentials, Credentials):
raise ValueError('`credentials` must be a Credentials class instance')
return _credentials
def _get_bigquery_client(project, credentials):
return BigQueryClient(project, credentials)
def is_slug_value(id_value):
return len(id_value.split('.')) == 1
class CatalogList(list):
def __init__(self, data):
super(CatalogList, self).__init__(data)
def get(self, item_id):
return next(iter(filter(lambda item: item.id == item_id or item.slug == item_id, self)), None)
def to_dataframe(self):
return pd.DataFrame([item.data for item in self]) | 0.557123 | 0.125977 |
from __future__ import annotations
import itertools
from typing import Any, Tuple
from collections.abc import Iterable
import pandas as pd
import numpy as np
ITEMID = "item_id"
TIMESTAMP = "timestamp"
class TimeSeriesDataFrame(pd.DataFrame):
"""TimeSeriesDataFrame to represent time-series dataset.
Parameters
----------
data : Any
Time-series data to construct a TimeSeriesDataFrame.
It currently supports three input formats:
1. Time-series data in Iterable format. For example:
iterable_dataset = [
{"target": [0, 1, 2], "start": pd.Timestamp("01-01-2019", freq='D')},
{"target": [3, 4, 5], "start": pd.Timestamp("01-01-2019", freq='D')},
{"target": [6, 7, 8], "start": pd.Timestamp("01-01-2019", freq='D')}
]
2. Time-series data in pd.DataFrame format without multi-index. For example:
item_id timestamp target
0 0 2019-01-01 0
1 0 2019-01-02 1
2 0 2019-01-03 2
3 1 2019-01-01 3
4 1 2019-01-02 4
5 1 2019-01-03 5
6 2 2019-01-01 6
7 2 2019-01-02 7
8 2 2019-01-03 8
3. Time-series data in pd.DataFrame format with multi-index on item_id and timestamp. For example:
target
item_id timestamp
0 2019-01-01 0
2019-01-02 1
2019-01-03 2
1 2019-01-01 3
2019-01-02 4
2019-01-03 5
2 2019-01-01 6
2019-01-02 7
2019-01-03 8
This example can be found using example() function.
"""
    def __init__(self, data: Any, *args, **kwargs):
        """Normalize `data` to a multi-indexed frame and initialize.

        Plain DataFrames (with item_id/timestamp columns) and iterable
        datasets are converted first; a DataFrame that already carries a
        MultiIndex is only validated, not converted.
        """
        if isinstance(data, pd.DataFrame):
            if isinstance(data.index, pd.MultiIndex):
                # Already in the target layout: validate, do not convert.
                self._validate_multi_index_data_frame(data)
            else:
                # Conversion returns a (validated) TimeSeriesDataFrame; the
                # super().__init__ call below just copies its contents, so
                # there is no further recursion.
                data = self.from_data_frame(data)
        else:
            data = self.from_iterable_dataset(data)
        super().__init__(data=data, *args, **kwargs)
# TODO: move out of production code to dedicated example in examples
@classmethod
def example(cls):
"""An example TimeSeriesDataFrame.
Returns
-------
ts_df : TimeSeriesDataFrame
It returns an example TimeSeriesDataFrame as:
target
item_id timestamp
0 2019-01-01 0
2019-01-02 1
2019-01-03 2
1 2019-01-01 3
2019-01-02 4
2019-01-03 5
2 2019-01-01 6
2019-01-02 7
2019-01-03 8
"""
target = np.arange(9)
datetime_index = tuple(
pd.date_range(pd.Timestamp("01-01-2019"), periods=3, freq="D") # noqa
)
item_ids = (0, 1, 2)
multi_index = pd.MultiIndex.from_product(
[item_ids, datetime_index], names=[ITEMID, TIMESTAMP]
)
return TimeSeriesDataFrame(
pd.Series(target, name="target", index=multi_index).to_frame()
)
@classmethod
def _validate_iterable(cls, data: Iterable):
if not isinstance(data, Iterable):
raise ValueError("data must be of type Iterable.")
first = next(iter(data), None)
if first is None:
raise ValueError("data has no time-series.")
for i, ts in enumerate(itertools.chain([first], data)):
if not isinstance(ts, dict):
raise ValueError(
f"{i}'th time-series in data must be a dict, got{type(ts)}"
)
if not ("target" in ts and "start" in ts):
raise ValueError(
f"{i}'th time-series in data must have 'target' and 'start', got{ts.keys()}"
)
if not isinstance(ts["start"], pd.Timestamp) or ts["start"].freq is None:
raise ValueError(
f"{i}'th time-series must have timestamp as 'start' with freq specified, got {ts['start']}"
)
@classmethod
def _validate_data_frame(cls, df: pd.DataFrame):
if not isinstance(df, pd.DataFrame):
raise ValueError(f"data must be a pd.DataFrame, got {type(df)}")
if ITEMID not in df.columns:
raise ValueError(f"data must have a `{ITEMID}` column")
if TIMESTAMP not in df.columns:
raise ValueError(f"data must have a `{TIMESTAMP}` column")
if df[ITEMID].isnull().any():
raise ValueError(f"`{ITEMID}` column can not have nan")
if df[TIMESTAMP].isnull().any():
raise ValueError(f"`{TIMESTAMP}` column can not have nan")
if not df[ITEMID].dtype == "int64":
raise ValueError(f"for {ITEMID}, the only pandas dtype allowed is ‘int64’.")
if not df[TIMESTAMP].dtype == "datetime64[ns]":
raise ValueError(
f"for {TIMESTAMP}, the only pandas dtype allowed is ‘datetime64[ns]’."
)
    @classmethod
    def _validate_multi_index_data_frame(cls, data: pd.DataFrame):
        """Validate a multi-index pd.DataFrame can be converted to TimeSeriesDataFrame
        Parameters:
        -----------
        data : pd.DataFrame
            a data frame in pd.DataFrame format.
        """
        if not isinstance(data, pd.DataFrame):
            raise ValueError(f"data must be a pd.DataFrame, got {type(data)}")
        if not isinstance(data.index, pd.MultiIndex):
            raise ValueError(f"data must have pd.MultiIndex, got {type(data.index)}")
        # Positional access into index.dtypes assumes exactly two levels in
        # (item_id, timestamp) order.
        # NOTE(review): relies on pandas exposing MultiIndex.dtypes as an
        # ordered array — version-sensitive; confirm against the pinned
        # pandas version.
        if not data.index.dtypes.array[0] == "int64":
            raise ValueError(f"for {ITEMID}, the only pandas dtype allowed is ‘int64’.")
        if not data.index.dtypes.array[1] == "datetime64[ns]":
            raise ValueError(
                f"for {TIMESTAMP}, the only pandas dtype allowed is ‘datetime64[ns]’."
            )
        # Index level names must match exactly, and in order.
        if not data.index.names == (f"{ITEMID}", f"{TIMESTAMP}"):
            raise ValueError(
                f"data must have index names as ('{ITEMID}', '{TIMESTAMP}'), got {data.index.names}"
            )
@classmethod
def from_iterable_dataset(cls, iterable_dataset: Iterable) -> TimeSeriesDataFrame:
"""Convenient function to Iterable dataset to TimeSeriesDataFrame.
Parameters:
-----------
iterable_dataset : Iterable
The iterable_dataset must have the following format:
iterable_dataset = [
{"target": [0, 1, 2], "start": pd.Timestamp("01-01-2019", freq='D')},
{"target": [3, 4, 5], "start": pd.Timestamp("01-01-2019", freq='D')},
{"target": [6, 7, 8], "start": pd.Timestamp("01-01-2019", freq='D')}
]
Returns:
--------
ts_df : TimeSeriesDataFrame
A data frame in TimeSeriesDataFrame format.
"""
cls._validate_iterable(iterable_dataset)
all_ts = []
for i, ts in enumerate(iterable_dataset):
start_timestamp = ts["start"]
target = ts["target"]
datetime_index = tuple(
pd.date_range(
start_timestamp, periods=len(target), freq=start_timestamp.freq
)
)
idx = pd.MultiIndex.from_product(
[(i,), datetime_index], names=[ITEMID, TIMESTAMP]
)
ts_df = pd.Series(target, name="target", index=idx).to_frame()
all_ts.append(ts_df)
return TimeSeriesDataFrame(pd.concat(all_ts))
@classmethod
def from_data_frame(cls, df: pd.DataFrame) -> TimeSeriesDataFrame:
"""Convert a normal pd.DataFrame to a TimeSeriesDataFrame
Parameters:
-----------
df: pd.DataFrame
A pd.DataFrame with 'item_id' and 'timestamp' as columns. For example:
item_id timestamp target
0 0 2019-01-01 0
1 0 2019-01-02 1
2 0 2019-01-03 2
3 1 2019-01-01 3
4 1 2019-01-02 4
5 1 2019-01-03 5
6 2 2019-01-01 6
7 2 2019-01-02 7
8 2 2019-01-03 8
Returns:
--------
ts_df : TimeSeriesDataFrame
A data frame in TimeSeriesDataFrame format.
"""
cls._validate_data_frame(df)
return TimeSeriesDataFrame(df.set_index([ITEMID, TIMESTAMP]))
def split_by_time(
self, cutoff_time: pd.Timestamp
) -> Tuple[TimeSeriesDataFrame, TimeSeriesDataFrame]:
"""Split dataframe by a cutoff_time.
Parameters
----------
cutoff_time : pd.Timestamp
The time to Split the current data frame into two data frames, all in TimeSeriesDataFrame format.
Returns
-------
data_before : TimeSeriesDataFrame
The first one after split contains time-series before the cutoff_time (exclude cutoff_time).
data_after : TimeSeriesDataFrame
The second one after split contains time-series after the cutoff_time (include cutoff_time).
"""
nanosecond_before_cutoff = cutoff_time - pd.Timedelta(nanoseconds=1)
data_before = self.loc[(slice(None), slice(None, nanosecond_before_cutoff)), :]
data_after = self.loc[(slice(None), slice(cutoff_time, None)), :]
return TimeSeriesDataFrame(data_before), TimeSeriesDataFrame(data_after)
def split_by_item(
self, cutoff_item: int
) -> Tuple[TimeSeriesDataFrame, TimeSeriesDataFrame]:
"""Split dataframe by an item_id cutoff_item.
Parameters
----------
cutoff_item : int
The item_id to Split the current data frame into two data frames, all in TimeSeriesDataFrame format.
Returns
-------
data_before : TimeSeriesDataFrame
The first one after split contains time-series before the cutoff_item (exclude cutoff_item).
data_after : TimeSeriesDataFrame
The second one after split contains time-series after the cutoff_item (include cutoff_item).
"""
data_before = self.loc[(slice(None, cutoff_item - 1), slice(None)), :]
data_after = self.loc[(slice(cutoff_item, None), slice(None)), :]
return TimeSeriesDataFrame(data_before), TimeSeriesDataFrame(data_after)
    def subsequence(
        self, start: pd.Timestamp, end: pd.Timestamp
    ) -> TimeSeriesDataFrame:
        """Extract time-series between start (inclusive) and end (exclusive) time.
        Parameters
        ----------
        start : pd.Timestamp
            The start time (inclusive) of a time range that will be used for subsequence.
        end : pd.Timestamp
            The end time (exclusive) of a time range that will be used for subsequence.
        Returns
        -------
        ts_df : TimeSeriesDataFrame
            A new data frame in TimeSeriesDataFrame format contains time-series in a time range
            defined between start and end time.
        """
        if end < start:
            # NOTE(review): "stat time" in the message below is a typo for
            # "start time"; the runtime string is left unchanged in this
            # documentation-only pass.
            raise ValueError(f"end time {end} is earlier than stat time {start}")
        # .loc label slicing is inclusive at both ends; subtract one
        # nanosecond so the `end` boundary is excluded, as documented.
        nanosecond_before_end = end - pd.Timedelta(nanoseconds=1)
        return TimeSeriesDataFrame(
            self.loc[(slice(None), slice(start, nanosecond_before_end)), :]
        ) | forecasting/src/autogluon/forecasting/dataset/ts_dataframe.py | from __future__ import annotations
import itertools
from typing import Any, Tuple
from collections.abc import Iterable
import pandas as pd
import numpy as np
ITEMID = "item_id"
TIMESTAMP = "timestamp"
class TimeSeriesDataFrame(pd.DataFrame):
"""TimeSeriesDataFrame to represent time-series dataset.
Parameters
----------
data : Any
Time-series data to construct a TimeSeriesDataFrame.
It currently supports three input formats:
1. Time-series data in Iterable format. For example:
iterable_dataset = [
{"target": [0, 1, 2], "start": pd.Timestamp("01-01-2019", freq='D')},
{"target": [3, 4, 5], "start": pd.Timestamp("01-01-2019", freq='D')},
{"target": [6, 7, 8], "start": pd.Timestamp("01-01-2019", freq='D')}
]
2. Time-series data in pd.DataFrame format without multi-index. For example:
item_id timestamp target
0 0 2019-01-01 0
1 0 2019-01-02 1
2 0 2019-01-03 2
3 1 2019-01-01 3
4 1 2019-01-02 4
5 1 2019-01-03 5
6 2 2019-01-01 6
7 2 2019-01-02 7
8 2 2019-01-03 8
3. Time-series data in pd.DataFrame format with multi-index on item_id and timestamp. For example:
target
item_id timestamp
0 2019-01-01 0
2019-01-02 1
2019-01-03 2
1 2019-01-01 3
2019-01-02 4
2019-01-03 5
2 2019-01-01 6
2019-01-02 7
2019-01-03 8
This example can be found using example() function.
"""
def __init__(self, data: Any, *args, **kwargs):
if isinstance(data, pd.DataFrame):
if isinstance(data.index, pd.MultiIndex):
self._validate_multi_index_data_frame(data)
else:
data = self.from_data_frame(data)
else:
data = self.from_iterable_dataset(data)
super().__init__(data=data, *args, **kwargs)
# TODO: move out of production code to dedicated example in examples
@classmethod
def example(cls):
"""An example TimeSeriesDataFrame.
Returns
-------
ts_df : TimeSeriesDataFrame
It returns an example TimeSeriesDataFrame as:
target
item_id timestamp
0 2019-01-01 0
2019-01-02 1
2019-01-03 2
1 2019-01-01 3
2019-01-02 4
2019-01-03 5
2 2019-01-01 6
2019-01-02 7
2019-01-03 8
"""
target = np.arange(9)
datetime_index = tuple(
pd.date_range(pd.Timestamp("01-01-2019"), periods=3, freq="D") # noqa
)
item_ids = (0, 1, 2)
multi_index = pd.MultiIndex.from_product(
[item_ids, datetime_index], names=[ITEMID, TIMESTAMP]
)
return TimeSeriesDataFrame(
pd.Series(target, name="target", index=multi_index).to_frame()
)
@classmethod
def _validate_iterable(cls, data: Iterable):
if not isinstance(data, Iterable):
raise ValueError("data must be of type Iterable.")
first = next(iter(data), None)
if first is None:
raise ValueError("data has no time-series.")
for i, ts in enumerate(itertools.chain([first], data)):
if not isinstance(ts, dict):
raise ValueError(
f"{i}'th time-series in data must be a dict, got{type(ts)}"
)
if not ("target" in ts and "start" in ts):
raise ValueError(
f"{i}'th time-series in data must have 'target' and 'start', got{ts.keys()}"
)
if not isinstance(ts["start"], pd.Timestamp) or ts["start"].freq is None:
raise ValueError(
f"{i}'th time-series must have timestamp as 'start' with freq specified, got {ts['start']}"
)
@classmethod
def _validate_data_frame(cls, df: pd.DataFrame):
if not isinstance(df, pd.DataFrame):
raise ValueError(f"data must be a pd.DataFrame, got {type(df)}")
if ITEMID not in df.columns:
raise ValueError(f"data must have a `{ITEMID}` column")
if TIMESTAMP not in df.columns:
raise ValueError(f"data must have a `{TIMESTAMP}` column")
if df[ITEMID].isnull().any():
raise ValueError(f"`{ITEMID}` column can not have nan")
if df[TIMESTAMP].isnull().any():
raise ValueError(f"`{TIMESTAMP}` column can not have nan")
if not df[ITEMID].dtype == "int64":
raise ValueError(f"for {ITEMID}, the only pandas dtype allowed is ‘int64’.")
if not df[TIMESTAMP].dtype == "datetime64[ns]":
raise ValueError(
f"for {TIMESTAMP}, the only pandas dtype allowed is ‘datetime64[ns]’."
)
@classmethod
def _validate_multi_index_data_frame(cls, data: pd.DataFrame):
"""Validate a multi-index pd.DataFrame can be converted to TimeSeriesDataFrame
Parameters:
-----------
data : pd.DataFrame
a data frame in pd.DataFrame format.
"""
if not isinstance(data, pd.DataFrame):
raise ValueError(f"data must be a pd.DataFrame, got {type(data)}")
if not isinstance(data.index, pd.MultiIndex):
raise ValueError(f"data must have pd.MultiIndex, got {type(data.index)}")
if not data.index.dtypes.array[0] == "int64":
raise ValueError(f"for {ITEMID}, the only pandas dtype allowed is ‘int64’.")
if not data.index.dtypes.array[1] == "datetime64[ns]":
raise ValueError(
f"for {TIMESTAMP}, the only pandas dtype allowed is ‘datetime64[ns]’."
)
if not data.index.names == (f"{ITEMID}", f"{TIMESTAMP}"):
raise ValueError(
f"data must have index names as ('{ITEMID}', '{TIMESTAMP}'), got {data.index.names}"
)
@classmethod
def from_iterable_dataset(cls, iterable_dataset: Iterable) -> TimeSeriesDataFrame:
"""Convenient function to Iterable dataset to TimeSeriesDataFrame.
Parameters:
-----------
iterable_dataset : Iterable
The iterable_dataset must have the following format:
iterable_dataset = [
{"target": [0, 1, 2], "start": pd.Timestamp("01-01-2019", freq='D')},
{"target": [3, 4, 5], "start": pd.Timestamp("01-01-2019", freq='D')},
{"target": [6, 7, 8], "start": pd.Timestamp("01-01-2019", freq='D')}
]
Returns:
--------
ts_df : TimeSeriesDataFrame
A data frame in TimeSeriesDataFrame format.
"""
cls._validate_iterable(iterable_dataset)
all_ts = []
for i, ts in enumerate(iterable_dataset):
start_timestamp = ts["start"]
target = ts["target"]
datetime_index = tuple(
pd.date_range(
start_timestamp, periods=len(target), freq=start_timestamp.freq
)
)
idx = pd.MultiIndex.from_product(
[(i,), datetime_index], names=[ITEMID, TIMESTAMP]
)
ts_df = pd.Series(target, name="target", index=idx).to_frame()
all_ts.append(ts_df)
return TimeSeriesDataFrame(pd.concat(all_ts))
@classmethod
def from_data_frame(cls, df: pd.DataFrame) -> TimeSeriesDataFrame:
"""Convert a normal pd.DataFrame to a TimeSeriesDataFrame
Parameters:
-----------
df: pd.DataFrame
A pd.DataFrame with 'item_id' and 'timestamp' as columns. For example:
item_id timestamp target
0 0 2019-01-01 0
1 0 2019-01-02 1
2 0 2019-01-03 2
3 1 2019-01-01 3
4 1 2019-01-02 4
5 1 2019-01-03 5
6 2 2019-01-01 6
7 2 2019-01-02 7
8 2 2019-01-03 8
Returns:
--------
ts_df : TimeSeriesDataFrame
A data frame in TimeSeriesDataFrame format.
"""
cls._validate_data_frame(df)
return TimeSeriesDataFrame(df.set_index([ITEMID, TIMESTAMP]))
def split_by_time(
self, cutoff_time: pd.Timestamp
) -> Tuple[TimeSeriesDataFrame, TimeSeriesDataFrame]:
"""Split dataframe by a cutoff_time.
Parameters
----------
cutoff_time : pd.Timestamp
The time to Split the current data frame into two data frames, all in TimeSeriesDataFrame format.
Returns
-------
data_before : TimeSeriesDataFrame
The first one after split contains time-series before the cutoff_time (exclude cutoff_time).
data_after : TimeSeriesDataFrame
The second one after split contains time-series after the cutoff_time (include cutoff_time).
"""
nanosecond_before_cutoff = cutoff_time - pd.Timedelta(nanoseconds=1)
data_before = self.loc[(slice(None), slice(None, nanosecond_before_cutoff)), :]
data_after = self.loc[(slice(None), slice(cutoff_time, None)), :]
return TimeSeriesDataFrame(data_before), TimeSeriesDataFrame(data_after)
def split_by_item(
self, cutoff_item: int
) -> Tuple[TimeSeriesDataFrame, TimeSeriesDataFrame]:
"""Split dataframe by an item_id cutoff_item.
Parameters
----------
cutoff_item : int
The item_id to Split the current data frame into two data frames, all in TimeSeriesDataFrame format.
Returns
-------
data_before : TimeSeriesDataFrame
The first one after split contains time-series before the cutoff_item (exclude cutoff_item).
data_after : TimeSeriesDataFrame
The second one after split contains time-series after the cutoff_item (include cutoff_item).
"""
data_before = self.loc[(slice(None, cutoff_item - 1), slice(None)), :]
data_after = self.loc[(slice(cutoff_item, None), slice(None)), :]
return TimeSeriesDataFrame(data_before), TimeSeriesDataFrame(data_after)
def subsequence(
self, start: pd.Timestamp, end: pd.Timestamp
) -> TimeSeriesDataFrame:
"""Extract time-series between start (inclusive) and end (exclusive) time.
Parameters
----------
start : pd.Timestamp
The start time (inclusive) of a time range that will be used for subsequence.
end : pd.Timestamp
The end time (exclusive) of a time range that will be used for subsequence.
Returns
-------
ts_df : TimeSeriesDataFrame
A new data frame in TimeSeriesDataFrame format contains time-series in a time range
defined between start and end time.
"""
if end < start:
raise ValueError(f"end time {end} is earlier than stat time {start}")
nanosecond_before_end = end - pd.Timedelta(nanoseconds=1)
return TimeSeriesDataFrame(
self.loc[(slice(None), slice(start, nanosecond_before_end)), :]
) | 0.815416 | 0.435241 |
from abc import ABC, abstractmethod
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
from brainframe.api import BrainFrameAPI
from brainframe.api.bf_codecs import Identity
VECTOR = List[float]
# TODO: Use @dataclass decorator in Python3.7
class IdentityPrototype:
"""Information on a to-be-created identity, including the images and and
vectors that should be saved under this identity.
"""
def __init__(self):
self.unique_name: str = None
"""The identity's unique name."""
self.nickname: str = None
"""The identity's nickname."""
self.images_by_class_name: Dict[str, List[Tuple[Path, bytes]]] = \
defaultdict(list)
"""The images that should be encoded and assigned to this identity. The
key is the class name, and the value is all images that should be
encoded under that class name for this identity.
"""
self.vectors_by_class_name: Dict[str, List[Tuple[Path, VECTOR]]] = \
defaultdict(list)
"""The vectors that should be assigned to this identity. The key is the
class name, and the value is all vectors that should be encoded under
that class name for this identity.
"""
def __repr__(self):
return f"IdentityPrototype(unique_name={self.unique_name}, " \
f"nickname={self.nickname}, " \
f"images_by_class_name={list(self.images_by_class_name.keys())}))"
class IdentityFinder(ABC):
"""Describes a type that is capable of creating IdentityPrototypes from
some source. This can be used to import identities from another format into
BrainFrame.
"""
@abstractmethod
def find(self) -> List[IdentityPrototype]:
"""Finds IdentityPrototypes from some source.
:return: Found prototypes
"""
raise NotImplementedError
def create_identity_from_prototype(api: BrainFrameAPI, prototype: IdentityPrototype) \
-> Identity:
"""Creates the identity and all of its encodings based on the given
prototype.
:param api: The API object to use when creating these identities
:param prototype: The prototype to create from
:return: The created identity
"""
identity = Identity(
unique_name=prototype.unique_name,
nickname=prototype.nickname,
metadata={})
identity = api.set_identity(identity)
for class_name, images in prototype.images_by_class_name.items():
for image_name, image_bytes in images:
image_id = api.new_storage_as_image(image_bytes)
api.new_identity_image(identity.id, class_name, image_id)
for class_name, vectors in prototype.vectors_by_class_name.items():
for file_name, vector in vectors:
api.new_identity_vector(identity.id, class_name, vector)
return identity | brainframe_qt/api_utils/identities/identity_finder.py | from abc import ABC, abstractmethod
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
from brainframe.api import BrainFrameAPI
from brainframe.api.bf_codecs import Identity
VECTOR = List[float]
# TODO: Use @dataclass decorator in Python3.7
class IdentityPrototype:
"""Information on a to-be-created identity, including the images and and
vectors that should be saved under this identity.
"""
def __init__(self):
self.unique_name: str = None
"""The identity's unique name."""
self.nickname: str = None
"""The identity's nickname."""
self.images_by_class_name: Dict[str, List[Tuple[Path, bytes]]] = \
defaultdict(list)
"""The images that should be encoded and assigned to this identity. The
key is the class name, and the value is all images that should be
encoded under that class name for this identity.
"""
self.vectors_by_class_name: Dict[str, List[Tuple[Path, VECTOR]]] = \
defaultdict(list)
"""The vectors that should be assigned to this identity. The key is the
class name, and the value is all vectors that should be encoded under
that class name for this identity.
"""
def __repr__(self):
return f"IdentityPrototype(unique_name={self.unique_name}, " \
f"nickname={self.nickname}, " \
f"images_by_class_name={list(self.images_by_class_name.keys())}))"
class IdentityFinder(ABC):
"""Describes a type that is capable of creating IdentityPrototypes from
some source. This can be used to import identities from another format into
BrainFrame.
"""
@abstractmethod
def find(self) -> List[IdentityPrototype]:
"""Finds IdentityPrototypes from some source.
:return: Found prototypes
"""
raise NotImplementedError
def create_identity_from_prototype(api: BrainFrameAPI, prototype: IdentityPrototype) \
-> Identity:
"""Creates the identity and all of its encodings based on the given
prototype.
:param api: The API object to use when creating these identities
:param prototype: The prototype to create from
:return: The created identity
"""
identity = Identity(
unique_name=prototype.unique_name,
nickname=prototype.nickname,
metadata={})
identity = api.set_identity(identity)
for class_name, images in prototype.images_by_class_name.items():
for image_name, image_bytes in images:
image_id = api.new_storage_as_image(image_bytes)
api.new_identity_image(identity.id, class_name, image_id)
for class_name, vectors in prototype.vectors_by_class_name.items():
for file_name, vector in vectors:
api.new_identity_vector(identity.id, class_name, vector)
return identity | 0.696268 | 0.351784 |
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import ast
from mil_vision_tools import CentroidObjectsTracker, TrackedObject
from vision_utils import centroid
from overlay import Overlay
from mil_msgs.msg import ObjectsInImage, ObjectInImage
import numpy as np
class image_object_tracker:
def __init__(self):
#Subscribers
rospy.Subscriber("/raw_objects_topic", ObjectsInImage,self.objects_in_image_cb)
self.pub_objects_in_image = rospy.Publisher("persistent_objects_topic", ObjectsInImage, queue_size = 1)
self.tracker = CentroidObjectsTracker(expiration_seconds = rospy.get_param("expiration_seconds"), max_distance=rospy.get_param("max_distance"))
def objects_in_image_cb(self, unfiltered_objects_in_image):
# gets unfiltered objects from cv scripts here
for i in unfiltered_objects_in_image.objects:
#currently impliments only the centroid tracker, maybe polygon tracker in near future
c = centroid(i)
i.attributes = str({"centroid": c})
#note: data field is holding an ObjectInImage i
obj = self.tracker.add_observation(unfiltered_objects_in_image.header.stamp, np.array(c), data=i)
#standard use of CentroidObjectsTracker
self.tracker.clear_expired(now=unfiltered_objects_in_image.header.stamp)
persistent = self.tracker.get_persistent_objects(min_observations=rospy.get_param("min_observations"), min_age=rospy.Duration(rospy.get_param("min_age")))
objects_in_image = ObjectsInImage()#these are the actual persisten/filtered objects in image
objects_in_image.header = unfiltered_objects_in_image.header
for i in persistent:
#this is done so that subscriber scripts can have the id of things easily
#attributes holds a dictionary in string form
attributes = ast.literal_eval(i.data.attributes)
attributes["id"] = i.id
i.data.attributes = str(attributes)
objects_in_image.objects.append(i.data)
self.pub_objects_in_image.publish(objects_in_image)
if __name__ == '__main__':
rospy.init_node("image_object_tracker", anonymous = False)
image_object_tracker = image_object_tracker()
rospy.spin() | perception/mil_vision/src/mil_unified_vision_interface/image_object_tracker.py |
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import ast
from mil_vision_tools import CentroidObjectsTracker, TrackedObject
from vision_utils import centroid
from overlay import Overlay
from mil_msgs.msg import ObjectsInImage, ObjectInImage
import numpy as np
class image_object_tracker:
def __init__(self):
#Subscribers
rospy.Subscriber("/raw_objects_topic", ObjectsInImage,self.objects_in_image_cb)
self.pub_objects_in_image = rospy.Publisher("persistent_objects_topic", ObjectsInImage, queue_size = 1)
self.tracker = CentroidObjectsTracker(expiration_seconds = rospy.get_param("expiration_seconds"), max_distance=rospy.get_param("max_distance"))
def objects_in_image_cb(self, unfiltered_objects_in_image):
# gets unfiltered objects from cv scripts here
for i in unfiltered_objects_in_image.objects:
#currently impliments only the centroid tracker, maybe polygon tracker in near future
c = centroid(i)
i.attributes = str({"centroid": c})
#note: data field is holding an ObjectInImage i
obj = self.tracker.add_observation(unfiltered_objects_in_image.header.stamp, np.array(c), data=i)
#standard use of CentroidObjectsTracker
self.tracker.clear_expired(now=unfiltered_objects_in_image.header.stamp)
persistent = self.tracker.get_persistent_objects(min_observations=rospy.get_param("min_observations"), min_age=rospy.Duration(rospy.get_param("min_age")))
objects_in_image = ObjectsInImage()#these are the actual persisten/filtered objects in image
objects_in_image.header = unfiltered_objects_in_image.header
for i in persistent:
#this is done so that subscriber scripts can have the id of things easily
#attributes holds a dictionary in string form
attributes = ast.literal_eval(i.data.attributes)
attributes["id"] = i.id
i.data.attributes = str(attributes)
objects_in_image.objects.append(i.data)
self.pub_objects_in_image.publish(objects_in_image)
if __name__ == '__main__':
rospy.init_node("image_object_tracker", anonymous = False)
image_object_tracker = image_object_tracker()
rospy.spin() | 0.414069 | 0.183557 |
from __future__ import absolute_import, division, print_function
from elasticsearch import Elasticsearch
import brorig.log as log
class SearchManager:
def __init__(self, index):
self.index = "brorig_%s" % index.lower()
self.es = Elasticsearch()
if not self.es.ping():
log.warning("Search engine not available")
self.es = None
return
self.es.indices.create(index=self.index)
def packet_population(self, network):
"""
Transfer all search criterion packet to the search engine.
"""
if not self.es:
return
log.debug("Adding packet in search engine...")
packets = [p for n in network.nodes for p in n.packet_list()] + \
[p for l in network.links for p in l.packet_list()]
for p in packets:
self.es.index(index=self.index, doc_type='packets', id=p.uuid, body=p.search_criterion())
log.debug("All packets added to the search engine")
def clean(self):
"""
Clean the search engine
"""
if not self.es:
return
log.debug("Destroy index %s in search engine" % self.index)
self.es.indices.delete(index=self.index, ignore=[400, 404])
def search(self, filter):
"""
Ask to the search engine to gives the most pertinent result based on the user filter
:param filter: request user filter
:return: list of packet UUID resulted of the search request
"""
serialized_filter = filter.copy()
# TODO support time ?
del serialized_filter['time']
if serialized_filter == {}:
return None
macthes = {"match": {c: v} for (c, v) in serialized_filter.iteritems()}
result = self.es.search(index=self.index, doc_type='packets', body={
"query": {
"bool": {
"must": macthes
}
}
})
log.debug("Search engine found %s packets" % result['hits']['total'])
return [h['_id'] for h in result['hits']['hits']]
def __del__(self):
self.clean() | brorig/search.py |
from __future__ import absolute_import, division, print_function
from elasticsearch import Elasticsearch
import brorig.log as log
class SearchManager:
def __init__(self, index):
self.index = "brorig_%s" % index.lower()
self.es = Elasticsearch()
if not self.es.ping():
log.warning("Search engine not available")
self.es = None
return
self.es.indices.create(index=self.index)
def packet_population(self, network):
"""
Transfer all search criterion packet to the search engine.
"""
if not self.es:
return
log.debug("Adding packet in search engine...")
packets = [p for n in network.nodes for p in n.packet_list()] + \
[p for l in network.links for p in l.packet_list()]
for p in packets:
self.es.index(index=self.index, doc_type='packets', id=p.uuid, body=p.search_criterion())
log.debug("All packets added to the search engine")
def clean(self):
"""
Clean the search engine
"""
if not self.es:
return
log.debug("Destroy index %s in search engine" % self.index)
self.es.indices.delete(index=self.index, ignore=[400, 404])
def search(self, filter):
"""
Ask to the search engine to gives the most pertinent result based on the user filter
:param filter: request user filter
:return: list of packet UUID resulted of the search request
"""
serialized_filter = filter.copy()
# TODO support time ?
del serialized_filter['time']
if serialized_filter == {}:
return None
macthes = {"match": {c: v} for (c, v) in serialized_filter.iteritems()}
result = self.es.search(index=self.index, doc_type='packets', body={
"query": {
"bool": {
"must": macthes
}
}
})
log.debug("Search engine found %s packets" % result['hits']['total'])
return [h['_id'] for h in result['hits']['hits']]
def __del__(self):
self.clean() | 0.596433 | 0.195536 |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
df=pd.read_csv("ex2data1.txt",header=None)
X=df.iloc[:,:-1].values
y=df.iloc[:,-1].values
def sigmoid(z):
return 1 / (1 + np.exp(-z))
def costFunction(theta, X, y):
"""
Takes in numpy array theta, x and y and return the logistic regression cost function and gradient
"""
# print "Theta", theta
# print "X", X
# print "y", y
m=len(y)
predictions = sigmoid(np.dot(X,theta))
error = (-y * np.log(predictions)) - ((1-y)*np.log(1-predictions))
cost = 1/m * sum(error)
grad = 1/m * np.dot(X.transpose(),(predictions - y))
return cost[0] , grad
def gradientDescent(X,y,theta,alpha,num_iters):
"""
Take in numpy array X, y and theta and update theta by taking num_iters gradient steps
with learning rate of alpha
return theta and the list of the cost of theta during each iteration
"""
m=len(y)
J_history =[]
for i in range(num_iters):
cost, grad = costFunction(theta,X,y)
theta = theta - (alpha * grad)
J_history.append(cost)
return theta , J_history
def featureNormalization(X):
"""
Take in numpy array of X values and return normalize X values,
the mean and standard deviation of each feature
"""
mean=np.mean(X,axis=0)
std=np.std(X,axis=0)
X_norm = (X - mean)/std
return X_norm , mean , std
def plot(x, y):
pos , neg = (y==1).reshape(100,1) , (y==0).reshape(100,1)
plt.scatter(X[pos[:,0],0],X[pos[:,0],1],c="r",marker="+")
plt.scatter(X[neg[:,0],0],X[neg[:,0],1],marker="o",s=10)
plt.xlabel("Exam 1 score")
plt.ylabel("Exam 2 score")
plt.legend(["Admitted","Not admitted"],loc=0)
m , n = X.shape[0], X.shape[1]
X, X_mean, X_std = featureNormalization(X)
X= np.append(np.ones((m,1)),X,axis=1)
y=y.reshape(m,1)
initial_theta = np.zeros((n+1,1))
cost, grad= costFunction(initial_theta,X,y)
theta , J_history = gradientDescent(X,y,initial_theta,1,400)
print("Cost of initial theta is",cost)
print("Gradient at initial theta (zeros):",grad)
print("Theta optimized by gradient descent:",theta)
print("The cost of the optimized theta:",J_history[-1]) | and.py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
df=pd.read_csv("ex2data1.txt",header=None)
X=df.iloc[:,:-1].values
y=df.iloc[:,-1].values
def sigmoid(z):
return 1 / (1 + np.exp(-z))
def costFunction(theta, X, y):
"""
Takes in numpy array theta, x and y and return the logistic regression cost function and gradient
"""
# print "Theta", theta
# print "X", X
# print "y", y
m=len(y)
predictions = sigmoid(np.dot(X,theta))
error = (-y * np.log(predictions)) - ((1-y)*np.log(1-predictions))
cost = 1/m * sum(error)
grad = 1/m * np.dot(X.transpose(),(predictions - y))
return cost[0] , grad
def gradientDescent(X,y,theta,alpha,num_iters):
"""
Take in numpy array X, y and theta and update theta by taking num_iters gradient steps
with learning rate of alpha
return theta and the list of the cost of theta during each iteration
"""
m=len(y)
J_history =[]
for i in range(num_iters):
cost, grad = costFunction(theta,X,y)
theta = theta - (alpha * grad)
J_history.append(cost)
return theta , J_history
def featureNormalization(X):
"""
Take in numpy array of X values and return normalize X values,
the mean and standard deviation of each feature
"""
mean=np.mean(X,axis=0)
std=np.std(X,axis=0)
X_norm = (X - mean)/std
return X_norm , mean , std
def plot(x, y):
pos , neg = (y==1).reshape(100,1) , (y==0).reshape(100,1)
plt.scatter(X[pos[:,0],0],X[pos[:,0],1],c="r",marker="+")
plt.scatter(X[neg[:,0],0],X[neg[:,0],1],marker="o",s=10)
plt.xlabel("Exam 1 score")
plt.ylabel("Exam 2 score")
plt.legend(["Admitted","Not admitted"],loc=0)
m , n = X.shape[0], X.shape[1]
X, X_mean, X_std = featureNormalization(X)
X= np.append(np.ones((m,1)),X,axis=1)
y=y.reshape(m,1)
initial_theta = np.zeros((n+1,1))
cost, grad= costFunction(initial_theta,X,y)
theta , J_history = gradientDescent(X,y,initial_theta,1,400)
print("Cost of initial theta is",cost)
print("Gradient at initial theta (zeros):",grad)
print("Theta optimized by gradient descent:",theta)
print("The cost of the optimized theta:",J_history[-1]) | 0.295027 | 0.710402 |
from __future__ import print_function
import numpy as np
import sys
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'x-large',
'figure.figsize': (13, 6),
'figure.autolayout': True,
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
month="Aug2021"
def get_timing(lang, test):
result = None
with open("../Results/timing_results_"+month+".txt", "r") as fid:
lines = fid.readlines()
for line in lines:
if (lang in line) and (test in line):
a, b, result = line.split(",")
break
return result
languages = ["C", "Fortran", "Python", "Numba", "Julia", "IDL", "Matlab", "R", "Java"]
#languages = ["C", "Fortran", "Python", "Numba", "Julia", "IDL", "Matlab", "R", "Java", "Scala"]
test_cases = ["copy_matrix", "look_and_say", "iterative_fibonacci", "recursive_fibonacci", "matrix_multiplication", "evaluate_functions", "belief_propagation", "markov_chain", "laplace_equation", "munchauser_number", "pernicious_number"]
num_lang = len(languages)
num_test = len(test_cases)
A = np.empty((num_lang,num_test,))
B = np.zeros((num_lang,num_test,))
A[:] = np.nan
i = 0
for lang in languages:
j = 0
for test in test_cases:
result = get_timing(lang, test)
if result:
A[i,j] = float(result)
j += 1
i += 1
A = np.ma.masked_invalid(A)
for j in range(num_test):
if A[0,j] == 0.0:
A[:,j] = np.exp(A[:,j])
else:
coef = A[0,j]
A[:,j] = A[:,j] / coef
data_sets = [A[j,:] for j in range(num_lang)]
colors = ["blue", "orange", "green", "yellow", "purple", "red", "pink", "olive", "brown", "gray", "lime"]
fig, ax = plt.subplots(figsize=(15.0, 7.0))
pos = np.arange(num_test)
bar_width = 0.085
i = 0
for a in data_sets:
ax.bar(pos + (i+1)*bar_width, a, bar_width, color=colors[i])
i += 1
plt.yscale('log')#, nonposy='clip')
ax.yaxis.grid()
#plt.legend(loc='best')
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(languages, loc='center left', bbox_to_anchor=(1, 0.5))
#plt.legend(languages, loc='upper center')
ax.set_xticks(pos)
ax.set_xticklabels(test_cases, rotation=45)
plt.savefig("fig_languages_histo_"+month+".png", bbox_inches = "tight")
plt.show() | Plots/plot_timing_histo.py |
from __future__ import print_function
import numpy as np
import sys
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'x-large',
'figure.figsize': (13, 6),
'figure.autolayout': True,
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
month="Aug2021"
def get_timing(lang, test):
result = None
with open("../Results/timing_results_"+month+".txt", "r") as fid:
lines = fid.readlines()
for line in lines:
if (lang in line) and (test in line):
a, b, result = line.split(",")
break
return result
languages = ["C", "Fortran", "Python", "Numba", "Julia", "IDL", "Matlab", "R", "Java"]
#languages = ["C", "Fortran", "Python", "Numba", "Julia", "IDL", "Matlab", "R", "Java", "Scala"]
test_cases = ["copy_matrix", "look_and_say", "iterative_fibonacci", "recursive_fibonacci", "matrix_multiplication", "evaluate_functions", "belief_propagation", "markov_chain", "laplace_equation", "munchauser_number", "pernicious_number"]
num_lang = len(languages)
num_test = len(test_cases)
A = np.empty((num_lang,num_test,))
B = np.zeros((num_lang,num_test,))
A[:] = np.nan
i = 0
for lang in languages:
j = 0
for test in test_cases:
result = get_timing(lang, test)
if result:
A[i,j] = float(result)
j += 1
i += 1
A = np.ma.masked_invalid(A)
for j in range(num_test):
if A[0,j] == 0.0:
A[:,j] = np.exp(A[:,j])
else:
coef = A[0,j]
A[:,j] = A[:,j] / coef
data_sets = [A[j,:] for j in range(num_lang)]
colors = ["blue", "orange", "green", "yellow", "purple", "red", "pink", "olive", "brown", "gray", "lime"]
fig, ax = plt.subplots(figsize=(15.0, 7.0))
pos = np.arange(num_test)
bar_width = 0.085
i = 0
for a in data_sets:
ax.bar(pos + (i+1)*bar_width, a, bar_width, color=colors[i])
i += 1
plt.yscale('log')#, nonposy='clip')
ax.yaxis.grid()
#plt.legend(loc='best')
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(languages, loc='center left', bbox_to_anchor=(1, 0.5))
#plt.legend(languages, loc='upper center')
ax.set_xticks(pos)
ax.set_xticklabels(test_cases, rotation=45)
plt.savefig("fig_languages_histo_"+month+".png", bbox_inches = "tight")
plt.show() | 0.396769 | 0.415551 |
from os import _exit
from traceback import print_exception
import os
import sys
try:
_FLAGS = None
# In the pip module the library also exports the clingo symbols, which
# should be globally available for other libraries depending on clingo.
if hasattr(sys, 'setdlopenflags'):
_FLAGS = sys.getdlopenflags()
sys.setdlopenflags(os.RTLD_LAZY|os.RTLD_GLOBAL)
try:
# Note: imported first to correctly handle the embedded case
from _clingo import ffi as _ffi, lib as _lib # type: ignore # pylint: disable=no-name-in-module
except ImportError:
from ._clingo import ffi as _ffi, lib as _lib # type: ignore # pylint: disable=no-name-in-module
finally:
if _FLAGS is not None:
sys.setdlopenflags(_FLAGS)
def _str(f_size, f_str, *args, handler=None):
p_size = _ffi.new('size_t*')
_handle_error(f_size(*args, p_size), handler)
p_str = _ffi.new('char[]', p_size[0])
_handle_error(f_str(*args, p_str, p_size[0]), handler)
return _ffi.string(p_str).decode()
def _c_call(c_type, c_fun, *args, handler=None):
'''
Helper to simplify calling C functions where the last parameter is a
reference to the return value.
'''
if isinstance(c_type, str):
p_ret = _ffi.new(f'{c_type}*')
else:
p_ret = c_type
_handle_error(c_fun(*args, p_ret), handler)
return p_ret[0]
def _c_call2(c_type1, c_type2, c_fun, *args, handler=None):
'''
Helper to simplify calling C functions where the last two parameters are a
reference to the return value.
'''
p_ret1 = _ffi.new(f'{c_type1}*')
p_ret2 = _ffi.new(f'{c_type2}*')
_handle_error(c_fun(*args, p_ret1, p_ret2), handler)
return p_ret1[0], p_ret2[0]
def _to_str(c_str) -> str:
return _ffi.string(c_str).decode()
def _handle_error(ret: bool, handler=None):
if not ret:
code = _lib.clingo_error_code()
if code == _lib.clingo_error_unknown and handler is not None and handler.error is not None:
raise handler.error[0](handler.error[1]).with_traceback(handler.error[2])
msg = _ffi.string(_lib.clingo_error_message()).decode()
if code == _lib.clingo_error_bad_alloc:
raise MemoryError(msg)
raise RuntimeError(msg)
def _cb_error_handler(param: str):
def handler(exception, exc_value, traceback) -> bool:
if traceback is not None:
handler = _ffi.from_handle(traceback.tb_frame.f_locals[param])
handler.error = (exception, exc_value, traceback)
_lib.clingo_set_error(_lib.clingo_error_unknown, str(exc_value).encode())
else:
_lib.clingo_set_error(_lib.clingo_error_runtime, "error in callback".encode())
return False
return handler
def _cb_error_panic(exception, exc_value, traceback):
print_exception(exception, exc_value, traceback)
sys.stderr.write('PANIC: exception in nothrow scope')
_exit(1)
def _cb_error_print(exception, exc_value, traceback):
print_exception(exception, exc_value, traceback)
return False
class _Error:
'''
Class to store an error in a unique location.
'''
def __init__(self):
self._error = None
def clear(self):
'''
Clears the last error set.
'''
self._error = None
@property
def error(self):
'''
Return the last error set.
'''
# pylint: disable=protected-access,missing-function-docstring
return self._error
@error.setter
def error(self, value):
'''
Set an error if no error has been set before.
This function is thread-safe.
'''
# pylint: disable=protected-access
self._error = value
class _CBData:
'''
The class stores the data object that should be passed to a callback as
well as provides the means to set an error while a callback is running.
'''
def __init__(self, data, error):
self.data = data
self._error = error
@property
def error(self):
'''
Get the last error in the underlying error object.
'''
return self._error.error
@error.setter
def error(self, value):
'''
Set error in the underlying error object.
'''
self._error.error = value
def _overwritten(base, obj, function):
return hasattr(obj, function) and (
not hasattr(base, function) or
getattr(base, function) is not getattr(obj.__class__, function, None)) | libpyclingo/clingo/_internal.py | from os import _exit
from traceback import print_exception
import os
import sys
try:
_FLAGS = None
# In the pip module the library also exports the clingo symbols, which
# should be globally available for other libraries depending on clingo.
if hasattr(sys, 'setdlopenflags'):
_FLAGS = sys.getdlopenflags()
sys.setdlopenflags(os.RTLD_LAZY|os.RTLD_GLOBAL)
try:
# Note: imported first to correctly handle the embedded case
from _clingo import ffi as _ffi, lib as _lib # type: ignore # pylint: disable=no-name-in-module
except ImportError:
from ._clingo import ffi as _ffi, lib as _lib # type: ignore # pylint: disable=no-name-in-module
finally:
if _FLAGS is not None:
sys.setdlopenflags(_FLAGS)
def _str(f_size, f_str, *args, handler=None):
p_size = _ffi.new('size_t*')
_handle_error(f_size(*args, p_size), handler)
p_str = _ffi.new('char[]', p_size[0])
_handle_error(f_str(*args, p_str, p_size[0]), handler)
return _ffi.string(p_str).decode()
def _c_call(c_type, c_fun, *args, handler=None):
'''
Helper to simplify calling C functions where the last parameter is a
reference to the return value.
'''
if isinstance(c_type, str):
p_ret = _ffi.new(f'{c_type}*')
else:
p_ret = c_type
_handle_error(c_fun(*args, p_ret), handler)
return p_ret[0]
def _c_call2(c_type1, c_type2, c_fun, *args, handler=None):
'''
Helper to simplify calling C functions where the last two parameters are a
reference to the return value.
'''
p_ret1 = _ffi.new(f'{c_type1}*')
p_ret2 = _ffi.new(f'{c_type2}*')
_handle_error(c_fun(*args, p_ret1, p_ret2), handler)
return p_ret1[0], p_ret2[0]
def _to_str(c_str) -> str:
return _ffi.string(c_str).decode()
def _handle_error(ret: bool, handler=None):
if not ret:
code = _lib.clingo_error_code()
if code == _lib.clingo_error_unknown and handler is not None and handler.error is not None:
raise handler.error[0](handler.error[1]).with_traceback(handler.error[2])
msg = _ffi.string(_lib.clingo_error_message()).decode()
if code == _lib.clingo_error_bad_alloc:
raise MemoryError(msg)
raise RuntimeError(msg)
def _cb_error_handler(param: str):
def handler(exception, exc_value, traceback) -> bool:
if traceback is not None:
handler = _ffi.from_handle(traceback.tb_frame.f_locals[param])
handler.error = (exception, exc_value, traceback)
_lib.clingo_set_error(_lib.clingo_error_unknown, str(exc_value).encode())
else:
_lib.clingo_set_error(_lib.clingo_error_runtime, "error in callback".encode())
return False
return handler
def _cb_error_panic(exception, exc_value, traceback):
print_exception(exception, exc_value, traceback)
sys.stderr.write('PANIC: exception in nothrow scope')
_exit(1)
def _cb_error_print(exception, exc_value, traceback):
print_exception(exception, exc_value, traceback)
return False
class _Error:
'''
Class to store an error in a unique location.
'''
def __init__(self):
self._error = None
def clear(self):
'''
Clears the last error set.
'''
self._error = None
@property
def error(self):
'''
Return the last error set.
'''
# pylint: disable=protected-access,missing-function-docstring
return self._error
@error.setter
def error(self, value):
'''
Set an error if no error has been set before.
This function is thread-safe.
'''
# pylint: disable=protected-access
self._error = value
class _CBData:
'''
The class stores the data object that should be passed to a callback as
well as provides the means to set an error while a callback is running.
'''
def __init__(self, data, error):
self.data = data
self._error = error
@property
def error(self):
'''
Get the last error in the underlying error object.
'''
return self._error.error
@error.setter
def error(self, value):
'''
Set error in the underlying error object.
'''
self._error.error = value
def _overwritten(base, obj, function):
return hasattr(obj, function) and (
not hasattr(base, function) or
getattr(base, function) is not getattr(obj.__class__, function, None)) | 0.401923 | 0.075109 |
import os
import sys
import copy
import json
from matplotlib import rc,rcParams
from pathlib import Path
class PlotSettings(object):
""" Class which holds configuration settings for graphdata."""
def __init__(self):
self._G = self.loadsettings()
self._LSoptions = {'basic':['k-o','k--d','k-.s','r-o','r--d','r-.s'],\
'color':['k','r','b','g','c'],\
'longcolor': ['k','r','b','g','c','k--','r--','b--','g--','c--'],\
'color2':['r','b','g','k'],\
'grey': ['k-o','k--d','k.-s','k--*'],\
'cpaper':['k.','b--','r','gs'],\
'cpaper2':['k--','r'],\
'cpaper3':['k','b--','r','gs'],\
'converge':['r-<','k-d','b-o','g-*','t-p','f-v'],\
'converge2':['t-p','k-d','f-v','g-*'],\
'converge3':['r-<','t-p','b-o','c-v'],\
'converge4':['t-p','k-d','c-v','g-*']}
self._LSvec = copy.deepcopy(self._LSoptions[self._G['LSvec']])
self._LS = self._LSvec[0]
self._cmapvec = ['hot2','jet','bone2','gray','binary','gist_yarg']
self.legendList = []
self._modeVec = ['basic','paper']
self.setMode(self._G["Mode"])
@property
def LS(self):
return self._LS
@LS.setter
def LS(self,val):
self._LS = val
def _loadDefaultsettings(self):
default_dict = {'points1D': 200,'pointsX_2D':100,'pointsY_2D':100,\
'scale':'noscale','xdimscale':1.0,'ydimscale':1.0,'zdimscale':1.0,\
'tdimscale':1.0,'xdimscale_str':'','ydimscale_str':'','zdimscale_str':'',\
'tdimscale_str':'', 'process data':'on',\
'LSvec':'color','cmap':'hot','title':'on','legend':'on',\
'movFormat':'wmv2','movLength':10.0,\
'elev':30,'azim' : -120,'decades' : 12, 'contours' : 20, \
'FigWidth':14,'FigHeight':8,'FigWidthL':15,'FigHeightL':5,\
'PlotWidth':14,'PlotHeight':8,'SemilogyWidth':15,'SemilogyHeight':5,\
'LogLogWidth':14,'LogLogHeight':8,\
'SurfaceTickFormat':'%0.02e','NumberSurfaceTicks':6,'SurfaceTickFormat':'%0.02e',\
'SurfaceWidth':8,'SurfaceHeight':8,\
'WireframeHeight':8,'WireframeWidth':8,\
'WaterfallHeight':8,'WaterfallWidth':8,\
'ContourfHeight':6,'ContourfWidth':15,\
'WireframeLogHeight':5,'WireframeLogWidth':16,\
'Mode':'basic'}
return default_dict
def loadsettings(self):
""" Internal function which sets configuration dictionary by
loading values from $HOME/.configs/graphdata/graphdata.conf or
using specified default values.
OUTPUTS:
configs : dict
dictionary of configuration values.
"""
configs = dict()
#hm = os.environ.get("HOME")
hm = os.path.expanduser('~')
pr_dir = os.path.join(hm,'.config','graphdata')
pr_file = os.path.join(pr_dir,'settings.conf')
try:
with open(pr_file) as f:
configs = json.load(f)
except IOError as e:
configs = self._loadDefaultsettings()
os.makedirs(pr_dir,exist_ok=True)
with open(pr_file,'w') as outfile:
json.dump(configs,outfile)
return configs
def scale(self):
return self._G['scale']
def settingsDict(self):
return self._G
def displaysettings(self):
for key in sorted(self._G.keys()):
print((key + ":" + " "*8 + str(self._G[key])))
def _writesettings(self):
""" Writes a json configuration file. """
#hm = os.environ.get("HOME")
hm = os.path.expanduser('~')
pr_file = os.path.join(hm,'.config','graphdata','settings.conf')
with open(pr_file,'w') as outfile:
json.dump(self._G,outfile)
def _processScale(self,scale):
if scale == 'femto' or scale == 'f':
return ('f',1.0e-15)
elif scale == 'nano' or scale == 'n':
return ('n',1.0e-9)
elif scale == 'micro' or scale == 'mu':
return ('$\mu$',1.0e-6)
elif scale == 'milli' or scale == 'm':
return ('m',1.0e-3)
elif scale == 'centi' or scale == 'c':
return ('c',1.0e-2)
elif scale == 'mega' or scale == 'M':
return ('M',1.0e6)
elif scale == 'giga' or scale == 'G':
return ('G',1.0e9)
elif scale == '':
return ('',1.0)
else:
return False
def scaleX(self,scale):
scale_str, scale_val = self._processScale(scale)
self._G['xdimscale'] = scale_val
self._G['xdimscale_str'] = scale_str
self._writesettings()
def scaleY(self,scale):
scale_str, scale_val = self._processScale(scale)
self._G['ydimscale'] = scale_val
self._G['ydimscale_str'] = scale_str
self._writesettings()
def scaleZ(self,scale):
scale_str, scale_val = self._processScale(scale)
self._G['zdimscale'] = scale_val
self._G['zdimscale_str'] = scale_str
self._writesettings()
def scaleT(self,scale):
scale_str, scale_val = self._processScale(scale)
self._G['tdimscale'] = scale_val
self._G['tdimscale_str'] = scale_str
self._writesettings()
def addLegend(self,name):
self.legendList.append(name)
def setLegend(self,name):
self.legendList = []
self.legendList.append(name)
def contourSize(self,w,h):
self._G["ContourWidth"] = w
self._G["ContourHeight"] = h
self._writesettings()
def evolveSize(self,w,h):
self._G["EvolveWidth"] = w
self._G["EvolveHeight"] = h
self._writesettings()
def evolveSizeL(self,w,h):
self._G["EvolveWidthL"] = w
self._G["EvolveHeightL"] = h
self._writesettings()
def surfaceSize(self,w,h):
self._G["SurfaceWidth"] = w
self._G["SurfaceHeight"] = h
self._writesettings()
def waterfallSize(self,w,h):
self._G["WaterfallWidth"] = w
self._G["WaterfallHeight"] = h
self._writesettings()
def plotSize(self,w,h):
self._G["PlotWidth"] = w
self._G["PlotHeight"] = h
self._writesettings()
def plotSizeL(self,w,h):
self._G["PlotWidthL"] = w
self._G["PlotHeightL"] = h
self._writesettings()
def wireSize(self,w,h):
self._G["WireWidth"] = w
self._G["WireHeight"] = h
self._writesettings()
def loglogSize(self,w,h):
self._G["LogLogWidth"] = w
self._G["LogLogHeight"] = h
self._writesettings()
def toggleLS(self):
item = self._LSvec.pop(0)
self._LSvec.append(item)
self._LS = self._LSvec[0]
print("Line style toggled to '{}'".format(self._LS))
def toggleCmap(self):
item = self._cmapvec.pop(0)
self._cmapvec.append(item)
self._G["cmap"] = str(self._cmapvec[0])
print(("Colormap toggled to " + str(self._G["cmap"])))
self._writesettings()
def setCmap(self,val):
"""
Cmap styles: hot2,jet,bone2,gray,binary,gist_yarg,etc.
"""
self._G["cmap"] = val
print(("Colormap set to " + str(self._G["cmap"])))
self._writesettings()
def setLS(self,key):
"""
Linestyle options:
basic: ['k-o','k--d','k-.s','r-o','r--d','r-.s']
color: ['k','r','b','g','c']
color2: ['r','b','g','k']
longcolor: ['k','r','b','g','c','k--','r--','b--','g--','c--']
grey: ['k-o','k--d','k.-s','k--*']
cpaper: ['k.','b--','r','gs']
cpaper2: ['k--','r']
converge: ['r-<','k-d','b-o','g-*','t-p','f-v']
converge2: ['t-p','k-d','f-v','g-*']
"""
if key in self._LSoptions:
self._G['LSvec'] = key
self._writesettings()
self.defaultLS()
else:
print("Did not recognize linestyles. Here is a list of the options: ")
print(('\n'.join("%s:\t\t %s" % (kkey, ','.join(map(str, values))) for kkey, values in list(self._LSoptions.items()))))
def defaultLS(self):
self._LSvec = copy.deepcopy(self._LSoptions[self._G['LSvec']])
self._LS = self._LSvec[0]
def setMovieLength(self,num):
self._G["movLength"] = num
self._writesettings()
def decades(self,num):
self._G["decades"] = num
self._writesettings()
def numContours(self,num):
self._G["contours"] = num
self._writesettings()
def surfaceView(self,elev,azim):
self._G["elev"] = elev
self._G["azim"] = azim
self._writesettings()
def points1D(self,num):
self._G["points1D"] = num
self._writesettings()
def points2D(self,num1,num2):
self._G["pointsX_2D"] = num1
self._G["pointsY_2D"] = num2
self._writesettings()
def pointsX_2D(self,num):
self._G["pointsX_2D"] = num
self._writesettings()
def pointsY_2D(self,num):
self._G["pointsY_2D"] = num
self._writesettings()
def scaleset(self,val):
self._G["scale"] = val
self._writesettings()
def setNumSurfTicks(self,val):
self._G['NumberSurfaceTicks'] = val
self._writesettings()
def toggleSurfFormat(self):
if(self._G['SurfaceTickFormat'] == '%0.02e'):
self._G['SurfaceTickFormat'] = '%0.02f'
else:
self._G['SurfaceTickFormat'] = '%0.02e'
self._writesettings()
def toggleTitle(self):
if(str(self._G['title']) == 'on'):
self._G['title'] = 'off'
print('Figure title toggled off')
else:
self._G['title'] = 'on'
print('Figure title toggled on')
self._writesettings()
def toggleLegend(self):
if(self._G['legend'] == 'on'):
self._G['legend'] = 'off'
print('Legend toggled off')
else:
self._G['legend'] = 'on'
print('Legend toggled on')
self._writesettings()
def toggleMovFormat(self):
if(self._G['movFormat'] == 'mpeg4'):
self._G['movFormat'] = 'wmv2'
else:
self._G['movFormat'] = 'mpeg4'
self._writesettings()
def toggleScale(self):
if(str(self._G['scale']) == 'nonDim'):
self._G['scale'] = 'noscale'
print("Scale toggled to noscale")
elif(str(self._G['scale']) == 'noscale'):
self._G['scale'] = 'dimscale'
print("Scale toggled to dimscale")
else:
self._G['scale'] = 'nonDim'
print("Scale toggled to non-dimensional scale")
self._writesettings()
def toggleProcessData(self):
if(str(self._G['process data']) == 'on'):
self._G['process data'] = 'off'
print("Data will not be processed")
elif(str(self._G['process data']) == 'off'):
self._G['process data'] = 'on'
print("Data will be processed")
else:
self._G['process data'] = 'on'
print("Data will be processed")
self._writesettings()
def setMode(self,val):
"""
Mode settings available are
paper: high res figures (sizes show up as they will in a paper)
basic: bigger plots for use with presentations/data visualization.
"""
if val == 'paper':
self._setPaperMode()
self._writesettings()
elif val == 'basic':
self._setBasicMode()
self._writesettings()
else:
print("Did not recognize mode setting")
def _setPaperMode(self):
self._G["Mode"] = 'paper'
font = {'family' : 'sans-serif',
'sans-serif' : 'Helvetica',
'weight' : 'bold',
'size' : 6}
font = {'family' : 'sans-serif',
'sans-serif' : 'Helvetica',
'weight' : 'bold',
'size' : 8}
rc('font',**font)
rcParams['figure.dpi'] = 240
width = 1.1*3.35; height = 1.1*1.6
#width = 3.35; height = 1.5
width = 3.35; height = 1.5
rcParams['figure.figsize'] = width,height # figure size in inches
rcParams['lines.markersize'] = 1.2
#rcParams['lines.linewidth'] = 1.0
rcParams['lines.linewidth'] = 0.75
rcParams['xtick.major.width'] = 0.25
rcParams['xtick.major.size'] = 4
#rcParams['xtick.major.size'] = 2
rcParams['xtick.labelsize'] = 7
#rcParams['xtick.labelsize'] = 6
rcParams['ytick.major.width'] = 0.25
rcParams['ytick.major.size'] = 4
#rcParams['ytick.major.size'] = 2
rcParams['ytick.labelsize'] = 7
rcParams['ytick.labelsize'] = 6
rcParams['figure.subplot.left'] = 0.2 #the left side of the subplots of the figure
rcParams['figure.subplot.right'] = 0.8 # the right side of the subplots of the figure
rcParams['figure.subplot.bottom'] = 0.2 # the bottom of the subplots of the figure
rcParams['figure.subplot.top'] = 0.8
rcParams['figure.subplot.wspace'] = 0.2
rcParams['figure.subplot.hspace'] = 0.2
rcParams['axes.labelsize'] = 8
rcParams['axes.labelsize'] = 7
rcParams['savefig.dpi'] = 600 # figure dots per inch
rcParams['legend.handlelength'] = 2.2 # the length of the legend lines in fraction of fontsize
rcParams['legend.handlelength'] = 2.2 # the length of the legend lines in fraction of fontsize
rcParams['legend.handleheight'] = 0.2 # the height of the legend handle in fraction of fontsize
rcParams['legend.handletextpad'] = 0.4 # the space between the legend line and legend text in fraction of fontsize
rcParams['legend.handlelength'] = 1.6 # the length of the legend lines in fraction of fontsize
rcParams['legend.fontsize'] = 'small'
#rcParams['legend.numpoints'] = 3
rcParams['legend.numpoints'] = 1
rcParams['legend.fontsize'] = 'medium'
rcParams['legend.frameon'] = True
self.evolveSize(3.35,2.0)
self.contourSize(width,height)
self.wireSize(width,height)
self.plotSize(width,height)
self.plotSizeL(width,height)
self.surfaceSize(width,height)
self.loglogSize(3.35,1.5)
def _setBasicMode(self):
self._G["Mode"] = 'basic'
font = {'family' : 'sans-serif',
'sans-serif' : 'Helvetica',
'weight' : 'bold',
'size' : 16}
rc('font',**font)
rcParams['figure.dpi'] = 100
rcParams['axes.labelsize'] = 16
rcParams['xtick.labelsize'] = 14
rcParams['ytick.labelsize'] = 14
rcParams['font.weight'] = 'bold'
rcParams['lines.markersize'] = 2
rcParams['lines.linewidth'] = 1.3
rcParams['font.size'] = 16
rcParams['font.weight'] = 'bold'
rcParams['xtick.major.width'] = 1
rcParams['xtick.major.size'] = 4
rcParams['ytick.major.width'] = 1
rcParams['ytick.major.size'] = 4
rcParams['legend.fontsize'] = 'medium'
rcParams['legend.frameon'] = True
self.evolveSize(12,8)
self.contourSize(12,8)
self.wireSize(12,8)
self.plotSize(8,6)
self.plotSizeL(8,6)
self.surfaceSize(12,8)
self.loglogSize(16,5)
self.setLS('color')
def toggleMode(self):
"""
Mode settings available are
paper: high res figures (sizes show up as they will in a paper)
basic: bigger plots for use with presentations/data visualization.
"""
item = self._modeVec.pop(0)
self._modeVec.append(item)
self.setMode(str(self._modeVec[0]))
print(("Mode toggled to " + str(self._modeVec[0]))) | graphdata/settings/settings.py |
import os
import sys
import copy
import json
from matplotlib import rc,rcParams
from pathlib import Path
class PlotSettings(object):
""" Class which holds configuration settings for graphdata."""
def __init__(self):
self._G = self.loadsettings()
self._LSoptions = {'basic':['k-o','k--d','k-.s','r-o','r--d','r-.s'],\
'color':['k','r','b','g','c'],\
'longcolor': ['k','r','b','g','c','k--','r--','b--','g--','c--'],\
'color2':['r','b','g','k'],\
'grey': ['k-o','k--d','k.-s','k--*'],\
'cpaper':['k.','b--','r','gs'],\
'cpaper2':['k--','r'],\
'cpaper3':['k','b--','r','gs'],\
'converge':['r-<','k-d','b-o','g-*','t-p','f-v'],\
'converge2':['t-p','k-d','f-v','g-*'],\
'converge3':['r-<','t-p','b-o','c-v'],\
'converge4':['t-p','k-d','c-v','g-*']}
self._LSvec = copy.deepcopy(self._LSoptions[self._G['LSvec']])
self._LS = self._LSvec[0]
self._cmapvec = ['hot2','jet','bone2','gray','binary','gist_yarg']
self.legendList = []
self._modeVec = ['basic','paper']
self.setMode(self._G["Mode"])
@property
def LS(self):
return self._LS
@LS.setter
def LS(self,val):
self._LS = val
def _loadDefaultsettings(self):
default_dict = {'points1D': 200,'pointsX_2D':100,'pointsY_2D':100,\
'scale':'noscale','xdimscale':1.0,'ydimscale':1.0,'zdimscale':1.0,\
'tdimscale':1.0,'xdimscale_str':'','ydimscale_str':'','zdimscale_str':'',\
'tdimscale_str':'', 'process data':'on',\
'LSvec':'color','cmap':'hot','title':'on','legend':'on',\
'movFormat':'wmv2','movLength':10.0,\
'elev':30,'azim' : -120,'decades' : 12, 'contours' : 20, \
'FigWidth':14,'FigHeight':8,'FigWidthL':15,'FigHeightL':5,\
'PlotWidth':14,'PlotHeight':8,'SemilogyWidth':15,'SemilogyHeight':5,\
'LogLogWidth':14,'LogLogHeight':8,\
'SurfaceTickFormat':'%0.02e','NumberSurfaceTicks':6,'SurfaceTickFormat':'%0.02e',\
'SurfaceWidth':8,'SurfaceHeight':8,\
'WireframeHeight':8,'WireframeWidth':8,\
'WaterfallHeight':8,'WaterfallWidth':8,\
'ContourfHeight':6,'ContourfWidth':15,\
'WireframeLogHeight':5,'WireframeLogWidth':16,\
'Mode':'basic'}
return default_dict
def loadsettings(self):
""" Internal function which sets configuration dictionary by
loading values from $HOME/.configs/graphdata/graphdata.conf or
using specified default values.
OUTPUTS:
configs : dict
dictionary of configuration values.
"""
configs = dict()
#hm = os.environ.get("HOME")
hm = os.path.expanduser('~')
pr_dir = os.path.join(hm,'.config','graphdata')
pr_file = os.path.join(pr_dir,'settings.conf')
try:
with open(pr_file) as f:
configs = json.load(f)
except IOError as e:
configs = self._loadDefaultsettings()
os.makedirs(pr_dir,exist_ok=True)
with open(pr_file,'w') as outfile:
json.dump(configs,outfile)
return configs
def scale(self):
return self._G['scale']
def settingsDict(self):
return self._G
def displaysettings(self):
for key in sorted(self._G.keys()):
print((key + ":" + " "*8 + str(self._G[key])))
def _writesettings(self):
""" Writes a json configuration file. """
#hm = os.environ.get("HOME")
hm = os.path.expanduser('~')
pr_file = os.path.join(hm,'.config','graphdata','settings.conf')
with open(pr_file,'w') as outfile:
json.dump(self._G,outfile)
def _processScale(self,scale):
if scale == 'femto' or scale == 'f':
return ('f',1.0e-15)
elif scale == 'nano' or scale == 'n':
return ('n',1.0e-9)
elif scale == 'micro' or scale == 'mu':
return ('$\mu$',1.0e-6)
elif scale == 'milli' or scale == 'm':
return ('m',1.0e-3)
elif scale == 'centi' or scale == 'c':
return ('c',1.0e-2)
elif scale == 'mega' or scale == 'M':
return ('M',1.0e6)
elif scale == 'giga' or scale == 'G':
return ('G',1.0e9)
elif scale == '':
return ('',1.0)
else:
return False
def scaleX(self,scale):
scale_str, scale_val = self._processScale(scale)
self._G['xdimscale'] = scale_val
self._G['xdimscale_str'] = scale_str
self._writesettings()
def scaleY(self,scale):
scale_str, scale_val = self._processScale(scale)
self._G['ydimscale'] = scale_val
self._G['ydimscale_str'] = scale_str
self._writesettings()
def scaleZ(self,scale):
scale_str, scale_val = self._processScale(scale)
self._G['zdimscale'] = scale_val
self._G['zdimscale_str'] = scale_str
self._writesettings()
def scaleT(self,scale):
scale_str, scale_val = self._processScale(scale)
self._G['tdimscale'] = scale_val
self._G['tdimscale_str'] = scale_str
self._writesettings()
def addLegend(self,name):
self.legendList.append(name)
def setLegend(self,name):
self.legendList = []
self.legendList.append(name)
def contourSize(self,w,h):
self._G["ContourWidth"] = w
self._G["ContourHeight"] = h
self._writesettings()
def evolveSize(self,w,h):
self._G["EvolveWidth"] = w
self._G["EvolveHeight"] = h
self._writesettings()
def evolveSizeL(self,w,h):
self._G["EvolveWidthL"] = w
self._G["EvolveHeightL"] = h
self._writesettings()
def surfaceSize(self,w,h):
self._G["SurfaceWidth"] = w
self._G["SurfaceHeight"] = h
self._writesettings()
def waterfallSize(self,w,h):
self._G["WaterfallWidth"] = w
self._G["WaterfallHeight"] = h
self._writesettings()
def plotSize(self,w,h):
self._G["PlotWidth"] = w
self._G["PlotHeight"] = h
self._writesettings()
def plotSizeL(self,w,h):
self._G["PlotWidthL"] = w
self._G["PlotHeightL"] = h
self._writesettings()
def wireSize(self,w,h):
self._G["WireWidth"] = w
self._G["WireHeight"] = h
self._writesettings()
def loglogSize(self,w,h):
self._G["LogLogWidth"] = w
self._G["LogLogHeight"] = h
self._writesettings()
def toggleLS(self):
item = self._LSvec.pop(0)
self._LSvec.append(item)
self._LS = self._LSvec[0]
print("Line style toggled to '{}'".format(self._LS))
def toggleCmap(self):
item = self._cmapvec.pop(0)
self._cmapvec.append(item)
self._G["cmap"] = str(self._cmapvec[0])
print(("Colormap toggled to " + str(self._G["cmap"])))
self._writesettings()
def setCmap(self,val):
"""
Cmap styles: hot2,jet,bone2,gray,binary,gist_yarg,etc.
"""
self._G["cmap"] = val
print(("Colormap set to " + str(self._G["cmap"])))
self._writesettings()
def setLS(self,key):
"""
Linestyle options:
basic: ['k-o','k--d','k-.s','r-o','r--d','r-.s']
color: ['k','r','b','g','c']
color2: ['r','b','g','k']
longcolor: ['k','r','b','g','c','k--','r--','b--','g--','c--']
grey: ['k-o','k--d','k.-s','k--*']
cpaper: ['k.','b--','r','gs']
cpaper2: ['k--','r']
converge: ['r-<','k-d','b-o','g-*','t-p','f-v']
converge2: ['t-p','k-d','f-v','g-*']
"""
if key in self._LSoptions:
self._G['LSvec'] = key
self._writesettings()
self.defaultLS()
else:
print("Did not recognize linestyles. Here is a list of the options: ")
print(('\n'.join("%s:\t\t %s" % (kkey, ','.join(map(str, values))) for kkey, values in list(self._LSoptions.items()))))
def defaultLS(self):
self._LSvec = copy.deepcopy(self._LSoptions[self._G['LSvec']])
self._LS = self._LSvec[0]
def setMovieLength(self,num):
self._G["movLength"] = num
self._writesettings()
def decades(self,num):
self._G["decades"] = num
self._writesettings()
def numContours(self,num):
self._G["contours"] = num
self._writesettings()
def surfaceView(self,elev,azim):
self._G["elev"] = elev
self._G["azim"] = azim
self._writesettings()
def points1D(self,num):
self._G["points1D"] = num
self._writesettings()
def points2D(self,num1,num2):
self._G["pointsX_2D"] = num1
self._G["pointsY_2D"] = num2
self._writesettings()
def pointsX_2D(self,num):
self._G["pointsX_2D"] = num
self._writesettings()
def pointsY_2D(self,num):
self._G["pointsY_2D"] = num
self._writesettings()
def scaleset(self,val):
self._G["scale"] = val
self._writesettings()
def setNumSurfTicks(self,val):
self._G['NumberSurfaceTicks'] = val
self._writesettings()
def toggleSurfFormat(self):
if(self._G['SurfaceTickFormat'] == '%0.02e'):
self._G['SurfaceTickFormat'] = '%0.02f'
else:
self._G['SurfaceTickFormat'] = '%0.02e'
self._writesettings()
def toggleTitle(self):
if(str(self._G['title']) == 'on'):
self._G['title'] = 'off'
print('Figure title toggled off')
else:
self._G['title'] = 'on'
print('Figure title toggled on')
self._writesettings()
def toggleLegend(self):
if(self._G['legend'] == 'on'):
self._G['legend'] = 'off'
print('Legend toggled off')
else:
self._G['legend'] = 'on'
print('Legend toggled on')
self._writesettings()
def toggleMovFormat(self):
if(self._G['movFormat'] == 'mpeg4'):
self._G['movFormat'] = 'wmv2'
else:
self._G['movFormat'] = 'mpeg4'
self._writesettings()
def toggleScale(self):
if(str(self._G['scale']) == 'nonDim'):
self._G['scale'] = 'noscale'
print("Scale toggled to noscale")
elif(str(self._G['scale']) == 'noscale'):
self._G['scale'] = 'dimscale'
print("Scale toggled to dimscale")
else:
self._G['scale'] = 'nonDim'
print("Scale toggled to non-dimensional scale")
self._writesettings()
def toggleProcessData(self):
if(str(self._G['process data']) == 'on'):
self._G['process data'] = 'off'
print("Data will not be processed")
elif(str(self._G['process data']) == 'off'):
self._G['process data'] = 'on'
print("Data will be processed")
else:
self._G['process data'] = 'on'
print("Data will be processed")
self._writesettings()
def setMode(self,val):
"""
Mode settings available are
paper: high res figures (sizes show up as they will in a paper)
basic: bigger plots for use with presentations/data visualization.
"""
if val == 'paper':
self._setPaperMode()
self._writesettings()
elif val == 'basic':
self._setBasicMode()
self._writesettings()
else:
print("Did not recognize mode setting")
def _setPaperMode(self):
self._G["Mode"] = 'paper'
font = {'family' : 'sans-serif',
'sans-serif' : 'Helvetica',
'weight' : 'bold',
'size' : 6}
font = {'family' : 'sans-serif',
'sans-serif' : 'Helvetica',
'weight' : 'bold',
'size' : 8}
rc('font',**font)
rcParams['figure.dpi'] = 240
width = 1.1*3.35; height = 1.1*1.6
#width = 3.35; height = 1.5
width = 3.35; height = 1.5
rcParams['figure.figsize'] = width,height # figure size in inches
rcParams['lines.markersize'] = 1.2
#rcParams['lines.linewidth'] = 1.0
rcParams['lines.linewidth'] = 0.75
rcParams['xtick.major.width'] = 0.25
rcParams['xtick.major.size'] = 4
#rcParams['xtick.major.size'] = 2
rcParams['xtick.labelsize'] = 7
#rcParams['xtick.labelsize'] = 6
rcParams['ytick.major.width'] = 0.25
rcParams['ytick.major.size'] = 4
#rcParams['ytick.major.size'] = 2
rcParams['ytick.labelsize'] = 7
rcParams['ytick.labelsize'] = 6
rcParams['figure.subplot.left'] = 0.2 #the left side of the subplots of the figure
rcParams['figure.subplot.right'] = 0.8 # the right side of the subplots of the figure
rcParams['figure.subplot.bottom'] = 0.2 # the bottom of the subplots of the figure
rcParams['figure.subplot.top'] = 0.8
rcParams['figure.subplot.wspace'] = 0.2
rcParams['figure.subplot.hspace'] = 0.2
rcParams['axes.labelsize'] = 8
rcParams['axes.labelsize'] = 7
rcParams['savefig.dpi'] = 600 # figure dots per inch
rcParams['legend.handlelength'] = 2.2 # the length of the legend lines in fraction of fontsize
rcParams['legend.handlelength'] = 2.2 # the length of the legend lines in fraction of fontsize
rcParams['legend.handleheight'] = 0.2 # the height of the legend handle in fraction of fontsize
rcParams['legend.handletextpad'] = 0.4 # the space between the legend line and legend text in fraction of fontsize
rcParams['legend.handlelength'] = 1.6 # the length of the legend lines in fraction of fontsize
rcParams['legend.fontsize'] = 'small'
#rcParams['legend.numpoints'] = 3
rcParams['legend.numpoints'] = 1
rcParams['legend.fontsize'] = 'medium'
rcParams['legend.frameon'] = True
self.evolveSize(3.35,2.0)
self.contourSize(width,height)
self.wireSize(width,height)
self.plotSize(width,height)
self.plotSizeL(width,height)
self.surfaceSize(width,height)
self.loglogSize(3.35,1.5)
def _setBasicMode(self):
self._G["Mode"] = 'basic'
font = {'family' : 'sans-serif',
'sans-serif' : 'Helvetica',
'weight' : 'bold',
'size' : 16}
rc('font',**font)
rcParams['figure.dpi'] = 100
rcParams['axes.labelsize'] = 16
rcParams['xtick.labelsize'] = 14
rcParams['ytick.labelsize'] = 14
rcParams['font.weight'] = 'bold'
rcParams['lines.markersize'] = 2
rcParams['lines.linewidth'] = 1.3
rcParams['font.size'] = 16
rcParams['font.weight'] = 'bold'
rcParams['xtick.major.width'] = 1
rcParams['xtick.major.size'] = 4
rcParams['ytick.major.width'] = 1
rcParams['ytick.major.size'] = 4
rcParams['legend.fontsize'] = 'medium'
rcParams['legend.frameon'] = True
self.evolveSize(12,8)
self.contourSize(12,8)
self.wireSize(12,8)
self.plotSize(8,6)
self.plotSizeL(8,6)
self.surfaceSize(12,8)
self.loglogSize(16,5)
self.setLS('color')
def toggleMode(self):
"""
Mode settings available are
paper: high res figures (sizes show up as they will in a paper)
basic: bigger plots for use with presentations/data visualization.
"""
item = self._modeVec.pop(0)
self._modeVec.append(item)
self.setMode(str(self._modeVec[0]))
print(("Mode toggled to " + str(self._modeVec[0]))) | 0.449634 | 0.13134 |
import glob
import multiprocessing.pool
import os
import tarfile
import urllib.request
import warnings
from setuptools import setup, find_packages, distutils
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import CppExtension, include_paths
def download_extract(url, dl_path):
    """Download ``url`` to ``dl_path`` if needed and extract it into ``third_party/``.

    The download is skipped when ``dl_path`` already exists on disk, and
    extraction is skipped for a ``.tar.gz`` archive whose target directory
    (archive path minus the ``.tar.gz`` suffix) already exists.
    """
    if not os.path.isfile(dl_path):
        # Archive not downloaded yet -- fetch it.
        urllib.request.urlretrieve(url, dl_path)
    if dl_path.endswith(".tar.gz") and os.path.isdir(dl_path[:-len(".tar.gz")]):
        # Already extracted.
        return
    # NOTE(review): extractall trusts member paths inside the archive;
    # acceptable here because the URLs are pinned project release assets.
    with tarfile.open(dl_path) as tar:
        tar.extractall('third_party/')
# Download/Extract openfst, boost
# Pinned release archives; download_extract is a no-op when already fetched/extracted.
download_extract('https://github.com/parlance/ctcdecode/releases/download/v1.0/openfst-1.6.7.tar.gz',
                 'third_party/openfst-1.6.7.tar.gz')
download_extract('https://github.com/parlance/ctcdecode/releases/download/v1.0/boost_1_67_0.tar.gz',
                 'third_party/boost_1_67_0.tar.gz')
# kenlm and ThreadPool come from git submodules; warn (rather than fail) if missing.
for file in ['third_party/kenlm/setup.py', 'third_party/ThreadPool/ThreadPool.h']:
    if not os.path.exists(file):
        warnings.warn('File `{}` does not appear to be present. Did you forget `git submodule update`?'.format(file))
# Does gcc compile with this header and library?
def compile_test(header, library):
    """Return True if ``g++`` can compile and link a trivial program that
    includes ``header`` and links against ``library``.

    Used to probe for optional compression libraries (zlib/bz2/lzma) so the
    matching ``HAVE_*`` macros can be defined. Returns False when g++ is not
    on PATH.
    """
    import subprocess
    try:
        # Feed the one-line program on stdin instead of a bash herestring,
        # and link straight to os.devnull instead of writing and removing a
        # dummy output file. No shell is involved, so header/library names
        # cannot be interpreted by the shell.
        result = subprocess.run(
            ['g++', '-include', header, '-l' + library, '-x', 'c++', '-',
             '-o', os.devnull],
            input=b'int main() {}',
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    except OSError:
        # g++ missing -- mirror the old os.system() failure result.
        return False
    return result.returncode == 0
# Base flags; KENLM_MAX_ORDER bounds the n-gram order kenlm supports.
compile_args = ['-O3', '-DKENLM_MAX_ORDER=6', '-std=c++14', '-fPIC']
ext_libs = []
# Probe for each optional compression library and enable the matching
# HAVE_* macro when it is available on this machine.
for _header, _lib, _macro in (('zlib.h', 'z', '-DHAVE_ZLIB'),
                              ('bzlib.h', 'bz2', '-DHAVE_BZLIB'),
                              ('lzma.h', 'lzma', '-DHAVE_XZLIB')):
    if compile_test(_header, _lib):
        compile_args.append(_macro)
        ext_libs.append(_lib)
third_party_libs = ["kenlm", "openfst-1.6.7/src/include", "ThreadPool", "boost_1_67_0", "utf8"]
compile_args.extend(['-DINCLUDE_KENLM', '-DKENLM_MAX_ORDER=6'])
# Gather the bundled kenlm/openfst translation units, then drop the
# standalone main/test programs which must not be linked into the extension.
lib_sources = []
for _pattern in ('third_party/kenlm/util/*.cc',
                 'third_party/kenlm/lm/*.cc',
                 'third_party/kenlm/util/double-conversion/*.cc',
                 'third_party/openfst-1.6.7/src/lib/*.cc'):
    lib_sources.extend(glob.glob(_pattern))
lib_sources = [fn for fn in lib_sources if not fn.endswith(('main.cc', 'test.cc'))]
third_party_includes = [os.path.realpath(os.path.join("third_party", lib)) for lib in third_party_libs]
ctc_sources = glob.glob('ctcdecode/src/*.cpp')
# Build the ctcdecode extension from its own C++ sources plus the bundled
# third-party sources, using PyTorch's include paths.
extension = CppExtension(
    name='ctcdecode._ext.ctc_decode',
    # NOTE(review): `package` and `with_cuda` are not parameters of
    # CppExtension itself; they are forwarded as extra keyword args --
    # verify they are still accepted by the installed torch version.
    package=True,
    with_cuda=False,
    sources=ctc_sources + lib_sources,
    include_dirs=third_party_includes + include_paths(),
    libraries=ext_libs,
    extra_compile_args=compile_args,
    language='c++'
)
# monkey-patch for parallel compilation
# See: https://stackoverflow.com/a/13176803
def parallelCCompile(self,
                     sources,
                     output_dir=None,
                     macros=None,
                     include_dirs=None,
                     debug=0,
                     extra_preargs=None,
                     extra_postargs=None,
                     depends=None):
    """Drop-in replacement for ``distutils.ccompiler.CCompiler.compile`` that
    builds the individual object files in parallel on a thread pool.

    Signature and return value match ``CCompiler.compile`` so it can be
    monkey-patched onto the class.
    """
    # These setup lines are copied from distutils.ccompiler.CCompiler.compile.
    macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
        output_dir, macros, include_dirs, sources, depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)

    def _single_compile(obj):
        try:
            src, ext = build[obj]
        except KeyError:
            # Nothing to build for this object (already up to date).
            return
        self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)

    # Threads suffice because the GIL is released while the external C
    # compiler runs. The context manager terminates the pool afterwards;
    # the original recipe leaked its worker threads.
    with multiprocessing.pool.ThreadPool(os.cpu_count()) as pool:
        # list() forces evaluation -- imap is evaluated on-demand.
        list(pool.imap(_single_compile, objects))
    return objects
# hack compile to support parallel compiling
# (must happen before setup() so build_ext picks up the patched compile)
distutils.ccompiler.CCompiler.compile = parallelCCompile

# NOTE(review): the original closing line carried fused dataset residue
# ("| setup.py | import glob"), which was a syntax error; removed here.
setup(
    name="ctcdecode",
    version="1.0.2",
    description="CTC Decoder for PyTorch based on Paddle Paddle's implementation",
    url="https://github.com/parlance/ctcdecode",
    author="<NAME>",
    author_email="<EMAIL>",
    # Exclude the build files.
    packages=find_packages(exclude=["build"]),
    ext_modules=[extension],
    cmdclass={'build_ext': BuildExtension}
)
import glob
import multiprocessing.pool
import os
import tarfile
import urllib.request
import warnings

from setuptools import setup, find_packages, distutils
from torch.utils.cpp_extension import BuildExtension
from torch.utils.cpp_extension import CppExtension, include_paths
def download_extract(url, dl_path):
    """Fetch ``url`` into ``dl_path`` unless it is already on disk, then
    unpack the archive under ``third_party/``.

    Extraction is skipped for a ``.tar.gz`` archive whose unpacked directory
    already exists.
    """
    if not os.path.isfile(dl_path):
        # No local copy yet; download the archive.
        urllib.request.urlretrieve(url, dl_path)
    already_unpacked = (dl_path.endswith(".tar.gz")
                        and os.path.isdir(dl_path[:-len(".tar.gz")]))
    if already_unpacked:
        return
    # Context manager guarantees the tar handle is closed even if
    # extraction raises.
    with tarfile.open(dl_path) as tar:
        tar.extractall('third_party/')
# Download/Extract openfst, boost
# Pinned third-party release archives needed by the decoder build.
download_extract('https://github.com/parlance/ctcdecode/releases/download/v1.0/openfst-1.6.7.tar.gz',
                 'third_party/openfst-1.6.7.tar.gz')
download_extract('https://github.com/parlance/ctcdecode/releases/download/v1.0/boost_1_67_0.tar.gz',
                 'third_party/boost_1_67_0.tar.gz')
# kenlm and ThreadPool are git submodules; only warn when they are absent.
for file in ['third_party/kenlm/setup.py', 'third_party/ThreadPool/ThreadPool.h']:
    if not os.path.exists(file):
        warnings.warn('File `{}` does not appear to be present. Did you forget `git submodule update`?'.format(file))
# Does gcc compile with this header and library?
def compile_test(header, library):
    """Return True when g++ can compile and link a trivial program that
    includes ``header`` and links against ``library``.

    Probes the build machine for optional libraries before enabling the
    matching macros.
    """
    out_path = os.path.join(os.path.dirname(__file__), "dummy")
    template = ("bash -c \"g++ -include {hdr} -l{lib} -x c++ - "
                "<<<'int main() {{}}' -o {out} "
                ">/dev/null 2>/dev/null && rm {out} 2>/dev/null\"")
    command = template.format(hdr=header, lib=library, out=out_path)
    return os.system(command) == 0
# Base compiler flags; KENLM_MAX_ORDER bounds the supported n-gram order.
compile_args = ['-O3', '-DKENLM_MAX_ORDER=6', '-std=c++14', '-fPIC']
ext_libs = []
# Enable each optional compression library that compiles on this machine.
if compile_test('zlib.h', 'z'):
    compile_args.append('-DHAVE_ZLIB')
    ext_libs.append('z')
if compile_test('bzlib.h', 'bz2'):
    compile_args.append('-DHAVE_BZLIB')
    ext_libs.append('bz2')
if compile_test('lzma.h', 'lzma'):
    compile_args.append('-DHAVE_XZLIB')
    ext_libs.append('lzma')
third_party_libs = ["kenlm", "openfst-1.6.7/src/include", "ThreadPool", "boost_1_67_0", "utf8"]
# NOTE(review): -DKENLM_MAX_ORDER=6 is added a second time here (already in
# the initial list above) -- harmless but redundant; confirm before removing.
compile_args.extend(['-DINCLUDE_KENLM', '-DKENLM_MAX_ORDER=6'])
# Collect bundled kenlm/openfst sources, then drop standalone main/test files.
lib_sources = glob.glob('third_party/kenlm/util/*.cc') + glob.glob('third_party/kenlm/lm/*.cc') + glob.glob(
    'third_party/kenlm/util/double-conversion/*.cc') + glob.glob('third_party/openfst-1.6.7/src/lib/*.cc')
lib_sources = [fn for fn in lib_sources if not (fn.endswith('main.cc') or fn.endswith('test.cc'))]
third_party_includes = [os.path.realpath(os.path.join("third_party", lib)) for lib in third_party_libs]
ctc_sources = glob.glob('ctcdecode/src/*.cpp')
# C++ extension combining ctcdecode's sources with the bundled third-party
# sources; include paths come from the third-party dirs plus PyTorch.
extension = CppExtension(
    name='ctcdecode._ext.ctc_decode',
    # NOTE(review): `package`/`with_cuda` are not documented CppExtension
    # parameters; they are passed through as extra kwargs -- verify against
    # the installed torch version.
    package=True,
    with_cuda=False,
    sources=ctc_sources + lib_sources,
    include_dirs=third_party_includes + include_paths(),
    libraries=ext_libs,
    extra_compile_args=compile_args,
    language='c++'
)
# monkey-patch for parallel compilation
# See: https://stackoverflow.com/a/13176803
def parallelCCompile(self,
                     sources,
                     output_dir=None,
                     macros=None,
                     include_dirs=None,
                     debug=0,
                     extra_preargs=None,
                     extra_postargs=None,
                     depends=None):
    """Parallel replacement for ``distutils.ccompiler.CCompiler.compile``.

    Compiles each object file on a worker thread; interface and return value
    are identical to the method it replaces.
    """
    # Boilerplate copied from distutils.ccompiler.CCompiler.compile directly.
    macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
        output_dir, macros, include_dirs, sources, depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)

    def _single_compile(obj):
        try:
            src, ext = build[obj]
        except KeyError:
            # This object needs no rebuild.
            return
        self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)

    # The GIL is released while the external compiler runs, so a thread pool
    # gives real parallelism. Using the pool as a context manager terminates
    # the workers instead of leaking them (the original recipe never did).
    with multiprocessing.pool.ThreadPool(os.cpu_count()) as thread_pool:
        # convert to list, imap is evaluated on-demand
        list(thread_pool.imap(_single_compile, objects))
    return objects
# hack compile to support parallel compiling
# Must be installed before setup() so build_ext uses the patched method.
distutils.ccompiler.CCompiler.compile = parallelCCompile
setup(
    name="ctcdecode",
    version="1.0.2",
    description="CTC Decoder for PyTorch based on Paddle Paddle's implementation",
    url="https://github.com/parlance/ctcdecode",
    author="<NAME>",
    author_email="<EMAIL>",
    # Exclude the build files.
    packages=find_packages(exclude=["build"]),
    ext_modules=[extension],
    cmdclass={'build_ext': BuildExtension}
)
from __future__ import division, print_function, absolute_import
import weakref
import numpy as np
# -------------------------------------
# Dictionary
# -------------------------------------
class LazyDict(dict):
    """Dict whose ``get``/``setdefault`` accept a callable default.

    When the key is missing and ``d`` is callable, ``d(*args)`` supplies the
    default; the callable is only evaluated on a miss (lazy construction).
    """

    def get(self, k, d=None, *args):
        """Return ``self[k]``, or the (lazily built) default on a miss."""
        if k in self:
            return self[k]
        if callable(d):
            return d(*args)
        return d

    def setdefault(self, k, d=None, *args):
        """Like ``dict.setdefault`` with a lazily evaluated callable default."""
        if k in self:
            return self[k]
        if callable(d):
            d = d(*args)
        return dict.setdefault(self, k, d)
class LazyWeakValueDictionary(weakref.WeakValueDictionary):
    """WeakValueDictionary whose ``get``/``setdefault`` accept a callable
    default, evaluated with ``*args`` only when the key is missing or its
    referent has been garbage collected.

    NOTE(review): both methods reach into WeakValueDictionary internals
    (``data``, ``_pending_removals``, ``_commit_removals``, ``_remove``);
    these are private and may change across Python versions -- verify on
    interpreter upgrades.
    """
    def get(self, k, d=None, *args):
        # Return the live referent for k; otherwise the (lazily built) default.
        try:
            wr = self.data[k]
        except KeyError:
            # Key unknown: build and return the default without storing it.
            if callable(d):
                d = d(*args)
            return d
        else:
            o = wr()
            if o is None:
                # This should only happen when the referent was collected but
                # its stale weakref has not been purged yet -- treat as missing.
                if callable(d):
                    d = d(*args)
                return d
            else:
                return o
    # noinspection PyUnresolvedReferences
    def setdefault(self, k, d=None, *args):
        # Like dict.setdefault but stores a weak reference to the default.
        try:
            wr = self.data[k]
        except KeyError:
            if self._pending_removals:
                self._commit_removals()
            if callable(d):
                d = d(*args)
            from weakref import KeyedRef
            # KeyedRef mirrors what WeakValueDictionary.__setitem__ stores.
            self.data[k] = KeyedRef(d, self._remove, k)
            return d
        else:
            # Key present: return the referent (may be None if it was just
            # collected, matching WeakValueDictionary's own race behavior).
            return wr()
def remove_key(d, key):
"""Safely remove the `key` from the dictionary.
Safely remove the `key` from the dictionary `d` by first
making a copy of dictionary. Return the new dictionary together
with the value stored for the `key`.
Parameters
----------
d : dict
The dictionary from which to remove the `key`.
key :
The key to remove
Returns
-------
v :
The value for the key
r : dict
The dictionary with the key removed.
"""
r = dict(d)
v = r.pop(key, None)
return v, r
# -------------------------------------
# List
# -------------------------------------
def listify(obj):
"""Ensure that the object `obj` is of type list.
If the object is not of type `list`, the object is
converted into a list.
Parameters
----------
obj :
The object.
Returns
-------
list :
The object inside a list.
"""
if obj is None:
return []
return obj if isinstance(obj, (list, tuple, np.ndarray, type(None))) else [obj] | rlpy/auxiliary/collection_ext.py | from __future__ import division, print_function, absolute_import
import weakref
import numpy as np
# -------------------------------------
# Dictionary
# -------------------------------------
class LazyDict(dict):
def get(self, k, d=None, *args):
return self[k] if k in self else d(*args) if callable(d) else d
def setdefault(self, k, d=None, *args):
return self[k] if k in self else dict.setdefault(self, k, d(*args) if callable(d) else d)
class LazyWeakValueDictionary(weakref.WeakValueDictionary):
def get(self, k, d=None, *args):
try:
wr = self.data[k]
except KeyError:
if callable(d):
d = d(*args)
return d
else:
o = wr()
if o is None:
# This should only happen
if callable(d):
d = d(*args)
return d
else:
return o
# noinspection PyUnresolvedReferences
def setdefault(self, k, d=None, *args):
try:
wr = self.data[k]
except KeyError:
if self._pending_removals:
self._commit_removals()
if callable(d):
d = d(*args)
from weakref import KeyedRef
self.data[k] = KeyedRef(d, self._remove, k)
return d
else:
return wr()
def remove_key(d, key):
"""Safely remove the `key` from the dictionary.
Safely remove the `key` from the dictionary `d` by first
making a copy of dictionary. Return the new dictionary together
with the value stored for the `key`.
Parameters
----------
d : dict
The dictionary from which to remove the `key`.
key :
The key to remove
Returns
-------
v :
The value for the key
r : dict
The dictionary with the key removed.
"""
r = dict(d)
v = r.pop(key, None)
return v, r
# -------------------------------------
# List
# -------------------------------------
def listify(obj):
"""Ensure that the object `obj` is of type list.
If the object is not of type `list`, the object is
converted into a list.
Parameters
----------
obj :
The object.
Returns
-------
list :
The object inside a list.
"""
if obj is None:
return []
return obj if isinstance(obj, (list, tuple, np.ndarray, type(None))) else [obj] | 0.757436 | 0.19046 |
from actioners.interfaces.i_login_control import ILoginControl
from actioners.navigator import Navigator
import time
class LoginControl(ILoginControl):
def __init__(self, wd, login_ui):
self.wd = wd
self.login_ui = login_ui
self.login_page_url = "https://www.stockopedia.com/auth/login/"
self.app_page_url = "https://app.stockopedia.com/"
def __in_login_page(self):
url = self.wd.current_url
return url.startswith(self.login_page_url)
def __get_username_and_password_from_ui(self):
username = self.login_ui.get_username()
password = self.login_ui.get_password()
return username, password
def __fill_element_fields(self, username_element, password_element, username, password):
username_element.clear()
password_element.clear()
username_element.send_keys(username)
password_element.send_keys(password)
def __try_login(self, username_element, password_element, submit_element):
username, password = self.__get_username_and_password_from_ui()
self.__fill_element_fields(username_element, password_element, username, password)
self.login_ui.display_login_action_message()
submit_element.click()
def __find_login_elements(self):
username_element = self.wd.find_element_by_name('username')
password_element = self.wd.find_element_by_name('password')
submit_element = self.wd.find_element_by_id('auth_submit')
return username_element, password_element, submit_element
def __in_app_page(self):
url = self.wd.current_url
return url.startswith(self.app_page_url)
def __logged_in(self):
if self.__in_app_page():
return True
return False
def __get_try_login_result(self):
time.sleep(2)
if self.__logged_in():
self.login_ui.display_login_success_message()
return True
self.login_ui.display_login_fail_message()
return False
def force_login(self):
if self.__logged_in():
return None
if not self.__in_login_page():
Navigator.get(self.wd, self.login_page_url)
try_login_result = False
while not try_login_result:
username_element, password_element, submit_element = self.__find_login_elements()
self.__try_login(username_element, password_element, submit_element)
try_login_result = self.__get_try_login_result() | src/actioners/login_control.py | from actioners.interfaces.i_login_control import ILoginControl
from actioners.navigator import Navigator
import time
class LoginControl(ILoginControl):
def __init__(self, wd, login_ui):
self.wd = wd
self.login_ui = login_ui
self.login_page_url = "https://www.stockopedia.com/auth/login/"
self.app_page_url = "https://app.stockopedia.com/"
def __in_login_page(self):
url = self.wd.current_url
return url.startswith(self.login_page_url)
def __get_username_and_password_from_ui(self):
username = self.login_ui.get_username()
password = self.login_ui.get_password()
return username, password
def __fill_element_fields(self, username_element, password_element, username, password):
username_element.clear()
password_element.clear()
username_element.send_keys(username)
password_element.send_keys(password)
def __try_login(self, username_element, password_element, submit_element):
username, password = self.__get_username_and_password_from_ui()
self.__fill_element_fields(username_element, password_element, username, password)
self.login_ui.display_login_action_message()
submit_element.click()
def __find_login_elements(self):
username_element = self.wd.find_element_by_name('username')
password_element = self.wd.find_element_by_name('password')
submit_element = self.wd.find_element_by_id('auth_submit')
return username_element, password_element, submit_element
def __in_app_page(self):
url = self.wd.current_url
return url.startswith(self.app_page_url)
def __logged_in(self):
if self.__in_app_page():
return True
return False
def __get_try_login_result(self):
time.sleep(2)
if self.__logged_in():
self.login_ui.display_login_success_message()
return True
self.login_ui.display_login_fail_message()
return False
def force_login(self):
if self.__logged_in():
return None
if not self.__in_login_page():
Navigator.get(self.wd, self.login_page_url)
try_login_result = False
while not try_login_result:
username_element, password_element, submit_element = self.__find_login_elements()
self.__try_login(username_element, password_element, submit_element)
try_login_result = self.__get_try_login_result() | 0.304765 | 0.055311 |
import contextlib
import gettext
import os
import babel
import babel.support
import six
import speaklater
from morphi.libs import packages
class Manager(object):
"""Manages translations"""
def __init__(self, dirname=None, locales=None, domain=None, package_name=None):
self._locales = None
self.translations = None
self.dirname = dirname
self.domain = domain
self.package_name = package_name
# `locales` has a setter that depends on the other values, so needs to be
# initialized last
self.locales = locales
@property
def locales(self):
return self._locales
@locales.setter
def locales(self, value):
if (
value is not None and
not isinstance(value, (tuple, list))
):
value = [value]
if (
value != self._locales or
self.translations is None
):
self._locales = value
# now that we've updated the locale, we need to load the new translations
self.translations = self._translations_loader(self.dirname, self.locales,
self.domain, self.package_name)
def gettext(self, string, **variables):
translations = self.translations
# translate string
translated_string = (
string
if translations is None
else translations.gettext(string)
)
return (
translated_string
if not variables
else translated_string.format(**variables)
)
def lazy_gettext(self, string, **variables):
return speaklater.make_lazy_string(self.gettext, string, **variables)
def lazy_ngettext(self, singular, plural, num, **variables):
return speaklater.make_lazy_string(self.ngettext, singular, plural, num, **variables)
def _mo_finder(self, domain=None, localedir=None, languages=None, all=False,
package_name=None, extension='mo'):
if domain is None:
domain = self.domain
if languages is None:
languages = self.locales
if package_name is None:
package_name = self.package_name
return find_mo_filename(
domain=domain,
localedir=localedir,
languages=languages,
all=all,
package_name=package_name,
extension=extension
)
def ngettext(self, singular, plural, num, **variables):
variables.setdefault('num', num)
string_to_translate = (
singular
if num == 1
else plural
)
return self.gettext(string_to_translate, **variables)
def _translations_loader(self, *args, **kwargs):
return load_translations(*args, **kwargs)
def find_mo_filename(domain=None, localedir=None, languages=None, all=False, # noqa: C901
package_name=None, extension='mo'):
"""
Search the filesystem and package for an appropriate .mo file, and return the path
(or `None`, if not found)
"""
if languages is not None:
if not isinstance(languages, (list, tuple)):
languages = [languages]
languages = [str(language) for language in languages]
for attempted_domain in (None, domain, package_name):
if not attempted_domain:
attempted_domain = babel.support.Translations.DEFAULT_DOMAIN
for attempted_package_name in (
None,
package_name
):
filename = None
attempted_dirnames = [localedir]
if package_name:
attempted_dirnames.extend([
'locale',
'i18n'
])
for attempted_dirname in attempted_dirnames:
path_exists = (
packages.enclose_package_path_exists(package_name)
if attempted_package_name is not None
else None
)
filename = gettext_find(attempted_domain, attempted_dirname, languages,
all, path_exists=path_exists, extension=extension)
if filename:
break
if filename:
break
if filename:
break
# `filename` could be an empty string or an empty list; if so, normalize it to `None`
if not filename:
return None
return filename
def get_mo_data(dirname=None, locales=None, domain=None, package_name=None):
"""
Finds the .mo data for the specified parameters, and returns the binary data.
If the .mo file cannot be found or read, returns `None`
"""
mo_filename = find_mo_filename(localedir=dirname, languages=locales,
domain=domain, package_name=package_name)
if mo_filename is None:
return None
openers = []
if package_name is not None:
openers.append({
'opener': packages.package_open,
'args': (package_name, mo_filename)
})
openers.append({
'opener': open,
'args': (mo_filename, 'rb')
})
resource_data = None
for config in openers:
opener = config['opener']
opener_args = config['args']
try:
with opener(*opener_args) as f:
resource_data = f.read()
break
except NotImplementedError:
pass
return resource_data
def gettext_find(domain, localedir=None, languages=None, all=False, # noqa: C901
path_exists=None, extension='mo'):
"""
Locate a file using the `gettext` strategy.
This is almost a straight copy of `gettext.find`
"""
if path_exists is None:
path_exists = os.path.exists
# Get some reasonable defaults for arguments that were not supplied
if localedir is None:
localedir = gettext._default_localedir
if languages is None:
languages = []
for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
val = os.environ.get(envar)
if val:
languages = val.split(':')
break
if 'C' not in languages:
languages.append('C')
# now normalize and expand the languages
nelangs = []
for lang in languages:
for nelang in gettext._expand_lang(lang):
if nelang not in nelangs:
nelangs.append(nelang)
# select a language
if all:
result = []
else:
result = None
for lang in nelangs:
if lang == 'C':
break
mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.%s' % (domain, extension))
mofile_lp = os.path.join("/usr/share/locale-langpack", lang,
'LC_MESSAGES', '%s.%s' % (domain, extension))
# first look into the standard locale dir, then into the
# langpack locale dir
# standard mo file
try:
if path_exists(mofile):
if all:
result.append(mofile)
else:
return mofile
# langpack mofile -> use it
if path_exists(mofile_lp):
if all:
result.append(mofile_lp)
else:
return mofile_lp
except (NotImplementedError, ValueError):
pass
return result
def load_translations(dirname=None, locales=None, domain=None, package_name=None):
"""
Find the .mo data for the specified parameters, and returns the translations.
If the .mo file cannot be found or read, returns `None`
"""
mo_data = get_mo_data(dirname, locales, domain, package_name)
if mo_data is None:
return babel.support.NullTranslations()
with contextlib.closing(six.BytesIO(mo_data)) as fp:
translations = babel.support.Translations(fp=fp,
domain=domain or package_name)
return translations | morphi/messages/manager.py | import contextlib
import gettext
import os
import babel
import babel.support
import six
import speaklater
from morphi.libs import packages
class Manager(object):
"""Manages translations"""
def __init__(self, dirname=None, locales=None, domain=None, package_name=None):
self._locales = None
self.translations = None
self.dirname = dirname
self.domain = domain
self.package_name = package_name
# `locales` has a setter that depends on the other values, so needs to be
# initialized last
self.locales = locales
@property
def locales(self):
return self._locales
@locales.setter
def locales(self, value):
if (
value is not None and
not isinstance(value, (tuple, list))
):
value = [value]
if (
value != self._locales or
self.translations is None
):
self._locales = value
# now that we've updated the locale, we need to load the new translations
self.translations = self._translations_loader(self.dirname, self.locales,
self.domain, self.package_name)
def gettext(self, string, **variables):
translations = self.translations
# translate string
translated_string = (
string
if translations is None
else translations.gettext(string)
)
return (
translated_string
if not variables
else translated_string.format(**variables)
)
def lazy_gettext(self, string, **variables):
return speaklater.make_lazy_string(self.gettext, string, **variables)
def lazy_ngettext(self, singular, plural, num, **variables):
return speaklater.make_lazy_string(self.ngettext, singular, plural, num, **variables)
def _mo_finder(self, domain=None, localedir=None, languages=None, all=False,
package_name=None, extension='mo'):
if domain is None:
domain = self.domain
if languages is None:
languages = self.locales
if package_name is None:
package_name = self.package_name
return find_mo_filename(
domain=domain,
localedir=localedir,
languages=languages,
all=all,
package_name=package_name,
extension=extension
)
def ngettext(self, singular, plural, num, **variables):
variables.setdefault('num', num)
string_to_translate = (
singular
if num == 1
else plural
)
return self.gettext(string_to_translate, **variables)
def _translations_loader(self, *args, **kwargs):
return load_translations(*args, **kwargs)
def find_mo_filename(domain=None, localedir=None, languages=None, all=False, # noqa: C901
package_name=None, extension='mo'):
"""
Search the filesystem and package for an appropriate .mo file, and return the path
(or `None`, if not found)
"""
if languages is not None:
if not isinstance(languages, (list, tuple)):
languages = [languages]
languages = [str(language) for language in languages]
for attempted_domain in (None, domain, package_name):
if not attempted_domain:
attempted_domain = babel.support.Translations.DEFAULT_DOMAIN
for attempted_package_name in (
None,
package_name
):
filename = None
attempted_dirnames = [localedir]
if package_name:
attempted_dirnames.extend([
'locale',
'i18n'
])
for attempted_dirname in attempted_dirnames:
path_exists = (
packages.enclose_package_path_exists(package_name)
if attempted_package_name is not None
else None
)
filename = gettext_find(attempted_domain, attempted_dirname, languages,
all, path_exists=path_exists, extension=extension)
if filename:
break
if filename:
break
if filename:
break
# `filename` could be an empty string or an empty list; if so, normalize it to `None`
if not filename:
return None
return filename
def get_mo_data(dirname=None, locales=None, domain=None, package_name=None):
"""
Finds the .mo data for the specified parameters, and returns the binary data.
If the .mo file cannot be found or read, returns `None`
"""
mo_filename = find_mo_filename(localedir=dirname, languages=locales,
domain=domain, package_name=package_name)
if mo_filename is None:
return None
openers = []
if package_name is not None:
openers.append({
'opener': packages.package_open,
'args': (package_name, mo_filename)
})
openers.append({
'opener': open,
'args': (mo_filename, 'rb')
})
resource_data = None
for config in openers:
opener = config['opener']
opener_args = config['args']
try:
with opener(*opener_args) as f:
resource_data = f.read()
break
except NotImplementedError:
pass
return resource_data
def gettext_find(domain, localedir=None, languages=None, all=False, # noqa: C901
path_exists=None, extension='mo'):
"""
Locate a file using the `gettext` strategy.
This is almost a straight copy of `gettext.find`
"""
if path_exists is None:
path_exists = os.path.exists
# Get some reasonable defaults for arguments that were not supplied
if localedir is None:
localedir = gettext._default_localedir
if languages is None:
languages = []
for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
val = os.environ.get(envar)
if val:
languages = val.split(':')
break
if 'C' not in languages:
languages.append('C')
# now normalize and expand the languages
nelangs = []
for lang in languages:
for nelang in gettext._expand_lang(lang):
if nelang not in nelangs:
nelangs.append(nelang)
# select a language
if all:
result = []
else:
result = None
for lang in nelangs:
if lang == 'C':
break
mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.%s' % (domain, extension))
mofile_lp = os.path.join("/usr/share/locale-langpack", lang,
'LC_MESSAGES', '%s.%s' % (domain, extension))
# first look into the standard locale dir, then into the
# langpack locale dir
# standard mo file
try:
if path_exists(mofile):
if all:
result.append(mofile)
else:
return mofile
# langpack mofile -> use it
if path_exists(mofile_lp):
if all:
result.append(mofile_lp)
else:
return mofile_lp
except (NotImplementedError, ValueError):
pass
return result
def load_translations(dirname=None, locales=None, domain=None, package_name=None):
"""
Find the .mo data for the specified parameters, and returns the translations.
If the .mo file cannot be found or read, returns `None`
"""
mo_data = get_mo_data(dirname, locales, domain, package_name)
if mo_data is None:
return babel.support.NullTranslations()
with contextlib.closing(six.BytesIO(mo_data)) as fp:
translations = babel.support.Translations(fp=fp,
domain=domain or package_name)
return translations | 0.510985 | 0.101233 |
import os
import logging
import random
import json
import numpy as np
import torch
import sklearn.metrics
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.modeling import BertForSequenceClassification, PreTrainedBertModel, BertModel
from pytorch_pretrained_bert.optimization import BertAdam
from . import absa_data_utils as data_utils
from . import modelconfig
from . import models
from .trainer import Trainer
logger = logging.getLogger(__name__)
class AdaWeight(Trainer):
"""Adaweight use a adaboost-style example weighting function.
"""
def initial_train_sample_weights(self, train_features):
return torch.ones(len(train_features) )/len(train_features)
def epoch_weight_update(self, args, model, eval_dataloader, all_label_ids, all_sample_weights, train_features):
"""in-place change to all_sample_weights.
"""
epsilon = 1e-07
#>>>> perform weight adjustment the end of each epoch.
all_y_preds=self._evalutate_on_train(model, eval_dataloader)
incorrect = (all_y_preds != all_label_ids.numpy() )
estimator_error = np.average(incorrect, weights=all_sample_weights.numpy(), axis=0)
estimator_weight = np.log(max(epsilon, (1. - estimator_error) + args.factor) / max(epsilon, estimator_error - args.factor) )
scale = np.exp(estimator_weight * incorrect)
all_sample_weights.mul_(torch.from_numpy(scale).float() )
logger.info("sample_weights %s", str(all_sample_weights[:20]) )
logger.info("****************************************************************")
logger.info("estimator_error %f", estimator_error)
logger.info("estimator_weight (should be >0) %f", estimator_weight)
logger.info("# hard examples %i / %i", sum(incorrect), len(incorrect))
all_contra = np.array([f.contra for f in train_features])
p, r, _, _=sklearn.metrics.precision_recall_fscore_support(all_contra, incorrect, average = 'binary')
logger.info("precision and recall of contra %f %f", p, r)
pos_ratio = float(sum(np.logical_and(all_label_ids.numpy() == 0, incorrect))) / sum(incorrect)
neg_ratio = float(sum(np.logical_and(all_label_ids.numpy() == 1, incorrect))) / sum(incorrect)
neu_ratio = float(sum(np.logical_and(all_label_ids.numpy() == 2, incorrect))) / sum(incorrect)
logger.info("pos neg neu ratio %f %f %f", pos_ratio, neg_ratio, neu_ratio) | asclab/adaweight.py |
import os
import logging
import random
import json
import numpy as np
import torch
import sklearn.metrics
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.modeling import BertForSequenceClassification, PreTrainedBertModel, BertModel
from pytorch_pretrained_bert.optimization import BertAdam
from . import absa_data_utils as data_utils
from . import modelconfig
from . import models
from .trainer import Trainer
logger = logging.getLogger(__name__)
class AdaWeight(Trainer):
"""Adaweight use a adaboost-style example weighting function.
"""
def initial_train_sample_weights(self, train_features):
return torch.ones(len(train_features) )/len(train_features)
def epoch_weight_update(self, args, model, eval_dataloader, all_label_ids, all_sample_weights, train_features):
"""in-place change to all_sample_weights.
"""
epsilon = 1e-07
#>>>> perform weight adjustment the end of each epoch.
all_y_preds=self._evalutate_on_train(model, eval_dataloader)
incorrect = (all_y_preds != all_label_ids.numpy() )
estimator_error = np.average(incorrect, weights=all_sample_weights.numpy(), axis=0)
estimator_weight = np.log(max(epsilon, (1. - estimator_error) + args.factor) / max(epsilon, estimator_error - args.factor) )
scale = np.exp(estimator_weight * incorrect)
all_sample_weights.mul_(torch.from_numpy(scale).float() )
logger.info("sample_weights %s", str(all_sample_weights[:20]) )
logger.info("****************************************************************")
logger.info("estimator_error %f", estimator_error)
logger.info("estimator_weight (should be >0) %f", estimator_weight)
logger.info("# hard examples %i / %i", sum(incorrect), len(incorrect))
all_contra = np.array([f.contra for f in train_features])
p, r, _, _=sklearn.metrics.precision_recall_fscore_support(all_contra, incorrect, average = 'binary')
logger.info("precision and recall of contra %f %f", p, r)
pos_ratio = float(sum(np.logical_and(all_label_ids.numpy() == 0, incorrect))) / sum(incorrect)
neg_ratio = float(sum(np.logical_and(all_label_ids.numpy() == 1, incorrect))) / sum(incorrect)
neu_ratio = float(sum(np.logical_and(all_label_ids.numpy() == 2, incorrect))) / sum(incorrect)
logger.info("pos neg neu ratio %f %f %f", pos_ratio, neg_ratio, neu_ratio) | 0.606032 | 0.304274 |
import sys
from http import HTTPStatus
import requests
from rich import box
from rich.panel import Panel
from rich.table import Table
from starwhale.utils import console, fmt_http_server
from starwhale.consts import UserRoleType, SW_API_VERSION, STANDALONE_INSTANCE
from starwhale.base.view import BaseTermView
from starwhale.utils.http import wrap_sw_error_resp
from .model import CloudInstance
DEFAULT_HTTP_TIMEOUT = 90
class InstanceTermView(BaseTermView):
def __init__(self) -> None:
super().__init__()
def select(self, instance: str) -> None:
try:
self.select_current_default(instance=instance)
except Exception as e:
console.print(
f":person_shrugging: failed to select {instance}, reason: {e}"
)
sys.exit(1)
else:
console.print(f":clap: select {self.current_instance} instance")
def login(self, instance: str, username: str, password: str, alias: str) -> None:
if instance == STANDALONE_INSTANCE:
console.print(f":pinching_hand: skip {instance} instance login")
return
server = fmt_http_server(instance)
url = f"{server}/api/{SW_API_VERSION}/login"
r = requests.post(
url,
timeout=DEFAULT_HTTP_TIMEOUT,
data={"userName": username, "userPwd": password},
)
if r.status_code == HTTPStatus.OK:
console.print(f":man_cook: login {server} successfully!")
token = r.headers.get("Authorization")
if not token:
console.print("cannot get token, please contract starwhale")
sys.exit(1)
_d = r.json()["data"]
_role = _d.get("role", {}).get("roleName") if isinstance(_d, dict) else None
self.update_instance(
uri=server,
user_name=username,
user_role=_role or UserRoleType.NORMAL,
sw_token=token,
alias=alias,
)
else:
wrap_sw_error_resp(r, "login failed!", exit=True)
def logout(self, instance: str = "") -> None:
# TODO: do real logout request
instance = instance or self.current_instance
if instance == STANDALONE_INSTANCE:
console.print(f":pinching_hand: skip {instance} instance logout")
return
self.delete_instance(instance)
console.print(":wink: bye.")
@BaseTermView._header # type: ignore
def info(self, instance: str = "") -> None:
instance = instance or self.current_instance
if instance == STANDALONE_INSTANCE:
console.print(f":balloon: standalone instance, root dir @ {self.rootdir}")
else:
# TODO: support use uri directly
# TODO: user async to get
ci = CloudInstance(instance)
_baseimages = ci._fetch_baseimage()
_version = ci._fetch_version()
_agents = ci._fetch_agents()
def _agents_table() -> Table:
table = Table(
show_edge=False,
show_header=True,
row_styles=["none", "dim"],
box=box.SIMPLE,
)
table.add_column("id")
table.add_column("ip", style="green")
table.add_column("status", style="blue")
table.add_column("version")
table.add_column("connected time")
for i, _agent in enumerate(_agents):
table.add_row(
str(i),
_agent["ip"],
str(_agent["status"]),
_agent["version"],
str(_agent["connectedTime"]),
)
return table
def _details() -> Panel:
grid = Table.grid(padding=1, pad_edge=True)
grid.add_column(
"Category", no_wrap=True, justify="left", style="bold green"
)
grid.add_column("Information")
grid.add_row("Version", _version)
grid.add_row("BaseImage", "\n".join([f"- {i}" for i in _baseimages]))
grid.add_row(
"Agents",
_agents_table(),
)
return Panel(grid, title_align="left")
console.print(_details())
def list(self) -> None:
table = Table(
title="List Starwhale Instances",
caption=f"Current Instance: [blink]{self.current_instance}",
box=box.SIMPLE,
)
table.add_column("")
table.add_column("Name")
table.add_column("URI")
table.add_column("UserName")
table.add_column("UserRole")
table.add_column("CurrentProject")
table.add_column("Updated")
for k, v in self._config["instances"].items():
_is_current = (
k == self.current_instance or v["uri"] == self.current_instance
)
table.add_row(
":backhand_index_pointing_right:" if _is_current else "",
k,
v["uri"],
v["user_name"],
v.get("user_role", "--"),
str(v.get("current_project", "--")),
v.get("updated_at", "--"),
style="magenta" if _is_current else "",
)
console.print(table) | client/starwhale/core/instance/view.py | import sys
from http import HTTPStatus
import requests
from rich import box
from rich.panel import Panel
from rich.table import Table
from starwhale.utils import console, fmt_http_server
from starwhale.consts import UserRoleType, SW_API_VERSION, STANDALONE_INSTANCE
from starwhale.base.view import BaseTermView
from starwhale.utils.http import wrap_sw_error_resp
from .model import CloudInstance
DEFAULT_HTTP_TIMEOUT = 90
class InstanceTermView(BaseTermView):
def __init__(self) -> None:
super().__init__()
def select(self, instance: str) -> None:
try:
self.select_current_default(instance=instance)
except Exception as e:
console.print(
f":person_shrugging: failed to select {instance}, reason: {e}"
)
sys.exit(1)
else:
console.print(f":clap: select {self.current_instance} instance")
def login(self, instance: str, username: str, password: str, alias: str) -> None:
if instance == STANDALONE_INSTANCE:
console.print(f":pinching_hand: skip {instance} instance login")
return
server = fmt_http_server(instance)
url = f"{server}/api/{SW_API_VERSION}/login"
r = requests.post(
url,
timeout=DEFAULT_HTTP_TIMEOUT,
data={"userName": username, "userPwd": password},
)
if r.status_code == HTTPStatus.OK:
console.print(f":man_cook: login {server} successfully!")
token = r.headers.get("Authorization")
if not token:
console.print("cannot get token, please contract starwhale")
sys.exit(1)
_d = r.json()["data"]
_role = _d.get("role", {}).get("roleName") if isinstance(_d, dict) else None
self.update_instance(
uri=server,
user_name=username,
user_role=_role or UserRoleType.NORMAL,
sw_token=token,
alias=alias,
)
else:
wrap_sw_error_resp(r, "login failed!", exit=True)
def logout(self, instance: str = "") -> None:
# TODO: do real logout request
instance = instance or self.current_instance
if instance == STANDALONE_INSTANCE:
console.print(f":pinching_hand: skip {instance} instance logout")
return
self.delete_instance(instance)
console.print(":wink: bye.")
@BaseTermView._header # type: ignore
def info(self, instance: str = "") -> None:
instance = instance or self.current_instance
if instance == STANDALONE_INSTANCE:
console.print(f":balloon: standalone instance, root dir @ {self.rootdir}")
else:
# TODO: support use uri directly
# TODO: user async to get
ci = CloudInstance(instance)
_baseimages = ci._fetch_baseimage()
_version = ci._fetch_version()
_agents = ci._fetch_agents()
def _agents_table() -> Table:
table = Table(
show_edge=False,
show_header=True,
row_styles=["none", "dim"],
box=box.SIMPLE,
)
table.add_column("id")
table.add_column("ip", style="green")
table.add_column("status", style="blue")
table.add_column("version")
table.add_column("connected time")
for i, _agent in enumerate(_agents):
table.add_row(
str(i),
_agent["ip"],
str(_agent["status"]),
_agent["version"],
str(_agent["connectedTime"]),
)
return table
def _details() -> Panel:
grid = Table.grid(padding=1, pad_edge=True)
grid.add_column(
"Category", no_wrap=True, justify="left", style="bold green"
)
grid.add_column("Information")
grid.add_row("Version", _version)
grid.add_row("BaseImage", "\n".join([f"- {i}" for i in _baseimages]))
grid.add_row(
"Agents",
_agents_table(),
)
return Panel(grid, title_align="left")
console.print(_details())
def list(self) -> None:
table = Table(
title="List Starwhale Instances",
caption=f"Current Instance: [blink]{self.current_instance}",
box=box.SIMPLE,
)
table.add_column("")
table.add_column("Name")
table.add_column("URI")
table.add_column("UserName")
table.add_column("UserRole")
table.add_column("CurrentProject")
table.add_column("Updated")
for k, v in self._config["instances"].items():
_is_current = (
k == self.current_instance or v["uri"] == self.current_instance
)
table.add_row(
":backhand_index_pointing_right:" if _is_current else "",
k,
v["uri"],
v["user_name"],
v.get("user_role", "--"),
str(v.get("current_project", "--")),
v.get("updated_at", "--"),
style="magenta" if _is_current else "",
)
console.print(table) | 0.192198 | 0.101545 |
from typing import Dict, MutableMapping, Mapping, TypeVar, List
from antu.io.vocabulary import Vocabulary
from antu.io.fields.field import Field
Indices = TypeVar("Indices", List[int], List[List[int]])
class Instance(Mapping[str, Field]):
"""
An ``Instance`` is a collection (list) of multiple data fields.
Parameters
----------
fields : ``List[Field]``, optional (default=``None``)
A list of multiple data fields.
"""
def __init__(self, fields: List[Field]=None) -> None:
self.fields = fields
self._fields_dict = {}
for field in fields: self._fields_dict[field.name] = field
self.indexed = False # Indicates whether the instance has been indexed
def __getitem__(self, key: str) -> Field:
return self._fields_dict[key]
def __iter__(self):
return iter(self.fields)
def __len__(self) -> int:
return len(self.fields)
def add_field(self, field: Field) -> None:
"""
Add the field to the existing ``Instance``.
Parameters
----------
field : ``Field``
Which field needs to be added.
"""
self.fields.append(field)
if self.indexed:
field.index(vocab)
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]) -> None:
"""
Increments counts in the given ``counter`` for all of the vocabulary
items in all of the ``Fields`` in this ``Instance``.
Parameters
----------
counter : ``Dict[str, Dict[str, int]]``
We count the number of strings if the string needs to be counted to
some counters.
"""
for field in self.fields:
field.count_vocab_items(counter)
def index_fields(self, vocab: Vocabulary) -> Dict[str, Dict[str, Indices]]:
"""
Indexes all fields in this ``Instance`` using the provided ``Vocabulary``.
This `mutates` the current object, it does not return a new ``Instance``.
A ``DataIterator`` will call this on each pass through a dataset; we use the ``indexed``
flag to make sure that indexing only happens once.
This means that if for some reason you modify your vocabulary after you've
indexed your instances, you might get unexpected behavior.
Parameters
----------
vocab : ``Vocabulary``
``vocab`` is used to get the index of each item.
Returns
-------
res : ``Dict[str, Dict[str, Indices]]``
Returns the Indices corresponding to the instance. The first key is
field name and the second key is the vocabulary name.
"""
if not self.indexed:
self.indexed = True
for field in self.fields:
field.index(vocab)
res = {}
for field in self.fields:
res[field.name] = field.indexes
return res | antu/io/instance.py | from typing import Dict, MutableMapping, Mapping, TypeVar, List
from antu.io.vocabulary import Vocabulary
from antu.io.fields.field import Field
Indices = TypeVar("Indices", List[int], List[List[int]])
class Instance(Mapping[str, Field]):
"""
An ``Instance`` is a collection (list) of multiple data fields.
Parameters
----------
fields : ``List[Field]``, optional (default=``None``)
A list of multiple data fields.
"""
def __init__(self, fields: List[Field]=None) -> None:
self.fields = fields
self._fields_dict = {}
for field in fields: self._fields_dict[field.name] = field
self.indexed = False # Indicates whether the instance has been indexed
def __getitem__(self, key: str) -> Field:
return self._fields_dict[key]
def __iter__(self):
return iter(self.fields)
def __len__(self) -> int:
return len(self.fields)
def add_field(self, field: Field) -> None:
"""
Add the field to the existing ``Instance``.
Parameters
----------
field : ``Field``
Which field needs to be added.
"""
self.fields.append(field)
if self.indexed:
field.index(vocab)
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]) -> None:
"""
Increments counts in the given ``counter`` for all of the vocabulary
items in all of the ``Fields`` in this ``Instance``.
Parameters
----------
counter : ``Dict[str, Dict[str, int]]``
We count the number of strings if the string needs to be counted to
some counters.
"""
for field in self.fields:
field.count_vocab_items(counter)
def index_fields(self, vocab: Vocabulary) -> Dict[str, Dict[str, Indices]]:
"""
Indexes all fields in this ``Instance`` using the provided ``Vocabulary``.
This `mutates` the current object, it does not return a new ``Instance``.
A ``DataIterator`` will call this on each pass through a dataset; we use the ``indexed``
flag to make sure that indexing only happens once.
This means that if for some reason you modify your vocabulary after you've
indexed your instances, you might get unexpected behavior.
Parameters
----------
vocab : ``Vocabulary``
``vocab`` is used to get the index of each item.
Returns
-------
res : ``Dict[str, Dict[str, Indices]]``
Returns the Indices corresponding to the instance. The first key is
field name and the second key is the vocabulary name.
"""
if not self.indexed:
self.indexed = True
for field in self.fields:
field.index(vocab)
res = {}
for field in self.fields:
res[field.name] = field.indexes
return res | 0.932905 | 0.548794 |
import torch
from sklearn.metrics import accuracy_score
from torch.nn import Linear
from torch.nn.functional import relu, dropout, log_softmax, nll_loss, leaky_relu
from torch_geometric.nn import APPNP
from torch_geometric.utils.num_nodes import maybe_num_nodes
from torch_sparse import coalesce
from Result import Result
def filter_adj(row, col, edge_attr, mask):
return row[mask], col[mask], None if edge_attr is None else edge_attr[mask]
def dropout_adj(edge_index, edge_attr=None, p=0.5, force_undirected=False,
num_nodes=None, training=True):
if p < 0. or p > 1.:
raise ValueError('Dropout probability has to be between 0 and 1, '
'but got {}'.format(p))
if not training:
return edge_index, edge_attr
N = maybe_num_nodes(edge_index, num_nodes)
row, col = edge_index
if force_undirected:
row, col, edge_attr = filter_adj(row, col, edge_attr, row < col)
mask = edge_index.new_full((row.size(0),), 1 - p, dtype=torch.float)
mask = torch.bernoulli(mask).to(torch.bool)
row, col, edge_attr = filter_adj(row, col, edge_attr, mask)
if force_undirected:
edge_index = torch.stack(
[torch.cat([row, col], dim=0),
torch.cat([col, row], dim=0)], dim=0)
if edge_attr is not None:
edge_attr = torch.cat([edge_attr, edge_attr], dim=0)
edge_index, edge_attr = coalesce(edge_index, edge_attr, N, N)
else:
edge_index = torch.stack([row, col], dim=0)
return edge_index, edge_attr
class ModelAPPNP(torch.nn.Module):
def __init__(self, K, alpha, hidden, activation, data):
super(ModelAPPNP, self).__init__()
self.linear_1 = Linear(data.num_features, hidden)
self.conv = APPNP(K, alpha)
self.linear_2 = Linear(hidden, data.num_class)
if activation == "relu":
self.activation = relu
elif activation == "leaky_relu":
self.activation = leaky_relu
def reset_parameters(self):
self.linear_1.reset_parameters()
self.linear_2.reset_parameters()
def forward(self, data):
x, edge_index, edge_weight = data.x, data.edge_index, data.edge_weight
edge_index, edge_weight = dropout_adj(edge_index, edge_attr=edge_weight, p=0.8, training=self.training)
x = self.linear_1(x)
x = self.activation(x)
x = dropout(x, p=0.5, training=self.training)
x = self.conv(x, edge_index, edge_weight=edge_weight)
x = self.activation(x)
x = dropout(x, p=0.5, training=self.training)
x = self.linear_2(x)
return log_softmax(x, dim=-1)
def main_model_appnp(data, K, alpha, hidden, activation, if_all=False):
torch.backends.cudnn.deterministic = True
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = ModelAPPNP(
K=K,
alpha=alpha,
hidden=hidden,
activation=activation,
data=data,
)
data.split_train_valid()
model = model.to(device)
data = data.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=5e-4)
epoch = 1
loss_train = float("inf")
loss_valid = float("inf")
best_loss_train = float("inf")
best_loss_valid = float("inf")
best_epoch = 0
while best_epoch + 10 >= epoch:
model.train()
optimizer.zero_grad()
predict = model(data)
loss_train = nll_loss(predict[data.mask_train], data.y[data.mask_train])
loss_valid = nll_loss(predict[data.mask_valid], data.y[data.mask_valid])
loss_train.backward()
optimizer.step()
if loss_valid < best_loss_valid:
best_loss_train = loss_train
best_loss_valid = loss_valid
best_epoch = epoch
epoch += 1
model.eval()
with torch.no_grad():
result = model(data)
if if_all:
return Result(
result=result.cpu(),
loss_train=loss_train.cpu(),
loss_valid=loss_valid.cpu(),
acc_train=accuracy_score(data.y[data.mask_train].cpu().numpy().flatten(),
result[data.mask_train].max(1)[1].cpu().numpy().flatten()),
acc_valid=accuracy_score(data.y[data.mask_valid].cpu().numpy().flatten(),
result[data.mask_valid].max(1)[1].cpu().numpy().flatten()),
epoch=epoch - 1,
)
else:
return Result(
result=result[data.mask_test].max(1)[1].cpu().numpy().flatten(),
loss_train=loss_train.cpu(),
loss_valid=loss_valid.cpu(),
acc_train=accuracy_score(data.y[data.mask_train].cpu().numpy().flatten(),
result[data.mask_train].max(1)[1].cpu().numpy().flatten()),
acc_valid=accuracy_score(data.y[data.mask_valid].cpu().numpy().flatten(),
result[data.mask_valid].max(1)[1].cpu().numpy().flatten()),
epoch=epoch - 1,
) | ModelAPPNP2.py | import torch
from sklearn.metrics import accuracy_score
from torch.nn import Linear
from torch.nn.functional import relu, dropout, log_softmax, nll_loss, leaky_relu
from torch_geometric.nn import APPNP
from torch_geometric.utils.num_nodes import maybe_num_nodes
from torch_sparse import coalesce
from Result import Result
def filter_adj(row, col, edge_attr, mask):
return row[mask], col[mask], None if edge_attr is None else edge_attr[mask]
def dropout_adj(edge_index, edge_attr=None, p=0.5, force_undirected=False,
num_nodes=None, training=True):
if p < 0. or p > 1.:
raise ValueError('Dropout probability has to be between 0 and 1, '
'but got {}'.format(p))
if not training:
return edge_index, edge_attr
N = maybe_num_nodes(edge_index, num_nodes)
row, col = edge_index
if force_undirected:
row, col, edge_attr = filter_adj(row, col, edge_attr, row < col)
mask = edge_index.new_full((row.size(0),), 1 - p, dtype=torch.float)
mask = torch.bernoulli(mask).to(torch.bool)
row, col, edge_attr = filter_adj(row, col, edge_attr, mask)
if force_undirected:
edge_index = torch.stack(
[torch.cat([row, col], dim=0),
torch.cat([col, row], dim=0)], dim=0)
if edge_attr is not None:
edge_attr = torch.cat([edge_attr, edge_attr], dim=0)
edge_index, edge_attr = coalesce(edge_index, edge_attr, N, N)
else:
edge_index = torch.stack([row, col], dim=0)
return edge_index, edge_attr
class ModelAPPNP(torch.nn.Module):
def __init__(self, K, alpha, hidden, activation, data):
super(ModelAPPNP, self).__init__()
self.linear_1 = Linear(data.num_features, hidden)
self.conv = APPNP(K, alpha)
self.linear_2 = Linear(hidden, data.num_class)
if activation == "relu":
self.activation = relu
elif activation == "leaky_relu":
self.activation = leaky_relu
def reset_parameters(self):
self.linear_1.reset_parameters()
self.linear_2.reset_parameters()
def forward(self, data):
x, edge_index, edge_weight = data.x, data.edge_index, data.edge_weight
edge_index, edge_weight = dropout_adj(edge_index, edge_attr=edge_weight, p=0.8, training=self.training)
x = self.linear_1(x)
x = self.activation(x)
x = dropout(x, p=0.5, training=self.training)
x = self.conv(x, edge_index, edge_weight=edge_weight)
x = self.activation(x)
x = dropout(x, p=0.5, training=self.training)
x = self.linear_2(x)
return log_softmax(x, dim=-1)
def main_model_appnp(data, K, alpha, hidden, activation, if_all=False):
torch.backends.cudnn.deterministic = True
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = ModelAPPNP(
K=K,
alpha=alpha,
hidden=hidden,
activation=activation,
data=data,
)
data.split_train_valid()
model = model.to(device)
data = data.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=5e-4)
epoch = 1
loss_train = float("inf")
loss_valid = float("inf")
best_loss_train = float("inf")
best_loss_valid = float("inf")
best_epoch = 0
while best_epoch + 10 >= epoch:
model.train()
optimizer.zero_grad()
predict = model(data)
loss_train = nll_loss(predict[data.mask_train], data.y[data.mask_train])
loss_valid = nll_loss(predict[data.mask_valid], data.y[data.mask_valid])
loss_train.backward()
optimizer.step()
if loss_valid < best_loss_valid:
best_loss_train = loss_train
best_loss_valid = loss_valid
best_epoch = epoch
epoch += 1
model.eval()
with torch.no_grad():
result = model(data)
if if_all:
return Result(
result=result.cpu(),
loss_train=loss_train.cpu(),
loss_valid=loss_valid.cpu(),
acc_train=accuracy_score(data.y[data.mask_train].cpu().numpy().flatten(),
result[data.mask_train].max(1)[1].cpu().numpy().flatten()),
acc_valid=accuracy_score(data.y[data.mask_valid].cpu().numpy().flatten(),
result[data.mask_valid].max(1)[1].cpu().numpy().flatten()),
epoch=epoch - 1,
)
else:
return Result(
result=result[data.mask_test].max(1)[1].cpu().numpy().flatten(),
loss_train=loss_train.cpu(),
loss_valid=loss_valid.cpu(),
acc_train=accuracy_score(data.y[data.mask_train].cpu().numpy().flatten(),
result[data.mask_train].max(1)[1].cpu().numpy().flatten()),
acc_valid=accuracy_score(data.y[data.mask_valid].cpu().numpy().flatten(),
result[data.mask_valid].max(1)[1].cpu().numpy().flatten()),
epoch=epoch - 1,
) | 0.918435 | 0.561636 |
import sys, os
root_dir = os.path.join(os.path.dirname(__file__),'..')
if root_dir not in sys.path:
sys.path.insert(0, root_dir)
from dataset.image_base import *
from config import args
set_names = {'all':['train','val','test'],'test':['test'],'val':['train','val','test']}
class PW3D(Image_base):
def __init__(self,train_flag = False, split='test', mode='vibe', regress_smpl=True, **kwargs):
super(PW3D,self).__init__(train_flag)
self.data_folder = args().dataset_rootdir
self.data3d_dir = os.path.join(self.data_folder,'sequenceFiles')
self.image_dir = os.path.join(self.data_folder,'imageFiles')
self.mode = mode
self.split = split
logging.info('Loading 3DPW in {} mode, split {}'.format(mode,self.split))
if mode == 'vibe':
self.annots_path = args().annot_dir #os.path.join(config.project_dir,'data/vibe_db')
self.joint_mapper = constants.joint_mapping(constants.LSP_14,constants.SMPL_ALL_54)
self.joint3d_mapper = constants.joint_mapping(constants.LSP_14,constants.SMPL_ALL_54)
self.load_vibe_annots()
elif mode == 'whole':
self.joint_mapper = constants.joint_mapping(constants.COCO_18,constants.SMPL_ALL_54)
self.joint3d_mapper = constants.joint_mapping(constants.SMPL_24,constants.SMPL_ALL_54)
self.annots_path = os.path.join(self.data_folder,'annots.npz')
if not os.path.exists(self.annots_path):
self.pack_data()
self.load_annots()
else:
raise NotImplementedError
self.root_inds = [constants.SMPL_ALL_54['R_Hip'], constants.SMPL_ALL_54['L_Hip']]
logging.info('3DPW dataset {} split total {} samples, loading mode {}'.format(self.split ,self.__len__(), self.mode))
def __len__(self):
return len(self.file_paths)
def get_image_info(self, index):
annots = self.annots[self.file_paths[index%len(self.file_paths)]]
subject_ids, genders, full_kp2d, kp3d_monos, params, bbox = [[] for i in range(6)]
for inds, annot in enumerate(annots):
video_name, gender, person_id, frame_id, kp2d, kp3d, pose_param, beta_param = annot
subject_ids.append(person_id)
genders.append(gender)
kp3d = self.map_kps(kp3d, self.joint3d_mapper)
kp3d_monos.append(kp3d)
params.append(np.concatenate([pose_param[:66], beta_param[:10]]))
kp2d_gt = self.map_kps(kp2d, self.joint_mapper)
full_kp2d.append(kp2d_gt)
imgpath = os.path.join(self.image_dir,video_name,'image_{:05}.jpg'.format(frame_id))
image = cv2.imread(imgpath)[:,:,::-1].copy()
info_2d = ('pw3d_vibe', imgpath, image, full_kp2d, None, subject_ids)
info_3d = ('pw3d_vibe', kp3d_monos, params, None)
return info_2d, info_3d
def load_vibe_annots(self):
set_names = {'all':['train','val','test'],'train':['train'],'test':['test'],'val':['val']}
self.split_used = set_names[self.split]
self.annots = {}
for split in self.split_used:
db_file = os.path.join(self.annots_path,'3dpw_{}_db.pt'.format(split))
db = joblib.load(db_file)
vid_names = db['vid_name']
frame_ids = db['frame_id']
kp2ds, kp3ds, pose_params, beta_params, valids = db['joints2D'], db['joints3D'], db['pose'], db['shape'], db['valid']
if split=='train':
kp3ds = kp3ds[:,25:39]
for vid_name, frame_id, kp2d, kp3d, pose_param, beta_param, valid in zip(vid_names, frame_ids, kp2ds, kp3ds, pose_params, beta_params, valids):
if valid!=1:
continue
video_name, person_id = vid_name[:-2], int(vid_name[-1])
name = '{}_{}'.format(video_name,frame_id)
if name not in self.annots:
self.annots[name] = []
self.annots[name].append([video_name, None, person_id, frame_id, kp2d, kp3d, pose_param, beta_param])
self.file_paths = list(self.annots.keys())
def load_annots(self):
set_names = {'train':['train'],'all':['train','validation','test'],'val':['validation'],'test':['test']}
split_used = set_names[self.split]
annots = np.load(self.annots_path,allow_pickle=True)
params = annots['params'][()]
kp3ds = annots['kp3d'][()]
kp2ds = annots['kp2d'][()]
self.annots = {}
video_names = list(params.keys())
for video_name in video_names:
valid_indices = params[video_name]['valid_indices']
genders = params[video_name]['genders']
for person_id, valid_index in enumerate(valid_indices):
for annot_id,frame_id in enumerate(valid_index):
split = params[video_name]['split']
if split not in split_used:
continue
name = '{}_{}'.format(video_name.strip('.pkl'),frame_id)
kp3d = kp3ds[video_name][person_id][annot_id]
kp2d = kp2ds[video_name][person_id][annot_id]
pose_param = params[video_name]['poses'][person_id][annot_id]
beta_param = params[video_name]['betas'][person_id]
gender = genders[person_id]
if name not in self.annots:
self.annots[name] = []
self.annots[name].append([video_name.strip('.pkl'), gender, person_id, frame_id, kp2d.T, kp3d, pose_param, beta_param])
self.file_paths = list(self.annots.keys())
def pack_data(self):
"""
The function reads all the ground truth and prediction files. And concatenates
:param paths_gt: all the paths corresponding to the ground truth - list of pkl files
:param paths_prd: all the paths corresponding to the predictions - list of pkl files
:return:
jp_pred: jointPositions Prediction. Shape N x 24 x 3
jp_gt: jointPositions ground truth. Shape: N x 24 x 3
mats_pred: Global rotation matrices predictions. Shape N x 24 x 3 x 3
mats_gt: Global rotation matrices ground truths. Shape N x 24 x 3 x 3
"""
# all ground truth smpl parameters / joint positions / rotation matrices
from evaluation.pw3d_eval.SMPL import SMPL
all_params, all_jp_gts, all_jp2d_gts, all_glob_rot_gts = {}, {}, {}, {}
seq = 0
num_jps_pred = 0
num_ors_pred = 0
paths_gt = glob.glob(os.path.join(self.data3d_dir,'*/*.pkl'))
smpl_model_genders = {'f':SMPL(center_idx=0, gender='f', model_root=os.path.join(config.model_dir,'smpl_original')),\
'm':SMPL(center_idx=0, gender='m', model_root=os.path.join(config.model_dir,'smpl_original')) }
# construct the data structures -
for path_gt in paths_gt:
print('Processing: ', path_gt)
video_name = os.path.basename(path_gt)
seq = seq + 1
# Open pkl files
data_gt = pickle.load(open(path_gt, 'rb'), encoding='latin1')
split = path_gt.split('/')[-2]
genders = data_gt['genders']
all_params[video_name], all_jp_gts[video_name], all_jp2d_gts[video_name], all_glob_rot_gts[video_name] = {}, [], [], []
all_params[video_name]['split'] = split
all_params[video_name]['genders'] = genders
all_params[video_name]['poses'], all_params[video_name]['trans'], all_params[video_name]['valid_indices'] = [], [], []
all_params[video_name]['betas'] = np.array(data_gt['betas'])
for i in range(len(genders)):
# Get valid frames
# Frame with no zeros in the poses2d file and where campose_valid is True
poses2d_gt = data_gt['poses2d']
poses2d_gt_i = poses2d_gt[i]
camposes_valid = data_gt['campose_valid']
camposes_valid_i = camposes_valid[i]
valid_indices = check_valid_inds(poses2d_gt_i, camposes_valid_i)
all_jp2d_gts[video_name].append(poses2d_gt_i[valid_indices])
# Get the ground truth SMPL body parameters - poses, betas and translation parameters
pose_params = np.array(data_gt['poses'])
pose_params = pose_params[i, valid_indices, :]
shape_params = np.array(data_gt['betas'][i])
shape_params = np.expand_dims(shape_params, 0)
shape_params = shape_params[:, :10]
shape_params = np.tile(shape_params, (pose_params.shape[0], 1))
trans_params = np.array(data_gt['trans'])
trans_params = trans_params[i, valid_indices, :]
all_params[video_name]['trans'].append(trans_params)
all_params[video_name]['valid_indices'].append(valid_indices)
# Get the GT joint and vertex positions and the global rotation matrices
verts_gt, jp_gt, glb_rot_mats_gt = smpl_model_genders[genders[i]].update(pose_params, shape_params, trans_params)
# Apply Camera Matrix Transformation to ground truth values
cam_matrix = data_gt['cam_poses']
new_cam_poses = np.transpose(cam_matrix, (0, 2, 1))
new_cam_poses = new_cam_poses[valid_indices, :, :]
# we don't have the joint regressor for female/male model. So we can't regress all 54 joints from the mesh of female/male model.
jp_gt, glb_rot_mats_gt = apply_camera_transforms(jp_gt, glb_rot_mats_gt, new_cam_poses)
root_rotation_cam_tranformed = transform_rot_representation(glb_rot_mats_gt[:,0], input_type='mat',out_type='vec')
pose_params[:,:3] = root_rotation_cam_tranformed
all_params[video_name]['poses'].append(pose_params)
all_jp_gts[video_name].append(jp_gt)
all_glob_rot_gts[video_name].append(glb_rot_mats_gt)
np.savez(self.annots_path, params=all_params, kp3d=all_jp_gts, glob_rot=all_glob_rot_gts, kp2d=all_jp2d_gts)
def with_ones(data):
"""
Converts an array in 3d coordinates to 4d homogenous coordiantes
:param data: array of shape A x B x 3
:return return ret_arr: array of shape A x B x 4 where the extra dimension is filled with ones
"""
ext_arr = np.ones((data.shape[0], data.shape[1], 1))
ret_arr = np.concatenate((data, ext_arr), axis=2)
return ret_arr
def apply_camera_transforms(joints, rotations, camera):
"""
Applies camera transformations to joint locations and rotations matrices
:param joints: B x 24 x 3
:param rotations: B x 24 x 3 x 3
:param camera: B x 4 x 4 - already transposed
:return: joints B x 24 x 3 joints after applying camera transformations
rotations B x 24 x 3 x 3 - rotations matrices after applying camera transformations
"""
joints = with_ones(joints) # B x 24 x 4
joints = np.matmul(joints, camera)[:, :, :3]
# multiply all rotation matrices with the camera rotation matrix
# transpose camera coordinates back
cam_new = np.transpose(camera[:, :3, :3], (0, 2, 1))
cam_new = np.expand_dims(cam_new, 1)
cam_new = np.tile(cam_new, (1, 24, 1, 1))
# B x 24 x 3 x 3
rotations = np.matmul(cam_new, rotations)
return joints, rotations
def check_valid_inds(poses2d, camposes_valid):
"""
Computes the indices where further computations are required
:param poses2d: N x 18 x 3 array of 2d Poses
:param camposes_valid: N x 1 array of indices where camera poses are valid
:return: array of indices indicating frame ids in the sequence which are to be evaluated
"""
# find all indices in the N sequences where the sum of the 18x3 array is not zero
# N, numpy array
poses2d_mean = np.mean(np.mean(np.abs(poses2d), axis=2), axis=1)
poses2d_bool = poses2d_mean == 0
poses2d_bool_inv = np.logical_not(poses2d_bool)
# find all the indices where the camposes are valid
camposes_valid = np.array(camposes_valid).astype('bool')
final = np.logical_and(poses2d_bool_inv, camposes_valid)
indices = np.array(np.where(final == True)[0])
return indices
def with_ones(data):
"""
Converts an array in 3d coordinates to 4d homogenous coordiantes
:param data: array of shape A x B x 3
:return return ret_arr: array of shape A x B x 4 where the extra dimension is filled with ones
"""
ext_arr = np.ones((data.shape[0], data.shape[1], 1))
ret_arr = np.concatenate((data, ext_arr), axis=2)
return ret_arr
def apply_camera_transforms(joints, rotations, camera):
"""
Applies camera transformations to joint locations and rotations matrices
:param joints: B x 24 x 3
:param rotations: B x 24 x 3 x 3
:param camera: B x 4 x 4 - already transposed
:return: joints B x 24 x 3 joints after applying camera transformations
rotations B x 24 x 3 x 3 - rotations matrices after applying camera transformations
"""
joints = with_ones(joints) # B x 24 x 4
joints = np.matmul(joints, camera)[:, :, :3]
# multiply all rotation matrices with the camera rotation matrix
# transpose camera coordinates back
cam_new = np.transpose(camera[:, :3, :3], (0, 2, 1))
cam_new = np.expand_dims(cam_new, 1)
cam_new = np.tile(cam_new, (1, 24, 1, 1))
# B x 24 x 3 x 3
rotations = np.matmul(cam_new, rotations)
return joints, rotations
def check_valid_inds(poses2d, camposes_valid):
"""
Computes the indices where further computations are required
:param poses2d: N x 18 x 3 array of 2d Poses
:param camposes_valid: N x 1 array of indices where camera poses are valid
:return: array of indices indicating frame ids in the sequence which are to be evaluated
"""
# find all indices in the N sequences where the sum of the 18x3 array is not zero
# N, numpy array
poses2d_mean = np.mean(np.mean(np.abs(poses2d), axis=2), axis=1)
poses2d_bool = poses2d_mean == 0
poses2d_bool_inv = np.logical_not(poses2d_bool)
# find all the indices where the camposes are valid
camposes_valid = np.array(camposes_valid).astype('bool')
final = np.logical_and(poses2d_bool_inv, camposes_valid)
indices = np.array(np.where(final == True)[0])
return indices
if __name__ == '__main__':
dataset= PW3D()
test_dataset(dataset,with_3d=True,with_smpl=True)
print('Done') | src/lib/dataset/pw3d.py | import sys, os
root_dir = os.path.join(os.path.dirname(__file__),'..')
if root_dir not in sys.path:
sys.path.insert(0, root_dir)
from dataset.image_base import *
from config import args
set_names = {'all':['train','val','test'],'test':['test'],'val':['train','val','test']}
class PW3D(Image_base):
def __init__(self,train_flag = False, split='test', mode='vibe', regress_smpl=True, **kwargs):
super(PW3D,self).__init__(train_flag)
self.data_folder = args().dataset_rootdir
self.data3d_dir = os.path.join(self.data_folder,'sequenceFiles')
self.image_dir = os.path.join(self.data_folder,'imageFiles')
self.mode = mode
self.split = split
logging.info('Loading 3DPW in {} mode, split {}'.format(mode,self.split))
if mode == 'vibe':
self.annots_path = args().annot_dir #os.path.join(config.project_dir,'data/vibe_db')
self.joint_mapper = constants.joint_mapping(constants.LSP_14,constants.SMPL_ALL_54)
self.joint3d_mapper = constants.joint_mapping(constants.LSP_14,constants.SMPL_ALL_54)
self.load_vibe_annots()
elif mode == 'whole':
self.joint_mapper = constants.joint_mapping(constants.COCO_18,constants.SMPL_ALL_54)
self.joint3d_mapper = constants.joint_mapping(constants.SMPL_24,constants.SMPL_ALL_54)
self.annots_path = os.path.join(self.data_folder,'annots.npz')
if not os.path.exists(self.annots_path):
self.pack_data()
self.load_annots()
else:
raise NotImplementedError
self.root_inds = [constants.SMPL_ALL_54['R_Hip'], constants.SMPL_ALL_54['L_Hip']]
logging.info('3DPW dataset {} split total {} samples, loading mode {}'.format(self.split ,self.__len__(), self.mode))
def __len__(self):
return len(self.file_paths)
def get_image_info(self, index):
annots = self.annots[self.file_paths[index%len(self.file_paths)]]
subject_ids, genders, full_kp2d, kp3d_monos, params, bbox = [[] for i in range(6)]
for inds, annot in enumerate(annots):
video_name, gender, person_id, frame_id, kp2d, kp3d, pose_param, beta_param = annot
subject_ids.append(person_id)
genders.append(gender)
kp3d = self.map_kps(kp3d, self.joint3d_mapper)
kp3d_monos.append(kp3d)
params.append(np.concatenate([pose_param[:66], beta_param[:10]]))
kp2d_gt = self.map_kps(kp2d, self.joint_mapper)
full_kp2d.append(kp2d_gt)
imgpath = os.path.join(self.image_dir,video_name,'image_{:05}.jpg'.format(frame_id))
image = cv2.imread(imgpath)[:,:,::-1].copy()
info_2d = ('pw3d_vibe', imgpath, image, full_kp2d, None, subject_ids)
info_3d = ('pw3d_vibe', kp3d_monos, params, None)
return info_2d, info_3d
def load_vibe_annots(self):
set_names = {'all':['train','val','test'],'train':['train'],'test':['test'],'val':['val']}
self.split_used = set_names[self.split]
self.annots = {}
for split in self.split_used:
db_file = os.path.join(self.annots_path,'3dpw_{}_db.pt'.format(split))
db = joblib.load(db_file)
vid_names = db['vid_name']
frame_ids = db['frame_id']
kp2ds, kp3ds, pose_params, beta_params, valids = db['joints2D'], db['joints3D'], db['pose'], db['shape'], db['valid']
if split=='train':
kp3ds = kp3ds[:,25:39]
for vid_name, frame_id, kp2d, kp3d, pose_param, beta_param, valid in zip(vid_names, frame_ids, kp2ds, kp3ds, pose_params, beta_params, valids):
if valid!=1:
continue
video_name, person_id = vid_name[:-2], int(vid_name[-1])
name = '{}_{}'.format(video_name,frame_id)
if name not in self.annots:
self.annots[name] = []
self.annots[name].append([video_name, None, person_id, frame_id, kp2d, kp3d, pose_param, beta_param])
self.file_paths = list(self.annots.keys())
def load_annots(self):
set_names = {'train':['train'],'all':['train','validation','test'],'val':['validation'],'test':['test']}
split_used = set_names[self.split]
annots = np.load(self.annots_path,allow_pickle=True)
params = annots['params'][()]
kp3ds = annots['kp3d'][()]
kp2ds = annots['kp2d'][()]
self.annots = {}
video_names = list(params.keys())
for video_name in video_names:
valid_indices = params[video_name]['valid_indices']
genders = params[video_name]['genders']
for person_id, valid_index in enumerate(valid_indices):
for annot_id,frame_id in enumerate(valid_index):
split = params[video_name]['split']
if split not in split_used:
continue
name = '{}_{}'.format(video_name.strip('.pkl'),frame_id)
kp3d = kp3ds[video_name][person_id][annot_id]
kp2d = kp2ds[video_name][person_id][annot_id]
pose_param = params[video_name]['poses'][person_id][annot_id]
beta_param = params[video_name]['betas'][person_id]
gender = genders[person_id]
if name not in self.annots:
self.annots[name] = []
self.annots[name].append([video_name.strip('.pkl'), gender, person_id, frame_id, kp2d.T, kp3d, pose_param, beta_param])
self.file_paths = list(self.annots.keys())
def pack_data(self):
"""
The function reads all the ground truth and prediction files. And concatenates
:param paths_gt: all the paths corresponding to the ground truth - list of pkl files
:param paths_prd: all the paths corresponding to the predictions - list of pkl files
:return:
jp_pred: jointPositions Prediction. Shape N x 24 x 3
jp_gt: jointPositions ground truth. Shape: N x 24 x 3
mats_pred: Global rotation matrices predictions. Shape N x 24 x 3 x 3
mats_gt: Global rotation matrices ground truths. Shape N x 24 x 3 x 3
"""
# all ground truth smpl parameters / joint positions / rotation matrices
from evaluation.pw3d_eval.SMPL import SMPL
all_params, all_jp_gts, all_jp2d_gts, all_glob_rot_gts = {}, {}, {}, {}
seq = 0
num_jps_pred = 0
num_ors_pred = 0
paths_gt = glob.glob(os.path.join(self.data3d_dir,'*/*.pkl'))
smpl_model_genders = {'f':SMPL(center_idx=0, gender='f', model_root=os.path.join(config.model_dir,'smpl_original')),\
'm':SMPL(center_idx=0, gender='m', model_root=os.path.join(config.model_dir,'smpl_original')) }
# construct the data structures -
for path_gt in paths_gt:
print('Processing: ', path_gt)
video_name = os.path.basename(path_gt)
seq = seq + 1
# Open pkl files
data_gt = pickle.load(open(path_gt, 'rb'), encoding='latin1')
split = path_gt.split('/')[-2]
genders = data_gt['genders']
all_params[video_name], all_jp_gts[video_name], all_jp2d_gts[video_name], all_glob_rot_gts[video_name] = {}, [], [], []
all_params[video_name]['split'] = split
all_params[video_name]['genders'] = genders
all_params[video_name]['poses'], all_params[video_name]['trans'], all_params[video_name]['valid_indices'] = [], [], []
all_params[video_name]['betas'] = np.array(data_gt['betas'])
for i in range(len(genders)):
# Get valid frames
# Frame with no zeros in the poses2d file and where campose_valid is True
poses2d_gt = data_gt['poses2d']
poses2d_gt_i = poses2d_gt[i]
camposes_valid = data_gt['campose_valid']
camposes_valid_i = camposes_valid[i]
valid_indices = check_valid_inds(poses2d_gt_i, camposes_valid_i)
all_jp2d_gts[video_name].append(poses2d_gt_i[valid_indices])
# Get the ground truth SMPL body parameters - poses, betas and translation parameters
pose_params = np.array(data_gt['poses'])
pose_params = pose_params[i, valid_indices, :]
shape_params = np.array(data_gt['betas'][i])
shape_params = np.expand_dims(shape_params, 0)
shape_params = shape_params[:, :10]
shape_params = np.tile(shape_params, (pose_params.shape[0], 1))
trans_params = np.array(data_gt['trans'])
trans_params = trans_params[i, valid_indices, :]
all_params[video_name]['trans'].append(trans_params)
all_params[video_name]['valid_indices'].append(valid_indices)
# Get the GT joint and vertex positions and the global rotation matrices
verts_gt, jp_gt, glb_rot_mats_gt = smpl_model_genders[genders[i]].update(pose_params, shape_params, trans_params)
# Apply Camera Matrix Transformation to ground truth values
cam_matrix = data_gt['cam_poses']
new_cam_poses = np.transpose(cam_matrix, (0, 2, 1))
new_cam_poses = new_cam_poses[valid_indices, :, :]
# we don't have the joint regressor for female/male model. So we can't regress all 54 joints from the mesh of female/male model.
jp_gt, glb_rot_mats_gt = apply_camera_transforms(jp_gt, glb_rot_mats_gt, new_cam_poses)
root_rotation_cam_tranformed = transform_rot_representation(glb_rot_mats_gt[:,0], input_type='mat',out_type='vec')
pose_params[:,:3] = root_rotation_cam_tranformed
all_params[video_name]['poses'].append(pose_params)
all_jp_gts[video_name].append(jp_gt)
all_glob_rot_gts[video_name].append(glb_rot_mats_gt)
np.savez(self.annots_path, params=all_params, kp3d=all_jp_gts, glob_rot=all_glob_rot_gts, kp2d=all_jp2d_gts)
def with_ones(data):
    """Append a homogeneous coordinate of 1 along the last axis.

    Converts an array of 3-d coordinates to 4-d homogeneous coordinates.
    Generalized from the original A x B x 3 case to any number of leading
    dimensions.

    :param data: array of shape (..., 3)
    :return: array of shape (..., 4) whose last component is 1
    """
    # Build the ones column from all leading dims so inputs of any rank work.
    # No dtype is forced, preserving the original float upcast behaviour.
    ext_arr = np.ones((*data.shape[:-1], 1))
    return np.concatenate((data, ext_arr), axis=-1)
def apply_camera_transforms(joints, rotations, camera):
    """Map joint positions and per-joint rotations into the camera frame.

    :param joints: B x 24 x 3 joint positions
    :param rotations: B x 24 x 3 x 3 rotation matrices
    :param camera: B x 4 x 4 camera matrices, already transposed
    :return: (joints B x 24 x 3, rotations B x 24 x 3 x 3) in camera
        coordinates
    """
    batch, num_joints = joints.shape[0], joints.shape[1]
    # Homogeneous coordinates: append a column of ones -> B x 24 x 4.
    homogeneous = np.concatenate(
        (joints, np.ones((batch, num_joints, 1))), axis=2
    )
    # Right-multiplication works because `camera` is already transposed;
    # drop the homogeneous component afterwards.
    joints = np.matmul(homogeneous, camera)[:, :, :3]
    # Rotation part of the camera, transposed back to standard orientation,
    # replicated across the 24 joints so it left-multiplies each rotation.
    cam_rot = np.transpose(camera[:, :3, :3], (0, 2, 1))
    cam_rot = np.tile(cam_rot[:, np.newaxis], (1, 24, 1, 1))
    rotations = np.matmul(cam_rot, rotations)
    return joints, rotations
def check_valid_inds(poses2d, camposes_valid):
    """Return the frame indices that should be evaluated.

    A frame is kept when its 2-d pose is not all zeros (an all-zero pose
    marks a missing annotation) and its camera pose is flagged valid.

    :param poses2d: N x 18 x 3 array of 2-d poses
    :param camposes_valid: length-N array-like of camera-pose validity flags
    :return: 1-d int array of frame indices to evaluate
    """
    # Mean absolute value over the whole 18 x 3 pose is zero iff every
    # entry is zero; one mean over both axes replaces the nested means.
    poses2d_mean = np.mean(np.abs(poses2d), axis=(1, 2))
    has_pose = poses2d_mean != 0
    camposes_valid = np.asarray(camposes_valid, dtype=bool)
    # flatnonzero replaces the unidiomatic `np.where(final == True)[0]`.
    return np.flatnonzero(np.logical_and(has_pose, camposes_valid))
def with_ones(data):
    """Convert 3-d coordinates to 4-d homogeneous coordinates.

    :param data: array of shape A x B x 3
    :return: array of shape A x B x 4 whose extra last component is 1
    """
    rows, cols = data.shape[0], data.shape[1]
    ones_column = np.ones((rows, cols, 1))
    return np.concatenate((data, ones_column), axis=2)
def apply_camera_transforms(joints, rotations, camera):
    """
    Applies camera transformations to joint locations and rotations matrices.

    :param joints: B x 24 x 3
    :param rotations: B x 24 x 3 x 3
    :param camera: B x 4 x 4 - already transposed
    :return: joints B x 24 x 3 joints after applying camera transformations
        rotations B x 24 x 3 x 3 - rotations matrices after applying camera
        transformations
    """
    joints = with_ones(joints)  # B x 24 x 4 homogeneous coordinates
    # Right-multiplication works because `camera` is already transposed;
    # drop the homogeneous component afterwards.
    joints = np.matmul(joints, camera)[:, :, :3]
    # multiply all rotation matrices with the camera rotation matrix
    # transpose camera coordinates back
    cam_new = np.transpose(camera[:, :3, :3], (0, 2, 1))
    # Replicate the camera rotation across the 24 joints.
    cam_new = np.expand_dims(cam_new, 1)
    cam_new = np.tile(cam_new, (1, 24, 1, 1))
    # B x 24 x 3 x 3
    rotations = np.matmul(cam_new, rotations)
    return joints, rotations
def check_valid_inds(poses2d, camposes_valid):
    """Compute the indices of frames that require further processing.

    :param poses2d: N x 18 x 3 array of 2-d poses
    :param camposes_valid: length-N array of camera-pose validity flags
    :return: array of frame indices in the sequence to be evaluated
    """
    # A frame's annotation is missing exactly when the whole 18 x 3 pose is
    # zero, i.e. when its mean absolute value is zero.
    per_frame = np.mean(np.mean(np.abs(poses2d), axis=2), axis=1)
    annotated = np.logical_not(per_frame == 0)
    cam_ok = np.array(camposes_valid).astype('bool')
    keep = np.logical_and(annotated, cam_ok)
    return np.array(np.where(keep)[0])
if __name__ == '__main__':
    # Smoke test: build the 3DPW dataset wrapper and run the shared dataset
    # test harness with 3-D joints and SMPL parameters enabled.
    dataset= PW3D()
    test_dataset(dataset,with_3d=True,with_smpl=True)
print('Done') | 0.276886 | 0.205795 |
import time
import random
from qtpy.QtCore import Signal, QByteArray, QPoint, QRect, QSize, QTimer, Qt, QObject, QUrl
from qtpy.QtGui import QBrush, QColor, QFont, QImage, QPainter
from qtpy.QtWidgets import QWidget
from qtpy.QtNetwork import QNetworkRequest, QNetworkAccessManager
class Downloader(QObject):
    """Asynchronously fetch JPEG frames over HTTP via QNetworkAccessManager.

    At most one request is in flight at a time; completed downloads are
    published through the ``imageReady`` signal.
    """

    # Emitted with the raw bytes of a newly downloaded image.
    imageReady = Signal(QByteArray)

    def __init__(self, parent=None):
        super(Downloader, self).__init__(parent)
        self.manager = QNetworkAccessManager()
        self.url = 'http://localhost:9998/jpg/image.jpg'
        self.request = QNetworkRequest()
        self.request.setUrl(QUrl(self.url))
        self.buffer = QByteArray()
        # In-flight network reply; None means no request is outstanding.
        self.reply = None

    def setUrl(self, url):
        """Point subsequent downloads at *url*."""
        self.url = url
        self.request.setUrl(QUrl(self.url))

    def downloadData(self):
        """ Only request a new image if this is the first/last completed. """
        if self.reply is None:
            self.reply = self.manager.get(self.request)
            self.reply.finished.connect(self.finished)

    def finished(self):
        """ Read the buffer, emit a signal with the new image in it. """
        self.buffer = self.reply.readAll()
        self.imageReady.emit(self.buffer)
        # Let Qt reclaim the reply, then clear it so the next
        # downloadData() call may start a fresh request.
        self.reply.deleteLater()
        self.reply = None
class Microscope(QWidget):
roiClicked = Signal(int, int)
def __init__(self, parent=None):
    """Create the microscope view and wire up the frame downloader.

    :param parent: optional Qt parent widget
    """
    super(Microscope, self).__init__(parent)
    self.setMinimumWidth(300)
    self.setMinimumHeight(300)
    # Placeholder frame shown until the first download completes.
    self.image = QImage('image.jpg')
    self.clicks = []
    # Crosshair position, initially the placeholder image centre.
    self.center = QPoint(
        self.image.size().width() / 2, self.image.size().height() / 2
    )
    # Rubber-band selection rectangle corners (updated by mouse events).
    self.start = QPoint(0, 0)
    self.end = QPoint(1, 1)
    # Grid divisions drawn inside the selection rectangle.
    self.yDivs = 5
    self.xDivs = 5
    self.color = False
    self.fps = 5
    self.scaleBar = False
    self.url = 'http://localhost:9998/jpg/image.jpg'
    # Timer paces image requests at roughly `fps` frames per second.
    self.timer = QTimer(self)
    self.timer.timeout.connect(self.updateImage)
    self.downloader = Downloader(self)
    self.downloader.imageReady.connect(self.updateImageData)
def updatedImageSize(self):
    """Grow the widget's minimum size to fit the current image and recentre.

    Called after each new frame so the crosshair tracks the image centre.
    """
    if self.image.size() != self.minimumSize():
        self.setMinimumSize(self.image.size())
        # Integer division: QPoint requires int coordinates, and
        # `width() / 2` is a float under Python 3.
        self.center = QPoint(
            self.image.size().width() // 2, self.image.size().height() // 2
        )
def acquire(self, start=True):
    """Start or stop periodic frame acquisition.

    :param start: when True begin polling at `self.fps`; when False stop.
    """
    self.downloader.setUrl(self.url)
    if start:
        # QTimer.start() takes an integer interval in milliseconds;
        # round rather than pass the float `1000.0 / fps`.
        self.timer.start(round(1000.0 / self.fps))
    else:
        self.timer.stop()
def paintEvent(self, event):
    """Render the frame, selection grid, optional overlay, crosshair and
    scale bar.

    :param event: QPaintEvent with the region to repaint
    """
    tic = time.perf_counter()
    painter = QPainter(self)
    rect = event.rect()
    # Re-scale defensively; updateImageData() already scales to width 646.
    self.image = self.image.scaledToWidth(646)
    painter.drawImage(rect, self.image, rect)
    painter.setPen(QColor.fromRgb(255, 0, 0))
    #painter.drawPoints(self.clicks)
    # Selection rectangle from the drag corners.
    rect = QRect(
        self.start.x(),
        self.start.y(),
        self.end.x() - self.start.x(),
        self.end.y() - self.start.y(),
    )
    painter.setPen(QColor.fromRgb(0, 255, 0))
    painter.drawRect(rect)
    # Now draw the lines for the boxes in the rectangle.
    x1 = self.start.x()
    y1 = self.start.y()
    x2 = self.end.x()
    y2 = self.end.y()
    inc_x = (x2 - x1) / self.xDivs
    inc_y = (y2 - y1) / self.yDivs
    lines = time.perf_counter()
    for i in range(1, self.xDivs):
        painter.drawLine(x1 + i * inc_x, y1, x1 + i * inc_x, y2)
    for i in range(1, self.yDivs):
        painter.drawLine(x1, y1 + i * inc_y, x2, y1 + i * inc_y)
    mid = time.perf_counter()
    # Now draw the color overlay thing if requested
    rects = time.perf_counter()
    if self.color:
        brushColor = QColor(0, 255, 0, 20)
        brush = QBrush(brushColor)
        painter.setBrush(brush)
        painter.setPen(QColor.fromRgb(0, 255, 0))
        for i in range(0, self.xDivs):
            for j in range(0, self.yDivs):
                # Alpha ramps with the column index i.
                alpha = i / self.yDivs * 255
                # NOTE(review): `if True` makes the else branch dead code;
                # the commented-out `j % 2 == 0` suggests an abandoned
                # checkerboard experiment — confirm intent before cleanup.
                if True:# j % 2 == 0:
                    brushColor.setAlpha(alpha / 2)
                    brushColor.setGreen(255)
                else:
                    brushColor.setAlpha(255 / 2)
                    brushColor.setGreen(alpha)
                brush.setColor(brushColor)
                painter.setBrush(brush)
                rect = QRect(x1 + i * inc_x, y1 + j * inc_y, inc_x, inc_y)
                painter.drawRect(rect)
    rects2 = time.perf_counter()
    # Draw the center mark
    painter.setPen(QColor.fromRgb(255, 0, 0))
    painter.drawLine(
        self.center.x() - 20, self.center.y(), self.center.x() + 20, self.center.y()
    )
    painter.drawLine(
        self.center.x(), self.center.y() - 20, self.center.x(), self.center.y() + 20
    )
    # Draw the scale bar
    if self.scaleBar:
        painter.setPen(QColor.fromRgb(40, 40, 40))
        painter.setFont(QFont("Arial", 30))
        scaleRect = QRect(10, 420, 200, 30)
        painter.drawText(scaleRect, Qt.AlignCenter, "10 nm")
        pen = painter.pen()
        pen.setWidth(5)
        painter.setPen(pen)
        painter.drawLine(10, 460, 210, 460)
    toc = time.perf_counter()
    # NOTE(review): debug timing print on every repaint — consider removing
    # or routing through logging for production use.
    print(
        f'Paint time: {toc - tic:0.4f}\tLines: {mid - lines:0.4f}\tRects: {rects2 - rects:0.4f}'
    )
def mousePressEvent(self, event):
    """Begin a selection: record the click and emit its coordinates."""
    pos = event.pos()
    self.roiClicked.emit(pos.x(), pos.y())
    self.clicks.append(pos)
    # Collapse the selection rectangle onto the clicked point; dragging
    # (mouseMoveEvent) then moves `end` away from `start`.
    self.start = pos
    self.end = pos
    self.update()
def mouseMoveEvent(self, event):
    """Track the drag: move the selection's end corner and repaint."""
    self.end = event.pos()
    self.update()
def sizeHint(self):
    """Preferred widget size used before an image defines the real extent."""
    return QSize(400, 400)
def updateImage(self):
    """ Request an updated image asynchronously. """
    # Timer callback; Downloader ignores the call while a request is
    # already in flight.
    self.downloader.downloadData()
def updateImageData(self, image):
    """ Triggered when the new image is ready, update the view.

    :param image: byte buffer holding the raw JPEG data
    """
    self.image.loadFromData(image, 'JPG')
    # Normalise the display width; paintEvent re-scales defensively too.
    self.image = self.image.scaledToWidth(646)
    self.updatedImageSize()
    self.update()
def readFromDict(self, settings):
    """ Read the settings from a Python dict.

    Only keys present in *settings* are applied; missing keys keep the
    current attribute values.
    """
    # dict.has_key() was removed in Python 3 (AttributeError at runtime);
    # use the `in` operator instead.
    if 'url' in settings:
        self.url = settings['url']
    if 'fps' in settings:
        self.fps = settings['fps']
    if 'xDivs' in settings:
        self.xDivs = settings['xDivs']
    if 'yDivs' in settings:
        self.yDivs = settings['yDivs']
    if 'color' in settings:
        self.color = settings['color']
def writeToDict(self):
    """ Serialise this widget's settings into a plain Python dict. """
    # Keys mirror those consumed by readFromDict/readSettings.
    settings = {}
    settings['url'] = self.url
    settings['fps'] = self.fps
    settings['xDivs'] = self.xDivs
    settings['yDivs'] = self.yDivs
    settings['color'] = self.color
    return settings
def readSettings(self, settings):
    """ Read the settings for this microscope instance.

    :param settings: QSettings-like object with value(key, default, type=...)
    """
    # NOTE(review): 'url' is read without type=str — presumably it is always
    # stored as a string; confirm against writeSettings callers.
    self.url = settings.value('url', 'http://localhost:9998/jpg/image.jpg')
    self.fps = settings.value('fps', 5, type=int)
    self.xDivs = settings.value('xDivs', 5, type=int)
    self.yDivs = settings.value('yDivs', 5, type=int)
    self.color = settings.value('color', False, type=bool)
def writeSettings(self, settings):
    """ Write the settings for this microscope instance.

    :param settings: QSettings-like object accepting setValue(key, value)
    """
    settings.setValue('url', self.url)
    settings.setValue('fps', self.fps)
    settings.setValue('xDivs', self.xDivs)
    settings.setValue('yDivs', self.yDivs)
settings.setValue('color', self.color) | microscope/microscope.py | import time
import random
from qtpy.QtCore import Signal, QByteArray, QPoint, QRect, QSize, QTimer, Qt, QObject, QUrl
from qtpy.QtGui import QBrush, QColor, QFont, QImage, QPainter
from qtpy.QtWidgets import QWidget
from qtpy.QtNetwork import QNetworkRequest, QNetworkAccessManager
class Downloader(QObject):
imageReady = Signal(QByteArray)
def __init__(self, parent=None):
super(Downloader, self).__init__(parent)
self.manager = QNetworkAccessManager()
self.url = 'http://localhost:9998/jpg/image.jpg'
self.request = QNetworkRequest()
self.request.setUrl(QUrl(self.url))
self.buffer = QByteArray()
self.reply = None
def setUrl(self, url):
self.url = url
self.request.setUrl(QUrl(self.url))
def downloadData(self):
""" Only request a new image if this is the first/last completed. """
if self.reply is None:
self.reply = self.manager.get(self.request)
self.reply.finished.connect(self.finished)
def finished(self):
""" Read the buffer, emit a signal with the new image in it. """
self.buffer = self.reply.readAll()
self.imageReady.emit(self.buffer)
self.reply.deleteLater()
self.reply = None
class Microscope(QWidget):
roiClicked = Signal(int, int)
def __init__(self, parent=None):
super(Microscope, self).__init__(parent)
self.setMinimumWidth(300)
self.setMinimumHeight(300)
self.image = QImage('image.jpg')
self.clicks = []
self.center = QPoint(
self.image.size().width() / 2, self.image.size().height() / 2
)
self.start = QPoint(0, 0)
self.end = QPoint(1, 1)
self.yDivs = 5
self.xDivs = 5
self.color = False
self.fps = 5
self.scaleBar = False
self.url = 'http://localhost:9998/jpg/image.jpg'
self.timer = QTimer(self)
self.timer.timeout.connect(self.updateImage)
self.downloader = Downloader(self)
self.downloader.imageReady.connect(self.updateImageData)
def updatedImageSize(self):
    """Grow the widget's minimum size to fit the current image and recentre.

    Called after each new frame so the crosshair tracks the image centre.
    """
    if self.image.size() != self.minimumSize():
        self.setMinimumSize(self.image.size())
        # Integer division: QPoint requires int coordinates, and
        # `width() / 2` is a float under Python 3.
        self.center = QPoint(
            self.image.size().width() // 2, self.image.size().height() // 2
        )
def acquire(self, start=True):
    """Start or stop periodic frame acquisition.

    :param start: when True begin polling at `self.fps`; when False stop.
    """
    self.downloader.setUrl(self.url)
    if start:
        # QTimer.start() takes an integer interval in milliseconds;
        # round rather than pass the float `1000.0 / fps`.
        self.timer.start(round(1000.0 / self.fps))
    else:
        self.timer.stop()
def paintEvent(self, event):
tic = time.perf_counter()
painter = QPainter(self)
rect = event.rect()
self.image = self.image.scaledToWidth(646)
painter.drawImage(rect, self.image, rect)
painter.setPen(QColor.fromRgb(255, 0, 0))
#painter.drawPoints(self.clicks)
rect = QRect(
self.start.x(),
self.start.y(),
self.end.x() - self.start.x(),
self.end.y() - self.start.y(),
)
painter.setPen(QColor.fromRgb(0, 255, 0))
painter.drawRect(rect)
# Now draw the lines for the boxes in the rectangle.
x1 = self.start.x()
y1 = self.start.y()
x2 = self.end.x()
y2 = self.end.y()
inc_x = (x2 - x1) / self.xDivs
inc_y = (y2 - y1) / self.yDivs
lines = time.perf_counter()
for i in range(1, self.xDivs):
painter.drawLine(x1 + i * inc_x, y1, x1 + i * inc_x, y2)
for i in range(1, self.yDivs):
painter.drawLine(x1, y1 + i * inc_y, x2, y1 + i * inc_y)
mid = time.perf_counter()
# Now draw the color overlay thing if requested
rects = time.perf_counter()
if self.color:
brushColor = QColor(0, 255, 0, 20)
brush = QBrush(brushColor)
painter.setBrush(brush)
painter.setPen(QColor.fromRgb(0, 255, 0))
for i in range(0, self.xDivs):
for j in range(0, self.yDivs):
alpha = i / self.yDivs * 255
if True:# j % 2 == 0:
brushColor.setAlpha(alpha / 2)
brushColor.setGreen(255)
else:
brushColor.setAlpha(255 / 2)
brushColor.setGreen(alpha)
brush.setColor(brushColor)
painter.setBrush(brush)
rect = QRect(x1 + i * inc_x, y1 + j * inc_y, inc_x, inc_y)
painter.drawRect(rect)
rects2 = time.perf_counter()
# Draw the center mark
painter.setPen(QColor.fromRgb(255, 0, 0))
painter.drawLine(
self.center.x() - 20, self.center.y(), self.center.x() + 20, self.center.y()
)
painter.drawLine(
self.center.x(), self.center.y() - 20, self.center.x(), self.center.y() + 20
)
# Draw the scale bar
if self.scaleBar:
painter.setPen(QColor.fromRgb(40, 40, 40))
painter.setFont(QFont("Arial", 30))
scaleRect = QRect(10, 420, 200, 30)
painter.drawText(scaleRect, Qt.AlignCenter, "10 nm")
pen = painter.pen()
pen.setWidth(5)
painter.setPen(pen)
painter.drawLine(10, 460, 210, 460)
toc = time.perf_counter()
print(
f'Paint time: {toc - tic:0.4f}\tLines: {mid - lines:0.4f}\tRects: {rects2 - rects:0.4f}'
)
def mousePressEvent(self, event):
pos = event.pos()
self.roiClicked.emit(pos.x(), pos.y())
self.clicks.append(pos)
self.start = pos
self.end = pos
self.update()
def mouseMoveEvent(self, event):
self.end = event.pos()
self.update()
def sizeHint(self):
return QSize(400, 400)
def updateImage(self):
""" Request an updated image asynchronously. """
self.downloader.downloadData()
def updateImageData(self, image):
""" Triggered when the new image is ready, update the view. """
self.image.loadFromData(image, 'JPG')
self.image = self.image.scaledToWidth(646)
self.updatedImageSize()
self.update()
def readFromDict(self, settings):
    """ Read the settings from a Python dict.

    Only keys present in *settings* are applied; missing keys keep the
    current attribute values.
    """
    # dict.has_key() was removed in Python 3 (AttributeError at runtime);
    # use the `in` operator instead.
    if 'url' in settings:
        self.url = settings['url']
    if 'fps' in settings:
        self.fps = settings['fps']
    if 'xDivs' in settings:
        self.xDivs = settings['xDivs']
    if 'yDivs' in settings:
        self.yDivs = settings['yDivs']
    if 'color' in settings:
        self.color = settings['color']
def writeToDict(self):
""" Write the widget's settings to a Python dict. """
settings = {
'url': self.url,
'fps': self.fps,
'xDivs': self.xDivs,
'yDivs': self.yDivs,
'color': self.color
}
return settings
def readSettings(self, settings):
""" Read the settings for this microscope instance. """
self.url = settings.value('url', 'http://localhost:9998/jpg/image.jpg')
self.fps = settings.value('fps', 5, type=int)
self.xDivs = settings.value('xDivs', 5, type=int)
self.yDivs = settings.value('yDivs', 5, type=int)
self.color = settings.value('color', False, type=bool)
def writeSettings(self, settings):
""" Write the settings for this microscope instance. """
settings.setValue('url', self.url)
settings.setValue('fps', self.fps)
settings.setValue('xDivs', self.xDivs)
settings.setValue('yDivs', self.yDivs)
settings.setValue('color', self.color) | 0.429429 | 0.275958 |
import re
from typing import Iterator, Optional
from google.cloud import storage
from ..config import get_config_value
from ..key import StairlightConfigKey
from .base import Template, TemplateSource, TemplateSourceType
from .controller import GCS_URI_SCHEME
class GcsTemplate(Template):
    """A SQL template stored as an object in Google Cloud Storage."""

    def __init__(
        self,
        mapping_config: dict,
        key: str,
        bucket: Optional[str] = None,
        project: Optional[str] = None,
        default_table_prefix: Optional[str] = None,
    ):
        super().__init__(
            mapping_config=mapping_config,
            key=key,
            source_type=TemplateSourceType.GCS,
            bucket=bucket,
            project=project,
            default_table_prefix=default_table_prefix,
        )
        # Cache the gs:// URI derived from bucket + key.
        self.uri = self.get_uri()

    def get_uri(self) -> str:
        """Get uri from file path

        Returns:
            str: uri, e.g. "gs://<bucket>/<key>"
        """
        return f"{GCS_URI_SCHEME}{self.bucket}/{self.key}"

    def get_template_str(self) -> str:
        """Get template string that read from a file in GCS

        Returns:
            str: Template string (object contents decoded as UTF-8)
        """
        # credentials=None lets google-auth resolve application-default
        # credentials.
        client = storage.Client(credentials=None, project=self.project)
        bucket = client.get_bucket(self.bucket)
        blob = bucket.blob(self.key)
        return blob.download_as_bytes().decode("utf-8")
class GcsTemplateSource(TemplateSource):
    """Discovers SQL template files stored in a Google Cloud Storage bucket."""

    def __init__(
        self, stairlight_config: dict, mapping_config: dict, source_attributes: dict
    ) -> None:
        super().__init__(
            stairlight_config=stairlight_config, mapping_config=mapping_config
        )
        self.source_attributes = source_attributes
        self.source_type = TemplateSourceType.GCS

    def search_templates(self) -> Iterator[Template]:
        """Search SQL template files from GCS.

        Blobs whose names do not fully match the configured regex, or that
        are explicitly excluded, are skipped.

        Yields:
            Iterator[Template]: SQL template file attributes
        """
        project = get_config_value(
            key=StairlightConfigKey.Gcs.PROJECT_ID,
            target=self.source_attributes,
            fail_if_not_found=False,
            enable_logging=False,
        )
        bucket = get_config_value(
            key=StairlightConfigKey.Gcs.BUCKET_NAME,
            target=self.source_attributes,
            fail_if_not_found=True,
            enable_logging=False,
        )
        default_table_prefix = get_config_value(
            key=StairlightConfigKey.DEFAULT_TABLE_PREFIX,
            target=self.source_attributes,
            fail_if_not_found=False,
            enable_logging=False,
        )
        regex = get_config_value(
            key=StairlightConfigKey.REGEX,
            target=self.source_attributes,
            fail_if_not_found=True,
            enable_logging=False,
        )
        # Compile once before the loop: the original re-wrapped the pattern
        # as rf"{regex}" (a no-op for a str) and re-looked it up per blob.
        pattern = re.compile(regex)
        client = storage.Client(credentials=None, project=project)
        for blob in client.list_blobs(bucket):
            if not pattern.fullmatch(blob.name) or self.is_excluded(
                source_type=self.source_type, key=blob.name
            ):
                self.logger.debug(f"{blob.name} is skipped.")
                continue
            yield GcsTemplate(
                mapping_config=self._mapping_config,
                key=blob.name,
                project=project,
                bucket=bucket,
                default_table_prefix=default_table_prefix,
            )
def get_gcs_blob(gcs_uri: str) -> storage.Blob:
    """Resolve a gs:// URI to a Blob handle (no download is performed)."""
    # Bucket is the first path component after the URI scheme ...
    bucket_name = gcs_uri.replace(GCS_URI_SCHEME, "").split("/")[0]
    # ... and the object key is everything after "<scheme><bucket>/".
    key = gcs_uri.replace(f"{GCS_URI_SCHEME}{bucket_name}/", "")
    client = storage.Client(credentials=None, project=None)
    bucket = client.get_bucket(bucket_name)
return bucket.blob(key) | src/stairlight/source/gcs.py | import re
from typing import Iterator, Optional
from google.cloud import storage
from ..config import get_config_value
from ..key import StairlightConfigKey
from .base import Template, TemplateSource, TemplateSourceType
from .controller import GCS_URI_SCHEME
class GcsTemplate(Template):
def __init__(
self,
mapping_config: dict,
key: str,
bucket: Optional[str] = None,
project: Optional[str] = None,
default_table_prefix: Optional[str] = None,
):
super().__init__(
mapping_config=mapping_config,
key=key,
source_type=TemplateSourceType.GCS,
bucket=bucket,
project=project,
default_table_prefix=default_table_prefix,
)
self.uri = self.get_uri()
def get_uri(self) -> str:
"""Get uri from file path
Returns:
str: uri
"""
return f"{GCS_URI_SCHEME}{self.bucket}/{self.key}"
def get_template_str(self) -> str:
"""Get template string that read from a file in GCS
Returns:
str: Template string
"""
client = storage.Client(credentials=None, project=self.project)
bucket = client.get_bucket(self.bucket)
blob = bucket.blob(self.key)
return blob.download_as_bytes().decode("utf-8")
class GcsTemplateSource(TemplateSource):
def __init__(
self, stairlight_config: dict, mapping_config: dict, source_attributes: dict
) -> None:
super().__init__(
stairlight_config=stairlight_config, mapping_config=mapping_config
)
self.source_attributes = source_attributes
self.source_type = TemplateSourceType.GCS
def search_templates(self) -> Iterator[Template]:
"""Search SQL template files from GCS
Args:
source (dict): Source attributes of SQL template files
Yields:
Iterator[SQLTemplate]: SQL template file attributes
"""
project = get_config_value(
key=StairlightConfigKey.Gcs.PROJECT_ID,
target=self.source_attributes,
fail_if_not_found=False,
enable_logging=False,
)
bucket = get_config_value(
key=StairlightConfigKey.Gcs.BUCKET_NAME,
target=self.source_attributes,
fail_if_not_found=True,
enable_logging=False,
)
default_table_prefix = get_config_value(
key=StairlightConfigKey.DEFAULT_TABLE_PREFIX,
target=self.source_attributes,
fail_if_not_found=False,
enable_logging=False,
)
regex = get_config_value(
key=StairlightConfigKey.REGEX,
target=self.source_attributes,
fail_if_not_found=True,
enable_logging=False,
)
client = storage.Client(credentials=None, project=project)
blobs = client.list_blobs(bucket)
for blob in blobs:
if (
not re.fullmatch(
rf"{regex}",
blob.name,
)
) or self.is_excluded(source_type=self.source_type, key=blob.name):
self.logger.debug(f"{blob.name} is skipped.")
continue
yield GcsTemplate(
mapping_config=self._mapping_config,
key=blob.name,
project=project,
bucket=bucket,
default_table_prefix=default_table_prefix,
)
def get_gcs_blob(gcs_uri: str) -> storage.Blob:
bucket_name = gcs_uri.replace(GCS_URI_SCHEME, "").split("/")[0]
key = gcs_uri.replace(f"{GCS_URI_SCHEME}{bucket_name}/", "")
client = storage.Client(credentials=None, project=None)
bucket = client.get_bucket(bucket_name)
return bucket.blob(key) | 0.818592 | 0.079782 |
from solentware_grid.gui.datadelete import DataDelete
from solentware_misc.gui.exceptionhandler import ExceptionHandler
from pgn_read.core.parser import PGN
from ..core.constants import TAG_OPENING
from .repertoiretoplevel import RepertoireToplevel
from .toplevelpgn import DeletePGN
class RepertoireDbDelete(ExceptionHandler, DeletePGN, DataDelete):
"""Delete PGN text for repertoire from database.
parent is used as the master argument in a RepertoireToplevel call.
ui is used as the ui argument in a RepertoireToplevel call.
parent, oldobject, and the RepertoireToplevel instance created, are used
as arguments in the super.__init__ call.
Attribute pgn_score_name provides the name used in widget titles and
message text.
Methods get_title_for_object and set_item, and properties ui_base_table;
ui_items_in_toplevels; and ui, allow similar methods in various classes
to be expressed identically and defined once.
"""
pgn_score_name = "Repertoire"
def __init__(self, parent, oldobject, ui=None):
    """Extend and create toplevel widget for deleting chess repertoire.

    :param parent: master widget for the RepertoireToplevel
    :param oldobject: record whose PGN text is to be deleted
    :param ui: passed through to RepertoireToplevel
    """
    # Toplevel title set '' in __init__ and to proper value in initialize.
    super().__init__(
        oldobject, parent, RepertoireToplevel(master=parent, ui=ui), ""
    )
    self.initialize()
@property
def ui_base_table(self):
    """Grid of repertoire records in the main user interface."""
    return self.ui.base_repertoires
@property
def ui_items_in_toplevels(self):
    """Games and repertoires currently open in toplevel windows."""
    return self.ui.games_and_repertoires_in_toplevels
@property
def ui(self):
    """Main user interface object, taken from the old view."""
    return self.oldview.ui
def set_item(self, view, object_):
    """Populate *view* with the repertoire held in *object_*.

    Parses the stored PGN text and displays the first game it yields.
    """
    self.set_default_source_for_object(object_)
    view.set_position_analysis_data_source()
    # get_srvalue() returns the stored PGN text; read_games yields parsed
    # games, of which the first is used.
    view.collected_game = next(
        PGN(game_class=view.gameclass).read_games(object_.get_srvalue())
    )
    view.set_and_tag_item_text()
def get_title_for_object(self, object_=None):
    """Return title for Toplevel containing a Repertoire object_.

    Default value of object_ is object attribute from DataDelete class.
    """
    if object_ is None:
        object_ = self.object
    try:
        return " ".join(
            (
                self.pgn_score_name.join(("Delete ", ":")),
                object_.value.collected_game._tags[TAG_OPENING],
            )
        )
    # A missing/unparseable game raises TypeError, a missing Opening tag
    # raises KeyError; both handlers were byte-identical, so collapse the
    # duplicated except blocks into one.
    except (TypeError, KeyError):
        return self.pgn_score_name.join(
            ("Delete ", " - name unknown or invalid")
        )
def set_default_source_for_object(self, object_=None):
    """Set default source for Toplevel containing a Repertoire object_.

    Default value of object_ is object attribute from DataDelete class.
    Intentionally does nothing for repertoires; kept so callers can invoke
    it uniformly across the PGN toplevel classes.
    """
pass | chesstab/gui/repertoiredbdelete.py | from solentware_grid.gui.datadelete import DataDelete
from solentware_misc.gui.exceptionhandler import ExceptionHandler
from pgn_read.core.parser import PGN
from ..core.constants import TAG_OPENING
from .repertoiretoplevel import RepertoireToplevel
from .toplevelpgn import DeletePGN
class RepertoireDbDelete(ExceptionHandler, DeletePGN, DataDelete):
"""Delete PGN text for repertoire from database.
parent is used as the master argument in a RepertoireToplevel call.
ui is used as the ui argument in a RepertoireToplevel call.
parent, oldobject, and the RepertoireToplevel instance created, are used
as arguments in the super.__init__ call.
Attribute pgn_score_name provides the name used in widget titles and
message text.
Methods get_title_for_object and set_item, and properties ui_base_table;
ui_items_in_toplevels; and ui, allow similar methods in various classes
to be expressed identically and defined once.
"""
pgn_score_name = "Repertoire"
def __init__(self, parent, oldobject, ui=None):
"""Extend and create toplevel widget for deleting chess game."""
# Toplevel title set '' in __init__ and to proper value in initialize.
super().__init__(
oldobject, parent, RepertoireToplevel(master=parent, ui=ui), ""
)
self.initialize()
@property
def ui_base_table(self):
return self.ui.base_repertoires
@property
def ui_items_in_toplevels(self):
return self.ui.games_and_repertoires_in_toplevels
@property
def ui(self):
return self.oldview.ui
def set_item(self, view, object_):
self.set_default_source_for_object(object_)
view.set_position_analysis_data_source()
view.collected_game = next(
PGN(game_class=view.gameclass).read_games(object_.get_srvalue())
)
view.set_and_tag_item_text()
def get_title_for_object(self, object_=None):
    """Return title for Toplevel containing a Repertoire object_.

    Default value of object_ is object attribute from DataDelete class.
    """
    if object_ is None:
        object_ = self.object
    try:
        return " ".join(
            (
                self.pgn_score_name.join(("Delete ", ":")),
                object_.value.collected_game._tags[TAG_OPENING],
            )
        )
    # A missing/unparseable game raises TypeError, a missing Opening tag
    # raises KeyError; both handlers were byte-identical, so collapse the
    # duplicated except blocks into one.
    except (TypeError, KeyError):
        return self.pgn_score_name.join(
            ("Delete ", " - name unknown or invalid")
        )
def set_default_source_for_object(self, object_=None):
"""Set default source for Toplevel containing a Repertoire object_.
Default value of object_ is object attribute from DataDelete class.
"""
pass | 0.639961 | 0.347316 |
from PIL import Image
from keras.layers import Dense, Input, Conv2D, LSTM, MaxPool2D, UpSampling2D
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping
from keras.utils import to_categorical
from numpy import argmax, array_equal
import matplotlib.pyplot as plt
from keras.models import Model
from imgaug import augmenters
from random import randint
import pandas as pd
import numpy as np
# Dataset preparation
# Read the Fashion-MNIST training CSV: column 0 is the label, the remaining
# 784 columns are the 28 x 28 pixel values.
train = pd.read_csv("../input/fashion-mnist_train.csv")
train_x = train[list(train.columns)[1:]].values
train_y = train['label'].values
# Normalize pixel values to [0, 1].
train_x = train_x / 255
# Create train and validation datasets (labels are split too but unused:
# an autoencoder reconstructs its own input).
train_x, val_x, train_y, val_y = train_test_split(
    train_x, train_y, test_size=0.2)
# Reshape the inputs to flat 784-dimensional vectors.
train_x = train_x.reshape(-1, 784)
val_x = val_x.reshape(-1, 784)
# Create autoencoder architecture
# Input layer
input_layer = Input(shape=(784,))
# Encoder: 784 -> 1500 -> 1000 -> 500
encode_layer1 = Dense(1500, activation='relu')(input_layer)
encode_layer2 = Dense(1000, activation='relu')(encode_layer1)
encode_layer3 = Dense(500, activation='relu')(encode_layer2)
# Latent view: 10-dimensional bottleneck
latent_view = Dense(10, activation='sigmoid')(encode_layer3)
# Decoder: 10 -> 500 -> 1000 -> 1500
decode_layer1 = Dense(500, activation='relu')(latent_view)
decode_layer2 = Dense(1000, activation='relu')(decode_layer1)
decode_layer3 = Dense(1500, activation='relu')(decode_layer2)
# Output layer: linear 784-dim reconstruction
output_layer = Dense(784)(decode_layer3)
model = Model(input_layer, output_layer)
model.summary()
# Train with MSE reconstruction loss; early stopping monitors val_loss
# (patience 10, so with only 20 epochs it rarely triggers).
model.compile(optimizer='adam', loss='mse')
early_stopping = EarlyStopping(
    monitor='val_loss', min_delta=0, patience=10, verbose=1, mode='auto')
# Note: inputs are also the targets — the model learns to reconstruct.
model.fit(train_x, train_x, epochs=20, batch_size=2048,
          validation_data=(val_x, val_x), callbacks=[early_stopping])
preds = model.predict(val_x)
# Inputs : actual validation images
f, ax = plt.subplots(1, 5)
f.set_size_inches(80, 40)
for i in range(5):
    ax[i].imshow(val_x[i].reshape(28, 28))
plt.show()
# Predicted : autoencoder reconstructions of the same images
f, ax = plt.subplots(1, 5)
f.set_size_inches(80, 40)
for i in range(5):
    ax[i].imshow(preds[i].reshape(28, 28))
plt.show() | modeling/deep_learning/auto_encoder/image_reconstruction.py | from PIL import Image
from keras.layers import Dense, Input, Conv2D, LSTM, MaxPool2D, UpSampling2D
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping
from keras.utils import to_categorical
from numpy import argmax, array_equal
import matplotlib.pyplot as plt
from keras.models import Model
from imgaug import augmenters
from random import randint
import pandas as pd
import numpy as np
# Dataset Prepration
# read dataset
train = pd.read_csv("../input/fashion-mnist_train.csv")
train_x = train[list(train.columns)[1:]].values
train_y = train['label'].values
# normalize and reshape the predictors
train_x = train_x / 255
# create train and validation datasets
train_x, val_x, train_y, val_y = train_test_split(
train_x, train_y, test_size=0.2)
# reshape the inputs
train_x = train_x.reshape(-1, 784)
val_x = val_x.reshape(-1, 784)
# Create Autoencoder architecture
# input layer
input_layer = Input(shape=(784,))
# encoding architecture
encode_layer1 = Dense(1500, activation='relu')(input_layer)
encode_layer2 = Dense(1000, activation='relu')(encode_layer1)
encode_layer3 = Dense(500, activation='relu')(encode_layer2)
# latent view
latent_view = Dense(10, activation='sigmoid')(encode_layer3)
# decoding architecture
decode_layer1 = Dense(500, activation='relu')(latent_view)
decode_layer2 = Dense(1000, activation='relu')(decode_layer1)
decode_layer3 = Dense(1500, activation='relu')(decode_layer2)
# output layer
output_layer = Dense(784)(decode_layer3)
model = Model(input_layer, output_layer)
model.summary()
# early stopping callback
model.compile(optimizer='adam', loss='mse')
early_stopping = EarlyStopping(
monitor='val_loss', min_delta=0, patience=10, verbose=1, mode='auto')
model.fit(train_x, train_x, epochs=20, batch_size=2048,
validation_data=(val_x, val_x), callbacks=[early_stopping])
preds = model.predict(val_x)
# Inputs : Actual Images
f, ax = plt.subplots(1, 5)
f.set_size_inches(80, 40)
for i in range(5):
ax[i].imshow(val_x[i].reshape(28, 28))
plt.show()
# Predicted : Autoencoder Output
f, ax = plt.subplots(1, 5)
f.set_size_inches(80, 40)
for i in range(5):
ax[i].imshow(preds[i].reshape(28, 28))
plt.show() | 0.867134 | 0.492859 |
from __future__ import absolute_import, division, print_function
__author__ = "<NAME>"
__license__ = """Copyright 2017-2019 <NAME> and <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from argparse import ArgumentParser
from os.path import abspath, dirname
import sys
from icecube import dataclasses, icetray, dataio # pylint: disable=unused-import
from I3Tray import I3Tray
if __name__ == "__main__" and __package__ is None:
RETRO_DIR = dirname(dirname(abspath(__file__)))
if RETRO_DIR not in sys.path:
sys.path.append(RETRO_DIR)
from retro import __version__, init_obj
from retro.reco import Reco
def main():
    """Script to run Retro recos in icetray.

    Parses command-line args, builds a Retro ``Reco`` instance from the
    table configuration, then runs an IceTray chain:
    reader -> retro reco -> writer.
    """
    parser = ArgumentParser()
    parser.add_argument(
        "--input-i3-file", type=str,
        required=True,
        nargs="+",
        help="""Input I3 file""",
    )
    parser.add_argument(
        "--output-i3-file", type=str,
        required=True,
        help="""Output I3 file""",
    )
    # init_obj.parse_args splits table-related kwargs from the rest;
    # "other_kw" holds this script's own --input/--output arguments.
    split_kwargs = init_obj.parse_args(dom_tables=True, tdi_tables=True, parser=parser)
    other_kw = split_kwargs.pop("other_kw")
    # instantiate Retro reco object
    my_reco = Reco(**split_kwargs)
    tray = I3Tray()
    tray.AddModule(_type="I3Reader", _name="reader", FilenameList=other_kw["input_i3_file"])
    tray.Add(
        _type=my_reco,
        _name="retro",
        methods="crs_prefit",
        reco_pulse_series_name="SRTTWOfflinePulsesDC",
        seeding_recos=["L5_SPEFit11", "LineFit_DC"],
        triggers=["I3TriggerHierarchy"],
        additional_keys=["L5_oscNext_bool"],
        # Only reconstruct events that passed the L5 oscNext filter.
        filter='event["header"]["L5_oscNext_bool"]',
        point_estimator="median",
    )
    tray.AddModule(
        _type="I3Writer",
        _name="writer",
        # Drop DAQ frames that end up with no physics frames attached.
        DropOrphanStreams=[icetray.I3Frame.DAQ],
        filename=other_kw["output_i3_file"],
    )
    tray.AddModule(_type="TrashCan", _name="GoHomeYouReDrunk")
    tray.Execute()
    tray.Finish()
if __name__ == "__main__":
main() | retro/i3reco.py | from __future__ import absolute_import, division, print_function
__author__ = "<NAME>"
__license__ = """Copyright 2017-2019 <NAME> and <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from argparse import ArgumentParser
from os.path import abspath, dirname
import sys
from icecube import dataclasses, icetray, dataio # pylint: disable=unused-import
from I3Tray import I3Tray
if __name__ == "__main__" and __package__ is None:
RETRO_DIR = dirname(dirname(abspath(__file__)))
if RETRO_DIR not in sys.path:
sys.path.append(RETRO_DIR)
from retro import __version__, init_obj
from retro.reco import Reco
def main():
"""Script to run Retro recos in icetray"""
parser = ArgumentParser()
parser.add_argument(
"--input-i3-file", type=str,
required=True,
nargs="+",
help="""Input I3 file""",
)
parser.add_argument(
"--output-i3-file", type=str,
required=True,
help="""Output I3 file""",
)
split_kwargs = init_obj.parse_args(dom_tables=True, tdi_tables=True, parser=parser)
other_kw = split_kwargs.pop("other_kw")
# instantiate Retro reco object
my_reco = Reco(**split_kwargs)
tray = I3Tray()
tray.AddModule(_type="I3Reader", _name="reader", FilenameList=other_kw["input_i3_file"])
tray.Add(
_type=my_reco,
_name="retro",
methods="crs_prefit",
reco_pulse_series_name="SRTTWOfflinePulsesDC",
seeding_recos=["L5_SPEFit11", "LineFit_DC"],
triggers=["I3TriggerHierarchy"],
additional_keys=["L5_oscNext_bool"],
filter='event["header"]["L5_oscNext_bool"]',
point_estimator="median",
)
tray.AddModule(
_type="I3Writer",
_name="writer",
DropOrphanStreams=[icetray.I3Frame.DAQ],
filename=other_kw["output_i3_file"],
)
tray.AddModule(_type="TrashCan", _name="GoHomeYouReDrunk")
tray.Execute()
tray.Finish()
if __name__ == "__main__":
main() | 0.675444 | 0.103794 |
import logging
import math
import random
import re
from contextlib import contextmanager
from datetime import datetime, timedelta, timezone
from typing import Optional, Tuple
from sqlalchemy import create_engine
from sqlalchemy.orm import Session, sessionmaker
from telegram import (Bot, ChatPermissions, InlineKeyboardButton,
InlineKeyboardMarkup, User)
from telegram.ext import (CallbackQueryHandler, CommandHandler, Filters, Job,
MessageHandler, Updater)
from telegram.update import Update
from telegram.utils.request import Request
from config.base import BaseConfig
from . import messages
from .models import QuizPass, create_quizpass, get_active_quizpass, init_models
from .questions import load_questions
# Process-wide logging setup: INFO level with
# "[timestamp : logger-name : level] message" formatting.
logging.basicConfig(
    level=logging.INFO,
    format='[%(asctime)s : %(name)s : %(levelname)s] %(message)s',
)
class GateBot:
"""
The main class of the bot.
"""
def __init__(self, config: BaseConfig) -> None:
    """Wire up the bot: Telegram updater, DB session factory, question pool.

    :param config: configuration object (token, group id, quiz settings).
    """
    self.config = config
    self.logger = logging.getLogger('gatebot')
    self.updater = self._init_updater()
    self.db_sessionmaker = self._init_db_sessionmaker()
    # Full question pool; each quiz samples QUESTIONS_PER_QUIZ from it.
    self.questions = load_questions(self.config.QUESTIONS_FILE)
def _init_updater(self) -> Updater:
    """Build the Bot/Updater pair and register all handlers.

    Honors ``PROXY_URL`` from the config when set.

    :returns: a fully wired ``telegram.ext.Updater``.
    """
    # Build the request kwargs once instead of duplicating the whole
    # Request/Bot construction in both branches of the proxy check.
    request_kwargs = {"con_pool_size": 8}
    if self.config.PROXY_URL:
        request_kwargs["proxy_url"] = self.config.PROXY_URL
    request = Request(**request_kwargs)
    bot = Bot(self.config.BOT_TOKEN, request=request)
    updater = Updater(
        bot=bot,
        request_kwargs={
            "read_timeout": 6,
            "connect_timeout": 7,
        },
    )
    dispatcher = updater.dispatcher
    # Service-message handlers (joins / leaves).
    dispatcher.add_handler(
        MessageHandler(
            Filters.status_update.new_chat_members,
            self.new_chat_members))
    dispatcher.add_handler(
        MessageHandler(
            Filters.status_update.left_chat_member,
            self.left_chat_member))
    # Inline-button callbacks and chat commands.
    dispatcher.add_handler(CallbackQueryHandler(self.callback_query))
    dispatcher.add_handler(CommandHandler('start', self.command_start))
    dispatcher.add_handler(CommandHandler('kick', self.command_kick))
    dispatcher.add_handler(CommandHandler('kickme', self.command_kickme))
    dispatcher.add_handler(CommandHandler('ban', self.command_ban))
    return updater
def _init_db_sessionmaker(self) -> sessionmaker:
    """Create the SQLAlchemy engine, initialize models, return a session factory."""
    engine = create_engine(self.config.SQLALCHEMY_URL)
    # Make sure all tables exist before any session is handed out.
    init_models(engine)
    return sessionmaker(bind=engine)
def _escape_html(self, s: str) -> str:
return s.replace("<", "<").replace(">", ">")
def _display_user(self, id, first_name) -> str:
"""Returns an HTML link to the user with the given id and first name."""
return (
f'<a href="tg://user?id={id}">'
f'{self._escape_html(first_name)}'
'</a>')
def _log_user(self, user: User) -> str:
"""
Returns a string represention of the user to be used in logs.
"""
return f"{user.first_name} (id={user.id})"
@contextmanager
def db_session(self):
    """
    Starts a DB session. Commits it after the nested code is finished, unless
    it raises an exception, in which case rolls back.

    The session is always closed, whatever the outcome.
    """
    session = self.db_sessionmaker()
    try:
        yield session
        # Success path: persist everything done inside the `with` body.
        session.commit()
    except Exception:
        # Any error in the body aborts the transaction and is re-raised.
        session.rollback()
        raise
    finally:
        session.close()

def run(self) -> None:
    """
    Runs the bot. This method blocks until interrupted by a signal.

    NOTE(review): Updater.start_polling() itself returns immediately;
    the blocking presumably happens in the surrounding process -- confirm
    whether an Updater.idle() call is expected on the caller side.
    """
    self.logger.info("GateBot started")
    self.logger.info("Loaded questions: %s", len(self.questions))
    self.updater.start_polling()
def new_chat_members(self, bot: Bot, update: Update) -> None:
    """
    Handles user join event: mutes newcomers until they pass the quiz and
    schedules a delayed kick for users who never start it.
    """
    with self.db_session() as session:
        for member in update.message.new_chat_members:
            self.logger.info(
                "New user joined: %s", self._log_user(member))
            quizpass = get_active_quizpass(session, member.id)
            # Returning users that already passed keep their voice.
            allowed_to_chat = quizpass and \
                quizpass.is_finished and \
                quizpass.has_passed
            if not allowed_to_chat:
                bot.restrict_chat_member(
                    chat_id=update.message.chat.id,
                    user_id=member.id,
                    permissions=ChatPermissions(
                        # BUG FIX: the Bot API field is `can_send_messages`
                        # (plural); the misspelled `can_send_message` kwarg
                        # made ChatPermissions raise TypeError, so newcomers
                        # were never actually muted.
                        can_send_messages=False,
                        can_send_media_messages=False,
                        can_send_other_messages=False,
                        can_add_web_page_previews=False,
                        can_send_polls=False,
                        can_change_info=False,
                        can_invite_users=False,
                        can_pin_messages=False,
                    ),
                )
                # Kick later if the quiz is never started.
                self.updater.job_queue.run_once(
                    self.job_kick_if_inactive,
                    when=self.config.KICK_INACTIVE_AFTER,
                    context=member.id)
            if self.config.DELETE_JOIN_MESSAGES:
                bot.delete_message(
                    chat_id=update.message.chat.id,
                    message_id=update.message.message_id,
                )
def left_chat_member(self, bot: Bot, update: Update) -> None:
    """
    Handles user leaving event.
    """
    self.logger.info(
        "User left: %s", self._log_user(update.message.left_chat_member))
    # Optionally keep the group clean of "user left" service messages.
    if self.config.DELETE_LEAVE_MESSAGES:
        bot.delete_message(
            chat_id=update.message.chat.id,
            message_id=update.message.message_id,
        )

def job_kick_if_inactive(self, bot: Bot, job: Job):
    """
    A background job, executed with a delay from the python-telegram-bot's job queue.
    Expects job context to be a user id.
    When executed, kicks the user if they haven't started the quiz yet.
    """
    with self.db_session() as session:
        user_id = job.context
        quizpass = get_active_quizpass(session, user_id)
        if not quizpass:
            self.logger.info(
                "User (id=%s) was kicked for not starting the quiz",
                user_id)
            bot.kick_chat_member(
                chat_id=self.config.GROUP_ID,
                user_id=user_id)
            # Unban right away so the user may rejoin and try the quiz.
            bot.unban_chat_member(
                chat_id=self.config.GROUP_ID,
                user_id=user_id)
def command_start(self, bot: Bot, update: Update) -> None:
    """
    Handles /start command (private chat only): shows the quiz intro with a
    "Start the quiz" button if the user is currently allowed to take it.
    """
    if update.message.chat.id != update.message.from_user.id:
        # Ignore commands sent not in pm
        return
    self.logger.info(
        "/start command sent by %s",
        self._log_user(update.message.from_user))
    with self.db_session() as session:
        # _on_start_quiz reports pass/cool-down status to the user itself.
        if not self._on_start_quiz(
                session, bot, update.message.from_user.id):
            return
        bot.send_message(
            chat_id=update.message.chat.id,
            text=messages.GETTING_STARTED.format(
                questions_total=self.config.QUESTIONS_PER_QUIZ,
                answers_required=self.config.CORRECT_ANSWERS_REQUIRED,
            ),
            parse_mode="HTML",
            reply_markup=InlineKeyboardMarkup([[
                InlineKeyboardButton(
                    "Start the quiz", callback_data="start_quiz"),
            ]]),
        )
def _get_target(self, update: Update) -> Optional[Tuple[int, str]]:
"""
Returns the user targetted by the command. A user can be targetted either
by a mention in the message (/cmd @user), by reply to one of their messages or
by passing their ID after the command (/cmd 1234).
Returns the ID and the first name of the user or None if no target was found.
"""
if update.message.entities:
for entity in update.message.entities:
if entity.user:
return entity.user.id, entity.user.first_name
if update.message.reply_to_message:
user = update.message.reply_to_message.from_user
return user.id, user.first_name
try:
command, args = update.message.text.split(" ", 1)
return int(args), args
except (TypeError, ValueError):
pass
return None
def _is_admin(self, bot: Bot, user_id: int) -> bool:
"""
Returns True of the user is an admin of the group chat.
"""
chat_member = bot.get_chat_member(self.config.GROUP_ID, user_id)
return chat_member.status in ['creator', 'admin']
def command_kick(self, bot: Bot, update: Update) -> None:
    """
    Handles /kick admin command.
    Removes the user from the group and clears their quiz score so that
    they have to pass the quiz again on rejoin.
    Usage:
    /kick @user_mention
    By a mention.
    /kick 1234
    By user id.
    /kick
    By reply to a message.
    """
    if not self._is_admin(bot, update.message.from_user.id):
        bot.send_message(
            chat_id=self.config.GROUP_ID,
            text=messages.UNAUTHORIZED,
            parse_mode="HTML",
            reply_to_message_id=update.message.message_id,
        )
        return
    self.logger.info(
        "/kick command sent by %s",
        self._log_user(update.message.from_user))
    target = self._get_target(update)
    if not target:
        bot.send_message(
            chat_id=self.config.GROUP_ID,
            text=messages.NO_TARGET,
            parse_mode="HTML",
            reply_to_message_id=update.message.message_id,
        )
        return
    target_id, target_name = target
    # kick + unban = "remove but allow rejoin" in the Bot API.
    bot.kick_chat_member(
        chat_id=self.config.GROUP_ID,
        user_id=target_id)
    bot.unban_chat_member(
        chat_id=self.config.GROUP_ID,
        user_id=target_id)
    bot.send_message(
        chat_id=self.config.GROUP_ID,
        text=messages.KICKED.format(user=self._display_user(target_id, target_name)),
        parse_mode="HTML",
        reply_to_message_id=update.message.message_id,
    )
    # Drop any quiz progress so the quiz must be re-taken on rejoin.
    with self.db_session() as session:
        quizpass = get_active_quizpass(session, target_id)
        if quizpass:
            session.delete(quizpass)

def command_kickme(self, bot: Bot, update: Update) -> None:
    """
    Handles /kickme command.
    Kicks the user who ran the command the same way /kick does.
    Can be executed by anybody.
    """
    self.logger.info(
        "/kickme command sent by %s",
        self._log_user(update.message.from_user))
    target = update.message.from_user
    bot.kick_chat_member(
        chat_id=self.config.GROUP_ID,
        user_id=target.id)
    bot.unban_chat_member(
        chat_id=self.config.GROUP_ID,
        user_id=target.id)
    bot.send_message(
        chat_id=self.config.GROUP_ID,
        text=messages.KICKED.format(user=self._display_user(target.id, target.first_name)),
        parse_mode="HTML",
        reply_to_message_id=update.message.message_id,
    )
    # Same as /kick: quiz progress is cleared for a fresh rejoin.
    with self.db_session() as session:
        quizpass = get_active_quizpass(session, target.id)
        if quizpass:
            session.delete(quizpass)

def command_ban(self, bot: Bot, update: Update) -> None:
    """
    Handles /ban admin command.
    Permanently bans the user.
    Usage:
    /ban @user_mention
    By a mention.
    /ban 1234
    By user id.
    /ban
    By reply to a message.
    """
    if not self._is_admin(bot, update.message.from_user.id):
        bot.send_message(
            chat_id=self.config.GROUP_ID,
            text=messages.UNAUTHORIZED,
            parse_mode="HTML",
            reply_to_message_id=update.message.message_id,
        )
        return
    self.logger.info(
        "/ban command sent by %s",
        self._log_user(update.message.from_user))
    target = self._get_target(update)
    if not target:
        bot.send_message(
            chat_id=self.config.GROUP_ID,
            text=messages.NO_TARGET,
            parse_mode="HTML",
            reply_to_message_id=update.message.message_id,
        )
        return
    target_id, target_name = target
    # Kick WITHOUT a following unban -> permanent ban.
    bot.kick_chat_member(
        chat_id=self.config.GROUP_ID,
        user_id=target_id)
    bot.send_message(
        chat_id=self.config.GROUP_ID,
        text=messages.BANNED.format(user=self._display_user(target_id, target_name)),
        parse_mode="HTML",
        reply_to_message_id=update.message.message_id,
    )
    with self.db_session() as session:
        quizpass = get_active_quizpass(session, target_id)
        if quizpass:
            session.delete(quizpass)
def callback_query(self, bot: Bot, update: Update) -> None:
"""
Handles callback queries from the inline buttons.
"""
answer_re = re.compile(r'^answer_(\d+)$')
answer_match = answer_re.match(update.callback_query.data)
if update.callback_query.data == "ignore":
self.callback_query_ignore(bot, update)
elif update.callback_query.data == "start_quiz":
self.callback_query_start_quiz(bot, update)
elif update.callback_query.data == "next":
self.callback_query_next(bot, update)
elif update.callback_query.data == "prev":
self.callback_query_prev(bot, update)
elif answer_match:
self.callback_query_answer(bot, update, int(answer_match.group(1)))
elif update.callback_query.data == "share_result":
self.callback_query_share_result(bot, update)
else:
self.callback_query_unknown(bot, update)
def callback_query_unknown(self, bot: Bot, update: Update) -> None:
    """
    Handles invalid callback query: logs it and acknowledges it so the
    client's loading spinner stops.
    """
    self.logger.info(
        "Unknown callback query '%s' from %s",
        update.callback_query.data,
        self._log_user(update.callback_query.from_user))
    bot.answer_callback_query(
        callback_query_id=update.callback_query.id,
    )

def callback_query_ignore(self, bot: Bot, update: Update) -> None:
    """
    Handles "ignore" callback query. Does not do anything.
    "ignore" callback query is used on inline buttons that don't do anything.
    """
    # Still must be acknowledged or the client shows a spinner forever.
    bot.answer_callback_query(
        callback_query_id=update.callback_query.id,
    )
def callback_query_start_quiz(self, bot: Bot, update: Update) -> None:
    """
    Handles "start_quiz" callback query.
    Resumes the user's unfinished quiz if one exists, otherwise generates
    a fresh one, then renders it into the originating message.
    """
    self.logger.info(
        "Callback query 'start_quiz' from %s",
        self._log_user(update.callback_query.from_user))
    bot.answer_callback_query(
        callback_query_id=update.callback_query.id,
    )
    with self.db_session() as session:
        user_id = update.callback_query.from_user.id
        # _on_start_quiz reports pass/cool-down status to the user itself.
        if not self._on_start_quiz(session, bot, user_id):
            return
        quizpass = get_active_quizpass(session, user_id)
        # Refactor: the original duplicated the _display_quizpass call in
        # both the "resume" and "fresh quiz" paths; behavior is identical.
        if not (quizpass and not quizpass.is_finished):
            quizpass = self._generate_quizpass(session, user_id)
        self._display_quizpass(
            bot,
            update.callback_query.message.message_id,
            user_id,
            quizpass,
        )
def callback_query_next(self, bot: Bot, update: Update) -> None:
    """
    Handles "next" callback query.
    Edits the message to display the next question in the quiz.
    """
    self.logger.info(
        "Callback query 'next' from %s",
        self._log_user(update.callback_query.from_user))
    bot.answer_callback_query(
        callback_query_id=update.callback_query.id,
    )
    with self.db_session() as session:
        quizpass = get_active_quizpass(
            session, update.callback_query.from_user.id)
        if not quizpass:
            # Stale button: no active quiz, nothing to navigate.
            return
        quizpass.move_to_next()
        session.commit()
        self._display_quizpass(
            bot,
            update.callback_query.message.message_id,
            update.callback_query.from_user.id,
            quizpass,
        )

def callback_query_prev(self, bot: Bot, update: Update) -> None:
    """
    Handles "prev" callback query.
    Edits the message to display the prev question in the quiz.
    """
    self.logger.info(
        "Callback query 'prev' from %s",
        self._log_user(update.callback_query.from_user))
    bot.answer_callback_query(
        callback_query_id=update.callback_query.id,
    )
    with self.db_session() as session:
        quizpass = get_active_quizpass(
            session, update.callback_query.from_user.id)
        if not quizpass:
            # Stale button: no active quiz, nothing to navigate.
            return
        quizpass.move_to_prev()
        session.commit()
        self._display_quizpass(
            bot,
            update.callback_query.message.message_id,
            update.callback_query.from_user.id,
            quizpass,
        )
def callback_query_answer(
        self, bot: Bot, update: Update, answer: int) -> None:
    """
    Handles "answer_N" callback query.
    Answers the currently selected question with the answer with index N
    and re-renders the quiz. When the quiz finishes, either lifts the
    chat restrictions (pass) or reports the retry delay (fail).
    """
    self.logger.info(
        "Callback query 'answer_%s' from %s",
        answer,
        self._log_user(update.callback_query.from_user))
    bot.answer_callback_query(
        callback_query_id=update.callback_query.id,
    )
    with self.db_session() as session:
        quizpass = get_active_quizpass(
            session, update.callback_query.from_user.id)
        if not quizpass:
            return
        # Only the first answer for each question counts.
        if not quizpass.current_item.is_answered:
            quizpass.current_item.set_answer(answer)
            session.commit()
        self._display_quizpass(
            bot,
            update.callback_query.message.message_id,
            update.callback_query.from_user.id,
            quizpass,
        )
        if quizpass.is_finished:
            if quizpass.has_passed:
                bot.send_message(
                    chat_id=update.callback_query.from_user.id,
                    text=messages.PASSED.format(
                        result=quizpass.correct_given,
                        total=len(quizpass.quizitems),
                    ),
                    parse_mode="HTML",
                    reply_markup=InlineKeyboardMarkup([[
                        InlineKeyboardButton(
                            "Share the result",
                            callback_data="share_result",
                        ),
                    ]]),
                )
                # May fail if the user is admin
                bot.restrict_chat_member(
                    chat_id=self.config.GROUP_ID,
                    user_id=update.callback_query.from_user.id,
                    permissions=ChatPermissions(
                        # BUG FIX: the Bot API field is `can_send_messages`
                        # (plural); the misspelled kwarg made ChatPermissions
                        # raise TypeError, so passed users stayed muted.
                        can_send_messages=True,
                        can_send_media_messages=True,
                        can_send_other_messages=True,
                        can_add_web_page_previews=True,
                        can_send_polls=True,
                        can_change_info=True,
                        can_invite_users=True,
                        can_pin_messages=True,
                    ),
                )
            else:
                bot.send_message(
                    chat_id=update.callback_query.from_user.id,
                    text=messages.FAILED.format(
                        result=quizpass.correct_given,
                        total=len(quizpass.quizitems),
                        required=quizpass.correct_required,
                        wait_hours=self.config.WAIT_HOURS_ON_FAIL,
                    ),
                    parse_mode="HTML",
                )
def callback_query_share_result(self, bot: Bot, update: Update) -> None:
    """
    Handles "share_result" callback query.
    Sends user's score to the group chat (at most once per quiz pass).
    """
    self.logger.info(
        "Callback query 'share_result' from %s",
        self._log_user(update.callback_query.from_user))
    bot.answer_callback_query(
        callback_query_id=update.callback_query.id,
    )
    with self.db_session() as session:
        quizpass = get_active_quizpass(
            session, update.callback_query.from_user.id)
        if not quizpass:
            return
        # Only a finished, passed, not-yet-shared result may be announced.
        can_share = quizpass and \
            quizpass.is_finished and \
            quizpass.has_passed and\
            not quizpass.result_shared
        if not can_share:
            return
        bot.send_message(
            chat_id=self.config.GROUP_ID,
            text=messages.RESULT_SHARE.format(
                user=self._display_user(
                    update.callback_query.from_user.id,
                    update.callback_query.from_user.first_name,
                ),
                result=quizpass.correct_given,
                total=len(quizpass.quizitems),
            ),
            parse_mode="HTML",
        )
        # Mark as shared so the button cannot spam the group.
        quizpass.result_shared = True
        session.commit()
def _generate_quizpass(self, session: Session, user_id: int) -> QuizPass:
    """Create and persist a fresh quiz pass for *user_id*.

    Questions are drawn uniformly at random, without replacement, from
    the loaded pool.

    :returns: the created ``QuizPass``.
    """
    sampled = random.sample(self.questions, self.config.QUESTIONS_PER_QUIZ)
    return create_quizpass(
        session, user_id, sampled, self.config.CORRECT_ANSWERS_REQUIRED)
def _on_start_quiz(
        self, session: Session, bot: Bot, user_id: int) -> bool:
    """
    Checks if user can start/restart quiz. If they can, returns True.
    If they can't, sends appropriate message to the user and returns False.
    """
    quizpass = get_active_quizpass(session, user_id)
    if quizpass and quizpass.is_finished:
        if quizpass.has_passed:
            # User has passed.
            bot.send_message(
                chat_id=user_id,
                text=messages.PASSED.format(
                    result=quizpass.correct_given,
                    total=len(quizpass.quizitems),
                ),
                parse_mode="HTML",
                reply_markup=InlineKeyboardMarkup([[
                    InlineKeyboardButton(
                        "Share the result",
                        callback_data="share_result",
                    ),
                ]]),
            )
            return False
        else:
            # Timezone-aware "now" so it compares with stored timestamps.
            now = datetime.utcnow().replace(tzinfo=timezone.utc)
            # Time since last answer
            time_passed = now - quizpass.last_answer_at
            # Time user has to wait after fail
            time_has_to_pass = timedelta(
                hours=self.config.WAIT_HOURS_ON_FAIL)
            # User failed and hasn't waited enough time.
            if time_passed < time_has_to_pass:
                wait_seconds = (time_has_to_pass - time_passed)\
                    .total_seconds()
                # Round the remaining wait up to whole hours for display.
                wait_hours = int(math.ceil(wait_seconds / 3600))
                bot.send_message(
                    chat_id=user_id,
                    text=messages.FAILED.format(
                        result=quizpass.correct_given,
                        total=len(quizpass.quizitems),
                        required=quizpass.correct_required,
                        wait_hours=wait_hours,
                    ),
                    parse_mode="HTML")
                return False
    # No finished quiz, or the cool-down has elapsed: quiz may start.
    return True
def _display_quizpass(
        self,
        bot: Bot,
        message_id: int,
        user_id: int,
        quizpass: QuizPass,
) -> None:
    """
    Edits the given message to display current question in the given
    quizpass.
    """
    item = quizpass.current_item
    # NOTE(review): question/option text goes out with parse_mode=HTML
    # and is not escaped here -- assumes the question bank is HTML-safe;
    # confirm against the questions file.
    text = f"{item.text}\n\n"
    for option in item.options:
        text += f"{option.index}. {option.text}\n"
    if item.is_answered:
        text += "\n"
        if item.is_answered_correctly:
            text += "Correct.\n"
        else:
            text += "Wrong.\n"
    text = text.strip()
    # One button per option, labelled and keyed by option index.
    ans_buttons = []
    for ix in range(len(item.options)):
        ans_buttons.append(InlineKeyboardButton(
            str(ix), callback_data=f"answer_{ix}",
        ))
    nav_buttons = [
        InlineKeyboardButton("<", callback_data="prev"),
        InlineKeyboardButton(
            f"{item.index + 1}/{self.config.QUESTIONS_PER_QUIZ}",
            callback_data="ignore",
        ),
        InlineKeyboardButton(">", callback_data="next"),
    ]
    # Answer buttons disappear once the question has been answered.
    if item.is_answered:
        keyboard = InlineKeyboardMarkup([
            nav_buttons,
        ])
    else:
        keyboard = InlineKeyboardMarkup([
            ans_buttons,
            nav_buttons,
        ])
    bot.edit_message_text(
        chat_id=user_id,
        message_id=message_id,
        text=text,
        parse_mode="HTML",
        reply_markup=keyboard,
) | gatebot/bot.py | import logging
import math
import random
import re
from contextlib import contextmanager
from datetime import datetime, timedelta, timezone
from typing import Optional, Tuple
from sqlalchemy import create_engine
from sqlalchemy.orm import Session, sessionmaker
from telegram import (Bot, ChatPermissions, InlineKeyboardButton,
InlineKeyboardMarkup, User)
from telegram.ext import (CallbackQueryHandler, CommandHandler, Filters, Job,
MessageHandler, Updater)
from telegram.update import Update
from telegram.utils.request import Request
from config.base import BaseConfig
from . import messages
from .models import QuizPass, create_quizpass, get_active_quizpass, init_models
from .questions import load_questions
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s : %(name)s : %(levelname)s] %(message)s',
)
class GateBot:
"""
The main class of the bot.
"""
def __init__(self, config: BaseConfig) -> None:
self.config = config
self.logger = logging.getLogger('gatebot')
self.updater = self._init_updater()
self.db_sessionmaker = self._init_db_sessionmaker()
self.questions = load_questions(self.config.QUESTIONS_FILE)
def _init_updater(self) -> Updater:
if self.config.PROXY_URL:
request = Request(con_pool_size=8, proxy_url=self.config.PROXY_URL)
bot = Bot(self.config.BOT_TOKEN, request=request)
else:
request = Request(con_pool_size=8)
bot = Bot(self.config.BOT_TOKEN, request=request)
updater = Updater(
bot=bot,
request_kwargs={
"read_timeout": 6,
"connect_timeout": 7,
},
)
dispatcher = updater.dispatcher
dispatcher.add_handler(
MessageHandler(
Filters.status_update.new_chat_members,
self.new_chat_members))
dispatcher.add_handler(
MessageHandler(
Filters.status_update.left_chat_member,
self.left_chat_member))
dispatcher.add_handler(CallbackQueryHandler(self.callback_query))
dispatcher.add_handler(CommandHandler('start', self.command_start))
dispatcher.add_handler(CommandHandler('kick', self.command_kick))
dispatcher.add_handler(CommandHandler('kickme', self.command_kickme))
dispatcher.add_handler(CommandHandler('ban', self.command_ban))
return updater
def _init_db_sessionmaker(self) -> sessionmaker:
engine = create_engine(self.config.SQLALCHEMY_URL)
init_models(engine)
sm = sessionmaker(bind=engine)
return sm
def _escape_html(self, s: str) -> str:
return s.replace("<", "<").replace(">", ">")
def _display_user(self, id, first_name) -> str:
"""Returns an HTML link to the user with the given id and first name."""
return (
f'<a href="tg://user?id={id}">'
f'{self._escape_html(first_name)}'
'</a>')
def _log_user(self, user: User) -> str:
"""
Returns a string represention of the user to be used in logs.
"""
return f"{user.first_name} (id={user.id})"
@contextmanager
def db_session(self):
"""
Starts a DB session. Commits it after the nested code is finished, unless
it raises an exception, in which case rolls back.
"""
session = self.db_sessionmaker()
try:
yield session
session.commit()
except Exception:
session.rollback()
raise
finally:
session.close()
def run(self) -> None:
"""
Runs the bot. This method blocks until interrupted by a signal.
"""
self.logger.info("GateBot started")
self.logger.info("Loaded questions: %s", len(self.questions))
self.updater.start_polling()
def new_chat_members(self, bot: Bot, update: Update) -> None:
"""
Handles user join event.
"""
with self.db_session() as session:
for member in update.message.new_chat_members:
self.logger.info(
"New user joined: %s", self._log_user(member))
quizpass = get_active_quizpass(session, member.id)
allowed_to_chat = quizpass and \
quizpass.is_finished and \
quizpass.has_passed
if not allowed_to_chat:
bot.restrict_chat_member(
chat_id=update.message.chat.id,
user_id=member.id,
permissions=ChatPermissions(
can_send_message=False,
can_send_media_messages=False,
can_send_other_messages=False,
can_add_web_page_previews=False,
can_send_polls=False,
can_change_info=False,
can_invite_users=False,
can_pin_messages=False,
),
)
self.updater.job_queue.run_once(
self.job_kick_if_inactive,
when=self.config.KICK_INACTIVE_AFTER,
context=member.id)
if self.config.DELETE_JOIN_MESSAGES:
bot.delete_message(
chat_id=update.message.chat.id,
message_id=update.message.message_id,
)
def left_chat_member(self, bot: Bot, update: Update) -> None:
"""
Handles user leaving event.
"""
self.logger.info(
"User left: %s", self._log_user(update.message.left_chat_member))
if self.config.DELETE_LEAVE_MESSAGES:
bot.delete_message(
chat_id=update.message.chat.id,
message_id=update.message.message_id,
)
def job_kick_if_inactive(self, bot: Bot, job: Job):
"""
A background job, executed with a delay from the python-telegram-bot's job queue.
Expects job context to a user id.
When executed, kicks the user if they haven't started the quiz yet.
"""
with self.db_session() as session:
user_id = job.context
quizpass = get_active_quizpass(session, user_id)
if not quizpass:
self.logger.info(
"User (id=%s) was kicked for not starting the quiz",
user_id)
bot.kick_chat_member(
chat_id=self.config.GROUP_ID,
user_id=user_id)
bot.unban_chat_member(
chat_id=self.config.GROUP_ID,
user_id=user_id)
def command_start(self, bot: Bot, update: Update) -> None:
"""
Handles /start command.
"""
if update.message.chat.id != update.message.from_user.id:
# Ignore commands sent not in pm
return
self.logger.info(
"/start command sent by %s",
self._log_user(update.message.from_user))
with self.db_session() as session:
if not self._on_start_quiz(
session, bot, update.message.from_user.id):
return
bot.send_message(
chat_id=update.message.chat.id,
text=messages.GETTING_STARTED.format(
questions_total=self.config.QUESTIONS_PER_QUIZ,
answers_required=self.config.CORRECT_ANSWERS_REQUIRED,
),
parse_mode="HTML",
reply_markup=InlineKeyboardMarkup([[
InlineKeyboardButton(
"Start the quiz", callback_data="start_quiz"),
]]),
)
def _get_target(self, update: Update) -> Optional[Tuple[int, str]]:
"""
Returns the user targetted by the command. A user can be targetted either
by a mention in the message (/cmd @user), by reply to one of their messages or
by passing their ID after the command (/cmd 1234).
Returns the ID and the first name of the user or None if no target was found.
"""
if update.message.entities:
for entity in update.message.entities:
if entity.user:
return entity.user.id, entity.user.first_name
if update.message.reply_to_message:
user = update.message.reply_to_message.from_user
return user.id, user.first_name
try:
command, args = update.message.text.split(" ", 1)
return int(args), args
except (TypeError, ValueError):
pass
return None
def _is_admin(self, bot: Bot, user_id: int) -> bool:
"""
Returns True of the user is an admin of the group chat.
"""
chat_member = bot.get_chat_member(self.config.GROUP_ID, user_id)
return chat_member.status in ['creator', 'admin']
def command_kick(self, bot: Bot, update: Update) -> None:
"""
Handles /kick admin command.
Removes the user from the group and clears their quiz score so that
they have to pass the quiz again on rejoin.
Usage:
/kick @user_mention
By a mention.
/kick 1234
By user id.
/kick
By reply to a message.
"""
if not self._is_admin(bot, update.message.from_user.id):
bot.send_message(
chat_id=self.config.GROUP_ID,
text=messages.UNAUTHORIZED,
parse_mode="HTML",
reply_to_message_id=update.message.message_id,
)
return
self.logger.info(
"/kick command sent by %s",
self._log_user(update.message.from_user))
target = self._get_target(update)
if not target:
bot.send_message(
chat_id=self.config.GROUP_ID,
text=messages.NO_TARGET,
parse_mode="HTML",
reply_to_message_id=update.message.message_id,
)
return
target_id, target_name = target
bot.kick_chat_member(
chat_id=self.config.GROUP_ID,
user_id=target_id)
bot.unban_chat_member(
chat_id=self.config.GROUP_ID,
user_id=target_id)
bot.send_message(
chat_id=self.config.GROUP_ID,
text=messages.KICKED.format(user=self._display_user(target_id, target_name)),
parse_mode="HTML",
reply_to_message_id=update.message.message_id,
)
with self.db_session() as session:
quizpass = get_active_quizpass(session, target_id)
if quizpass:
session.delete(quizpass)
def command_kickme(self, bot: Bot, update: Update) -> None:
"""
Handles /kickme command.
Kicks the user who ran the command the same way /kick does.
Can be executed by anybody.
"""
self.logger.info(
"/kickme command sent by %s",
self._log_user(update.message.from_user))
target = update.message.from_user
bot.kick_chat_member(
chat_id=self.config.GROUP_ID,
user_id=target.id)
bot.unban_chat_member(
chat_id=self.config.GROUP_ID,
user_id=target.id)
bot.send_message(
chat_id=self.config.GROUP_ID,
text=messages.KICKED.format(user=self._display_user(target.id, target.first_name)),
parse_mode="HTML",
reply_to_message_id=update.message.message_id,
)
with self.db_session() as session:
quizpass = get_active_quizpass(session, target.id)
if quizpass:
session.delete(quizpass)
def command_ban(self, bot: Bot, update: Update) -> None:
    """
    Handles /ban admin command.

    Permanently bans the user.

    Usage:
        /ban @user_mention
            By a mention.
        /ban 1234
            By user id.
        /ban
            By reply to a message.
    """
    if not self._is_admin(bot, update.message.from_user.id):
        bot.send_message(
            chat_id=self.config.GROUP_ID,
            text=messages.UNAUTHORIZED,
            parse_mode="HTML",
            reply_to_message_id=update.message.message_id,
        )
        return
    self.logger.info(
        "/ban command sent by %s",
        self._log_user(update.message.from_user))
    target = self._get_target(update)
    if not target:
        bot.send_message(
            chat_id=self.config.GROUP_ID,
            text=messages.NO_TARGET,
            parse_mode="HTML",
            reply_to_message_id=update.message.message_id,
        )
        return
    target_id, target_name = target
    # Unlike /kick there is no unban call here: the ban is permanent.
    bot.kick_chat_member(chat_id=self.config.GROUP_ID, user_id=target_id)
    bot.send_message(
        chat_id=self.config.GROUP_ID,
        text=messages.BANNED.format(
            user=self._display_user(target_id, target_name)),
        parse_mode="HTML",
        reply_to_message_id=update.message.message_id,
    )
    # Drop any active quiz pass as well.
    with self.db_session() as session:
        quizpass = get_active_quizpass(session, target_id)
        if quizpass:
            session.delete(quizpass)
def callback_query(self, bot: Bot, update: Update) -> None:
    """
    Handles callback queries from the inline buttons.

    Dispatches on the callback data to the matching handler. "answer_N"
    payloads are routed to :meth:`callback_query_answer` with the answer
    index N parsed out; anything unrecognized goes to
    :meth:`callback_query_unknown`.
    """
    data = update.callback_query.data
    # Literal commands map directly to their handlers.
    handlers = {
        "ignore": self.callback_query_ignore,
        "start_quiz": self.callback_query_start_quiz,
        "next": self.callback_query_next,
        "prev": self.callback_query_prev,
        "share_result": self.callback_query_share_result,
    }
    handler = handlers.get(data)
    if handler is not None:
        handler(bot, update)
        return
    # Only pay the regex cost when no literal command matched (the
    # original matched the pattern on every call before dispatching).
    answer_match = re.match(r'^answer_(\d+)$', data)
    if answer_match:
        self.callback_query_answer(bot, update, int(answer_match.group(1)))
    else:
        self.callback_query_unknown(bot, update)
def callback_query_unknown(self, bot: Bot, update: Update) -> None:
    """
    Handles invalid callback query.

    Logs the unexpected payload and acknowledges the query so the
    client stops showing a loading indicator.
    """
    query = update.callback_query
    self.logger.info(
        "Unknown callback query '%s' from %s",
        query.data,
        self._log_user(query.from_user))
    bot.answer_callback_query(callback_query_id=query.id)
def callback_query_ignore(self, bot: Bot, update: Update) -> None:
    """
    Handles "ignore" callback query. Does not do anything.

    "ignore" callback query is used on inline buttons that don't do
    anything; the query is still acknowledged to clear the client spinner.
    """
    bot.answer_callback_query(callback_query_id=update.callback_query.id)
def callback_query_start_quiz(self, bot: Bot, update: Update) -> None:
    """
    Handles "start_quiz" callback_query.

    Prepares the quiz for the user and displays it, or displays an
    existing quiz if they have created it previously.
    """
    user_id = update.callback_query.from_user.id
    self.logger.info(
        "Callback query 'start_quiz' from %s",
        self._log_user(update.callback_query.from_user))
    bot.answer_callback_query(
        callback_query_id=update.callback_query.id,
    )
    with self.db_session() as session:
        if not self._on_start_quiz(session, bot, user_id):
            return
        quizpass = get_active_quizpass(session, user_id)
        # Resume an unfinished quiz; otherwise build a fresh one.
        # (The original duplicated the display call in both branches.)
        if not quizpass or quizpass.is_finished:
            quizpass = self._generate_quizpass(session, user_id)
        self._display_quizpass(
            bot,
            update.callback_query.message.message_id,
            user_id,
            quizpass,
        )
def callback_query_next(self, bot: Bot, update: Update) -> None:
    """
    Handles "next" callback query.

    Edits the message to display the next question in the quiz.
    """
    query = update.callback_query
    self.logger.info(
        "Callback query 'next' from %s", self._log_user(query.from_user))
    bot.answer_callback_query(callback_query_id=query.id)
    with self.db_session() as session:
        quizpass = get_active_quizpass(session, query.from_user.id)
        if not quizpass:
            # Nothing to page through.
            return
        quizpass.move_to_next()
        session.commit()
        self._display_quizpass(
            bot, query.message.message_id, query.from_user.id, quizpass)
def callback_query_prev(self, bot: Bot, update: Update) -> None:
    """
    Handles "prev" callback query.

    Edits the message to display the prev question in the quiz.
    """
    query = update.callback_query
    self.logger.info(
        "Callback query 'prev' from %s", self._log_user(query.from_user))
    bot.answer_callback_query(callback_query_id=query.id)
    with self.db_session() as session:
        quizpass = get_active_quizpass(session, query.from_user.id)
        if not quizpass:
            # Nothing to page through.
            return
        quizpass.move_to_prev()
        session.commit()
        self._display_quizpass(
            bot, query.message.message_id, query.from_user.id, quizpass)
def callback_query_answer(
        self, bot: Bot, update: Update, answer: int) -> None:
    """
    Handles "answer_N" callback query.

    Answers the currently selected question with the answer with index N.
    Edits the message to display the result. Once the quiz is finished,
    notifies the user of the outcome and, on a pass, lifts the group chat
    restrictions.
    """
    self.logger.info(
        "Callback query 'answer_%s' from %s",
        answer,
        self._log_user(update.callback_query.from_user))
    bot.answer_callback_query(
        callback_query_id=update.callback_query.id,
    )
    with self.db_session() as session:
        quizpass = get_active_quizpass(
            session, update.callback_query.from_user.id)
        if not quizpass:
            return
        # Only the first answer for a question is recorded; repeated
        # button taps are ignored.
        if not quizpass.current_item.is_answered:
            quizpass.current_item.set_answer(answer)
            session.commit()
        self._display_quizpass(
            bot,
            update.callback_query.message.message_id,
            update.callback_query.from_user.id,
            quizpass,
        )
        if quizpass.is_finished:
            if quizpass.has_passed:
                bot.send_message(
                    chat_id=update.callback_query.from_user.id,
                    text=messages.PASSED.format(
                        result=quizpass.correct_given,
                        total=len(quizpass.quizitems),
                    ),
                    parse_mode="HTML",
                    reply_markup=InlineKeyboardMarkup([[
                        InlineKeyboardButton(
                            "Share the result",
                            callback_data="share_result",
                        ),
                    ]]),
                )
                # May fail if the user is admin
                bot.restrict_chat_member(
                    chat_id=self.config.GROUP_ID,
                    user_id=update.callback_query.from_user.id,
                    permissions=ChatPermissions(
                        # Fixed: the Bot API field is "can_send_messages";
                        # the old misspelling "can_send_message" was not a
                        # valid ChatPermissions field, so users who passed
                        # were never granted plain-text permission.
                        can_send_messages=True,
                        can_send_media_messages=True,
                        can_send_other_messages=True,
                        can_add_web_page_previews=True,
                        can_send_polls=True,
                        can_change_info=True,
                        can_invite_users=True,
                        can_pin_messages=True,
                    ),
                )
            else:
                bot.send_message(
                    chat_id=update.callback_query.from_user.id,
                    text=messages.FAILED.format(
                        result=quizpass.correct_given,
                        total=len(quizpass.quizitems),
                        required=quizpass.correct_required,
                        wait_hours=self.config.WAIT_HOURS_ON_FAIL,
                    ),
                    parse_mode="HTML",
                )
def callback_query_share_result(self, bot: Bot, update: Update) -> None:
    """
    Handles "share_result" callback query.

    Sends user's score to the group chat, at most once per quiz pass.
    """
    query = update.callback_query
    self.logger.info(
        "Callback query 'share_result' from %s",
        self._log_user(query.from_user))
    bot.answer_callback_query(callback_query_id=query.id)
    with self.db_session() as session:
        quizpass = get_active_quizpass(session, query.from_user.id)
        if not quizpass:
            return
        # Only a finished, passed, not-yet-shared result may be posted.
        if not (quizpass.is_finished
                and quizpass.has_passed
                and not quizpass.result_shared):
            return
        bot.send_message(
            chat_id=self.config.GROUP_ID,
            text=messages.RESULT_SHARE.format(
                user=self._display_user(
                    query.from_user.id,
                    query.from_user.first_name,
                ),
                result=quizpass.correct_given,
                total=len(quizpass.quizitems),
            ),
            parse_mode="HTML",
        )
        quizpass.result_shared = True
        session.commit()
def _generate_quizpass(self, session: Session, user_id: int) -> QuizPass:
    """
    Creates a new quiz pass for the given user from randomly selected
    questions.

    Returns the created QuizPass object.
    """
    # Sample without replacement so each question appears at most once.
    selected = random.sample(
        self.questions, self.config.QUESTIONS_PER_QUIZ)
    return create_quizpass(
        session,
        user_id,
        selected,
        self.config.CORRECT_ANSWERS_REQUIRED,
    )
def _on_start_quiz(
        self, session: Session, bot: Bot, user_id: int) -> bool:
    """
    Checks if user can start/restart quiz. If they can, returns True.
    If they can't, sends appropriate message to the user and returns False.
    """
    quizpass = get_active_quizpass(session, user_id)
    if not quizpass or not quizpass.is_finished:
        # No finished attempt on record: the user may (re)start.
        return True
    if quizpass.has_passed:
        # Already passed: remind them of the result instead of restarting.
        bot.send_message(
            chat_id=user_id,
            text=messages.PASSED.format(
                result=quizpass.correct_given,
                total=len(quizpass.quizitems),
            ),
            parse_mode="HTML",
            reply_markup=InlineKeyboardMarkup([[
                InlineKeyboardButton(
                    "Share the result",
                    callback_data="share_result",
                ),
            ]]),
        )
        return False
    # Previous attempt failed: enforce the cool-down period.
    now = datetime.utcnow().replace(tzinfo=timezone.utc)
    # Time since last answer
    time_passed = now - quizpass.last_answer_at
    # Time user has to wait after fail
    time_has_to_pass = timedelta(hours=self.config.WAIT_HOURS_ON_FAIL)
    if time_passed < time_has_to_pass:
        remaining = (time_has_to_pass - time_passed).total_seconds()
        wait_hours = int(math.ceil(remaining / 3600))
        bot.send_message(
            chat_id=user_id,
            text=messages.FAILED.format(
                result=quizpass.correct_given,
                total=len(quizpass.quizitems),
                required=quizpass.correct_required,
                wait_hours=wait_hours,
            ),
            parse_mode="HTML")
        return False
    return True
def _display_quizpass(
        self,
        bot: Bot,
        message_id: int,
        user_id: int,
        quizpass: QuizPass,
) -> None:
    """
    Edits the given message to display current question in the given
    quizpass.
    """
    item = quizpass.current_item
    # Question text, blank line, then the numbered options.
    lines = [item.text, ""]
    for option in item.options:
        lines.append(f"{option.index}. {option.text}")
    if item.is_answered:
        lines.append("")
        lines.append("Correct." if item.is_answered_correctly else "Wrong.")
    text = "\n".join(lines).strip()
    # One button per answer option; callback data carries the option index.
    ans_buttons = [
        InlineKeyboardButton(str(ix), callback_data=f"answer_{ix}")
        for ix in range(len(item.options))
    ]
    nav_buttons = [
        InlineKeyboardButton("<", callback_data="prev"),
        InlineKeyboardButton(
            f"{item.index + 1}/{self.config.QUESTIONS_PER_QUIZ}",
            callback_data="ignore",
        ),
        InlineKeyboardButton(">", callback_data="next"),
    ]
    # Hide the answer row once the question has been answered.
    if item.is_answered:
        rows = [nav_buttons]
    else:
        rows = [ans_buttons, nav_buttons]
    bot.edit_message_text(
        chat_id=user_id,
        message_id=message_id,
        text=text,
        parse_mode="HTML",
        reply_markup=InlineKeyboardMarkup(rows),
    )
from flask import Blueprint, redirect, url_for
from flask_babel import gettext
from flask_login import current_user, login_required
from werkzeug.exceptions import BadRequest
import critiquebrainz.db.review as db_review
from critiquebrainz.frontend import flash
from critiquebrainz.frontend.forms.rate import RatingEditForm
# Blueprint for the rating endpoint, registered by the frontend app.
rate_bp = Blueprint('rate', __name__)
@rate_bp.route('/', methods=['POST'])
@login_required
def rate():
    """Handle a rating submission for an entity.

    Creates, updates, or deletes the current user's rating-only review
    depending on the submitted rating and any existing review, then
    redirects back to the entity page.
    """
    form = RatingEditForm()
    if form.validate_on_submit():
        if current_user.is_blocked:
            flash.error(gettext("You are not allowed to rate any entity because your "
                                "account has been blocked by a moderator."))
            return redirect(url_for('{}.entity'.format(form.entity_type.data), id=form.entity_id.data))
        # There is at most one review per (user, entity) pair.
        reviews, review_count = db_review.list_reviews(
            entity_id=form.entity_id.data,
            entity_type=form.entity_type.data,
            user_id=current_user.id,
        )
        review = reviews[0] if review_count else None
        if not review and form.rating.data is None:
            raise BadRequest("Cannot create a review with no rating and no text!")
        if not review and form.rating.data is not None:
            # First rating for this entity: create a rating-only review.
            db_review.create(
                user_id=current_user.id,
                entity_id=form.entity_id.data,
                entity_type=form.entity_type.data,
                rating=form.rating.data,
                is_draft=False,
            )
        elif review and review['text'] is None and form.rating.data is None:
            # Rating withdrawn and no review text remains: drop the review.
            db_review.delete(review['id'])
        elif review and review['rating'] != form.rating.data:
            db_review.update(
                review_id=review['id'],
                drafted=review['is_draft'],
                text=review['text'],
                rating=form.rating.data,
            )
        # TODO(code-master5): Make this message specify the entity
        # Wrapped in gettext for i18n, consistent with the blocked-user
        # message above.
        flash.success(gettext("We have updated your rating for this entity!"))
    else:
        flash.error(gettext("Error! Could not update the rating..."))
    return redirect(url_for('{}.entity'.format(form.entity_type.data), id=form.entity_id.data))
from flask import Blueprint, redirect, url_for
from flask_babel import gettext
from flask_login import current_user, login_required
from werkzeug.exceptions import BadRequest
import critiquebrainz.db.review as db_review
from critiquebrainz.frontend import flash
from critiquebrainz.frontend.forms.rate import RatingEditForm
# Blueprint for the rating endpoint, registered by the frontend app.
rate_bp = Blueprint('rate', __name__)
@rate_bp.route('/', methods=['POST'])
@login_required
def rate():
    """Handle a rating submission for an entity.

    Creates, updates, or deletes the current user's rating-only review
    depending on the submitted rating and any existing review, then
    redirects back to the entity page.
    """
    form = RatingEditForm()
    if form.validate_on_submit():
        if current_user.is_blocked:
            flash.error(gettext("You are not allowed to rate any entity because your "
                                "account has been blocked by a moderator."))
            return redirect(url_for('{}.entity'.format(form.entity_type.data), id=form.entity_id.data))
        # There is at most one review per (user, entity) pair.
        reviews, review_count = db_review.list_reviews(
            entity_id=form.entity_id.data,
            entity_type=form.entity_type.data,
            user_id=current_user.id,
        )
        review = reviews[0] if review_count else None
        if not review and form.rating.data is None:
            raise BadRequest("Cannot create a review with no rating and no text!")
        if not review and form.rating.data is not None:
            # First rating for this entity: create a rating-only review.
            db_review.create(
                user_id=current_user.id,
                entity_id=form.entity_id.data,
                entity_type=form.entity_type.data,
                rating=form.rating.data,
                is_draft=False,
            )
        elif review and review['text'] is None and form.rating.data is None:
            # Rating withdrawn and no review text remains: drop the review.
            db_review.delete(review['id'])
        elif review and review['rating'] != form.rating.data:
            db_review.update(
                review_id=review['id'],
                drafted=review['is_draft'],
                text=review['text'],
                rating=form.rating.data,
            )
        # TODO(code-master5): Make this message specify the entity
        # Wrapped in gettext for i18n, consistent with the blocked-user
        # message above.
        flash.success(gettext("We have updated your rating for this entity!"))
    else:
        flash.error(gettext("Error! Could not update the rating..."))
    return redirect(url_for('{}.entity'.format(form.entity_type.data), id=form.entity_id.data))
import contextlib
import inspect
import itertools
import functools
import warnings
from lisa.analysis.base import TraceAnalysisBase
from lisa.utils import Loggable, sig_bind
class _AnalysisPreset:
    """Proxy around an analysis instance that pre-binds parameter values.

    Attribute access returns the underlying attribute unchanged unless it
    is callable, in which case a wrapper is returned that injects every
    preset parameter whose name appears in the callable's signature.
    """

    def __init__(self, instance, params):
        # instance: the analysis object being proxied.
        # params: mapping of parameter name -> preset value.
        self._instance = instance
        self._params = params

    def __getattr__(self, attr):
        x = getattr(self._instance, attr)
        try:
            sig = inspect.signature(x)
        except Exception:
            # Not introspectable (plain attribute, builtin, ...): return as-is.
            return x
        else:
            # Keep only the presets this callable actually accepts.
            extra = {
                k: v
                for k, v in self._params.items()
                if k in sig.parameters
            }

            @functools.wraps(x)
            def wrapper(*args, **kwargs):
                # Explicitly passed arguments take precedence over presets.
                kwargs = {
                    **extra,
                    **sig_bind(
                        sig,
                        args=args,
                        kwargs=kwargs,
                        include_defaults=False
                    )[0],
                }
                return x(**kwargs)

            # Update the signature so it shows the effective default value
            def update_default(param):
                # Make it keyword-only if it does not have a default value,
                # otherwise we might end up setting a parameter without a
                # default after one with a default, which is unfortunately
                # illegal.
                if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
                    kind = param.kind
                else:
                    kind = param.KEYWORD_ONLY
                try:
                    default = extra[param.name]
                except KeyError:
                    default = param.default
                return param.replace(
                    default=default,
                    kind=kind
                )
            wrapper.__signature__ = sig.replace(
                parameters=list(
                    map(
                        update_default,
                        sig.parameters.values()
                    )
                )
            )
            return wrapper
class AnalysisProxy(Loggable):
    """
    Entry point to call analysis methods on :class:`~lisa.trace.Trace` objects.

    **Example**

    # Call lisa.analysis.LoadTrackingAnalysis.df_task_signal() on a trace::

        df = trace.ana.load_tracking.df_task_signal(task='foo', signal='util')

    The proxy can also be called like a function to define default values for
    analysis methods::

        ana = trace.ana(task='big_0-3')
        ana.load_tracking.df_task_signal(signal='util')

        # Equivalent to:
        ana.load_tracking.df_task_signal(task='big_0-3', signal='util')

        # The proxy can be called again to override the value given to some
        # parameters, and the value can also be overridden when calling the
        # method:
        ana(task='foo').df_task_signal(signal='util')
        ana.df_task_signal(task='foo', signal='util')

    :param trace: input Trace object
    :type trace: :class:`trace.Trace`
    """

    def __init__(self, trace, params=None):
        # Preset parameter values injected into analysis method calls
        # (see _AnalysisPreset).
        self._preset_params = params or {}
        self.trace = trace
        # Get the list once when the proxy is built, since we know all classes
        # will have had a chance to get registered at that point
        self._class_map = TraceAnalysisBase.get_analysis_classes()
        # Cache of _AnalysisPreset wrappers, keyed by analysis name.
        self._instance_map = {}

    def __call__(self, **kwargs):
        # Calling the proxy layers new presets on top of the existing ones.
        return self._with_params(
            {
                **self._preset_params,
                **kwargs,
            }
        )

    def _with_params(self, params):
        # Sibling proxy sharing the same trace but with different presets.
        return self.__class__(
            trace=self.trace,
            params=params,
        )

    @classmethod
    def get_all_events(cls):
        """
        Returns the set of all events used by any of the registered analysis.
        """
        # Note: "cls" inside the comprehension is the loop variable (each
        # registered analysis class), shadowing the classmethod parameter,
        # so this calls each analysis class's own get_all_events().
        return set(itertools.chain.from_iterable(
            cls.get_all_events()
            for cls in TraceAnalysisBase.get_analysis_classes().values()
        ))

    def __dir__(self):
        """Provide better completion support for interactive notebook usage"""
        return itertools.chain(super().__dir__(), self._class_map.keys())

    def __getattr__(self, attr):
        # dunder name lookup would have succeeded by now, like __setstate__
        if attr.startswith('__') and attr.endswith('__'):
            return super().__getattribute__(attr)

        logger = self.logger

        # First, try to get the instance of the Analysis that was built if we
        # used it already on that proxy.
        try:
            return self._instance_map[attr]
        except KeyError:
            # If that is the first use, we get the analysis class and build an
            # instance of it
            try:
                analysis_cls = self._class_map[attr]
            except KeyError:
                # No analysis class matching "attr", so we log the ones that
                # are available and let an AttributeError bubble up
                try:
                    # NOTE(review): when this fallback lookup succeeds, the
                    # value appears to fall through without being returned
                    # (only the "else" branch below returns) — confirm
                    # intended behavior against upstream.
                    analysis_cls = super().__getattribute__(attr)
                except Exception:
                    logger.debug(f'{attr} not found. Registered analysis:')
                    for name, cls in list(self._class_map.items()):
                        src_file = '<unknown source>'
                        with contextlib.suppress(TypeError):
                            src_file = inspect.getsourcefile(cls) or src_file
                        logger.debug(f'{name} ({cls}) defined in {src_file}')
                    raise
            else:
                # Allows straightforward composition of plot methods by
                # ensuring that inside an analysis method, self.ana.foo.bar()
                # will call bar with no extra implicit value for bar()
                # parameters.
                proxy = self._with_params({})
                instance = analysis_cls(trace=self.trace, proxy=proxy)
                preset = _AnalysisPreset(
                    instance=instance,
                    params=self._preset_params
                )
                self._instance_map[attr] = preset
                return preset
class _DeprecatedAnalysisProxy(AnalysisProxy):
    """Backward-compatibility shim behind the deprecated ``trace.analysis``."""

    def __init__(self, trace, params=None):
        # Enable the old behaviour of returning a matplotlib axis when
        # matplotlib backend is in use, otherwise return holoviews
        # objects (unless output='render')
        compat_params = {'_compat_render': True}
        compat_params.update(params or {})
        super().__init__(trace=trace, params=compat_params)

    def __getattr__(self, attr):
        # Do not catch dunder names
        if not attr.startswith('__'):
            warnings.warn(
                'trace.analysis is deprecated, use trace.ana instead. Note that plot method will return holoviews objects, use output="render" to render them as matplotlib figure to get legacy behaviour',
                DeprecationWarning,
                stacklevel=2,
            )
        return super().__getattr__(attr)
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80 | lisa/analysis/_proxy.py | import contextlib
import inspect
import itertools
import functools
import warnings
from lisa.analysis.base import TraceAnalysisBase
from lisa.utils import Loggable, sig_bind
class _AnalysisPreset:
    """Proxy around an analysis instance that pre-binds parameter values.

    Attribute access returns the underlying attribute unchanged unless it
    is callable, in which case a wrapper is returned that injects every
    preset parameter whose name appears in the callable's signature.
    """

    def __init__(self, instance, params):
        # instance: the analysis object being proxied.
        # params: mapping of parameter name -> preset value.
        self._instance = instance
        self._params = params

    def __getattr__(self, attr):
        x = getattr(self._instance, attr)
        try:
            sig = inspect.signature(x)
        except Exception:
            # Not introspectable (plain attribute, builtin, ...): return as-is.
            return x
        else:
            # Keep only the presets this callable actually accepts.
            extra = {
                k: v
                for k, v in self._params.items()
                if k in sig.parameters
            }

            @functools.wraps(x)
            def wrapper(*args, **kwargs):
                # Explicitly passed arguments take precedence over presets.
                kwargs = {
                    **extra,
                    **sig_bind(
                        sig,
                        args=args,
                        kwargs=kwargs,
                        include_defaults=False
                    )[0],
                }
                return x(**kwargs)

            # Update the signature so it shows the effective default value
            def update_default(param):
                # Make it keyword-only if it does not have a default value,
                # otherwise we might end up setting a parameter without a
                # default after one with a default, which is unfortunately
                # illegal.
                if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
                    kind = param.kind
                else:
                    kind = param.KEYWORD_ONLY
                try:
                    default = extra[param.name]
                except KeyError:
                    default = param.default
                return param.replace(
                    default=default,
                    kind=kind
                )
            wrapper.__signature__ = sig.replace(
                parameters=list(
                    map(
                        update_default,
                        sig.parameters.values()
                    )
                )
            )
            return wrapper
class AnalysisProxy(Loggable):
    """
    Entry point to call analysis methods on :class:`~lisa.trace.Trace` objects.

    **Example**

    # Call lisa.analysis.LoadTrackingAnalysis.df_task_signal() on a trace::

        df = trace.ana.load_tracking.df_task_signal(task='foo', signal='util')

    The proxy can also be called like a function to define default values for
    analysis methods::

        ana = trace.ana(task='big_0-3')
        ana.load_tracking.df_task_signal(signal='util')

        # Equivalent to:
        ana.load_tracking.df_task_signal(task='big_0-3', signal='util')

        # The proxy can be called again to override the value given to some
        # parameters, and the value can also be overridden when calling the
        # method:
        ana(task='foo').df_task_signal(signal='util')
        ana.df_task_signal(task='foo', signal='util')

    :param trace: input Trace object
    :type trace: :class:`trace.Trace`
    """

    def __init__(self, trace, params=None):
        # Preset parameter values injected into analysis method calls
        # (see _AnalysisPreset).
        self._preset_params = params or {}
        self.trace = trace
        # Get the list once when the proxy is built, since we know all classes
        # will have had a chance to get registered at that point
        self._class_map = TraceAnalysisBase.get_analysis_classes()
        # Cache of _AnalysisPreset wrappers, keyed by analysis name.
        self._instance_map = {}

    def __call__(self, **kwargs):
        # Calling the proxy layers new presets on top of the existing ones.
        return self._with_params(
            {
                **self._preset_params,
                **kwargs,
            }
        )

    def _with_params(self, params):
        # Sibling proxy sharing the same trace but with different presets.
        return self.__class__(
            trace=self.trace,
            params=params,
        )

    @classmethod
    def get_all_events(cls):
        """
        Returns the set of all events used by any of the registered analysis.
        """
        # Note: "cls" inside the comprehension is the loop variable (each
        # registered analysis class), shadowing the classmethod parameter,
        # so this calls each analysis class's own get_all_events().
        return set(itertools.chain.from_iterable(
            cls.get_all_events()
            for cls in TraceAnalysisBase.get_analysis_classes().values()
        ))

    def __dir__(self):
        """Provide better completion support for interactive notebook usage"""
        return itertools.chain(super().__dir__(), self._class_map.keys())

    def __getattr__(self, attr):
        # dunder name lookup would have succeeded by now, like __setstate__
        if attr.startswith('__') and attr.endswith('__'):
            return super().__getattribute__(attr)

        logger = self.logger

        # First, try to get the instance of the Analysis that was built if we
        # used it already on that proxy.
        try:
            return self._instance_map[attr]
        except KeyError:
            # If that is the first use, we get the analysis class and build an
            # instance of it
            try:
                analysis_cls = self._class_map[attr]
            except KeyError:
                # No analysis class matching "attr", so we log the ones that
                # are available and let an AttributeError bubble up
                try:
                    # NOTE(review): when this fallback lookup succeeds, the
                    # value appears to fall through without being returned
                    # (only the "else" branch below returns) — confirm
                    # intended behavior against upstream.
                    analysis_cls = super().__getattribute__(attr)
                except Exception:
                    logger.debug(f'{attr} not found. Registered analysis:')
                    for name, cls in list(self._class_map.items()):
                        src_file = '<unknown source>'
                        with contextlib.suppress(TypeError):
                            src_file = inspect.getsourcefile(cls) or src_file
                        logger.debug(f'{name} ({cls}) defined in {src_file}')
                    raise
            else:
                # Allows straightforward composition of plot methods by
                # ensuring that inside an analysis method, self.ana.foo.bar()
                # will call bar with no extra implicit value for bar()
                # parameters.
                proxy = self._with_params({})
                instance = analysis_cls(trace=self.trace, proxy=proxy)
                preset = _AnalysisPreset(
                    instance=instance,
                    params=self._preset_params
                )
                self._instance_map[attr] = preset
                return preset
class _DeprecatedAnalysisProxy(AnalysisProxy):
    """Backward-compatibility shim behind the deprecated ``trace.analysis``."""

    def __init__(self, trace, params=None):
        # Enable the old behaviour of returning a matplotlib axis when
        # matplotlib backend is in use, otherwise return holoviews
        # objects (unless output='render')
        compat_params = {'_compat_render': True}
        compat_params.update(params or {})
        super().__init__(trace=trace, params=compat_params)

    def __getattr__(self, attr):
        # Do not catch dunder names
        if not attr.startswith('__'):
            warnings.warn(
                'trace.analysis is deprecated, use trace.ana instead. Note that plot method will return holoviews objects, use output="render" to render them as matplotlib figure to get legacy behaviour',
                DeprecationWarning,
                stacklevel=2,
            )
        return super().__getattr__(attr)
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80 | 0.655777 | 0.145267 |
import numpy as np
from numpy.core.records import fromarrays
from scipy.io import savemat
from .utils import cart_to_eeglab
def export_set(fname, data, sfreq, ch_names, ch_locs=None, annotations=None,
               ref_channels="common"):
    """Export continuous raw data to EEGLAB's .set format.

    Parameters
    ----------
    fname : str
        Name of the export file.
    data : numpy.ndarray, shape (n_channels, n_samples)
        Continuous data array in Volts. (The code stores
        ``nbchan=data.shape[0]`` and ``pnts=data.shape[1]`` with
        ``trials=1``; the previously documented epochs shape
        (n_epochs, n_channels, n_samples) belongs to
        :func:`.epochs.export_set`.)
    sfreq : int
        sample frequency of data
    ch_names : list of str
        Channel names.
    ch_locs : numpy.ndarray, shape (n_channels, 3)
        Array containing channel locations in Cartesian coordinates (x, y, z)
    annotations : list, shape (3, n_annotations)
        List containing three annotation subarrays:
        first array (str) is description,
        second array (float) is onset (starting time in seconds),
        third array (float) is duration (in seconds)
        This roughly follows MNE's Annotations structure.
    ref_channels : list of str | str
        The name(s) of the channel(s) used to construct the reference,
        'average' for average reference, or 'common' (default) when there's no
        specific reference set. Note that this parameter is only used to inform
        EEGLAB of the existing reference, this method will not reference the
        data for you.

    See Also
    --------
    .epochs.export_set

    Notes
    -----
    Channel locations are expanded to the full EEGLAB format.
    For more details see :func:`.utils.cart_to_eeglab_sph`.
    """
    data = data * 1e6  # convert to microvolts

    if ch_locs is not None:
        # get full EEGLAB coordinates to export
        full_coords = cart_to_eeglab(ch_locs)

        # convert to record arrays for MATLAB format
        chanlocs = fromarrays(
            [ch_names, *full_coords.T, np.repeat('', len(ch_names))],
            names=["labels", "X", "Y", "Z", "sph_theta", "sph_phi",
                   "sph_radius", "theta", "radius",
                   "sph_theta_besa", "sph_phi_besa", "type"])
    else:
        chanlocs = fromarrays([ch_names], names=["labels"])

    if isinstance(ref_channels, list):
        ref_channels = " ".join(ref_channels)

    eeg_d = dict(data=data, setname=fname, nbchan=data.shape[0],
                 pnts=float(data.shape[1]), trials=1, srate=sfreq, xmin=0.0,
                 xmax=float(data.shape[1] / sfreq), ref=ref_channels,
                 chanlocs=chanlocs, icawinv=[], icasphere=[], icaweights=[])

    if annotations is not None:
        # The "+ 1" shifts onsets to 1-based sample latencies
        # (presumably EEGLAB's convention — confirm against EEGLAB docs).
        events = fromarrays([annotations[0],
                             annotations[1] * sfreq + 1,
                             annotations[2] * sfreq],
                            names=["type", "latency", "duration"])
        eeg_d['event'] = events

    savemat(fname, eeg_d, appendmat=False)
from numpy.core.records import fromarrays
from scipy.io import savemat
from .utils import cart_to_eeglab
def export_set(fname, data, sfreq, ch_names, ch_locs=None, annotations=None,
               ref_channels="common"):
    """Export continuous raw data to EEGLAB's .set format.

    Parameters
    ----------
    fname : str
        Name of the export file.
    data : numpy.ndarray, shape (n_channels, n_samples)
        Continuous data array in Volts. (The code stores
        ``nbchan=data.shape[0]`` and ``pnts=data.shape[1]`` with
        ``trials=1``; the previously documented epochs shape
        (n_epochs, n_channels, n_samples) belongs to
        :func:`.epochs.export_set`.)
    sfreq : int
        sample frequency of data
    ch_names : list of str
        Channel names.
    ch_locs : numpy.ndarray, shape (n_channels, 3)
        Array containing channel locations in Cartesian coordinates (x, y, z)
    annotations : list, shape (3, n_annotations)
        List containing three annotation subarrays:
        first array (str) is description,
        second array (float) is onset (starting time in seconds),
        third array (float) is duration (in seconds)
        This roughly follows MNE's Annotations structure.
    ref_channels : list of str | str
        The name(s) of the channel(s) used to construct the reference,
        'average' for average reference, or 'common' (default) when there's no
        specific reference set. Note that this parameter is only used to inform
        EEGLAB of the existing reference, this method will not reference the
        data for you.

    See Also
    --------
    .epochs.export_set

    Notes
    -----
    Channel locations are expanded to the full EEGLAB format.
    For more details see :func:`.utils.cart_to_eeglab_sph`.
    """
    data = data * 1e6  # convert to microvolts

    if ch_locs is not None:
        # get full EEGLAB coordinates to export
        full_coords = cart_to_eeglab(ch_locs)

        # convert to record arrays for MATLAB format
        chanlocs = fromarrays(
            [ch_names, *full_coords.T, np.repeat('', len(ch_names))],
            names=["labels", "X", "Y", "Z", "sph_theta", "sph_phi",
                   "sph_radius", "theta", "radius",
                   "sph_theta_besa", "sph_phi_besa", "type"])
    else:
        chanlocs = fromarrays([ch_names], names=["labels"])

    if isinstance(ref_channels, list):
        ref_channels = " ".join(ref_channels)

    eeg_d = dict(data=data, setname=fname, nbchan=data.shape[0],
                 pnts=float(data.shape[1]), trials=1, srate=sfreq, xmin=0.0,
                 xmax=float(data.shape[1] / sfreq), ref=ref_channels,
                 chanlocs=chanlocs, icawinv=[], icasphere=[], icaweights=[])

    if annotations is not None:
        # The "+ 1" shifts onsets to 1-based sample latencies
        # (presumably EEGLAB's convention — confirm against EEGLAB docs).
        events = fromarrays([annotations[0],
                             annotations[1] * sfreq + 1,
                             annotations[2] * sfreq],
                            names=["type", "latency", "duration"])
        eeg_d['event'] = events

    savemat(fname, eeg_d, appendmat=False)
import datetime
import pandas as pd
from pandas import DataFrame
from tabulate import tabulate
from base import BaseObject
from datamongo import BaseMongoClient
from datamongo import CendantCollection
class CountMongoCollections(BaseObject):
""" Provide a convenient way to count the total records in MongoDB collections
"""
def __init__(self,
is_debug: bool = False):
"""
Created:
4-Oct-2019
<EMAIL>
Updated:
26-Nov-2019
<EMAIL>
* add filter-by-date functionality
"""
BaseObject.__init__(self, __name__)
self._is_debug = is_debug
@staticmethod
def _aggregate(names: list):
return {
"src": sorted([name for name in names if "src" in name]),
"tag": sorted([name for name in names if "tag" in name]),
"xdm": sorted([name for name in names if "xdm" in name])}
@staticmethod
def _filter(filter_name: str,
names: list) -> list:
return [name for name in names if name.startswith(filter_name)]
@staticmethod
def _filter_by_date(names: list) -> list:
mydate = datetime.datetime.now() # '2019-11-26 08:13:58.660388'
tokens = str(mydate).split(' ')[0].split('-') # ['2019', '11', '26']
pattern = f"{tokens[0]}{tokens[1]}" # '201911'
return [name for name in names if pattern in name]
def _find_names(self,
base_mongo_client: BaseMongoClient) -> dict:
"""
Purpose:
Generate a dictionary object that aggregates collections by type and stage
Sample Output:
{'demand': {'src': ['demand_src_20190913',
...
'demand_src_20190909'],
'tag': ['demand_tag_20190917',
...
'demand_tag_20191003'],
'xdm': ['demand_xdm_20190927']},
'learning': {'src': ['learning_src_20190806',
...
'learning_src_20191002'],
'tag': ['learning_tag_20190806',
...
'learning_tag_20191004'],
'xdm': []},
'supply': {'src': ['supply_src_20190801',
...
'supply_src_20190814'],
'tag': ['supply_tag_20190913',
...
'supply_tag_20190817'],
'xdm': ['supply_xdm_20190917',
...
'supply_xdm_20190807']}}
:param base_mongo_client:
an instantiated mongoDB client instance
:return:
a dictionary of collections
"""
client = base_mongo_client.client
names = sorted(dict((db, [collection for collection in client[db].collection_names()])
for db in client.database_names()).values())[0]
names = self._filter_by_date(names)
d_collections = {}
for filter_name in ["supply", "demand", "learning", "feedback", "patent", "github"]:
d_collections[filter_name] = self._aggregate(names=self._filter(filter_name, names=names))
return d_collections
    def _count_sizes(self,
                     base_mongo_client: BaseMongoClient,
                     d_collections: dict) -> DataFrame:
        """
        Purpose:
            Count Collection Sizes and Generate DataFrame of output
        Sample Output:
            +----+------------+---------+-----------------------+----------+
            |    | Category   |   Count | Name                  | Type     |
            |----+------------+---------+-----------------------+----------|
            |  0 | src        |  207169 | supply_src_20190801   | supply   |
            |  1 | src        |  238246 | supply_src_20190814   | supply   |
            ...
            | 40 | tag        |  174660 | learning_tag_20190923 | learning |
            | 41 | tag        |  169517 | learning_tag_20191004 | learning |
            +----+------------+---------+-----------------------+----------+
        :param base_mongo_client:
            an instantiated mongoDB client instance
        :param d_collections:
            output produced by the prior step (type -> category -> [names])
        :return:
            a DataFrame of output, one row per collection
        """
        results = []
        # d_collections is nested: {type: {category: [collection names]}}
        for collection_type in d_collections:
            for collection_category in d_collections[collection_type]:
                for collection_name in d_collections[collection_type][collection_category]:
                    # count() issues one round-trip per collection
                    total = CendantCollection(some_collection_name=collection_name,
                                              some_base_client=base_mongo_client).count()
                    if self._is_debug:
                        self.logger.debug(f"Collection Counted "
                                          f"(name={collection_name}, total={total})")
                    results.append({
                        "Type": collection_type,
                        "Category": collection_category,
                        "Name": collection_name,
                        "Count": total})
        return pd.DataFrame(results)
    def process(self):
        """Count every current-month collection and log the result as a psql-style table."""
        base_mongo_client = BaseMongoClient(is_debug=True)
        d_collections = self._find_names(base_mongo_client)
        df = self._count_sizes(base_mongo_client, d_collections)
        self.logger.debug('\n'.join([
            "Cendant Collection Counting Completed",
            tabulate(df,
                     headers='keys',
                     tablefmt='psql')]))
def main():
    # CLI entry point: count every collection and log the tabulated result.
    CountMongoCollections().process()


if __name__ == "__main__":
    import plac

    plac.call(main)
import datetime
import pandas as pd
from pandas import DataFrame
from tabulate import tabulate
from base import BaseObject
from datamongo import BaseMongoClient
from datamongo import CendantCollection
class CountMongoCollections(BaseObject):
    """ Provide a convenient way to count the total records in MongoDB collections
    """

    def __init__(self,
                 is_debug: bool = False):
        """
        Created:
            4-Oct-2019
            <EMAIL>
        Updated:
            26-Nov-2019
            <EMAIL>
            *   add filter-by-date functionality

        :param is_debug:
            if True, log a debug line for every collection counted
        """
        BaseObject.__init__(self, __name__)
        self._is_debug = is_debug

    @staticmethod
    def _aggregate(names: list):
        """Bucket collection names by pipeline stage (src / tag / xdm)."""
        return {
            "src": sorted([name for name in names if "src" in name]),
            "tag": sorted([name for name in names if "tag" in name]),
            "xdm": sorted([name for name in names if "xdm" in name])}

    @staticmethod
    def _filter(filter_name: str,
                names: list) -> list:
        """Return the subset of *names* that begin with *filter_name*."""
        return [name for name in names if name.startswith(filter_name)]

    @staticmethod
    def _filter_by_date(names: list) -> list:
        """Keep only collection names containing the current year+month stamp.

        FIX: build the '201911'-style pattern with strftime('%Y%m') instead of
        splitting the str() of a datetime by hand; same result, but it no
        longer depends on datetime's string representation format.
        """
        pattern = datetime.datetime.now().strftime('%Y%m')  # e.g. '201911'
        return [name for name in names if pattern in name]

    def _find_names(self,
                    base_mongo_client: BaseMongoClient) -> dict:
        """
        Purpose:
            Generate a dictionary object that aggregates collections by type and stage
        Sample Output:
            {'demand': {'src': [...], 'tag': [...], 'xdm': [...]},
             'learning': {'src': [...], 'tag': [...], 'xdm': []},
             'supply': {'src': [...], 'tag': [...], 'xdm': [...]}}
        :param base_mongo_client:
            an instantiated mongoDB client instance
        :return:
            a dictionary of collections
        """
        client = base_mongo_client.client
        # Collect each database's collection list, sort by database name, and
        # keep only the first database's collections.
        # NOTE(review): collection_names()/database_names() are deprecated in
        # pymongo >= 3.7; left unchanged to stay compatible with the pinned driver.
        names = sorted(dict((db, [collection for collection in client[db].collection_names()])
                            for db in client.database_names()).values())[0]
        # Restrict to collections stamped with the current year+month.
        names = self._filter_by_date(names)
        d_collections = {}
        for filter_name in ["supply", "demand", "learning", "feedback", "patent", "github"]:
            d_collections[filter_name] = self._aggregate(names=self._filter(filter_name, names=names))
        return d_collections

    def _count_sizes(self,
                     base_mongo_client: BaseMongoClient,
                     d_collections: dict) -> DataFrame:
        """
        Purpose:
            Count Collection Sizes and Generate DataFrame of output
        Sample Output:
            +----+------------+---------+-----------------------+----------+
            |    | Category   |   Count | Name                  | Type     |
            |----+------------+---------+-----------------------+----------|
            |  0 | src        |  207169 | supply_src_20190801   | supply   |
            ...
            | 41 | tag        |  169517 | learning_tag_20191004 | learning |
            +----+------------+---------+-----------------------+----------+
        :param base_mongo_client:
            an instantiated mongoDB client instance
        :param d_collections:
            output produced by the prior step (type -> category -> [names])
        :return:
            a DataFrame of output, one row per collection
        """
        results = []
        for collection_type in d_collections:
            for collection_category in d_collections[collection_type]:
                for collection_name in d_collections[collection_type][collection_category]:
                    # count() issues one round-trip per collection
                    total = CendantCollection(some_collection_name=collection_name,
                                              some_base_client=base_mongo_client).count()
                    if self._is_debug:
                        self.logger.debug(f"Collection Counted "
                                          f"(name={collection_name}, total={total})")
                    results.append({
                        "Type": collection_type,
                        "Category": collection_category,
                        "Name": collection_name,
                        "Count": total})
        return pd.DataFrame(results)

    def process(self):
        """Count every current-month collection and log the result as a psql-style table."""
        base_mongo_client = BaseMongoClient(is_debug=True)
        d_collections = self._find_names(base_mongo_client)
        df = self._count_sizes(base_mongo_client, d_collections)
        self.logger.debug('\n'.join([
            "Cendant Collection Counting Completed",
            tabulate(df,
                     headers='keys',
                     tablefmt='psql')]))
def main():
    # CLI entry point: count every collection and log the tabulated result.
    CountMongoCollections().process()


if __name__ == "__main__":
    import plac

    plac.call(main)
import argparse
import csv
import logging
import sys
from src.utils.predict_entailment import PredictEntailment
class PredictMnliEntailment:
    """
    Entailment task predictor for MNLI Tsv dataset
    """

    def __init__(self, prediction_csv_file, outputfile, model_or_path, tokenisor_or_path,
                 do_lower_case=False, max_length=512, batch_size=8):
        """Load the MNLI-style csv, run entailment prediction, write results.

        :param prediction_csv_file: input csv with sentence1/sentence2/label columns
        :param outputfile: destination file for predictions
        :param model_or_path: model instance or path, passed to PredictEntailment
        :param tokenisor_or_path: tokeniser instance or path
        :param do_lower_case: lower-case inputs before tokenising
        :param max_length: maximum token length per example
        :param batch_size: prediction batch size
        """
        sentence_a = []
        sentence_b = []
        labels = []
        with open(prediction_csv_file, "r") as f:
            for r in csv.DictReader(f):
                sentence_a.append(r["sentence1"])
                sentence_b.append(r["sentence2"])
                labels.append(r["label"])
        # Sorted unique labels => column order is reproducible across runs.
        label_names_in_order = sorted(list(set(labels)))
        self._logger.info(f"Using label names in order {label_names_in_order}")
        predictor = PredictEntailment(model_or_path, tokenisor_or_path,
                                      do_lower_case, max_length, batch_size)
        # The callback receives the raw prediction triple and persists it.
        predictor(sentence_a, sentence_b, labels,
                  lambda a, b, c: predictor.write_to_file(outputfile, a, b, c, label_names_in_order)
                  )

    @property
    def _logger(self):
        # Lazily resolved so the module's logging config applies.
        return logging.getLogger(__name__)
def parse_args():
    """Build and evaluate the command-line parser for the MNLI predictor."""
    parser = argparse.ArgumentParser()
    positional = [
        ("input_csv", "The csv file containing data to predict"),
        ("output_csv", "The output csv file"),
        ("model_path", "The model path"),
        ("tokenisor_path", "The tokenisor path"),
    ]
    for arg_name, help_text in positional:
        parser.add_argument(arg_name, help=help_text)
    parser.add_argument("--log-level", help="Log level", default="INFO",
                        choices={"INFO", "WARN", "DEBUG", "ERROR"})
    return parser.parse_args()
def main_run():
    """CLI entry point: parse arguments, configure logging, run prediction."""
    args = parse_args()
    print(args.__dict__)

    # Set up logging
    logging.basicConfig(level=logging.getLevelName(args.log_level), handlers=[logging.StreamHandler(sys.stdout)],
                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # Run.
    # BUG FIX: the original passed a non-existent ``args.src_csv_sep`` as the
    # second positional argument, which raised AttributeError and would have
    # shifted every later constructor argument by one position.
    PredictMnliEntailment(args.input_csv,
                          args.output_csv,
                          args.model_path,
                          args.tokenisor_path
                          )


if __name__ == '__main__':
    main_run()
import csv
import logging
import sys
from src.utils.predict_entailment import PredictEntailment
class PredictMnliEntailment:
    """
    Entailment task predictor for MNLI Tsv dataset
    """

    def __init__(self, prediction_csv_file, outputfile, model_or_path, tokenisor_or_path,
                 do_lower_case=False, max_length=512, batch_size=8):
        """Load the MNLI-style csv, run entailment prediction, write results.

        :param prediction_csv_file: input csv with sentence1/sentence2/label columns
        :param outputfile: destination file for predictions
        :param model_or_path: model instance or path, passed to PredictEntailment
        :param tokenisor_or_path: tokeniser instance or path
        :param do_lower_case: lower-case inputs before tokenising
        :param max_length: maximum token length per example
        :param batch_size: prediction batch size
        """
        sentence_a = []
        sentence_b = []
        labels = []
        with open(prediction_csv_file, "r") as f:
            for r in csv.DictReader(f):
                sentence_a.append(r["sentence1"])
                sentence_b.append(r["sentence2"])
                labels.append(r["label"])
        # Sorted unique labels => column order is reproducible across runs.
        label_names_in_order = sorted(list(set(labels)))
        self._logger.info(f"Using label names in order {label_names_in_order}")
        predictor = PredictEntailment(model_or_path, tokenisor_or_path,
                                      do_lower_case, max_length, batch_size)
        # The callback receives the raw prediction triple and persists it.
        predictor(sentence_a, sentence_b, labels,
                  lambda a, b, c: predictor.write_to_file(outputfile, a, b, c, label_names_in_order)
                  )

    @property
    def _logger(self):
        # Lazily resolved so the module's logging config applies.
        return logging.getLogger(__name__)
def parse_args():
    """Build and evaluate the command-line parser for the MNLI predictor."""
    parser = argparse.ArgumentParser()
    positional = [
        ("input_csv", "The csv file containing data to predict"),
        ("output_csv", "The output csv file"),
        ("model_path", "The model path"),
        ("tokenisor_path", "The tokenisor path"),
    ]
    for arg_name, help_text in positional:
        parser.add_argument(arg_name, help=help_text)
    parser.add_argument("--log-level", help="Log level", default="INFO",
                        choices={"INFO", "WARN", "DEBUG", "ERROR"})
    return parser.parse_args()
def main_run():
    """CLI entry point: parse arguments, configure logging, run prediction."""
    args = parse_args()
    print(args.__dict__)

    # Set up logging
    logging.basicConfig(level=logging.getLevelName(args.log_level), handlers=[logging.StreamHandler(sys.stdout)],
                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # Run.
    # BUG FIX: the original passed a non-existent ``args.src_csv_sep`` as the
    # second positional argument, which raised AttributeError and would have
    # shifted every later constructor argument by one position.
    PredictMnliEntailment(args.input_csv,
                          args.output_csv,
                          args.model_path,
                          args.tokenisor_path
                          )


if __name__ == '__main__':
    main_run()
import asyncio
import logging
# XXX: REMOVE THIS LINE IN PRODUCTION!
logging.basicConfig(format='%(asctime)s %(lineno)d %(levelname)s:%(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Connected client records: maps peername -> the asyncio Task serving that client.
clients = dict()
async def show_tasks():
    """FOR DEBUGGING: log every task known to the running loop, every 5 s.

    FIX: ``asyncio.Task.all_tasks()`` was deprecated in Python 3.7 and removed
    in 3.9; ``asyncio.all_tasks()`` is the supported replacement and is safe
    here because this coroutine only runs inside an active event loop.
    """
    while True:
        await asyncio.sleep(5)
        logger.debug(asyncio.all_tasks())
def client_connected_cb(client_reader, client_writer):
    """Accept a new connection: spawn an echo task and register it in ``clients``."""
    # Use peername as client ID
    client_id = client_writer.get_extra_info('peername')
    logger.info('Client connected: {}'.format(client_id))

    # Define the clean up function here
    def client_cleanup(fu):
        """Done-callback: drop the finished client from the registry."""
        logger.info('Cleaning up client {}'.format(client_id))
        try:  # Retrieve the result and ignore whatever is returned, since it's just cleaning
            fu.result()
        except Exception as e:
            pass
        # Remove the client from client records
        del clients[client_id]

    task = asyncio.ensure_future(client_task(client_reader, client_writer))
    task.add_done_callback(client_cleanup)
    # Add the client and the task to client records
    clients[client_id] = task
async def client_task(reader, writer):
    """Echo every chunk received from *reader* back through *writer* until EOF."""
    peer = writer.get_extra_info('peername')
    logger.info('Start echoing back to {}'.format(peer))
    while True:
        chunk = await reader.read(1024)
        if chunk != b'':
            writer.write(chunk)
            await writer.drain()
            continue
        # An empty read means the peer closed its end of the connection.
        logger.info('Received EOF. Client disconnected.')
        return
if __name__ == '__main__':
    host = 'localhost'
    port = 9009

    loop = asyncio.get_event_loop()
    # FIX: reuse the host/port variables instead of repeating the literals,
    # so the log banner below can never disagree with the actual bind address.
    server_coro = asyncio.start_server(client_connected_cb,
                                       host=host,
                                       port=port,
                                       loop=loop)
    server = loop.run_until_complete(server_coro)
    try:
        logger.info('Serving on {}:{}'.format(host, port))
        loop.run_forever()
    except KeyboardInterrupt as e:
        logger.info('Keyboard interrupted. Exit.')

    # Close the server
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()
import logging
# XXX: REMOVE THIS LINE IN PRODUCTION!
logging.basicConfig(format='%(asctime)s %(lineno)d %(levelname)s:%(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Connected client records: maps peername -> the asyncio Task serving that client.
clients = dict()
async def show_tasks():
    """FOR DEBUGGING: log every task known to the running loop, every 5 s.

    FIX: ``asyncio.Task.all_tasks()`` was deprecated in Python 3.7 and removed
    in 3.9; ``asyncio.all_tasks()`` is the supported replacement and is safe
    here because this coroutine only runs inside an active event loop.
    """
    while True:
        await asyncio.sleep(5)
        logger.debug(asyncio.all_tasks())
def client_connected_cb(client_reader, client_writer):
    """Accept a new connection: spawn an echo task and register it in ``clients``."""
    # Use peername as client ID
    client_id = client_writer.get_extra_info('peername')
    logger.info('Client connected: {}'.format(client_id))

    # Define the clean up function here
    def client_cleanup(fu):
        """Done-callback: drop the finished client from the registry."""
        logger.info('Cleaning up client {}'.format(client_id))
        try:  # Retrieve the result and ignore whatever is returned, since it's just cleaning
            fu.result()
        except Exception as e:
            pass
        # Remove the client from client records
        del clients[client_id]

    task = asyncio.ensure_future(client_task(client_reader, client_writer))
    task.add_done_callback(client_cleanup)
    # Add the client and the task to client records
    clients[client_id] = task
async def client_task(reader, writer):
    """Echo every chunk received from *reader* back through *writer* until EOF."""
    peer = writer.get_extra_info('peername')
    logger.info('Start echoing back to {}'.format(peer))
    while True:
        chunk = await reader.read(1024)
        if chunk != b'':
            writer.write(chunk)
            await writer.drain()
            continue
        # An empty read means the peer closed its end of the connection.
        logger.info('Received EOF. Client disconnected.')
        return
if __name__ == '__main__':
    host = 'localhost'
    port = 9009

    loop = asyncio.get_event_loop()
    # FIX: reuse the host/port variables instead of repeating the literals,
    # so the log banner below can never disagree with the actual bind address.
    server_coro = asyncio.start_server(client_connected_cb,
                                       host=host,
                                       port=port,
                                       loop=loop)
    server = loop.run_until_complete(server_coro)
    try:
        logger.info('Serving on {}:{}'.format(host, port))
        loop.run_forever()
    except KeyboardInterrupt as e:
        logger.info('Keyboard interrupted. Exit.')

    # Close the server
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()
from numpy.random import randn
from numpy import *
# generate some overlapping training vectors:
# two 2-D Gaussian clouds shifted by +/- vec_distance so the classes overlap
num_vectors=5
vec_distance=1
traindat=concatenate((randn(2,num_vectors)-vec_distance,
        randn(2,num_vectors)+vec_distance), axis=1)
label_traindat=concatenate((-ones(num_vectors), ones(num_vectors)));

# modular-example convention: each entry is one argument list for the example function
parameter_list = [[traindat,label_traindat]]
def evaluation_cross_validation_mkl_weight_storage(traindat=traindat, label_traindat=label_traindat):
    """Cross-validate an MKL classifier and record per-fold kernel weights.

    Runs 3 repetitions of stratified 5-fold cross-validation of an
    MKLClassification (LibSVM base learner) over three Gaussian kernels on the
    same features, collecting each fold's learned kernel weights in a
    CrossValidationMKLStorage instance.

    :param traindat: 2 x n feature matrix (two overlapping point clouds)
    :param label_traindat: +/-1 label per column of traindat
    """
    from modshogun import CrossValidation, CrossValidationResult
    from modshogun import CrossValidationPrintOutput
    from modshogun import CrossValidationMKLStorage
    from modshogun import ContingencyTableEvaluation, ACCURACY
    from modshogun import StratifiedCrossValidationSplitting
    from modshogun import BinaryLabels
    from modshogun import RealFeatures, CombinedFeatures
    from modshogun import GaussianKernel, CombinedKernel
    from modshogun import LibSVM, MKLClassification
    from modshogun import Statistics

    # training data, combined features all on same data
    features=RealFeatures(traindat)
    comb_features=CombinedFeatures()
    comb_features.append_feature_obj(features)
    comb_features.append_feature_obj(features)
    comb_features.append_feature_obj(features)
    labels=BinaryLabels(label_traindat)

    # kernel, different Gaussians combined
    kernel=CombinedKernel()
    kernel.append_kernel(GaussianKernel(10, 0.1))
    kernel.append_kernel(GaussianKernel(10, 1))
    kernel.append_kernel(GaussianKernel(10, 2))

    # create mkl using libsvm, due to a mem-bug, interleaved is not possible
    svm=MKLClassification(LibSVM());
    svm.set_interleaved_optimization_enabled(False);
    svm.set_kernel(kernel);

    # splitting strategy for 5 fold cross-validation (for classification its better
    # to use "StratifiedCrossValidation", but the standard
    # "StratifiedCrossValidationSplitting" is also available
    splitting_strategy=StratifiedCrossValidationSplitting(labels, 5)

    # evaluation method
    evaluation_criterium=ContingencyTableEvaluation(ACCURACY)

    # cross-validation instance
    cross_validation=CrossValidation(svm, comb_features, labels,
        splitting_strategy, evaluation_criterium)
    cross_validation.set_autolock(False)

    # append cross validation output classes
    #cross_validation.add_cross_validation_output(CrossValidationPrintOutput())
    mkl_storage=CrossValidationMKLStorage()
    cross_validation.add_cross_validation_output(mkl_storage)
    cross_validation.set_num_runs(3)

    # perform cross-validation
    result=cross_validation.evaluate()

    # print mkl weights
    weights=mkl_storage.get_mkl_weights()
    #print "mkl weights during cross--validation"
    #print weights
    #print "mean per kernel"
    #print Statistics.matrix_mean(weights, False)
    #print "variance per kernel"
    #print Statistics.matrix_variance(weights, False)
    #print "std-dev per kernel"
    #print Statistics.matrix_std_deviation(weights, False)
if __name__=='__main__':
    print('Evaluation CrossValidationClassification')
    # run the example with its default parameter set
    evaluation_cross_validation_mkl_weight_storage(*parameter_list[0])
from numpy.random import randn
from numpy import *
# generate some overlapping training vectors:
# two 2-D Gaussian clouds shifted by +/- vec_distance so the classes overlap
num_vectors=5
vec_distance=1
traindat=concatenate((randn(2,num_vectors)-vec_distance,
        randn(2,num_vectors)+vec_distance), axis=1)
label_traindat=concatenate((-ones(num_vectors), ones(num_vectors)));

# modular-example convention: each entry is one argument list for the example function
parameter_list = [[traindat,label_traindat]]
def evaluation_cross_validation_mkl_weight_storage(traindat=traindat, label_traindat=label_traindat):
    """Cross-validate an MKL classifier and record per-fold kernel weights.

    Runs 3 repetitions of stratified 5-fold cross-validation of an
    MKLClassification (LibSVM base learner) over three Gaussian kernels on the
    same features, collecting each fold's learned kernel weights in a
    CrossValidationMKLStorage instance.

    :param traindat: 2 x n feature matrix (two overlapping point clouds)
    :param label_traindat: +/-1 label per column of traindat
    """
    from modshogun import CrossValidation, CrossValidationResult
    from modshogun import CrossValidationPrintOutput
    from modshogun import CrossValidationMKLStorage
    from modshogun import ContingencyTableEvaluation, ACCURACY
    from modshogun import StratifiedCrossValidationSplitting
    from modshogun import BinaryLabels
    from modshogun import RealFeatures, CombinedFeatures
    from modshogun import GaussianKernel, CombinedKernel
    from modshogun import LibSVM, MKLClassification
    from modshogun import Statistics

    # training data, combined features all on same data
    features=RealFeatures(traindat)
    comb_features=CombinedFeatures()
    comb_features.append_feature_obj(features)
    comb_features.append_feature_obj(features)
    comb_features.append_feature_obj(features)
    labels=BinaryLabels(label_traindat)

    # kernel, different Gaussians combined
    kernel=CombinedKernel()
    kernel.append_kernel(GaussianKernel(10, 0.1))
    kernel.append_kernel(GaussianKernel(10, 1))
    kernel.append_kernel(GaussianKernel(10, 2))

    # create mkl using libsvm, due to a mem-bug, interleaved is not possible
    svm=MKLClassification(LibSVM());
    svm.set_interleaved_optimization_enabled(False);
    svm.set_kernel(kernel);

    # splitting strategy for 5 fold cross-validation (for classification its better
    # to use "StratifiedCrossValidation", but the standard
    # "StratifiedCrossValidationSplitting" is also available
    splitting_strategy=StratifiedCrossValidationSplitting(labels, 5)

    # evaluation method
    evaluation_criterium=ContingencyTableEvaluation(ACCURACY)

    # cross-validation instance
    cross_validation=CrossValidation(svm, comb_features, labels,
        splitting_strategy, evaluation_criterium)
    cross_validation.set_autolock(False)

    # append cross validation output classes
    #cross_validation.add_cross_validation_output(CrossValidationPrintOutput())
    mkl_storage=CrossValidationMKLStorage()
    cross_validation.add_cross_validation_output(mkl_storage)
    cross_validation.set_num_runs(3)

    # perform cross-validation
    result=cross_validation.evaluate()

    # print mkl weights
    weights=mkl_storage.get_mkl_weights()
    #print "mkl weights during cross--validation"
    #print weights
    #print "mean per kernel"
    #print Statistics.matrix_mean(weights, False)
    #print "variance per kernel"
    #print Statistics.matrix_variance(weights, False)
    #print "std-dev per kernel"
    #print Statistics.matrix_std_deviation(weights, False)
if __name__=='__main__':
    print('Evaluation CrossValidationClassification')
    # run the example with its default parameter set
    evaluation_cross_validation_mkl_weight_storage(*parameter_list[0])
# # Functional programming in Python

# Meet our new friends. Here is a list with an incredibly useful description. Later we will see them in action.
#
# 1. ```lambda``` : Declare an anonymous function.
# 2. ```map``` : Map; the function is specified first and then the object.
# 3. ```filter``` : Filter to keep elements that satisfy a criterion.
# 4. ```reduce``` : Apply a cumulative function.
#

# In[2]:

from functools import reduce

# The reduce function is not part of the default namespace, by decision of Python's creator: [Guido](https://github.com/gvanrossum). That is why we must import it from functools, the functional-programming tools module.

# In[10]:

import numpy as np
import seaborn as sns
import pandas as pd

# ## Anonymous functions

# **Use case**: Say we want to compute something quickly, but we do not want to keep a named function for it. Perhaps it is an operation that will run only once and we do not want to "occupy that name"; that is where an anonymous function, or lambda expression, comes in.
#
# 1. **Syntax**:
# $$ f(x) \; = \; x $$

# In[8]:

lambda x: x

# Now with several arguments:
# $$ f(x,y,z) \; = \; x\cdot y\cdot z $$

# In[9]:

lambda x, y, z: x*y*z

# 2. **Evaluation**
#
# $$ f(x) = x^{x}\vert_{3} = 27 $$

# In[11]:

(lambda x: x**x)(3)

# Being anonymous is all very well but, what if I wanted to save my function?

# 3. **Assignment**

# In[13]:

cuadrado = lambda x: x**2

# In[14]:

cuadrado(3)

# 4. Higher-order functions

# In[16]:

aplica_función = lambda x, y: x(y)

# In[17]:

aplica_función(cuadrado, 3)

# 5. **Conditionals**
#
# Say we wanted to know whether a value is positive.

# In[19]:

es_positivo = lambda x: True if x > 0 else False

# In[20]:

es_positivo(3)

# In[22]:

es_positivo(-np.pi)

# ## Mapping

# There are several ways to carry out the same operation. Below we walk through them, from the classics up to the functional form.
#
# **Our task:** square a list of numbers.
# ```python
# x = [1, 2, 3, 4, 5, 6, 7, 8]
# ```

# 1. The traditional, non-Pythonic way:

# In[33]:

x = np.array([1, 2, 3, 4, 5, 6, 7, 8])
y = x.copy()
for i in range(len(x)):
    y[i] = x[i] ** 2
print(x)
print(y)

# 2. A more Pythonic way:

# In[35]:

x = np.array([1, 2, 3, 4, 5, 6, 7, 8])
y = [ valor**2 for valor in x ]
print(x)
print(y)

# In[ ]:
# # Functional programming in Python

# Meet our new friends. Here is a list with an incredibly useful description. Later we will see them in action.
#
# 1. ```lambda``` : Declare an anonymous function.
# 2. ```map``` : Map; the function is specified first and then the object.
# 3. ```filter``` : Filter to keep elements that satisfy a criterion.
# 4. ```reduce``` : Apply a cumulative function.
#

# In[2]:

from functools import reduce

# The reduce function is not part of the default namespace, by decision of Python's creator: [Guido](https://github.com/gvanrossum). That is why we must import it from functools, the functional-programming tools module.

# In[10]:

import numpy as np
import seaborn as sns
import pandas as pd

# ## Anonymous functions

# **Use case**: Say we want to compute something quickly, but we do not want to keep a named function for it. Perhaps it is an operation that will run only once and we do not want to "occupy that name"; that is where an anonymous function, or lambda expression, comes in.
#
# 1. **Syntax**:
# $$ f(x) \; = \; x $$

# In[8]:

lambda x: x

# Now with several arguments:
# $$ f(x,y,z) \; = \; x\cdot y\cdot z $$

# In[9]:

lambda x, y, z: x*y*z

# 2. **Evaluation**
#
# $$ f(x) = x^{x}\vert_{3} = 27 $$

# In[11]:

(lambda x: x**x)(3)

# Being anonymous is all very well but, what if I wanted to save my function?

# 3. **Assignment**

# In[13]:

cuadrado = lambda x: x**2

# In[14]:

cuadrado(3)

# 4. Higher-order functions

# In[16]:

aplica_función = lambda x, y: x(y)

# In[17]:

aplica_función(cuadrado, 3)

# 5. **Conditionals**
#
# Say we wanted to know whether a value is positive.

# In[19]:

es_positivo = lambda x: True if x > 0 else False

# In[20]:

es_positivo(3)

# In[22]:

es_positivo(-np.pi)

# ## Mapping

# There are several ways to carry out the same operation. Below we walk through them, from the classics up to the functional form.
#
# **Our task:** square a list of numbers.
# ```python
# x = [1, 2, 3, 4, 5, 6, 7, 8]
# ```

# 1. The traditional, non-Pythonic way:

# In[33]:

x = np.array([1, 2, 3, 4, 5, 6, 7, 8])
y = x.copy()
for i in range(len(x)):
    y[i] = x[i] ** 2
print(x)
print(y)

# 2. A more Pythonic way:

# In[35]:

x = np.array([1, 2, 3, 4, 5, 6, 7, 8])
y = [ valor**2 for valor in x ]
print(x)
print(y)

# In[ ]:
import json
import networkx as nx
from typing import List, Set, Tuple
from nltk.corpus import stopwords
from collections import defaultdict, Counter
def tuple_contains(tup1: Tuple, tup2: Tuple) -> Tuple[bool, int]:
    """Check whether tuple 1 contains tuple 2 as a contiguous slice.

    Returns (True, start_index) at the first match, else (False, -1).
    """
    window = len(tup2)
    for start in range(len(tup1) - window + 1):
        if tup1[start:start + window] == tup2:
            return True, start
    return False, -1
def get_ngrams(words):
    """Yield every contiguous non-empty n-gram of *words* as a tuple.

    FIX: the original nested ``range`` logic started the inner index at ``i``,
    which skipped short n-grams at later positions (e.g. for ['a','b','c'] the
    unigram ('c',) was never produced). Enumerating all (start, end) pairs
    yields each contiguous n-gram exactly once.
    """
    for start in range(len(words)):
        for end in range(start + 1, len(words) + 1):
            yield tuple(words[start:end])
def get_base_forms(label_set: Set[str]):
    """Determine singular forms for tokens appearing in *label_set*.

    A token maps to its singular only when both forms occur somewhere in the
    labels; handles the trailing '-s' and '-ies'/'-y' plural patterns.
    """
    tokens = set([t for label in label_set for t in label.split(' ')])
    base_forms = dict()
    for tok in tokens:
        # the plain '-s' rule wins when both rules could apply
        if tok.endswith('s') and tok[:-1] in tokens:
            base_forms[tok] = tok[:-1]
        elif tok.endswith('ies') and tok[:-3] + 'y' in tokens:
            base_forms[tok] = tok[:-3] + 'y'
    return base_forms
def real_label_hierarchy_graph(y) -> nx.DiGraph:
    """Build a containment hierarchy over the *actual* label set.

    Labels are lemmatised (plural tokens mapped to singular) and an edge
    label -> label2 is added whenever label2 occurs as a contiguous sub-tuple
    of label.  Node attributes: 'weight' (occurrence count over y) and
    'real_label' (always True here, matching label_hierarchy_graph's schema).

    :param y: list of label-lists, one per document
    :return: directed graph whose nodes are lemma tuples
    """
    print('Getting token baseforms')
    label_list = list(set([l for labels in y for l in labels]))
    base_forms = get_base_forms(set(label_list))

    print('Lemmatizing labels')
    label_set_lemmas = set()
    label2lemma = dict()
    for label in label_list:
        lemmas = tuple([base_forms.get(w, w) for w in label.split(' ')])
        label2lemma[label] = lemmas
        label_set_lemmas.add(lemmas)

    y_lemmas = []
    stop_words = set(stopwords.words('english'))
    for labels in y:
        labels_lemmas = []
        for label in labels:
            label_words = label.split(' ')
            # Drop labels that consist of stop words only.
            filtered = [l for l in label_words if l not in stop_words]
            if filtered:
                labels_lemmas.append(label2lemma[label])
        y_lemmas.append(set(labels_lemmas))
    label_counts = Counter([l for labels in y_lemmas for l in labels])

    print('Populating graph based on {} labels'.format(len(label_counts)))
    g = nx.DiGraph()
    label_lemmas = sorted(label_counts.keys(), key=len, reverse=True)
    label_lemmas_by_lengths = defaultdict(list)
    for label in label_lemmas:  # Bucket ngrams by lengths for faster comparison
        label_lemmas_by_lengths[len(label)].append(label)
    sorted_lengths = sorted(label_lemmas_by_lengths.keys(), reverse=True)
    proc_cnt = 0
    for i, length in enumerate(sorted_lengths):
        for label in label_lemmas_by_lengths[length]:
            proc_cnt += 1
            print(str(proc_cnt) + '\r', end='', flush=True)
            # Only strictly shorter tuples can be contained, so compare
            # against the remaining (shorter) length buckets only.
            for length2 in sorted_lengths[i+1:]:
                for label2 in label_lemmas_by_lengths[length2]:
                    if tuple_contains(label, label2)[0]:
                        g.add_edge(label, label2)
    nx.set_node_attributes(g, label_counts, 'weight')
    nx.set_node_attributes(g, {l: True for l in label_counts}, 'real_label')
    return g
def label_hierarchy_graph(y) -> nx.DiGraph:
    """Build a containment hierarchy over labels *and* their sub-n-grams.

    Unlike real_label_hierarchy_graph, synthetic nodes (n-grams that never
    occur as a full label) are also added so related labels can share a
    common ancestor.  Node attributes: 'real_label' (whether the tuple is an
    actual label) and 'weight' (occurrence count, real labels only).

    :param y: list of label-lists, one per document
    :return: directed graph whose nodes are lemma tuples
    """
    print('Getting token baseforms')
    label_list = list(set([l for labels in y for l in labels]))
    base_forms = get_base_forms(set(label_list))

    print('Lemmatizing labels and counting ngrams')
    ngram_counts = Counter()
    label_set_lemmas = set()
    label2lemma = dict()
    stop_words = set(stopwords.words('english'))
    for label in label_list:
        lemmas = tuple([base_forms.get(w, w) for w in label.split(' ')])
        label2lemma[label] = lemmas
        label_set_lemmas.add(lemmas)
        for ngram in get_ngrams(lemmas):
            # NOTE(review): ngram is a tuple and label is a str, so this
            # comparison can never be True — the full-label branch looks dead;
            # probably ``ngram == lemmas`` was intended. TODO confirm.
            if ngram == label:
                ngram_counts[ngram] += 1
            else:
                # Filter ngrams that consist of stopwords only, or those that have stop words at borders
                filtered = [l for l in ngram if l not in stop_words]
                if filtered:
                    if not ngram[-1] in stop_words and not ngram[0] in stop_words:
                        ngram_counts[ngram] += 1
    print('Found', len(ngram_counts), 'ngrams')

    print('Populating graph')
    g = nx.DiGraph()
    ngrams = sorted(ngram_counts.keys(), key=len, reverse=True)
    ngrams_by_lengths = defaultdict(list)
    for ngram in ngrams:  # Bucket ngrams by lengths for faster comparison
        ngrams_by_lengths[len(ngram)].append(ngram)
    sorted_lengths_ngrams = sorted(ngrams_by_lengths.keys(), reverse=True)
    proc_cnt = 0
    for i, length in enumerate(sorted_lengths_ngrams):
        for ngram in ngrams_by_lengths[length]:
            proc_cnt += 1
            print(str(proc_cnt) + '\r', end='', flush=True)
            for length2 in sorted_lengths_ngrams[i+1:]:
                for ngram2 in ngrams_by_lengths[length2]:
                    # break => each node links to at most one shorter ngram
                    # per length bucket
                    if tuple_contains(ngram, ngram2)[0]:
                        g.add_edge(ngram, ngram2)
                        break
    real_labels = {l: True if l in label_set_lemmas else False for l in ngram_counts.keys()}
    nx.set_node_attributes(g, real_labels, 'real_label')
    label_counts = Counter(l for labels in y for l in labels)
    label_counts_lemmas = {label2lemma[l]: c for l, c in label_counts.items()}
    nx.set_node_attributes(g, label_counts_lemmas, 'weight')
    return g
def prune_real_graph(g: nx.DiGraph) -> nx.DiGraph:
    """Transitively reduce the real-label graph in place, repeating until stable.

    Removes every edge from a node to a 'grandparent': a successor that is
    also reachable through another successor.
    """
    while True:
        old_edge_count, old_node_count = len(g.edges()), len(g.nodes())
        # Remove edges to grandparents
        del_edges = []
        for node in g.nbunch_iter():
            neighbors = list(g.successors(node))
            for neighbor in neighbors:
                # neighbor_neighbors = list(g.successors(neighbor))
                neighbor_descendants = nx.descendants(g, neighbor)
                shared_neighbors = [n for n in neighbor_descendants if n in neighbors]
                if shared_neighbors:
                    # Remove edges from node to shared neighbors
                    for shared_neighbor in shared_neighbors:
                        del_edges.append((node, shared_neighbor))
        g.remove_edges_from(del_edges)
        # Nodes are never removed here, so the node-count check is effectively
        # about edges only; kept for symmetry with prune_graph.
        if len(g.edges()) == old_edge_count and len(g.nodes()) == old_node_count:
            break
    return g
def prune_graph(g: nx.DiGraph) -> nx.DiGraph:
    """Iteratively simplify the hierarchy graph in place until a fixpoint.

    Two reductions per pass:
      1. transitive reduction: drop an edge to a successor that is already
         reachable through another successor (a 'grandparent' edge);
      2. collapse synthetic (non-real-label) nodes that have exactly one
         predecessor, re-linking that child directly to the node's parents.
    """
    while True:
        old_edge_count, old_node_count = len(g.edges()), len(g.nodes())
        # Remove edges to grandparents
        del_edges = []
        for node in g.nbunch_iter():
            neighbors = list(g.successors(node))
            for neighbor in neighbors:
                # neighbor_neighbors = list(g.successors(neighbor))
                neighbor_descendants = nx.descendants(g, neighbor)
                shared_neighbors = [n for n in neighbor_descendants if n in neighbors]
                if shared_neighbors:
                    # Remove edges from node to shared neighbors
                    for shared_neighbor in shared_neighbors:
                        del_edges.append((node, shared_neighbor))
        g.remove_edges_from(del_edges)
        # Remove synthetic nodes with only one predecessor;
        # link predecessor to successors directly
        single_successors_synthetic_nodes = [n for n in g.nbunch_iter() if len(list(g.predecessors(n))) == 1
                                             and not g.nodes()[n]['real_label']]
        for node in single_successors_synthetic_nodes:
            child = list(g.predecessors(node))[0]
            parents = list(g.successors(node))
            if parents:
                for parent in parents:
                    g.add_edge(child, parent)
            g.remove_node(node)
        if len(g.edges()) == old_edge_count and len(g.nodes()) == old_node_count:
            break
    return g
def add_ancestor_support(g):
    """Annotate every node of *g* with the summed 'weight' of its ancestors.

    BUG FIX: the original called ``nx.ancestors(graph, node)`` — the
    module-level global built in ``__main__`` — instead of the ``g``
    parameter, so the function silently computed support against the wrong
    graph (or raised NameError) for any other input.
    """
    for node in g.nodes():
        ancestor_support = sum(g.nodes()[anc].get('weight', 0) for anc in nx.ancestors(g, node))
        g.nodes()[node]['ancestor support'] = ancestor_support
    return g
if __name__ == '__main__':
    corpus_file = 'sec_corpus_2016-2019_clean.jsonl'
    print('Loading data from', corpus_file)
    # y holds one list of label strings per provision in the corpus.
    y: List[List[str]] = []
    for line in open(corpus_file):
        labeled_provision = json.loads(line)
        y.append(labeled_provision['label'])

    # Build, prune and annotate the label hierarchy, then export for Gephi.
    graph = real_label_hierarchy_graph(y)
    graph = prune_graph(graph)
    graph = add_ancestor_support(graph)
    nx.write_gexf(graph, corpus_file.replace('.jsonl', '_label_hierarchy.gexf'))
import networkx as nx
from typing import List, Set, Tuple
from nltk.corpus import stopwords
from collections import defaultdict, Counter
def tuple_contains(tup1: Tuple, tup2: Tuple) -> Tuple[bool, int]:
    """Check whether tuple 1 contains tuple 2 as a contiguous slice.

    Returns (True, start_index) at the first match, else (False, -1).
    """
    window = len(tup2)
    for start in range(len(tup1) - window + 1):
        if tup1[start:start + window] == tup2:
            return True, start
    return False, -1
def get_ngrams(words):
    """Yield every contiguous non-empty n-gram of *words* as a tuple.

    FIX: the original nested ``range`` logic started the inner index at ``i``,
    which skipped short n-grams at later positions (e.g. for ['a','b','c'] the
    unigram ('c',) was never produced). Enumerating all (start, end) pairs
    yields each contiguous n-gram exactly once.
    """
    for start in range(len(words)):
        for end in range(start + 1, len(words) + 1):
            yield tuple(words[start:end])
def get_base_forms(label_set: Set[str]):
    """Determine singular forms for tokens appearing in *label_set*.

    A token maps to its singular only when both forms occur somewhere in the
    labels; handles the trailing '-s' and '-ies'/'-y' plural patterns.
    """
    tokens = set([t for label in label_set for t in label.split(' ')])
    base_forms = dict()
    for tok in tokens:
        # the plain '-s' rule wins when both rules could apply
        if tok.endswith('s') and tok[:-1] in tokens:
            base_forms[tok] = tok[:-1]
        elif tok.endswith('ies') and tok[:-3] + 'y' in tokens:
            base_forms[tok] = tok[:-3] + 'y'
    return base_forms
def real_label_hierarchy_graph(y) -> nx.DiGraph:
    """Build a containment hierarchy over the observed (real) labels only.

    Args:
        y: List of label lists, one list per document/provision.

    Returns:
        A DiGraph whose nodes are lemmatized labels (tuples of tokens);
        an edge u -> v means label u textually contains label v. Nodes
        carry their corpus frequency ('weight') and real_label=True.
    """
    print('Getting token baseforms')
    label_list = list(set([l for labels in y for l in labels]))
    base_forms = get_base_forms(set(label_list))
    print('Lemmatizing labels')
    label_set_lemmas = set()
    label2lemma = dict()
    for label in label_list:
        # Each label is represented as a tuple of singularized tokens.
        lemmas = tuple([base_forms.get(w, w) for w in label.split(' ')])
        label2lemma[label] = lemmas
        label_set_lemmas.add(lemmas)
    y_lemmas = []
    stop_words = set(stopwords.words('english'))
    for labels in y:
        labels_lemmas = []
        for label in labels:
            label_words = label.split(' ')
            # Drop labels that consist of stop words only.
            filtered = [l for l in label_words if l not in stop_words]
            if filtered:
                labels_lemmas.append(label2lemma[label])
        # Per-document label sets, so duplicates within one doc count once.
        y_lemmas.append(set(labels_lemmas))
    label_counts = Counter([l for labels in y_lemmas for l in labels])
    print('Populating graph based on {} labels'.format(len(label_counts)))
    g = nx.DiGraph()
    label_lemmas = sorted(label_counts.keys(), key=len, reverse=True)
    label_lemmas_by_lengths = defaultdict(list)
    for label in label_lemmas:  # Bucket ngrams by lengths for faster comparison
        label_lemmas_by_lengths[len(label)].append(label)
    sorted_lengths = sorted(label_lemmas_by_lengths.keys(), reverse=True)
    proc_cnt = 0
    for i, length in enumerate(sorted_lengths):
        for label in label_lemmas_by_lengths[length]:
            proc_cnt += 1
            print(str(proc_cnt) + '\r', end='', flush=True)
            # Only strictly shorter labels can be contained in this one.
            for length2 in sorted_lengths[i+1:]:
                for label2 in label_lemmas_by_lengths[length2]:
                    if tuple_contains(label, label2)[0]:
                        g.add_edge(label, label2)
    nx.set_node_attributes(g, label_counts, 'weight')
    nx.set_node_attributes(g, {l: True for l in label_counts}, 'real_label')
    return g
def label_hierarchy_graph(y) -> nx.DiGraph:
    """Build a containment hierarchy over all label ngrams.

    Like :func:`real_label_hierarchy_graph`, but nodes are all counted
    ngrams of the lemmatized labels, so the hierarchy may contain
    synthetic inner nodes (``real_label=False``) that never occur as
    labels themselves.
    """
    print('Getting token baseforms')
    label_list = list(set([l for labels in y for l in labels]))
    base_forms = get_base_forms(set(label_list))
    print('Lemmatizing labels and counting ngrams')
    ngram_counts = Counter()
    label_set_lemmas = set()
    label2lemma = dict()
    stop_words = set(stopwords.words('english'))
    for label in label_list:
        lemmas = tuple([base_forms.get(w, w) for w in label.split(' ')])
        label2lemma[label] = lemmas
        label_set_lemmas.add(lemmas)
        for ngram in get_ngrams(lemmas):
            # Bug fix: compare against the lemmatized label tuple.
            # `ngram == label` compared a tuple to a string and was never
            # true, so complete labels with stop words at their borders
            # were silently dropped from the counts.
            if ngram == lemmas:
                ngram_counts[ngram] += 1
            else:
                # Filter ngrams that consist of stopwords only, or those that have stop words at borders
                filtered = [l for l in ngram if l not in stop_words]
                if filtered:
                    if not ngram[-1] in stop_words and not ngram[0] in stop_words:
                        ngram_counts[ngram] += 1
    print('Found', len(ngram_counts), 'ngrams')
    print('Populating graph')
    g = nx.DiGraph()
    ngrams = sorted(ngram_counts.keys(), key=len, reverse=True)
    ngrams_by_lengths = defaultdict(list)
    for ngram in ngrams:  # Bucket ngrams by lengths for faster comparison
        ngrams_by_lengths[len(ngram)].append(ngram)
    sorted_lengths_ngrams = sorted(ngrams_by_lengths.keys(), reverse=True)
    proc_cnt = 0
    for i, length in enumerate(sorted_lengths_ngrams):
        for ngram in ngrams_by_lengths[length]:
            proc_cnt += 1
            print(str(proc_cnt) + '\r', end='', flush=True)
            for length2 in sorted_lengths_ngrams[i+1:]:
                for ngram2 in ngrams_by_lengths[length2]:
                    if tuple_contains(ngram, ngram2)[0]:
                        g.add_edge(ngram, ngram2)
                        # NOTE(review): the original `break` placement is
                        # ambiguous in this dump; assumed here to stop after
                        # the first contained ngram per length bucket --
                        # confirm against the upstream repository.
                        break
    real_labels = {l: True if l in label_set_lemmas else False for l in ngram_counts.keys()}
    nx.set_node_attributes(g, real_labels, 'real_label')
    label_counts = Counter(l for labels in y for l in labels)
    label_counts_lemmas = {label2lemma[l]: c for l, c in label_counts.items()}
    nx.set_node_attributes(g, label_counts_lemmas, 'weight')
    return g
def prune_real_graph(g: nx.DiGraph) -> nx.DiGraph:
    """Transitively reduce the label graph: drop edges to indirect descendants.

    Mutates *g* in place. Repeats until a full pass changes neither the
    edge nor the node count, so only "shortest" hierarchy edges survive.
    """
    while True:
        old_edge_count, old_node_count = len(g.edges()), len(g.nodes())
        # Remove edges to grandparents
        del_edges = []
        for node in g.nbunch_iter():
            neighbors = list(g.successors(node))
            for neighbor in neighbors:
                # neighbor_neighbors = list(g.successors(neighbor))
                neighbor_descendants = nx.descendants(g, neighbor)
                # Direct successors of `node` that are also reachable
                # through `neighbor` are redundant shortcut edges.
                shared_neighbors = [n for n in neighbor_descendants if n in neighbors]
                if shared_neighbors:
                    # Remove edges from node to shared neighbors
                    for shared_neighbor in shared_neighbors:
                        del_edges.append((node, shared_neighbor))
        g.remove_edges_from(del_edges)
        if len(g.edges()) == old_edge_count and len(g.nodes()) == old_node_count:
            break
    return g
def prune_graph(g: nx.DiGraph) -> nx.DiGraph:
    """Prune the hierarchy graph in place until it reaches a fixed point.

    Each pass (a) transitively reduces the graph by deleting edges that
    are implied by a longer path, and (b) splices out synthetic (ngram)
    nodes with exactly one predecessor, reconnecting that predecessor
    directly to the removed node's successors.
    """
    while True:
        old_edge_count, old_node_count = len(g.edges()), len(g.nodes())
        # Remove edges to grandparents
        del_edges = []
        for node in g.nbunch_iter():
            neighbors = list(g.successors(node))
            for neighbor in neighbors:
                # neighbor_neighbors = list(g.successors(neighbor))
                neighbor_descendants = nx.descendants(g, neighbor)
                shared_neighbors = [n for n in neighbor_descendants if n in neighbors]
                if shared_neighbors:
                    # Remove edges from node to shared neighbors
                    for shared_neighbor in shared_neighbors:
                        del_edges.append((node, shared_neighbor))
        g.remove_edges_from(del_edges)
        # Remove synthetic nodes with only one predecessor;
        # link predecessor to successors directly
        single_successors_synthetic_nodes = [n for n in g.nbunch_iter() if len(list(g.predecessors(n))) == 1
                                             and not g.nodes()[n]['real_label']]
        for node in single_successors_synthetic_nodes:
            child = list(g.predecessors(node))[0]
            parents = list(g.successors(node))
            if parents:
                for parent in parents:
                    g.add_edge(child, parent)
            g.remove_node(node)
        if len(g.edges()) == old_edge_count and len(g.nodes()) == old_node_count:
            break
    return g
def add_ancestor_support(g):
    """Annotate each node with the summed 'weight' of all its ancestors.

    Args:
        g: Directed label-hierarchy graph whose nodes may carry a
            'weight' attribute (label frequency).

    Returns:
        The same graph, with an 'ancestor support' attribute added to
        every node.
    """
    for node in g.nodes():
        # Bug fix: query ancestors on the argument `g`, not the module-level
        # `graph` name that only exists when the file is run as a script.
        ancestor_support = sum(
            g.nodes()[anc].get('weight', 0) for anc in nx.ancestors(g, node)
        )
        g.nodes()[node]['ancestor support'] = ancestor_support
    return g
if __name__ == '__main__':
    corpus_file = 'sec_corpus_2016-2019_clean.jsonl'
    print('Loading data from', corpus_file)
    # One list of label strings per provision in the JSONL corpus.
    y: List[List[str]] = []
    for line in open(corpus_file):
        labeled_provision = json.loads(line)
        y.append(labeled_provision['label'])
    # Build the hierarchy over observed labels only, transitively reduce
    # it, then annotate every node with its ancestors' summed weight.
    graph = real_label_hierarchy_graph(y)
    graph = prune_graph(graph)
    graph = add_ancestor_support(graph)
    nx.write_gexf(graph, corpus_file.replace('.jsonl', '_label_hierarchy.gexf'))
import weakref
from typing import Any, Mapping, Sequence, TypeVar, Union
from annotypes import Anno, Array
from malcolm.core import Context, Hook, Part
from .infos import LayoutInfo, PortInfo
from .util import LayoutTable
with Anno("The part that has attached to the Hook"):
APart = Part
with Anno("Context that should be used to perform operations on child blocks"):
AContext = Context
with Anno("Whether this operation is taking place at init"):
AInit = bool
T = TypeVar("T")
class ControllerHook(Hook[T]):
"""A hook that takes Part and Context for use in controllers"""
def __init__(self, part: APart, context: AContext, **kwargs: Any) -> None:
# Pass a weak reference to our children
super().__init__(part, context=weakref.proxy(context), **kwargs)
# But hold a strong reference here to stop it disappearing
self.context = context
def prepare(self) -> None:
# context might have been aborted but have nothing servicing
# the queue, we still want the legitimate messages on the
# queue so just tell it to ignore stops it got before now
self.context.ignore_stops_before_now()
def stop(self) -> None:
self.context.stop()
class InitHook(ControllerHook):
"""Called when this controller is told to start by the process"""
class ResetHook(ControllerHook):
"""Called at reset() to reset all parts to a known good state"""
class HaltHook(ControllerHook):
"""Called when this controller is told to halt"""
class DisableHook(ControllerHook):
"""Called at disable() to stop all parts updating their attributes"""
with Anno("The PortInfos for all the parts"):
APortMap = Union[Mapping[str, Array[PortInfo]]]
with Anno(
"A possibly partial set of changes to the layout table that " "should be acted on"
):
ALayoutTable = LayoutTable
with Anno("The current layout information"):
ALayoutInfos = Union[Array[LayoutInfo]]
ULayoutInfos = Union[ALayoutInfos, Sequence[LayoutInfo], LayoutInfo, None]
class LayoutHook(ControllerHook):
"""Called when layout table set and at init to update child layout"""
def __init__(
self, part: APart, context: AContext, ports: APortMap, layout: ALayoutTable
) -> None:
super().__init__(part, context, ports=ports, layout=layout)
def validate_return(self, ret: ULayoutInfos) -> ALayoutInfos:
"""Check that all returned infos are LayoutInfos"""
return ALayoutInfos(ret)
with Anno("The serialized structure to load"):
AStructure = Union[Mapping[str, Any]]
class LoadHook(ControllerHook):
"""Called at load() to load child settings from a structure"""
def __init__(
self, part: APart, context: AContext, structure: AStructure, init: AInit
) -> None:
super().__init__(part, context, structure=structure, init=init)
class SaveHook(ControllerHook):
"""Called at save() to serialize child settings into a dict structure"""
def validate_return(self, ret: AStructure) -> AStructure:
"""Check that a serialized structure is returned"""
assert isinstance(ret, dict), "Expected a structure, got %r" % (ret,)
return ret | malcolm/modules/builtin/hooks.py | import weakref
from typing import Any, Mapping, Sequence, TypeVar, Union
from annotypes import Anno, Array
from malcolm.core import Context, Hook, Part
from .infos import LayoutInfo, PortInfo
from .util import LayoutTable
with Anno("The part that has attached to the Hook"):
APart = Part
with Anno("Context that should be used to perform operations on child blocks"):
AContext = Context
with Anno("Whether this operation is taking place at init"):
AInit = bool
T = TypeVar("T")
class ControllerHook(Hook[T]):
    """A hook that takes Part and Context for use in controllers"""
    def __init__(self, part: APart, context: AContext, **kwargs: Any) -> None:
        """Create the hook, forwarding only a weakref proxy of the context.

        Children therefore cannot keep the context alive; this instance
        holds the only strong reference.
        """
        # Pass a weak reference to our children
        super().__init__(part, context=weakref.proxy(context), **kwargs)
        # But hold a strong reference here to stop it disappearing
        self.context = context
    def prepare(self) -> None:
        """Ready the held context before the hook runs."""
        # context might have been aborted but have nothing servicing
        # the queue, we still want the legitimate messages on the
        # queue so just tell it to ignore stops it got before now
        self.context.ignore_stops_before_now()
    def stop(self) -> None:
        """Stop any operation in progress on the held context."""
        self.context.stop()
class InitHook(ControllerHook):
"""Called when this controller is told to start by the process"""
class ResetHook(ControllerHook):
"""Called at reset() to reset all parts to a known good state"""
class HaltHook(ControllerHook):
"""Called when this controller is told to halt"""
class DisableHook(ControllerHook):
"""Called at disable() to stop all parts updating their attributes"""
with Anno("The PortInfos for all the parts"):
APortMap = Union[Mapping[str, Array[PortInfo]]]
with Anno(
"A possibly partial set of changes to the layout table that " "should be acted on"
):
ALayoutTable = LayoutTable
with Anno("The current layout information"):
ALayoutInfos = Union[Array[LayoutInfo]]
ULayoutInfos = Union[ALayoutInfos, Sequence[LayoutInfo], LayoutInfo, None]
class LayoutHook(ControllerHook):
    """Called when layout table set and at init to update child layout"""
    def __init__(
        self, part: APart, context: AContext, ports: APortMap, layout: ALayoutTable
    ) -> None:
        """Run the hook with the parts' port map and the requested layout."""
        super().__init__(part, context, ports=ports, layout=layout)
    def validate_return(self, ret: ULayoutInfos) -> ALayoutInfos:
        """Check that all returned infos are LayoutInfos"""
        # Array coercion raises if any element is not a LayoutInfo.
        return ALayoutInfos(ret)
with Anno("The serialized structure to load"):
AStructure = Union[Mapping[str, Any]]
class LoadHook(ControllerHook):
    """Called at load() to load child settings from a structure"""
    def __init__(
        self, part: APart, context: AContext, structure: AStructure, init: AInit
    ) -> None:
        """Run the hook with the serialized settings and the init-time flag."""
        super().__init__(part, context, structure=structure, init=init)
class SaveHook(ControllerHook):
"""Called at save() to serialize child settings into a dict structure"""
def validate_return(self, ret: AStructure) -> AStructure:
"""Check that a serialized structure is returned"""
assert isinstance(ret, dict), "Expected a structure, got %r" % (ret,)
return ret | 0.880784 | 0.288958 |
from keras.layers import concatenate
from keras import optimizers
from keras.callbacks import EarlyStopping, ModelCheckpoint
import numpy as np
import json
from sanspy.utils import get_df, alias, mem_usage
from sanspy.callbacks import SaveHistoryEpochEnd
class Model(object):
"""
Model class as a wrapper for Keras. Contains all the information required for building the model, training and testing.
"""
def __init__(self, data_path=None, weights_load=None, weights_save='weights.h5', history_path='history.json',
score_path='score.json',
M_type='M1', lr=0.0001, patience=5, l2=None, skip=None, norm_in=False,
optimizer=optimizers.adam, loss='mse', merge=concatenate,
max_filters=64, activation='relu', kernel_size=3, n_in=4, n_out=3, pp=3, ww=None, sigma=0.5,
n_epochs=100, batch_size=6, queue_size=2, truncate=None, verbose=True):
"""
Model initializer
Args:
data_path: Root path of the dataset.
weights_load: Weights filename required to restart a model from some training state.
weights_save: Weigths filename to save a training state.
history_path: Training history output file.
score_path: Test score output file.
M_type: Model type. See sanspy.utils.alias. Eg: 'M3i2o_1'.
lr: Learning rate.
patience: Number of epochs of patience before early stop is triggered during training.
l2: Regularization value.
skip: Input fields to skip in the data_path. Eg: ['x/P', 'y/ww'].
norm_in: Boolean for normalizing input fields.
optimizer: Keras optimizer.
loss: Keras loss whch is selected from sanspy.losses. Eg: 'mse'.
merge: Keras merging operator.
max_filters: Number of maximum filters on the thickest layer.
activation: Keras activation function.
kernel_size: Convolutional kernel size.
n_in: Number of inputs.
n_out: Number of outputs.
pp: Last layer processing operation:
0: No processing (linear output)
1: Linear + Wake mask
2: Linear + Gaussian filter
3: Linear + Wake mask + Gaussian filter
ww: Type of wake detection layer. 'length' for wake width and 'eps' for threshhold value.
sigma: Gaussian filter layer sigma.
n_epochs: Max number of epochs for training.
batch_size: Size of the training batch.
queue_size: Generetor queue, ie how many samples are loaded in the queue as the batches are being dynamically generated.
truncate: Number of samples to use. If `None`, all samples in the dataset folders are used.
verbose: Boolean to print Keras model information and memory requirements.
"""
self.data_path = data_path
self.weights_load = weights_load
self.weights_save = weights_save
self.history_path = history_path
self.score_path = score_path
self.lr = lr
self.lr_decay = lr*0.1
self.patience = patience
self.optimizer = optimizer
self.loss = alias.get(loss, None)
self.merge = merge
self.max_filters = max_filters
self.activation = activation
self.kernel_size = kernel_size
self.n_in = n_in
self.n_out = n_out
self.pp = pp
self.ww = ww
self.sigma = sigma
self.n_epochs = n_epochs
self.batch_size = batch_size
self.queue_size = queue_size
self.truncate = truncate
self.model = None
self.history = None
self.eval_score = None
self.M_type = M_type
self.l2 = l2
self.skip = skip
self.norm_in = norm_in
model_type = alias.get(M_type, None)[0]
self.stacked_in = alias.get(M_type, None)[2]
if self.stacked_in: self.merge = None
model_params = {'merge': self.merge,
'max_filters': self.max_filters,
'activation': self.activation,
'kernel_size': self.kernel_size,
'optimizer': self.optimizer,
'loss': self.loss,
'lr': self.lr,
'lr_decay': self.lr_decay,
'l2': self.l2,
'pp': self.pp,
'ww': self.ww,
'sigma': self.sigma,}
self.model = model_type(**model_params)
if verbose:
print(self.model.summary())
print('Mem usage = {:.2} GB'.format(mem_usage(self.batch_size, self.model)))
print(M_type)
return
def fit(self):
"""
Fit model.
Returns: History object containing the training metrics at each epoch.
"""
df_fit = get_df(self.data_path, 'fit', truncate=self.truncate, skip=self.skip)
df_val = get_df(self.data_path, 'validate', truncate=self.truncate, skip=self.skip)
print(list(df_fit))
# Generators
gen = alias.get(self.M_type, None)[1]
gen_fit = gen(df_fit, n_in=self.n_in, n_out=self.n_out, stacked_in=self.stacked_in,
batch_size=self.batch_size, shuffle=True, norm_in=self.norm_in,
ww=self.ww)
gen_val = gen(df_val, n_in=self.n_in, n_out=self.n_out, stacked_in=self.stacked_in,
batch_size=self.batch_size, shuffle=False, norm_in=self.norm_in,
ww=self.ww)
# Callbacks
es = EarlyStopping(monitor='val_loss', patience=self.patience, verbose=1)
mc = ModelCheckpoint(self.weights_save+'_{epoch:03d}.h5', save_weights_only=True, verbose=0) # Save weights after every epoch
she = SaveHistoryEpochEnd(history_path=self.history_path) # Save history after every epoch
steps_per_epoch_train = np.ceil(df_fit.shape[0] / self.batch_size)
steps_per_epoch_val = np.ceil(df_val.shape[0] / self.batch_size)
if self.weights_load is not None: self.model.load_weights(self.weights_load)
self.history = self.model.fit_generator(generator=gen_fit,
steps_per_epoch=steps_per_epoch_train,
epochs=self.n_epochs,
validation_data=gen_val,
validation_steps=steps_per_epoch_val,
max_queue_size=self.queue_size,
callbacks=[es, mc, she],
verbose=1)
return self.history
def evaluate(self, dataset='evaluate', shuffle=False, seed=None):
"""
Test model.
Args:
dataset: Dataset within the data_path: 'evaluate' or 'predict'.
shuffle: Boolean to shuffle the samples order.
seed: Seed for shuffle.
Returns: A dictionary containing the test score.
"""
df_eval = get_df(self.data_path, dataset, truncate=self.truncate, skip=self.skip)
print(list(df_eval))
gen = alias.get(self.M_type, None)[1]
gen_eval = gen(df_eval, n_in=self.n_in, n_out=self.n_out, stacked_in=self.stacked_in,
batch_size=self.batch_size, shuffle=shuffle, seed=seed, norm_in=self.norm_in,
ww=self.ww)
if self.weights_load is not None:
self.model.load_weights(self.weights_load)
else:
raise ValueError('Incorrect path of weights to load.')
steps_eval = np.ceil(df_eval.shape[0] / self.batch_size)
score = self.model.evaluate_generator(generator=gen_eval,
steps=steps_eval,
max_queue_size=self.queue_size,
verbose=1)
self.eval_score = {}
print('')
for m, s in zip(self.model.metrics_names, score):
self.eval_score[m] = s
if 'loss' == m:
print('{}: {:.4e}'.format(m,s))
elif 'cc' in m:
print('{}: {:.4f}'.format(m,s))
if self.score_path is not None:
with open(self.score_path, 'w') as f:
json.dump(self.eval_score, f)
return self.eval_score
def predict(self, dataset='predict', shuffle=False, seed=None):
"""
Similar to `evaluate` but for the 'predict' dataset which returns the predictions (output fields).
Args:
dataset: Dataset within the data_path: 'evaluate' or 'predict'.
shuffle: Boolean to shuffle the samples order.
seed: Seed for shuffle.
Returns: (X, Yt, Yp), where X is the input fields, Yt the target output, and Yp the predicted output.
These are dictionaries containing keys of input or output fields: 'x1', 'y1', ...
For each key, there is an array with each dimension being: [snapshot_index, snapshot (2D)].
"""
df = get_df(self.data_path, dataset=dataset, truncate=self.truncate)
gen = alias.get(self.M_type, None)[1]
gen_predict = gen(df, n_in=self.n_in, n_out=self.n_out, stacked_in=self.stacked_in,
batch_size=self.batch_size, shuffle=shuffle, seed=seed, norm_in=self.norm_in,
ww=self.ww)
if self.weights_load is not None:
self.model.load_weights(self.weights_load)
else:
raise ValueError('Incorrect path of weights to load.')
steps_predict = np.ceil(df.shape[0] / self.batch_size)
X, Yp, Yt = {}, {}, {} # Key== x1, x2, y1, y2... val= all predictions (batched flattened and appended in list)
for out in self.model.inputs:
X[out.name.split(':')[0]] = []
for out in self.model.outputs:
Yp[out.name.split('/')[0]] = []
for out in self.model.outputs:
Yt[out.name.split('/')[0]] = []
for i, x_y in enumerate(gen_predict):
if i == steps_predict:
break
print('{:.2f}%'.format(i / steps_predict * 100))
x, y_t = x_y[0], x_y[1] # Dictionary of input batch and outputs batch from the generator
p = self.model.predict(x, batch_size=self.batch_size)
y_p = {out.name.split('/')[0]: p[i] for i, out in enumerate(self.model.outputs)} # Dictionary of predicted outputs
for m in range(self.batch_size):
for k, v in x.items():
X[k].append(x[k][m, ..., 0])
for k, v in y_t.items():
Yt[k].append(y_t[k][m, ..., 0])
for k, v in y_p.items():
Yp[k].append(y_p[k][m, ..., 0])
return X, Yt, Yp
def predict_from_x(self, x_dict):
"""
Similar to `predict` but using an input dictionary instead of a directory path.
Args:
x_dict: Input dictionary as created by a generator. Eg:
x_dict = {'x1': P[np.newaxis, ..., np.newaxis],
'x2': U[np.newaxis, ..., np.newaxis],
'x3': V[np.newaxis, ..., np.newaxis]}
Returns: Predicted output (Yp). See `predict` for output structure.
"""
if self.weights_load is not None:
self.model.load_weights(self.weights_load)
else:
raise ValueError('Incorrect path of weights to load.')
Yp = {}
p = self.model.predict(x_dict, batch_size=1)
y_p = {out.name.split('/')[0]: p[i] for i, out in enumerate(self.model.outputs)}
for k, v in y_p.items():
Yp[k] = y_p[k][0, ..., 0]
return Yp | sanspy/model.py | from keras.layers import concatenate
from keras import optimizers
from keras.callbacks import EarlyStopping, ModelCheckpoint
import numpy as np
import json
from sanspy.utils import get_df, alias, mem_usage
from sanspy.callbacks import SaveHistoryEpochEnd
class Model(object):
"""
Model class as a wrapper for Keras. Contains all the information required for building the model, training and testing.
"""
def __init__(self, data_path=None, weights_load=None, weights_save='weights.h5', history_path='history.json',
score_path='score.json',
M_type='M1', lr=0.0001, patience=5, l2=None, skip=None, norm_in=False,
optimizer=optimizers.adam, loss='mse', merge=concatenate,
max_filters=64, activation='relu', kernel_size=3, n_in=4, n_out=3, pp=3, ww=None, sigma=0.5,
n_epochs=100, batch_size=6, queue_size=2, truncate=None, verbose=True):
"""
Model initializer
Args:
data_path: Root path of the dataset.
weights_load: Weights filename required to restart a model from some training state.
weights_save: Weigths filename to save a training state.
history_path: Training history output file.
score_path: Test score output file.
M_type: Model type. See sanspy.utils.alias. Eg: 'M3i2o_1'.
lr: Learning rate.
patience: Number of epochs of patience before early stop is triggered during training.
l2: Regularization value.
skip: Input fields to skip in the data_path. Eg: ['x/P', 'y/ww'].
norm_in: Boolean for normalizing input fields.
optimizer: Keras optimizer.
loss: Keras loss whch is selected from sanspy.losses. Eg: 'mse'.
merge: Keras merging operator.
max_filters: Number of maximum filters on the thickest layer.
activation: Keras activation function.
kernel_size: Convolutional kernel size.
n_in: Number of inputs.
n_out: Number of outputs.
pp: Last layer processing operation:
0: No processing (linear output)
1: Linear + Wake mask
2: Linear + Gaussian filter
3: Linear + Wake mask + Gaussian filter
ww: Type of wake detection layer. 'length' for wake width and 'eps' for threshhold value.
sigma: Gaussian filter layer sigma.
n_epochs: Max number of epochs for training.
batch_size: Size of the training batch.
queue_size: Generetor queue, ie how many samples are loaded in the queue as the batches are being dynamically generated.
truncate: Number of samples to use. If `None`, all samples in the dataset folders are used.
verbose: Boolean to print Keras model information and memory requirements.
"""
self.data_path = data_path
self.weights_load = weights_load
self.weights_save = weights_save
self.history_path = history_path
self.score_path = score_path
self.lr = lr
self.lr_decay = lr*0.1
self.patience = patience
self.optimizer = optimizer
self.loss = alias.get(loss, None)
self.merge = merge
self.max_filters = max_filters
self.activation = activation
self.kernel_size = kernel_size
self.n_in = n_in
self.n_out = n_out
self.pp = pp
self.ww = ww
self.sigma = sigma
self.n_epochs = n_epochs
self.batch_size = batch_size
self.queue_size = queue_size
self.truncate = truncate
self.model = None
self.history = None
self.eval_score = None
self.M_type = M_type
self.l2 = l2
self.skip = skip
self.norm_in = norm_in
model_type = alias.get(M_type, None)[0]
self.stacked_in = alias.get(M_type, None)[2]
if self.stacked_in: self.merge = None
model_params = {'merge': self.merge,
'max_filters': self.max_filters,
'activation': self.activation,
'kernel_size': self.kernel_size,
'optimizer': self.optimizer,
'loss': self.loss,
'lr': self.lr,
'lr_decay': self.lr_decay,
'l2': self.l2,
'pp': self.pp,
'ww': self.ww,
'sigma': self.sigma,}
self.model = model_type(**model_params)
if verbose:
print(self.model.summary())
print('Mem usage = {:.2} GB'.format(mem_usage(self.batch_size, self.model)))
print(M_type)
return
def fit(self):
"""
Fit model.
Returns: History object containing the training metrics at each epoch.
"""
df_fit = get_df(self.data_path, 'fit', truncate=self.truncate, skip=self.skip)
df_val = get_df(self.data_path, 'validate', truncate=self.truncate, skip=self.skip)
print(list(df_fit))
# Generators
gen = alias.get(self.M_type, None)[1]
gen_fit = gen(df_fit, n_in=self.n_in, n_out=self.n_out, stacked_in=self.stacked_in,
batch_size=self.batch_size, shuffle=True, norm_in=self.norm_in,
ww=self.ww)
gen_val = gen(df_val, n_in=self.n_in, n_out=self.n_out, stacked_in=self.stacked_in,
batch_size=self.batch_size, shuffle=False, norm_in=self.norm_in,
ww=self.ww)
# Callbacks
es = EarlyStopping(monitor='val_loss', patience=self.patience, verbose=1)
mc = ModelCheckpoint(self.weights_save+'_{epoch:03d}.h5', save_weights_only=True, verbose=0) # Save weights after every epoch
she = SaveHistoryEpochEnd(history_path=self.history_path) # Save history after every epoch
steps_per_epoch_train = np.ceil(df_fit.shape[0] / self.batch_size)
steps_per_epoch_val = np.ceil(df_val.shape[0] / self.batch_size)
if self.weights_load is not None: self.model.load_weights(self.weights_load)
self.history = self.model.fit_generator(generator=gen_fit,
steps_per_epoch=steps_per_epoch_train,
epochs=self.n_epochs,
validation_data=gen_val,
validation_steps=steps_per_epoch_val,
max_queue_size=self.queue_size,
callbacks=[es, mc, she],
verbose=1)
return self.history
def evaluate(self, dataset='evaluate', shuffle=False, seed=None):
"""
Test model.
Args:
dataset: Dataset within the data_path: 'evaluate' or 'predict'.
shuffle: Boolean to shuffle the samples order.
seed: Seed for shuffle.
Returns: A dictionary containing the test score.
"""
df_eval = get_df(self.data_path, dataset, truncate=self.truncate, skip=self.skip)
print(list(df_eval))
gen = alias.get(self.M_type, None)[1]
gen_eval = gen(df_eval, n_in=self.n_in, n_out=self.n_out, stacked_in=self.stacked_in,
batch_size=self.batch_size, shuffle=shuffle, seed=seed, norm_in=self.norm_in,
ww=self.ww)
if self.weights_load is not None:
self.model.load_weights(self.weights_load)
else:
raise ValueError('Incorrect path of weights to load.')
steps_eval = np.ceil(df_eval.shape[0] / self.batch_size)
score = self.model.evaluate_generator(generator=gen_eval,
steps=steps_eval,
max_queue_size=self.queue_size,
verbose=1)
self.eval_score = {}
print('')
for m, s in zip(self.model.metrics_names, score):
self.eval_score[m] = s
if 'loss' == m:
print('{}: {:.4e}'.format(m,s))
elif 'cc' in m:
print('{}: {:.4f}'.format(m,s))
if self.score_path is not None:
with open(self.score_path, 'w') as f:
json.dump(self.eval_score, f)
return self.eval_score
def predict(self, dataset='predict', shuffle=False, seed=None):
"""
Similar to `evaluate` but for the 'predict' dataset which returns the predictions (output fields).
Args:
dataset: Dataset within the data_path: 'evaluate' or 'predict'.
shuffle: Boolean to shuffle the samples order.
seed: Seed for shuffle.
Returns: (X, Yt, Yp), where X is the input fields, Yt the target output, and Yp the predicted output.
These are dictionaries containing keys of input or output fields: 'x1', 'y1', ...
For each key, there is an array with each dimension being: [snapshot_index, snapshot (2D)].
"""
df = get_df(self.data_path, dataset=dataset, truncate=self.truncate)
gen = alias.get(self.M_type, None)[1]
gen_predict = gen(df, n_in=self.n_in, n_out=self.n_out, stacked_in=self.stacked_in,
batch_size=self.batch_size, shuffle=shuffle, seed=seed, norm_in=self.norm_in,
ww=self.ww)
if self.weights_load is not None:
self.model.load_weights(self.weights_load)
else:
raise ValueError('Incorrect path of weights to load.')
steps_predict = np.ceil(df.shape[0] / self.batch_size)
X, Yp, Yt = {}, {}, {} # Key== x1, x2, y1, y2... val= all predictions (batched flattened and appended in list)
for out in self.model.inputs:
X[out.name.split(':')[0]] = []
for out in self.model.outputs:
Yp[out.name.split('/')[0]] = []
for out in self.model.outputs:
Yt[out.name.split('/')[0]] = []
for i, x_y in enumerate(gen_predict):
if i == steps_predict:
break
print('{:.2f}%'.format(i / steps_predict * 100))
x, y_t = x_y[0], x_y[1] # Dictionary of input batch and outputs batch from the generator
p = self.model.predict(x, batch_size=self.batch_size)
y_p = {out.name.split('/')[0]: p[i] for i, out in enumerate(self.model.outputs)} # Dictionary of predicted outputs
for m in range(self.batch_size):
for k, v in x.items():
X[k].append(x[k][m, ..., 0])
for k, v in y_t.items():
Yt[k].append(y_t[k][m, ..., 0])
for k, v in y_p.items():
Yp[k].append(y_p[k][m, ..., 0])
return X, Yt, Yp
def predict_from_x(self, x_dict):
"""
Similar to `predict` but using an input dictionary instead of a directory path.
Args:
x_dict: Input dictionary as created by a generator. Eg:
x_dict = {'x1': P[np.newaxis, ..., np.newaxis],
'x2': U[np.newaxis, ..., np.newaxis],
'x3': V[np.newaxis, ..., np.newaxis]}
Returns: Predicted output (Yp). See `predict` for output structure.
"""
if self.weights_load is not None:
self.model.load_weights(self.weights_load)
else:
raise ValueError('Incorrect path of weights to load.')
Yp = {}
p = self.model.predict(x_dict, batch_size=1)
y_p = {out.name.split('/')[0]: p[i] for i, out in enumerate(self.model.outputs)}
for k, v in y_p.items():
Yp[k] = y_p[k][0, ..., 0]
return Yp | 0.879406 | 0.433682 |
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
class BayesModel:
    """Gaussian-mean model with unit variance and a flat prior on the mean.

    Improvement: the original defined its callables as closures inside
    ``__init__``; they are now proper instance methods with identical
    call signatures for existing callers (``model.pdata(data, mu)``,
    ``model.priorprob(mu)``, ``model.samplemu(size=...)``).
    """
    def __init__(self, data, prior_range):
        self.data = data
        # (lo, hi) bounds of the uniform prior on mu.
        self.prior_range = prior_range
    def pdata(self, data, mu):
        r"""
        Probability of :math:`x` given :math:`theta`, assuming :math:`\sigma = 1`.
        """
        return norm.pdf(data, loc=mu).prod()
    def priorprob(self, mu):
        r"""
        Flat prior on the mean
        """
        return 1/(self.prior_range[1] - self.prior_range[0])
    def samplemu(self, size=1):
        r"""
        Sample the parameter from the flat prior on the mean
        """
        return np.random.uniform(self.prior_range[0], self.prior_range[1], size=size)
def metropolis(model, N):
    """Sample the posterior of the mean with the Metropolis algorithm.

    Args:
        model: Object exposing ``data``, ``pdata``, ``priorprob`` and
            ``samplemu`` (e.g. :class:`BayesModel`).
        N: Number of proposals to evaluate.

    Returns:
        np.ndarray with the accepted chain, one entry per iteration.
    """
    mu = model.samplemu()
    posterior = []
    for _ in range(int(N)):
        new_mu = model.samplemu()
        # Bug fix: use the model's own data, not the module-level `data`
        # global that only exists when the file is run as a script.
        old_posterior = model.pdata(model.data, mu)*model.priorprob(mu)
        new_posterior = model.pdata(model.data, new_mu)*model.priorprob(new_mu)
        # Accept the proposal with probability min(1, new/old).
        paccept = new_posterior/old_posterior
        if (np.random.uniform(0, 1) < paccept):
            mu = new_mu
        posterior.append(mu)
    return np.array(posterior)
if __name__ == '__main__':
Ndata = 100
N = 10000
data = np.random.normal(0.0, 1.0, Ndata)
prior_range = [-2.0, 2.0]
bayes = BayesModel(data, prior_range)
posterior = metropolis(bayes, N=N)
plt.figure(figsize=(10, 5))
ax = plt.subplot(121)
ax.hist(data,
bins=max(10, int(np.sqrt(len(data)))),
color='k',
density=True,
histtype='stepfilled',
label='Data',
alpha=0.4)
ax.legend(fontsize=10,
loc='upper left',
title_fontsize=12,
title=r'$N_{\mathrm{data}} =$' + r'${}$'.format(len(data)))
ax.set_title('Data')
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$p(\mathrm{data} \sim \mathcal{N}(0, 1)$')
ax = plt.subplot(122)
ax.hist(bayes.samplemu(size=1000),
bins=100,
density=True,
histtype='stepfilled',
label='Prior',
alpha=0.3)
ax.hist(posterior,
bins=int(np.sqrt(len(posterior))),
density=True,
histtype='stepfilled',
label='Posterior',
alpha=0.3)
ax.legend(fontsize=10,
loc='upper left',
title=r'$\hat{\mu} =$' + r'${:.2f} \pm {:.2f},$'.format(np.mean(posterior), np.std(posterior)) + '\n' + r'$N = {}$'.format(len(posterior)),
title_fontsize=12)
ax.set_title('Posterior Distribution')
ax.set_xlabel(r'$\mu$')
ax.set_ylabel(r'$p(\mu | \mathrm{data} \sim \mathcal{N}(0, 1)$')
plt.savefig('bayes.png') | MCMC/bayes.py | import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
class BayesModel:
    """Gaussian-mean model with a flat prior over ``prior_range``.

    The likelihood, prior density and prior sampler are exposed as instance
    attributes (closures over ``prior_range``) so callers invoke them as
    plain functions: ``model.pdata(data, mu)``, ``model.priorprob(mu)``,
    ``model.samplemu(size)``.
    """

    def __init__(self, data, prior_range):
        self.data = data

        def pdata(data, mu):
            r"""Likelihood of ``data`` given ``mu``, assuming :math:`\sigma = 1`."""
            return norm.pdf(data, loc=mu).prod()

        def priorprob(mu):
            r"""Density of the flat prior on the mean."""
            return 1 / (prior_range[1] - prior_range[0])

        def samplemu(size=1):
            r"""Draw ``size`` means from the flat prior."""
            return np.random.uniform(prior_range[0], prior_range[1], size=size)

        self.pdata = pdata
        self.priorprob = priorprob
        self.samplemu = samplemu
def metropolis(model, N):
    """Sample the posterior mean of ``model`` with an independence Metropolis sampler.

    Parameters
    ----------
    model : BayesModel
        Must expose ``data``, ``pdata(data, mu)``, ``priorprob(mu)`` and
        ``samplemu()``.
    N : int
        Number of Metropolis iterations (length of the returned chain).

    Returns
    -------
    numpy.ndarray
        The ``N`` retained samples of the mean.
    """
    mu = model.samplemu()
    # BUG FIX: the original read a module-global ``data`` (defined only in
    # the __main__ block); use the model's own data instead.
    old_posterior = model.pdata(model.data, mu) * model.priorprob(mu)
    posterior = []
    for _ in range(int(N)):
        new_mu = model.samplemu()
        new_posterior = model.pdata(model.data, new_mu) * model.priorprob(new_mu)
        # Accept the proposal with probability min(1, paccept); hoisting the
        # old-posterior evaluation out of the loop halves the likelihood calls.
        paccept = new_posterior / old_posterior
        if np.random.uniform(0, 1) < paccept:
            mu = new_mu
            old_posterior = new_posterior
        posterior.append(mu)
    return np.array(posterior)
if __name__ == '__main__':
# Demo: draw Ndata points from N(0, 1), run the Metropolis sampler on the
# mean, and plot the data next to the prior/posterior histograms.
Ndata = 100
N = 10000
data = np.random.normal(0.0, 1.0, Ndata)
prior_range = [-2.0, 2.0]
bayes = BayesModel(data, prior_range)
# Chain of N posterior samples of the mean.
posterior = metropolis(bayes, N=N)
# Left panel: histogram of the raw data.
plt.figure(figsize=(10, 5))
ax = plt.subplot(121)
ax.hist(data,
bins=max(10, int(np.sqrt(len(data)))),
color='k',
density=True,
histtype='stepfilled',
label='Data',
alpha=0.4)
ax.legend(fontsize=10,
loc='upper left',
title_fontsize=12,
title=r'$N_{\mathrm{data}} =$' + r'${}$'.format(len(data)))
ax.set_title('Data')
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$p(\mathrm{data} \sim \mathcal{N}(0, 1)$')
# Right panel: samples from the flat prior vs. the Metropolis posterior.
ax = plt.subplot(122)
ax.hist(bayes.samplemu(size=1000),
bins=100,
density=True,
histtype='stepfilled',
label='Prior',
alpha=0.3)
ax.hist(posterior,
bins=int(np.sqrt(len(posterior))),
density=True,
histtype='stepfilled',
label='Posterior',
alpha=0.3)
ax.legend(fontsize=10,
loc='upper left',
title=r'$\hat{\mu} =$' + r'${:.2f} \pm {:.2f},$'.format(np.mean(posterior), np.std(posterior)) + '\n' + r'$N = {}$'.format(len(posterior)),
title_fontsize=12)
ax.set_title('Posterior Distribution')
ax.set_xlabel(r'$\mu$')
ax.set_ylabel(r'$p(\mu | \mathrm{data} \sim \mathcal{N}(0, 1)$')
# The figure is written to disk, not shown interactively.
plt.savefig('bayes.png')
from PyQt5 import QtWidgets, uic, QtGui
from PyQt5.QtWidgets import *
from resultCal import ResultCalculator
# GPA-calculator GUI glue: wires three Qt Designer pages (initial form,
# course-entry table, results display) together and feeds ResultCalculator.

courses = 0  # number of course rows currently configured in the table


def nextPage():
    """Move from the initial form to the course-entry table."""
    clear()
    result.hide()
    ask.hide()
    table.show()
    returnRowCount()


def back():
    """Return from the course table to the initial form."""
    result.hide()
    ask.show()
    table.hide()


def returnRowCount():
    """Resize the course table to the number of courses the user entered."""
    global courses
    courses = int(ask.no_of_courses.text())
    table.tableWidget.setRowCount(courses)


# Accumulators filled column-by-column from the course table.
course_titles = []
course_codes = []
units = []
grades = []


def getItems():
    """Read every cell of the course table into the module-level lists."""
    buckets = (course_titles, course_codes, units, grades)
    for column, bucket in enumerate(buckets):
        print(courses)
        for row in range(courses):
            bucket.append(table.tableWidget.item(row, column).text())


def Main():
    """Collect the table contents, run the calculator and show the results."""
    getItems()
    ask.hide()
    result.show()
    table.hide()
    lcgpa = float(ask.last_cgpa_box.text())
    # NOTE(review): left as str while lcgpa is converted to float — verify
    # ResultCalculator expects a string here.
    lunits = ask.no_of_units.text()
    rc = ResultCalculator(lunits, lcgpa, course_titles, course_codes, units, grades)
    result.GPAlineEdit.setText(str(round(rc.gpa, 2)))
    result.CGPAlineEdit.setText(str(round(rc.confirmed_cgpa, 2)))
    result.UNITlineEdit.setText(str(rc.unit))
    result.TSlineEdit.setText(str(int(rc.current_cgpa)))
    result.TUTlineEdit.setText(str(rc.total_units))
    result.COlineEdit.setText(str(len(rc.carry_over)))
    for bucket in (course_titles, course_codes, units, grades):
        del bucket[:]


def clear():
    """Empty the accumulator lists and blank every cell of the table."""
    for bucket in (course_titles, course_codes, units, grades):
        del bucket[:]
    for column in range(4):
        for row in range(courses):
            table.tableWidget.setItem(row, column, QTableWidgetItem(""))


def display_back():
    """From the results page, go back to the course table."""
    table.show()
    ask.hide()
    result.hide()


def display_cont():
    """From the results page, start over at the initial form."""
    ask.show()
    table.hide()
    result.hide()


if __name__ == '__main__':
    app = QtWidgets.QApplication([])
    # The three pages are built in Qt Designer and loaded at runtime.
    ask = uic.loadUi('firstGpa.ui')
    table = uic.loadUi('resultTable.ui')
    result = uic.loadUi('display.ui')
    ask.calculate_result.clicked.connect(nextPage)
    table.back_btn.clicked.connect(back)
    table.clear_btn.clicked.connect(clear)
    table.get_result.clicked.connect(Main)
    result.back_btn.clicked.connect(display_back)
    result.continue_btn.clicked.connect(display_cont)
    ask.show()
app.exec() | MainCal.py | from PyQt5 import QtWidgets, uic, QtGui
from PyQt5.QtWidgets import *
from resultCal import ResultCalculator
courses = 0
def nextPage():
clear()
result.hide()
ask.hide()
table.show()
returnRowCount()
def back():
result.hide()
ask.show()
table.hide()
def returnRowCount():
global courses
courses = int(ask.no_of_courses.text())
table.tableWidget.setRowCount(courses)
course_titles = []
course_codes = []
units = []
grades = []
def getItems():
for column in range(4):
print(courses)
for row in range(courses):
value = table.tableWidget.item(row,column).text()
if column==0:
course_titles.append(value)
elif column==1:
course_codes.append(value)
elif column==2:
units.append(value)
elif column==3:
grades.append(value)
def Main():
getItems()
ask.hide()
result.show()
table.hide()
lcgpa = float(ask.last_cgpa_box.text())
lunits = ask.no_of_units.text()
rc = ResultCalculator(lunits, lcgpa, course_titles, course_codes, units, grades)
result.GPAlineEdit.setText(str(round(rc.gpa,2)))
result.CGPAlineEdit.setText(str(round(rc.confirmed_cgpa,2)))
result.UNITlineEdit.setText(str(rc.unit))
result.TSlineEdit.setText(str(int(rc.current_cgpa)))
result.TUTlineEdit.setText(str(rc.total_units))
result.COlineEdit.setText(str(len(rc.carry_over)))
del course_titles[:]
del course_codes[:]
del units[:]
del grades[:]
def clear():
del course_titles[:]
del course_codes[:]
del units[:]
del grades[:]
for column in range(4):
for row in range(courses):
table.tableWidget.setItem(row,column, QTableWidgetItem(""))
def display_back():
table.show()
ask.hide()
result.hide()
def display_cont():
ask.show()
table.hide()
result.hide()
if __name__ == '__main__':
app = QtWidgets.QApplication([])
ask = uic.loadUi('firstGpa.ui')
table = uic.loadUi('resultTable.ui')
result = uic.loadUi('display.ui')
ask.calculate_result.clicked.connect(nextPage)
table.back_btn.clicked.connect(back)
table.clear_btn.clicked.connect(clear)
table.get_result.clicked.connect(Main)
result.back_btn.clicked.connect(display_back)
result.continue_btn.clicked.connect(display_cont)
ask.show()
app.exec() | 0.319971 | 0.262738 |
# This file is included by CMakeLists.txt.
#[[
import importlib
import glob
import os
import subprocess
import sys
from test_case import TestCase
class Generator(object):
    """Descriptor for one elichika test script.

    ``category`` is ``dirname`` with path separators flattened to
    underscores so it can be embedded in a flat test name.
    """

    def __init__(self, dirname, filename, fail=False):
        self.dirname = dirname
        self.filename = filename
        self.fail = fail
        self.category = dirname.replace('/', '_')
# Registry of every elichika test script, grouped by source directory.
# ``get_test_generators``/``generate_tests``/``get`` below filter this list.
TESTS = [
Generator('model', 'MLP'),
Generator('model', 'Alex'),
Generator('model', 'Resnet_with_loss'),
Generator('node', 'AddMul'),
Generator('node', 'AveragePool2d'),
Generator('node', 'BatchNorm'),
Generator('node', 'Convolution2D'),
Generator('node', 'Id'),
Generator('node', 'Linear'),
Generator('node', 'PadSequence'),
Generator('node', 'Relu'),
Generator('node', 'Softmax'),
Generator('node', 'SoftmaxCrossEntropy'),
Generator('node', 'Unpooling2D'),
Generator('node', 'Variable'),
Generator('node', 'ChainList'),
Generator('node', 'LRN'),
Generator('node/ndarray', 'NpArray'),
Generator('node/ndarray', 'NpFull'),
Generator('node/ndarray', 'NpZeros'),
Generator('node/ndarray', 'Size'),
Generator('node/ndarray', 'Shape'),
Generator('node/ndarray', 'Ceil'),
Generator('node/ndarray', 'Cumsum'),
Generator('node/Functions', 'Reshape'),
Generator('node/Functions', 'SplitAxis'),
Generator('node/Functions', 'Roi'),
Generator('node/Functions', 'SwapAxes'),
Generator('node/Functions', 'Concat'),
Generator('node/Functions', 'Dropout'),
Generator('node/Functions', 'Matmul'),
Generator('node/Functions', 'MaxPool2d'),
Generator('node/Functions', 'ResizeImages'),
Generator('node/Functions', 'Stack'),
Generator('node/Functions', 'Vstack'),
Generator('node/Functions', 'Hstack'),
Generator('node/Functions', 'Squeeze'),
Generator('node/Functions', 'Separate'),
Generator('node/Functions', 'Mean'),
Generator('syntax', 'Alias'),
Generator('syntax', 'Cmp'),
Generator('syntax', 'For'),
Generator('syntax', 'ForAndIf'),
Generator('syntax', 'If'),
Generator('syntax', 'LinkInFor'),
Generator('syntax', 'ListComp'),
Generator('syntax', 'MultiClass'),
Generator('syntax', 'MultiFunction'),
Generator('syntax', 'Range'),
Generator('syntax', 'Sequence'),
Generator('syntax', 'Slice'),
Generator('syntax', 'UserDefinedFunc'),
Generator('syntax', 'Tuple'),
]
def get_test_generators(dirname):
    """Return every registered generator whose directory equals ``dirname``."""
    matching = []
    for gen in TESTS:
        if gen.dirname == dirname:
            matching.append(gen)
    return matching
def print_test_generators(dirname):
    """Print a ';'-joined list of test script paths under ``dirname``.

    The output format is consumed by CMake (see the surrounding #[[ ... ]]
    bracket-comment markers).
    """
    paths = [
        os.path.join('elichika/tests', gen.dirname, gen.filename + '.py')
        for gen in get_test_generators(dirname)
    ]
    print(';'.join(paths))
def get_source_dir():
    """Return the directory two levels above the running script."""
    script = sys.argv[0]
    return os.path.dirname(os.path.dirname(script))
def generate_tests(dirname):
"""Import each test script under ``dirname`` and run its ``main()``.

Output for each script goes to ``<source>/out/elichika_<category>_<name>``
via the testtools test-case generator.
"""
# Imported lazily so --list works without the testtools package on path.
from testtools import testcasegen
for gen in get_test_generators(dirname):
py = os.path.join('tests', gen.dirname, gen.filename)
out_dir = os.path.join(get_source_dir(), 'out', 'elichika_%s_%s' %
(gen.category, gen.filename))
print('Running %s' % py)
# The file path doubles as the module path once '/' becomes '.'.
module = importlib.import_module(py.replace('/', '.'))
testcasegen.reset_test_generator([out_dir])
module.main()
def get():
    """Build a TestCase for every generated test directory under out/."""
    tests = []
    diversed_whitelist = [
        'node_Linear'
    ]
    for gen in TESTS:
        test_name = 'elichika_%s_%s' % (gen.category, gen.filename)
        kwargs = {'fail': True} if gen.fail else {}
        diversed = any(substr in test_name for substr in diversed_whitelist)
        # A generator may have produced one directory or several suffixed ones.
        test_dirs = glob.glob('out/%s' % test_name)
        test_dirs += glob.glob('out/%s_*' % test_name)
        assert test_dirs, 'No tests found for %s' % test_name
        for d in test_dirs:
            base = os.path.basename(d)
            test_dir = os.path.join('out', base)
            tests.append(TestCase(name=base, test_dir=test_dir, **kwargs))
            if diversed:
                # Whitelisted tests also run through the xcvm_test backend.
                tests.append(TestCase(name=base + '_diversed',
                                      test_dir=test_dir,
                                      backend='xcvm_test',
                                      **kwargs))
    return tests
if __name__ == '__main__':
# CLI used by CMake: --list prints the test sources, --generate runs them.
if sys.argv[1] == '--list':
print_test_generators(sys.argv[2])
elif sys.argv[1] == '--generate':
generate_tests(sys.argv[2])
else:
raise RuntimeError('See %s for the usage' % sys.argv[0])
#]] | scripts/elichika_tests.py |
# This file is included by CMakeLists.txt.
#[[
import importlib
import glob
import os
import subprocess
import sys
from test_case import TestCase
class Generator(object):
def __init__(self, dirname, filename, fail=False):
self.dirname = dirname
self.category = dirname.replace('/', '_')
self.filename = filename
self.fail = fail
TESTS = [
Generator('model', 'MLP'),
Generator('model', 'Alex'),
Generator('model', 'Resnet_with_loss'),
Generator('node', 'AddMul'),
Generator('node', 'AveragePool2d'),
Generator('node', 'BatchNorm'),
Generator('node', 'Convolution2D'),
Generator('node', 'Id'),
Generator('node', 'Linear'),
Generator('node', 'PadSequence'),
Generator('node', 'Relu'),
Generator('node', 'Softmax'),
Generator('node', 'SoftmaxCrossEntropy'),
Generator('node', 'Unpooling2D'),
Generator('node', 'Variable'),
Generator('node', 'ChainList'),
Generator('node', 'LRN'),
Generator('node/ndarray', 'NpArray'),
Generator('node/ndarray', 'NpFull'),
Generator('node/ndarray', 'NpZeros'),
Generator('node/ndarray', 'Size'),
Generator('node/ndarray', 'Shape'),
Generator('node/ndarray', 'Ceil'),
Generator('node/ndarray', 'Cumsum'),
Generator('node/Functions', 'Reshape'),
Generator('node/Functions', 'SplitAxis'),
Generator('node/Functions', 'Roi'),
Generator('node/Functions', 'SwapAxes'),
Generator('node/Functions', 'Concat'),
Generator('node/Functions', 'Dropout'),
Generator('node/Functions', 'Matmul'),
Generator('node/Functions', 'MaxPool2d'),
Generator('node/Functions', 'ResizeImages'),
Generator('node/Functions', 'Stack'),
Generator('node/Functions', 'Vstack'),
Generator('node/Functions', 'Hstack'),
Generator('node/Functions', 'Squeeze'),
Generator('node/Functions', 'Separate'),
Generator('node/Functions', 'Mean'),
Generator('syntax', 'Alias'),
Generator('syntax', 'Cmp'),
Generator('syntax', 'For'),
Generator('syntax', 'ForAndIf'),
Generator('syntax', 'If'),
Generator('syntax', 'LinkInFor'),
Generator('syntax', 'ListComp'),
Generator('syntax', 'MultiClass'),
Generator('syntax', 'MultiFunction'),
Generator('syntax', 'Range'),
Generator('syntax', 'Sequence'),
Generator('syntax', 'Slice'),
Generator('syntax', 'UserDefinedFunc'),
Generator('syntax', 'Tuple'),
]
def get_test_generators(dirname):
return [test for test in TESTS if test.dirname == dirname]
def print_test_generators(dirname):
tests = []
for gen in get_test_generators(dirname):
tests.append(
os.path.join('elichika/tests', gen.dirname, gen.filename + '.py'))
print(';'.join(tests))
def get_source_dir():
return os.path.dirname(os.path.dirname(sys.argv[0]))
def generate_tests(dirname):
from testtools import testcasegen
for gen in get_test_generators(dirname):
py = os.path.join('tests', gen.dirname, gen.filename)
out_dir = os.path.join(get_source_dir(), 'out', 'elichika_%s_%s' %
(gen.category, gen.filename))
print('Running %s' % py)
module = importlib.import_module(py.replace('/', '.'))
testcasegen.reset_test_generator([out_dir])
module.main()
def get():
tests = []
diversed_whitelist = [
'node_Linear'
]
for gen in TESTS:
category = gen.category
name = gen.filename
test_name = 'elichika_%s_%s' % (category, name)
kwargs = {}
if gen.fail:
kwargs['fail'] = True
diversed = False
for substr in diversed_whitelist:
if substr in test_name:
diversed = True
break
test_dirs = glob.glob('out/%s' % test_name)
test_dirs += glob.glob('out/%s_*' % test_name)
assert test_dirs, 'No tests found for %s' % test_name
for d in test_dirs:
name = os.path.basename(d)
test_dir = os.path.join('out', name)
tests.append(TestCase(name=name, test_dir=test_dir, **kwargs))
if diversed:
tests.append(TestCase(name=name + '_diversed',
test_dir=test_dir,
backend='xcvm_test',
**kwargs))
return tests
if __name__ == '__main__':
if sys.argv[1] == '--list':
print_test_generators(sys.argv[2])
elif sys.argv[1] == '--generate':
generate_tests(sys.argv[2])
else:
raise RuntimeError('See %s for the usage' % sys.argv[0])
#]] | 0.36693 | 0.160102 |
"""Test suite for demoproject.download."""
from django.test import TestCase
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django_anysign import api as django_anysign
class HomeURLTestCase(TestCase):
    """Test homepage."""

    def test_get(self):
        """Homepage returns HTTP 200."""
        response = self.client.get(reverse('home'))
        self.assertEqual(response.status_code, 200)
class SendURLTestCase(TestCase):
    """Test "create and send signature" view."""

    def test_get(self):
        """GET "send" URL returns HTTP 200."""
        response = self.client.get(reverse('send'))
        self.assertEqual(response.status_code, 200)

    def test_post(self):
        """POST "send" URL creates a signature and redirects to signer view."""
        Signature = django_anysign.get_signature_model()
        # The database starts empty; the POST must create exactly one record.
        self.assertEqual(Signature.objects.all().count(), 0)
        response = self.client.post(reverse('send'))
        self.assertEqual(Signature.objects.all().count(), 1)
        signature = Signature.objects.get()
        first_signer = signature.signers.all()[0]
        expected_url = signature.signature_backend.get_signer_url(first_signer)
        self.assertRedirects(response, expected_url)
class SignerURLTestCase(TestCase):
"""Test the signer view ("anysign:signer")."""
def test_get(self):
"""GET "anysign:signer" URL returns HTTP 200."""
# Create a signature with a single signer via the dummysign backend.
SignatureType = django_anysign.get_signature_type_model()
Signature = django_anysign.get_signature_model()
Signer = django_anysign.get_signer_model()
signature_type, created = SignatureType.objects.get_or_create(
signature_backend_code='dummysign')
signature = Signature.objects.create(signature_type=signature_type)
signer = Signer.objects.create(signature=signature)
signature.signers.add(signer)
url = reverse('anysign:signer', args=[signer.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_post(self):
"""POST "anysign:signer" URL redirects to "signer return"."""
# Create a signature.
# NOTE(review): unlike test_get, the signer is not added to
# signature.signers here — confirm whether that is intentional.
SignatureType = django_anysign.get_signature_type_model()
Signature = django_anysign.get_signature_model()
Signer = django_anysign.get_signer_model()
signature_type, created = SignatureType.objects.get_or_create(
signature_backend_code='dummysign')
signature = Signature.objects.create(signature_type=signature_type)
signer = Signer.objects.create(signature=signature)
url = reverse('anysign:signer', args=[signer.pk])
response = self.client.post(url, follow=True)
signer_return_url = signature.signature_backend.get_signer_return_url(
signer)
# The backend's return URL must be the named "signer_return" route.
self.assertEqual(
signer_return_url,
reverse('anysign:signer_return', args=[signer.pk])
)
self.assertRedirects(response, signer_return_url)
self.assertEqual(response.status_code, 200) | demo/django_anysign_demo/tests.py | """Test suite for demoproject.download."""
from django.test import TestCase
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django_anysign import api as django_anysign
class HomeURLTestCase(TestCase):
"""Test homepage."""
def test_get(self):
"""Homepage returns HTTP 200."""
url = reverse('home')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
class SendURLTestCase(TestCase):
"""Test "create and send signature" view."""
def test_get(self):
"""GET "send" URL returns HTTP 200."""
url = reverse('send')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_post(self):
"""POST "send" URL creates a signature and redirects to signer view."""
Signature = django_anysign.get_signature_model()
self.assertEqual(Signature.objects.all().count(), 0)
url = reverse('send')
response = self.client.post(url)
self.assertEqual(Signature.objects.all().count(), 1)
signature = Signature.objects.get()
signer = signature.signers.all()[0]
signer_url = signature.signature_backend.get_signer_url(signer)
self.assertRedirects(response, signer_url)
class SignerURLTestCase(TestCase):
"""Test "create and send signature" view."""
def test_get(self):
"""GET "anysign:signer" URL returns HTTP 200."""
# Create a signature.
SignatureType = django_anysign.get_signature_type_model()
Signature = django_anysign.get_signature_model()
Signer = django_anysign.get_signer_model()
signature_type, created = SignatureType.objects.get_or_create(
signature_backend_code='dummysign')
signature = Signature.objects.create(signature_type=signature_type)
signer = Signer.objects.create(signature=signature)
signature.signers.add(signer)
url = reverse('anysign:signer', args=[signer.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_post(self):
"""POST "anysign:signer" URL redirects to "signer return"."""
# Create a signature.
SignatureType = django_anysign.get_signature_type_model()
Signature = django_anysign.get_signature_model()
Signer = django_anysign.get_signer_model()
signature_type, created = SignatureType.objects.get_or_create(
signature_backend_code='dummysign')
signature = Signature.objects.create(signature_type=signature_type)
signer = Signer.objects.create(signature=signature)
url = reverse('anysign:signer', args=[signer.pk])
response = self.client.post(url, follow=True)
signer_return_url = signature.signature_backend.get_signer_return_url(
signer)
self.assertEqual(
signer_return_url,
reverse('anysign:signer_return', args=[signer.pk])
)
self.assertRedirects(response, signer_return_url)
self.assertEqual(response.status_code, 200) | 0.718199 | 0.275687 |
from .api import grid_pull
from .utils import make_list, meshgrid_ij
import torch
def resize(image, factor=None, shape=None, anchor='c',
interpolation=1, prefilter=True, **kwargs):
"""Resize an image by a factor or to a specific shape.
Notes
-----
.. A least one of `factor` and `shape` must be specified
.. If `anchor in ('centers', 'edges')`, exactly one of `factor` or
`shape must be specified.
.. If `anchor in ('first', 'last')`, `factor` must be provided even
if `shape` is specified.
.. Because of rounding, it is in general not assured that
`resize(resize(x, f), 1/f)` returns a tensor with the same shape as x.
edges centers first last
e - + - + - e + - + - + - + + - + - + - + + - + - + - +
| . | . | . | | c | . | c | | f | . | . | | . | . | . |
+ _ + _ + _ + + _ + _ + _ + + _ + _ + _ + + _ + _ + _ +
| . | . | . | | . | . | . | | . | . | . | | . | . | . |
+ _ + _ + _ + + _ + _ + _ + + _ + _ + _ + + _ + _ + _ +
| . | . | . | | c | . | c | | . | . | . | | . | . | l |
e _ + _ + _ e + _ + _ + _ + + _ + _ + _ + + _ + _ + _ +
Parameters
----------
image : (batch, channel, *inshape) tensor
Image to resize
factor : float or list[float], optional
Resizing factor
* > 1 : larger image <-> smaller voxels
* < 1 : smaller image <-> larger voxels
shape : (ndim,) list[int], optional
Output shape
anchor : {'centers', 'edges', 'first', 'last'} or list, default='centers'
* In cases 'c' and 'e', the volume shape is multiplied by the
zoom factor (and eventually truncated), and two anchor points
are used to determine the voxel size.
* In cases 'f' and 'l', a single anchor point is used so that
the voxel size is exactly divided by the zoom factor.
This case with an integer factor corresponds to subslicing
the volume (e.g., `vol[::f, ::f, ::f]`).
* A list of anchors (one per dimension) can also be provided.
interpolation : int or sequence[int], default=1
Interpolation order.
prefilter : bool, default=True
Apply spline pre-filter (= interpolates the input)
Returns
-------
resized : (batch, channel, *shape) tensor
Resized image
"""
factor = make_list(factor) if factor else []
shape = make_list(shape) if shape else []
anchor = make_list(anchor)
nb_dim = max(len(factor), len(shape), len(anchor)) or (image.dim() - 2)
anchor = [a[0].lower() for a in make_list(anchor, nb_dim)]
backend = dict(dtype=image.dtype, device=image.device)
# compute output shape
inshape = image.shape[-nb_dim:]
if factor:
factor = make_list(factor, nb_dim)
elif not shape:
raise ValueError('One of `factor` or `shape` must be provided')
if shape:
shape = make_list(shape, nb_dim)
else:
shape = [int(i*f) for i, f in zip(inshape, factor)]
if not factor:
factor = [o/i for o, i in zip(shape, inshape)]
# compute transformation grid
lin = []
for anch, f, inshp, outshp in zip(anchor, factor, inshape, shape):
if anch == 'c': # centers
lin.append(torch.linspace(0, inshp - 1, outshp, **backend))
elif anch == 'e': # edges
scale = inshp / outshp
shift = 0.5 * (scale - 1)
lin.append(torch.arange(0., outshp, **backend) * scale + shift)
elif anch == 'f': # first voxel
# scale = 1/f
# shift = 0
lin.append(torch.arange(0., outshp, **backend) / f)
elif anch == 'l': # last voxel
# scale = 1/f
shift = (inshp - 1) - (outshp - 1) / f
lin.append(torch.arange(0., outshp, **backend) / f + shift)
else:
raise ValueError('Unknown anchor {}'.format(anch))
# interpolate
kwargs.setdefault('bound', 'nearest')
kwargs.setdefault('extrapolate', True)
kwargs.setdefault('interpolation', interpolation)
kwargs.setdefault('prefilter', prefilter)
grid = torch.stack(meshgrid_ij(*lin), dim=-1)
resized = grid_pull(image, grid, **kwargs)
return resized | interpol/resize.py | from .api import grid_pull
from .utils import make_list, meshgrid_ij
import torch
def resize(image, factor=None, shape=None, anchor='c',
interpolation=1, prefilter=True, **kwargs):
"""Resize an image by a factor or to a specific shape.
Notes
-----
.. A least one of `factor` and `shape` must be specified
.. If `anchor in ('centers', 'edges')`, exactly one of `factor` or
`shape must be specified.
.. If `anchor in ('first', 'last')`, `factor` must be provided even
if `shape` is specified.
.. Because of rounding, it is in general not assured that
`resize(resize(x, f), 1/f)` returns a tensor with the same shape as x.
edges centers first last
e - + - + - e + - + - + - + + - + - + - + + - + - + - +
| . | . | . | | c | . | c | | f | . | . | | . | . | . |
+ _ + _ + _ + + _ + _ + _ + + _ + _ + _ + + _ + _ + _ +
| . | . | . | | . | . | . | | . | . | . | | . | . | . |
+ _ + _ + _ + + _ + _ + _ + + _ + _ + _ + + _ + _ + _ +
| . | . | . | | c | . | c | | . | . | . | | . | . | l |
e _ + _ + _ e + _ + _ + _ + + _ + _ + _ + + _ + _ + _ +
Parameters
----------
image : (batch, channel, *inshape) tensor
Image to resize
factor : float or list[float], optional
Resizing factor
* > 1 : larger image <-> smaller voxels
* < 1 : smaller image <-> larger voxels
shape : (ndim,) list[int], optional
Output shape
anchor : {'centers', 'edges', 'first', 'last'} or list, default='centers'
* In cases 'c' and 'e', the volume shape is multiplied by the
zoom factor (and eventually truncated), and two anchor points
are used to determine the voxel size.
* In cases 'f' and 'l', a single anchor point is used so that
the voxel size is exactly divided by the zoom factor.
This case with an integer factor corresponds to subslicing
the volume (e.g., `vol[::f, ::f, ::f]`).
* A list of anchors (one per dimension) can also be provided.
interpolation : int or sequence[int], default=1
Interpolation order.
prefilter : bool, default=True
Apply spline pre-filter (= interpolates the input)
Returns
-------
resized : (batch, channel, *shape) tensor
Resized image
"""
# Normalize every argument to a per-dimension list.
factor = make_list(factor) if factor else []
shape = make_list(shape) if shape else []
anchor = make_list(anchor)
# Infer dimensionality from the explicit arguments, falling back to the
# image rank minus the (batch, channel) dimensions.
nb_dim = max(len(factor), len(shape), len(anchor)) or (image.dim() - 2)
# Only the first letter of each anchor name is significant ('c'/'e'/'f'/'l').
anchor = [a[0].lower() for a in make_list(anchor, nb_dim)]
backend = dict(dtype=image.dtype, device=image.device)
# compute output shape
inshape = image.shape[-nb_dim:]
if factor:
factor = make_list(factor, nb_dim)
elif not shape:
raise ValueError('One of `factor` or `shape` must be provided')
if shape:
shape = make_list(shape, nb_dim)
else:
# Truncation here is why resize(resize(x, f), 1/f) may not round-trip
# exactly (see Notes above).
shape = [int(i*f) for i, f in zip(inshape, factor)]
if not factor:
factor = [o/i for o, i in zip(shape, inshape)]
# compute transformation grid
# One vector of input-space sample coordinates per output dimension.
lin = []
for anch, f, inshp, outshp in zip(anchor, factor, inshape, shape):
if anch == 'c': # centers
lin.append(torch.linspace(0, inshp - 1, outshp, **backend))
elif anch == 'e': # edges
scale = inshp / outshp
shift = 0.5 * (scale - 1)
lin.append(torch.arange(0., outshp, **backend) * scale + shift)
elif anch == 'f': # first voxel
# scale = 1/f
# shift = 0
lin.append(torch.arange(0., outshp, **backend) / f)
elif anch == 'l': # last voxel
# scale = 1/f
shift = (inshp - 1) - (outshp - 1) / f
lin.append(torch.arange(0., outshp, **backend) / f + shift)
else:
raise ValueError('Unknown anchor {}'.format(anch))
# interpolate
kwargs.setdefault('bound', 'nearest')
kwargs.setdefault('extrapolate', True)
kwargs.setdefault('interpolation', interpolation)
kwargs.setdefault('prefilter', prefilter)
grid = torch.stack(meshgrid_ij(*lin), dim=-1)
resized = grid_pull(image, grid, **kwargs)
return resized | 0.913869 | 0.44903 |
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timedelta
from pyspark import SparkContext
from util import load_config, load_batch_config, print_batching_info
from util import write_to_table, generate_add_keywords, resolve_placeholder
def run(hive_context, showlog_table, keywords_mapping_table, create_keywords_mapping,
start_date, end_date, load_minutes, keyword_threshold, effective_keywords_table):
"""Identify keywords whose share of showlog impressions exceeds a threshold.

Scans the showlog in ``load_minutes`` batches between ``start_date`` and
``end_date``, joins impressions to the keyword mapping table on
spread_app_id, accumulates per-keyword impression counts, and writes every
keyword whose count exceeds ``keyword_threshold`` * total count to
``effective_keywords_table``.
"""
# Create ad keywords table if does not exist.
if create_keywords_mapping:
generate_add_keywords(keywords_mapping_table)
#[Row(keyword=u'education', keyword_index=1, spread_app_id=u'C100203741')]
starting_time = datetime.strptime(start_date, "%Y-%m-%d")
ending_time = datetime.strptime(end_date, "%Y-%m-%d")
# In batches, get the show counts for all of the keywords.
keyword_totals = {}
batched_round = 1
while starting_time < ending_time:
time_start = starting_time.strftime("%Y-%m-%d %H:%M:%S")
batch_time_end = starting_time + timedelta(minutes=load_minutes)
# Clamp the final batch so it never reads past end_date.
batch_time_end = min(batch_time_end, ending_time)
time_end = batch_time_end.strftime("%Y-%m-%d %H:%M:%S")
print_batching_info("Main keywords", batched_round, time_start, time_end)
# Get the impressions for the time window joined with the keywords.
command = """SELECT
logs.spread_app_id,
logs.show_time,
kw.keyword
FROM {log_table} as logs inner join {keyword_table} as kw on logs.spread_app_id = kw.spread_app_id
WHERE logs.show_time >= '{time_start}' AND show_time < '{time_end}' """
df_showlog_batched = hive_context.sql(command.format(log_table=showlog_table,
keyword_table=keywords_mapping_table, time_start=time_start, time_end=time_end))
# Get the number of impressions for each keyword.
df = df_showlog_batched.groupby('keyword').count().collect()
# Add the impression count for each keyword to the dictionary.
# NOTE(review): if one spread_app_id maps to several keywords, the join
# fans out and each impression is counted once per keyword — confirm
# that this is the intended notion of "share of traffic".
for row in df:
keyword_totals[row['keyword']] = keyword_totals.get(row['keyword'], 0) + int(row['count'])
starting_time = batch_time_end
batched_round += 1
# With the total keyword counts calculated, identify the keywords that meet
# the threshold to be included.
# Get the total and calculate the count threshold for effective keywords.
total_impressions = sum(keyword_totals.values())
impression_threshold = keyword_threshold * total_impressions
# For each keyword, if its count is greater than the threshold, add
# it to the effective keyword list.
effective_keywords = []
for key, value in keyword_totals.items():
if value > impression_threshold:
effective_keywords.append((key,)) # Append as a tuple
# Create the dataframe with the results and save to Hive.
sc = SparkContext.getOrCreate()
df_effective_keywords = sc.parallelize(effective_keywords).toDF(['keyword'])
write_to_table(df_effective_keywords, effective_keywords_table)
if __name__ == "__main__":
"""
main_keywords is a process to identify the effective keywords that
comprise a percentage of the traffic above a given threshold.
"""
sc, hive_context, cfg = load_config(
description="clean data of persona, clicklog and showlog.")
resolve_placeholder(cfg)
cfg_clean = cfg['pipeline']['main_clean']
showlog_table = cfg['showlog_table_name']
keywords_mapping_table = cfg['keywords_table']
create_keywords_mapping = cfg_clean['create_keywords']
cfg_keywords = cfg['pipeline']['main_keywords']
keyword_threshold = cfg_keywords['keyword_threshold']
effective_keywords_table = cfg_keywords['keyword_output_table']
start_date, end_date, load_minutes = load_batch_config(cfg)
run(hive_context, showlog_table, keywords_mapping_table, create_keywords_mapping,
start_date, end_date, load_minutes, keyword_threshold, effective_keywords_table)
sc.stop() | Model/lookalike-model/lookalike_model/pipeline/main_keywords.py |
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timedelta
from pyspark import SparkContext
from util import load_config, load_batch_config, print_batching_info
from util import write_to_table, generate_add_keywords, resolve_placeholder
def run(hive_context, showlog_table, keywords_mapping_table, create_keywords_mapping,
        start_date, end_date, load_minutes, keyword_threshold, effective_keywords_table):
    """Identify keywords whose share of showlog impressions exceeds a threshold.

    Scans the showlog between start_date and end_date in windows of
    load_minutes, sums impressions per keyword, and writes every keyword whose
    count exceeds keyword_threshold * total_impressions to
    effective_keywords_table.

    Args:
        hive_context: HiveContext used to run the batched SQL queries.
        showlog_table: Hive table holding impression (show) logs.
        keywords_mapping_table: Hive table mapping spread_app_id to keyword.
        create_keywords_mapping: If truthy, (re)build the keyword mapping table.
        start_date: Batch range start, 'YYYY-MM-DD'.
        end_date: Batch range end (exclusive), 'YYYY-MM-DD'.
        load_minutes: Minutes of showlog covered by each batch query.
        keyword_threshold: Fraction of total impressions a keyword must exceed.
        effective_keywords_table: Output Hive table (single 'keyword' column).
    """
    # Create ad keywords table if does not exist.
    if create_keywords_mapping:
        generate_add_keywords(keywords_mapping_table)
    # Mapping rows look like:
    # [Row(keyword=u'education', keyword_index=1, spread_app_id=u'C100203741')]
    starting_time = datetime.strptime(start_date, "%Y-%m-%d")
    ending_time = datetime.strptime(end_date, "%Y-%m-%d")
    # In batches, get the show counts for all of the keywords.
    keyword_totals = {}
    batched_round = 1
    while starting_time < ending_time:
        time_start = starting_time.strftime("%Y-%m-%d %H:%M:%S")
        batch_time_end = starting_time + timedelta(minutes=load_minutes)
        # Clamp the last window so it never reaches past the end date.
        batch_time_end = min(batch_time_end, ending_time)
        time_end = batch_time_end.strftime("%Y-%m-%d %H:%M:%S")
        print_batching_info("Main keywords", batched_round, time_start, time_end)
        # Get the impressions for the time window joined with the keywords.
        command = """SELECT
        logs.spread_app_id,
        logs.show_time,
        kw.keyword
        FROM {log_table} as logs inner join {keyword_table} as kw on logs.spread_app_id = kw.spread_app_id
        WHERE logs.show_time >= '{time_start}' AND show_time < '{time_end}' """
        df_showlog_batched = hive_context.sql(command.format(log_table=showlog_table,
            keyword_table=keywords_mapping_table, time_start=time_start, time_end=time_end))
        # Get the number of impressions for each keyword.
        df = df_showlog_batched.groupby('keyword').count().collect()
        # Add the impression count for each keyword to the dictionary.
        for row in df:
            keyword_totals[row['keyword']] = keyword_totals.get(row['keyword'], 0) + int(row['count'])
        starting_time = batch_time_end
        batched_round += 1
    # With the total keyword counts calculated, identify the keywords that meet
    # the threshold to be included.
    # Get the total and calculate the count threshold for effective keywords.
    total_impressions = sum(keyword_totals.values())
    impression_threshold = keyword_threshold * total_impressions
    # For each keyword, if its count is greater than the threshold, add
    # it to the effective keyword list.
    effective_keywords = []
    for key, value in keyword_totals.items():
        if value > impression_threshold:
            effective_keywords.append((key,))  # Append as a tuple
    # Create the dataframe with the results and save to Hive.
    # NOTE(review): RDD.toDF cannot infer a schema from an empty RDD — confirm
    # at least one keyword always passes the threshold, or guard this call.
    sc = SparkContext.getOrCreate()
    df_effective_keywords = sc.parallelize(effective_keywords).toDF(['keyword'])
    write_to_table(df_effective_keywords, effective_keywords_table)
if __name__ == "__main__":
    """
    main_keywords is a process to identify the effective keywords that
    comprise a percentage of the traffic above a given threshold.
    """
    # Build the Spark/Hive session and load the pipeline configuration.
    sc, hive_context, cfg = load_config(
        description="clean data of persona, clicklog and showlog.")
    resolve_placeholder(cfg)
    # Input tables/flags come from the main_clean section; the threshold and
    # output table come from the main_keywords section.
    cfg_clean = cfg['pipeline']['main_clean']
    showlog_table = cfg['showlog_table_name']
    keywords_mapping_table = cfg['keywords_table']
    create_keywords_mapping = cfg_clean['create_keywords']
    cfg_keywords = cfg['pipeline']['main_keywords']
    keyword_threshold = cfg_keywords['keyword_threshold']
    effective_keywords_table = cfg_keywords['keyword_output_table']
    # Batch window settings: date range and minutes of showlog per query.
    start_date, end_date, load_minutes = load_batch_config(cfg)
    run(hive_context, showlog_table, keywords_mapping_table, create_keywords_mapping,
        start_date, end_date, load_minutes, keyword_threshold, effective_keywords_table)
    sc.stop()
import os
import numpy as np
import pandas as pd
from collections import Counter
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
import json
import cv2
from time import time
import threading
import math
DATASET={'CCT':'iWildCam_2019_CCT','iNat':'iWildCam_2019_iNat_Idaho','IDFG':'iWildCam_IDFG'} #_images_small
DATA_DIR='./data/'
ANNOTATION_DIR =DATA_DIR+ 'iWildCam_2019_Annotations/'
def rewrite_train_data_json(dataset='CCT'):
    """Flatten a COCO-style annotation JSON into a per-image CSV.

    Pairs each image record with its annotation by position (warning on any
    id mismatch) and writes the selected fields to
    ``ANNOTATION_DIR + DATASET[dataset] + '.csv'``.

    Args:
        dataset: Key into the module-level DATASET mapping ('CCT' or 'iNat').
    """
    json_path = ANNOTATION_DIR + DATASET[dataset] + '.json'
    # Context manager instead of a bare open(): the original never closed
    # the file handle.
    with open(json_path, 'r') as fp:
        json_data = json.load(fp)
    images = json_data['images']
    annotations = json_data['annotations']
    csv_data = {'category_id': [], 'date_captured': [], 'id': [], 'file_name': [],
                'rights_holder': [], 'width': [], 'height': [], 'location': []}
    print('len of data:', dataset, len(images))
    for ii, (img, annot) in enumerate(zip(images, annotations)):
        # Images and annotations are expected to be aligned by position.
        if img['id'] != annot['image_id']:
            print('there are some error in', ii, img['id'], annot['image_id'])
        # Capture date: prefer per-image fields, else the dataset creation date.
        if 'date_captured' in img:
            date = img['date_captured']
        elif 'datetime' in img:
            date = img['datetime']
        else:
            date = json_data['info']['date_created']
        csv_data['date_captured'].append(date)
        csv_data['category_id'].append(annot['category_id'])
        csv_data['file_name'].append(img['file_name'])
        csv_data['rights_holder'].append(img['rights_holder'])
        csv_data['id'].append(img['id'])
        csv_data['width'].append(img['width'])
        csv_data['height'].append(img['height'])
        # Some datasets carry no camera location; -1 is the sentinel.
        csv_data['location'].append(img.get('location', -1))
    csv_data = pd.DataFrame(csv_data)
    csv_data.to_csv(ANNOTATION_DIR + DATASET[dataset] + '.csv', index=False)
def _print_category_ratio(df, label):
    """Print, for each of the 23 categories, its count and fraction in *df*."""
    print('category ratio for %s data:' % label)
    cnt = Counter(df['category_id'].values)
    L = len(df)
    for ii in range(23):
        # Guard against an empty split (e.g. no images at the dev location);
        # the original crashed with ZeroDivisionError in that case.
        print(ii, cnt[ii], cnt[ii] / L if L else 0.0)


def split_train_dev(CCT=True, iNat=True):
    """Build train/dev CSVs from the per-dataset annotation CSVs.

    Images from camera location 46 become the dev split; everything else is
    train, so the two splits never share a camera. Writes
    DATA_DIR + 'train_file.csv' and DATA_DIR + 'dev_file.csv'.

    Args:
        CCT: Include the iWildCam CCT dataset.
        iNat: Include the iNat Idaho dataset.
    """
    columns = ['category_id', 'date_captured', 'id', 'file_name',
               'rights_holder', 'width', 'height', 'location']
    train = pd.DataFrame()
    if CCT:
        temp = pd.read_csv(ANNOTATION_DIR + DATASET['CCT'] + '.csv')[columns]
        temp['dataset'] = 'CCT'
        temp['file_name'] = temp['file_name'].map(lambda x: 'iWildCam_2019_CCT_images_small/' + x)
        print('use CCT data', temp.shape)
        train = pd.concat([train, temp])
    if iNat:
        temp = pd.read_csv(ANNOTATION_DIR + DATASET['iNat'] + '.csv')[columns]
        temp['dataset'] = 'iNat'
        temp['file_name'] = temp['file_name'].map(lambda x: 'iWildCam_2019_iNat_Idaho/' + x)
        print('use iNat data', temp.shape)
        train = pd.concat([train, temp])
    print('train shape', train.shape)
    # Camera location 46 is held out as the dev set.
    dev_file = train[train['location'] == 46]
    train_file = train[train['location'] != 46]
    train_file.to_csv(DATA_DIR + 'train_file.csv', index=False)
    dev_file.to_csv(DATA_DIR + 'dev_file.csv', index=False)
    # The two identical reporting loops of the original are factored into a helper.
    _print_category_ratio(train_file, 'train')
    _print_category_ratio(dev_file, 'dev')
def save_test():
    """Load the raw test CSV, tag it as the 'test' split, and write test_file.csv.

    category_id is set to -1 as a placeholder since test labels are unknown.
    """
    columns = ['date_captured', 'id', 'file_name',
               'rights_holder', 'width', 'height', 'location']
    test = pd.read_csv(DATA_DIR + 'test.csv')[columns]
    test['dataset'] = 'test'
    test['category_id'] = -1
    # Prefix with the image folder so paths are relative to the data root.
    test['file_name'] = test['file_name'].map(lambda x: 'test_images/' + x)
    print('test shape', test.shape)  # 153730
    test.to_csv(DATA_DIR + 'test_file.csv', index=False)
full_data_dir='data/raw_data/iWildCam_2019_IDFG/iWildCam_IDFG_images/'
def get_test_orig_size_split(test_file, name=0):
    """Read each image of *test_file* from disk and record its true size.

    Runs inside one worker thread; dumps periodic checkpoints and finally a
    per-thread CSV (DATA_DIR + 'raw_data/test_file_orig_<name>.csv').

    Args:
        test_file: DataFrame slice with a 'file_name' column.
        name: Thread id used to namespace checkpoint/output files.
    """
    name = str(name)
    print('get_test_orig_size_split for thread', name, test_file.shape)
    file_names = test_file['file_name'].values
    width, height = [], []
    t1 = time()
    for ii, fname in enumerate(file_names):
        mod_name = full_data_dir + fname.split('/')[-1]
        image = cv2.imread(mod_name)
        if image is None:
            # cv2.imread returns None on failure instead of raising; fail
            # loudly with the offending path rather than an opaque AttributeError.
            raise IOError('could not read image: %s' % mod_name)
        s = image.shape
        # Bug fix: OpenCV shapes are (height, width, channels). The original
        # appended s[0] to width and s[1] to height, swapping the two columns.
        height.append(s[0])
        width.append(s[1])
        if ii % 100 == 0:
            print('threads %s, index %d, time-cost %f min' % (name, ii, (time() - t1) / 60))
        if ii % 1000 == 0:
            # Checkpoint so a crashed thread can be resumed/diagnosed.
            joblib.dump([ii, width, height], DATA_DIR + 'raw_data/test_size_temp_{}.pkl'.format(name))
    test_file['width'] = width
    test_file['height'] = height
    print(name, 'test shape', test_file.shape)  # 153730
    test_file.to_csv(DATA_DIR + 'raw_data/test_file_orig_{}.csv'.format(name), index=False)
def get_test_size_multi_thread(thread_num=1):
    """Measure original image sizes for test_file.csv using *thread_num* workers.

    Each worker runs get_test_orig_size_split on its own chunk; threads are
    appropriate here because the workload is dominated by disk I/O.

    Args:
        thread_num: Number of worker threads / output chunks.
    """
    test_file = pd.read_csv(DATA_DIR + 'test_file.csv')
    # The CSV's width/height describe the downscaled images; preserve them
    # under new names before workers overwrite them with the originals.
    test_file['small_width'] = test_file['width']
    test_file['small_height'] = test_file['height']
    chunk = math.ceil(len(test_file) / thread_num)
    thread_list = []
    for ii in range(thread_num):
        # .copy() so each worker mutates an independent frame rather than a
        # view of the shared one (avoids SettingWithCopy warnings and
        # cross-thread writes into the same underlying data).
        sup_file = test_file.iloc[ii * chunk:(ii + 1) * chunk].copy()
        thr = threading.Thread(target=get_test_orig_size_split, args=(sup_file, ii))
        thread_list.append(thr)
    for t in thread_list:
        t.daemon = True  # Thread.setDaemon() is deprecated; set the attribute.
        t.start()
    for t in thread_list:
        t.join()
def merge_test_size_file():
    """Concatenate the ten per-thread size CSVs into raw_data/test_file.csv."""
    merged = pd.DataFrame()
    for idx in range(10):
        part_path = DATA_DIR + 'raw_data/test_file_orig_{}.csv'.format(str(idx))
        part = pd.read_csv(part_path)
        merged = pd.concat([merged, part])
        # Cumulative shape after folding in this chunk.
        print(idx, merged.shape)
    merged.to_csv(DATA_DIR + 'raw_data/test_file.csv', index=False)
def main(CCT=True, iNat=True):
    """Run the preprocessing pipeline: rewrite JSONs, split train/dev, save test.

    Args:
        CCT: Process the iWildCam CCT dataset.
        iNat: Process the iNat Idaho dataset.
    """
    for enabled, key in ((CCT, 'CCT'), (iNat, 'iNat')):
        if enabled:
            rewrite_train_data_json(key)
    split_train_dev(CCT=CCT, iNat=iNat)
    save_test()
if __name__=='__main__':
    # Build the train/dev/test CSVs. The size-measurement helpers below are
    # run manually when the full-resolution test images are available.
    main()
    #get_test_size_multi_thread(thread_num=10)
    #merge_test_size_file()
import numpy as np
import pandas as pd
from collections import Counter
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
import json
import cv2
from time import time
import threading
import math
DATASET={'CCT':'iWildCam_2019_CCT','iNat':'iWildCam_2019_iNat_Idaho','IDFG':'iWildCam_IDFG'} #_images_small
DATA_DIR='./data/'
ANNOTATION_DIR =DATA_DIR+ 'iWildCam_2019_Annotations/'
def rewrite_train_data_json(dataset='CCT'):
    """Flatten a COCO-style annotation JSON into a per-image CSV.

    Pairs each image record with its annotation by position (warning on any
    id mismatch) and writes the selected fields to
    ``ANNOTATION_DIR + DATASET[dataset] + '.csv'``.

    Args:
        dataset: Key into the module-level DATASET mapping ('CCT' or 'iNat').
    """
    json_path=ANNOTATION_DIR+DATASET[dataset]+'.json'
    # NOTE(review): the handle from open() is never closed explicitly;
    # consider a with-block.
    json_data = json.load(open(json_path,'r'))
    images = json_data['images']
    annotations = json_data['annotations']
    csv_data={'category_id':[],'date_captured':[],'id':[],'file_name':[],
              'rights_holder':[],'width':[],'height':[],'location':[]}
    print('len of data:',dataset,len(images))
    for ii,(img, annot) in enumerate(zip(images,annotations)):
        # Images and annotations are expected to be aligned by position.
        if img['id'] != annot['image_id']:
            print('there are some error in',ii,img['id'],annot['image_id'])
        # Capture date: prefer per-image fields, else the dataset creation date.
        if 'date_captured' in img:
            date=img['date_captured']
        elif 'datetime' in img:
            date = img['datetime']
        else:
            date = json_data['info']['date_created']
        csv_data['date_captured'] += [date]
        csv_data['category_id'] += [annot['category_id']]
        csv_data['file_name'] += [img['file_name']]
        csv_data['rights_holder'] += [img['rights_holder']]
        csv_data['id'] += [img['id']]
        csv_data['width'] += [img['width']]
        csv_data['height'] += [img['height']]
        # Some datasets carry no camera location; -1 is the sentinel.
        if 'location' in img:
            locat = img['location']
        else:
            locat=-1
        csv_data['location'] += [locat]
    csv_data = pd.DataFrame(csv_data)
    csv_data.to_csv(ANNOTATION_DIR+DATASET[dataset]+'.csv',index=False)
def split_train_dev(CCT=True,iNat=True):
    """Build train/dev CSVs from the per-dataset annotation CSVs.

    Images from camera location 46 become the dev split; everything else is
    train, so the two splits never share a camera. Writes
    DATA_DIR + 'train_file.csv' and DATA_DIR + 'dev_file.csv'.

    Args:
        CCT: Include the iWildCam CCT dataset.
        iNat: Include the iNat Idaho dataset.
    """
    columns=['category_id','date_captured','id','file_name',
             'rights_holder','width','height','location']
    train=pd.DataFrame()
    if CCT:
        temp=pd.read_csv(ANNOTATION_DIR+DATASET['CCT']+'.csv')[columns]
        temp['dataset'] = 'CCT'
        temp['file_name'] = temp['file_name'].map(lambda x:'iWildCam_2019_CCT_images_small/'+x)
        print('use CCT data',temp.shape)
        train=pd.concat([train,temp])
    if iNat:
        temp=pd.read_csv(ANNOTATION_DIR+DATASET['iNat']+'.csv')[columns]
        temp['dataset'] = 'iNat'
        temp['file_name'] = temp['file_name'].map(lambda x: 'iWildCam_2019_iNat_Idaho/' + x)
        print('use iNat data',temp.shape)
        train=pd.concat([train,temp])
    print('train shape',train.shape)
    #train=train.sample(frac=1,random_state=0).reset_index(drop=True)
    # Camera location 46 is held out as the dev set.
    dev_file = train[train['location'] == 46]  # 46
    train_file = train[train['location'] != 46]
    train_file.to_csv(DATA_DIR+'train_file.csv',index=False)
    dev_file.to_csv(DATA_DIR+'dev_file.csv',index=False)
    # Report per-category counts/ratios for both splits.
    # NOTE(review): the division crashes if a split is empty — confirm
    # location 46 always exists in the selected datasets.
    print('category ratio for train data:')
    cnt = Counter(train_file['category_id'].values)
    L = len(train_file)
    for ii in range(23):
        print(ii, cnt[ii], cnt[ii] / L)
    print('category ratio for dev data:')
    cnt = Counter(dev_file['category_id'].values)
    L = len(dev_file)
    for ii in range(23):
        print(ii, cnt[ii], cnt[ii] / L)
def save_test():
    """Load the raw test CSV, tag it as the 'test' split, and write test_file.csv.

    category_id is set to -1 as a placeholder since test labels are unknown.
    """
    columns=['date_captured','id','file_name',
             'rights_holder','width','height','location']
    test = pd.read_csv(DATA_DIR+'test.csv')[columns]
    test['dataset'] = 'test'
    test['category_id'] = -1
    # Prefix with the image folder so paths are relative to the data root.
    test['file_name'] = test['file_name'].map(lambda x:'test_images/'+x)
    print('test shape',test.shape) #153730
    test.to_csv(DATA_DIR+'test_file.csv',index=False)
full_data_dir='data/raw_data/iWildCam_2019_IDFG/iWildCam_IDFG_images/'
def get_test_orig_size_split(test_file, name=0):
    """Read each image of *test_file* from disk and record its true size.

    Runs inside one worker thread; dumps periodic checkpoints and finally a
    per-thread CSV (DATA_DIR + 'raw_data/test_file_orig_<name>.csv').

    Args:
        test_file: DataFrame slice with a 'file_name' column.
        name: Thread id used to namespace checkpoint/output files.
    """
    name = str(name)
    print('get_test_orig_size_split for thread', name, test_file.shape)
    file_names = test_file['file_name'].values
    width, height = [], []
    t1 = time()
    for ii, fname in enumerate(file_names):
        mod_name = full_data_dir + fname.split('/')[-1]
        image = cv2.imread(mod_name)
        if image is None:
            # cv2.imread returns None on failure instead of raising; fail
            # loudly with the offending path rather than an opaque AttributeError.
            raise IOError('could not read image: %s' % mod_name)
        s = image.shape
        # Bug fix: OpenCV shapes are (height, width, channels). The original
        # appended s[0] to width and s[1] to height, swapping the two columns.
        height.append(s[0])
        width.append(s[1])
        if ii % 100 == 0:
            print('threads %s, index %d, time-cost %f min' % (name, ii, (time() - t1) / 60))
        if ii % 1000 == 0:
            # Checkpoint so a crashed thread can be resumed/diagnosed.
            joblib.dump([ii, width, height], DATA_DIR + 'raw_data/test_size_temp_{}.pkl'.format(name))
    test_file['width'] = width
    test_file['height'] = height
    print(name, 'test shape', test_file.shape)  # 153730
    test_file.to_csv(DATA_DIR + 'raw_data/test_file_orig_{}.csv'.format(name), index=False)
def get_test_size_multi_thread(thread_num=1):
    """Measure original image sizes for test_file.csv using *thread_num* workers.

    Each worker runs get_test_orig_size_split on its own chunk; threads are
    appropriate here because the workload is dominated by disk I/O.

    Args:
        thread_num: Number of worker threads / output chunks.
    """
    test_file = pd.read_csv(DATA_DIR + 'test_file.csv')
    # The CSV's width/height describe the downscaled images; preserve them
    # under new names before workers overwrite them with the originals.
    test_file['small_width'] = test_file['width']
    test_file['small_height'] = test_file['height']
    chunk = math.ceil(len(test_file) / thread_num)
    thread_list = []
    for ii in range(thread_num):
        # .copy() so each worker mutates an independent frame rather than a
        # view of the shared one (avoids SettingWithCopy warnings and
        # cross-thread writes into the same underlying data).
        sup_file = test_file.iloc[ii * chunk:(ii + 1) * chunk].copy()
        thr = threading.Thread(target=get_test_orig_size_split, args=(sup_file, ii))
        thread_list.append(thr)
    for t in thread_list:
        t.daemon = True  # Thread.setDaemon() is deprecated; set the attribute.
        t.start()
    for t in thread_list:
        t.join()
def merge_test_size_file():
    """Concatenate the ten per-thread size CSVs into raw_data/test_file.csv."""
    data=pd.DataFrame()
    for name in range(10):
        data_path=DATA_DIR + 'raw_data/test_file_orig_{}.csv'.format(str(name))
        temp=pd.read_csv(data_path)
        data=pd.concat([data,temp])
        # Cumulative shape after folding in this chunk.
        print(name,data.shape)
    data.to_csv(DATA_DIR + 'raw_data/test_file.csv',index=False)
def main(CCT=True,iNat=True):
    """Run the preprocessing pipeline: rewrite JSONs, split train/dev, save test.

    Args:
        CCT: Process the iWildCam CCT dataset.
        iNat: Process the iNat Idaho dataset.
    """
    if CCT:
        rewrite_train_data_json('CCT')
    if iNat:
        rewrite_train_data_json('iNat')
    split_train_dev(CCT=CCT,iNat=iNat)
    save_test()
if __name__=='__main__':
    # Build the train/dev/test CSVs. The size-measurement helpers below are
    # run manually when the full-resolution test images are available.
    main()
    #get_test_size_multi_thread(thread_num=10)
    #merge_test_size_file()
import torch
import torch.nn as nn
import torch.nn.functional as F
from data.scaling import DataScaler
class ResidualOutputModule(nn.Module):
    """Output head that can add an interpolated low-resolution estimate to the
    wrapped model's prediction (residual learning for downscaling models).
    """

    def __init__(self, model, learn_residuals, scalings_lr=None, scalings_hr=None, interpolation_mode='bicubic'):
        """
        Args:
            model: Wrapped module that produces the (residual) prediction.
            learn_residuals: If True, forward() adds an upsampled LR estimate.
            scalings_lr: Optional list/tuple of objects whose .scaler is a
                DataScaler; used to revert per-channel-group scaling of the
                LR estimate before interpolation.
            scalings_hr: Like scalings_lr, but re-applied to the interpolated
                HR estimate.
            interpolation_mode: Mode passed to F.interpolate.
        """
        super(ResidualOutputModule, self).__init__()
        self.learn_residuals = learn_residuals
        self._verify_scalings(scalings_lr)
        self.scalings_lr = scalings_lr
        self._verify_scalings(scalings_hr)
        self.scalings_hr = scalings_hr
        self.interpolation_mode = interpolation_mode
        self.model = model

    @staticmethod
    def _verify_scalings(scalings):
        # None is allowed; otherwise every entry must carry a DataScaler.
        if not (isinstance(scalings, (list, tuple)) or (scalings is None)):
            # Typo fix: 'mus' -> 'must' in the original error message.
            raise AssertionError("[ERROR] Scalings must be lists or tuples of objects of type <DataScaler> or None.")
        if scalings is not None:
            for s in scalings:
                assert isinstance(s.scaler, DataScaler)

    def forward(self, x, estimate_lr=None):
        """Run the wrapped model and optionally add the residual estimate.

        Args:
            x: Model input.
            estimate_lr: Low-resolution estimate; required when
                learn_residuals is True (deriving it from x is unsupported).
        """
        if self.model is not None:
            output = self.model(x)
            if self.learn_residuals:
                if estimate_lr is not None:
                    # Undo LR scaling, upsample to HR, then re-apply HR scaling
                    # so the residual is added in the model's output space.
                    if self.scalings_lr is not None:
                        estimate_lr = self._revert_scalings(estimate_lr, self.scalings_lr)
                    estimate_hr = self._interpolate(estimate_lr)
                    if self.scalings_hr is not None:
                        estimate_hr = self._apply_scalings(estimate_hr, self.scalings_hr)
                else:
                    raise NotImplementedError()
                output = output + estimate_hr
            return output
        else:
            raise AttributeError(
                '[ERROR] Child classes of <SuperResModule> must override class attribute model'
            )

    @staticmethod
    def _apply_scalings(x, scalings):
        """Apply each scaler's forward transform to its channel group of x."""
        output = x
        if scalings is not None and len(scalings) > 0:
            output = torch.split(output, [s.scaler.channels for s in scalings], dim=1)
            output = [s.scaler.transform(channel) for s, channel in zip(scalings, output)]
            output = torch.cat(output, dim=1)
        return output

    @staticmethod
    def _revert_scalings(x, scalings):
        """Apply each scaler's inverse transform to its channel group of x."""
        output = x
        if scalings is not None and len(scalings) > 0:
            output = torch.split(output, [s.scaler.channels for s in scalings], dim=1)
            output = [s.scaler.transform_back(channel) for s, channel in zip(scalings, output)]
            output = torch.cat(output, dim=1)
        return output

    def _interpolate(self, estimate_lr, scale_factor=(4, 3)):
        # NOTE(review): the (4, 3) upsampling factor looks hard-coded to this
        # project's LR->HR grid ratio — confirm against the data pipeline.
        return F.interpolate(estimate_lr, scale_factor=scale_factor, mode=self.interpolation_mode)
import torch.nn as nn
import torch.nn.functional as F
from data.scaling import DataScaler
class ResidualOutputModule(nn.Module):
    """Output head that can add an interpolated low-resolution estimate to the
    wrapped model's prediction (residual learning for downscaling models).
    """

    def __init__(self, model, learn_residuals, scalings_lr=None, scalings_hr=None, interpolation_mode='bicubic'):
        """
        Args:
            model: Wrapped module that produces the (residual) prediction.
            learn_residuals: If True, forward() adds an upsampled LR estimate.
            scalings_lr: Optional list/tuple of objects whose .scaler is a
                DataScaler; used to revert per-channel-group scaling of the
                LR estimate before interpolation.
            scalings_hr: Like scalings_lr, but re-applied to the interpolated
                HR estimate.
            interpolation_mode: Mode passed to F.interpolate.
        """
        super(ResidualOutputModule, self).__init__()
        self.learn_residuals = learn_residuals
        self._verify_scalings(scalings_lr)
        self.scalings_lr = scalings_lr
        self._verify_scalings(scalings_hr)
        self.scalings_hr = scalings_hr
        self.interpolation_mode = interpolation_mode
        self.model = model

    @staticmethod
    def _verify_scalings(scalings):
        # None is allowed; otherwise every entry must carry a DataScaler.
        if not (isinstance(scalings, (list, tuple)) or (scalings is None)):
            # Typo fix: 'mus' -> 'must' in the original error message.
            raise AssertionError("[ERROR] Scalings must be lists or tuples of objects of type <DataScaler> or None.")
        if scalings is not None:
            for s in scalings:
                assert isinstance(s.scaler, DataScaler)

    def forward(self, x, estimate_lr=None):
        """Run the wrapped model and optionally add the residual estimate.

        Args:
            x: Model input.
            estimate_lr: Low-resolution estimate; required when
                learn_residuals is True (deriving it from x is unsupported).
        """
        if self.model is not None:
            output = self.model(x)
            if self.learn_residuals:
                if estimate_lr is not None:
                    # Undo LR scaling, upsample to HR, then re-apply HR scaling
                    # so the residual is added in the model's output space.
                    if self.scalings_lr is not None:
                        estimate_lr = self._revert_scalings(estimate_lr, self.scalings_lr)
                    estimate_hr = self._interpolate(estimate_lr)
                    if self.scalings_hr is not None:
                        estimate_hr = self._apply_scalings(estimate_hr, self.scalings_hr)
                else:
                    raise NotImplementedError()
                output = output + estimate_hr
            return output
        else:
            raise AttributeError(
                '[ERROR] Child classes of <SuperResModule> must override class attribute model'
            )

    @staticmethod
    def _apply_scalings(x, scalings):
        """Apply each scaler's forward transform to its channel group of x."""
        output = x
        if scalings is not None and len(scalings) > 0:
            output = torch.split(output, [s.scaler.channels for s in scalings], dim=1)
            output = [s.scaler.transform(channel) for s, channel in zip(scalings, output)]
            output = torch.cat(output, dim=1)
        return output

    @staticmethod
    def _revert_scalings(x, scalings):
        """Apply each scaler's inverse transform to its channel group of x."""
        output = x
        if scalings is not None and len(scalings) > 0:
            output = torch.split(output, [s.scaler.channels for s in scalings], dim=1)
            output = [s.scaler.transform_back(channel) for s, channel in zip(scalings, output)]
            output = torch.cat(output, dim=1)
        return output

    def _interpolate(self, estimate_lr, scale_factor=(4, 3)):
        # NOTE(review): the (4, 3) upsampling factor looks hard-coded to this
        # project's LR->HR grid ratio — confirm against the data pipeline.
        return F.interpolate(estimate_lr, scale_factor=scale_factor, mode=self.interpolation_mode)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: zabbix_action
short_description: Create/Delete/Update Zabbix actions
description:
- This module allows you to create, modify and delete Zabbix actions.
author:
- <NAME> (@rubentsirunyan)
- <NAME> (@K-DOT)
requirements:
- "zabbix-api >= 0.5.4"
options:
name:
description:
- Name of the action
required: true
event_source:
description:
- Type of events that the action will handle.
- Required when C(state=present).
required: false
choices: ['trigger', 'discovery', 'auto_registration', 'internal']
state:
description:
- State of the action.
- On C(present), it will create an action if it does not exist or update the action if the associated data is different.
- On C(absent), it will remove the action if it exists.
choices: ['present', 'absent']
default: 'present'
status:
description:
- Status of the action.
choices: ['enabled', 'disabled']
default: 'enabled'
pause_in_maintenance:
description:
- Whether to pause escalation during maintenance periods or not.
- Can be used when I(event_source=trigger).
type: 'bool'
default: true
esc_period:
description:
- Default operation step duration. Must be greater than 60 seconds.
- Accepts only seconds in int for <= Zabbix 3.2
- Accepts seconds, time unit with suffix and user macro since => Zabbix 3.4
- Required when C(state=present).
required: false
conditions:
type: list
elements: dict
description:
- List of conditions to use for filtering results.
- For more information about suboptions of this option please
check out Zabbix API documentation U(https://www.zabbix.com/documentation/5.0/manual/api/reference/action/object#action_filter_condition)
suboptions:
type:
description:
- Type (label) of the condition.
- 'Possible values when I(event_source=trigger):'
- ' - C(host_group)'
- ' - C(host)'
- ' - C(trigger)'
- ' - C(trigger_name)'
- ' - C(trigger_severity)'
- ' - C(time_period)'
- ' - C(host_template)'
- ' - C(application)'
- ' - C(maintenance_status) known in Zabbix 4.0 and above as "Problem is suppressed"'
- ' - C(event_tag)'
- ' - C(event_tag_value)'
- 'Possible values when I(event_source=discovery):'
- ' - C(host_IP)'
- ' - C(discovered_service_type)'
- ' - C(discovered_service_port)'
- ' - C(discovery_status)'
- ' - C(uptime_or_downtime_duration)'
- ' - C(received_value)'
- ' - C(discovery_rule)'
- ' - C(discovery_check)'
- ' - C(proxy)'
- ' - C(discovery_object)'
- 'Possible values when I(event_source=auto_registration):'
- ' - C(proxy)'
- ' - C(host_name)'
- ' - C(host_metadata)'
- 'Possible values when I(event_source=internal):'
- ' - C(host_group)'
- ' - C(host)'
- ' - C(host_template)'
- ' - C(application)'
- ' - C(event_type)'
value:
description:
- Value to compare with.
- 'When I(type=discovery_status), the choices are:'
- ' - C(up)'
- ' - C(down)'
- ' - C(discovered)'
- ' - C(lost)'
- 'When I(type=discovery_object), the choices are:'
- ' - C(host)'
- ' - C(service)'
- 'When I(type=event_type), the choices are:'
- ' - C(item in not supported state)'
- ' - C(item in normal state)'
- ' - C(LLD rule in not supported state)'
- ' - C(LLD rule in normal state)'
- ' - C(trigger in unknown state)'
- ' - C(trigger in normal state)'
- 'When I(type=trigger_severity), the choices are (case-insensitive):'
- ' - C(not classified)'
- ' - C(information)'
- ' - C(warning)'
- ' - C(average)'
- ' - C(high)'
- ' - C(disaster)'
- Irrespective of user-visible names being changed in Zabbix. Defaults to C(not classified) if omitted.
- Besides the above options, this is usually either the name
of the object or a string to compare with.
value2:
description:
- Secondary value to compare with.
- Required for trigger actions when condition I(type=event_tag_value).
operator:
description:
- Condition operator.
- When I(type) is set to C(time_period), the choices are C(in), C(not in).
- C(matches), C(does not match), C(Yes) and C(No) condition operators work only with >= Zabbix 4.0
choices:
- '='
- '<>'
- 'like'
- 'not like'
- 'in'
- '>='
- '<='
- 'not in'
- 'matches'
- 'does not match'
- 'Yes'
- 'No'
formulaid:
description:
- Arbitrary unique ID that is used to reference the condition from a custom expression.
- Can only contain upper-case letters.
- Required for custom expression filters and ignored otherwise.
eval_type:
description:
- Filter condition evaluation method.
- Defaults to C(andor) if conditions are less than 2 or if
I(formula) is not specified.
- Defaults to C(custom_expression) when formula is specified.
choices:
- 'andor'
- 'and'
- 'or'
- 'custom_expression'
formula:
description:
- User-defined expression to be used for evaluating conditions with a custom expression.
- The expression must contain IDs that reference each condition by its formulaid.
- The IDs used in the expression must exactly match the ones
defined in the I(conditions). No condition can remain unused or omitted.
- Required when I(eval_type=custom_expression).
- Use sequential IDs that start at "A". If non-sequential IDs are used, Zabbix re-indexes them.
This makes each module run notice the difference in IDs and update the action.
default_message:
description:
- Problem message default text.
- With >= Zabbix 5.0 this field is removed from the API and is dropped silently by module.
- Works only with < Zabbix 5.0
default_subject:
description:
- Problem message default subject.
- With >= Zabbix 5.0 this field is removed from the API and is dropped silently by module.
- Works only with < Zabbix 5.0
recovery_default_message:
description:
- Recovery message text.
- With >= Zabbix 5.0 this field is removed from the API and is dropped silently by module.
- Works only with >= Zabbix 3.2 and < Zabbix 5.0
recovery_default_subject:
description:
- Recovery message subject.
- With >= Zabbix 5.0 this field is removed from the API and is dropped silently by module.
- Works only with >= Zabbix 3.2 and < Zabbix 5.0
acknowledge_default_message:
description:
- Update operation (known as "Acknowledge operation" before Zabbix 4.0) message text.
- With >= Zabbix 5.0 this field is removed from the API and is dropped silently by module.
- Works only with >= Zabbix 3.4 and < Zabbix 5.0
acknowledge_default_subject:
description:
- Update operation (known as "Acknowledge operation" before Zabbix 4.0) message subject.
- With >= Zabbix 5.0 this field is removed from the API and is dropped silently by module.
- Works only with >= Zabbix 3.4 and < Zabbix 5.0
operations:
type: list
description:
- List of action operations
suboptions:
type:
description:
- Type of operation.
- 'Valid choices when setting type for I(recovery_operations) and I(acknowledge_operations):'
- ' - C(send_message)'
- ' - C(remote_command)'
- ' - C(notify_all_involved)'
- Choice C(notify_all_involved) only supported in I(recovery_operations) and I(acknowledge_operations).
choices:
- send_message
- remote_command
- add_host
- remove_host
- add_to_host_group
- remove_from_host_group
- link_to_template
- unlink_from_template
- enable_host
- disable_host
- set_host_inventory_mode
- notify_all_involved
esc_period:
description:
- Duration of an escalation step in seconds.
- Must be greater than 60 seconds.
- Accepts only seconds in int for <= Zabbix 3.2
- Accepts seconds, time unit with suffix and user macro since => Zabbix 3.4
- If set to 0 or 0s, the default action escalation period will be used.
default: 0s
esc_step_from:
description:
- Step to start escalation from.
default: 1
esc_step_to:
description:
- Step to end escalation at.
- Specify 0 for infinite escalation.
default: 1
send_to_groups:
type: list
description:
- User groups to send messages to.
send_to_users:
type: list
description:
- Users (usernames or aliases) to send messages to.
message:
description:
- Operation message text.
- Will check the 'default message' and use the text from I(default_message) if this and I(default_subject) are not specified
subject:
description:
- Operation message subject.
- Will check the 'default message' and use the text from I(default_subject) if this and I(default_subject) are not specified
media_type:
description:
- Media type that will be used to send the message.
- Can be used with I(type=send_message) or I(type=notify_all_involved) inside I(acknowledge_operations).
- Set to C(all) for all media types
default: 'all'
operation_condition:
type: 'str'
description:
- The action operation condition object defines a condition that must be met to perform the current operation.
choices:
- acknowledged
- not_acknowledged
host_groups:
type: list
description:
- List of host groups host should be added to.
- Required when I(type=add_to_host_group) or I(type=remove_from_host_group).
templates:
type: list
description:
- List of templates host should be linked to.
- Required when I(type=link_to_template) or I(type=unlink_from_template).
inventory:
description:
- Host inventory mode.
- Required when I(type=set_host_inventory_mode).
choices:
- manual
- automatic
command_type:
description:
- Type of operation command.
- Required when I(type=remote_command).
choices:
- custom_script
- ipmi
- ssh
- telnet
- global_script
command:
description:
- Command to run.
- Required when I(type=remote_command) and I(command_type!=global_script).
execute_on:
description:
- Target on which the custom script operation command will be executed.
- Required when I(type=remote_command) and I(command_type=custom_script).
choices:
- agent
- server
- proxy
run_on_groups:
description:
- Host groups to run remote commands on.
- Required when I(type=remote_command) and I(run_on_hosts) is not set.
run_on_hosts:
description:
- Hosts to run remote commands on.
- Required when I(type=remote_command) and I(run_on_groups) is not set.
- If set to 0 the command will be run on the current host.
ssh_auth_type:
description:
- Authentication method used for SSH commands.
- Required when I(type=remote_command) and I(command_type=ssh).
choices:
- password
- public_key
ssh_privatekey_file:
description:
- Name of the private key file used for SSH commands with public key authentication.
- Required when I(ssh_auth_type=public_key).
- Can be used when I(type=remote_command).
ssh_publickey_file:
description:
- Name of the public key file used for SSH commands with public key authentication.
- Required when I(ssh_auth_type=public_key).
- Can be used when I(type=remote_command).
username:
description:
- User name used for authentication.
- Required when I(ssh_auth_type in [public_key, password]) or I(command_type=telnet).
- Can be used when I(type=remote_command).
password:
description:
- Password used for authentication.
- Required when I(ssh_auth_type=password) or I(command_type=telnet).
- Can be used when I(type=remote_command).
port:
description:
- Port number used for authentication.
- Can be used when I(command_type in [ssh, telnet]) and I(type=remote_command).
script_name:
description:
- The name of script used for global script commands.
- Required when I(command_type=global_script).
- Can be used when I(type=remote_command).
recovery_operations:
type: list
description:
- List of recovery operations.
- C(Suboptions) are the same as for I(operations).
- Works only with >= Zabbix 3.2
acknowledge_operations:
type: list
description:
- List of acknowledge operations.
- Action acknowledge operations are known as update operations since Zabbix 4.0.
- C(Suboptions) are the same as for I(operations).
- Works only with >= Zabbix 3.4
aliases: [ update_operations ]
notes:
- Only Zabbix >= 3.0 is supported.
extends_documentation_fragment:
- community.zabbix.zabbix
'''
EXAMPLES = '''
# Trigger action with only one condition
- name: Deploy trigger action
community.zabbix.zabbix_action:
server_url: "http://zabbix.example.com/zabbix/"
login_user: Admin
login_password: <PASSWORD>
name: "Send alerts to Admin"
event_source: 'trigger'
state: present
status: enabled
esc_period: 60
conditions:
- type: 'trigger_severity'
operator: '>='
value: 'Information'
operations:
- type: send_message
subject: "Something bad is happening"
message: "Come on, guys do something"
media_type: 'Email'
send_to_users:
- 'Admin'
# Trigger action with multiple conditions and operations
- name: Deploy trigger action
community.zabbix.zabbix_action:
server_url: "http://zabbix.example.com/zabbix/"
login_user: Admin
login_password: <PASSWORD>
name: "Send alerts to Admin"
event_source: 'trigger'
state: present
status: enabled
esc_period: 1m
conditions:
- type: 'trigger_name'
operator: 'like'
value: 'Zabbix agent is unreachable'
formulaid: A
- type: 'trigger_severity'
operator: '>='
value: 'disaster'
formulaid: B
formula: A or B
operations:
- type: send_message
media_type: 'Email'
send_to_users:
- 'Admin'
- type: remote_command
command: 'systemctl restart zabbix-agent'
command_type: custom_script
execute_on: server
run_on_hosts:
- 0
# Trigger action with recovery and acknowledge operations
- name: Deploy trigger action
community.zabbix.zabbix_action:
server_url: "http://zabbix.example.com/zabbix/"
login_user: Admin
login_password: <PASSWORD>
name: "Send alerts to Admin"
event_source: 'trigger'
state: present
status: enabled
esc_period: 1h
conditions:
- type: 'trigger_severity'
operator: '>='
value: 'Information'
operations:
- type: send_message
subject: "Something bad is happening"
message: "Come on, guys do something"
media_type: 'Email'
send_to_users:
- 'Admin'
recovery_operations:
- type: send_message
subject: "Host is down"
message: "Come on, guys do something"
media_type: 'Email'
send_to_users:
- 'Admin'
acknowledge_operations:
- type: send_message
media_type: 'Email'
send_to_users:
- 'Admin'
'''
RETURN = '''
msg:
description: The result of the operation
returned: success
type: str
sample: 'Action Deleted: Register webservers, ID: 0001'
'''
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.zabbix.plugins.module_utils.base import ZabbixBase
from ansible_collections.community.zabbix.plugins.module_utils.wrappers import ZapiWrapper
import ansible_collections.community.zabbix.plugins.module_utils.helpers as zabbix_utils
class Zapi(ZapiWrapper):
    """Name-to-ID lookups against the Zabbix API.

    Resolves the entities (actions, hosts, host groups, templates, triggers,
    discovery rules/checks, proxies, media types, users, user groups, scripts)
    that action operations and filter conditions reference by name.
    Every lookup fails the module with a descriptive message when the entity
    is missing or the API call errors out.
    """

    def __init__(self, module, zbx=None):
        super(Zapi, self).__init__(module, zbx)
        # Helper classes address their lookup backend as ``_zapi_wrapper``;
        # this class acts as its own wrapper.
        self._zapi_wrapper = self

    def check_if_action_exists(self, name):
        """Check if an action exists and fetch it with all sub-objects.

        Args:
            name: Name of the action.

        Returns:
            list: matching actions (empty list when none exist). The camelCase
            recovery/acknowledge operation keys returned by the API are renamed
            to snake_case so they can be diffed against module parameters.
        """
        try:
            _action = self._zapi.action.get({
                "selectOperations": "extend",
                "selectRecoveryOperations": "extend",
                "selectAcknowledgeOperations": "extend",
                "selectFilter": "extend",
                'filter': {'name': [name]}
            })
            if len(_action) > 0:
                _action[0]['recovery_operations'] = _action[0].pop('recoveryOperations', [])
                _action[0]['acknowledge_operations'] = _action[0].pop('acknowledgeOperations', [])
            return _action
        except Exception as e:
            self._module.fail_json(msg="Failed to check if action '%s' exists: %s" % (name, e))

    def get_action_by_name(self, name):
        """Get action by name.

        Args:
            name: Name of the action.

        Returns:
            dict: Zabbix action
        """
        try:
            action_list = self._zapi.action.get({
                'output': 'extend',
                'filter': {'name': [name]}
            })
            if len(action_list) < 1:
                # BUGFIX: the format string was missing its '%s' placeholder,
                # which made the '%' operator raise TypeError instead of
                # reporting the missing action.
                self._module.fail_json(msg="Action not found: %s" % name)
            else:
                return action_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get ID of '%s': %s" % (name, e))

    def get_host_by_host_name(self, host_name):
        """Get host by host name.

        Args:
            host_name: host name.

        Returns:
            dict: host matching host name
        """
        try:
            host_list = self._zapi.host.get({
                'output': 'extend',
                'selectInventory': 'extend',
                'filter': {'host': [host_name]}
            })
            if len(host_list) < 1:
                self._module.fail_json(msg="Host not found: %s" % host_name)
            else:
                return host_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get host '%s': %s" % (host_name, e))

    def get_hostgroup_by_hostgroup_name(self, hostgroup_name):
        """Get host group by host group name.

        Args:
            hostgroup_name: host group name.

        Returns:
            dict: host group matching host group name
        """
        try:
            hostgroup_list = self._zapi.hostgroup.get({
                'output': 'extend',
                'filter': {'name': [hostgroup_name]}
            })
            if len(hostgroup_list) < 1:
                self._module.fail_json(msg="Host group not found: %s" % hostgroup_name)
            else:
                return hostgroup_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get host group '%s': %s" % (hostgroup_name, e))

    def get_template_by_template_name(self, template_name):
        """Get template by template name.

        Args:
            template_name: template name.

        Returns:
            dict: template matching template name
        """
        try:
            # templates are filtered by their technical name ('host'), not
            # their visible name
            template_list = self._zapi.template.get({
                'output': 'extend',
                'filter': {'host': [template_name]}
            })
            if len(template_list) < 1:
                self._module.fail_json(msg="Template not found: %s" % template_name)
            else:
                return template_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get template '%s': %s" % (template_name, e))

    def get_trigger_by_trigger_name(self, trigger_name):
        """Get trigger by trigger name.

        Args:
            trigger_name: trigger name.

        Returns:
            dict: trigger matching trigger name
        """
        try:
            # triggers are looked up by their 'description' field, which
            # holds the trigger name in the Zabbix API
            trigger_list = self._zapi.trigger.get({
                'output': 'extend',
                'filter': {'description': [trigger_name]}
            })
            if len(trigger_list) < 1:
                self._module.fail_json(msg="Trigger not found: %s" % trigger_name)
            else:
                return trigger_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get trigger '%s': %s" % (trigger_name, e))

    def get_discovery_rule_by_discovery_rule_name(self, discovery_rule_name):
        """Get discovery rule by discovery rule name.

        Args:
            discovery_rule_name: discovery rule name.

        Returns:
            dict: discovery rule matching discovery rule name
        """
        try:
            discovery_rule_list = self._zapi.drule.get({
                'output': 'extend',
                'filter': {'name': [discovery_rule_name]}
            })
            if len(discovery_rule_list) < 1:
                self._module.fail_json(msg="Discovery rule not found: %s" % discovery_rule_name)
            else:
                return discovery_rule_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get discovery rule '%s': %s" % (discovery_rule_name, e))

    def get_discovery_check_by_discovery_check_name(self, discovery_check_name):
        """Get discovery check by discovery check name.

        Args:
            discovery_check_name: discovery check name.

        Returns:
            dict: discovery check matching discovery check name
        """
        try:
            # discovery checks are matched on their item key ('key_')
            discovery_check_list = self._zapi.dcheck.get({
                'output': 'extend',
                'filter': {'key_': [discovery_check_name]}
            })
            if len(discovery_check_list) < 1:
                self._module.fail_json(msg="Discovery check not found: %s" % discovery_check_name)
            else:
                return discovery_check_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get discovery check '%s': %s" % (discovery_check_name, e))

    def get_proxy_by_proxy_name(self, proxy_name):
        """Get proxy by proxy name.

        Args:
            proxy_name: proxy name.

        Returns:
            dict: proxy matching proxy name
        """
        try:
            proxy_list = self._zapi.proxy.get({
                'output': 'extend',
                'filter': {'host': [proxy_name]}
            })
            if len(proxy_list) < 1:
                self._module.fail_json(msg="Proxy not found: %s" % proxy_name)
            else:
                return proxy_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get proxy '%s': %s" % (proxy_name, e))

    def get_mediatype_by_mediatype_name(self, mediatype_name):
        """Get media type ID by media type name.

        Args:
            mediatype_name: mediatype name

        Returns:
            str: media type ID ('0' means "all media types")
        """
        # the filterable name field changed from 'description' to 'name'
        # in Zabbix 4.4
        if LooseVersion(self._zbx_api_version) >= LooseVersion('4.4'):
            filter = {'name': [mediatype_name]}
        else:
            filter = {'description': [mediatype_name]}
        try:
            # 'all' is a special value understood by the API as mediatypeid 0
            if str(mediatype_name).lower() == 'all':
                return '0'
            mediatype_list = self._zapi.mediatype.get({
                'output': 'extend',
                'filter': filter
            })
            if len(mediatype_list) < 1:
                self._module.fail_json(msg="Media type not found: %s" % mediatype_name)
            else:
                return mediatype_list[0]['mediatypeid']
        except Exception as e:
            self._module.fail_json(msg="Failed to get mediatype '%s': %s" % (mediatype_name, e))

    def get_user_by_user_name(self, user_name):
        """Get user by user name.

        Args:
            user_name: user name

        Returns:
            dict: user matching user name
        """
        try:
            # the user login field was renamed from 'alias' to 'username'
            # in Zabbix 5.4
            if LooseVersion(self._zbx_api_version) >= LooseVersion('5.4'):
                filter = {'username': [user_name]}
            else:
                filter = {'alias': [user_name]}
            user_list = self._zapi.user.get({
                'output': 'extend',
                'filter': filter,
            })
            if len(user_list) < 1:
                self._module.fail_json(msg="User not found: %s" % user_name)
            else:
                return user_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get user '%s': %s" % (user_name, e))

    def get_usergroup_by_usergroup_name(self, usergroup_name):
        """Get usergroup by usergroup name.

        Args:
            usergroup_name: usergroup name

        Returns:
            dict: usergroup matching usergroup name
        """
        try:
            usergroup_list = self._zapi.usergroup.get({
                'output': 'extend',
                'filter': {'name': [usergroup_name]}
            })
            if len(usergroup_list) < 1:
                self._module.fail_json(msg="User group not found: %s" % usergroup_name)
            else:
                return usergroup_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get user group '%s': %s" % (usergroup_name, e))

    def get_script_by_script_name(self, script_name):
        """Get script by script name.

        Args:
            script_name: script name (may be None, e.g. when the operation is
                not a global-script command)

        Returns:
            dict: script matching script name, or {} when script_name is None
        """
        try:
            if script_name is None:
                return {}
            script_list = self._zapi.script.get({
                'output': 'extend',
                'filter': {'name': [script_name]}
            })
            if len(script_list) < 1:
                self._module.fail_json(msg="Script not found: %s" % script_name)
            else:
                return script_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get script '%s': %s" % (script_name, e))
class Action(ZabbixBase):
    # CRUD handling for one Zabbix action: parameter construction, diffing
    # against the server-side copy, create, update and delete.
    def __init__(self, module, zbx=None, zapi_wrapper=None):
        super(Action, self).__init__(module, zbx, zapi_wrapper)
        # Server-side copy of the action; populated elsewhere when needed.
        self.existing_data = None
    def _construct_parameters(self, **kwargs):
        """Construct parameters.
        Args:
            **kwargs: Arbitrary keyword parameters.
        Returns:
            dict: dictionary of specified parameters, shaped for the
            action.create/action.update API calls of the connected
            Zabbix server version
        """
        _params = {
            'name': kwargs['name'],
            # event source names map positionally onto the API's numeric codes
            'eventsource': to_numeric_value([
                'trigger',
                'discovery',
                'auto_registration',
                'internal'], kwargs['event_source']),
            'esc_period': kwargs.get('esc_period'),
            'filter': kwargs['conditions'],
            'def_longdata': kwargs['default_message'],
            'def_shortdata': kwargs['default_subject'],
            'r_longdata': kwargs['recovery_default_message'],
            'r_shortdata': kwargs['recovery_default_subject'],
            'ack_longdata': kwargs['acknowledge_default_message'],
            'ack_shortdata': kwargs['acknowledge_default_subject'],
            'operations': kwargs['operations'],
            'recovery_operations': kwargs.get('recovery_operations'),
            'acknowledge_operations': kwargs.get('acknowledge_operations'),
            'status': to_numeric_value([
                'enabled',
                'disabled'], kwargs['status'])
        }
        if kwargs['event_source'] == 'trigger':
            # the "pause while in maintenance" flag was renamed in 4.0
            if LooseVersion(self._zbx_api_version) >= LooseVersion('4.0'):
                _params['pause_suppressed'] = '1' if kwargs['pause_in_maintenance'] else '0'
            else:
                _params['maintenance_mode'] = '1' if kwargs['pause_in_maintenance'] else '0'
        if LooseVersion(self._zbx_api_version) >= LooseVersion('5.0'):
            # remove some fields regarding
            # https://www.zabbix.com/documentation/5.0/manual/api/reference/action/object
            _params.pop('def_longdata', None)
            _params.pop('def_shortdata', None)
            _params.pop('r_longdata', None)
            _params.pop('r_shortdata', None)
        # acknowledge message defaults only exist for API versions [3.4, 5.0)
        if (LooseVersion(self._zbx_api_version) < LooseVersion('3.4')
                or LooseVersion(self._zbx_api_version) >= LooseVersion('5.0')):
            _params.pop('ack_longdata', None)
            _params.pop('ack_shortdata', None)
        return _params
    def check_difference(self, **kwargs):
        """Check difference between action and user specified parameters.
        Args:
            **kwargs: Arbitrary keyword parameters.
        Returns:
            dict: dictionary of differences (empty when server state already
            matches the requested parameters)
        """
        existing_action = convert_unicode_to_str(self._zapi_wrapper.check_if_action_exists(kwargs['name'])[0])
        parameters = convert_unicode_to_str(self._construct_parameters(**kwargs))
        change_parameters = {}
        _diff = cleanup_data(compare_dictionaries(parameters, existing_action, change_parameters))
        return _diff
    def update_action(self, **kwargs):
        """Update action.
        Args:
            **kwargs: Arbitrary keyword parameters.
        Returns:
            action: updated action
        """
        try:
            # in check mode report the would-be change and stop
            if self._module.check_mode:
                self._module.exit_json(msg="Action would be updated if check mode was not specified: %s" % kwargs, changed=True)
            # the API expects 'actionid', while callers pass 'action_id'
            kwargs['actionid'] = kwargs.pop('action_id')
            return self._zapi.action.update(kwargs)
        except Exception as e:
            self._module.fail_json(msg="Failed to update action '%s': %s" % (kwargs['actionid'], e))
    def add_action(self, **kwargs):
        """Add action.
        Args:
            **kwargs: Arbitrary keyword parameters.
        Returns:
            action: id of the added action
        """
        try:
            if self._module.check_mode:
                self._module.exit_json(msg="Action would be added if check mode was not specified", changed=True)
            parameters = self._construct_parameters(**kwargs)
            action_list = self._zapi.action.create(parameters)
            return action_list['actionids'][0]
        except Exception as e:
            self._module.fail_json(msg="Failed to create action '%s': %s" % (kwargs['name'], e))
    def delete_action(self, action_id):
        """Delete action.
        Args:
            action_id: Action id
        Returns:
            action: deleted action
        """
        try:
            if self._module.check_mode:
                self._module.exit_json(msg="Action would be deleted if check mode was not specified", changed=True)
            return self._zapi.action.delete([action_id])
        except Exception as e:
            self._module.fail_json(msg="Failed to delete action '%s': %s" % (action_id, e))
class Operations(Zapi):
    """Restructures the user defined operations data to fit the Zabbix API
    requirements (numeric operation types and nested op* sub-objects).
    """

    def _construct_operationtype(self, operation):
        """Construct operation type.

        Args:
            operation: operation to construct

        Returns:
            str: numeric operation type code
        """
        try:
            # positional mapping onto the API's numeric operation type codes
            return to_numeric_value([
                "send_message",
                "remote_command",
                "add_host",
                "remove_host",
                "add_to_host_group",
                "remove_from_host_group",
                "link_to_template",
                "unlink_from_template",
                "enable_host",
                "disable_host",
                "set_host_inventory_mode"], operation['type']
            )
        except Exception:
            self._module.fail_json(msg="Unsupported value '%s' for operation type." % operation['type'])

    def _construct_opmessage(self, operation):
        """Construct operation message.

        Args:
            operation: operation to construct the message

        Returns:
            dict: constructed operation message
        """
        try:
            return {
                # default_msg '1' tells Zabbix to use the media type's
                # default message; '0' means the custom message below is used
                'default_msg': '0' if operation.get('message') is not None or operation.get('subject') is not None else '1',
                'mediatypeid': self._zapi_wrapper.get_mediatype_by_mediatype_name(
                    operation.get('media_type')
                ) if operation.get('media_type') is not None else '0',
                'message': operation.get('message'),
                'subject': operation.get('subject'),
            }
        except Exception as e:
            self._module.fail_json(msg="Failed to construct operation message. The error was: %s" % e)

    def _construct_opmessage_usr(self, operation):
        """Construct operation message user.

        Args:
            operation: operation to construct the message user

        Returns:
            list: constructed operation message user or None if operation not found
        """
        if operation.get('send_to_users') is None:
            return None
        return [{
            'userid': self._zapi_wrapper.get_user_by_user_name(_user)['userid']
        } for _user in operation.get('send_to_users')]

    def _construct_opmessage_grp(self, operation):
        """Construct operation message group.

        Args:
            operation: operation to construct the message group

        Returns:
            list: constructed operation message group or None if operation not found
        """
        if operation.get('send_to_groups') is None:
            return None
        return [{
            'usrgrpid': self._zapi_wrapper.get_usergroup_by_usergroup_name(_group)['usrgrpid']
        } for _group in operation.get('send_to_groups')]

    def _construct_opcommand(self, operation):
        """Construct operation command.

        Args:
            operation: operation to construct command

        Returns:
            dict: constructed operation command
        """
        try:
            return {
                'type': to_numeric_value([
                    'custom_script',
                    'ipmi',
                    'ssh',
                    'telnet',
                    'global_script'], operation.get('command_type', 'custom_script')),
                'command': operation.get('command'),
                'execute_on': to_numeric_value([
                    'agent',
                    'server',
                    'proxy'], operation.get('execute_on', 'server')),
                # {} from get_script_by_script_name(None) yields scriptid None,
                # which cleanup_data() later strips
                'scriptid': self._zapi_wrapper.get_script_by_script_name(
                    operation.get('script_name')
                ).get('scriptid'),
                'authtype': to_numeric_value([
                    'password',
                    'public_key'
                ], operation.get('ssh_auth_type')),
                'privatekey': operation.get('ssh_privatekey_file'),
                'publickey': operation.get('ssh_publickey_file'),
                'username': operation.get('username'),
                'password': operation.get('password'),
                'port': operation.get('port')
            }
        except Exception as e:
            self._module.fail_json(msg="Failed to construct operation command. The error was: %s" % e)

    def _construct_opcommand_hst(self, operation):
        """Construct operation command host.

        Args:
            operation: operation to construct command host

        Returns:
            list: constructed operation command host
        """
        if operation.get('run_on_hosts') is None:
            return None
        # '0' is the special value for "current host"
        return [{
            'hostid': self._zapi_wrapper.get_host_by_host_name(_host)['hostid']
        } if str(_host) != '0' else {'hostid': '0'} for _host in operation.get('run_on_hosts')]

    def _construct_opcommand_grp(self, operation):
        """Construct operation command group.

        Args:
            operation: operation to construct command group

        Returns:
            list: constructed operation command group
        """
        if operation.get('run_on_groups') is None:
            return None
        # BUGFIX: hostgroup.get returns 'groupid' (see _construct_opgroup and
        # Filter._construct_value); reading 'hostid' raised KeyError.
        return [{
            'groupid': self._zapi_wrapper.get_hostgroup_by_hostgroup_name(_group)['groupid']
        } for _group in operation.get('run_on_groups')]

    def _construct_opgroup(self, operation):
        """Construct operation group.

        Args:
            operation: operation to construct group

        Returns:
            list: constructed operation group
        """
        return [{
            'groupid': self._zapi_wrapper.get_hostgroup_by_hostgroup_name(_group)['groupid']
        } for _group in operation.get('host_groups', [])]

    def _construct_optemplate(self, operation):
        """Construct operation template.

        Args:
            operation: operation to construct template

        Returns:
            list: constructed operation template
        """
        return [{
            'templateid': self._zapi_wrapper.get_template_by_template_name(_template)['templateid']
        } for _template in operation.get('templates', [])]

    def _construct_opinventory(self, operation):
        """Construct operation inventory.

        Args:
            operation: operation to construct inventory

        Returns:
            dict: constructed operation inventory
        """
        return {
            'inventory_mode': to_numeric_value([
                'manual',
                'automatic'
            ], operation.get('inventory'))
        }

    def _construct_opconditions(self, operation):
        """Construct operation conditions.

        Args:
            operation: operation to construct the conditions

        Returns:
            list: constructed operation conditions
        """
        _opcond = operation.get('operation_condition')
        if _opcond is not None:
            # argument_spec restricts operation_condition to
            # 'acknowledged' / 'not_acknowledged', so _value is always bound
            if _opcond == 'acknowledged':
                _value = '1'
            elif _opcond == 'not_acknowledged':
                _value = '0'
            # conditiontype 14 == "event acknowledged"
            return [{
                'conditiontype': '14',
                'operator': '0',
                'value': _value
            }]
        return []

    def construct_the_data(self, operations):
        """Construct the operation data using helper methods.

        Args:
            operations: list of user-supplied operations

        Returns:
            list: constructed operation data with None values stripped
        """
        constructed_data = []
        for op in operations:
            operation_type = self._construct_operationtype(op)
            constructed_operation = {
                'operationtype': operation_type,
                'esc_period': op.get('esc_period'),
                'esc_step_from': op.get('esc_step_from'),
                'esc_step_to': op.get('esc_step_to')
            }
            # Send Message type
            if constructed_operation['operationtype'] == '0':
                constructed_operation['opmessage'] = self._construct_opmessage(op)
                constructed_operation['opmessage_usr'] = self._construct_opmessage_usr(op)
                constructed_operation['opmessage_grp'] = self._construct_opmessage_grp(op)
                constructed_operation['opconditions'] = self._construct_opconditions(op)
            # Send Command type
            if constructed_operation['operationtype'] == '1':
                constructed_operation['opcommand'] = self._construct_opcommand(op)
                constructed_operation['opcommand_hst'] = self._construct_opcommand_hst(op)
                constructed_operation['opcommand_grp'] = self._construct_opcommand_grp(op)
                constructed_operation['opconditions'] = self._construct_opconditions(op)
            # Add to/Remove from host group
            if constructed_operation['operationtype'] in ('4', '5'):
                constructed_operation['opgroup'] = self._construct_opgroup(op)
            # Link/Unlink template
            if constructed_operation['operationtype'] in ('6', '7'):
                constructed_operation['optemplate'] = self._construct_optemplate(op)
            # Set inventory mode
            if constructed_operation['operationtype'] == '10':
                constructed_operation['opinventory'] = self._construct_opinventory(op)
            constructed_data.append(constructed_operation)
        return cleanup_data(constructed_data)
class RecoveryOperations(Operations):
    """
    Restructures the user defined recovery operations data to fit the Zabbix API requirements
    """

    def _construct_operationtype(self, operation):
        """Map a recovery operation type name to its numeric API code.

        Args:
            operation: operation to construct type

        Returns:
            str: numeric operation type code
        """
        # Only indices 0, 1 and 11 are valid recovery operation types;
        # the None placeholders keep the positional numbering intact.
        type_table = ["send_message", "remote_command"] + [None] * 9 + ["notify_all_involved"]
        try:
            return to_numeric_value(type_table, operation['type'])
        except Exception:
            self._module.fail_json(msg="Unsupported value '%s' for recovery operation type." % operation['type'])

    def construct_the_data(self, operations):
        """Build the recovery operations payload from user-supplied data.

        Args:
            operations: list of user-supplied recovery operations

        Returns:
            list: constructed recovery operations data, None values stripped
        """
        results = []
        for operation in operations:
            entry = {'operationtype': self._construct_operationtype(operation)}
            op_type = entry['operationtype']
            if op_type in ('0', '11'):
                # send_message / notify_all_involved carry message sub-objects
                entry['opmessage'] = self._construct_opmessage(operation)
                entry['opmessage_usr'] = self._construct_opmessage_usr(operation)
                entry['opmessage_grp'] = self._construct_opmessage_grp(operation)
            elif op_type == '1':
                # remote_command carries command sub-objects
                entry['opcommand'] = self._construct_opcommand(operation)
                entry['opcommand_hst'] = self._construct_opcommand_hst(operation)
                entry['opcommand_grp'] = self._construct_opcommand_grp(operation)
            results.append(entry)
        return cleanup_data(results)
class AcknowledgeOperations(Operations):
    """
    Restructures the user defined acknowledge operations data to fit the Zabbix API requirements
    """

    def _construct_operationtype(self, operation):
        """Map an acknowledge operation type name to its numeric API code.

        Args:
            operation: operation to construct type

        Returns:
            str: numeric operation type code
        """
        # Only indices 0, 1 and 12 are valid acknowledge operation types;
        # the None placeholders keep the positional numbering intact.
        type_table = ["send_message", "remote_command"] + [None] * 10 + ["notify_all_involved"]
        try:
            return to_numeric_value(type_table, operation['type'])
        except Exception:
            self._module.fail_json(msg="Unsupported value '%s' for acknowledge operation type." % operation['type'])

    def construct_the_data(self, operations):
        """Build the acknowledge operations payload from user-supplied data.

        Args:
            operations: list of user-supplied acknowledge operations

        Returns:
            list: constructed acknowledge operations data, None values stripped
        """
        results = []
        for operation in operations:
            entry = {'operationtype': self._construct_operationtype(operation)}
            op_type = entry['operationtype']
            if op_type in ('0', '11'):
                # message-style operations carry message sub-objects
                entry['opmessage'] = self._construct_opmessage(operation)
                entry['opmessage_usr'] = self._construct_opmessage_usr(operation)
                entry['opmessage_grp'] = self._construct_opmessage_grp(operation)
            elif op_type == '1':
                # remote_command carries command sub-objects
                entry['opcommand'] = self._construct_opcommand(operation)
                entry['opcommand_hst'] = self._construct_opcommand_hst(operation)
                entry['opcommand_grp'] = self._construct_opcommand_grp(operation)
            results.append(entry)
        return cleanup_data(results)
class Filter(Zapi):
    # Restructures the user defined filter conditions to fit the
    # Zabbix API action filter object requirements.
    def _construct_evaltype(self, _eval_type, _formula, _conditions):
        """Construct the eval type
        Args:
            _eval_type: user-selected evaluation type ('andor', 'and', 'or',
                'custom_expression' or None)
            _formula: zabbix condition evaluation formula
            _conditions: list of conditions to check
        Returns:
            dict: evaltype code plus the formula (formula only for
            custom expressions)
        """
        # a single condition needs no combination logic -> and/or (0)
        if len(_conditions) <= 1:
            return {
                'evaltype': '0',
                'formula': None
            }
        if _eval_type == 'andor':
            return {
                'evaltype': '0',
                'formula': None
            }
        if _eval_type == 'and':
            return {
                'evaltype': '1',
                'formula': None
            }
        if _eval_type == 'or':
            return {
                'evaltype': '2',
                'formula': None
            }
        if _eval_type == 'custom_expression':
            if _formula is not None:
                return {
                    'evaltype': '3',
                    'formula': _formula
                }
            else:
                self._module.fail_json(msg="'formula' is required when 'eval_type' is set to 'custom_expression'")
        # backward-compatible fallback: a bare formula implies a custom
        # expression even without eval_type
        if _formula is not None:
            return {
                'evaltype': '3',
                'formula': _formula
            }
        return {
            'evaltype': '0',
            'formula': None
        }
    def _construct_conditiontype(self, _condition):
        """Construct the condition type
        Args:
            _condition: condition to check
        Returns:
            str: constructed condition type data
        """
        try:
            # positional mapping onto the API's numeric condition type codes;
            # None entries are codes that have no user-facing name
            return to_numeric_value([
                "host_group",
                "host",
                "trigger",
                "trigger_name",
                "trigger_severity",
                "trigger_value",
                "time_period",
                "host_ip",
                "discovered_service_type",
                "discovered_service_port",
                "discovery_status",
                "uptime_or_downtime_duration",
                "received_value",
                "host_template",
                None,
                "application",
                "maintenance_status",
                None,
                "discovery_rule",
                "discovery_check",
                "proxy",
                "discovery_object",
                "host_name",
                "event_type",
                "host_metadata",
                "event_tag",
                "event_tag_value"], _condition['type']
            )
        except Exception:
            self._module.fail_json(msg="Unsupported value '%s' for condition type." % _condition['type'])
    def _construct_operator(self, _condition):
        """Construct operator
        Args:
            _condition: condition to construct
        Returns:
            str: constructed operator
        """
        try:
            # positional mapping onto the API's numeric operator codes
            return to_numeric_value([
                "=",
                "<>",
                "like",
                "not like",
                "in",
                ">=",
                "<=",
                "not in",
                "matches",
                "does not match",
                "Yes",
                "No"], _condition['operator']
            )
        except Exception:
            self._module.fail_json(msg="Unsupported value '%s' for operator." % _condition['operator'])
    def _construct_value(self, conditiontype, value):
        """Construct condition value
        Args:
            conditiontype: type of condition to construct
            value: value to construct
        Returns:
            str: constructed value (entity IDs are resolved from names,
            enumerated values are mapped to numeric codes, everything else
            is passed through unchanged)
        """
        try:
            # Host group
            if conditiontype == '0':
                return self._zapi_wrapper.get_hostgroup_by_hostgroup_name(value)['groupid']
            # Host
            if conditiontype == '1':
                return self._zapi_wrapper.get_host_by_host_name(value)['hostid']
            # Trigger
            if conditiontype == '2':
                return self._zapi_wrapper.get_trigger_by_trigger_name(value)['triggerid']
            # Trigger name: return as is
            # Trigger severity
            if conditiontype == '4':
                return to_numeric_value([
                    "not classified",
                    "information",
                    "warning",
                    "average",
                    "high",
                    "disaster"], value or "not classified"
                )
            # Trigger value
            if conditiontype == '5':
                return to_numeric_value([
                    "ok",
                    "problem"], value or "ok"
                )
            # Time period: return as is
            # Host IP: return as is
            # Discovered service type
            if conditiontype == '8':
                return to_numeric_value([
                    "SSH",
                    "LDAP",
                    "SMTP",
                    "FTP",
                    "HTTP",
                    "POP",
                    "NNTP",
                    "IMAP",
                    "TCP",
                    "Zabbix agent",
                    "SNMPv1 agent",
                    "SNMPv2 agent",
                    "ICMP ping",
                    "SNMPv3 agent",
                    "HTTPS",
                    "Telnet"], value
                )
            # Discovered service port: return as is
            # Discovery status
            if conditiontype == '10':
                return to_numeric_value([
                    "up",
                    "down",
                    "discovered",
                    "lost"], value
                )
            # Host template
            if conditiontype == '13':
                return self._zapi_wrapper.get_template_by_template_name(value)['templateid']
            # Discovery rule
            if conditiontype == '18':
                return self._zapi_wrapper.get_discovery_rule_by_discovery_rule_name(value)['druleid']
            # Discovery check
            if conditiontype == '19':
                return self._zapi_wrapper.get_discovery_check_by_discovery_check_name(value)['dcheckid']
            # Proxy
            if conditiontype == '20':
                return self._zapi_wrapper.get_proxy_by_proxy_name(value)['proxyid']
            # Discovery object
            if conditiontype == '21':
                return to_numeric_value([
                    "pchldrfor0",
                    "host",
                    "service"], value
                )
            # Internal event type
            if conditiontype == '23':
                return to_numeric_value([
                    "item in not supported state",
                    "item in normal state",
                    "LLD rule in not supported state",
                    "LLD rule in normal state",
                    "trigger in unknown state",
                    "trigger in normal state"], value
                )
            return value
        except Exception:
            self._module.fail_json(
                msg="""Unsupported value '%s' for specified condition type.
                       Check out Zabbix API documentation for supported values for
                       condition type '%s' at
                       https://www.zabbix.com/documentation/3.4/manual/api/reference/action/object#action_filter_condition""" % (value, conditiontype)
            )
    def construct_the_data(self, _eval_type, _formula, _conditions):
        """Construct the user defined filter conditions to fit the Zabbix API
        requirements operations data using helper methods.
        Args:
            _eval_type: user-selected evaluation type
            _formula: zabbix condition evaluation formula
            _conditions: conditions to construct
        Returns:
            dict: filter object (conditions, evaltype, formula) with None
            values stripped, or None when no conditions were supplied
        """
        if _conditions is None:
            return None
        constructed_data = {}
        constructed_data['conditions'] = []
        for cond in _conditions:
            condition_type = self._construct_conditiontype(cond)
            constructed_data['conditions'].append({
                "conditiontype": condition_type,
                "value": self._construct_value(condition_type, cond.get("value")),
                "value2": cond.get("value2"),
                "formulaid": cond.get("formulaid"),
                "operator": self._construct_operator(cond)
            })
        _constructed_evaltype = self._construct_evaltype(
            _eval_type,
            _formula,
            constructed_data['conditions']
        )
        constructed_data['evaltype'] = _constructed_evaltype['evaltype']
        constructed_data['formula'] = _constructed_evaltype['formula']
        return cleanup_data(constructed_data)
def convert_unicode_to_str(data):
    """Recursively stringify every leaf value of a nested structure.

    Args:
        data: dict, list/tuple/set, scalar or None

    Returns:
        A structure of the same shape where every non-None leaf has been
        passed through str(); None values are preserved.
    """
    if data is None:
        return data
    if isinstance(data, dict):
        # convert both keys and values, recursing into nested containers
        return {convert_unicode_to_str(key): convert_unicode_to_str(value)
                for key, value in data.items()}
    if isinstance(data, (list, tuple, set)):
        # rebuild the container with the same concrete type
        return type(data)(convert_unicode_to_str(item) for item in data)
    return str(data)
def to_numeric_value(strs, value):
    """Map a (case-insensitive) name to its positional index in *strs*.

    Args:
        strs: ordered list of names; may contain None placeholders
        value: name to look up, or None

    Returns:
        str: index of *value* in *strs* as a string, or None when value is
        None. Raises KeyError when the name is not present.
    """
    if value is None:
        return value
    # normalise string entries to lower case; keep None placeholders as-is
    normalized = [entry.lower() if isinstance(entry, str) else entry
                  for entry in strs]
    # duplicate entries resolve to their last index (dict last-write-wins)
    lookup = {name: index for index, name in enumerate(normalized)}
    return str(lookup[value.lower()])
def compare_lists(l1, l2, diff_dict):
    """
    Compares l1 and l2 lists and adds the items that are different
    to the diff_dict dictionary.
    Used in recursion with compare_dictionaries() function.
    Args:
        l1: first list to compare
        l2: second list to compare
        diff_dict: list accumulator for the differing items
    Returns:
        list: items that are different
    """
    # length mismatch: report the whole first list as the difference
    if len(l1) != len(l2):
        diff_dict.append(l1)
        return diff_dict
    for index, (left, right) in enumerate(zip(l1, l2)):
        if isinstance(left, dict):
            # recurse into paired dict items, accumulating at this position
            diff_dict.insert(index, {})
            diff_dict[index] = compare_dictionaries(left, right, diff_dict[index])
        elif left != right:
            diff_dict.append(left)
    # drop empty dict placeholders left by equal dict pairs
    while {} in diff_dict:
        diff_dict.remove({})
    return diff_dict
def compare_dictionaries(d1, d2, diff_dict):
    """
    Compares d1 and d2 dictionaries and adds the items that are different
    to the diff_dict dictionary.
    Used in recursion with compare_lists() function.
    Args:
        d1: first dictionary to compare
        d2: second dictionary to compare
        diff_dict: dictionary to store the difference
    Returns:
        dict: items that are different
    """
    for key, value in d1.items():
        # keys missing from d2 are reported wholesale
        if key not in d2:
            diff_dict[key] = value
            continue
        if isinstance(value, dict):
            diff_dict[key] = {}
            compare_dictionaries(value, d2[key], diff_dict[key])
            if diff_dict[key]:
                # any nested difference -> report the full desired value
                diff_dict[key] = value
            else:
                del diff_dict[key]
        elif isinstance(value, list):
            diff_dict[key] = []
            compare_lists(value, d2[key], diff_dict[key])
            if diff_dict[key]:
                diff_dict[key] = value
            else:
                del diff_dict[key]
        elif value != d2[key]:
            diff_dict[key] = value
    return diff_dict
def cleanup_data(obj):
    """Recursively strip None entries from a nested structure.

    Args:
        obj: object to cleanup

    Returns:
        The same structure with None list items and None dict keys/values
        removed; scalars are returned unchanged.
    """
    if isinstance(obj, dict):
        # drop entries whose key or value is None before recursing
        return type(obj)(
            (cleanup_data(key), cleanup_data(value))
            for key, value in obj.items()
            if key is not None and value is not None
        )
    if isinstance(obj, (list, tuple, set)):
        return type(obj)(cleanup_data(item) for item in obj if item is not None)
    return obj
def main():
"""Main ansible module function
"""
argument_spec = zabbix_utils.zabbix_common_argument_spec()
argument_spec.update(dict(
esc_period=dict(type='str', required=False),
name=dict(type='str', required=True),
event_source=dict(type='str', required=False, choices=['trigger', 'discovery', 'auto_registration', 'internal']),
state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
status=dict(type='str', required=False, default='enabled', choices=['enabled', 'disabled']),
pause_in_maintenance=dict(type='bool', required=False, default=True),
default_message=dict(type='str', required=False, default=''),
default_subject=dict(type='str', required=False, default=''),
recovery_default_message=dict(type='str', required=False, default=''),
recovery_default_subject=dict(type='str', required=False, default=''),
acknowledge_default_message=dict(type='str', required=False, default=''),
acknowledge_default_subject=dict(type='str', required=False, default=''),
conditions=dict(
type='list',
required=False,
default=[],
elements='dict',
options=dict(
formulaid=dict(type='str', required=False),
operator=dict(type='str', required=True),
type=dict(type='str', required=True),
value=dict(type='str', required=True),
value2=dict(type='str', required=False)
),
required_if=[
['type', 'event_tag_value', ['value2']],
]
),
formula=dict(type='str', required=False, default=None),
eval_type=dict(type='str', required=False, default=None, choices=['andor', 'and', 'or', 'custom_expression']),
operations=dict(
type='list',
required=False,
default=[],
elements='dict',
options=dict(
type=dict(
type='str',
required=True,
choices=[
'send_message',
'remote_command',
'add_host',
'remove_host',
'add_to_host_group',
'remove_from_host_group',
'link_to_template',
'unlink_from_template',
'enable_host',
'disable_host',
'set_host_inventory_mode',
]
),
esc_period=dict(type='str', required=False),
esc_step_from=dict(type='int', required=False, default=1),
esc_step_to=dict(type='int', required=False, default=1),
operation_condition=dict(
type='str',
required=False,
default=None,
choices=['acknowledged', 'not_acknowledged']
),
# when type is remote_command
command_type=dict(
type='str',
required=False,
choices=[
'custom_script',
'ipmi',
'ssh',
'telnet',
'global_script'
]
),
command=dict(type='str', required=False),
execute_on=dict(
type='str',
required=False,
choices=['agent', 'server', 'proxy']
),
password=dict(type='str', required=False, no_log=True),
port=dict(type='int', required=False),
run_on_groups=dict(type='list', required=False),
run_on_hosts=dict(type='list', required=False),
script_name=dict(type='str', required=False),
ssh_auth_type=dict(type='str', required=False, choices=['password', 'public_key']),
ssh_privatekey_file=dict(type='str', required=False),
ssh_publickey_file=dict(type='str', required=False),
username=dict(type='str', required=False),
# when type is send_message
media_type=dict(type='str', required=False),
subject=dict(type='str', required=False),
message=dict(type='str', required=False),
send_to_groups=dict(type='list', required=False),
send_to_users=dict(type='list', required=False),
# when type is add_to_host_group or remove_from_host_group
host_groups=dict(type='list', required=False),
# when type is set_host_inventory_mode
inventory=dict(type='str', required=False, choices=['manual', 'automatic']),
# when type is link_to_template or unlink_from_template
templates=dict(type='list', required=False)
),
required_if=[
['type', 'remote_command', ['command_type']],
['type', 'remote_command', ['run_on_groups', 'run_on_hosts'], True],
['command_type', 'custom_script', ['command', 'execute_on']],
['command_type', 'ipmi', ['command']],
['command_type', 'ssh', ['command', 'ssh_auth_type']],
['ssh_auth_type', 'password', ['username', 'password']],
['ssh_auth_type', 'public_key', ['username', 'ssh_privatekey_file', 'ssh_publickey_file']],
['command_type', 'telnet', ['command', 'username', 'password']],
['command_type', 'global_script', ['script_name']],
['type', 'add_to_host_group', ['host_groups']],
['type', 'remove_from_host_group', ['host_groups']],
['type', 'link_to_template', ['templates']],
['type', 'unlink_from_template', ['templates']],
['type', 'set_host_inventory_mode', ['inventory']],
['type', 'send_message', ['send_to_users', 'send_to_groups'], True]
]
),
recovery_operations=dict(
type='list',
required=False,
default=[],
elements='dict',
options=dict(
type=dict(
type='str',
required=True,
choices=[
'send_message',
'remote_command',
'notify_all_involved'
]
),
# when type is remote_command
command_type=dict(
type='str',
required=False,
choices=[
'custom_script',
'ipmi',
'ssh',
'telnet',
'global_script'
]
),
command=dict(type='str', required=False),
execute_on=dict(
type='str',
required=False,
choices=['agent', 'server', 'proxy']
),
password=dict(type='str', required=False, no_log=True),
port=dict(type='int', required=False),
run_on_groups=dict(type='list', required=False),
run_on_hosts=dict(type='list', required=False),
script_name=dict(type='str', required=False),
ssh_auth_type=dict(type='str', required=False, choices=['password', 'public_key']),
ssh_privatekey_file=dict(type='str', required=False),
ssh_publickey_file=dict(type='str', required=False),
username=dict(type='str', required=False),
# when type is send_message
media_type=dict(type='str', required=False),
subject=dict(type='str', required=False),
message=dict(type='str', required=False),
send_to_groups=dict(type='list', required=False),
send_to_users=dict(type='list', required=False),
),
required_if=[
['type', 'remote_command', ['command_type']],
['type', 'remote_command', [
'run_on_groups',
'run_on_hosts'
], True],
['command_type', 'custom_script', [
'command',
'execute_on'
]],
['command_type', 'ipmi', ['command']],
['command_type', 'ssh', ['command', 'ssh_auth_type']],
['ssh_auth_type', 'password', ['username', 'password']],
['ssh_auth_type', 'public_key', ['username', 'ssh_privatekey_file', 'ssh_publickey_file']],
['command_type', 'telnet', ['command', 'username', 'password']],
['command_type', 'global_script', ['script_name']],
['type', 'send_message', ['send_to_users', 'send_to_groups'], True]
]
),
acknowledge_operations=dict(
type='list',
required=False,
default=[],
elements='dict',
aliases=['update_operations'],
options=dict(
type=dict(
type='str',
required=True,
choices=[
'send_message',
'remote_command',
'notify_all_involved'
]
),
# when type is remote_command
command_type=dict(
type='str',
required=False,
choices=[
'custom_script',
'ipmi',
'ssh',
'telnet',
'global_script'
]
),
command=dict(type='str', required=False),
execute_on=dict(
type='str',
required=False,
choices=['agent', 'server', 'proxy']
),
password=dict(type='str', required=False, no_log=True),
port=dict(type='int', required=False),
run_on_groups=dict(type='list', required=False),
run_on_hosts=dict(type='list', required=False),
script_name=dict(type='str', required=False),
ssh_auth_type=dict(type='str', required=False, choices=['password', 'public_key']),
ssh_privatekey_file=dict(type='str', required=False),
ssh_publickey_file=dict(type='str', required=False),
username=dict(type='str', required=False),
# when type is send_message
media_type=dict(type='str', required=False),
subject=dict(type='str', required=False),
message=dict(type='str', required=False),
send_to_groups=dict(type='list', required=False),
send_to_users=dict(type='list', required=False),
),
required_if=[
['type', 'remote_command', ['command_type']],
['type', 'remote_command', [
'run_on_groups',
'run_on_hosts'
], True],
['command_type', 'custom_script', [
'command',
'execute_on'
]],
['command_type', 'ipmi', ['command']],
['command_type', 'ssh', ['command', 'ssh_auth_type']],
['ssh_auth_type', 'password', ['username', 'password']],
['ssh_auth_type', 'public_key', ['username', 'ssh_privatekey_file', 'ssh_publickey_file']],
['command_type', 'telnet', ['command', 'username', 'password']],
['command_type', 'global_script', ['script_name']],
['type', 'send_message', ['send_to_users', 'send_to_groups'], True]
]
)
))
module = AnsibleModule(
argument_spec=argument_spec,
required_if=[
['state', 'present', [
'esc_period',
'event_source'
]]
],
supports_check_mode=True
)
name = module.params['name']
esc_period = module.params['esc_period']
event_source = module.params['event_source']
state = module.params['state']
status = module.params['status']
pause_in_maintenance = module.params['pause_in_maintenance']
default_message = module.params['default_message']
default_subject = module.params['default_subject']
recovery_default_message = module.params['recovery_default_message']
recovery_default_subject = module.params['recovery_default_subject']
acknowledge_default_message = module.params['acknowledge_default_message']
acknowledge_default_subject = module.params['acknowledge_default_subject']
conditions = module.params['conditions']
formula = module.params['formula']
eval_type = module.params['eval_type']
operations = module.params['operations']
recovery_operations = module.params['recovery_operations']
acknowledge_operations = module.params['acknowledge_operations']
zapi_wrapper = Zapi(module)
action = Action(module, zapi_wrapper=zapi_wrapper)
action_exists = zapi_wrapper.check_if_action_exists(name)
ops = Operations(module, zapi_wrapper)
recovery_ops = RecoveryOperations(module, zapi_wrapper)
acknowledge_ops = AcknowledgeOperations(module, zapi_wrapper)
fltr = Filter(module, zapi_wrapper)
if action_exists:
action_id = zapi_wrapper.get_action_by_name(name)['actionid']
if state == "absent":
result = action.delete_action(action_id)
module.exit_json(changed=True, msg="Action Deleted: %s, ID: %s" % (name, result))
else:
difference = action.check_difference(
action_id=action_id,
name=name,
event_source=event_source,
esc_period=esc_period,
status=status,
pause_in_maintenance=pause_in_maintenance,
default_message=default_message,
default_subject=default_subject,
recovery_default_message=recovery_default_message,
recovery_default_subject=recovery_default_subject,
acknowledge_default_message=acknowledge_default_message,
acknowledge_default_subject=acknowledge_default_subject,
operations=ops.construct_the_data(operations),
recovery_operations=recovery_ops.construct_the_data(recovery_operations),
acknowledge_operations=acknowledge_ops.construct_the_data(acknowledge_operations),
conditions=fltr.construct_the_data(eval_type, formula, conditions)
)
if difference == {}:
module.exit_json(changed=False, msg="Action is up to date: %s" % (name))
else:
result = action.update_action(
action_id=action_id,
**difference
)
module.exit_json(changed=True, msg="Action Updated: %s, ID: %s" % (name, result))
else:
if state == "absent":
module.exit_json(changed=False)
else:
action_id = action.add_action(
name=name,
event_source=event_source,
esc_period=esc_period,
status=status,
pause_in_maintenance=pause_in_maintenance,
default_message=default_message,
default_subject=default_subject,
recovery_default_message=recovery_default_message,
recovery_default_subject=recovery_default_subject,
acknowledge_default_message=acknowledge_default_message,
acknowledge_default_subject=acknowledge_default_subject,
operations=ops.construct_the_data(operations),
recovery_operations=recovery_ops.construct_the_data(recovery_operations),
acknowledge_operations=acknowledge_ops.construct_the_data(acknowledge_operations),
conditions=fltr.construct_the_data(eval_type, formula, conditions)
)
module.exit_json(changed=True, msg="Action created: %s, ID: %s" % (name, action_id))
# Standard Ansible module entry point: run main() only when this file is
# executed directly (Ansible invokes modules as scripts).
if __name__ == '__main__':
    main()
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: zabbix_action
short_description: Create/Delete/Update Zabbix actions
description:
- This module allows you to create, modify and delete Zabbix actions.
author:
- <NAME> (@rubentsirunyan)
- <NAME> (@K-DOT)
requirements:
- "zabbix-api >= 0.5.4"
options:
name:
description:
- Name of the action
required: true
event_source:
description:
- Type of events that the action will handle.
- Required when C(state=present).
required: false
choices: ['trigger', 'discovery', 'auto_registration', 'internal']
state:
description:
- State of the action.
- On C(present), it will create an action if it does not exist or update the action if the associated data is different.
- On C(absent), it will remove the action if it exists.
choices: ['present', 'absent']
default: 'present'
status:
description:
- Status of the action.
choices: ['enabled', 'disabled']
default: 'enabled'
pause_in_maintenance:
description:
- Whether to pause escalation during maintenance periods or not.
- Can be used when I(event_source=trigger).
type: 'bool'
default: true
esc_period:
description:
- Default operation step duration. Must be greater than 60 seconds.
- Accepts only seconds in int for <= Zabbix 3.2
- Accepts seconds, time unit with suffix and user macro since => Zabbix 3.4
- Required when C(state=present).
required: false
conditions:
type: list
elements: dict
description:
- List of conditions to use for filtering results.
- For more information about suboptions of this option please
check out Zabbix API documentation U(https://www.zabbix.com/documentation/5.0/manual/api/reference/action/object#action_filter_condition)
suboptions:
type:
description:
- Type (label) of the condition.
- 'Possible values when I(event_source=trigger):'
- ' - C(host_group)'
- ' - C(host)'
- ' - C(trigger)'
- ' - C(trigger_name)'
- ' - C(trigger_severity)'
- ' - C(time_period)'
- ' - C(host_template)'
- ' - C(application)'
- ' - C(maintenance_status) known in Zabbix 4.0 and above as "Problem is suppressed"'
- ' - C(event_tag)'
- ' - C(event_tag_value)'
- 'Possible values when I(event_source=discovery):'
- ' - C(host_IP)'
- ' - C(discovered_service_type)'
- ' - C(discovered_service_port)'
- ' - C(discovery_status)'
- ' - C(uptime_or_downtime_duration)'
- ' - C(received_value)'
- ' - C(discovery_rule)'
- ' - C(discovery_check)'
- ' - C(proxy)'
- ' - C(discovery_object)'
- 'Possible values when I(event_source=auto_registration):'
- ' - C(proxy)'
- ' - C(host_name)'
- ' - C(host_metadata)'
- 'Possible values when I(event_source=internal):'
- ' - C(host_group)'
- ' - C(host)'
- ' - C(host_template)'
- ' - C(application)'
- ' - C(event_type)'
value:
description:
- Value to compare with.
- 'When I(type=discovery_status), the choices are:'
- ' - C(up)'
- ' - C(down)'
- ' - C(discovered)'
- ' - C(lost)'
- 'When I(type=discovery_object), the choices are:'
- ' - C(host)'
- ' - C(service)'
- 'When I(type=event_type), the choices are:'
- ' - C(item in not supported state)'
- ' - C(item in normal state)'
- ' - C(LLD rule in not supported state)'
- ' - C(LLD rule in normal state)'
- ' - C(trigger in unknown state)'
- ' - C(trigger in normal state)'
- 'When I(type=trigger_severity), the choices are (case-insensitive):'
- ' - C(not classified)'
- ' - C(information)'
- ' - C(warning)'
- ' - C(average)'
- ' - C(high)'
- ' - C(disaster)'
- Irrespective of user-visible names being changed in Zabbix. Defaults to C(not classified) if omitted.
- Besides the above options, this is usually either the name
of the object or a string to compare with.
value2:
description:
- Secondary value to compare with.
- Required for trigger actions when condition I(type=event_tag_value).
operator:
description:
- Condition operator.
- When I(type) is set to C(time_period), the choices are C(in), C(not in).
- C(matches), C(does not match), C(Yes) and C(No) condition operators work only with >= Zabbix 4.0
choices:
- '='
- '<>'
- 'like'
- 'not like'
- 'in'
- '>='
- '<='
- 'not in'
- 'matches'
- 'does not match'
- 'Yes'
- 'No'
formulaid:
description:
- Arbitrary unique ID that is used to reference the condition from a custom expression.
- Can only contain upper-case letters.
- Required for custom expression filters and ignored otherwise.
eval_type:
description:
- Filter condition evaluation method.
      - Defaults to C(andor) if there are fewer than 2 conditions or if
I(formula) is not specified.
- Defaults to C(custom_expression) when formula is specified.
choices:
- 'andor'
- 'and'
- 'or'
- 'custom_expression'
formula:
description:
- User-defined expression to be used for evaluating conditions with a custom expression.
- The expression must contain IDs that reference each condition by its formulaid.
- The IDs used in the expression must exactly match the ones
defined in the I(conditions). No condition can remain unused or omitted.
- Required when I(eval_type=custom_expression).
- Use sequential IDs that start at "A". If non-sequential IDs are used, Zabbix re-indexes them.
This makes each module run notice the difference in IDs and update the action.
default_message:
description:
- Problem message default text.
- With >= Zabbix 5.0 this field is removed from the API and is dropped silently by module.
- Works only with < Zabbix 5.0
default_subject:
description:
- Problem message default subject.
- With >= Zabbix 5.0 this field is removed from the API and is dropped silently by module.
- Works only with < Zabbix 5.0
recovery_default_message:
description:
- Recovery message text.
- With >= Zabbix 5.0 this field is removed from the API and is dropped silently by module.
- Works only with >= Zabbix 3.2 and < Zabbix 5.0
recovery_default_subject:
description:
- Recovery message subject.
- With >= Zabbix 5.0 this field is removed from the API and is dropped silently by module.
- Works only with >= Zabbix 3.2 and < Zabbix 5.0
acknowledge_default_message:
description:
- Update operation (known as "Acknowledge operation" before Zabbix 4.0) message text.
- With >= Zabbix 5.0 this field is removed from the API and is dropped silently by module.
- Works only with >= Zabbix 3.4 and < Zabbix 5.0
acknowledge_default_subject:
description:
- Update operation (known as "Acknowledge operation" before Zabbix 4.0) message subject.
- With >= Zabbix 5.0 this field is removed from the API and is dropped silently by module.
- Works only with >= Zabbix 3.4 and < Zabbix 5.0
operations:
type: list
description:
- List of action operations
suboptions:
type:
description:
- Type of operation.
- 'Valid choices when setting type for I(recovery_operations) and I(acknowledge_operations):'
- ' - C(send_message)'
- ' - C(remote_command)'
- ' - C(notify_all_involved)'
- Choice C(notify_all_involved) only supported in I(recovery_operations) and I(acknowledge_operations).
choices:
- send_message
- remote_command
- add_host
- remove_host
- add_to_host_group
- remove_from_host_group
- link_to_template
- unlink_from_template
- enable_host
- disable_host
- set_host_inventory_mode
- notify_all_involved
esc_period:
description:
- Duration of an escalation step in seconds.
- Must be greater than 60 seconds.
- Accepts only seconds in int for <= Zabbix 3.2
- Accepts seconds, time unit with suffix and user macro since => Zabbix 3.4
- If set to 0 or 0s, the default action escalation period will be used.
default: 0s
esc_step_from:
description:
- Step to start escalation from.
default: 1
esc_step_to:
description:
- Step to end escalation at.
      - Specify 0 to escalate indefinitely.
default: 1
send_to_groups:
type: list
description:
- User groups to send messages to.
send_to_users:
type: list
description:
- Users (usernames or aliases) to send messages to.
message:
description:
- Operation message text.
- Will check the 'default message' and use the text from I(default_message) if this and I(default_subject) are not specified
subject:
description:
- Operation message subject.
- Will check the 'default message' and use the text from I(default_subject) if this and I(default_subject) are not specified
media_type:
description:
- Media type that will be used to send the message.
- Can be used with I(type=send_message) or I(type=notify_all_involved) inside I(acknowledge_operations).
- Set to C(all) for all media types
default: 'all'
operation_condition:
type: 'str'
description:
- The action operation condition object defines a condition that must be met to perform the current operation.
choices:
- acknowledged
- not_acknowledged
host_groups:
type: list
description:
- List of host groups host should be added to.
- Required when I(type=add_to_host_group) or I(type=remove_from_host_group).
templates:
type: list
description:
- List of templates host should be linked to.
- Required when I(type=link_to_template) or I(type=unlink_from_template).
inventory:
description:
- Host inventory mode.
- Required when I(type=set_host_inventory_mode).
choices:
- manual
- automatic
command_type:
description:
- Type of operation command.
- Required when I(type=remote_command).
choices:
- custom_script
- ipmi
- ssh
- telnet
- global_script
command:
description:
- Command to run.
- Required when I(type=remote_command) and I(command_type!=global_script).
execute_on:
description:
- Target on which the custom script operation command will be executed.
- Required when I(type=remote_command) and I(command_type=custom_script).
choices:
- agent
- server
- proxy
run_on_groups:
description:
- Host groups to run remote commands on.
- Required when I(type=remote_command) and I(run_on_hosts) is not set.
run_on_hosts:
description:
- Hosts to run remote commands on.
- Required when I(type=remote_command) and I(run_on_groups) is not set.
- If set to 0 the command will be run on the current host.
ssh_auth_type:
description:
- Authentication method used for SSH commands.
- Required when I(type=remote_command) and I(command_type=ssh).
choices:
- password
- public_key
ssh_privatekey_file:
description:
- Name of the private key file used for SSH commands with public key authentication.
- Required when I(ssh_auth_type=public_key).
- Can be used when I(type=remote_command).
ssh_publickey_file:
description:
- Name of the public key file used for SSH commands with public key authentication.
- Required when I(ssh_auth_type=public_key).
- Can be used when I(type=remote_command).
username:
description:
- User name used for authentication.
- Required when I(ssh_auth_type in [public_key, password]) or I(command_type=telnet).
- Can be used when I(type=remote_command).
password:
description:
- Password used for authentication.
- Required when I(ssh_auth_type=password) or I(command_type=telnet).
- Can be used when I(type=remote_command).
port:
description:
- Port number used for authentication.
- Can be used when I(command_type in [ssh, telnet]) and I(type=remote_command).
script_name:
description:
- The name of script used for global script commands.
- Required when I(command_type=global_script).
- Can be used when I(type=remote_command).
recovery_operations:
type: list
description:
- List of recovery operations.
- C(Suboptions) are the same as for I(operations).
- Works only with >= Zabbix 3.2
acknowledge_operations:
type: list
description:
- List of acknowledge operations.
- Action acknowledge operations are known as update operations since Zabbix 4.0.
- C(Suboptions) are the same as for I(operations).
- Works only with >= Zabbix 3.4
aliases: [ update_operations ]
notes:
- Only Zabbix >= 3.0 is supported.
extends_documentation_fragment:
- community.zabbix.zabbix
'''
EXAMPLES = '''
# Trigger action with only one condition
- name: Deploy trigger action
community.zabbix.zabbix_action:
server_url: "http://zabbix.example.com/zabbix/"
login_user: Admin
login_password: <PASSWORD>
name: "Send alerts to Admin"
event_source: 'trigger'
state: present
status: enabled
esc_period: 60
conditions:
- type: 'trigger_severity'
operator: '>='
value: 'Information'
operations:
- type: send_message
subject: "Something bad is happening"
message: "Come on, guys do something"
media_type: 'Email'
send_to_users:
- 'Admin'
# Trigger action with multiple conditions and operations
- name: Deploy trigger action
community.zabbix.zabbix_action:
server_url: "http://zabbix.example.com/zabbix/"
login_user: Admin
login_password: <PASSWORD>
name: "Send alerts to Admin"
event_source: 'trigger'
state: present
status: enabled
esc_period: 1m
conditions:
- type: 'trigger_name'
operator: 'like'
value: 'Zabbix agent is unreachable'
formulaid: A
- type: 'trigger_severity'
operator: '>='
value: 'disaster'
formulaid: B
formula: A or B
operations:
- type: send_message
media_type: 'Email'
send_to_users:
- 'Admin'
- type: remote_command
command: 'systemctl restart zabbix-agent'
command_type: custom_script
execute_on: server
run_on_hosts:
- 0
# Trigger action with recovery and acknowledge operations
- name: Deploy trigger action
community.zabbix.zabbix_action:
server_url: "http://zabbix.example.com/zabbix/"
login_user: Admin
login_password: <PASSWORD>
name: "Send alerts to Admin"
event_source: 'trigger'
state: present
status: enabled
esc_period: 1h
conditions:
- type: 'trigger_severity'
operator: '>='
value: 'Information'
operations:
- type: send_message
subject: "Something bad is happening"
message: "Come on, guys do something"
media_type: 'Email'
send_to_users:
- 'Admin'
recovery_operations:
- type: send_message
subject: "Host is down"
message: "Come on, guys do something"
media_type: 'Email'
send_to_users:
- 'Admin'
acknowledge_operations:
- type: send_message
media_type: 'Email'
send_to_users:
- 'Admin'
'''
RETURN = '''
msg:
description: The result of the operation
returned: success
type: str
sample: 'Action Deleted: Register webservers, ID: 0001'
'''
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.zabbix.plugins.module_utils.base import ZabbixBase
from ansible_collections.community.zabbix.plugins.module_utils.wrappers import ZapiWrapper
import ansible_collections.community.zabbix.plugins.module_utils.helpers as zabbix_utils
class Zapi(ZapiWrapper):
    """Name-based lookup helpers over the Zabbix API.

    Each ``get_*_by_*_name`` helper returns the first matching object and
    fails the Ansible module with a descriptive message when the object
    does not exist or the API call raises.
    """

    def __init__(self, module, zbx=None):
        super(Zapi, self).__init__(module, zbx)
        # Helper classes in this module expect an object exposing these
        # lookups as ``_zapi_wrapper``; this instance is its own wrapper.
        self._zapi_wrapper = self

    def check_if_action_exists(self, name):
        """Check if action exists.

        Args:
            name: Name of the action.

        Returns:
            list: actions matching *name* (empty if none), with operations,
            recovery/acknowledge operations and filter expanded, and the
            camelCase operation keys renamed to snake_case.
        """
        try:
            _action = self._zapi.action.get({
                "selectOperations": "extend",
                "selectRecoveryOperations": "extend",
                "selectAcknowledgeOperations": "extend",
                "selectFilter": "extend",
                'filter': {'name': [name]}
            })
            if len(_action) > 0:
                # Normalize the camelCase keys returned by the API to the
                # snake_case names used throughout this module.
                _action[0]['recovery_operations'] = _action[0].pop('recoveryOperations', [])
                _action[0]['acknowledge_operations'] = _action[0].pop('acknowledgeOperations', [])
            return _action
        except Exception as e:
            self._module.fail_json(msg="Failed to check if action '%s' exists: %s" % (name, e))

    def get_action_by_name(self, name):
        """Get action by name

        Args:
            name: Name of the action.

        Returns:
            dict: Zabbix action
        """
        try:
            action_list = self._zapi.action.get({
                'output': 'extend',
                'filter': {'name': [name]}
            })
            if len(action_list) < 1:
                # BUGFIX: original format string lacked the %s placeholder
                # ("Action not found: " % name), which raised TypeError
                # instead of reporting the action name.
                self._module.fail_json(msg="Action not found: %s" % name)
            else:
                return action_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get ID of '%s': %s" % (name, e))

    def get_host_by_host_name(self, host_name):
        """Get host by host name

        Args:
            host_name: host name.

        Returns:
            host matching host name
        """
        try:
            host_list = self._zapi.host.get({
                'output': 'extend',
                'selectInventory': 'extend',
                'filter': {'host': [host_name]}
            })
            if len(host_list) < 1:
                self._module.fail_json(msg="Host not found: %s" % host_name)
            else:
                return host_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get host '%s': %s" % (host_name, e))

    def get_hostgroup_by_hostgroup_name(self, hostgroup_name):
        """Get host group by host group name

        Args:
            hostgroup_name: host group name.

        Returns:
            host group matching host group name
        """
        try:
            hostgroup_list = self._zapi.hostgroup.get({
                'output': 'extend',
                'filter': {'name': [hostgroup_name]}
            })
            if len(hostgroup_list) < 1:
                self._module.fail_json(msg="Host group not found: %s" % hostgroup_name)
            else:
                return hostgroup_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get host group '%s': %s" % (hostgroup_name, e))

    def get_template_by_template_name(self, template_name):
        """Get template by template name

        Args:
            template_name: template name.

        Returns:
            template matching template name
        """
        try:
            template_list = self._zapi.template.get({
                'output': 'extend',
                'filter': {'host': [template_name]}
            })
            if len(template_list) < 1:
                self._module.fail_json(msg="Template not found: %s" % template_name)
            else:
                return template_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get template '%s': %s" % (template_name, e))

    def get_trigger_by_trigger_name(self, trigger_name):
        """Get trigger by trigger name

        Args:
            trigger_name: trigger name.

        Returns:
            trigger matching trigger name
        """
        try:
            trigger_list = self._zapi.trigger.get({
                'output': 'extend',
                # Triggers are looked up by their 'description' field, which
                # is the trigger name in the Zabbix API.
                'filter': {'description': [trigger_name]}
            })
            if len(trigger_list) < 1:
                self._module.fail_json(msg="Trigger not found: %s" % trigger_name)
            else:
                return trigger_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get trigger '%s': %s" % (trigger_name, e))

    def get_discovery_rule_by_discovery_rule_name(self, discovery_rule_name):
        """Get discovery rule by discovery rule name

        Args:
            discovery_rule_name: discovery rule name.

        Returns:
            discovery rule matching discovery rule name
        """
        try:
            discovery_rule_list = self._zapi.drule.get({
                'output': 'extend',
                'filter': {'name': [discovery_rule_name]}
            })
            if len(discovery_rule_list) < 1:
                self._module.fail_json(msg="Discovery rule not found: %s" % discovery_rule_name)
            else:
                return discovery_rule_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get discovery rule '%s': %s" % (discovery_rule_name, e))

    def get_discovery_check_by_discovery_check_name(self, discovery_check_name):
        """Get discovery check by discovery check name

        Args:
            discovery_check_name: discovery check name.

        Returns:
            discovery check matching discovery check name
        """
        try:
            discovery_check_list = self._zapi.dcheck.get({
                'output': 'extend',
                'filter': {'key_': [discovery_check_name]}
            })
            if len(discovery_check_list) < 1:
                self._module.fail_json(msg="Discovery check not found: %s" % discovery_check_name)
            else:
                return discovery_check_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get discovery check '%s': %s" % (discovery_check_name, e))

    def get_proxy_by_proxy_name(self, proxy_name):
        """Get proxy by proxy name

        Args:
            proxy_name: proxy name.

        Returns:
            proxy matching proxy name
        """
        try:
            proxy_list = self._zapi.proxy.get({
                'output': 'extend',
                'filter': {'host': [proxy_name]}
            })
            if len(proxy_list) < 1:
                self._module.fail_json(msg="Proxy not found: %s" % proxy_name)
            else:
                return proxy_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get proxy '%s': %s" % (proxy_name, e))

    def get_mediatype_by_mediatype_name(self, mediatype_name):
        """Get mediatype id by mediatype name

        Args:
            mediatype_name: mediatype name

        Returns:
            str: ID of the matching media type; '0' when *mediatype_name*
            is 'all' (case-insensitive).
        """
        # Zabbix 4.4 renamed the media type's 'description' field to 'name'.
        if LooseVersion(self._zbx_api_version) >= LooseVersion('4.4'):
            _filter = {'name': [mediatype_name]}
        else:
            _filter = {'description': [mediatype_name]}
        try:
            if str(mediatype_name).lower() == 'all':
                return '0'
            mediatype_list = self._zapi.mediatype.get({
                'output': 'extend',
                'filter': _filter
            })
            if len(mediatype_list) < 1:
                self._module.fail_json(msg="Media type not found: %s" % mediatype_name)
            else:
                return mediatype_list[0]['mediatypeid']
        except Exception as e:
            self._module.fail_json(msg="Failed to get mediatype '%s': %s" % (mediatype_name, e))

    def get_user_by_user_name(self, user_name):
        """Get user by user name

        Args:
            user_name: user name

        Returns:
            user matching user name
        """
        try:
            # Zabbix 5.4 renamed the user's 'alias' field to 'username'.
            if LooseVersion(self._zbx_api_version) >= LooseVersion('5.4'):
                _filter = {'username': [user_name]}
            else:
                _filter = {'alias': [user_name]}
            user_list = self._zapi.user.get({
                'output': 'extend',
                'filter': _filter,
            })
            if len(user_list) < 1:
                self._module.fail_json(msg="User not found: %s" % user_name)
            else:
                return user_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get user '%s': %s" % (user_name, e))

    def get_usergroup_by_usergroup_name(self, usergroup_name):
        """Get usergroup by usergroup name

        Args:
            usergroup_name: usergroup name

        Returns:
            usergroup matching usergroup name
        """
        try:
            usergroup_list = self._zapi.usergroup.get({
                'output': 'extend',
                'filter': {'name': [usergroup_name]}
            })
            if len(usergroup_list) < 1:
                self._module.fail_json(msg="User group not found: %s" % usergroup_name)
            else:
                return usergroup_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get user group '%s': %s" % (usergroup_name, e))

    def get_script_by_script_name(self, script_name):
        """Get script by script name

        Args:
            script_name: script name

        Returns:
            script matching script name, or {} when *script_name* is None
        """
        try:
            if script_name is None:
                return {}
            script_list = self._zapi.script.get({
                'output': 'extend',
                'filter': {'name': [script_name]}
            })
            if len(script_list) < 1:
                self._module.fail_json(msg="Script not found: %s" % script_name)
            else:
                return script_list[0]
        except Exception as e:
            self._module.fail_json(msg="Failed to get script '%s': %s" % (script_name, e))
class Action(ZabbixBase):
def __init__(self, module, zbx=None, zapi_wrapper=None):
super(Action, self).__init__(module, zbx, zapi_wrapper)
self.existing_data = None
def _construct_parameters(self, **kwargs):
"""Construct parameters.
Args:
**kwargs: Arbitrary keyword parameters.
Returns:
dict: dictionary of specified parameters
"""
_params = {
'name': kwargs['name'],
'eventsource': to_numeric_value([
'trigger',
'discovery',
'auto_registration',
'internal'], kwargs['event_source']),
'esc_period': kwargs.get('esc_period'),
'filter': kwargs['conditions'],
'def_longdata': kwargs['default_message'],
'def_shortdata': kwargs['default_subject'],
'r_longdata': kwargs['recovery_default_message'],
'r_shortdata': kwargs['recovery_default_subject'],
'ack_longdata': kwargs['acknowledge_default_message'],
'ack_shortdata': kwargs['acknowledge_default_subject'],
'operations': kwargs['operations'],
'recovery_operations': kwargs.get('recovery_operations'),
'acknowledge_operations': kwargs.get('acknowledge_operations'),
'status': to_numeric_value([
'enabled',
'disabled'], kwargs['status'])
}
if kwargs['event_source'] == 'trigger':
if LooseVersion(self._zbx_api_version) >= LooseVersion('4.0'):
_params['pause_suppressed'] = '1' if kwargs['pause_in_maintenance'] else '0'
else:
_params['maintenance_mode'] = '1' if kwargs['pause_in_maintenance'] else '0'
if LooseVersion(self._zbx_api_version) >= LooseVersion('5.0'):
# remove some fields regarding
# https://www.zabbix.com/documentation/5.0/manual/api/reference/action/object
_params.pop('def_longdata', None)
_params.pop('def_shortdata', None)
_params.pop('r_longdata', None)
_params.pop('r_shortdata', None)
if (LooseVersion(self._zbx_api_version) < LooseVersion('3.4')
or LooseVersion(self._zbx_api_version) >= LooseVersion('5.0')):
_params.pop('ack_longdata', None)
_params.pop('ack_shortdata', None)
return _params
def check_difference(self, **kwargs):
"""Check difference between action and user specified parameters.
Args:
**kwargs: Arbitrary keyword parameters.
Returns:
dict: dictionary of differences
"""
existing_action = convert_unicode_to_str(self._zapi_wrapper.check_if_action_exists(kwargs['name'])[0])
parameters = convert_unicode_to_str(self._construct_parameters(**kwargs))
change_parameters = {}
_diff = cleanup_data(compare_dictionaries(parameters, existing_action, change_parameters))
return _diff
def update_action(self, **kwargs):
    """Update an existing Zabbix action via the API.

    Args:
        **kwargs: Arbitrary keyword parameters; must contain 'action_id'
            plus the fields to change.

    Returns:
        action: updated action
    """
    try:
        if self._module.check_mode:
            self._module.exit_json(msg="Action would be updated if check mode was not specified: %s" % kwargs, changed=True)
        # The Zabbix API expects the key 'actionid'; the module uses 'action_id'.
        kwargs['actionid'] = kwargs.pop('action_id')
        return self._zapi.action.update(kwargs)
    except Exception as e:
        self._module.fail_json(msg="Failed to update action '%s': %s" % (kwargs['actionid'], e))
def add_action(self, **kwargs):
    """Create a new Zabbix action from the given parameters.

    Args:
        **kwargs: Arbitrary keyword parameters describing the action.

    Returns:
        the id of the newly created action
    """
    try:
        if self._module.check_mode:
            self._module.exit_json(msg="Action would be added if check mode was not specified", changed=True)
        params = self._construct_parameters(**kwargs)
        response = self._zapi.action.create(params)
        return response['actionids'][0]
    except Exception as e:
        self._module.fail_json(msg="Failed to create action '%s': %s" % (kwargs['name'], e))
def delete_action(self, action_id):
    """Delete a Zabbix action by id.

    Args:
        action_id: Action id

    Returns:
        action: deleted action
    """
    try:
        if self._module.check_mode:
            self._module.exit_json(msg="Action would be deleted if check mode was not specified", changed=True)
        # The delete API takes a list of ids; we always delete exactly one.
        return self._zapi.action.delete([action_id])
    except Exception as e:
        self._module.fail_json(msg="Failed to delete action '%s': %s" % (action_id, e))
class Operations(Zapi):
    """Restructures the user defined operations data to fit the Zabbix API requirements."""

    def _construct_operationtype(self, operation):
        """Construct operation type.

        Args:
            operation: operation to construct

        Returns:
            str: numeric operation type (as a string) understood by the Zabbix API
        """
        try:
            # Position in this list is the Zabbix numeric operation type.
            return to_numeric_value([
                "send_message",
                "remote_command",
                "add_host",
                "remove_host",
                "add_to_host_group",
                "remove_from_host_group",
                "link_to_template",
                "unlink_from_template",
                "enable_host",
                "disable_host",
                "set_host_inventory_mode"], operation['type']
            )
        except Exception:
            self._module.fail_json(msg="Unsupported value '%s' for operation type." % operation['type'])

    def _construct_opmessage(self, operation):
        """Construct operation message.

        Args:
            operation: operation to construct the message

        Returns:
            dict: constructed operation message
        """
        try:
            return {
                # default_msg '1' tells Zabbix to use the media type default
                # text; any explicit message/subject switches it to '0'.
                'default_msg': '0' if operation.get('message') is not None or operation.get('subject') is not None else '1',
                'mediatypeid': self._zapi_wrapper.get_mediatype_by_mediatype_name(
                    operation.get('media_type')
                ) if operation.get('media_type') is not None else '0',
                'message': operation.get('message'),
                'subject': operation.get('subject'),
            }
        except Exception as e:
            self._module.fail_json(msg="Failed to construct operation message. The error was: %s" % e)

    def _construct_opmessage_usr(self, operation):
        """Construct operation message user.

        Args:
            operation: operation to construct the message user

        Returns:
            list: constructed operation message user or None if operation not found
        """
        if operation.get('send_to_users') is None:
            return None
        return [{
            'userid': self._zapi_wrapper.get_user_by_user_name(_user)['userid']
        } for _user in operation.get('send_to_users')]

    def _construct_opmessage_grp(self, operation):
        """Construct operation message group.

        Args:
            operation: operation to construct the message group

        Returns:
            list: constructed operation message group or None if operation not found
        """
        if operation.get('send_to_groups') is None:
            return None
        return [{
            'usrgrpid': self._zapi_wrapper.get_usergroup_by_usergroup_name(_group)['usrgrpid']
        } for _group in operation.get('send_to_groups')]

    def _construct_opcommand(self, operation):
        """Construct operation command.

        Args:
            operation: operation to construct command

        Returns:
            dict: constructed operation command
        """
        try:
            return {
                'type': to_numeric_value([
                    'custom_script',
                    'ipmi',
                    'ssh',
                    'telnet',
                    'global_script'], operation.get('command_type', 'custom_script')),
                'command': operation.get('command'),
                'execute_on': to_numeric_value([
                    'agent',
                    'server',
                    'proxy'], operation.get('execute_on', 'server')),
                'scriptid': self._zapi_wrapper.get_script_by_script_name(
                    operation.get('script_name')
                ).get('scriptid'),
                'authtype': to_numeric_value([
                    'password',
                    'public_key'
                ], operation.get('ssh_auth_type')),
                'privatekey': operation.get('ssh_privatekey_file'),
                'publickey': operation.get('ssh_publickey_file'),
                'username': operation.get('username'),
                'password': operation.get('password'),
                'port': operation.get('port')
            }
        except Exception as e:
            self._module.fail_json(msg="Failed to construct operation command. The error was: %s" % e)

    def _construct_opcommand_hst(self, operation):
        """Construct operation command host.

        Args:
            operation: operation to construct command host

        Returns:
            list: constructed operation command host
        """
        if operation.get('run_on_hosts') is None:
            return None
        # '0' is forwarded untouched (Zabbix's "current host" placeholder
        # per the action API docs); real host names are resolved to ids.
        return [{
            'hostid': self._zapi_wrapper.get_host_by_host_name(_host)['hostid']
        } if str(_host) != '0' else {'hostid': '0'} for _host in operation.get('run_on_hosts')]

    def _construct_opcommand_grp(self, operation):
        """Construct operation command group.

        Args:
            operation: operation to construct command group

        Returns:
            list: constructed operation command group
        """
        if operation.get('run_on_groups') is None:
            return None
        # BUGFIX: a host-group record is keyed by 'groupid' (as used by
        # _construct_opgroup and the filter code), not 'hostid'.
        return [{
            'groupid': self._zapi_wrapper.get_hostgroup_by_hostgroup_name(_group)['groupid']
        } for _group in operation.get('run_on_groups')]

    def _construct_opgroup(self, operation):
        """Construct operation group.

        Args:
            operation: operation to construct group

        Returns:
            list: constructed operation group
        """
        return [{
            'groupid': self._zapi_wrapper.get_hostgroup_by_hostgroup_name(_group)['groupid']
        } for _group in operation.get('host_groups', [])]

    def _construct_optemplate(self, operation):
        """Construct operation template.

        Args:
            operation: operation to construct template

        Returns:
            list: constructed operation template
        """
        return [{
            'templateid': self._zapi_wrapper.get_template_by_template_name(_template)['templateid']
        } for _template in operation.get('templates', [])]

    def _construct_opinventory(self, operation):
        """Construct operation inventory.

        Args:
            operation: operation to construct inventory

        Returns:
            dict: constructed operation inventory
        """
        return {
            'inventory_mode': to_numeric_value([
                'manual',
                'automatic'
            ], operation.get('inventory'))
        }

    def _construct_opconditions(self, operation):
        """Construct operation conditions.

        Args:
            operation: operation to construct the conditions

        Returns:
            list: constructed operation conditions
        """
        _opcond = operation.get('operation_condition')
        if _opcond is not None:
            # conditiontype 14 is "event acknowledged"; operator 0 is "=".
            if _opcond == 'acknowledged':
                _value = '1'
            elif _opcond == 'not_acknowledged':
                _value = '0'
            return [{
                'conditiontype': '14',
                'operator': '0',
                'value': _value
            }]
        return []

    def construct_the_data(self, operations):
        """Construct the operation data using helper methods.

        Args:
            operations: list of operations to construct

        Returns:
            list: constructed operation data
        """
        constructed_data = []
        for op in operations:
            operation_type = self._construct_operationtype(op)
            constructed_operation = {
                'operationtype': operation_type,
                'esc_period': op.get('esc_period'),
                'esc_step_from': op.get('esc_step_from'),
                'esc_step_to': op.get('esc_step_to')
            }
            # Send Message type
            if constructed_operation['operationtype'] == '0':
                constructed_operation['opmessage'] = self._construct_opmessage(op)
                constructed_operation['opmessage_usr'] = self._construct_opmessage_usr(op)
                constructed_operation['opmessage_grp'] = self._construct_opmessage_grp(op)
                constructed_operation['opconditions'] = self._construct_opconditions(op)
            # Send Command type
            if constructed_operation['operationtype'] == '1':
                constructed_operation['opcommand'] = self._construct_opcommand(op)
                constructed_operation['opcommand_hst'] = self._construct_opcommand_hst(op)
                constructed_operation['opcommand_grp'] = self._construct_opcommand_grp(op)
                constructed_operation['opconditions'] = self._construct_opconditions(op)
            # Add to/Remove from host group
            if constructed_operation['operationtype'] in ('4', '5'):
                constructed_operation['opgroup'] = self._construct_opgroup(op)
            # Link/Unlink template
            if constructed_operation['operationtype'] in ('6', '7'):
                constructed_operation['optemplate'] = self._construct_optemplate(op)
            # Set inventory mode
            if constructed_operation['operationtype'] == '10':
                constructed_operation['opinventory'] = self._construct_opinventory(op)
            constructed_data.append(constructed_operation)
        return cleanup_data(constructed_data)
class RecoveryOperations(Operations):
    """
    Restructures the user defined recovery operations data to fit the Zabbix API requirements
    """

    def _construct_operationtype(self, operation):
        """Translate a recovery operation type name into its numeric API value.

        Args:
            operation: operation whose 'type' is translated

        Returns:
            str: numeric operation type understood by the Zabbix API
        """
        # Valid recovery types are 0 (send_message), 1 (remote_command) and
        # 11 (notify_all_involved); the intermediate slots are unused.
        type_names = ["send_message", "remote_command"] + [None] * 9 + ["notify_all_involved"]
        try:
            return to_numeric_value(type_names, operation['type'])
        except Exception:
            self._module.fail_json(msg="Unsupported value '%s' for recovery operation type." % operation['type'])

    def construct_the_data(self, operations):
        """Build the Zabbix API payload for the given recovery operations.

        Args:
            operations: list of user supplied recovery operations

        Returns:
            list: recovery operations in Zabbix API form
        """
        payload = []
        for operation in operations:
            entry = {
                'operationtype': self._construct_operationtype(operation),
            }
            if entry['operationtype'] in ('0', '11'):
                # Message based operations (send message / notify all involved).
                entry['opmessage'] = self._construct_opmessage(operation)
                entry['opmessage_usr'] = self._construct_opmessage_usr(operation)
                entry['opmessage_grp'] = self._construct_opmessage_grp(operation)
            elif entry['operationtype'] == '1':
                # Remote command operations.
                entry['opcommand'] = self._construct_opcommand(operation)
                entry['opcommand_hst'] = self._construct_opcommand_hst(operation)
                entry['opcommand_grp'] = self._construct_opcommand_grp(operation)
            payload.append(entry)
        return cleanup_data(payload)
class AcknowledgeOperations(Operations):
    """
    Restructures the user defined acknowledge operations data to fit the Zabbix API requirements
    """

    def _construct_operationtype(self, operation):
        """Construct operation type.

        Args:
            operation: operation to construct type

        Returns:
            str: constructed operation type ('0', '1' or '12')
        """
        try:
            return to_numeric_value([
                "send_message",
                "remote_command",
                None,
                None,
                None,
                None,
                None,
                None,
                None,
                None,
                None,
                None,
                "notify_all_involved"], operation['type']
            )
        except Exception:
            self._module.fail_json(msg="Unsupported value '%s' for acknowledge operation type." % operation['type'])

    def construct_the_data(self, operations):
        """Construct the acknowledge operations data using helper methods.

        Args:
            operations: list of acknowledge operations to construct

        Returns:
            list: constructed acknowledge operations data
        """
        constructed_data = []
        for op in operations:
            operation_type = self._construct_operationtype(op)
            constructed_operation = {
                'operationtype': operation_type,
            }
            # Send Message type.
            # BUGFIX: notify_all_involved maps to '12' for acknowledge
            # operations ('11' is the recovery variant and is unreachable
            # here), so the message payload must be attached for '12'.
            if constructed_operation['operationtype'] in ('0', '12'):
                constructed_operation['opmessage'] = self._construct_opmessage(op)
                constructed_operation['opmessage_usr'] = self._construct_opmessage_usr(op)
                constructed_operation['opmessage_grp'] = self._construct_opmessage_grp(op)
            # Send Command type
            if constructed_operation['operationtype'] == '1':
                constructed_operation['opcommand'] = self._construct_opcommand(op)
                constructed_operation['opcommand_hst'] = self._construct_opcommand_hst(op)
                constructed_operation['opcommand_grp'] = self._construct_opcommand_grp(op)
            constructed_data.append(constructed_operation)
        return cleanup_data(constructed_data)
class Filter(Zapi):
    """Translates user defined filter conditions into Zabbix API filter objects."""

    def _construct_evaltype(self, _eval_type, _formula, _conditions):
        """Construct the eval type

        Args:
            _eval_type: requested evaluation mode ('andor', 'and', 'or',
                'custom_expression' or None)
            _formula: zabbix condition evaluation formula
            _conditions: list of conditions to check

        Returns:
            dict: evaltype/formula pair for the Zabbix API
        """
        # A single condition needs no combining logic: and/or default.
        if len(_conditions) <= 1:
            return {
                'evaltype': '0',
                'formula': None
            }
        if _eval_type == 'andor':
            return {
                'evaltype': '0',
                'formula': None
            }
        if _eval_type == 'and':
            return {
                'evaltype': '1',
                'formula': None
            }
        if _eval_type == 'or':
            return {
                'evaltype': '2',
                'formula': None
            }
        if _eval_type == 'custom_expression':
            if _formula is not None:
                return {
                    'evaltype': '3',
                    'formula': _formula
                }
            else:
                self._module.fail_json(msg="'formula' is required when 'eval_type' is set to 'custom_expression'")
        # A formula without an explicit eval_type implies a custom expression.
        if _formula is not None:
            return {
                'evaltype': '3',
                'formula': _formula
            }
        return {
            'evaltype': '0',
            'formula': None
        }

    def _construct_conditiontype(self, _condition):
        """Construct the condition type

        Args:
            _condition: condition to check

        Returns:
            str: constructed condition type data
        """
        try:
            # Position in this list is the Zabbix numeric condition type;
            # None entries are unused type numbers.
            return to_numeric_value([
                "host_group",
                "host",
                "trigger",
                "trigger_name",
                "trigger_severity",
                "trigger_value",
                "time_period",
                "host_ip",
                "discovered_service_type",
                "discovered_service_port",
                "discovery_status",
                "uptime_or_downtime_duration",
                "received_value",
                "host_template",
                None,
                "application",
                "maintenance_status",
                None,
                "discovery_rule",
                "discovery_check",
                "proxy",
                "discovery_object",
                "host_name",
                "event_type",
                "host_metadata",
                "event_tag",
                "event_tag_value"], _condition['type']
            )
        except Exception:
            self._module.fail_json(msg="Unsupported value '%s' for condition type." % _condition['type'])

    def _construct_operator(self, _condition):
        """Construct operator

        Args:
            _condition: condition to construct

        Returns:
            str: constructed operator
        """
        try:
            # Position in this list is the Zabbix numeric operator value.
            return to_numeric_value([
                "=",
                "<>",
                "like",
                "not like",
                "in",
                ">=",
                "<=",
                "not in",
                "matches",
                "does not match",
                "Yes",
                "No"], _condition['operator']
            )
        except Exception:
            self._module.fail_json(msg="Unsupported value '%s' for operator." % _condition['operator'])

    def _construct_value(self, conditiontype, value):
        """Construct the condition value for a given condition type.

        Named entities (host groups, hosts, triggers, templates, ...) are
        resolved to ids; enumerated values are mapped to their numeric form;
        anything else is returned unchanged.

        Args:
            conditiontype: type of condition to construct
            value: value to construct

        Returns:
            str: constructed value
        """
        try:
            # Host group
            if conditiontype == '0':
                return self._zapi_wrapper.get_hostgroup_by_hostgroup_name(value)['groupid']
            # Host
            if conditiontype == '1':
                return self._zapi_wrapper.get_host_by_host_name(value)['hostid']
            # Trigger
            if conditiontype == '2':
                return self._zapi_wrapper.get_trigger_by_trigger_name(value)['triggerid']
            # Trigger name: return as is
            # Trigger severity
            if conditiontype == '4':
                return to_numeric_value([
                    "not classified",
                    "information",
                    "warning",
                    "average",
                    "high",
                    "disaster"], value or "not classified"
                )
            # Trigger value
            if conditiontype == '5':
                return to_numeric_value([
                    "ok",
                    "problem"], value or "ok"
                )
            # Time period: return as is
            # Host IP: return as is
            # Discovered service type
            if conditiontype == '8':
                return to_numeric_value([
                    "SSH",
                    "LDAP",
                    "SMTP",
                    "FTP",
                    "HTTP",
                    "POP",
                    "NNTP",
                    "IMAP",
                    "TCP",
                    "Zabbix agent",
                    "SNMPv1 agent",
                    "SNMPv2 agent",
                    "ICMP ping",
                    "SNMPv3 agent",
                    "HTTPS",
                    "Telnet"], value
                )
            # Discovered service port: return as is
            # Discovery status
            if conditiontype == '10':
                return to_numeric_value([
                    "up",
                    "down",
                    "discovered",
                    "lost"], value
                )
            # Host template
            if conditiontype == '13':
                return self._zapi_wrapper.get_template_by_template_name(value)['templateid']
            # Discovery rule
            if conditiontype == '18':
                return self._zapi_wrapper.get_discovery_rule_by_discovery_rule_name(value)['druleid']
            # Discovery check
            if conditiontype == '19':
                return self._zapi_wrapper.get_discovery_check_by_discovery_check_name(value)['dcheckid']
            # Proxy
            if conditiontype == '20':
                return self._zapi_wrapper.get_proxy_by_proxy_name(value)['proxyid']
            # Discovery object
            if conditiontype == '21':
                return to_numeric_value([
                    "pchldrfor0",
                    "host",
                    "service"], value
                )
            # Event type
            if conditiontype == '23':
                return to_numeric_value([
                    "item in not supported state",
                    "item in normal state",
                    "LLD rule in not supported state",
                    "LLD rule in normal state",
                    "trigger in unknown state",
                    "trigger in normal state"], value
                )
            return value
        except Exception:
            self._module.fail_json(
                msg="""Unsupported value '%s' for specified condition type.
                       Check out Zabbix API documentation for supported values for
                       condition type '%s' at
                       https://www.zabbix.com/documentation/3.4/manual/api/reference/action/object#action_filter_condition""" % (value, conditiontype)
            )

    def construct_the_data(self, _eval_type, _formula, _conditions):
        """Construct the user defined filter conditions to fit the Zabbix API
        requirements operations data using helper methods.

        Args:
            _eval_type: requested evaluation mode
            _formula: zabbix condition evaluation formula
            _conditions: conditions to construct

        Returns:
            dict: user defined filter conditions
        """
        if _conditions is None:
            return None
        constructed_data = {}
        constructed_data['conditions'] = []
        for cond in _conditions:
            condition_type = self._construct_conditiontype(cond)
            constructed_data['conditions'].append({
                "conditiontype": condition_type,
                "value": self._construct_value(condition_type, cond.get("value")),
                "value2": cond.get("value2"),
                "formulaid": cond.get("formulaid"),
                "operator": self._construct_operator(cond)
            })
        _constructed_evaltype = self._construct_evaltype(
            _eval_type,
            _formula,
            constructed_data['conditions']
        )
        constructed_data['evaltype'] = _constructed_evaltype['evaltype']
        constructed_data['formula'] = _constructed_evaltype['formula']
        return cleanup_data(constructed_data)
def convert_unicode_to_str(data):
    """Recursively convert every non-container value in *data* to ``str``.

    Containers (dict, list, tuple, set) keep their type; ``None`` is left
    untouched so that absent values survive the round trip.

    Args:
        data: arbitrary (possibly nested) structure

    Returns:
        the same structure with all leaf values stringified
    """
    if data is None:
        return None
    if isinstance(data, dict):
        return {convert_unicode_to_str(key): convert_unicode_to_str(value)
                for key, value in data.items()}
    if isinstance(data, (list, tuple, set)):
        return type(data)(convert_unicode_to_str(element) for element in data)
    return str(data)
def to_numeric_value(strs, value):
    """Map *value* to its position within *strs*, returned as a string.

    Matching is case-insensitive; ``None`` list entries act as unused
    placeholder slots. Raises ``KeyError`` when *value* is not present
    (callers catch this and report an unsupported value).

    Args:
        strs: ordered list of names; the index is the numeric value
        value: name to look up, or None

    Returns:
        str: the index of *value* in *strs*, or None when value is None
    """
    if value is None:
        return None
    normalized = [entry.lower() if isinstance(entry, str) else entry
                  for entry in strs]
    lookup = {name: index for index, name in enumerate(normalized)}
    return str(lookup[value.lower()])
def compare_lists(l1, l2, diff_dict):
    """
    Compares l1 and l2 lists and adds the items that are different
    to the diff_dict dictionary.
    Used in recursion with compare_dictionaries() function.

    Args:
        l1: first list to compare
        l2: second list to compare
        diff_dict: list accumulating the differences

    Returns:
        list: items that are different
    """
    # A length mismatch is reported wholesale: the entire first list
    # counts as the difference.
    if len(l1) != len(l2):
        diff_dict.append(l1)
        return diff_dict
    for index, entry in enumerate(l1):
        if isinstance(entry, dict):
            diff_dict.insert(index, {})
            diff_dict[index] = compare_dictionaries(entry, l2[index], diff_dict[index])
        elif entry != l2[index]:
            diff_dict.append(entry)
    # Drop empty dicts left behind by equal nested dictionaries.
    while {} in diff_dict:
        diff_dict.remove({})
    return diff_dict
def compare_dictionaries(d1, d2, diff_dict):
    """
    Compares d1 and d2 dictionaries and adds the items that are different
    to the diff_dict dictionary.
    Used in recursion with compare_lists() function.

    Args:
        d1: first dictionary to compare
        d2: second dictionary to compare
        diff_dict: dictionary accumulating the differences

    Returns:
        dict: items that are different
    """
    for key, value in d1.items():
        # Keys missing from d2 are differences by definition.
        if key not in d2:
            diff_dict[key] = value
            continue
        if isinstance(value, dict):
            diff_dict[key] = {}
            compare_dictionaries(value, d2[key], diff_dict[key])
            # Equal nested dicts leave an empty entry - drop it; otherwise
            # record the full desired value, not just the nested delta.
            if not diff_dict[key]:
                del diff_dict[key]
            else:
                diff_dict[key] = value
        elif isinstance(value, list):
            diff_dict[key] = []
            compare_lists(value, d2[key], diff_dict[key])
            if not diff_dict[key]:
                del diff_dict[key]
            else:
                diff_dict[key] = value
        elif value != d2[key]:
            diff_dict[key] = value
    return diff_dict
def cleanup_data(obj):
    """Recursively strip ``None`` values from a nested structure.

    Dict entries are dropped when either the key or the value is None;
    container types are preserved. Non-container values pass through.

    Args:
        obj: object to clean up

    Returns:
        object: cleaned object
    """
    if isinstance(obj, dict):
        return type(obj)(
            (cleanup_data(key), cleanup_data(value))
            for key, value in obj.items()
            if key is not None and value is not None)
    if isinstance(obj, (list, tuple, set)):
        return type(obj)(cleanup_data(entry) for entry in obj if entry is not None)
    return obj
def main():
    """Main ansible module function.

    Builds the argument spec, then creates, updates or deletes the Zabbix
    action so that the server matches the requested state. Exits via
    module.exit_json()/fail_json() in every path.
    """
    argument_spec = zabbix_utils.zabbix_common_argument_spec()
    argument_spec.update(dict(
        esc_period=dict(type='str', required=False),
        name=dict(type='str', required=True),
        event_source=dict(type='str', required=False, choices=['trigger', 'discovery', 'auto_registration', 'internal']),
        state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
        status=dict(type='str', required=False, default='enabled', choices=['enabled', 'disabled']),
        pause_in_maintenance=dict(type='bool', required=False, default=True),
        default_message=dict(type='str', required=False, default=''),
        default_subject=dict(type='str', required=False, default=''),
        recovery_default_message=dict(type='str', required=False, default=''),
        recovery_default_subject=dict(type='str', required=False, default=''),
        acknowledge_default_message=dict(type='str', required=False, default=''),
        acknowledge_default_subject=dict(type='str', required=False, default=''),
        # Filter conditions selecting the events the action reacts to.
        conditions=dict(
            type='list',
            required=False,
            default=[],
            elements='dict',
            options=dict(
                formulaid=dict(type='str', required=False),
                operator=dict(type='str', required=True),
                type=dict(type='str', required=True),
                value=dict(type='str', required=True),
                value2=dict(type='str', required=False)
            ),
            required_if=[
                ['type', 'event_tag_value', ['value2']],
            ]
        ),
        formula=dict(type='str', required=False, default=None),
        eval_type=dict(type='str', required=False, default=None, choices=['andor', 'and', 'or', 'custom_expression']),
        # Operations executed when the action fires.
        operations=dict(
            type='list',
            required=False,
            default=[],
            elements='dict',
            options=dict(
                type=dict(
                    type='str',
                    required=True,
                    choices=[
                        'send_message',
                        'remote_command',
                        'add_host',
                        'remove_host',
                        'add_to_host_group',
                        'remove_from_host_group',
                        'link_to_template',
                        'unlink_from_template',
                        'enable_host',
                        'disable_host',
                        'set_host_inventory_mode',
                    ]
                ),
                esc_period=dict(type='str', required=False),
                esc_step_from=dict(type='int', required=False, default=1),
                esc_step_to=dict(type='int', required=False, default=1),
                operation_condition=dict(
                    type='str',
                    required=False,
                    default=None,
                    choices=['acknowledged', 'not_acknowledged']
                ),
                # when type is remote_command
                command_type=dict(
                    type='str',
                    required=False,
                    choices=[
                        'custom_script',
                        'ipmi',
                        'ssh',
                        'telnet',
                        'global_script'
                    ]
                ),
                command=dict(type='str', required=False),
                execute_on=dict(
                    type='str',
                    required=False,
                    choices=['agent', 'server', 'proxy']
                ),
                password=dict(type='str', required=False, no_log=True),
                port=dict(type='int', required=False),
                run_on_groups=dict(type='list', required=False),
                run_on_hosts=dict(type='list', required=False),
                script_name=dict(type='str', required=False),
                ssh_auth_type=dict(type='str', required=False, choices=['password', 'public_key']),
                ssh_privatekey_file=dict(type='str', required=False),
                ssh_publickey_file=dict(type='str', required=False),
                username=dict(type='str', required=False),
                # when type is send_message
                media_type=dict(type='str', required=False),
                subject=dict(type='str', required=False),
                message=dict(type='str', required=False),
                send_to_groups=dict(type='list', required=False),
                send_to_users=dict(type='list', required=False),
                # when type is add_to_host_group or remove_from_host_group
                host_groups=dict(type='list', required=False),
                # when type is set_host_inventory_mode
                inventory=dict(type='str', required=False, choices=['manual', 'automatic']),
                # when type is link_to_template or unlink_from_template
                templates=dict(type='list', required=False)
            ),
            required_if=[
                ['type', 'remote_command', ['command_type']],
                ['type', 'remote_command', ['run_on_groups', 'run_on_hosts'], True],
                ['command_type', 'custom_script', ['command', 'execute_on']],
                ['command_type', 'ipmi', ['command']],
                ['command_type', 'ssh', ['command', 'ssh_auth_type']],
                ['ssh_auth_type', 'password', ['username', 'password']],
                ['ssh_auth_type', 'public_key', ['username', 'ssh_privatekey_file', 'ssh_publickey_file']],
                ['command_type', 'telnet', ['command', 'username', 'password']],
                ['command_type', 'global_script', ['script_name']],
                ['type', 'add_to_host_group', ['host_groups']],
                ['type', 'remove_from_host_group', ['host_groups']],
                ['type', 'link_to_template', ['templates']],
                ['type', 'unlink_from_template', ['templates']],
                ['type', 'set_host_inventory_mode', ['inventory']],
                ['type', 'send_message', ['send_to_users', 'send_to_groups'], True]
            ]
        ),
        # Operations executed when the problem is resolved.
        recovery_operations=dict(
            type='list',
            required=False,
            default=[],
            elements='dict',
            options=dict(
                type=dict(
                    type='str',
                    required=True,
                    choices=[
                        'send_message',
                        'remote_command',
                        'notify_all_involved'
                    ]
                ),
                # when type is remote_command
                command_type=dict(
                    type='str',
                    required=False,
                    choices=[
                        'custom_script',
                        'ipmi',
                        'ssh',
                        'telnet',
                        'global_script'
                    ]
                ),
                command=dict(type='str', required=False),
                execute_on=dict(
                    type='str',
                    required=False,
                    choices=['agent', 'server', 'proxy']
                ),
                password=dict(type='str', required=False, no_log=True),
                port=dict(type='int', required=False),
                run_on_groups=dict(type='list', required=False),
                run_on_hosts=dict(type='list', required=False),
                script_name=dict(type='str', required=False),
                ssh_auth_type=dict(type='str', required=False, choices=['password', 'public_key']),
                ssh_privatekey_file=dict(type='str', required=False),
                ssh_publickey_file=dict(type='str', required=False),
                username=dict(type='str', required=False),
                # when type is send_message
                media_type=dict(type='str', required=False),
                subject=dict(type='str', required=False),
                message=dict(type='str', required=False),
                send_to_groups=dict(type='list', required=False),
                send_to_users=dict(type='list', required=False),
            ),
            required_if=[
                ['type', 'remote_command', ['command_type']],
                ['type', 'remote_command', [
                    'run_on_groups',
                    'run_on_hosts'
                ], True],
                ['command_type', 'custom_script', [
                    'command',
                    'execute_on'
                ]],
                ['command_type', 'ipmi', ['command']],
                ['command_type', 'ssh', ['command', 'ssh_auth_type']],
                ['ssh_auth_type', 'password', ['username', 'password']],
                ['ssh_auth_type', 'public_key', ['username', 'ssh_privatekey_file', 'ssh_publickey_file']],
                ['command_type', 'telnet', ['command', 'username', 'password']],
                ['command_type', 'global_script', ['script_name']],
                ['type', 'send_message', ['send_to_users', 'send_to_groups'], True]
            ]
        ),
        # Operations executed when the problem is acknowledged/updated.
        acknowledge_operations=dict(
            type='list',
            required=False,
            default=[],
            elements='dict',
            aliases=['update_operations'],
            options=dict(
                type=dict(
                    type='str',
                    required=True,
                    choices=[
                        'send_message',
                        'remote_command',
                        'notify_all_involved'
                    ]
                ),
                # when type is remote_command
                command_type=dict(
                    type='str',
                    required=False,
                    choices=[
                        'custom_script',
                        'ipmi',
                        'ssh',
                        'telnet',
                        'global_script'
                    ]
                ),
                command=dict(type='str', required=False),
                execute_on=dict(
                    type='str',
                    required=False,
                    choices=['agent', 'server', 'proxy']
                ),
                password=dict(type='str', required=False, no_log=True),
                port=dict(type='int', required=False),
                run_on_groups=dict(type='list', required=False),
                run_on_hosts=dict(type='list', required=False),
                script_name=dict(type='str', required=False),
                ssh_auth_type=dict(type='str', required=False, choices=['password', 'public_key']),
                ssh_privatekey_file=dict(type='str', required=False),
                ssh_publickey_file=dict(type='str', required=False),
                username=dict(type='str', required=False),
                # when type is send_message
                media_type=dict(type='str', required=False),
                subject=dict(type='str', required=False),
                message=dict(type='str', required=False),
                send_to_groups=dict(type='list', required=False),
                send_to_users=dict(type='list', required=False),
            ),
            required_if=[
                ['type', 'remote_command', ['command_type']],
                ['type', 'remote_command', [
                    'run_on_groups',
                    'run_on_hosts'
                ], True],
                ['command_type', 'custom_script', [
                    'command',
                    'execute_on'
                ]],
                ['command_type', 'ipmi', ['command']],
                ['command_type', 'ssh', ['command', 'ssh_auth_type']],
                ['ssh_auth_type', 'password', ['username', 'password']],
                ['ssh_auth_type', 'public_key', ['username', 'ssh_privatekey_file', 'ssh_publickey_file']],
                ['command_type', 'telnet', ['command', 'username', 'password']],
                ['command_type', 'global_script', ['script_name']],
                ['type', 'send_message', ['send_to_users', 'send_to_groups'], True]
            ]
        )
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=[
            ['state', 'present', [
                'esc_period',
                'event_source'
            ]]
        ],
        supports_check_mode=True
    )
    name = module.params['name']
    esc_period = module.params['esc_period']
    event_source = module.params['event_source']
    state = module.params['state']
    status = module.params['status']
    pause_in_maintenance = module.params['pause_in_maintenance']
    default_message = module.params['default_message']
    default_subject = module.params['default_subject']
    recovery_default_message = module.params['recovery_default_message']
    recovery_default_subject = module.params['recovery_default_subject']
    acknowledge_default_message = module.params['acknowledge_default_message']
    acknowledge_default_subject = module.params['acknowledge_default_subject']
    conditions = module.params['conditions']
    formula = module.params['formula']
    eval_type = module.params['eval_type']
    operations = module.params['operations']
    recovery_operations = module.params['recovery_operations']
    acknowledge_operations = module.params['acknowledge_operations']
    # Helper objects sharing one API wrapper/connection.
    zapi_wrapper = Zapi(module)
    action = Action(module, zapi_wrapper=zapi_wrapper)
    action_exists = zapi_wrapper.check_if_action_exists(name)
    ops = Operations(module, zapi_wrapper)
    recovery_ops = RecoveryOperations(module, zapi_wrapper)
    acknowledge_ops = AcknowledgeOperations(module, zapi_wrapper)
    fltr = Filter(module, zapi_wrapper)
    if action_exists:
        action_id = zapi_wrapper.get_action_by_name(name)['actionid']
        if state == "absent":
            result = action.delete_action(action_id)
            module.exit_json(changed=True, msg="Action Deleted: %s, ID: %s" % (name, result))
        else:
            # Compare the desired state with the existing action and only
            # call the update API when something actually differs.
            difference = action.check_difference(
                action_id=action_id,
                name=name,
                event_source=event_source,
                esc_period=esc_period,
                status=status,
                pause_in_maintenance=pause_in_maintenance,
                default_message=default_message,
                default_subject=default_subject,
                recovery_default_message=recovery_default_message,
                recovery_default_subject=recovery_default_subject,
                acknowledge_default_message=acknowledge_default_message,
                acknowledge_default_subject=acknowledge_default_subject,
                operations=ops.construct_the_data(operations),
                recovery_operations=recovery_ops.construct_the_data(recovery_operations),
                acknowledge_operations=acknowledge_ops.construct_the_data(acknowledge_operations),
                conditions=fltr.construct_the_data(eval_type, formula, conditions)
            )
            if difference == {}:
                module.exit_json(changed=False, msg="Action is up to date: %s" % (name))
            else:
                result = action.update_action(
                    action_id=action_id,
                    **difference
                )
                module.exit_json(changed=True, msg="Action Updated: %s, ID: %s" % (name, result))
    else:
        if state == "absent":
            # Nothing to delete; report no change.
            module.exit_json(changed=False)
        else:
            action_id = action.add_action(
                name=name,
                event_source=event_source,
                esc_period=esc_period,
                status=status,
                pause_in_maintenance=pause_in_maintenance,
                default_message=default_message,
                default_subject=default_subject,
                recovery_default_message=recovery_default_message,
                recovery_default_subject=recovery_default_subject,
                acknowledge_default_message=acknowledge_default_message,
                acknowledge_default_subject=acknowledge_default_subject,
                operations=ops.construct_the_data(operations),
                recovery_operations=recovery_ops.construct_the_data(recovery_operations),
                acknowledge_operations=acknowledge_ops.construct_the_data(acknowledge_operations),
                conditions=fltr.construct_the_data(eval_type, formula, conditions)
            )
            module.exit_json(changed=True, msg="Action created: %s, ID: %s" % (name, action_id))
# Run the module only when executed directly (Ansible invokes it this way).
if __name__ == '__main__':
    main()
import re
import socket
from cfgm_common import PERMS_RX
from vnc_api.gen.resource_common import VirtualDns
from vnc_cfg_api_server.resources._resource_base import ResourceMixin
class VirtualDnsServer(ResourceMixin, VirtualDns):
@classmethod
def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
    """Share the virtual DNS read-only with its domain, then validate it.

    Args:
        tenant_name: requesting tenant (unused here)
        obj_dict: virtual DNS resource payload
        db_conn: database connection used for uuid lookups

    Returns:
        result of validate_dns_server() on the (augmented) payload
    """
    # Enable domain level sharing for the virtual DNS server.
    domain_uuid = obj_dict.get('parent_uuid')
    if domain_uuid is None:
        domain_uuid = db_conn.fq_name_to_uuid('domain',
                                              obj_dict['fq_name'][0:1])
    obj_dict['perms2'].setdefault('share', []).append({
        'tenant': 'domain:%s' % domain_uuid,
        'tenant_access': PERMS_RX,
    })
    return cls.validate_dns_server(obj_dict, db_conn)
@classmethod
def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, **kwargs):
    """Validate the updated virtual DNS payload before it is written."""
    return cls.validate_dns_server(obj_dict, db_conn)
@classmethod
def pre_dbe_delete(cls, id, obj_dict, db_conn):
    """Refuse deletion while other virtual DNS servers refer to this one.

    Walks the sibling virtual DNS servers under the same domain and fails
    with 403 if any of them points at this server via 'next_virtual_DNS'.

    Returns:
        (ok, result, None) tuple; ok is False with a (code, msg) pair on
        error or when the server is still referenced.
    """
    vdns_name = ":".join(obj_dict['fq_name'])
    if 'parent_uuid' in obj_dict:
        ok, read_result = cls.dbe_read(db_conn, 'domain',
                                       obj_dict['parent_uuid'])
        if not ok:
            return ok, read_result, None
        virtual_DNSs = read_result.get('virtual_DNSs') or []
        for vdns in virtual_DNSs:
            ok, read_result = cls.dbe_read(db_conn, 'virtual_DNS',
                                           vdns['uuid'])
            if not ok:
                code, msg = read_result
                if code == 404:
                    # Sibling already gone; it cannot reference us.
                    continue
                return ok, (code, msg), None
            vdns_data = read_result['virtual_DNS_data']
            if 'next_virtual_DNS' in vdns_data:
                if vdns_data['next_virtual_DNS'] == vdns_name:
                    return (
                        False,
                        (403,
                         "Virtual DNS server is referred"
                         " by other virtual DNS servers"), None)
    return True, "", None
@classmethod
def is_valid_dns_name(cls, name):
if len(name) > 255:
return False
if name.endswith("."): # A single trailing dot is legal
# strip exactly one dot from the right, if present
name = name[:-1]
disallowed = re.compile(r'[^A-Z\d-]', re.IGNORECASE)
return all( # Split by labels and verify individually
(label and len(label) <= 63 and # length is within proper range
# no bordering hyphens
not label.startswith("-") and not label.endswith("-") and
not disallowed.search(label)) # contains only legal char
for label in name.split("."))
@classmethod
def is_valid_ipv4_address(cls, address):
parts = address.split(".")
if len(parts) != 4:
return False
for item in parts:
try:
if not 0 <= int(item) <= 255:
return False
except ValueError:
return False
return True
@classmethod
def is_valid_ipv6_address(cls, address):
try:
socket.inet_pton(socket.AF_INET6, address)
except socket.error:
return False
return True
@classmethod
def validate_dns_server(cls, obj_dict, db_conn):
if 'fq_name' in obj_dict:
virtual_dns = obj_dict['fq_name'][1]
disallowed = re.compile(r'[^A-Z\d-]', re.IGNORECASE)
if disallowed.search(virtual_dns) or virtual_dns.startswith("-"):
msg = ("Special characters are not allowed in Virtual DNS "
"server name")
return False, (403, msg)
vdns_data = obj_dict['virtual_DNS_data']
if not cls.is_valid_dns_name(vdns_data['domain_name']):
return (
False,
(403, "Domain name does not adhere to DNS name requirements"))
record_order = ["fixed", "random", "round-robin"]
if not str(vdns_data['record_order']).lower() in record_order:
return (False, (403, "Invalid value for record order"))
ttl = vdns_data['default_ttl_seconds']
if ttl < 0 or ttl > 2147483647:
return (False, (400, "Invalid value for TTL"))
if 'next_virtual_DNS' in vdns_data:
vdns_next = vdns_data['next_virtual_DNS']
if not vdns_next or vdns_next is None:
return True, ""
next_vdns = vdns_data['next_virtual_DNS'].split(":")
# check that next vdns exists
try:
next_vdns_uuid = db_conn.fq_name_to_uuid(
'virtual_DNS', next_vdns)
except Exception:
if not cls.is_valid_ipv4_address(
vdns_data['next_virtual_DNS']):
return (
False,
(400,
"Invalid Virtual Forwarder(next virtual dns server)"))
else:
return True, ""
# check that next virtual dns servers arent referring to each other
# above check doesnt allow during create, but entry could be
# modified later
ok, read_result = cls.dbe_read(db_conn, 'virtual_DNS',
next_vdns_uuid)
if ok:
next_vdns_data = read_result['virtual_DNS_data']
if 'next_virtual_DNS' in next_vdns_data:
vdns_name = ":".join(obj_dict['fq_name'])
if next_vdns_data['next_virtual_DNS'] == vdns_name:
return (
False,
(403,
"Cannot have Virtual DNS Servers "
"referring to each other"))
return True, "" | src/config/api-server/vnc_cfg_api_server/resources/virtual_dns.py |
import re
import socket
from cfgm_common import PERMS_RX
from vnc_api.gen.resource_common import VirtualDns
from vnc_cfg_api_server.resources._resource_base import ResourceMixin
class VirtualDnsServer(ResourceMixin, VirtualDns):
    """Resource hooks and validation for virtual DNS servers.

    All hooks return ``(ok, error[, extra])`` tuples; ``error`` is an
    ``(http_status, message)`` pair when ``ok`` is False.
    """

    @classmethod
    def pre_dbe_create(cls, tenant_name, obj_dict, db_conn):
        """Grant the owning domain read access to the new vDNS, then validate."""
        # enable domain level sharing for virtual DNS
        domain_uuid = obj_dict.get('parent_uuid')
        if domain_uuid is None:
            # Resolve the domain from the first fq_name element.
            domain_uuid = db_conn.fq_name_to_uuid('domain',
                                                  obj_dict['fq_name'][0:1])
        share_item = {
            'tenant': 'domain:%s' % domain_uuid,
            'tenant_access': PERMS_RX
        }
        obj_dict['perms2'].setdefault('share', []).append(share_item)
        return cls.validate_dns_server(obj_dict, db_conn)

    @classmethod
    def pre_dbe_update(cls, id, fq_name, obj_dict, db_conn, **kwargs):
        """Updates get the same validation as creates."""
        return cls.validate_dns_server(obj_dict, db_conn)

    @classmethod
    def pre_dbe_delete(cls, id, obj_dict, db_conn):
        """Block deletion while another vDNS in the domain forwards to this one."""
        vdns_name = ":".join(obj_dict['fq_name'])
        if 'parent_uuid' in obj_dict:
            ok, read_result = cls.dbe_read(db_conn, 'domain',
                                           obj_dict['parent_uuid'])
            if not ok:
                return ok, read_result, None
            virtual_DNSs = read_result.get('virtual_DNSs') or []
            for vdns in virtual_DNSs:
                ok, read_result = cls.dbe_read(db_conn, 'virtual_DNS',
                                               vdns['uuid'])
                if not ok:
                    code, msg = read_result
                    if code == 404:
                        # Sibling vanished concurrently; ignore it.
                        continue
                    return ok, (code, msg), None
                vdns_data = read_result['virtual_DNS_data']
                if 'next_virtual_DNS' in vdns_data:
                    if vdns_data['next_virtual_DNS'] == vdns_name:
                        return (
                            False,
                            (403,
                             "Virtual DNS server is referred"
                             " by other virtual DNS servers"), None)
        return True, "", None

    @classmethod
    def is_valid_dns_name(cls, name):
        """Validate hostname length, per-label length, charset and hyphens."""
        if len(name) > 255:
            return False
        if name.endswith("."):  # A single trailing dot is legal
            # strip exactly one dot from the right, if present
            name = name[:-1]
        disallowed = re.compile(r'[^A-Z\d-]', re.IGNORECASE)
        return all(  # Split by labels and verify individually
            (label and len(label) <= 63 and  # length is within proper range
             # no bordering hyphens
             not label.startswith("-") and not label.endswith("-") and
             not disallowed.search(label))  # contains only legal char
            for label in name.split("."))

    @classmethod
    def is_valid_ipv4_address(cls, address):
        """True when ``address`` is exactly four in-range decimal octets."""
        parts = address.split(".")
        if len(parts) != 4:
            return False
        for item in parts:
            try:
                if not 0 <= int(item) <= 255:
                    return False
            except ValueError:
                # octet was not an integer
                return False
        return True

    @classmethod
    def is_valid_ipv6_address(cls, address):
        """True when the C library accepts ``address`` as IPv6."""
        try:
            socket.inet_pton(socket.AF_INET6, address)
        except socket.error:
            return False
        return True

    @classmethod
    def validate_dns_server(cls, obj_dict, db_conn):
        """Check name charset, domain name, record order, TTL and the
        optional next-vDNS forwarder chain; returns (ok, error-pair)."""
        if 'fq_name' in obj_dict:
            virtual_dns = obj_dict['fq_name'][1]
            disallowed = re.compile(r'[^A-Z\d-]', re.IGNORECASE)
            if disallowed.search(virtual_dns) or virtual_dns.startswith("-"):
                msg = ("Special characters are not allowed in Virtual DNS "
                       "server name")
                return False, (403, msg)
        vdns_data = obj_dict['virtual_DNS_data']
        if not cls.is_valid_dns_name(vdns_data['domain_name']):
            return (
                False,
                (403, "Domain name does not adhere to DNS name requirements"))
        record_order = ["fixed", "random", "round-robin"]
        if not str(vdns_data['record_order']).lower() in record_order:
            return (False, (403, "Invalid value for record order"))
        ttl = vdns_data['default_ttl_seconds']
        if ttl < 0 or ttl > 2147483647:  # signed 32-bit bound
            return (False, (400, "Invalid value for TTL"))
        if 'next_virtual_DNS' in vdns_data:
            vdns_next = vdns_data['next_virtual_DNS']
            if not vdns_next or vdns_next is None:
                return True, ""
            next_vdns = vdns_data['next_virtual_DNS'].split(":")
            # check that next vdns exists
            try:
                next_vdns_uuid = db_conn.fq_name_to_uuid(
                    'virtual_DNS', next_vdns)
            except Exception:
                # Unknown fq_name; fall back to accepting an IPv4 forwarder.
                if not cls.is_valid_ipv4_address(
                        vdns_data['next_virtual_DNS']):
                    return (
                        False,
                        (400,
                         "Invalid Virtual Forwarder(next virtual dns server)"))
                else:
                    return True, ""
            # check that next virtual dns servers arent referring to each other
            # above check doesnt allow during create, but entry could be
            # modified later
            ok, read_result = cls.dbe_read(db_conn, 'virtual_DNS',
                                           next_vdns_uuid)
            if ok:
                next_vdns_data = read_result['virtual_DNS_data']
                if 'next_virtual_DNS' in next_vdns_data:
                    vdns_name = ":".join(obj_dict['fq_name'])
                    if next_vdns_data['next_virtual_DNS'] == vdns_name:
                        return (
                            False,
                            (403,
                             "Cannot have Virtual DNS Servers "
                             "referring to each other"))
return True, "" | 0.45641 | 0.054955 |
import sys
from setuptools import setup
# load __version__
# Executes pyrender/version.py in this namespace so setup() can read the
# version without importing the (not-yet-installed) package.
exec(open("pyrender/version.py").read())
def get_imageio_dep():
    """Return the imageio requirement string for the running interpreter.

    imageio dropped Python 2 support after release 2.6.1, so the dependency
    is pinned on Python 2 and left unpinned on Python 3.
    """
    # Use the structured version tuple instead of parsing the free-form
    # sys.version string (first character) -- same result, more robust.
    if sys.version_info[0] == 2:
        return "imageio<=2.6.1"
    return "imageio"
# Runtime dependencies (install_requires / setup_requires).
requirements = [
    "freetype-py",  # For font loading
    get_imageio_dep(),  # For Image I/O
    "networkx",  # For the scene graph
    "numpy",  # Numpy
    "Pillow",  # For Trimesh texture conversions
    "pyglet>=1.4.10",  # For the pyglet viewer
    "PyOpenGL==3.1.0",  # For OpenGL
    # 'PyOpenGL_accelerate==3.1.0', # For OpenGL
    "scipy",  # Because of trimesh missing dep
    "six",  # For Python 2/3 interop
    "trimesh",  # For meshes
]
# Extra dependencies installed via the "dev" extra.
dev_requirements = [
    "flake8",  # Code formatting checker
    "pre-commit",  # Pre-commit hooks
    "pytest",  # Code testing
    "pytest-cov",  # Coverage testing
    "tox",  # Automatic virtualenv testing
]
# Extra dependencies installed via the "docs" extra.
docs_requirements = [
    "sphinx",  # General doc library
    "sphinx_rtd_theme",  # RTD theme for sphinx
    "sphinx-automodapi",  # For generating nice tables
]
# Package metadata and distribution configuration for pyrender.
setup(
    name="pyrender",
    version=__version__,  # loaded above from pyrender/version.py
    description="Easy-to-use Python renderer for 3D visualization",
    long_description="A simple implementation of Physically-Based Rendering "
    "(PBR) in Python. Compliant with the glTF 2.0 standard.",
    author="<NAME>",
    author_email="<EMAIL>",
    license="MIT License",
    url="https://github.com/mmatl/pyrender",
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX :: Linux",
        "Operating System :: MacOS :: MacOS X",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Natural Language :: English",
        "Topic :: Scientific/Engineering",
    ],
    keywords="rendering graphics opengl 3d visualization pbr gltf",
    packages=["pyrender", "pyrender.platforms"],
    setup_requires=requirements,
    install_requires=requirements,
    extras_require={"dev": dev_requirements, "docs": docs_requirements,},
    include_package_data=True,
) | setup.py | import sys
from setuptools import setup
# load __version__
# Runs pyrender/version.py in this namespace to obtain __version__ without
# importing the package itself.
exec(open("pyrender/version.py").read())
def get_imageio_dep():
    """Pick the imageio requirement matching the active Python major version.

    Python 2 needs the last release that still supported it (2.6.1).
    """
    on_python2 = sys.version[0] == "2"
    return "imageio<=2.6.1" if on_python2 else "imageio"
# Core runtime dependencies.
requirements = [
    "freetype-py",  # For font loading
    get_imageio_dep(),  # For Image I/O
    "networkx",  # For the scene graph
    "numpy",  # Numpy
    "Pillow",  # For Trimesh texture conversions
    "pyglet>=1.4.10",  # For the pyglet viewer
    "PyOpenGL==3.1.0",  # For OpenGL
    # 'PyOpenGL_accelerate==3.1.0', # For OpenGL
    "scipy",  # Because of trimesh missing dep
    "six",  # For Python 2/3 interop
    "trimesh",  # For meshes
]
# Development-only dependencies ("dev" extra).
dev_requirements = [
    "flake8",  # Code formatting checker
    "pre-commit",  # Pre-commit hooks
    "pytest",  # Code testing
    "pytest-cov",  # Coverage testing
    "tox",  # Automatic virtualenv testing
]
# Documentation-build dependencies ("docs" extra).
docs_requirements = [
    "sphinx",  # General doc library
    "sphinx_rtd_theme",  # RTD theme for sphinx
    "sphinx-automodapi",  # For generating nice tables
]
# Distribution metadata; __version__ comes from the exec() above.
setup(
    name="pyrender",
    version=__version__,
    description="Easy-to-use Python renderer for 3D visualization",
    long_description="A simple implementation of Physically-Based Rendering "
    "(PBR) in Python. Compliant with the glTF 2.0 standard.",
    author="<NAME>",
    author_email="<EMAIL>",
    license="MIT License",
    url="https://github.com/mmatl/pyrender",
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX :: Linux",
        "Operating System :: MacOS :: MacOS X",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Natural Language :: English",
        "Topic :: Scientific/Engineering",
    ],
    keywords="rendering graphics opengl 3d visualization pbr gltf",
    packages=["pyrender", "pyrender.platforms"],
    setup_requires=requirements,
    install_requires=requirements,
    extras_require={"dev": dev_requirements, "docs": docs_requirements,},
    include_package_data=True,
) | 0.316792 | 0.287112 |
import os
import tqdm
import numpy as np
import tensorflow as tf
gpu_id='2'
class Config:
    """Static file-system paths and hyper-parameters for the CBHG model."""
    # Dataset splits: tab-separated "key\tpinyin\thanzi" files.
    train_path='/data/dataset/pinyin2hanzi/py2hz_train.tsv'
    hz2id_dict='/data/dataset/dict/hz2id_dict.txt'
    test_path='/data/dataset/pinyin2hanzi/py2hz_test.tsv'
    dev_path='/data/dataset/pinyin2hanzi/py2hz_dev.tsv'
    py2id_dict='/data/dataset/dict/py2id_dict.txt'
    # Checkpoint and TensorBoard output locations.
    model_dir='log/CBHG_model/'
    model_name='model'
    model_path=model_dir+model_name
    board_path='tensorboard/CBHG'
    # Model hyper-parameters.
    embed_size = 300
    num_highwaynet_blocks = 4
    encoder_num_banks = 8
    lr = 0.001
    is_training = True
    # Training schedule.
    epochs = 25
    batch_size = 256
    # NOTE: pny_size and hanzi_size are attached at runtime by read_dict().
def read_dict():
    """Load the pinyin and hanzi vocabularies.

    Reads the two tab-separated "<token>\\t<id>" mapping files configured in
    ``Config`` and records the vocabulary sizes on ``Config`` as a side
    effect (pny_size / hanzi_size).

    Returns:
        (pny2idx, idx2pny, hanzi2idx, idx2hanzi) lookup dictionaries.
    """
    def _load(path):
        # Build both directions of the token<->id mapping from one file.
        tok2id, id2tok = {}, {}
        with open(path, 'r', encoding='utf-8') as fh:
            for raw in fh:
                token, idx = raw.strip('\n').split('\t')
                tok2id[token] = int(idx.strip())
                id2tok[int(idx.strip())] = token.strip()
        return tok2id, id2tok

    hanzi2idx, idx2hanzi = _load(Config.hz2id_dict)
    pny2idx, idx2pny = _load(Config.py2id_dict)
    Config.pny_size = len(pny2idx)
    Config.hanzi_size = len(hanzi2idx)
    return pny2idx, idx2pny, hanzi2idx, idx2hanzi
def read_data(type):
    """Load one dataset split and convert its tokens to integer ids.

    Args:
        type: one of 'train', 'test' or 'dev'.

    Returns:
        (input_num, label_num): per-sentence lists of pinyin ids and the
        aligned hanzi ids.

    Raises:
        Exception: if ``type`` names an unknown split.
    """
    split_paths = {
        'train': Config.train_path,
        'test': Config.test_path,
        'dev': Config.dev_path,
    }
    if type not in split_paths:
        raise Exception("Invalid type!", type)
    data_path = split_paths[type]
    inputs = []
    labels = []
    with open(data_path, 'r', encoding='utf-8') as file:
        for line in file:
            # Each line is "<key>\t<pinyin tokens>\t<hanzi tokens>".
            key, pny, hanzi = line.strip('\n').strip().split('\t')
            pny_tokens = pny.strip().split(' ')
            han_tokens = hanzi.strip().split(' ')
            # Pinyin and hanzi sequences must align token-for-token.
            assert len(pny_tokens) == len(han_tokens)
            inputs.append(pny_tokens)
            labels.append(han_tokens)
    pny2idx, _, hanzi2idx, _ = read_dict()
    input_num = [[pny2idx[p] for p in sent] for sent in inputs]
    label_num = [[hanzi2idx[h] for h in sent] for sent in labels]
    return input_num, label_num
def get_batch(inputs, labels):
    """Yield zero-padded (input, label) batches of ``Config.batch_size``.

    Both arrays in a batch are right-padded with 0 up to the longest *input*
    sequence in that batch.  A trailing partial batch is dropped.
    """
    size = Config.batch_size
    n_full = len(inputs) // size
    for b in range(n_full):
        lo, hi = b * size, (b + 1) * size
        seqs = inputs[lo:hi]
        tags = labels[lo:hi]
        width = max(len(row) for row in seqs)
        padded_in = np.array([row + [0] * (width - len(row)) for row in seqs])
        padded_lab = np.array([row + [0] * (width - len(row)) for row in tags])
        yield padded_in, padded_lab
def embed(inputs, vocab_size, num_units, zero_pad=True, scope="embedding", reuse=None):
    """Map integer token ids to trainable dense vectors.

    When ``zero_pad`` is set, row 0 of the table is replaced by a constant
    zero vector so that padding id 0 embeds to zeros.

    Returns a float tensor of shape inputs.shape + [num_units].
    """
    with tf.variable_scope(scope, reuse=reuse):
        init = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
        table = tf.get_variable('lookup_table',
                                dtype=tf.float32,
                                shape=[vocab_size, num_units],
                                initializer=init)
        if zero_pad:
            zero_row = tf.zeros(shape=[1, num_units])
            table = tf.concat((zero_row, table[1:, :]), 0)
        return tf.nn.embedding_lookup(table, inputs)
def prenet(inputs, num_units=None, is_training=True, scope="prenet", reuse=None, dropout_rate=0.2):
    """Position-wise bottleneck: two (dense -> ReLU -> dropout) stages.

    inputs: [batch, time, embed_size]; result: [batch, time, num_units[1]].
    Dropout is applied only while ``is_training`` is True.
    """
    with tf.variable_scope(scope, reuse=reuse):
        hidden = inputs
        # (dense layer name, dropout layer name, width) for the two stages.
        stages = (("dense1", "dropout1", num_units[0]),
                  ("dense2", "dropout2", num_units[1]))
        for dense_name, drop_name, width in stages:
            hidden = tf.layers.dense(hidden, units=width, activation=tf.nn.relu, name=dense_name)
            hidden = tf.layers.dropout(hidden, rate=dropout_rate, training=is_training, name=drop_name)
        return hidden  # (N, ..., num_units[1])
def conv1d(inputs,filters=None, size=1,rate=1, padding="SAME",use_bias=False,activation_fn=None, scope="conv1d", reuse=None):
    '''1-D convolution wrapper with optional causal padding.

    Args:
        inputs: A 3-D tensor with shape of [batch, time, depth].
        filters: An int. Number of outputs (=activation maps); defaults to
            the input depth when None.
        size: An int. Filter size.
        rate: An int. Dilation rate.
        padding: Either `same` or `valid` or `causal` (case-insensitive).
        use_bias: A boolean.
        activation_fn: Optional activation applied by the conv layer.
        scope: Optional scope for `variable_scope`.
        reuse: Boolean, whether to reuse the weights of a previous layer
            by the same name.
    Returns:
        A tensor of shape [batch, time, filters].
    '''
    with tf.variable_scope(scope):
        if padding.lower() == "causal":
            # pre-padding for causality: shift the input right so each output
            # step only sees current/past steps, then convolve "valid".
            pad_len = (size - 1) * rate  # padding size
            inputs = tf.pad(inputs, [[0, 0], [pad_len, 0], [0, 0]])
            padding = "valid"
        if filters is None:
            # BUG FIX: as_list is a method; the original indexed the bound
            # method object (`as_list[-1]`), which raises TypeError at
            # graph-construction time whenever filters is omitted.
            filters = inputs.get_shape().as_list()[-1]
        params = {"inputs": inputs, "filters": filters, "kernel_size": size,
                  "dilation_rate": rate, "padding": padding, "activation": activation_fn,
                  "use_bias": use_bias, "reuse": reuse}
        outputs = tf.layers.conv1d(**params)
        return outputs
def conv1d_banks(inputs, num_units=None, K=16, is_training=True, scope="conv1d_banks", reuse=None):
    '''Applies a bank of conv1d layers with kernel sizes 1..K and concatenates.

    NOTE(review): this function is defined twice in this file with identical
    bodies; the later definition shadows this one at import time.

    Args:
        inputs: A 3d tensor with shape of [N, T, C]
        K: An int. The size of conv1d banks. That is,
            The `inputs` are convolved with K filters: 1, 2, ..., K.
        is_training: A boolean. This is passed to an argument of `batch_normalize`.
    Returns:
        A 3d tensor with shape of [N, T, K*Hp.embed_size//2].
    '''
    with tf.variable_scope(scope, reuse=reuse):
        outputs = conv1d(inputs, num_units // 2, 1)  # k=1
        for k in range(2, K + 1):  # k = 2...K
            with tf.variable_scope("num_{}".format(k)):
                output = conv1d(inputs, num_units, k)
                outputs = tf.concat((outputs, output), -1)
        # batch-norm + ReLU over the concatenated bank outputs
        outputs = normalize(outputs, is_training=is_training,
                            activation_fn=tf.nn.relu)
    return outputs  # (N, T, Hp.embed_size//2*K)
def conv1d_banks(inputs, num_units=None, K=16, is_training=True, scope="conv1d_banks", reuse=None):
    '''Conv1d bank: kernel sizes 1..K, outputs concatenated then normalized.

    NOTE(review): exact duplicate of the definition above; being later in the
    module, THIS definition is the one in effect at runtime.

    Args:
        inputs: A 3d tensor with shape of [N, T, C]
        K: An int. The size of conv1d banks. That is,
            The `inputs` are convolved with K filters: 1, 2, ..., K.
        is_training: A boolean. This is passed to an argument of `batch_normalize`.
    Returns:
        A 3d tensor with shape of [N, T, K*Hp.embed_size//2].
    '''
    with tf.variable_scope(scope, reuse=reuse):
        outputs = conv1d(inputs, num_units // 2, 1)  # k=1
        for k in range(2, K + 1):  # k = 2...K
            with tf.variable_scope("num_{}".format(k)):
                output = conv1d(inputs, num_units, k)
                outputs = tf.concat((outputs, output), -1)
        outputs = normalize(outputs, is_training=is_training,
                            activation_fn=tf.nn.relu)
    return outputs  # (N, T, Hp.embed_size//2*K)
def gru(inputs, num_units=None, bidirection=False, seqlen=None, scope="gru", reuse=None):
    '''Applies an (optionally bidirectional) GRU.

    Args:
        inputs: A 3d tensor with shape of [N, T, C].
        num_units: An int. The number of hidden units; defaults to the input
            depth when None.
        bidirection: A boolean. If True, forward and backward outputs are
            concatenated on the last axis.
        seqlen: Optional per-example sequence lengths.
        scope: Optional scope for `variable_scope`.
        reuse: Boolean, whether to reuse the weights of a previous layer
            by the same name.
    Returns:
        If bidirection is True, a 3d tensor with shape of [N, T, 2*num_units],
        otherwise [N, T, num_units].
    '''
    with tf.variable_scope(scope, reuse=reuse):
        if num_units is None:
            # BUG FIX: as_list must be called; `as_list[-1]` indexed the
            # bound method object and raised TypeError.
            num_units = inputs.get_shape().as_list()[-1]
        cell = tf.contrib.rnn.GRUCell(num_units)
        if bidirection:
            cell_bw = tf.contrib.rnn.GRUCell(num_units)
            outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell, cell_bw, inputs,
                                                         sequence_length=seqlen,
                                                         dtype=tf.float32)
            # outputs is a (fw, bw) pair; merge on the feature axis.
            return tf.concat(outputs, 2)
        else:
            outputs, _ = tf.nn.dynamic_rnn(cell, inputs,
                                           sequence_length=seqlen,
                                           dtype=tf.float32)
            return outputs
def highwaynet(inputs, num_units=None, scope="highwaynet", reuse=None):
    """Highway network layer (https://arxiv.org/abs/1505.00387).

    Computes H(x)*T(x) + x*(1 - T(x)) where H is a ReLU dense layer and T a
    sigmoid "transform gate" whose bias starts at -1 (favouring carry-through
    early in training).

    Args:
        inputs: [N, T, W] tensor.
        num_units: width of the highway layer; defaults to the input width.
        scope/reuse: variable-scope controls.

    Returns:
        A [N, T, W] tensor shaped like ``inputs``.
    """
    if not num_units:
        num_units = inputs.get_shape()[-1]
    with tf.variable_scope(scope, reuse=reuse):
        transform = tf.layers.dense(inputs, units=num_units, activation=tf.nn.relu, name="dense1")
        gate = tf.layers.dense(inputs, units=num_units, activation=tf.nn.sigmoid,
                               bias_initializer=tf.constant_initializer(-1.0), name="dense2")
        carry = 1. - gate
        return transform * gate + inputs * carry
def normalize(inputs,
              decay=.99,
              epsilon=1e-8,
              is_training=True,
              activation_fn=None,
              reuse=None,
              scope="normalize"):
    '''Fused batch normalization for a 3-D input.

    The input is temporarily expanded to rank 4 (NHWC with H=1) because the
    fused batch-norm kernel requires rank-4 input, then squeezed back.

    Args:
        inputs: A tensor whose first dimension is batch_size.
        decay: Decay for the moving average. Reasonable values for `decay` are
            close to 1.0, typically in the multiple-nines range: 0.999, 0.99,
            0.9, etc. Lower `decay` value (recommend trying `decay`=0.9) if
            model experiences reasonably good training performance but poor
            validation and/or test performance.
        epsilon: NOTE(review): accepted but never used by this body; kept for
            call-site compatibility.
        is_training: Whether or not the layer is in training mode.
        activation_fn: Optional activation applied to the normalized output.
        reuse/scope: variable-scope controls.
    Returns:
        A tensor with the same shape and data dtype as `inputs`.
    '''
    # NOTE(review): inputs_shape / inputs_rank are computed but unused.
    inputs_shape = inputs.get_shape()
    inputs_rank = inputs_shape.ndims
    # use fused batch norm if inputs_rank in [2, 3, 4] as it is much faster.
    # pay attention to the fact that fused_batch_norm requires shape to be rank 4 of NHWC.
    inputs = tf.expand_dims(inputs, axis=1)
    outputs = tf.contrib.layers.batch_norm(inputs=inputs,
                                           decay=decay,
                                           center=True,
                                           scale=True,
                                           updates_collections=None,
                                           is_training=is_training,
                                           scope=scope,
                                           zero_debias_moving_mean=True,
                                           fused=True,
                                           reuse=reuse)
    outputs = tf.squeeze(outputs, axis=1)
    if activation_fn:
        outputs = activation_fn(outputs)
    return outputs
class Graph():
    '''Builds the CBHG pinyin-to-hanzi model graph (TF1 static graph).

    Feeds are padded id matrices ``x`` (pinyin) and ``y`` (hanzi); outputs
    are per-position hanzi logits.  Loss/optimizer/summary ops are created
    only when Config.is_training is True.
    '''
    def __init__(self):
        tf.reset_default_graph()
        # Snapshot hyper-parameters from the module-level Config.
        self.pny_size = Config.pny_size
        self.han_size = Config.hanzi_size
        self.embed_size = Config.embed_size
        self.is_training = Config.is_training
        self.num_highwaynet_blocks = Config.num_highwaynet_blocks
        self.encoder_num_banks = Config.encoder_num_banks
        self.lr = Config.lr
        # x: pinyin ids, y: hanzi ids; both [batch, time], 0 = padding.
        self.x = tf.placeholder(tf.int32, shape=(None, None))
        self.y = tf.placeholder(tf.int32, shape=(None, None))
        # Character Embedding for x
        enc = embed(self.x, self.pny_size, self.embed_size, scope="emb_x")
        # Encoder pre-net
        prenet_out = prenet(enc,
                            num_units=[self.embed_size, self.embed_size // 2],
                            is_training=self.is_training)  # (N, T, E/2)
        # Encoder CBHG
        ## Conv1D bank
        enc = conv1d_banks(prenet_out,
                           K=self.encoder_num_banks,
                           num_units=self.embed_size // 2,
                           is_training=self.is_training)  # (N, T, K * E / 2)
        ## Max pooling
        enc = tf.layers.max_pooling1d(enc, 2, 1, padding="same")  # (N, T, K * E / 2)
        ## Conv1D projections
        enc = conv1d(enc, self.embed_size // 2, 5, scope="conv1d_1")  # (N, T, E/2)
        enc = normalize(enc, is_training=self.is_training,
                        activation_fn=tf.nn.relu, scope="norm1")
        enc = conv1d(enc, self.embed_size // 2, 5, scope="conv1d_2")  # (N, T, E/2)
        enc = normalize(enc, is_training=self.is_training,
                        activation_fn=None, scope="norm2")
        enc += prenet_out  # (N, T, E/2) # residual connections
        ## Highway Nets
        for i in range(self.num_highwaynet_blocks):
            enc = highwaynet(enc, num_units=self.embed_size // 2,
                             scope='highwaynet_{}'.format(i))  # (N, T, E/2)
        ## Bidirectional GRU
        enc = gru(enc, self.embed_size // 2, True, scope="gru1")  # (N, T, E)
        ## Readout
        self.outputs = tf.layers.dense(enc, self.han_size, use_bias=False)
        self.preds = tf.to_int32(tf.argmax(self.outputs, axis=-1))
        if self.is_training:
            self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=self.outputs)
            # Padding positions (y == 0) are masked out of loss and accuracy.
            self.istarget = tf.to_float(tf.not_equal(self.y, tf.zeros_like(self.y)))  # masking
            self.hits = tf.to_float(tf.equal(self.preds, self.y)) * self.istarget
            self.acc = tf.reduce_sum(self.hits) / tf.reduce_sum(self.istarget)
            self.mean_loss = tf.reduce_sum(self.loss * self.istarget) / tf.reduce_sum(self.istarget)
            # Training Scheme
            self.global_step = tf.Variable(0, name='global_step', trainable=False)
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
            self.train_op = self.optimizer.minimize(self.mean_loss, global_step=self.global_step)
            # Summary
            tf.summary.scalar('mean_loss', self.mean_loss)
            tf.summary.scalar('acc', self.acc)
            self.merged = tf.summary.merge_all()
def train():
    """Train the CBHG model, logging to TensorBoard and checkpointing.

    Resumes from the latest checkpoint in Config.model_dir when one exists;
    saves after every epoch.
    """
    inputs, labels = read_data('train')
    dev_inputs, dev_labels = read_data('dev')
    g = Graph()
    config = tf.ConfigProto(log_device_placement=True)
    saver = tf.train.Saver()
    with tf.Session(config=config) as sess:
        merged = tf.summary.merge_all()
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.latest_checkpoint(Config.model_dir)
        if ckpt is not None:  # idiom fix: identity test against None
            print("正在恢复模型")
            saver.restore(sess, ckpt)
        writer = tf.summary.FileWriter(Config.board_path, tf.get_default_graph())
        batch_num = len(inputs) // Config.batch_size
        dev_num = len(dev_inputs) // Config.batch_size
        for k in range(Config.epochs):
            total_loss = 0
            batch = get_batch(inputs, labels)
            for i in range(batch_num):
                input_batch, label_batch = next(batch)
                feed = {g.x: input_batch, g.y: label_batch}
                cost, _ = sess.run([g.mean_loss, g.train_op], feed_dict=feed)
                total_loss += cost
                if (k * batch_num + i) % 10 == 0:
                    # Periodic TensorBoard summary on the current batch.
                    rs = sess.run(merged, feed_dict=feed)
                    writer.add_summary(rs, k * batch_num + i)
            # BUG FIX: the dev generator was built from the *training* labels
            # (get_batch(dev_inputs, labels)); pair dev inputs with dev labels.
            dev_batch = get_batch(dev_inputs, dev_labels)
            for i in range(dev_num):
                dev_inputs_batch, dev_labels_batch = next(dev_batch)
                # NOTE(review): dev predictions are computed but never scored;
                # kept as-is to preserve existing behavior.
                preds = sess.run(g.preds, {g.x: dev_inputs_batch})
            print('epochs', k+1, ': average loss = ', total_loss/batch_num)
            saver.save(sess, Config.model_path)
        writer.close()
def test():
    """Interactive pinyin -> hanzi demo: restore the model, decode user input.

    Reads space-separated pinyin from stdin until the user types 'exit'.
    """
    Config.is_training = False
    _, _ = read_data('train')  # side effect: populates Config vocab sizes
    pny2id, id2pny, han2id, id2han = read_dict()
    g = Graph()
    config = tf.ConfigProto(log_device_placement=True)
    saver = tf.train.Saver()
    with tf.Session(config=config) as sess:
        ckpt = tf.train.latest_checkpoint(Config.model_dir)
        if ckpt != None:
            print("正在恢复模型")
            saver.restore(sess, ckpt)
        while True:
            raw = input('输入测试拼音: ')
            if raw == 'exit':
                break
            pnys = raw.strip('\n').split(' ')
            ids = np.array([pny2id[p] for p in pnys]).reshape(1, -1)
            preds = sess.run(g.preds, {g.x: ids})
            print(''.join(id2han[i] for i in preds[0]))
# Entry point: training is commented out; running the script launches the
# interactive decoding demo.
if __name__=="__main__":
    #train()
test() | language_model/CBHG_self.py | import os
import tqdm
import numpy as np
import tensorflow as tf
gpu_id='2'
class Config:
    """Paths and hyper-parameters shared by the CBHG training/eval code."""
    # Dataset splits ("key\tpinyin\thanzi" TSV files) and id-mapping dicts.
    train_path='/data/dataset/pinyin2hanzi/py2hz_train.tsv'
    hz2id_dict='/data/dataset/dict/hz2id_dict.txt'
    test_path='/data/dataset/pinyin2hanzi/py2hz_test.tsv'
    dev_path='/data/dataset/pinyin2hanzi/py2hz_dev.tsv'
    py2id_dict='/data/dataset/dict/py2id_dict.txt'
    # Checkpoint / TensorBoard output.
    model_dir='log/CBHG_model/'
    model_name='model'
    model_path=model_dir+model_name
    board_path='tensorboard/CBHG'
    # Model hyper-parameters.
    embed_size = 300
    num_highwaynet_blocks = 4
    encoder_num_banks = 8
    lr = 0.001
    is_training = True
    # Training schedule.
    epochs = 25
    batch_size = 256
    # NOTE: pny_size / hanzi_size are attached at runtime by read_dict().
def read_dict():
    """
    Load the pinyin and hanzi vocabularies from the dict files in Config.
    Also records the vocabulary sizes on Config (pny_size / hanzi_size).

    return: pny2idx, idx2pny, hanzi2idx, idx2hanzi lookup dictionaries
    """
    pny2idx={}
    hanzi2idx={}
    idx2pny={}
    idx2hanzi={}
    # Each line of both files is "<token>\t<id>".
    with open(Config.hz2id_dict,'r',encoding='utf-8') as file:
        for line in file:
            hanzi,idx=line.strip('\n').split('\t')
            hanzi2idx[hanzi]=int(idx.strip())
            idx2hanzi[int(idx.strip())]=hanzi.strip()
    with open(Config.py2id_dict,'r',encoding='utf-8') as file:
        for line in file:
            pny,idx=line.strip('\n').split('\t')
            pny2idx[pny]=int(idx.strip())
            idx2pny[int(idx.strip())]=pny.strip()
    Config.pny_size=len(pny2idx)
    Config.hanzi_size=len(hanzi2idx)
    return pny2idx,idx2pny,hanzi2idx,idx2hanzi
def read_data(type):
    """
    Read one dataset split ('train' / 'test' / 'dev') from the path in
    Config and convert its tokens to id sequences.

    return: input_num -> [[pinyin ids per sentence], ...]
            label_num -> [[hanzi ids per sentence], ...]
    """
    inputs=[]
    labels=[]
    if type=='train':
        data_path=Config.train_path
    elif type=='test':
        data_path=Config.test_path
    elif type=='dev':
        data_path=Config.dev_path
    else:
        raise Exception("Invalid type!", type)
    with open(data_path,'r',encoding='utf-8') as file:
        for line in file:
            # Each line is "<key>\t<pinyin tokens>\t<hanzi tokens>".
            key,pny,hanzi=line.strip('\n').strip().split('\t')
            pnys=pny.strip().split(' ')
            hanzis=hanzi.strip().split(' ')
            # pinyin and hanzi sequences must align one-to-one
            assert len(pnys)==len(hanzis)
            inputs.append(pnys)
            labels.append(hanzis)
    pny2idx,idx2pny,hanzi2idx,idx2hanzi=read_dict()
    input_num = [[pny2idx[pny] for pny in line ] for line in inputs]
    label_num = [[hanzi2idx[han] for han in line] for line in labels]
    return input_num,label_num
def get_batch(inputs, labels):
    """Generator of padded id batches.

    Yields (input_batch, label_batch) numpy arrays of shape
    [Config.batch_size, max_len], where max_len is the longest input
    sequence in the batch; shorter rows are right-padded with 0.  Any
    trailing partial batch is discarded.
    """
    bs = Config.batch_size
    for start in range(0, (len(inputs) // bs) * bs, bs):
        xs = inputs[start:start + bs]
        ys = labels[start:start + bs]
        longest = max(len(row) for row in xs)
        yield (np.array([row + [0] * (longest - len(row)) for row in xs]),
               np.array([row + [0] * (longest - len(row)) for row in ys]))
def embed(inputs, vocab_size, num_units, zero_pad=True, scope="embedding", reuse=None):
    """Trainable embedding lookup for integer id tensors.

    With zero_pad, row 0 of the table is forced to zeros so padding id 0
    contributes nothing.  Returns inputs.shape + [num_units] floats.
    """
    with tf.variable_scope(scope, reuse=reuse):
        lookup_table = tf.get_variable('lookup_table',
                                       dtype=tf.float32,
                                       shape=[vocab_size, num_units],
                                       initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01))
        if zero_pad:
            # replace row 0 with a constant zero vector
            lookup_table = tf.concat((tf.zeros(shape=[1, num_units]),
                                      lookup_table[1:, :]), 0)
        return tf.nn.embedding_lookup(lookup_table, inputs)
def prenet(inputs, num_units=None, is_training=True, scope="prenet", reuse=None, dropout_rate=0.2):
    '''
    Two (dense -> ReLU -> dropout) stages applied position-wise.
    inputs: batch_size*length*embed_size
    return: batch_size*length*num_units[1]; dropout active only in training.
    '''
    with tf.variable_scope(scope, reuse=reuse):
        outputs = tf.layers.dense(inputs, units=num_units[0], activation=tf.nn.relu, name="dense1")
        outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=is_training, name="dropout1")
        outputs = tf.layers.dense(outputs, units=num_units[1], activation=tf.nn.relu, name="dense2")
        outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=is_training, name="dropout2")
        return outputs  # (N, ..., num_units[1])
def conv1d(inputs,filters=None, size=1,rate=1, padding="SAME",use_bias=False,activation_fn=None, scope="conv1d", reuse=None):
    '''1-D convolution wrapper with optional causal padding.

    Args:
        inputs: A 3-D tensor with shape of [batch, time, depth].
        filters: An int. Number of outputs (=activation maps); defaults to
            the input depth when None.
        size: An int. Filter size.
        rate: An int. Dilation rate.
        padding: Either `same` or `valid` or `causal` (case-insensitive).
        use_bias: A boolean.
        activation_fn: Optional activation applied by the conv layer.
        scope: Optional scope for `variable_scope`.
        reuse: Boolean, whether to reuse the weights of a previous layer
            by the same name.
    Returns:
        A tensor of shape [batch, time, filters].
    '''
    with tf.variable_scope(scope):
        if padding.lower() == "causal":
            # pre-padding for causality: left-pad so each output step only
            # sees current/past input, then convolve "valid".
            pad_len = (size - 1) * rate  # padding size
            inputs = tf.pad(inputs, [[0, 0], [pad_len, 0], [0, 0]])
            padding = "valid"
        if filters is None:
            # BUG FIX: `as_list` is a method -- the original wrote
            # `as_list[-1]`, indexing the bound method object (TypeError).
            filters = inputs.get_shape().as_list()[-1]
        params = {"inputs": inputs, "filters": filters, "kernel_size": size,
                  "dilation_rate": rate, "padding": padding, "activation": activation_fn,
                  "use_bias": use_bias, "reuse": reuse}
        outputs = tf.layers.conv1d(**params)
        return outputs
def conv1d_banks(inputs, num_units=None, K=16, is_training=True, scope="conv1d_banks", reuse=None):
    '''Bank of conv1d layers with kernel sizes 1..K, outputs concatenated.

    NOTE(review): defined twice in this file with identical bodies; the
    later definition shadows this one at import time.

    Args:
        inputs: A 3d tensor with shape of [N, T, C]
        K: An int. The size of conv1d banks. That is,
            The `inputs` are convolved with K filters: 1, 2, ..., K.
        is_training: A boolean. This is passed to an argument of `batch_normalize`.
    Returns:
        A 3d tensor with shape of [N, T, K*Hp.embed_size//2].
    '''
    with tf.variable_scope(scope, reuse=reuse):
        outputs = conv1d(inputs, num_units // 2, 1)  # k=1
        for k in range(2, K + 1):  # k = 2...K
            with tf.variable_scope("num_{}".format(k)):
                output = conv1d(inputs, num_units, k)
                outputs = tf.concat((outputs, output), -1)
        outputs = normalize(outputs, is_training=is_training,
                            activation_fn=tf.nn.relu)
    return outputs  # (N, T, Hp.embed_size//2*K)
def conv1d_banks(inputs, num_units=None, K=16, is_training=True, scope="conv1d_banks", reuse=None):
    '''Bank of conv1d layers, kernel sizes 1..K, concatenated then normalized.

    NOTE(review): exact duplicate of the definition above; this later one is
    the definition actually in effect at runtime.

    Args:
        inputs: A 3d tensor with shape of [N, T, C]
        K: An int. The size of conv1d banks. That is,
            The `inputs` are convolved with K filters: 1, 2, ..., K.
        is_training: A boolean. This is passed to an argument of `batch_normalize`.
    Returns:
        A 3d tensor with shape of [N, T, K*Hp.embed_size//2].
    '''
    with tf.variable_scope(scope, reuse=reuse):
        outputs = conv1d(inputs, num_units // 2, 1)  # k=1
        for k in range(2, K + 1):  # k = 2...K
            with tf.variable_scope("num_{}".format(k)):
                output = conv1d(inputs, num_units, k)
                outputs = tf.concat((outputs, output), -1)
        outputs = normalize(outputs, is_training=is_training,
                            activation_fn=tf.nn.relu)
    return outputs  # (N, T, Hp.embed_size//2*K)
def gru(inputs, num_units=None, bidirection=False, seqlen=None, scope="gru", reuse=None):
    '''Applies a GRU (optionally bidirectional).

    Args:
      inputs: A 3d tensor with shape of [N, T, C].
      num_units: An int. The number of hidden units. Defaults to the size of
        the last dimension of `inputs`.
      bidirection: A boolean. If True, forward and backward outputs
        are concatenated on the last axis.
      seqlen: Optional int32 tensor of true sequence lengths, forwarded to
        the dynamic RNN so padding steps are skipped.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.
    Returns:
      If bidirection is True, a 3d tensor with shape of [N, T, 2*num_units],
      otherwise [N, T, num_units].
    '''
    with tf.variable_scope(scope, reuse=reuse):
        if num_units is None:
            # BUG FIX: `as_list` is a method; the original referenced it
            # without calling it, making num_units a bound method object
            # instead of the channel size.
            num_units = inputs.get_shape().as_list()[-1]
        cell = tf.contrib.rnn.GRUCell(num_units)
        if bidirection:
            cell_bw = tf.contrib.rnn.GRUCell(num_units)
            outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell, cell_bw, inputs,
                                                         sequence_length=seqlen,
                                                         dtype=tf.float32)
            return tf.concat(outputs, 2)
        else:
            outputs, _ = tf.nn.dynamic_rnn(cell, inputs,
                                           sequence_length=seqlen,
                                           dtype=tf.float32)
            return outputs
def highwaynet(inputs, num_units=None, scope="highwaynet", reuse=None):
    '''Single highway-network block, see https://arxiv.org/abs/1505.00387

    Output = H(x) * T(x) + x * (1 - T(x)), where H is a ReLU dense layer and
    T is a sigmoid gate (bias initialized to -1 so the block initially passes
    the input through).

    Args:
      inputs: A 3D tensor of shape [N, T, W].
      num_units: An int or `None`. Number of units in the highway layer;
        defaults to the input size when falsy.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.
    Returns:
      A 3D tensor of shape [N, T, W].
    '''
    if not num_units:
        num_units = inputs.get_shape()[-1]
    with tf.variable_scope(scope, reuse=reuse):
        transform = tf.layers.dense(inputs, units=num_units, activation=tf.nn.relu, name="dense1")
        gate = tf.layers.dense(inputs, units=num_units, activation=tf.nn.sigmoid,
                               bias_initializer=tf.constant_initializer(-1.0), name="dense2")
        carry = 1. - gate
        return transform * gate + inputs * carry
def normalize(inputs,
              decay=.99,
              epsilon=1e-8,
              is_training=True,
              activation_fn=None,
              reuse=None,
              scope="normalize"):
    '''Applies fused batch normalization over all but the last dimension.

    The input is temporarily expanded with a singleton axis at position 1 so
    the fast fused NHWC batch-norm kernel can be used (a rank-3 [N, T, C]
    input becomes [N, 1, T, C]); statistics are therefore computed per
    channel over all other axes, and the extra axis is squeezed out again.

    Args:
      inputs: A tensor with 2 or more dimensions, where the first dimension
        has `batch_size`.
      decay: Decay for the moving average. Reasonable values for `decay` are
        close to 1.0, typically in the multiple-nines range: 0.999, 0.99,
        0.9, etc. Lower `decay` (try 0.9) if the model trains well but
        validates/tests poorly.
      epsilon: NOTE(review): currently unused -- `batch_norm` runs with its
        default epsilon.
      is_training: Whether or not the layer is in training mode.
      activation_fn: Optional activation applied to the normalized output.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.
      scope: Optional scope for `variable_scope`.
    Returns:
      A tensor with the same shape and data dtype as `inputs`.
    '''
    inputs_shape = inputs.get_shape()
    inputs_rank = inputs_shape.ndims  # NOTE(review): computed but unused.
    # use fused batch norm if inputs_rank in [2, 3, 4] as it is much faster.
    # pay attention to the fact that fused_batch_norm requires shape to be rank 4 of NHWC.
    inputs = tf.expand_dims(inputs, axis=1)
    outputs = tf.contrib.layers.batch_norm(inputs=inputs,
                                           decay=decay,
                                           center=True,
                                           scale=True,
                                           updates_collections=None,
                                           is_training=is_training,
                                           scope=scope,
                                           zero_debias_moving_mean=True,
                                           fused=True,
                                           reuse=reuse)
    # Drop the singleton axis added above to restore the input shape.
    outputs = tf.squeeze(outputs, axis=1)
    if activation_fn:
        outputs = activation_fn(outputs)
    return outputs
class Graph():
    '''Builds the pinyin-to-hanzi model graph.

    Pipeline: character embedding -> prenet -> CBHG-style encoder (conv1d
    banks, max pooling, conv1d projections with a residual connection,
    highway blocks, bidirectional GRU) -> dense readout over the hanzi
    vocabulary. Loss, accuracy, optimizer and summaries are only built when
    Config.is_training is truthy.
    '''
    def __init__(self):
        tf.reset_default_graph()
        # Hyper-parameters come from the module-level Config object.
        self.pny_size = Config.pny_size
        self.han_size = Config.hanzi_size
        self.embed_size = Config.embed_size
        self.is_training = Config.is_training
        self.num_highwaynet_blocks = Config.num_highwaynet_blocks
        self.encoder_num_banks = Config.encoder_num_banks
        self.lr = Config.lr
        # x: pinyin ids, y: hanzi ids; both (batch, time), padded with 0.
        self.x = tf.placeholder(tf.int32, shape=(None, None))
        self.y = tf.placeholder(tf.int32, shape=(None, None))
        # Character Embedding for x
        enc = embed(self.x, self.pny_size, self.embed_size, scope="emb_x")
        # Encoder pre-net
        prenet_out = prenet(enc,
                            num_units=[self.embed_size, self.embed_size // 2],
                            is_training=self.is_training)  # (N, T, E/2)
        # Encoder CBHG
        ## Conv1D bank
        enc = conv1d_banks(prenet_out,
                           K=self.encoder_num_banks,
                           num_units=self.embed_size // 2,
                           is_training=self.is_training)  # (N, T, K * E / 2)
        ## Max pooling
        enc = tf.layers.max_pooling1d(enc, 2, 1, padding="same")  # (N, T, K * E / 2)
        ## Conv1D projections
        enc = conv1d(enc, self.embed_size // 2, 5, scope="conv1d_1")  # (N, T, E/2)
        enc = normalize(enc, is_training=self.is_training,
                        activation_fn=tf.nn.relu, scope="norm1")
        enc = conv1d(enc, self.embed_size // 2, 5, scope="conv1d_2")  # (N, T, E/2)
        enc = normalize(enc, is_training=self.is_training,
                        activation_fn=None, scope="norm2")
        enc += prenet_out  # (N, T, E/2) # residual connections
        ## Highway Nets
        for i in range(self.num_highwaynet_blocks):
            enc = highwaynet(enc, num_units=self.embed_size // 2,
                             scope='highwaynet_{}'.format(i))  # (N, T, E/2)
        ## Bidirectional GRU
        enc = gru(enc, self.embed_size // 2, True, scope="gru1")  # (N, T, E)
        ## Readout
        self.outputs = tf.layers.dense(enc, self.han_size, use_bias=False)
        self.preds = tf.to_int32(tf.argmax(self.outputs, axis=-1))
        if self.is_training:
            # Per-token cross entropy; positions where y == 0 (padding) are
            # masked out of loss and accuracy via `istarget`.
            self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=self.outputs)
            self.istarget = tf.to_float(tf.not_equal(self.y, tf.zeros_like(self.y)))  # masking
            self.hits = tf.to_float(tf.equal(self.preds, self.y)) * self.istarget
            self.acc = tf.reduce_sum(self.hits) / tf.reduce_sum(self.istarget)
            self.mean_loss = tf.reduce_sum(self.loss * self.istarget) / tf.reduce_sum(self.istarget)
            # Training Scheme
            self.global_step = tf.Variable(0, name='global_step', trainable=False)
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
            self.train_op = self.optimizer.minimize(self.mean_loss, global_step=self.global_step)
            # Summary
            tf.summary.scalar('mean_loss', self.mean_loss)
            tf.summary.scalar('acc', self.acc)
            self.merged = tf.summary.merge_all()
def train():
    """Train the pinyin->hanzi graph, checkpointing and logging summaries.

    Resumes from the latest checkpoint in Config.model_dir when one exists;
    writes TensorBoard summaries every 10 steps and saves a checkpoint after
    every epoch.
    """
    inputs, labels = read_data('train')
    dev_inputs, dev_labels = read_data('dev')
    g = Graph()
    config = tf.ConfigProto(log_device_placement=True)
    saver = tf.train.Saver()
    with tf.Session(config=config) as sess:
        merged = tf.summary.merge_all()
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.latest_checkpoint(Config.model_dir)
        if ckpt is not None:
            print("正在恢复模型")
            saver.restore(sess, ckpt)
        writer = tf.summary.FileWriter(Config.board_path, tf.get_default_graph())
        batch_num = len(inputs) // Config.batch_size
        dev_num = len(dev_inputs) // Config.batch_size
        for k in range(Config.epochs):
            total_loss = 0
            batch = get_batch(inputs, labels)
            for i in range(batch_num):
                input_batch, label_batch = next(batch)
                feed = {g.x: input_batch, g.y: label_batch}
                cost, _ = sess.run([g.mean_loss, g.train_op], feed_dict=feed)
                total_loss += cost
                if (k * batch_num + i) % 10 == 0:
                    rs = sess.run(merged, feed_dict=feed)
                    writer.add_summary(rs, k * batch_num + i)
            # BUG FIX: was get_batch(dev_inputs, labels) -- pairing dev
            # inputs with the *training* labels.
            dev_batch = get_batch(dev_inputs, dev_labels)
            for i in range(dev_num):
                dev_inputs_batch, dev_labels_batch = next(dev_batch)
                # NOTE(review): predictions are computed but never scored;
                # consider accumulating dev accuracy against dev_labels_batch.
                preds = sess.run(g.preds, {g.x: dev_inputs_batch})
            print('epochs', k + 1, ': average loss = ', total_loss / batch_num)
            saver.save(sess, Config.model_path)
        writer.close()
def test():
    """Interactive demo: read a space-separated pinyin line from stdin and
    print the predicted hanzi string, until the user types 'exit'."""
    # Build the graph in inference mode (no loss/optimizer ops).
    Config.is_training=False
    # Called for its side effects only -- presumably it (re)builds the
    # vocabulary used by read_dict(); confirm against read_data.
    _,_=read_data('train')
    pny2id,id2pny,han2id,id2han=read_dict()
    g=Graph()
    config=tf.ConfigProto(log_device_placement=True)
    saver =tf.train.Saver()
    with tf.Session(config=config) as sess:
        ckpt=tf.train.latest_checkpoint(Config.model_dir)
        if ckpt!=None:
            print("正在恢复模型")
            saver.restore(sess, ckpt)
        while True:
            line = input('输入测试拼音: ')
            if line == 'exit': break
            line = line.strip('\n').split(' ')
            # NOTE(review): pny2id[pny] raises KeyError for out-of-vocabulary
            # pinyin; consider a .get() with an <unk> id.
            x = np.array([pny2id[pny] for pny in line])
            x = x.reshape(1, -1)  # single-example batch: (1, T)
            preds = sess.run(g.preds, {g.x: x})
            got = ''.join(id2han[idx] for idx in preds[0])
            print(got)
if __name__=="__main__":
    # Entry point: runs the interactive demo; uncomment train() (and comment
    # out test() below) to retrain instead.
    #train()
test() | 0.608361 | 0.268797 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def median_filter(x, y, num_bins, bin_width=None, x_min=None, x_max=None):
  """Bins (x, y) pairs along the x-axis and returns the per-bin median of y.

  num_bins bins of width bin_width are laid out so the first bin starts at
  x_min and the last ends at x_max; consecutive bin starts are spaced
  (x_max - x_min - bin_width) / (num_bins - 1) apart, so bins may overlap or
  leave gaps depending on bin_width. Each bin's value is the median of every
  y whose x lies in [bin_start, bin_start + bin_width); bins that capture no
  points fall back to the median of all of y.

  NOTE: x must be sorted in ascending order or the results will be incorrect.

  Args:
    x: 1D array of x-coordinates sorted in ascending order. Must have at
      least 2 elements, and all elements cannot be the same value.
    y: 1D array of y-coordinates with the same size as x.
    num_bins: The number of intervals to divide the x-axis into. Must be at
      least 2.
    bin_width: The width of each bin on the x-axis. Must be positive, and
      less than x_max - x_min. Defaults to (x_max - x_min) / num_bins.
    x_min: The inclusive leftmost value to consider on the x-axis. Must be
      less than or equal to the largest value of x. Defaults to min(x).
    x_max: The exclusive rightmost value to consider on the x-axis. Must be
      greater than x_min. Defaults to max(x).

  Returns:
    1D NumPy array of size num_bins containing the median y-values of
    uniformly spaced bins on the x-axis.

  Raises:
    ValueError: If an argument has an inappropriate value.
  """
  if num_bins < 2:
    raise ValueError("num_bins must be at least 2. Got: %d" % num_bins)

  n = len(x)
  if n < 2:
    raise ValueError("len(x) must be at least 2. Got: %s" % n)
  if n != len(y):
    raise ValueError("len(x) (got: %d) must equal len(y) (got: %d)" % (n,
                                                                       len(y)))

  if x_min is None:
    x_min = x[0]
  if x_max is None:
    x_max = x[-1]
  if x_min >= x_max:
    raise ValueError("x_min (got: %d) must be less than x_max (got: %d)" %
                     (x_min, x_max))
  if x_min > x[-1]:
    raise ValueError(
        "x_min (got: %d) must be less than or equal to the largest value of x "
        "(got: %d)" % (x_min, x[-1]))

  if bin_width is None:
    bin_width = (x_max - x_min) / num_bins
  if bin_width <= 0:
    raise ValueError("bin_width must be positive. Got: %d" % bin_width)
  if bin_width >= x_max - x_min:
    raise ValueError(
        "bin_width (got: %d) must be less than x_max - x_min (got: %d)" %
        (bin_width, x_max - x_min))

  bin_spacing = (x_max - x_min - bin_width) / (num_bins - 1)

  # Bins that end up empty fall back to the global median.
  medians = np.repeat(np.median(y), num_bins)

  # Index of the first point at or beyond x_min; this scan is guaranteed to
  # terminate because x_min <= x[-1] was checked above.
  first = 0
  while x[first] < x_min:
    first += 1

  # Sweep forward through x, maintaining [lo, hi) as the index window whose
  # x-values lie in [left, right) for the current bin. Both endpoints only
  # ever move rightward because x is sorted and the bins advance.
  left = x_min
  right = x_min + bin_width
  lo = first
  hi = first
  for i in range(num_bins):
    while lo < n and x[lo] < left:
      lo += 1
    while hi < n and x[hi] < right:
      hi += 1
    if lo < hi:
      medians[i] = np.median(y[lo:hi])
    left += bin_spacing
    right += bin_spacing

  return medians
from __future__ import division
from __future__ import print_function
import numpy as np
def median_filter(x, y, num_bins, bin_width=None, x_min=None, x_max=None):
"""Computes the median y-value in uniform intervals (bins) along the x-axis.
The interval [x_min, x_max) is divided into num_bins uniformly spaced
intervals of width bin_width. The value computed for each bin is the median
of all y-values whose corresponding x-value is in the interval.
NOTE: x must be sorted in ascending order or the results will be incorrect.
Args:
x: 1D array of x-coordinates sorted in ascending order. Must have at least 2
elements, and all elements cannot be the same value.
y: 1D array of y-coordinates with the same size as x.
num_bins: The number of intervals to divide the x-axis into. Must be at
least 2.
bin_width: The width of each bin on the x-axis. Must be positive, and less
than x_max - x_min. Defaults to (x_max - x_min) / num_bins.
x_min: The inclusive leftmost value to consider on the x-axis. Must be less
than or equal to the largest value of x. Defaults to min(x).
x_max: The exclusive rightmost value to consider on the x-axis. Must be
greater than x_min. Defaults to max(x).
Returns:
1D NumPy array of size num_bins containing the median y-values of uniformly
spaced bins on the x-axis.
Raises:
ValueError: If an argument has an inappropriate value.
"""
if num_bins < 2:
raise ValueError("num_bins must be at least 2. Got: %d" % num_bins)
# Validate the lengths of x and y.
x_len = len(x)
if x_len < 2:
raise ValueError("len(x) must be at least 2. Got: %s" % x_len)
if x_len != len(y):
raise ValueError("len(x) (got: %d) must equal len(y) (got: %d)" % (x_len,
len(y)))
# Validate x_min and x_max.
x_min = x_min if x_min is not None else x[0]
x_max = x_max if x_max is not None else x[-1]
if x_min >= x_max:
raise ValueError("x_min (got: %d) must be less than x_max (got: %d)" %
(x_min, x_max))
if x_min > x[-1]:
raise ValueError(
"x_min (got: %d) must be less than or equal to the largest value of x "
"(got: %d)" % (x_min, x[-1]))
# Validate bin_width.
bin_width = bin_width if bin_width is not None else (x_max - x_min) / num_bins
if bin_width <= 0:
raise ValueError("bin_width must be positive. Got: %d" % bin_width)
if bin_width >= x_max - x_min:
raise ValueError(
"bin_width (got: %d) must be less than x_max - x_min (got: %d)" %
(bin_width, x_max - x_min))
bin_spacing = (x_max - x_min - bin_width) / (num_bins - 1)
# Bins with no y-values will fall back to the global median.
result = np.repeat(np.median(y), num_bins)
# Find the first element of x >= x_min. This loop is guaranteed to produce
# a valid index because we know that x_min <= x[-1].
x_start = 0
while x[x_start] < x_min:
x_start += 1
# The bin at index i is the median of all elements y[j] such that
# bin_min <= x[j] < bin_max, where bin_min and bin_max are the endpoints of
# bin i.
bin_min = x_min # Left endpoint of the current bin.
bin_max = x_min + bin_width # Right endpoint of the current bin.
j_start = x_start # Inclusive left index of the current bin.
j_end = x_start # Exclusive end index of the current bin.
for i in range(num_bins):
# Move j_start to the first index of x >= bin_min.
while j_start < x_len and x[j_start] < bin_min:
j_start += 1
# Move j_end to the first index of x >= bin_max (exclusive end index).
while j_end < x_len and x[j_end] < bin_max:
j_end += 1
if j_end > j_start:
# Compute and insert the median bin value.
result[i] = np.median(y[j_start:j_end])
# Advance the bin.
bin_min += bin_spacing
bin_max += bin_spacing
return result | 0.909459 | 0.691758 |
import sys
import argparse
import time
import threading
import os
import logging
import power_reader
from argparse import RawTextHelpFormatter
scriptLocation = os.path.dirname(os.path.realpath(__file__))
def rebootThread(waitTime, stop_event):
    """Watchdog thread body: wait waitTime seconds, then reboot the machine.

    If stop_event is set before the wait elapses (power conditions
    recovered), the reboot is cancelled and the thread exits.
    """
    logging.debug('Starting reboot thread, booting in ' + str(waitTime) + ' seconds if no change in conditions...')
    stop_event.wait(waitTime)
    # BUG FIX: Event.isSet() is a deprecated camelCase alias (removed in
    # recent Python 3); use is_set().
    if stop_event.is_set():
        logging.warning('Critical thread Reboot cancelled')
        return
    logging.warning('Booting...')
    os.system('reboot --reboot')
parser = argparse.ArgumentParser(description='\
This script is used to check if GPU cards are spending "enough" power.\n\
If not, reboot is tried as recovery action.\n',
formatter_class=RawTextHelpFormatter)
parser.add_argument("--interval", help='How often(minutes) to check the power consumption. default 1', default=1, type=int)
parser.add_argument("--wait_reboot", help='How long to wait before rebooting after problems detected default 10', default=10, type=int)
parser.add_argument("--total_power", help='Minimum acceptable total power consumption(W) default 1000', default=1000, type=int)
parser.add_argument("--one_gpu_power", help='Minimum acceptable total power consumption(W) in one GPU(0=disbled), default 0', default=0, type=int)
parser.add_argument("-v", "--verbose", action="store_true", help="Print debug logs")
parser.add_argument("-l", "--log_to_file", action="store_true", help="Print to file, instead of stdout")
args = parser.parse_args()
debug = args.verbose
interval = args.interval
waitReboot = args.wait_reboot
totalPower = args.total_power
oneGpuPower = args.one_gpu_power
logToFile = args.log_to_file
loggingLevel=None
if(debug):
loggingLevel = logging.DEBUG
else:
loggingLevel = logging.INFO
if(logToFile):
logging.basicConfig(
filename=scriptLocation + '/' + os.path.splitext(os.path.basename(__file__))[0] + '.log',
level=loggingLevel,
format='%(asctime)s %(message)s')
else:
logging.basicConfig(level=loggingLevel)
logging.info('Checking power consumption in every ' + str(interval) + ' minutes')
logging.info('Booting computer if total power is below ' + str(totalPower) + 'W')
if(oneGpuPower):
logging.info('Booting computer if one GPU has power below ' + str(oneGpuPower) + 'W')
reboot_thread_stop = None
reboot_thread = None
def cancelRebootThread():
    """Signal a pending reboot watchdog thread (if any) to abort."""
    global reboot_thread
    global reboot_thread_stop
    # BUG FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
    if(reboot_thread and reboot_thread.is_alive()):
        reboot_thread_stop.set()
def startRebootThread():
    """Start the reboot watchdog thread unless one is already pending."""
    global reboot_thread
    global reboot_thread_stop
    # BUG FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
    if(reboot_thread and reboot_thread.is_alive()):
        logging.debug('Reboot thread seems to be running already')
    else:
        rebootTimeSeconds = waitReboot*60
        reboot_thread_stop = threading.Event()
        reboot_thread = threading.Thread(target=rebootThread, args=(rebootTimeSeconds, reboot_thread_stop))
        reboot_thread.start()
try:
while(1):
error, gpuPowerArray = power_reader.getGpuPowerArray()
totalPowerUsed = 0
oneGpuProblem = False
totalProblem = False
if(error != 0):
logging.error('Could not read GPU powers, stack might be stuck')
startRebootThread()
else:
for gpuPower in gpuPowerArray:
if(oneGpuPower > 0 and gpuPower < oneGpuPower):
logging.warning('One GPU is using too little power (' + str(gpuPower) + 'W)')
oneGpuProblem = True
totalPowerUsed += gpuPower
logging.debug('Total power: ' + str(totalPowerUsed))
if(totalPowerUsed < totalPower):
logging.warning('total power usage is too little (' + str(totalPowerUsed) + 'W)')
totalProblem = True
if(totalProblem or oneGpuProblem):
startRebootThread()
else:
cancelRebootThread()
time.sleep(interval*60)
except KeyboardInterrupt as e:
cancelRebootThread()
raise e | power-supervisor.py |
import sys
import argparse
import time
import threading
import os
import logging
import power_reader
from argparse import RawTextHelpFormatter
scriptLocation = os.path.dirname(os.path.realpath(__file__))
def rebootThread(waitTime, stop_event):
    """Watchdog thread body: wait waitTime seconds, then reboot the machine.

    If stop_event is set before the wait elapses (power conditions
    recovered), the reboot is cancelled and the thread exits.
    """
    logging.debug('Starting reboot thread, booting in ' + str(waitTime) + ' seconds if no change in conditions...')
    stop_event.wait(waitTime)
    # BUG FIX: Event.isSet() is a deprecated camelCase alias (removed in
    # recent Python 3); use is_set().
    if stop_event.is_set():
        logging.warning('Critical thread Reboot cancelled')
        return
    logging.warning('Booting...')
    os.system('reboot --reboot')
parser = argparse.ArgumentParser(description='\
This script is used to check if GPU cards are spending "enough" power.\n\
If not, reboot is tried as recovery action.\n',
formatter_class=RawTextHelpFormatter)
parser.add_argument("--interval", help='How often(minutes) to check the power consumption. default 1', default=1, type=int)
parser.add_argument("--wait_reboot", help='How long to wait before rebooting after problems detected default 10', default=10, type=int)
parser.add_argument("--total_power", help='Minimum acceptable total power consumption(W) default 1000', default=1000, type=int)
parser.add_argument("--one_gpu_power", help='Minimum acceptable total power consumption(W) in one GPU(0=disbled), default 0', default=0, type=int)
parser.add_argument("-v", "--verbose", action="store_true", help="Print debug logs")
parser.add_argument("-l", "--log_to_file", action="store_true", help="Print to file, instead of stdout")
args = parser.parse_args()
debug = args.verbose
interval = args.interval
waitReboot = args.wait_reboot
totalPower = args.total_power
oneGpuPower = args.one_gpu_power
logToFile = args.log_to_file
loggingLevel=None
if(debug):
loggingLevel = logging.DEBUG
else:
loggingLevel = logging.INFO
if(logToFile):
logging.basicConfig(
filename=scriptLocation + '/' + os.path.splitext(os.path.basename(__file__))[0] + '.log',
level=loggingLevel,
format='%(asctime)s %(message)s')
else:
logging.basicConfig(level=loggingLevel)
logging.info('Checking power consumption in every ' + str(interval) + ' minutes')
logging.info('Booting computer if total power is below ' + str(totalPower) + 'W')
if(oneGpuPower):
logging.info('Booting computer if one GPU has power below ' + str(oneGpuPower) + 'W')
reboot_thread_stop = None
reboot_thread = None
def cancelRebootThread():
    """Signal a pending reboot watchdog thread (if any) to abort."""
    global reboot_thread
    global reboot_thread_stop
    # BUG FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
    if(reboot_thread and reboot_thread.is_alive()):
        reboot_thread_stop.set()
def startRebootThread():
    """Start the reboot watchdog thread unless one is already pending."""
    global reboot_thread
    global reboot_thread_stop
    # BUG FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
    if(reboot_thread and reboot_thread.is_alive()):
        logging.debug('Reboot thread seems to be running already')
    else:
        rebootTimeSeconds = waitReboot*60
        reboot_thread_stop = threading.Event()
        reboot_thread = threading.Thread(target=rebootThread, args=(rebootTimeSeconds, reboot_thread_stop))
        reboot_thread.start()
try:
while(1):
error, gpuPowerArray = power_reader.getGpuPowerArray()
totalPowerUsed = 0
oneGpuProblem = False
totalProblem = False
if(error != 0):
logging.error('Could not read GPU powers, stack might be stuck')
startRebootThread()
else:
for gpuPower in gpuPowerArray:
if(oneGpuPower > 0 and gpuPower < oneGpuPower):
logging.warning('One GPU is using too little power (' + str(gpuPower) + 'W)')
oneGpuProblem = True
totalPowerUsed += gpuPower
logging.debug('Total power: ' + str(totalPowerUsed))
if(totalPowerUsed < totalPower):
logging.warning('total power usage is too little (' + str(totalPowerUsed) + 'W)')
totalProblem = True
if(totalProblem or oneGpuProblem):
startRebootThread()
else:
cancelRebootThread()
time.sleep(interval*60)
except KeyboardInterrupt as e:
cancelRebootThread()
raise e | 0.233881 | 0.066206 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/data/gym/gym_battle.proto',
package='pogoprotos.data.gym',
syntax='proto3',
serialized_pb=_b('\n$pogoprotos/data/gym/gym_battle.proto\x12\x13pogoprotos.data.gym\"\\\n\tGymBattle\x12\x11\n\tbattle_id\x18\x01 \x01(\t\x12\x14\n\x0c\x63ompleted_ms\x18\x02 \x01(\x03\x12&\n\x1eincremented_gym_battle_friends\x18\x03 \x01(\x08\x62\x06proto3')
)
_GYMBATTLE = _descriptor.Descriptor(
name='GymBattle',
full_name='pogoprotos.data.gym.GymBattle',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='battle_id', full_name='pogoprotos.data.gym.GymBattle.battle_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='completed_ms', full_name='pogoprotos.data.gym.GymBattle.completed_ms', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='incremented_gym_battle_friends', full_name='pogoprotos.data.gym.GymBattle.incremented_gym_battle_friends', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=61,
serialized_end=153,
)
DESCRIPTOR.message_types_by_name['GymBattle'] = _GYMBATTLE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GymBattle = _reflection.GeneratedProtocolMessageType('GymBattle', (_message.Message,), dict(
DESCRIPTOR = _GYMBATTLE,
__module__ = 'pogoprotos.data.gym.gym_battle_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.data.gym.GymBattle)
))
_sym_db.RegisterMessage(GymBattle)
# @@protoc_insertion_point(module_scope) | pgoapi/protos/pogoprotos/data/gym/gym_battle_pb2.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/data/gym/gym_battle.proto',
package='pogoprotos.data.gym',
syntax='proto3',
serialized_pb=_b('\n$pogoprotos/data/gym/gym_battle.proto\x12\x13pogoprotos.data.gym\"\\\n\tGymBattle\x12\x11\n\tbattle_id\x18\x01 \x01(\t\x12\x14\n\x0c\x63ompleted_ms\x18\x02 \x01(\x03\x12&\n\x1eincremented_gym_battle_friends\x18\x03 \x01(\x08\x62\x06proto3')
)
_GYMBATTLE = _descriptor.Descriptor(
name='GymBattle',
full_name='pogoprotos.data.gym.GymBattle',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='battle_id', full_name='pogoprotos.data.gym.GymBattle.battle_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='completed_ms', full_name='pogoprotos.data.gym.GymBattle.completed_ms', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='incremented_gym_battle_friends', full_name='pogoprotos.data.gym.GymBattle.incremented_gym_battle_friends', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=61,
serialized_end=153,
)
DESCRIPTOR.message_types_by_name['GymBattle'] = _GYMBATTLE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GymBattle = _reflection.GeneratedProtocolMessageType('GymBattle', (_message.Message,), dict(
DESCRIPTOR = _GYMBATTLE,
__module__ = 'pogoprotos.data.gym.gym_battle_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.data.gym.GymBattle)
))
_sym_db.RegisterMessage(GymBattle)
# @@protoc_insertion_point(module_scope) | 0.160529 | 0.169922 |
import string
def translate(self, symbol_table):
    """Replace characters of `self` according to `symbol_table`.

    `symbol_table` may map single-character strings to replacement strings
    (e.g. {"|": "+-"}) or, as produced by str.maketrans, character ordinals
    to replacement ordinals/strings (None deletes the character). Unmapped
    characters pass through unchanged.

    BUG FIX: the original only looked characters up by string key, so tables
    built with str.maketrans (which uses int ordinal keys) were silently
    ignored and e.g. the '-' -> '=' header-separator translation never
    happened.
    """
    out = []
    for ch in self:
        if ch in symbol_table:
            repl = symbol_table[ch]
        elif ord(ch) in symbol_table:
            repl = symbol_table[ord(ch)]
        else:
            repl = ch
        if repl is None:
            continue  # maketrans maps deleted characters to None
        out.append(chr(repl) if isinstance(repl, int) else repl)
    return ''.join(out)
def as_rest_table(data, full=False):
    """Render `data` (first row = header) as a reStructuredText table.

    >>> data = [('what', 'how', 'who'),
    ...         ('lorem', 'that is a long value', 3.1415),
    ...         ('ipsum', 89798, 0.2)]
    >>> print(as_rest_table(data, full=True))
    +-------+----------------------+--------+
    | what  | how                  | who    |
    +=======+======================+========+
    | lorem | that is a long value | 3.1415 |
    +-------+----------------------+--------+
    | ipsum | 89798                | 0.2    |
    +-------+----------------------+--------+

    BUG FIX: the borders are now built directly instead of going through the
    broken `translate` port (which produced malformed edges like '+- ... +-'
    instead of '+-------+'), cells are stringified so numbers left-align as
    documented, and a header-only table no longer prints its header twice.

    Args:
        data: iterable of rows; the first row is the header. Falsy data
            renders a one-cell 'No Data' table.
        full: if True, draw a reST grid table with borders between all rows;
            otherwise a simple table.
    Returns:
        The formatted table as a single string.
    """
    rows = data if data else [['No Data']]
    # Stringify once so width computation and rendering agree (and so
    # numeric cells left-align like text, matching the docstring).
    cells = [[str(elt) for elt in row] for row in rows]
    # Column width = widest cell in that column.
    widths = [max(len(cell) for cell in col) for col in zip(*cells)]

    if full:
        def _edge(marker):
            # '+----+' style border; each column is padded by the ' | '
            # delimiters, hence width + 2.
            return '+' + '+'.join(marker * (w + 2) for w in widths) + '+'

        def _row(row):
            return '| ' + ' | '.join(c.ljust(w) for c, w in zip(row, widths)) + ' |'

        border = _edge('-')
        header_border = _edge('=')
    else:
        def _edge(marker):
            return ' '.join(marker * w for w in widths)

        def _row(row):
            return ' '.join(c.ljust(w) for c, w in zip(row, widths))

        border = _edge('=')
        header_border = border

    lines = [border, _row(cells[0]), header_border]
    for row in cells[1:-1]:
        lines.append(_row(row))
        if full:
            lines.append(border)
    if len(cells) > 1:
        lines.append(_row(cells[-1]))
        lines.append(border)
    return '\n'.join(lines)
import string
def translate(self, symbol_table):
    """Replace characters of `self` according to `symbol_table`.

    `symbol_table` may map single-character strings to replacement strings
    (e.g. {"|": "+-"}) or, as produced by str.maketrans, character ordinals
    to replacement ordinals/strings (None deletes the character). Unmapped
    characters pass through unchanged.

    BUG FIX: the original only looked characters up by string key, so tables
    built with str.maketrans (which uses int ordinal keys) were silently
    ignored and e.g. the '-' -> '=' header-separator translation never
    happened.
    """
    out = []
    for ch in self:
        if ch in symbol_table:
            repl = symbol_table[ch]
        elif ord(ch) in symbol_table:
            repl = symbol_table[ord(ch)]
        else:
            repl = ch
        if repl is None:
            continue  # maketrans maps deleted characters to None
        out.append(chr(repl) if isinstance(repl, int) else repl)
    return ''.join(out)
def as_rest_table(data, full=False):
    """Render *data* (an iterable of equal-length rows) as a reST table.

    The first row is treated as the header.  With ``full=True`` a grid
    table (``+---+`` borders) is produced; otherwise a simple table
    (``===`` rules).  Falsy *data* yields a one-cell "No Data" table.

    >>> data = [('what', 'how', 'who'),
    ...         ('lorem', 'that is a long value', 3.1415),
    ...         ('ipsum', 89798, 0.2)]
    >>> print(as_rest_table(data, full=True))
    +-------+----------------------+--------+
    | what  | how                  | who    |
    +=======+======================+========+
    | lorem | that is a long value | 3.1415 |
    +-------+----------------------+--------+
    | ipsum | 89798                | 0.2    |
    +-------+----------------------+--------+
    """
    data = data if data else [['No Data']]
    table = []
    # Column widths: longest cell (rendered as text) in each column.
    sizes = list(map(max, zip(*[[len(str(elt)) for elt in member]
                                for member in data])))
    num_elts = len(sizes)
    if full:
        start_of_line = '| '
        vertical_separator = ' | '
        end_of_line = ' |'
        line_marker = '-'
        # On grid-table separator lines both '|' and ' ' of the row
        # template become border characters.  (The old code passed
        # {'|': '+-'} / str.maketrans tables, whose keys never matched
        # the char-keyed translator, so borders came out wrong.)
        to_separator = {'|': '+', ' ': '-'}
    else:
        start_of_line = ''
        vertical_separator = ' '
        end_of_line = ''
        line_marker = '='
        to_separator = {'|': '+'}

    def tr(text, mapping):
        # Character-wise substitution; unmapped characters pass through.
        return ''.join(mapping.get(c, c) for c in text)

    # '{{{{{0}:{{{0}}}}}}}' expands to '{{0:{0}}}' — a placeholder whose
    # field width is itself filled in from ``sizes`` just below.
    meta_template = vertical_separator.join('{{{{{0}:{{{0}}}}}}}'.format(i)
                                            for i in range(num_elts))
    template = '{0}{1}{2}'.format(start_of_line,
                                  meta_template.format(*sizes),
                                  end_of_line)
    # Plain row separator, e.g. '+-------+------+' or '===== ======'.
    sep_start = tr(start_of_line, to_separator)
    sep_vertical = tr(vertical_separator, to_separator)
    sep_end = tr(end_of_line, to_separator)
    separator = '{0}{1}{2}'.format(sep_start,
                                   sep_vertical.join(width * line_marker
                                                     for width in sizes),
                                   sep_end)
    # Header separator: same shape with '-' promoted to '=' (reST rule).
    th_map = {'-': '='}
    th_separator = '{0}{1}{2}'.format(
        tr(sep_start, th_map),
        tr(sep_vertical, th_map).join(width * tr(line_marker, th_map)
                                      for width in sizes),
        tr(sep_end, th_map))

    def fmt(row):
        # str() each cell so numbers left-align like text, matching the
        # documented output (raw ints/floats would right-align).
        return template.format(*[str(cell) for cell in row])

    table.append(separator)
    table.append(fmt(data[0]))
    table.append(th_separator)
    if len(data) > 1:
        # (Guard fixes the old bug where single-row data emitted the
        # header a second time as a body row.)
        for row in data[1:-1]:
            table.append(fmt(row))
            if full:
                # Grid tables need a border after *every* body row, not
                # just before the last one.
                table.append(separator)
        table.append(fmt(data[-1]))
    table.append(separator)
    return '\n'.join(table)
import lxmls.readers.simple_data_set as sds
import lxmls.classifiers.linear_classifier as lcc
import lxmls.classifiers.naive_bayes as nbc
import lxmls.classifiers.perceptron as percc
import lxmls.classifiers.svm as svmc
import lxmls.classifiers.mira as mirac
import lxmls.classifiers.max_ent_batch as mec_batch
import lxmls.classifiers.max_ent_online as mec_online
def run_all_classifiers(dataset):
    """Train every classifier on *dataset*, print accuracies, plot boundaries.

    For each classifier: print its name, train it, print the learned
    parameters, print train/test accuracy, and add its decision boundary
    to the dataset's plot.
    """
    fig, axis = dataset.plot_data()

    def _evaluate_and_plot(fig, axis, title, label, color, classifier):
        # Train one classifier and report both accuracies.  Routing both
        # evaluations through the same ``classifier`` object fixes the old
        # copy/paste bug where the online MaxEnt's test accuracy was
        # computed via the *batch* MaxEnt instance.
        print(title)
        params = classifier.train(dataset.train_X, dataset.train_y)
        print(params.reshape(-1))
        predict_train = classifier.test(dataset.train_X, params)
        acc_train = classifier.evaluate(predict_train, dataset.train_y)
        predict_test = classifier.test(dataset.test_X, params)
        acc_test = classifier.evaluate(predict_test, dataset.test_y)
        print("Accuracy train: %f test: %f" % (acc_train, acc_test))
        return dataset.add_line(fig, axis, params, label, color)

    # (console title, plot label, line color, classifier instance)
    runs = [
        ("Naive Bayes", "Naive Bayes", "red", nbc.NaiveBayes()),
        ("Perceptron", "Perceptron", "blue", percc.Perceptron()),
        ("MaxEnt LBFGS", "ME-LBFGS", "green", mec_batch.MaxEntBatch()),
        ("MaxEnt Online", "ME-Online", "pink", mec_online.MaxEntOnline()),
        ("MIRA", "Mira", "orange", mirac.Mira()),
        ("SVM", "SVM", "brown", svmc.SVM()),
    ]
    for title, label, color, classifier in runs:
        fig, axis = _evaluate_and_plot(fig, axis, title, label, color,
                                       classifier)
import lxmls.classifiers.linear_classifier as lcc
import lxmls.classifiers.naive_bayes as nbc
import lxmls.classifiers.perceptron as percc
import lxmls.classifiers.svm as svmc
import lxmls.classifiers.mira as mirac
import lxmls.classifiers.max_ent_batch as mec_batch
import lxmls.classifiers.max_ent_online as mec_online
def run_all_classifiers(dataset):
fig, axis = dataset.plot_data()
print "Naive Bayes"
nb = nbc.NaiveBayes()
params_nb = nb.train(dataset.train_X, dataset.train_y)
print params_nb.reshape(-1)
predict = nb.test(dataset.train_X, params_nb)
evaluation = nb.evaluate(predict, dataset.train_y)
predict2 = nb.test(dataset.test_X, params_nb)
evaluation2 = nb.evaluate(predict2, dataset.test_y)
print "Accuracy train: %f test: %f" % (evaluation, evaluation2)
fig, axis = dataset.add_line(fig, axis, params_nb, "Naive Bayes", "red")
print "Perceptron"
perc = percc.Perceptron()
params_perc = perc.train(dataset.train_X, dataset.train_y)
print params_perc.reshape(-1)
predict = perc.test(dataset.train_X, params_perc)
evaluation = perc.evaluate(predict, dataset.train_y)
predict2 = perc.test(dataset.test_X, params_perc)
evaluation2 = perc.evaluate(predict2, dataset.test_y)
print "Accuracy train: %f test: %f" % (evaluation, evaluation2)
fig, axis = dataset.add_line(fig, axis, params_perc, "Perceptron", "blue")
print "MaxEnt LBFGS"
me = mec_batch.MaxEntBatch()
params_me = me.train(dataset.train_X, dataset.train_y)
print params_me.reshape(-1)
predict = me.test(dataset.train_X, params_me)
evaluation = me.evaluate(predict, dataset.train_y)
predict2 = me.test(dataset.test_X, params_me)
evaluation2 = me.evaluate(predict2, dataset.test_y)
print "Accuracy train: %f test: %f" % (evaluation, evaluation2)
fig, axis = dataset.add_line(fig, axis, params_me, "ME-LBFGS", "green")
print "MaxEnt Online"
me_online = mec_online.MaxEntOnline()
params_me = me_online.train(dataset.train_X, dataset.train_y)
print params_me.reshape(-1)
predict = me_online.test(dataset.train_X, params_me)
evaluation = me_online.evaluate(predict, dataset.train_y)
predict2 = me_online.test(dataset.test_X, params_me)
evaluation2 = me.evaluate(predict2, dataset.test_y)
print "Accuracy train: %f test: %f" % (evaluation, evaluation2)
fig, axis = dataset.add_line(fig, axis, params_me, "ME-Online", "pink")
print "MIRA"
mira = mirac.Mira()
params_mira = mira.train(dataset.train_X, dataset.train_y)
print params_mira.reshape(-1)
predict = mira.test(dataset.train_X, params_mira)
evaluation = mira.evaluate(predict, dataset.train_y)
predict2 = mira.test(dataset.test_X, params_mira)
evaluation2 = mira.evaluate(predict2, dataset.test_y)
print "Accuracy train: %f test: %f" % (evaluation, evaluation2)
fig, axis = dataset.add_line(fig, axis, params_mira, "Mira", "orange")
print "SVM"
svm = svmc.SVM()
params_svm = svm.train(dataset.train_X, dataset.train_y)
print params_svm.reshape(-1)
predict = svm.test(dataset.train_X, params_svm)
evaluation = svm.evaluate(predict, dataset.train_y)
predict2 = svm.test(dataset.test_X, params_svm)
evaluation2 = svm.evaluate(predict2, dataset.test_y)
print "Accuracy train: %f test: %f" % (evaluation, evaluation2)
fig, axis = dataset.add_line(fig, axis, params_svm, "SVM", "brown") | 0.361954 | 0.358241 |
import re
import datetime
# Logger:
import logging
logging.basicConfig(filename='cstats.log', filemode='w', level='DEBUG',format='%(asctime)s - %(name)s [%(levelname)s] %(message)s', datefmt='%d-%b-%y %H:%M:%S')
class Convertor():
    """Turns the raw scraped table dump into a comma-separated file."""

    def cleanup():
        """
        Removing the first couple of lines, because we're not interested
        in the header of the table.
        Header should contain:
        Country, Total Cases, New Cases, Total Deaths, New Deaths, Total Recovered,
        Active Cases, Serious/Critical, Total Cases/1M Pop, Deaths/1M Pop, 1st case date.
        """
        # NOTE(review): defined without ``self`` — works only when called on
        # the class itself, e.g. ``Convertor.cleanup()``.
        clean = []
        try:
            with open('rawdata/tableData.txt', 'r') as toClean, open('rawdata/data.txt', 'w') as cleaned:
                # Skip the first 156 lines (table preamble).  The skipped
                # lines are collected into ``clean`` but never used again.
                # NOTE(review): the magic 156 is tied to the scraper's
                # output layout — confirm if the source page changes.
                for _ in range(156):
                    clean.append(next(toClean))
                for line in toClean:
                    cleaned.write(line)
            logging.info('Cleanup completed')
        except Exception as excpt:
            # Best-effort: any failure is logged (with traceback) and
            # swallowed so the batch run can continue.
            logging.warning('Unexpected:')
            logging.exception('Exception occured in the cleanup method')

    def convert_to_csv():
        """Join the cleaned line-per-field dump into rows of rawdata/data.csv."""
        try:
            with open('rawdata/data.txt', 'r') as data, open('rawdata/data.csv', 'w') as csv:
                # Header:
                csv.write("Country,Total Cases,New Cases,Total Deaths,New Deaths,Total Recovered,Active Cases,Serious/Critical,Total Cases/1M Pop,Deaths/1M Pop,Total Tests,Tests/1M Pop,Continent__\n")
                # Data:
                lines = data.readlines()
                count = 1
                for line in lines:
                    # Commas inside numbers would break the CSV, so they are
                    # rewritten as dots (e.g. "1,234" -> "1.234").
                    line = re.sub('[,]', '.', str(line))
                    if count < 15:
                        # Fields 1-14 of a record: strip the trailing newline
                        # and join with commas.
                        csv.write(''.join(str(line))[:-1] + ',')
                        count += 1
                    else:
                        # Every 15th source line terminates a record; its own
                        # content is discarded.
                        # NOTE(review): assumes each record spans exactly 15
                        # source lines — confirm against the scraper output.
                        csv.write('\n')
                        count = 1
            logging.info('Succesfully converted data to .csv')
        except Exception as excpt:
            # Best-effort: log (with traceback) and continue.
            logging.warning('Unexpected:')
            logging.exception('Exception occured in the convert_to_csv method')
class CreateFinal():
    """Produces the final timestamped CSV under ``../data/``."""

    def makefile():
        """Copy ``rawdata/data.csv`` into ``../data/data_<ddMonYYYY_HHMM>.csv``.

        Each copied line loses its last three characters before a clean
        newline is appended.
        """
        # NOTE(review): defined without ``self`` — call as
        # ``CreateFinal.makefile()``.
        # One strftime call replaces the old chain of five concatenated
        # strftime results; output is identical, e.g. "05Apr2020_1342".
        DATETIME = datetime.datetime.now().strftime("%d%b%Y_%H%M")
        FILENAME = '../data/data_' + DATETIME + '.csv'
        try:
            with open('rawdata/data.csv', 'r') as csv, open(FILENAME, 'w') as final:
                for line in csv.readlines():
                    # NOTE(review): [:-3] assumes every line ends with a
                    # 3-char tail (trailing field junk plus newline) left by
                    # the converter — confirm against rawdata/data.csv.
                    final.write(str(line)[:-3] + '\n')
            print('[INFO] CStats done')
            logging.info('CStats batch data done @ %s' % DATETIME)
        except Exception:
            # Best-effort: log (with traceback) and continue the batch run.
            logging.warning('Unexpected:')
            logging.exception('Exception occured in the makefile method')
logging.debug('<end of parsetext>')
# © <NAME> 2020 - Open-source @ github.com/davidp-ro
# License: See LICENSE file | python/parsetext.py | import re
import datetime
# Logger:
import logging
logging.basicConfig(filename='cstats.log', filemode='w', level='DEBUG',format='%(asctime)s - %(name)s [%(levelname)s] %(message)s', datefmt='%d-%b-%y %H:%M:%S')
class Convertor():
def cleanup():
"""
Removing the first couple of lines, because we're not interested
in the header of the table.
Header should contain:
Country, Total Cases, New Cases, Total Deaths, New Deaths, Total Recovered,
Active Cases, Serious/Critical, Total Cases/1M Pop, Deaths/1M Pop, 1st case date.
"""
clean = []
try:
with open('rawdata/tableData.txt', 'r') as toClean, open('rawdata/data.txt', 'w') as cleaned:
for _ in range(156):
clean.append(next(toClean))
for line in toClean:
cleaned.write(line)
logging.info('Cleanup completed')
except Exception as excpt:
logging.warning('Unexpected:')
logging.exception('Exception occured in the cleanup method')
def convert_to_csv():
try:
with open('rawdata/data.txt', 'r') as data, open('rawdata/data.csv', 'w') as csv:
# Header:
csv.write("Country,Total Cases,New Cases,Total Deaths,New Deaths,Total Recovered,Active Cases,Serious/Critical,Total Cases/1M Pop,Deaths/1M Pop,Total Tests,Tests/1M Pop,Continent__\n")
# Data:
lines = data.readlines()
count = 1
for line in lines:
line = re.sub('[,]', '.', str(line))
if count < 15:
csv.write(''.join(str(line))[:-1] + ',')
count += 1
else:
csv.write('\n')
count = 1
logging.info('Succesfully converted data to .csv')
except Exception as excpt:
logging.warning('Unexpected:')
logging.exception('Exception occured in the convert_to_csv method')
class CreateFinal():
def makefile():
DATETIME = datetime.datetime.now()
DATETIME = str(DATETIME.strftime("%d")) + str(DATETIME.strftime("%b") + str(DATETIME.strftime("%Y"))) + '_' + str(DATETIME.strftime("%H")) + str(DATETIME.strftime("%M"))
FILENAME = '../data/data_' + DATETIME + '.csv'
try:
with open('rawdata/data.csv', 'r') as csv, open(FILENAME, 'w') as final:
lines = csv.readlines()
for line in lines:
line = str(line)[:-3]
final.write(line+'\n')
print('[INFO] CStats done')
logging.info('CStats batch data done @ %s' % DATETIME)
except Exception as excpt:
logging.warning('Unexpected:')
logging.exception('Exception occured in the makefile method')
logging.debug('<end of parsetext>')
# © <NAME> 2020 - Open-source @ github.com/davidp-ro
# License: See LICENSE file | 0.324342 | 0.137446 |
testA = [ 2, 1, 5, 3, 4 ];
testB = [ 2, 5, 1, 3, 4 ];
testC = [ 2,
1,
4,
5,
3,
8,
7,
10,
6,
12,
11,
9,
15,
13,
16,
18,
14,
19,
21,
17,
23,
22,
20,
26,
24,
27,
25,
29,
28,
31,
30,
34,
32,
36,
35,
38,
33,
40,
39,
37,
43,
42,
41,
46,
44,
48,
47,
50,
45,
51,
52,
49,
55,
54,
53,
58,
57,
59,
56,
62,
60,
64,
63,
61,
67,
68,
66,
65,
69,
72,
70,
74,
73,
71,
75,
78,
79,
76,
81,
80,
77,
84,
83,
86,
82,
88,
87,
85,
91,
89,
92,
94,
93,
90,
97,
96,
95,
99,
101,
98,
100,
103,
102,
106,
105,
107,
104,
110,
109,
108,
113,
112,
115,
114,
117,
111,
119,
116,
121,
120,
123,
122,
125,
126,
118,
124,
129,
130,
127,
128,
133,
132,
131,
136,
134,
135,
137,
140,
139,
141,
138,
144,
143,
145,
142,
148,
147,
146,
151,
150,
149,
153,
152,
156,
154,
158,
159,
157,
155,
162,
160,
164,
163,
161,
167,
166,
169,
165,
171,
168,
173,
172,
170,
176,
175,
178,
177,
174,
179,
180,
183,
184,
182,
181,
187,
186,
189,
185,
191,
192,
190,
188,
194,
193,
197,
195,
196,
198,
201,
199,
203,
202,
200,
206,
205,
204,
208,
210,
207,
209,
212,
211,
215,
213,
216,
214,
219,
218,
220,
217,
223,
222,
221,
224,
225,
228,
226,
230,
231,
229,
227,
234,
233,
232,
237,
236,
235,
240,
239,
241,
238,
244,
243,
242,
247,
246,
245,
250,
251,
249,
248,
254,
253,
252,
257,
256,
255,
260,
259,
258,
263,
262,
261,
264,
267,
266,
269,
270,
268,
265,
273,
274,
271,
276,
275,
272,
277,
280,
279,
278,
282,
284,
281,
286,
285,
283,
287,
290,
291,
288,
289,
293,
292,
296,
294,
297,
299,
300,
295,
298,
303,
302,
301,
305,
307,
304,
309,
308,
306,
312,
310,
314,
313,
311,
317,
315,
319,
320,
316,
322,
323,
321,
318,
325,
324,
328,
327,
326,
331,
332,
330,
334,
329,
333,
335,
336,
339,
337,
338,
342,
341,
344,
340,
343,
347,
345,
349,
348,
346,
352,
351,
353,
350,
356,
354,
355,
357,
358,
361,
359,
362,
364,
363,
360,
365,
367,
366,
368,
370,
369,
373,
371,
375,
372,
374,
378,
377,
380,
379,
382,
376,
384,
385,
381,
383,
388,
387,
386,
390,
389,
393,
392,
391,
396,
395,
398,
394,
397,
401,
399,
403,
402,
405,
404,
400,
408,
406,
410,
411,
407,
413,
409,
415,
414,
412,
417,
419,
416,
418,
420,
421,
424,
423,
422,
425,
427,
429,
430,
426,
432,
431,
428,
435,
436,
434,
433,
438,
439,
437,
441,
440,
444,
445,
443,
447,
442,
446,
450,
448,
452,
451,
449,
453,
456,
454,
457,
455,
460,
459,
458,
463,
462,
461,
466,
465,
468,
464,
470,
469,
472,
471,
467,
475,
474,
473,
478,
479,
477,
480,
482,
476,
481,
485,
486,
483,
488,
484,
487,
491,
490,
489,
494,
493,
492,
497,
496,
498,
495,
501,
502,
499,
500,
504,
503,
506,
505,
507,
510,
511,
509,
508,
513,
512,
516,
517,
515,
518,
514,
521,
519,
523,
520,
522,
526,
525,
528,
524,
530,
531,
529,
527,
534,
533,
536,
532,
537,
535,
540,
539,
538,
542,
544,
541,
545,
543,
547,
546,
550,
548,
552,
551,
553,
549,
556,
554,
558,
559,
555,
561,
557,
563,
562,
560,
566,
564,
568,
565,
569,
570,
572,
567,
573,
571,
575,
576,
574,
577,
580,
581,
578,
579,
584,
585,
586,
582,
587,
583,
589,
588,
592,
591,
594,
590,
593,
597,
595,
599,
596,
598,
602,
601,
600,
605,
603,
604,
608,
607,
606,
611,
612,
609,
614,
615,
610,
616,
613,
619,
618,
617,
622,
620,
624,
625,
621,
627,
623,
626,
630,
629,
628,
633,
631,
635,
636,
634,
638,
632,
639,
637,
640,
642,
644,
643,
641,
647,
645,
649,
648,
646,
652,
653,
650,
651,
656,
655,
654,
658,
657,
661,
660,
659,
664,
663,
662,
665,
668,
669,
667,
671,
666,
673,
670,
675,
674,
677,
678,
676,
672,
681,
679,
683,
680,
685,
684,
682,
687,
686,
690,
691,
689,
693,
688,
692,
695,
694,
698,
697,
696,
700,
702,
701,
699,
704,
706,
703,
707,
705,
710,
709,
711,
708,
714,
712,
716,
715,
713,
719,
717,
720,
718,
722,
724,
723,
721,
727,
726,
725,
729,
731,
728,
730,
734,
733,
732,
736,
735,
739,
738,
741,
737,
740,
744,
745,
743,
747,
742,
749,
748,
750,
746,
753,
751,
755,
752,
757,
754,
759,
756,
758,
761,
762,
760,
765,
763,
764,
766,
768,
769,
770,
767,
773,
771,
772,
776,
775,
778,
777,
774,
781,
780,
782,
779,
785,
783,
787,
784,
789,
786,
791,
790,
792,
788,
795,
796,
794,
793,
799,
798,
797,
802,
801,
804,
800,
806,
803,
808,
809,
807,
805,
811,
813,
812,
815,
810,
817,
814,
816,
818,
821,
819,
823,
822,
825,
824,
820,
828,
826,
830,
829,
827,
832,
834,
831,
836,
837,
835,
833,
838,
841,
839,
840,
844,
842,
846,
845,
843,
847,
850,
848,
849,
853,
851,
855,
856,
854,
852,
859,
858,
860,
857,
862,
861,
864,
865,
863,
868,
867,
866,
871,
869,
873,
870,
872,
876,
877,
874,
879,
880,
881,
882,
875,
883,
885,
878,
884,
887,
886,
890,
888,
892,
889,
894,
893,
896,
897,
891,
895,
900,
898,
902,
901,
904,
899,
903,
906,
908,
905,
907,
911,
910,
913,
912,
909,
914,
915,
916,
919,
917,
920,
918,
922,
921,
923,
926,
925,
924,
927,
928,
930,
929,
933,
932,
935,
931,
936,
934,
939,
938,
937,
942,
941,
940,
945,
944,
943,
948,
947,
946,
951,
952,
950,
949,
955,
954,
956,
953,
957,
960,
959,
961,
958,
962,
963,
966,
964,
967,
965,
969,
968,
972,
970,
973,
971,
975,
977,
978,
974,
976,
981,
979,
980,
984,
982,
986,
983,
988,
989,
985,
991,
987,
993,
992,
990,
996,
994,
995 ];
import time
"""
/* currently passes half of the test cases by counting number of inversions,
or if we see a number too out of place, return too chaotic
NOTE: Try divide and conquer recursion for inversion counting
to speed things up
NOTE: Alternative to nested for loops
*/
"""
def makeSenseOfChaos(input):
    """Return the minimum number of bribes behind the final queue *input*,
    or the string 'Too chaotic' if anyone moved more than 2 places forward.

    A person may bribe at most twice, so sticker ``v`` can finish no earlier
    than position ``v - 2`` (1-based).  Conversely, anyone who overtook the
    person now at index ``i`` must sit in the window ``[q[i]-2, i)``, so
    counting larger stickers in that bounded window yields the total bribe
    count without the previous full O(n^2) inversion scan (which, per the
    original author's note, timed out on half the test cases).
    """
    bribes = 0
    for position, sticker in enumerate(input):
        # Net forward displacement > 2 is impossible with <= 2 bribes.
        if sticker - (position + 1) > 2:
            return 'Too chaotic'
        # Everyone who bribed ``sticker`` now sits no earlier than
        # index sticker - 2 (0-based), so only that window is scanned.
        for earlier in range(max(0, sticker - 2), position):
            if input[earlier] > sticker:
                bribes += 1
    return bribes
# Time the solver on the large fixture and report the answer plus elapsed
# wall-clock seconds.  The parenthesized print() form (replacing the old
# ``print result`` statement, a SyntaxError on Python 3) is valid on both
# Python 2 and 3.
start = time.time()
result = makeSenseOfChaos(testC)
end = time.time()
print(result)
print(end - start)
testB = [ 2, 5, 1, 3, 4 ];
testC = [ 2,
1,
4,
5,
3,
8,
7,
10,
6,
12,
11,
9,
15,
13,
16,
18,
14,
19,
21,
17,
23,
22,
20,
26,
24,
27,
25,
29,
28,
31,
30,
34,
32,
36,
35,
38,
33,
40,
39,
37,
43,
42,
41,
46,
44,
48,
47,
50,
45,
51,
52,
49,
55,
54,
53,
58,
57,
59,
56,
62,
60,
64,
63,
61,
67,
68,
66,
65,
69,
72,
70,
74,
73,
71,
75,
78,
79,
76,
81,
80,
77,
84,
83,
86,
82,
88,
87,
85,
91,
89,
92,
94,
93,
90,
97,
96,
95,
99,
101,
98,
100,
103,
102,
106,
105,
107,
104,
110,
109,
108,
113,
112,
115,
114,
117,
111,
119,
116,
121,
120,
123,
122,
125,
126,
118,
124,
129,
130,
127,
128,
133,
132,
131,
136,
134,
135,
137,
140,
139,
141,
138,
144,
143,
145,
142,
148,
147,
146,
151,
150,
149,
153,
152,
156,
154,
158,
159,
157,
155,
162,
160,
164,
163,
161,
167,
166,
169,
165,
171,
168,
173,
172,
170,
176,
175,
178,
177,
174,
179,
180,
183,
184,
182,
181,
187,
186,
189,
185,
191,
192,
190,
188,
194,
193,
197,
195,
196,
198,
201,
199,
203,
202,
200,
206,
205,
204,
208,
210,
207,
209,
212,
211,
215,
213,
216,
214,
219,
218,
220,
217,
223,
222,
221,
224,
225,
228,
226,
230,
231,
229,
227,
234,
233,
232,
237,
236,
235,
240,
239,
241,
238,
244,
243,
242,
247,
246,
245,
250,
251,
249,
248,
254,
253,
252,
257,
256,
255,
260,
259,
258,
263,
262,
261,
264,
267,
266,
269,
270,
268,
265,
273,
274,
271,
276,
275,
272,
277,
280,
279,
278,
282,
284,
281,
286,
285,
283,
287,
290,
291,
288,
289,
293,
292,
296,
294,
297,
299,
300,
295,
298,
303,
302,
301,
305,
307,
304,
309,
308,
306,
312,
310,
314,
313,
311,
317,
315,
319,
320,
316,
322,
323,
321,
318,
325,
324,
328,
327,
326,
331,
332,
330,
334,
329,
333,
335,
336,
339,
337,
338,
342,
341,
344,
340,
343,
347,
345,
349,
348,
346,
352,
351,
353,
350,
356,
354,
355,
357,
358,
361,
359,
362,
364,
363,
360,
365,
367,
366,
368,
370,
369,
373,
371,
375,
372,
374,
378,
377,
380,
379,
382,
376,
384,
385,
381,
383,
388,
387,
386,
390,
389,
393,
392,
391,
396,
395,
398,
394,
397,
401,
399,
403,
402,
405,
404,
400,
408,
406,
410,
411,
407,
413,
409,
415,
414,
412,
417,
419,
416,
418,
420,
421,
424,
423,
422,
425,
427,
429,
430,
426,
432,
431,
428,
435,
436,
434,
433,
438,
439,
437,
441,
440,
444,
445,
443,
447,
442,
446,
450,
448,
452,
451,
449,
453,
456,
454,
457,
455,
460,
459,
458,
463,
462,
461,
466,
465,
468,
464,
470,
469,
472,
471,
467,
475,
474,
473,
478,
479,
477,
480,
482,
476,
481,
485,
486,
483,
488,
484,
487,
491,
490,
489,
494,
493,
492,
497,
496,
498,
495,
501,
502,
499,
500,
504,
503,
506,
505,
507,
510,
511,
509,
508,
513,
512,
516,
517,
515,
518,
514,
521,
519,
523,
520,
522,
526,
525,
528,
524,
530,
531,
529,
527,
534,
533,
536,
532,
537,
535,
540,
539,
538,
542,
544,
541,
545,
543,
547,
546,
550,
548,
552,
551,
553,
549,
556,
554,
558,
559,
555,
561,
557,
563,
562,
560,
566,
564,
568,
565,
569,
570,
572,
567,
573,
571,
575,
576,
574,
577,
580,
581,
578,
579,
584,
585,
586,
582,
587,
583,
589,
588,
592,
591,
594,
590,
593,
597,
595,
599,
596,
598,
602,
601,
600,
605,
603,
604,
608,
607,
606,
611,
612,
609,
614,
615,
610,
616,
613,
619,
618,
617,
622,
620,
624,
625,
621,
627,
623,
626,
630,
629,
628,
633,
631,
635,
636,
634,
638,
632,
639,
637,
640,
642,
644,
643,
641,
647,
645,
649,
648,
646,
652,
653,
650,
651,
656,
655,
654,
658,
657,
661,
660,
659,
664,
663,
662,
665,
668,
669,
667,
671,
666,
673,
670,
675,
674,
677,
678,
676,
672,
681,
679,
683,
680,
685,
684,
682,
687,
686,
690,
691,
689,
693,
688,
692,
695,
694,
698,
697,
696,
700,
702,
701,
699,
704,
706,
703,
707,
705,
710,
709,
711,
708,
714,
712,
716,
715,
713,
719,
717,
720,
718,
722,
724,
723,
721,
727,
726,
725,
729,
731,
728,
730,
734,
733,
732,
736,
735,
739,
738,
741,
737,
740,
744,
745,
743,
747,
742,
749,
748,
750,
746,
753,
751,
755,
752,
757,
754,
759,
756,
758,
761,
762,
760,
765,
763,
764,
766,
768,
769,
770,
767,
773,
771,
772,
776,
775,
778,
777,
774,
781,
780,
782,
779,
785,
783,
787,
784,
789,
786,
791,
790,
792,
788,
795,
796,
794,
793,
799,
798,
797,
802,
801,
804,
800,
806,
803,
808,
809,
807,
805,
811,
813,
812,
815,
810,
817,
814,
816,
818,
821,
819,
823,
822,
825,
824,
820,
828,
826,
830,
829,
827,
832,
834,
831,
836,
837,
835,
833,
838,
841,
839,
840,
844,
842,
846,
845,
843,
847,
850,
848,
849,
853,
851,
855,
856,
854,
852,
859,
858,
860,
857,
862,
861,
864,
865,
863,
868,
867,
866,
871,
869,
873,
870,
872,
876,
877,
874,
879,
880,
881,
882,
875,
883,
885,
878,
884,
887,
886,
890,
888,
892,
889,
894,
893,
896,
897,
891,
895,
900,
898,
902,
901,
904,
899,
903,
906,
908,
905,
907,
911,
910,
913,
912,
909,
914,
915,
916,
919,
917,
920,
918,
922,
921,
923,
926,
925,
924,
927,
928,
930,
929,
933,
932,
935,
931,
936,
934,
939,
938,
937,
942,
941,
940,
945,
944,
943,
948,
947,
946,
951,
952,
950,
949,
955,
954,
956,
953,
957,
960,
959,
961,
958,
962,
963,
966,
964,
967,
965,
969,
968,
972,
970,
973,
971,
975,
977,
978,
974,
976,
981,
979,
980,
984,
982,
986,
983,
988,
989,
985,
991,
987,
993,
992,
990,
996,
994,
995 ];
import time
"""
/* currently passes half of the test cases by counting number of inversions,
or if we see a number too out of place, return too chaotic
NOTE: Try divide and conquer recursion for inversion counting
to speed things up
NOTE: Alternative to nested for loops
*/
"""
def makeSenseOfChaos(input):
length = len(input)
tooMuchChaos = 'Too chaotic'
inversions = 0
for x in range(0, length, 1):
count = 0
bribesMade = input[x] - (x + 1)
if bribesMade > 2: return tooMuchChaos;
for y in range(x, length, 1):
if input[x] > input[y]:
inversions += 1
count += 1
if count == 2: break
return inversions
start = time.time()
result = makeSenseOfChaos(testC)
end = time.time()
print result
print (end - start) | 0.155431 | 0.288156 |
import requests
import argparse
import json
SCRYFALL_CARDS_API = "https://api.scryfall.com/cards"
SCRYFALL_SETS_API = "https://api.scryfall.com/sets"
SCRYFALL_SET_CONVERSION = {
'G18' : 'M19'
}
def normalize_set(set_id):
    """Map MTGA-only set codes onto their canonical Scryfall equivalents.

    Codes without an entry in SCRYFALL_SET_CONVERSION pass through as-is.
    """
    if set_id in SCRYFALL_SET_CONVERSION:
        return SCRYFALL_SET_CONVERSION[set_id]
    return set_id
class ScryfallError(ValueError):
    """Raised when the Scryfall API returns an unexpected status code."""
    pass
def get_mtga_card(arena_id):
    """Fetch the card with the given arena id and return it as an mtga Card."""
    return scryfall_to_mtga(get_arena_card_json(arena_id))
def get_arena_card_json(arena_id):
    """Get card from Scryfall by arena id.

    Returns the decoded JSON payload; raises ScryfallError on any
    non-OK HTTP status.
    """
    url = SCRYFALL_CARDS_API + '/arena/' + str(arena_id)
    response = requests.get(url)
    if response.status_code == requests.codes.ok:
        return response.json()
    raise ScryfallError('Unknown card id %s. Status code: %s' % (arena_id, response.status_code))
def scryfall_to_mtga(scryfall_card):
    """Convert a Scryfall card JSON payload into an ``mtga`` Card object."""
    from mtga.models.card import Card

    pretty_name = scryfall_card['name']
    name = pretty_name.lower().replace(' ', '_')
    # Mana cost arrives as e.g. "{2}{W}{W}"; keep just the symbols.
    cost = list(scryfall_card['mana_cost'].replace('}', '').replace('{', ''))
    color_identity = scryfall_card['color_identity']
    # Type line is "Types — Subtypes"; the subtype half may be absent.
    type_parts = scryfall_card['type_line'].split(u' — ')
    card_type = type_parts[0]
    sub_types = type_parts[1] if len(type_parts) > 1 else ""
    set_id = normalize_set(scryfall_card['set'].upper())
    set_number = scryfall_card['collector_number']
    return Card(
        name,
        pretty_name,
        cost,
        color_identity,
        card_type,
        sub_types,
        {},                   # abilities: not provided by Scryfall
        set_id,
        scryfall_card['rarity'],
        set_number != "",     # collectible
        set_number,
        scryfall_card['arena_id'],
    )
def get_set_info(set_name):
    """Fetch metadata for *set_name* from the Scryfall sets API.

    Returns the decoded JSON payload, or an empty dict when the set is
    unknown (HTTP 404).  Any other non-OK status raises ScryfallError.
    """
    response = requests.get(SCRYFALL_SETS_API + '/' + str(set_name))
    if response.status_code == requests.codes.not_found:
        print('Unknown set: %s. Reason: %s %s' % (set_name, response.status_code, response.reason))
        return {}
    if response.status_code != requests.codes.ok:
        # Bug fix: the message previously interpolated the builtin ``set``
        # (rendering as "<class 'set'>") instead of the requested set name.
        raise ScryfallError('Unknown set: %s. Status code: %s' % (set_name, response.status_code))
    return response.json()
if __name__ == "__main__":
    # Smoke test: fetch two known arena ids from the live Scryfall API and
    # print their converted Card representations.
    x = get_arena_card_json(68369)
    print(scryfall_to_mtga(x))
    y = get_arena_card_json(67542)
    print(scryfall_to_mtga(y))
import argparse
import json
SCRYFALL_CARDS_API = "https://api.scryfall.com/cards"
SCRYFALL_SETS_API = "https://api.scryfall.com/sets"
SCRYFALL_SET_CONVERSION = {
'G18' : 'M19'
}
def normalize_set(set_id):
return SCRYFALL_SET_CONVERSION.get(set_id, set_id)
class ScryfallError(ValueError):
pass
def get_mtga_card(arena_id):
scryfall_card = get_arena_card_json(arena_id)
return scryfall_to_mtga(scryfall_card)
def get_arena_card_json(arena_id):
"""Get card from Scryfall by arena id"""
response = requests.get(SCRYFALL_CARDS_API+'/arena/'+str(arena_id))
if response.status_code != requests.codes.ok:
raise ScryfallError('Unknown card id %s. Status code: %s' % (arena_id, response.status_code))
return response.json()
def scryfall_to_mtga(scryfall_card):
from mtga.models.card import Card
name = scryfall_card['name'].lower().replace(' ', '_')
pretty_name = scryfall_card['name']
cost = list(scryfall_card['mana_cost'].replace('}', '').replace('{', ''))
color_identity = scryfall_card['color_identity']
types = scryfall_card['type_line'].split(u' — ')
card_type = types[0]
try:
sub_types = types[1]
except IndexError:
sub_types = ""
set_id = normalize_set(scryfall_card['set'].upper())
rarity = scryfall_card['rarity']
set_number = scryfall_card['collector_number']
mtga_id = scryfall_card['arena_id']
collectible = scryfall_card['collector_number'] != ""
abilities = {}
mtga_card = Card(
name, pretty_name, cost, color_identity,
card_type, sub_types, abilities, set_id, rarity, collectible, set_number, mtga_id
)
return mtga_card
def get_set_info(set_name):
"""gets info on requested set"""
response = requests.get(SCRYFALL_SETS_API+'/'+str(set_name))
if response.status_code == requests.codes.not_found:
print('Unknown set: %s. Reason: %s %s' % (set_name, response.status_code, response.reason))
return {}
if response.status_code != requests.codes.ok:
raise ScryfallError('Unknown set: %s. Status code: %s' % (set, response.status_code))
return response.json()
if __name__ == "__main__":
x = get_arena_card_json(68369)
print(scryfall_to_mtga(x))
y = get_arena_card_json(67542)
print(scryfall_to_mtga(y)) | 0.263789 | 0.071786 |
import tensorflow as tf
from tensorflow.keras import Input
from tensorflow.keras.layers import Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from graphgallery.nn.layers.tensorflow import GCNConv
from graphgallery.gallery.nodeclas.tensorflow.bvat.utils import kl_divergence_with_logit, entropy_y_x, get_normalized_vector
from graphgallery.nn.models.tf_engine import TFEngine
from graphgallery.gallery.nodeclas import TensorFlow
from ..bvat.obvat import OBVAT
@TensorFlow.register()
class SimplifiedOBVAT(OBVAT):
    """
    Implementation of optimization-based Batch Virtual Adversarial Training Graph Convolutional Networks (OBVAT).
    `Batch Virtual Adversarial Training for Graph Convolutional Networks <https://arxiv.org/abs/1902.09192>`
    Tensorflow 1.x implementation: <https://github.com/thudzj/BVAT>
    """

    def model_step(self,
                   hids=[16],
                   acts=['relu'],
                   dropout=0.,
                   lr=0.01,
                   weight_decay=5e-4,
                   bias=False,
                   p1=1.4,
                   p2=0.7,
                   epsilon=0.01):
        # Build the GCN and add two regularizers on top of the usual
        # cross-entropy loss: p1 * VAT loss + p2 * conditional-entropy loss.
        # NOTE(review): ``hids``/``acts`` are mutable default arguments —
        # safe only while callers never mutate them.
        x = Input(batch_shape=[None, self.graph.num_node_attrs],
                  dtype=self.floatx,
                  name='node_attr')
        adj = Input(batch_shape=[None, None],
                    dtype=self.floatx,
                    sparse=True,
                    name='adj_matrix')
        GCN_layers = []
        # One hidden GCN layer per (size, activation) pair, each L2-decayed.
        for hid, act in zip(hids, acts):
            GCN_layers.append(
                GCNConv(
                    hid,
                    activation=act,
                    use_bias=bias,
                    kernel_regularizer=regularizers.l2(weight_decay)))
        # Output layer: one unit per class, no explicit activation and no
        # weight decay.
        GCN_layers.append(
            GCNConv(self.graph.num_node_classes,
                    use_bias=bias))
        self.GCN_layers = GCN_layers
        self.dropout = Dropout(rate=dropout)
        h = self.forward(x, adj)

        model = TFEngine(inputs=[x, adj], outputs=h)
        # from_logits=True: labels are compared against the raw outputs of
        # the final GCNConv.
        model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                      optimizer=Adam(lr=lr),
                      metrics=['accuracy'])
        entropy_loss = entropy_y_x(h)
        vat_loss = self.virtual_adversarial_loss(x, adj, h, epsilon)
        model.add_loss(p1 * vat_loss + p2 * entropy_loss)
        return model

    def train_step(self, sequence):
        # Deliberately bypass OBVAT's own train_step (which performs the
        # adversarial pre-optimization) and delegate to OBVAT's *parent*
        # implementation — hence super(OBVAT, self), not
        # super(SimplifiedOBVAT, self).
        return super(OBVAT, self).train_step(sequence)

    def virtual_adversarial_loss(self, x, adj, logit, epsilon):
        # "Simplified" variant: the adversarial perturbation is a single
        # random direction normalized and scaled by epsilon, rather than
        # being optimized as in full OBVAT.
        d = tf.random.normal(
            shape=[self.graph.num_nodes, self.graph.num_node_attrs],
            dtype=self.floatx)
        r_vadv = get_normalized_vector(d) * epsilon
        # Stop gradients through the clean prediction so only the perturbed
        # branch is trained to match it.
        logit_p = tf.stop_gradient(logit)
        logit_m = self.forward(x + r_vadv, adj)
        loss = kl_divergence_with_logit(logit_p, logit_m)
        return loss
from tensorflow.keras import Input
from tensorflow.keras.layers import Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from graphgallery.nn.layers.tensorflow import GCNConv
from graphgallery.gallery.nodeclas.tensorflow.bvat.utils import kl_divergence_with_logit, entropy_y_x, get_normalized_vector
from graphgallery.nn.models.tf_engine import TFEngine
from graphgallery.gallery.nodeclas import TensorFlow
from ..bvat.obvat import OBVAT
@TensorFlow.register()
class SimplifiedOBVAT(OBVAT):
    """
    Simplified optimization-based Batch Virtual Adversarial Training GCN (OBVAT).

    Unlike the parent ``OBVAT``, the adversarial perturbation here is a single
    normalized random-normal vector (no inner optimization loop), which is why
    ``train_step`` bypasses ``OBVAT.train_step`` and calls the grandparent
    implementation directly.

    `Batch Virtual Adversarial Training for Graph Convolutional Networks
    <https://arxiv.org/abs/1902.09192>`
    Tensorflow 1.x implementation: <https://github.com/thudzj/BVAT>
    """

    def model_step(self,
                   hids=[16],
                   acts=['relu'],
                   dropout=0.,
                   lr=0.01,
                   weight_decay=5e-4,
                   bias=False,
                   p1=1.4,
                   p2=0.7,
                   epsilon=0.01):
        """Build and compile the GCN with VAT and entropy auxiliary losses.

        :param hids: hidden layer sizes (zipped with ``acts``).
        :param acts: activation function per hidden layer.
        :param dropout: dropout rate applied in ``self.forward``.
        :param lr: Adam learning rate.
        :param weight_decay: L2 factor on hidden-layer kernels only.
        :param bias: whether GCN layers use a bias term.
        :param p1: weight of the virtual adversarial (VAT) loss.
        :param p2: weight of the conditional entropy loss.
        :param epsilon: norm of the random adversarial perturbation.
        :return: a compiled :class:`TFEngine` model.
        """
        x = Input(batch_shape=[None, self.graph.num_node_attrs],
                  dtype=self.floatx,
                  name='node_attr')
        adj = Input(batch_shape=[None, None],
                    dtype=self.floatx,
                    sparse=True,
                    name='adj_matrix')

        GCN_layers = []
        for hid, act in zip(hids, acts):
            GCN_layers.append(
                GCNConv(
                    hid,
                    activation=act,
                    use_bias=bias,
                    kernel_regularizer=regularizers.l2(weight_decay)))
        # Output layer produces raw logits (no activation, no weight decay).
        GCN_layers.append(
            GCNConv(self.graph.num_node_classes,
                    use_bias=bias))
        self.GCN_layers = GCN_layers
        self.dropout = Dropout(rate=dropout)
        h = self.forward(x, adj)

        model = TFEngine(inputs=[x, adj], outputs=h)
        model.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                      optimizer=Adam(lr=lr),
                      metrics=['accuracy'])

        # Unsupervised regularizers added on top of the supervised loss.
        entropy_loss = entropy_y_x(h)
        vat_loss = self.virtual_adversarial_loss(x, adj, h, epsilon)
        model.add_loss(p1 * vat_loss + p2 * entropy_loss)
        return model

    def train_step(self, sequence):
        # Deliberately skip OBVAT's extra inner optimization step and use the
        # plain training loop of OBVAT's own parent class.
        return super(OBVAT, self).train_step(sequence)

    def virtual_adversarial_loss(self, x, adj, logit, epsilon):
        """KL divergence between predictions on clean and randomly perturbed inputs."""
        d = tf.random.normal(
            shape=[self.graph.num_nodes, self.graph.num_node_attrs],
            dtype=self.floatx)
        r_vadv = get_normalized_vector(d) * epsilon
        # Treat the clean prediction as a constant target: no gradient flows
        # through logit_p.
        logit_p = tf.stop_gradient(logit)
        logit_m = self.forward(x + r_vadv, adj)
        loss = kl_divergence_with_logit(logit_p, logit_m)
        return loss
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from rest_framework import status
from dcim.choices import SiteStatusChoices
from dcim.models import Site
from extras.choices import *
from extras.models import CustomField, ObjectChange, Tag
from utilities.testing import APITestCase
from utilities.testing.utils import create_tags, post_data
from utilities.testing.views import ModelViewTestCase
class ChangeLogViewTest(ModelViewTestCase):
    """Verify that UI (form view) operations on Site objects record ObjectChange entries."""
    model = Site

    @classmethod
    def setUpTestData(cls):
        # Create a custom field on the Site model
        ct = ContentType.objects.get_for_model(Site)
        cf = CustomField(
            type=CustomFieldTypeChoices.TYPE_TEXT,
            name='my_field',
            required=False
        )
        cf.save()
        cf.content_types.set([ct])

        # Create a select custom field on the Site model
        cf_select = CustomField(
            type=CustomFieldTypeChoices.TYPE_SELECT,
            name='my_field_select',
            required=False,
            choices=['Bar', 'Foo']
        )
        cf_select.save()
        cf_select.content_types.set([ct])

    def test_create_object(self):
        """Creating a Site via the UI should log an ACTION_CREATE change with post-change data only."""
        tags = create_tags('Tag 1', 'Tag 2')
        form_data = {
            'name': 'Site 1',
            'slug': 'site-1',
            'status': SiteStatusChoices.STATUS_ACTIVE,
            'cf_my_field': 'ABC',
            'cf_my_field_select': 'Bar',
            'tags': [tag.pk for tag in tags],
        }

        request = {
            'path': self._get_url('add'),
            'data': post_data(form_data),
        }
        self.add_permissions('dcim.add_site', 'extras.view_tag')
        # A successful form submission redirects (302).
        response = self.client.post(**request)
        self.assertHttpStatus(response, 302)

        # Verify the creation of a new ObjectChange record
        site = Site.objects.get(name='Site 1')
        oc = ObjectChange.objects.get(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=site.pk
        )
        self.assertEqual(oc.changed_object, site)
        self.assertEqual(oc.action, ObjectChangeActionChoices.ACTION_CREATE)
        self.assertEqual(oc.prechange_data, None)
        self.assertEqual(oc.postchange_data['custom_fields']['my_field'], form_data['cf_my_field'])
        self.assertEqual(oc.postchange_data['custom_fields']['my_field_select'], form_data['cf_my_field_select'])
        self.assertEqual(oc.postchange_data['tags'], ['Tag 1', 'Tag 2'])

    def test_update_object(self):
        """Editing a Site via the UI should log an ACTION_UPDATE change with pre- and post-change snapshots."""
        site = Site(name='Site 1', slug='site-1')
        site.save()
        tags = create_tags('Tag 1', 'Tag 2', 'Tag 3')
        site.tags.set(['Tag 1', 'Tag 2'])

        form_data = {
            'name': 'Site X',
            'slug': 'site-x',
            'status': SiteStatusChoices.STATUS_PLANNED,
            'cf_my_field': 'DEF',
            'cf_my_field_select': 'Foo',
            'tags': [tags[2].pk],
        }

        request = {
            'path': self._get_url('edit', instance=site),
            'data': post_data(form_data),
        }
        self.add_permissions('dcim.change_site', 'extras.view_tag')
        response = self.client.post(**request)
        self.assertHttpStatus(response, 302)

        # Verify the creation of a new ObjectChange record
        site.refresh_from_db()
        oc = ObjectChange.objects.filter(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=site.pk
        ).first()
        self.assertEqual(oc.changed_object, site)
        self.assertEqual(oc.action, ObjectChangeActionChoices.ACTION_UPDATE)
        self.assertEqual(oc.prechange_data['name'], 'Site 1')
        self.assertEqual(oc.prechange_data['tags'], ['Tag 1', 'Tag 2'])
        self.assertEqual(oc.postchange_data['custom_fields']['my_field'], form_data['cf_my_field'])
        self.assertEqual(oc.postchange_data['custom_fields']['my_field_select'], form_data['cf_my_field_select'])
        self.assertEqual(oc.postchange_data['tags'], ['Tag 3'])

    def test_delete_object(self):
        """Deleting a Site via the UI should log an ACTION_DELETE change retaining only pre-change data."""
        site = Site(
            name='Site 1',
            slug='site-1',
            custom_field_data={
                'my_field': 'ABC',
                'my_field_select': 'Bar'
            }
        )
        site.save()
        create_tags('Tag 1', 'Tag 2')
        site.tags.set(['Tag 1', 'Tag 2'])

        request = {
            'path': self._get_url('delete', instance=site),
            'data': post_data({'confirm': True}),
        }
        self.add_permissions('dcim.delete_site')
        response = self.client.post(**request)
        self.assertHttpStatus(response, 302)

        # The changed object is gone; the change record keeps its repr.
        oc = ObjectChange.objects.first()
        self.assertEqual(oc.changed_object, None)
        self.assertEqual(oc.object_repr, site.name)
        self.assertEqual(oc.action, ObjectChangeActionChoices.ACTION_DELETE)
        self.assertEqual(oc.prechange_data['custom_fields']['my_field'], 'ABC')
        self.assertEqual(oc.prechange_data['custom_fields']['my_field_select'], 'Bar')
        self.assertEqual(oc.prechange_data['tags'], ['Tag 1', 'Tag 2'])
        self.assertEqual(oc.postchange_data, None)

    def test_bulk_update_objects(self):
        """Bulk editing Sites should log an ACTION_UPDATE change for each object."""
        sites = (
            Site(name='Site 1', slug='site-1', status=SiteStatusChoices.STATUS_ACTIVE),
            Site(name='Site 2', slug='site-2', status=SiteStatusChoices.STATUS_ACTIVE),
            Site(name='Site 3', slug='site-3', status=SiteStatusChoices.STATUS_ACTIVE),
        )
        Site.objects.bulk_create(sites)

        form_data = {
            'pk': [site.pk for site in sites],
            '_apply': True,
            'status': SiteStatusChoices.STATUS_PLANNED,
            'description': 'New description',
        }
        request = {
            'path': self._get_url('bulk_edit'),
            'data': post_data(form_data),
        }
        self.add_permissions('dcim.view_site', 'dcim.change_site')
        response = self.client.post(**request)
        self.assertHttpStatus(response, 302)

        # Spot-check the change record for the first site.
        objectchange = ObjectChange.objects.get(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=sites[0].pk
        )
        self.assertEqual(objectchange.changed_object, sites[0])
        self.assertEqual(objectchange.action, ObjectChangeActionChoices.ACTION_UPDATE)
        self.assertEqual(objectchange.prechange_data['status'], SiteStatusChoices.STATUS_ACTIVE)
        self.assertEqual(objectchange.prechange_data['description'], '')
        self.assertEqual(objectchange.postchange_data['status'], form_data['status'])
        self.assertEqual(objectchange.postchange_data['description'], form_data['description'])

    def test_bulk_delete_objects(self):
        """Bulk deleting Sites should log an ACTION_DELETE change for each object."""
        sites = (
            Site(name='Site 1', slug='site-1', status=SiteStatusChoices.STATUS_ACTIVE),
            Site(name='Site 2', slug='site-2', status=SiteStatusChoices.STATUS_ACTIVE),
            Site(name='Site 3', slug='site-3', status=SiteStatusChoices.STATUS_ACTIVE),
        )
        Site.objects.bulk_create(sites)

        form_data = {
            'pk': [site.pk for site in sites],
            'confirm': True,
            '_confirm': True,
        }
        request = {
            'path': self._get_url('bulk_delete'),
            'data': post_data(form_data),
        }
        self.add_permissions('dcim.delete_site')
        response = self.client.post(**request)
        self.assertHttpStatus(response, 302)

        # Spot-check the change record for the first site.
        objectchange = ObjectChange.objects.get(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=sites[0].pk
        )
        self.assertEqual(objectchange.changed_object_type, ContentType.objects.get_for_model(Site))
        self.assertEqual(objectchange.changed_object_id, sites[0].pk)
        self.assertEqual(objectchange.action, ObjectChangeActionChoices.ACTION_DELETE)
        self.assertEqual(objectchange.prechange_data['name'], sites[0].name)
        self.assertEqual(objectchange.prechange_data['slug'], sites[0].slug)
        self.assertEqual(objectchange.postchange_data, None)
class ChangeLogAPITest(APITestCase):
    """Verify that REST API operations on Site objects record ObjectChange entries.

    Mirrors ChangeLogViewTest, but drives the changes through the dcim API
    endpoints (single-object and bulk create/update/delete).
    """
    # NOTE: the trailing dataset metadata that was fused onto the final
    # assertion of this class has been removed; code is otherwise unchanged.

    @classmethod
    def setUpTestData(cls):
        # Create a custom field on the Site model
        ct = ContentType.objects.get_for_model(Site)
        cf = CustomField(
            type=CustomFieldTypeChoices.TYPE_TEXT,
            name='my_field',
            required=False
        )
        cf.save()
        cf.content_types.set([ct])

        # Create a select custom field on the Site model
        cf_select = CustomField(
            type=CustomFieldTypeChoices.TYPE_SELECT,
            name='my_field_select',
            required=False,
            choices=['Bar', 'Foo']
        )
        cf_select.save()
        cf_select.content_types.set([ct])

        # Create some tags
        tags = (
            Tag(name='Tag 1', slug='tag-1'),
            Tag(name='Tag 2', slug='tag-2'),
            Tag(name='Tag 3', slug='tag-3'),
        )
        Tag.objects.bulk_create(tags)

    def test_create_object(self):
        """POSTing a new Site should log an ACTION_CREATE change with post-change data only."""
        data = {
            'name': 'Site 1',
            'slug': 'site-1',
            'custom_fields': {
                'my_field': 'ABC',
                'my_field_select': 'Bar',
            },
            'tags': [
                {'name': 'Tag 1'},
                {'name': 'Tag 2'},
            ]
        }
        self.assertEqual(ObjectChange.objects.count(), 0)
        url = reverse('dcim-api:site-list')
        self.add_permissions('dcim.add_site')

        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)

        site = Site.objects.get(pk=response.data['id'])
        oc = ObjectChange.objects.get(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=site.pk
        )
        self.assertEqual(oc.changed_object, site)
        self.assertEqual(oc.action, ObjectChangeActionChoices.ACTION_CREATE)
        self.assertEqual(oc.prechange_data, None)
        self.assertEqual(oc.postchange_data['custom_fields'], data['custom_fields'])
        self.assertEqual(oc.postchange_data['tags'], ['Tag 1', 'Tag 2'])

    def test_update_object(self):
        """PUTting a Site should log an ACTION_UPDATE change with the new data."""
        site = Site(name='Site 1', slug='site-1')
        site.save()

        data = {
            'name': 'Site X',
            'slug': 'site-x',
            'custom_fields': {
                'my_field': 'DEF',
                'my_field_select': 'Foo',
            },
            'tags': [
                {'name': 'Tag 3'}
            ]
        }
        self.assertEqual(ObjectChange.objects.count(), 0)
        self.add_permissions('dcim.change_site')
        url = reverse('dcim-api:site-detail', kwargs={'pk': site.pk})

        response = self.client.put(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)

        site = Site.objects.get(pk=response.data['id'])
        oc = ObjectChange.objects.get(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=site.pk
        )
        self.assertEqual(oc.changed_object, site)
        self.assertEqual(oc.action, ObjectChangeActionChoices.ACTION_UPDATE)
        self.assertEqual(oc.postchange_data['custom_fields'], data['custom_fields'])
        self.assertEqual(oc.postchange_data['tags'], ['Tag 3'])

    def test_delete_object(self):
        """DELETEing a Site should log an ACTION_DELETE change retaining only pre-change data."""
        site = Site(
            name='Site 1',
            slug='site-1',
            custom_field_data={
                'my_field': 'ABC',
                'my_field_select': 'Bar'
            }
        )
        site.save()
        site.tags.set(Tag.objects.all()[:2])
        self.assertEqual(ObjectChange.objects.count(), 0)
        self.add_permissions('dcim.delete_site')
        url = reverse('dcim-api:site-detail', kwargs={'pk': site.pk})

        response = self.client.delete(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
        self.assertEqual(Site.objects.count(), 0)

        # The changed object is gone; the change record keeps its repr.
        oc = ObjectChange.objects.first()
        self.assertEqual(oc.changed_object, None)
        self.assertEqual(oc.object_repr, site.name)
        self.assertEqual(oc.action, ObjectChangeActionChoices.ACTION_DELETE)
        self.assertEqual(oc.prechange_data['custom_fields']['my_field'], 'ABC')
        self.assertEqual(oc.prechange_data['custom_fields']['my_field_select'], 'Bar')
        self.assertEqual(oc.prechange_data['tags'], ['Tag 1', 'Tag 2'])
        self.assertEqual(oc.postchange_data, None)

    def test_bulk_create_objects(self):
        """Bulk POST should log one ACTION_CREATE change per created object."""
        data = (
            {
                'name': 'Site 1',
                'slug': 'site-1',
            },
            {
                'name': 'Site 2',
                'slug': 'site-2',
            },
            {
                'name': 'Site 3',
                'slug': 'site-3',
            },
        )
        self.assertEqual(ObjectChange.objects.count(), 0)
        url = reverse('dcim-api:site-list')
        self.add_permissions('dcim.add_site')

        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(ObjectChange.objects.count(), 3)

        # Spot-check the change record for the first created site.
        site1 = Site.objects.get(pk=response.data[0]['id'])
        objectchange = ObjectChange.objects.get(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=site1.pk
        )
        self.assertEqual(objectchange.changed_object, site1)
        self.assertEqual(objectchange.action, ObjectChangeActionChoices.ACTION_CREATE)
        self.assertEqual(objectchange.prechange_data, None)
        self.assertEqual(objectchange.postchange_data['name'], data[0]['name'])
        self.assertEqual(objectchange.postchange_data['slug'], data[0]['slug'])

    def test_bulk_edit_objects(self):
        """Bulk PATCH should log one ACTION_UPDATE change per modified object."""
        sites = (
            Site(name='Site 1', slug='site-1'),
            Site(name='Site 2', slug='site-2'),
            Site(name='Site 3', slug='site-3'),
        )
        Site.objects.bulk_create(sites)

        data = (
            {
                'id': sites[0].pk,
                'name': 'Site A',
                # NOTE(review): 'site-A' contains an uppercase letter, unlike
                # every other slug in this module — confirm this is intended.
                'slug': 'site-A',
            },
            {
                'id': sites[1].pk,
                'name': 'Site B',
                'slug': 'site-b',
            },
            {
                'id': sites[2].pk,
                'name': 'Site C',
                'slug': 'site-c',
            },
        )
        self.assertEqual(ObjectChange.objects.count(), 0)
        url = reverse('dcim-api:site-list')
        self.add_permissions('dcim.change_site')

        response = self.client.patch(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        self.assertEqual(ObjectChange.objects.count(), 3)

        # Spot-check the change record for the first site.
        objectchange = ObjectChange.objects.get(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=sites[0].pk
        )
        self.assertEqual(objectchange.changed_object, sites[0])
        self.assertEqual(objectchange.action, ObjectChangeActionChoices.ACTION_UPDATE)
        self.assertEqual(objectchange.prechange_data['name'], 'Site 1')
        self.assertEqual(objectchange.prechange_data['slug'], 'site-1')
        self.assertEqual(objectchange.postchange_data['name'], data[0]['name'])
        self.assertEqual(objectchange.postchange_data['slug'], data[0]['slug'])

    def test_bulk_delete_objects(self):
        """Bulk DELETE should log one ACTION_DELETE change per deleted object."""
        sites = (
            Site(name='Site 1', slug='site-1'),
            Site(name='Site 2', slug='site-2'),
            Site(name='Site 3', slug='site-3'),
        )
        Site.objects.bulk_create(sites)

        data = (
            {
                'id': sites[0].pk,
            },
            {
                'id': sites[1].pk,
            },
            {
                'id': sites[2].pk,
            },
        )
        self.assertEqual(ObjectChange.objects.count(), 0)
        url = reverse('dcim-api:site-list')
        self.add_permissions('dcim.delete_site')

        response = self.client.delete(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
        self.assertEqual(ObjectChange.objects.count(), 3)

        # Spot-check the change record for the first site.
        objectchange = ObjectChange.objects.get(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=sites[0].pk
        )
        self.assertEqual(objectchange.changed_object_type, ContentType.objects.get_for_model(Site))
        self.assertEqual(objectchange.changed_object_id, sites[0].pk)
        self.assertEqual(objectchange.action, ObjectChangeActionChoices.ACTION_DELETE)
        self.assertEqual(objectchange.prechange_data['name'], 'Site 1')
        self.assertEqual(objectchange.prechange_data['slug'], 'site-1')
        self.assertEqual(objectchange.postchange_data, None)
from django.urls import reverse
from rest_framework import status
from dcim.choices import SiteStatusChoices
from dcim.models import Site
from extras.choices import *
from extras.models import CustomField, ObjectChange, Tag
from utilities.testing import APITestCase
from utilities.testing.utils import create_tags, post_data
from utilities.testing.views import ModelViewTestCase
class ChangeLogViewTest(ModelViewTestCase):
    """Verify that UI (form view) operations on Site objects record ObjectChange entries."""
    model = Site

    @classmethod
    def setUpTestData(cls):
        # Create a custom field on the Site model
        ct = ContentType.objects.get_for_model(Site)
        cf = CustomField(
            type=CustomFieldTypeChoices.TYPE_TEXT,
            name='my_field',
            required=False
        )
        cf.save()
        cf.content_types.set([ct])

        # Create a select custom field on the Site model
        cf_select = CustomField(
            type=CustomFieldTypeChoices.TYPE_SELECT,
            name='my_field_select',
            required=False,
            choices=['Bar', 'Foo']
        )
        cf_select.save()
        cf_select.content_types.set([ct])

    def test_create_object(self):
        """Creating a Site via the UI should log an ACTION_CREATE change with post-change data only."""
        tags = create_tags('Tag 1', 'Tag 2')
        form_data = {
            'name': 'Site 1',
            'slug': 'site-1',
            'status': SiteStatusChoices.STATUS_ACTIVE,
            'cf_my_field': 'ABC',
            'cf_my_field_select': 'Bar',
            'tags': [tag.pk for tag in tags],
        }

        request = {
            'path': self._get_url('add'),
            'data': post_data(form_data),
        }
        self.add_permissions('dcim.add_site', 'extras.view_tag')
        # A successful form submission redirects (302).
        response = self.client.post(**request)
        self.assertHttpStatus(response, 302)

        # Verify the creation of a new ObjectChange record
        site = Site.objects.get(name='Site 1')
        oc = ObjectChange.objects.get(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=site.pk
        )
        self.assertEqual(oc.changed_object, site)
        self.assertEqual(oc.action, ObjectChangeActionChoices.ACTION_CREATE)
        self.assertEqual(oc.prechange_data, None)
        self.assertEqual(oc.postchange_data['custom_fields']['my_field'], form_data['cf_my_field'])
        self.assertEqual(oc.postchange_data['custom_fields']['my_field_select'], form_data['cf_my_field_select'])
        self.assertEqual(oc.postchange_data['tags'], ['Tag 1', 'Tag 2'])

    def test_update_object(self):
        """Editing a Site via the UI should log an ACTION_UPDATE change with pre- and post-change snapshots."""
        site = Site(name='Site 1', slug='site-1')
        site.save()
        tags = create_tags('Tag 1', 'Tag 2', 'Tag 3')
        site.tags.set(['Tag 1', 'Tag 2'])

        form_data = {
            'name': 'Site X',
            'slug': 'site-x',
            'status': SiteStatusChoices.STATUS_PLANNED,
            'cf_my_field': 'DEF',
            'cf_my_field_select': 'Foo',
            'tags': [tags[2].pk],
        }

        request = {
            'path': self._get_url('edit', instance=site),
            'data': post_data(form_data),
        }
        self.add_permissions('dcim.change_site', 'extras.view_tag')
        response = self.client.post(**request)
        self.assertHttpStatus(response, 302)

        # Verify the creation of a new ObjectChange record
        site.refresh_from_db()
        oc = ObjectChange.objects.filter(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=site.pk
        ).first()
        self.assertEqual(oc.changed_object, site)
        self.assertEqual(oc.action, ObjectChangeActionChoices.ACTION_UPDATE)
        self.assertEqual(oc.prechange_data['name'], 'Site 1')
        self.assertEqual(oc.prechange_data['tags'], ['Tag 1', 'Tag 2'])
        self.assertEqual(oc.postchange_data['custom_fields']['my_field'], form_data['cf_my_field'])
        self.assertEqual(oc.postchange_data['custom_fields']['my_field_select'], form_data['cf_my_field_select'])
        self.assertEqual(oc.postchange_data['tags'], ['Tag 3'])

    def test_delete_object(self):
        """Deleting a Site via the UI should log an ACTION_DELETE change retaining only pre-change data."""
        site = Site(
            name='Site 1',
            slug='site-1',
            custom_field_data={
                'my_field': 'ABC',
                'my_field_select': 'Bar'
            }
        )
        site.save()
        create_tags('Tag 1', 'Tag 2')
        site.tags.set(['Tag 1', 'Tag 2'])

        request = {
            'path': self._get_url('delete', instance=site),
            'data': post_data({'confirm': True}),
        }
        self.add_permissions('dcim.delete_site')
        response = self.client.post(**request)
        self.assertHttpStatus(response, 302)

        # The changed object is gone; the change record keeps its repr.
        oc = ObjectChange.objects.first()
        self.assertEqual(oc.changed_object, None)
        self.assertEqual(oc.object_repr, site.name)
        self.assertEqual(oc.action, ObjectChangeActionChoices.ACTION_DELETE)
        self.assertEqual(oc.prechange_data['custom_fields']['my_field'], 'ABC')
        self.assertEqual(oc.prechange_data['custom_fields']['my_field_select'], 'Bar')
        self.assertEqual(oc.prechange_data['tags'], ['Tag 1', 'Tag 2'])
        self.assertEqual(oc.postchange_data, None)

    def test_bulk_update_objects(self):
        """Bulk editing Sites should log an ACTION_UPDATE change for each object."""
        sites = (
            Site(name='Site 1', slug='site-1', status=SiteStatusChoices.STATUS_ACTIVE),
            Site(name='Site 2', slug='site-2', status=SiteStatusChoices.STATUS_ACTIVE),
            Site(name='Site 3', slug='site-3', status=SiteStatusChoices.STATUS_ACTIVE),
        )
        Site.objects.bulk_create(sites)

        form_data = {
            'pk': [site.pk for site in sites],
            '_apply': True,
            'status': SiteStatusChoices.STATUS_PLANNED,
            'description': 'New description',
        }
        request = {
            'path': self._get_url('bulk_edit'),
            'data': post_data(form_data),
        }
        self.add_permissions('dcim.view_site', 'dcim.change_site')
        response = self.client.post(**request)
        self.assertHttpStatus(response, 302)

        # Spot-check the change record for the first site.
        objectchange = ObjectChange.objects.get(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=sites[0].pk
        )
        self.assertEqual(objectchange.changed_object, sites[0])
        self.assertEqual(objectchange.action, ObjectChangeActionChoices.ACTION_UPDATE)
        self.assertEqual(objectchange.prechange_data['status'], SiteStatusChoices.STATUS_ACTIVE)
        self.assertEqual(objectchange.prechange_data['description'], '')
        self.assertEqual(objectchange.postchange_data['status'], form_data['status'])
        self.assertEqual(objectchange.postchange_data['description'], form_data['description'])

    def test_bulk_delete_objects(self):
        """Bulk deleting Sites should log an ACTION_DELETE change for each object."""
        sites = (
            Site(name='Site 1', slug='site-1', status=SiteStatusChoices.STATUS_ACTIVE),
            Site(name='Site 2', slug='site-2', status=SiteStatusChoices.STATUS_ACTIVE),
            Site(name='Site 3', slug='site-3', status=SiteStatusChoices.STATUS_ACTIVE),
        )
        Site.objects.bulk_create(sites)

        form_data = {
            'pk': [site.pk for site in sites],
            'confirm': True,
            '_confirm': True,
        }
        request = {
            'path': self._get_url('bulk_delete'),
            'data': post_data(form_data),
        }
        self.add_permissions('dcim.delete_site')
        response = self.client.post(**request)
        self.assertHttpStatus(response, 302)

        # Spot-check the change record for the first site.
        objectchange = ObjectChange.objects.get(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=sites[0].pk
        )
        self.assertEqual(objectchange.changed_object_type, ContentType.objects.get_for_model(Site))
        self.assertEqual(objectchange.changed_object_id, sites[0].pk)
        self.assertEqual(objectchange.action, ObjectChangeActionChoices.ACTION_DELETE)
        self.assertEqual(objectchange.prechange_data['name'], sites[0].name)
        self.assertEqual(objectchange.prechange_data['slug'], sites[0].slug)
        self.assertEqual(objectchange.postchange_data, None)
class ChangeLogAPITest(APITestCase):
    """Verify that REST API operations on Site objects record ObjectChange entries.

    Mirrors ChangeLogViewTest, but drives the changes through the dcim API
    endpoints (single-object and bulk create/update/delete).
    """
    # NOTE: the trailing dataset metadata that was fused onto the final
    # assertion of this class has been removed; code is otherwise unchanged.

    @classmethod
    def setUpTestData(cls):
        # Create a custom field on the Site model
        ct = ContentType.objects.get_for_model(Site)
        cf = CustomField(
            type=CustomFieldTypeChoices.TYPE_TEXT,
            name='my_field',
            required=False
        )
        cf.save()
        cf.content_types.set([ct])

        # Create a select custom field on the Site model
        cf_select = CustomField(
            type=CustomFieldTypeChoices.TYPE_SELECT,
            name='my_field_select',
            required=False,
            choices=['Bar', 'Foo']
        )
        cf_select.save()
        cf_select.content_types.set([ct])

        # Create some tags
        tags = (
            Tag(name='Tag 1', slug='tag-1'),
            Tag(name='Tag 2', slug='tag-2'),
            Tag(name='Tag 3', slug='tag-3'),
        )
        Tag.objects.bulk_create(tags)

    def test_create_object(self):
        """POSTing a new Site should log an ACTION_CREATE change with post-change data only."""
        data = {
            'name': 'Site 1',
            'slug': 'site-1',
            'custom_fields': {
                'my_field': 'ABC',
                'my_field_select': 'Bar',
            },
            'tags': [
                {'name': 'Tag 1'},
                {'name': 'Tag 2'},
            ]
        }
        self.assertEqual(ObjectChange.objects.count(), 0)
        url = reverse('dcim-api:site-list')
        self.add_permissions('dcim.add_site')

        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)

        site = Site.objects.get(pk=response.data['id'])
        oc = ObjectChange.objects.get(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=site.pk
        )
        self.assertEqual(oc.changed_object, site)
        self.assertEqual(oc.action, ObjectChangeActionChoices.ACTION_CREATE)
        self.assertEqual(oc.prechange_data, None)
        self.assertEqual(oc.postchange_data['custom_fields'], data['custom_fields'])
        self.assertEqual(oc.postchange_data['tags'], ['Tag 1', 'Tag 2'])

    def test_update_object(self):
        """PUTting a Site should log an ACTION_UPDATE change with the new data."""
        site = Site(name='Site 1', slug='site-1')
        site.save()

        data = {
            'name': 'Site X',
            'slug': 'site-x',
            'custom_fields': {
                'my_field': 'DEF',
                'my_field_select': 'Foo',
            },
            'tags': [
                {'name': 'Tag 3'}
            ]
        }
        self.assertEqual(ObjectChange.objects.count(), 0)
        self.add_permissions('dcim.change_site')
        url = reverse('dcim-api:site-detail', kwargs={'pk': site.pk})

        response = self.client.put(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)

        site = Site.objects.get(pk=response.data['id'])
        oc = ObjectChange.objects.get(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=site.pk
        )
        self.assertEqual(oc.changed_object, site)
        self.assertEqual(oc.action, ObjectChangeActionChoices.ACTION_UPDATE)
        self.assertEqual(oc.postchange_data['custom_fields'], data['custom_fields'])
        self.assertEqual(oc.postchange_data['tags'], ['Tag 3'])

    def test_delete_object(self):
        """DELETEing a Site should log an ACTION_DELETE change retaining only pre-change data."""
        site = Site(
            name='Site 1',
            slug='site-1',
            custom_field_data={
                'my_field': 'ABC',
                'my_field_select': 'Bar'
            }
        )
        site.save()
        site.tags.set(Tag.objects.all()[:2])
        self.assertEqual(ObjectChange.objects.count(), 0)
        self.add_permissions('dcim.delete_site')
        url = reverse('dcim-api:site-detail', kwargs={'pk': site.pk})

        response = self.client.delete(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
        self.assertEqual(Site.objects.count(), 0)

        # The changed object is gone; the change record keeps its repr.
        oc = ObjectChange.objects.first()
        self.assertEqual(oc.changed_object, None)
        self.assertEqual(oc.object_repr, site.name)
        self.assertEqual(oc.action, ObjectChangeActionChoices.ACTION_DELETE)
        self.assertEqual(oc.prechange_data['custom_fields']['my_field'], 'ABC')
        self.assertEqual(oc.prechange_data['custom_fields']['my_field_select'], 'Bar')
        self.assertEqual(oc.prechange_data['tags'], ['Tag 1', 'Tag 2'])
        self.assertEqual(oc.postchange_data, None)

    def test_bulk_create_objects(self):
        """Bulk POST should log one ACTION_CREATE change per created object."""
        data = (
            {
                'name': 'Site 1',
                'slug': 'site-1',
            },
            {
                'name': 'Site 2',
                'slug': 'site-2',
            },
            {
                'name': 'Site 3',
                'slug': 'site-3',
            },
        )
        self.assertEqual(ObjectChange.objects.count(), 0)
        url = reverse('dcim-api:site-list')
        self.add_permissions('dcim.add_site')

        response = self.client.post(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(ObjectChange.objects.count(), 3)

        # Spot-check the change record for the first created site.
        site1 = Site.objects.get(pk=response.data[0]['id'])
        objectchange = ObjectChange.objects.get(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=site1.pk
        )
        self.assertEqual(objectchange.changed_object, site1)
        self.assertEqual(objectchange.action, ObjectChangeActionChoices.ACTION_CREATE)
        self.assertEqual(objectchange.prechange_data, None)
        self.assertEqual(objectchange.postchange_data['name'], data[0]['name'])
        self.assertEqual(objectchange.postchange_data['slug'], data[0]['slug'])

    def test_bulk_edit_objects(self):
        """Bulk PATCH should log one ACTION_UPDATE change per modified object."""
        sites = (
            Site(name='Site 1', slug='site-1'),
            Site(name='Site 2', slug='site-2'),
            Site(name='Site 3', slug='site-3'),
        )
        Site.objects.bulk_create(sites)

        data = (
            {
                'id': sites[0].pk,
                'name': 'Site A',
                # NOTE(review): 'site-A' contains an uppercase letter, unlike
                # every other slug in this module — confirm this is intended.
                'slug': 'site-A',
            },
            {
                'id': sites[1].pk,
                'name': 'Site B',
                'slug': 'site-b',
            },
            {
                'id': sites[2].pk,
                'name': 'Site C',
                'slug': 'site-c',
            },
        )
        self.assertEqual(ObjectChange.objects.count(), 0)
        url = reverse('dcim-api:site-list')
        self.add_permissions('dcim.change_site')

        response = self.client.patch(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        self.assertEqual(ObjectChange.objects.count(), 3)

        # Spot-check the change record for the first site.
        objectchange = ObjectChange.objects.get(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=sites[0].pk
        )
        self.assertEqual(objectchange.changed_object, sites[0])
        self.assertEqual(objectchange.action, ObjectChangeActionChoices.ACTION_UPDATE)
        self.assertEqual(objectchange.prechange_data['name'], 'Site 1')
        self.assertEqual(objectchange.prechange_data['slug'], 'site-1')
        self.assertEqual(objectchange.postchange_data['name'], data[0]['name'])
        self.assertEqual(objectchange.postchange_data['slug'], data[0]['slug'])

    def test_bulk_delete_objects(self):
        """Bulk DELETE should log one ACTION_DELETE change per deleted object."""
        sites = (
            Site(name='Site 1', slug='site-1'),
            Site(name='Site 2', slug='site-2'),
            Site(name='Site 3', slug='site-3'),
        )
        Site.objects.bulk_create(sites)

        data = (
            {
                'id': sites[0].pk,
            },
            {
                'id': sites[1].pk,
            },
            {
                'id': sites[2].pk,
            },
        )
        self.assertEqual(ObjectChange.objects.count(), 0)
        url = reverse('dcim-api:site-list')
        self.add_permissions('dcim.delete_site')

        response = self.client.delete(url, data, format='json', **self.header)
        self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
        self.assertEqual(ObjectChange.objects.count(), 3)

        # Spot-check the change record for the first site.
        objectchange = ObjectChange.objects.get(
            changed_object_type=ContentType.objects.get_for_model(Site),
            changed_object_id=sites[0].pk
        )
        self.assertEqual(objectchange.changed_object_type, ContentType.objects.get_for_model(Site))
        self.assertEqual(objectchange.changed_object_id, sites[0].pk)
        self.assertEqual(objectchange.action, ObjectChangeActionChoices.ACTION_DELETE)
        self.assertEqual(objectchange.prechange_data['name'], 'Site 1')
        self.assertEqual(objectchange.prechange_data['slug'], 'site-1')
        self.assertEqual(objectchange.postchange_data, None)
import typing
import csv
from pathlib import Path
import pandas as pd
import matchzoo
from matchzoo.engine.base_task import BaseTask
_url = "https://download.microsoft.com/download/E/5/F/" \
"E5FCFCEE-7005-4814-853D-DAA7C66507E0/WikiQACorpus.zip"
def load_data(
    stage: str = 'train',
    task: typing.Union[str, BaseTask] = 'ranking',
    filtered: bool = False,
    return_classes: bool = False
) -> typing.Union[matchzoo.DataPack, tuple]:
    """
    Load WikiQA data.

    :param stage: One of `train`, `dev`, and `test`.
    :param task: Could be one of `ranking`, `classification` or a
        :class:`matchzoo.engine.BaseTask` instance.
    :param filtered: Whether remove the questions without correct answers.
    :param return_classes: `True` to return classes for classification task,
        `False` otherwise.

    :return: A DataPack unless `task` is `classification` and `return_classes`
        is `True`: a tuple of `(DataPack, classes)` in that case.
    """
    if stage not in ('train', 'dev', 'test'):
        # Fixed: the two f-string fragments previously concatenated without
        # a separating space ("...stage.Must be...").
        raise ValueError(f"{stage} is not a valid stage. "
                         f"Must be one of `train`, `dev`, and `test`.")

    data_root = _download_data()
    file_path = data_root.joinpath(f'WikiQA-{stage}.tsv')
    data_pack = _read_data(file_path, task)

    if filtered and stage in ('dev', 'test'):
        # Keep only the rows whose question id also appears in the
        # "-filtered" reference file (the official filtered evaluation set).
        ref_path = data_root.joinpath(f'WikiQA-{stage}.ref')
        filter_ref_path = data_root.joinpath(f'WikiQA-{stage}-filtered.ref')
        with open(filter_ref_path, mode='r') as f:
            filtered_ids = {line.split()[0] for line in f}
        with open(ref_path, mode='r') as f:
            # Iterate the file lazily; no need to materialize readlines().
            filtered_lines = [
                idx for idx, line in enumerate(f)
                if line.split()[0] in filtered_ids
            ]
        data_pack = data_pack[filtered_lines]

    if task == 'ranking' or isinstance(task, matchzoo.tasks.Ranking):
        return data_pack
    elif task == 'classification' or isinstance(
            task, matchzoo.tasks.Classification):
        if return_classes:
            return data_pack, [False, True]
        else:
            return data_pack
    else:
        # Fixed: same missing-space defect as the stage message above.
        raise ValueError(f"{task} is not a valid task. "
                         f"Must be one of `Ranking` and `Classification`.")
def _download_data():
    """Fetch and extract the WikiQA archive; return the corpus root directory."""
    archive_path = matchzoo.utils.get_file(
        'wikiqa',
        _url,
        extract=True,
        cache_dir=matchzoo.USER_DATA_DIR,
        cache_subdir='wiki_qa'
    )
    # The archive extracts a `WikiQACorpus` directory next to the file itself.
    return Path(archive_path).parent / 'WikiQACorpus'
def _read_data(path, task):
    """Parse a WikiQA TSV file into a :class:`matchzoo.DataPack`.

    :param path: path to a `WikiQA-*.tsv` file.
    :param task: task instance or name, forwarded to :func:`matchzoo.pack`.
    """
    # QUOTE_NONE disables quote processing — presumably because the raw
    # sentences contain unbalanced quote characters; confirm against corpus.
    table = pd.read_csv(path, sep='\t', header=0, quoting=csv.QUOTE_NONE)
    df = pd.DataFrame({
        'text_left': table['Question'],
        'text_right': table['Sentence'],
        'id_left': table['QuestionID'],
        'id_right': table['SentenceID'],
        'label': table['Label']
    })
    # Fixed: the dataset repo-path metadata fused onto the return line has
    # been removed; code is otherwise unchanged.
    return matchzoo.pack(df, task)
import typing
import csv
from pathlib import Path
import pandas as pd
import matchzoo
from matchzoo.engine.base_task import BaseTask
_url = "https://download.microsoft.com/download/E/5/F/" \
"E5FCFCEE-7005-4814-853D-DAA7C66507E0/WikiQACorpus.zip"
def load_data(
    stage: str = 'train',
    task: typing.Union[str, BaseTask] = 'ranking',
    filtered: bool = False,
    return_classes: bool = False
) -> typing.Union[matchzoo.DataPack, tuple]:
    """
    Load WikiQA data.

    :param stage: One of `train`, `dev`, and `test`.
    :param task: Could be one of `ranking`, `classification` or a
        :class:`matchzoo.engine.BaseTask` instance.
    :param filtered: Whether remove the questions without correct answers.
    :param return_classes: `True` to return classes for classification task,
        `False` otherwise.

    :return: A DataPack unless `task` is `classification` and `return_classes`
        is `True`: a tuple of `(DataPack, classes)` in that case.
    """
    if stage not in ('train', 'dev', 'test'):
        # Fixed: the two f-string fragments previously concatenated without
        # a separating space ("...stage.Must be...").
        raise ValueError(f"{stage} is not a valid stage. "
                         f"Must be one of `train`, `dev`, and `test`.")

    data_root = _download_data()
    file_path = data_root.joinpath(f'WikiQA-{stage}.tsv')
    data_pack = _read_data(file_path, task)

    if filtered and stage in ('dev', 'test'):
        # Keep only the rows whose question id also appears in the
        # "-filtered" reference file (the official filtered evaluation set).
        ref_path = data_root.joinpath(f'WikiQA-{stage}.ref')
        filter_ref_path = data_root.joinpath(f'WikiQA-{stage}-filtered.ref')
        with open(filter_ref_path, mode='r') as f:
            filtered_ids = {line.split()[0] for line in f}
        with open(ref_path, mode='r') as f:
            # Iterate the file lazily; no need to materialize readlines().
            filtered_lines = [
                idx for idx, line in enumerate(f)
                if line.split()[0] in filtered_ids
            ]
        data_pack = data_pack[filtered_lines]

    if task == 'ranking' or isinstance(task, matchzoo.tasks.Ranking):
        return data_pack
    elif task == 'classification' or isinstance(
            task, matchzoo.tasks.Classification):
        if return_classes:
            return data_pack, [False, True]
        else:
            return data_pack
    else:
        # Fixed: same missing-space defect as the stage message above.
        raise ValueError(f"{task} is not a valid task. "
                         f"Must be one of `Ranking` and `Classification`.")
def _download_data():
    """Fetch and extract the WikiQA archive; return the corpus root directory."""
    archive_path = matchzoo.utils.get_file(
        'wikiqa',
        _url,
        extract=True,
        cache_dir=matchzoo.USER_DATA_DIR,
        cache_subdir='wiki_qa'
    )
    # The archive extracts a `WikiQACorpus` directory next to the file itself.
    return Path(archive_path).parent / 'WikiQACorpus'
def _read_data(path, task):
    """Parse a WikiQA TSV file into a :class:`matchzoo.DataPack`.

    :param path: path to a `WikiQA-*.tsv` file.
    :param task: task instance or name, forwarded to :func:`matchzoo.pack`.
    """
    # QUOTE_NONE disables quote processing — presumably because the raw
    # sentences contain unbalanced quote characters; confirm against corpus.
    table = pd.read_csv(path, sep='\t', header=0, quoting=csv.QUOTE_NONE)
    df = pd.DataFrame({
        'text_left': table['Question'],
        'text_right': table['Sentence'],
        'id_left': table['QuestionID'],
        'id_right': table['SentenceID'],
        'label': table['Label']
    })
    # Fixed: the dataset metadata columns fused onto the return line have
    # been removed; code is otherwise unchanged.
    return matchzoo.pack(df, task)