# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# StreamOnDemand-PureITA / XBMC Plugin
# Canal para italiafilmvideohd
# http://www.mimediacenter.info/foro/viewtopic.php?f=36&t=7808
# ------------------------------------------------------------
import base64
import re
import urlparse
from core import config
from core import httptools
from core import logger
from core import scrapertools
from core.item import Item
from core.tmdb import infoSod
from servers import servertools
__channel__ = "italiafilmvideohd"
host = "https://italiafilm.network/"
headers = [['User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:44.0) Gecko/20100101 Firefox/44.0'],
['Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'],
['Accept-Encoding', 'gzip, deflate'],
['Referer', host],
['Cache-Control', 'max-age=0']]
def isGeneric():
return True
def mainlist(item):
logger.info("[italiafilmvideohd.py] mainlist")
itemlist = [
Item(channel=__channel__,
title="[COLOR azure]Film[COLOR orange] - Al Cinema[/COLOR]",
action="fichas",
url=host + "/cinema/",
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/popcorn_cinema_P.png"),
Item(channel=__channel__,
title="[COLOR azure]Film[COLOR orange] - Novita'[/COLOR]",
action="fichas",
url=host + "/film-hd/",
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/movie_new_P.png"),
Item(channel=__channel__,
title="[COLOR azure]Film[COLOR orange] - HD[/COLOR]",
action="fichas",
url=host + "/nuove-uscite/",
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/movie_new_P.png"),
Item(channel=__channel__,
title="[COLOR azure]Film[COLOR orange] - Categorie[/COLOR]",
action="genere",
url=host,
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/genres_P.png"),
Item(channel=__channel__,
title="[COLOR azure]Serie TV [COLOR orange]- Aggiornate[/COLOR]",
action="fichas_tv",
url=host + "/serie-tv-hd/",
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/tv_serie_P.png"),
Item(channel=__channel__,
title="[COLOR azure]Film & Serie TV [COLOR orange]- Popolari[/COLOR]",
action="fichas",
url=host + "/film-piu-popolari/",
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/tv_serie_P.png"),
Item(channel=__channel__,
title="[COLOR azure]Film & Serie TV [COLOR orange]- Piu' Votati[/COLOR]",
action="fichas",
url=host + "/film-piu-votati/",
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/tv_serie_P.png"),
Item(channel=__channel__,
title="[COLOR orange]Cerca...[/COLOR]",
action="search",
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/search_P.png"),
Item(channel=__channel__,
title="[COLOR orange]Cerca Serie TV...[/COLOR]",
action="search",
extra="serie",
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/search_P.png")]
return itemlist
# ===================================================================================================================================================
def search(item, texto):
logger.info("[italiafilmvideohd.py] " + item.url + " search " + texto)
item.url = host + "/?s=" + texto
try:
return fichas(item)
    # Catch the exception so the global search is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
# ===================================================================================================================================================
def genere(item):
logger.info("[italiafilmvideohd.py] genere")
itemlist = []
data = scrapertools.anti_cloudflare(item.url, headers)
patron = '<div class="sub_title">Genere</div>(.+?)</div>'
data = scrapertools.find_single_match(data, patron)
patron = '<li>.*?'
patron += 'href="([^"]+)".*?'
patron += '<i>([^"]+)</i>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapedtitle.replace('&', '-')
itemlist.append(
Item(channel=__channel__,
action="fichas",
title=scrapedtitle,
url=scrapedurl,
thumbnail='https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/genre_P.png',
folder=True))
return itemlist
# ===================================================================================================================================================
def fichas(item):
logger.info("[italiafilmvideohd.py] fichas")
itemlist = []
    # Download the page
data = scrapertools.anti_cloudflare(item.url, headers)
    # fix - quality
# ------------------------------------------------
cookies = ""
matches = re.compile('(.italiafilm.video.*?)\n', re.DOTALL).findall(config.get_cookie_data())
for cookie in matches:
name = cookie.split('\t')[5]
value = cookie.split('\t')[6]
cookies += name + "=" + value + ";"
headers.append(['Cookie', cookies[:-1]])
import urllib
_headers = urllib.urlencode(dict(headers))
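    # Note (assumption, not stated in the original): Kodi/XBMC accepts image URLs in the
    # form "url|urlencoded-headers", so the Cloudflare cookies collected above are appended
    # to each thumbnail URL below to let the artwork load from the protected host.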
# ------------------------------------------------
patron = '<li class="item">.*?href="([^"]+)".*?'
patron += 'title="([^"]+)".*?<img src="([^"]+)".*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scraped_2, scrapedtitle, scrapedthumbnail in matches:
scrapedurl = scraped_2
if "serie" in scraped_2:
scrapedtitle= scrapedtitle + " [COLOR orange](Serie TV)[/COLOR]"
title = scrapertools.decodeHtmlentities(scrapedtitle)
scrapedthumbnail += "|" + _headers
itemlist.append(infoSod(
Item(channel=__channel__,
action="findvideos_all" if not "serie" in scrapedurl else "episodios",
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=title,
show=scrapedtitle), tipo='movie' if not "serie" in scrapedurl else "tv"))
    # Pagination
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"\s*><span aria-hidden="true">»')
if next_page != "":
itemlist.append(
Item(channel=__channel__,
action="fichas",
title="[COLOR orange]Successivi >>[/COLOR]",
url=next_page,
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"))
return itemlist
# ===================================================================================================================================================
def fichas_tv(item):
logger.info("[seriehd.py] fichas")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = '<a class="poster" href="([^"]+)" title="(.*?)">\s*'
patron += '<img src="([^"]+)" alt=".*?" />'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedthumbnail = httptools.get_url_headers(scrapedthumbnail)
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
itemlist.append(infoSod(
Item(channel=__channel__,
action="episodios",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
fulltitle=scrapedtitle,
url=scrapedurl,
show=scrapedtitle,
thumbnail=scrapedthumbnail), tipo='tv'))
patron = '<a href="([^"]+)"\s*><span aria-hidden="true">»'
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
itemlist.append(
Item(channel=__channel__,
action="fichas_tv",
title="[COLOR orange]Successivi >>[/COLOR]",
url=next_page,
thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"))
return itemlist
# ===================================================================================================================================================
def episodios(item):
logger.info("[seriehd.py] episodios")
itemlist = []
data = httptools.downloadpage(item.url, headers=headers).data
patron = r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">'
url = scrapertools.find_single_match(data, patron).replace("?italiafilm", "")
data = httptools.downloadpage(url).data.replace('\n', '').replace(' class="active"', '')
section_stagione = scrapertools.find_single_match(data, '<h3>STAGIONE</h3>\s*<ul>(.*?)</ul>')
patron = '<li[^>]+><a href="([^"]+)">(\d+)<'
seasons = re.compile(patron, re.DOTALL).findall(section_stagione)
for scrapedseason_url, scrapedseason in seasons:
season_url = urlparse.urljoin(url, scrapedseason_url)
data = httptools.downloadpage(season_url).data.replace('\n', '').replace(' class="active"', '')
section_episodio = scrapertools.find_single_match(data, '<h3>EPISODIO</h3>\s*<ul>(.*?)</ul>')
patron = '<li><a href="([^"]+)">(\d+)<'
episodes = re.compile(patron, re.DOTALL).findall(section_episodio)
for scrapedepisode_url, scrapedepisode in episodes:
episode_url = urlparse.urljoin(url, scrapedepisode_url)
title = scrapedseason + "x" + scrapedepisode.zfill(2)
itemlist.append(
Item(channel=__channel__,
action="findvideos_tv",
contentType="episode",
title=title + " - " + item.show,
url=episode_url,
fulltitle=title,
show=item.show,
plot=item.plot,
thumbnail=item.thumbnail))
return itemlist
# ===================================================================================================================================================
def findvideos_tv(item):
logger.info("[seriehd.py] findvideos")
itemlist = []
    # Download the page
data = httptools.downloadpage(item.url, headers=headers).data.replace('\n', '')
patron = r'<iframe id="iframeVid" width=".*?" height=".*?" src="([^"]+)" allowfullscreen=""></iframe>'
url = scrapertools.find_single_match(data, patron)
if not url.startswith("https:"):
url = "https:" + url
if 'hdpass' in url:
data = httptools.downloadpage(url, headers=headers).data
start = data.find('<div class="row mobileRes">')
end = data.find('<div id="playerFront">', start)
data = data[start:end]
patron_res = '<div class="row mobileRes">(.*?)</div>'
patron_mir = '<div class="row mobileMirrs">(.*?)</div>'
patron_media = r'<input type="hidden" name="urlEmbed" data-mirror="([^"]+)" id="urlEmbed" value="([^"]+)".*?>'
res = scrapertools.find_single_match(data, patron_res)
urls = []
for res_url, res_video in scrapertools.find_multiple_matches(res, '<option.*?value="([^"]+?)">([^<]+?)</option>'):
data = httptools.downloadpage(urlparse.urljoin(url, res_url), headers=headers).data.replace('\n', '')
mir = scrapertools.find_single_match(data, patron_mir)
            for mir_url in scrapertools.find_multiple_matches(mir, '<option.*?value="([^"]+?)">[^<]+?</option>'):
data = httptools.downloadpage(urlparse.urljoin(url, mir_url), headers=headers).data.replace('\n', '')
for media_label, media_url in re.compile(patron_media).findall(data):
urls.append(url_decode(media_url))
itemlist = servertools.find_video_items(data='\n'.join(urls))
for videoitem in itemlist:
videoitem.title = item.title + "[COLOR orange]" + videoitem.title + "[/COLOR]"
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = __channel__
return itemlist
# ===================================================================================================================================================
def findvideos(item):
logger.info("[italiafilmvideohd.py] findvideos")
itemlist = []
    # Download the page
data = scrapertools.anti_cloudflare(item.url, headers).replace('\n', '')
patron = r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">'
url = scrapertools.find_single_match(data, patron).replace("?italiafilm", "")
if 'hdpass' in url:
data = scrapertools.cache_page(url, headers=headers)
start = data.find('<div class="row mobileRes">')
end = data.find('<div id="playerFront">', start)
data = data[start:end]
patron_res = r'<div class="row mobileRes">([\s\S]*)<\/div>'
patron_mir = r'<div class="row mobileMirrs">([\s\S]*)<\/div>'
patron_media = r'<input type="hidden" name="urlEmbed" data-mirror="([^"]+)" id="urlEmbed" value="([^"]+)"[^>]+>'
res = scrapertools.find_single_match(data, patron_res)
urls = []
for res_url, res_video in scrapertools.find_multiple_matches(res, '<option.*?value="([^"]+?)">([^<]+?)</option>'):
data = scrapertools.cache_page(urlparse.urljoin(url, res_url), headers=headers).replace('\n', '')
mir = scrapertools.find_single_match(data, patron_mir)
            for mir_url in scrapertools.find_multiple_matches(mir, '<option.*?value="([^"]+?)">[^<]+?</option>'):
data = scrapertools.cache_page(urlparse.urljoin(url, mir_url), headers=headers).replace('\n', '')
for media_label, media_url in re.compile(patron_media).findall(data):
urls.append(url_decode(media_url))
itemlist = servertools.find_video_items(data='\n'.join(urls))
for videoitem in itemlist:
videoitem.title = item.title + videoitem.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = __channel__
return itemlist
# ===================================================================================================================================================
def url_decode(url_enc):
    # Undo the site's obfuscation: for odd-length payloads set the last character aside,
    # then swap the two halves, reverse the result and base64-decode it.
    length = len(url_enc)
    if length % 2 == 0:
        half = length // 2
        first = url_enc[0:half]
        last = url_enc[half:length]
        url_enc = last + first
        reverse = url_enc[::-1]
        return base64.b64decode(reverse)
    last_car = url_enc[length - 1]
    url_enc = url_enc[:length - 1].strip()
    len1 = len(url_enc)
    half = len1 // 2
    first = url_enc[0:half]
    last = url_enc[half:len1]
    url_enc = last + first
    reverse = url_enc[::-1]
    reverse = reverse + last_car
    return base64.b64decode(reverse)
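# Illustrative sketch (not part of the original channel): url_decode above appears to undo
# a simple obfuscation of the embed URLs. Assuming that reading is correct, the inverse
# transform for even-length payloads would look like the hypothetical helper below, which
# can be used to unit-test url_decode in isolation:
#
#   def _url_encode_sketch(clear_url):
#       encoded = base64.b64encode(clear_url)   # padded base64 is always even-length
#       reverse = encoded[::-1]                 # reverse it
#       half = len(reverse) // 2
#       return reverse[half:] + reverse[:half]  # swap halves; url_decode recovers clear_url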
# ===================================================================================================================================================
def findvideos_all(item):
logger.info("[streamondemand-pureita italiafilmvideohd] findvideos_all")
itemlist = []
    # Download the page
data = httptools.downloadpage(item.url, headers=headers).data
patron = '<iframe width=".*?" height=".*?" src="([^"]+)" width=".*?" height=".*?" frameborder=".*?" scrolling=".*?" allowfullscreen /></iframe></div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl in matches:
data = httptools.downloadpage(scrapedurl).data
videos = servertools.find_video_items(data=data)
for video in videos:
itemlist.append(video)
for videoitem in itemlist:
servername = re.sub(r'[-\[\]\s]+', '', videoitem.title)
videoitem.title = "[[COLOR orange]" + servername.capitalize() + "[/COLOR]] " + item.title
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = __channel__
return itemlist
"""Tests Tier Data"""
import numpy as np
from covid.data import TierData
def test_url_tier_data():
config = {
"AreaCodeData": {
"input": "json",
"address": "https://services1.arcgis.com/ESMARspQHYMw9BZ9/arcgis/rest/services/LAD_APR_2019_UK_NC/FeatureServer/0/query?where=1%3D1&outFields=LAD19CD,LAD19NM&returnGeometry=false&returnDistinctValues=true&orderByFields=LAD19CD&outSR=4326&f=json",
"format": "ons",
"output": "processed_data/processed_lad19cd.csv",
"regions": ["E"],
},
"TierData": {
"input": "api",
"address": None,
"format": "api",
},
"GenerateOutput": {
"storeInputs": True,
"scrapedDataDir": "scraped_data",
"storeProcessedInputs": True,
},
"Global": {
"prependID": False,
"prependDate": False,
"inference_period": ["2020-10-12", "2021-01-04"],
},
}
xarr = TierData.process(config)
print("xarr", xarr)
np.testing.assert_array_equal(xarr.shape, [315, 84, 6])
import pyautogui
import time
import pyperclip
import pandas as pd
#pyautogui.displayMousePosition()
pyautogui.PAUSE = 1
# Step 1
# Open a new browser tab
time.sleep(2)
pyautogui.hotkey('ctrl', 't')
# Go to the system link
link = "https://drive.google.com/drive/folders/149xknr9JvrlEnhNWO49zPcw0PW5icxga"
pyperclip.copy(link)
pyautogui.hotkey('ctrl', 'v')
pyautogui.press('enter')
# Step 2
time.sleep(5)
pyautogui.click(389, 270, clicks = 2)
time.sleep(2)
# Step 3
pyautogui.click(401, 337)  # click the file
pyautogui.click(1713, 157)  # click the three-dot menu
pyautogui.click(1525, 561)  # click "download"
time.sleep(10)
# Step 4
tabela = pd.read_excel(r'C:\Users\Pichau\Downloads\Vendas - Dez.xlsx')
faturamento = tabela['Valor Final'].sum()
quantidade = tabela['Quantidade'].sum()
# Step 5
time.sleep(2)
pyautogui.hotkey('ctrl', 't')
# Go to the system link
link = "https://mail.google.com/mail/u/0/#inbox"
pyperclip.copy(link)
pyautogui.hotkey('ctrl', 'v')
pyautogui.press('enter')
time.sleep(7)
pyautogui.click(33, 170)
pyautogui.write('gustavo.ibis.gb+diretoria@gmail.com')
pyautogui.press('tab')
pyautogui.press('tab')
assunto = 'Relatório de Vendas'
pyperclip.copy(assunto)
pyautogui.hotkey('ctrl', 'v')
pyautogui.press('tab')
texto_email = f"""
Prezados, bom dia
O faturamento de ontem foi de: R${faturamento:,.2f}
A quantidade de produtos foi de: R${quantidade:,.2f}
Abs
"""
pyperclip.copy(texto_email)
pyautogui.hotkey('ctrl', 'v')
pyautogui.hotkey('ctrl', 'enter') | 1,550 | 716 |
import tkinter
import json
from math import *
from random import *
from tkinter import *
class brain_abstract():
    ''' One layer is a list of tuples, each tuple describing a neuron group:
    # number of neurons in the group,
    # plus (True) or minus (False) sign on the group's outputs,
    # activation function number for the group,
    # whether the neurons have a discrete (digital) output (True - yes),
    # tuple with the indices of the input groups this group is connected to,
    # length of the group's input vector,
    # normalisation coefficient for the group's output
    laysConfigs = [ [(15, True, 1, False, (0,), 25, 10), (5, True, 1, False, (1,), 6, 10)],
                    [( 7, True, 0, True, (0,1), 20, 10), (4, True, 1, False, (0,1), 20, 10)],
                    [( 5, True, 1, False, (0,1), 11, 5), (15, False, 0, False, (0,1), 11, 5)],
                    [( 6, True, 1, False, (0,1), 20, 10)],
                    [( 2, True, 1, True, (0,), 6, 0)] ]'''
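    # Worked reading of one tuple from the docstring above: (15, True, 1, False, (0,), 25, 10)
    # describes a group of 15 neurons with a positive output sign, using activation function #1
    # (the rational sigmoid), with analog (non-discrete) outputs, fed by input group 0,
    # expecting a 25-element input vector, with outputs normalised to the range 0..10.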
def __init__(self, laysConfigs = [], NNtemp = 70, cooldownTemp = 50):
self.lays = []
self.lay_counts = 0
self.NN_learning_temp = NNtemp
self.NN_cooldown_temp = cooldownTemp
if not laysConfigs:
            # One layer is a list of tuples: number of neurons in the group,
            # plus (True) or minus (False) sign on the group's outputs,
            # activation function number for the group,
            # whether the neurons have a discrete (digital) output (True - yes),
            # tuple with the indices of the input groups this group is connected to,
            # length of the group's input vector,
            # normalisation coefficient for the group's output
laysConfigs = [ [(10, True, 1, False, (0,), 25, 10), (7, False, 1, False, (0,), 25, 10), (3, True, 1, False, (1,), 3, 10)],
[(10, False, 0, True, (0,1),17, 0), (7, True, 1, True, (0,1,2), 20, 0)],
#[( 5, True, 1, False, (0,1), 14, 5), (15, False, 0, False, (0,1), 14, 5)],
[( 5, True, 1, False, (0,1), 17, 10)],
[( 2, True, 1, True, (0,), 5, 0)] ]
for lay_conf in laysConfigs:
self.lays.append(lay_abstact(lay_conf))
self.frozen_mind = {'scheme':laysConfigs, 'weights' :self.get_all_synapse_weight()}
def train(self, input = [], output = []):
lays_output = []
lays_output.append(input)
lay_input = input
for lay in self.lays:
lays_output.append( lay.get_excited(lay_input) )
lay_input = lays_output[-1]
desire_lay_output = output
i = len(self.lays)
j = 0
while i > 0:
lay_out = lays_output[i]
lay_in = lays_output[i-1]
lay_temp = self.NN_learning_temp - (j*self.NN_cooldown_temp)
changingNeuronsCount = round(self.lays[i-1].neuron_count * lay_temp / 100)
Mu = lay_temp / ( len(self.lays) * 100 )
changingNeuronsWeightCount = round(self.lays[i-2].neuron_count * (self.NN_learning_temp - (j*self.NN_cooldown_temp) ) / 100)
cool_inp = self.lays[i-1].fcingCooldown(lay_in, lay_out, desire_lay_output,
changingNeuronsCount, changingNeuronsWeightCount, Mu)
desire_lay_output = cool_inp
i-=1
j+=1
def get_err_out(self, input = [[],], output = [[],]):
return (1,1)
def guess(self, input = []):
lay_output = []
lay_input = input
for lay in self.lays:
lay_output = lay.get_excited(lay_input)
lay_input = lay_output
return lay_output
def learn(self, input = [], output = [], maxSteps = 3):
desire_lay_output = output
net_output = self.guess(input)
i = 0
while not self.isEqualOuts(net_output, desire_lay_output) and i < maxSteps:
self.train(input, output)
net_output = self.guess(input)
i+=1
def think(self):
pass
    def isEqualOuts(self, out1, out2):
        # Compare two nested output lists element by element.
        try:
            len1 = len(out1)
            i = 0
            while i < len1:
                len2 = len(out1[i])
                j = 0
                while j < len2:
                    if out1[i][j] != out2[i][j]:
                        return False
                    j+=1
                i+=1
        except (IndexError, TypeError):
            # Mismatched shapes (or non-indexable elements): treat the outputs as not equal.
            return False
        return True
def get_lay_ref(self, layNum = 0):
return self.lays[layNum]
def get_all_synapse_weight(self):
synapse_weight = []
for lay in self.lays:
for lay_group in lay.neuronGroups_list:
for neuron in lay_group:
synapse_weight.extend(neuron.get_weights())
return synapse_weight
def load_consciousness(self, consciousness = [0]*100):
neuron_weights = []
shift = 0
for lay in self.lays:
for lay_group in lay.neuronGroups_list:
for neuron in lay_group:
neuron_weights = consciousness[shift:shift+neuron.weightsCount]
neuron.set_weights(neuron_weights)
shift += neuron.weightsCount
def get_draw_scheme(self):
draw_scheme = []
for lay in self.lays:
draw_scheme.append(lay.get_draw_scheme())
return draw_scheme
def draw_brain_scheme(self, root_win = None, width = 800, height = 600):
worm_brain = self
brain_scheme_width = width
brain_scheme_height = height
if root_win == None:
self.scheme_window = Tk()
self.scheme_window.title("Brain scheme")
else:
self.scheme_window = Toplevel(root_win)
self.canvas_brain = Canvas(self.scheme_window, width=brain_scheme_width + 60, height=brain_scheme_height + 40,
bg='white')
self.canvas_brain.pack()
# draw_scheme pattern [[ (Ncount, sign, links), ],]
brain_draw_scheme = worm_brain.get_draw_scheme()
color_lay = self.get_color(200, 200, 200)[1]
color_pos_group = self.get_color(150, 250, 150)[1]
color_neg_group = self.get_color(250, 150, 150)[1]
color_digital_neuron = self.get_color(120, 110, 120)[1]
color_analog_neuron = self.get_color(130, 170, 130)[1]
color_input = self.get_color(250, 250, 60)[1]
color_output = self.get_color(60, 250, 250)[1]
color_inside = self.get_color(250, 200, 120)[1]
lay_width = round(brain_scheme_width / len(brain_draw_scheme))
lay_height = brain_scheme_height
layNum = 0
dec_lay_height = brain_scheme_height / (2.5 * len(brain_draw_scheme) )
group_out_cord = []
prev_lay_group_out_cord = []
for lay_scheme in brain_draw_scheme:
group_count = len(lay_scheme)
group_height = round(lay_height / group_count)
groups_heights = []
for group in lay_scheme:
groups_heights.append(group[0])
groups_heights = self.get_proportion(groups_heights)
max_group_height = max(groups_heights)
min_group_height = min(groups_heights)
while (max_group_height - min_group_height) > 2 * min_group_height:
minNum = groups_heights.index(min_group_height)
maxNum = groups_heights.index(max_group_height)
delta = groups_heights[maxNum] * 0.1
groups_heights[maxNum] -= delta
groups_heights[minNum] += delta
max_group_height = max(groups_heights)
min_group_height = min(groups_heights)
x_preset = 30 + layNum * lay_width
y_preset = 20 + layNum * dec_lay_height
lay_thicc = lay_width/3
self.canvas_brain.create_rectangle(x_preset+lay_thicc, y_preset, x_preset+lay_thicc*2,
lay_height-y_preset, fill=color_lay, outline=color_lay)
gr_bound = 10
lay_h = lay_height - 2 * y_preset
gr_preset = y_preset + gr_bound
gNum = 0
for group_h in groups_heights:
group_h *= lay_h
color_group = color_pos_group if lay_scheme[gNum][1] > 0 else color_neg_group
n_count = lay_scheme[gNum][0]
#lay
x_group_preset = x_preset + lay_thicc + gr_bound
self.canvas_brain.create_rectangle(x_group_preset,
gr_preset,
x_group_preset + lay_thicc - 2 * gr_bound,
group_h+gr_preset - 2 * gr_bound,
fill=color_group, outline=color_group)#"#000000")
#in
x_input_vec_preset = x_preset + lay_thicc/2
x_inp_vec_width = lay_thicc/10
self.canvas_brain.create_rectangle(x_input_vec_preset,
gr_preset,
x_input_vec_preset + x_inp_vec_width,
group_h + gr_preset - 2 * gr_bound,
fill=color_input, outline="#000000")
#in vector
weight_count = lay_scheme[gNum][4]
weight_height = (group_h - 2 * gr_bound)/weight_count
for i in range(0,weight_count):
self.canvas_brain.create_line(x_input_vec_preset,
gr_preset + i*weight_height,
x_input_vec_preset + x_inp_vec_width,
gr_preset + i*weight_height,
fill='#000000')
#in link
for link in lay_scheme[gNum][3]:
if brain_draw_scheme.index(lay_scheme) == 0:
break
cord = prev_lay_group_out_cord[link]
self.canvas_brain.create_line(cord[0],
cord[1],
x_input_vec_preset,
gr_preset + (group_h - 2 * gr_bound) / 2,
fill='#000000', arrow=LAST)
#out
x_output_vec_preset = x_input_vec_preset + 2* lay_thicc
x_out_vec_width = lay_thicc/10
self.canvas_brain.create_rectangle(x_output_vec_preset,
gr_preset,
x_output_vec_preset + x_out_vec_width,
group_h + gr_preset - 2 * gr_bound,
fill=color_output, outline="#000000")
group_out_cord.append((x_output_vec_preset + x_out_vec_width,
gr_preset + (group_h - 2 * gr_bound) / 2))
#out vector
out_height = (group_h - 2 * gr_bound)/n_count
for i in range(0,n_count):
self.canvas_brain.create_line(x_output_vec_preset,
gr_preset + i*out_height,
x_output_vec_preset + x_inp_vec_width,
gr_preset + i*out_height,
fill='#000000')
nr_preset = gr_preset
nr_bound_w = 4
nr_bound_h = 4
neuron_draw_size = (group_h - 2 * gr_bound) / n_count
neuron_area_width = lay_thicc - 2 * gr_bound
min_neuron_draw_size = (lay_thicc - 2 * gr_bound)/2
gr_h = (group_h - 2 * gr_bound)
column_count = 1
n_in_col = n_count
neuron_area = 0
down_shift = 0
if neuron_draw_size > neuron_area_width:
nr_bound_w = 4
neuron_draw_size = neuron_area_width - 2 * nr_bound_w
nr_bound_h = (gr_h - (neuron_draw_size * n_count)) / (2 * n_count)
elif neuron_draw_size < neuron_area_width:
if neuron_draw_size < min_neuron_draw_size:
neuron_area = sqrt((neuron_area_width * gr_h) / (n_count*1.5) )
column_count = ceil((n_count * neuron_area) / gr_h)
n_in_col = round(n_count / column_count)
column_count += 0 if n_count % column_count == 0 else 1
if neuron_area_width/column_count < neuron_area:
neuron_draw_size = neuron_area_width/column_count
column_count = ceil((n_count * neuron_draw_size) / gr_h)
n_in_col = round(n_count / column_count)
column_count += 0 if n_count % column_count == 0 else 1
else:
neuron_draw_size = neuron_area
nr_bound_w = (neuron_area_width - (neuron_draw_size * column_count)) / (column_count + 1)
nr_bound_h = (gr_h - (neuron_draw_size * n_in_col)) / (2 * n_in_col)
if nr_bound_w < 1:
nr_bound_w = 1
nr_bound_h = 1
neuron_draw_size = (neuron_area_width - (nr_bound_w * (column_count + 1))) / column_count
if neuron_draw_size < 2:
nr_bound_w = 1
nr_bound_h = 1
neuron_draw_size = 2
neuron_area = neuron_draw_size+nr_bound_w+nr_bound_h
n_count = round((neuron_area_width/neuron_area) * (gr_h/neuron_area))
column_count = ceil((n_count * neuron_area) / gr_h)
n_in_col = round(n_count / column_count)
n_count = column_count * n_in_col
down_shift = (gr_h - ((neuron_draw_size + 2*nr_bound_h) * (n_in_col)))/2
else:
nr_bound_h = neuron_draw_size*0.1
neuron_draw_size = neuron_draw_size - 2 * nr_bound_h
nr_bound_w = (neuron_area_width - neuron_draw_size ) / 2
colNum = 0
nrNum = 0
draw_neuron = 0
while draw_neuron < n_count:
nrNum+=1
if lay_scheme[gNum][2]:
color_neuron = color_digital_neuron
else:
color_neuron = color_analog_neuron
# neuron
self.canvas_brain.create_arc(x_group_preset + nr_bound_w + colNum * (neuron_draw_size + nr_bound_w),
nr_preset + down_shift + nr_bound_h,
x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + neuron_draw_size,
nr_preset + down_shift + neuron_draw_size + nr_bound_h,
start=1, extent=359, fill=color_neuron, outline=color_neuron)
if lay_scheme[gNum][2]:
#center
self.canvas_brain.create_rectangle(x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + neuron_draw_size / 3,
nr_preset + down_shift + nr_bound_h + neuron_draw_size / 3,
x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + 2 * neuron_draw_size / 3,
nr_preset + down_shift + 2 * neuron_draw_size / 3 + nr_bound_h,
fill=color_inside, outline=color_inside)
else:
#center
self.canvas_brain.create_arc(x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + neuron_draw_size / 3,
nr_preset + down_shift + nr_bound_h + neuron_draw_size / 3,
x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + 2 * neuron_draw_size / 3,
nr_preset + down_shift + 2 * neuron_draw_size / 3 + nr_bound_h,
start=1, extent=359, fill=color_inside, outline=color_inside)
self.canvas_brain.create_arc(x_group_preset + nr_bound_w + colNum * (neuron_draw_size + nr_bound_w) + neuron_draw_size / 3,
nr_preset + down_shift + nr_bound_h + neuron_draw_size / 3,
x_group_preset + nr_bound_w + colNum * (
neuron_draw_size + nr_bound_w) + 2 * neuron_draw_size / 3,
nr_preset + down_shift + 2 * neuron_draw_size / 3 + nr_bound_h,
start=1, extent=359, fill=color_inside, outline=color_inside)
if colNum == 0:
#input
self.canvas_brain.create_line(x_input_vec_preset + x_inp_vec_width,
gr_preset + (group_h - 2 * gr_bound) / 2,
x_group_preset,
nr_preset + down_shift + nr_bound_h + neuron_draw_size/2,
fill='#000000', arrow=LAST)
#output
self.canvas_brain.create_line(x_group_preset + lay_thicc - 2 * gr_bound,
nr_preset + down_shift + nr_bound_h + neuron_draw_size/2,
x_output_vec_preset,
nr_preset + down_shift + nr_bound_h + neuron_draw_size/2,
fill='#000000', arrow=LAST)
#self.canvas_brain.create_line(x_output_vec_preset,
# nr_preset,
# x_output_vec_preset + x_out_vec_width,
# nr_preset,
# fill='#000000')
nr_preset = nr_preset + neuron_draw_size + 2 * nr_bound_h
if nrNum == n_in_col:
colNum+=1
nrNum = 0
if colNum % 2 == 0:
nr_preset = gr_preset
else:
nr_preset = gr_preset
draw_neuron+=1
gr_preset = group_h+gr_preset
gNum+=1
layNum+=1
prev_lay_group_out_cord = group_out_cord
group_out_cord = []
self.scheme_window.mainloop()
def get_proportion(self, vector = []):
sum = 0
for el in vector:
sum += el
out_vector = []
for el in vector:
out_vector.append(el / sum)
return out_vector
def get_color(self, r=0, g=0, b=0):
clr = ((r * 1.0, g * 1.0, b * 1.0),
'#' + r.to_bytes(1, 'little').hex().__str__()
+ g.to_bytes(1, 'little').hex().__str__()
+ b.to_bytes(1, 'little').hex().__str__())
return clr
def frozed_mind(self):
build_scheme = []
for lay in self.lays:
build_scheme.append(lay.get_build_scheme())
consciousness = self.get_all_synapse_weight()
self.frozen_mind = {'scheme': build_scheme, 'weights': consciousness}
return self.frozen_mind
def unfrozed_mind(self, ice_piece):
self.__init__(laysConfigs = ice_piece['scheme'])
self.load_consciousness(consciousness = ice_piece['weights'])
def save_to_file(self, filename = 'frozen_mind.txt'):
self.frozed_mind()
f = open(filename, 'w')
f.write(json.dumps(self.frozen_mind))
f.close()
def load_from_file(self, filename = 'frozen_mind.txt'):
f = open(filename, 'r')
json_mind = f.read()
self.unfrozed_mind(ice_piece = json.loads(json_mind))
f.close()
def __gt__(self, other):
return self.NN_learning_temp > other.NN_learning_temp
def __lt__(self, other):
return self.NN_learning_temp < other.NN_learning_temp
def __ge__(self, other):
return self.NN_learning_temp >= other.NN_learning_temp
def __le__(self, other):
return self.NN_learning_temp <= other.NN_learning_temp
class lay_abstact():
    '''layConfig = [ (0, True, 0, (0,), 25, 0), ]
    A list of tuples: number of neurons in the group,
    # plus (True) or minus (False) sign on the group's outputs,
    # activation function number for the group,
    # whether the neurons have a discrete (digital) output (True - yes),
    # tuple with the indices of the input groups this group is connected to,
    # length of the group's input vector,
    # normalisation coefficient for the group's output'''
def __init__(self, layConfig = [ (0, True, 0, True, (0,), 25, 0), ]):
self.neuronGroups_count = len(layConfig)
self.neuronGroups_list = []
self.neuronGroups_inputs_link = []
self.groupsNormalization_coeff = []
self.neuron_count = 0
if layConfig[0][0] == 0:
pass
else:
for neuron_group in layConfig:
neuton_with_pos_out = neuron_group[1]
lay_temp = []
i = 0
while i < neuron_group[0]:
neuron = neuron_abstact(generateWeightsCount=neuron_group[5],
positiveOutput=neuton_with_pos_out,
funcNum=neuron_group[2],
digitalOut=neuron_group[3] )
lay_temp.append(neuron)
self.neuron_count+=1
i+=1
self.neuronGroups_list.append(lay_temp)
self.neuronGroups_inputs_link.append(neuron_group[4])
self.groupsNormalization_coeff.append(neuron_group[6])
def get_excited(self, inputsGoups = [[],]):
output_groups = []
i=0
while i<self.neuronGroups_count:
group_input = []
neurou_group_output = []
for inputgroupNum in self.neuronGroups_inputs_link[i]:
group_input.extend(inputsGoups[inputgroupNum])
for neuron in self.neuronGroups_list[i]:
neurou_group_output.append(neuron.spike( group_input ))
if self.groupsNormalization_coeff[i] != 0:
neurou_group_output = self.normalize_vector(neurou_group_output, self.groupsNormalization_coeff[i])
output_groups.append(neurou_group_output)
i+=1
return output_groups
def normalize_vector(self, inputVector = [], norm_coeff = 1):
min_val = min(inputVector)
max_val = max(inputVector)
delitel = max_val - min_val
normalOutputVector = []
if delitel == 0:
for x_val in inputVector:
norm_x_val = 0
normalOutputVector.append(norm_x_val)
else:
for x_val in inputVector:
norm_x_val = ( (x_val - min_val) * norm_coeff )/delitel
normalOutputVector.append(norm_x_val)
return normalOutputVector
def fcingCooldown(self, inputGroups = [[],], output = [[],], desire_out = [[],],
changingNeuronsCount = 1, changingNeuronsWeightCount = 1, Lwa = 1):
errOuts = self.get_err_out(output, desire_out)
errOuts.sort()
changing_errOuts = []
changing_neuron_weights_info = []
inputs = []
len_groupinput = []
i = 0
while i < self.neuronGroups_count:
group_input = []
for inputgroupNum in self.neuronGroups_inputs_link[i]:
group_input.extend(inputGroups[inputgroupNum])
inputs.append(group_input)
i+=1
i = 0
while i < changingNeuronsCount and i < len(errOuts):
changing_errOuts.append(errOuts[i])
i+=1
for err_out in changing_errOuts:
err_neuron = self.neuron_at(err_out[2])
err_neuron_vector = err_neuron.learning_spike(inputs[err_out[2][0]])
err_neuron_vector.sort()
i = 0
Nwa = Lwa
while i < changingNeuronsWeightCount:
                if err_out[1] == 1:  # the neuron's output needs to be increased
                    if err_neuron_vector[i][1] < err_out[1]:  # the weight sits on a negative edge
                        Nwa = - Lwa
                        self.neuron_at(err_out[2]).adjust_weight(err_neuron_vector[i][2], Nwa)
                    else:  # the weight sits on a positive edge
                        Nwa = Lwa
                        self.neuron_at(err_out[2]).adjust_weight(err_neuron_vector[i][2], Nwa)
                else:  # the neuron's output needs to be decreased
                    if err_neuron_vector[i][1] < err_out[1]:  # the weight sits on a negative edge
                        Nwa = Lwa
                        self.neuron_at(err_out[2]).adjust_weight(err_neuron_vector[i][2], Nwa)
                    else:  # the weight sits on a positive edge
                        Nwa = - Lwa
                        self.neuron_at(err_out[2]).adjust_weight(err_neuron_vector[i][2], Nwa)
changing_neuron_weights_info.append( (err_out, err_neuron_vector[i], Nwa) )
i+=1
input_group_len = []
for input in inputGroups:
input_group_len.append(len(input))
for ch_neur in changing_neuron_weights_info:
            # adjust the input proportionally to the neurons' weight adjustments
Gnum = 0
Nnum = ch_neur[1][2]
for link in self.neuronGroups_inputs_link[Gnum]:
if Nnum >= input_group_len[link]:
Nnum -= input_group_len[link]
else:
Gnum = link
break
inputGroups[Gnum][Nnum] += inputGroups[Gnum][Nnum] * ch_neur[2]
return inputGroups
def get_err_out(self, list, example_list):
        '''Input: a list of lists of ints'''
err_elements = []
try:
len1 = len(list)
i = 0
while i < len1:
len2 = len(list[i])
j = 0
while j < len2:
if list[i][j] != example_list[i][j]:
diff = example_list[i][j] - list[i][j]
delta = abs(diff)
sign = 1 if diff > 0 else -1
er_element = (delta, sign, (i,j))
err_elements.append(er_element)
j+=1
i+=1
        except (IndexError, TypeError):
            # example_list and list have mismatched shapes; keep the errors found so far.
            pass
        return err_elements
def neuron_at(self, coord):
'''coord = (groupNum, neuronNum)'''
return self.neuronGroups_list[coord[0]][coord[1]]
def get_neuron_group_ref(self, NgNum = 0):
return self.neuronGroups_list[NgNum]
def get_draw_scheme(self):
draw_scheme = []
for group in self.neuronGroups_list:
neuron_count = len(group)
sigh = group[0].output_sign
digital_out = group[0].digital_out
links = self.neuronGroups_inputs_link[self.neuronGroups_list.index(group)]
weights_count = group[0].weightsCount
draw_scheme.append((neuron_count, sigh, digital_out, links, weights_count))
return draw_scheme
def get_build_scheme(self):
build_scheme = []
for group in self.neuronGroups_list:
neuron_count = len(group)
sigh = True if group[0].output_sign == 1 else False
digital_out = group[0].digital_out
links = self.neuronGroups_inputs_link[self.neuronGroups_list.index(group)]
weights_count = group[0].weightsCount
func_num = group[0].funcNum
normal_coef = self.groupsNormalization_coeff[self.neuronGroups_list.index(group)]
build_scheme.append((neuron_count, sigh, func_num, digital_out, links, weights_count, normal_coef))
return build_scheme
class neuron_abstact():
    '''funcNum: 0 - summing unit (a threshold summator when the output is digital)
                1 - rational sigmoid
                2 - ReLU
       threshold - firing threshold for the digital output'''
def __init__(self, weights = [], generateWeightsCount = 0, positiveOutput = True, funcNum = 0,
digitalOut = True):
if generateWeightsCount > 0:
self.weights = []
self.set_random_weights(weights, generateWeightsCount)
else:
self.weights = weights
self.weightsCount = len(self.weights)
self.output_sign = 1 if positiveOutput else -1
self.digital_out = digitalOut
self.funcNum = funcNum
self.threshold = round(generateWeightsCount/2)
self.recurrent_mem = []
def set_random_weights(self, weights = [], weightsCount = 25,):
if len(weights) == 0:
            self.weights = [9.9 / randint(1, 100) + 0.1 for _ in range(weightsCount)]
else:
self.weights = weights.copy()
self.weightsCount = len(self.weights)
def set_funcNum(self,funcNum):
self.funcNum = funcNum
    def set_weights(self, weights = []):
        self.weights = weights
        self.weightsCount = len(self.weights)
        if self.funcNum == 1:
            # The rational sigmoid should not keep zero weights; re-seed them.
            for i in range(self.weightsCount):
                if self.weights[i] == 0:
                    self.weights[i] = self.get_random_weight(from_=0.1, to=10)
def get_weights(self):
return self.weights
def get_random_weight(self, from_ = 0, to = 10):
return (to - from_) / randint(1, 100) + from_
def spike(self, input = []):
if self.funcNum == 0: #linear sum
output = 0
i = 0
while i < self.weightsCount:
output += ( input[i] * self.weights[i] )
i+=1
if self.digital_out:
return 1 if output > self.threshold else 0
else:
return output * self.output_sign
elif self.funcNum == 1: #rational sig
output = 0
i = 0
while i < self.weightsCount:
abs_inp = abs(input[i])
output += abs_inp / ( abs_inp + abs(self.weights[i]) )
i+=1
if self.digital_out:
return 1 if output > self.threshold else 0
else:
return output * self.output_sign
        elif self.funcNum == 2:  # ReLU
output = 0
i = 0
while i < self.weightsCount:
                output += ( input[i] + self.weights[i] ) * self.weights[i]  # comment this out if the worms go crazy
i+=1
output = max([0, output])
if self.digital_out:
return 1 if output > self.threshold else 0
else:
return output * self.output_sign
else:
pass
def learning_spike(self, input = []):
        '''output = [(x,y,z),] x - input*weight, y - output sign, z - weight number'''
output = []
i = 0
if self.funcNum == 1:
while i < self.weightsCount:
abs_inp = abs(input[i])
output.append( ( abs_inp/(abs_inp + self.weights[i]), self.output_sign, i ) )
i+=1
else:
while i < self.weightsCount:
output.append( (input[i] * self.weights[i], self.output_sign, i) )
i+=1
return output
def adjust_weight(self, weightNum = 0, Nwa = 0 ):
if self.funcNum == 1:
self.weights[weightNum] -= Nwa * self.weights[weightNum]
if self.weights[weightNum] == 0:
self.weights[weightNum] = self.get_random_weight(from_=0.1,to=10)
else:
self.weights[weightNum] += Nwa * self.weights[weightNum]
from django.urls import path
from django.conf.urls import url
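# Note: django.conf.urls.url is deprecated since Django 2.0 and removed in Django 4.0;
# on newer Django these regex patterns would use django.urls.re_path instead.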
from . import views
urlpatterns = [
path('', views.index, name='home'),
url(r'^artist/(?P<inputID>.*?)/$', views.showArtistPage, name='showArtistPage'),
url(r'^album/(?P<inputID>.*?)/$', views.showAlbumPage, name='showAlbumPage'),
url(r'^song/(?P<inputID>.*?)/$', views.showSongPage, name='showSongPage'),
url(r'^genre/(?P<inputID>.*?)/$', views.showGenrePage, name='showGenrePage'),
url(r'^search/$', views.search, name='search'),
url(r'^search/artist/$', views.searchArtistByName, name='searchArtistByName'),
url(r'^search/album/$', views.searchAlbumByName, name='searchAlbumByName'),
url(r'^search/song/$', views.searchSongByName, name='searchSongByName')
]
"""Table CLI formatter.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import yaml
from treadmill.formatter import tablefmt
def _sort(unsorted):
"""Sort list."""
unsorted.sort()
return '\n'.join(unsorted)
def _state(state):
"""Get status from instance."""
return state['Name']
def _name_from_tags(tags):
"""Get name from tags."""
for tag in tags:
if tag['Key'] == 'Name':
return tag['Value']
return None
def _fmt_tags():
"""Output formatter tags."""
def _fmt(items):
"""Format tags, discard cloudformation tags."""
filtered = [
item for item in items
if not item['Key'].startswith('aws:cloudformation:')
]
schema = [
('key', 'Key', None),
('value', 'Value', None),
]
return tablefmt.list_to_table(
filtered, schema, header=False, align=None
)
return _fmt
def _fmt_secgroups():
"""Output formatter security groups."""
def _fmt(items):
"""Format tags, discard cloudformation tags."""
schema = [
('name', 'GroupName', None),
('id', 'GroupId', None),
]
return tablefmt.list_to_table(
items, schema, header=False, align=None
)
return _fmt
def _fmt_list():
"""Output formatter list."""
def _fmt(items):
"""Format list."""
schema = [
('item', None, None),
]
return tablefmt.list_to_table(
items, schema, header=False, align=None
)
return _fmt
def _fmt_trusted_entities(policy):
def _statement_principals(statement):
entities = []
if (statement['Action'] == 'sts:AssumeRole' and
statement['Effect'] == 'Allow' and
'AWS' in statement['Principal']):
principals = statement['Principal']['AWS']
if isinstance(principals, str):
principals = [principals]
principals.sort()
for principal in principals:
parts = principal.split(':')
parts[5] = parts[5].replace('/', ':')
entities.append({'Entity': parts[5], 'Arn': principal})
return entities
def _statement_saml_providers(statement):
entities = []
if (statement['Action'] == 'sts:AssumeRoleWithSAML' and
statement['Effect'] == 'Allow'):
saml_providers = statement['Principal']['Federated']
if isinstance(saml_providers, str):
saml_providers = [saml_providers]
saml_providers.sort()
for saml_provider in saml_providers:
parts = saml_provider.split(':')
parts[5] = parts[5].replace('/', ':')
entities.append({'Entity': parts[5], 'Arn': saml_provider})
return entities
def _statement_services(statement):
entities = []
if (statement['Action'] == 'sts:AssumeRole' and
statement['Effect'] == 'Allow' and
'Service' in statement['Principal']):
services = statement['Principal']['Service']
if isinstance(services, str):
services = [services]
services.sort()
for service in services:
entities.append({'Entity': 'service:%s' % service,
'Arn': service})
return entities
# pylint: disable=R0912
def _trusted_entities(pol):
entities = []
for statement in pol['Statement']:
principals = _statement_principals(statement)
if principals:
for principal in principals:
entities.append(principal)
saml_providers = _statement_saml_providers(statement)
if saml_providers:
for saml_provider in saml_providers:
entities.append(saml_provider)
services = _statement_services(statement)
if services:
for service in services:
entities.append(service)
return entities
items = _trusted_entities(policy)
schema = [
('Entity', 'Entity', None),
('Arn', 'Arn', None)
]
return tablefmt.list_to_table(items, schema, header=False, align=None)
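# Worked example (informal, derived from the parsing above): a principal ARN such as
# "arn:aws:iam::123456789012:role/example-role" splits on ':' into six parts, and
# parts[5] ("role/example-role") is rewritten to "role:example-role", so the resulting
# row is Entity="role:example-role", Arn=<original ARN>. The account id here is made up
# purely for illustration.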
def _fmt_attached_policies(policies):
def _fpolicies(policies):
fpolicies = []
for policy in policies:
            if policy['PolicyArn'].startswith('arn:aws:iam::aws:policy/'):
pn = policy['PolicyArn'].replace('arn:aws:iam::aws:policy/',
'')
fpolicies.append({
'Type': 'global',
'PolicyName': pn,
'PolicyArn': policy['PolicyArn']
})
else:
fpolicies.append({
'Type': 'local',
'PolicyName': policy['PolicyName'],
'PolicyArn': policy['PolicyArn']
})
return fpolicies
items = _fpolicies(policies)
schema = [
('Type', 'Type', None),
('PolicyName', 'PolicyName', None),
('PolicyArn', 'PolicyArn', None),
]
return tablefmt.list_to_table(items,
schema,
header=False,
align=None,
sortby='PolicyName')
def _fmt_policy_version(policy_version):
return yaml.dump(policy_version, default_flow_style=False, indent=4)
class SubnetPrettyFormatter:
"""Pretty table formatter for AWS subnets."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
schema = [
('id', 'SubnetId', None),
('state', 'State', None),
('zone', 'AvailabilityZone', None),
('cidr_block', 'CidrBlock', None),
('vpc', 'VpcId', None),
('tags', 'Tags', _fmt_tags()),
]
format_item = tablefmt.make_dict_to_table(schema)
format_list = tablefmt.make_list_to_table(schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class VpcPrettyFormatter:
"""Pretty table formatter for AWS vpcs."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
schema = [
('id', 'VpcId', None),
('default', 'IsDefault', None),
('state', 'State', None),
('cidr_block', 'CidrBlock', None),
('tags', 'Tags', _fmt_tags()),
]
format_item = tablefmt.make_dict_to_table(schema)
format_list = tablefmt.make_list_to_table(schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class InstancePrettyFormatter:
"""Pretty table formatter for AWS instances."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
item_schema = [
('hostname', 'Tags', _name_from_tags),
('id', 'InstanceId', None),
('arch', 'Architecture', None),
('image', 'ImageId', None),
('type', 'InstanceType', None),
('key', 'KeyName', None),
('launch', 'LaunchTime', None),
('state', 'State', _state),
('vpc', 'VpcId', None),
('subnet', 'SubnetId', None),
('secgroups', 'SecurityGroups', _fmt_secgroups()),
('tags', 'Tags', _fmt_tags()),
]
list_schema = [
('hostname', 'Tags', _name_from_tags),
('id', 'InstanceId', None),
('image', 'ImageId', None),
('type', 'InstanceType', None),
('key', 'KeyName', None),
('vpc', 'VpcId', None),
('subnet', 'SubnetId', None),
('tags', 'Tags', _fmt_tags()),
]
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class SpotPrettyFormatter:
"""Pretty table formatter for Spot Instance Requests."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
item_schema = [
('id', 'id', None),
('status', 'state', None),
('code', 'status_code', None),
('changed', 'status_timestamp', None),
('zone', 'az', None),
('subnet', 'subnet', None),
('type', 'instance_type', None),
('instance_id', 'instance_id', None),
('ami_id', 'ami_id', None),
('hostname', 'hostname', None),
('launch', 'instance_launch', None),
('state', 'instance_status', None),
('duration', 'duration', None),
]
list_schema = item_schema
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class IamRolePrettyFormatter:
"""Pretty table formatter for AWS IAM roles."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
list_schema = [
('RoleName', 'RoleName', None),
('Arn', 'Arn', None),
('MaxSessionDuration', 'MaxSessionDuration', None),
('CreateDate', 'CreateDate', None),
]
item_schema = [
('RoleName', 'RoleName', None),
('Path', 'Path', None),
('Arn', 'Arn', None),
('MaxSessionDuration', 'MaxSessionDuration', None),
('CreateDate', 'CreateDate', None),
('RoleId', 'RoleId', None),
('TrustedEntities',
'AssumeRolePolicyDocument',
_fmt_trusted_entities),
('InlinePolicies', 'RolePolicies', None),
('AttachedPolicies', 'AttachedPolicies', _fmt_attached_policies),
]
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class IamPolicyPrettyFormatter:
"""Pretty table formatter for AWS IAM policies."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
list_schema = [
            ('AttachmentCount', 'AttachmentCount', None),
('DefaultVersionId', 'DefaultVersionId', None),
('Arn', 'Arn', None),
('MaxSessionDuration', 'MaxSessionDuration', None),
('CreateDate', 'CreateDate', None),
]
item_schema = [
('Arn', 'Arn', None),
('PolicyName', 'PolicyName', None),
('Path', 'Path', None),
('DefaultVersionId', 'DefaultVersionId', None),
('IsAttachable', 'IsAttachable', None),
('AttachmentCount', 'AttachmentCount', None),
('Description', 'Description', None),
('CreateDate', 'CreateDate', None),
('UpdateDate', 'UpdateDate', None),
('PolicyVersion', 'PolicyVersion', _fmt_policy_version)
]
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class SnapshotPrettyFormatter:
"""Pretty table formatter for AWS snaphots."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
list_schema = [
('Name', 'Tags', _name_from_tags),
('SnapshotId', 'SnapshotId', None),
('VolumeId', 'VolumeId', None),
('State', 'State', None),
('Progress', 'Progress', None),
('VolumeSize', 'VolumeSize', None),
('StartTime', 'StartTime', None),
('Description', 'Description', None),
]
item_schema = [
('Name', 'Tags', _name_from_tags),
('Description', 'Description', None),
('SnapshotId', 'SnapshotId', None),
('VolumeId', 'VolumeId', None),
('State', 'State', None),
('Progress', 'Progress', None),
('VolumeSize', 'VolumeSize', None),
('StartTime', 'StartTime', None),
('Encrypted', 'Encrypted', None),
('KmsKeyId', 'KmsKeyId', None),
('tags', 'Tags', _fmt_tags()),
]
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class ImagePrettyFormatter:
"""Pretty table formatter for AWS images."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
list_schema = [
('id', 'ImageId', None),
('name', 'Name', None),
('owner', 'OwnerId', None),
('created', 'CreationDate', None),
('public', 'Public', lambda v: 'yes' if v else 'no'),
('state', 'State', None),
]
item_schema = list_schema + [
('tags', 'Tags', _fmt_tags()),
]
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class SecgroupPrettyFormatter:
"""Pretty table formatter for AWS security groups."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
list_schema = [
('id', 'GroupId', None),
('owner', 'OwnerId', None),
('vpc', 'VpcId', None),
('tags', 'Tags', _fmt_tags()),
]
# TODO: add ip ingress/egress permissions to the output.
item_schema = [
('id', 'GroupId', None),
('owner', 'OwnerId', None),
('vpc', 'VpcId', None),
('tags', 'Tags', _fmt_tags()),
]
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class IpaUserPrettyFormatter:
"""Pretty table formatter for AWS user."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
list_schema = [
('username', 'uid', lambda _: _[0]),
]
item_schema = [
('username', 'uid', lambda _: _[0]),
('class', 'userclass', lambda _: _[0]),
('groups', 'memberof_group', _sort),
('indirect-groups', 'memberofindirect_group', '\n'.join),
('hbac-rule', 'memberofindirect_hbacrule', '\n'.join),
('sudo-rule', 'memberofindirect_sudorule', '\n'.join),
]
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class IamUserPrettyFormatter:
"""Pretty table formatter for AWS users."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
list_schema = [
('UserName', 'UserName', None),
('Arn', 'Arn', None),
]
item_schema = [
('UserName', 'UserName', None),
('Path', 'Path', None),
('Arn', 'Arn', None),
('CreateDate', 'CreateDate', None),
('UserId', 'UserId', None),
('InlinePolicies', 'UserPolicies', None),
('AttachedPolicies', 'AttachedPolicies', _fmt_attached_policies),
]
format_item = tablefmt.make_dict_to_table(item_schema)
format_list = tablefmt.make_list_to_table(list_schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class CellDataFormatter:
"""Pretty table formatter for cell data."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
schema = [
('aws_account', 'aws_account', None),
('aws_admin', 'aws_admin', None),
('aws_region', 'aws_region', None),
('docker-registries', 'docker_registries', ','.join),
('disk-size', 'disk_size', None),
('hostgroups', 'hostgroups', ','.join),
('image', 'image', None),
('image-accounts', 'image_accounts', ','.join),
('instance-profile', 'instance_profile', None),
('realm', 'realm', None),
('secgroup', 'secgroup', None),
('size', 'size', None),
('sns-topic', 'sns_topic', None),
('subnets', 'subnets', ','.join),
('s3_registry_bucket', 's3_registry_bucket', None),
('tls_certs', 'tls_certs', None),
]
format_item = tablefmt.make_dict_to_table(schema)
format_list = tablefmt.make_list_to_table(schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class PartDataFormatter:
"""Pretty table formatter for partition data."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
schema = [
('autoscale', 'autoscale', None),
('image', 'image', None),
('image-accounts', 'image_accounts', ','.join),
('instance-types', 'instance_types', ','.join),
('spot-instance-types', 'spot_instance_types', ','.join),
('spot-duration', 'spot_duration', None),
('disk-size', 'disk_size', None),
('hostgroups', 'hostgroups', ','.join),
('secgroup', 'secgroup', None),
('instance-profile', 'instance_profile', None),
('subnets', 'subnets', ','.join),
('s3_registry_bucket', 's3_registry_bucket', None),
]
format_item = tablefmt.make_dict_to_table(schema)
format_list = tablefmt.make_list_to_table(schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
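# Competitive-programming style snippet (assumption: for each of the N input words it
# prints a guessed plural form chosen from the word's last one or two letters, per the
# branches below).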
r=""
for _ in range(int(input())):
s=input();tn=s[-2]+s[-1]
if tn=='ne':
r+=s[:-2]+'anes\n'
else:
t=s[-1]
if t=='a' or t=='o' or t=='u':
r+=s+'s\n'
elif t=='i' or t=='y':
r+=s[:-1]+'ios\n'
elif t=='l' or t=='r' or t=='v':
r+=s+'es\n'
elif t=='n':
r+=s[:-1]+'anes\n'
elif t=='t' or t=='w':
r+=s+'as\n'
else:
r+=s+'us\n'
print(r,end="")
"""Updates database json representation
"""
from __future__ import print_function
import argparse
import itertools
import logging
import relstorage.adapters.postgresql
import relstorage.options
import sys
from . import pg_connection
from . import follow
from .jsonpickle import Jsonifier
from ._adapter import DELETE_TRIGGER
from ._util import closing, table_exists, trigger_exists
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('connection_string', help='Postgresql connection string')
parser.add_argument('-t', '--poll-timeout', type=int, default=300,
help='Change-poll timeout, in seconds')
parser.add_argument('-m', '--transaction-size-limit', type=int, default=100000,
                    help='Transaction size limit (approximate)')
parser.add_argument(
'-l', '--logging-configuration', default='info',
help='Logging configuration file path, or a logging level name')
parser.add_argument(
'-d', '--driver', default='auto',
help='Provide an explicit Postgres driver name (e.g. psycopg2)')
parser.add_argument(
'-T', '--remove-delete-trigger', action="store_true",
help="""\
Remove the Newt DB delete trigger, if it exists.
The Newt DB delete trigger is incompatible with the updater. It can cause
deadlock errors if the database is packed while the updater is running.
""")
gc_sql = """
delete from newt n where not exists (
select from object_state s where n.zoid = s.zoid)
"""
parser.add_argument(
'-g', '--gc-only', action="store_true",
help="""\
Collect garbage and exit.
This removes Newt DB records that don't have corresponding database records.
This is done by executing:
%s
Note that garbage collection is normally performed on startup unless
the -G option is used.
""" % gc_sql)
parser.add_argument(
'-G', '--no-gc', action="store_true",
help="Don't perform garbage collection on startup.")
parser.add_argument(
'--compute-missing', action='store_true',
help="""\
Compute missing newt records.
Rather than processing new records, process records written up through
the current time and stop. Only missing records are updated. This
option requires PostgreSQL 9.5.
This is used to compute newt records after adding Newt DB to an existing
PostgreSQL RelStorage application.
""")
parser.add_argument(
'--nagios',
help="""\
Check the status of the updater.
The status is checked by checking the updater lag, which is the
difference between the last transaction committed to the database, and
the last transaction processed by the updater. The option takes 2
numbers separated by a comma. The first number is the lag, in
seconds, for the updater to be considered to be OK. The second number
is the maximum lag for which the updater isn't considered to be in
error. For example, 1,99 indicates OK if 1 or less, WARNING if more
than 1 and less than or equal to 99, and ERROR if more than 99 seconds.
""")
parser.add_argument(
'-x', '--transform',
help = """\
The dotted name of a function (or callable object) to
transform generated JSON data. This provides a way to control
how your JSON data are generated and also provides a mechanism
for ignoring some objects. See the Newt DB transform option.
""")
def _update_newt(conn, cursor, jsonifier, Binary, batch):
ex = cursor.execute
mogrify = cursor.mogrify
tid = None
while True:
data = list(itertools.islice(batch, 0, 100))
if not data:
break
tid = data[-1][0]
# Delete any existing records for the values. 2 reasons:
# a) Make sure that new invalid data removes old valid data, and
# b) Don't depend on upsert.
ex("delete from newt where zoid = any(%s)", ([d[1] for d in data], ))
# Convert, filtering out null conversions (uninteresting classes)
to_save = []
for tid, zoid, state in data:
class_name, ghost_pickle, state = jsonifier((tid, zoid), state)
if state is not None:
to_save.append((zoid, class_name, Binary(ghost_pickle), state))
if to_save:
ex("insert into newt (zoid, class_name, ghost_pickle, state)"
" values " +
', '.join(mogrify('(%s, %s, %s, %s)', d).decode('ascii')
for d in to_save)
)
if tid is not None:
follow.set_progress_tid(conn, __name__, tid)
conn.commit()
def _compute_missing(conn, cursor, jsonifier, Binary, batch):
ex = cursor.execute
mogrify = cursor.mogrify
tid = None
while True:
data = list(itertools.islice(batch, 0, 100))
if not data:
break
tid = data[-1][0]
# Convert, filtering out null conversions (uninteresting classes)
to_save = []
for tid, zoid, state in data:
class_name, ghost_pickle, state = jsonifier((tid, zoid), state)
if state is not None:
to_save.append((zoid, class_name, Binary(ghost_pickle), state))
if to_save:
ex("insert into newt (zoid, class_name, ghost_pickle, state)"
" values %s on conflict do nothing" %
', '.join(mogrify('(%s, %s, %s, %s)', d).decode('ascii')
for d in to_save)
)
conn.commit()
logging_levels = 'DEBUG INFO WARNING ERROR CRITICAL'.split()
def main(args=None):
options = parser.parse_args(args)
if options.logging_configuration.upper() in logging_levels:
logging.basicConfig(level=options.logging_configuration.upper())
else:
with open(options.logging_configuration) as f:
from ZConfig import configureLoggers
configureLoggers(f.read())
transform = options.transform
if transform is not None:
from .component import global_by_name
transform = global_by_name(transform)
jsonifier = Jsonifier(transform=transform)
driver = relstorage.adapters.postgresql.select_driver(
relstorage.options.Options(driver=options.driver))
Binary = driver.Binary
dsn = options.connection_string
with closing(pg_connection(dsn)) as conn:
with closing(conn.cursor()) as cursor:
if options.nagios:
if not table_exists(cursor, 'newt_follow_progress'):
print("Updater has not run")
return 2
cursor.execute("select max(tid) from object_state")
[[stid]] = cursor
utid = follow.get_progress_tid(conn, __name__)
if stid is None:
if utid == -1:
print("No transactions")
return 0
else:
print("Updater saw data but there was None")
return 2
elif utid < 0:
print("Updater hasn't done anything")
return 2
else:
from ZODB.utils import p64
from ZODB.TimeStamp import TimeStamp
lag = (TimeStamp(p64(stid)).timeTime() -
TimeStamp(p64(utid)).timeTime())
if lag < 0:
print("Updater is ahead")
return 2
warn, error = map(int, options.nagios.split(','))
flag = lambda : ("%99.3f" % lag).strip()
if lag > error:
print("Updater is too far behind | %s" % flag())
return 2
elif lag > warn:
print("Updater is behind | %s" % flag())
return 1
else:
print("OK | %s" % flag())
return 0
compute_missing = options.compute_missing
if (compute_missing and
not table_exists(cursor, follow.PROGRESS_TABLE)
):
if not table_exists(cursor, 'newt'):
raise AssertionError("newt table doesn't exist")
cursor.execute("select max(tid) from object_state")
[[tid]] = cursor
else:
tid = follow.get_progress_tid(conn, __name__)
if tid < 0 and not table_exists(cursor, 'newt'):
from ._adapter import _newt_ddl
cursor.execute(_newt_ddl)
elif trigger_exists(cursor, DELETE_TRIGGER):
if options.remove_delete_trigger:
cursor.execute("drop trigger %s on object_state" %
DELETE_TRIGGER)
else:
logger.error(
"The Newt DB delete trigger exists.\n"
"It is incompatible with the updater.\n"
"Use -T to remove it.")
return 1
if not options.no_gc:
cursor.execute(gc_sql)
conn.commit()
if options.gc_only:
if options.no_gc:
logger.warn(
"Exiting after garbage collection,\n"
"but garbage collection was suppressed.")
return 0
if options.compute_missing:
start_tid = -1
end_tid = tid
logger.info("Compute_missing through %s", tid)
process = _compute_missing
else:
logger.info("Starting updater at %s", tid)
start_tid = tid
end_tid = None
process = _update_newt
for batch in follow.updates(
dsn,
start_tid=start_tid,
end_tid=end_tid,
batch_limit=options.transaction_size_limit,
poll_timeout=options.poll_timeout,
):
process(conn, cursor, jsonifier, Binary, batch)
if __name__ == '__main__':
sys.exit(main())
| 10,156 | 2,782 |
import string
ALPHABET = set(string.ascii_lowercase)
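# is_pangram() below relies on set inclusion: ALPHABET <= set(...) is True only
# when every lowercase ASCII letter occurs in the sentence. For example
# (illustrative): is_pangram("The quick brown fox jumps over the lazy dog") is
# True, while is_pangram("Hello world") is False.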
def is_pangram(sentence):
return ALPHABET <= set(sentence.lower()) | 124 | 47 |
class ShardConfigException(ValueError):
pass | 48 | 13 |
#!/usr/bin/env python
"""Merge multiple evaluation files into one with prefixed measure names
If directories are given, and --out-dir, will group by filename.
Example usage:
./scripts/merge_evaluations.py --label-re='[^/]+/?$' -x eval_merged -l =TEDL2015_neleval-no1331 --out-dir /tmp/foobar tac15data/TEDL2015_neleval-no1331 $(find tac15data/TEDL2015_neleval-no1331/00filtered/ -type d )
"""
from __future__ import print_function
import argparse
import os
import glob
import collections
import sys
import re
ap = argparse.ArgumentParser(description=__doc__)
ap.add_argument('-o', '--out-dir', default=None)
ap.add_argument('-x', '--out-extension', default=None)
ap.add_argument('-l', '--label', dest='labels', action='append',
type=lambda s: s.split('=', 1))
ap.add_argument('-r', '--label-re', default=None, type=re.compile)
ap.add_argument('--fmt', default='{label}/{{}}')
ap.add_argument('paths', nargs='+')
args = ap.parse_args()
def _swap_ext(name, new_ext):
if new_ext is None:
return name
name, ext = os.path.splitext(name)
return name + '.' + new_ext
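# Illustrative behaviour of _swap_ext (derived from the code above):
#   _swap_ext('scores.evaluation', 'tsv')  -> 'scores.tsv'
#   _swap_ext('scores.evaluation', None)   -> 'scores.evaluation'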
nonexist = [path for path in args.paths if not os.path.exists(path)]
if nonexist:
ap.error('Paths do not exist: %r' % nonexist)
is_dir = [os.path.isdir(path) for path in args.paths]
if all(is_dir):
if args.out_dir is None:
ap.error('Must specify --out-dir in path mode')
input_paths = collections.defaultdict(list)
for dir_path in args.paths:
for path in glob.glob(os.path.join(dir_path, '*.evaluation')):
input_paths[os.path.basename(path)].append(path)
outputs = {name: os.path.join(args.out_dir,
_swap_ext(name, args.out_extension))
for name in input_paths}
elif not any(is_dir):
if args.out_dir is not None or args.out_extension is not None:
ap.error('--out-dir and --out-extension not used in files mode; output is STDOUT')
input_paths = {'all': args.paths}
outputs = {'all': sys.stdout}
else:
ap.error('Got mixture of directories (e.g. %r) and files (e.g. %r)' % (args.paths[is_dir.index(True)], args.paths[is_dir.index(False)]))
seen_labels = set()
labels = {src: dst for dst, src in args.labels or []}
def get_label(path):
name = os.path.dirname(path)
if args.label_re:
match = args.label_re.search(name)
if match is not None:
name = match.group()
seen_labels.add(name)
return labels.get(name, name)
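# Illustrative example (hypothetical path): with --label-re='[^/]+/?$' and an
# input file 'tac15data/systemA/run.evaluation', get_label() takes the directory
# name 'tac15data/systemA', keeps the regex match 'systemA', then applies any
# -l NEWLABEL=DIRNAME mapping, so measures from that file end up prefixed
# 'systemA/...' under the default --fmt of '{label}/{{}}'.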
for name in input_paths:
fout = outputs[name]
if not hasattr(fout, 'read'):
opened = True
fout = open(fout, 'w')
else:
opened = False
print('Processing', name, 'to', fout.name, file=sys.stderr)
for i, path in enumerate(input_paths[name]):
label = get_label(path)
if label:
fmt = args.fmt.format(label=label)
else:
fmt = '{}'
fmt = '{{}}\t{}'.format(fmt)
with open(path) as fin:
fin = iter(fin)
try:
header = next(fin)
except StopIteration:
print('Found empty file at', path, file=sys.stderr)
continue
if i == 0:
fout.write(header)
for l in fin:
l, measure = l.rstrip('\n\r').rsplit('\t', 1)
print(fmt.format(l, measure), file=fout)
if opened:
fout.close()
unseen_labels = set(labels) - seen_labels
if unseen_labels:
print('WARNING: did not see labels %r' % sorted(unseen_labels), file=sys.stderr)
| 3,538 | 1,217 |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import theano.tensor as T
from .model_basis import ModelBasis
from .model_record import Record
from ..layers import layers
class Model(ModelBasis):
def __init__(self, model_config, rng=None):
super(Model, self).__init__(model_config, rng)
self.set_configs(model_config)
self.layers['feat'] = []
self.layers['feat_fc'] = []
self.layers['reg_loc'] = []
self.layers['reg_mos'] = []
print('\nBIECON base model')
print(' - Model file: %s' % (os.path.split(__file__)[1]))
self.init_model()
def set_configs(self, model_config):
self.set_opt_configs(model_config)
self.wl_loc = float(model_config.get('wl_loc', 1e2))
self.wl_mos = float(model_config.get('wl_mos', 1e2))
self.wr_l2 = float(model_config.get('wr_l2', 1e-4))
self.dropout = model_config.get('use_dropout', False)
self.update_wrt_loc = model_config.get(
'update_wrt_loc', ['feat', 'feat_fc', 'reg_loc'])
self.update_wrt_iqa = model_config.get(
'update_wrt_iqa', ['feat', 'feat_fc', 'reg_mos'])
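# A minimal example of the model_config dict consumed above (keys taken from the
# .get() calls in set_configs; the values shown are illustrative, not taken from
# any particular experiment):
#   model_config = {'wl_loc': 1e2, 'wl_mos': 1e2, 'wr_l2': 1e-4,
#                   'use_dropout': True,
#                   'update_wrt_loc': ['feat', 'feat_fc', 'reg_loc'],
#                   'update_wrt_iqa': ['feat', 'feat_fc', 'reg_mos']}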
def init_model(self):
print(' - Feature conv layers')
cur_key = 'feat'
self.layers[cur_key] = []
# Conv. layers
self.layers[cur_key].append(layers.ConvLayer(
input_shape=self.get_input_shape(),
num_filts=64,
filt_size=(5, 5),
layer_name=cur_key + '/conv1',
activation=layers.relu,
))
self.layers[cur_key].append(layers.Pool2DLayer(
input_shape=self.get_out_shape(cur_key),
pool_size=(2, 2), mode='max'))
self.layers[cur_key].append(layers.ConvLayer(
input_shape=self.get_out_shape(cur_key),
num_filts=64,
filt_size=(5, 5),
layer_name=cur_key + '/conv2',
activation=layers.relu,
))
self.layers[cur_key].append(layers.Pool2DLayer(
input_shape=self.get_out_shape(cur_key),
pool_size=(2, 2), mode='max'))
# Reshaping layer
self.layers[cur_key].append(
layers.TensorToVectorLayer(self.get_out_shape(cur_key)))
# Fully connected layers
cur_key = 'feat_fc'
self.layers[cur_key] = []
self.layers[cur_key].append(layers.FCLayer(
n_in=self.get_out_shape('feat'),
n_out=1024,
layer_name=cur_key + '/fc1',
activation=layers.relu,
))
if self.dropout:
self.layers[cur_key].append(layers.DropoutLayer(p=0.5))
self.layers[cur_key].append(layers.FCLayer(
n_in=self.get_out_shape(cur_key),
n_out=512,
layer_name=cur_key + '/fc2',
activation=layers.relu,
))
if self.dropout:
self.layers[cur_key].append(layers.DropoutLayer(p=0.5))
self.layers[cur_key].append(layers.FCLayer(
n_in=self.get_out_shape(cur_key),
n_out=256,
layer_name=cur_key + '/fc3',
activation=layers.relu,
))
if self.dropout:
self.layers[cur_key].append(layers.DropoutLayer(p=0.5))
self.layers[cur_key].append(layers.FCLayer(
n_in=self.get_out_shape(cur_key),
n_out=128,
layer_name=cur_key + '/fc4',
activation=layers.relu,
))
#######################################################################
print(' - Regression metric layers')
cur_key = 'reg_loc'
self.layers[cur_key] = []
if self.dropout:
self.layers[cur_key].append(layers.DropoutLayer(p=0.5))
self.layers[cur_key].append(layers.FCLayer(
n_in=self.get_out_shape('feat_fc'),
n_out=128,
layer_name=cur_key + '/fc1',
activation=layers.relu,
))
self.layers[cur_key].append(layers.FCLayer(
n_in=self.get_out_shape('feat_fc'),
n_out=1,
layer_name=cur_key + '/fc2',
b_init=np.ones((1,), dtype='float32') * 0.5,
))
#######################################################################
print(' - Regression mos layers')
cur_key = 'reg_mos'
self.layers[cur_key] = []
if self.dropout:
self.layers[cur_key].append(layers.DropoutLayer(p=0.5))
self.layers[cur_key].append(layers.FCLayer(
n_in=self.get_out_shape('feat_fc'),
n_out=128,
layer_name=cur_key + '/fc1',
activation=layers.relu,
))
self.layers[cur_key].append(layers.FCLayer(
n_in=self.get_out_shape(cur_key),
n_out=1,
layer_name=cur_key + '/fc2',
b_init=np.ones((1,), dtype='float32') * 0.5,
))
#######################################################################
super(Model, self).make_param_list()
super(Model, self).show_num_params()
def aggregation_fn(self, feat_vec):
feat_avg = T.mean(feat_vec, axis=0, keepdims=True)
return feat_avg
# feat_std = T.std(feat_vec, axis=0, keepdims=True)
# return T.concatenate([feat_avg, feat_std], axis=1)
def feat_fn(self, x):
out = self.get_key_layers_output(x, 'feat')
return self.get_key_layers_output(out, 'feat_fc')
def regress_loc_fn(self, feat_vec):
return self.get_key_layers_output(feat_vec, 'reg_loc')
def regress_mos_fn(self, feat_vec):
return self.get_key_layers_output(feat_vec, 'reg_mos')
def cost_reg_loc(self, x_c, met_s, n_img=None, bat2img_idx_set=None):
"""Get cost: regression onto local metroc scores
"""
records = Record()
# concatenate the image patches
if bat2img_idx_set:
# if a fixed-size dummy buffer is given and the current batch only
# occupies its first n_patches entries,
# keep just those n_patches entries
n_patches = bat2img_idx_set[n_img - 1][1]
x_c_set = x_c[:n_patches]
met_s_set = met_s[:n_patches]
else:
# if input is current data
x_c_set = x_c
met_s_set = met_s
######################################################################
x_c_im = self.image_vec_to_tensor(x_c_set)
met_s_im = self.image_vec_to_tensor(met_s_set)
feat_vec = self.feat_fn(x_c_im)
met_s_p = self.regress_loc_fn(feat_vec).flatten()
met_s_mean = T.mean(met_s_set, axis=[1, 2, 3])
loc_cost = self.get_cost_mse_mae(met_s_mean, met_s_p)
# regularization
l2_reg = self.get_l2_regularization(
['feat', 'feat_fc', 'reg_loc'], mode='sum')
cost = self.add_all_losses_with_weight(
[loc_cost, l2_reg],
[self.wl_loc, self.wr_l2])
# Parameters to record
records.add_data('loc_mse', self.wl_loc * loc_cost)
records.add_data('l2_reg', self.wr_l2 * l2_reg)
# records.add_im_data('met_s_p', met_s_p_set)
# records.add_im_data('met_s', met_s_set)
records.add_imgs('x_c', x_c_im, caxis=[-0.25, 0.25])
if bat2img_idx_set:
def score_to_img(score, repeat=1):
tmp = score.dimshuffle(0, 'x', 'x', 'x')
tmp = T.extra_ops.repeat(tmp, repeat, axis=2)
return T.extra_ops.repeat(tmp, repeat, axis=3)
met_s_img = score_to_img(met_s_mean, 10)
records.add_imgs('met_s', met_s_img, caxis='auto')
met_s_p_img = score_to_img(met_s_p, 10)
records.add_imgs('met_s_p', met_s_p_img, caxis='auto')
return cost, records
def cost_updates_reg_loc(self, x_c, met_s,
n_img=None, bat2img_idx_set=None):
cost, records = self.cost_reg_loc(
x_c, met_s, n_img=n_img, bat2img_idx_set=bat2img_idx_set)
updates = self.get_updates_keys(cost, self.update_wrt_loc)
return cost, updates, records
def cost_nr_iqa(self, x_c, mos, n_img=None, bat2img_idx_set=None):
records = Record()
# concatenate the image patches
if bat2img_idx_set:
# if a fixed-size dummy buffer is given and the current batch only
# occupies its first n_patches entries,
# keep just those n_patches entries
n_patches = bat2img_idx_set[n_img - 1][1]
x_c_set = x_c[:n_patches]
else:
# if input is current data
x_c_set = x_c
######################################################################
x_c_im = self.image_vec_to_tensor(x_c_set)
# x_c_im = normalize_lowpass_subt(x_c_im, 3)
feat_vec = self.feat_fn(x_c_im)
# get feature vector and concatenate the mos_p set
if bat2img_idx_set:
# if patch based
aggr_feat_list = []
for idx in range(n_img):
idx_from = bat2img_idx_set[idx][0]
idx_to = bat2img_idx_set[idx][1]
cur_feat_vec = feat_vec[idx_from: idx_to]
cur_aggr_feat = self.aggregation_fn(cur_feat_vec)
aggr_feat_list.append(cur_aggr_feat)
aggr_feat = T.concatenate(aggr_feat_list, axis=0).flatten(2)
# aggr_feat = T.stack(aggr_feat_list).flatten()
else:
# aggr_feat = self.regress_mos_fn(feat_vec).flatten()
raise NotImplementedError
######################################################################
# regress onto MOS
mos_p = self.regress_mos_fn(aggr_feat).flatten()
# MOS loss
subj_loss = self.get_cost_mse_mae(mos, mos_p)
# L2 regularization
l2_reg = self.get_l2_regularization(
['feat', 'feat_fc', 'reg_mos'], mode='sum')
cost = self.add_all_losses_with_weight(
[subj_loss, l2_reg],
[self.wl_mos, self.wr_l2])
# Parameters to record
records.add_data('subj', self.wl_mos * subj_loss)
records.add_data('l2_reg', self.wr_l2 * l2_reg)
records.add_im_data('mos_p', mos_p)
records.add_im_data('mos_gt', mos)
records.add_imgs('x_c', x_c_im, caxis=[-0.25, 0.25])
return cost, records
def cost_updates_nr_iqa(self, x_c, mos, n_img=None, bat2img_idx_set=None):
cost, records = self.cost_nr_iqa(
x_c, mos, n_img=n_img, bat2img_idx_set=bat2img_idx_set)
updates = self.get_updates_keys(cost, self.update_wrt_iqa)
return cost, updates, records
def set_training_mode(self, training):
# Decide behaviors of the model during training
# Dropout
self.set_dropout_on(training)
| 11,276 | 4,095 |
import csv
import os
import json
from string import join
from scrapy import log
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class Volt220 (BaseSpider):
base_url = "http://www.220-volt.ru/"
name = '220volt.ru'
allowed_domains = ['220-volt.ru']
start_urls = [base_url]
def start_requests(self):
with open(os.path.join(HERE, 'bosh_products.csv')) as f:
reader = csv.DictReader(f)
for row in reader:
sku = row['sku'].strip()
name = row['name'].strip()#.replace("(","").replace(")","")
url = 'http://www.220-volt.ru/selection/do/'
yield FormRequest(url, method='POST',formdata={'pattern': name}, meta={'sku': sku, 'name': name})
def parse(self, response):
hxs = HtmlXPathSelector(response)
products = hxs.select(u'//div[contains(@class,"goodContainerInner")]')
selectionWarning = hxs.select(u'//div[@class="selectionWarning"]')
if selectionWarning:
return None
# categories
# if only one product is found
if len(products) == 1:
for product in products:
url = join(product.select(u'div[contains(@class,"goodTitle")]/p/a/@href').extract())
if len(url) != 0:
url = urljoin_rfc(self.base_url, url)
name = join(product.select(u'div[contains(@class,"goodTitle")]/p/a/text()').extract())
price = join(product.select(u'div[contains(@class,"priceAndBuyConyainer")]/div[contains(@class,"priceContainer")]/div[contains(@class,"iPrice")]/text()').extract()).replace(" ","")
#log.msg(price, log.DEBUG)
product_loader = ProductLoader(item=Product(), selector=product)
product_loader.add_value('name', name)
product_loader.add_value('url', url)
product_loader.add_value('price', price)
product_loader.add_value('sku', response.meta["sku"])
return product_loader.load_item()
# if more than one product is found
if len(products) > 1:
product_match = {}
full_product_list = {}
#iterate through all search results
for product in products:
url = join(product.select(u'div[contains(@class,"goodTitle")]/p/a/@href').extract())
if len(url) != 0:
url = urljoin_rfc(self.base_url, url)
name = join(product.select(u'div[contains(@class,"goodTitle")]/p/a/text()').extract())
price = join(product.select(u'div[contains(@class,"priceAndBuyConyainer")]/div[contains(@class,"priceContainer")]/div[contains(@class,"iPrice")]/text()').extract()).replace(" ","")
name_arr = name.split(" ")
csv_name_arr = response.meta["name"].split(" ")
words_match = 0.0
# count how many words from the CSV product name also appear in the search result name
for word in name_arr:
for csv_word in csv_name_arr:
if word.lower() == csv_word.lower() and word.strip()!= "":
words_match = words_match+1
#log.msg(word+" in "+response.meta["name"], log.DEBUG)
# skip this result if fewer words match than the number of words in the CSV name
if words_match<len(csv_name_arr):
continue
product_match[name] = words_match/(len(name_arr)+len(name))
#log.msg(name+" : "+str(words_match)+" : "+str((len(name_arr)+len(name))), log.DEBUG)
full_product_list[name] = {'url': url, 'price': price}
# the search result with the highest ratio of matching words to name length is selected as the best choice
for key, value in sorted(product_match.iteritems(), reverse=True, key=lambda (k,v): (v,k)):
product_loader = ProductLoader(item=Product(), selector=products)
product_loader.add_value('name', key)
product_loader.add_value('url', full_product_list[key]['url'])
product_loader.add_value('price', full_product_list[key]['price'])
product_loader.add_value('sku', response.meta["sku"])
return product_loader.load_item()
break
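# Illustrative scoring example (hypothetical names, not from the CSV): if the CSV
# name is "GBH 2-26 DRE" and a search result is named "Bosch GBH 2-26 DRE", then
# words_match is 3.0 and the stored score is 3.0 / (4 + len("Bosch GBH 2-26 DRE"))
# = 3.0 / 22; the result with the highest such ratio is returned first by the
# sorted() loop above.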
| 3,964 | 1,474 |
import streamlit as st
title = st.text_input("Search:", "")
st.write("You search for: ", title)
| 97 | 33 |
#!/usr/bin/env python
# WS client example to test server
import asyncio
import websockets
import json
import time
async def hello():
uri = "ws://localhost:8001"
async with websockets.connect(uri) as websocket:
inp = input("Input msg number? ")
obj = {
"source" : "sim",
"msgNum" : inp,
"msgType" : "telemetry",
"timestamp" : time.strftime("%Y-%m-%d %H:%M.%S"),
"cardDist" : [6.0,7.0,8.0,9.0],
"depth" : 10.0,
"alt" : 11.0,
"assetDistances" :
{
"cage" : 12.0,
"tree1" : 13.0,
"tree2" : 14.0
}
}
msg = json.dumps(obj)
await websocket.send(msg)
print(f"> {msg}")
resp = await websocket.recv()
print(f"< {resp}")
asyncio.get_event_loop().run_until_complete(hello()) | 908 | 321 |
#NOT YET WORKING PROPERLY
#NOT INTEGRATED WITH THE ASSISTANT YET
from pytrends.request import TrendReq
# Only need to run this once, the rest of requests will use the same session.
pytrend = TrendReq()
def trending_searches():
# Get Google Hot Trends data
trending_searches = pytrend.trending_searches()
print(trending_searches.head())
def todays_trends():
# Get Google Today's Trend data
today_searches = pytrend.today_searches()
print(today_searches.head())
def top_charts():
# Get Google Top Charts
top_charts = pytrend.top_charts(2018, hl='en-US', tz=300, geo='GLOBAL')
print(top_charts.head())
def keyword_suggestions():
# Get Google Keyword Suggestions
kw=input("please enter the keyword you want to search:")
suggestions_dict = pytrend.suggestions(keyword=kw)
if suggestions_dict==[]:
print("No suggestions found for the keyword: " + kw)
else:
print(suggestions_dict)
def console_trends():
print("Available Google Trends Research Options :\n1. Trending Searches\n2. Today's Trends\n3. Top Charts\n4. Keyword Suggestions\n\n type Exit to leave the trends Research console")
while True:
choice = input("\n\nPlease enter your choice: ")
if choice == "1" or choice == "Trending Searches": trending_searches()
elif choice == "2" or choice == "Today's Trends": todays_trends()
elif choice == "3" or choice == "Top Charts":
top_charts()
elif choice == "4" or choice == "Keyword Suggestions": keyword_suggestions()
elif choice == "Exit" or choice == "exit" or choice == "quit": break
else:
print("\n\nInvalid choice, please try again, Dumbass!")
continue
if __name__ == '__main__':
console_trends() | 1,834 | 612 |
import os
import asyncio
import re
import requests
import time
import lottie
import PIL.ImageOps
from os.path import basename
from PIL import Image
from typing import Optional
from .. import LOGS
from ..config import Config
from ..utils.extras import edit_or_reply as eor
from .progress import *
from .runner import runcmd
dwlpath = Config.TMP_DOWNLOAD_DIRECTORY
# conversions are done here...
# make a image
async def convert_to_image(event, bot):
speedo = await event.get_reply_message()
if not (
speedo.gif
or speedo.audio
or speedo.voice
or speedo.video
or speedo.video_note
or speedo.photo
or speedo.sticker
or speedo.media
):
await eor(event, "`Format Not Supported.`")
return
else:
try:
c_time = time.time()
downloaded_file_name = await bot.download_media(
speedo.media,
dwlpath,
progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
progress(d, t, event, c_time, "`Downloading...`")
),
)
except Exception as e: # pylint:disable=C0103,W0703
await eor(event, str(e))
else:
await eor(event,
"Downloaded to `{}` successfully.".format(downloaded_file_name)
)
if not os.path.exists(downloaded_file_name):
await eor(event, "Download Unsucessfull :(")
return
if speedo and speedo.photo:
speedo_final = downloaded_file_name
elif speedo.sticker and speedo.sticker.mime_type == "application/x-tgsticker":
rpath = downloaded_file_name
image_name20 = os.path.join(dwlpath, "omk.png")
cmd = f"lottie_convert.py --frame 0 -if lottie -of png {downloaded_file_name} {image_name20}"
stdout, stderr = (await runcmd(cmd))[:2]
os.remove(rpath)
speedo_final = image_name20
elif speedo.sticker and speedo.sticker.mime_type == "image/webp":
pathofsticker2 = downloaded_file_name
image_new_path = dwlpath + "image.png"
im = Image.open(pathofsticker2)
im.save(image_new_path, "PNG")
if not os.path.exists(image_new_path):
await eor(event, "`Unable To Fetch Shot.`")
return
speedo_final = image_new_path
elif speedo.audio:
omk_p = downloaded_file_name
hmmyes = dwlpath + "semx.mp3"
imgpath = dwlpath + "semxy.jpg"
os.rename(omk_p, hmmyes)
await runcmd(f"ffmpeg -i {hmmyes} -filter:v scale=500:500 -an {imgpath}")
os.remove(hmmyes)  # the source was renamed to semx.mp3 above, so remove that copy
if not os.path.exists(imgpath):
await eor(event, "`Unable To Fetch Shot.`")
return
speedo_final = imgpath
elif speedo.gif or speedo.video or speedo.video_note:
omk_p2 = downloaded_file_name
jpg_file = os.path.join(dwlpath, "image.jpg")
await take_screen_shot(omk_p2, 0, jpg_file)
os.remove(omk_p2)
if not os.path.exists(jpg_file):
await eor(event, "`Couldn't Fetch shot`")
return
speedo_final = jpg_file
return speedo_final
async def take_ss(
video_file: str, duration: int, path: str = ""
) -> Optional[str]:
LOGS.info(
"[[[Extracting a frame from %s ||| Video duration => %s]]]",
video_file,
duration,
)
ttl = duration // 2
thumb_image_path = path or os.path.join(dwlpath, f"{basename(video_file)}.jpg")
command = f'''ffmpeg -ss {ttl} -i "{video_file}" -vframes 1 "{thumb_image_path}"'''
err = (await runcmd(command))[1]
if err:
LOGS.error(err)
return thumb_image_path if os.path.exists(thumb_image_path) else None
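# Illustrative usage of take_ss (inside an async context; the path is hypothetical):
#   thumb = await take_ss("/tmp/clip.mp4", duration=20)
# extracts a single frame at the 10-second mark (duration // 2) via ffmpeg and
# returns its path, or None if ffmpeg produced no file.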
def tgs_to_gif(sticker_path: str, quality: int = 256) -> str:
semx = os.path.join(dwlpath, "Speedotgs.gif")
with open(semx, 'wb') as t_g:
lottie.exporters.gif.export_gif(lottie.parsers.tgs.parse_tgs(sticker_path), t_g, quality, 1)
os.remove(sticker_path)
return semx
# emoji and pictograph ranges stripped by deEmojify() below
EMOJI_PATTERN = re.compile(
"["
"\U0001F1E0-\U0001F1FF" # flags (iOS)
"\U0001F300-\U0001F5FF" # symbols & pictographs
"\U0001F600-\U0001F64F" # emoticons
"\U0001F680-\U0001F6FF" # transport & map symbols
"\U0001F700-\U0001F77F" # alchemical symbols
"\U0001F780-\U0001F7FF" # Geometric Shapes Extended
"\U0001F800-\U0001F8FF" # Supplemental Arrows-C
"\U0001F900-\U0001F9FF" # Supplemental Symbols and Pictographs
"\U0001FA00-\U0001FA6F" # Chess Symbols
"\U0001FA70-\U0001FAFF" # Symbols and Pictographs Extended-A
"\U00002702-\U000027B0" # Dingbats
"]+"
)
def deEmojify(inputString: str) -> str:
"""Remove emojis and other non-safe characters from string"""
return re.sub(EMOJI_PATTERN, "", inputString)
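# Illustrative example: deEmojify("deploy 🚀 now") returns "deploy  now" -- only
# the rocket character is removed (U+1F680 falls in the transport & map symbols
# range above); characters outside the listed ranges are left untouched.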
# Speedo
| 4,902 | 1,813 |
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'config.settings.'+os.environ.get('APP_ENV', 'local'))
app = Celery('app')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
| 453 | 156 |
import time
from typing import cast, Dict, Any
from flask import request
from assemblyline.common import forge
from assemblyline.common.constants import SERVICE_STATE_HASH, ServiceStatus
from assemblyline.common.dict_utils import flatten, unflatten
from assemblyline.common.forge import CachedObject
from assemblyline.common.heuristics import HeuristicHandler, InvalidHeuristicException
from assemblyline.common.isotime import now_as_iso
from assemblyline.odm import construct_safe
from assemblyline.odm.messages.service_heartbeat import Metrics
from assemblyline.odm.messages.task import Task as ServiceTask
from assemblyline.odm.models.error import Error
from assemblyline.odm.models.heuristic import Heuristic
from assemblyline.odm.models.result import Result
from assemblyline.odm.models.tagging import Tagging
from assemblyline.remote.datatypes.exporting_counter import export_metrics_once
from assemblyline.remote.datatypes.hash import ExpiringHash
from assemblyline_core.dispatching.client import DispatchClient
from assemblyline_service_server.api.base import make_subapi_blueprint, make_api_response, api_login
from assemblyline_service_server.config import FILESTORE, LOGGER, STORAGE, config
from assemblyline_service_server.helper.heuristics import get_heuristics
status_table = ExpiringHash(SERVICE_STATE_HASH, ttl=60*30)
dispatch_client = DispatchClient(STORAGE)
heuristics = cast(Dict[str, Heuristic], CachedObject(get_heuristics, refresh=300))
heuristic_handler = HeuristicHandler(STORAGE)
tag_safelister = CachedObject(forge.get_tag_safelister,
kwargs=dict(log=LOGGER, config=config, datastore=STORAGE),
refresh=300)
SUB_API = 'task'
task_api = make_subapi_blueprint(SUB_API, api_version=1)
task_api._doc = "Perform operations on service tasks"
@task_api.route("/", methods=["GET"])
@api_login()
def get_task(client_info):
"""
Header:
{'container_id': abcd...123,
'service_name': 'Extract',
'service_version': '4.0.1',
'service_tool_version': '...',
'timeout': '30'}
Result example:
{'keep_alive': true}
"""
service_name = client_info['service_name']
service_version = client_info['service_version']
service_tool_version = client_info['service_tool_version']
client_id = client_info['client_id']
remaining_time = timeout = int(float(request.headers.get('timeout', 30)))
try:
service_data = dispatch_client.service_data[service_name]
except KeyError:
return make_api_response({}, "The service you're asking task for does not exist, try later", 404)
start_time = time.time()
stats = {
"execute": 0,
"cache_miss": 0,
"cache_hit": 0,
"cache_skipped": 0,
"scored": 0,
"not_scored": 0
}
try:
while remaining_time > 0:
cache_found = False
# Set the service status to Idle since we will be waiting for a task
status_table.set(client_id, (service_name, ServiceStatus.Idle, start_time + timeout))
# Getting a new task
task = dispatch_client.request_work(client_id, service_name, service_version, timeout=remaining_time)
if not task:
# We've reached the timeout and no task found in service queue
return make_api_response(dict(task=False))
# We've got a task to process, consider us busy
status_table.set(client_id, (service_name, ServiceStatus.Running, time.time() + service_data.timeout))
stats['execute'] += 1
result_key = Result.help_build_key(sha256=task.fileinfo.sha256,
service_name=service_name,
service_version=service_version,
service_tool_version=service_tool_version,
is_empty=False,
task=task)
# If we are allowed, try to see if the result has been cached
if not task.ignore_cache and not service_data.disable_cache:
# Checking for previous results for this key
result = STORAGE.result.get_if_exists(result_key)
if result:
stats['cache_hit'] += 1
if result.result.score:
stats['scored'] += 1
else:
stats['not_scored'] += 1
result.archive_ts = now_as_iso(config.datastore.ilm.days_until_archive * 24 * 60 * 60)
if task.ttl:
result.expiry_ts = now_as_iso(task.ttl * 24 * 60 * 60)
dispatch_client.service_finished(task.sid, result_key, result)
cache_found = True
if not cache_found:
# Checking for previous empty results for this key
result = STORAGE.emptyresult.get_if_exists(f"{result_key}.e")
if result:
stats['cache_hit'] += 1
stats['not_scored'] += 1
result = STORAGE.create_empty_result_from_key(result_key)
dispatch_client.service_finished(task.sid, f"{result_key}.e", result)
cache_found = True
if not cache_found:
stats['cache_miss'] += 1
else:
stats['cache_skipped'] += 1
if not cache_found:
# No luck with the cache, lets dispatch the task to a client
return make_api_response(dict(task=task.as_primitives()))
# Recalculating how much time we have left before we reach the timeout
remaining_time = start_time + timeout - time.time()
# We've been processing cache hits for the length of the timeout... bailing out!
return make_api_response(dict(task=False))
finally:
export_metrics_once(service_name, Metrics, stats, host=client_id, counter_type='service')
@task_api.route("/", methods=["POST"])
@api_login()
def task_finished(client_info):
"""
Header:
{'container_id': abcd...123,
'service_name': 'Extract',
'service_version': '4.0.1',
'service_tool_version': '...'
}
Data Block:
{
"exec_time": 300,
"task": <Original Task Dict>,
"result": <AL Result Dict>,
"freshen": true
}
"""
data = request.json
exec_time = data.get('exec_time')
try:
task = ServiceTask(data['task'])
if 'result' in data: # Task created a result
missing_files = handle_task_result(exec_time, task, data['result'], client_info, data['freshen'])
if missing_files:
return make_api_response(dict(success=False, missing_files=missing_files))
return make_api_response(dict(success=True))
elif 'error' in data: # Task created an error
error = data['error']
handle_task_error(exec_time, task, error, client_info)
return make_api_response(dict(success=True))
else:
return make_api_response("", "No result or error provided by service.", 400)
except ValueError as e: # Catch errors when building Task or Result model
return make_api_response("", e, 400)
def handle_task_result(exec_time: int, task: ServiceTask, result: Dict[str, Any], client_info: Dict[str, str],
freshen: bool):
archive_ts = now_as_iso(config.datastore.ilm.days_until_archive * 24 * 60 * 60)
if task.ttl:
expiry_ts = now_as_iso(task.ttl * 24 * 60 * 60)
else:
expiry_ts = None
# Check if all files are in the filestore
if freshen:
missing_files = []
for f in result['response']['extracted'] + result['response']['supplementary']:
cur_file_info = STORAGE.file.get_if_exists(f['sha256'], as_obj=False)
if cur_file_info is None or not FILESTORE.exists(f['sha256']):
missing_files.append(f['sha256'])
else:
cur_file_info['archive_ts'] = archive_ts
cur_file_info['expiry_ts'] = expiry_ts
cur_file_info['classification'] = f['classification']
STORAGE.save_or_freshen_file(f['sha256'], cur_file_info,
cur_file_info['expiry_ts'], cur_file_info['classification'],
is_section_image=f.get('is_section_image', False))
if missing_files:
return missing_files
service_name = client_info['service_name']
client_id = client_info['client_id']
# Add scores to the heuristics, if any section set a heuristic
total_score = 0
for section in result['result']['sections']:
zeroize_on_sig_safe = section.pop('zeroize_on_sig_safe', True)
section['tags'] = flatten(section['tags'])
if section.get('heuristic'):
heur_id = f"{client_info['service_name'].upper()}.{str(section['heuristic']['heur_id'])}"
section['heuristic']['heur_id'] = heur_id
try:
section['heuristic'], new_tags = heuristic_handler.service_heuristic_to_result_heuristic(
section['heuristic'], heuristics, zeroize_on_sig_safe)
for tag in new_tags:
section['tags'].setdefault(tag[0], [])
if tag[1] not in section['tags'][tag[0]]:
section['tags'][tag[0]].append(tag[1])
total_score += section['heuristic']['score']
except InvalidHeuristicException:
section['heuristic'] = None
# Update the total score of the result
result['result']['score'] = total_score
# Add timestamps for creation, archive and expiry
result['created'] = now_as_iso()
result['archive_ts'] = archive_ts
result['expiry_ts'] = expiry_ts
# Pop the temporary submission data
temp_submission_data = result.pop('temp_submission_data', None)
# Process the tag values
for section in result['result']['sections']:
# Perform tag safelisting
tags, safelisted_tags = tag_safelister.get_validated_tag_map(section['tags'])
section['tags'] = unflatten(tags)
section['safelisted_tags'] = safelisted_tags
section['tags'], dropped = construct_safe(Tagging, section.get('tags', {}))
# Set section score to zero and lower total score if service is set to zeroize score
# and all tags were safelisted
if section.pop('zeroize_on_tag_safe', False) and \
section.get('heuristic') and \
len(tags) == 0 and \
len(safelisted_tags) != 0:
result['result']['score'] -= section['heuristic']['score']
section['heuristic']['score'] = 0
if dropped:
LOGGER.warning(f"[{task.sid}] Invalid tag data from {client_info['service_name']}: {dropped}")
result = Result(result)
result_key = result.build_key(service_tool_version=result.response.service_tool_version, task=task)
dispatch_client.service_finished(task.sid, result_key, result, temp_submission_data)
# Metrics
if result.result.score > 0:
export_metrics_once(service_name, Metrics, dict(scored=1), host=client_id, counter_type='service')
else:
export_metrics_once(service_name, Metrics, dict(not_scored=1), host=client_id, counter_type='service')
LOGGER.info(f"[{task.sid}] {client_info['client_id']} - {client_info['service_name']} "
f"successfully completed task {f' in {exec_time}ms' if exec_time else ''}")
def handle_task_error(exec_time: int, task: ServiceTask, error: Dict[str, Any], client_info: Dict[str, str]) -> None:
service_name = client_info['service_name']
client_id = client_info['client_id']
LOGGER.info(f"[{task.sid}] {client_info['client_id']} - {client_info['service_name']} "
f"failed to complete task {f' in {exec_time}ms' if exec_time else ''}")
# Add timestamps for creation, archive and expiry
error['created'] = now_as_iso()
error['archive_ts'] = now_as_iso(config.datastore.ilm.days_until_archive * 24 * 60 * 60)
if task.ttl:
error['expiry_ts'] = now_as_iso(task.ttl * 24 * 60 * 60)
error = Error(error)
error_key = error.build_key(service_tool_version=error.response.service_tool_version, task=task)
dispatch_client.service_failed(task.sid, error_key, error)
# Metrics
if error.response.status == 'FAIL_RECOVERABLE':
export_metrics_once(service_name, Metrics, dict(fail_recoverable=1), host=client_id, counter_type='service')
else:
export_metrics_once(service_name, Metrics, dict(fail_nonrecoverable=1), host=client_id, counter_type='service')
| 12,953 | 3,896 |
# Copyright (c) 2017 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import sgtk
from sgtk.platform.qt import QtCore
from .search_result_delegate import SearchResultDelegate
# import the shotgun_model and view modules from the shotgun utils framework
shotgun_model = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_model")
shotgun_globals = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_globals")
views = sgtk.platform.current_bundle().import_module("views")
class GlobalSearchResultDelegate(SearchResultDelegate):
"""
Delegate which renders search match entries in the global
search completer.
"""
def _render_result(self, widget, model_index):
"""
Renders a result from the model into the provided widget.
:param widget: Widget used to render the result.
:type widget: ``SearchResultWidget``
:param model_index: Index of the item to render.
:type model_index: :class:`~PySide.QtCore.QModelIndex`
"""
from .global_search_completer import GlobalSearchCompleter
icon = shotgun_model.get_sanitized_data(model_index, QtCore.Qt.DecorationRole)
if icon:
thumb = icon.pixmap(512)
widget.set_thumbnail(thumb)
else:
# probably won't hit here, but just in case, use default/empty
# thumbnail
widget.set_thumbnail(self._pixmaps.no_thumbnail)
data = shotgun_model.get_sanitized_data(model_index, GlobalSearchCompleter.SG_DATA_ROLE)
# Example of data stored in the data role:
# {'status': 'vwd',
# 'name': 'bunny_010_0050_comp_v001',
# 'links': ['Shot', 'bunny_010_0050'],
# 'image': 'https://xxx',
# 'project_id': 65,
# 'type': 'Version',
# 'id': 99}
entity_type_display_name = shotgun_globals.get_type_display_name(data["type"])
content = ""
et_url = shotgun_globals.get_entity_type_icon_url(data["type"])
underlined_name = self._underline_search_term(data["name"])
if et_url:
# present thumbnail icon and name
content += "<img src='%s'/> <b style='color: rgb(48, 167, 227)';>%s</b>" % (
et_url, underlined_name
)
else:
# no icon available; present just the name
content += "%s" % underlined_name
content += "<br>%s" % entity_type_display_name
links = data["links"]
# note: user entities return unhelpful link data, so ignore them.
if links and links[0] != "" and links[0] != "HumanUser" and links[0] != "ClientUser":
underlined_link = self._underline_search_term(links[1])
# there is a referenced entity
et_url = shotgun_globals.get_entity_type_icon_url(links[0])
if et_url:
# present thumbnail icon and name
content += " on <img align=absmiddle src='%s'/> %s" % (et_url, underlined_link)
else:
# no icon available; present the type display name with the linked entity name
link_entity_type = links[0]
content += " on %s %s" % (shotgun_globals.get_type_display_name(link_entity_type), underlined_link)
widget.set_text(content)
| 3,627 | 1,103 |
__all__ = ['Elastix', 'Resampling']; | 36 | 15 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-01 23:52
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tournament', '0085_auto_20160901_2341'),
]
operations = [
migrations.AlterUniqueTogether(
name='gameselection',
unique_together=set([('season', 'game_link')]),
),
]
| 398 | 156 |
# -------- BEGIN LICENSE BLOCK --------
# Copyright 2022 FZI Forschungszentrum Informatik
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the {copyright_holder} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -------- END LICENSE BLOCK --------
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
import sys
import time
from ros_bt_py_msgs.msg import Node as NodeMsg, Message, Package
from ros_bt_py_msgs.msg import NodeData, NodeDataWiring, NodeDataLocation, Tree
from ros_bt_py_msgs.srv import (WireNodeDataRequest, AddNodeRequest, RemoveNodeRequest,
ControlTreeExecutionRequest, GetAvailableNodesRequest,
SetExecutionModeRequest, SetOptionsRequest, ContinueRequest,
LoadTreeRequest, MoveNodeRequest, ReplaceNodeRequest,
MorphNodeRequest, ClearTreeRequest, LoadTreeFromPathRequest,
SetExecutionModeResponse, ModifyBreakpointsRequest,
GetSubtreeRequest, ReloadTreeRequest, WireNodeDataResponse,
RemoveNodeResponse, GenerateSubtreeRequest, AddNodeAtIndexRequest,
ChangeTreeNameRequest)
from ros_bt_py.node import Node, Leaf, FlowControl, define_bt_node
from ros_bt_py.node_config import NodeConfig
from ros_bt_py.nodes.sequence import Sequence
from ros_bt_py.nodes.mock_nodes import MockLeaf
from ros_bt_py.exceptions import BehaviorTreeException, MissingParentError, TreeTopologyError
from ros_bt_py.tree_manager import TreeManager
from ros_bt_py.tree_manager import (get_success as tm_get_success,
get_error_message as tm_get_error_message)
from ros_bt_py.helpers import json_encode, json_decode
from ros_bt_py.ros_helpers import LoggerLevel
try:
unicode
except NameError:
unicode = str
@define_bt_node(NodeConfig(
options={},
inputs={},
outputs={},
max_children=0))
class LongRunningNode(Leaf):
def _do_setup(self):
pass
def _do_tick(self):
time.sleep(1.0)
return NodeMsg.SUCCEEDED
def _do_shutdown(self):
pass
def _do_reset(self):
return NodeMsg.IDLE
def _do_untick(self):
return NodeMsg.IDLE
class TestTreeManager(unittest.TestCase):
def setUp(self):
self.tree_msg = None
self.debug_info_msg = None
def set_tree_msg(msg):
self.tree_msg = msg
def set_debug_info_msg(msg):
self.debug_info_msg = msg
self.manager = TreeManager(publish_tree_callback=set_tree_msg,
publish_debug_info_callback=set_debug_info_msg)
self.node_msg = NodeMsg(
module='ros_bt_py.nodes.passthrough_node',
node_class='PassthroughNode',
inputs=[NodeData(key='in',
serialized_value=json_encode(42))],
options=[NodeData(key='passthrough_type',
serialized_value=json_encode(int))])
self.constant_msg = NodeMsg(
module='ros_bt_py.nodes.constant',
node_class='Constant',
options=[NodeData(key='constant_type',
serialized_value=json_encode(int)),
NodeData(key='constant_value',
serialized_value=json_encode(42))])
self.sequence_msg = NodeMsg(
module='ros_bt_py.nodes.sequence',
node_class='Sequence')
self.memory_sequence_msg = NodeMsg(
module='ros_bt_py.nodes.sequence',
node_class='MemorySequence')
self.succeeder_msg = NodeMsg(
module='ros_bt_py.nodes.mock_nodes',
node_class='MockLeaf',
options=[NodeData(key='output_type',
serialized_value=json_encode(str)),
NodeData(key='state_values',
serialized_value=json_encode([NodeMsg.SUCCEEDED])),
NodeData(key='output_values',
serialized_value=json_encode(['Yay!']))])
def testEnsureTickFrequencyGreaterZero(self):
manager = TreeManager(tick_frequency_hz=0)
self.assertNotEquals(manager.tree_msg.tick_frequency_hz, 0)
def testTickFrequencyTooHigh(self):
tick_frequency_hz = 10000000000000.0
sleep_duration_sec = (1.0 / tick_frequency_hz)
manager = TreeManager(tick_frequency_hz=tick_frequency_hz)
add_request = AddNodeRequest(node=self.node_msg,
allow_rename=True)
self.assertTrue(manager.add_node(add_request).success)
execution_request = ControlTreeExecutionRequest()
execution_request.command = ControlTreeExecutionRequest.TICK_PERIODICALLY
start_time = time.time()
self.assertTrue(get_success(manager.control_execution(execution_request)))
tick_duration = time.time() - start_time
self.assertGreater(tick_duration, sleep_duration_sec)
time.sleep(0.1)
manager.tree_msg.state = Tree.STOP_REQUESTED
manager._tick_thread.join(0.1)
self.assertFalse(manager._tick_thread.is_alive())
def testLoadNodeModule(self):
manager = TreeManager(module_list=['ros_bt_py.nodes.sequence'])
self.assertIn('ros_bt_py.nodes.sequence', sys.modules)
def testCycle(self):
node = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
self.manager.nodes[node.name].parent = node.name
self.assertRaises(TreeTopologyError, self.manager.find_root)
def testOrphan(self):
node = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
node2 = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
self.manager.nodes[node.name].parent = node2
self.manager.remove_node(RemoveNodeRequest(node_name=node2.name,
remove_children=False))
self.assertRaises(MissingParentError, self.manager.tick, True)
def testNoNodes(self):
self.manager.tick(once=True)
self.assertEqual(self.manager.tree_msg.state, Tree.EDITABLE)
def testGetSuccessErrorMessageDict(self):
message = {'success': False,
'error_message': 'error'}
self.assertFalse(tm_get_success(message))
self.assertEqual(tm_get_error_message(message), 'error')
def testGenerateSubtreeService(self):
generate_request = GenerateSubtreeRequest()
generate_response = self.manager.generate_subtree(generate_request)
self.assertFalse(get_success(generate_response))
add_request = AddNodeRequest(node=self.sequence_msg,
allow_rename=True)
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 1)
self.assertTrue(get_success(response))
add_request = AddNodeRequest(node=self.sequence_msg,
allow_rename=True,
parent_name=response.actual_node_name)
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 2)
self.assertTrue(get_success(response))
seq_2_name = response.actual_node_name
add_request = AddNodeRequest(node=self.succeeder_msg,
allow_rename=True,
parent_name=seq_2_name)
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 3)
self.assertTrue(get_success(response))
generate_request = GenerateSubtreeRequest()
generate_request.nodes = [response.actual_node_name]
generate_response = self.manager.generate_subtree(generate_request)
self.assertTrue(get_success(generate_response))
def testGetSubtreeService(self):
add_request = AddNodeRequest(node=self.sequence_msg,
allow_rename=True)
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 1)
self.assertTrue(get_success(response))
add_request = AddNodeRequest(node=self.sequence_msg,
allow_rename=True,
parent_name=response.actual_node_name)
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 2)
self.assertTrue(get_success(response))
seq_2_name = response.actual_node_name
add_request = AddNodeRequest(node=self.succeeder_msg,
allow_rename=True,
parent_name=seq_2_name)
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 3)
self.assertTrue(get_success(response))
add_request = AddNodeRequest(node=self.succeeder_msg,
allow_rename=True,
parent_name=seq_2_name)
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 4)
self.assertTrue(get_success(response))
subtree_request = GetSubtreeRequest(subtree_root_name=seq_2_name)
subtree_response = self.manager.get_subtree(subtree_request)
self.assertTrue(get_success(subtree_response))
self.assertEqual(len(subtree_response.subtree.nodes), 3)
subtree_request = GetSubtreeRequest(subtree_root_name='not_in_tree')
subtree_response = self.manager.get_subtree(subtree_request)
self.assertFalse(get_success(subtree_response))
def testGetSubtreeServiceWirings(self):
load_request = LoadTreeRequest(tree=Tree(
name='from_file',
path='package://ros_bt_py/test/testdata/trees/get_subtree.yaml'))
self.assertTrue(get_success(self.manager.load_tree(load_request)))
wire_request = WireNodeDataRequest()
wire_request.wirings.append(NodeDataWiring(
source=NodeDataLocation(node_name='PassthroughNode_2',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='PassthroughNode',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA)))
wire_request.wirings.append(NodeDataWiring(
source=NodeDataLocation(node_name='PassthroughNode_2',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA),
target=NodeDataLocation(node_name='PassthroughNode',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA)))
wire_request.wirings.append(NodeDataWiring(
source=NodeDataLocation(node_name='PassthroughNode_2',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA),
target=NodeDataLocation(node_name='PassthroughNode',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA)))
wire_request.wirings.append(NodeDataWiring(
source=NodeDataLocation(node_name='PassthroughNode_2',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='PassthroughNode',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA)))
wire_request.wirings.append(NodeDataWiring(
source=NodeDataLocation(node_name='PassthroughNode',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='PassthroughNode_2',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA)))
wire_request.wirings.append(NodeDataWiring(
source=NodeDataLocation(node_name='PassthroughNode',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA),
target=NodeDataLocation(node_name='PassthroughNode_2',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA)))
wire_request.wirings.append(NodeDataWiring(
source=NodeDataLocation(node_name='PassthroughNode',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA),
target=NodeDataLocation(node_name='PassthroughNode_2',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA)))
wire_request.wirings.append(NodeDataWiring(
source=NodeDataLocation(node_name='PassthroughNode',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='PassthroughNode_2',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA)))
response = self.manager.wire_data(wire_request)
self.assertTrue(get_success(response), get_error_message(response))
subtree_request = GetSubtreeRequest(subtree_root_name='Sequence')
subtree_response = self.manager.get_subtree(subtree_request)
self.assertTrue(get_success(subtree_response))
self.assertEqual(len(subtree_response.subtree.nodes), 4)
response = self.manager.unwire_data(wire_request)
self.assertTrue(get_success(response), get_error_message(response))
def testGetSubtreeServiceSubscriptions(self):
add_request = AddNodeRequest(node=self.sequence_msg,
allow_rename=True)
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 1)
self.assertTrue(get_success(response))
node = self.manager.nodes[response.actual_node_name]
node.subscriptions.append(NodeDataWiring(
source=NodeDataLocation(node_name=node.name,
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='also_missing',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA)))
subtree_request = GetSubtreeRequest(subtree_root_name=response.actual_node_name)
subtree_response = self.manager.get_subtree(subtree_request)
self.assertTrue(get_success(subtree_response))
node.subscriptions.append(NodeDataWiring(
source=NodeDataLocation(node_name='missing',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='also_missing',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA)))
subtree_request = GetSubtreeRequest(subtree_root_name=response.actual_node_name)
subtree_response = self.manager.get_subtree(subtree_request)
self.assertFalse(get_success(subtree_response))
def testTickExceptionHandling(self):
@define_bt_node(NodeConfig(
options={},
inputs={},
outputs={},
max_children=0))
class ExceptionNode(Leaf):
def _do_setup(self):
pass
def _do_tick(self):
raise BehaviorTreeException
def _do_shutdown(self):
pass
def _do_reset(self):
return NodeMsg.IDLE
def _do_untick(self):
return NodeMsg.IDLE
node = ExceptionNode()
manager = TreeManager(show_traceback_on_exception=False)
manager.nodes[node.name] = node
self.assertEqual(manager.tree_msg.state, Tree.EDITABLE)
manager.tick_report_exceptions()
self.assertEqual(manager.tree_msg.state, Tree.ERROR)
manager = TreeManager(show_traceback_on_exception=True)
manager.nodes[node.name] = node
self.assertEqual(manager.tree_msg.state, Tree.EDITABLE)
manager.tick_report_exceptions()
self.assertEqual(manager.tree_msg.state, Tree.ERROR)
def testLoadNode(self):
_ = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
_ = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
self.node_msg.name = 'Test Node'
_ = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
self.assertIn('PassthroughNode', self.manager.nodes)
self.assertIn('PassthroughNode_2', self.manager.nodes)
self.assertIn('Test Node', self.manager.nodes)
def testWireData(self):
root = self.manager.instantiate_node_from_msg(
NodeMsg(
module='ros_bt_py.nodes.sequence',
node_class='Sequence',
name='root'),
allow_rename=False)
self.node_msg.name = 'source_node'
source = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
root.add_child(source)
self.node_msg.name = 'target_node'
target = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
root.add_child(target)
self.assertIn('source_node', self.manager.nodes)
self.assertIn('target_node', self.manager.nodes)
valid_request = WireNodeDataRequest()
valid_request.wirings.append(NodeDataWiring(
source=NodeDataLocation(node_name='source_node',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='target_node',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA)))
response = self.manager.wire_data(valid_request)
self.assertTrue(get_success(response), get_error_message(response))
self.assertEqual(len(self.manager.nodes['source_node'].outputs.callbacks), 1)
self.assertEqual(len(self.manager.tree_msg.data_wirings), 1)
def testWireWithInvalidKey(self):
self.node_msg.name = 'source_node'
self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
self.node_msg.name = 'target_node'
self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
self.assertIn('source_node', self.manager.nodes)
self.assertIn('target_node', self.manager.nodes)
invalid_key_request = WireNodeDataRequest()
invalid_key_request.wirings.append(NodeDataWiring(
source=NodeDataLocation(node_name='source_node',
# PassthroughNode does not have this key!
data_key='wrong',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='target_node',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA)))
response = self.manager.wire_data(invalid_key_request)
self.assertFalse(get_success(response))
def testWireWithInvalidNodeName(self):
self.node_msg.name = 'source_node'
self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
self.node_msg.name = 'target_node'
self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
self.assertIn('source_node', self.manager.nodes)
self.assertIn('target_node', self.manager.nodes)
invalid_node_request = WireNodeDataRequest()
invalid_node_request.wirings.append(NodeDataWiring(
# Wrong node name for source node
source=NodeDataLocation(node_name='fantasy_node',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='target_node',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA)))
response = self.manager.wire_data(invalid_node_request)
self.assertFalse(get_success(response))
def testMultiWireWithOneInvalid(self):
"""WireNodeData supports wiring multiple pairs of NodeData at once.
If there's an error while handling any pair, none of the wirings must be applied!
"""
self.node_msg.name = 'source_node'
self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
self.node_msg.name = 'target_node'
self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
self.assertIn('source_node', self.manager.nodes)
self.assertIn('target_node', self.manager.nodes)
invalid_multi_request = WireNodeDataRequest()
# This is fine and should work
invalid_multi_request.wirings.append(NodeDataWiring(
source=NodeDataLocation(node_name='source_node',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='target_node',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA)))
invalid_multi_request.wirings.append(NodeDataWiring(
# Wrong node name for source node
source=NodeDataLocation(node_name='fantasy_node',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='target_node',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA)))
response = self.manager.wire_data(invalid_multi_request)
self.assertFalse(get_success(response))
# The first (valid) wiring must not have been applied either -> no
# callbacks registered on source_node
self.assertEqual(len(self.manager.nodes['source_node'].outputs.callbacks), 0)
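# Error handling during (un)wiring: after one successful wiring, a wiring
# with a nonexistent target is appended and target_node.wire_data is mocked
# to raise, so the following unwire_data call must report failure.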
def testWireWithError(self):
root = self.manager.instantiate_node_from_msg(
NodeMsg(
module='ros_bt_py.nodes.sequence',
node_class='Sequence',
name='root'),
allow_rename=False)
self.node_msg.name = 'source_node'
source = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
root.add_child(source)
self.node_msg.name = 'target_node'
target = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
root.add_child(target)
self.assertIn('source_node', self.manager.nodes)
self.assertIn('target_node', self.manager.nodes)
valid_request = WireNodeDataRequest()
valid_request.wirings.append(NodeDataWiring(
source=NodeDataLocation(node_name='source_node',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='target_node',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA)))
response = self.manager.wire_data(valid_request)
self.assertTrue(get_success(response))
valid_request.wirings.append(NodeDataWiring(
source=NodeDataLocation(node_name='source_node',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='target_node_does_not_exist',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA)))
self.manager.nodes['target_node'].wire_data = mock.MagicMock()
self.manager.nodes['target_node'].wire_data.side_effect = BehaviorTreeException()
response = self.manager.unwire_data(valid_request)
self.assertFalse(get_success(response))
def testUnwire(self):
root = self.manager.instantiate_node_from_msg(
NodeMsg(
module='ros_bt_py.nodes.sequence',
node_class='Sequence',
name='root'),
allow_rename=False)
wire_request = WireNodeDataRequest()
wire_request.wirings.append(NodeDataWiring(
# These node names are valid, but the corresponding nodes are only added below
source=NodeDataLocation(node_name='source_node',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='target_node',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA)))
response = self.manager.unwire_data(wire_request)
# Our manager has no nodes at all, so unwiring anything won't work
self.assertFalse(get_success(response))
self.node_msg.name = 'source_node'
source = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
root.add_child(source)
self.node_msg.name = 'target_node'
target = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
root.add_child(target)
response = self.manager.unwire_data(wire_request)
# The nodes and keys exist. There aren't any callbacks to remove, but
# the unwire operation still succeeds (after running it, the two data
# values are unconnected).
self.assertTrue(
get_success(response),
get_error_message(response) + "\n" + str(self.manager.nodes))
response = self.manager.wire_data(wire_request)
self.assertTrue(get_success(response))
self.assertEqual(len(self.manager.tree_msg.data_wirings), 1)
response = self.manager.unwire_data(wire_request)
self.assertTrue(get_success(response))
self.assertEqual(len(self.manager.tree_msg.data_wirings), 0)
def testUnwireWithError(self):
root = self.manager.instantiate_node_from_msg(
NodeMsg(
module='ros_bt_py.nodes.sequence',
node_class='Sequence',
name='root'),
allow_rename=False)
self.node_msg.name = 'source_node'
source = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
root.add_child(source)
self.node_msg.name = 'target_node'
target = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
root.add_child(target)
self.assertIn('source_node', self.manager.nodes)
self.assertIn('target_node', self.manager.nodes)
self.manager.nodes['target_node'].unwire_data = mock.MagicMock()
self.manager.nodes['target_node'].unwire_data.side_effect = BehaviorTreeException()
valid_request = WireNodeDataRequest()
valid_request.wirings.append(NodeDataWiring(
source=NodeDataLocation(node_name='source_node',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='target_node',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA)))
valid_request.wirings.append(NodeDataWiring(
source=NodeDataLocation(node_name='source_node',
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='target_node_does_not_exist',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA)))
response = self.manager.wire_data(valid_request)
self.assertFalse(get_success(response))
def testClearTree(self):
# Add a node to the tree and manually mark it as RUNNING
add_request = AddNodeRequest(node=self.succeeder_msg)
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 1)
self.assertTrue(get_success(response))
self.manager.nodes['MockLeaf'].state = NodeMsg.RUNNING
# Clear will fail until the tree is shut down
clear_request = ClearTreeRequest()
response = self.manager.clear(clear_request)
self.assertFalse(get_success(response))
self.assertEqual(len(self.manager.nodes), 1)
execution_request = ControlTreeExecutionRequest(
command=ControlTreeExecutionRequest.TICK_ONCE)
execution_request.command = ControlTreeExecutionRequest.SHUTDOWN
self.assertTrue(self.manager.control_execution(execution_request).success)
# after shutdown clear works again
response = self.manager.clear(clear_request)
self.assertTrue(get_success(response))
self.assertEqual(len(self.manager.nodes), 0)
# even a tree with multiple nodes (and thus no single root) is cleared
add_request = AddNodeRequest(node=self.succeeder_msg, allow_rename=True)
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 1)
self.assertTrue(get_success(response))
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 2)
self.assertTrue(get_success(response))
response = self.manager.clear(clear_request)
self.assertTrue(get_success(response))
self.assertEqual(len(self.manager.nodes), 0)
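# Basic add_node behaviour: a well-formed NodeMsg is accepted, while a
# NodeMsg referencing a nonexistent module and class is rejected.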
def testAddNode(self):
add_request = AddNodeRequest(node=self.node_msg)
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 1)
self.assertTrue(get_success(response))
broken_add = AddNodeRequest(node=NodeMsg(module='asdf',
node_class='foo'))
response = self.manager.add_node(broken_add)
self.assertFalse(get_success(response))
def testAddWithMissingParent(self):
self.assertFalse(self.manager.add_node(AddNodeRequest(node=self.node_msg,
parent_name='foo')).success)
def testAddMultiple(self):
add_request = AddNodeRequest(node=self.sequence_msg)
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 1)
self.assertTrue(get_success(response))
add_request = AddNodeRequest(node=self.node_msg,
parent_name=response.actual_node_name)
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 2)
self.assertTrue(get_success(response))
def testAddRenaming(self):
add_request = AddNodeRequest(node=self.sequence_msg)
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 1)
self.assertTrue(get_success(response))
# Add the same node again - since allow_rename should default
# to false, this will fail.
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 1)
self.assertFalse(get_success(response))
# Same with allow_rename set to False explicitly
add_request.allow_rename = False
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 1)
self.assertFalse(get_success(response))
# But it should work if we set allow_rename to True
add_request.allow_rename = True
response = self.manager.add_node(add_request)
self.assertEqual(len(self.manager.nodes), 2)
self.assertTrue(get_success(response))
def testAddWithChild(self):
add_request = AddNodeRequest(node=self.sequence_msg)
response = self.manager.add_node(add_request)
self.assertTrue(get_success(response))
self.assertEqual(len(self.manager.nodes), 1)
self.sequence_msg.child_names.append(response.actual_node_name)
add_request = AddNodeRequest(node=self.sequence_msg,
allow_rename=True)
response = self.manager.add_node(add_request)
self.assertTrue(get_success(response))
self.assertEqual(len(self.manager.nodes), 2)
root = self.manager.find_root()
# The newly inserted second node should be the root of the tree, since
# the other one is its child
self.assertEqual(response.actual_node_name, root.name)
self.assertEqual(len(root.children), 1)
def testAddWithMissingChild(self):
self.sequence_msg.child_names.append('imaginary_node')
add_request = AddNodeRequest(node=self.sequence_msg)
response = self.manager.add_node(add_request)
# Don't add nodes with missing children to the tree
self.assertFalse(get_success(response))
self.assertEqual(len(self.manager.nodes), 0)
def testAddWithInvalidOption(self):
self.node_msg.options = [
NodeData(key='passthrough_type',
# passthrough_type must be a type, not an int
serialized_value=json_encode(42))]
add_request = AddNodeRequest(node=self.node_msg)
response = self.manager.add_node(add_request)
self.assertFalse(get_success(response))
def testBuildCycle(self):
add_request = AddNodeRequest(node=self.sequence_msg)
response = self.manager.add_node(add_request)
self.assertTrue(get_success(response))
self.assertEqual(len(self.manager.nodes), 1)
self.sequence_msg.child_names.append(response.actual_node_name)
add_request = AddNodeRequest(parent_name=response.actual_node_name,
node=self.sequence_msg,
allow_rename=True)
response = self.manager.add_node(add_request)
self.assertFalse(get_success(response))
self.assertEqual(len(self.manager.nodes), 1)
def testRemoveNode(self):
instance = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
self.assertEqual(len(self.manager.nodes), 1)
remove_request = RemoveNodeRequest(node_name=instance.name)
response = self.manager.remove_node(remove_request)
self.assertTrue(get_success(response))
self.assertEqual(len(self.manager.nodes), 0)
# Second remove will fail, there's nothing left to remove.
response = self.manager.remove_node(remove_request)
self.assertFalse(get_success(response))
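# Removing a parent with remove_children=False keeps the child in the tree;
# the remaining PassthroughNode becomes the new root and still ticks to
# SUCCEEDED.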
def testRemoveParent(self):
add_response = self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))
self.manager.add_node(
AddNodeRequest(node=self.node_msg,
parent_name=add_response.actual_node_name))
self.assertEqual(len(self.manager.nodes), 2)
remove_response = self.manager.remove_node(
RemoveNodeRequest(node_name=add_response.actual_node_name,
remove_children=False))
self.assertTrue(get_success(remove_response))
self.assertEqual(len(self.manager.nodes), 1)
self.manager.tick(once=True)
root_node = [node for node in self.tree_msg.nodes
if node.name == self.tree_msg.root_name][0]
self.assertEqual(root_node.state, NodeMsg.SUCCEEDED)
def testRemoveParentAndChildren(self):
add_response = self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))
self.manager.add_node(
AddNodeRequest(node=self.node_msg,
parent_name=add_response.actual_node_name))
self.assertEqual(len(self.manager.nodes), 2)
remove_response = self.manager.remove_node(
RemoveNodeRequest(node_name=add_response.actual_node_name,
remove_children=True))
self.assertTrue(get_success(remove_response), get_error_message(remove_response))
self.assertEqual(len(self.manager.nodes), 0)
def testRemoveParentAndChildrenWithBrokenChildren(self):
add_response = self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))
child_response = self.manager.add_node(
AddNodeRequest(node=self.node_msg,
parent_name=add_response.actual_node_name))
self.assertEqual(len(self.manager.nodes), 2)
self.manager.nodes[child_response.actual_node_name].children.append(
Sequence(name='not_in_tree'))
remove_response = self.manager.remove_node(
RemoveNodeRequest(node_name=add_response.actual_node_name,
remove_children=True))
self.assertFalse(get_success(remove_response), get_error_message(remove_response))
self.assertEqual(len(self.manager.nodes), 2)
def testRemoveParentAndChildrenWithIdenticalChildren(self):
add_response = self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))
child_response = self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name=add_response.actual_node_name))
first_child_name = child_response.actual_node_name
child_response = self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name=add_response.actual_node_name,
allow_rename=True))
self.assertEqual(len(self.manager.nodes), 3)
self.manager.nodes[add_response.actual_node_name].children[1].name = first_child_name
remove_response = self.manager.remove_node(
RemoveNodeRequest(node_name=add_response.actual_node_name,
remove_children=True))
self.assertTrue(get_success(remove_response))
self.assertEqual(len(self.manager.nodes), 1)
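# The custom FlowControl node below sets up its children but intentionally
# does nothing on shutdown; remove_node with remove_children=True must still
# remove both the parent and its child.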
def testRemoveParentAndChildrenWithParentThatDoesNotShutdownItsChildren(self):
@define_bt_node(NodeConfig(
options={},
inputs={},
outputs={},
max_children=None))
class FlowControlNode(FlowControl):
def _do_setup(self):
for child in self.children:
child.setup()
def _do_tick(self):
return NodeMsg.SUCCEEDED
def _do_shutdown(self):
pass
def _do_reset(self):
return NodeMsg.IDLE
def _do_untick(self):
return NodeMsg.IDLE
parent = FlowControlNode()
self.manager.nodes[parent.name] = parent
child = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
parent.add_child(child)
self.assertEqual(self.manager.nodes[parent.name].state, NodeMsg.UNINITIALIZED)
self.assertEqual(self.manager.nodes[child.name].state, NodeMsg.UNINITIALIZED)
self.manager.nodes[parent.name].setup()
self.assertEqual(self.manager.nodes[parent.name].state, NodeMsg.IDLE)
self.assertEqual(self.manager.nodes[child.name].state, NodeMsg.IDLE)
remove_response = self.manager.remove_node(
RemoveNodeRequest(node_name=parent.name,
remove_children=True))
self.assertTrue(get_success(remove_response))
self.assertEqual(len(self.manager.nodes), 0)
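# testMoveNode builds outer_seq -> (inner_seq, A) with B inside inner_seq,
# then checks that moving to a leaf or to/from unknown nodes fails, while a
# move with new_child_index=-1 appends A after B inside inner_seq.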
def testMoveNode(self):
self.sequence_msg.name = 'outer_seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.sequence_msg.name = "inner_seq"
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg,
parent_name='outer_seq'))))
self.succeeder_msg.name = 'A'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='outer_seq'))))
self.succeeder_msg.name = 'B'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='inner_seq'))))
self.assertEqual(len(self.manager.nodes), 4)
# Should fail, since "A" is a MockLeaf, which can't have children
self.assertFalse(get_success(self.manager.move_node(
MoveNodeRequest(
node_name='B',
new_parent_name='A',
new_child_index=0))))
# Should fail, since "asdf" is not in the tree
self.assertFalse(get_success(self.manager.move_node(
MoveNodeRequest(
node_name='B',
new_parent_name='asdf',
new_child_index=0))))
# Should fail, since "asdf" is not in the tree
self.assertFalse(get_success(self.manager.move_node(
MoveNodeRequest(
node_name='asdf',
new_parent_name='outer_seq',
new_child_index=0))))
# Should succeed and put "A" after "B" (-1 means
# "first from the back")
self.assertTrue(get_success(self.manager.move_node(
MoveNodeRequest(
node_name='A',
new_parent_name='inner_seq',
new_child_index=-1))))
self.assertIn('inner_seq', [node.name for node in self.tree_msg.nodes])
for node in self.tree_msg.nodes:
if node.name == 'outer_seq':
# After moving A into inner_seq, outer_seq has only
# one child
self.assertEqual(len(node.child_names), 1)
if node.name == 'inner_seq':
A_index = None
B_index = None
for index, name in enumerate(node.child_names):
if name == "A":
A_index = index
if name == "B":
B_index = index
self.assertIsNotNone(A_index, 'Node A is not a child of inner_seq!')
self.assertIsNotNone(B_index, 'Node B is not a child of inner_seq!')
# As mentioned above, A should appear *after* B in the
# list of inner_seq's children!
self.assertGreater(A_index, B_index)
def testMoveToNoParent(self):
self.sequence_msg.name = 'seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.succeeder_msg.name = 'A'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='seq'))))
self.assertEqual(len(self.tree_msg.nodes), 2)
self.assertTrue(get_success(self.manager.move_node(
MoveNodeRequest(
node_name='A',
new_parent_name=''))))
# With A removed from seq's children, no node should have any
# children!
self.assertTrue(all([not node.child_names for node in self.tree_msg.nodes]))
def testMoveWithinSameParent(self):
self.sequence_msg.name = 'seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.succeeder_msg.name = 'A'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='seq'))))
self.succeeder_msg.name = 'B'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='seq'))))
self.succeeder_msg.name = 'C'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='seq'))))
self.assertEqual(len(self.tree_msg.nodes), 4)
# Confirm the positions of all three succeeders
seq_msg = None
for node in self.tree_msg.nodes:
if node.name == 'seq':
seq_msg = node
break
self.assertIsNotNone(seq_msg, 'Failed to find sequence in tree message')
self.assertEqual(seq_msg.child_names, ['A', 'B', 'C'])
self.assertTrue(get_success(self.manager.move_node(
MoveNodeRequest(
node_name='A',
new_parent_name='seq',
new_child_index=1
))))
seq_msg = None
for node in self.tree_msg.nodes:
if node.name == 'seq':
seq_msg = node
break
self.assertIsNotNone(seq_msg, 'Failed to find sequence in tree message')
self.assertEqual(seq_msg.child_names, ['B', 'A', 'C'])
def testMoveToOwnChild(self):
self.sequence_msg.name = 'seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.sequence_msg.name = 'seq_2'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg,
parent_name='seq'))))
# This should be impossible, since it leads to a
# circular graph!
self.assertFalse(get_success(self.manager.move_node(
MoveNodeRequest(
node_name='seq',
new_parent_name='seq_2',
new_child_index=0
))))
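# morph_node swaps a node for one of a different class while keeping its
# place in the tree; here 'seq' is morphed into self.memory_sequence_msg
# (presumably a MemorySequence message prepared in setUp), and morphing a
# name that is not in the tree must fail.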
def testMorphNode(self):
self.sequence_msg.name = 'seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.succeeder_msg.name = 'A'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='seq'))))
self.succeeder_msg.name = 'B'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='seq'))))
self.assertEqual(len(self.tree_msg.nodes), 3)
self.assertFalse(get_success(self.manager.morph_node(
MorphNodeRequest(node_name='node_not_in_tree',
new_node=self.memory_sequence_msg)
)))
self.assertTrue(get_success(self.manager.morph_node(
MorphNodeRequest(node_name='seq',
new_node=self.memory_sequence_msg)
)))
def testMorphNodeWithParent(self):
self.sequence_msg.name = 'outer_seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.sequence_msg.name = "inner_seq"
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg,
parent_name='outer_seq'))))
self.assertTrue(get_success(self.manager.morph_node(
MorphNodeRequest(node_name='inner_seq',
new_node=self.memory_sequence_msg)
)))
def testMorphNodeWithBrokenParent(self):
self.sequence_msg.name = 'outer_seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.sequence_msg.name = "inner_seq"
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg,
parent_name='outer_seq'))))
# break parent node
self.manager.nodes['outer_seq'].node_config.max_children = 0
self.assertFalse(get_success(self.manager.morph_node(
MorphNodeRequest(node_name='inner_seq',
new_node=self.memory_sequence_msg)
)))
self.manager.nodes['outer_seq'].node_config.max_children = None
self.manager.nodes['outer_seq'].children = []
self.assertFalse(get_success(self.manager.morph_node(
MorphNodeRequest(node_name='inner_seq',
new_node=self.memory_sequence_msg)
)))
def testMorphNodeWithAnotherBrokenParent(self):
self.sequence_msg.name = 'outer_seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.sequence_msg.name = "inner_seq"
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg,
parent_name='outer_seq'))))
# break parent node
self.manager.nodes['outer_seq'].children = []
self.assertFalse(get_success(self.manager.morph_node(
MorphNodeRequest(node_name='inner_seq',
new_node=self.memory_sequence_msg)
)))
def testMorphNodeBrokenMessage(self):
self.sequence_msg.name = 'seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
msg = NodeMsg(
module="ros_bt_py.nodes.passthrough_node",
node_class="PassthroughNode",
options=[NodeData(key='passthrough_type',
serialized_value='definitely_not_a_type')])
self.assertFalse(get_success(self.manager.morph_node(
MorphNodeRequest(node_name='seq',
new_node=msg)
)))
# intentionally break wiring
self.manager.tree_msg.data_wirings.append(
NodeDataWiring(
source=NodeDataLocation(
node_name='seq'
),
target=NodeDataLocation(
node_name='missing'
)
))
self.assertFalse(get_success(self.manager.morph_node(
MorphNodeRequest(node_name='seq',
new_node=self.memory_sequence_msg)
)))
def testMorphNodeWithParentError(self):
self.sequence_msg.name = 'outer_seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.sequence_msg.name = "inner_seq"
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg,
parent_name='outer_seq'))))
self.manager.nodes['outer_seq'].add_child = mock.MagicMock()
self.manager.nodes['outer_seq'].add_child.side_effect = [BehaviorTreeException(), None]
self.assertFalse(get_success(self.manager.morph_node(
MorphNodeRequest(node_name='inner_seq',
new_node=self.memory_sequence_msg)
)))
def testMorphNodeWithParentWireError(self):
self.sequence_msg.name = 'outer_seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.sequence_msg.name = "inner_seq"
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg,
parent_name='outer_seq'))))
self.manager.wire_data = mock.MagicMock()
self.manager.wire_data.return_value = WireNodeDataResponse(success=False)
self.assertTrue(get_success(self.manager.morph_node(
MorphNodeRequest(node_name='inner_seq',
new_node=self.memory_sequence_msg)
)))
def testReplaceNode(self):
self.sequence_msg.name = 'seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.succeeder_msg.name = 'A'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='seq'))))
self.succeeder_msg.name = 'B'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='seq'))))
self.assertEqual(len(self.tree_msg.nodes), 3)
self.assertFalse(get_success(self.manager.replace_node(
ReplaceNodeRequest(
old_node_name="asdf",
new_node_name="A"))))
self.assertFalse(get_success(self.manager.replace_node(
ReplaceNodeRequest(
old_node_name="B",
new_node_name="asdf"))))
self.assertTrue(get_success(self.manager.replace_node(
ReplaceNodeRequest(
old_node_name="B",
new_node_name="A"))))
self.assertEqual(len(self.tree_msg.nodes), 2)
# B was overwritten by A
self.assertNotIn("B", [node.name for node in self.tree_msg.nodes])
self.sequence_msg.name = 'new_seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.assertEqual(len(self.tree_msg.nodes), 3)
self.assertTrue(get_success(self.manager.replace_node(
ReplaceNodeRequest(
old_node_name="seq",
new_node_name="new_seq"))))
self.assertEqual(len(self.tree_msg.nodes), 2)
# seq should be overwritten by new_seq
self.assertNotIn("seq", [node.name for node in self.tree_msg.nodes])
self.assertIn("new_seq", [node.name for node in self.tree_msg.nodes])
for node in self.tree_msg.nodes:
if node.name == 'new_seq':
self.assertIn("A", node.child_names)
def testReplaceParent(self):
self.sequence_msg.name = 'seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.sequence_msg.name = 'seq_2'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg,
parent_name='seq'))))
# This should succeed, but to avoid cycles, seq_2 cannot
# inherit all of seq's children (which would include itself)
self.assertTrue(get_success(self.manager.replace_node(
ReplaceNodeRequest(
old_node_name='seq',
new_node_name='seq_2'
))))
self.assertEqual(len(self.tree_msg.nodes), 1)
# seq only had seq_2 as its child, so seq_2 should have 0
# children now
self.assertEqual(len(self.tree_msg.nodes[0].child_names), 0)
def testReplaceOrder(self):
self.sequence_msg.name = 'seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.succeeder_msg.name = 'A'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='seq'))))
self.succeeder_msg.name = 'B'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='seq'))))
self.succeeder_msg.name = 'C'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='seq'))))
self.assertEqual(len(self.tree_msg.nodes), 4)
# Confirm the positions of all three succeeders
seq_msg = None
for node in self.tree_msg.nodes:
if node.name == 'seq':
seq_msg = node
break
self.assertIsNotNone(seq_msg, 'Failed to find sequence in tree message')
self.assertEqual(seq_msg.child_names, ['A', 'B', 'C'])
self.assertTrue(get_success(self.manager.replace_node(
ReplaceNodeRequest(
new_node_name='A',
old_node_name='B'
))))
seq_msg = None
for node in self.tree_msg.nodes:
if node.name == 'seq':
seq_msg = node
break
self.assertIsNotNone(seq_msg, 'Failed to find sequence in tree message')
self.assertEqual(seq_msg.child_names, ['A', 'C'])
def testReplaceBrokenNode(self):
self.succeeder_msg.name = 'A'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg))))
self.succeeder_msg.name = 'B'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg))))
self.assertEqual(len(self.tree_msg.nodes), 2)
# Break the node
self.manager.nodes['A'].children = [self.manager.nodes['B']]
self.assertFalse(get_success(self.manager.replace_node(
ReplaceNodeRequest(
old_node_name="A",
new_node_name="B"))))
def testReplaceNodeNotSuccessful(self):
self.sequence_msg.name = 'seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.succeeder_msg.name = 'A'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='seq'))))
self.succeeder_msg.name = 'B'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='seq'))))
self.assertEqual(len(self.tree_msg.nodes), 3)
self.manager.remove_node = mock.MagicMock()
self.manager.remove_node.return_value = RemoveNodeResponse(success=False)
self.assertFalse(get_success(self.manager.replace_node(
ReplaceNodeRequest(
old_node_name="B",
new_node_name="A"))))
def testTick(self):
add_request = AddNodeRequest(node=self.node_msg)
add_request.node.inputs.append(NodeData(key='in',
serialized_value=json_encode(42)))
response = self.manager.add_node(add_request)
self.assertTrue(get_success(response))
self.manager.tick(once=True)
self.assertEqual(self.manager.nodes[response.actual_node_name].outputs['out'], 42)
# After finishing the tick, the TreeManager should have called the tree
# and debug info callbacks, setting these values.
self.assertIsNotNone(self.tree_msg)
self.assertIsNotNone(self.debug_info_msg)
self.assertIn(response.actual_node_name,
[node.name for
node in self.tree_msg.nodes])
node_msg = next((node for
node in self.tree_msg.nodes if node.name == response.actual_node_name))
self.assertEqual(json_decode(node_msg.inputs[0].serialized_value), 42)
self.assertEqual(json_decode(node_msg.outputs[0].serialized_value), 42)
def testControlTree(self):
add_request = AddNodeRequest(node=self.node_msg)
add_request.node.name = 'passthrough'
add_request.node.inputs.append(NodeData(key='in',
serialized_value=json_encode(42)))
self.assertTrue(self.manager.add_node(add_request).success)
self.assertEqual(self.manager.nodes['passthrough'].inputs['in'], 42)
self.assertIsNone(self.manager.nodes['passthrough'].outputs['out'])
execution_request = ControlTreeExecutionRequest(
command=ControlTreeExecutionRequest.TICK_ONCE)
response = self.manager.control_execution(execution_request)
self.assertTrue(get_success(response))
self.assertEqual(response.tree_state, Tree.WAITING_FOR_TICK)
self.assertEqual(self.manager.nodes['passthrough'].outputs['out'], 42)
# Start, then stop, continuous execution
execution_request.command = ControlTreeExecutionRequest.TICK_PERIODICALLY
execution_request.tick_frequency_hz = 2
response = self.manager.control_execution(execution_request)
self.assertTrue(get_success(response))
self.assertEqual(response.tree_state, Tree.TICKING)
# Trying to start ticking while the tree is already ticking should fail
self.assertFalse(self.manager.control_execution(execution_request).success)
execution_request.command = ControlTreeExecutionRequest.TICK_ONCE
self.assertFalse(self.manager.control_execution(execution_request).success)
# Stopping should put the tree back in the IDLE state
execution_request.command = ControlTreeExecutionRequest.STOP
response = self.manager.control_execution(execution_request)
self.assertTrue(get_success(response))
self.assertEqual(response.tree_state, Tree.IDLE)
# stopping a stopped tree is fine
self.assertTrue(self.manager.control_execution(execution_request).success)
# After resetting, output should be None again
execution_request.command = ControlTreeExecutionRequest.RESET
self.assertIsNotNone(self.manager.nodes['passthrough'].outputs['out'])
self.assertTrue(self.manager.control_execution(execution_request).success)
self.assertIsNone(self.manager.nodes['passthrough'].outputs['out'])
execution_request.command = ControlTreeExecutionRequest.SHUTDOWN
self.assertTrue(self.manager.control_execution(execution_request).success)
self.assertEqual(self.manager.nodes['passthrough'].state, NodeMsg.SHUTDOWN)
# test DO_NOTHING and an unknown command
execution_request.command = ControlTreeExecutionRequest.DO_NOTHING
self.assertTrue(self.manager.control_execution(execution_request).success)
execution_request.command = 42
self.assertFalse(self.manager.control_execution(execution_request).success)
def testControlBrokenTree(self):
add_request = AddNodeRequest(node=self.node_msg,
allow_rename=True)
# Add two nodes, so there's no single root node
self.assertTrue(self.manager.add_node(add_request).success)
self.assertTrue(self.manager.add_node(add_request).success)
execution_request = ControlTreeExecutionRequest()
# All of these should fail, since the manager cannot find a root node
# to tick (or reset)
execution_request.command = ControlTreeExecutionRequest.TICK_ONCE
self.assertFalse(get_success(self.manager.control_execution(execution_request)))
execution_request.command = ControlTreeExecutionRequest.TICK_PERIODICALLY
self.assertFalse(get_success(self.manager.control_execution(execution_request)))
execution_request.command = ControlTreeExecutionRequest.RESET
self.assertFalse(get_success(self.manager.control_execution(execution_request)))
def testControlTreeWithUnsetInputNode(self):
load_request = LoadTreeRequest(tree=Tree(
name='from_file',
path='package://ros_bt_py/test/testdata/trees/subtree_compare.yaml'))
self.assertTrue(get_success(self.manager.load_tree(load_request)))
execution_request = ControlTreeExecutionRequest()
execution_request.command = ControlTreeExecutionRequest.TICK_ONCE
self.assertFalse(get_success(self.manager.control_execution(execution_request)))
self.assertFalse(get_success(self.manager.control_execution(execution_request)))
def testControlSetupAndShutdown(self):
add_request = AddNodeRequest(node=self.node_msg,
allow_rename=True)
self.assertTrue(self.manager.add_node(add_request).success)
execution_request = ControlTreeExecutionRequest()
# SETUP_AND_SHUTDOWN does not work when ticking
execution_request.command = ControlTreeExecutionRequest.TICK_PERIODICALLY
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
execution_request.command = ControlTreeExecutionRequest.SETUP_AND_SHUTDOWN
self.assertFalse(get_success(self.manager.control_execution(execution_request)))
execution_request.command = ControlTreeExecutionRequest.SHUTDOWN
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
execution_request.command = ControlTreeExecutionRequest.SETUP_AND_SHUTDOWN
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
# SETUP fails on a TreeTopologyError
self.assertTrue(self.manager.add_node(add_request).success)
execution_request.command = ControlTreeExecutionRequest.SETUP_AND_SHUTDOWN
self.assertFalse(get_success(self.manager.control_execution(execution_request)))
def testControlSetupAndShutdownFails(self):
random_int_msg = NodeMsg(
module='ros_bt_py.nodes.random_number',
node_class='RandomInt',
options=[NodeData(key='min',
serialized_value=json_encode(1)),
NodeData(key='max',
serialized_value=json_encode(0))])
add_request = AddNodeRequest(node=random_int_msg,
allow_rename=True)
self.assertTrue(self.manager.add_node(add_request).success)
execution_request = ControlTreeExecutionRequest()
# Fails because the RandomInt node raises a BehaviorTreeException (min > max)
execution_request.command = ControlTreeExecutionRequest.SETUP_AND_SHUTDOWN
self.assertFalse(get_success(self.manager.control_execution(execution_request)))
def testControlTickPeriodicallyNoNodes(self):
execution_request = ControlTreeExecutionRequest()
execution_request.command = ControlTreeExecutionRequest.TICK_PERIODICALLY
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
execution_request.command = ControlTreeExecutionRequest.SHUTDOWN
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
execution_request.command = ControlTreeExecutionRequest.RESET
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
def testControlTickPeriodically0Hz(self):
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.manager.tree_msg.tick_frequency_hz = 0
execution_request = ControlTreeExecutionRequest()
execution_request.command = ControlTreeExecutionRequest.RESET
self.assertFalse(get_success(self.manager.control_execution(execution_request)))
execution_request.command = ControlTreeExecutionRequest.TICK_PERIODICALLY
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
time.sleep(0.01)
execution_request.command = ControlTreeExecutionRequest.RESET
self.assertFalse(get_success(self.manager.control_execution(execution_request)))
execution_request.command = ControlTreeExecutionRequest.SHUTDOWN
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
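# The next four tests use LongRunningNode: without single-step debugging,
# STOP on the periodically ticking tree (and a plain TICK_ONCE) raises
# BehaviorTreeException, whereas with single_step=True both TICK_ONCE and
# STOP succeed.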
def testControlLongRunningTreeNode(self):
node = LongRunningNode()
self.manager.nodes[node.name] = node
execution_request = ControlTreeExecutionRequest()
execution_request.command = ControlTreeExecutionRequest.TICK_PERIODICALLY
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
time.sleep(0.1)
execution_request.command = ControlTreeExecutionRequest.STOP
self.assertRaises(BehaviorTreeException,
self.manager.control_execution, execution_request)
def testControlLongRunningTreeNodetickOnce(self):
node = LongRunningNode()
self.manager.nodes[node.name] = node
execution_request = ControlTreeExecutionRequest()
execution_request.command = ControlTreeExecutionRequest.TICK_ONCE
self.assertRaises(BehaviorTreeException,
self.manager.control_execution, execution_request)
time.sleep(0.1)
execution_request.command = ControlTreeExecutionRequest.STOP
self.assertRaises(BehaviorTreeException,
self.manager.control_execution, execution_request)
def testControlLongRunningTreeNodeDebugging(self):
node = LongRunningNode()
self.manager.nodes[node.name] = node
request = SetExecutionModeRequest(single_step=True,
collect_performance_data=False, publish_subtrees=False)
self.assertEqual(self.manager.set_execution_mode(request), SetExecutionModeResponse())
execution_request = ControlTreeExecutionRequest()
execution_request.command = ControlTreeExecutionRequest.TICK_PERIODICALLY
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
time.sleep(0.1)
execution_request.command = ControlTreeExecutionRequest.STOP
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
def testControlLongRunningTreeNodeDebuggingTickOnce(self):
node = LongRunningNode()
self.manager.nodes[node.name] = node
request = SetExecutionModeRequest(single_step=True,
collect_performance_data=False, publish_subtrees=False)
self.assertEqual(self.manager.set_execution_mode(request), SetExecutionModeResponse())
execution_request = ControlTreeExecutionRequest()
execution_request.command = ControlTreeExecutionRequest.TICK_ONCE
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
time.sleep(0.1)
execution_request.command = ControlTreeExecutionRequest.STOP
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
def testControlDebugRaceCondition(self):
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.constant_msg))))
execution_request = ControlTreeExecutionRequest()
execution_request.command = ControlTreeExecutionRequest.TICK_PERIODICALLY
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
debug_request = SetExecutionModeRequest(single_step=True,
collect_performance_data=False,
publish_subtrees=False)
self.assertEqual(self.manager.set_execution_mode(debug_request),
SetExecutionModeResponse())
debug_request = SetExecutionModeRequest(single_step=False,
collect_performance_data=False,
publish_subtrees=False)
self.assertEqual(self.manager.set_execution_mode(debug_request),
SetExecutionModeResponse())
execution_request = ControlTreeExecutionRequest()
execution_request.command = ControlTreeExecutionRequest.SHUTDOWN
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
def testControlTickExceptionNode(self):
@define_bt_node(NodeConfig(
options={},
inputs={},
outputs={},
max_children=0))
class ExceptionNode(Leaf):
def _do_setup(self):
pass
def _do_tick(self):
raise BehaviorTreeException
def _do_shutdown(self):
pass
def _do_reset(self):
return NodeMsg.IDLE
def _do_untick(self):
return NodeMsg.IDLE
node = ExceptionNode()
self.manager.nodes[node.name] = node
execution_request = ControlTreeExecutionRequest()
execution_request.command = ControlTreeExecutionRequest.TICK_PERIODICALLY
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
time.sleep(0.1)
execution_request.command = ControlTreeExecutionRequest.STOP
self.assertFalse(get_success(self.manager.control_execution(execution_request)))
def testControlUntickExceptionNode(self):
@define_bt_node(NodeConfig(
options={},
inputs={},
outputs={},
max_children=0))
class ExceptionNode(Leaf):
def _do_setup(self):
pass
def _do_tick(self):
return NodeMsg.SUCCEEDED
def _do_shutdown(self):
pass
def _do_reset(self):
return NodeMsg.IDLE
def _do_untick(self):
raise BehaviorTreeException
node = ExceptionNode()
self.manager.nodes[node.name] = node
execution_request = ControlTreeExecutionRequest()
execution_request.command = ControlTreeExecutionRequest.TICK_PERIODICALLY
self.assertTrue(get_success(self.manager.control_execution(execution_request)))
time.sleep(0.1)
execution_request.command = ControlTreeExecutionRequest.STOP
self.assertFalse(get_success(self.manager.control_execution(execution_request)))
def testControlUntickNoNodes(self):
self.manager.tree_msg.state = Tree.WAITING_FOR_TICK
execution_request = ControlTreeExecutionRequest()
execution_request.command = ControlTreeExecutionRequest.STOP
response = self.manager.control_execution(execution_request)
self.assertTrue(get_success(response))
self.assertEqual(response.tree_state, Tree.IDLE)
def testControlStopTopologyError(self):
# build a cycle
node = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
self.manager.nodes[node.name].parent = node.name
self.manager.tree_msg.state = Tree.WAITING_FOR_TICK
execution_request = ControlTreeExecutionRequest()
execution_request.command = ControlTreeExecutionRequest.STOP
response = self.manager.control_execution(execution_request)
self.assertFalse(get_success(response))
def testControlShutdownNotRunningTopologyError(self):
# build a cycle
node = self.manager.instantiate_node_from_msg(self.node_msg, allow_rename=True)
self.manager.nodes[node.name].parent = node.name
execution_request = ControlTreeExecutionRequest()
execution_request.command = ControlTreeExecutionRequest.SHUTDOWN
response = self.manager.control_execution(execution_request)
self.assertFalse(get_success(response))
def testControlTickNoNodes(self):
execution_request = ControlTreeExecutionRequest()
execution_request.command = ControlTreeExecutionRequest.TICK_ONCE
response = self.manager.control_execution(execution_request)
self.assertTrue(get_success(response))
def testControlTickThreadAlive(self):
add_request = AddNodeRequest(node=self.node_msg)
add_request.node.name = 'passthrough'
add_request.node.inputs.append(NodeData(key='in',
serialized_value=json_encode(42)))
self.assertTrue(self.manager.add_node(add_request).success)
self.assertEqual(self.manager.nodes['passthrough'].inputs['in'], 42)
self.assertIsNone(self.manager.nodes['passthrough'].outputs['out'])
execution_request = ControlTreeExecutionRequest(
command=ControlTreeExecutionRequest.TICK_ONCE)
response = self.manager.control_execution(execution_request)
self.assertTrue(get_success(response))
self.assertEqual(response.tree_state, Tree.WAITING_FOR_TICK)
self.manager._tick_thread.is_alive = mock.MagicMock()
self.manager._tick_thread.is_alive.return_value = True
self.manager.get_state = mock.MagicMock()
self.manager.get_state.return_value = Tree.IDLE
self.assertRaises(BehaviorTreeException, self.manager.control_execution, execution_request)
def testControlTreeStateNotIdle(self):
add_request = AddNodeRequest(node=self.node_msg)
add_request.node.name = 'passthrough'
add_request.node.inputs.append(NodeData(key='in',
serialized_value=json_encode(42)))
self.assertTrue(self.manager.add_node(add_request).success)
self.assertEqual(self.manager.nodes['passthrough'].inputs['in'], 42)
self.assertIsNone(self.manager.nodes['passthrough'].outputs['out'])
execution_request = ControlTreeExecutionRequest(
command=ControlTreeExecutionRequest.TICK_PERIODICALLY)
response = self.manager.control_execution(execution_request)
self.assertTrue(get_success(response))
self.assertEqual(response.tree_state, Tree.TICKING)
execution_request = ControlTreeExecutionRequest(
command=ControlTreeExecutionRequest.STOP)
self.manager.get_state = mock.MagicMock()
self.manager.get_state.side_effect = [
Tree.TICKING, Tree.TICKING, Tree.STOP_REQUESTED, Tree.STOP_REQUESTED]
response = self.manager.control_execution(execution_request)
self.assertFalse(get_success(response))
def testControlTreeStateNotIdleOrPaused(self):
add_request = AddNodeRequest(node=self.node_msg)
add_request.node.name = 'passthrough'
add_request.node.inputs.append(NodeData(key='in',
serialized_value=json_encode(42)))
self.assertTrue(self.manager.add_node(add_request).success)
self.assertEqual(self.manager.nodes['passthrough'].inputs['in'], 42)
self.assertIsNone(self.manager.nodes['passthrough'].outputs['out'])
execution_request = ControlTreeExecutionRequest(
command=ControlTreeExecutionRequest.TICK_ONCE)
response = self.manager.control_execution(execution_request)
self.assertTrue(get_success(response))
self.assertEqual(response.tree_state, Tree.WAITING_FOR_TICK)
execution_request = ControlTreeExecutionRequest(
command=ControlTreeExecutionRequest.STOP)
self.manager.find_root = mock.MagicMock()
node = MockLeaf(name='error',
options={'output_type': int,
'state_values': [NodeMsg.FAILED],
'output_values': [1]})
node.state = NodeMsg.FAILED
node.untick = mock.MagicMock()
self.manager.find_root.return_value = node
response = self.manager.control_execution(execution_request)
self.assertFalse(get_success(response))
def testControlTreeStateTickOnceIdle(self):
add_request = AddNodeRequest(node=self.node_msg)
add_request.node.name = 'passthrough'
add_request.node.inputs.append(NodeData(key='in',
serialized_value=json_encode(42)))
self.assertTrue(self.manager.add_node(add_request).success)
self.assertEqual(self.manager.nodes['passthrough'].inputs['in'], 42)
self.assertIsNone(self.manager.nodes['passthrough'].outputs['out'])
execution_request = ControlTreeExecutionRequest(
command=ControlTreeExecutionRequest.TICK_ONCE)
self.manager.get_state = mock.MagicMock()
self.manager.get_state.return_value = Tree.STOP_REQUESTED
response = self.manager.control_execution(execution_request)
self.assertFalse(get_success(response))
def testGetAvailableNodes(self):
request = GetAvailableNodesRequest(node_modules=['ros_bt_py.nodes.passthrough_node'])
response = self.manager.get_available_nodes(request)
self.assertTrue(get_success(response), get_error_message(response))
self.assertGreaterEqual(len(response.available_nodes), 1)
self.assertIn("PassthroughNode", [node.node_class for node in response.available_nodes])
request = GetAvailableNodesRequest(node_modules=['ros_bt_py.tests.node_does_not_exist'])
response = self.manager.get_available_nodes(request)
self.assertFalse(get_success(response))
def testSetOptions(self):
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.node_msg))))
# There's only one node...
node = self.tree_msg.nodes[0]
# and it only has one option
self.assertEqual(node.options[0].serialized_value, json_encode(int))
# a node that is not in the tree should fail
self.assertFalse(get_success(self.manager.set_options(
SetOptionsRequest(node_name='not_in_tree',
rename_node=False,
options=[NodeData(key='passthrough_type',
serialized_value=json_encode(str))]))))
# unparseable values should fail
self.assertFalse(get_success(self.manager.set_options(
SetOptionsRequest(node_name='PassthroughNode',
rename_node=False,
options=[NodeData(key='passthrough_type',
serialized_value='invalid_value')]))))
# assigning values to invalid keys should fail too
self.assertFalse(get_success(self.manager.set_options(
SetOptionsRequest(node_name='PassthroughNode',
rename_node=False,
options=[NodeData(key='invalid_key',
serialized_value=json_encode(str))]))))
# assigning values of the wrong type should also fail
self.assertFalse(get_success(self.manager.set_options(
SetOptionsRequest(node_name='PassthroughNode',
rename_node=False,
options=[NodeData(key='passthrough_type',
serialized_value=json_encode(
'I am not a type, but a string!'))]))))
# finally, this is valid :)
self.assertTrue(get_success(self.manager.set_options(
SetOptionsRequest(node_name='PassthroughNode',
rename_node=False,
options=[NodeData(key='passthrough_type',
serialized_value=json_encode(str))]))))
node = self.tree_msg.nodes[0]
self.assertEqual(node.options[0].serialized_value, json_encode(str))
def testSetSomeOptions(self):
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.constant_msg))))
# We expect the old value of constant_type (int) to be
# preserved - if it weren't, Node.__init__() would raise an
# error!
self.assertTrue(get_success(self.manager.set_options(
SetOptionsRequest(node_name='Constant',
rename_node=False,
options=[NodeData(key='constant_value',
serialized_value=json_encode(23))]))))
def testRename(self):
self.sequence_msg.name = 'foo'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.constant_msg.name = 'const'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(parent_name=self.sequence_msg.name,
node=self.constant_msg))))
set_options_response = self.manager.set_options(
SetOptionsRequest(node_name=self.constant_msg.name,
rename_node=True,
new_name='bar'))
self.assertTrue(get_success(set_options_response),
get_error_message(set_options_response))
self.assertIn('bar', (node.name for node in self.tree_msg.nodes))
self.assertNotIn('const', (node.name for node in self.tree_msg.nodes))
set_options_response = self.manager.set_options(
SetOptionsRequest(node_name='bar',
rename_node=True,
new_name='foo'))
# 'foo' is already taken, so this shouldn't succeed
self.assertFalse(get_success(set_options_response))
def testSetOptionsWithWirings(self):
# Add a Sequence with two children
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.node_msg.name = 'child1'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(parent_name='Sequence',
node=self.node_msg))))
self.node_msg.name = 'child2'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(parent_name='Sequence',
node=self.node_msg))))
wire_request = WireNodeDataRequest()
wire_request.wirings.append(NodeDataWiring(
source=NodeDataLocation(
node_name='child1',
data_kind=NodeDataLocation.OUTPUT_DATA,
data_key='out'),
target=NodeDataLocation(
node_name='child2',
data_kind=NodeDataLocation.INPUT_DATA,
data_key='in')))
self.assertTrue(get_success(self.manager.wire_data(wire_request)))
# Should work - the new value is the same as the old one, so
# it definitely works
self.assertTrue(get_success(self.manager.set_options(SetOptionsRequest(
node_name='child1',
options=[NodeData(key='passthrough_type',
serialized_value=json_encode(int))]))))
# Should fail because the wiring cannot be re-established
# (child1.out is now a str, but child2.in still expects an
# int)
failed_res = self.manager.set_options(SetOptionsRequest(
node_name='child1',
options=[NodeData(key='passthrough_type',
serialized_value=json_encode(str))]))
self.assertFalse(get_success(failed_res))
# The failed attempt should reset everything to the way it was
# before, so this must still work
retry_res = self.manager.set_options(SetOptionsRequest(
node_name='child1',
options=[NodeData(key='passthrough_type',
serialized_value=json_encode(int))]))
self.assertTrue(get_success(retry_res), get_error_message(retry_res))
# Renaming should work
rename_res = self.manager.set_options(SetOptionsRequest(
node_name='child1',
rename_node=True,
new_name='child_new_name1'))
self.assertTrue(get_success(rename_res), get_error_message(rename_res))
rename_res = self.manager.set_options(SetOptionsRequest(
node_name='child2',
rename_node=True,
new_name='child_new_name2'))
self.assertTrue(get_success(rename_res), get_error_message(rename_res))
rename_res = self.manager.set_options(SetOptionsRequest(
node_name='Sequence',
rename_node=True,
new_name='Sequence_new_name'))
self.assertTrue(get_success(rename_res), get_error_message(rename_res))
def testSetOptionsChangeTypeWithOptionWirings(self):
# OptionWirings express a semantic relationship between option fields.
# For example, the constant_type and constant_value options of the Constant
# node have a wiring where constant_type is the source and constant_value is
# the target.
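# Because of that wiring, the requests below always change constant_type and
# constant_value in the same SetOptionsRequest, so the two options stay
# consistent when the node is rebuilt.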
add_response = self.manager.add_node(AddNodeRequest(node=self.constant_msg))
self.assertTrue(get_success(add_response))
node = self.manager.nodes[add_response.actual_node_name]
self.assertEqual(node.options.get_serialized('constant_value'), json_encode(42))
self.assertEqual(node.options.get_serialized('constant_type'), json_encode(int))
# Changing type and value at the same time should work
set_options_response = self.manager.set_options(SetOptionsRequest(
node_name=add_response.actual_node_name,
options=[NodeData(key='constant_value',
serialized_value=json_encode('foo')),
NodeData(key='constant_type',
serialized_value=json_encode(str))]))
self.assertTrue(get_success(set_options_response))
# The node has been replaced, so we need an updated reference
node = self.manager.nodes[add_response.actual_node_name]
self.assertEqual(node.options.get_serialized('constant_value'), json_encode('foo'))
self.assertEqual(node.options.get_serialized('constant_type'), json_encode(str))
# Changing type and value at the same time should work
# str and unicode are considered equal
set_options_response = self.manager.set_options(SetOptionsRequest(
node_name=add_response.actual_node_name,
options=[NodeData(key='constant_value',
serialized_value=json_encode('bar')),
NodeData(key='constant_type',
serialized_value=json_encode(unicode))]))
self.assertTrue(get_success(set_options_response))
# The node has been replaced, so we need an updated reference
node = self.manager.nodes[add_response.actual_node_name]
self.assertEqual(node.options.get_serialized('constant_value'), json_encode('bar'))
self.assertEqual(node.options.get_serialized('constant_type'), json_encode(unicode))
# Changing type and value also works with ROS Messages
tree_msg = Tree(name='test')
set_options_response = self.manager.set_options(SetOptionsRequest(
node_name=add_response.actual_node_name,
options=[NodeData(key='constant_value',
serialized_value=json_encode(tree_msg)),
NodeData(key='constant_type',
serialized_value=json_encode(Tree))]))
self.assertTrue(get_success(set_options_response))
# The node has been replaced, so we need an updated reference
node = self.manager.nodes[add_response.actual_node_name]
self.assertEqual(node.options.get_serialized('constant_value'),
json_encode(tree_msg))
self.assertEqual(node.options.get_serialized('constant_type'), json_encode(Tree))
def testSetOptionsChangeTypeWithOptionWiringsBroken(self):
add_response = self.manager.add_node(AddNodeRequest(node=self.constant_msg))
self.assertTrue(get_success(add_response))
node = self.manager.nodes[add_response.actual_node_name]
# intentionally break wiring
self.manager.tree_msg.data_wirings.append(
NodeDataWiring(
source=NodeDataLocation(
node_name='Constant'
),
target=NodeDataLocation(
node_name='missing'
)
))
# with broken wirings changing options should not work
set_options_response = self.manager.set_options(SetOptionsRequest(
node_name=add_response.actual_node_name,
options=[NodeData(key='constant_value',
serialized_value=json_encode('foo')),
NodeData(key='constant_type',
serialized_value=json_encode(str))]))
self.assertFalse(get_success(set_options_response))
def testSetOptionsBrokenNodes(self):
add_response = self.manager.add_node(AddNodeRequest(node=self.constant_msg))
self.assertTrue(get_success(add_response))
constant_node = self.manager.nodes[add_response.actual_node_name]
add_response = self.manager.add_node(AddNodeRequest(node=self.sequence_msg))
self.assertTrue(get_success(add_response))
sequence_node = self.manager.nodes[add_response.actual_node_name]
self.assertIsNone(constant_node.parent)
self.assertEqual(len(sequence_node.children), 0)
# now deliberately break the constant node by claiming it has a parent
constant_node.parent = sequence_node
self.assertFalse(get_success(self.manager.set_options(
SetOptionsRequest(node_name='Constant',
rename_node=False,
options=[NodeData(key='constant_value',
serialized_value=json_encode(23))]))))
def testSetOptionsErrorOnRemove(self):
self.sequence_msg.name = 'outer_seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.sequence_msg.name = "inner_seq"
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg,
parent_name='outer_seq'))))
self.manager.nodes['outer_seq'].remove_child = mock.MagicMock()
self.manager.nodes['outer_seq'].remove_child.side_effect = KeyError()
self.manager.wire_data = mock.MagicMock()
self.manager.wire_data.return_value = WireNodeDataResponse(success=False)
set_options_response = self.manager.set_options(
SetOptionsRequest(node_name="inner_seq",
rename_node=True,
new_name='bar'))
self.assertFalse(get_success(set_options_response))
def testSetOptionsErrorOnAdd(self):
self.sequence_msg.name = 'outer_seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.sequence_msg.name = "inner_seq"
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg,
parent_name='outer_seq'))))
self.manager.nodes['outer_seq'].add_child = mock.MagicMock()
self.manager.nodes['outer_seq'].add_child.side_effect = [BehaviorTreeException(), None]
self.manager.wire_data = mock.MagicMock()
self.manager.wire_data.return_value = WireNodeDataResponse(success=False)
set_options_response = self.manager.set_options(
SetOptionsRequest(node_name="inner_seq",
rename_node=True,
new_name='bar'))
self.assertFalse(get_success(set_options_response))
def testSetOptionsErrorOnAddException(self):
self.sequence_msg.name = 'outer_seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.sequence_msg.name = "inner_seq"
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg,
parent_name='outer_seq'))))
self.manager.nodes['outer_seq'].add_child = mock.MagicMock()
self.manager.nodes['outer_seq'].add_child.side_effect = BehaviorTreeException()
self.manager.wire_data = mock.MagicMock()
self.manager.wire_data.return_value = WireNodeDataResponse(success=False)
set_options_response = self.manager.set_options(
SetOptionsRequest(node_name="inner_seq",
rename_node=True,
new_name='bar'))
self.assertFalse(get_success(set_options_response))
def testSetOptionsErrorOnAddRewire(self):
self.sequence_msg.name = 'outer_seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.sequence_msg.name = "inner_seq"
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg,
parent_name='outer_seq'))))
self.manager.nodes['outer_seq'].remove_child = mock.MagicMock()
self.manager.nodes['outer_seq'].remove_child.side_effect = [None, BehaviorTreeException()]
self.manager.wire_data = mock.MagicMock()
self.manager.wire_data.return_value = WireNodeDataResponse(success=False)
set_options_response = self.manager.set_options(
SetOptionsRequest(node_name="inner_seq",
rename_node=True,
new_name='bar'))
self.assertFalse(get_success(set_options_response))
def testSetOptionsErrorOnReAddChildren(self):
self.sequence_msg.name = 'outer_seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.sequence_msg.name = "inner_seq"
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg,
parent_name='outer_seq'))))
self.manager.nodes['outer_seq'].remove_child = mock.MagicMock()
self.manager.nodes['outer_seq'].remove_child.side_effect = BehaviorTreeException()
set_options_response = self.manager.set_options(
SetOptionsRequest(node_name="outer_seq",
rename_node=True,
new_name='bar'))
self.assertFalse(get_success(set_options_response))
def testEnforceEditable(self):
add_request = AddNodeRequest(node=self.node_msg)
add_request.node.name = 'first'
self.assertEqual(self.tree_msg.state, "EDITABLE")
self.assertTrue(get_success(self.manager.add_node(add_request)))
self.assertTrue(get_success(self.manager.control_execution(ControlTreeExecutionRequest(
command=ControlTreeExecutionRequest.TICK_ONCE))))
add_request.node.name = 'second'
# The tree is not editable after ticking once
self.assertNotEqual(self.tree_msg.state, "EDITABLE")
# Neither by adding...
self.assertFalse(get_success(self.manager.add_node(add_request)))
# Nor deleting a node
self.assertFalse(get_success(self.manager.remove_node(
RemoveNodeRequest(node_name='first',
remove_children=False))))
self.assertFalse(get_success(self.manager.set_options(
SetOptionsRequest(node_name='first', options=[]))))
# TODO(nberg): test other editing services here as they're implemented
# But after shutting it down, we can edit it again
self.assertTrue(get_success(self.manager.control_execution(ControlTreeExecutionRequest(
command=ControlTreeExecutionRequest.SHUTDOWN))))
self.assertEqual(self.tree_msg.state, "EDITABLE")
self.assertTrue(get_success(self.manager.add_node(add_request)))
self.assertTrue(get_success(self.manager.remove_node(
RemoveNodeRequest(node_name='first',
remove_children=False))))
def testLoadTreeFromPath(self):
load_request = LoadTreeFromPathRequest(
path='package://ros_bt_py/test/testdata/trees/subtree_constant.yaml')
self.assertTrue(get_success(self.manager.load_tree_from_path(load_request)))
def testLoadTreeFromPathBuiltins(self):
load_request = LoadTreeFromPathRequest(
path='package://ros_bt_py/test/testdata/trees/builtins_constant.yaml')
self.assertTrue(get_success(self.manager.load_tree_from_path(load_request)))
node = self.manager.nodes['Constant']
self.assertEqual(node.options.get_serialized('constant_value'), json_encode(42))
self.assertEqual(node.options.get_serialized('constant_type'), json_encode(int))
self.assertEqual(node.options.get_type('constant_value'), int)
self.assertEqual(node.options.get_type('constant_type'), type)
def testLoadWithAndWithoutName(self):
load_request = LoadTreeRequest(tree=Tree(
path='package://ros_bt_py/test/testdata/trees/without_name.yaml'))
response = self.manager.load_tree(load_request)
self.assertTrue(get_success(response))
self.assertEqual(self.manager.tree_msg.name, 'without_name.yaml')
load_request = LoadTreeRequest(tree=Tree(
path='package://ros_bt_py/test/testdata/trees/with_name.yaml'))
response = self.manager.load_tree(load_request)
self.assertTrue(get_success(response))
self.assertEqual(self.manager.tree_msg.name, 'with_name.yaml')
def testLoadFromInvalidFiles(self):
load_request = LoadTreeRequest(tree=Tree(
name='from_file',
path='/notareal.file'))
self.assertFalse(get_success(self.manager.load_tree(load_request)))
load_request = LoadTreeRequest(tree=Tree(
name='from_file',
path='file://'))
self.assertFalse(get_success(self.manager.load_tree(load_request)))
load_request = LoadTreeRequest(tree=Tree(
name='from_file',
path='package://ros_bt_py/etc/trees/notareal.file'))
self.assertFalse(get_success(self.manager.load_tree(load_request)))
load_request = LoadTreeRequest(tree=Tree(
name='from_file',
path='package://ros_bt_py/etc/trees/two_trees.yaml'))
self.assertFalse(get_success(self.manager.load_tree(load_request)))
load_request = LoadTreeRequest(tree=Tree(
name='from_file',
path='package://ros_bt_py/etc/trees/empty.yaml'))
self.assertFalse(get_success(self.manager.load_tree(load_request)))
load_request = LoadTreeRequest(tree=Tree(
name='from_file',
path='package://ros_bt_py/test/testdata/trees/broken_node_with_child.yaml'))
self.assertFalse(get_success(self.manager.load_tree(load_request)))
load_request = LoadTreeRequest(tree=Tree(
name='from_file',
path='package://ros_bt_py/test/testdata/trees/broken_node_with_missing_child.yaml'))
self.assertFalse(get_success(self.manager.load_tree(load_request)))
load_request = LoadTreeRequest(tree=Tree(
name='from_file',
path='package://ros_bt_py/test/testdata/trees/broken_wiring.yaml'))
self.assertFalse(get_success(self.manager.load_tree(load_request)))
def testLoadFromValidFile(self):
load_request = LoadTreeRequest(tree=Tree(name='from_file',
path='package://ros_bt_py/etc/trees/test.yaml'))
response = self.manager.load_tree(load_request)
self.assertTrue(get_success(response), get_error_message(response))
# test.yaml contains a sequence, two succeeders, a fallback and a failer
self.assertEqual(len(self.manager.nodes), 5)
self.assertTrue(get_success(self.manager.control_execution(ControlTreeExecutionRequest(
command=ControlTreeExecutionRequest.TICK_ONCE))))
def testLoadPermissive(self):
load_request = LoadTreeRequest(
tree=Tree(
name='permissive_load',
path='package://ros_bt_py/test/testdata/trees/permissive_changed_msg.yaml'),
permissive=False)
response = self.manager.load_tree(load_request)
self.assertFalse(get_success(response), get_error_message(response))
load_request = LoadTreeRequest(
tree=Tree(
name='permissive_load',
path='package://ros_bt_py/test/testdata/trees/permissive_changed_msg.yaml'),
permissive=True)
response = self.manager.load_tree(load_request)
self.assertTrue(get_success(response), get_error_message(response))
def testLoadPermissiveService(self):
load_request = LoadTreeRequest(
tree=Tree(
name='permissive_load',
path='package://ros_bt_py/test/testdata/trees/permissive_changed_srv.yaml'),
permissive=False)
response = self.manager.load_tree(load_request)
self.assertFalse(get_success(response), get_error_message(response))
load_request = LoadTreeRequest(
tree=Tree(
name='permissive_load',
path='package://ros_bt_py/test/testdata/trees/permissive_changed_srv.yaml'),
permissive=True)
response = self.manager.load_tree(load_request)
self.assertTrue(get_success(response), get_error_message(response))
def testLoadFromValidFileWithEmptyObject(self):
"""Load a tree from a rostopic echo file that has "---" at the end"""
load_request = LoadTreeRequest(
tree=Tree(name='from_file',
path='package://ros_bt_py/etc/trees/test_extra_empty.yaml'))
response = self.manager.load_tree(load_request)
self.assertTrue(get_success(response), get_error_message(response))
# test.yaml contains a sequence, two succeeders, a fallback and a failer
self.assertEqual(len(self.manager.nodes), 5)
self.assertTrue(get_success(self.manager.control_execution(ControlTreeExecutionRequest(
command=ControlTreeExecutionRequest.TICK_ONCE))))
def testLoadWithoutNodesAndWithoutPath(self):
request = self.manager.load_tree(
LoadTreeRequest(tree=Tree(name='broken')))
self.assertFalse(get_success(request))
def testLoadFromFileWithIndirection(self):
request = self.manager.load_tree(
LoadTreeRequest(tree=Tree(name='from_file',
path='package://ros_bt_py/etc/trees/indirection.yaml')))
# Indirection should work as well (this yaml file refers to test.yaml)
self.assertTrue(get_success(request), get_error_message(request))
def testLoadSubtree(self):
load_request = LoadTreeRequest(tree=Tree(name='from_file',
path='package://ros_bt_py/etc/trees/test.yaml'))
response = self.manager.load_tree(load_request)
self.assertTrue(get_success(response), get_error_message(response))
# Fallback is an inner node with 2 children
fallback = self.manager.find_root().find_node('fallback')
self.assertIsNotNone(fallback)
subtree, _, _ = fallback.get_subtree_msg()
# Now load the subtree
load_request = LoadTreeRequest(tree=subtree)
response = self.manager.load_tree(load_request)
self.assertTrue(get_success(response), get_error_message(response))
def testSetExecutionMode(self):
request = SetExecutionModeRequest(single_step=False,
collect_performance_data=False, publish_subtrees=True)
self.assertEqual(self.manager.set_execution_mode(request), SetExecutionModeResponse())
self.assertEqual(self.manager.get_state(), Tree.EDITABLE)
request = SetExecutionModeRequest(single_step=False,
collect_performance_data=False, publish_subtrees=False)
self.assertEqual(self.manager.set_execution_mode(request), SetExecutionModeResponse())
def testDebugStep(self):
request = ContinueRequest()
self.assertTrue(self.manager.debug_step(request).success)
def testModifyBreakpoints(self):
breakpoints = ["first", "second", "third", "fourth"]
request = ModifyBreakpointsRequest(add=breakpoints)
self.assertEqual(self.manager.modify_breakpoints(request).current_breakpoints,
breakpoints)
def testReloadTree(self):
# reload empty tree
reload_response = self.manager.reload_tree(request=ReloadTreeRequest())
self.assertFalse(get_success(reload_response))
# reload a valid tree
load_request = LoadTreeRequest(tree=Tree(name='from_file',
path='package://ros_bt_py/etc/trees/test.yaml'))
response = self.manager.load_tree(load_request)
self.assertTrue(get_success(response), get_error_message(response))
reload_response = self.manager.reload_tree(request=ReloadTreeRequest())
self.assertTrue(get_success(reload_response))
def testChangeTreeName(self):
change_response = self.manager.change_tree_name(request=ChangeTreeNameRequest(name='hi'))
self.assertTrue(get_success(change_response))
self.assertEqual(self.tree_msg.name, 'hi')
def testGenerateSubtree(self):
res = self.manager.generate_subtree(request=GenerateSubtreeRequest())
self.assertFalse(get_success(res))
self.sequence_msg.name = 'seq'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))))
self.succeeder_msg.name = 'A'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='seq'))))
self.succeeder_msg.name = 'B'
self.assertTrue(get_success(self.manager.add_node(
AddNodeRequest(node=self.succeeder_msg,
parent_name='seq'))))
self.assertEqual(len(self.tree_msg.nodes), 3)
res = self.manager.generate_subtree(request=GenerateSubtreeRequest(nodes=['A']))
self.assertTrue(get_success(res))
res = self.manager.generate_subtree(
request=GenerateSubtreeRequest(nodes=[]))
self.assertTrue(get_success(res))
def testLoadFromFileWithPyYAMLgenpyMigration(self):
load_request = LoadTreeRequest(
tree=Tree(
name='migration',
path='package://ros_bt_py/test/testdata/trees/pyyaml_5_3_1_seq_multilayer.yaml'),
permissive=True)
response = self.manager.load_tree(load_request)
self.assertTrue(get_success(response), get_error_message(response))
load_request = LoadTreeRequest(
tree=Tree(
name='migration',
path='package://ros_bt_py/test/testdata/trees/pyyaml_5_3_1_1_child.yaml'),
permissive=True)
response = self.manager.load_tree(load_request)
self.assertTrue(get_success(response), get_error_message(response))
load_request = LoadTreeRequest(
tree=Tree(
name='migration',
path='package://ros_bt_py/test/testdata/trees/pyyaml_5_3_1_100_children.yaml'),
permissive=True)
response = self.manager.load_tree(load_request)
self.assertTrue(get_success(response), get_error_message(response))
load_request = LoadTreeRequest(
tree=Tree(
name='migration',
path='package://ros_bt_py/test/testdata/trees/pyyaml_5_3_1.yaml'),
permissive=True)
response = self.manager.load_tree(load_request)
self.assertTrue(get_success(response), get_error_message(response))
load_request = LoadTreeRequest(
tree=Tree(
name='migration',
path='package://ros_bt_py/test/testdata/trees/pyyaml_3_13.yaml'),
permissive=True)
response = self.manager.load_tree(load_request)
self.assertTrue(get_success(response), get_error_message(response))
load_request = LoadTreeRequest(
tree=Tree(
name='migration',
path='package://ros_bt_py/test/testdata/trees/pyyaml_3_13_100_children.yaml'),
permissive=True)
response = self.manager.load_tree(load_request)
self.assertTrue(get_success(response), get_error_message(response))
class TestWiringServices(unittest.TestCase):
def setUp(self):
self.tree_msg = None
self.debug_info_msg = None
def set_tree_msg(msg):
self.tree_msg = msg
def set_debug_info_msg(msg):
self.debug_info_msg = msg
self.manager = TreeManager(publish_tree_callback=set_tree_msg,
publish_debug_info_callback=set_debug_info_msg)
node_msg = NodeMsg(
module='ros_bt_py.nodes.passthrough_node',
node_class='PassthroughNode',
inputs=[NodeData(key='in',
serialized_value=json_encode(42))],
options=[NodeData(key='passthrough_type',
serialized_value=json_encode(int))])
self.sequence_msg = NodeMsg(
module='ros_bt_py.nodes.sequence',
node_class='Sequence')
response = self.manager.add_node(
AddNodeRequest(node=self.sequence_msg))
# Add three passthrough nodes that we can wire between
self.node_1_name = 'passthrough_1'
node_msg.name = self.node_1_name
self.manager.add_node(
AddNodeRequest(parent_name=response.actual_node_name,
node=node_msg))
self.node_2_name = 'passthrough_2'
node_msg.name = self.node_2_name
self.manager.add_node(
AddNodeRequest(parent_name=response.actual_node_name,
node=node_msg))
self.node_3_name = 'passthrough_3'
node_msg.name = self.node_3_name
self.manager.add_node(
AddNodeRequest(parent_name=response.actual_node_name,
node=node_msg))
def wiring(self, from_name, to_name):
return NodeDataWiring(
source=NodeDataLocation(node_name=from_name,
data_key='out',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name=to_name,
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA))
def testWireMultiple(self):
wire_request = WireNodeDataRequest()
wire_request.wirings.append(self.wiring(self.node_1_name, self.node_2_name))
wire_request.wirings.append(self.wiring(self.node_2_name, self.node_3_name))
response = self.manager.wire_data(wire_request)
self.assertTrue(get_success(response), get_error_message(response))
self.assertEqual(len(self.manager.tree_msg.data_wirings), 2)
response = self.manager.unwire_data(wire_request)
self.assertTrue(get_success(response), get_error_message(response))
self.assertEqual(len(self.manager.tree_msg.data_wirings), 0)
def testUndoWiringOnError(self):
wire_request = WireNodeDataRequest()
wire_request.wirings.append(self.wiring(self.node_1_name, self.node_2_name))
wire_request.wirings.append(
NodeDataWiring(
source=NodeDataLocation(node_name=self.node_2_name,
data_key='fake',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name=self.node_3_name,
data_key='invalid',
data_kind=NodeDataLocation.INPUT_DATA)))
response = self.manager.wire_data(wire_request)
self.assertFalse(get_success(response))
# Even though the first wiring was valid, it should be undone if
# another in the same request is invalid
self.assertEqual(len(self.manager.tree_msg.data_wirings), 0)
def testRewireAfterUnwire(self):
wire_request = WireNodeDataRequest()
wire_request.wirings.append(self.wiring(self.node_1_name, self.node_2_name))
response = self.manager.wire_data(wire_request)
self.assertTrue(get_success(response), get_error_message(response))
self.assertEqual(len(self.manager.tree_msg.data_wirings), 1)
response = self.manager.unwire_data(wire_request)
self.assertTrue(get_success(response), get_error_message(response))
self.assertEqual(len(self.manager.tree_msg.data_wirings), 0)
response = self.manager.wire_data(wire_request)
self.assertTrue(get_success(response), get_error_message(response))
self.assertEqual(len(self.manager.tree_msg.data_wirings), 1)
def testRedoWiringOnError(self):
wire_request = WireNodeDataRequest()
wire_request.wirings.append(self.wiring(self.node_1_name, self.node_2_name))
wire_request.wirings.append(self.wiring(self.node_2_name, self.node_3_name))
unwire_request = WireNodeDataRequest()
unwire_request.wirings.append(self.wiring(self.node_1_name, self.node_2_name))
unwire_request.wirings.append(
NodeDataWiring(
source=NodeDataLocation(node_name=self.node_2_name,
data_key='fake',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='missing',
data_key='invalid',
data_kind=NodeDataLocation.INPUT_DATA)))
wire_response = self.manager.wire_data(wire_request)
self.assertTrue(get_success(wire_response), get_error_message(wire_response))
self.assertEqual(len(self.manager.tree_msg.data_wirings), 2)
# Number of wirings should stay the same, since the unwire operation failed
unwire_response = self.manager.unwire_data(unwire_request)
self.assertFalse(get_success(unwire_response))
self.assertEqual(len(self.manager.tree_msg.data_wirings), 2)
unwire_request = WireNodeDataRequest()
unwire_request.wirings.append(self.wiring(self.node_1_name, self.node_2_name))
unwire_request.wirings.append(
NodeDataWiring(
source=NodeDataLocation(node_name='missing',
data_key='fake',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name=self.node_2_name,
data_key='invalid',
data_kind=NodeDataLocation.INPUT_DATA)))
# Number of wirings should be reduced by one, since the second unwire request
# was ignored but the first was performed
unwire_response = self.manager.unwire_data(unwire_request)
self.assertTrue(get_success(unwire_response)) # unwire is forgiving with wrong sources
self.assertEqual(len(self.manager.tree_msg.data_wirings), 1)
def testWiringWithoutNodes(self):
manager = TreeManager()
wire_request = WireNodeDataRequest()
wire_response = manager.wire_data(wire_request)
self.assertFalse(get_success(wire_response))
def testWireAfterNodeRemoveAndAdd(self):
manager = TreeManager()
sequence_msg = NodeMsg(
module='ros_bt_py.nodes.sequence',
node_class='Sequence')
constant_msg = NodeMsg(
module='ros_bt_py.nodes.constant',
node_class='Constant',
options=[NodeData(key='constant_type',
serialized_value=json_encode(str)),
NodeData(key='constant_value',
serialized_value=json_encode('hello'))])
log_msg = NodeMsg(
module='ros_bt_py.nodes.log',
node_class='Log',
options=[NodeData(key='logger_level',
serialized_value=json_encode(
LoggerLevel(logger_level='info'))),
NodeData(key='log_type',
serialized_value=json_encode(str))])
add_response = manager.add_node(AddNodeRequest(node=sequence_msg))
self.assertTrue(get_success(add_response))
add_response = manager.add_node(
AddNodeRequest(node=constant_msg, parent_name='Sequence'))
self.assertTrue(get_success(add_response))
add_response = manager.add_node(
AddNodeRequest(node=log_msg, parent_name='Sequence'))
self.assertTrue(get_success(add_response))
# now that the nodes are added, wire constant.constant to log.in
wiring = NodeDataWiring(
source=NodeDataLocation(node_name='Constant',
data_key='constant',
data_kind=NodeDataLocation.OUTPUT_DATA),
target=NodeDataLocation(node_name='Log',
data_key='in',
data_kind=NodeDataLocation.INPUT_DATA))
wire_request = WireNodeDataRequest()
wire_request.wirings.append(wiring)
wire_response = manager.wire_data(wire_request)
self.assertTrue(get_success(wire_response))
remove_response = manager.remove_node(
RemoveNodeRequest(node_name='Constant'))
self.assertTrue(get_success(remove_response))
add_response = manager.add_node_at_index(
AddNodeAtIndexRequest(node=constant_msg, parent_name='Sequence',
new_child_index=0, allow_rename=False))
self.assertTrue(get_success(add_response))
# wiring should work because the old node got deleted
wire_request = WireNodeDataRequest()
wire_request.wirings.append(wiring)
wire_response = manager.wire_data(wire_request)
self.assertTrue(get_success(wire_response))
def get_success(response):
if isinstance(response, dict):
return response['success']
return response.success
def get_error_message(response):
if isinstance(response, dict):
return response['error_message']
return response.error_message
| 124,728 | 35,280 |
from gaffer import Plugin
__all__ = ['DummyPlugin']
from .app import DummyApp
class DummyPlugin(Plugin):
name = "dummy"
version = "1.0"
description = "test"
def app(self, cfg):
return DummyApp()
| 225 | 82 |
import numpy as np
import cv2
from preprocessing.utils import get_original_with_fakes
from tqdm import tqdm
from multiprocessing.pool import Pool
from functools import partial
# from skimage.measure import compare_ssim
from skimage import metrics
import argparse
import os
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
cv2.ocl.setUseOpenCL(False)
cv2.setNumThreads(0)
cache = {}
def save_diffs(pair, root_dir):
ori_id, fake_id = pair
ori_dir = os.path.join(root_dir, "crops", ori_id)
fake_dir = os.path.join(root_dir, "crops", fake_id)
diff_dir = os.path.join(root_dir, "diffs", fake_id)
os.makedirs(diff_dir, exist_ok=True)
for frame in range(320):
if frame % 10 != 0:
continue
for actor in range(2):
image_id = "{}_{}.png".format(frame, actor)
diff_image_id = "{}_{}_diff.png".format(frame, actor)
ori_path = os.path.join(ori_dir, image_id)
fake_path = os.path.join(fake_dir, image_id)
diff_path = os.path.join(diff_dir, diff_image_id)
# some frames didn't exist...
if os.path.exists(ori_path) and os.path.exists(fake_path):
img1 = cv2.imread(ori_path, cv2.IMREAD_COLOR)
img2 = cv2.imread(fake_path, cv2.IMREAD_COLOR)
try:
# structural_similarity returns the mean score and, with full=True,
# the per-pixel SSIM map; 1 - map turns similarity into dissimilarity
d, a = metrics.structural_similarity(
img1, img2, multichannel=True, full=True)
a = 1 - a
diff = (a * 255).astype(np.uint8)
diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
cv2.imwrite(diff_path, diff)
except Exception as e:
print(e)
def parse_args():
parser = argparse.ArgumentParser(
description="Extract image diffs")
parser.add_argument("--root-dir", help="root directory",
default="/mnt/sota/datasets/deepfake")
args = parser.parse_args()
return args
def main():
args = parse_args()
pairs = get_original_with_fakes(args.root_dir)
os.makedirs(os.path.join(args.root_dir, "diffs"), exist_ok=True)
with Pool(processes=os.cpu_count() - 2) as p:
with tqdm(total=len(pairs)) as pbar:
func = partial(save_diffs, root_dir=args.root_dir)
for v in p.imap_unordered(func, pairs):
pbar.update()
if __name__ == '__main__':
main()
| 2,485 | 874 |
import ast
import community
import datetime
import lightgbm as lgb
import math
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import pickle
import plotly.express as px
import os
from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score
from tqdm import tqdm
from make_boruta import *
class Zeus:
"""
Class created to build the model
"""
def __init__(self, termo, user, treino_id, test_id):
"""
Constructor method; the model's default information is stored here:
- User
- run_id of the base dataset
"""
self.term = termo
self.data = datetime.date.today()
self.user = str(user).upper()
self.path_user = ''
self.treino_id = treino_id
self.test_id = test_id
self.var_treino = ''
self.var_teste = ''
self.filtro_local = False
self.filtro_data = False
self.local = ''
self.data_start = ''
self.data_end = ''
self.filtro = ''
self.random_state = 101
self.base_sintetica = ''
self.data_active = False
self.data_local = False
self.mm = ''
self.ids = ''
self.train = ''
self.numero_de_amostras_sinteticas_para_criar = ''
self.porcentagem_para_criacao_de_amostras = ''
self.df_cluster = ''
self.clusters = ''
self.var_teste_original = ''
self.pega_variaveis()
self.agregado = ''
self.df_agregado = ''
self.informacoes = ''
self.sentimento = ''
self.data_df = ''
self.load_df = ''
def pega_path_user(self):
"""
Method that gets the path according to the user that initialized the class
"""
os.chdir(os.path.dirname(
r'C:\Users\wilgn\Desktop\Faculdade\3° Semestre\Insper Data\Projeto\Git projeto\Data_BCG_News\Model\\'))
path_atual = os.getcwd()
#print(os.listdir())
if self.user == 'WILGNER':
path_aux_funcs = path_atual.replace('Model', r'aux_funcs\\')
else:
path_aux_funcs = path_atual.replace('Model', r'aux_funcs/')
os.chdir(os.path.dirname(path_aux_funcs))
#print(os.listdir())
with open('set_path.py', 'r') as arquivo_path:
ler_arquivo = arquivo_path.read()
dicionario = ast.literal_eval(ler_arquivo)
lista_users = list(dicionario.keys())
if self.user in lista_users:
print('USUARIO VALIDO !')
self.path_user = dicionario[self.user]
else:
raise TypeError(
'O USUARIO SELECIONADO NÃO TEM UM ENDEREÇO VALIDO CADASTRADO')
os.chdir(os.path.dirname(
r'C:\Users\wilgn\Desktop\Faculdade\3° Semestre\Insper Data\Projeto\Git projeto\Data_BCG_News\Model\\'))
#print(os.listdir())
# arquivo_path.close()
def valida_acesso_path_user(self):
self.pega_path_user()
try:
os.path.exists(self.path_user)
print('PATH VALIDO PARA ACESSO')
except:
raise TypeError('IMPOSSIVEL ACESSAR O PATH')
def pega_variaveis(self, teste=False, load_df=False, file_name_load=False):
if teste:
path_name = f'{self.path_user}Variables/{self.term}/{self.test_id}.parquet'
os.chdir(os.path.dirname(path_name))
self.var_teste = pd.read_parquet(os.path.basename(path_name))
elif load_df:
path_name = f'{self.path_user}Model/{file_name_load}'
os.chdir(os.path.dirname(path_name))
self.load_df = pd.read_parquet(os.path.basename(path_name))
else:
self.valida_acesso_path_user()
path_name = f'{self.path_user}Variables/{self.term}/{self.treino_id}.parquet'
os.chdir(os.path.dirname(path_name))
self.var_treino = pd.read_parquet(os.path.basename(path_name))
def seleciona_filtros(self, local=False, data_start=False, data_end=False):
"""
isinstance checks whether a filter was requested for any of the variables
"""
estado_local = isinstance(local, bool)
estado_data_start = isinstance(data_start, bool)
if not estado_local:
self.local = local
self.filtro_local = True
if not estado_data_start:
self.data_start = data_start
self.data_end = data_end
self.filtro_data = True
def construir_filtro(self, teste=False):
self.filtro = ''
if not teste:
if self.filtro_local and self.filtro_data:
self.filtro = (self.var_treino.sigla == self.local.upper())
self.data_local = True
self.data_active = True
elif self.filtro_data and not self.filtro_local:
self.data_active = True
elif self.filtro_local and not self.filtro_data:
self.filtro = (self.var_treino.sigla == self.local.upper())
else:
if self.filtro_local and self.filtro_data:
self.filtro = (self.var_teste.sigla == self.local.upper())
self.data_local = True
self.data_active = True
elif self.filtro_data and not self.filtro_local:
self.data_active = True
elif self.filtro_local and not self.filtro_data:
self.filtro = (self.var_teste.sigla == self.local.upper())
def filtrar_treino(self, local=False, data_start=False, data_end=False):
self.seleciona_filtros(
local=local, data_start=data_start, data_end=data_end)
self.construir_filtro()
self.var_treino.data = pd.to_datetime(self.var_treino.data)
if self.data_active and self.data_local:
self.var_treino = self.var_treino[self.filtro]
self.var_treino = self.var_treino[(self.var_treino.data > self.data_start) & (
self.var_treino.data < self.data_end)]
elif self.data_active and not self.data_local:
self.var_treino = self.var_treino[
(self.var_treino.data > self.data_start) & (self.var_treino.data < self.data_end)]
else:
self.var_treino = self.var_treino[self.filtro]
def filtrar_teste(self, local=False, data_start=False, data_end=False):
self.seleciona_filtros(
local=local, data_start=data_start, data_end=data_end)
self.construir_filtro(teste=True)
self.var_teste.data = pd.to_datetime(self.var_teste.data)
if self.data_active and self.data_local:
self.var_teste = self.var_teste[self.filtro]
self.var_teste = self.var_teste[
(self.var_teste.data > self.data_start) & (self.var_teste.data < self.data_end)]
elif self.data_active and not self.data_local:
self.var_teste = self.var_teste[
(self.var_teste.data > self.data_start) & (self.var_teste.data < self.data_end)]
else:
self.var_teste = self.var_teste[self.filtro]
def criar_base_sintetica(self, numero_de_amostras=3, porcentagem_para_criacao=.25):
bases_sinteticas = []
self.numero_de_amostras_sinteticas_para_criar = numero_de_amostras
self.porcentagem_para_criacao_de_amostras = porcentagem_para_criacao
colunas_pro_drop = ['unique_identifier', 'sigla', 'data']
for i in range(numero_de_amostras):
unique_identifier = self.var_treino['unique_identifier']
df_com_drop = self.var_treino.drop(columns=colunas_pro_drop)
df_com_colunas_sorteadas = df_com_drop.sample(
frac=porcentagem_para_criacao, replace=True, random_state=self.random_state, axis=1)
amostra = pd.concat(
[unique_identifier, df_com_colunas_sorteadas], axis=1)
amostra_sintetica = pd.DataFrame()
amostra = amostra.loc[:, ~amostra.columns.duplicated()]
for coluna in amostra.columns.tolist():
amostra_sintetica[coluna] = amostra[coluna].sample(frac=1, replace=True,
random_state=self.random_state).tolist()
amostra_sintetica['label'] = 1
amostra['label'] = 0
amostra_concluida = pd.concat([amostra, amostra_sintetica])
amostra_concluida.reset_index(inplace=True, drop=True)
bases_sinteticas.append(amostra_concluida)
self.base_sintetica = bases_sinteticas
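# The synthetic bases built above pair the real rows (label 0) with a copy in
# which every column has been independently bootstrap-resampled (label 1).
# Classifiers trained to tell the two apart (see treina_lightGBM below) are what
# later yield the leaf-based similarity used in criando_matriz_de_similaridade.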
def treina_lightGBM(self, boruta_percs=[10], thr_bor_good=.5, thr_bor_ok=.9):
numero_de_amostras = len(self.base_sintetica)
x_list = []
y_list = []
col_lists = []
model_list = []
trained_models = []
dfs = self.base_sintetica
for i in range(numero_de_amostras):
numero_de_colunas = dfs[i].shape[1]
self.Y = dfs[i]['label']
self.X = dfs[i].drop(columns=['unique_identifier', 'label'])
self.take_out_cols_0 = []
self.take_out_cols = []
self.full_cols = self.X.columns.tolist()
self.thr_bor_good = thr_bor_good
self.thr_bor_ok = thr_bor_ok
self.boruta_percs = boruta_percs
self.boruta_res = boruta_select(
X_df=self.X[[
col for col in self.full_cols if col not in self.take_out_cols]],
Y=self.Y, perc_list=self.boruta_percs, allowed_perc_good=self.thr_bor_good,
allowed_perc_med=self.thr_bor_ok)
self.take_out_cols_irrelevant = self.boruta_res[0].loc[~self.boruta_res[0]['use']].index.tolist(
)
self.take_out_cols += self.take_out_cols_irrelevant
self.use_cols = self.X[[col for col in self.X.columns.tolist(
) if col not in self.take_out_cols]].columns.tolist()
y_list += [dfs[i]['label'].values]
x_list += [dfs[i].drop(columns=['unique_identifier', 'label'])]
col_lists += [self.use_cols]
model_list += [{'type': 'LGBM',
'params': {'num_leaves': 25, 'n_estimators': 300, 'boosting_type': 'rf',
'bagging_fraction': .8, 'bagging_freq': 1, 'random_state': self.random_state}}]
# Training the model
for (model, x, y, cols) in zip(model_list, x_list, y_list, col_lists):
X = x[cols]
Y = y
if model['type'] == 'LGBM':
model_to_train = lgb.LGBMClassifier(**model['params'])
trained_models += [model_to_train.fit(X=X.values, y=Y)]
self.models = trained_models
self.rf_models = self.models
self.col_lists = col_lists
def coleta_folhas(self, porcentagem_do_sample=0.1):
self.df_random = self.var_treino.sample(
frac=porcentagem_do_sample, replace=True, random_state=self.random_state, axis=0).copy()
print(self.df_random.shape)
# frame_list = []
model_c = 0
self.mm = set()
self.ids = self.df_random['unique_identifier'].tolist()
print('start with list values')
# Getting the leaf results from the model
for (model, cols) in zip(self.rf_models, self.col_lists):
if cols == 'label':
continue
else:
raw_leafs = model.predict(
self.df_random[cols].values, pred_leaf=True)
# return raw_leafs
if model_c == 0:
full_leafs = raw_leafs
else:
full_leafs = np.concatenate(
(full_leafs, raw_leafs), axis=1)
model_c += 1
self.raw = raw_leafs
def criando_matriz_de_similaridade(self, porcentagem_do_sample=0.1):
self.porcentagem_para_matriz = porcentagem_do_sample
self.coleta_folhas(porcentagem_do_sample=porcentagem_do_sample)
print('CRIANDO EDGES')
edges = []
# Building the similarity matrix
for cc1, i in tqdm(enumerate(self.raw), 'FOLHAS:'):
if cc1 % 100 == 0:
print(cc1, datetime.datetime.now())
for cc2_, j in enumerate(self.raw[cc1 + 1:]):
cc2 = cc2_ + cc1 + 1
if (cc1, cc2) not in self.mm and (cc2, cc1) not in self.mm:
leaf_count = sum(i == j)
# TODO: Fix similarity matrix with the square root
edges += [(self.ids[cc1], self.ids[cc2],
math.sqrt(leaf_count / len(self.raw[0])))]
self.mm.add((cc1, cc2))
print('done with list values')
G = nx.Graph()
G.add_weighted_edges_from(edges)
self.G = G
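# A minimal standalone sketch (illustrative only, not part of the original class)
# of the same leaf-based similarity: given the (n_samples, n_trees) array of leaf
# indices returned by predict(..., pred_leaf=True), the weight between two samples
# is sqrt(shared_leaves / n_trees), exactly as in the loop above. Uses the numpy
# import at the top of this module.
def _leaf_similarity_matrix(leaves):
    leaves = np.asarray(leaves)
    n_samples, n_trees = leaves.shape
    sim = np.zeros((n_samples, n_samples))
    for i in range(n_samples):
        # number of trees in which sample i shares a leaf with each other sample
        shared = (leaves == leaves[i]).sum(axis=1)
        sim[i] = np.sqrt(shared / float(n_trees))
    return sim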
def rodando_louvain(self, porcentagem_do_sample):
self.criando_matriz_de_similaridade(
porcentagem_do_sample=porcentagem_do_sample)
self.clusters = (community.best_partition(
self.G, weight='weight', randomize=True))
def desenha_cluster_no_edges(self):
plt.figure(figsize=(12, 8), dpi=150)
plt.title('Louvain Test', fontsize=20, loc='left', pad=15)
self.pos = nx.spring_layout(self.G)
nx.draw_networkx_nodes(self.G, self.pos, self.clusters.keys(), node_size=150,
node_color=list(self.clusters.values()))
plt.show()
def classifica_agrupamento(self, boruta_percs=[10], thr_bor_good=.5, thr_bor_ok=.9, take_out_cols=False):
self.df_cluster = pd.DataFrame({'Rotulo': self.clusters.keys(),
'Label': self.clusters.values()})
print(f'Tamanho dos dados de cluster {self.df_cluster.shape}')
self.train = self.var_treino[self.var_treino['unique_identifier'].isin(
self.df_cluster.Rotulo.values.tolist())]
self.train['label'] = self.df_cluster.Label.values.tolist()
self.train = self.train.reset_index(drop=True)
colunas_pro_drop = ['unique_identifier', 'sigla', 'data', 'artigo_original']
self.var_teste_original = self.var_teste
self.sentimento = self.var_teste_original['sentimento']
self.data_df = self.var_teste_original['data']
self.var_teste = self.var_teste.drop(columns=colunas_pro_drop)
self.train = self.train.drop(columns=['sigla', 'data', 'artigo_original'])
self.var_teste = self.var_teste.reset_index(drop=True)
self.train = self.train.reset_index(drop=True)
print(f'Tamanho dos dados de treinamento {self.train.shape}')
print(f'Tamanho dos dados de teste {self.var_teste.shape}')
self.x_list = []
self.y_list = []
self.col_lists = []
model_list = []
trained_models = 0
self.Y = self.train['label']
self.X = self.train.drop(columns=['unique_identifier', 'label'])
self.take_out_cols_0 = []
self.take_out_cols = []
self.full_cols = self.X.columns.tolist()
self.thr_bor_good = thr_bor_good
self.thr_bor_ok = thr_bor_ok
self.boruta_percs = boruta_percs
self.boruta_res = boruta_select(X_df=self.X[[col for col in self.full_cols if col not in self.take_out_cols]],
Y=self.Y, perc_list=self.boruta_percs, allowed_perc_good=self.thr_bor_good,
allowed_perc_med=self.thr_bor_ok)
self.take_out_cols_irrelevant = self.boruta_res[0].loc[~self.boruta_res[0]['use']].index.tolist(
)
self.take_out_cols += self.take_out_cols_irrelevant
self.use_cols = self.X[[col for col in self.X.columns.tolist(
) if col not in self.take_out_cols]].columns.tolist()
if len(self.use_cols) < 1:
self.use_cols = self.X.columns.tolist()
self.y_list += [self.train['label'].values]
self.x_list += [self.train.drop(columns=['unique_identifier', 'label'])]
self.col_lists += [self.use_cols]
model_list += [{'type': 'LGBM',
'params': {'num_leaves': 30, 'n_estimators': 500, 'boosting_type': 'rf',
'bagging_fraction': .8, 'bagging_freq': 1, 'random_state': self.random_state}}]
for (model, x, y, cols) in zip(model_list, self.x_list, self.y_list, self.col_lists):
X = x[cols]
print(X.shape)
Y = y
print(Y.shape)
if model['type'] == 'LGBM':
model_to_train = lgb.LGBMClassifier(**model['params'])
trained_models = model_to_train.fit(X=X.values, y=Y)
self.models = trained_models
self.previsão = trained_models.predict(
self.var_teste[self.col_lists[0]])
self.resultado = self.previsão
self.var_teste['label'] = self.resultado
self.faz_agregacao()
print('FREQUENCIA CLUSTER')
print(self.df_cluster.Label.value_counts(sort=False))
print('********************')
print('FREQUENCIA CLASSIFICADO')
print(self.var_teste.label.value_counts(sort=False))
def plota_palavras_maiores(self, numero):
for i in range(len(self.var_teste.label.unique())):
df_data = pd.DataFrame({'word': self.var_teste[self.var_teste.label == i].drop(
columns=['label']).sum(axis=0).nlargest(numero).index.tolist(),
'value': self.var_teste[self.var_teste.label == i].drop(
columns=['label']).sum(axis=0).nlargest(
numero).values.tolist()})
fig = px.bar(df_data, x='word', y='value', color='value', color_continuous_scale='Blues')
fig.show()
def salva_parametros(self):
self.informacoes = {
'user': self.user,
'data': self.data,
'run_id_treino': self.treino_id,
'run_id_teste': self.test_id,
'path_user': self.path_user,
'filtro_nome': self.term,
'filtro_data': self.data,
'filtro_local': self.local,
'numero_de_amostras_bases_sinteticas': self.numero_de_amostras_sinteticas_para_criar,
'porcentagem_para_criacao_de_amostras': self.porcentagem_para_criacao_de_amostras,
'porcentagem_para_matriz': self.porcentagem_para_matriz
}
def faz_agregacao(self):
# Aggregate the results
self.var_teste_original['label'] = self.var_teste['label']
self.agregado = self.var_teste_original[['unique_identifier', 'sigla', 'data', 'label']]
self.df_agregado = pd.crosstab(self.agregado.sigla, self.agregado.label, normalize='index')
return self.df_agregado
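# A small self-contained demo (made-up data, not from the project) of the crosstab
# call above: with normalize='index', each sigla row gives the share of its rows
# assigned to each cluster label.
def _crosstab_demo():
    demo = pd.DataFrame({'sigla': ['SP', 'SP', 'RJ'], 'label': [0, 1, 1]})
    # Result (each row sums to 1):
    # label    0    1
    # sigla
    # RJ     0.0  1.0
    # SP     0.5  0.5
    return pd.crosstab(demo.sigla, demo.label, normalize='index')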
| 18,905 | 6,383 |
from django import forms
from recipe_app.models import Author
from django.contrib.auth.forms import UserCreationForm
# Create two forms: RecipeForm & AuthorForm
"""
Author:
- Name: CharField
- Bio: TextField
Recipe:
- Title: CharField
- Author: ForeignKey
- Description: TextField
- Time Required: CharField (for example, "One hour")
- Instructions: TextField
"""
class AddAuthorForm(UserCreationForm):
name = forms.CharField(max_length=100)
bio = forms.CharField(max_length=250)
username = forms.CharField(max_length=150)
password1 = forms.CharField(widget=forms.PasswordInput)
password2 = None
# class Meta:
# model = Author
# fields = [
# 'name',
# 'bio'
# ]
class AddRecipeForm(forms.Form):
title = forms.CharField(max_length=100)
author = forms.ModelChoiceField(queryset=Author.objects.all())
description = forms.CharField(max_length=500)
time_required = forms.CharField(max_length=50)
instructions = forms.CharField(widget=forms.Textarea)
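# A minimal usage sketch (illustrative only): how AddRecipeForm could be handled in
# a view. The Recipe model, the template path and the 'recipe_list' URL name are
# assumptions made for this example, not part of this file.
def add_recipe_view(request):
    from django.shortcuts import render, redirect
    from recipe_app.models import Recipe  # assumed to define the fields listed in the docstring above
    form = AddRecipeForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        # cleaned_data keys match the Recipe fields described above
        Recipe.objects.create(**form.cleaned_data)
        return redirect('recipe_list')
    return render(request, 'recipe_app/add_recipe.html', {'form': form})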
| 1,074 | 339 |
from typing import *
import collections
import copy
import hashlib
import math
import numpy as np
from pathlib import Path
import random
import re
from tqdm import tqdm
import traceback
import sys
from seutil import LoggingUtils, IOUtils, BashUtils
from seutil.project import Project
from roosterize.data.CoqDocument import CoqDocument
from roosterize.FilesManager import FilesManager
from roosterize.data.Definition import Definition
from roosterize.data.Lemma import Lemma
from roosterize.data.LemmaBackendSexpTransformers import LemmaBackendSexpTransformers
from roosterize.data.LemmaForeendSexpTransformers import LemmaForeendSexpTransformers
from roosterize.Environment import Environment
from roosterize.Macros import Macros
from roosterize.parser.CoqParser import CoqParser
from roosterize.parser.ParserUtils import ParserUtils
from roosterize.parser.SexpAnalyzer import SexpAnalyzer, SexpInfo
from roosterize.sexp import *
from roosterize.Utils import Utils
class DataMiner:
logger = LoggingUtils.get_logger(__name__, LoggingUtils.DEBUG)
from roosterize.Debug import Debug
if Debug.is_debug: logger.setLevel(LoggingUtils.DEBUG)
Project.set_downloads_dir(Macros.downloads_dir)
TASK_COQ_DOCUMENTS = FilesManager.COQ_DOCUMENTS # "coq-documents"
TASK_DATA_INDEXES = FilesManager.DATA_INDEXES # "data-indexes"
TASK_DEFINITIONS = FilesManager.DEFINITIONS # "definitions"
TASK_INSTALL_COQ_PROJECTS = "install-coq-projects"
TASK_LEMMA = FilesManager.LEMMAS # "lemmas"
TASK_LEMMA_BACKEND_SEXP_TRANSFORMATIONS = FilesManager.LEMMAS_BACKEND_SEXP_TRANSFORMATIONS # "lemmas-bsexp-transformations"
TASK_LEMMA_FILTERED = FilesManager.LEMMAS_FILTERED # "lemmas-filtered"
TASK_LEMMA_FOREEND_SEXP_TRANSFORMATIONS = FilesManager.LEMMAS_FOREEND_SEXP_TRANSFORMATIONS # "lemmas-fsexp-transformations"
dataset_dir = Macros.project_dir.parent / "math-comp-corpus"
@classmethod
def collect_data(cls, **options) -> NoReturn:
data_mgr = FilesManager(cls.dataset_dir)
task = options["task"]
projects_path = Path(options.get("corpus", cls.dataset_dir / "projects-standalone-8.10.yml"))
projects: List[Project] = IOUtils.dejsonfy(IOUtils.load(projects_path, "json"), Project)
if task == cls.TASK_COQ_DOCUMENTS:
files = Utils.get_option_as_list(options, "files", None)
is_verifying_tokenizer = Utils.get_option_as_boolean(options, "verify-tokenizer")
cls.collect_coq_documents_projects(data_mgr, projects, files, is_verifying_tokenizer)
elif task == cls.TASK_DATA_INDEXES:
cls.collect_data_indexes(data_mgr, projects)
elif task == cls.TASK_DEFINITIONS:
cls.collect_definitions(data_mgr)
elif task == cls.TASK_INSTALL_COQ_PROJECTS:
cls.install_coq_projects(projects)
elif task == cls.TASK_LEMMA:
files = Utils.get_option_as_list(options, "files", None)
cls.collect_lemmas(data_mgr, projects, files)
elif task == cls.TASK_LEMMA_BACKEND_SEXP_TRANSFORMATIONS:
cls.collect_lemmas_backend_sexp_transformations(data_mgr)
elif task == cls.TASK_LEMMA_FILTERED:
cls.filter_lemmas(data_mgr)
elif task == cls.TASK_LEMMA_FOREEND_SEXP_TRANSFORMATIONS:
cls.collect_lemmas_foreend_sexp_transformations(data_mgr)
else:
LoggingUtils.log_and_raise(cls.logger, f"Unknown task {task}", ValueError)
# end if
return
@classmethod
def collect_coq_documents_projects(cls,
data_mgr: FilesManager,
projects: List[Project],
files: List[str] = None,
is_verifying_tokenizer: bool = False,
) -> NoReturn:
# Prepare the used directories (coq-documents, raw-files, original-files)
for rel_path in [
[FilesManager.COQ_DOCUMENTS],
[FilesManager.RAW_FILES],
[FilesManager.ORIGINAL_FILES],
]:
data_mgr.clean_path(rel_path)
data_mgr.resolve(rel_path).mkdir(parents=True)
# end for
coq_documents: List[CoqDocument] = list()
names_projects = {p.full_name: p for p in projects}
for i, project in enumerate(projects):
try:
cls.logger.info(f"Project {i + 1}/{len(projects)}: {project.full_name}")
coq_documents_project = cls.collect_coq_documents_project(data_mgr, project, names_projects=names_projects, files=files, is_verifying_tokenizer=is_verifying_tokenizer)
except KeyboardInterrupt:
raise
except:
cls.logger.warning(f"Error while processing project {project.full_name}: {traceback.format_exc()}")
continue
else:
coq_documents.extend(coq_documents_project)
# end try
# end for
# Save datasets
data_mgr.dump_data([FilesManager.COQ_DOCUMENTS, FilesManager.COQ_DOCUMENTS], coq_documents, IOUtils.Format.json, is_batched=True)
return
@classmethod
def load_coq_documents(cls, data_mgr: FilesManager) -> List[CoqDocument]:
return data_mgr.load_data([FilesManager.COQ_DOCUMENTS, FilesManager.COQ_DOCUMENTS], IOUtils.Format.json, is_batched=True, clz=CoqDocument)
@classmethod
def collect_coq_documents_project(cls,
data_mgr: FilesManager,
project: Project,
names_projects: Dict[str, Project],
files: List[str] = None,
is_verifying_tokenizer: bool = False,
) -> List[CoqDocument]:
coq_documents: List[CoqDocument] = list()
# Clone and checkout repo
project.clone()
project.checkout(project.data["sha"], is_forced=True)
# Build the project
cls.install_coq_project(project, names_projects)
# For each file, parse code to tokens
with IOUtils.cd(project.checkout_dir):
coq_files: List[str] = BashUtils.run(f"find -name '*.v' -type f").stdout.split("\n")[:-1]
if files is not None:
coq_files = [f for f in coq_files if f[2:] in files] # [2:] is to remove the ./
# end if
re_ignore_path = re.compile(project.data["ignore_path_regex"]) if "ignore_path_regex" in project.data else None
for i, coq_file in enumerate(coq_files):
try:
coq_file = coq_file[2:]
cls.logger.debug(f"File {i + 1}/{len(coq_files)}: {coq_file}")
# Check if file is ignored
if re_ignore_path is not None and re_ignore_path.fullmatch(coq_file):
cls.logger.info(f"Ignoring file {coq_file}")
continue
# end if
# Read file
with open(coq_file, "r", newline="") as f:
source_code = f.read()
# end with
# Get unicode offsets
unicode_offsets = ParserUtils.get_unicode_offsets(source_code)
# Save original file to original_files
data_mgr.dump_data([FilesManager.ORIGINAL_FILES,project.full_name, coq_file], source_code, IOUtils.Format.txt)
# Call SerAPI
serapi_options = project.data.get("serapi_options", "")
ast_sexp_str: str = BashUtils.run(f"sercomp {serapi_options} --mode=sexp -- {coq_file}", expected_return_code=0).stdout
tok_sexp_str: str = BashUtils.run(f"sertok {serapi_options} -- {coq_file}", expected_return_code=0).stdout
# Save ast sexp to dataset (.ast.sexp)
data_mgr.dump_data([FilesManager.RAW_FILES,project.full_name, coq_file[:-2] + ".ast.sexp"], ast_sexp_str, IOUtils.Format.txt)
# Save tok sexp to dataset (.tok.sexp)
data_mgr.dump_data([FilesManager.RAW_FILES, project.full_name, coq_file[:-2] + ".tok.sexp"], tok_sexp_str, IOUtils.Format.txt)
# Parse ast sexp
ast_sexp_list: List[SexpNode] = SexpParser.parse_list(ast_sexp_str)
tok_sexp_list: List[SexpNode] = SexpParser.parse_list(tok_sexp_str)
# Verify the tokenizer if requested
if is_verifying_tokenizer:
if not cls.verify_tokenizer(tok_sexp_list, source_code, unicode_offsets):
LoggingUtils.log_and_raise(cls.logger, "Tokenized content doesn't match original file!", Exception)
# end if
# end if
# Parse the document
coq_document = CoqParser.parse_document(source_code, ast_sexp_list, tok_sexp_list, unicode_offsets=unicode_offsets)
# Save the parsed document (printed format) to raw_files
data_mgr.dump_data([FilesManager.RAW_FILES, project.full_name, coq_file], coq_document.str_with_space(), IOUtils.Format.txt)
# Set meta data
coq_document.file_name = coq_file
coq_document.project_name = project.full_name
coq_document.revision = project.revision
coq_documents.append(coq_document)
except KeyboardInterrupt:
cls.logger.warning("Keyboard interrupt!")
raise
except:
cls.logger.warning(f"File {coq_file} failed! Exception was: {traceback.format_exc()}")
continue
# end try
# end for
# end with
return coq_documents
@classmethod
def verify_tokenizer(cls, tok_sexp_list: List[SexpNode], source_code: str, unicode_offsets: List[int]) -> bool:
sertok_sentences = SexpAnalyzer.analyze_sertok_sentences(tok_sexp_list, unicode_offsets)
vernac_sentences = CoqParser.parse_sertok_sentences(sertok_sentences, source_code)
code_i = 0
has_error: bool = False
for sent_i, sentence in enumerate(vernac_sentences):
for token_i, token in enumerate(sentence.tokens):
# Check space/comment
if token.beg_charno != code_i:
if not ParserUtils.is_ws_or_comment(source_code[code_i:token.beg_charno]):
cls.logger.error(f"Unresolved characters at charno {code_i} to {token.beg_charno}; next expect token {token.content} beginning at charno {token.beg_charno} (lineno {token.lineno}); file content {source_code[code_i:token.beg_charno]};")
cls.logger.error(f"assotiated sexp: \n{tok_sexp_list[sent_i][1][token_i].pretty_format()}")
has_error = True
# end if
# end if
# Check token
code_i = token.beg_charno
if token.content != source_code[code_i:token.end_charno]:
cls.logger.error(f"Mismatch token at charno {code_i} to {token.end_charno}; expect token {token.content} beginning at charno {token.beg_charno} (lineno {token.lineno}); file content {source_code[code_i:token.end_charno]};")
cls.logger.error(f"assotiated sexp: \n{tok_sexp_list[sent_i][1][token_i].pretty_format()}")
has_error = True
# end if
code_i = token.end_charno
# end for, for
# Check space/comment at end of file
if code_i != len(source_code):
if not ParserUtils.is_ws_or_comment(source_code[code_i:len(source_code)]):
cls.logger.error(f"Unresolved characters at charno {code_i} to {len(source_code)} (end of file); file content {source_code[code_i:len(source_code)]}")
has_error = True
# end if
# end if
return not has_error
@classmethod
def install_coq_projects(cls, projects: List[Project]) -> None:
names_projects = {p.full_name: p for p in projects}
for i, p in enumerate(projects):
cls.logger.info(f"Installing {p.full_name} ({i}/{len(projects)})")
cls.install_coq_project(p, names_projects)
# end for
return
@classmethod
def install_coq_project(cls, project: Project, names_projects: Dict[str, Project]) -> None:
"""
:requires: the project is cloned and checked-out to the desired version.
"""
if not project.is_cloned:
project.clone()
project.checkout(project.data["sha"], is_forced=True)
# end if
# Check if the project is already compiled
confirmation_file = "lpc-installed.txt"
confirmation_content = project.revision + " " + BashUtils.run("opam list coq -s", expected_return_code=0).stdout.strip()
if (project.checkout_dir/confirmation_file).is_file() and IOUtils.load(project.checkout_dir/confirmation_file, "txt") == confirmation_content:
cls.logger.debug(f"Project {project.full_name} already installed")
return
# end if
project.clean()
# Install dependencies
for dependency in project.data.get("dependencies", []):
dependency_project = names_projects.get(dependency)
if dependency_project is None: raise Exception(f"Cannot find dependency {dependency}")
cls.logger.info(f"For Project {project.full_name}, installing dependency {dependency}")
cls.install_coq_project(dependency_project, names_projects)
# end for
if "build_cmd" not in project.data: raise Exception(f"Project {project.full_name} does not have build_cmd")
if "install_cmd" not in project.data: raise Exception(f"Project {project.full_name} does not have install_cmd")
with IOUtils.cd(project.checkout_dir):
# Build
cls.logger.info(f"Project {project.full_name}: Building with {project.data['build_cmd']}")
r = BashUtils.run(project.data["build_cmd"])
if r.return_code != 0:
raise Exception(f"Compilation failed! Return code is {r.return_code}! stdout:\n{r.stdout}\n; stderr:\n{r.stderr}")
else:
cls.logger.debug(f"Compilation finished. Return code is {r.return_code}. stdout:\n{r.stdout}\n; stderr:\n{r.stderr}")
# end if
# Install
cls.logger.info(f"Project {project.full_name}: Installing with {project.data['install_cmd']}")
r = BashUtils.run(project.data["install_cmd"])
if r.return_code != 0:
raise Exception(f"Installation failed! Return code is {r.return_code}! stdout:\n{r.stdout}\n; stderr:\n{r.stderr}")
else:
cls.logger.debug(f"Installation finished. Return code is {r.return_code}. stdout:\n{r.stdout}\n; stderr:\n{r.stderr}")
# end if
IOUtils.dump(project.checkout_dir / confirmation_file, confirmation_content, "txt")
# end with
return
@classmethod
def collect_data_indexes(cls, data_mgr: FilesManager, projects: List[Project]) -> NoReturn:
"""
Split the dataset and record the data indexes for {t1, t2, t3, lo, ta, allgroup} * {train, val, test, all} dataset parts.
"""
data_mgr.clean_path([FilesManager.DATA_INDEXES])
data_mgr.resolve([FilesManager.DATA_INDEXES]).mkdir(parents=True)
# (Random) Split by train/val/test
cls.logger.info(f"Splitting regular dataset info train/val/test sets with ratio of {Macros.DS_TRAIN_RATIO}/{Macros.DS_VAL_RATIO}/{Macros.DS_TEST_RATIO}")
cls.logger.info(f"Splitting leave-out dataset info train/val/test sets with ratio of {Macros.DS_LO_TRAIN_RATIO}/{Macros.DS_LO_VAL_RATIO}/{Macros.DS_LO_TEST_RATIO}")
# Load and sort coq-documents data
coq_documents: List[CoqDocument] = cls.load_coq_documents(data_mgr)
coq_documents.sort(key=lambda d: d.get_data_index())
cls.logger.info(f"Total dataset #doc = {len(coq_documents)}")
if len(coq_documents) < 10:
cls.logger.warning(f"Dataset is probably too small: {len(coq_documents)}")
# end if
trainevals_data_indexes: Dict[str, Set[str]] = collections.defaultdict(set)
# Split data for each project, using the same random seed salted with the project name
for project in projects:
documents_this_project: List[CoqDocument] = sorted([d for d in coq_documents if d.project_name == project.full_name])
hasher = hashlib.sha256()
hasher.update(str.encode(project.full_name))
hasher.update(str.encode(str(Environment.random_seed)))
salted_seed = int.from_bytes(hasher.digest(), "big")
random.seed(salted_seed)
random.shuffle(documents_this_project)
if project.data["group"] in [Macros.DS_GROUP_T1, Macros.DS_GROUP_T2, Macros.DS_GROUP_T3]:
train_ratio, val_ratio, test_ratio = Macros.DS_TRAIN_RATIO, Macros.DS_VAL_RATIO, Macros.DS_TEST_RATIO
elif project.data["group"] in [Macros.DS_GROUP_LO]:
train_ratio, val_ratio, test_ratio = Macros.DS_LO_TRAIN_RATIO, Macros.DS_LO_VAL_RATIO, Macros.DS_LO_TEST_RATIO
else:
LoggingUtils.log_and_raise(cls.logger, f"Invalid group name {project.data['group']} for {project.full_name}", Exception)
# end if
train_val_split_point = int(math.ceil(train_ratio * len(documents_this_project)))
val_test_split_point = int(math.ceil((train_ratio + val_ratio) * len(documents_this_project)))
trainevals_data_indexes[Macros.DS_TRAIN].update(set([d.get_data_index() for d in documents_this_project[:train_val_split_point]]))
trainevals_data_indexes[Macros.DS_VAL].update(set([d.get_data_index() for d in documents_this_project[train_val_split_point:val_test_split_point]]))
trainevals_data_indexes[Macros.DS_TEST].update(set([d.get_data_index() for d in documents_this_project[val_test_split_point:]]))
# end for
trainevals_data_indexes[Macros.DS_TRAINEVAL_ALL] = set.union(*trainevals_data_indexes.values())
cls.logger.info(f"Train/eval split #doc:\n" + ";\n".join([
f"{traineval}: {len(data_indexes)}"
for traineval, data_indexes in trainevals_data_indexes.items()
]))
# Split by groups
groups_project_names: Dict[str, List[str]] = {group: [p.full_name for p in projects if p.data["group"] == group] for group in Macros.DS_GROUPS}
groups_data_indexes: Dict[str, Set[str]] = dict()
for group, project_names in groups_project_names.items():
documents_this_group: List[CoqDocument] = [d for d in coq_documents if d.project_name in project_names]
groups_data_indexes[group] = set([d.get_data_index() for d in documents_this_group])
# end for
groups_data_indexes[Macros.DS_GROUP_TA] = set.union(groups_data_indexes[Macros.DS_GROUP_T1], groups_data_indexes[Macros.DS_GROUP_T2], groups_data_indexes[Macros.DS_GROUP_T3])
groups_data_indexes[Macros.DS_GROUP_ALL] = set.union(groups_data_indexes[Macros.DS_GROUP_T1], groups_data_indexes[Macros.DS_GROUP_T2], groups_data_indexes[Macros.DS_GROUP_T3], groups_data_indexes[Macros.DS_GROUP_LO])
cls.logger.info(f"Groups split #doc:\n" + ";\n".join([
f"{group}: {len(data_indexes)}"
for group, data_indexes in groups_data_indexes.items()
]))
# The final data indexes are the cross product of the two splits
for traineval in Macros.DS_TRAINEVALS + [Macros.DS_TRAINEVAL_ALL]:
for group in Macros.DS_GROUPS + [Macros.DS_GROUP_TA, Macros.DS_GROUP_ALL]:
data_indexes = list(set.intersection(groups_data_indexes[group], trainevals_data_indexes[traineval]))
cls.logger.info(f"{group}-{traineval} #doc = {len(data_indexes)}")
data_mgr.dump_data([FilesManager.DATA_INDEXES, f"{group}-{traineval}.json"], data_indexes, IOUtils.Format.jsonPretty)
# end for
# end for
return
RE_PATH_TO_QUALIFIED_PREFIX = re.compile(r"-[QR] (?P<path>[^,]+),(?P<qprefix>\S+)")
@classmethod
def collect_lemmas(cls, data_mgr: FilesManager, projects: List[Project], files: List[str] = None):
data_mgr.clean_path([FilesManager.LEMMAS])
data_mgr.resolve([FilesManager.LEMMAS]).mkdir(parents=True)
# Increase recursion limit because the backend sexps are CRAZZZZY deep
sys.setrecursionlimit(10000)
# Load coq-documents
coq_documents: List[CoqDocument] = cls.load_coq_documents(data_mgr)
if files is not None: coq_documents = [d for d in coq_documents if d.file_name in files]
lemmas: List[Lemma] = list()
# Prepare serapi_options
project_2_serapi_options: Dict[str, str] = {p.full_name: p.data["serapi_options"] for p in projects}
errors: List[Tuple[str, str]] = list()
for doc_i, doc in enumerate(tqdm(coq_documents)):
try:
cls.logger.info(f"Collecting from file {doc.get_data_index()} ({doc_i}/{len(coq_documents)}). Collected: {len(lemmas)}")
# Load AST sexp
ast_sexp_list: List[SexpNode] = SexpParser.parse_list(data_mgr.load_data([FilesManager.RAW_FILES, doc.get_data_index()[:-2] + ".ast.sexp"], IOUtils.Format.txt))
# Collect lemmas from this doc
lemmas_doc: List[Lemma] = cls.collect_lemmas_doc(doc, ast_sexp_list, project_2_serapi_options[doc.project_name])
lemmas.extend(lemmas_doc)
except KeyboardInterrupt:
cls.logger.warning(f"Keyboard Interrupt!")
raise
except:
cls.logger.warning(f"Error while parsing {doc.get_data_index()}: {traceback.format_exc()}")
cls.logger.warning(f"The script will continue on other files before it returns with failure. Use Ctrl+C to cut it early.")
errors.append((doc.get_data_index(), traceback.format_exc()))
continue
# end try
# end for
if len(errors) > 0:
# Save the error log before raising
data_mgr.dump_data([FilesManager.LEMMAS, "errors.txt"], errors, IOUtils.Format.jsonPretty)
LoggingUtils.log_and_raise(cls.logger, f"There were {len(errors)} errors during collection.", Exception)
# end if
# Assign uids
for lemma_i, lemma in enumerate(lemmas): lemma.uid = lemma_i
data_mgr.dump_data([FilesManager.LEMMAS], lemmas, IOUtils.Format.json, is_batched=True, per_batch=5000)
return
@classmethod
def filter_lemmas(cls, data_mgr: FilesManager):
# Increase recursion limit because the backend sexps are CRAZZZZY deep
sys.setrecursionlimit(10000)
data_mgr.clean_path([FilesManager.LEMMAS_FILTERED])
data_mgr.resolve([FilesManager.LEMMAS_FILTERED]).mkdir(parents=True)
# Load lemmas
lemmas: List[Lemma] = data_mgr.load_data([FilesManager.LEMMAS], IOUtils.Format.json, is_batched=True, clz=Lemma)
heights: List[int] = [l.backend_sexp.height() for l in lemmas]
depth_cutoff_point = sorted(heights)[int(np.ceil(Macros.LEMMAS_DEPTH_CUTOFF * len(lemmas)))]
data_indexes_names: List[Tuple[str, str]] = [(l.data_index, l.name) for l in lemmas if l.backend_sexp.height() <= depth_cutoff_point]
cls.logger.info(f"Cutoff depth is {depth_cutoff_point}, and {len(data_indexes_names)} data are included")
lemmas_filtered: List[Lemma] = [l for l in lemmas if (l.data_index, l.name) in data_indexes_names]
# Assign uids
for lemma_i, lemma in enumerate(lemmas_filtered): lemma.uid = lemma_i
data_mgr.dump_data([FilesManager.LEMMAS_FILTERED], lemmas_filtered, IOUtils.Format.json, is_batched=True, per_batch=5000)
return
@classmethod
def collect_definitions(cls, data_mgr: FilesManager):
data_mgr.clean_path([FilesManager.DEFINITIONS])
data_mgr.resolve([FilesManager.DEFINITIONS]).mkdir(parents=True)
# Load coq-documents
coq_documents: List[CoqDocument] = cls.load_coq_documents(data_mgr)
definitions: List[Definition] = list()
errors: List[Tuple[str, str]] = list()
for doc_i, doc in enumerate(tqdm(coq_documents)):
try:
# Load AST sexp
ast_sexp_list: List[SexpNode] = SexpParser.parse_list(data_mgr.load_data([FilesManager.RAW_FILES, doc.get_data_index()[:-2] + ".ast.sexp"], IOUtils.Format.txt))
definitions_doc: List[Definition] = cls.collect_definitions_doc(doc, ast_sexp_list)
definitions.extend(definitions_doc)
except KeyboardInterrupt:
cls.logger.warning(f"Keyboard Interrupt!")
raise
except:
cls.logger.warning(f"Error while parsing {doc.get_data_index()}: {traceback.format_exc()}")
cls.logger.warning(f"The script will continue on other files before it returns with failure. Use Ctrl+C to cut it early.")
errors.append((doc.get_data_index(), traceback.format_exc()))
continue
# end try
# end for
if len(errors) > 0:
# Save the error log before raising
data_mgr.dump_data([FilesManager.DEFINITIONS, "errors.txt"], errors, IOUtils.Format.jsonPretty)
LoggingUtils.log_and_raise(cls.logger, f"There were {len(errors)} errors during collection.", Exception)
# end if
data_mgr.dump_data([FilesManager.DEFINITIONS, "definitions.json"], definitions, IOUtils.Format.json)
return
@classmethod
def collect_lemmas_backend_sexp_transformations(cls, data_mgr: FilesManager):
data_mgr.clean_path([cls.TASK_LEMMA_BACKEND_SEXP_TRANSFORMATIONS])
data_mgr.resolve([cls.TASK_LEMMA_BACKEND_SEXP_TRANSFORMATIONS]).mkdir(parents=True)
# Increase recursion limit because the backend sexps are CRAZZZZY deep
sys.setrecursionlimit(10000)
lemmas_filtered: List[Lemma] = data_mgr.load_data([FilesManager.LEMMAS_FILTERED], IOUtils.Format.json, is_batched=True, clz=Lemma)
# Main stream transformations, applied one after another
levels_lemmas_bsexp_transformed: Dict[str, List[SexpNode]] = dict()
last_level: Optional[str] = None # None means original
for level in LemmaBackendSexpTransformers.LEVELS:
cls.logger.info(f"Doing {last_level if last_level is not None else 'orig'} -> {level} transformation")
levels_lemmas_bsexp_transformed[level] = list()
for lemma_i, lemma in enumerate(tqdm(lemmas_filtered)):
orig_sexp = lemma.backend_sexp if last_level is None else levels_lemmas_bsexp_transformed[last_level][lemma_i]
bsexp_transformed = LemmaBackendSexpTransformers.transform(level, copy.deepcopy(orig_sexp))
levels_lemmas_bsexp_transformed[level].append(bsexp_transformed)
# end for
last_level = level
data_mgr.dump_data([cls.TASK_LEMMA_BACKEND_SEXP_TRANSFORMATIONS, level, "transformed"], levels_lemmas_bsexp_transformed[level], IOUtils.Format.json, is_batched=True, per_batch=5000)
# end for
# Other special transformation, directly applied on original trees
for tr_name in LemmaBackendSexpTransformers.SPECIALS:
cls.logger.info(f"Doing orig -> {tr_name} transformation")
bsexp_transformed_list = list()
for lemma_i, lemma in enumerate(tqdm(lemmas_filtered)):
orig_sexp = lemma.backend_sexp
bsexp_transformed = LemmaBackendSexpTransformers.transform(tr_name, copy.deepcopy(orig_sexp))
bsexp_transformed_list.append(bsexp_transformed)
# end for
data_mgr.dump_data([cls.TASK_LEMMA_BACKEND_SEXP_TRANSFORMATIONS, tr_name, "transformed"], bsexp_transformed_list, IOUtils.Format.json, is_batched=True, per_batch=5000)
# end for
return
@classmethod
def collect_lemmas_foreend_sexp_transformations(cls, data_mgr: FilesManager):
data_mgr.clean_path([cls.TASK_LEMMA_FOREEND_SEXP_TRANSFORMATIONS])
data_mgr.resolve([cls.TASK_LEMMA_FOREEND_SEXP_TRANSFORMATIONS]).mkdir(parents=True)
# Increase recursion limit because the backend sexps are CRAZZZZY deep
sys.setrecursionlimit(10000)
lemmas_filtered: List[Lemma] = data_mgr.load_data([FilesManager.LEMMAS_FILTERED], IOUtils.Format.json, is_batched=True, clz=Lemma)
# Main stream transformations, applied one after another
levels_lemmas_fsexp_transformed: Dict[str, List[SexpNode]] = dict()
last_level: Optional[str] = None # None means original
for level in LemmaForeendSexpTransformers.LEVELS:
cls.logger.info(f"Doing {last_level if last_level is not None else 'orig'} -> {level} transformation")
levels_lemmas_fsexp_transformed[level] = list()
for lemma_i, lemma in enumerate(tqdm(lemmas_filtered)):
orig_sexp = lemma.ast_sexp if last_level is None else levels_lemmas_fsexp_transformed[last_level][lemma_i]
fsexp_transformed = LemmaForeendSexpTransformers.transform(level, copy.deepcopy(orig_sexp))
levels_lemmas_fsexp_transformed[level].append(fsexp_transformed)
# end for
last_level = level
data_mgr.dump_data([cls.TASK_LEMMA_FOREEND_SEXP_TRANSFORMATIONS, level, "transformed"], levels_lemmas_fsexp_transformed[level], IOUtils.Format.json, is_batched=True, per_batch=5000)
# end for
# Other special transformation, directly applied on level 0 trees
for tr_name in LemmaForeendSexpTransformers.SPECIALS:
cls.logger.info(f"Doing {LemmaForeendSexpTransformers.LEVEL_0} -> {tr_name} transformation")
fsexp_transformed_list = list()
for lemma_i, lemma in enumerate(tqdm(lemmas_filtered)):
orig_sexp = levels_lemmas_fsexp_transformed[LemmaForeendSexpTransformers.LEVEL_0][lemma_i]
fsexp_transformed = LemmaForeendSexpTransformers.transform(tr_name, copy.deepcopy(orig_sexp))
fsexp_transformed_list.append(fsexp_transformed)
# end for
data_mgr.dump_data([cls.TASK_LEMMA_FOREEND_SEXP_TRANSFORMATIONS, tr_name, "transformed"], fsexp_transformed_list, IOUtils.Format.json, is_batched=True, per_batch=5000)
# end for
return
VTYPES_LEMMA = [SexpInfo.VernacConsts.type_start_theorem_proof]
VTYPES_MODULE_BEG = [SexpInfo.VernacConsts.type_define_module]
VTYPES_MODULE_END = [SexpInfo.VernacConsts.type_end_segment]
VTYPES_DEFINITIONS = [SexpInfo.VernacConsts.type_definition]
@classmethod
def collect_lemmas_doc(
cls,
doc: CoqDocument,
ast_sexp_list: List[SexpNode],
serapi_options: str,
) -> List[Lemma]:
lemmas_doc: List[Lemma] = list()
data_index = doc.get_data_index()
# Maintain a stack of module
modules: List[str] = list()
# Prepare qualified name prefix
qprefix_this_doc = "./" + doc.file_name[:-2] # Remove .v
for m in cls.RE_PATH_TO_QUALIFIED_PREFIX.finditer(serapi_options):
path = m.group("path")
if path != ".": path = "./" + path
qprefix = m.group("qprefix")
if qprefix_this_doc.startswith(path):
qprefix_this_doc = qprefix + qprefix_this_doc[len(path):]
break
# end if
# end for
if qprefix_this_doc.startswith("./"): qprefix_this_doc = qprefix_this_doc[len("./"):]
qprefix_this_doc = qprefix_this_doc.replace("/", ".")
for sent_i, sent in enumerate(doc.sentences):
ast_sexp = ast_sexp_list[sent_i]
vernac = SexpAnalyzer.analyze_vernac(ast_sexp)
if vernac.vernac_type in cls.VTYPES_MODULE_BEG:
# (VernacExpr()(VernacDefineModule() ( ( v ( Id <module name>)) ...
# 0 1 2 20 21 22 220 2201 22011
module_name = vernac.vernac_sexp[2][2][0][1][1].content_no_quote
modules.append(module_name)
elif vernac.vernac_type in cls.VTYPES_MODULE_END:
# (VernacExpr()(VernacEndSegment ( ( v ( Id <module name>)) ...
# 0 1 2 20 21 210 2101 21011
try:
module_name = vernac.vernac_sexp[2][1][0][1][1].content_no_quote
except:
print(vernac.vernac_sexp.pretty_format())
raise
# end try
if len(modules) > 0 and module_name == modules[-1]: modules.pop() # EndModule and EndSection share the same vernac type
elif vernac.vernac_type in cls.VTYPES_LEMMA:
# (VernacExpr()(VernacStartTheoremProof Lemma ( ( ( ( ( v ( Id <lemma name>))
# 0 1 2 20 21 22 2200000 2200001 22000011
lemma = Lemma()
lemma.data_index = data_index
lemma.name = vernac.vernac_sexp[2][2][0][0][0][0][1][1].content_no_quote
lemma.qname = qprefix_this_doc + "." + ".".join(modules + [lemma.name])
# Find lemma content, after the first token matching the lemma name
tok_i = 0
for tok in sent.tokens:
if tok.content == lemma.name: break
tok_i += 1
# end for
if tok_i == len(sent.tokens): LoggingUtils.log_and_raise(cls.logger, f"Lemma name {lemma.name} didn't appear in the source code {sent.str_with_space()}", Exception)
lemma.vernac_command = sent.tokens[:tok_i]
lemma.statement = sent.tokens[tok_i + 1:]
lemma.ast_sexp = vernac.vernac_sexp
lemmas_doc.append(lemma)
# end if
# end for
# Use sername to get the backend representations
lemma_qnames: str = "".join([l.qname + "\n" for l in lemmas_doc])
lemma_qnames_file = BashUtils.get_temp_file()
IOUtils.dump(lemma_qnames_file, lemma_qnames, IOUtils.Format.txt)
lemma_qnames_backend_sexps_str: str = BashUtils.run(f"sername {serapi_options} --require-lib={qprefix_this_doc} {lemma_qnames_file}", expected_return_code=0).stdout
IOUtils.rm(lemma_qnames_file)
for qname_backend_sexp_str in lemma_qnames_backend_sexps_str.splitlines():
qname, backend_sexp_str = qname_backend_sexp_str.split(":", 1)
backend_sexp = SexpParser.parse(backend_sexp_str)
for lemma in lemmas_doc:
if lemma.qname == qname:
lemma.backend_sexp = backend_sexp
break
# end if
# end for
# end for
lemmas_doc = [l for l in lemmas_doc if l.backend_sexp is not None]
return lemmas_doc
@classmethod
def collect_definitions_doc(cls,
doc: CoqDocument,
ast_sexp_list: List[SexpNode],
) -> List[Definition]:
definitions_doc: List[Definition] = list()
data_index = doc.get_data_index()
for sent_i, sent in enumerate(doc.sentences):
ast_sexp = ast_sexp_list[sent_i]
vernac = SexpAnalyzer.analyze_vernac(ast_sexp)
if vernac.vernac_type in cls.VTYPES_DEFINITIONS:
# (VernacExpr()( VernacDefinition ( NoDischarge Definition) ( ( ( v ( Name ( Id codom ))) ...
# 0 1 2 20 21 210 211 22 220 2200 22000 22001 220010 220011 2200110 2200111
try:
if vernac.vernac_sexp[2][1][0].content == "NoDischarge" and vernac.vernac_sexp[2][1][1].content == "Definition":
definition = Definition()
definition.data_index = data_index
definition.name = vernac.vernac_sexp[2][2][0][0][1][1][1].content_no_quote
definitions_doc.append(definition)
# end if
except IllegalSexpOperationException:
continue
# end try
# end if
# end for
return definitions_doc
@classmethod
def extract_data_project(cls,
project_path: Path,
files: Optional[List[str]],
exclude_files: Optional[List[str]],
exclude_pattern: Optional[str],
serapi_options: str,
output_path: Path,
):
# 1. Prepare output path
if output_path.is_dir():
cls.logger.warning(f"{output_path} already exists, will overwrite the files.")
elif output_path.is_file():
LoggingUtils.log_and_raise(cls.logger, f"{output_path} already exists as a file. Aborting.", Exception)
else:
IOUtils.mk_dir(output_path)
# end if
# 2. Extract documents, tok.sexp and ast.sexp
coq_documents: Dict[str, CoqDocument] = collections.OrderedDict()
ast_sexp_lists: Dict[str, List[SexpNode]] = dict()
tok_sexp_lists: Dict[str, List[SexpNode]] = dict()
with IOUtils.cd(project_path):
coq_files: List[str] = BashUtils.run(f"find -name '*.v' -type f").stdout.split("\n")[:-1]
coq_files = [coq_file[2:] for coq_file in coq_files]
if files is not None:
coq_files = [f for f in coq_files if f in files]
# end if
if exclude_files is not None:
coq_files = [f for f in coq_files if f not in exclude_files]
# end if
if exclude_pattern is not None:
re_exclude_pattern = re.compile(exclude_pattern)
coq_files = [f for f in coq_files if not re_exclude_pattern.fullmatch(f)]
# end if
for i, coq_file in enumerate(tqdm(coq_files)):
try:
# Read file
with open(coq_file, "r", newline="") as f:
source_code = f.read()
# end with
# Get unicode offsets
unicode_offsets = ParserUtils.get_unicode_offsets(source_code)
# Call SerAPI
ast_sexp_str: str = BashUtils.run(f"sercomp {serapi_options} --mode=sexp -- {coq_file}", expected_return_code=0).stdout
tok_sexp_str: str = BashUtils.run(f"sertok {serapi_options} -- {coq_file}", expected_return_code=0).stdout
# Parse ast sexp
ast_sexp_list: List[SexpNode] = SexpParser.parse_list(ast_sexp_str)
tok_sexp_list: List[SexpNode] = SexpParser.parse_list(tok_sexp_str)
# Parse the document
coq_document = CoqParser.parse_document(source_code, ast_sexp_list, tok_sexp_list, unicode_offsets=unicode_offsets)
# Set meta data
coq_document.file_name = coq_file
coq_document.project_name = project_path.name
coq_documents[coq_file] = coq_document
ast_sexp_lists[coq_file] = ast_sexp_list
tok_sexp_lists[coq_file] = tok_sexp_list
except KeyboardInterrupt:
cls.logger.warning("Keyboard interrupt!")
raise
except:
cls.logger.warning(f"File {coq_file} failed! Exception was: {traceback.format_exc()}")
continue
# end try
# end for
# 3. Extract and save lemmas and definitions
lemmas: List[Lemma] = list()
definitions: List[Definition] = list()
# Increase recursion limit because the backend sexps are CRAZZZZY deep
sys.setrecursionlimit(10000)
for file_path, doc in tqdm(coq_documents.items()):
ast_sexp_list = ast_sexp_lists[file_path]
lemmas_doc = cls.collect_lemmas_doc(doc, ast_sexp_list, serapi_options)
lemmas.extend(lemmas_doc)
definitions_doc = cls.collect_definitions_doc(doc, ast_sexp_list)
definitions.extend(definitions_doc)
# end for
IOUtils.dump(output_path/"lemmas.json", IOUtils.jsonfy(lemmas), IOUtils.Format.json)
IOUtils.dump(output_path/"definitions.json", IOUtils.jsonfy(definitions), IOUtils.Format.json)
# end with
return
@classmethod
def extract_data_from_corpus(cls,
corpus_path: Path,
trainevals: List[str],
groups: List[str],
output_path: Path,
):
# 1. Prepare output path
if output_path.is_dir():
cls.logger.warning(f"{output_path} already exists, will overwrite the files.")
elif output_path.is_file():
LoggingUtils.log_and_raise(cls.logger, f"{output_path} already exists as a file. Aborting.", Exception)
else:
IOUtils.mk_dir(output_path)
# end if
assert all([traineval in Macros.DS_TRAINEVALS for traineval in trainevals])
assert all([group in Macros.DS_GROUPS+[Macros.DS_GROUP_TA] for group in groups])
data_mgr = FilesManager(corpus_path)
# 2. Load lemmas and definitions
lemmas_filtered: List[Lemma] = data_mgr.load_data([FilesManager.LEMMAS_FILTERED], IOUtils.Format.json, is_batched=True, clz=Lemma)
definitions: List[Definition] = data_mgr.load_data([FilesManager.DEFINITIONS, "definitions.json"], IOUtils.Format.json, clz=Definition)
# 3. Output to output_path for each combination of traineval and group
for traineval in trainevals:
for group in groups:
IOUtils.mk_dir(output_path/f"{group}-{traineval}")
data_indexes = IOUtils.load(Macros.project_dir/"training"/f"{group}-{traineval}.json", IOUtils.Format.json)
IOUtils.dump(output_path/f"{group}-{traineval}/lemmas.json", IOUtils.jsonfy([l for l in lemmas_filtered if l.data_index in data_indexes]), IOUtils.Format.json)
IOUtils.dump(output_path/f"{group}-{traineval}/definitions.json", IOUtils.jsonfy([d for d in definitions if d.data_index in data_indexes]), IOUtils.Format.json)
# end for
# end for
return
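# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): how RE_PATH_TO_QUALIFIED_PREFIX
# resolves a file's logical (qualified) name from SerAPI's -Q/-R options, mirroring
# the prefix logic in collect_lemmas_doc. The options string and file name below
# are made-up examples.
if __name__ == "__main__":
    import re
    example_re = re.compile(r"-[QR] (?P<path>[^,]+),(?P<qprefix>\S+)")
    serapi_options_example = "-Q theories,MyProj"        # hypothetical project options
    file_prefix = "./theories/Lists/ListUtil"            # "theories/Lists/ListUtil.v" with ".v" stripped
    m = next(m for m in example_re.finditer(serapi_options_example) if file_prefix.startswith("./" + m.group("path")))
    qualified = m.group("qprefix") + file_prefix[len("./" + m.group("path")):]
    print(qualified.replace("/", "."))                   # -> MyProj.Lists.ListUtil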
| 42,840 | 13,884 |
"""
This package contains implementations of pairwise similarity queries.
"""
# bring classes directly into package namespace, to save some typing
import warnings
try:
import Levenshtein # noqa:F401
except ImportError:
msg = (
"The gensim.similarities.levenshtein submodule is disabled, because the optional "
"Levenshtein package <https://pypi.org/project/python-Levenshtein/> is unavailable. "
"Install Levenhstein (e.g. `pip install python-Levenshtein`) to suppress this warning."
)
warnings.warn(msg)
LevenshteinSimilarityIndex = None
else:
from .levenshtein import LevenshteinSimilarityIndex # noqa:F401
from .docsim import ( # noqa:F401
Similarity,
MatrixSimilarity,
SparseMatrixSimilarity,
SoftCosineSimilarity,
WmdSimilarity)
from .termsim import ( # noqa:F401
TermSimilarityIndex,
UniformTermSimilarityIndex,
WordEmbeddingSimilarityIndex,
SparseTermSimilarityMatrix)
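# Hedged usage note (illustrative, not from the gensim docs): because the Levenshtein
# dependency is optional, LevenshteinSimilarityIndex may be bound to None after import,
# so callers are expected to check for that sentinel before constructing it, e.g.
#     if LevenshteinSimilarityIndex is not None:
#         index = LevenshteinSimilarityIndex(dictionary)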
| 965 | 317 |
from tpucolab.core import *
| 27 | 10 |
import time
class PIDController:
def __init__(self, Kp=0.25, Ki=0.0, Kd=0.0, anti_windup=10.0, cmd_freq=0.0):
self.Kp = Kp
self.Ki = Ki
self.Kd = Kd
# Set max integral correction per timestep
self.anti_windup = anti_windup
# Set delay between updates (seconds)
self.cmd_freq = cmd_freq
self.current_time = time.time()
self.prev_time = self.current_time
self.reset()
def reset(self):
self.setpoint = 0.0
self.p_ = 0.0
self.i_ = 0.0
self.d_ = 0.0
self.prev_error = 0.0
def compute(self, setpoint, measured_value):
''' Compute the PID correction for error = setpoint - measured_value; returns None if called again before cmd_freq seconds have elapsed. '''
self.current_time = time.time()
delta_time = self.current_time - self.prev_time
if delta_time >= self.cmd_freq:
self.setpoint = setpoint
error = self.setpoint - measured_value
delta_error = error - self.prev_error
self.accumulated_error = error * delta_time
# Clamp the per-step integral contribution to limit windup
if self.accumulated_error < -self.anti_windup:
self.accumulated_error = -self.anti_windup
if self.accumulated_error > self.anti_windup:
self.accumulated_error = self.anti_windup
self.i_ = self.i_ + self.accumulated_error
self.d_ = delta_error / delta_time
self.prev_error = error
self.prev_time = self.current_time
return self.Kp * error + self.Ki * self.i_ + self.Kd * self.d_
def set_kp(self, kp):
self.Kp = kp
def set_ki(self, ki):
self.Ki = ki
def set_kd(self, kd):
self.Kd = kd
def set_anti_windup(self, anti_windup):
self.anti_windup = anti_windup
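# Hedged usage sketch (not part of the original module): a few manual updates toward a
# fixed setpoint; the gains, setpoint and measurements below are made up, and the sleeps
# just keep delta_time non-zero between calls.
if __name__ == "__main__":
    pid = PIDController(Kp=0.5, Ki=0.05, Kd=0.01, anti_windup=5.0, cmd_freq=0.0)
    time.sleep(0.01)
    print(pid.compute(setpoint=1.0, measured_value=0.0))   # first update: error = 1.0
    time.sleep(0.01)
    print(pid.compute(setpoint=1.0, measured_value=0.6))   # second update: error = 0.4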
| 1,858 | 630 |
# -*- coding: utf-8 -*-
""" SQLite emitter """
import logging
import sqlite3
LOGGER = logging.getLogger()
def __type__() -> str:
return 'SQLite'
class SQLite: # pylint: disable=too-few-public-methods
""" SQLite wrapper class """
def __init__(self, config: dict) -> None:
""" Initializer
Args:
config: (dict) represents the configuration for the emitter
"""
# <start config sample>
# [sqlite]
# file = /etc/tilty/tilt.sqlite
self.conn = sqlite3.connect(config['file'])
self.conn.execute('''
CREATE TABLE IF NOT EXISTS data(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
gravity INTEGER,
temp INTEGER,
color VARCHAR(16),
mac VARCHAR(17),
timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL)
''')
def emit(self, tilt_data: dict) -> None:
""" Initializer
Args:
tilt_data (dict): data returned from valid tilt device scan
"""
LOGGER.info('[sqlite] creating row')
self.conn.execute(
"insert into data (gravity,temp,color,mac) values (?,?,?,?)",
(
tilt_data['gravity'],
tilt_data['temp'],
tilt_data['color'],
tilt_data['mac']
)
)
self.conn.commit()
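# Hedged usage sketch (not part of the original module): the config key and tilt_data
# fields below mirror what __init__ and emit() read; the values are made up.
if __name__ == '__main__':
    emitter = SQLite({'file': '/tmp/tilt.sqlite'})
    emitter.emit({'gravity': 1052, 'temp': 68, 'color': 'Black', 'mac': 'aa:bb:cc:dd:ee:ff'})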
| 1,420 | 404 |
import cv2
import numpy as np
from numpy.linalg import norm
import math
import csv
from operator import itemgetter
from datetime import datetime
import VideoEnhancement
import fishpredictor
import detector
import kmeancluster
import preproccesing
import randomforst
cluster = kmeancluster.kmeans()
classifier = randomforst.randomforst()
samak = []
framenum = 0
sum = 0
max = 0
mylist = [[]]
yolo = detector.detector()
cap = cv2.VideoCapture('chaos1.avi')
ret, frame = cap.read()
fheight, fwidth, channels = frame.shape
resize = False
if (fheight > 352 or fwidth > 640):
resize = True
fwidth = 640
fheight = 352
frame = cv2.resize(frame, (640, 352))
mask = np.zeros_like(frame)
# Needed for saving video
fps = cap.get(cv2.CAP_PROP_FPS)
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
dt_string = datetime.now().strftime("%H_%M_%S_%d_%m_%y")
num_seconds = 10
video = cv2.VideoWriter('videonormal/' +str(num_seconds*round(fps))+'_'+str(dt_string)+'.avi', fourcc, fps, (fwidth, fheight))
# Read until video is completed
counter = 0
buffer = [[]]
apperance = [[]]
last_changed = []
top = 0
frms = 0
# Needed to track objects
n_frame = 8
ref_n_frame_axies = []
ref_n_frame_label = []
ref_n_frame_axies_flatten = []
ref_n_frame_label_flatten = []
frm_num = 1
coloredLine = np.random.randint(0, 255, (10000, 3))
arr = []
label_cnt = 1
min_distance = 50
while (cap.isOpened()):
ret, img = cap.read()
if ret == True:
if frms % 2 == 0:
img = VideoEnhancement.enhanceVideo(img, resize)
v = 0
cur_frame_axies = []
cur_frame_label = []
height, width, channels = img.shape
boxes, confidences, centers, colors = yolo.detect(img)
counter += 1
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.1, 0.4)
font = cv2.FONT_HERSHEY_PLAIN
fishcounter = 1
for i in range(len(boxes)):
if i in indexes:
lbl = float('nan')
x, y, w, h, = boxes[i]
center_x, center_y = centers[i]
color = colors[0]
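# Associate this detection with an existing track: find the nearest centre among the
# detections of the last n_frame frames and reuse its label if it is closer than
# min_distance pixels; otherwise a brand-new label is assigned below.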
if (len(ref_n_frame_label_flatten) > 0):
b = np.array([(center_x, center_y)])
a = np.array(ref_n_frame_axies_flatten)
distance = norm(a - b, axis=1)
min_value = distance.min()
if (min_value < min_distance):
idx = np.where(distance == min_value)[0][0]
lbl = ref_n_frame_label_flatten[idx]
points = (int(ref_n_frame_axies_flatten[idx][0]), int(ref_n_frame_axies_flatten[idx][1]))
mask = cv2.line(mask, (center_x, center_y), points, coloredLine[lbl].tolist(), 2)
cv2.circle(img, points, 5, coloredLine[lbl].tolist(), -1)
if (math.isnan(lbl)):
lbl = label_cnt
label_cnt += 1
arr.append([counter, lbl, center_x, center_y])
cur_frame_label.append(lbl)
cur_frame_axies.append((center_x, center_y))
samak.append([lbl, x, y, w, h])
cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
cv2.putText(img, '{}{}'.format("Fish", lbl), (x, y - 5), font, 1, (255, 255, 255), 2)
if (len(ref_n_frame_axies) == n_frame):
del ref_n_frame_axies[0]
del ref_n_frame_label[0]
ref_n_frame_label.append(cur_frame_label)
ref_n_frame_axies.append(cur_frame_axies)
ref_n_frame_axies_flatten = [a for ref_n_frame_axie in ref_n_frame_axies for a in ref_n_frame_axie]
ref_n_frame_label_flatten = [b for ref_n_frame_lbl in ref_n_frame_label for b in ref_n_frame_lbl]
z = sorted(samak, key=itemgetter(0))
samak = []
if (len(z) != 0):
fishpredictor.predictfish(z, apperance, buffer, last_changed, top, img, color, mylist, framenum)
img = cv2.add(img, mask)
# cv2.imshow("Image", img)
mylist.append([])
framenum += 1
print(frms)
print("----------")
# cap.set(1,frms)
video.write(img)
if (frms % (round(fps) * num_seconds) == 0 and frms!=0):
result = cluster.classify(mask)
print(classifier.classify(z, mask,fps))
if (result == 1):
with open('exceltext/' + str(frms)+'_'+str(dt_string)+ '.csv', 'w', newline='') as file:
writer = csv.writer(file)
writer.writerows(mylist)
# writer.writerows(preproccesing.featuresCalc(mylist))
cv2.imwrite("trajecstest" + str(frms)+'_'+str(dt_string) + ".png", mask)
video.release()
dt_string = datetime.now().strftime("%H_%M_%S_%d_%m_%y")
video = cv2.VideoWriter('videotest/' + str(frms+(num_seconds*round(fps)))+'_'+str(dt_string)+'.avi', fourcc, fps,
(fwidth, fheight))
print("result " + str(result))
mask = np.zeros_like(frame)
ref_n_frame_axies = []
ref_n_frame_label = []
ref_n_frame_axies_flatten = []
ref_n_frame_label_flatten = []
buffer = [[]]
apperance = [[]]
last_changed = []
# frms = 0
counter = 0
mylist = [[]]
framenum = 0
fishcounter = 1
label_cnt = 1
top = 0
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# Break the loop
else:
break
frms += 1
cap.release()
cv2.destroyAllWindows()
video.release()
| 6,043 | 2,039 |
""" --- geometry parameters for Wei et al stochastic sensing --- """
nm = 1e-0
# @TODO maybe include tolc in parent file
tolc = 1e-2*nm # tolerance for coordinate comparisons
dim = 3
# molecule radius
rMolecule = 5*nm
# effective pore radius ~ d^SAM_p/2
r0 = 13*nm
# aperture angle in degrees
angle = 40
# SiN membrane thickness (in vertical direction)
lsin = 50*nm
# Au membrane thickness (in vertical direction)
lau = 40*nm
# Au thickness in radial direction
rlau = 10*nm
# SAM layer thickness (in vertical direction)
lsam = 3*nm
# Radius of domain
Rz = 150.0*nm
R = 150.0*nm
# fraction of R which is considered as outer membrane boundary
outerfrac = 0.3
# mesh generation parameters
# length scales
lc = 10*nm
lcMolecule = lc*1e-1
lcOuter = lc*5
lcCenter = lc/5
# provide default values for boundary layer around membrane/molecule
membraneblayer = None
moleculeblayer = None
| 889 | 340 |
import requests
sample_user = {"latitude" : 42.2561110, "longitude" : -71.0741010, "uid" : "0" , "first_name" : "alex", "last_name" : "iansiti"}
send_url = "http://flask-macoder.rhcloud.com/near"
r = requests.post(send_url, sample_user)
print(r.json())
| 254 | 115 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ucs_vlan_find
short_description: Find VLANs on Cisco UCS Manager
description:
- Find VLANs on Cisco UCS Manager based on different criteria.
extends_documentation_fragment: ucs
options:
pattern:
description:
- Regex pattern to find within the name property of the fabricVlan class.
- This is required if C(vlanid) parameter is not supplied.
type: str
fabric:
description:
- "The fabric configuration of the VLAN. This can be one of the following:"
- "common - The VLAN applies to both fabrics and uses the same configuration parameters in both cases."
- "A — The VLAN only applies to fabric A."
- "B — The VLAN only applies to fabric B."
choices: [common, A, B]
default: common
type: str
vlanid:
description:
- The unique string identifier assigned to the VLAN.
- A VLAN ID can be between '1' and '3967', or between '4048' and '4093'.
- This is required if C(pattern) parameter is not supplied.
type: str
requirements:
- ucsmsdk
author:
- David Martinez (@dx0xm)
- CiscoUcs (@CiscoUcs)
version_added: '2.9'
'''
EXAMPLES = r'''
- name: Get all vlans in fabric A
ucs_vlan_find:
hostname: 172.16.143.150
username: admin
password: password
fabric: 'A'
pattern: '.'
- name: Confirm if vlan 15 is present
ucs_vlan_find:
hostname: 172.16.143.150
username: admin
password: password
vlanid: '15'
'''
RETURN = r'''
vlan_list:
description: basic details of vlans found
returned: on success
type: list
sample: [
{
"id": "0",
"name": "vlcloud1"
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec
def main():
argument_spec = ucs_argument_spec
argument_spec.update(
fabric=dict(type='str', default='common', choices=['common', 'A', 'B']),
pattern=dict(type='str'),
vlanid=dict(type='str')
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[['pattern', 'vlanid']]
)
ucs = UCSModule(module)
filtls = ['(cloud,"ethlan")']
if module.params['fabric'] != 'common':
filtls.append('(switch_id,"' + module.params['fabric'] + '")')
if module.params['vlanid']:
filtls.append('(id,"' + module.params['vlanid'] + '")')
else:
filtls.append('(name,"' + module.params['pattern'] + '")')
object_dict = ucs.login_handle.query_classid("fabricVlan", filter_str=' and '.join(filtls))
if object_dict is None:
module.fail_json(msg="Failed to query vlan objects")
vlnlist = []
for ob in object_dict:
vlnlist.append(dict(name=ob.name, id=ob.id))
module.exit_json(changed=False,
vlan_list=vlnlist)
if __name__ == '__main__':
main()
| 3,293 | 1,140 |
from functools import total_ordering
def keys_to_scrap(keyprice, keys, metal):
return round(keys * round(keyprice * 9)) + round(metal * 9)
def scrap_to_keys(keyprice, scrap, force_ref=False):
keys = 0
while not force_ref and scrap > round(keyprice * 9):
keys += 1
scrap -= round(keyprice * 9)
ref = scrap / 9
ref = fix_ref(ref)
return keys, ref
def fix_ref(ref):
ref = str(round(ref * 18) / 18)
if "." in ref:
bd, ad = ref.split(".")
if ad == "0":
ref = int(bd)
else:
ad = ad[:2]
ref = float(bd + "." + ad)
else:
ref = int(ref)
return ref
@total_ordering
class Currency:
def __init__(self, keys=0, metal=0, scrap=0, keyprice=56.11):
self.keyprice = keyprice
if scrap:
self.scrap = scrap
self.raw_keys = 0
self.raw_metal = 0
else:
self.scrap = self.keys_to_scrap(keys, metal)
self.raw_keys = keys
self.raw_metal = metal
@property
def keys(self):
return self.scrap_to_keys(self.scrap)[0]
@property
def metal(self):
return self.scrap_to_keys(self.scrap)[1]
@property
def ref(self):
return self.scrap_to_keys(self.scrap)[1]
def keys_to_scrap(self, keys, metal):
return keys_to_scrap(self.keyprice, keys, metal)
def scrap_to_keys(self, scrap):
return scrap_to_keys(self.keyprice, scrap)
def __eq__(self, other):
return self.scrap == other.scrap
def __gt__(self, other):
return self.scrap > other.scrap
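# Hedged usage sketch (not part of the original module): converting keys + metal to scrap
# and back, and comparing two Currency amounts; the key price of 56.11 ref is made up.
if __name__ == '__main__':
    total_scrap = keys_to_scrap(56.11, keys=1, metal=2.33)
    print(total_scrap, scrap_to_keys(56.11, total_scrap))                              # round-trips to (1, 2.33)
    print(Currency(keys=1, keyprice=56.11) > Currency(metal=30, keyprice=56.11))       # True: 1 key > 30 ref at this price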
| 1,660 | 598 |
from typing import Tuple, Pattern
from urllib.request import urlopen
import os
from ftplib import FTP
from tempfile import TemporaryDirectory
from hashlib import sha256
import re
from course_valve.valve_defs import (
TARGET_FTP,
ENCRYPTED_PASSWORD,
TARGET_FILE_NAME,
IDENTIFIER,
NO_IDENTIFIER,
FTP_USER,
OPENED_TEMPLATE_NAME,
CLOSED_TEMPLATE_NAME,
TEMPLATES_TARGETS_PREFIX,
)
class PageUpdater:
_work_dir: str
_backup_path: str
_yes_path: str
_no_path: str
_page_content: str
_is_closed: bool
_date_pattern: Pattern
def __init__(self, page_address: str) -> None:
self._init_work_dir()
self._date_pattern = re.compile(r"(\d+\.\d+\.\d+)")
self._page_content = self._read_html(page_address)
self._is_closed = NO_IDENTIFIER in self._page_content
self._save_backup_and_templates(self._page_content)
@property
def course_closed_on_load(self) -> bool:
return self._is_closed
def _init_work_dir(self) -> None:
self._work_dir = os.path.abspath(os.path.dirname(__file__))
self._backup_path = os.path.join(self._work_dir, f"{TARGET_FILE_NAME}.bkp")
self._yes_path = os.path.join(self._work_dir, OPENED_TEMPLATE_NAME)
self._no_path = os.path.join(self._work_dir, CLOSED_TEMPLATE_NAME)
for path in (self._backup_path, self._yes_path, self._no_path):
if os.path.exists(path):
os.remove(path)
def restore_from_backup(self, password: str) -> bool:
if self.is_backup_exists():
with open(self._backup_path, "r", encoding="utf-8") as f:
backup_content = f.read()
return self._upload_content_aux(backup_content, FTP_USER, password)
return False
def is_backup_exists(self) -> bool:
return os.path.exists(self._backup_path)
def open_course(self, new_date: str, password: str) -> bool:
if self._is_closed and os.path.exists(self._yes_path):
with open(self._yes_path, "r", encoding="utf-8") as f:
page_content = f.read()
else:
page_content = self._page_content
orig_text_begin_idx, orig_text_end_idx = self._get_begin_end_for_edit_text(page_content)
new_sentence = self._replace_date(
page_content[orig_text_begin_idx:orig_text_end_idx], new_date
)
new_page_content = self._insert_new_course_text(new_sentence, page_content)
return self._upload_content_aux(new_page_content, FTP_USER, password)
def close_course(self, password: str) -> bool:
if self._is_closed or not os.path.exists(self._no_path):
return False
with open(self._no_path, "r", encoding="utf-8") as f:
closed_content = f.read()
return self._upload_content_aux(closed_content, FTP_USER, password)
def _save_backup_and_templates(self, page_content: str) -> None:
with open(self._backup_path, "w", encoding="utf-8") as f:
f.write(page_content)
no_template_content = self._read_html(TEMPLATES_TARGETS_PREFIX+".no")
with open(self._no_path, "w", encoding="utf-8") as f:
f.write(no_template_content)
yes_template_content = self._read_html(TEMPLATES_TARGETS_PREFIX+".yes")
with open(self._yes_path, "w", encoding="utf-8") as f:
f.write(yes_template_content)
def _read_html(self, page_address: str) -> str:
with urlopen(page_address) as webpage:
return webpage.read().decode("utf-8")
def _replace_date(self, orig_sentence: str, new_date: str) -> str:
if re.match(self._date_pattern, new_date) is None:
raise ValueError(
"ERROR: PageUpdate::_replace_date : new_date is not in the right format"
)
orig_date = re.search(self._date_pattern, orig_sentence).group(1)
return orig_sentence.replace(orig_date, new_date)
def _get_begin_end_for_edit_text(self, page_content: str) -> Tuple[int, int]:
div_id_idx = page_content.index(IDENTIFIER)
orig_text_begin_idx = div_id_idx + page_content[div_id_idx:].index(">") + 1
orig_text_end_idx = orig_text_begin_idx + page_content[
orig_text_begin_idx:
].index("</")
return orig_text_begin_idx, orig_text_end_idx
def _insert_new_course_text(self, text: str, page_content: str) -> str:
orig_text_begin_idx, orig_text_end_idx = self._get_begin_end_for_edit_text(page_content)
return f"{page_content[:orig_text_begin_idx]}{text}{page_content[orig_text_end_idx:]}"
@staticmethod
def _upload_content_aux(page_content: str, user: str, password: str) -> bool:
try:
file_name = TARGET_FILE_NAME
ftp_password = PageUpdater._decrypt_password(password, ENCRYPTED_PASSWORD)
with FTP(
TARGET_FTP, user, ftp_password
) as ftp, TemporaryDirectory() as dirpath:
content_path = os.path.join(dirpath, file_name)
with open(content_path, "w", encoding="utf-8") as f:
f.write(page_content)
temp_orig_file_name = file_name + ".orig.bkp"
ftp.rename(file_name, temp_orig_file_name)
try:
with open(content_path, "rb") as f:
ftp.storbinary(f"STOR {file_name}", f)
ftp.delete(temp_orig_file_name)
except Exception as e:
print(f"ERROR: PageUpdater::_upload_content_aux: Error during FTP upload: {e}")
ftp.rename(temp_orig_file_name, file_name)
return False
except Exception as e2:
print(f"ERROR: PageUpdater::_upload_content_aux: Cannot connect to FTP: {e2}")
return False
return True
@staticmethod
def _decrypt_password(key_pass: str, encrypted_pass: bytes) -> str:
key_pass_bytes = (
sha256(key_pass.encode("utf-8"))
.hexdigest()[: len(encrypted_pass)]
.encode("utf-8")
)
return bytes(x ^ y for x, y in zip(key_pass_bytes, encrypted_pass)).decode(
"utf-8"
)
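# Hedged note (not part of the original module): _decrypt_password is an XOR against a
# SHA-256-derived key stream, so the matching "encrypt" step is the same XOR run the other
# way. The key phrase and plaintext below are made up.
if __name__ == "__main__":
    key_stream = sha256("key phrase".encode("utf-8")).hexdigest()[:8].encode("utf-8")
    encrypted = bytes(x ^ y for x, y in zip(key_stream, "password".encode("utf-8")))
    print(PageUpdater._decrypt_password("key phrase", encrypted))  # -> password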
| 6,251 | 2,096 |
"""
Tests for the base module
"""
from vlnm.normalizers.base import (
FormantGenericNormalizer,
FormantSpecificNormalizer,
FormantsTransformNormalizer,
Normalizer)
from tests.helpers import Helper
class TestBaseNormalizers(Helper.TestNormalizerBase):
"""
Tests for the base Normalizer class.
"""
def test_normalizer_instantiation(self):
"""Base Normalizer class cannot be instantiated"""
with self.assertRaises(TypeError):
Normalizer()
def test_formant_generic_normalizer_instantiation(self):
"""Base FormantGenericNormalizer class cannot be instantiated"""
with self.assertRaises(TypeError):
FormantGenericNormalizer()
def test_formant_specific_normalizer_instantiation(self):
"""Base FormantsSpecificNormalizer class cannot be instantiated"""
with self.assertRaises(TypeError):
FormantSpecificNormalizer()
def test_formant_transform_normalizer_instantiation(self):
"""Base FormantsTransformNormalizer class cannot be instantiated"""
with self.assertRaises(TypeError):
FormantsTransformNormalizer()
def test_config_default(self):
"""Check default config"""
expected = dict(columns=[], keywords=[], options=dict(), outputs=[])
actual = self.normalizer()
self.assertDictEqual(actual.config, expected)
def test_config_merged(self):
"""Check config merged in subclass"""
class Subclass(Normalizer):
"""Test sub-class"""
config = dict(options=dict(transform=True))
expected = dict(columns=[], keywords=[], options=dict(transform=True), outputs=[])
actual = Subclass()
self.assertDictEqual(actual.config, expected)
| 1,782 | 480 |
#!/usr/bin/env python3
import os
import sys
import time
import traceback
from itertools import combinations
from multiprocessing.pool import ThreadPool
from exchangebase import ExchangeBase
from exchangepair import ExchangePair
from gdaxapi import Gdax
from krakenapi import Kraken
class Arbiter:
def __init__(self):
# Set up 'exchanges' dictionary to hold all of the exchanges
self.exchanges = {"kraken": Kraken(), "gdax": Gdax()}
# exchanges["gemini"] = Gemini()
self.cutoff = 0.1 # %gain on the trade
self.exchangePairs = []
for exchange in combinations(self.exchanges.values(), 2): # 2 for pairs, 3 for triplets, etc
self.exchangePairs.append(ExchangePair(self.cutoff, exchange[0], exchange[1]))
self.arbitrar = "USD"
self.lastKey = ""
for exchange in self.exchanges.values():
exchange.set_arbitrar(self.arbitrar)
if exchange.valueWallet.currency != self.arbitrar:
self.lastKey = exchange.valueWallet.currency
self.trades = []
# First trade loses money, but gets the ball rolling
self.totalGain = 1
self.pool = ThreadPool(processes=2)
def run(self):
os.system('clear')
# always print out how much money there is in each wallet that has money
for exchName, exchange in self.exchanges.items():
print(exchName)
for walletName, wallet in exchange.wallets.items():
if wallet.amount > 0:
print(wallet.currency, ":", round(wallet.amount, 5))
print()
for exchange in self.exchangePairs:  # each pairwise combination of exchanges
# Check to make sure exactly one has USD
arbitrar_exchange = 0
if exchange[0].valueWallet.currency == self.arbitrar:
arbitrar_exchange = 1
if exchange[1].valueWallet.currency == self.arbitrar:
arbitrar_exchange += 2
if arbitrar_exchange == 0 or arbitrar_exchange == 3:
continue
i = 1
try:
diffp = exchange.get_diff(self.lastKey)
last = exchange.last
goal = 0
if arbitrar_exchange == 1:
# goal = exchange.runningAverages[lastKey] + cutoff/2
goal = self.cutoff / 2
# goal = last + cutoff if last + cutoff > minimum else minimum
print("goal : >" + str("%.3f" % goal) + "%")
if arbitrar_exchange == 2:
# goal = exchange.runningAverages[lastKey] - cutoff/2
goal = -self.cutoff / 2
# goal = last - cutoff if last - cutoff < maximum else maximum
print("goal : <" + str("%.3f" % goal) + "%")
print()
if diffp >= goal and arbitrar_exchange == 1 \
or diffp <= goal and arbitrar_exchange == 2:
sell_exchange = 1 if arbitrar_exchange == 1 else 0
buy_exchange = 0 if arbitrar_exchange == 1 else 1
# buy_symbol, buy_rate, lastKey = exchange.buy(buy_exchange)
# Do the buys and sells asynchronously
async_sell = self.pool.apply_async(ExchangeBase.sell, (exchange[sell_exchange],))
async_buy = self.pool.apply_async(ExchangeBase.buy, (exchange[buy_exchange], self.lastKey))
buy_symbol, buy_rate = async_buy.get()
sell_symbol, sell_rate = async_sell.get()
exchange.last = diffp
total_value = exchange[buy_exchange].get_value() + exchange[sell_exchange].get_value()
# last = difference between exchanges on last trade
real_diff = exchange.last - last
# divide by 2 bc we only make money on money in crypto,
# then again because we only make money in 1 direction (pos or neg)
real_gain = (sell_rate / buy_rate - 1) / 2 * 100
self.totalGain *= 1 + real_gain / 100
localtime = time.asctime(time.localtime(time.time()))
self.trades.append(
"Sold " + sell_symbol + " at " + str(sell_rate) + " on " + exchange[sell_exchange].get_name()
+ "; Bought " + buy_symbol + " at " + str(buy_rate) + " on " + exchange[
buy_exchange].get_name()
+ "; diff: " + str("%.3f" % exchange.last) + "%; gain: " + str("%.3f" % real_diff) + "%"
+ "\n\tReal Gain: " + str("%.3f" % real_gain) + "%; Total (multiplier): "
+ str("%.6f" % self.totalGain) + "; time: " + localtime
+ "\n\t\tTotal Value of portfolio: " + str(total_value))
for trade in self.trades:
print(trade)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
localtime = time.asctime(time.localtime(time.time()))
self.trades.append("Unexpected " + exc_type.__name__ +
" at " + fname + ":" + str(exc_tb.tb_lineno) +
" on " + localtime + ": \"" + str(e) + "\"")
print(self.trades[-1])
print(traceback.format_exc())
time.sleep(max(2 * i, 2))
# So we don't get rate limited by exchanges
time.sleep(max(2 * i, 2))
if __name__ == "__main__":
arbiter = Arbiter()
# Infinite loop
while True:
try:
arbiter.run()
except KeyboardInterrupt:
print("Goodbye.")
break
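# Worked example (illustrative, not part of the original script) of the gain math in run():
# selling at 101.0 on one exchange while buying at 100.0 on the other gives
# real_gain = (101.0 / 100.0 - 1) / 2 * 100 = 0.5%, and totalGain is then multiplied by
# 1 + 0.5 / 100 = 1.005.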
| 5,934 | 1,735 |
import boto3
import sys
import numpy as np
import random
import time
# endpoint_url = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
endpoint_url = 'https://mturk-requester.us-east-1.amazonaws.com'
## Remember to put the AWS credentials in ~/.aws/credentials like below:
#######
#[default]
#aws_access_key_id = XXXXX
#aws_secret_access_key = XXXXXX
client = boto3.client('mturk',endpoint_url = endpoint_url,region_name='us-east-1')
#Number of participants
number_participants=int(sys.argv[1])
result_hits= client.list_hits()
number_of_parallel_hits=len(result_hits['HITs'])
vector_completed_experiments = np.zeros(number_of_parallel_hits)
# Check that all the experiments have been completed
while np.mean(vector_completed_experiments) != number_participants:
result_hits= client.list_hits()
number_of_parallel_hits=len(result_hits['HITs'])
vector_completed_experiments=np.zeros(number_of_parallel_hits)
for i in range(number_of_parallel_hits):
hits_completed=int(result_hits['HITs'][i]['NumberOfAssignmentsCompleted'])
vector_completed_experiments[i]=hits_completed
if hits_completed != number_participants:
## Check whether the HIT needs to be extended (both available and pending assignment counts must be zero before extending)
if int(result_hits['HITs'][i]['NumberOfAssignmentsAvailable']) == 0 and int(result_hits['HITs'][i]['NumberOfAssignmentsPending']) == 0:
# There is a little lag when checking whether the HIT has been completed; wait 30 seconds to avoid this issue
time.sleep(30)
result_hits= client.list_hits()
hits_completed=int(result_hits['HITs'][i]['NumberOfAssignmentsCompleted'])
if hits_completed < number_participants and hits_completed > 0:
hit = result_hits['HITs'][i]['HITId']
#The request token should always be unique for each additional assignment
request_token= 'Request_{}_{}_{}'.format(hit,random.randint(1,100000),hits_completed)
print("Extending the HIT for the following ID: {}".format(hit))
client.create_additional_assignments_for_hit(HITId = hit, NumberOfAdditionalAssignments=1, UniqueRequestToken=request_token)
#Sleep for 10 minutes..
print("Sleeping for 10 minutes...")
time.sleep(600)
print("Completed participants:",vector_completed_experiments)
| 2,289 | 825 |
# def get_param_traj_file_path(dir_name, net_name, index):
# return f'{dir_name}/{net_name}_{index}.txt'
import os
from datetime import datetime
def get_current_timestamp():
return datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
def get_project_dir():
project_dir = os.path.abspath(os.path.join(os.path.abspath(__file__), '..', '..', '..'))
return project_dir
def get_run_name(args):
if args.additional_notes == "":
add_note = ""
else:
add_note = f'_additional_notes_{args.additional_notes}'
return f'optimizer_{args.optimizer}_env_{args.env}_time_step_{args.num_timesteps}_' \
f'normalize_{args.normalize}_n_steps_{args.n_steps}_nminibatches_{args.nminibatches}_seed_{args.seed}' \
f'_run_{args.run_num}' \
f'{add_note}'
def get_dir_path_for_this_run(args, proj_dir=None):
if proj_dir is not None:
return f'{proj_dir}/stable_baselines/{args.alg}/{get_run_name(args)}'
else:
return f'{get_project_dir()}/stable_baselines/{args.alg}/{get_run_name(args)}'
def get_log_dir(this_run_dir):
return f"{this_run_dir}/the_log_dir"
def get_save_dir(this_run_dir):
return f"{this_run_dir}/the_save_dir"
def get_test_data_dir(this_run_dir):
return f"{this_run_dir}/test_data"
def get_full_params_dir(this_run_dir):
return f"{this_run_dir}/full_params"
def get_aug_plot_dir(this_run_dir):
return f"{this_run_dir}/aug_plot_dir"
def get_intermediate_data_dir(this_run_dir, params_scope="pi"):
return f"{this_run_dir}/{params_scope}_intermediate_data"
def get_eval_losses_file_path(dir_name, total_timesteps):
return f'{dir_name}/eval_loss_{total_timesteps}.hdf5'
def get_full_param_traj_file_path(dir_name, index):
return f'{dir_name}/all_params_{index}.txt'
def get_plot_dir(args):
return f'{get_project_dir()}/plots/{args.alg}/{get_current_timestamp()}_{get_run_name(args)}'
def get_cma_plot_dir(plot_dir, n_comp_to_use, run_num, origin):
return f'{plot_dir}/cma/cma_n_comp_{n_comp_to_use}_origin_{origin}_run_num_{run_num}'
def get_cma_and_then_ppo_plot_dir(plot_dir, pca_indexes, run_num, cma_num_steps, ppo_num_steps, origin):
return f'{plot_dir}/cma_and_then_ppo/cma_and_then_ppo_pca_indexes_{pca_indexes}' \
f'_ppo_num_steps_{ppo_num_steps}_cma_num_steps_{cma_num_steps}_origin_{origin}_run_num_{run_num}'
def get_other_pcs_plane_plot_dir(plot_dir, other_pcs):
return f'{plot_dir}/other_pcs_{other_pcs}'
def get_ppos_plot_dir(plot_dir, n_comp_to_use, cma_run_num):
return f'{plot_dir}/ppos/ppos_n_comp_{n_comp_to_use}_run_num_{cma_run_num}'
def get_first_n_pc1_vs_V_plot_dir(plot_dir, granularity):
return f'{plot_dir}/first_n_pc1_vs_V/first_n_pc1_vs_V_granularity_{granularity}'
def get_plane_angles_vs_final_plane_along_the_way_plot_dir(plot_dir, n_comp_to_use):
return f'{plot_dir}/plane_angles_vs_final_plane/plane_angles_vs_final_plane_n_comp_to_use_{n_comp_to_use}'
def get_pcs_filename(intermediate_dir, n_comp):
return f"{intermediate_dir}/n_comp_{n_comp}_pcs"
def get_mean_param_filename(intermediate_dir):
return f"{intermediate_dir}/mean_param"
def get_explain_ratios_filename(intermediate_dir, n_comp):
return f"{intermediate_dir}/n_comp_{n_comp}_explain_ratios"
def get_projected_full_path_filename(intermediate_dir, n_comp, pca_center, which_components=(1,2)):
return f"{intermediate_dir}/n_comp_{n_comp}_pca_center_{pca_center}_which_components_{which_components}_projected_full_path"
def get_eval_returns_filename(intermediate_dir, eval_string, n_comp, pca_center, which_components=(1,2)):
return f"{intermediate_dir}/{eval_string}_n_comp_{n_comp}_pca_center_{pca_center}_which_components_{which_components}eval_returns"
def get_projected_finals_eval_returns_filename(intermediate_dir, n_comp_start, np_comp_end, pca_center):
return f"{intermediate_dir}/n_comp_start_{n_comp_start}_np_comp_end_{np_comp_end}_pca_center_{pca_center}eval_returns"
def get_cma_returns_dirname(intermediate_dir, n_comp, run_num):
return f"{intermediate_dir}/cma/cma_n_comp_{n_comp}_run_num_{run_num}"
def get_ppos_returns_dirname(intermediate_dir, n_comp, run_num):
return f"{intermediate_dir}/ppos/ppos_n_comp_{n_comp}_run_num_{run_num}"
def get_cma_and_then_ppo_run_dir(intermediate_dir, pca_indexes, run_num, cma_steps):
return f"{intermediate_dir}/cma_and_then_ppo/ctp_pca_index_{pca_indexes}_cma_steps_{cma_steps}_run_num_{run_num}"
def get_ppo_part(this_run_dir):
return f"{this_run_dir}/ppo_part"
if __name__ == '__main__':
print(get_log_dir("a", 1, "s", False, 0)) | 4,623 | 1,876 |
from __future__ import print_function
import pytest
from tempfile import mkdtemp
from glob import glob
import shutil
import fnmatch
import os
import os.path
import sami
TEST_DIR = os.path.join(os.path.split(__file__)[0], "test_data")
# Note: if the test data is changed, then these lists must be updated
# (too hard to automate!)
bias_files = ("22apr10035", "22apr10036", "22apr10037",
"22apr20035", "22apr20036", "22apr20037")
dark_files = ("22apr10001", "22apr10002", "22apr10003",
"22apr20001", "22apr20002", "22apr20003")
lflat_files = ("14apr10027", "22apr10088",
"14apr20027", "22apr20088")
tlm_files = ("22apr10074", "22apr20074")
flat_files = tlm_files
arc_files = ("22apr10075", "22apr20075")
obj_files = ("22apr10078", "22apr20078", "22apr10079", "22apr20079")
all_files = set(bias_files + dark_files + lflat_files + tlm_files + flat_files + arc_files + obj_files)
def find_files(path, pattern):
"""From:
http://stackoverflow.com/questions/2186525/use-a-glob-to-find-files-recursively-in-python
"""
matches = []
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, pattern):
matches.append(filename)
return matches
@pytest.fixture(scope='module')
def reduction_dir(request):
tmpdir = mkdtemp(prefix="sami_test")
print(tmpdir)
def fin():
shutil.rmtree(tmpdir)
pass
request.addfinalizer(fin)
return tmpdir
@pytest.mark.incremental
class TestSAMIManagerReduction:
@pytest.fixture
def sami_manager(self, reduction_dir):
mngr = sami.manager.Manager(reduction_dir + "/test/", fast=True, debug=True)
return mngr
def test_pytest_not_capturing_fds(self, pytestconfig):
# Note: pytest must be run in sys capture mode, instead of file descriptor capture mode
# otherwise calls to "aaorun" seem to fail. This next test ensures that is the case.
print("If this test fails, then you must run pytest with the option '--capture=sys'.")
assert pytestconfig.getoption("capture") == "sys"
def test_tests(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager
assert isinstance(mngr, sami.manager.Manager)
print(reduction_dir)
assert isinstance(reduction_dir, str)
# assert os.path.exists(reduction_dir + "/test")
def test_import_data(self, sami_manager, raw_test_data):
mngr = sami_manager # type: sami.Manager
mngr.import_dir(raw_test_data)
print(len(mngr.file_list))
print(len(all_files))
assert len(mngr.file_list) == len(all_files)
def test_reduce_bias(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager # type: sami.Manager
mngr.reduce_bias()
# Check that files actually generated
for base in bias_files:
assert base + "red.fits" in find_files(reduction_dir + "/test/reduced/bias", base + "*")
def test_combine_bias(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager # type: sami.Manager
mngr.combine_bias()
# Check that files actually generated
assert "BIAScombined.fits" in find_files(reduction_dir + "/test/reduced/bias/ccd_1", "*.fits")
assert "BIAScombined.fits" in find_files(reduction_dir + "/test/reduced/bias/ccd_2", "*.fits")
def test_reduce_dark(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager # type: sami.Manager
mngr.reduce_dark()
# Check that files actually generated
for base in dark_files:
assert base + "red.fits" in find_files(reduction_dir + "/test/reduced/dark", base + "*")
def test_combine_dark(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager # type: sami.Manager
mngr.combine_dark()
# Check that files actually generated
assert "DARKcombined1800.fits" in find_files(reduction_dir + "/test/reduced/dark/ccd_1", "*.fits")
assert "DARKcombined1800.fits" in find_files(reduction_dir + "/test/reduced/dark/ccd_2", "*.fits")
def test_reduce_lflat(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager # type: sami.Manager
mngr.reduce_lflat()
# Check that files actually generated
for base in lflat_files:
assert base + "red.fits" in find_files(reduction_dir + "/test/reduced/lflat", base + "*")
def test_combine_lflat(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager # type: sami.Manager
mngr.combine_lflat()
# Check that files actually generated
assert "LFLATcombined.fits" in find_files(reduction_dir + "/test/reduced/lflat/ccd_1", "*.fits")
assert "LFLATcombined.fits" in find_files(reduction_dir + "/test/reduced/lflat/ccd_2", "*.fits")
def test_make_tlm(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager
mngr.make_tlm()
# Check that files actually generated
for base in tlm_files:
assert base + "tlm.fits" in find_files(reduction_dir + "/test/", base + "*")
def test_reduce_arc(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager
mngr.reduce_arc()
# Check that files actually generated
for base in arc_files:
assert base + "red.fits" in find_files(reduction_dir + "/test/", base + "*")
def test_reduce_fflat(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager
mngr.reduce_fflat()
for base in flat_files:
assert base + "red.fits" in find_files(reduction_dir + "/test/", base + "*")
def test_reduce_object(self, sami_manager, raw_test_data, reduction_dir):
mngr = sami_manager
mngr.reduce_object()
for base in obj_files:
assert base + "red.fits" in find_files(reduction_dir + "/test/", base + "*")
| 6,018 | 2,153 |
# -*- coding: utf-8 -*-
from model.contact import Contact
def test_add_contact(app):
old_contacts = app.contact.get_contact_list()
contact = Contact(firstname='firstname', middlename='middlename', lastname='lastname', nickname='nick', title='title', company='company', address='address', home='home phone', mobile='mobile', work='work phone', fax='fax', email='email 1', email2='email 2', email3='email 3', homepage='homepage', bday='6', bmonth='August', byear='1980', aday='8', amonth='January', ayear='2000', address2='Address 2', phone2='phone 2', notes='notes')
app.contact.create(contact)
# print(new_contacts)
assert len(old_contacts) + 1 == app.contact.count()
new_contacts = app.contact.get_contact_list()
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
# def test_add_empty_contact(app):
# old_contacts = app.contact.get_contact_list()
# contact = Contact(firstname='', middlename='', lastname='', nickname='', title='', company='', address='', home='', mobile='', work='', fax='', email='', email2='', email3='', homepage='', bday='', bmonth='-', byear='', aday='', amonth='-', ayear='', address2='', phone2='', notes='')
# app.contact.create(contact)
# new_contacts = app.contact.get_contact_list()
# assert len(old_contacts) + 1 == len(new_contacts)
# old_contacts.append(contact)
# assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
| 1,536 | 539 |
# -*- coding:utf-8 -*-
"""
Description:
Inventory Class
Usage:
from AntShares.Network.Inventory import Inventory
"""
from AntShares.IO.MemoryStream import MemoryStream
from AntShares.IO.BinaryWriter import BinaryWriter
from AntShares.Cryptography.Helper import *
from AntShares.Helper import *
import binascii
class Inventory(object):
"""docstring for Inventory"""
def __init__(self):
super(Inventory, self).__init__()
self.hash = None
def ensureHash(self):
self.hash = big_or_little(binascii.hexlify(
bin_dbl_sha256(binascii.unhexlify(self.getHashData()))))
return self.hash
def getHashData(self):
ms = MemoryStream()
w = BinaryWriter(ms)
self.serializeUnsigned(w)
return ms.toArray()
def getScriptHashesForVerifying(self):
pass
def serialize(self):
pass
def serializeUnsigned(self):
pass
def deserialize(self):
pass
def deserializeUnsigned(self):
pass
| 1,038 | 325 |
#!/usr/bin/python
# Copyright (c) 2015-2017 Martin F. Falatic
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
from __future__ import print_function
from collections import OrderedDict
import re
def TranslateColorTable(infile):
''' Usage: TranslateColorTable("tkcolors") '''
DofL = OrderedDict()
with open(infile) as f:
for line in f:
m = re.match(r"^(.+?)\t(.+?)\t(.+?)\t(.+?)$", line)
if m:
name = m.group(1)
red = int(m.group(2))
grn = int(m.group(3))
blu = int(m.group(4))
rgb = '{0:02X}{1:02X}{2:02X}'.format(red, grn, blu)
if rgb in DofL.keys():
DofL[rgb].append(name)
else:
DofL[rgb] = [name]
print('COLORS_DICT = OrderedDict([')
for d in DofL:
print(' (\'{0}\', {1}),'.format(d, repr(DofL[d])))
print('])')
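    # Illustrative output for one hypothetical input row "alice blue<TAB>240<TAB>248<TAB>255"
    # (the row is made up for this sketch, not taken from the shipped colors_tk.orig):
    #   ('F0F8FF', ['alice blue']),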
if __name__ == "__main__":
TranslateColorTable("colors_tk.orig")
| 1,585 | 563 |
#!/usr/bin/env python3
import os.path
import random
from pathlib import Path
from random import randint
from pymongo import MongoClient
from web3.logs import DISCARD
from broker import cfg
from broker._utils import _log
from broker._utils._log import console_ruler
from broker._utils.tools import _time, _timestamp, countdown, is_process_on, log, run
from broker._utils.web3_tools import get_tx_status
from broker._utils.yaml import Yaml
from broker.libs.mongodb import BaseMongoClass
from broker.submit_base import SubmitBase
from broker.test_setup._users import users
from broker.utils import print_tb
yaml_files = ["job_nas.yaml"]
Ebb = cfg.Ebb
cfg.IS_FULL_TEST = True
_log.ll.LOG_FILENAME = Path.home() / ".ebloc-broker" / "test.log"
provider_addresses = [
"0x3e6FfC5EdE9ee6d782303B2dc5f13AFeEE277AeA",
"0x765508fc8f78a465f518ae79897d0e4b249e82dc",
"0x38cc03c7e2a7d2acce50045141633ecdcf477e9a",
"0xeab50158e8e51de21616307a99c9604c1c453a02",
]
def create_cppr_job_script():
"""Create cppr slurm job script to be submitted."""
registered_data_hashes_small = [
"b6aaf03752dc68d625fc57b451faa2bf",
"f1de03edab51f281815c3c1e5ecb88c6",
"082d2a71d86a64250f06be14c55ca27e",
"03919732a417cb1d14049844b9de0f47",
"983b9fe8a85b543dd5a4a75d031f1091",
"f71df9d36cd519d80a3302114779741d",
"c0fee5472f3c956ba759fd54f1fe843e",
"63ffd1da6122e3fe9f63b1e7fcac1ff5",
"9e8918ff9903e3314451bf2943296d31",
"eaf488aea87a13a0bea5b83a41f3d49a",
"e62593609805db0cd3a028194afb43b1",
"3b0f75445e662dc87e28d60a5b13cd43",
"ebe53bd498a9f6446cd77d9252a9847c",
"f82aa511f8631bfc9a82fe6fa30f4b52",
"761691119cedfb9836a78a08742b14cc",
"f93b9a9f63447e0e086322b8416d4a39",
]
registered_data_hashes_medium = [
"050e6cc8dd7e889bf7874689f1e1ead6",
"9d5d892a63b5758090258300a59eb389",
"779745f315060d1bc0cd44b7266fb4da",
"fe801973c5b22ef6861f2ea79dc1eb9c",
"0d6c3288ef71d89fb93734972d4eb903",
"4613abc322e8f2fdeae9a5dd10f17540",
"dd0fbccccf7a198681ab838c67b68fbf",
"45281dfec4618e5d20570812dea38760",
"fa64e96bcee96dbc480a1495bddbf53c",
"8f6faf6cfd245cae1b5feb11ae9eb3cf",
"1bfca57fe54bc46ba948023f754521d6",
]
hash_small_data = random.choice(registered_data_hashes_small)
hash_med_data = random.choice(registered_data_hashes_medium)
fn = Path.home() / "test_eblocbroker" / "run_cppr" / "run.sh"
f = open(fn, "w+")
f.write("#!/bin/bash\n")
f.write("#SBATCH -o slurm.out # STDOUT\n")
f.write("#SBATCH -e slurm.err # STDERR\n")
f.write("#SBATCH --mail-type=ALL\n\n")
f.write("export OMP_NUM_THREADS=1\n")
f.write("current_date=$(LANG=en_us_88591; date)\n")
f.write(f"DATA_HASH='{hash_small_data}'\n")
f.write("DATA1_DIR='../data_link/'$DATA_HASH'/'\n")
f.write("echo ' * '$current_date > output.log\n")
f.write("find $DATA1_DIR -name '*.max' -print0 | while read -d $'\\0' file\n")
f.write("do\n")
f.write(" echo $file >> output.log\n")
f.write(" (/usr/bin/time -v cppr -a pr $file) >> output.log 2>&1\n")
f.write("done\n")
f.write(f"DATA_HASH='{hash_med_data}'\n")
f.write("DATA2_DIR='../data_link/'$DATA_HASH'/'\n")
f.write("echo ' * '$current_date >> output.log\n")
f.write("find $DATA2_DIR -name '*.max' -print0 | while read -d $'\\0' file\n")
f.write("do\n")
f.write(" echo $file >> output.log\n")
f.write(" (/usr/bin/time -v cppr -a pr $file) >> output.log 2>&1\n")
f.write("done\n")
#
f.write("DATA_HASH='change_folder_hash'\n")
f.write("if [[ '$DATA_HASH' != 'change_folder_hash' ]]; then\n")
f.write(" DATA3_DIR='../data_link/'$DATA_HASH'/'\n")
f.write(" echo ' * '$current_date >> output.log\n")
f.write(" find $DATA3_DIR -name '*.max' -print0 | while read -d $'\\0' file\n")
f.write(" do\n")
f.write(" echo $file >> output.log\n")
f.write(" (/usr/bin/time -v cppr -a pr $file) >> output.log 2>&1\n")
f.write(" done\n")
f.write("fi\n")
f.write("echo ' [ DONE ] ' >> output.log\n")
f.close()
run(["sed", "-i", r"s/\x0//g", fn]) # remove NULL characters from the SBATCH file
return hash_small_data, hash_med_data
def create_nas_job_script(is_small=False):
"""Create NPB3.3-SER slurm job script to be submitted."""
benchmark_names = ["bt", "cg", "ep", "is", "lu", "sp", "ua"]
benchmark_name = random.choice(benchmark_names)
output_fn = "output.log"
hash_str = random.getrandbits(128)
fn = Path.home() / "test_eblocbroker" / "NPB3.3-SER_source_code" / "run.sh"
f = open(fn, "w+")
f.write("#!/bin/bash\n")
f.write("#SBATCH -o slurm.out # STDOUT\n")
f.write("#SBATCH -e slurm.err # STDERR\n")
f.write("#SBATCH --mail-type=ALL\n\n")
f.write(f"make {benchmark_name} CLASS=A > {output_fn}\n")
f.write(f"/usr/bin/time -v bin/{benchmark_name}.A.x >> {output_fn}\n")
if not is_small:
f.write(f"make {benchmark_name} CLASS=B >> {output_fn}\n")
f.write(f"/usr/bin/time -v bin/{benchmark_name}.B.x >> {output_fn}\n")
f.write(f"make {benchmark_name} CLASS=C >> {output_fn}\n")
f.write(f"/usr/bin/time -v bin/{benchmark_name}.C.x >> {output_fn}\n")
f.write(f"# {hash_str}\n")
f.close()
run(["sed", "-i", r"s/\x0//g", fn]) # remove NULL characters from the SBATCH file
return benchmark_name
def pre_submit(storage_ids, provider_address):
is_pass = True
required_confs = 0
yaml_fn = Path.home() / "ebloc-broker" / "broker" / "test_setup" / "nas" / "job_nas.yaml"
yaml_cfg = Yaml(yaml_fn)
yaml_cfg["config"]["provider_address"] = provider_address
for storage_id in storage_ids:
yaml_cfg["config"]["source_code"]["storage_id"] = storage_id
benchmark_name = create_nas_job_script(is_small=True)
submit_base = SubmitBase(yaml_cfg.path)
tx_hash = submit_base.submit(is_pass, required_confs)
if required_confs >= 1:
tx_receipt = get_tx_status(tx_hash, is_silent=True)
if tx_receipt["status"] == 1:
processed_logs = Ebb._eBlocBroker.events.LogJob().processReceipt(tx_receipt, errors=DISCARD)
try:
if processed_logs:
job_result = vars(processed_logs[0].args)
job_result["tx_hash"] = tx_hash
job_result["submitted_job_kind"] = f"nas_{benchmark_name}"
log(job_result)
except IndexError:
log(f"E: Tx({tx_hash}) is reverted")
# breakpoint() # DEBUG
def main():
console_ruler(f"NEW_TEST {Ebb.get_block_number()}")
if not is_process_on("mongod", "mongod"):
raise Exception("mongodb is not running in the background")
mc = MongoClient()
ebb_mongo = BaseMongoClass(mc, mc["ebloc_broker"]["tests"])
storage_ids = ["eudat", "gdrive", "ipfs"]
ipfs_ids = ["ipfs_gpg", "ipfs"]
# for provider_address in provider_addresses:
# pre_submit(storage_ids, provider_address)
benchmarks = ["nas", "cppr"]
test_dir = Path.home() / "ebloc-broker" / "broker" / "test_setup" / "nas"
nas_yaml_fn = test_dir / "job_nas.yaml"
cppr_yam_fn = test_dir / "job_cppr.yaml"
counter = 0
yaml_cfg = None
# storage = None
for _ in range(60):
for _ in range(2): # submitted as batch is faster
for idx, provider_address in enumerate(provider_addresses):
# yaml_cfg["config"]["data"]["data3"]["storage_id"] = random.choice(storage_ids)
storage_id = (idx + counter) % len(storage_ids)
selected_benchmark = random.choice(benchmarks)
storage = storage_ids[storage_id]
if storage == "ipfs":
storage = random.choice(ipfs_ids)
if selected_benchmark == "nas":
log(f" * Submitting job from NAS Benchmark to [green]{provider_address}", "bold blue")
yaml_cfg = Yaml(nas_yaml_fn)
benchmark_name = create_nas_job_script()
elif selected_benchmark == "cppr":
log(f" * Submitting job with cppr datasets to [green]{provider_address}", "bold blue")
yaml_cfg = Yaml(cppr_yam_fn)
hash_small_data, hash_med_data = create_cppr_job_script()
yaml_cfg["config"]["data"]["data1"]["hash"] = hash_small_data
yaml_cfg["config"]["data"]["data2"]["hash"] = hash_med_data
yaml_cfg["config"]["data"]["data3"]["storage_id"] = storage
small_datasets = Path.home() / "test_eblocbroker" / "dataset_zip" / "small"
dirs = [d for d in os.listdir(small_datasets) if os.path.isdir(os.path.join(small_datasets, d))]
dir_name = random.choice(dirs)
yaml_cfg["config"]["data"]["data3"]["path"] = str(small_datasets / dir_name)
yaml_cfg["config"]["source_code"]["storage_id"] = storage
yaml_cfg["config"]["provider_address"] = provider_address
try:
submit_base = SubmitBase(yaml_cfg.path)
submission_date = _time()
submission_timestamp = _timestamp()
requester_address = random.choice(users).lower()
yaml_cfg["config"]["requester_address"] = requester_address
log(f"requester={requester_address}", "bold")
tx_hash = submit_base.submit(is_pass=True)
log(f"tx_hash={tx_hash}", "bold")
tx_receipt = get_tx_status(tx_hash, is_silent=True)
if tx_receipt["status"] == 1:
processed_logs = Ebb._eBlocBroker.events.LogJob().processReceipt(tx_receipt, errors=DISCARD)
job_result = vars(processed_logs[0].args)
job_result["submit_date"] = submission_date
job_result["submit_timestamp"] = submission_timestamp
job_result["tx_hash"] = tx_hash
if selected_benchmark == "nas":
job_result["submitted_job_kind"] = f"{selected_benchmark}_{benchmark_name}"
elif selected_benchmark == "cppr":
job_result["submitted_job_kind"] = f"{selected_benchmark}_{hash_small_data}_{hash_med_data}"
ebb_mongo.add_item(tx_hash, job_result)
log(job_result)
countdown(seconds=5, is_silent=True)
except Exception as e:
print_tb(e)
counter += 1
sleep_time = randint(200, 400)
countdown(sleep_time)
if __name__ == "__main__":
main()
| 11,024 | 4,378 |
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import torch as th
from components.transforms import _to_batch, _from_batch, _check_inputs_validity, _tdim, _vdim
class DQN(nn.Module):
def __init__(self, input_shapes, n_actions, output_type=None, output_shapes=None, layer_args=None, args=None):
super(DQN, self).__init__()
self.args = args
self.n_actions = n_actions
assert output_type is not None, "you have to set an output_type!"
self.output_type = output_type
# Set up input regions automatically if required (if sensible)
self.input_shapes = {}
self.input_shapes.update(input_shapes)
# Set up output_shapes automatically if required
self.output_shapes = {}
self.output_shapes["fc2"] = self.n_actions # output
if output_shapes is not None:
self.output_shapes.update(output_shapes)
# Set up layer_args automatically if required
self.layer_args = {}
self.layer_args["fc1"] = {"in": self.input_shapes["main"], "out":64}
self.layer_args["fc2"] = {"in": self.layer_args["fc1"]["out"], "out": self.output_shapes["fc2"]}
if layer_args is not None:
self.layer_args.update(layer_args)
# Set up network layers
self.fc1 = nn.Linear(self.layer_args["fc1"]["in"], self.layer_args["fc1"]["out"])
self.fc2 = nn.Linear(self.layer_args["fc2"]["in"], self.layer_args["fc2"]["out"])
def init_hidden(self, batch_size, *args, **kwargs):
"""
model has no hidden state, but we will pretend otherwise for consistency
"""
vbl = Variable(th.zeros(batch_size, 1, 1))
tformat = "bs*t*v"
return vbl.cuda() if self.args.use_cuda else vbl, tformat
def forward(self, inputs, tformat, loss_fn=None, hidden_states=None, **kwargs):
_check_inputs_validity(inputs, self.input_shapes, tformat)
# Execute model branch "main"
x, params, tformat = _to_batch(inputs["main"], tformat)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = _from_batch(x, params, tformat)
losses = None
if self.output_type in ["policies"]:
log_softmax = kwargs.get("log_softmax", False)
if log_softmax:
x = F.log_softmax(x, dim=_vdim(tformat))
else:
x = F.softmax(x, dim=_vdim(tformat))
if loss_fn is not None:
losses, _ = loss_fn(x, tformat=tformat)
return x, hidden_states, losses, tformat # output, hidden state, losses
class MLPEncoder(nn.Module):
def __init__(self, input_shapes, output_shapes={}, layer_args={}, args=None):
super(MLPEncoder, self).__init__()
self.args = args
# Set up input regions automatically if required (if sensible)
self.input_shapes = {}
assert set(input_shapes.keys()) == {"main"}, \
"set of input_shapes does not coincide with model structure!"
self.input_shapes.update(input_shapes)
# Set up layer_args automatically if required
self.output_shapes = {}
self.output_shapes["fc1"] = 64 # output
self.output_shapes.update(output_shapes)
# Set up layer_args automatically if required
self.layer_args = {}
self.layer_args["fc1"] = {"in":input_shapes["main"], "out":output_shapes["main"]}
self.layer_args.update(layer_args)
#Set up network layers
self.fc1 = nn.Linear(self.input_shapes["main"], self.output_shapes["main"])
pass
def forward(self, inputs, tformat):
x, n_seq, tformat = _to_batch(inputs["main"], tformat)
x = F.relu(self.fc1(x))
return _from_batch(x, n_seq, tformat), tformat
class RNN(nn.Module):
def __init__(self, input_shapes, n_actions, output_type=None, output_shapes={}, layer_args={}, args=None, **kwargs):
super(RNN, self).__init__()
self.args = args
self.n_actions = n_actions
assert output_type is not None, "you have to set an output_type!"
# self.output_type=output_type
# Set up input regions automatically if required (if sensible)
self.input_shapes = {}
self.input_shapes.update(input_shapes)
# Set up layer_args automatically if required
self.output_shapes = {}
self.output_shapes["output"] = self.n_actions # output
        if output_shapes is not None:
self.output_shapes.update(output_shapes)
# Set up layer_args automatically if required
self.layer_args = {}
self.layer_args["encoder"] = {"in":self.input_shapes["main"], "out":64}
self.layer_args["gru"] = {"in":self.layer_args["encoder"]["out"], "hidden":64}
self.layer_args["output"] = {"in":self.layer_args["gru"]["hidden"], "out":self.output_shapes["output"]}
self.layer_args.update(layer_args)
# Set up network layers
self.encoder = MLPEncoder(input_shapes=dict(main=self.layer_args["encoder"]["in"]),
output_shapes=dict(main=self.layer_args["encoder"]["out"]))
self.gru = nn.GRUCell(self.layer_args["gru"]["in"], self.layer_args["gru"]["hidden"])
self.output = nn.Linear(self.layer_args["output"]["in"], self.layer_args["output"]["out"])
def init_hidden(self, batch_size=1):
vbl = Variable(th.zeros(batch_size, 1, self.layer_args["gru"]["hidden"]))
tformat = "bs*t*v"
return vbl.cuda() if self.args.use_cuda else vbl, tformat
def forward(self, inputs, hidden_states, tformat, loss_fn=None, **kwargs):
"""
If data contains whole sequences, can pass loss_fn to forward pass in order to generate all losses
automatically.
Can either be operated in sequence mode, or operated step-by-step
"""
_check_inputs_validity(inputs, self.input_shapes, tformat)
_inputs = inputs["main"]
loss = None
t_dim = _tdim(tformat)
assert t_dim == 2, "t_dim along unsupported axis"
t_len = _inputs.shape[t_dim]
loss_x = []
output_x = []
h_list = [hidden_states]
for t in range(t_len):
x = _inputs[:, :, slice(t, t + 1), :].contiguous()
x, tformat = self.encoder({"main":x}, tformat)
x, params_x, tformat_x = _to_batch(x, tformat)
h, params_h, tformat_h = _to_batch(h_list[-1], tformat)
h = self.gru(x, h)
x = self.output(h)
h = _from_batch(h, params_h, tformat_h)
x = _from_batch(x, params_x, tformat_x)
h_list.append(h)
loss_x.append(x)
# we will not branch the variables if loss_fn is set - instead return only tensor values for x in that case
output_x.append(x) if loss_fn is None else output_x.append(x.clone())
if loss_fn is not None:
_x = th.cat(loss_x, dim=_tdim(tformat))
loss = loss_fn(_x, tformat=tformat)[0]
return th.cat(output_x, t_dim), \
th.cat(h_list[1:], t_dim), \
loss, \
tformat
class FCEncoder(nn.Module):
def __init__(self, input_shapes, output_shapes=None, layer_args=None, args=None):
super(FCEncoder, self).__init__()
self.args = args
# Set up input regions automatically if required (if sensible)
self.input_shapes = {}
assert set(input_shapes.keys()) == {"main"}, \
"set of input_shapes does not coincide with model structure!"
self.input_shapes.update(input_shapes)
# Set up layer_args automatically if required
self.output_shapes = {}
self.output_shapes["fc1"] = 64
if output_shapes is not None:
self.output_shapes.update(output_shapes)
# Set up layer_args automatically if required
self.layer_args = {}
self.layer_args["fc1"] = {"in":input_shapes["main"], "out":output_shapes["main"]}
if layer_args is not None:
self.layer_args.update(layer_args)
#Set up network layers
self.fc1 = nn.Linear(self.input_shapes["main"], self.output_shapes["main"])
pass
def forward(self, inputs, tformat):
x, n_seq, tformat = _to_batch(inputs["main"], tformat)
x = F.relu(self.fc1(x))
return _from_batch(x, n_seq, tformat), tformat
| 8,441 | 2,752 |
from datetime import date
nascimento = int(input('Digite o ano de nascimento: '))
idade = date.today().year - nascimento
print(f'Ele tem {idade} anos.')
if idade <= 9:
lugar = 'mirim'
elif idade <= 14:
lugar = 'infantil'
elif idade <= 19:
lugar = 'junior'
elif idade <= 25:
lugar = 'sênior'
else:
lugar = 'master'
print(f'Logo ele pertence a classe dos {lugar}.')
| 401 | 167 |
from .utils import add_weight_decay, AverageMeter, label_img_to_color, check_mkdir | 82 | 29 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 27 18:11:58 2021
@author: TSAI, TUNG-CHEN
@update: 2021/10/05
"""
MODEL_NAME = 'PhysicalCNN'
DIRECTORY = r"../dataset/preprocessed/data/"
WALK = True
SUBSET = 'all'
from wtbd.infer import infer
from wtbd.utils import print_info
from wtbd.data_collectors import SubsetDataCollector
# =============================================================================
#
# =============================================================================
def collect_infer(modelname, directory, walk=True, subset='all'):
data_collector = SubsetDataCollector()
data = data_collector(directory, subset=subset)
print_info(data['info'])
results = infer(modelname, data)
return data, results
# =============================================================================
#
# =============================================================================
if __name__ == '__main__':
data, results = collect_infer(MODEL_NAME,
DIRECTORY,
walk=WALK,
subset=SUBSET)
| 1,159 | 344 |
"""
Probability output for the 排列五 (pick-five) lottery
"""
from lottery import get_history
import pandas as pd
import dash
import dash_html_components as html
import dash_core_components as dcc
CODE = '排列五'
MIN = 1
COUNT = 200
def _calc_prob(num, datas):
    """
    Compute the percentage probability of num appearing in datas.
    e.g.:
    num = 1
    datas = [2, 1, 3]
    returns 33 (i.e. 1 out of 3 draws, as a rounded percentage)
    """
    count = datas.count(num)
    # count/len(datas)
    return round(count/len(datas)*100)
def calc_loc(historys, loc):
"""
{
0: [0.16, 0.16, 0.15, ...],
1: [0.16, 0.16, 0.22, ...],
...
9: [0.16, 0.16, 0.02, ...],
}
"""
history_numbers = [history['result'][loc-1] for history in historys]
result = dict()
for num in range(0,10): #0-9
# result.setdefault(num, [])
prob_list = list()
size = len(history_numbers)
while size >= MIN:
prob_list.append(_calc_prob(num, history_numbers[:size]))
size -= 1
result[num] = prob_list
return result
def gen_xls(historys):
with pd.ExcelWriter('排列五.xlsx') as writer:
for loc in range(1,5+1):
cols1 = ["近%d期" % i for i in range(len(historys), MIN-1, -1)]
data1 = calc_loc(historys, loc)
df1 = pd.DataFrame.from_dict(data1, orient='index', columns=cols1)
df1.to_excel(writer, sheet_name=f"第{loc}位")
def gen_html(historys):
children = [
html.H1(children='排列五分析'),
html.Div(children='数学期望值趋势'),
]
for loc in range(1,5+1):
cols = ["近%d期" % i for i in range(len(historys), MIN-1, -1)]
datas = []
for k,v in calc_loc(historys, loc).items():
data={'type':'line', 'name':k}
data['x'] = cols
data['y'] = v
datas.append(data)
children.append(dcc.Graph(
id=f'{loc}-exp-val-graph',
figure={
'data':datas,
'layout':{
'title':f'第{loc}位趋势'
}
}
))
app = dash.Dash()
app.layout = html.Div(children=children)
app.run_server(debug=True)
if __name__ == '__main__':
historys = get_history(CODE, COUNT)
#gen_xls(historys)
gen_html(historys)
| 2,262 | 867 |
"""
Copyright [2013] [Rackspace]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os.path
import tarfile
from StringIO import StringIO
def parse_build(path=None, fobj=None):
"""Parses build parameters. Returns tuple
(archive, remote)
Where archive is a tar archive and remote is remote url if set.
One of the tuple elements will be null
"""
if path:
for prefix in ('http://', 'https://', 'github.com/', 'git://'):
if path.startswith(prefix):
return None, path
if path.startswith("~"):
path = os.path.expanduser(path)
return _archive_from_folder(path), None
else:
if not fobj:
raise ValueError("Set path or fobj")
return _archive_from_file(fobj), None
def _archive_from_folder(path):
memfile = StringIO()
try:
t = tarfile.open(mode='w', fileobj=memfile)
t.add(path, arcname='.')
return memfile.getvalue()
finally:
memfile.close()
def _archive_from_file(dockerfile):
memfile = StringIO()
try:
t = tarfile.open(mode='w', fileobj=memfile)
if isinstance(dockerfile, StringIO):
dfinfo = tarfile.TarInfo('Dockerfile')
dfinfo.size = dockerfile.len
else:
dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
t.addfile(dfinfo, dockerfile)
return memfile.getvalue()
finally:
memfile.close()
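# --- Hedged usage sketch (not part of the original module; the paths below are made up) ---
if __name__ == '__main__':
    # A remote location is recognised by its prefix and passed through untouched:
    archive, remote = parse_build(path="github.com/example/repo")
    assert archive is None and remote == "github.com/example/repo"
    # A local folder would instead be packed into an in-memory tar archive:
    # archive, remote = parse_build(path="~/my_docker_context")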
| 1,947 | 590 |
from datetime import datetime
today = datetime.today().strftime('%Y-%m-%d %H-%M-%S')
def log_msg(text):
with open(f'./logs/{today}.log', 'a+') as log:
log_time = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
log.write(f'[{log_time}] {text}\n') | 266 | 105 |
Import("env")
import hashlib
import os
import shutil
def _file_md5_hexdigest(fname):
return hashlib.md5(open(fname, 'rb').read()).hexdigest()
def after_build(source, target, env):
if not os.path.exists("builds"):
os.mkdir("builds")
lang = env.GetProjectOption('lang')
target_name = lang.lower()
with open(f"builds/latest_{target_name}.bin.md5", "w") as md5:
print(_file_md5_hexdigest(target[0].path), file = md5)
shutil.copy(target[0].path, f"builds/latest_{target_name}.bin")
env.AddPostAction("$BUILD_DIR/firmware.bin", after_build)
| 581 | 220 |
import re
import os
from bs4 import BeautifulSoup
from . import settings
def html2list(html_string, level='word'):
"""
:param html_string: any ol' html string you've got
level: either 'word' or 'character'. If level='word', elements will be words.
If level='character', elements will be individial characters.
:return: list of elements, making sure not to break up open tags (even if they contain attributes)
Note that any blacklisted tag will not be broken up
Example:
html_str = "<h1>This is a simple header</h1>"
result = html2list(html_str)
result == ['<h1>', 'This ', 'is ', 'a ', 'simple ', 'header', '</h1>']
Blacklisted tag example:
BLACKLISTED_TAGS = ['head']
html_str = "<head><title>Page Title</title></head>"
result = html2list(html_str)
result == ['<head><title>Page Title</title></head>']
"""
# different modes for parsing
CHAR, TAG = 'char', 'tag'
mode = CHAR
cur = ''
out = []
# TODO: use generators
# iterate through the string, character by character
for c in html_string:
# tags must be checked first to close tags
if mode == TAG:
# add character to current element
cur += c
# if we see the end of the tag
if c == '>':
out.append(cur) # add the current element to the output
cur = '' # reset the character
mode = CHAR # set the mode back to character mode
elif mode == CHAR:
# when we are in CHAR mode and see an opening tag, we must switch
if c == '<':
# clear out string collected so far
if cur != "":
out.append(cur) # if we have already started a new element, store it
                cur = c # begin our tag
mode = TAG # swap to tag mode
# if c is a special character, store 'word', store c, continue
elif is_special_character(c):
out.append(cur)
out.append(c)
cur = ''
# otherwise, simply continue building up the current element
else:
if level == 'word':
cur += c
elif level == 'character':
out.append(c)
else:
raise ValueError('level must be "word" or "character"')
# TODO: move this to its own function `merge_blacklisted` or `merge_tags` return to a generator instead of list
cleaned = list()
blacklisted_tag = None
blacklisted_string = ""
for x in out:
if not blacklisted_tag:
for tag in settings.BLACKLISTED_TAGS:
if verified_blacklisted_tag(x, tag):
blacklisted_tag = tag
blacklisted_string += x
break
if not blacklisted_tag:
cleaned.append(x)
else:
if x == "</{0}>".format(blacklisted_tag):
blacklisted_string += x
cleaned.append(blacklisted_string)
blacklisted_tag = None
blacklisted_string = ""
else:
blacklisted_string += x
return cleaned
def check_html(html, encoding=None):
if isinstance(html, BeautifulSoup):
html = html.prettify()
elif os.path.isfile(html):
with open(html, "r", encoding=encoding) as file:
html = file.read()
else:
html = html
return html
def verified_blacklisted_tag(x, tag):
"""
check for '<' + blacklisted_tag + ' ' or '>'
as in: <head> or <head ...> (should not match <header if checking for <head)
"""
initial = x[0:len(tag) + 1 + 1]
blacklisted_head = "<{0}".format(tag)
return initial == (blacklisted_head + " ") or initial == (blacklisted_head + ">")
def add_stylesheet(html_list):
stylesheet_tag = '<link rel="stylesheet" type="text/css" href="{}">'.format(settings.STYLESHEET)
for idx, el in enumerate(html_list):
if "</head>" in el:
            # add at the very end of the head tag because its position matters
head = el.split("</head>")
new_head = head[0] + stylesheet_tag + "</head>" + "".join(head[1:])
html_list[idx] = new_head
return html_list
def extract_tagname(el):
if not is_tag(el):
raise Exception("Not a tag!")
tag_parts = el[el.index('<')+1:el.index('>')].replace("/", "")
return tag_parts.split(" ")[0]
def compare_tags(tag_a, tag_b):
"""
returns markers for deleted, inserted, and combined
"""
tag_parts_a = chart_tag(tag_a)
tag_parts_b = chart_tag(tag_b)
# first test whether we have any new attributes
deleted_attributes = set(tag_parts_a.keys()) - set(tag_parts_b.keys())
inserted_attributes = set(tag_parts_b.keys()) - set(tag_parts_a.keys())
# then look at every attribute set and check whether the values are the same
changed_attributes = list()
for attribute in set(tag_parts_a.keys()) & set(tag_parts_b.keys()):
if tag_parts_a[attribute] != tag_parts_b[attribute]:
changed_attributes.append(attribute)
return {
'deleted_attributes': list(deleted_attributes),
'inserted_attributes': list(inserted_attributes),
'changed_attributes': changed_attributes,
}
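# Hedged illustration of compare_tags on two made-up tags (not taken from the original docs):
#   compare_tags('<div id="a" class="x">', '<div id="b">')
#   -> {'deleted_attributes': ['class'], 'inserted_attributes': [], 'changed_attributes': ['id']}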
def chart_tag(tag_string):
"""
Takes tag and returns dict that charts out tag parts
example:
tag = '<div title="somewhere">'
parts = chart_tag(tag)
print(parts)
# {'tag': 'div', 'title': 'somewhere'}
"""
tag_parts = dict()
    if tag_string[0] != "<" or tag_string[-1] != ">":
raise Exception("Got malformed tag", tag_string)
t = tag_string.split(" ")
for el in t:
if el[0] == "<":
# grab the tag type
tag_parts['tag'] = el[1:]
else:
check_element = el[:-1] if el[-1] == ">" else el
check_element = check_element.replace('"', '').replace('/', '')
if len(check_element.split("=")) > 1:
attribute, values = check_element.split("=")
tag_parts[attribute] = values
else:
# if unattached elements, these are probably extra values from
# the previous attribute, so we add them
tag_parts[attribute] += ' ' + check_element
if el[-1] == ">":
return tag_parts
def get_class_decorator(name, diff_type=''):
"""returns class like `htmldiffer-tag-change`"""
if diff_type:
return "%s_%s" % (settings.HTMLDIFFER_CLASS_STRINGS[name], diff_type)
else:
return "%s" % (settings.HTMLDIFFER_CLASS_STRINGS[name])
# ===============================
# Predicate functions
# ===============================
# Note: These make assumptions about consuming valid html text. Validations should happen before these internal
# predicate functions are used -- these are not currently used for parsing.
def is_blacklisted_tag(tag):
return tag in settings.BLACKLISTED_TAGS
def is_comment(text):
return "<!--" in text
def is_ignorable(text):
return is_comment(text) or is_closing_tag(text) or text.isspace()
def is_whitelisted_tag(tag):
# takes a tag and checks against WHITELISTED
return tag in settings.WHITELISTED_TAGS
def is_open_script_tag(x):
return "<script " in x
def is_closed_script_tag(x):
    return "</script" in x
def is_tag(x):
return len(x) > 0 and x[0] == "<" and x[-1] == ">"
def is_opening_tag(x):
return x[0] == "<" and x[1] != "/"
def is_closing_tag(x):
return x[0:2] == "</"
def is_self_closing_tag(x):
return len(x) > 0 and x[0] == "<" and x[-2:] == "/>"
def is_text(x):
return ("<" not in x) and (">" not in x)
def is_div(x):
return x[0:4] == "<div" and x[-6:] == "</div>"
def is_special_character(string):
char_re = re.compile(r'[^a-zA-Z0-9]')
string = char_re.search(string)
return bool(string)
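# Illustrative behaviour of a few of the predicates above (inputs made up for this sketch):
#   is_tag('<div class="a">') -> True        is_text('hello') -> True
#   is_opening_tag('<p>') -> True            is_closing_tag('</p>') -> True
#   is_self_closing_tag('<br/>') -> True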
| 8,184 | 2,471 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import os
import sys
import traceback
import json
from regression.python_test_utils.test_utils import get_db_connection
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
with open(CURRENT_PATH + "/user_mapping_test_data.json") as data_file:
test_cases = json.load(data_file)
def get_um_data(db_user, server):
data = {"name": db_user,
"um_options": [],
"umoptions": [
{
"umoption": "user",
"umvalue": server["username"]
},
{
"umoption": "password",
"umvalue": server["db_password"]
}
]}
return data
def create_user_mapping(server, db_name, fsrv_name):
"""
This function will create user mapping under the existing
dummy database.
:param server: server details
:type server: dict
:param db_name: database name
:type db_name: str
:param fsrv_name: FS name
:type fsrv_name: str
:return um_id: user mapping id
:rtype: int
"""
try:
connection = get_db_connection(db_name,
server['username'],
server['db_password'],
server['host'],
server['port'],
server['sslmode'])
old_isolation_level = connection.isolation_level
connection.set_isolation_level(0)
pg_cursor = connection.cursor()
query = "CREATE USER MAPPING FOR %s SERVER %s OPTIONS" \
" (user '%s', password '%s')" % (server['username'],
fsrv_name,
server['username'],
server['db_password']
)
pg_cursor.execute(query)
connection.set_isolation_level(old_isolation_level)
connection.commit()
# Get 'oid' from newly created user mapping
pg_cursor.execute(
"select umid from pg_user_mappings where srvname = '%s' order by"
" umid asc limit 1" % fsrv_name)
oid = pg_cursor.fetchone()
um_id = ''
if oid:
um_id = oid[0]
connection.close()
return um_id
except Exception:
traceback.print_exc(file=sys.stderr)
def verify_user_mapping(server, db_name, fsrv_name):
"""
This function will verify current foreign server.
:param server: server details
:type server: dict
:param db_name: database name
:type db_name: str
:param fsrv_name: FS name
:type fsrv_name: str
:return user_mapping: user mapping record
:rtype: tuple
"""
try:
connection = get_db_connection(db_name,
server['username'],
server['db_password'],
server['host'],
server['port'],
server['sslmode'])
pg_cursor = connection.cursor()
pg_cursor.execute(
"select umid from pg_user_mappings where srvname = '%s' order by"
" umid asc limit 1" % fsrv_name)
user_mapping = pg_cursor.fetchone()
connection.close()
return user_mapping
except Exception:
traceback.print_exc(file=sys.stderr)
| 3,846 | 1,006 |
#!/usr/bin/env python
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
def check_response(exception_to_raise=None):
def decorator(func):
def proxy(self, *args, **kw):
try:
result = func(self, *args, **kw)
            except requests.HTTPError as http_error:
error_message = http_error.response.json().get('Message')
known_error_message = getattr(exception_to_raise, 'RESPONSE_MESSAGE')
if known_error_message is not None and known_error_message == error_message:
raise exception_to_raise
else:
raise http_error
return result
return proxy
return decorator
class BaseObject(object):
def __init__(self, client, *args, **kw):
self._client = client
for k, v in kw.items():
setattr(self, k, v)
| 1,467 | 414 |
import frappe
def permission_ov(user):
pass
# print("///////////******///////******////******///**")
# print(user)
# print(frappe.session.user)
# return """(`tabValiant`.`native`='Salem' )"""
# return "(`tabValiant`.owner = 'i am')".format(user=frappe.db.escape(user))
def has_permission(doc, user=None, permission_type=None):
# when reading a document allow if event is Public
print("///////////******///////******////******///**")
print(doc.native)
print(user)
print(permission_type)
# if permission_type == "read":
# return True
# if permission_type == "write":
# return True
if doc.native == "Salem":
return False
# return True
return True
def event():
print("*/*/***")
print("///////////******///////******////******///**")
print("Event Working")
| 840 | 273 |
from django.shortcuts import render
from django.http import HttpResponse,JsonResponse
from django.conf import settings
from Book.models import PictureInfo,AreaInfo
from django.core.paginator import Paginator
# Create your views here.
def sheng(request):
    """Fetch province-level data, convert it to a JSON dict, and return it to the AJAX caller."""
    # Query province-level records: sheng_list = [AreaInfo, AreaInfo, AreaInfo, AreaInfo, ...]
    sheng_list = AreaInfo.objects.filter(parent__isnull=True)
    # Build the JSON list
    list = []
    for sheng in sheng_list:
        list.append([sheng.id, sheng.name])
    # Build the JSON dict
    sheng_json_dict = {'shenglist':list}
    # Return JSON: the AJAX caller receives a JSON dict with exactly this structure and content
    return JsonResponse(sheng_json_dict)
"""
{
"shenglist":[
[id, name],
[id, name],
]
}
"""
"""
{
"shenglist":[
{"id":id, "name":name},
{"id":id, "name":name},
]
}
"""
"""
<select id="sheng">
<option value="100000">北京市</option>
</select>
<select id="shi">
<option value="100005">昌平区</option>
</select>
<select id="qu">
<option value="0">请选择</option>
</select>
"""
def area(request):
    """Serve the page with the linked province/city/district selectors."""
    return render(request, 'Book/area.html')
def page(request, page_num):
    """Paginate the province list."""
    # Query province-level records: sheng_list = [AreaInfo, AreaInfo, AreaInfo, ... 33 items]
    sheng_list = AreaInfo.objects.filter(parent__isnull=True)
    # Requirement: paginate sheng_list, 10 records per page
    # paginator wraps the full list of 33 AreaInfo records
    paginator = Paginator(sheng_list, 10)
    # If the user requests /page/ without a number, default to the first page
    # print(type(page_num))
    if page_num == '':
        page_num = '1'
    # Fetch one page of data: page = [AreaInfo, AreaInfo, ... up to 10 records]
    page = paginator.page(page_num) # 1 '1'
    # Build the template context
    context = {
        'page':page
    }
    # Render the template
    return render(request, 'Book/page.html', context)
def recv(request):
    """Receive an uploaded image, save its contents into the project, and record its path in the database."""
    # Get the image data
    pic = request.FILES.get('pic') # InMemoryUploadF...
    # Get the uploaded file's name
    pic_name = pic.name
    # Build the storage path, e.g. '/static/media/Book/mm03.jpeg'
    path = '%s/Book/%s' % (settings.MEDIA_ROOT, pic_name)
    # Save the received file contents into the project
    with open(path, 'ab') as file:
        for c in pic.chunks(): # chunks() iterates safely in pieces, avoiding memory overflow on large files
            file.write(c)
    # Also record in the database the path where the file was saved
    pictureInfo = PictureInfo()
    # This line only assigns the path attribute on the model instance
    pictureInfo.path = 'Book/%s' % pic_name
    # The following call actually writes the path value into the database table
    pictureInfo.save()
    # Send the response
    return HttpResponse('上传成功')
def upload(request):
    """Serve the image upload form page."""
    return render(request, 'Book/upload.html')
def staticFile(request):
    """Load the static-image demo page."""
    return render(request, 'Book/staticfile.html')
| 2,760 | 1,303 |
"""
File: add2.py
Name:
------------------------
TODO:
"""
import sys
class ListNode:
def __init__(self, data=0, pointer=None):
self.val = data
self.next = pointer
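# Worked example of the digit-reversed list representation used below (illustrative only):
#   342 + 465 = 807 is stored as 2->4->3 plus 5->6->4, and the sum comes out as 7->0->8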
def add_2_numbers(l1: ListNode, l2: ListNode) -> ListNode:
#######################
# #
# TODO: #
# #
#######################
cur = l1
curr = l2
len_l1 = 0
len_l2 = 0
    plus = 0  # carry flag
while cur is not None:
len_l1 += 1
cur = cur.next
while curr is not None:
len_l2 += 1
curr = curr.next
    # Compare the lengths of the two lists
if len_l1 > len_l2:
max_l = l1
min_l = l2
else:
max_l = l2
min_l = l1
add_val = max_l.val + min_l.val
if plus == 1:
add_val += 1
plus = 0
if add_val > 9:
add_val = add_val % 10
plus = 1
ans_head = ListNode(add_val, None)
ans_cur = ans_head
if max_l.next is None:
return ans_cur
else:
max_l = max_l.next
min_l = min_l.next
while max_l.next is not None:
if min_l.next is not None:
add_val = max_l.val + min_l.val
if plus == 1:
add_val += 1
plus = 0
if add_val > 9:
add_val = add_val % 10
plus = 1
ans_head.next = ListNode(add_val, None)
ans_head = ans_head.next
max_l = max_l.next
min_l = min_l.next
else:
min_l.next = ListNode(1, None)
min_l.next.val -= 1
add_val = max_l.val + min_l.val
if plus == 1:
add_val += 1
plus = 0
if add_val > 9:
add_val = add_val % 10
plus = 1
ans_head.next = ListNode(add_val, None)
ans_head = ans_head.next
max_l = max_l.next
min_l = min_l.next
add_val = max_l.val + min_l.val
if plus == 1:
add_val += 1
plus = 0
if add_val > 9:
add_val = add_val % 10
plus = 1
ans_head.next = ListNode(add_val, None)
ans_head = ans_head.next
if plus == 1:
ans_head.next = ListNode(1, None)
return ans_cur
####### DO NOT EDIT CODE BELOW THIS LINE ########
def traversal(head):
"""
:param head: ListNode, the first node to a linked list
-------------------------------------------
This function prints out the linked list starting with head
"""
cur = head
while cur.next is not None:
print(cur.val, end='->')
cur = cur.next
print(cur.val)
def main():
args = sys.argv[1:]
if not args:
        print('Error: Please type "python3 add2.py test1"')
else:
if args[0] == 'test1':
l1 = ListNode(2, None)
l1.next = ListNode(4, None)
l1.next.next = ListNode(3, None)
l2 = ListNode(5, None)
l2.next = ListNode(6, None)
l2.next.next = ListNode(4, None)
ans = add_2_numbers(l1, l2)
print('---------test1---------')
print('l1: ', end='')
traversal(l1)
print('l2: ', end='')
traversal(l2)
print('ans: ', end='')
traversal(ans)
print('-----------------------')
elif args[0] == 'test2':
l1 = ListNode(9, None)
l1.next = ListNode(9, None)
l1.next.next = ListNode(9, None)
l1.next.next.next = ListNode(9, None)
l1.next.next.next.next = ListNode(9, None)
l1.next.next.next.next.next = ListNode(9, None)
l1.next.next.next.next.next.next = ListNode(9, None)
l2 = ListNode(9, None)
l2.next = ListNode(9, None)
l2.next.next = ListNode(9, None)
l2.next.next.next = ListNode(9, None)
ans = add_2_numbers(l1, l2)
print('---------test2---------')
print('l1: ', end='')
traversal(l1)
print('l2: ', end='')
traversal(l2)
print('ans: ', end='')
traversal(ans)
print('-----------------------')
elif args[0] == 'test3':
l1 = ListNode(0, None)
l2 = ListNode(0, None)
ans = add_2_numbers(l1, l2)
print('---------test3---------')
print('l1: ', end='')
traversal(l1)
print('l2: ', end='')
traversal(l2)
print('ans: ', end='')
traversal(ans)
print('-----------------------')
else:
            print('Error: Please type "python3 add2.py test1"')
if __name__ == '__main__':
main()
| 4,925 | 1,637 |
import obelisk
import logging
import bitcoin
from twisted.internet import reactor
_log = logging.getLogger('trust')
TESTNET = False
def burnaddr_from_guid(guid_hex):
_log.debug("burnaddr_from_guid: %s", guid_hex)
if TESTNET:
guid_hex = '6f' + guid_hex
else:
guid_hex = '00' + guid_hex
_log.debug("GUID address on bitcoin net: %s", guid_hex)
guid = guid_hex.decode('hex')
_log.debug("Decoded GUID address on bitcoin net")
# perturbate GUID
# to ensure unspendability through
# near-collision resistance of SHA256
# by flipping the last non-checksum bit of the address
guid = guid[:-1] + chr(ord(guid[-1]) ^ 1)
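    # e.g. a final byte of 0x10 becomes 0x11 (made-up value for illustration); the perturbed
    # address almost certainly corresponds to no known private key, so funds sent to it burn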
_log.debug("Perturbated bitcoin proof-of-burn address")
return obelisk.bitcoin.EncodeBase58Check(guid)
def get_global(guid, callback):
get_unspent(burnaddr_from_guid(guid), callback)
def get_unspent(addr, callback):
_log.debug('get_unspent call')
def get_history():
history = bitcoin.history(addr)
total = 0
for tx in history:
total += tx['value']
callback(total)
reactor.callFromThread(get_history)
| 1,157 | 420 |
# Collage Generator Backend Application
import logging.config
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.authentication import AuthenticationMiddleware
from starlette.middleware.gzip import GZipMiddleware
from starlette_context import plugins
from starlette_context.middleware import ContextMiddleware
from . import settings
from .extensions import db
from .http import routes
from .http.error_handlers import exception_handlers
from .http.security import AppAuthenticationBackend
logging.config.fileConfig("logging.conf", disable_existing_loggers=False)
middleware = [
Middleware(
ContextMiddleware.with_plugins(
plugins.RequestIdPlugin, # request id
plugins.CorrelationIdPlugin, # correlation id
)
),
Middleware(AuthenticationMiddleware, backend=AppAuthenticationBackend()),
Middleware(GZipMiddleware, minimum_size=1024),
]
app = Starlette(
debug=settings.DEBUG,
exception_handlers=exception_handlers,
middleware=middleware,
routes=routes,
)
# App-level events
@app.on_event("startup")
async def startup():
await db.connect()
@app.on_event("shutdown")
async def shutdown():
await db.disconnect()
| 1,261 | 342 |
"""
[Golf](https://www.codechef.com/MAY21C/problems/LKDNGOLF)
It's a lockdown. You’re bored in your house and are playing golf in the hallway.
The hallway has N+2 tiles numbered from 0 to N+1 from left to right. There is a hole on tile number x. You hit the ball
standing on tile 0. When you hit the ball, it bounces at lengths of k, i.e. the tiles covered by it are 0,k,2k,…, and
so on until the ball passes tile N+1.
If the ball doesn't enter the hole in the first trial, you try again but this time standing on the tile N+1. When you
hit the ball, it bounces at lengths of k, i.e. the tiles covered by it are (N+1),(N+1−k),(N+1−2k),…, and so on until
the ball passes tile 0.
Find if the ball will enter the hole, either in its forward journey or backward journey.
Note: The input and output of this problem are large, so prefer using fast input/output methods.
Input
The first line contains an integer T, the number of test cases. Then the test cases follow.
The only line of each test case contains three integers N,x,k.
Output
Output in a single line, the answer, which should be "YES" if the ball enters the hole either in the forward or
backward journey and "NO" if not.
You may print each character of the string in uppercase or lowercase (for example, the strings "yEs", "yes", "Yes" and
"YES" will all be treated as identical).
Constraints
1≤T≤105
1≤x,k≤N≤109
Subtasks
Subtask #1 (10 points): N≤102
Subtask #2 (90 points): original constraints
Sample Input
3
5 4 2
5 3 2
5 5 2
Sample Output
YES
NO
NO
Explanation
In each test case, the tiles covered by the ball for N=5 and k=2 are {0,2,4,6} in the forward journey and {6,4,2,0} in
the backward journey.
Therefore, the answer for the first test case is "YES" since the ball falls in the position of the hole at tile 4. But
the answer for test cases 2 and 3 is "NO" since the ball does not fall in the position of the hole.
"""
import sys
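# Worked check against the first sample (N=5, x=4, k=2), illustrating the reasoning used below:
# the forward journey lands on multiples of k, so the hole is hit when x % k == 0
# (4 % 2 == 0 -> YES); the backward journey lands on (N+1) - m*k, so it is hit when
# (N+1 - x) % k == 0.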
if __name__ == '__main__':
input = sys.stdin.read()
data = list(map(int, input.split()))
T = data[0]
idx = 1
while T > 0:
N, x, k = data[idx:idx+3]
N += 1
if x % k == 0:
print("YES")
elif (N - x) % k == 0:
print("YES")
else:
print("NO")
T -= 1
idx += 3
# Time : 0.14s
| 2,298 | 778 |
"""
Application logger
"""
import logging
import os
import sys
from yogit import get_name, get_version
from yogit.yogit.paths import get_log_path, SETTINGS_DIR
def get_logger(stdout=False, logger_name=get_name(), version=get_version()):
"""
Create and configure a logger using a given name.
"""
os.makedirs(SETTINGS_DIR, exist_ok=True)
application_str = logger_name
if version:
application_str += " " + version
formatter = logging.Formatter(
fmt=(
"%(asctime)s "
"[{application}:%(process)d] "
"[%(levelname)s] "
"%(message)s".format(application=application_str)
),
datefmt="%Y-%m-%dT%H:%M:%S%z",
)
file_log_handler = logging.FileHandler(get_log_path())
file_log_handler.setLevel(logging.DEBUG)
file_log_handler.setFormatter(formatter)
local_logger = logging.getLogger(logger_name)
local_logger.setLevel(logging.DEBUG)
local_logger.addHandler(file_log_handler)
if stdout:
console_log_handler = logging.StreamHandler(sys.stdout)
console_log_handler.setLevel(logging.DEBUG)
console_log_handler.setFormatter(formatter)
local_logger.addHandler(console_log_handler)
return local_logger
LOGGER = get_logger()
def enable_stdout():
"""
Prints logs in stdout
"""
global LOGGER # pylint: disable=global-statement
LOGGER = get_logger(stdout=True)
| 1,446 | 465 |
# coding: utf-8
"""
Connect Statistics API
Connect Statistics API provides statistics about other cloud services through defined counters.
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from .error_response import ErrorResponse
from .fields import Fields
from .metric import Metric
from .successful_response import SuccessfulResponse
| 481 | 127 |
import logging, sys
def sane_logger(log_level=logging.INFO):
logger = logging.getLogger()
logger.setLevel(log_level)
sh = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
'[%(asctime)s] %(levelname)s [%(filename)s.%(funcName)s:%(lineno)d] %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S %Z'
)
sh.setFormatter(formatter)
logger.addHandler(sh)
return logger
if __name__ == '__main__':
logger = sane_logger(logging.DEBUG)
logger.info('test log')
| 488 | 193 |
from core_tools.data.SQL.SQL_connection_mgr import SQL_database_manager
from core_tools.drivers.hardware.hardware_SQL_backend import virtual_gate_queries
import time
import numpy as np
def lamda_do_nothing(matrix):
return matrix
class virtual_gate_matrix():
def __init__(self, name, gates, v_gates, data,
forward_conv_lamda = lamda_do_nothing, backward_conv_lamda = lamda_do_nothing):
self.name = name
self.gates = gates
self.v_gates = v_gates
self._matrix = data
self.forward_conv_lamda = forward_conv_lamda
self.backward_conv_lamda = backward_conv_lamda
self.last_update = time.time()
@property
def matrix(self):
return self.forward_conv_lamda(self._matrix)
@matrix.setter
def matrix(self, matrix):
if self._matrix.shape != matrix.shape:
raise ValueError('input shape of matrix does not match the one in the virtual gate matrix')
self._matrix[:,:] = self.backward_conv_lamda(matrix)
self.save()
@property
def inv(self):
l_inv_f = combine_lamdas(self.forward_conv_lamda, lamda_invert)
l_inv_b = combine_lamdas(self.backward_conv_lamda, lamda_invert)
return virtual_gate_matrix(self.name, self.gates, self.v_gates, self._matrix, l_inv_f, l_inv_b)
def reduce(self, gates, v_gates = None):
'''
reduce size of the virtual gate matrix
Args:
            gates (list<str>) : names of the gates to which the current matrix should be reduced.
v_gates (list<str>) : list with the names of the virtual gates (optional)
'''
v_gates = name_virtual_gates(v_gates, gates)
v_gate_matrix = np.eye(len(gates))
for i in range(len(gates)):
for j in range(len(gates)):
if gates[i] in self.gates:
v_gate_matrix[i, j] = self[v_gates[i],gates[j]]
return virtual_gate_matrix('dummy', gates, v_gates, v_gate_matrix)
def __getitem__(self, index):
if isinstance(index, tuple):
idx_1, idx_2 = index
idx_1 = self.__evaluate_index(idx_1, self.v_gates)
idx_2 = self.__evaluate_index(idx_2, self.gates)
return self.matrix[idx_1,idx_2]
else:
            raise ValueError("wrong input format provided (['virtual_gate', 'gate'] expected).")
def __setitem__(self, index, value):
self.last_update = time.time()
if isinstance(index, tuple):
idx_1, idx_2 = index
idx_1 = self.__evaluate_index(idx_1, self.v_gates)
idx_2 = self.__evaluate_index(idx_2, self.gates)
m = self.matrix
m[idx_1,idx_2] = value
self._matrix[:,:] = self.backward_conv_lamda(m)
self.save()
else:
            raise ValueError("wrong input format provided (['virtual_gate', 'gate'] expected).")
def __evaluate_index(self, idx, options):
        if isinstance(idx, int) and idx >= len(options):
raise ValueError("gate out of range ({}), size of virtual matrix {}x{}".format(idx, len(options), len(options)))
if isinstance(idx, str):
if idx not in options:
raise ValueError("{} gate does not exist in virtual gate matrix".format(idx))
else:
idx = options.index(idx)
return idx
def save(self):
if self.name != 'dummy':
save(self)
def __len__(self):
return len(self.gates)
def __repr__(self):
descr = "Virtual gate matrix named {}\nContents:\n".format(self.name)
content = "\nGates : {}\nVirtual gates : {}\nMatrix :\n".format(self.gates, self.v_gates, self.matrix)
for row in self.matrix:
content += "{}\n".format(row)
return descr + content
def lamda_invert(matrix):
return np.linalg.inv(matrix)
def lamda_norm(matrix_norm):
matrix_no_norm = np.empty(matrix_norm.shape)
for i in range(matrix_norm.shape[0]):
matrix_no_norm[i, :] = matrix_norm[i, :]/matrix_norm[i, i]
return matrix_no_norm
def lamda_unnorm(matrix_no_norm):
matrix_norm = np.empty(matrix_no_norm.shape)
for i in range(matrix_norm.shape[0]):
matrix_norm[i, :] = matrix_no_norm[i]/np.sum(matrix_no_norm[i, :])
return matrix_norm
def combine_lamdas(l1, l2):
def new_lamda(matrix):
return l1(l2(matrix))
return new_lamda
def load_virtual_gate(name, real_gates, virtual_gates=None):
conn = SQL_database_manager().conn_local
virtual_gate_queries.generate_table(conn)
virtual_gates = name_virtual_gates(virtual_gates, real_gates)
if virtual_gate_queries.check_var_in_table_exist(conn, name):
real_gate_db, virtual_gate_db, matrix_db = virtual_gate_queries.get_virtual_gate_matrix(conn, name)
entries_to_add = set(real_gates) - set(real_gate_db)
gates = real_gate_db + list(entries_to_add)
dummy_matrix = np.eye(len(gates))
dummy_matrix[:len(real_gate_db) , :len(real_gate_db)] = matrix_db
dummy_v_gates = virtual_gate_matrix('dummy', gates, name_virtual_gates(None, gates), dummy_matrix)
v_gate_matrix = np.eye(len(real_gates))
for i in range(len(real_gates)):
for j in range(len(real_gates)):
v_gate_matrix[i, j] = dummy_v_gates['v' + real_gates[i],real_gates[j]]
return virtual_gate_matrix(name, real_gates, virtual_gates, v_gate_matrix)
else:
return virtual_gate_matrix(name, real_gates, virtual_gates, np.eye(len(real_gates)))
def save(vg_matrix):
conn = SQL_database_manager().conn_local
if virtual_gate_queries.check_var_in_table_exist(conn, vg_matrix.name):
# merge in case there are more entries
real_gate_db, virtual_gate_db, matrix_db = virtual_gate_queries.get_virtual_gate_matrix(conn, vg_matrix.name)
all_gates = list(set(real_gate_db + vg_matrix.gates))
dummy_v_gates = virtual_gate_matrix('dummy', all_gates, name_virtual_gates(None, all_gates), np.eye(len(all_gates)))
for i in range(len(real_gate_db)):
for j in range(len(real_gate_db)):
dummy_v_gates['v' + real_gate_db[i], real_gate_db[j]] = matrix_db[i,j]
for i in range(len(vg_matrix.gates)):
for j in range(len(vg_matrix.gates)):
dummy_v_gates['v' + vg_matrix.gates[i], vg_matrix.gates[j]] = vg_matrix._matrix[i,j]
virtual_gate_queries.set_virtual_gate_matrix(conn, vg_matrix.name,
dummy_v_gates.gates, dummy_v_gates.v_gates, dummy_v_gates._matrix)
else:
virtual_gate_queries.set_virtual_gate_matrix(conn, vg_matrix.name,
vg_matrix.gates, vg_matrix.v_gates, vg_matrix._matrix)
def name_virtual_gates(v_gate_names, real_gates):
if v_gate_names is None:
v_gates = []
for i in real_gates:
v_gates += ['v' + i]
else:
v_gates = v_gate_names
return v_gates
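# --- Hedged usage sketch (not part of the original module; the gate names are made up) ---
if __name__ == '__main__':
    # A matrix named 'dummy' is never written back to the database (see save()),
    # so this only illustrates name-based indexing on an in-memory matrix.
    vg = virtual_gate_matrix('dummy', ['P1', 'P2'], ['vP1', 'vP2'], np.eye(2))
    vg['vP1', 'P2'] = 0.1          # set a cross-capacitance entry by name
    print(vg['vP1', 'P2'])         # -> 0.1
    print(vg.inv.matrix)           # inverse view of the same underlying data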
| 7,104 | 2,432 |
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.response import Response
from rest_framework import generics
from .models import kotoUser
from .models import transaction
from .models import income
from .serializers import kotoUserserializer, transactionserializer
from django.shortcuts import render, redirect # redirects users to page we (Koto team) wants
from django.contrib.auth import authenticate, login # authenticates whether the user exists in the database (via email and username), login makes sure that our users dont have to input password on every page they browse
from django.views.generic import View
from .forms import UserForm
from django.contrib.auth import logout
from django.http import JsonResponse
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
IMAGE_FILE_TYPES = ['png', 'jpg', 'jpeg']
# Create your views here.
#rest api requests
class userList(APIView):
def get(self, request):
users = kotoUser.objects.all()
serializers = kotoUserserializer(users, many = True)
return Response(serializers.data)
def pos(self):
pass
class userTransactions(APIView):
def get(self, request):
transactionsList = transaction.objects.filter()
serializers = transactionserializer(transactionsList,many=True)
return Response(serializers.data)
class TransactionList(APIView):
t_list = transactionserializer
def get_queryset(self):
"""
This view should return a list of all the purchases for
the user as determined by the username portion of the URL.
"""
tid = self.kwargs['tid']
return transaction.objects.filter(transId=tid)
class UserFormView(View):
form_class = UserForm # blueprint for the form
template_name = 'HTML/registration_form.html' # template made to include the form in the html
# (User) getting the form (displays the blank form)
def get(self, request):
form = self.form_class(None)
return render(request, self.template_name, {'form': form})
# (User) posting the form and process the form data (input) by the user
# done to keep track of registered users for later authentication
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
user = form.save(commit = False) # creates a user object but doesn't save it to the DB yet
# cleaned (normalized) data so that user input follows a consistent format
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user.set_password(password) # allows to modify password
user.save() # this line's execution saves the user info to the DB
# returns User objects if credentials are correct
user = authenticate(username = username, password = password)
# gives us the power to ban or deactivate user accounts for whatever reason(s)
if user is not None:
if user.is_active:
login(request, user)
return redirect('user:index')
return render(request, self.template_name, {'form': form})
#def logout_user(request):
#logout(request)
#form = UserForm(request.POST or None)
#context = {
# "form": form,
#}
#return render(request, 'HTML/user_login.html', context)
#class User_Login(login):
#login_class = UserLogin # blueprint for the form
#template_name = 'HTML/user_login.html' # template made to include the form in the html
#def get(self, request):
# login = self.login_class(None)
# return render(request, self.template_name, {'login': login})
def user_login(request):
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(username = username, password = password)
if user is not None:
if user.is_active:
login(request, user)
koto_user = kotoUser.objects.filter(user = request.user)
return render(request, 'HTML/index.html', {'login': 'Home Page'})
else:
return render(request, 'HTML/user_login.html', {'error_message': 'Your account has been disabled'})
else:
return render(request, 'HTML/user_login.html', {'error_message': 'Invalid login'})
return render(request, 'HTML/user_login.html')
| 4,658 | 1,232 |
import sys
import re
import json
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import MultiLabelBinarizer
from scipy.spatial.distance import cdist
from colorama import Fore, Style
from kneed import KneeLocator
import copy
import time
import pickle
import os
def error_msg(error_msg, arg):
"""
Helper function to display error message on the screen.
Input:
The error message along with its respective argument.
(Values include - filename, selected action).
Output:
The formatted error message on the screen along with the argument.
"""
print("****************************")
print(Fore.RED, end='')
print(error_msg,":", arg)
print(Style.RESET_ALL, end='')
print("****************************")
sys.exit(0)
def printINFO(info):
"""
Helper function to ask the user for Input.
Input:
The message that is to be displayed.
Output:
The formatted message on the screen.
"""
print(Fore.BLUE, end='')
print(info)
print(Style.RESET_ALL, end='')
# *****************************************************************************
# *****************************************************************************
# Helper Methods Start
def calculate_num_clusters(df, acl_weights):
"""
Calculates the optimal number of clusters using the elbow_graph approach.
Input:
The Pandas dataframe of the input file (ACL.json)
output:
The value of k that provides the least MSE.
"""
files = ['IP_Access_List', 'Route_Filter_List', 'VRF', 'AS_Path_Access_List',
'IKE_Phase1_Keys', 'IPsec_Phase2_Proposals', 'Routing_Policy']
k_select_vals = [41, 17, 42, 5, 3, 2, 58]
curr_file = file_name.split(".")[0]
file_index = files.index(curr_file)
# Pre-computed k values are returned here; the elbow-method code below is kept
# for reference but is unreachable because of this early return.
return k_select_vals[file_index]
features = df[df.columns]
ran = min(len(df.columns), len(discrete_namedstructure))
if ran > 50:
k_range = range(1, 587)
else:
k_range = range(1, ran)
print(k_range)
k_range = range(1, 580)
distortions = []
np.random.seed(0)
clusters_list = []
f = open('distortions.txt', 'w')
for k in k_range:
print(k)
kmeans = KMeans(n_clusters=k).fit(features, None, sample_weight=acl_weights)
clusters_list.append(kmeans)
cluster_centers = kmeans.cluster_centers_
k_distance = cdist(features, cluster_centers, "euclidean")
distance = np.min(k_distance, axis=1)
distortion = np.sum(distance)/features.shape[0]
distortions.append(distortion)
f.write(str(distortion))
f.write("\n")
kn = KneeLocator(list(k_range), distortions, S=3.0, curve='convex', direction='decreasing')
print("Knee is: ", kn.knee)
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title('The Elbow Method showing the optimal k')
plt.plot(k_range, distortions, 'bx-')
plt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()
if kn.knee is None:
if ran < 5:
return ran - 1
else:
return 5
return kn.knee
'''
for i in range(1, len(avg_within)):
if (avg_within[i-1] - avg_within[i]) < 1:
break
# return i-1 if len(avg_within) > 1 else 1
# return i - 1 if i > 1 else 1
'''
def perform_kmeans_clustering(df, ns_weights):
"""
To get a mapping of the rows into respective clusters generated using the K-means algorithm.
Input:
df:The Pandas data-frame of the input file (ACL.json)
ns_weights: The weights of each name structure which allows the weighted k-means algorithm to work.
Output:
Adding respective K-means cluster label to the input dataframe.
Example:
Row1 - Label 0 //Belongs to Cluster 0
Row2 - Label 0 //Belongs to Cluster 0
Row3 - Label 1 //Belongs to Cluster 1
"""
global k_select
k_select = calculate_num_clusters(df, ns_weights)
features = df[df.columns]
kmeans = KMeans(n_clusters=k_select)
kmeans.fit(features, None, sample_weight=ns_weights)
labels = kmeans.labels_
df["kmeans_cluster_number"] = pd.Series(labels)
def extract_keys(the_dict, prefix=''):
"""
Recursive approach to gather all the keys that have nested keys in the input file.
Input:
The dictionary file to find all the keys in.
Output:
All the keys found in the nested dictionary.
Example:
Consider {key1:value1, key2:{key3:value3}, key4:[value4], key5:[key6:{key7:value7}]}
The function returns key2, key5=key6
"""
key_list = []
for key, value in the_dict.items():
if len(prefix) == 0:
new_prefix = key
else:
new_prefix = prefix + '=' + key
try:
if type(value) == dict:
key_list.extend(extract_keys(value, new_prefix))
elif type(value) == list and type(value[0]) == dict:
key_list.extend(extract_keys(value[0], new_prefix))
elif type(value) == list and type(value[0]) != dict:
key_list.append(new_prefix)
else:
key_list.append(new_prefix)
except:
key_list.append(new_prefix)
return key_list
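# Illustrative sketch (not used by the pipeline itself): extract_keys lists the
# nested key paths of a dictionary, joining levels with '='. The ACL below is a
# made-up example, not taken from any input file.
def _demo_extract_keys():
    acl = {"action": "PERMIT", "match": {"ipWildcard": "10.0.0.0/8"}, "ports": [80, 443]}
    return extract_keys(acl)  # -> ["action", "match=ipWildcard", "ports"]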
def get_uniques(data):
"""
A helper function to get unique elements in a List.
Input:
A list that we need to capture uniques from.
Output:
A dictionary with unique entries and count of occurrences.
"""
acl_count_dict = {}
for acl in data:
acl = json.dumps(acl)
if acl not in acl_count_dict:
acl_count_dict[acl] = 1
else:
value = acl_count_dict[acl]
value += 1
acl_count_dict[acl] = value
keys = []
values = []
for key, value in acl_count_dict.items():
keys.append(key)
values.append(value)
return keys, values
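# Illustrative sketch (not invoked here): get_uniques de-duplicates structures by
# their JSON string and counts repeats; the counts later serve as sample weights.
def _demo_get_uniques():
    keys, counts = get_uniques([{"a": 1}, {"a": 1}, {"b": 2}])
    # keys   -> ['{"a": 1}', '{"b": 2}']
    # counts -> [2, 1]
    return keys, counts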
def overall_dict(data_final):
"""
Parses through the dictionary and appends the frequency with which the keys occur.
Input:
A nested dictionary.
Example:
{key1:{key2:value1, key3:value2, key4:{key5:value3}}
{key6:{key7:value2}
{key8:{key3:value3, key4:value5, key6:value3}}
Output:
Returns a new array with the nested keys appended along with a tuple containing the un-nested value along with
the frequency count.
[{
key1=key2:{'value1':1},
key1=key3:{'value2':2},
key1=key4=key5:{'value3':3},
key6=key7:{'value2':2},
key8=key3:{'value3':3},
key8=key4:{'value5':1},
key8=key6:{'value3':1}
}]
"""
overall_array = []
for data in data_final:
overall = {}
for item in data:
if item[0] is None:
continue
result = extract_keys(item[0])
for element in result:
value = item[0]
for key in element.split("="):
new_value = value[key]
if type(new_value) == list:
if len(new_value) != 0:
new_value = new_value[0]
else:
new_value = "#BUG#"
value = new_value
if element not in overall:
overall[element] = {}
if value not in overall[element]:
overall[element][value] = 1
else:
overall[element][value] += 1
overall_array.append(overall)
return overall_array
def get_overall_dict(data_final):
"""
Parses through the dictionary and appends the frequency with which the keys occur.
Input:
A nested dictionary.
Example:
{key1:{key2:value1, key3:value2, key4:{key5:value3}}
{key6:{key7:value2}
{key8:{key3:value3, key4:value5, key6:value3}}
Output:
Returns a new array with the nested keys appended along with a tuple containing the unnested value along with the frequency count.
[{
key1=key2:{'value1':1},
key1=key3:{'value2':2},
key1=key4=key5:{'value3':3},
key6=key7:{'value2':2},
key8=key3:{'value3':3},
key8=key4:{'value5':1},
key8=key6:{'value3':1}
}]
"""
overall_array = []
for data in data_final:
overall = {}
new_value = None
flag = 0
for item in data:
visited = {"lines=name":1}
if item[0] is None:
continue
result = extract_keys(item[0])
for element in result:
value = item[0]
for key in element.split("="):
if element not in visited:
visited[element] = 1
new_value = value[key]
flag = 0
if type(new_value) == list:
if len(new_value) > 0:
for list_data in new_value:
if element not in overall:
overall[element] = {}
temp = element
temp_val = list_data
temp = temp.split("=", 1)[-1]
while len(temp.split("=")) > 1:
temp_val = temp_val[temp.split("=")[0]]
temp = temp.split("=", 1)[-1]
list_key = temp
check = 0
try:
if type(temp_val[list_key]) == list:
if temp_val[list_key][0] not in overall[element]:
overall[element][temp_val[list_key][0]] = 1
check = 1
else:
if temp_val[list_key] not in overall[element]:
overall[element][temp_val[list_key]] = 1
check = 1
except:
# key missing or malformed; skip it
pass
try:
if check == 0:
if type(temp_val[list_key]) == list:
if temp_val[list_key][0] in overall[element]:
overall[element][temp_val[list_key][0]] += 1
else:
if temp_val[list_key] in overall[element]:
overall[element][temp_val[list_key]] += 1
except:
pass
flag = 1
value = new_value
else:
'''
Type is not list
'''
value = new_value
else:
if flag == 0:
if element not in overall:
overall[element] = {}
if new_value not in overall[element]:
overall[element][new_value] = 1
else:
overall[element][new_value] += 1
if flag == 0:
if element not in overall:
overall[element] = {}
if new_value not in overall[element]:
overall[element][new_value] = 1
else:
overall[element][new_value] += 1
overall_array.append(overall)
return overall_array
def calculate_z_score(arr):
"""
Calculates the Z-score (uses mean) (or) Modified Z-score (uses median) of data-points
Input:
Data points generated from parsing through the input file.
Also considers the Z_SCORE_FLAG that is set previously with 0 (default) using the Modified Z-score and 1 using Z-score.
Output:
The Z-score of given data-points array.
"""
if len(arr) == 1:
return arr
z_score = []
'''
Calculates the Z-score using mean. Generally used if distribution is normal (Bell curve).
'''
if Z_SCORE_FLAG:
mean = np.mean(arr)
std = np.std(arr)
if std == 0:
return np.ones(len(arr)) * 1000
for val in arr:
z_score.append((val - mean) / std)
'''
Modified Z-score approach.
Calculates the Z-score using median. Generally used if distribution is skewed.
'''
else:
median_y = np.median(arr)
median_absolute_deviation_y = np.median([np.abs(y - median_y) for y in arr])
if median_absolute_deviation_y == 0:
return np.ones(len(arr)) * 1000
z_score = [0.6745 * (y - median_y) / median_absolute_deviation_y for y in arr]
return z_score
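# Illustrative sketch (not called anywhere in this script): with the default
# modified Z-score (Z_SCORE_FLAG = 0), a rare value count stands out strongly.
def _demo_modified_z_score():
    counts = [50, 49, 51, 2]  # hypothetical value frequencies from one cluster
    return calculate_z_score(counts)  # the last entry gets a large negative score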
def calculate_signature_d(overall_arr):
"""
Uses Z-score to generate the signatures of data-points and also maps points on level of significance (include for
signature calculation, include for bug calculation, no significance).
If Z-score is equal to 1000.0 or in between sig_threshold and bug_threshold, no-significance.
If Z-score is >= sig_threshold, include for signature calculation.
If Z-score is <= bug_threshold, include for bug calculation.
Input:
The individual master-signature generated for each Cluster.
Output:
An array containing dictionaries marked with tags that represent the action that needs to be performed on them.
"""
signature = {}
for key, value in overall_arr.items():
sig_threshold = 0.5
bug_threshold = -0.1
key_points = []
data_points = []
sig_values = []
for k, v in value.items():
key_points.append(k)
data_points.append(v)
if len(data_points) == 1:
sig_values.append((key_points[0], (data_points[0])))
'''
Check for two data points case
'''
else:
z_score = calculate_z_score(data_points)
if len(z_score) > 0:
avg_z_score = sum(z_score)/len(z_score)
bug_threshold = bug_threshold + (avg_z_score - sig_threshold)
for i in range(len(z_score)):
present_zscore = z_score[i]
if present_zscore == 1000.0:
sig_values.append((key_points[i], "*", (data_points[i])))
elif present_zscore >= sig_threshold:
sig_values.append((key_points[i], (data_points[i])))
elif present_zscore <= bug_threshold:
sig_values.append((key_points[i], "!", (data_points[i])))
elif (present_zscore < sig_threshold) and (present_zscore > bug_threshold):
sig_values.append((key_points[i], "*", (data_points[i])))
if key in signature:
signature[key].append(sig_values)
else:
signature[key] = []
signature[key] += sig_values
return signature
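# Illustrative sketch (assumes the default modified Z-score, Z_SCORE_FLAG = 0):
# a dominant value is kept in the signature, a rare one is tagged "!" as a bug
# candidate, and low-information values are tagged "*" (skipped).
def _demo_calculate_signature_d():
    overall = {"match=ipProtocol": {"TCP": 40, "ICMP": 1}}
    return calculate_signature_d(overall)
    # roughly: {"match=ipProtocol": [("TCP", 40), ("ICMP", "!", 1)]}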
def results(data, signatures):
title = file_name.split(".")[0] + "_Results.txt"
if os.path.dirname(title) and not os.path.exists(os.path.dirname(title)):
os.makedirs(os.path.dirname(title))
f = open(title, "w")
f.write(title + "\n")
f.write("\n")
totalBugs = 0
totalConformers = 0
for cluster_index, clustered_namedStructure in enumerate(data):
numBugs = 0
numConformers = 0
cluster_signature = signatures[cluster_index]
for namedStructure in clustered_namedStructure:
keys = extract_keys(namedStructure[0])
namedStructure = flatten_json((namedStructure[0]), '=')
isNamedStructureABug = False
newNamedStructure = {}
for key, value in namedStructure.items():
flag = 0
for index, char in enumerate(key):
if char in '0123456789':
flag = 1
if index == len(key)-1:
new_key = str(key[0:index-1])
newNamedStructure[new_key] = value
else:
new_key = str(key[0:index-1]) + str(key[index+1:len(key)])
newNamedStructure[new_key] = value
if not flag:
newNamedStructure[key] = value
flag = 0
for propertyKey, propertyValue in newNamedStructure.items():
try:
propValues = cluster_signature[propertyKey]
except:
# property not present in this cluster's signature; skip it
print("EXCEPTION OCCURRED!")
print(propertyKey)
continue
for value in propValues:
if value[0] == propertyValue and value[1] == '!':
numBugs += 1
isNamedStructureABug = True
if isNamedStructureABug:
numBugs += 1
else:
numConformers += 1
numBugs = len(clustered_namedStructure) - numConformers
f.write("Cluster Index: " + str(cluster_index) + "\n")
f.write(" Number of elements in Cluster = " + str(len(clustered_namedStructure)) + "\n")
f.write(" Number of Bugs using Z-score: " + str(len(clustered_namedStructure) - numConformers) + "\n")
f.write(" Number of Conformers using Z-score: " + str(numConformers) + "\n")
f.write("\n")
totalBugs += numBugs
totalConformers += numConformers
print("Total Bugs = ", totalBugs)
print("Total Confomers = ", totalConformers)
f.write("\n")
f.write("\n")
f.write("Total Bugs using Z-score: " + str(totalBugs) + "\n")
f.write("Total Conformers using Z-score: " + str(totalConformers))
def transform_data(data):
"""
A helper function to extract nested keys from the ACL and to add the frequency of the repeated value. Helps score data.
Input:
An ACL in the form {key1:value1, key2:{key3:value3}, key4:[value4], key5:[key6:{key7:value7}]}.
Output:
Extracted nested keys from the extract_keys function along with the frequency count.
Example:
[
{key1:{key2:value1, key3:value2, key4:{key5:value3}}
{key6:{key7:value2}
{key8:{key3:value3, key4:value5, key6:value3}}
]
Returns a new array with the nested keys appended along with a tuple containing the unnested value along with the frequency count.
[{
key1=key2:{'value1':1},
key1=key3:{'value2':2},
key1=key4=key5:{'value3':3},
key6=key7:{'value2':2},
key8=key3:{'value3':3},
key8=key4:{'value5':1},
key8=key6:{'value3':3}
}]
"""
count = 1
overall = {}
flag = 0
i = 0
while i < count:
value = None
result = None
new_value = None
for item in data:
result = extract_keys(item)
for element in result:
value = item
for key in element.split("="):
if key in value:
new_value = value[key]
if (type(new_value) == list) and (len(new_value) > 1):
if flag == 0:
count = len(new_value)
flag = 1
try:
new_value = new_value[i]
except:
new_value = new_value[-1]
elif (type(new_value) == list) and (len(new_value) == 1):
new_value = new_value[0]
value = new_value
if element not in overall:
overall[element] = {}
if type(value) != dict and type(value) != list:
if value not in overall[element]:
overall[element][value] = 1
i += 1
return overall
def calculate_signature_score(signature):
"""
Calculates the signature score for each signature as the sum of all the weights in it but ignoring the weights marked with "*".
Input:
A signature that contains tags of whether or not the weight should be included in calculating the signature.
Output:
An array containing the weights of all the signatures that should be considered.
Example:
Consider [
{'key1=key2':['val1', 40], 'key3=key4':['val2', 90]}, //40 + 90
{'key5=key6=key7':['val3', '*', 20], 'key8=key9':['val4', 80]}, //80
{'key10=key11':['val5', 40]} //40
Returns [130, 80, 40].
"""
score_arr = []
for sig in signature:
score = 0
for key, value in sig.items():
for val in value:
if (val[1] != "!") and (val[1] != "*"):
score += val[1]
elif val[1] == "!":
score += val[2]
score_arr.append(score)
return score_arr
def calculate_namedstructure_scores(data_final, all_signatures):
"""
Calculate the individual scores for each discrete-ACL. This includes calculating human_error scores,
signature_scores, and deviant scores.
Input:
data_final:
List of ACLs grouped into a Cluster.
Example:
[
[acl-1, acl-4, acl-5, acl-9], //Cluster-0
[acl-2, acl-3], //Cluster-1
[acl-7], //Cluster-2
[acl-6, acl-8] //Cluster-3
]
all_signatures:
Consolidated signature for each Cluster.
Output:
deviant_arr: Returns all deviant properties for the ACL. Empty list is returned if no deviant property
in the ACL.
count_arr: [[TODO]]
dev_score: Returns the deviant score for the deviant properties found. 0 if no deviant property.
acls_arr: [[TODO]]
sig_score: Returns the signature score of the ACL.
cluster_num: Returns the cluster number that the ACL belongs to.
acls_score: The score that is generated for each acl
human_errors_arr: Returns the human_error properties (IPValidity, DigitRepetition, PortRange) for each ACL and
empty list if no human_error properties present in the ACL.
human_error_score: Returns the score of the human error property calculated for the ACL. 0 is returned if
no human_error property exists in the ACL.
"""
deviant_arr = []
count_arr = []
acls_dict = {}
acls_arr = []
acls_score = []
sig_score = []
dev_score = []
cluster_num = []
human_errors_arr = []
human_errors_score = []
i = 0
for acl_list in data_final:
bug_count = 0
conformer_count = 0
signature = all_signatures[i]
for acl in acl_list:
flag = 0
if str(acl[0]) not in acls_dict:
acls_dict[str(acl[0])] = 1
acls_arr.append(acl[0])
cluster_num.append(i)
flag = 1
else:
print(acl[0])
print(acls_dict)
continue
sig_score.append(signature_scores[i])
deviant = []
count = 0
dev_c = 0
acl_c = 0
human_errors = []
human_error_category = {}
data = transform_data(acl)
for data_key, data_val in data.items():
if data_key in signature:
'''
Key Valid. Now check for actual Value
'''
for val in data_val.items():
(error_key, error_value), error_category = calculateHumanErrors(data_key, val[0], signature[data_key], file_name.split(".")[0])
if error_category:
human_errors.append((error_key, error_value))
if error_category not in human_error_category:
human_error_category[error_category] = 0
human_error_category[error_category] += 1
for sig_val in signature[data_key]:
if val[0] == sig_val[0]:
'''
value also present. Now check if value part of bug/sig/skip
'''
if sig_val[1] == "!":
dev_c += sig_val[2]
acl_c += sig_val[2]
deviant.append((data_key, sig_val[0]))
bug_count += 1
elif sig_val[1] == "*":
conformer_count += 1
continue
else:
conformer_count += 1
count += sig_val[1]
acl_c += sig_val[1]
else:
'''
Deviant Key
'''
if data_key != "lines=name":
deviant.append(data_key)
dev_c += data_val
acl_c += data_val
if flag == 1:
count_arr.append(count)
deviant_arr.append(deviant)
dev_score.append(dev_c)
acls_score.append(acl_c)
human_errors_arr.append(human_errors)
human_errors_score.append(calculate_human_error_score(human_error_category))
i += 1
return deviant_arr, count_arr, dev_score, acls_arr, sig_score, cluster_num, acls_score, human_errors_arr, human_errors_score
def checkIPValidity(ip_address):
"""
A reg-ex check to verify the validity of an IP address.
Input:
A list of IP addresses
Output:
A boolean representing the validity of the IP address.
Returns 'True' if all the IPs are valid and 'False' if any of the IP is invalid.
"""
try:
ip_address = ip_address.split(":")
for ip in ip_address:
IP_check = r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])?(\/)?((3[01]|3[02]|[12][0-9]|[0-9])?)$"
match = re.match(IP_check, ip)
if not match:
return False
return True
except Exception as e:
print(e)
return True
def checkPortRange(port_range):
"""
A check to verify that the port range is specified correctly (elem0 <= elem1).
Input:
A string that contains two numbers separated by a '-'.
Output:
A boolean representing the validity of the range (elem0 <= elem1).
Example:
52108-52109 (True)
466 - 466 (True)
466 - 465 (False)
"""
try:
port_split = port_range.split("-")
# compare numerically; string comparison would mis-order values such as "9" and "10"
if int(port_split[-1]) < int(port_split[0]):
return False
return True
except:
return True
def checkDigitRepetition(digit, signature):
"""
Checks for Digit repetition.
Input:
The value for the following keys: srcPorts, dstPorts, lengthRange
Output:
Returns True if there is any Human Error and the digit is repeated twice.
"""
try:
if type(digit) == str:
digit = float(digit.split(":")[0])
if digit == 0:
return False
for item in signature:
if type(item) == str:
item = int(item.split(":")[0])
if digit == (item*10+item%10):
print("--------", digit, item*10 + item%10)
return True
return False
except:
return False
def calculateHumanErrors(data_key, data, signature, namedStructure):
"""
Checks for simple human errors like entering invalid IP Addresses, incorrect port-ranges, and digit repetitions.
Input:
data_key: The nested keys calculated in the overall_dict and get_overall_dict methods.
Example: key1=key2=key4
data: The data value for the keys.
signature: The signature for the keys that was calculated in the calculate_signature_d method.
namedStructure: The type of the IP file.
Possible values: IP_Access_List, Route_Filter_List, Routing_Policy, VRF, others.
Output:
Returns the error and the category it belongs to.
Example:
key1=key2=key3 [1333.0.0.13] [1333.0.0.13] IP_Access_List
Returns:
key1=key2=key3 [1333.0.0.13] IP
"""
human_error = (None, None)
category = None
data_key = data_key.split("=")[-1]
signature_items = []
for sig_item in signature:
signature_items.append(sig_item[0])
if namedStructure == "IP_Access_List":
if data_key == "ipWildcard":
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
elif data_key in ["dstPorts", "srcPorts"]:
if not checkPortRange(data):
'''
Invalid Ports Range
'''
human_error = (data_key, data)
category = "RANGE"
elif namedStructure == "Route_Filter_List":
if data_key == "ipWildcard":
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
elif data_key == "lengthRange":
if not checkPortRange(data):
'''
Invalid Ports Range
'''
human_error = (data_key, data)
category = "RANGE"
elif namedStructure == "Routing_Policy":
if data_key == "communities":
if checkDigitRepetition(data, signature_items):
'''
Error Copying digits
'''
human_error = (data_key, data)
category = "DIGIT"
elif data_key == "ips":
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
elif namedStructure == "VRF":
if data_key in ["administrativeCost", "remoteAs", "metric", "localAs", "referenceBandwidth", ]:
if checkDigitRepetition(data, signature_items):
'''
Error Copying digits
'''
human_error = (data_key, data)
category = "DIGIT"
elif data_key in ["peerAddress", "localIp", "routerId", "network"]:
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
'''
Any Other namedStructure
'''
else:
try:
if re.search('IP|ip', data_key) and not re.search('[a-zA-Z]', data):
if not checkIPValidity(data):
'''
Invalid IP
'''
human_error = (data_key, data)
category = "IP"
elif not re.search("[a-zA-Z]", data):
if checkDigitRepetition(data, signature_items):
'''
Error Copying digits
'''
human_error = (data_key, data)
category = "DIGIT"
except:
pass
return human_error, category
def calculate_human_error_score(category_dict):
"""
Scores the human_errors that have been found with IPValidity and DigitRepetition errors
weighed as 'high,' i.e, 0.8 and PortRange errors weighed 'medium,' i.e., 0.5.
Input:
A dictionary containing the count of the error occurrences.
Output:
A weighted sum of all the errors found.
"""
total_score = 0
low = 0.2
medium = 0.5
high = 0.8
weightage_dict = {"IP": high, "RANGE": medium, "DIGIT": high}
for category, count in category_dict.items():
if count != 0:
#print("* Human Error Found *")
total_score += weightage_dict[category]/np.log(1+count)
return round(total_score/len(category_dict), 2) if category_dict else total_score
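# Illustrative sketch (not called by the script): each error category contributes
# its weight damped by log(1 + occurrences), and the result is averaged over categories.
def _demo_human_error_score():
    # one invalid IP plus two digit-repetition errors (hypothetical counts)
    return calculate_human_error_score({"IP": 1, "DIGIT": 2})  # ~0.94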
def flatten_json(data, delimiter):
"""
Flattens a JSON file.
Input:
data:
A JSON dictionary of hierarchical format.
{key1: {key2: value2, key3: value3}, key4: {key5: value5, key6: [value6, value7, value8]}}
delimiter:
A parameter to separate the keys in order to facilitate easy splitting.
Output:
A flattened dictionary with keys separated by the delimiter parameter.
key1_key2:value2, key1_key3:value3, key4_key5:value5, key4_key6:value6, key4_key6:value7, key4_key6:value8
"""
out = {}
def flatten(data, name=''):
if type(data) is dict:
for key in data:
flatten(data[key], name + key + delimiter)
elif type(data) is list:
i = 0
for elem in data:
flatten(elem, name + str(i) + delimiter)
i += 1
else:
out[name[:-1]] = data
flatten(data)
return out
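# Illustrative sketch (not used directly here): flatten_json collapses nested
# dicts/lists into one level, joining the path with the given delimiter.
def _demo_flatten_json():
    nested = {"key1": {"key2": "value2"}, "key4": ["value6", "value7"]}
    return flatten_json(nested, '=')
    # -> {"key1=key2": "value2", "key4=0": "value6", "key4=1": "value7"}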
def encode_data(data):
"""
Converts categorical values into numeric values. We use MultiLabelBinarizer to encode categorical data.
This is done in order to pass the data into clustering and other similar algorithms that can only handle numerical data.
Flattens each ACL list and then encodes them.
Input:
A Python list that contains all discrete-ACLs.
Output:
A Python list after encoding.
"""
flattenedData = []
allKeys = []
for NS in data:
flattenedNamedStructure = flatten_json(NS, '_')
flattenedData.append(flattenedNamedStructure)
for key in flattenedNamedStructure.keys():
if key not in allKeys:
allKeys.append(key)
mergedData = []
for NS in flattenedData:
mergedNS = []
for key, value in NS.items():
mergedNS.append(str(value))
mergedData.append(mergedNS)
mlb = MultiLabelBinarizer()
data_T = mlb.fit_transform(mergedData)
print("MLb classes=")
print(mlb.classes_)
return data_T, mlb.classes_
def export_clusters(data, acl_weight_mapper):
"""
Helper Method to verify authenticity of Clusters being formed.
Input:
The data that is sorted into list of Clusters.
Example:
[
[acl-1, acl-4, acl-5, acl-9], //Cluster-0
[acl-2, acl-3], //Cluster-1
[acl-7], //Cluster-2
[acl-6, acl-8] //Cluster-3
]
We also make use of acl_dict and node_name_dict dictionaries by searching for the ACL and
then getting the appropriate ACL_name and the nodes that the ACL is present in.
Output:
A csv file by the name of Generated_Clusters is written in the format:
Cluster-0 |||| Cluster-0 Names |||| Cluster-0 Nodes |||| Cluster-1 |||| Cluster-1 Names |||| Cluster-1 Nodes
acl-1 |||| permit tcp eq 51107 |||| st55in15hras |||| acl-2 |||| permit udp any eq 1200 |||| rt73ve11m5ar
acl-4 |||| permit tcp eq 51102 |||| st55in15hras, st55in17hras |||| acl-3 |||| permit udp any eq 120002 |||| rt73ve10m4ar
acl-5 |||| permit tcp eq 51100 |||| st55in17hras ||||
acl-9 |||| permit tcp eq 51109 |||| st55in17hras ||||
"""
column_labels = []
for index in range(len(data)):
column_labels.append("Cluster " + str(index))
column_labels.append("Cluster " + str(index) + " ACL Weights")
column_labels.append("Cluster " + str(index) + " Nodes")
data_to_export = pd.DataFrame(columns=column_labels)
for cluster_index, cluster_data in enumerate(data):
discrete_ACL_nodes = []
cluster_weights = []
for discrete_ACL in cluster_data:
temp = json.dumps(discrete_ACL[0], sort_keys=True)
temp_arr = []
try:
for node in namedstructure_node_mapper[temp]:
temp_arr.append(node)
discrete_ACL_nodes.append(temp_arr)
except:
discrete_ACL_nodes.append(None)
cluster_weights.append(acl_weight_mapper[temp])
cluster_data = pd.Series(cluster_data)
cluster_weights_series = pd.Series(cluster_weights)
discrete_ACL_nodes = pd.Series(discrete_ACL_nodes)
data_to_export["Cluster " + str(cluster_index)] = cluster_data
data_to_export["Cluster " + str(cluster_index) + " ACL Weights"] = cluster_weights_series
data_to_export["Cluster " + str(cluster_index) + " Nodes"] = discrete_ACL_nodes
file = file_name.split(".")[0]
print(file)
title = "Clusters_" + file + ".csv"
print(title)
data_to_export.to_csv(title)
def parse_data():
"""
A helper method to parse through the input configuration files and capture necessary information.
Input:
None. The file path parameter is read from the commandline arguments.
Output:
discrete_namedstructure: A list that contains stringified named-structures.
namedstructure_node_mapper: A dictionary that contains the named-structure configuration as key and a list of
nodes it is a part of as value.
"""
df = pd.read_json(sys.argv[2], orient="index")
discrete_namedstructure = []
namedstructure_node_mapper = {} # Maps each discrete_acl with all the nodes that it belongs to
discrete_nodes = []
for column in df.columns:
for index, data in df[column].iteritems():
if data is not None:
if 'lines' in data[0]:
data_holder = 'lines'
data_to_look_under = data[0][data_holder]
elif 'statements' in data[0]:
data_holder = 'statements'
data_to_look_under = data[0][data_holder]
else:
data_to_look_under = data
for discrete_acl in data_to_look_under:
if 'name' in discrete_acl:
del discrete_acl['name']
discrete_acl = json.dumps(discrete_acl, sort_keys=True)
discrete_namedstructure.append(discrete_acl)
if discrete_acl in namedstructure_node_mapper:
nodes = namedstructure_node_mapper[discrete_acl]
if index not in nodes:
nodes.append(index)
namedstructure_node_mapper[discrete_acl] = nodes
else:
namedstructure_node_mapper[discrete_acl] = [index]
if index not in discrete_nodes:
discrete_nodes.append(index)
print("The number of discrete nodes in a network is: ", len(discrete_nodes))
return discrete_namedstructure, namedstructure_node_mapper
def perform_pca_analysis(encoded_data, column_names):
"""
A helper method to analyse the data using PCA
"""
pca = PCA()
pca.fit(encoded_data)
cumulative_variance = np.cumsum(np.round(pca.explained_variance_ratio_, decimals=8) * 100);
labels = [x for x in range(1, len(cumulative_variance) + 1)];
loadings = pd.DataFrame(pca.components_.T, columns=labels, index=column_names)
significance = {}
for index in loadings.index:
temp_list = loadings.loc[index]
sig = 0
for value in temp_list:
sig += value * value
significance[index] = sig
plt.plot(cumulative_variance)
plt.xlabel("N-components")
plt.ylabel("Cumulative Explained Variance")
plt.show()
sorted_significance = sorted(significance.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
top_ten_attributes = []
for sigAttr in sorted_significance[:10]:
top_ten_attributes.append(sigAttr[0])
print("Top Ten Attributes:")
print(top_ten_attributes)
def silhouette_analysis(data, acl_weights):
"""
A helper method to perform an analysis of various scoring functions
"""
from sklearn.metrics import silhouette_score, davies_bouldin_score
k_range = range(2, 30)
elbow_scores = []
silhouette_scores = []
davies_bouldin_scores = []
elbow_file = open("elbow_scores.txt", "w")
silhouette_file = open("silhouette_scores.txt", "w")
davies_bouldin_file = open("davies_bouldin_scores.txt", "w")
for num_clusters in k_range:
print(num_clusters)
kmeans = KMeans(n_clusters=num_clusters)
cluster_labels = kmeans.fit_predict(data, None, sample_weight=acl_weights)
cluster_centers = kmeans.cluster_centers_
k_distance = cdist(data, cluster_centers, "euclidean")
distance = np.min(k_distance, axis=1)
distortion = np.sum(distance) / data.shape[0]
silhouette_avg = silhouette_score(data, cluster_labels)
davies_bouldin_avg = davies_bouldin_score(data, cluster_labels)
silhouette_scores.append(silhouette_avg)
davies_bouldin_scores.append(davies_bouldin_avg)
elbow_scores.append(distortion)
silhouette_file.write(str(silhouette_avg) + " ")
davies_bouldin_file.write(str(davies_bouldin_avg) + " ")
elbow_file.write(str(distortion) + " ")
kn_elbow = KneeLocator(list(k_range), elbow_scores, S=5.0, curve='convex', direction='decreasing')
plt.scatter(x=k_range, y=elbow_scores)
plt.xlabel("Range")
plt.ylabel("Elbow Score")
plt.vlines(kn_elbow.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()
kn_silhouette = KneeLocator(list(k_range), silhouette_scores, S=5.0, curve='convex', direction='increasing')
plt.scatter(x=k_range, y=silhouette_scores)
plt.xlabel("Range")
plt.ylabel("Silhouette Score")
plt.vlines(kn_silhouette.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()
kn_davies_bouldin = KneeLocator(list(k_range), davies_bouldin_scores, S=5.0, curve='convex', direction='decreasing')
plt.scatter(x=k_range, y=davies_bouldin_scores)
plt.xlabel("Range")
plt.ylabel("Davies Bouldin Score")
plt.vlines(kn_davies_bouldin.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()
'''
# Helper Methods End
# *****************************************************************************
# *****************************************************************************
'''
whitelistDict = {}
Z_SCORE_FLAG = 0
ACTION_FLAG = 0
k_select = 0
'''
Parsing Data
'''
try:
file_name = sys.argv[2].split("/")[-1]
network_name = "DATA_HERE_" + sys.argv[2].split("/")[-2]
print(network_name)
'''
Making Outlier Directory for Current Network
'''
if not os.path.exists(network_name):
os.makedirs(network_name)
flag_file = network_name + '/' + '.flag_' + file_name
if sys.argv[1] == "-j":
df = pd.read_json(sys.argv[2], orient="index")
try:
if sys.argv[4] == "-a":
ACTION_FLAG = 3
except:
ACTION_FLAG = 0
try:
Z_SCORE_FLAG = int(sys.argv[3])
except:
error_msg("Invalid Z-Score Argument sent", sys.argv[3])
f = open(flag_file,'w')
f.write('{}'.format(ACTION_FLAG))
f.close()
elif sys.argv[1] == "-e":
df = pd.read_json(sys.argv[2], orient= "index")
try:
with open(sys.argv[3], 'rb') as handle:
whitelistDict = pickle.load(handle)
except:
print("FileNotFoundError: Please check if file exists.")
ACTION_FLAG = 1
elif sys.argv[1] == "-d":
df = pd.read_json(sys.argv[2],orient = "index")
ACTION_FLAG = 2
else:
error_msg("Invalid Argument or flags sent", sys.argv[1])
except:
error_msg("Invalid File specified. Please check the input dataset", sys.argv[2])
outlier_filename = network_name + '/' + 'outlier_' + file_name
cluster_filename = network_name + '/' + '.cluster_' + file_name
sig_filename = network_name + '/' + '.sig_' + file_name
outlier_nodes_filename = network_name + '/' + '.outlier_nodes_' + file_name
print(outlier_filename, cluster_filename ,sig_filename, outlier_nodes_filename)
print("===========================================================")
print(Fore.BLUE, end='')
print("outlier-analyzer code started ...")
print(Style.RESET_ALL)
print(Fore.GREEN, end='')
start = time.time()
'''
Calculating outliers selected
'''
f = open(flag_file, 'r')
flag = f.readline()
f.close()
discrete_namedstructure, namedstructure_node_mapper = parse_data()
if (ACTION_FLAG == 0) or (ACTION_FLAG == 3):
mlb = MultiLabelBinarizer()
ns_weight_mapper = {}
data_for_clustering = []
namedstructure_weights = []
for ns in discrete_namedstructure:
if ns not in ns_weight_mapper:
ns_weight_mapper[ns] = 1
else:
ns_weight_mapper[ns] += 1
for ns, weight in ns_weight_mapper.items():
ns = json.loads(ns)
data_for_clustering.append(ns)
namedstructure_weights.append(weight)
encodedLists, column_names = encode_data(data_for_clustering)
df_enc = pd.DataFrame(encodedLists)
df_enc = df_enc.dropna(axis=1, how='any')
# perform_pca_analysis(encodedLists, column_names)
print("data encoding done...")
'''
Perform K-Means
'''
print("starting data clustering...")
perform_kmeans_clustering(df_enc, namedstructure_weights)
print("data clustering done...")
# silhouette_analysis(df_enc, acl_weights)
'''
Grouping data based on their Clusters
'''
cluster_range = np.arange(k_select)
data_final = []
data_final_enc = []
for index in cluster_range:
temp = []
temp_enc = []
for i in range(len(df_enc)):
if df_enc['kmeans_cluster_number'][i] == index:
temp.append([data_for_clustering[i]])
temp_enc.append([data_for_clustering[i]])
data_final.append(temp)
data_final_enc.append(temp_enc)
# export_clusters(data_final, acl_weight_mapper)
'''
Writing Clustered Data into a file
'''
with open(cluster_filename, 'w') as f:
f.write(json.dumps(data_final))
'''
Calculating Overall Structure per Cluster
'''
if ACTION_FLAG == 3:
overall_array_0 = overall_dict(data_final)
try:
overall_array = get_overall_dict(data_final)
except:
overall_array = overall_dict(data_final)
'''
Generating Signatures
'''
all_signatures = []
for i in range(len(overall_array)):
signature = calculate_signature_d(overall_array[i])
all_signatures.append(signature)
print("signature creation done...")
'''
Retuning Signature
'''
elif ACTION_FLAG == 1:
all_signatures = []
try:
with open(sig_filename, 'r') as f:
for item in f:
all_signatures.append(json.loads(item))
except FileNotFoundError:
print(Fore.RED, end='')
print("\nERROR: Calculate outliers on this data first!\n")
print(Style.RESET_ALL)
print("__________________________________")
print(Fore.RED, end='')
print("outlier-analyzer code failed #")
print(Style.RESET_ALL)
print("__________________________________")
sys.exit()
all_signatures = all_signatures[0]
wlDict = copy.deepcopy(whitelistDict['deviant'])
for edit_key, edit_value in whitelistDict['deviant']:
flag = 0
for signature in all_signatures:
if edit_key in signature:
for j in range(len(signature[edit_key])):
if edit_value in signature[edit_key][j][0]:
if signature[edit_key][j][1] == "!" or signature[edit_key][j][1] == "*":
try:
temp = (edit_value, signature[edit_key][j][2])
signature[edit_key][j] = temp
flag = 1
except Exception as e:
print(e)
if flag == 1:
wlDict.remove((edit_key, edit_value))
if wlDict:
print(Fore.RED, end='')
print("\nERROR : Specified Attributes {} either\n\tnot present or not a bug!".format(wlDict))
print(Style.RESET_ALL, end='')
print("__________________________________")
print(Fore.RED, end='')
print("outlier-analyzer code failed #")
print(Style.RESET_ALL, end='')
print("__________________________________")
sys.exit(0)
print("signature re-tuning done...")
data_final = []
with open(cluster_filename, 'r') as f:
for item in f:
data_final.append(json.loads(item))
data_final = data_final[0]
'''
Displaying the Outlier Nodes
'''
elif ACTION_FLAG == 2:
outlier_nodes_arr = []
try:
with open(outlier_nodes_filename, 'r') as f:
for item in f:
outlier_nodes_arr.append(json.loads(item))
except FileNotFoundError:
print(Fore.RED, end='')
print("\nERROR: Calculate outliers on this data first!\n")
print(Style.RESET_ALL)
print("__________________________________")
print(Fore.RED, end='')
print("outlier-analyzer code failed #")
print(Style.RESET_ALL)
print("__________________________________")
sys.exit()
print(Style.RESET_ALL)
print("########################")
print("Outlier Nodes are:")
outlier_nodes_arr = outlier_nodes_arr[0]
print(Fore.RED, end='')
print(*outlier_nodes_arr, sep="\n")
print(Style.RESET_ALL)
print("########################")
sys.exit(0)
'''
Scoring Signature
'''
signature_scores = calculate_signature_score(all_signatures)
print("signature scoring done...")
'''
Scoring ACLs
'''
deviant_arr, count_arr, dev_score, acls_arr, sig_score, cluster_num, acls_score, human_errors_arr, human_errors_score \
= calculate_namedstructure_scores(data_final, all_signatures)
print("acl scoring done...")
'''
Calculate outlier nodes
'''
count = 0
outlier_nodes = set()
for i in range(len(deviant_arr)):
if len(deviant_arr[i]) > 0:
count += 1
temp = json.dumps(acls_arr[i], sort_keys=True)
for item in namedstructure_node_mapper[temp]:
outlier_nodes.add(item)
with open(outlier_nodes_filename, 'w') as f:
f.write(json.dumps(list(outlier_nodes)))
'''
writing all signature to a hidden file
'''
with open(sig_filename, 'w') as f:
f.write(json.dumps(all_signatures))
nodes = []
for i in range(len(acls_arr)):
temp = json.dumps(acls_arr[i], sort_keys=True)
tempArr = []
try:
for item in namedstructure_node_mapper[temp]:
tempArr.append(item)
nodes.append(tempArr)
except:
nodes.append(None)
'''
Creating dataframe and exporting as a json file
'''
df_final = pd.DataFrame()
with open("deviant_array.txt", "w") as f:
print(deviant_arr, file=f)
print(human_errors_arr)
master_signatures = []
for i in range(len(data_final)):
for index in data_final[i]:
master_signatures.append(all_signatures[i])
# df_final['acl_name'] = acl_names
df_final['cluster_number'] = cluster_num
df_final['Conformer/Signature Definition'] = master_signatures
df_final['acl_structure'] = acls_arr
df_final['nodes'] = nodes
df_final['deviant_properties'] = deviant_arr
df_final['human_error_properties'] = human_errors_arr
df_final['human_error_score'] = human_errors_score
df_final['similarity_score'] = count_arr
df_final['acl_score'] = acls_score
df_final['max_sig_score'] = sig_score
outlier_flag = ['T' if len(deviant_prop)==0 else 'F' for deviant_prop in deviant_arr]
df_final['Outlier Flag'] = outlier_flag
df_final.to_json(outlier_filename, orient='split', index=False)
print(Style.RESET_ALL, end="")
end = time.time()
print(df_final)
print("###")
print(Fore.BLUE, end='')
print("OUTLIER-ANALYZER SUCCESSFUL #")
print("time to run : {} seconds".format(round(end - start), 3))
print(Style.RESET_ALL, end='')
print()
print("###########################################################")
print(outlier_nodes)
print(Fore.BLUE, end='')
print("\nTotal Outliers Count = {}".format(len(outlier_nodes)))
print(Style.RESET_ALL, end='')
print("\nTo view the detailed report, open the")
print("json file named: '{}'\n".format(outlier_filename))
print("###########################################################")
print()
sys.exit(0)
| 54,648 | 16,468 |
import json
from collections import OrderedDict
import pprint
import json
import sys
# prons = sys.argv
prons = ['pron1.json', 'pron2.json','pron3.json','pron4.json']
def war(pron):
with open(pron, encoding='utf8') as f:
d_update = json.load(f, object_pairs_hook=OrderedDict)
synset = {}
for d in d_update:
if d['synset'] in synset:
# keep the entry with the longer sense list
if len(synset[d['synset']]['senses']) < len(d['senses']):
synset[d['synset']] = { 'lemma': d['lemma'], 'senses': d['senses'] }
else:
synset[d['synset']] = { 'lemma': d['lemma'], 'senses': d['senses'] }
return synset
for pron in prons:
synset = war(pron)
print(json.dumps(synset, indent=2, ensure_ascii=False))
| 748 | 287 |
from flask import Blueprint,Flask
main = Blueprint('main',__name__)
from app.main import views
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
bootstrap= Bootstrap()
db = SQLAlchemy()
def create_app():
app= Flask(__name__)
#initializing flask extensions
bootstrap.init_app(app)
db.init_app(app)
#Registering the blueprint
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
return app
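# Hypothetical usage sketch (module layout assumed, not shown in this snippet):
# a small run script would build the app via the factory and start the server.
#
#   from app import create_app
#   app = create_app()
#   if __name__ == '__main__':
#       app.run(debug=True)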
| 495 | 157 |
#!/usr/bin/env python3
'''set frame height 10%, 80%, 10%'''
import tkinter as tk
root = tk.Tk()
root.geometry('400x300')
header = tk.Frame(root, bg='green')
content = tk.Frame(root, bg='red')
footer = tk.Frame(root, bg='green')
root.columnconfigure(0, weight=1) # 100%
root.rowconfigure(0, weight=1) # 10%
root.rowconfigure(1, weight=8) # 80%
root.rowconfigure(2, weight=1) # 10%
header.grid(row=0, sticky='news')
content.grid(row=1, sticky='news')
footer.grid(row=2, sticky='news')
root.mainloop()
| 506 | 218 |
from lib import *
generate_bins("./bld/bits/vex_pb0_partial.bit","./bld/bins/vex_pb0.bin",0x8D)
generate_bins("./bld/bits/vex_pb1_partial.bit","./bld/bins/vex_pb1.bin",0x8D)
generate_bins("./bld/bits/vex_pb2_partial.bit","./bld/bins/vex_pb2.bin",0x8D)
generate_bins("./bld/bits/vex_tmr_pb0_partial.bit","./bld/bins/vex_tmr_pb0.bin",0x8D)
generate_bins("./bld/bits/vex_tmr_pb1_partial.bit","./bld/bins/vex_tmr_pb1.bin",0x8D)
generate_bins("./bld/bits/vex_tmr_pb2_partial.bit","./bld/bins/vex_tmr_pb2.bin",0x8D)
generate_bins("./bld/bits/mb_pb0_partial.bit","./bld/bins/mb_pb0.bin",0x8D)
generate_bins("./bld/bits/mb_pb1_partial.bit","./bld/bins/mb_pb1.bin",0x8D)
generate_bins("./bld/bits/mb_pb2_partial.bit","./bld/bins/mb_pb2.bin",0x8D)
generate_bins("./bld/bits/mb_tmr_pb0_partial.bit","./bld/bins/mb_tmr_pb0.bin",0x8D)
generate_bins("./bld/bits/mb_tmr_pb1_partial.bit","./bld/bins/mb_tmr_pb1.bin",0x8D)
generate_bins("./bld/bits/mb_tmr_pb2_partial.bit","./bld/bins/mb_tmr_pb2.bin",0x8D)
generate_bins("./bld/bits/taiga_pb0_partial.bit","./bld/bins/taiga_pb0.bin",0x8D)
generate_bins("./bld/bits/taiga_pb1_partial.bit","./bld/bins/taiga_pb1.bin",0x8D)
generate_bins("./bld/bits/taiga_pb2_partial.bit","./bld/bins/taiga_pb2.bin",0x8D)
generate_bins("./bld/bits/taiga_tmr_pb0_partial.bit","./bld/bins/taiga_tmr_pb0.bin",0x8D)
generate_bins("./bld/bits/taiga_tmr_pb1_partial.bit","./bld/bins/taiga_tmr_pb1.bin",0x8D)
generate_bins("./bld/bits/taiga_tmr_pb2_partial.bit","./bld/bins/taiga_tmr_pb2.bin",0x8D)
generate_bins("./bld/bits/pico_pb0_partial.bit","./bld/bins/pico_pb0.bin",0x8D)
generate_bins("./bld/bits/pico_pb1_partial.bit","./bld/bins/pico_pb1.bin",0x8D)
generate_bins("./bld/bits/pico_pb2_partial.bit","./bld/bins/pico_pb2.bin",0x8D)
generate_bins("./bld/bits/pico_tmr_pb0_partial.bit","./bld/bins/pico_tmr_pb0.bin",0x8D)
generate_bins("./bld/bits/pico_tmr_pb1_partial.bit","./bld/bins/pico_tmr_pb1.bin",0x8D)
generate_bins("./bld/bits/pico_tmr_pb2_partial.bit","./bld/bins/pico_tmr_pb2.bin",0x8D)
generate_bins("./bld/bits/kron_pb0_partial.bit","./bld/bins/kron_pb0.bin",0x8D)
generate_bins("./bld/bits/kron_pb1_partial.bit","./bld/bins/kron_pb1.bin",0x8D)
generate_bins("./bld/bits/kron_pb2_partial.bit","./bld/bins/kron_pb2.bin",0x8D)
generate_bins("./bld/bits/kron_tmr_pb0_partial.bit","./bld/bins/kron_tmr_pb0.bin",0x8D)
generate_bins("./bld/bits/kron_tmr_pb1_partial.bit","./bld/bins/kron_tmr_pb1.bin",0x8D)
generate_bins("./bld/bits/kron_tmr_pb2_partial.bit","./bld/bins/kron_tmr_pb2.bin",0x8D) | 2,524 | 1,372 |
import unittest
from pysie.dsl.set import TernarySearchTrie
class TernarySearchTrieUnitTest(unittest.TestCase):
def test_map(self):
trie = TernarySearchTrie()
self.assertTrue(trie.is_empty())
trie.put('hello', 'world')
self.assertTrue(trie.contains_key('hello'))
self.assertEqual(trie.get('hello'), 'world')
trie.put('hi', 'there')
self.assertTrue(trie.contains_key('hi'))
self.assertEqual(trie.get('hi'), 'there')
self.assertEqual(trie.size(), 2)
for i in range(100):
trie.put(str(i), i)
self.assertTrue(trie.contains_key(str(i)))
self.assertEqual(trie.size(), 102)
trie.delete('hi')
self.assertFalse(trie.contains_key('hi'))
self.assertEqual(trie.size(), 101)
keys = trie.keys()
self.assertEqual(len(keys), 101)
if __name__ == '__main__':
unittest.main() | 922 | 328 |
from sanic import Blueprint
from sanic import response
from sanic.log import logger
from sanic_openapi import doc
from .user import get_user, get_routes
user_svc = Blueprint('user_svc')
@user_svc.get('/currentUser', strict_slashes=True)
@doc.summary('get current user info')
async def user(request):
try:
user = get_user()
return response.json(user, status=200)
except Exception:
logger.exception('failed to get current user')
return response.json({}, status=500)
@user_svc.get('/auth_routes', strict_slashes=True)
@doc.summary('get authorized routes')
async def routes(request):
try:
routes = get_routes()
return response.json(routes, 200)
except Exception:
logger.exception('failed to get routes')
return response.json({}, status=500)
| 825 | 255 |
from opentrons import protocol_api
# Rename to 'purification_template' and paste into 'template_ot2_scripts' folder in DNA-BOT to use
metadata = {
'apiLevel': '2.8',
'protocolName': 'purification_template',
'description': 'Implements magbead purification reactions for BASIC assembly using an opentrons OT-2'}
# example values produced by DNA-BOT for a single construct containing 5 parts, un-comment and run to test the template:
sample_number=8
ethanol_well='A3'
def run(protocol: protocol_api.ProtocolContext):
# added run function for API version 2
def magbead(
sample_number,
ethanol_well,
elution_buffer_well='A1',
sample_volume=30,
bead_ratio=1.8,
elution_buffer_volume=40,
incubation_time=5,
settling_time=2,
# if using Gen 2 magnetic module, need to change time! see: https://docs.opentrons.com/v2/new_modules.html
# "The GEN2 Magnetic Module uses smaller magnets than the GEN1 version...this means it will take longer for the GEN2 module to attract beads."
# Recommended Magnetic Module GEN2 bead attraction time:
# Total liquid volume <= 50 uL: 5 minutes
# this template was written with the Gen 1 magnetic module, as it is compatible with API version 2
drying_time=5,
elution_time=2,
sample_offset=0,
tiprack_type="opentrons_96_tiprack_300ul"):
"""
Selected args:
ethanol_well (str): well in reagent container containing ethanol.
elution_buffer_well (str): well in reagent container containing elution buffer.
sample_offset (int): offset the initial sample column by the specified value.
"""
### Constants
# Pipettes
PIPETTE_ASPIRATE_RATE = 25
PIPETTE_DISPENSE_RATE = 150
TIPS_PER_SAMPLE = 9
PIPETTE_TYPE = 'p300_multi_gen2'
# new constant for easier swapping between pipette types
# Tiprack
CANDIDATE_TIPRACK_SLOTS = ['3', '6', '9', '2', '5']
# Magnetic Module
MAGDECK_POSITION = '1'
# Mix Plate
MIX_PLATE_TYPE = '4ti_96_wellplate_200ul'
# modified from custom labware as API 2 doesn't support labware.create anymore, so the old add_labware script can't be used
# also acts as the type of plate loaded onto the magnetic module
MIX_PLATE_POSITION = '4'
# Reagents
REAGENT_CONTAINER_TYPE = 'brooksreservoirplate_12_wellplate_21000ul'
# modified from custom labware as API 2 doesn't support labware.create anymore, so the old add_labware script can't be used
REAGENT_CONTAINER_POSITION = '7'
# Beads
BEAD_CONTAINER_TYPE = '4ti_96_wellplate_200ul'
# modified from custom labware as API 2 doesn't support labware.create anymore, so the old add_labware script can't be used
# old plate type was '4ti0136_96_deep-well'
BEAD_CONTAINER_POSITION = '8'
# Settings
LIQUID_WASTE_WELL = 'A5'
BEADS_WELL = 'A1'
DEAD_TOTAL_VOL = 5
SLOW_HEAD_SPEEDS = {'x': 600 // 4, 'y': 400 // 4, 'z': 125 // 10, 'a': 125 // 10}
DEFAULT_HEAD_SPEEDS = {'x': 400, 'y': 400, 'z': 125, 'a': 100}
IMMOBILISE_MIX_REPS = 10
MAGDECK_HEIGHT = 20
AIR_VOL_COEFF = 0.1
ETHANOL_VOL = 150
WASH_TIME = 0.5
ETHANOL_DEAD_VOL = 50
ELUTION_MIX_REPS = 20
ELUTANT_SEP_TIME = 1
ELUTION_DEAD_VOL = 2
### Errors
if sample_number > 48:
raise ValueError('sample number cannot exceed 48')
### Loading Tiprack
# Calculates whether one/two/three/four/five tipracks are needed, which are in slots 3, 6, 9, 2, and 5 respectively
total_tips = sample_number * TIPS_PER_SAMPLE
tiprack_num = total_tips // 96 + (1 if total_tips % 96 > 0 else 0)
slots = CANDIDATE_TIPRACK_SLOTS[:tiprack_num]
tipracks = [protocol.load_labware(tiprack_type, slot) for slot in slots]
# changed to protocol.load_labware for API version 2
### Loading Pipettes
pipette = protocol.load_instrument(PIPETTE_TYPE, mount="left", tip_racks=tipracks)
# changed to protocol.load_instrument for API version 2
# changed from P300_MULTI to PIPETTE_TYPE constant, which is set to p300_multi_gen2
# removed 'aspirate_flow_rate=PIPETTE_ASPIRATE_RATE, dispense_flow_rate=PIPETTE_DISPENSE_RATE'
# no longer taken as arguments in API version 2
pipette.flow_rate.aspirate = PIPETTE_ASPIRATE_RATE
pipette.flow_rate.dispense = PIPETTE_DISPENSE_RATE
# for reference: default aspirate/dispense flow rate for p300_multi_gen2 is 94 ul/s
### Define Labware
# Magnetic Module
MAGDECK = protocol.load_module('magdeck', MAGDECK_POSITION)
# changed to protocol.load_module for API version 2
# 'magdeck' is the gen 1 magnetic module, use 'magnetic module gen2' for the gen 2 magnetic module
# if using gen 2 module, need to change settling time! (see comments under Constants)
MAGDECK.disengage()
# disengages the magnets when it is turned on
mag_plate = MAGDECK.load_labware(MIX_PLATE_TYPE)
# old code:
# mag_plate = labware.load(MIX_PLATE_TYPE, MAGDECK_POSITION, share=True)
# changed to MAGDECK.load_labware for API version 2
# removed MAGDECK_POSITION and share=True as API version 2 uses ModuleContext.load_labware() to load plates directly onto the magnetic module
# Mix Plate
mix_plate = protocol.load_labware(MIX_PLATE_TYPE, MIX_PLATE_POSITION)
# changed to protocol.load_labware for API version 2
# Reagents
reagent_container = protocol.load_labware(REAGENT_CONTAINER_TYPE, REAGENT_CONTAINER_POSITION)
# changed to protocol.load_labware for API version 2
# Beads Container
bead_container = protocol.load_labware(BEAD_CONTAINER_TYPE, BEAD_CONTAINER_POSITION)
# changed to protocol.load_labware for API version 2
### Calculating Columns
# Total number of columns
col_num = sample_number // 8 + (1 if sample_number % 8 > 0 else 0)
# Columns containing samples in location 1 (magnetic module)
# generates a list of lists: [[A1, B1, C1...], [A2, B2, C2...]...]
samples = [col for col in mag_plate.columns()[sample_offset : col_num + sample_offset]]
# old code:
# samples = [col for col in mag_plate.cols()[0 + sample_offset : col_num + sample_offset]]
# load_labware needs to take 'columns' attribute instead of just 'cols' in API version 2
# removed '0 +'
# Columns to mix beads and samples in location 4 (mix plate)
mixing = [col for col in mix_plate.columns()[sample_offset:col_num + sample_offset]]
# old code:
# mixing = [col for col in mix_plate.columns()[0 + sample_offset:col_num + sample_offset]]
# load_labware needs to take 'columns' attribute instead of just 'cols' in API version 2
# removed '0 +'
# Columns to dispense output in location 1 (magnetic module)
# purified parts are dispensed 6 columns to the right of their initial location
# this is why the number of samples cannot exceed 48
output = [col for col in mag_plate.columns()[6 + sample_offset:col_num + 6 + sample_offset]]
# old code:
# output = [col for col in mag_plate.cols()[6 + sample_offset:col_num + 6 + sample_offset]]
# load_labware needs to take 'columns' attribute instead of just 'cols' in API version 2
### Defining Wells for Reagents, Liquid Waste, and Beads
liquid_waste = reagent_container.wells(LIQUID_WASTE_WELL)
ethanol = reagent_container.wells(ethanol_well)
elution_buffer = reagent_container.wells(elution_buffer_well)
beads = bead_container[BEADS_WELL]
# old code:
# beads = bead_container.wells(BEADS_WELL)
# removed .wells, which created a list containing a single well position instead of just a well position
### Define bead and mix volume
bead_volume = sample_volume * bead_ratio
if bead_volume / 2 > pipette.max_volume:
mix_vol = pipette.max_volume
else:
mix_vol = bead_volume / 2
total_vol = bead_volume + sample_volume + DEAD_TOTAL_VOL
### Steps
# Mix beads and parts
for target in range(int(len(samples))):
# Aspirate beads
pipette.pick_up_tip()
pipette.aspirate(bead_volume, beads)
protocol.max_speeds.update(SLOW_HEAD_SPEEDS)
# old code:
# robot.head_speed(**SLOW_HEAD_SPEEDS, combined_speed=max(SLOW_HEAD_SPEEDS.values()))
# robot.head_speed not used in API version 2
# replaced with protocol.max_speeds
# new code no longer uses the lower value between combined speed or specified speed
# just uses each axis' specified speed directly
# Aspirate samples
pipette.aspirate(sample_volume + DEAD_TOTAL_VOL, samples[target][0])
# old code:
# pipette.aspirate(sample_volume + DEAD_TOTAL_VOL, samples[target][0])
# TypeError: location should be a Well or Location, but it is [list of all wells in column 1]
# added [0] because samples[target] returned a list of every well in column 1
# the aspirate command for multi channel pipettes takes just one well (the well furthest from the door, row A) as the position of the pipette
# Transfer and mix on mix_plate
pipette.dispense(total_vol, mixing[target][0])
# similar to above, added [0] because samples[target] returned a list of every well in column 1 rather than just one well
pipette.mix(IMMOBILISE_MIX_REPS, mix_vol, mixing[target][0])
# similar to above, added [0] because samples[target] returned a list of every well in column 1 rather than just one well
pipette.blow_out()
# Dispose of tip
protocol.max_speeds.update(DEFAULT_HEAD_SPEEDS)
# old code:
# robot.head_speed(**DEFAULT_HEAD_SPEEDS, combined_speed=max(DEFAULT_HEAD_SPEEDS.values()))
# robot.head_speed not used in API version 2
# replaced with protocol.max_speeds
# the new code no longer uses the lower of the combined speed and the specified speed
# it just uses each axis's specified speed directly
pipette.drop_tip()
# Immobilise sample
protocol.delay(minutes=incubation_time)
# old code:
# pipette.delay(minutes=incubation_time)
# API version 2 no longer has delay() for pipettes, it uses protocol.delay() to pause the entire protocol
# Transfer beads+samples back to magdeck
for target in range(int(len(samples))):
pipette.transfer(total_vol, mixing[target], samples[target], blow_out=True, blowout_location='destination well')
# added blowout_location=destination well because default location of blowout is waste in API version 2
# Engage MagDeck and incubate
MAGDECK.engage(height=MAGDECK_HEIGHT)
protocol.delay(minutes=settling_time)
# old code:
# pipette.delay(minutes=settling_time)
# API Version 2 no longer has delay() for pipettes, it uses protocol.delay() to pause the entire protocol
# Remove supernatant from magnetic beads
for target in samples:
pipette.transfer(total_vol, target, liquid_waste, blow_out=True)
# Wash beads twice with 70% ethanol
air_vol = pipette.max_volume * AIR_VOL_COEFF
for cycle in range(2):
for target in samples:
pipette.transfer(ETHANOL_VOL, ethanol, target, air_gap=air_vol)
protocol.delay(minutes=WASH_TIME)
# old code:
# pipette.delay(minutes=WASH_TIME)
# API Version 2 no longer has delay() for pipettes, it uses protocol.delay() to pause the entire protocol
for target in samples:
pipette.transfer(ETHANOL_VOL + ETHANOL_DEAD_VOL, target, liquid_waste, air_gap=air_vol)
# Dry at room temperature
protocol.delay(minutes=drying_time)
# old code:
# pipette.delay(minutes=drying_time)
# API Version 2 no longer has delay() for pipettes, it uses protocol.delay() to pause the entire protocol
# Disengage MagDeck
MAGDECK.disengage()
# Mix beads with elution buffer
if elution_buffer_volume / 2 > pipette.max_volume:
mix_vol = pipette.max_volume
else:
mix_vol = elution_buffer_volume / 2
for target in samples:
pipette.transfer(elution_buffer_volume, elution_buffer, target, mix_after=(ELUTION_MIX_REPS, mix_vol))
# Incubate at room temperature
protocol.delay(minutes=elution_time)
# old code:
# pipette.delay(minutes=elution_time)
# API Version 2 no longer has delay() for pipettes, it uses protocol.delay() to pause the entire protocol
# Engage MagDeck (remains engaged for DNA elution)
MAGDECK.engage(height=MAGDECK_HEIGHT)
protocol.delay(minutes=ELUTANT_SEP_TIME)
# old code:
# pipette.delay(minutes=ELUTANT_SEP_TIME)
# API Version 2 no longer has delay() for pipettes, it uses protocol.delay() to pause the entire protocol
# Transfer purified parts to a new well
for target, dest in zip(samples, output):
pipette.transfer(elution_buffer_volume - ELUTION_DEAD_VOL, target,
dest, blow_out=False)
# Disengage MagDeck
MAGDECK.disengage()
magbead(sample_number=sample_number, ethanol_well=ethanol_well)
# removed the elution_buffer_well='A1' argument; that default is now set where the function is defined
| 15,071 | 4,773 |
# Generated by Django 3.2.4 on 2021-08-15 12:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0008_auto_20210808_0801'),
]
operations = [
migrations.AddField(
model_name='article',
name='doi',
field=models.CharField(blank=True, max_length=100),
),
migrations.AddField(
model_name='article',
name='impactFactor',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='article',
name='journal_type',
field=models.CharField(choices=[('National', 'National'), ('International', 'International')], default='National', max_length=20),
),
migrations.AddField(
model_name='article',
name='peer_reviewed',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='article',
name='sjrRating',
field=models.FloatField(default=0),
),
migrations.AddField(
model_name='book',
name='doi',
field=models.CharField(blank=True, max_length=100),
),
migrations.AddField(
model_name='conferencearticle',
name='doi',
field=models.CharField(blank=True, max_length=100),
),
migrations.AlterField(
model_name='article',
name='article_link',
field=models.URLField(blank=True),
),
migrations.AlterField(
model_name='book',
name='book_link',
field=models.URLField(blank=True),
),
migrations.AlterField(
model_name='conferencearticle',
name='conference_link',
field=models.URLField(blank=True),
),
]
| 1,921 | 556 |
import pytest
import json
from django.urls import reverse
def test_view_get_page_not_found(client):
response = client.get('/')
assert response.status_code == 404
def test_purchase_without_authentication(client, db):
url = reverse('purchase')
body = {
"products" : [
{
"product_id": "aa44ac86-3705-4954-8a1d-aa4733623870"
},
{
"product_id": "66efcb62-7b50-41b9-a3ca-7a6382eba6e8"
}
],
"credit_card":{
"card_number":"1234123412341234"
}
}
response = client.post(url, data=body)
assert response.status_code == 401
def test_purchase_history_without_authentication(client, db):
url = reverse('purchase-history')
response = client.post(url)
assert response.status_code == 401
def test_view_get_list_products_without_authentication(client, db):
url = reverse('products')
response = client.get(url)
assert response.status_code == 200
def test_purchase_with_authentication_and_field_errors(client, django_user_model):
url = reverse('purchase')
username="user1"
password="password"
data = {'products' : [],'credit_card':{'card_number':'1234123412341234','cvv':789,'exp_date':'12/24'}}
django_user_model.objects.create_user(username=username, password=password)
client.login(username=username, password=password)
response = client.post(url, data=json.loads(json.dumps(data)))
r = response.json()
assert response.status_code == 400
def test_purchase_history_with_authentication(client, django_user_model):
url = reverse('purchase-history')
username="user1"
password="password"
django_user_model.objects.create_user(username=username, password=password)
client.login(username=username, password=password)
response = client.get(url)
assert response.status_code == 200
@pytest.mark.django_db
def test_view_create_product_and_seller(client, db):
url_seller = reverse('seller')
url_product = reverse('products')
data_seller = {"name":"seller"}
response_seller = client.post(url_seller, data_seller)
seller_id = response_seller.json().get('seller_id')
assert response_seller.status_code == 201
response_get_seller = client.get(url_seller+seller_id)
assert response_get_seller.status_code == 200
data_product = {"title":"Notebook LeVelho","price":100000,"seller_id":seller_id}
response_product = client.post(url_product, data_product)
assert response_product.status_code == 201
| 2,567 | 884 |
'''
Created on Nov 16, 2011
@author: jcg
'''
from Features.Feature import Feature
import Functions
from uuid import uuid4
class CAI(Feature):
"""
CAI Feature
solution - solution where CAI should be computed
label - some label to append to the name
cai_range - start and end position to calculate CAI - a tuple in the form (start, end)
mutable_region - a list with all bases that can be mutated
cds_region - a pair with begin and end of CDSs - example: (0,100)
keep_aa - boolean option indicating if in the design mode amino acids should be kept
"""
def __init__(self, caiObject = None, solution = None, label="", args = { 'cai_range' : (0,59),
'mutable_region' : None,
'cds_region' : None,
'keep_aa' : True }):
if caiObject == None: #create new instance
#General properties of feature
Feature.__init__(self, solution=solution, label=label)
#Specifics of this Feature
self.cai_range = args['cai_range']
self.sequence = solution.sequence[self.cai_range[0]:(self.cai_range[1]+1)]
self.mutable_region = args['mutable_region'] if args.has_key('mutable_region') else solution.mutable_region
self.cds_region = args['cds_region'] if args.has_key('cds_region') else solution.cds_region
self.keep_aa = args['keep_aa'] if args.has_key('keep_aa') else solution.keep_aa
self.set_scores()
self.set_level()
else: #copy instance
Feature.__init__(self, caiObject)
self.cai_range = caiObject.cai_range
self.sequence = caiObject.sequence
self.mutable_region = caiObject.mutable_region
self.cds_region = caiObject.cds_region
self.keep_aa = caiObject.keep_aa
self.codons_cai = caiObject.codons_cai
self.scores = caiObject.scores
def set_scores(self, scoring_function=Functions.analyze_cai):
self.scores[self.label+"CAI"] = scoring_function(self.sequence)
def mutate(self, operator=Functions.SimpleCAIOperator):
if not self.targetInstructions:
return None
new_seq = operator(self.solution.sequence, self.cai_range, self.keep_aa, self.mutable_region, self.cds_region, self.targetInstructions['direction'])
if not new_seq:
return None
return Solution.Solution(sol_id=str(uuid4().int), sequence=new_seq, cds_region = self.cds_region, mutable_region = self.mutable_region, parent=self.solution, design=self.solution.designMethod)
import Solution
| 3,056 | 838 |
from django.urls import path
from .views import portfolio_view
urlpatterns = [
path('', portfolio_view, name='projects_view')
] | 132 | 41 |
#! /usr/bin/env python
'''
This node uses the detection_info topic and performs the actual Ur5 arm manipulation
'''
import rospy
import random
from math import pi, sin, cos
from geometry_msgs.msg import Point, Quaternion, Pose, PointStamped, PoseStamped
from std_msgs.msg import Header
from object_msgs.msg import ObjectPose
from std_srvs.srv import Empty
from tf.transformations import quaternion_from_euler
from tf.transformations import euler_from_quaternion
from ebot_mani.srv import *
from testNav import Ebot
from perception.srv import *
transformPose = rospy.ServiceProxy('/get_transform_pose', GetTransformPose)
transformPoint = rospy.ServiceProxy('/get_transform_point', GetTransformPoint)
def TransformPoint(point, from_frame, to_frame):
req = GetTransformPointRequest()
req.point = point
req.from_frame = from_frame
req.to_frame = to_frame
return transformPoint(req).point
# width estimate = 0.2 + width of detection window (printed in terminal)
# w_dict uses real model names
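# e.g. (hypothetical numbers) a detection window width of ~0.07086 printed for coke_can
# would give an estimate of 0.2 + 0.07086 = 0.27086, the coke_can entry below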
w_dict = {'coke_can': 0.27086,
'battery': 0.26500,
'glue': 0.31,
'eYFi_board': 0.5,
'adhesive': 0.267674286664,
'water_glass': 0.2,
'robot_wheels': 0.26,
'FPGA_board': 0.3
}
def printReached(name):
print(">> " + name + " Reached")
def printPicked(name):
print(">> " + name + " Picked")
def printDropped(name, dropbox):
print(">> " + name + " Dropped in " + dropbox)
def printPoint(point):
p = point
print("create_point(%0.5f, %0.5f, %0.5f)" %
(p.x, p.y, p.z))
def create_point(x, y, z):
position = Point()
position.x = x
position.y = y
position.z = z
return position
def printPose(pose):
p = pose.position
q = pose.orientation
print("create_pose_quaternion(%0.5f, %0.5f, %0.5f, %0.5f, %0.5f, %0.5f, %0.5f)" %
(p.x, p.y, p.z, q.x, q.y, q.z, q.w))
def create_pose_quaternion(x, y, z, qx, qy, qz, qw):
'''
returns a Pose() object from the given x, y, z, qx, qy , qz, qw values
'''
pose = Pose()
pose.position.x = x
pose.position.y = y
pose.position.z = z
pose.orientation.x = qx
pose.orientation.y = qy
pose.orientation.z = qz
pose.orientation.w = qw
return pose
def orient_from_euler(roll, pitch, yaw):
'''
Input is roll, pitch, yaw
output is Quaternion pose.orientation
'''
q = quaternion_from_euler(roll, pitch, yaw)
o = Quaternion()
o.x, o.y, o.z, o.w = q[0], q[1], q[2], q[3]
return o
def createPoseStamped(point):
poseStamped = PoseStamped()
poseStamped.header.frame_id = 'base_link'
poseStamped.header.stamp = rospy.Time.now()
poseStamped.pose.position = point
poseStamped.pose.orientation.x = 0
poseStamped.pose.orientation.y = -0.7071
poseStamped.pose.orientation.z = 0
poseStamped.pose.orientation.w = 0.7071
return poseStamped
def pickupObject(object_name):
'''
Note : object_name should be the real model name and not the gazebo model name
'''
ur5.openGripper()
graspPose_pub.publish(createPoseStamped(detect.dict[object_name]))
if object_name == 'eYFi_board':
# TODO need a better way of finding the object's yaw angle instead of manually giving it
return ur5.graspObjectVertical(detect.dict[object_name], width=w_dict[object_name], yaw=pi/4).success
elif object_name == 'FPGA_board':
# return ur5.graspObjectVertical(detect.dict[object_name], width=w_dict[object_name], yaw=pi/3).success
return ur5.graspObjectHorizontal(detect.dict[object_name], width=w_dict[object_name], yaw=-pi/6)
else:
# .success
return ur5.graspObjectHorizontal(detect.dict[object_name], width=w_dict[object_name], yaw=0)
class Detect():
def __init__(self):
self.dict = {}
rospy.loginfo("waiting for detect service")
rospy.wait_for_service('/ebot/detect')
self.detectTable = rospy.ServiceProxy('/ebot/detectTable', Empty)
self.detect_service = rospy.ServiceProxy('/ebot/detect', Empty)
rospy.Subscriber("/detection_info", ObjectPose, self.detect_callback)
def print_detected(self):
for item in self.dict.keys():
print(">> " + item + " Identified")
def detect(self):
self.dict = {}
self.detect_service()
rospy.sleep(2)
self.print_detected()
def detect_callback(self, msg):
self.dict[msg.name] = msg.pose.pose.position
self.frame_id = msg.pose.header.frame_id
class Ur5():
def __init__(self):
rospy.loginfo("waiting for ur5_service")
rospy.wait_for_service('ebot_mani/set_named_pose')
rospy.wait_for_service('ebot_mani/set_pose')
rospy.wait_for_service('ebot_mani/set_gripper')
rospy.wait_for_service('ebot_mani/open_gripper')
rospy.wait_for_service('ebot_mani/grasp_object_vertical')
rospy.wait_for_service('ebot_mani/grasp_object_horizontal')
rospy.wait_for_service('ebot_mani/set_pose_relative')
rospy.loginfo("connected to services")
self.go_to_named_pose = rospy.ServiceProxy(
'ebot_mani/set_named_pose', SetNamedPose)
self.print_name_pose = rospy.ServiceProxy(
'/ebot_mani/print_name_pose', SetNamedPose)
self.go_to_pose = rospy.ServiceProxy('ebot_mani/set_pose', SetPose)
self.closeGripper = rospy.ServiceProxy(
'ebot_mani/set_gripper', SetGripper)
self.openGripper = rospy.ServiceProxy('ebot_mani/open_gripper', Empty)
self.graspObjectVerticalService = rospy.ServiceProxy(
'ebot_mani/grasp_object_vertical', GraspObject)
self.graspObjectHorizontalService = rospy.ServiceProxy(
'ebot_mani/grasp_object_horizontal', GraspObject)
self.set_pose_relative = rospy.ServiceProxy(
'ebot_mani/set_pose_relative', SetPose)
self.getCurrentPoseOdom = rospy.ServiceProxy(
'ebot_mani/get_current_pose_odom', GetPose)
self.set_pose_odom = rospy.ServiceProxy(
'ebot_mani/set_pose_odom', SetPose)
self.set_pose_wrist = rospy.ServiceProxy(
'ebot_mani/set_pose_wrist', SetPose)
self.align_wrist = rospy.ServiceProxy('ebot_mani/align_wrist', Empty)
self.set_pose_wrist_no_align = rospy.ServiceProxy(
'ebot_mani/set_pose_wrist_no_align', SetPose)
def go_to_pose_wrist(self, arg_pose):
req = SetPoseRequest()
req.pose = arg_pose
return self.set_pose_wrist(req).success
def go_to_pose_wrist_no_align(self, arg_pose):
req = SetPoseRequest()
req.pose = arg_pose
return self.set_pose_wrist_no_align(req).success
def go_to_pose_relative(self, arg_pose):
req = SetPoseRequest()
req.pose = arg_pose
return self.set_pose_relative(req).success
# def graspObjectHorizontal(self, point, width, yaw=0):
# req = GraspObjectRequest()
# req.point = point
# req.width = width
# req.yaw = yaw
# return self.graspObjectHorizontalService(req)
def graspObjectVerticalOld(self, point, width, yaw):
req = GraspObjectRequest()
req.point = point
req.width = width
req.yaw = yaw
return self.graspObjectVerticalService(req).success
def graspObjectVertical(self, point, width, yaw):
'''
Given the position of object within reach it grasps it.
Argument : position (Point msg)
'''
self.align_wrist()
req = GetTransformPointRequest()
req.point = point
req.from_frame = "base_link"
req.to_frame = "wrist_3_link"
point = transformPoint(req).point
graspPose = Pose()
graspPose.position = point
graspPose.position.x -= 0.25 * sin(yaw)
graspPose.position.y -= 0.15 # + 0.1
graspPose.position.z -= 0.12 # Should be 0.25 * sin(grasp_angle)
# Pose just Above the object
flag = self.go_to_pose_wrist(graspPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
# Set grasping angle
if yaw != 0.0:
newOPose = Pose()
newOPose.orientation = orient_from_euler(0, 0, yaw)
flag = self.go_to_pose_wrist(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
newOPose = Pose()
newOPose.orientation = orient_from_euler(0.558505, 0, 0) # 32 deg
flag = self.go_to_pose_wrist_no_align(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
newOPose = Pose()
newOPose.position.z += 0.01
flag = self.go_to_pose_wrist(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
return flag
def graspObjectHorizontal(self, point, width, yaw):
'''
Given the position of object within reach it grasps it.
Argument : position (Point msg)
'''
self.align_wrist()
req = GetTransformPointRequest()
req.point = point
req.from_frame = "base_link"
req.to_frame = "wrist_3_link"
point = transformPoint(req).point
graspPose = Pose()
graspPose.position = point
graspPose.position.x -= 0.25 * sin(yaw)
graspPose.position.y -= 0.188 # + 0.1
graspPose.position.z -= 0.07 # Should be 0.25 * sin(grasp_angle)
# Pose just Above the object
flag = self.go_to_pose_wrist(graspPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
# Set grasping angle
if yaw != 0.0:
newOPose = Pose()
newOPose.orientation = orient_from_euler(0, 0, yaw) # 32 deg
flag = self.go_to_pose_wrist(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
newOPose = Pose()
newOPose.orientation = orient_from_euler(0.558505, 0, 0) # 32 deg
flag = self.go_to_pose_wrist_no_align(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
# # #Grasp
self.closeGripper(width)
rospy.sleep(1)
newOPose = Pose()
newOPose.position.z = -0.09
flag = self.go_to_pose_wrist_no_align(newOPose)
if flag == False:
rospy.logerr("Could not Reach grasp Pose")
return False
return True
def main():
# maind()
getFPGA()
ur5.openGripper()
def maind():
ur5.go_to_named_pose("navPose")
# ebot.go_to_goal('store_table_fpga')
ebot.go_to_goal('store_table')
# ebot.go_to_goal_precise('store_table')
ebot.print_current_pose()
# detect.detectTable()
# ur5.go_to_named_pose("seeObjectJ")
ur5.go_to_named_pose("fpgaPoseOdom")
detect.detect()
object_name = 'FPGA_board'
pointBaseLink = detect.dict[object_name]
graspPose_pub.publish(createPoseStamped(pointBaseLink))
pointOdom = TransformPoint(pointBaseLink, 'base_link', 'odom')
ur5.go_to_named_pose("graspVerticalJ")
pose = Pose()
pose.position.z = 0.1
ur5.go_to_pose_relative(pose)
ebot.go_to_goal_precise('store_table_close')
ebot.go_to_waypoint_relative(0.4, 0 ,0)
pointBaseLink = TransformPoint(pointOdom,'odom', 'base_link')
graspPose_pub.publish(createPoseStamped(pointBaseLink))
detect.detectTable()
rospy.sleep(0.1)
flag = ur5.graspObjectVerticalOld(
pointBaseLink, width=w_dict[object_name], yaw=pi/3)
while flag == False:
ebot.go_to_waypoint_relative(0.2, 0, 0)
detect.detect()
flag = ur5.graspObjectVerticalOld(
pointBaseLink, width=w_dict[object_name], yaw=pi/3)
ur5.openGripper()
def getFPGAnew():
ur5.go_to_named_pose("navPose")
# ebot.go_to_goal('store_table_fpga')
ebot.go_to_goal('store_table')
ebot.go_to_goal_precise('store_table_fpga')
ebot.print_current_pose()
detect.detectTable()
ur5.go_to_named_pose("seeObjectJ")
ur5.go_to_named_pose("fpgaPoseOdom")
ebot.go_to_waypoint_relative(0.25, 0, 0)
ur5.go_to_named_pose("fpgaPoseOdom")
detect.detect()
detect.detectTable()
ur5.openGripper()
object_name = 'FPGA_board'
graspPose_pub.publish(createPoseStamped(detect.dict[object_name]))
printPoint(detect.dict[object_name])
ur5.go_to_named_pose("graspVerticalJ")
pose = Pose()
pose.position.z = 0.1
ur5.go_to_pose_relative(pose)
flag = ur5.graspObjectVerticalOld(
detect.dict[object_name], width=w_dict[object_name], yaw=pi/3)
while flag == False:
ebot.go_to_waypoint_relative(0.2, 0, 0)
detect.detect()
flag = ur5.graspObjectVerticalOld(
detect.dict[object_name], width=w_dict[object_name], yaw=pi/3)
ebot.go_to_pose_relative(-1, 0, 0, rospy.Duration(5))
ur5.go_to_named_pose("navPose")
ebot.go_to_goal("store_exit")
def getFPGA():
ur5.go_to_named_pose("navPose")
# ebot.go_to_goal('store_table_fpga')
ebot.go_to_goal('store_table')
ebot.go_to_goal_precise('store_table_fpga')
ebot.print_current_pose()
detect.detectTable()
ur5.go_to_named_pose("seeObjectJ")
ur5.go_to_named_pose("fpgaPoseOdom")
ebot.go_to_waypoint_relative(0.25, 0, 0)
ur5.go_to_named_pose("fpgaPoseOdom")
detect.detect()
detect.detectTable()
ur5.openGripper()
object_name = 'FPGA_board'
graspPose_pub.publish(createPoseStamped(detect.dict[object_name]))
printPoint(detect.dict[object_name])
ur5.go_to_named_pose("seeObjectJ")
flag = ur5.graspObjectVerticalOld(
detect.dict[object_name], width=w_dict[object_name], yaw=pi/3)
while flag == False:
ebot.go_to_waypoint_relative(0.2, 0, 0)
detect.detect()
flag = ur5.graspObjectVerticalOld(
detect.dict[object_name], width=w_dict[object_name], yaw=pi/3)
ebot.go_to_pose_relative(-1, 0, 0, rospy.Duration(5))
ur5.go_to_named_pose("navPose")
ebot.go_to_goal("store_exit")
def getGlue():
ur5.go_to_named_pose("navPose")
# TODO check if in meeting Room
# ebot.go_to_goal('meeting_entry')
# print("Entered room")
ebot.print_current_pose()
ebot.go_to_goal_precise('meeting_table')
ebot.go_to_goal('meeting_table')
print("Reached Goal")
ebot.print_current_pose()
ebot.applyBrakes()
detect.detectTable()
ur5.go_to_named_pose("meetingTable")
detect.detect()
pickupObject('glue')
ur5.go_to_named_pose("navPose")
ebot.releaseBrakes()
def enter_pantry():
ur5.go_to_named_pose("navPose")
ebot.go_to_goal('pantry_entry')
ebot.go_to_waypoint_relative(1.3, 0, 0)
printReached("pantry")
def getCoke():
enter_pantry()
ebot.go_to_goal_precise('pantry_table1')
ebot.go_to_goal('pantry_table1')
ebot.applyBrakes()
detect.detectTable()
ur5.go_to_named_pose("pantryTable1Odom")
detect.detect()
pickupObject('coke_can')
ur5.go_to_named_pose("navPoseOld")
ebot.releaseBrakes()
exit_pantry()
def exit_pantry():
# ebot.go_to_goal('pantry_exit')
# ebot.go_to_waypoint_relative(1.2,0,0)
# ebot.go_to_goal('pantry_exit_old')
ebot.go_to_goal_precise('pantry_exit')
ebot.set_yaw(pi/2)
ebot.go_to_waypoint_relative(1.2, 0, 0)
def dropbox3():
ebot.go_to_goal('research_entry')
ebot.print_current_pose()
ebot.go_to_goal('research_dropbox')
ebot.print_current_pose()
ebot.applyBrakes()
detect.detectTable()
ur5.go_to_named_pose("researchDropbox")
ur5.openGripper()
rospy.sleep(0.5)
ur5.go_to_named_pose("navPose")
ebot.releaseBrakes()
def exit_meeting():
ebot.go_to_goal_precise('meeting_exit')
ebot.go_to_goal('meeting_exit')
def enter_meeting():
ebot.go_to_goal('meeting_entry')
ebot.go_to_waypoint_relative(1, 0, 0)
def dropbox2():
ebot.go_to_goal_precise('meeting_dropbox')
# ebot.go_to_goal('meeting_dropbox')
ebot.print_current_pose()
detect.detectTable()
ur5.go_to_named_pose("researchDropboxJ")
ur5.go_to_named_pose("meetingDropboxOdom")
ur5.openGripper()
rospy.sleep(0.5)
ur5.go_to_named_pose("navPose")
# ebot.go_to_pose_relative(0.95,0,0)
def enter_conference_room():
ebot.go_to_goal('conference_entry')
ebot.go_to_waypoint_relative(1, 0, 0)
def dropbox1():
ur5.go_to_named_pose("navPose")
enter_conference_room()
ebot.go_to_goal('conference_dropbox')
ebot.print_current_pose()
ebot.applyBrakes()
detect.detectTable()
ur5.go_to_named_pose("conferenceDropbox")
ur5.openGripper()
rospy.sleep(0.5)
ur5.go_to_named_pose("navPose")
ebot.releaseBrakes()
exit_conference_room()
def exit_conference_room():
ebot.set_yaw(-3*pi/2)
ebot.go_to_waypoint_relative(1, 0, 0)
def subtask1():
getFPGA()
dropbox1()
def subtask2():
getCoke()
enter_meeting()
dropbox2()
def subtask3():
getGlue()
exit_meeting()
dropbox3()
if __name__ == '__main__':
rospy.init_node('grasping_node')
graspPose_pub = rospy.Publisher("/graspPose", PoseStamped, queue_size=1)
ur5 = Ur5()
ebot = Ebot()
detect = Detect()
# main()
getFPGA()
# subtask1()
# subtask2()
# subtask3()
# ebot.releaseBrakes()
| 17,690 | 6,714 |
import tensorflow as tf
from cleverhans.attacks import FastGradientMethod
from cleverhans.utils_keras import KerasModelWrapper
from keras.utils import to_categorical
import keras.backend as K
import numpy as np
from functools import lru_cache
from pickle import loads, dumps
from models import filter_correctly_classified_examples
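# Note: X and y_target are passed to the cached helper below in pickled form, presumably because
# functools.lru_cache requires hashable arguments; numpy arrays are not hashable, but their pickled bytes are.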
@lru_cache()
def __fast_gradient_sign_tf_symbols(model, serializedX, serializedy_target):
X = loads(serializedX)
y_target = loads(serializedy_target)
cleverhans_model = KerasModelWrapper(model)
attack = FastGradientMethod(cleverhans_model)
X_sym = tf.placeholder(tf.float32, shape=model.input.shape)
eta_sym = tf.placeholder(tf.float32)
if y_target is not None:
one_hot_y_target_sym = tf.placeholder(tf.float32, shape=model.output.shape)
else:
one_hot_y_target_sym = None
kwargs = {"eps": eta_sym, "ord": np.inf, "clip_min": 0., "clip_max": 1.}
if y_target is not None:
kwargs["y_target"] = one_hot_y_target_sym
example_sym = attack.generate(X_sym, **kwargs)
return X_sym, one_hot_y_target_sym, example_sym, eta_sym
def adversarial_example(model, X, y_target=None, eta=0.15):
assert y_target is None or len(y_target) == len(X)
with_target = y_target is not None
if with_target:
num_classes = model.output.shape.as_list()[-1]
one_hot_y_target = to_categorical(y_target, num_classes=num_classes)
serializedX = dumps(X)
serializedy_target = dumps(y_target)
symbols = __fast_gradient_sign_tf_symbols(model, serializedX, serializedy_target)
X_sym, one_hot_y_target_sym, example_sym, eta_sym = symbols
session = K.get_session()
feed_dict = {X_sym: X, eta_sym: eta}
if with_target:
feed_dict[one_hot_y_target_sym] = one_hot_y_target
return session.run(example_sym, feed_dict=feed_dict)
def adversarial_score(model, X_test, y_test, eta=None, y_target=None):
X, y = filter_correctly_classified_examples(model, X_test, y_test)
adversarialX = adversarial_example(model, X, y_target, eta)
fooling_examples, _ = filter_correctly_classified_examples(model, adversarialX, y)
score = 1 - len(fooling_examples) / len(X)
return score
| 2,224 | 800 |
from urlparse import urlparse
from re import match
def get_redirect(req_path, ref_url):
'''
>>> get_redirect('/style.css', 'http://preview.local/tree/foo/view/')
'/tree/foo/view/style.css'
>>> get_redirect('/style.css', 'http://preview.local/tree/foo/view/quux.html')
'/tree/foo/view/style.css'
>>> get_redirect('/quux/style.css', 'http://preview.local/tree/foo/view/')
'/tree/foo/view/quux/style.css'
'''
_, ref_host, ref_path, _, _, _ = urlparse(ref_url)
ref_git_preamble_match = match(r'((/[^/]+){3})', ref_path)
return ref_git_preamble_match.group(1) + req_path
def needs_redirect(req_host, req_path, ref_url):
'''
Don't redirect when the request and referer hosts don't match:
>>> needs_redirect('preview.local', '/style.css', 'http://example.com/tree/foo/view/')
False
Don't redirect when the referer doesn't appear to include a git path.
>>> needs_redirect('preview.local', '/style.css', 'http://preview.local/about/')
False
Don't redirect when the request path already includes the git preamble.
>>> needs_redirect('preview.local', '/tree/foo/view/style.css', 'http://preview.local/tree/foo/view/')
False
>>> needs_redirect('preview.local', '/', 'http://preview.local/tree/foo/view/')
True
>>> needs_redirect('preview.local', '/style.css', 'http://preview.local/tree/foo/view/')
True
>>> needs_redirect('preview.local', '/fee/fi/fo/fum/style.css', 'http://preview.local/tree/foo/view/')
True
'''
_, ref_host, ref_path, _, _, _ = urlparse(ref_url)
#
# Don't redirect when the request and referer hosts don't match.
#
if req_host != ref_host:
return False
ref_git_preamble_match = match(r'(/tree/[^/]+/view/)', ref_path)
#
# Don't redirect when the referer doesn't appear to include a git path.
#
if not ref_git_preamble_match:
return False
#
# Don't redirect when the request path already includes the git preamble.
#
if req_path.startswith(ref_git_preamble_match.group(1)):
return False
return True
if __name__ == '__main__':
import doctest
doctest.testmod()
| 2,187 | 752 |
from flask import Blueprint, render_template
home_bp = Blueprint(
"home_bp",
__name__,
)
@home_bp.route("/")
def home():
return render_template("index.html", title="Chuyển ảnh thành văn bản", submit_localtion="/scanner")
| 237 | 90 |
from p3ui import *
import matplotlib.pyplot as plt
import numpy as np
def gradient_image(ax, extent, direction=0.3, cmap_range=(0, 1), **kwargs):
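# a sketch of what this does: build a 2x2 array whose entries are the projections of the unit-square
# corners onto the direction vector v, rescale it using cmap_range, and let imshow's bicubic
# interpolation render it as a smooth linear gradient across `extent`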
phi = direction * np.pi / 2
v = np.array([np.cos(phi), np.sin(phi)])
X = np.array([[v @ [1, 0], v @ [1, 1]],
[v @ [0, 0], v @ [0, 1]]])
a, b = cmap_range
X = a + (b - a) / X.max() * X
im = ax.imshow(X, extent=extent, interpolation='bicubic',
vmin=0, vmax=1, **kwargs)
return im
def gradient_bar(ax, x, y, width=0.5, bottom=0):
for left, top in zip(x, y):
right = left + width
gradient_image(ax, extent=(left, right, bottom, top),
cmap=plt.cm.Blues_r, cmap_range=(0, 0.8))
class GradientChart(MatplotlibSurface):
# https://matplotlib.org/stable/gallery/lines_bars_and_markers/bar_stacked.html#sphx-glr-gallery-lines-bars-and-markers-bar-stacked-py
def __init__(self, **kwargs):
width = kwargs.pop('width', (auto, 1, 1))
height = kwargs.pop('height', (auto, 1, 1))
super().__init__(width=width, height=height, **kwargs)
self._update()
def _update(self):
with self as figure:
np.random.seed(19680801)
figure.clear()
ax = figure.add_subplot()
ax.set(xlim=(0, 10), ylim=(0, 1), autoscale_on=False)
gradient_image(ax, direction=1, extent=(0, 1, 0, 1), transform=ax.transAxes,
cmap=plt.cm.RdYlGn, cmap_range=(0.2, 0.8), alpha=0.5)
N = 10
x = np.arange(N) + 0.15
y = np.random.rand(N)
gradient_bar(ax, x, y, width=0.7)
ax.set_aspect('auto')
async def update(self):
self._update()
| 1,746 | 670 |
from collections import deque
"""
Operations
* Enqueue
* Dequeue
* Peek (first element)
* Size
* IsEmpty
* Print
"""
def demo_queue_operation_using_deque():
queue = deque()
# enqueue - inserting at the Right
queue.append(1)
queue.append(2)
queue.append(3)
queue.append(4)
queue.append(5)
queue.append(6)
print(queue) # Print
print(queue[-1]) # Last Element / Rear
print(len(queue)) # Length
print(len(queue) == 0) # isEmpty
# dequeue - popping from the Left
queue.popleft()
queue.popleft()
print(queue) # Print
print(queue[0]) # First Element / Front (Peek)
print(len(queue)) # Length
print(len(queue) == 0) # isEmpty
if __name__ == '__main__':
demo_queue_operation_using_deque()
pass
"""
deque([1, 2, 3, 4, 5, 6])
deque([3, 4, 5, 6])
3
"""
| 816 | 319 |
mask = 255
print(mask == 255)
blue_mask = mask == 255
print(mask)
print(blue_mask) | 85 | 43 |
'''
Contains the implementation of the FindSongs class.
'''
from re import compile as rcompile
from zipfile import ZipFile
from os.path import dirname
import pandas as pd
from tensorflow.keras.models import load_model
from sklearn.neighbors import NearestNeighbors
from joblib import load
DIR = dirname(__file__)
rex = rcompile('[^a-zA-Z 0-9]')
tokenize = lambda x: rex.sub('', x.lower().replace(',', ' ').replace('-', ' '))
MODELS_DIR = DIR + '/../../models/'
DATA_DIR = DIR + '/../../data/'
ENCODER = 'encoder.h5'
FG_ENCODER = 'fg_encoder.h5'
ENCODER_PATH = MODELS_DIR + ENCODER + '.zip'
ENCODED_DTM = MODELS_DIR + 'encoded_dtm.pkl'
TFIDF = MODELS_DIR + 'tfidf.pkl'
FG_ENCODER_PATH = MODELS_DIR + FG_ENCODER
FG_ENCODED_DF = MODELS_DIR + 'fg_encoded_df.pkl'
GENRES_TFIDF = MODELS_DIR + 'genres_tfidf.pkl'
SCALER = MODELS_DIR + 'scaler.pkl'
TRACKS = DATA_DIR + 'tracks_genres_lyrics_en.csv.zip'
class FindSongs():
'''
This class implements 3 methods:
(1) find_song_entries - Given a song suggestion string containing partial/whole song name
and/or artist, returns a dataframe of possible matches
(2) find_song_entry - Given a song suggestion string returns either a dataframe of
possible matches (if the best_choice kw argument is False) or a single entry (if the
best_choice argument is True - this is the default value)
(3) get_recommendations - Given a song entry returns a dataframe of songs that are
similar.
'''
def __init__(self):
# Extract encoder.h5 from encoder.h5.zip
with ZipFile(ENCODER_PATH, 'r') as zipObj:
zipObj.extractall()
# Load the model saved in ../../models/encoder.h5
self.encoder = load_model(ENCODER)
# Load the TfIDF vectorizer saved in tfidf.pkl
self.tfidf = load(TFIDF)
# Load the encoded DTM saved in encoded_dtm.pkl
self.encoded_dtm = load(ENCODED_DTM)
# Fit NearestNeighbors on encoded DTM
self.nn = NearestNeighbors(n_neighbors=5, algorithm='ball_tree')
self.nn.fit(self.encoded_dtm)
# Numerical features associated with a song entry
self.features = [
'popularity', 'duration_ms', 'explicit', 'danceability',
'energy', 'key', 'loudness', 'mode', 'speechiness',
'acousticness', 'instrumentalness', 'liveness', 'valence',
'tempo', 'time_signature'
]
# Load the model saved in fg_encoder.h5
self.fg_encoder = load_model(FG_ENCODER_PATH)
# Load the TfIDF vectorizer for genres data saved in genres_tfidf.pkl
self.genres_tfidf = load(GENRES_TFIDF)
# The original DF is DTM generated by genres_tfidf from genres data
# in the dataset + Numerical features
# Load the encoded DF from fg_encoded_df.pkl
self.fg_encoded_df = load(FG_ENCODED_DF)
# Load the StandardScaler saved at scaler.pkl
self.scaler = load(SCALER)
# Fit NearestNeighbors on encoded DF
self.fg_nn = NearestNeighbors(n_neighbors=5, algorithm='ball_tree')
self.fg_nn.fit(self.fg_encoded_df)
# Load tracks_df from zipped csv file tracks_genres_lyrics_en.csv.zip
self.tracks_df = pd.read_csv(TRACKS)
# Get rid of superfluous columns and rows
self.tracks_df.drop(columns=['Unnamed: 0'], inplace=True)
self.tracks_df = self.tracks_df[self.tracks_df.genres.isna() == False]
def find_song_entries(self, sugg_str):
'''
Given sugg_str(a string containing part/whole of the
song's name and/or artist) returns a dataframe of
song entries that are the closest matches.
'''
# Vectorize the sugg_str by running it through tfidf
vec = self.tfidf.transform([tokenize(sugg_str)]).todense()
# Reduce dimensionality by running through encoder
encoded_vec = self.encoder.predict(vec)
# Get list of indices of entries that are closest to sugg_str
entries = self.nn.kneighbors(encoded_vec)[1][0].tolist()
# Get the list of indices of closest matches sorted in descending
# order of popularity i.e. the first entry will have the highest
# popularity value
entries = self.tracks_df.iloc[entries].popularity.\
sort_values(ascending=False).index.tolist()
# Return a dataframe containing the entries
return self.tracks_df.loc[entries]
def find_song_entry(self, sugg_str, best_choice=True):
'''
Given sugg_str(a string containing part/whole of the
song's name and/or artist) returns either a dataframe of
song entries that are the closest matches(best_choice=False)
or a single song entry(best_choice=True)
'''
# Get dataframe of song entries that are closest match
# to sugg_str which is a string containing part/whole
# of the song's name and/or artist.
df = self.find_song_entries(sugg_str)
# Convert sugg_str to a set of tokens
sugg_set = set(tokenize(sugg_str).split())
# Get the list of index values for the dataframe
choice = df.index.tolist()
if best_choice:
# The caller wants just one entry for the best match
# Given index value of a song entry row, returns a set of tokens from the combined
# name and artists columns.
# The array syntax ['name'] is used in place of the dot syntax .name because
# .name returns the value from the index column
name_artists = lambda x: set(tokenize(df.loc[x]['name']+' '+df.loc[x].artists).split())
# Given a set of tokens, it returns the length of its intersection with the sugg_set
# This is used as a measure how similar the input is to the sugg_set - the larger the
# return value, the greater the similarity
score_func = lambda x: len(sugg_set.intersection(x))
choices = [(y, name_artists(y)) for y in choice]
best_idx = 0
best_score = score_func(choices[0][1])
for idx, nm_art in enumerate(choices[1:]):
score = score_func(nm_art[1])
#print(f'{nm_art[1]}/{choices[best_idx][1]}/{sugg_set}:: {score}/{best_score}')
if score > best_score:
best_score = score
best_idx = idx+1
choice = choices[best_idx][0]
return df.loc[choice]
def get_recommendations(self, x):
'''
Given a song entry x, returns a dataframe of similar songs.
The similarity is determined based on the numerical features(detailed
in self.features) along with genres feature.
'''
# Convert the genres feature to a vector
gvec = self.genres_tfidf.transform([tokenize(x.genres)]).todense()
# Standardize the numerical features
fvec = self.scaler.transform([x[self.features]])
# Combine bot vectors to create a single features vector
vec = [fvec.tolist()[0] + gvec.tolist()[0]]
# Perform dimensionality reduction by running through fg_encoder
encoded_vec = self.fg_encoder.predict(vec)
# Get the list of indices of entries that are closest to
# the input entry
entries = self.fg_nn.kneighbors(encoded_vec)[1][0].tolist()
# Sort the list of indices in descending order of popularity
entries = self.tracks_df.iloc[entries].popularity.\
sort_values(ascending=False).index.tolist()
# Return a dataframe containing the sorted list of entries.
return self.tracks_df.loc[entries]
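# Example usage (a sketch; the song suggestion string is made up, and the pickled models and data
# referenced at the top of this module must exist on disc):
# fs = FindSongs()
# entry = fs.find_song_entry('bohemian rhapsody queen')
# recommendations = fs.get_recommendations(entry)
# print(recommendations[['name', 'artists']])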
| 7,664 | 2,364 |
import tweepy
import time
import pandas as pd
import datetime
import re
from auth import consumer_key,consumer_secret,key,secret,user_ID
def trimtweet(tweet):
if len(tweet) <= 280:
return tweet
return tweet[:277] + "..."
# twitter auth process
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(key, secret)
api = tweepy.API(auth)
bot_id = api.me().id #1382664940587261954
# defining variables
interval = 60 * 60 * 24 # every 24 hours
# interval = 5 # every 5 seconds, for testing
# compute the number of days until September 1
today = datetime.date.today()
future = datetime.date(2021,9,1)
diff = future - today
lim_dias=diff.days
while True:
df=pd.read_csv("https://raw.githubusercontent.com/andrab/ecuacovid/master/datos_crudos/vacunas/vacunas.csv")
fecha_rep=df["fecha"].iloc[-1]
ult_fecha_datos = datetime.datetime.strptime(df["fecha"].iloc[-1], '%d/%m/%Y')
lastTweet = api.user_timeline(id = bot_id, count = 1)
if (len(lastTweet) > 0):
text = lastTweet[0].text
m = re.search('Hasta el (?P<date>\d{2}/\d{2}/\d{4}) el MSP', text)
if m:
ult_fecha_tweet = datetime.datetime.strptime(m.group('date'), '%d/%m/%Y')
if ult_fecha_tweet >= ult_fecha_datos:
print("Ya se tweeteo la ultima fecha disponible fecha %d", ult_fecha_datos)
time.sleep(interval)
continue
vaxs_1dosis=df["primera_dosis"].iloc[-1]
vaxs_2dosis=df["segunda_dosis"].iloc[-1]
left_2_vax= int(9e6-vaxs_1dosis)
# scenario 1: there is still time left but vaccination is not yet complete
if (left_2_vax > 0 and lim_dias > 0):
tweet= 'A G.Lasso le quedan {} días para vacunar {} personas. Hasta el {} el MSP ha reportado {} personas vacunadas con primera dosis, {} personas con segunda dosis. Su ofrecimiento en campaña: 9M de vacunadxs en 100 días'
tweet = trimtweet(tweet.format(lim_dias, left_2_vax, fecha_rep,vaxs_1dosis,vaxs_2dosis))
print(tweet, flush=True)
api.send_direct_message(user_ID, tweet)
api.update_status(tweet)
lim_dias -= 1
time.sleep(interval)
# scenario 2: time has run out; reminder of how many days the goal has gone unmet
elif (left_2_vax > 0 and lim_dias <= 0):
tweet='Hace {} días G.Lasso debería haber vacunado 9M de personas y todavia le faltan {} personas para llegar a 9M. Hasta el {} el MSP ha reportado {} personas vacunadas con primera dosis, {} personas con segunda dosis #accountabilitybot #AI4good'
print(tweet.format(abs(lim_dias), left_2_vax, fecha_rep,vaxs_1dosis,vaxs_2dosis),flush=True)
tweet = trimtweet(tweet.format(abs(lim_dias), left_2_vax, fecha_rep,vaxs_1dosis,vaxs_2dosis))
api.send_direct_message(user_ID, tweet)
api.update_status(tweet)
lim_dias -= 1
time.sleep(interval)
# scenario 3: at least 9M people vaccinated before the deadline
else: # not((a and b) or (a and not b)) = not a
tweet= 'Guillermo Lasso logró vacunar al menos 9M personas en sus primeros 100 días de gobierno. Voy a buscar algo más que hacer. Chao #accountabilitybot #AI4good'
tweet = trimtweet(tweet)
print(tweet,flush=True)
api.send_direct_message(user_ID,tweet)
api.update_status(tweet)
time.sleep(interval)
# to run in the background: <sudo nohup python3 ecua_vac.py &>
# to check the real-time log file: <sudo tail -f nohup.out>
| 3,516 | 1,309 |
#!/usr/bin/env python
import rosbag
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
args = sys.argv
print(len(args))
assert len(args)>=2, "you must specify the argument."
# get path
filename=os.path.normpath(os.path.join(os.getcwd(),args[1]))
print(filename)
# read from bag file
bag = rosbag.Bag(filename)
np_poses=None
for topic, msg, t in bag.read_messages():
if topic=="/pose":
np_pose=np.array([[0.0, 0.0, 0.0, 0.0, 0.0]])
np_pose[0,0]=msg.position.x
np_pose[0,1]=msg.position.y
np_pose[0,2]=msg.position.z
np_pose[0,3]=t.secs
np_pose[0,4]=t.nsecs
if np_poses is None:
np_poses=np_pose
else:
np_poses=np.append(np_poses,np_pose,axis=0)
# reform time
start_sec=np_poses[0,3]
start_nsec=np_poses[0,4]
t=np.zeros(np_poses.shape[0],dtype='float32')
for i in range(np_poses.shape[0]):
t[i]=(np_poses[i,3]-start_sec)+(np_poses[i,4]-start_nsec)/1000000000.0
# plot
plt.subplot(121)
plt.title("time vs x,y")
plt.plot(t, np_poses[:,0], 'r', label="x")
plt.plot(t, np_poses[:,1], 'b', label="y")
plt.xlabel("time[s]")
plt.ylabel("vel[m/s]")
plt.legend()
plt.subplot(122)
plt.title("x vs y")
plt.plot(np_poses[:,0], np_poses[:,1], 'g')
plt.show()
bag.close()
| 1,277 | 574 |
'''
Created on Dec 21, 2014
@author: Alina Maria Ciobanu
'''
import numpy
import re
TOKEN_NER_TAGS = ['DATE', 'NUMBER']
WIKI_NER_TAGS = ['PERSON', 'ORGANIZATION', 'LOCATION']
DEFAULT_YEAR_VALUE = 1858 # the mean of the lowest and highest value for all possible intervals, hardcoded for now
def get_temporal_feature(doc, wiki_dict=None):
flat_tokens = sum(doc['tokens'], [])
flat_ner = sum(doc['ner'], [])
zipped = zip(flat_tokens, flat_ner)
return get_temporal_feature_for_zip(zipped, wiki_dict)
def get_temporal_feature_for_zip(zipped, wiki_dict=None):
""" entry format: [(token_1, ner_tag_1), (token_2, ner_tag_2), (token_3, ner_tag_3)] """
years = []
years.extend(get_years_from_token(zipped))
if years:
return numpy.median(numpy.array(years))
elif wiki_dict:
years.extend(get_years_from_wiki(zipped, wiki_dict))
if years:
return numpy.median(numpy.array(years))
return DEFAULT_YEAR_VALUE
def get_years_from_token(zipped):
years = []
for token, ner_tag in zipped:
if ner_tag in TOKEN_NER_TAGS:
match = re.match( r'.*(\d{4}).*', token)
if (match):
years.append(int(match.group(1)))
return years
def get_years_from_wiki(zipped, wiki_dict):
years = []
for token, ner_tag in zipped:
if ner_tag in WIKI_NER_TAGS:
if token in wiki_dict.keys():
years.extend([int(year) for year in wiki_dict[token]])
return years
if __name__ == "__main__":
zipped = zip(['the 1990s', '1967', '1875', '123x4'], ['DATE', 'DAT', 'NUMBER', 'NUMBER'])
print get_temporal_feature_for_zip(zipped)
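# for this example the recognized years are 1990 (from 'the 1990s', tagged DATE) and 1875 (NUMBER),
# so this should print their median, 1932.5; '1967' is skipped because 'DAT' is not a recognized tag,
# and '123x4' has no run of four consecutive digits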
| 1,748 | 695 |
"""This module handles classifier calculation."""
from academia_tag_recommender.definitions import MODELS_PATH
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer, recall_score
from sklearn.model_selection import StratifiedKFold
from joblib import dump, load
from pathlib import Path
import random
import numpy as np
DATA_FOLDER = Path(MODELS_PATH) / 'experimental_classifier' / 'classwise'
SAMPLE_RATIO = 1 / 25
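# i.e. when undersampling, keep at most round(n_pos / SAMPLE_RATIO) = 25 negative samples per positive sample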
RANDOM_STATE = 0
random.seed(RANDOM_STATE)
scorer = make_scorer(recall_score)
k_fold = StratifiedKFold(shuffle=True, random_state=RANDOM_STATE)
class ClasswiseClassifier:
"""The BR Classwise Classifier that is capable of of grid search and undersampling.
Attributes:
name: The experimental classifiers name as :class:`str`.
path: The experimental classifiers path on the disc as :class:`pathlib.Path`.
classifier_options: The options for grid search as :class:`list(ClassifierOption)`.
path: The path where the individual base classifiers are stored as :class:`pathlib.Path`.
undersample: If True undersampling is used.
"""
def __init__(self, name, classifier_options, folder_path, undersample=False):
self.name = name
self.classifier_options = classifier_options
self.path = DATA_FOLDER / folder_path
Path.mkdir(self.path, exist_ok=True)
self.undersample = undersample
def fit(self, X, y):
"""Fit classifier to given data.
Args:
X:
The samples as :class:`list`.
y:
The label data as :class:`list`.
Returns:
The classifier as :class:`ClasswiseClassifier`.
"""
self._clfs = []
for y_i, _ in enumerate(y[0]):
y_train = y[:, y_i]
if self.undersample:
X_sample, y_sample = self._undersample(X, y_train)
else:
X_sample, y_sample = X, y_train
clf = self._choose_classifier(X_sample, y_sample)
path = self._dump_clf(clf, y_i)
self._clfs.append(path)
return self
def _positive_samples(self, X, y):
"""Extract only positive samples.
Args:
X:
The samples as :class:`list`.
y:
The label data as :class:`list`.
Returns:
The positive samples as :class:`list`.
"""
i_positive = [i for i, _ in enumerate(X) if y[i]]
return random.sample(i_positive, len(i_positive))
def _negative_samples(self, X, y, n_pos):
"""Extract negative samples with adjusted ratio to positive samples.
Args:
X:
The samples as :class:`list`.
y:
The label data as :class:`list`.
Returns:
The negative samples as :class:`list`.
"""
i_negative = [i for i, _ in enumerate(X) if not y[i]]
n_neg = min(len(i_negative), round(n_pos / SAMPLE_RATIO))
return random.sample(i_negative, n_neg)
def _undersample(self, X, y):
"""Reduce X and y to an adjusted ratio of positive and negative samples.
Args:
X:
The samples as :class:`list`.
y:
The label data as :class:`list`.
Returns:
The adjusted samples as :class:`list`.
"""
i_pos = self._positive_samples(X, y)
i_neg = self._negative_samples(X, y, len(i_pos))
i = i_pos + i_neg
return np.array(X)[i], np.array(y)[i]
def _choose_classifier(self, X, y):
"""Find the best fitting classifier.
Args:
X:
The samples as :class:`list`.
y:
The label data as :class:`list`.
Returns:
The best classifier.
"""
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, test_size=0.25, random_state=RANDOM_STATE)
clfs = [self._fit_clf(clf_option, X_train, y_train)
for clf_option in self.classifier_options]
clf = self._get_best_clf(clfs, X_test, y_test)
return clf
def _fit_clf(self, clf_option, X, y):
"""Train classifiers as defined by the option.
Args:
clf_option:
The classifier that should be trained as :class:`ClassifierOption`.
X:
The samples as :class:`list`.
y:
The label data as :class:`list`.
Returns:
The trained classifier.
"""
if clf_option.grid_search:
return GridSearchCV(clf_option.clf, clf_option.parameter, cv=k_fold, scoring=scorer).fit(X, y).best_estimator_
else:
return clf_option.clf.fit(X, y)
def _get_best_clf(self, clfs, X, y):
"""Calculate scores for each classifier and return best.
Args:
clfs:
The classifiers to choose from as :class:`list`.
X:
The samples as :class:`list`.
y:
The label data as :class:`list`.
Returns:
The best classifier.
"""
clf_scores = [(clf, self._score_clf(clf, X, y)) for clf in clfs]
clf = sorted(clf_scores, key=lambda x: x[1], reverse=True)[0][0]
return clf
def _score_clf(self, clf, X, y):
"""Calculate score using the predicted labels by given classifier.
Args:
clfs:
The classifiers to use.
X:
The samples as :class:`list`.
y:
The label data as :class:`list`.
Returns:
The score as :class:`float`.
"""
prediction = clf.predict(X)
score = recall_score(y, prediction)
return score
def _dump_clf(self, clf, i):
"""Store a classifier on the disc.
Args:
clfs:
The classifiers to store.
i:
Number of the label the classifier handles as :class:`int`.
Returns:
The path where the classifier was stored as :class:`joblib.Path`.
"""
path = self.path / (self.name + '_classifier_' + str(i) + '.joblib')
dump(clf, path)
return path
def predict(self, X):
"""Predict labels based on X.
Args:
X:
The samples as :class:`list`.
Returns:
The prediction as :class:`list`.
"""
prediction = []
for path in self._clfs:
clf = load(path)
prediction.append(clf.predict(X))
return np.transpose(prediction)
def __str__(self):
return self.name
class ClassifierOption:
"""A classifier and optional gridsearch parameters.
Attributes:
clf: The classifier.
grid_search: If True gridsearch will be used.
parameter: The parameter to test while gridsearching.
"""
def __init__(self, clf, grid_search=False, parameter={}):
self.clf = clf
self.grid_search = grid_search
self.parameter = parameter
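# Example usage (a sketch; the estimator, parameter grid, folder name and data are all hypothetical):
# from sklearn.linear_model import LogisticRegression
# option = ClassifierOption(LogisticRegression(max_iter=1000), grid_search=True, parameter={'C': [0.1, 1, 10]})
# clf = ClasswiseClassifier('demo', [option], 'demo_folder', undersample=True)
# clf.fit(X_train, y_train)  # y_train is a binary indicator matrix, one column per label
# y_pred = clf.predict(X_test)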
| 7,270 | 2,163 |
from typing import List
class Solution:
def maxTurbulenceSize(self, arr: List[int]) -> int:
n = len(arr)
if n == 1:
return 1
def calc(a, b):
if a == b:
return 0
elif a > b:
return -1
else:
return 1
res = 0
count = 0
pre = 0
for i in range(1, n):
t = calc(arr[i-1], arr[i])
if t * pre == -1:
count += 1
else:
res = max(count, res)
count = 1 if t != 0 else 0
pre = t
res = max(count, res)
return res + 1
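# How this works: calc() reduces each adjacent pair to -1, 0 or 1; `count` tracks the length of the
# current run of strictly alternating comparisons (t * pre == -1), `res` keeps the best run seen,
# and res + 1 converts a comparison count back into a subarray length.
# e.g. arr = [9, 4, 2, 10, 7, 8, 8, 1, 9] -> 5, from the turbulent subarray [4, 2, 10, 7, 8]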
| 646 | 200 |
from node.behaviors import Adopt
from node.behaviors import DefaultInit
from node.behaviors import Nodespaces
from node.behaviors import Nodify
from node.behaviors import OdictStorage
from node.tests import NodeTestCase
from odict import odict
from plumber import plumbing
###############################################################################
# Mock objects
###############################################################################
@plumbing(
Adopt,
Nodespaces,
Nodify,
OdictStorage)
class NodespacesNode(odict):
pass
@plumbing(
Adopt,
Nodify,
DefaultInit,
OdictStorage)
class SomeNode(object):
pass
###############################################################################
# Tests
###############################################################################
class TestNodespace(NodeTestCase):
def test_Nodespaces(self):
node = NodespacesNode()
self.assertTrue(isinstance(node.nodespaces, odict))
self.assertEqual(node.nodespaces['__children__'], node)
child = node['__children__']['child'] = SomeNode()
self.assertEqual(node['child'], child)
self.assertTrue(node['__children__']['child'] is node['child'])
foo = node['__foo__'] = SomeNode()
self.assertEqual(node['__foo__'], foo)
child = node['__foo__']['child'] = SomeNode()
self.assertEqual(node['__foo__']['child'], child)
self.assertFalse(node['__foo__']['child'] is node['child'])
self.assertEqual(len(node.nodespaces), 2)
self.assertEqual(node.nodespaces['__children__'], node)
self.assertEqual(node.nodespaces['__foo__'], foo)
def __getitem__fails():
node['__inexistent__']
err = self.expectError(KeyError, __getitem__fails)
self.assertEqual(str(err), '\'__inexistent__\'')
def __getitem__fails2():
node['inexistent']
err = self.expectError(KeyError, __getitem__fails2)
self.assertEqual(str(err), '\'inexistent\'')
del node['child']
self.assertEqual(node.keys(), [])
self.assertEqual(list(node['__foo__'].keys()), ['child'])
del node['__foo__']
self.assertEqual(len(node.nodespaces), 1)
self.assertEqual(list(node.nodespaces.keys()), ['__children__'])
| 2,332 | 678 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-07 13:30
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('excerptexport', '0041_set_date_to_past_for_existing_exports_20160531_2235'),
]
operations = [
migrations.RemoveField(
model_name='outputfile',
name='file_extension',
),
]
| 443 | 168 |
"""
Extract as-run river time series.
To test on mac:
run extract_rivers -gtx cas6_v3_lo8b -0 2019.07.04 -1 2019.07.04
To run on perigee:
run extract_rivers -gtx cas6_v3_lo8b -0 2018.01.01 -1 2018.01.10
run extract_rivers -gtx cas6_v3_lo8b -0 2018.01.01 -1 2018.12.31
Performance: takes 23 sec per year on perigee
Modified to include all NPZD tracers, and package the results as
an xarray Dataset.
***
NOTE: this is hard-coded to LiveOcean_output / [gtag] / riv2 so it
pretty specific to the cas6_v3_lo8b run. Also, it expects to find all
the NPZDOC variables.
***
"""
from lo_tools import Lfun, zrfun
from lo_tools import extract_argfun as exfun
Ldir = exfun.intro() # this handles the argument passing
from datetime import datetime, timedelta
from time import time
import numpy as np
import pandas as pd
import xarray as xr
from pathlib import Path
ds0 = Ldir['ds0']
ds1 = Ldir['ds1']
tt0 = time()
# long list of variables to extract
vn_list = ['transport', 'salt', 'temp', 'oxygen',
'NO3', 'phytoplankton', 'zooplankton', 'detritus', 'Ldetritus',
'TIC', 'alkalinity']
print(' Doing river extraction for '.center(60,'='))
print(' gtag = ' + Ldir['gtag'])
outname = 'extraction_' + ds0 + '_' + ds1 + '.nc'
# make sure the output directory exists
out_dir = Ldir['LOo'] / 'pre' / 'river' / Ldir['gtag'] / 'Data_roms'
Lfun.make_dir(out_dir)
out_fn = out_dir / outname
out_fn.unlink(missing_ok=True)
dt0 = datetime.strptime(ds0, Lfun.ds_fmt)
dt1 = datetime.strptime(ds1, Lfun.ds_fmt)
ndays = (dt1-dt0).days + 1
# make mds_list: list of datestrings (e.g. 2017.01.01) to loop over
mds_list = []
mdt = dt0
while mdt <= dt1:
mds_list.append(datetime.strftime(mdt, Lfun.ds_fmt))
mdt = mdt + timedelta(days=1)
# get list of river names
# (this is a bit tricky because of NetCDF 3 limitations on strings, forcing them
# to be arrays of characters)
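# e.g. a column of single characters such as b'f', b'r', b'a', b's', b'e', b'r' (a made-up river name)
# would be decoded and joined into the Python string 'fraser' by the loop below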
mds = mds_list[0]
fn = Path('/boildat1').absolute() / 'parker' / 'LiveOcean_output' / Ldir['gtag'] / ('f' + mds) / 'riv2' / 'rivers.nc'
ds = xr.open_dataset(fn)
rn = ds['river_name'].values
NR = rn.shape[1]
riv_name_list = []
for ii in range(NR):
a = rn[:,ii]
r = []
for l in a:
r.append(l.decode())
rr = ''.join(r)
riv_name_list.append(rr)
ds.close()
NT = len(mds_list)
nanmat = np.nan * np.ones((NT, NR))
v_dict = dict()
for vn in vn_list:
v_dict[vn] = nanmat.copy()
tt = 0
for mds in mds_list:
fn = Path('/boildat1').absolute() / 'parker' / 'LiveOcean_output' / Ldir['gtag'] / ('f' + mds) / 'riv2' / 'rivers.nc'
ds = xr.open_dataset(fn)
# The river transport is given at noon of a number of days surrounding the forcing date.
# Here we find the index of the time for the day "mds".
RT = pd.to_datetime(ds['river_time'].values)
mdt = datetime.strptime(mds, Lfun.ds_fmt) + timedelta(hours=12)
mask = RT == mdt
for vn in vn_list:
if vn == 'transport':
v_dict[vn][tt,:] = ds['river_' + vn][mask,:]
else:
# the rest of the variables allow for depth variation, but we
# don't use this, so, just use the bottom value
v_dict[vn][tt,:] = ds['river_' + vn][mask,0,:]
ds.close()
tt += 1
# make transport positive
v_dict['transport'] = np.abs(v_dict['transport'])
# store output in an xarray Dataset
mdt_list = [(datetime.strptime(item, Lfun.ds_fmt) + timedelta(hours=12)) for item in mds_list]
times = pd.Index(mdt_list)
x = xr.Dataset(coords={'time': times,'riv': riv_name_list})
for vn in vn_list:
v = v_dict[vn]
x[vn] = (('time','riv'), v)
x.to_netcdf(out_fn)
x.close()
print('Total time for extraction = %d seconds' % (time() - tt0))
| 3,673 | 1,500 |
# Generated by Django 3.0.7 on 2020-11-07 16:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('common', '0015_task_unique_dir'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='line',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='comment',
name='source',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| 570 | 183 |
from __future__ import absolute_import
import math
import operator
from collections import OrderedDict
from functools import reduce
from typing import Union, Sequence, Optional
import torch
from laia.data import PaddedTensor
from laia.nn.pyramid_maxpool_2d import PyramidMaxPool2d
from laia.nn.temporal_pyramid_maxpool_2d import TemporalPyramidMaxPool2d
class Identity(torch.nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
def build_conv_model(unittest=False):
    model = torch.nn.Sequential(
        OrderedDict(
            [
                # conv1_1
                ("conv1_1", torch.nn.Conv2d(1, 64, kernel_size=3, padding=1)),
                ("relu1_1", Identity() if unittest else torch.nn.ReLU(inplace=True)),
                # conv1_2
                ("conv1_2", torch.nn.Conv2d(64, 64, kernel_size=3, padding=1)),
                ("relu1_2", Identity() if unittest else torch.nn.ReLU(inplace=True)),
                ("maxpool1", torch.nn.MaxPool2d(2, ceil_mode=True)),
                # conv2_1
                ("conv2_1", torch.nn.Conv2d(64, 128, kernel_size=3, padding=1)),
                ("relu2_1", Identity() if unittest else torch.nn.ReLU(inplace=True)),
                # conv2_2
                ("conv2_2", torch.nn.Conv2d(128, 128, kernel_size=3, padding=1)),
                ("relu2_2", Identity() if unittest else torch.nn.ReLU(inplace=True)),
                ("maxpool2", torch.nn.MaxPool2d(2, ceil_mode=True)),
                # conv3_1
                ("conv3_1", torch.nn.Conv2d(128, 256, kernel_size=3, padding=1)),
                ("relu3_1", Identity() if unittest else torch.nn.ReLU(inplace=True)),
                # conv3_2
                ("conv3_2", torch.nn.Conv2d(256, 256, kernel_size=3, padding=1)),
                ("relu3_2", Identity() if unittest else torch.nn.ReLU(inplace=True)),
                # conv3_3
                ("conv3_3", torch.nn.Conv2d(256, 256, kernel_size=3, padding=1)),
                ("relu3_3", Identity() if unittest else torch.nn.ReLU(inplace=True)),
                # conv3_4
                ("conv3_4", torch.nn.Conv2d(256, 256, kernel_size=3, padding=1)),
                ("relu3_4", Identity() if unittest else torch.nn.ReLU(inplace=True)),
                # conv3_5
                ("conv3_5", torch.nn.Conv2d(256, 256, kernel_size=3, padding=1)),
                ("relu3_5", Identity() if unittest else torch.nn.ReLU(inplace=True)),
                # conv3_6
                ("conv3_6", torch.nn.Conv2d(256, 256, kernel_size=3, padding=1)),
                ("relu3_6", Identity() if unittest else torch.nn.ReLU(inplace=True)),
                # conv4_1
                ("conv4_1", torch.nn.Conv2d(256, 512, kernel_size=3, padding=1)),
                ("relu4_1", Identity() if unittest else torch.nn.ReLU(inplace=True)),
                # conv4_2
                ("conv4_2", torch.nn.Conv2d(512, 512, kernel_size=3, padding=1)),
                ("relu4_2", Identity() if unittest else torch.nn.ReLU(inplace=True)),
                # conv4_3
                ("conv4_3", torch.nn.Conv2d(512, 512, kernel_size=3, padding=1)),
                ("relu4_3", Identity() if unittest else torch.nn.ReLU(inplace=True)),
            ]
        )
    )
    return model
def size_after_conv(xs):
    # type: (torch.Tensor) -> torch.Tensor
    xs = xs.float()
    xs = torch.ceil(xs / 2.0)
    xs = torch.ceil(xs / 2.0)
    return xs.long()
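
# Editor's sketch (not in the original file): the two divisions above mirror the
# two ceil-mode 2x2 max-pools in build_conv_model, so each spatial dimension is
# ceil-halved twice.  For example, for a batch of (height, width) sizes:
#
#   sizes = torch.tensor([[30, 13]])
#   size_after_conv(sizes)   # -> tensor([[8, 4]]):  30 -> 15 -> 8,  13 -> 7 -> 4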
class DortmundPHOCNet(torch.nn.Module):
    def __init__(
        self, phoc_size, tpp_levels=range(1, 6), spp_levels=None, unittest=False
    ):
        # type: (int, Optional[Sequence[int]], Optional[Sequence[int]], bool) -> None
        super(DortmundPHOCNet, self).__init__()
        assert tpp_levels or spp_levels
        if tpp_levels is None:
            tpp_levels = []
        if spp_levels is None:
            spp_levels = []
        self.conv = build_conv_model(unittest=unittest)
        self.tpp = TemporalPyramidMaxPool2d(levels=tpp_levels) if tpp_levels else None
        self.spp = PyramidMaxPool2d(levels=spp_levels) if spp_levels else None
        # Size after the temporal and spatial pooling layers
        fc_input_dim = 512 * (sum(tpp_levels) + sum(4 ** (lv - 1) for lv in spp_levels))
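        # Editor's note (worked example, not in the original file): with the
        # default tpp_levels=range(1, 6) and spp_levels=None this is
        # 512 * (1 + 2 + 3 + 4 + 5) = 512 * 15 = 7680 input features for fc6.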
        self.fc = torch.nn.Sequential(
            OrderedDict(
                [
                    ("fc6", torch.nn.Linear(fc_input_dim, 4096)),
                    ("relu6", Identity() if unittest else torch.nn.ReLU(inplace=True)),
                    ("drop6", torch.nn.Dropout(p=0 if unittest else 0.5)),
                    ("fc7", torch.nn.Linear(4096, 4096)),
                    ("relu7", Identity() if unittest else torch.nn.ReLU(inplace=True)),
                    ("drop7", torch.nn.Dropout(p=0 if unittest else 0.5)),
                    ("fc8", torch.nn.Linear(4096, phoc_size)),
                ]
            )
        )
        self.reset_parameters()
    def reset_parameters(self):
        # Initialize parameters as Caffe does
        for name, param in self.named_parameters():
            if name[-5:] == ".bias":
                # Initialize bias to 0
                param.data.fill_(0)
            else:
                # compute fan in
                fan_in = reduce(operator.mul, param.size()[1:])
                param.data.normal_(mean=0, std=math.sqrt(2.0 / fan_in))
        return self
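
    # Editor's note (worked example, not in the original file): the conv1_1
    # weight has shape (64, 1, 3, 3), so fan_in = 1 * 3 * 3 = 9 and the weights
    # are drawn from a normal distribution with mean 0 and std sqrt(2/9) ~ 0.47,
    # i.e. a "He"-style fan-in initialization.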
    def forward(self, x):
        # type: (Union[torch.Tensor, PaddedTensor]) -> torch.Tensor
        x, xs = (x.data, x.sizes) if isinstance(x, PaddedTensor) else (x, None)
        x = self.conv(x)
        if xs is not None:
            xs = size_after_conv(xs)
            x = PaddedTensor(x, xs)
        if self.tpp and self.spp:
            x = torch.cat((self.tpp(x), self.spp(x)), dim=1)
        else:
            x = self.tpp(x) if self.tpp else self.spp(x)
        return self.fc(x)
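
# Editor's sketch (not in the original file; the phoc_size value and input size
# below are made-up assumptions):
#
#   net = DortmundPHOCNet(phoc_size=540)
#   out = net(torch.randn(2, 1, 96, 256))   # -> tensor of shape (2, 540)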
def convert_old_parameters(params):
    # type: (OrderedDict) -> OrderedDict
    """Convert parameters from the old model to the new one."""
    new_params = []
    for k, v in params.items():
        if k.startswith("conv"):
            new_params.append(("conv.{}".format(k), v))
        elif k.startswith("fc"):
            new_params.append(("fc.{}".format(k), v))
        else:
            new_params.append((k, v))
    return OrderedDict(new_params)
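
# Editor's sketch (not in the original file; 'old_state_dict' is a hypothetical
# checkpoint loaded elsewhere): keys such as "conv1_1.weight" are remapped to
# "conv.conv1_1.weight" and "fc6.bias" to "fc.fc6.bias", so an old checkpoint
# can be loaded into this model, e.g.:
#
#   model = DortmundPHOCNet(phoc_size=540)
#   model.load_state_dict(convert_old_parameters(old_state_dict))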
| 6,361 | 2,261 |
import pyglet
from pyglet.gl import *

from robocute.node import *
from robocute.vu import *
from robocute.shape import Rect


class WidgetVu(Vu):
    def __init__(self, node):
        super().__init__(node)
        #
        self.content = Rect()
        #
        self.margin_top = 5
        self.margin_bottom = 5
        self.margin_left = 5
        self.margin_right = 5
        #
        self.hspace = 5
        self.vspace = 5
        #
        self.skin = None

    def validate(self):
        super().validate()
        if not self.skin:
            return
        #else
        self.skin.validate()
        #
        self.margin_left = self.skin.margin_left
        self.margin_right = self.skin.margin_right
        self.width = self.content.width + self.margin_left + self.margin_right
        #
        self.margin_top = self.skin.margin_top
        self.margin_bottom = self.skin.margin_bottom
        self.height = self.content.height + self.margin_bottom + self.margin_top

    def draw(self, graphics):
        super().draw(graphics)
        if not self.skin:
            return
        #else
        g = graphics.copy()
        g.width = self.width
        g.height = self.height
        self.skin.draw(g)

class Widget(Node):
    def __init__(self, items=None):
        # Default to an empty list so add_item/remove_item work when no items
        # are passed in.
        self.items = items if items is not None else []

    def add_item(self, item):
        self.items.append(item)

    def remove_item(self, item):
        self.items.remove(item)
| 1,541 | 502 |