text stringlengths 957 885k |
|---|
<reponame>dennlinger/hypergraph-document-store<filename>old_eval/createPlotsFromRuntime.py
"""
Taken from the runtime evaluation, compare the results for dyadic queries and their hypergraph counterparts.
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def compareFrequencyImpact(data, containerName):
    """
    Plot the three entities against one another to see how frequency impacts performance.

    :param data: (np.array) Loaded runtime evaluation results, one row per run.
    :param containerName: (str) Suffix used for the output file name.
    :return: None.
    """
    # First 21 columns: three entities x seven query variants each.
    subset = data[:, :21]
    groups = [
        (subset[:, 0:7], "<NAME>"),
        (subset[:, 7:14], "<NAME>"),
        (subset[:, 14:21], "2016-07-19"),
    ]
    labels = ['co-oc exp ent', 'co-oc exp term', 'co-oc imp term', 'co-oc imp ent',
              'by week toc', 'by week exp term', 'by week exp ent']
    fig, ax = plt.subplots(figsize=(12, 5))
    positions = list(range(7))
    barWidth = 0.25
    # One bar group per entity, offset by the group index.
    for idx, (values, label) in enumerate(groups):
        plotSubset(values, positions, barWidth, label, idx)
    ax.set_xticks([pos + 1 * barWidth for pos in positions])
    ax.set_xticklabels(labels)
    plt.title("Query execution time for entities of different frequency")
    plt.ylabel("Query execution time in ms")
    plt.legend()
    plt.savefig("../data/queryRuntime{}.png".format(containerName))
    plt.show()
def compareHypergraphDyadic(data, containerName):
    """
    Compare the runtime of our subset of queries for dyadic vs hypergraphs.

    :param data: (np.array) Runtime evaluation results, one row per run.
    :param containerName: (str) Suffix used for the output file name.
    :return: None
    """
    # see https://gitlab.com/dennis.aumiller/hyppograph/issues/60 for indices
    yHyper = data[:, [0, 6, 7, 13, 14, 20, 22, 25]]
    yDyadic = data[:, 31:]
    # BUG FIX: the fourth label was a duplicated 'Johnson co-oc'; the column
    # selection (index 13) pairs each entity with a co-oc and a by-week query.
    xticks = ['Trump co-oc', 'Trump by week', 'Johnson co-oc', 'Johnson by week',
              '07-19 co-oc', '07-19 by week', 'Date range occ', 'Degree count']
    fig, ax = plt.subplots(figsize=(12, 5))
    xPos = list(range(8))
    width = 0.25
    plotSubset(yHyper, xPos, width, "Hypergraph", 0)
    plotSubset(yDyadic, xPos, width, "Dyadic Reduction", 1)
    # Center the tick labels between the two bar groups.
    ax.set_xticks([pos + 0.5 * width for pos in xPos])
    ax.set_xticklabels(xticks)
    plt.title("Query execution time for hypergraph vs dyadic reduction")
    plt.ylabel("Query execution time in ms")
    plt.legend()
    plt.savefig("../data/hypergraphVsDyadic{}.png".format(containerName))
    plt.show()
def compareHypergraphDyadicImplicit(data, containerName):
    """
    Compare the runtime of our subset of queries for dyadic vs hypergraphs vs implicit equiv.
    Ignore last query due to bad performance overall (screws scale), and because there is so
    far no real equivalent.

    :param data: (np.array) Runtime evaluation results, one row per run.
    :param containerName: (str) Important for file name.
    :return: None
    """
    # see https://gitlab.com/dennis.aumiller/hyppograph/issues/60 for indices
    yHyper = data[:, [0, 6, 7, 13, 14, 20, 22]]
    yDyadic = data[:, 31:38]
    yImplicit = data[:, [3, 4, 10, 11, 17, 18, 21]]
    # BUG FIX: only 7 queries are plotted here ('Degree count' is dropped, see
    # docstring), so only 7 labels may be passed to set_xticklabels; the
    # duplicated 'Johnson co-oc' is also corrected to 'Johnson by week'.
    xticks = ['Trump co-oc', 'Trump by week', 'Johnson co-oc', 'Johnson by week',
              '07-19 co-oc', '07-19 by week', 'Date range occ']
    fig, ax = plt.subplots(figsize=(12, 5))
    xPos = list(range(7))
    width = 0.25
    plotSubset(yHyper, xPos, width, "Hypergraph", 0)
    plotSubset(yDyadic, xPos, width, "Dyadic Reduction", 1)
    plotSubset(yImplicit, xPos, width, "Implicit Equivalent", 2)
    # Center the tick labels across the three bar groups.
    ax.set_xticks([pos + 1 * width for pos in xPos])
    ax.set_xticklabels(xticks)
    plt.title("Query execution time for hypergraph vs dyadic reduction vs implicit graph")
    plt.ylabel("Query execution time in ms")
    plt.legend()
    plt.savefig("../data/hypergraphVsDyadicVsImplicit{}.png".format(containerName))
    plt.show()
def compareUnoptimizedOptimized(dataUnopt, dataOpt, containerUnopt, containerOpt):
    """
    Compare query runtimes between two different setups. Column dimensions must match.

    :param dataUnopt: (np.array) Runtime results of the naive setup.
    :param dataOpt: (np.array) Runtime results of the optimized setup.
    :param containerUnopt: (str) Name of the naive container, used in the file name.
    :param containerOpt: (str) Name of the optimized container, used in the file name.
    :return: None
    """
    # BUG FIX: this previously read the global `data` instead of the
    # `dataUnopt` parameter, raising a NameError when called standalone
    # (and silently plotting the wrong array otherwise).
    yUnopt = dataUnopt[:, :31]
    yOpt = dataOpt[:, :31]
    xticks = ["Q" + str(i).zfill(2) for i in range(31)]
    fig, ax = plt.subplots(figsize=(20, 5))
    xPos = list(range(31))
    width = 0.25
    plotSubset(yUnopt, xPos, width, "Naive implementation", 0)
    plotSubset(yOpt, xPos, width, "Optimized implementation", 1)
    ax.set_xticks([pos + 0.5 * width for pos in xPos])
    ax.set_xticklabels(xticks)
    plt.title("Query execution time for naive implementation vs optimizations")
    plt.ylabel("Query execution time in ms")
    plt.legend()
    plt.savefig("../data/unoptimizedVsOptimized_{}_{}.png".format(containerUnopt, containerOpt))
    plt.show()
def compareIndividualUnoptimizedOptimized(dataUnopt, dataOpt):
    """
    Plots individual graphs for better visibility.

    :param dataUnopt: (np.array) Runtime results of the naive setup.
    :param dataOpt: (np.array) Runtime results of the optimized setup.
    :return: None
    """
    # TODO: not implemented yet -- intended to emit one figure per query
    # instead of the combined plot in compareUnoptimizedOptimized.
    pass
def plotSubset(subset, xPos, width, label, i):
    """Draw one bar group at offset `i`: mean bars with std error bars,
    plus red dash markers at the per-column min and max."""
    meanVals, _, stdVals, minVals, maxVals = analyzeData(subset)
    offsets = [pos + i * width for pos in xPos]
    plt.bar(offsets, meanVals, width, label=label, yerr=stdVals)
    for extremes in (minVals, maxVals):
        plt.scatter(offsets, extremes, color='r', marker='_')
def analyzeData(subset):
    """Return per-column (mean, median, std, min, max) of a 2-D runtime array."""
    reducers = (np.mean, np.median, np.std, np.min, np.max)
    return tuple(reduce(subset, axis=0) for reduce in reducers)
if __name__ == "__main__":
    # Container/setup identifier; doubles as part of the input and output paths.
    containerName = "hyppograph11_btree"
    # Runtime results: whitespace-separated, one row per run, one column per query.
    data = pd.read_csv("../data/runtimeAnalysis_{}.txt".format(containerName),
                       sep=" ", header=None)
    data = data.values
    print(data.shape)
    # for output:
    compareFrequencyImpact(data, containerName)
    compareHypergraphDyadic(data, containerName)
    compareHypergraphDyadicImplicit(data, containerName)
    # load other dataset to get information.
    optContainerName = "hyppograph11_hash"
    dataOpt = pd.read_csv("../data/runtimeAnalysis_{}.txt".format(optContainerName),
                          sep=" ", header=None)
    dataOpt = dataOpt.values
    compareFrequencyImpact(dataOpt, optContainerName)
    compareHypergraphDyadic(dataOpt, optContainerName)
    compareHypergraphDyadicImplicit(dataOpt, optContainerName)
    # compare the two
    compareUnoptimizedOptimized(data, dataOpt, containerName, optContainerName)
|
"""
"""
import configparser
import os
import pathlib
from tkinter.simpledialog import askstring
import psycopg2
def verif_ha_entidade_selecionada(cfg_ini_file):
    """Return True if at least one entity in the [Entities] section is enabled."""
    entities = cfg_ini_file['Entities']
    return any(entities.getboolean(key) for key in entities)
def valida_prefs_ini():
    """
    Validate ./rsc/cnf/prefs.ini and test the database connection.

    Checks, in order: the ADB binary path, the syncdev JSON directory, the
    entity list (non-empty and at least one selected), and the PostgreSQL
    host/user/port fields; finally prompts for a password and attempts a
    test connection with psycopg2.

    :return: dict -- on failure {'validacao': False, 'erro-title': ...,
        'erro-msg': ...}; on success {'validacao': True, 'config': ConfigParser}.
    """
    cfg_ini_file = configparser.ConfigParser()
    cfg_ini_file.read('./rsc/cnf/prefs.ini')
    # The configured ADB executable must point to an existing file.
    adb_file = pathlib.Path(cfg_ini_file['syncdev-Sync']['adb_bin'])
    if not adb_file.is_file():
        return {
            'validacao': False,
            'erro-title': 'Erro - Configuração: ADB Executável',
            'erro-msg': ''.join([
                'O campo "ADB Executável" na',
                ' janela de configurações é inválido ou não está preenchido.\n',
                'Favor reveja as configurações em: Preferências --> Configurações'
            ])
        }
    # The JSON output directory must be set and must exist.
    if len(cfg_ini_file['syncdev-Sync']['syncdev_json'].strip()) == 0 \
            or not os.path.isdir(cfg_ini_file['syncdev-Sync']['syncdev_json']):
        return {
            'validacao': False,
            'erro-title': 'Erro - Configuração: Diretório syncdev(JSONs)',
            'erro-msg': ''.join([
                'O campo "Diretório syncdev(JSONs)" na',
                ' janela de configurações é inválido ou não está preenchido.\n',
                'Favor reveja as configurações em: Preferências --> Configurações'
            ])
        }
    # The entity list must be non-empty and have at least one selection.
    if len(cfg_ini_file['Entities'].keys()) == 0:
        return {
            'validacao': False,
            'erro-title': 'Erro - Configuração: Entidades',
            'erro-msg': ''.join([
                'O campo "Entidades" (lista de entidades) na',
                ' janela de configurações está vazio.\n',
                'Favor reveja as configurações em: Preferências --> Configurações'
            ])
        }
    else:
        entidade_selecionada = verif_ha_entidade_selecionada(cfg_ini_file)
        if not entidade_selecionada:
            return {
                'validacao': False,
                'erro-title': 'Erro - Configuração: Entidades',
                'erro-msg': ''.join([
                    'O campo "Entidades" (lista de entidades) na',
                    ' janela de configurações não possui nenhuma entidade selecionada.\n',
                    'Favor reveja as configurações em: Preferências --> Configurações'
                ])
            }
    # Database connection fields must all be filled in.
    if len(cfg_ini_file['syncdev-PG']['server'].strip()) == 0:
        return {
            'validacao': False,
            'erro-title': 'Erro - Configuração: DB Host',
            'erro-msg': ''.join([
                'O campo "Database host" na',
                ' janela de configurações não está preenchido.\n',
                'Favor reveja as configurações em: Preferências --> Configurações'
            ])
        }
    if len(cfg_ini_file['syncdev-PG']['pg_user'].strip()) == 0:
        return {
            'validacao': False,
            'erro-title': 'Erro - Configuração: DB User',
            'erro-msg': ''.join([
                'O campo "Database user" na',
                ' janela de configurações não está preenchido.\n',
                'Favor reveja as configurações em: Preferências --> Configurações'
            ])
        }
    if len(cfg_ini_file['syncdev-PG']['pg_port'].strip()) == 0:
        return {
            'validacao': False,
            'erro-title': 'Erro - Configuração: DB Port',
            'erro-msg': ''.join([
                'O campo "Database port" na',
                ' janela de configurações não está preenchido.\n',
                'Favor reveja as configurações em: Preferências --> Configurações'
            ])
        }
    # The password is never stored in the ini file; ask for it interactively.
    pg_pwd = askstring('Teste de conexão', 'Digite a senha para teste de conexão com o banco:', show='*')
    try:
        # BUG FIX: the password kwarg contained a broken placeholder token;
        # it must pass the password just collected from the user.
        conn_test = psycopg2.connect(
            dbname='postgres',
            user=cfg_ini_file['syncdev-PG']['pg_user'],
            host=cfg_ini_file['syncdev-PG']['server'],
            password=pg_pwd,
            port=cfg_ini_file['syncdev-PG']['pg_port']
        )
    except psycopg2.OperationalError as e:
        return {
            'validacao': False,
            'erro-title': 'Erro - Teste de conexão',
            'erro-msg': ''.join([
                'O teste de conexão com o banco de dados falhou.',
                ' Favor reveja as configurações em: Preferências --> Configurações\n',
                ' Mensagem de sistema: {0}'.format(e)
            ])
        }
    # Drop the test connection; only the validated config is returned.
    conn_test = None
    return {
        'validacao': True,
        'config': cfg_ini_file
    }
|
import Tkinter as tk
import ScrolledText as tkst # a convenience module that ships with Tkinter
from .toolkit.popups import *
from .toolkit.ribbon import *
from .toolkit import theme
from . import icons
from .. import vector, raster
# Shared look-and-feel for the read-only info rows in the layer-options
# windows; color and font come from the application's theme module.
style_layeroptions_info = {"fg": theme.font1["color"],
                           "font": theme.font1["type"],
                           "relief": "flat"}
# DEFINE THE TOOL-SPECIFIC DIALOGUE WINDOWS
class LayerOptionsWindow(Window):
    """Base window for layer option dialogs, with a tabbed ribbon along the top."""

    def __init__(self, master, **kwargs):
        # Delegate window setup to the toolkit's Window base class.
        Window.__init__(self, master, **kwargs)
        # Tab selector across the top of the window.
        self.ribbon = Ribbon(self)
        self.ribbon.pack(side="top", fill="both", expand=True)

    def add_info(self, tab, label, value):
        """Add a read-only label/value row to a ribbon tab and return the Entry."""
        background = tab.cget("bg")
        row = tk.Frame(tab, bg=background)
        row.pack(fill="x", anchor="n", pady=5, padx=5)
        # Left-aligned caption for the row.
        caption = tk.Label(row, text=label, bg=background, **style_layeroptions_info)
        caption.pack(side="left", anchor="nw", padx=3)
        # Right-aligned value, shown in a readonly Entry so it can be copied.
        info = tk.Entry(row, width=400, disabledbackground="white", justify="right", **style_layeroptions_info)
        info.pack(side="right", anchor="ne", padx=3)
        info.insert(0, str(value))
        info.config(state="readonly")
        return info
class VectorLayerOptionsWindow(LayerOptionsWindow):
    # Options dialog for a vector layer: a "General" tab with read-only data
    # info and a "Symbology" tab for editing the layer's style options.
    def __init__(self, master, layeritem, statusbar, **kwargs):
        # Delegate ribbon/window setup to the base LayerOptionsWindow.
        LayerOptionsWindow.__init__(self, master, **kwargs)
        self.layeritem = layeritem
        ###########
        ### GENERAL OPTIONS TAB
        general = self.ribbon.add_tab("General")
        # Read-only rows describing the underlying vector data source.
        self.source = self.add_info(general, "Source file: ", layeritem.renderlayer.data.filepath)
        self.proj = self.add_info(general, "Projection: ", self.layeritem.renderlayer.data.crs)
        self.bbox = self.add_info(general, "Bounding box: ", layeritem.renderlayer.data.bbox)
        self.fields = self.add_info(general, "Attribute fields: ", layeritem.renderlayer.data.fields)
        self.rows = self.add_info(general, "Total rows: ", len(layeritem.renderlayer.data))
        ###########
        ### SYMBOLOGY TAB
        symbols = self.ribbon.add_tab("Symbology")
        # With test symbology window on button click
        layeritem = self.layeritem
        frame = RunToolFrame(symbols)
        frame.pack(fill="both", expand=True)
        # assign status bar
        frame.assign_statusbar(statusbar)
        # Style inputs; valuetype=eval lets the user type Python literals
        # (tuples, numbers). NOTE(review): eval on user input is unsafe for
        # untrusted input -- acceptable only for a local desktop tool.
        frame.add_option_input("Fill size", valuetype=eval,
                               argname="fillsize", default=layeritem.renderlayer.styleoptions.get("fillsize"))
        frame.add_option_input("Fill color", valuetype=eval,
                               argname="fillcolor", default=layeritem.renderlayer.styleoptions.get("fillcolor"))
        frame.add_option_input("Outline color", valuetype=eval,
                               argname="outlinecolor", default=layeritem.renderlayer.styleoptions.get("outlinecolor"))
        frame.add_option_input("Outline width", valuetype=eval,
                               argname="outlinewidth", default=layeritem.renderlayer.styleoptions.get("outlinewidth"))
        # when clicking OK, update data options
        def change_symbol_options(*args, **kwargs):
            """
            Apply the chosen style options to the layer (runs as the tool task).
            """
            # update user settings
            layeritem.renderlayer.styleoptions.update(kwargs)
        def change_symbol_options_complete(result):
            # Re-render this layer on every connected map, then close the dialog.
            for mapcanvas in layeritem.layerspane.layers.connected_maps:
                mapcanvas.render_one(layeritem.renderlayer)
                mapcanvas.mapview.update_image()
            # close window
            self.destroy()
        frame.set_target_method("Changing symbol options", change_symbol_options)
        frame.set_finished_method(change_symbol_options_complete)
        ###########
        # Set starting tab
        self.ribbon.switch(tabname="General")
class RasterLayerOptionsWindow(LayerOptionsWindow):
    # Options dialog for a raster layer: a single "General" tab with
    # read-only information about the raster data source.
    def __init__(self, master, layeritem, statusbar, **kwargs):
        # Delegate ribbon/window setup to the base LayerOptionsWindow.
        LayerOptionsWindow.__init__(self, master, **kwargs)
        self.layeritem = layeritem
        ###########
        ### GENERAL OPTIONS TAB
        general = self.ribbon.add_tab("General")
        # Read-only rows describing the underlying raster data source.
        self.source = self.add_info(general, "Source file: ", layeritem.renderlayer.data.filepath)
        self.proj = self.add_info(general, "Projection: ", self.layeritem.renderlayer.data.crs)
        self.dims = self.add_info(general, "Dimensions: ", "%i, %i"%(self.layeritem.renderlayer.data.width,
                                                                    self.layeritem.renderlayer.data.height))
        self.bands = self.add_info(general, " Raster bands: ", "%i"%len(self.layeritem.renderlayer.data.bands))
        self.transform = self.add_info(general, "Transform: ", self.layeritem.renderlayer.data.info)
        self.bbox = self.add_info(general, "Bounding box: ", layeritem.renderlayer.data.bbox)
        ###########
        # Set starting tab
        self.ribbon.switch(tabname="General")
################
class RightClickMenu_VectorLayer(tk.Menu):
    # Context menu shown when right-clicking a vector layer entry:
    # rename, save-as, split, buffer, clean, and properties.
    # NOTE(review): asksaveasfilename, dispatch, and popup_message come from
    # the star imports at the top of the file -- confirm against toolkit.popups.
    def __init__(self, master, layerspane, layeritem, statusbar, **kwargs):
        # Make this class a subclass of tk.Menu and add to it
        tk.Menu.__init__(self, master, tearoff=0, **kwargs)
        self.layerspane = layerspane
        self.layeritem = layeritem
        self.statusbar = statusbar
        # Keep icon images referenced so Tkinter does not garbage-collect them.
        self.imgs = dict()
        # Renaming
        self.imgs["rename"] = icons.get("rename.png", width=32, height=32)
        self.add_command(label="Rename", command=self.layeritem.ask_rename, image=self.imgs["rename"], compound="left")
        # Saving
        def ask_save():
            # Ask for a target path, then save the layer data as a background task.
            savepath = asksaveasfilename()
            self.statusbar.task.start("Saving layer to file...")
            pending = dispatch.request_results(self.layeritem.renderlayer.data.save, args=[savepath])
            def finish(result):
                # Only report a popup on failure; always stop the task indicator.
                if isinstance(result, Exception):
                    popup_message(self, str(result) + "\n\n" + savepath)
                self.statusbar.task.stop()
            dispatch.after_completion(self, pending, finish)
        self.imgs["save"] = icons.get("save.png", width=32, height=32)
        self.add_command(label="Save as", command=ask_save, image=self.imgs["save"], compound="left")
        # ---(Breakline)---
        self.add_separator()
        # Splitting
        # NOTE: open_options_window is redefined for each entry below; each
        # add_command call captures the function object defined just above it,
        # so the later redefinitions do not affect earlier menu entries.
        def open_options_window():
            window = VectorSplitOptionWindow(self.layeritem, self.layerspane, self.layeritem, statusbar)
        self.imgs["split"] = icons.get("split.png", width=32, height=32)
        self.add_command(label="Split to layers", command=open_options_window, image=self.imgs["split"], compound="left")
        # ---(Breakline)---
        self.add_separator()
        # Buffering
        def open_options_window():
            window = VectorBufferOptionWindow(self.layeritem, self.layerspane, self.layeritem, statusbar)
        self.imgs["buffer"] = icons.get("buffer.png", width=32, height=32)
        self.add_command(label="Buffer", command=open_options_window, image=self.imgs["buffer"], compound="left")
        # Cleaning
        def open_options_window():
            window = VectorCleanOptionWindow(self.layeritem, self.layerspane, self.layeritem, statusbar)
        self.imgs["clean"] = icons.get("clean.png", width=32, height=32)
        self.add_command(label="Clean Geometries", command=open_options_window, image=self.imgs["clean"], compound="left")
        # ---(Breakline)---
        self.add_separator()
        # View properties
        def view_properties():
            window = VectorLayerOptionsWindow(self.layeritem, self.layeritem, statusbar)
        self.imgs["properties"] = icons.get("properties.png", width=32, height=32)
        self.add_command(label="Properties", command=view_properties, image=self.imgs["properties"], compound="left")
class RightClickMenu_RasterLayer(tk.Menu):
    # Context menu shown when right-clicking a raster layer entry:
    # rename, save-as, resample, and properties.
    # NOTE(review): asksaveasfilename, dispatch, and popup_message come from
    # the star imports at the top of the file -- confirm against toolkit.popups.
    def __init__(self, master, layerspane, layeritem, statusbar, **kwargs):
        # Make this class a subclass of tk.Menu and add to it
        tk.Menu.__init__(self, master, tearoff=0, **kwargs)
        self.layerspane = layerspane
        self.layeritem = layeritem
        self.statusbar = statusbar
        # Keep icon images referenced so Tkinter does not garbage-collect them.
        self.imgs = dict()
        # Renaming
        self.imgs["rename"] = icons.get("rename.png", width=32, height=32)
        self.add_command(label="Rename", command=self.layeritem.ask_rename, image=self.imgs["rename"], compound="left")
        # Saving
        def ask_save():
            # Ask for a target path, then save the layer data as a background task.
            savepath = asksaveasfilename()
            self.statusbar.task.start("Saving layer to file...")
            pending = dispatch.request_results(self.layeritem.renderlayer.data.save, args=[savepath])
            def finish(result):
                # Only report a popup on failure; always stop the task indicator.
                if isinstance(result, Exception):
                    popup_message(self, str(result) + "\n\n" + savepath)
                self.statusbar.task.stop()
            dispatch.after_completion(self, pending, finish)
        self.imgs["save"] = icons.get("save.png", width=32, height=32)
        self.add_command(label="Save as", command=ask_save, image=self.imgs["save"], compound="left")
        # ---(Breakline)---
        self.add_separator()
        # Resampling
        def open_options_window():
            window = RasterResampleOptionWindow(self.layeritem, self.layerspane, self.layeritem, statusbar)
        self.imgs["resample"] = icons.get("resample.png", width=32, height=32)
        self.add_command(label="Resample", command=open_options_window, image=self.imgs["resample"], compound="left")
        # ---(Breakline)---
        self.add_separator()
        # View properties
        def view_properties():
            window = RasterLayerOptionsWindow(self.layeritem, self.layeritem, statusbar)
        self.imgs["properties"] = icons.get("properties.png", width=32, height=32)
        self.add_command(label="Properties", command=view_properties, image=self.imgs["properties"], compound="left")
#################
class VectorCleanOptionWindow(Window):
    """Dialog for running the geometry-clean tool on a vector layer."""

    def __init__(self, master, layerspane, layeritem, statusbar, **kwargs):
        Window.__init__(self, master, **kwargs)
        # The tool-runner frame collects options and runs the task in the background.
        self.runtool = RunToolFrame(self)
        self.runtool.pack(fill="both", expand=True)
        self.runtool.assign_statusbar(statusbar)
        # The layer's data is passed implicitly, not shown as a user option.
        self.runtool.add_hidden_option(argname="data", value=layeritem.renderlayer.data)
        self.runtool.set_target_method("Cleaning data...", vector.manager.clean)
        self.runtool.add_option_input(argname="tolerance", label="Tolerance (in distance units)",
                                      valuetype=float, default=0.0, minval=0.0, maxval=1.0)
        # Name for the resulting layer, derived from the source layer's label.
        newname = layeritem.namelabel["text"] + "_cleaned"

        def process(result):
            # Called when the background task finishes.
            if isinstance(result, Exception):
                popup_message(self, "Failed to clean the data:" + "\n\n" + str(result) )
                return
            layerspane.add_layer(result, name=newname)
            self.destroy()

        self.runtool.set_finished_method(process)
class VectorSplitOptionWindow(Window):
    """Dialog for splitting a vector layer into several layers by field values."""

    def __init__(self, master, layerspane, layeritem, statusbar, **kwargs):
        Window.__init__(self, master, **kwargs)
        # The tool-runner frame collects options and runs the task in the background.
        self.runtool = RunToolFrame(self)
        self.runtool.pack(fill="both", expand=True)
        self.runtool.assign_statusbar(statusbar)
        # The layer's data is passed implicitly, not shown as a user option.
        self.runtool.add_hidden_option(argname="data", value=layeritem.renderlayer.data)
        self.runtool.set_target_method("Splitting data...", vector.manager.split)
        self.runtool.add_option_input(argname="splitfields",
                                      label="Split by fields",
                                      multi=True, choices=layeritem.renderlayer.data.fields,
                                      valuetype=str)

        def process(result):
            # Called when the background task finishes.
            if isinstance(result, Exception):
                popup_message(self, "Failed to split the data:" + "\n\n" + str(result) )
                return
            # One new layer per split piece, refreshing the UI as we go.
            for piece in result:
                layerspane.add_layer(piece)
                self.update()
            self.destroy()

        self.runtool.set_finished_method(process)
class VectorBufferOptionWindow(Window):
    """Dialog for buffering a vector layer by a distance expression."""

    def __init__(self, master, layerspane, layeritem, statusbar, **kwargs):
        Window.__init__(self, master, **kwargs)
        # The tool-runner frame collects options and runs the task in the background.
        self.runtool = RunToolFrame(self)
        self.runtool.pack(fill="both", expand=True)
        self.runtool.assign_statusbar(statusbar)
        # The layer's data is passed implicitly, not shown as a user option.
        self.runtool.add_hidden_option(argname="data", value=layeritem.renderlayer.data)
        self.runtool.set_target_method("Buffering data...", vector.analyzer.buffer)
        self.runtool.add_option_input(argname="dist_expression",
                                      label="Distance calculation",
                                      valuetype=str)

        def process(result):
            # Called when the background task finishes.
            if isinstance(result, Exception):
                popup_message(self, "Failed to buffer the data:" + "\n\n" + str(result) )
                return
            layerspane.add_layer(result)
            self.destroy()

        self.runtool.set_finished_method(process)
class RasterResampleOptionWindow(Window):
    """Dialog for resampling a raster layer to a new grid size/cell size."""

    def __init__(self, master, layerspane, layeritem, statusbar, **kwargs):
        Window.__init__(self, master, **kwargs)
        # Create runtoolframe
        self.runtool = RunToolFrame(self)
        self.runtool.pack(fill="both", expand=True)
        self.runtool.assign_statusbar(statusbar)
        # The raster being resampled is passed implicitly, not as a user option.
        self.runtool.add_hidden_option(argname="raster", value=layeritem.renderlayer.data)
        self.runtool.set_target_method("Resampling data...", raster.manager.resample)
        # NOTE: a dead get_data_from_layername helper was removed here -- it
        # was never used by this dialog and, unlike its siblings, read the
        # nonexistent "name_label" attribute instead of "namelabel".
        self.runtool.add_option_input(argname="width", label="Raster width (in cells)",
                                      valuetype=int)
        self.runtool.add_option_input(argname="height", label="Raster height (in cells)",
                                      valuetype=int)
        self.runtool.add_option_input(argname="cellwidth", label="Cell width (in distance units)",
                                      valuetype=float)
        self.runtool.add_option_input(argname="cellheight", label="Cell height (in distance units)",
                                      valuetype=float)

        # Define how to process after finished
        def process(result):
            if isinstance(result, Exception):
                popup_message(self, "Failed to resample the data:" + "\n\n" + str(result) )
                return
            layerspane.add_layer(result)
            self.destroy()

        self.runtool.set_finished_method(process)
##############
# Multi Input
class VectorMergeOptionWindow(Window):
    """Dialog for merging several vector layers into one new layer."""

    def __init__(self, master, layerspane, statusbar, **kwargs):
        Window.__init__(self, master, **kwargs)
        # The tool-runner frame collects options and runs the task in the background.
        self.runtool = RunToolFrame(self)
        self.runtool.pack(fill="both", expand=True)
        self.runtool.assign_statusbar(statusbar)
        self.runtool.set_target_method("Merging data...", vector.manager.merge)

        def get_data_from_layername(name):
            # Map a displayed layer name back to its data object.
            for item in layerspane:
                if item.namelabel["text"] == name:
                    return item.renderlayer.data
            return None

        layernames = [item.namelabel["text"] for item in layerspane]
        self.runtool.add_option_input(argname=None,
                                      label="Layers to be merged",
                                      multi=True,
                                      choices=layernames,
                                      valuetype=get_data_from_layername)

        def process(result):
            # Called when the background task finishes; window stays open.
            if isinstance(result, Exception):
                popup_message(self, "Failed to merge the data:" + "\n\n" + str(result) )
            else:
                layerspane.add_layer(result, name="merged")

        self.runtool.set_finished_method(process)
class VectorOverlapSummaryWindow(Window):
    # Dialog for computing an overlap summary between two vector layers,
    # grouping one layer's values by another's geometries.
    def __init__(self, master, layerspane, statusbar, **kwargs):
        # Delegate window setup to the toolkit's Window base class.
        Window.__init__(self, master, **kwargs)
        # Create runtoolframe
        self.runtool = RunToolFrame(self)
        self.runtool.pack(fill="both", expand=True)
        self.runtool.assign_statusbar(statusbar)
        # Set the remaining options
        self.runtool.set_target_method("Calculating overlap summary on data...", vector.analyzer.overlap_summary)
        def get_data_from_layername(name):
            # Map a displayed layer name back to its data object.
            data = None
            for layeritem in layerspane:
                if layeritem.namelabel["text"] == name:
                    data = layeritem.renderlayer.data
                    break
            return data
        self.runtool.add_option_input(argname="groupbydata",
                                      label="Group by data",
                                      default="(Choose layer)",
                                      choices=[layeritem.namelabel["text"] for layeritem in layerspane],
                                      valuetype=get_data_from_layername)
        self.runtool.add_option_input(argname="valuedata",
                                      label="Value data",
                                      default="(Choose layer)",
                                      choices=[layeritem.namelabel["text"] for layeritem in layerspane],
                                      valuetype=get_data_from_layername)
        # Field mapping entries are parsed with eval; NOTE(review): unsafe for
        # untrusted input -- acceptable only for a local desktop tool.
        self.runtool.add_option_input(argname="fieldmapping",
                                      label="Field mapping",
                                      multi=True,
                                      valuetype=eval)
        # Define how to process
        def process(result):
            # Called when the background task finishes; window stays open.
            if isinstance(result, Exception):
                popup_message(self, "Failed to calculate overlap summary on data:" + "\n\n" + str(result) )
            else:
                layerspane.add_layer(result, name="overlap summary")
        self.runtool.set_finished_method(process)
class RasterMosaicOptionWindow(Window):
    """Dialog for mosaicking several raster layers into one new layer."""

    def __init__(self, master, layerspane, statusbar, **kwargs):
        Window.__init__(self, master, **kwargs)
        # The tool-runner frame collects options and runs the task in the background.
        self.runtool = RunToolFrame(self)
        self.runtool.pack(fill="both", expand=True)
        self.runtool.assign_statusbar(statusbar)
        self.runtool.set_target_method("Mosaicking data...", raster.manager.mosaic)

        def get_data_from_layername(name):
            # Map a displayed layer name back to its data object.
            for item in layerspane:
                if item.namelabel["text"] == name:
                    return item.renderlayer.data
            return None

        layernames = [item.namelabel["text"] for item in layerspane]
        self.runtool.add_option_input(argname=None,
                                      label="Layers to be mosaicked",
                                      multi=True,
                                      choices=layernames,
                                      valuetype=get_data_from_layername)

        def process(result):
            # Called when the background task finishes; window stays open.
            if isinstance(result, Exception):
                popup_message(self, "Failed to mosaick the data:" + "\n\n" + str(result) )
            else:
                layerspane.add_layer(result, name="mosaicked")

        self.runtool.set_finished_method(process)
class RasterZonalStatsOptionWindow(Window):
    # Dialog for computing zonal statistics: aggregates a value raster's band
    # within the zones of a zonal raster, adds the result as a layer, and
    # shows the detailed per-zone stats in a scrolled-text window.
    def __init__(self, master, layerspane, statusbar, **kwargs):
        # Delegate window setup to the toolkit's Window base class.
        Window.__init__(self, master, **kwargs)
        # Create runtoolframe
        self.runtool = RunToolFrame(self)
        self.runtool.pack(fill="both", expand=True)
        self.runtool.assign_statusbar(statusbar)
        # Set the remaining options
        self.runtool.set_target_method("Calculating zonal statistics on data...", raster.analyzer.zonal_statistics)
        def get_data_from_layername(name):
            # Map a displayed layer name back to its data object.
            data = None
            for layeritem in layerspane:
                if layeritem.namelabel["text"] == name:
                    data = layeritem.renderlayer.data
                    break
            return data
        self.runtool.add_option_input(argname="zonaldata",
                                      label="Zonal data",
                                      default="(Choose layer)",
                                      choices=[layeritem.namelabel["text"] for layeritem in layerspane],
                                      valuetype=get_data_from_layername)
        self.runtool.add_option_input(argname="valuedata",
                                      label="Value data",
                                      default="(Choose layer)",
                                      choices=[layeritem.namelabel["text"] for layeritem in layerspane],
                                      valuetype=get_data_from_layername)
        # Band indices default to the first band of each raster.
        self.runtool.add_option_input(argname="zonalband",
                                      label="Zonal band",
                                      valuetype=int,
                                      default=0)
        self.runtool.add_option_input(argname="valueband",
                                      label="Value band",
                                      valuetype=int,
                                      default=0)
        self.runtool.add_option_input(argname="outstat",
                                      label="Output Raster Statistic",
                                      valuetype=str,
                                      default="mean",
                                      choices=["min","max","count","sum","mean","median","var","stddev"])
        # Define how to process
        def process(result):
            # Called when the background task finishes; window stays open.
            if isinstance(result, Exception):
                popup_message(self, "Failed to calculate zonal statistics on the data:" + "\n\n" + str(result) )
            else:
                # Result is a pair: per-zone stats dict and the output raster.
                zonesdict, outraster = result
                # add the resulting zonestatistics layer
                layerspane.add_layer(outraster, name="zonal statistic")
                # also view stats in window
                win = Window()
                textbox = tkst.ScrolledText(win)
                textbox.pack(fill="both", expand=True)
                textbox.insert(tk.END, "Zonal statistics detailed result:")
                textbox.insert(tk.END, "\n---------------------------------\n")
                # One indented stats section per zone.
                for zone,stats in zonesdict.items():
                    statstext = "\n"+"Zone %i:"%zone
                    statstext += "\n\t" + "\n\t".join(["%s: %f"%(key,val) for key,val in stats.items()])
                    textbox.insert(tk.END, statstext)
        self.runtool.set_finished_method(process)
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import time
import mongoengine
from st2common.models import db
from st2common.models.db import stormbase
from st2common.persistence.base import Access
from st2common.exceptions.db import StackStormDBObjectNotFoundError
__all__ = [
'BaseDBModelCRUDTestCase',
'FakeModel',
'FakeModelDB',
'ChangeRevFakeModel',
'ChangeRevFakeModelDB'
]
class BaseDBModelCRUDTestCase(object):
    """
    Mixin providing a generic create/update/delete test for a DB model.

    Subclasses must also inherit from a unittest TestCase (assertEqual /
    assertRaises are used) and set the class attributes below.
    """

    # DB model class under test (e.g. FakeModelDB).
    model_class = None
    # Persistence Access class used for the CRUD operations.
    persistance_class = None
    # Keyword arguments used to instantiate model_class.
    model_class_kwargs = {}
    # Name of the attribute mutated during the update step.
    update_attribute_name = None
    # Attribute names excluded from the post-create equality checks.
    skip_check_attribute_names = []

    def test_crud_operations(self):
        # 1. Test create
        model_db = self.model_class(**self.model_class_kwargs)
        saved_db = self.persistance_class.add_or_update(model_db)
        retrieved_db = self.persistance_class.get_by_id(saved_db.id)
        self.assertEqual(saved_db.id, retrieved_db.id)
        # Every constructor kwarg should round-trip through the DB unchanged.
        for attribute_name, attribute_value in self.model_class_kwargs.items():
            if attribute_name in self.skip_check_attribute_names:
                continue
            self.assertEqual(getattr(saved_db, attribute_name), attribute_value)
            self.assertEqual(getattr(retrieved_db, attribute_name), attribute_value)
        # 2. Test update
        # Use a timestamp so the updated value is unique per run.
        updated_attribute_value = 'updated-%s' % (str(time.time()))
        setattr(model_db, self.update_attribute_name, updated_attribute_value)
        saved_db = self.persistance_class.add_or_update(model_db)
        self.assertEqual(getattr(saved_db, self.update_attribute_name), updated_attribute_value)
        retrieved_db = self.persistance_class.get_by_id(saved_db.id)
        self.assertEqual(saved_db.id, retrieved_db.id)
        self.assertEqual(getattr(retrieved_db, self.update_attribute_name), updated_attribute_value)
        # 3. Test delete
        self.persistance_class.delete(model_db)
        # After deletion, lookups by the old id must raise not-found.
        self.assertRaises(StackStormDBObjectNotFoundError, self.persistance_class.get_by_id,
                          model_db.id)
class FakeModelDB(stormbase.StormBaseDB):
    """Minimal concrete model used to exercise the base CRUD test case."""

    context = stormbase.EscapedDictField()
    index = mongoengine.IntField(min_value=0)
    category = mongoengine.StringField()
    timestamp = mongoengine.DateTimeField()
    # Secondary indexes, including one on a nested context key.
    meta = {
        'indexes': [
            {'fields': ['index']},
            {'fields': ['category']},
            {'fields': ['timestamp']},
            {'fields': ['context.user']},
        ]
    }
class FakeModel(Access):
    """Persistence access layer for FakeModelDB."""

    impl = db.MongoDBAccess(FakeModelDB)

    @classmethod
    def _get_impl(cls):
        return cls.impl

    @classmethod
    def _get_by_object(cls, object):
        # Lookup by object is not supported for the fake model.
        return None

    @classmethod
    def _get_publisher(cls):
        # No change notifications are published for the fake model.
        return None
class ChangeRevFakeModelDB(stormbase.StormBaseDB, stormbase.ChangeRevisionFieldMixin):
    """Fake model variant that carries a change-revision field via the mixin."""

    context = stormbase.EscapedDictField()
class ChangeRevFakeModel(Access):
    """Persistence access layer for ChangeRevFakeModelDB (revision-aware)."""

    impl = db.ChangeRevisionMongoDBAccess(ChangeRevFakeModelDB)

    @classmethod
    def _get_impl(cls):
        return cls.impl

    @classmethod
    def _get_by_object(cls, object):
        # Lookup by object is not supported for the fake model.
        return None

    @classmethod
    def _get_publisher(cls):
        # No change notifications are published for the fake model.
        return None
|
import os
import zipfile as zp
import pandas as pd
import numpy as np
import core
import requests
class Labels:
    """Column name sets for the raw and transformed IMGW daily hydrology data."""

    # Columns of the raw downloaded CSV (the file itself has no header row).
    init_cols = [
        'station_id', 'station_name', 'riv_or_lake', 'hydroy', 'hydrom', 'day',
        'lvl', 'flow', 'temp', 'month']
    # Column order of the transformed frame produced by transform().
    trans_cols = [
        'date', 'year', 'month', 'day', 'hydroy', 'hydrom', 'station_id', 'station_name',
        'riv_or_lake', 'riv_or_lake_id', 'lvl', 'flow', 'temp']
def transform(trans_df):
    """Reshape a raw IMGW daily frame into the canonical column layout.

    Splits the numeric river/lake id out of the combined name column,
    forward-fills month/day, derives the calendar year from the
    hydrological year (hydrological months 11 and 12 belong to the
    previous calendar year), builds a proper ``date`` column and replaces
    sentinel readings (9999 / 99999.999 / 99.9) with NaN.
    """
    frame = trans_df.reset_index().drop('index', axis=1)
    # Letters (incl. Polish diacritics), '(' and spaces stripped from the
    # left leave only the trailing numeric id, e.g. 'Wisla (123)' -> '123'.
    strip_chars = 'AĄBCĆDEĘFGHIJKLŁMNŃOÓPQRSŚTUVWXYZŹŻaąbcćdeęfghijklłmnńoópqrsśtuvwxyzźż( '
    lake_ids = frame['riv_or_lake'].map(lambda s: s.lstrip(strip_chars).rstrip(')'))
    frame['riv_or_lake'] = frame['riv_or_lake'].str.rstrip(' ()1234567890 ')
    frame['riv_or_lake_id'] = lake_ids
    frame['month'] = frame['month'].fillna(method='ffill').astype(int)
    frame['day'] = frame['day'].fillna(method='ffill').astype(int)
    # Hydrological year starts in November; shift those months back a year.
    frame['year'] = frame['hydroy']
    winter = frame['month'].isin((11, 12))
    frame.loc[winter, 'year'] = frame['year'].astype(int) - 1
    frame['date'] = pd.to_datetime(frame[['year', 'month', 'day']])
    frame = frame[Labels.trans_cols]
    # Sentinel values used by IMGW for "no data".
    frame.loc[frame['lvl'] == 9999, 'lvl'] = np.nan
    frame.loc[frame['flow'] == 99999.999, 'flow'] = np.nan
    frame.loc[frame['temp'] == 99.9, 'temp'] = np.nan
    return frame
def getframe(year: int, month: int, stationid=None, station=None):
    """Download and parse one month of IMGW daily hydrological data.

    Downloads the monthly zip into ``temp/``, extracts the CSV, loads it
    with the raw column names, optionally filters to a single station
    (by id, or by name when id is not given), removes the temp files and
    returns the DataFrame.
    """
    core.makedir(dirname='temp')
    zipname = f'codz_{year}_{core.strnumb(month)}.zip'
    csvname = f'codz_{year}_{core.strnumb(month)}.csv'
    url = f'https://danepubliczne.imgw.pl/data/dane_pomiarowo_obserwacyjne/dane_hydrologiczne/dobowe/{year}/{zipname}'
    r = requests.get(url)
    # BUGFIX: fail fast on HTTP errors (404/5xx). Previously the error page
    # was written into the .zip file and the failure surfaced later as a
    # confusing "not a zip file" error from ZipFile.
    r.raise_for_status()
    with open(f'temp/{zipname}', 'wb') as file:
        file.write(r.content)
    with zp.ZipFile(f'temp/{zipname}', 'r') as zip_ref:
        zip_ref.extractall(path='temp')
    df = pd.read_csv(f'temp/{csvname}', encoding='windows-1250', header=None)
    df.columns = Labels.init_cols
    if stationid is not None:
        df = df.loc[df['station_id'] == int(stationid)]
    elif station is not None:
        df = df.loc[df['station_name'] == station]
    os.remove(f'temp/{zipname}')
    os.remove(f'temp/{csvname}')
    return df
def getyear(year: int, stationid=None, station=None, save=False):
    """Fetch and transform a full hydrological year of daily data.

    Optionally writes the result to ``Saved/`` as CSV (per station or for
    all stations). Returns the transformed DataFrame with a fresh index.
    """
    err(stationid, station)
    if not isinstance(year, int):
        raise Exception('year argument must be an integer')
    if year not in range(1951, 2021):
        raise Exception('year argument not in available range (1951, 2020)')
    # Seed with the raw column layout so concatenation keeps column order.
    monthly = [pd.DataFrame([], columns=Labels.init_cols)]
    for month in range(1, 13):
        monthly.append(getframe(year, month, stationid, station))
    year_df = transform(pd.concat(monthly, ignore_index=True))
    if save:
        core.makedir('Saved')
        if stationid is not None:
            year_df.to_csv(f'Saved/hydro_daily_{year}_{stationid}.csv', index=False, encoding='utf-8')
        elif station is not None:
            year_df.to_csv(f'Saved/hydro_daily_{year}_{station}.csv', index=False, encoding='utf-8')
        else:
            year_df.to_csv(f'Saved/hydro_daily_{year}_all.csv', index=False, encoding='utf-8')
    return year_df.reset_index().drop('index', axis=1)
def getrange(first_year: int, last_year: int, stationid=None, station=None, save=False):
    """Fetch and transform daily data for an inclusive range of years.

    Downloads every month of every hydrological year in
    [first_year, last_year], concatenates, transforms, optionally saves to
    ``Saved/`` and returns the transformed DataFrame with a fresh index.
    """
    err(stationid, station)
    if not isinstance(first_year, int) or not isinstance(last_year, int):
        raise Exception('first_year and last_year arguments must be integers')
    elif first_year not in range(1951, 2021) or last_year not in range(1951, 2021):
        raise Exception('year argument out of available range (1951-2020)')
    else:
        # CONSISTENCY FIX: seed with the raw-file columns (init_cols), like
        # getyear() does. Seeding with trans_cols injected empty
        # 'date'/'year'/'riv_or_lake_id' columns into the raw frame before
        # transform(); they were overwritten there, so output is unchanged.
        range_df = pd.DataFrame([], columns=Labels.init_cols)
        for year in range(first_year, last_year + 1):
            for month in range(1, 12+1):
                df = getframe(year, month, stationid, station)
                range_df = pd.concat([range_df, df], ignore_index=True)
        range_df = transform(range_df)
        if save:
            core.makedir('Saved')
            if stationid is not None:
                range_df.to_csv(f'Saved/hydro_daily_range_{first_year}-{last_year}_{stationid}.csv', index=False, encoding='utf-8')
            elif station is not None:
                range_df.to_csv(f'Saved/hydro_daily_range_{first_year}-{last_year}_{station}.csv', index=False, encoding='utf-8')
            else:
                range_df.to_csv(f'Saved/hydro_daily_range_{first_year}-{last_year}_all.csv', index=False, encoding='utf-8')
        return range_df.reset_index().drop('index', axis=1)
def getmonth(year: int, month: int, stationid=None, station=None, save=False):
    """Fetch and transform one month of daily data.

    Validates arguments, downloads the month with getframe(), transforms
    it, optionally saves it to ``Saved/`` and returns the DataFrame.
    """
    err(stationid, station)
    # Guard clauses replace the original if/elif pyramid.
    if not isinstance(year, int) or not isinstance(month, int):
        raise Exception('year and month arguments must be integers')
    if month not in range(1, 13):
        raise Exception('month argument not in range (1-12)')
    if year not in range(1951, 2021):
        raise Exception('year argument not in available range (1951-2020)')
    month_df = getframe(year, month, stationid, station)
    if month_df.empty:
        raise Exception('there is no station with chosen name or id ')
    month_df.columns = Labels.init_cols
    month_df = transform(month_df)
    if save:
        core.makedir('Saved')
        if stationid is not None:
            month_df.to_csv(f'Saved/hydro_daily_{year}_{core.strnumb(month)}_{stationid}.csv', index=False, encoding='utf-8')
        elif station is not None:
            month_df.to_csv(f'Saved/hydro_daily_{year}_{core.strnumb(month)}_{station}.csv', index=False, encoding='utf-8')
        else:
            month_df.to_csv(f'Saved/hydro_daily_{year}_{core.strnumb(month)}_all.csv', index=False, encoding='utf-8')
    return month_df
def err(stationid, station):
    """Validate the station selector arguments.

    Raises Exception when a non-None selector has the wrong type;
    otherwise returns None. None values are always accepted.
    """
    if stationid is not None and not isinstance(stationid, int):
        raise Exception('stationid argument must be an integer')
    if station is not None and not isinstance(station, str):
        raise Exception('station argument must be a string')
def metadata(stationid: int, data: str) -> list:
    """Look up station metadata from metadata/hydro_stations.csv.

    data selects what is returned:
      'coords'       -> [X, Y]
      'riv_or_lake'  -> [river/lake name, river/lake id]
      'station_name' -> [name]
    Raises Exception for a missing/invalid stationid, an unknown station
    or an unknown ``data`` selector.
    """
    if stationid is None:
        raise Exception('missing stationid argument')
    if not isinstance(stationid, int):
        raise Exception('stationid argument must be an integer')
    meta = pd.read_csv('metadata/hydro_stations.csv', encoding='utf-8')
    row = meta.loc[meta['id'] == stationid]
    if row.empty:
        raise Exception('station with chosen id does not exist')
    if data == 'coords':
        return [row['X'].unique()[0], row['Y'].unique()[0]]
    if data == 'riv_or_lake':
        return [row['riv_or_lake'].unique()[0], row['riv_or_lake_id'].unique()[0]]
    if data == 'station_name':
        return [row['name'].unique()[0]]
    raise Exception('unknown data argument')
def stations(year: int, month=None) -> list:
    """Return the unique station ids present in the chosen period.

    When ``month`` is given, only that month is inspected; otherwise the
    whole year. Order of the returned list is unspecified (set-derived).
    """
    if not isinstance(year, int):
        raise Exception('year argument must be an integer')
    elif not isinstance(month, int) and month is not None:
        raise Exception('month argument must be an integer')
    elif month not in range(1, 13) and month is not None:
        raise Exception('month argument not in range (1-12)')
    # PERFORMANCE FIX: fetch the frame once. The original called
    # getmonth()/getyear() twice (once for names, once for ids), doubling
    # the download work, and then never used the names.
    if month is not None:
        frame = getmonth(year, month)
    else:
        frame = getyear(year)
    return list(set(frame['station_id']))
|
import os
import time
import visdom
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
from experiment_interface.hooks import Hook
from experiment_interface.logger import get_train_logger
from experiment_interface.plot_utils import plot_trainval_loss, plot_val_lossacc
from experiment_interface.common import DebugMode
# import matplotlib
# import matplotlib.pyplot as plt
# Port the local Visdom server listens on.
VISDOM_PORT = 8097
# CSV file name the trainer appends per-step records to.
RECORD_FILE = 'train_record.csv'
# Global seaborn theme applied to every runner figure.
sns.set_style('ticks', {'axes.grid': True})
sns.set_context('talk')
class VisdomRunner(Hook):
    """Training hook that periodically renders a matplotlib figure into Visdom.

    The master runner (``is_master=True``) is responsible for (re)starting
    the Visdom server inside a detached tmux session; every runner then
    connects a Visdom client. Subclasses implement ``refresh`` to draw onto
    the provided axes.
    """

    def __init__(self, refresh_interval=10., is_master=False, env='main', **other_kwargs):
        # refresh_interval: minimum seconds between redraws.
        # is_master: if True, this instance starts/owns the Visdom server.
        # env: Visdom environment name to publish the window into.
        self.refresh_interval = refresh_interval
        self.is_master = is_master
        self.env = env
        # Visdom window handle; created lazily on the first matplot() call.
        self.win = None
        self.init(**other_kwargs)

    def init(self, **kwargs):
        # Subclass hook for consuming extra constructor kwargs.
        pass

    def set_refresh_interval(self, interval):
        self.refresh_interval = interval

    def _refresh(self, context):
        # Redraw this runner's dedicated figure and push it to Visdom.
        plt.figure(self.fignumber)
        plt.clf()
        ax = plt.gca()
        self.refresh(context, ax)
        self.win = self.viz.matplot(plt, win=self.win, env=self.env)

    def refresh(self, context, ax):
        raise NotImplementedError('\'refresh\' method must be implemented.')

    def before_loop(self, context):
        # Refresh much more eagerly in debug/dev runs.
        if context.debug_mode in (DebugMode.DEBUG, DebugMode.DEV):
            self.refresh_interval = 2.
        logger = get_train_logger()
        if self.is_master:
            # set up visdom.
            # Kill any stale server session, then start a fresh detached
            # tmux session running the Visdom server.
            cmd = 'tmux kill-session -t visdom_server'
            logger.info(cmd)
            os.system(cmd)
            time.sleep(.1)
            cmd = 'tmux new-session -d -s "visdom_server"'
            logger.info(cmd)
            os.system(cmd)
            time.sleep(.1)
            cmd = 'tmux send-keys -t visdom_server ". activate && python -m visdom.server" Enter'
            logger.info(cmd)
            os.system(cmd)
            time.sleep(1.)
        # Poll the server for up to ~20 seconds until it accepts connections.
        start_trying_connecting = time.time()
        viz = visdom.Visdom(port=VISDOM_PORT, server="http://localhost")
        connected = viz.check_connection()
        while not connected:
            time.sleep(2.)
            viz.close()
            logger.info('Trying connecting to Visdom server.')
            viz = visdom.Visdom(port=VISDOM_PORT, server="http://localhost")
            connected = viz.check_connection()
            if time.time() - start_trying_connecting > 20.:
                break;
        if connected:
            logger.info('Visdom client connected.')
        else:
            raise RuntimeError('Connecting to Visdom server failed.')
        self.viz = viz
        self.last_refreshed = time.time()
        # plt.figure()
        # Dedicated figure per runner so concurrent runners don't clobber
        # each other's plots.
        fig = plt.figure()
        self.fignumber = fig.number
        logger.info('VisdomRunner: fignumber=%d' % self.fignumber)

    def after_step(self, context):
        # Throttled redraw during training.
        if time.time() - self.last_refreshed > self.refresh_interval:
            logger = get_train_logger()
            logger.info('refreshing visdom runner.')
            self._refresh(context)
            self.last_refreshed = time.time()

    def after_loop(self, context):
        # One final redraw after training completes.
        logger = get_train_logger()
        logger.info('refreshing visdom runner.')
        self._refresh(context)
        self.last_refreshed = time.time()
class TrainValLossViz(VisdomRunner):
    """Visdom panel that redraws the train/val loss curves from the record CSV."""

    def refresh(self, context, ax):
        record_path = context.trainer.train_record_file
        history = pd.read_csv(record_path, index_col=0)
        # Nothing logged yet: leave the axes empty.
        if history.empty:
            return None
        plot_trainval_loss(history, ax)
class ValLossAccViz(VisdomRunner):
    """Visdom panel that redraws validation loss/accuracy from the record CSV."""

    def refresh(self, context, ax):
        record_path = context.trainer.train_record_file
        history = pd.read_csv(record_path, index_col=0)
        # Nothing logged yet: leave the axes empty.
        if history.empty:
            return None
        plot_val_lossacc(history, ax)
|
<gh_stars>0
import csv
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import maxwell as m
# NOTE: legacy Python 2 script ("print" statements; csv file opened in 'rb').
# Reads a rheometer frequency sweep from 1skupina.csv and fits Maxwell models
# of several orders (1, 2, 4, 5; others commented out) to the storage (G')
# and loss (G'') moduli, then plots the data and the fits on log-log axes.
omega = []      # angular frequency
storG = []      # storage modulus G'
lossG = []      # loss modulus G''
dFactor = []    # damping factor
cVisc = []      # complex viscosity
with open('1skupina.csv', 'rb') as csvfile:
    # Skip the 5 header lines before the data rows.
    for i in range(0,5):
        next(csvfile)
    podatki = csv.reader(csvfile)
    for row in podatki:
        omega.append(float(row[1]))
        storG.append(float(row[2]))
        lossG.append(float(row[3]))
        dFactor.append(float(row[4]))
        cVisc.append(float(row[5]))
omega = np.array(omega)
storG = np.array(storG)
lossG = np.array(lossG)
dFactor = np.array(dFactor)
cVisc = np.array(cVisc)
# Maxwell 1
stor1, scov1 = curve_fit(m.sG1fit,omega,storG)
print "stor1", stor1
loss1, lcov1 = curve_fit(m.lG1fit,omega,lossG)
print "loss1", loss1
# Maxwell 2
stor2, scov2 = curve_fit(m.sG2fit,omega,storG)
print "stor2", stor2
loss2, lcov2 = curve_fit(m.lG2fit,omega,lossG)
print "loss2", loss2
# Maxwell 3
#~ stor3, scov3 = curve_fit(m.sG3fit,omega,storG)
#~ print stor3
#~ loss3, lcov3 = curve_fit(m.lG3fit,omega,lossG)
#~ print loss3
# Maxwell 4
stor4, scov4 = curve_fit(m.sG4fit,omega,storG)
print "stor4", stor4
loss4, lcov4 = curve_fit(m.lG4fit,omega,lossG)
print "loss4", loss4
# Maxwell 5
stor5, scov5 = curve_fit(m.sG5fit,omega,storG)
print "stor5", stor5
loss5, lcov5 = curve_fit(m.lG5fit,omega,lossG)
print "loss5", loss5
# Maxwell 6
#~ stor6, scov6 = curve_fit(m.sG6fit,omega,storG)
#~ print stor6
#~ loss6, lcov6 = curve_fit(m.lG6fit,omega,lossG)
#~ print loss6
# Maxwell 7
#~ stor7, scov7 = curve_fit(m.sG7fit,omega,storG)
#~ print stor7
#~ loss7, lcov7 = curve_fit(m.lG7fit,omega,lossG)
#~ print loss7
# Maxwell 8
#~ stor8, scov8 = curve_fit(m.sG8fit,omega,storG)
#~ print stor8
#~ loss8, lcov8 = curve_fit(m.lG8fit,omega,lossG)
#~ print loss8
# Evaluate the fitted models on a log-spaced frequency grid for plotting.
x = np.logspace(-1,3,100)
plt.plot(omega,storG,'o',
    omega,lossG,'o',
    x,m.sG1eval(x,stor1),x,m.lG1eval(x,loss1),
    x,m.sG2eval(x,stor2),x,m.lG2eval(x,loss2),
    #~ x,m.sG3eval(x,stor3),x,m.lG3eval(x,loss3),
    x,m.sG4eval(x,stor4),x,m.lG4eval(x,loss4),
    x,m.sG5eval(x,stor5),x,m.lG5eval(x,loss5))
#~ x,m.sG6eval(x,stor6),x,m.lG6eval(x,loss6),
#~ x,m.sG7eval(x,stor7),x,m.lG7eval(x,loss7),
#~ x,m.sG8eval(x,stor8),x,m.lG8eval(x,loss8))
plt.title('Frequency test')
plt.yscale('log')
plt.xscale('log')
plt.ylabel("G',G''")
plt.xlabel('omega')
plt.show()
|
from django.views.generic.base import View
from django.views.generic.edit import ModelFormMixin, ProcessFormView
from django.views.generic.list import (MultipleObjectMixin,
MultipleObjectTemplateResponseMixin)
from django.http.response import Http404
class CreateFormBaseView(ModelFormMixin, MultipleObjectMixin, ProcessFormView,
                         MultipleObjectTemplateResponseMixin, View):
    """List view that also renders a model-creation form in its context."""

    # ModelFormMixin expects ``self.object``; None means "creating".
    object = None

    def list(self, request, *args, **kwargs):
        """Populate ``self.object_list`` and enforce ``allow_empty``."""
        self.object_list = self.get_queryset()
        allow_empty = self.get_allow_empty()
        if not allow_empty:
            # When pagination is enabled and object_list is a queryset,
            # it's better to do a cheap query than to load the unpaginated
            # queryset in memory.
            if (self.get_paginate_by(self.object_list) is not None
                    and hasattr(self.object_list, 'exists')):
                is_empty = not self.object_list.exists()
            else:
                is_empty = len(self.object_list) == 0
            if is_empty:
                # BUGFIX: ``_`` was used without ever being imported, so an
                # empty list raised NameError instead of Http404. Imported
                # locally to keep this edit self-contained.
                from django.utils.translation import ugettext as _
                raise Http404(_("Empty list and '%(class_name)s.allow_empty is False.")
                              % {'class_name': self.__class__.__name__})

    def get(self, *args, **kwargs):
        """Render the list plus the creation form."""
        self.list(*args, **kwargs)
        context = self.get_context_data()
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        # Inject the creation form unless the caller already supplied one
        # (e.g. a bound form with validation errors).
        if 'form' not in kwargs:
            form_class = self.get_form_class()
            form = self.get_form(form_class)
            kwargs['form'] = form
        # NOTE(review): this re-runs list() even when get() already did --
        # redundant queryset evaluation, kept for behavioral compatibility.
        self.list(self.request, *self.args, **self.kwargs)
        return super(CreateFormBaseView, self).get_context_data(**kwargs)
class PermissionMixin(object):
    """
    Adds a certain decorator to a specific HTTP method.
    """

    # Maps an HTTP verb (e.g. 'GET') to a decorator, or an iterable of
    # decorators, applied to the handler before dispatch.
    decorators = {}

    def dispatch(self, request, *args, **kwargs):
        # Resolve the handler for this verb, deferring to the standard
        # "method not allowed" response for unknown/unsupported verbs.
        verb = request.method.lower()
        if verb in self.http_method_names:
            handler = getattr(self, verb, self.http_method_not_allowed)
        else:
            handler = self.http_method_not_allowed
        wrappers = self.decorators.get(request.method, [])
        try:
            for wrap in list(wrappers):
                handler = wrap(handler)
        except TypeError:
            # A single callable (not an iterable of them) was configured.
            handler = wrappers(handler)
        return handler(request, *args, **kwargs)
class AjaxResponsePermissionMixin(object):
    """
    Mixin allows you to define alternative methods for ajax requests. And
    adds a certain decorator to a specific HTTP method.
    """
    # Maps an HTTP verb (e.g. 'GET') to a decorator, or an iterable of
    # decorators, applied to the handler before dispatch.
    decorators = {}

    def dispatch(self, request, *args, **kwargs):
        # Route ajax requests to the corresponding '<verb>_ajax' handler.
        # NOTE(review): a non-ajax request (or unknown verb) never assigns
        # ``handler`` and raises NameError below -- presumably this mixin
        # is only used on ajax-only endpoints; confirm before relying on it.
        if request.is_ajax() and request.method.lower() in self.http_method_names:
            handler = getattr(self, u"{0}_ajax".format(request.method.lower()),
                self.http_method_not_allowed)
            self.request = request
            self.args = args
            self.kwargs = kwargs
        decorators = self.decorators.get(request.method, [])
        try:
            for decorator in list(decorators):
                handler = decorator(handler)
        except TypeError:
            # A single callable (not an iterable of them) was configured.
            handler = decorators(handler)
        return handler(request, *args, **kwargs)

    def get_ajax(self, request, *args, **kwargs):
        # Default: ajax GET falls back to the regular GET handler.
        return self.get(request, *args, **kwargs)

    def post_ajax(self, request, *args, **kwargs):
        return self.post(request, *args, **kwargs)

    def put_ajax(self, request, *args, **kwargs):
        # NOTE(review): delegates to ``get`` (not ``put``) -- looks like a
        # deliberate read-only fallback, but verify against callers.
        return self.get(request, *args, **kwargs)

    def delete_ajax(self, request, *args, **kwargs):
        # NOTE(review): delegates to ``get`` (not ``delete``) -- verify.
        return self.get(request, *args, **kwargs)
|
<filename>pyLib/analysisTools.py<gh_stars>1-10
import numpy as np
import sys
try:
import scipy.stats as st # contains st.entropy
except:
pass
'''
Description:
Author: <NAME>
<EMAIL>
University of Helsinki &
Finnish Meteorological Institute
'''
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
#==========================================================#
def sensibleIds( ijk, x, y, z ):
    '''
    Clamp the chosen i, j, k indices into the valid index range of the
    given x, y, z coordinate arrays (in place) and return ijk.
    '''
    for axis, coord in enumerate((x, y, z)):
        clamped = np.minimum(ijk[axis], len(coord) - 1)
        ijk[axis] = np.maximum(clamped, 0)
    return ijk
#==========================================================#
def groundOffset( vx ):
    '''
    Find the ground offset (in z-direction) for a velocity array vx(t, z, y, x):
    the smallest k at which vx[:, k, 1, 1] contains a positive value.

    Raises ValueError when no level has positive values. (The original
    unbounded ``while`` loop would run past the array end in that case.)
    '''
    nz = vx.shape[1]
    for k_offset in range(nz):
        if np.any(vx[:, k_offset, 1, 1] > 0.):
            return k_offset
    raise ValueError('groundOffset: no positive values found in vx[:, :, 1, 1].')
#==========================================================#
def filterTimeSeries( ds, sigma=1 ):
    '''
    Remove the zero-mean, Gaussian-smoothed (low-frequency) component from
    a time series, leaving the mean plus high-frequency fluctuations.
    '''
    import scipy.ndimage as sn  # contains the filters
    smoothed = sn.gaussian_filter(ds, sigma=sigma)
    lowfreq_fluct = smoothed - np.mean(smoothed)
    return ds - lowfreq_fluct
#==========================================================#
def quadrantAnalysis( v1, v2, qDict ):
    '''
    Quadrant analysis: joint PDF of two fluctuating velocity components.

    v1, v2: 4d arrays v(t, z, y, x) of velocity fluctuations (e.g. u', w').
    qDict:  parameter dict; see the dataFromDict calls below for the keys.

    Returns (Qi, X, Y, rDict): the 2d joint PDF, the meshgrid it lives on,
    and a dict with per-quadrant sample counts (nQ) and contributions (SQ).
    '''
    from utilities import dataFromDict
    debug = False
    # Extract data from dict. Using dict makes future modifications easy.
    ijk1 = dataFromDict('ijk1', qDict, allowNone=False )
    ijk2 = dataFromDict('ijk2', qDict, False )
    nkpoints = dataFromDict('nkpoints', qDict, True )
    npx = dataFromDict('npixels', qDict, False )
    axisLim = dataFromDict('axisLim', qDict, False )
    holewidth= dataFromDict('holewidth', qDict, False )
    weighted = dataFromDict('weighted', qDict, True )
    # Create arrays for running loops over the selected coordinates.
    iList = np.arange(ijk1[0],ijk2[0]+1)
    jList = np.arange(ijk1[1],ijk2[1]+1)
    kList = np.arange(ijk1[2],ijk2[2]+1)
    '''
    In quadrant analysis context, using a stride in z-direction is usually not desired.
    By default npoints is None.'''
    if( nkpoints is None): stride = 1
    else: stride = max( ((kList[-1]-kList[0])/nkpoints)+1 , 2 )
    # Compute the covariance term (for example, u'w')
    v = v1*v2
    if( debug ):
        print('min(v1)={}, max(v1)={}, min(v2)={}, max(v2)={}'\
            .format(np.abs(np.min(v1)), np.max(v1), np.abs(np.min(v2)), np.max(v2)))
    maxLim = np.abs(axisLim)
    minLim = -1.*maxLim
    # Determine if some grid points are under the ground level.
    k_off = max( groundOffset( v1 ), groundOffset( v2 ) )
    if( k_off > 0 and debug ):
        # BUGFIX: the original referenced an undefined name 'filename' here,
        # raising NameError whenever debug diagnostics were enabled.
        print(' ground offset (k_off) = {}'.format(k_off))
    x = np.linspace(minLim,maxLim,npx+1)
    y = np.linspace(minLim,maxLim,npx+1)
    dx = (maxLim-minLim)/(npx)
    X,Y = np.meshgrid(x,y)
    Qi = np.zeros( np.shape(X), float )
    nTot = 0
    nQ = np.zeros( 5, int )     # nQ[0] = nQTot
    SQ = np.zeros( 5, float )   # SQ[0] = STot
    '''
    Q1: u'(+), w'(+), OutwardInteraction
    Q2: u'(-), w'(+), Ejection
    Q3: u'(-), w'(-), Inward Interaction
    Q4: u'(+), w'(-), Sweep
    '''
    for i in iList:
        for j in jList:
            for k in kList[::stride]:
                vt = v[:,k+k_off,j,i]
                vt_mean = np.mean( np.abs(vt) )
                v1t = v1[:,k+k_off,j,i]
                v2t = v2[:,k+k_off,j,i]
                for l in range( len(vt) ):
                    SQ[0] += vt[l]; nTot += 1
                    # Hole filtering: only count samples whose magnitude
                    # exceeds holewidth times the mean |v1'v2'|.
                    if( np.abs(vt[l]) > (holewidth*vt_mean) ):
                        n = np.minimum( int((v1t[l] - minLim)/dx) , npx )
                        n = np.maximum( n , 0 )
                        m = np.minimum( int((v2t[l] - minLim)/dx) , npx )
                        m = np.maximum( m, 0 )
                        Qi[m,n] += 1.; nQ[0] += 1
                        if( v1t[l] > 0. and v2t[l] > 0. ):
                            nQ[1] += 1; SQ[1] += vt[l]  # Outward Interaction
                        elif( v1t[l] < 0. and v2t[l] > 0. ):
                            nQ[2] += 1; SQ[2] += vt[l]  # Ejection
                        elif( v1t[l] < 0. and v2t[l] < 0. ):
                            nQ[3] += 1; SQ[3] += vt[l]  # Inward Interaction
                        else:#( v1t[l] > 0 and v2t[l] < 0. ):
                            nQ[4] += 1; SQ[4] += vt[l]  # Sweep
    v = None; v1 = None; v2 = None
    # BUGFIX: np.float was removed in NumPy 1.20+; use the builtin float.
    Qi /= (float(nQ[0])*dx**2)  # Obtain the PDF
    if( weighted ):
        Qi *= np.abs(X*Y)
    SQ[0] /= float(nTot)            # Total contribution
    SQ[1:] /= nQ[1:].astype(float)  # Average contributions
    # Assemble the result dict
    rDict = dict()
    rDict['nQ'] = nQ
    rDict['SQ'] = SQ
    #rDict['klims']= np.array([ kList[0], kList[-1] ])
    return Qi, X, Y, rDict
#==========================================================#
def calc_ts_entropy_profile( V, z, alpha=1., nbins=16 ):
    '''
    Entropy of the demeaned time series at each vertical level of V(t, z, y, x).
    Falls back to the (0, 0) column when the (1, 1) column is unavailable.
    '''
    profile = np.zeros( len(z) )
    for k in range( len(z) ):
        try:
            series = V[:,k,1,1]
        except:
            series = V[:,k,0,0]
        series = series - np.mean(series)
        # Density histogram serves as the probability distribution; the bin
        # edges themselves are not needed.
        hist, _edges = np.histogram( series, bins=nbins, density=True )
        profile[k] = calc_entropy( hist, alpha )
    return profile
#==========================================================#
def calc_entropy( pk , alpha=1. ):
    '''
    Entropy of a probability distribution pk (e.g. a histogram of a time
    series or a wavelet scalo-/spectrogram): Shannon entropy for
    alpha == 1, Renyi entropy of order alpha otherwise.
    '''
    if alpha == 1.:
        return st.entropy( pk )
    renyi_sum = np.sum( np.power( np.array(pk), alpha ) )
    return np.log( renyi_sum ) / (1. - alpha)
#==========================================================#
def calc_divergence( pk, rk, alpha=1. ):
    '''
    Divergence between probability distributions pk and rk:
    Kullback-Leibler for alpha == 1, Renyi divergence otherwise.

    A small epsilon guards against zeros in either distribution.
    BUGFIX: the original did ``pk += 1e-9`` in place, silently mutating the
    caller's arrays (and raising TypeError for plain lists); the inputs are
    now copied via asarray, which also accepts lists/tuples.
    '''
    pk = np.asarray(pk, dtype=float) + 1e-9
    rk = np.asarray(rk, dtype=float) + 1e-9
    if alpha == 1.:
        return np.sum( pk * np.log( pk / rk ) )
    powratio = np.power( pk, alpha ) / np.power( rk, alpha - 1. )
    return np.log( np.sum( powratio ) ) / (alpha - 1.)
#==========================================================#
def discreteWaveletAnalysis( vx , wDict ):
    '''
    Discrete wavelet-packet decomposition of a 1d signal vx.

    wDict keys:
      'wavelet': pywt wavelet name (falls back to 'db2' on failure)
      'nlevel':  decomposition depth

    Returns (values, labels): |coefficients| per leaf node at the deepest
    level (frequency-ordered) and the corresponding node path labels.
    '''
    from utilities import dataFromDict
    try: import pywt
    except: sys.exit(' Library pywt not installed. Exiting ...')
    # nlevel = 4
    order = 'freq'  # "normal"
    wavelet = dataFromDict('wavelet', wDict, allowNone=False )
    nlevel = dataFromDict('nlevel', wDict, allowNone=False )
    if( wavelet in pywt.wavelist() ):
        try:
            wp = pywt.WaveletPacket( vx , wavelet, 'sym', maxlevel=nlevel)
        except:
            # NOTE(review): unreachable in practice -- the wavelist() check
            # above already filtered unknown names; kept as a safety net.
            print(" Wrong wavelet type given. Reverting to default 'db2'. ")
            wavelet = 'db2'
            wp = pywt.WaveletPacket( vx , wavelet, 'sym', maxlevel=nlevel)
    # NOTE(review): if wavelet is NOT in pywt.wavelist(), 'wp' is never
    # assigned and the next line raises NameError -- confirm callers always
    # pass a valid wavelet name.
    nodes = wp.get_level(nlevel, order=order)
    labels = [n.path for n in nodes]
    values = np.array([n.data for n in nodes], 'd')
    values = abs(values)
    return values, labels
#==========================================================#
def continuousWaveletAnalysis( vx, wDict ):
    '''
    Continuous wavelet transform of a 1d signal vx.

    wDict keys:
      'wavelet':    pywt continuous wavelet name
      'nfreqs':     number of scales to evaluate
      'dt':         sampling interval
      'linearFreq': if truthy, use reciprocal (1/k) scales

    Returns (cfs, freq): the CWT coefficients and associated frequencies.
    '''
    from utilities import dataFromDict
    try: import pywt
    except: sys.exit(' Library pywt not installed. Exiting ...')
    wavelet = dataFromDict('wavelet', wDict, allowNone=False )
    nfreqs = dataFromDict('nfreqs', wDict, allowNone=False )
    dt = dataFromDict('dt', wDict, allowNone=False )
    linearFreq = dataFromDict('linearFreq', wDict, allowNone=True )
    if( linearFreq ):
        # Reciprocal scales -> (roughly) linearly spaced frequencies.
        scales = 1./np.arange(1,nfreqs)
    else:
        scales = np.arange(1,nfreqs)
    cfs,freq = pywt.cwt(vx,scales,wavelet,dt)
    return cfs, freq
#==========================================================#
|
<filename>cropduster/models.py<gh_stars>0
import re
import shutil
import time
import uuid
import os
import datetime
import hashlib
import itertools
import urllib
from PIL import Image as pil
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.fields.related import ReverseSingleRelatedObjectDescriptor
from django.conf import settings
from django.db.models.signals import post_save
from cropduster import utils
from filer.fields.image import FilerImageField
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
try:
    from caching.base import CachingMixin, CachingManager
except ImportError:
    # django-cache-machine is optional: fall back to a no-op mixin and the
    # plain Django manager so the models work without it.
    class CachingMixin(object):
        pass
    CachingManager = models.Manager
#assert not settings.CROPDUSTER_UPLOAD_PATH.startswith('/')
# Round a float to the nearest integer.
nearest_int = lambda a: int(round(a))
# 'foo/bar.png' -> 'foo/bar@2x.png'
to_retina_path = lambda p: '%s@2x%s' % os.path.splitext(p)
class SizeSet(CachingMixin, models.Model):
    """A named, sluggable collection of image Sizes."""
    objects = CachingManager()

    class Meta:
        db_table = 'cropduster_sizeset'

    name = models.CharField(max_length=255, db_index=True, unique=True)
    slug = models.SlugField(max_length=50, null=False, unique=True)

    def __unicode__(self):
        return u"%s" % self.name

    def get_size_by_ratio(self):
        """ Shorthand to get all the unique ratios for display in the admin,
        rather than show every possible thumbnail
        """
        size_query = Size.objects.filter(size_set__id=self.id)
        # Collapses to one row per aspect ratio via the (private) query
        # group_by attribute.
        size_query.query.group_by = ["aspect_ratio"]
        try:
            # NOTE(review): returning a lazy queryset cannot raise
            # ValueError here; the except branch looks unreachable.
            return size_query
        except ValueError:
            return None
class Size(CachingMixin, models.Model):
    """A target rendering size: explicit width/height and/or an aspect ratio.

    Any missing dimension is derived from the others (see get_width /
    get_height / get_aspect_ratio); save() persists the derived values.
    """
    objects = CachingManager()

    class Meta:
        db_table = "cropduster_size"

    # An Size not associated with a set is a 'one off'
    size_set = models.ForeignKey(SizeSet, null=True)
    date_modified = models.DateTimeField(auto_now=True, null=True)
    name = models.CharField(max_length=255, db_index=True, default='default')
    slug = models.SlugField(max_length=50, null=False, default='default')
    height = models.PositiveIntegerField(null=True, blank=True)
    width = models.PositiveIntegerField(null=True, blank=True)
    aspect_ratio = models.FloatField(null=True, blank=True)
    auto_crop = models.BooleanField(default=False)
    retina = models.BooleanField(default=False)

    def get_height(self):
        """
        Return calculated height, if possible.
        @return: Height
        @rtype: positive int
        """
        # Derive height from width and aspect ratio when not set explicitly.
        if self.height is None and self.width and self.aspect_ratio:
            return nearest_int(self.width / self.aspect_ratio)
        return self.height

    def get_width(self):
        """
        Returns calculate width, if possible.
        @return: Width
        @rtype: positive int
        """
        # Derive width from height and aspect ratio when not set explicitly.
        if self.width is None and self.height and self.aspect_ratio:
            return nearest_int(self.height * self.aspect_ratio)
        return self.width

    def get_aspect_ratio(self):
        """
        Returns calculated aspect ratio, if possible.
        @return: Aspect Ratio
        @rtype: float
        """
        # Derive ratio from explicit width and height when not set.
        if self.aspect_ratio is None and self.height and self.width:
            return round(self.width / float(self.height), 2)
        return self.aspect_ratio

    def get_dimensions(self):
        """
        Returns all calculated dimensions for the size.
        @return: width, height, aspect ratio
        @rtype: (int > 0, int > 0, float > 0)
        """
        return (self.get_width(), self.get_height(), self.get_aspect_ratio())

    def calc_dimensions(self, width, height):
        """
        From a given set of dimensions, calculates the rendered size.
        @param width: Starting width
        @type width: Positive int
        @param height: Starting height
        @type height: Positive int
        @return: rendered width, rendered height
        @rtype: (Width, Height)
        """
        w, h, a = self.get_dimensions()
        # Explicit dimension give explicit answers
        if w and h:
            return w, h, a
        # Empty sizes are basically useless.
        if not (w or h):
            return width, height, None
        # Exactly one dimension is fixed: preserve the source aspect ratio.
        aspect_ratio = round(width / float(height), 2)
        if w:
            h = nearest_int(w / aspect_ratio)
        else:
            w = nearest_int(h * aspect_ratio)
        return w, h, round(w / float(h), 2)

    def __unicode__(self):
        return u"%s: %sx%s" % (self.name, self.width, self.height)

    def save(self, *args, **kwargs):
        # NOTE(review): slug has a non-null default, so it is None only when
        # set explicitly; the uuid fallback guards that case.
        if self.slug is None:
            self.slug = uuid.uuid4().hex
        # Persist the derived dimensions so queries see concrete values.
        w, h, a = self.get_dimensions()
        self.width = w
        self.height = h
        self.aspect_ratio = a
        super(Size, self).save(*args, **kwargs)
class Crop(CachingMixin, models.Model):
    """A pixel-space crop rectangle applied to an Image before resizing."""
    class Meta:
        db_table = "cropduster_crop"

    objects = CachingManager()

    # Top-left corner (crop_x, crop_y) plus width/height in pixels.
    crop_x = models.PositiveIntegerField(default=0, blank=True, null=True)
    crop_y = models.PositiveIntegerField(default=0, blank=True, null=True)
    crop_w = models.PositiveIntegerField(default=0, blank=True, null=True)
    crop_h = models.PositiveIntegerField(default=0, blank=True, null=True)

    def __unicode__(self):
        # Rendered as the "(x1, y1),(x2, y2)" corner pair.
        return u"Crop: (%i, %i),(%i, %i) " % (
            self.crop_x,
            self.crop_y,
            self.crop_x + self.crop_w,
            self.crop_y + self.crop_h,
        )
class ImageMetadata(CachingMixin, models.Model):
    """Attribution/caption details shared by an original image and its derivatives."""
    objects = CachingManager()

    class Meta:
        db_table = "cropduster_image_meta"

    # Attribution details.
    attribution = models.CharField(max_length=255, blank=True, null=True)
    attribution_link = models.URLField(max_length=255, blank=True, null=True)
    caption = models.CharField(max_length=255, blank=True, null=True)
class Image(CachingMixin, models.Model):
    """A Filer-backed image, optionally derived (cropped/resized) from another Image."""
    objects = CachingManager()

    class Meta:
        db_table = "cropduster_image"
        verbose_name = "Image"
        verbose_name_plural = "Image"

    # Original image if this is generated from another image.
    original = models.ForeignKey('self',
                                 related_name='derived',
                                 null=True)

    image = FilerImageField(null=True, blank=True, default=None, verbose_name=_("image"))

    # An image doesn't need to have a size associated with it, only
    # if we want to transform it.
    size = models.ForeignKey(Size, null=True)
    crop = models.OneToOneField(Crop, null=True)

    # Image can have 0:N size-sets
    size_sets = models.ManyToManyField(SizeSet, null=True)

    # Single set of attributions
    metadata = models.ForeignKey(ImageMetadata, null=True, blank=True)

    date_modified = models.DateTimeField(auto_now=True, null=True)
    # Cached pixel dimensions of the stored file.
    width = models.PositiveIntegerField(null=True)
    height = models.PositiveIntegerField(null=True)
@staticmethod
def cropduster_upload_to(filename, fmt="%Y/%m/%d"):
if fmt:
now = datetime.date.today()
fmt = now.strftime(fmt)
else:
fmt = ''
return os.path.join(settings.CROPDUSTER_UPLOAD_PATH, fmt, filename)
@property
def retina_path(self):
"""
Returns the path to the retina image if it exists.
"""
return to_retina_path(self.image.path)
@property
def aspect_ratio(self):
if self.width and self.height:
return round(self.width / float(self.height), 2)
return None
@property
def is_original(self):
return self.original is None
def add_size_set(self, size_set=None, **kwargs):
"""
Adds a size set to the current image. If the sizeset
is provided, will add that otherwise it will query
all size sets that match the **kwarg criteria
@return: Newly created derived images from size set.
@rtype: [Image1, ...]
"""
if size_set is None:
size_set = SizeSet.objects.get(**kwargs)
self.size_sets.add(size_set)
# Do not duplicate images which are already in the
# derived set.
d_ids = set(d.size.id for d in self.derived.all())
# Create new derived images from the size set
return [self.new_derived_image(size=size)
for size in size_set.size_set.all()
if size.id not in d_ids]
def get_metadata(self):
if self.metadata is None:
if self.original is not None:
metadata = self.original.get_metadata()
else:
metadata = ImageMetadata()
self.metadata = metadata
return self.metadata
def new_derived_image(self, **kwargs):
"""
Creates a new derived image from the current image.
@return: new Image
@rtype: Image
"""
return Image(original=self, metadata=self.get_metadata(), **kwargs)
def set_manual_size(self, **kwargs):
"""
Sets a manual size on the image.
@return: New Size object, unsaved
@rtype: @{Size}
"""
# If we don't have a size or we have a size from a size set,
# we need to create a new Size object.
if self.size is None or self.size.size_set is not None:
self.size = Size(**kwargs)
else:
# Otherwise, update the values
for k, v in kwargs.iteritems():
setattr(self.size, k, v)
return self.size
def _save_to_tmp(self, image):
"""
Saves an image to a tempfile.
@param image: Image to save.
@type image:
@return: Temporary path where the image is saved.
@rtype: /path/to/file
"""
path = self._get_tmp_img_path()
return utils.save_image(image, path)
def get_cropped_image(self, force=False):
"""
Renders an image according to its Crop and its Size. If the size also
specifies a retina image, it will attempt to render that as well. If a
crop is set, it is applied to the image before any resizing happens.
By default, render will throw an error if an attempt is made to render
an original image.
NOTE: While render will create a new image, it will be stored it in a
temp file until the object is saved when it will overwrite the
previously stored image. There are a couple of reasons for this:
1. If there's any sort of error, the previous image is preserved,
making re-renderings of images safe.
2. We have to resave the image anyways since 'width' and 'height' have
likely changed.
3. If for some reason we want to 'rollback' a change, we don't have
to do anything special.
The temporary images are saved in CROPDUSTER_TMP_DIR if available, or
falls back to the directory the image currently resides in.
@param force: If force is True, render will allow overwriting the
original image.
@type force: bool.
"""
if not force and self.is_original:
raise ValidationError("Cannot render over an original image. "\
"Use render(force=True) to override.")
if not (self.crop or self.size):
# Nothing to do.
return
# We really only want to do rescalings on derived images, but
# we don't prevent people from it.
if self.original:
image_path = self.original.image.url
else:
image_path = self.image.path
if self.crop:
image = utils.create_cropped_image(self.original.image,
self.crop.crop_x,
self.crop.crop_y,
self.crop.crop_w,
self.crop.crop_h)
else:
image = pil.open(image_path)
# If we are resizing the image.
if self.size:
size = self.size
orig_width, orig_height = image.size
width, height = size.calc_dimensions(orig_width, orig_height)[:2]
# Calculate the main image
image = utils.rescale(image, width, height, size.auto_crop)
return image
def _get_tmp_img_path(self):
"""
Returns a temporary image path. We should probably be using the
Storage objects, but this works for now.
Tries to it in CROPDUSTER_TMP_DIR if set, falls back to the current
directory of the image.
@return: Temporary image location.
@rtype: "/path/to/file"
"""
dest_path = self.get_dest_img_path()
if hasattr(settings, 'CROPDUSTER_TMP_DIR'):
tmp_path = settings.CROPDUSTER_TMP_DIR
else:
tmp_path = os.path.dirname(dest_path)
ext = os.path.splitext(dest_path)[1]
return os.path.join(tmp_path, uuid.uuid4().hex + ext)
def get_dest_img_path(self):
"""
Figures out where to place save a new image for this Image.
@return: path to image location
@rtype: "/path/to/image"
"""
# If we have a path already, reuse it.
if self.image:
return self.image.path
return self.get_dest_img_from_base(self.original.image.path)
def get_dest_img_name(self):
if self.image:
return self.image.name
return self.get_dest_img_from_base(self.original.image.name)
def get_dest_img_from_base(self, base):
# Calculate it from the size slug if possible.
if self.size:
slug = self.size.slug
elif self.crop:
slug = os.path.splitext(os.path.basename(base))[0]
else:
# Guess we have to return the original path
return base
path, ext = os.path.splitext(base)
return os.path.join(path, slug) + ext
def has_size(self, size_slug):
return self.derived.filter(size__slug=size_slug).count() > 0
def set_crop(self, x, y, width, height):
"""
Sets the crop size for an image. It should be noted that the crop
object is NOT saved by default, so should be saved manually.
Adds a crop from top-left (x,y) to bottom-right (x+width, y+width).
@return: The unsaved crop object.
@rtype: {Crop}
"""
if self.crop is None:
self.crop = Crop()
self.crop.crop_x = x
self.crop.crop_y = y
self.crop.crop_w = width
self.crop.crop_h = height
return self.crop
def __unicode__(self):
return self.get_absolute_url() if self.image else u""
def get_absolute_url(self, date_hash=True):
"""
Gets the absolute url for an image.
@param date_hash: If True, adds a GET param hex hash indicating
the update date for the image.
@type date_hash: bool
@return: Absolute path to the url
@rtype: basestring
"""
path = self.image.url
if date_hash:
unix_time = int(time.mktime(self.date_modified.timetuple()))
path += '?' + format(unix_time, 'x')
# Django's filepath_to_uri passes '()' in the safe kwarg to
# urllib.quote, which is problematic when used in inline
# background-image:url() styles.
# This regex replaces '(' and ')' with '%28' and '%29', respectively
url = unicode(path)
return re.sub(r'([\(\)])', lambda m: urllib.quote(m.group(1)), url)
def get_thumbnail(self, slug, size_set=None):
"""
Returns the derived image for the Image or None if it does not exist.
@param slug: Name of the image slug.
@type slug: basestring
@param size_set: Size Set object to filter by, if available.
@type size_set: SizeSet.
@return: Image or None
@rtype: Image or None
"""
try:
if size_set:
return self.derived.get(size__size_set=size_set, size__slug=slug)
else:
return self.derived.filter(size__slug=slug)[0]
except IndexError:
return None
except Image.DoesNotExist:
return None
def __init__(self, *args, **kwargs):
if 'metadata' not in kwargs and 'metadata_id' not in kwargs:
kwargs['metadata'] = ImageMetadata()
return super(Image, self).__init__(*args, **kwargs)
    def save(self, *args, **kwargs):
        """
        Persist this Image, flushing pending file state first.

        Order matters: the original Image row and the metadata row must exist
        before this row references them, and any staged files (`_new_image`,
        `_new_retina`) are moved into their final locations before the model
        itself is saved.
        """
        # Make sure our original image is saved
        if self.original and not self.original.pk:
            self.original.save()
        # Make sure we've saved our metadata
        metadata = self.get_metadata()
        if not metadata.id:
            metadata.save()
        # Bug #8892, not updating the 'metadata_id' field.
        self.metadata = metadata
        # Do we have a new image? If so, we need to move it over.
        if getattr(self, '_new_image', None) is not None:
            name = self.get_dest_img_name()
            # Optionally force the stored extension to match the actual
            # image format of the staged file.
            if getattr(settings, 'CROPDUSTER_NORMALIZE_EXT', False):
                if not name.endswith(self._new_image_format.lower()):
                    rest, _ext = os.path.splitext(name)
                    name = rest + '.' + self._new_image_format.lower()
            # Since we only store relative paths in here, but want to get
            # the correct absolute path, we have to set the image name first
            # before we set the image directly (which will)
            self.image.name = name
            os.rename(self._new_image, self.image.path)
            self.image = name
            # I'm not a fan of all this state, but it needs to be saved
            # somewhere.
            del self._new_image
            del self._new_image_format
        # Check for a new retina
        if hasattr(self, '_new_retina'):
            retina_path = self.retina_path
            if self._new_retina is None:
                if os.path.exists(retina_path):
                    # If the retina is now invalid, remove the previous one.
                    os.unlink(retina_path)
            else:
                os.rename(self._new_retina, retina_path)
            del self._new_retina
        return super(Image, self).save(*args, **kwargs)
@property
def descendants(self):
"""
Gets all descendants for the current image, starting at the highest
levels and recursing down.
@returns set of descendants
@rtype <Image1, ...>
"""
stack = [self]
while stack:
original = stack.pop()
children = original.derived.all()
for c in children:
c.original = original
yield c
stack.extend(children)
@property
def ancestors(self):
"""
Returns the set of ancestors associated with an Image
"""
current = self
while current.original:
yield current.original
current = current.original
def delete(self, remove_images=True, *args, **kwargs):
"""
Deletes an image, attempting to clean up foreign keys as well.
@param remove_images: If True, performs a bulk delete and then
deletes all derived images. It does not,
however, remove the directories.
@type remove_images: bool
"""
# Delete manual image sizes.
if self.size is not None and self.size.size_set is None:
self.size.delete()
# All crops are unique to the image.
if self.crop is not None:
self.crop.delete()
return super(Image, self).delete(*args, **kwargs)
class CropDusterReverseProxyDescriptor(ReverseSingleRelatedObjectDescriptor):
    # Descriptor that up-casts an assigned value to the field's proxy class
    # when the value is an instance of the proxy's direct base, so that
    # proxy-specific behavior (e.g. custom upload_to) is preserved.
    def __set__(self, instance, value):
        if value is not None and not isinstance(value, self.field.rel.to):
            # ok, are we a direct subclass?
            mro = self.field.rel.to.__mro__
            if len(mro) > 1 and type(value) == mro[1]:
                # Convert to the appropriate proxy object
                value.__class__ = self.field.rel.to
        super(CropDusterReverseProxyDescriptor, self).__set__(instance, value)
# Monotonic counter used to give each generated ProxyImage subclass a unique
# class name (Django aliases proxy models that share a name).
PROXY_COUNT = itertools.count(1)
class CropDusterField(models.ForeignKey):
    """
    ForeignKey to an Image model which, when given an `upload_to`, generates
    a dedicated proxy Image subclass carrying that upload logic so each field
    can use its own upload path.
    """
    # When True, the upload path is recomputed from the owning instance on
    # every save (see contribute_to_class / dynamic_path_save).
    dynamic_path = False
    def __init__(self, upload_to=None, dynamic_path=False, *args, **kwargs):
        # With no custom upload_to, behave like a plain ForeignKey whose
        # target defaults to Image.
        if upload_to is None:
            if not args and 'to' not in kwargs:
                args = (Image,)
            super(CropDusterField, self).__init__(*args, **kwargs)
            return
        # Figure out what we are inheriting from.
        if args and issubclass(args[0], Image):
            base_cls = args[0]
            args = tuple(args[1:])
        elif 'to' in kwargs and issubclass(kwargs.get('to'), Image):
            base_cls = kwargs.get('to')
        else:
            base_cls = Image
        if callable(upload_to) and dynamic_path:
            # we have a function and we want it to dynamically change
            # based on the instance
            self.dynamic_path = True
        if isinstance(upload_to, basestring):
            upload_path = upload_to
            def upload_to(object, filename):
                return Image.cropduster_upload_to(filename, upload_path)
        elif callable(upload_to):
            old_upload_to = upload_to
            def upload_to(self, filename, instance=None):
                # NOTE(review): `new_path` is computed but never used, and
                # the return value joins the raw `filename` instead. This
                # looks like a bug -- confirm whether `new_path` should be
                # returned/joined here.
                new_path = old_upload_to(filename, instance)
                return os.path.join(settings.CROPDUSTER_UPLOAD_PATH, filename)
        else:
            raise TypeError("'upload_to' needs to be either a callable or string")
        # We have to create a unique class name for each custom proxy image otherwise
        # django likes to alias them together.
        ProxyImage = type('ProxyImage%i' % next(PROXY_COUNT),
                          (base_cls,),
                          {'Meta': type('Meta', (), {'proxy':True}),
                           'cropduster_upload_to': upload_to,
                           '__module__': Image.__module__})
        return super(CropDusterField, self).__init__(ProxyImage, *args, **kwargs)
    def contribute_to_class(self, cls, name):
        super(CropDusterField, self).contribute_to_class(cls, name)
        # Install the descriptor that up-casts assigned Images to the proxy.
        setattr(cls, self.name, CropDusterReverseProxyDescriptor(self))
        if self.dynamic_path:
            # Move image files to their recomputed path after each save of
            # the owning model instance.
            def post_signal(sender, instance, created, *args, **kwargs):
                cdf = getattr(instance, name, None)
                if cdf is not None:
                    dynamic_path_save(instance, cdf)
            post_save.connect(post_signal, sender=cls, weak=False)
def dynamic_path_save(instance, cdf):
    """
    Move an Image (and all of its derived images) to the path produced by
    the field's `cropduster_upload_to` for *instance*.

    Files are copied first and only deleted after every image row has been
    re-saved, so a failure part-way leaves the previous files intact.
    @param instance: model instance owning the CropDusterField.
    @param cdf: the Image currently assigned to the field, or None.
    """
    # Ok, try to move the fields.
    if cdf is None:
        # No image to check, move along.
        return
    # Check to see if the paths are the same
    old_name = cdf.image.name
    basename = os.path.basename(old_name)
    new_name = cdf.cropduster_upload_to(basename, instance=instance)
    if new_name == old_name:
        # Nothing to move, move along
        return
    old_to_new = {}
    old_path = cdf.image.path
    images = [cdf]
    cdf.image.name = new_name
    old_to_new[old_path] = cdf.image.path
    # Iterate through all derived images, updating the paths
    for derived in cdf.descendants:
        old_path = derived.image.path
        old_retina_path = derived.retina_path
        # Update the name to the new one
        derived.image.name = derived.get_dest_img_from_base(derived.original.image.name)
        old_to_new[old_path] = derived.image.path
        # Only add the retina if it exists.
        if os.path.exists(old_retina_path) and derived.size.retina:
            old_to_new[old_retina_path] = derived.retina_path
        images.append(derived)
    # Filter out paths which haven't changed.
    old_to_new = dict((k,v) for k,v in old_to_new.iteritems() if k != v)
    # Copy the images... this is not cheap
    for old_path, new_path in old_to_new.iteritems():
        # Create the directory, if needed
        dirname = os.path.dirname(new_path)
        if not os.path.isdir(dirname):
            if os.path.exists(dirname):
                raise ValidationError("Cannot create new directory '%s'" % dirname)
            os.makedirs(dirname)
        # Copy the file, should blow up for all manner of things.
        shutil.copy(old_path, new_path)
        # Check existance
        if not os.path.exists(new_path):
            raise ValidationError("Could not copy image %s to %s" % (old_path, new_path))
    # Save the images
    for image in images:
        image.save()
    # Ok, we've made every reasonable attempt to preserve data... delete!
    old_dirs = set()
    for old_path in old_to_new:
        os.unlink(old_path)
        old_dirs.add( os.path.dirname(old_path) )
    # Files are deleted, delete empty directories, except the upload path...
    # that would be bad
    # Deepest directories first, so children are removed before parents.
    for path in reversed(sorted(old_dirs, key=lambda d: d.count('/'))):
        # NOTE(review): `path not in settings.MEDIA_ROOT` is a substring
        # test, not an equality check -- confirm this guard matches intent.
        if not os.listdir(path) and path not in settings.MEDIA_ROOT:
            os.rmdir(path)
class ImageRegistry(object):
    """
    Maps an md5 hash of (model, field name) to the Image class used for that
    field, so the admin can look the class up from a hash alone.
    """
    hashes = {}

    @classmethod
    def add(cls, model, field_name, Image):
        """Register *Image* under the (model, field_name) pair and return
        the hash key it was stored under."""
        key = hashlib.md5('%s:%s' % (model, field_name)).hexdigest()
        cls.hashes[key] = Image
        return key

    @classmethod
    def get(cls, image_hash):
        """Return the registered class for *image_hash*, defaulting to the
        base Image model when the hash is unknown."""
        return cls.hashes.get(image_hash, Image)
# Teach South (legacy schema migrations) how to introspect CropDusterField;
# this is a no-op when South is not installed.
try:
    from south.modelsinspector import add_introspection_rules
except ImportError:
    pass
else:
    add_introspection_rules([], ["^cropduster\.models\.CropDusterField"])
class ImageContextManager(models.Manager):
    """Manager helpers for binding a SizeSet to arbitrary model objects."""

    def update(self, model_instance, size_set):
        """Create, repoint, or remove the ImageContext for *model_instance*
        so it reflects *size_set* (a falsy size_set removes the context)."""
        ctx = self.get_img_ctx(model_instance)
        if ctx:
            if size_set:
                ctx.size_set_id = size_set.id
                ctx.save()
            else:
                ctx.delete()
        elif size_set:
            ctx = ImageContext(content_object=model_instance,
                               size_set=size_set)
            ctx.save()

    def get_img_ctx(self, model_instance):
        """Return the ImageContext attached to *model_instance*, or None."""
        ct = ContentType.objects.get_for_model(model_instance)
        matches = ImageContext.objects.filter(
            content_type=ct.id,
            object_id=model_instance.id)
        return matches[0] if matches else None

    def get_size_set(self, model_instance):
        """Return the id of the size set bound to *model_instance*, or None."""
        ctx = self.get_img_ctx(model_instance)
        return ctx.size_set.id if ctx else None
class ImageContext(models.Model):
    """
    Binds a SizeSet to an arbitrary model instance via a generic foreign
    key; at most one context exists per object (see Meta.unique_together).
    """
    size_set = models.ForeignKey(SizeSet, blank=True, null=True)
    # The standard fields for a GenericForeignKey. It may point to any model
    # object without hardcoding the class of the related model.
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = generic.GenericForeignKey('content_type', 'object_id')
    objects = ImageContextManager()
    class Meta:
        unique_together = ("content_type", "object_id")
|
<gh_stars>100-1000
from typing import Optional, List, Dict
from ontobio.model.similarity import AnnotationSufficiency
from ontobio.vocabulary.upper import HpoUpperLevel
from ontobio.sim.api.interfaces import InformationContentStore
import numpy as np
from statistics import mean
class AnnotationScorer:
    """
    Computes the annotation sufficiency scores as described
    by https://zenodo.org/record/834091#.W8ZnCxhlCV4
    """
    def __init__(self, ic_store: InformationContentStore):
        # Store providing per-class information content and background stats.
        self.ic_store = ic_store
    def get_annotation_sufficiency(
            self,
            profile: List[str],
            negated_classes: List[str],
            categories: Optional[List] = None,
            negation_weight: Optional[float] = .25,
            category_weight: Optional[float] = .5) -> AnnotationSufficiency:
        """
        Given a list of individuals, return the simple, scaled, and categorical scores
        :param profile: present phenotype class ids
        :param negated_classes: explicitly negated/absent class ids
        :param categories: category class ids to score against; defaults to
        the HPO upper-level terms
        :param negation_weight: weight applied to the IC of negated classes
        :param category_weight: weight of the categorical score when scaling
        """
        if categories is None:
            categories = [enum.value for enum in HpoUpperLevel]
        ic_map = self.ic_store.get_profile_ic(profile + negated_classes)
        # Simple score is the weighted average of the present and
        # explicitly stated negative/absent phenotypes
        #
        # Note that we're deviating from the publication
        # to match the reference java implementation where
        # mean_max_ic is replaced with max_max_ic:
        # https://github.com/owlcollab/owltools/blob/452b4a/
        # OWLTools-Sim/src/main/java/owltools/sim2/AbstractOwlSim.java#L1038
        simple_score = self._get_simple_score(
            profile, negated_classes, self.ic_store.statistics.mean_mean_ic,
            self.ic_store.statistics.max_max_ic, self.ic_store.statistics.mean_sum_ic,
            negation_weight, ic_map
        )
        categorical_score = self._get_categorical_score(
            profile, negated_classes, categories,
            negation_weight, ic_map
        )
        scaled_score = self._get_scaled_score(
            simple_score, categorical_score, category_weight)
        return AnnotationSufficiency(
            simple_score=simple_score,
            scaled_score=scaled_score,
            categorical_score=categorical_score
        )
    def _get_simple_score(self,
                          profile: List[str],
                          negated_classes: List[str],
                          bg_mean_pic: float,
                          bg_mean_max_pic: float,
                          bg_mean_sum_pic: float,
                          negation_weight: Optional[float] = .25,
                          ic_map: Optional[Dict[str, float]] = None) -> float:
        """
        Simple score is the average of the relative
        mean ic, max ic, and sum ic (relative to global stats)
        :param profile: present phenotype class ids
        :param negated_classes: explicitly negated/absent class ids
        :param bg_mean_pic: the average of the average IC in
        the background profile annotations
        :param bg_mean_max_pic: max IC annotated to the background set of profiles
        :param bg_mean_sum_pic: average of the profile sum IC in background set
        :param negation_weight: weight applied to the IC of negated classes
        :param ic_map: dictionary of class - information content mappings;
        computed from the profile when not supplied
        :return: simple score (float)
        """
        if ic_map is None:
            ic_map = self.ic_store.get_profile_ic(profile + negated_classes)
        # Split the IC map into present vs negated classes.
        pos_map = {cls: ic for cls, ic in ic_map.items() if cls in profile}
        neg_map = {cls: ic for cls, ic in ic_map.items() if cls in negated_classes}
        mean_ic = mean(pos_map.values()) if len(profile) > 0 else 0
        max_ic = max(pos_map.values()) if len(profile) > 0 else 0
        sum_ic = sum(pos_map.values()) if len(profile) > 0 else 0
        if len(negated_classes) > 0:
            # Negated classes contribute with down-weighted IC; the mean is
            # only allowed to increase, never decrease the positive-only mean.
            weighted_ic = [ic * negation_weight for ic in neg_map.values()]
            mean_ic = max([np.average([mean_ic, mean(neg_map.values())],
                                      weights=[1, negation_weight]),
                           mean_ic])
            max_ic = max([max_ic] + weighted_ic)
            sum_ic = sum_ic + sum(weighted_ic)
        # Each component is capped at 1.0 relative to the background stats.
        return mean([
            min([mean_ic / bg_mean_pic, 1.0]),
            min([max_ic / bg_mean_max_pic, 1.0]),
            min([sum_ic / bg_mean_sum_pic, 1.0])
        ])
    @staticmethod
    def _get_scaled_score(
            simple_score: float,
            categorical_score: float,
            category_weight: Optional[float] = .5) -> float:
        """
        Scaled score is the weighted average of the simple score and
        categorical score
        """
        return np.average(
            [simple_score, categorical_score], weights=[1, category_weight]
        )
    def _get_categorical_score(
            self,
            profile: List,
            negated_classes: List,
            categories: List,
            negation_weight: Optional[float] = 1,
            ic_map: Optional[Dict[str, float]] = None) -> float:
        """
        The average of the simple scores across a list of categories
        :raises ValueError: when a category has no indexed statistics
        """
        if ic_map is None:
            ic_map = self.ic_store.get_profile_ic(profile + negated_classes)
        scores = []
        for cat in categories:
            if cat not in self.ic_store.category_statistics:
                raise ValueError("statistics for {} not indexed".format(cat))
            # Restrict the profile to classes under this category.
            pos_profile = [cls for cls in profile
                           if cls in self.ic_store.category_statistics[cat].descendants]
            neg_profile = [cls for cls in negated_classes
                           if cls in self.ic_store.category_statistics[cat].descendants]
            # Note that we're deviating from the publication
            # to match the reference java implementation where
            # mean_max_ic is replaced by max_max_ic
            scores.append(self._get_simple_score(
                pos_profile, neg_profile,
                self.ic_store.category_statistics[cat].mean_mean_ic,
                self.ic_store.category_statistics[cat].max_max_ic,
                self.ic_store.category_statistics[cat].mean_sum_ic,
                negation_weight, ic_map
            ))
        return mean(scores)
|
import os
from copy import deepcopy
import dill
import pytest
import torch
from torch.optim import SGD, Adadelta, Adagrad, Adam, RMSprop
from pythae.customexception import BadInheritanceError
from pythae.models.base.base_utils import ModelOutput
from pythae.models import RHVAE, RHVAEConfig
from pythae.trainers import BaseTrainer, BaseTrainingConfig
from pythae.pipelines import TrainingPipeline
from pythae.models.nn.default_architectures import (
Decoder_AE_MLP,
Encoder_VAE_MLP,
Metric_MLP,
)
from pythae.models.rhvae.rhvae_config import RHVAEConfig
from tests.data.custom_architectures import (
Decoder_AE_Conv,
Encoder_VAE_Conv,
Metric_MLP_Custom,
NetBadInheritance,
)
# Directory containing this test module; used to locate bundled fixtures.
PATH = os.path.dirname(os.path.abspath(__file__))
# Run on GPU when available so device placement is exercised too.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
@pytest.fixture(params=[RHVAEConfig(), RHVAEConfig(latent_dim=5)])
def model_configs_no_input_dim(request):
    # Configs that deliberately omit input_dim, used to exercise the
    # AttributeError paths during model construction.
    return request.param
@pytest.fixture(
    params=[
        RHVAEConfig(input_dim=(1, 28, 28), latent_dim=1, n_lf=1, reconstruction_loss="bce"),
        RHVAEConfig(input_dim=(1, 2, 18), latent_dim=2, n_lf=1)
    ]
)
def model_configs(request):
    # Fully-specified configs with explicit input dims.
    return request.param
@pytest.fixture
def custom_encoder(model_configs):
    # Convolutional encoder from the custom test architectures.
    return Encoder_VAE_Conv(model_configs)
@pytest.fixture
def custom_decoder(model_configs):
    # Convolutional decoder from the custom test architectures.
    return Decoder_AE_Conv(model_configs)
@pytest.fixture
def custom_metric(model_configs):
    # Custom MLP metric network.
    return Metric_MLP_Custom(model_configs)
class Test_Model_Building:
    """Checks RHVAE construction with default and custom architectures."""
    @pytest.fixture()
    def bad_net(self):
        # Network that does not inherit from the required base class.
        return NetBadInheritance()
    def test_build_model(self, model_configs):
        rhvae = RHVAE(model_configs)
        assert all(
            [
                rhvae.n_lf == model_configs.n_lf,
                rhvae.temperature == model_configs.temperature,
            ]
        )
    def test_raises_bad_inheritance(self, model_configs, bad_net):
        # Each network slot must reject nets with the wrong base class.
        with pytest.raises(BadInheritanceError):
            rhvae = RHVAE(model_configs, encoder=bad_net)
        with pytest.raises(BadInheritanceError):
            rhvae = RHVAE(model_configs, decoder=bad_net)
        with pytest.raises(BadInheritanceError):
            rhvae = RHVAE(model_configs, metric=bad_net)
    def test_raises_no_input_dim(
        self, model_configs_no_input_dim, custom_encoder, custom_decoder, custom_metric
    ):
        # Missing input_dim is only acceptable when every net is custom.
        with pytest.raises(AttributeError):
            rhvae = RHVAE(model_configs_no_input_dim)
        with pytest.raises(AttributeError):
            rhvae = RHVAE(model_configs_no_input_dim, encoder=custom_encoder)
        with pytest.raises(AttributeError):
            rhvae = RHVAE(model_configs_no_input_dim, decoder=custom_decoder)
        with pytest.raises(AttributeError):
            rhvae = RHVAE(model_configs_no_input_dim, metric=custom_metric)
        rhvae = RHVAE(
            model_configs_no_input_dim,
            encoder=custom_encoder,
            decoder=custom_decoder,
            metric=custom_metric,
        )
    def test_build_custom_arch(
        self, model_configs, custom_encoder, custom_decoder, custom_metric
    ):
        rhvae = RHVAE(model_configs, encoder=custom_encoder, decoder=custom_decoder)
        assert rhvae.encoder == custom_encoder
        assert not rhvae.model_config.uses_default_encoder
        assert rhvae.decoder == custom_decoder
        # Bug fix: this assertion previously re-checked uses_default_encoder.
        assert not rhvae.model_config.uses_default_decoder
        assert rhvae.model_config.uses_default_metric
        rhvae = RHVAE(model_configs, metric=custom_metric)
        assert rhvae.model_config.uses_default_encoder
        # Bug fix: this assertion previously re-checked uses_default_encoder.
        assert rhvae.model_config.uses_default_decoder
        assert rhvae.metric == custom_metric
        assert not rhvae.model_config.uses_default_metric
class Test_Model_Saving:
    """Checks model save/reload round-trips for every combination of
    default and custom encoder/decoder/metric networks."""
    def test_default_model_saving(self, tmpdir, model_configs):
        tmpdir.mkdir("dummy_folder")
        # Bug fix: was `dir_path = dir_path = ...` (duplicated assignment).
        dir_path = os.path.join(tmpdir, "dummy_folder")
        model = RHVAE(model_configs)
        # set random M_tens and centroids from testing
        model.M_tens = torch.randn(3, 10, 10)
        model.centroids_tens = torch.randn(3, 10, 10)
        model.state_dict()["encoder.layers.0.0.weight"][0] = 0
        model.save(dir_path=dir_path)
        assert set(os.listdir(dir_path)) == set(["model_config.json", "model.pt"])
        # reload model
        model_rec = RHVAE.load_from_folder(dir_path)
        # check configs are the same
        assert model_rec.model_config.__dict__ == model.model_config.__dict__
        assert all(
            [
                torch.equal(model_rec.state_dict()[key], model.state_dict()[key])
                for key in model.state_dict().keys()
            ]
        )
        assert torch.equal(model_rec.M_tens, model.M_tens)
        assert torch.equal(model_rec.centroids_tens, model.centroids_tens)
        assert callable(model_rec.G)
        assert callable(model_rec.G_inv)
    def test_custom_encoder_model_saving(self, tmpdir, model_configs, custom_encoder):
        tmpdir.mkdir("dummy_folder")
        dir_path = os.path.join(tmpdir, "dummy_folder")
        model = RHVAE(model_configs, encoder=custom_encoder)
        model.state_dict()["encoder.layers.0.0.weight"][0] = 0
        model.save(dir_path=dir_path)
        # A custom encoder must be pickled alongside the model.
        assert set(os.listdir(dir_path)) == set(
            ["model_config.json", "model.pt", "encoder.pkl"]
        )
        # reload model
        model_rec = RHVAE.load_from_folder(dir_path)
        # check configs are the same
        assert model_rec.model_config.__dict__ == model.model_config.__dict__
        assert all(
            [
                torch.equal(model_rec.state_dict()[key], model.state_dict()[key])
                for key in model.state_dict().keys()
            ]
        )
        assert torch.equal(model_rec.M_tens, model.M_tens)
        assert torch.equal(model_rec.centroids_tens, model.centroids_tens)
        assert callable(model_rec.G)
        assert callable(model_rec.G_inv)
    def test_custom_decoder_model_saving(self, tmpdir, model_configs, custom_decoder):
        tmpdir.mkdir("dummy_folder")
        dir_path = os.path.join(tmpdir, "dummy_folder")
        model = RHVAE(model_configs, decoder=custom_decoder)
        model.state_dict()["encoder.layers.0.0.weight"][0] = 0
        model.save(dir_path=dir_path)
        # A custom decoder must be pickled alongside the model.
        assert set(os.listdir(dir_path)) == set(
            ["model_config.json", "model.pt", "decoder.pkl"]
        )
        # reload model
        model_rec = RHVAE.load_from_folder(dir_path)
        # check configs are the same
        assert model_rec.model_config.__dict__ == model.model_config.__dict__
        assert all(
            [
                torch.equal(model_rec.state_dict()[key], model.state_dict()[key])
                for key in model.state_dict().keys()
            ]
        )
        assert torch.equal(model_rec.M_tens, model.M_tens)
        assert torch.equal(model_rec.centroids_tens, model.centroids_tens)
        assert callable(model_rec.G)
        assert callable(model_rec.G_inv)
    def test_custom_metric_model_saving(self, tmpdir, model_configs, custom_metric):
        tmpdir.mkdir("dummy_folder")
        dir_path = os.path.join(tmpdir, "dummy_folder")
        model = RHVAE(model_configs, metric=custom_metric)
        model.state_dict()["encoder.layers.0.0.weight"][0] = 0
        model.save(dir_path=dir_path)
        # A custom metric must be pickled alongside the model.
        assert set(os.listdir(dir_path)) == set(
            ["model_config.json", "model.pt", "metric.pkl"]
        )
        # reload model
        model_rec = RHVAE.load_from_folder(dir_path)
        # check configs are the same
        assert model_rec.model_config.__dict__ == model.model_config.__dict__
        assert all(
            [
                torch.equal(model_rec.state_dict()[key], model.state_dict()[key])
                for key in model.state_dict().keys()
            ]
        )
        assert torch.equal(model_rec.M_tens, model.M_tens)
        assert torch.equal(model_rec.centroids_tens, model.centroids_tens)
        assert callable(model_rec.G)
        assert callable(model_rec.G_inv)
    def test_full_custom_model_saving(
        self, tmpdir, model_configs, custom_encoder, custom_decoder, custom_metric
    ):
        tmpdir.mkdir("dummy_folder")
        dir_path = os.path.join(tmpdir, "dummy_folder")
        model = RHVAE(
            model_configs,
            encoder=custom_encoder,
            decoder=custom_decoder,
            metric=custom_metric,
        )
        model.state_dict()["encoder.layers.0.0.weight"][0] = 0
        model.save(dir_path=dir_path)
        assert set(os.listdir(dir_path)) == set(
            [
                "model_config.json",
                "model.pt",
                "encoder.pkl",
                "decoder.pkl",
                "metric.pkl",
            ]
        )
        # reload model
        model_rec = RHVAE.load_from_folder(dir_path)
        # check configs are the same
        assert model_rec.model_config.__dict__ == model.model_config.__dict__
        assert all(
            [
                torch.equal(model_rec.state_dict()[key], model.state_dict()[key])
                for key in model.state_dict().keys()
            ]
        )
        assert torch.equal(model_rec.M_tens, model.M_tens)
        assert torch.equal(model_rec.centroids_tens, model.centroids_tens)
        assert callable(model_rec.G)
        assert callable(model_rec.G_inv)
        # The reloaded metric must still produce well-shaped G / G_inv.
        model_rec.to(device)
        z = torch.randn(2, model_configs.latent_dim).to(device)
        assert model_rec.G(z).shape == (2, model_configs.latent_dim, model_configs.latent_dim)
        assert model_rec.G_inv(z).shape == (2, model_configs.latent_dim, model_configs.latent_dim)
    def test_raises_missing_files(
        self, tmpdir, model_configs, custom_encoder, custom_decoder, custom_metric
    ):
        tmpdir.mkdir("dummy_folder")
        dir_path = os.path.join(tmpdir, "dummy_folder")
        model = RHVAE(
            model_configs,
            encoder=custom_encoder,
            decoder=custom_decoder,
            metric=custom_metric,
        )
        model.state_dict()["encoder.layers.0.0.weight"][0] = 0
        model.save(dir_path=dir_path)
        os.remove(os.path.join(dir_path, "metric.pkl"))
        # check raises when metric.pkl is missing
        with pytest.raises(FileNotFoundError):
            model_rec = RHVAE.load_from_folder(dir_path)
        os.remove(os.path.join(dir_path, "decoder.pkl"))
        # check raises when decoder.pkl is missing
        with pytest.raises(FileNotFoundError):
            model_rec = RHVAE.load_from_folder(dir_path)
        os.remove(os.path.join(dir_path, "encoder.pkl"))
        # check raises when encoder.pkl is missing
        with pytest.raises(FileNotFoundError):
            model_rec = RHVAE.load_from_folder(dir_path)
        os.remove(os.path.join(dir_path, "model.pt"))
        # check raises when model.pt is missing
        with pytest.raises(FileNotFoundError):
            model_rec = RHVAE.load_from_folder(dir_path)
        os.remove(os.path.join(dir_path, "model_config.json"))
        # check raises when model_config.json is missing
        with pytest.raises(FileNotFoundError):
            model_rec = RHVAE.load_from_folder(dir_path)
class Test_Model_forward:
    """Smoke tests for the RHVAE forward pass in train and eval modes."""
    @pytest.fixture
    def demo_data(self):
        # Small extract of 3 MNIST samples (unnormalized) used to exercise
        # the forward pass with custom architectures.
        return torch.load(
            os.path.join(PATH, "data/mnist_clean_train_dataset_sample")
        )[:]
    @pytest.fixture
    def rhvae(self, model_configs, demo_data):
        model_configs.input_dim = tuple(demo_data["data"][0].shape)
        return RHVAE(model_configs)
    def test_model_train_output(self, rhvae, demo_data):
        rhvae.train()
        out = rhvae(demo_data)
        expected_keys = {
            "loss", "recon_x", "z", "z0", "rho", "eps0",
            "gamma", "mu", "log_var", "G_inv", "G_log_det",
        }
        assert expected_keys == set(out.keys())
        rhvae.update()
    def test_model_output(self, rhvae, demo_data):
        rhvae.eval()
        out = rhvae(demo_data)
        expected_keys = {
            "loss", "recon_x", "z", "z0", "rho", "eps0",
            "gamma", "mu", "log_var", "G_inv", "G_log_det",
        }
        assert expected_keys == set(out.keys())
        assert out.z.shape[0] == demo_data["data"].shape[0]
        assert out.recon_x.shape == demo_data["data"].shape
@pytest.mark.slow
class Test_RHVAE_Training:
    @pytest.fixture
    def train_dataset(self):
        # Small bundled MNIST sample used as both train and eval data.
        return torch.load(os.path.join(PATH, "data/mnist_clean_train_dataset_sample"))
    @pytest.fixture(
        params=[BaseTrainingConfig(num_epochs=3, steps_saving=2, learning_rate=1e-5)]
    )
    def training_configs(self, tmpdir, request):
        # Redirect the trainer's output into a per-test temporary directory.
        tmpdir.mkdir("dummy_folder")
        dir_path = os.path.join(tmpdir, "dummy_folder")
        request.param.output_dir = dir_path
        return request.param
@pytest.fixture(
params=[
torch.rand(1),
torch.rand(1),
torch.rand(1)
]
)
def rhvae(
self, model_configs, custom_encoder, custom_decoder, custom_metric, request
):
# randomized
alpha = request.param
if alpha < 0.125:
model = RHVAE(model_configs)
elif 0.125 <= alpha < 0.25:
model = RHVAE(model_configs, encoder=custom_encoder)
elif 0.25 <= alpha < 0.375:
model = RHVAE(model_configs, decoder=custom_decoder)
elif 0.375 <= alpha < 0.5:
model = RHVAE(model_configs, metric=custom_metric)
elif 0.5 <= alpha < 0.625:
model = RHVAE(model_configs, encoder=custom_encoder, decoder=custom_decoder)
elif 0.625 <= alpha < 0:
model = RHVAE(model_configs, encoder=custom_encoder, metric=custom_metric)
elif 0.750 <= alpha < 0.875:
model = RHVAE(model_configs, decoder=custom_decoder, metric=custom_metric)
else:
model = RHVAE(
model_configs,
encoder=custom_encoder,
decoder=custom_decoder,
metric=custom_metric,
)
return model
@pytest.fixture(params=[Adam])
def optimizers(self, request, rhvae, training_configs):
if request.param is not None:
optimizer = request.param(
rhvae.parameters(), lr=training_configs.learning_rate
)
else:
optimizer = None
return optimizer
    def test_rhvae_train_step(self, rhvae, train_dataset, training_configs, optimizers):
        # A single training step must modify at least one model weight.
        trainer = BaseTrainer(
            model=rhvae,
            train_dataset=train_dataset,
            training_config=training_configs,
            optimizer=optimizers,
        )
        start_model_state_dict = deepcopy(trainer.model.state_dict())
        step_1_loss = trainer.train_step(epoch=1)
        step_1_model_state_dict = deepcopy(trainer.model.state_dict())
        # check that weights were updated
        assert not all(
            [
                torch.equal(start_model_state_dict[key], step_1_model_state_dict[key])
                for key in start_model_state_dict.keys()
            ]
        )
    def test_rhvae_eval_step(self, rhvae, train_dataset, training_configs, optimizers):
        # An evaluation step must leave every model weight untouched.
        trainer = BaseTrainer(
            model=rhvae,
            train_dataset=train_dataset,
            eval_dataset=train_dataset,
            training_config=training_configs,
            optimizer=optimizers,
        )
        start_model_state_dict = deepcopy(trainer.model.state_dict())
        step_1_loss = trainer.eval_step(epoch=1)
        step_1_model_state_dict = deepcopy(trainer.model.state_dict())
        # check that weights were NOT updated by the eval step
        assert all(
            [
                torch.equal(start_model_state_dict[key], step_1_model_state_dict[key])
                for key in start_model_state_dict.keys()
            ]
        )
    def test_rhvae_main_train_loop(
        self, tmpdir, rhvae, train_dataset, training_configs, optimizers
    ):
        # Running the full training loop must modify the model's weights.
        trainer = BaseTrainer(
            model=rhvae,
            train_dataset=train_dataset,
            eval_dataset=train_dataset,
            training_config=training_configs,
            optimizer=optimizers,
        )
        start_model_state_dict = deepcopy(trainer.model.state_dict())
        trainer.train()
        step_1_model_state_dict = deepcopy(trainer.model.state_dict())
        # check that weights were updated
        assert not all(
            [
                torch.equal(start_model_state_dict[key], step_1_model_state_dict[key])
                for key in start_model_state_dict.keys()
            ]
        )
def test_checkpoint_saving(
self, tmpdir, rhvae, train_dataset, training_configs, optimizers
):
dir_path = training_configs.output_dir
trainer = BaseTrainer(
model=rhvae,
train_dataset=train_dataset,
training_config=training_configs,
optimizer=optimizers,
)
# Make a training step
step_1_loss = trainer.train_step(epoch=1)
model = deepcopy(trainer.model)
optimizer = deepcopy(trainer.optimizer)
trainer.save_checkpoint(dir_path=dir_path, epoch=0, model=model)
checkpoint_dir = os.path.join(dir_path, "checkpoint_epoch_0")
assert os.path.isdir(checkpoint_dir)
files_list = os.listdir(checkpoint_dir)
assert set(["model.pt", "optimizer.pt", "training_config.json"]).issubset(
set(files_list)
)
# check pickled custom decoder
if not rhvae.model_config.uses_default_decoder:
assert "decoder.pkl" in files_list
else:
assert not "decoder.pkl" in files_list
# check pickled custom encoder
if not rhvae.model_config.uses_default_encoder:
assert "encoder.pkl" in files_list
else:
assert not "encoder.pkl" in files_list
# check pickled custom metric
if not rhvae.model_config.uses_default_metric:
assert "metric.pkl" in files_list
else:
assert not "metric.pkl" in files_list
model_rec_state_dict = torch.load(os.path.join(checkpoint_dir, "model.pt"))[
"model_state_dict"
]
model_rec_state_dict = torch.load(os.path.join(checkpoint_dir, "model.pt"))[
"model_state_dict"
]
assert all(
[
torch.equal(
model_rec_state_dict[key].cpu(), model.state_dict()[key].cpu()
)
for key in model.state_dict().keys()
]
)
# check reload full model
model_rec = RHVAE.load_from_folder(os.path.join(checkpoint_dir))
assert all(
[
torch.equal(
model_rec.state_dict()[key].cpu(), model.state_dict()[key].cpu()
)
for key in model.state_dict().keys()
]
)
assert torch.equal(model_rec.M_tens.cpu(), model.M_tens.cpu())
assert torch.equal(model_rec.centroids_tens.cpu(), model.centroids_tens.cpu())
assert type(model_rec.encoder.cpu()) == type(model.encoder.cpu())
assert type(model_rec.decoder.cpu()) == type(model.decoder.cpu())
assert type(model_rec.metric.cpu()) == type(model.metric.cpu())
optim_rec_state_dict = torch.load(os.path.join(checkpoint_dir, "optimizer.pt"))
assert all(
[
dict_rec == dict_optimizer
for (dict_rec, dict_optimizer) in zip(
optim_rec_state_dict["param_groups"],
optimizer.state_dict()["param_groups"],
)
]
)
assert all(
[
dict_rec == dict_optimizer
for (dict_rec, dict_optimizer) in zip(
optim_rec_state_dict["state"], optimizer.state_dict()["state"]
)
]
)
def test_checkpoint_saving_during_training(
self, tmpdir, rhvae, train_dataset, training_configs, optimizers
):
#
target_saving_epoch = training_configs.steps_saving
dir_path = training_configs.output_dir
trainer = BaseTrainer(
model=rhvae,
train_dataset=train_dataset,
training_config=training_configs,
optimizer=optimizers,
)
model = deepcopy(trainer.model)
trainer.train()
training_dir = os.path.join(
dir_path, f"RHVAE_training_{trainer._training_signature}"
)
assert os.path.isdir(training_dir)
checkpoint_dir = os.path.join(
training_dir, f"checkpoint_epoch_{target_saving_epoch}"
)
assert os.path.isdir(checkpoint_dir)
files_list = os.listdir(checkpoint_dir)
# check files
assert set(["model.pt", "optimizer.pt", "training_config.json"]).issubset(
set(files_list)
)
# check pickled custom decoder
if not rhvae.model_config.uses_default_decoder:
assert "decoder.pkl" in files_list
else:
assert not "decoder.pkl" in files_list
# check pickled custom encoder
if not rhvae.model_config.uses_default_encoder:
assert "encoder.pkl" in files_list
else:
assert not "encoder.pkl" in files_list
# check pickled custom metric
if not rhvae.model_config.uses_default_metric:
assert "metric.pkl" in files_list
else:
assert not "metric.pkl" in files_list
model_rec_state_dict = torch.load(os.path.join(checkpoint_dir, "model.pt"))[
"model_state_dict"
]
assert not all(
[
torch.equal(model_rec_state_dict[key], model.state_dict()[key])
for key in model.state_dict().keys()
]
)
def test_final_model_saving(
self, tmpdir, rhvae, train_dataset, training_configs, optimizers
):
dir_path = training_configs.output_dir
trainer = BaseTrainer(
model=rhvae,
train_dataset=train_dataset,
training_config=training_configs,
optimizer=optimizers,
)
trainer.train()
model = deepcopy(trainer._best_model)
training_dir = os.path.join(
dir_path, f"RHVAE_training_{trainer._training_signature}"
)
assert os.path.isdir(training_dir)
final_dir = os.path.join(training_dir, f"final_model")
assert os.path.isdir(final_dir)
files_list = os.listdir(final_dir)
assert set(["model.pt", "model_config.json", "training_config.json"]).issubset(
set(files_list)
)
# check pickled custom decoder
if not rhvae.model_config.uses_default_decoder:
assert "decoder.pkl" in files_list
else:
assert not "decoder.pkl" in files_list
# check pickled custom encoder
if not rhvae.model_config.uses_default_encoder:
assert "encoder.pkl" in files_list
else:
assert not "encoder.pkl" in files_list
# check pickled custom metric
if not rhvae.model_config.uses_default_metric:
assert "metric.pkl" in files_list
else:
assert not "metric.pkl" in files_list
# check reload full model
model_rec = RHVAE.load_from_folder(os.path.join(final_dir))
assert all(
[
torch.equal(
model_rec.state_dict()[key].cpu(), model.state_dict()[key].cpu()
)
for key in model.state_dict().keys()
]
)
assert torch.equal(model_rec.M_tens.cpu(), model.M_tens.cpu())
assert torch.equal(model_rec.centroids_tens.cpu(), model.centroids_tens.cpu())
assert type(model_rec.encoder.cpu()) == type(model.encoder.cpu())
assert type(model_rec.decoder.cpu()) == type(model.decoder.cpu())
assert type(model_rec.metric.cpu()) == type(model.metric.cpu())
def test_rhvae_training_pipeline(
self, tmpdir, rhvae, train_dataset, training_configs
):
dir_path = training_configs.output_dir
# build pipeline
pipeline = TrainingPipeline(
model=rhvae, training_config=training_configs
)
assert pipeline.training_config.__dict__ == training_configs.__dict__
# Launch Pipeline
pipeline(
train_data=train_dataset.data, # gives tensor to pipeline
eval_data=train_dataset.data, # gives tensor to pipeline
)
model = deepcopy(pipeline.trainer._best_model)
training_dir = os.path.join(
dir_path, f"RHVAE_training_{pipeline.trainer._training_signature}"
)
assert os.path.isdir(training_dir)
final_dir = os.path.join(training_dir, f"final_model")
assert os.path.isdir(final_dir)
files_list = os.listdir(final_dir)
assert set(["model.pt", "model_config.json", "training_config.json"]).issubset(
set(files_list)
)
# check pickled custom decoder
if not rhvae.model_config.uses_default_decoder:
assert "decoder.pkl" in files_list
else:
assert not "decoder.pkl" in files_list
# check pickled custom encoder
if not rhvae.model_config.uses_default_encoder:
assert "encoder.pkl" in files_list
else:
assert not "encoder.pkl" in files_list
# check pickled custom metric
if not rhvae.model_config.uses_default_metric:
assert "metric.pkl" in files_list
else:
assert not "metric.pkl" in files_list
# check reload full model
model_rec = RHVAE.load_from_folder(os.path.join(final_dir))
assert all(
[
torch.equal(
model_rec.state_dict()[key].cpu(), model.state_dict()[key].cpu()
)
for key in model.state_dict().keys()
]
)
assert torch.equal(model_rec.M_tens.cpu(), model.M_tens.cpu())
assert torch.equal(model_rec.centroids_tens.cpu(), model.centroids_tens.cpu())
assert type(model_rec.encoder.cpu()) == type(model.encoder.cpu())
assert type(model_rec.decoder.cpu()) == type(model.decoder.cpu())
assert type(model_rec.metric.cpu()) == type(model.metric.cpu())
|
"""
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
from binascii import Error
import pytest
from unittest.mock import patch
from eth_keyfile import decode_keyfile_json
from web3 import Web3
from web3.exceptions import InvalidAddress, ValidationError
from web3.middleware import geth_poa_middleware
import config
from app.exceptions import SendTransactionError
from app.model.blockchain import (
IbetStraightBondContract,
IbetShareContract
)
from app.model.blockchain.token_list import TokenListContract
from app.utils.contract_utils import ContractUtils
from app.model.db import TokenType
from config import WEB3_HTTP_PROVIDER, ZERO_ADDRESS
from tests.account_config import config_eth_account
# Module-wide web3 client used by every test in this file.
# geth_poa_middleware is injected for POA-style chains (extraData field).
web3 = Web3(Web3.HTTPProvider(WEB3_HTTP_PROVIDER))
web3.middleware_onion.inject(geth_poa_middleware, layer=0)
@pytest.fixture
def contract_list(db):
    """Deploy a fresh TokenList contract and point the app config at it."""
    account = config_eth_account("user1")
    signing_key = decode_keyfile_json(
        raw_keyfile_json=account.get("keyfile_json"),
        password=account.get("password").encode("utf-8")
    )
    deployed_address, _abi, _tx_hash = ContractUtils.deploy_contract(
        contract_name="TokenList",
        args=[],
        deployer=account.get("address"),
        private_key=signing_key
    )
    # Tests read the deployed address through the global config module.
    config.TOKEN_LIST_CONTRACT_ADDRESS = deployed_address
class TestRegisterTokenList:
    """Tests for TokenListContract.register()."""

    ###########################################################################
    # Normal Case
    ###########################################################################

    # <Normal_1> register both an IbetShare and an IbetStraightBond token
    def test_normal_1(self, db, contract_list):
        test_account = config_eth_account("user1")
        issuer_address = test_account.get("address")
        private_key = decode_keyfile_json(
            raw_keyfile_json=test_account.get("keyfile_json"),
            password=test_account.get("password").encode("utf-8")
        )

        # execute the function : IbetShareContract
        arguments = [
            "テスト株式",
            "TEST",
            10000,
            20000,
            1,
            "20211231",
            "20211231",
            "20221231",
            10000
        ]
        share_token_address, abi, tx_hash = IbetShareContract.create(
            args=arguments,
            tx_from=issuer_address,
            private_key=private_key
        )
        TokenListContract.register(
            token_address=share_token_address,
            token_template=TokenType.IBET_SHARE,
            token_list_address=config.TOKEN_LIST_CONTRACT_ADDRESS,
            account_address=issuer_address,
            private_key=private_key
        )

        # assertion : list length
        token_list_contract = ContractUtils.get_contract(
            contract_name="TokenList",
            contract_address=config.TOKEN_LIST_CONTRACT_ADDRESS
        )
        assert token_list_contract.functions.getListLength().call() == 1

        # execute the function : IbetStraightBondContract
        arguments = [
            "テスト債券",
            "TEST",
            10000,
            20000,
            "20211231",
            30000,
            "20211231",
            "リターン内容",
            "発行目的"
        ]
        bond_token_address, abi, tx_hash = IbetStraightBondContract.create(
            args=arguments,
            tx_from=issuer_address,
            private_key=private_key
        )
        TokenListContract.register(
            token_address=bond_token_address,
            token_template=TokenType.IBET_STRAIGHT_BOND,
            token_list_address=config.TOKEN_LIST_CONTRACT_ADDRESS,
            account_address=issuer_address,
            private_key=private_key
        )

        # assertion : both tokens are listed with template and owner
        token_list_contract = ContractUtils.get_contract(
            contract_name="TokenList",
            contract_address=config.TOKEN_LIST_CONTRACT_ADDRESS
        )
        assert token_list_contract.functions.getListLength().call() == 2
        _share_token = token_list_contract.functions.getTokenByAddress(share_token_address).call()
        assert _share_token[0] == share_token_address
        assert _share_token[1] == TokenType.IBET_SHARE
        assert _share_token[2] == issuer_address
        _bond_token = token_list_contract.functions.getTokenByAddress(bond_token_address).call()
        assert _bond_token[0] == bond_token_address
        assert _bond_token[1] == TokenType.IBET_STRAIGHT_BOND
        assert _bond_token[2] == issuer_address

    ###########################################################################
    # Error Case
    ###########################################################################

    # <Error_1> Invalid argument: token_address
    def test_error_1(self, db, contract_list):
        test_account = config_eth_account("user1")
        issuer_address = test_account.get("address")
        private_key = decode_keyfile_json(
            raw_keyfile_json=test_account.get("keyfile_json"),
            password=test_account.get("password").encode("utf-8")
        )
        with pytest.raises(SendTransactionError) as exc_info:
            TokenListContract.register(
                token_address="dummy_token_address",
                token_template=TokenType.IBET_SHARE,
                token_list_address=config.TOKEN_LIST_CONTRACT_ADDRESS,
                account_address=issuer_address,
                private_key=private_key
            )
        assert isinstance(exc_info.value.args[0], ValidationError)

    # <Error_2> Invalid argument: token_list_address
    def test_error_2(self, db, contract_list):
        test_account = config_eth_account("user1")
        issuer_address = test_account.get("address")
        private_key = decode_keyfile_json(
            raw_keyfile_json=test_account.get("keyfile_json"),
            password=test_account.get("password").encode("utf-8")
        )
        with pytest.raises(SendTransactionError) as exc_info:
            TokenListContract.register(
                token_address=ZERO_ADDRESS,
                token_template=TokenType.IBET_STRAIGHT_BOND,
                token_list_address="dummy_token_list_address",
                account_address=issuer_address,
                private_key=private_key
            )
        assert isinstance(exc_info.value.args[0], ValueError)

    # <Error_3> Invalid argument: account_address
    def test_error_3(self, db, contract_list):
        test_account = config_eth_account("user1")
        issuer_address = test_account.get("address")
        private_key = decode_keyfile_json(
            raw_keyfile_json=test_account.get("keyfile_json"),
            password=test_account.get("password").encode("utf-8")
        )
        with pytest.raises(SendTransactionError) as exc_info:
            TokenListContract.register(
                token_address=ZERO_ADDRESS,
                token_template=TokenType.IBET_SHARE,
                token_list_address=config.TOKEN_LIST_CONTRACT_ADDRESS,
                account_address=issuer_address[:-1],  # truncated -> invalid
                private_key=private_key
            )
        assert isinstance(exc_info.value.args[0], InvalidAddress)

    # <Error_4> Invalid argument: private_key
    def test_error_4(self, db, contract_list):
        test_account = config_eth_account("user1")
        issuer_address = test_account.get("address")
        with pytest.raises(SendTransactionError) as exc_info:
            TokenListContract.register(
                token_address=ZERO_ADDRESS,
                token_template=TokenType.IBET_SHARE,
                token_list_address=config.TOKEN_LIST_CONTRACT_ADDRESS,
                account_address=issuer_address,
                private_key="not private key"
            )
        assert isinstance(exc_info.value.args[0], Error)

    # <Error_5> SendTransactionError : ContractUtils
    def test_error_5(self, db, contract_list):
        test_account = config_eth_account("user1")
        issuer_address = test_account.get("address")
        private_key = decode_keyfile_json(
            raw_keyfile_json=test_account.get("keyfile_json"),
            # Fixed: password comes from the same test account (the original
            # read "<PASSWORD>_account", a broken redacted identifier).
            password=test_account.get("password").encode("utf-8")
        )

        # mock
        ContractUtils_send_transaction = patch(
            target="app.utils.contract_utils.ContractUtils.send_transaction",
            side_effect=SendTransactionError()
        )

        # execute the function
        with ContractUtils_send_transaction:
            with pytest.raises(SendTransactionError):
                TokenListContract.register(
                    token_address=ZERO_ADDRESS,
                    token_template=TokenType.IBET_SHARE,
                    token_list_address=config.TOKEN_LIST_CONTRACT_ADDRESS,
                    account_address=issuer_address,
                    private_key=private_key
                )
|
from fastapi import APIRouter, Depends
from ..database.connection import get_database
from ..crud.user_opinions import fetch_user_opinions, fetch_user_opinion_by_id, add_user_opinion, fetch_user_opinions_by_user_id, fetch_user_opinion_with_movie_id
from ..models.user_opinion import UserOpinion, UserOpinionIns, UserOpinionStruct
from typing import List
from fastapi import HTTPException, Body, status
from fastapi.responses import JSONResponse
from ..models.common import PyObjectId
# All endpoints in this module are mounted under the /useropinions prefix.
router = APIRouter(
    prefix="/useropinions",
    tags=["useropinions"],
)
@router.get("",response_description="List all user opinions in DB",response_model=List[UserOpinion])
async def get_user_opinions(db = Depends(get_database)):
    """Return every user opinion stored in the database."""
    userList = await fetch_user_opinions(db)
    return userList
@router.get("/user/{userId}",response_description="Find all user opinions with its MongoDB ID",response_model=List[UserOpinion])
async def get_user_opinion_by_id(userId : str, db = Depends(get_database)):
    """Return every opinion posted by the given user.

    NOTE(review): three handlers in this module share the name
    ``get_user_opinion_by_id``; routes still register at decoration time,
    but the module-level name is silently rebound — consider renaming.
    """
    # Check if the user ID is valid
    try:
        userIdToFetch = PyObjectId(userId)
    except:
        raise HTTPException(status_code=status.HTTP_418_IM_A_TEAPOT, detail=f"The provided user ID '{userId}' is not a valid Mongo ID")
    userOpinion = await fetch_user_opinions_by_user_id(db, userIdToFetch)
    if userOpinion is not None and userOpinion != []:
        return userOpinion
    raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"User {userId} did not rate any movie")
@router.get("/u={userId}&mov={movieId}",response_description="Try to find a user opinion of a movie with their MongoDB ID",response_model=UserOpinion)
async def get_user_opinion_by_id(userId : str, movieId:str, db = Depends(get_database)):
    """Return the single opinion that user `userId` gave to movie `movieId`.

    Raises 418 when either ID is not a valid Mongo ObjectId, 404 when the
    user did not rate the movie.
    """
    # Check if the user ID is valid
    try:
        userIdToFetch = PyObjectId(userId)
    except:
        raise HTTPException(status_code=status.HTTP_418_IM_A_TEAPOT, detail=f"The provided user ID '{userId}' is not a valid Mongo ID")
    # Check if the movie ID is valid
    try:
        movieIdToFetch = PyObjectId(movieId)
    except:
        # Fixed copy-paste bug: this branch reported "user ID" for a bad movie ID.
        raise HTTPException(status_code=status.HTTP_418_IM_A_TEAPOT, detail=f"The provided movie ID '{movieId}' is not a valid Mongo ID")
    userOpinion = await fetch_user_opinion_with_movie_id(db, userIdToFetch, movieIdToFetch)
    if userOpinion is not None:
        return userOpinion
    raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"User {userId} did not rate this movie")
@router.get("/{userOpinionId}",response_description="Find an user opinion with its MongoDB ID",response_model=UserOpinion)
async def get_user_opinion_by_id(userOpinionId : str, db = Depends(get_database)):
    """Return a single opinion by its own document ID (418 if malformed, 404 if absent)."""
    # Check if the user ID is valid
    try:
        userOpinionIdToFetch = PyObjectId(userOpinionId)
    except:
        raise HTTPException(status_code=status.HTTP_418_IM_A_TEAPOT, detail=f"The provided user opinion ID '{userOpinionId}' is not a valid Mongo ID")
    userOpinion = await fetch_user_opinion_by_id(db, userOpinionIdToFetch)
    if userOpinion is not None :
        return userOpinion
    raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"User opinion {userOpinionId} not found")
@router.post("",response_description="Insert a single user opinion into the DB")
async def add_user_opinion_to_db(userOpinion : UserOpinionStruct = Body(...), db=Depends(get_database)):
    """Insert one user opinion; returns 201 with the new document ID.

    Raises 418 for malformed IDs or CRUD-layer errors, 404 when the insert
    failed without an error message.
    """
    # Check if the user ID is valid
    try:
        userIdToFetch = PyObjectId(userOpinion.userId)
    except:
        raise HTTPException(status_code=status.HTTP_418_IM_A_TEAPOT, detail=f"The provided user ID '{userOpinion.userId}' is not a valid Mongo ID")
    # Check if the movie ID is valid
    try:
        movieIdToFetch = PyObjectId(userOpinion.movieId)
    except:
        raise HTTPException(status_code=status.HTTP_418_IM_A_TEAPOT, detail=f"The provided movie ID '{userOpinion.movieId}' is not a valid Mongo ID")
    response = await add_user_opinion(db, userOpinion)
    if response.userOpinionAdded is None:
        if response.error is None:
            # Fixed: this branch previously interpolated response.error,
            # which is None here, producing the literal detail "None".
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="User opinion could not be added")
        else:
            raise HTTPException(status_code=status.HTTP_418_IM_A_TEAPOT, detail=f"{response.error}")
    response.userOpinionAdded = str(response.userOpinionAdded)
    responseJSON = {"added":response.userOpinionAdded}
    return JSONResponse(status_code=status.HTTP_201_CREATED, content=responseJSON)
"""
Zombie Apocalypse mini-project
Using Breadth-First 2D Search
Principles of Computing Part 2
Author: <NAME>
Date: 7/14/15
CodeSkulptor source:
http://www.codeskulptor.org/#user40_iQQZl747fQ_13.py
"""
import random
import poc_grid
import poc_queue
import poc_zombie_gui
# global constants
EMPTY = 0       # obstacle-grid cell state: passable
FULL = 1        # obstacle-grid cell state: blocked
FOUR_WAY = 0    # neighbourhood selectors (orthogonal vs. incl. diagonals)
EIGHT_WAY = 1
OBSTACLE = 5    # GUI entity codes
HUMAN = 6
ZOMBIE = 7
class Apocalypse(poc_grid.Grid):
    """
    Class for simulating zombie pursuit of human on grid with
    obstacles
    """

    def __init__(self, grid_height, grid_width, obstacle_list = None,
                 zombie_list = None, human_list = None):
        """
        Create a simulation of given size with given obstacles,
        humans, and zombies
        """
        poc_grid.Grid.__init__(self, grid_height, grid_width)
        if obstacle_list is not None:
            for cell in obstacle_list:
                self.set_full(cell[0], cell[1])
        if zombie_list is not None:
            self._zombie_list = list(zombie_list)
        else:
            self._zombie_list = []
        if human_list is not None:
            self._human_list = list(human_list)
        else:
            self._human_list = []

    def clear(self):
        """
        Set cells in obstacle grid to be empty
        Reset zombie and human lists to be empty
        """
        poc_grid.Grid.clear(self)
        self._zombie_list = []
        self._human_list = []

    def add_zombie(self, row, col):
        """
        Add zombie to the zombie list
        """
        self._zombie_list.append((row, col))

    def num_zombies(self):
        """
        Return number of zombies
        """
        return len(self._zombie_list)

    def zombies(self):
        """
        Generator that yields the zombies in the order they were
        added.
        """
        for zombie in self._zombie_list:
            yield zombie

    def add_human(self, row, col):
        """
        Add human to the human list
        """
        self._human_list.append((row, col))

    def num_humans(self):
        """
        Return number of humans
        """
        return len(self._human_list)

    def humans(self):
        """
        Generator that yields the humans in the order they were added.
        """
        for human in self._human_list:
            yield human

    def compute_distance_field(self, entity_type):
        """
        Function computes and returns a 2D distance field
        Distance at member of entity_list is zero
        Shortest paths avoid obstacles and use four-way distances
        """
        height = poc_grid.Grid.get_grid_height(self)
        width = poc_grid.Grid.get_grid_width(self)
        # create a visited grid of same dimensions that is empty
        visited = poc_grid.Grid(height, width)
        # distance list initialized to an unreachable upper bound
        distance_field = [[height * width for dummy_width in range(width)] \
                          for dummy_height in range(height)]
        # queue of entity_type list
        boundary = poc_queue.Queue()
        # enqueue all instances of entity_type
        # Fixed: compare ints with '==', not 'is' (identity only worked
        # because CPython caches small ints).
        if entity_type == HUMAN:
            for human in self._human_list:
                boundary.enqueue(human)
        else:
            for zombie in self._zombie_list:
                boundary.enqueue(zombie)
        # seed BFS: entities are at distance 0 and already visited
        for entity in boundary:
            visited.set_full(entity[0], entity[1])
            distance_field[entity[0]][entity[1]] = 0
        # breadth-first search from all seeds simultaneously
        # Fixed: 'len(boundary) is not 0' compared by identity.
        while len(boundary) != 0:
            current_cell = boundary.dequeue()
            for neighbor_cell in poc_grid.Grid.four_neighbors(self, current_cell[0], current_cell[1]):
                # obstacle cells are marked visited but never expanded
                if not poc_grid.Grid.is_empty(self, neighbor_cell[0], neighbor_cell[1]):
                    visited.set_full(neighbor_cell[0], neighbor_cell[1])
                # otherwise expand unvisited empty cells as usual
                elif visited.is_empty(neighbor_cell[0], neighbor_cell[1]):
                    visited.set_full(neighbor_cell[0], neighbor_cell[1])
                    boundary.enqueue(neighbor_cell)
                    # BFS guarantees first visit is along a shortest path
                    new_distance = distance_field[current_cell[0]][current_cell[1]] + 1
                    distance_field[neighbor_cell[0]][neighbor_cell[1]] = new_distance
        return distance_field

    def move_humans(self, zombie_distance_field):
        """
        Function that moves humans away from zombies, diagonal moves
        are allowed
        """
        human_list_copy = []
        for human in self._human_list:
            max_distance = zombie_distance_field[human[0]][human[1]]
            # staying put is always a candidate move
            best_moves = [human]
            for neighbor in poc_grid.Grid.eight_neighbors(self, human[0], human[1]):
                # neighbor should not be a wall
                if poc_grid.Grid.is_empty(self, neighbor[0], neighbor[1]):
                    distance = zombie_distance_field[neighbor[0]][neighbor[1]]
                    # strictly better distance resets the candidate list
                    if distance > max_distance:
                        max_distance = distance
                        best_moves = [neighbor]
                    # ties accumulate so the choice stays uniform
                    elif distance == max_distance:
                        best_moves.append(neighbor)
            # pick uniformly among the equally-best moves
            human_list_copy.append(random.choice(best_moves))
        self._human_list = human_list_copy

    def move_zombies(self, human_distance_field):
        """
        Function that moves zombies towards humans, no diagonal moves
        are allowed
        """
        zombie_list_copy = []
        for zombie in self._zombie_list:
            min_distance = human_distance_field[zombie[0]][zombie[1]]
            best_moves = [zombie]
            for neighbor in poc_grid.Grid.four_neighbors(self, zombie[0], zombie[1]):
                if poc_grid.Grid.is_empty(self, neighbor[0], neighbor[1]):
                    distance = human_distance_field[neighbor[0]][neighbor[1]]
                    if distance < min_distance:
                        min_distance = distance
                        best_moves = [neighbor]
                    elif distance == min_distance:
                        best_moves.append(neighbor)
            zombie_list_copy.append(random.choice(best_moves))
        self._zombie_list = zombie_list_copy
# Start up gui for simulation - You will need to write some code above
# before this will work without errors
# NOTE: runs at import time; 30x40 grid with no initial entities.
poc_zombie_gui.run_gui(Apocalypse(30, 40))
|
<gh_stars>1-10
"""
Test timetable generation.
"""
import datetime
import pytest
from nextbus import db, models
from nextbus.timetable import (_query_journeys, _query_timetable, Timetable,
TimetableRow, TimetableStop)
# Service and direction under test, plus the two UK offsets used when
# building expected timezone-aware departure times.
SERVICE = 645
DIRECTION = False
GMT = datetime.timezone(datetime.timedelta(hours=0))
BST = datetime.timezone(datetime.timedelta(hours=1))
@pytest.fixture
def load_org(load_db):
    """Set up an organisation "TEMP" with a holiday period (with one excluded
    date), a working period (with one excluded date), and four journey
    associations covering every operational/working combination.

    NOTE(review): the org row is committed first, presumably because the
    dependent rows reference org_ref="TEMP" — confirm FK setup before
    merging the two commits.
    """
    org = models.Organisation(code="TEMP")
    db.session.add(org)
    db.session.commit()
    # Holiday 2019-03-31..2019-04-14, except 2019-04-07
    org_holiday = models.OperatingPeriod(
        id=1,
        org_ref="TEMP",
        date_start=datetime.date(2019, 3, 31),
        date_end=datetime.date(2019, 4, 14),
        working=False
    )
    org_holiday_except = models.ExcludedDate(
        id=1,
        org_ref="TEMP",
        date=datetime.date(2019, 4, 7),
        working=False
    )
    # Open-ended working period from 2019-04-15, except 2019-04-21
    org_working = models.OperatingPeriod(
        id=2,
        org_ref="TEMP",
        date_start=datetime.date(2019, 4, 15),
        date_end=None,
        working=True
    )
    org_working_except = models.ExcludedDate(
        id=2,
        org_ref="TEMP",
        date=datetime.date(2019, 4, 21),
        working=True
    )
    # Four journeys: every (operational, working) combination
    org_0 = models.Organisations(
        org_ref="TEMP",
        journey_ref=400012,
        operational=False,
        working=False
    )
    org_1 = models.Organisations(
        org_ref="TEMP",
        journey_ref=400013,
        operational=True,
        working=False
    )
    org_2 = models.Organisations(
        org_ref="TEMP",
        journey_ref=400014,
        operational=False,
        working=True
    )
    org_3 = models.Organisations(
        org_ref="TEMP",
        journey_ref=400015,
        operational=True,
        working=True
    )
    db.session.add_all([org_holiday, org_holiday_except, org_working,
                        org_working_except, org_0, org_1, org_2, org_3])
    db.session.commit()
@pytest.fixture
def set_night_times(load_db):
    """Shift all journeys for the tested service to the early morning
    (00:15 first departure) so DST transitions fall inside the schedule."""
    # Remove expiry date for all relevant journey patterns
    (
        models.JourneyPattern.query
        .filter_by(service_ref=SERVICE, direction=DIRECTION)
        .update({"date_end": None})
    )
    # Change all journey times to early morning
    patterns = (db.session.query(models.JourneyPattern.id)
                .filter_by(service_ref=SERVICE, direction=DIRECTION))
    (
        models.Journey.query
        .filter(models.Journey.pattern_ref.in_(patterns))
        .update({
            # 08:30 first departure minus 8h15 -> 00:15
            "departure": models.Journey.departure -
                         datetime.timedelta(hours=8, minutes=15)
        }, synchronize_session=False)
    )
    db.session.commit()
def _expected_journeys(first_departure):
# Journeys for service 645 and outbound direction which are half hourly.
return [
(400012 + i, first_departure + i * datetime.timedelta(minutes=30))
for i in range(13)
]
def _set_journeys():
# Journeys for service 645 and in outbound direction
return {400012 + i for i in range(13)}
def _set_timezone(tz):
    """Set the PostgreSQL session time zone (transaction-local)."""
    db.session.execute("SET LOCAL TIME ZONE :tz", {"tz": tz})
def test_journeys_sunday_gmt(load_db):
    """Full Sunday schedule is returned with GMT-aware departures."""
    # Should be Sunday 3rd March 2019
    date = datetime.date(2019, 3, 3)
    assert date.isoweekday() == 7
    query = _query_journeys(SERVICE, DIRECTION, date).order_by("departure")
    result = query.all()
    assert result == _expected_journeys(
        datetime.datetime(2019, 3, 3, 8, 30, tzinfo=GMT)
    )


def test_journeys_sunday_bst(load_db):
    """Same schedule after the spring clock change uses BST offsets."""
    # Should be Sunday 7th April 2019
    date = datetime.date(2019, 4, 7)
    assert date.isoweekday() == 7
    query = _query_journeys(SERVICE, DIRECTION, date).order_by("departure")
    result = query.all()
    assert result == _expected_journeys(
        datetime.datetime(2019, 4, 7, 8, 30, tzinfo=BST)
    )


def test_journeys_weekday(load_db):
    """No journeys on a plain weekday for this Sunday-only service."""
    # Should be Monday 4th March 2019, which this service does not run on
    # except bank holidays
    date = datetime.date(2019, 3, 4)
    assert date.isoweekday() == 1
    result = _query_journeys(SERVICE, DIRECTION, date).all()
    assert not result


def test_journeys_bank_holiday(load_db):
    """Service runs on a bank-holiday Monday (Easter Monday)."""
    # Should be 22nd April 2019, ie Easter Monday
    date = datetime.date(2019, 4, 22)
    assert date.isoweekday() == 1
    query = _query_journeys(SERVICE, DIRECTION, date).order_by("departure")
    result = query.all()
    assert result == _expected_journeys(
        datetime.datetime(2019, 4, 22, 8, 30, tzinfo=BST)
    )


def test_journeys_bank_holiday_override(load_db):
    """A journey excluded on Easter Monday (holiday bit 4) is dropped."""
    # Override Easter Monday, id 4
    journey = models.Journey.query.get(400012)
    journey.exclude_holidays = 1 << 4
    db.session.commit()
    date = datetime.date(2019, 4, 22)
    result = _query_journeys(SERVICE, DIRECTION, date).all()
    assert {r.journey_id for r in result} == _set_journeys() - {400012}


def test_journeys_special_day(load_db):
    """An operational special period makes a journey run on an off day."""
    special_date = datetime.date(2019, 3, 4)
    sp = models.SpecialPeriod(
        id=1,
        journey_ref=400012,
        date_start=special_date,
        date_end=special_date,
        operational=True
    )
    db.session.add(sp)
    db.session.commit()
    result = _query_journeys(SERVICE, DIRECTION, special_date).all()
    assert {r.journey_id for r in result} == {400012}


def test_journeys_special_day_override_weekday(load_db):
    """A non-operational special period removes a journey from its usual day."""
    special_date = datetime.date(2019, 3, 10)
    assert special_date.isoweekday() == 7
    sp = models.SpecialPeriod(
        id=1,
        journey_ref=400012,
        date_start=special_date,
        date_end=special_date,
        operational=False
    )
    db.session.add(sp)
    db.session.commit()
    result = _query_journeys(SERVICE, DIRECTION, special_date).all()
    assert {r.journey_id for r in result} == _set_journeys() - {400012}


def test_journeys_special_day_override_bh(load_db):
    """A non-operational special period overrides even a bank holiday."""
    special_date = datetime.date(2019, 4, 22)
    sp = models.SpecialPeriod(
        id=1,
        journey_ref=400012,
        date_start=special_date,
        date_end=special_date,
        operational=False
    )
    db.session.add(sp)
    db.session.commit()
    result = _query_journeys(SERVICE, DIRECTION, special_date).all()
    assert {r.journey_id for r in result} == _set_journeys() - {400012}


def test_journeys_organisation_holiday(load_db):
    """Organisation holiday period filters by the journeys' holiday flags."""
    date = datetime.date(2019, 4, 14)
    result = _query_journeys(SERVICE, DIRECTION, date).all()
    # 400012 not operational and 400013 operational during holidays
    assert {r.journey_id for r in result} == _set_journeys() - {400012}


def test_journeys_organisation_holiday_except(load_org):
    """An excluded date inside the holiday period restores the schedule."""
    date = datetime.date(2019, 4, 7)
    result = _query_journeys(SERVICE, DIRECTION, date).all()
    assert {r.journey_id for r in result} == _set_journeys()


def test_journeys_organisation_working(load_org):
    """Organisation working period filters by the journeys' working flags."""
    date = datetime.date(2019, 4, 28)
    result = _query_journeys(SERVICE, DIRECTION, date).all()
    # 400014 not operational and 400015 operational during working days
    assert {r.journey_id for r in result} == _set_journeys() - {400014}


def test_journeys_organisation_working_except(load_org):
    """An excluded date inside the working period restores the schedule."""
    date = datetime.date(2019, 4, 21)
    result = _query_journeys(SERVICE, DIRECTION, date).all()
    assert {r.journey_id for r in result} == _set_journeys()


def test_journeys_organisation_weekday(load_org):
    """Organisation calendars don't add days the service never runs on."""
    date = datetime.date(2019, 4, 8)
    result = _query_journeys(SERVICE, DIRECTION, date).all()
    # Services associated with organisations still only run on specified days
    assert not result


def test_journeys_organisation_overriden_by_bh(load_org):
    """Bank holidays take precedence over organisation calendars."""
    date = datetime.date(2019, 4, 22)
    result = _query_journeys(SERVICE, DIRECTION, date).all()
    # Bank holidays and special days override organisation calendars,
    # should run as normal
    assert {r.journey_id for r in result} == _set_journeys()


def test_journeys_in_week_month(load_db):
    """Journey restricted to week 1 of the month runs on a week-1 date."""
    # Set first journey to 1st week of month
    models.Journey.query.filter_by(id=400012).update({"weeks": 1 << 0})
    db.session.commit()
    date = datetime.date(2019, 3, 3)
    result = _query_journeys(SERVICE, DIRECTION, date).all()
    assert {r.journey_id for r in result} == _set_journeys()


def test_journeys_not_in_week_month(load_db):
    """Journey restricted to week 2 of the month skips a week-1 date."""
    # Set first journey to 2nd week of month
    models.Journey.query.filter_by(id=400012).update({"weeks": 1 << 1})
    db.session.commit()
    date = datetime.date(2019, 3, 3)
    result = _query_journeys(SERVICE, DIRECTION, date).all()
    assert {r.journey_id for r in result} == _set_journeys() - {400012}


def test_journeys_bank_holiday_week_month(load_db):
    """Bank holidays ignore the week-of-month restriction."""
    # Set first journey to 2nd week of month
    models.Journey.query.filter_by(id=400012).update({"weeks": 1 << 1})
    db.session.commit()
    # Bank holiday on 3rd week of month, should still run
    date = datetime.date(2019, 4, 22)
    result = _query_journeys(SERVICE, DIRECTION, date).all()
    assert {r.journey_id for r in result} == _set_journeys()
def test_journeys_no_dst(set_night_times):
date = datetime.date(2019, 3, 24)
query = _query_journeys(SERVICE, DIRECTION, date).order_by("departure")
result = query.all()
assert result == _expected_journeys(
datetime.datetime(2019, 3, 24, 0, 15, tzinfo=GMT)
)
def test_journeys_dst_march(set_night_times):
# Journeys between 0100-0200 omitted as timezone changes from GMT to BST
date = datetime.date(2019, 3, 31)
query = _query_journeys(SERVICE, DIRECTION, date).order_by("departure")
# Test in different time zones, all queries should return the same results
_set_timezone("Europe/London")
result_gb = query.all()
_set_timezone("UTC")
result_utc = query.all()
expected = [
(400012, datetime.datetime(2019, 3, 31, 0, 15, tzinfo=GMT)),
(400013, datetime.datetime(2019, 3, 31, 0, 45, tzinfo=GMT)),
(400016, datetime.datetime(2019, 3, 31, 2, 15, tzinfo=BST)),
(400017, datetime.datetime(2019, 3, 31, 2, 45, tzinfo=BST)),
(400018, datetime.datetime(2019, 3, 31, 3, 15, tzinfo=BST)),
(400019, datetime.datetime(2019, 3, 31, 3, 45, tzinfo=BST)),
(400020, datetime.datetime(2019, 3, 31, 4, 15, tzinfo=BST)),
(400021, datetime.datetime(2019, 3, 31, 4, 45, tzinfo=BST)),
(400022, datetime.datetime(2019, 3, 31, 5, 15, tzinfo=BST)),
(400023, datetime.datetime(2019, 3, 31, 5, 45, tzinfo=BST)),
(400024, datetime.datetime(2019, 3, 31, 6, 15, tzinfo=BST))
]
assert result_gb == expected
assert result_utc == expected
def test_journeys_dst_october(set_night_times):
    """DST fall-back: journeys between 0100-0200 local time run twice on the
    night the clocks change from BST to GMT."""
    # Journeys between 0100-0200 repeated as timezone changes from BST to GMT
    date = datetime.date(2019, 10, 27)
    query = _query_journeys(SERVICE, DIRECTION, date).order_by("departure")
    # Test in different time zones, all queries should return the same results
    _set_timezone("Europe/London")
    result_gb = query.all()
    _set_timezone("UTC")
    result_utc = query.all()
    # 400014/400015 appear twice: once with the BST offset, once with GMT.
    expected = [
        (400012, datetime.datetime(2019, 10, 27, 0, 15, tzinfo=BST)),
        (400013, datetime.datetime(2019, 10, 27, 0, 45, tzinfo=BST)),
        (400014, datetime.datetime(2019, 10, 27, 1, 15, tzinfo=BST)),
        (400015, datetime.datetime(2019, 10, 27, 1, 45, tzinfo=BST)),
        (400014, datetime.datetime(2019, 10, 27, 1, 15, tzinfo=GMT)),
        (400015, datetime.datetime(2019, 10, 27, 1, 45, tzinfo=GMT)),
        (400016, datetime.datetime(2019, 10, 27, 2, 15, tzinfo=GMT)),
        (400017, datetime.datetime(2019, 10, 27, 2, 45, tzinfo=GMT)),
        (400018, datetime.datetime(2019, 10, 27, 3, 15, tzinfo=GMT)),
        (400019, datetime.datetime(2019, 10, 27, 3, 45, tzinfo=GMT)),
        (400020, datetime.datetime(2019, 10, 27, 4, 15, tzinfo=GMT)),
        (400021, datetime.datetime(2019, 10, 27, 4, 45, tzinfo=GMT)),
        (400022, datetime.datetime(2019, 10, 27, 5, 15, tzinfo=GMT)),
        (400023, datetime.datetime(2019, 10, 27, 5, 45, tzinfo=GMT)),
        (400024, datetime.datetime(2019, 10, 27, 6, 15, tzinfo=GMT))
    ]
    assert result_gb == expected
    assert result_utc == expected
def test_query_timetable_fields(load_db):
    """The timetable query exposes exactly the expected named columns, in order."""
    # Should be Sunday 3rd March 2019
    service_date = datetime.date(2019, 3, 3)
    assert service_date.isoweekday() == 7
    rows = _query_timetable(SERVICE, DIRECTION, service_date).all()
    expected_fields = (
        "journey_id",
        "departure",
        "local_operator_code",
        "operator_code",
        "operator_name",
        "note_code",
        "note_text",
        "stop_point_ref",
        "timing_point",
        "utc_arrive",
        "utc_depart",
        "arrive",
        "depart",
    )
    assert rows[0]._fields == expected_fields
def test_query_timetable_sunday(load_db):
    """Full timetable rows for the Sunday service: 13 journeys x 2 stops."""
    # Should be Sunday 3rd March 2019
    date = datetime.date(2019, 3, 3)
    result = _query_timetable(SERVICE, DIRECTION, date).all()
    assert len(result) == 2 * 13
    first_journey = [r for r in result if r.journey_id == 400012]
    # First stop is a timing point with no arrival time; the second stop has
    # matching arrive/depart values 4m35s later.
    assert first_journey == [
        (400012, datetime.datetime(2019, 3, 3, 8, 30, tzinfo=GMT),
         "ATC", "ATCS", "AT Coaches", None, None, "490000015G", True, None,
         datetime.datetime(2019, 3, 3, 8, 30), None, "0830"),
        (400012, datetime.datetime(2019, 3, 3, 8, 30, tzinfo=GMT),
         "ATC", "ATCS", "AT Coaches", None, None, "490008638S", False,
         datetime.datetime(2019, 3, 3, 8, 34, 35),
         datetime.datetime(2019, 3, 3, 8, 34, 35), "0834", "0834"),
    ]
def test_timetable_empty():
    """A timetable built with no journeys is falsy and completely empty."""
    empty = Timetable(0, False, datetime.date(2019, 3, 3), [], {})
    assert not empty
    assert empty.operators == {}
    assert empty.notes == {}
    assert empty.head == []
    assert empty.rows == []
def test_timetable_sunday(load_db):
    """End-to-end Timetable object for the Sunday service."""
    date = datetime.date(2019, 3, 3)
    tt = Timetable(SERVICE, DIRECTION, date)
    assert tt.service_id == SERVICE
    assert tt.direction == DIRECTION
    assert tt.date == date
    assert tt.sequence == ["490000015G", "490008638S"]
    assert tt.stops.keys() == {"490000015G", "490008638S"}
    assert tt.operators == {"ATC": "AT Coaches"}
    assert tt.notes == {}
    # 13 journeys with consecutive ids from 400012, all by operator ATC, no notes.
    assert tt.head == [(400012 + i, "ATC", None) for i in range(13)]
    assert [r.stop.atco_code for r in tt.rows] == ["490000015G", "490008638S"]
    assert [len(r.times) for r in tt.rows] == [13, 13]
    # Test first journey in timetable
    assert [r.times[0] for r in tt.rows] == [
        TimetableStop("490000015G", None, "0830", True, None,
                      datetime.datetime(2019, 3, 3, 8, 30)),
        TimetableStop("490008638S", "0834", "0834", False,
                      datetime.datetime(2019, 3, 3, 8, 34, 35),
                      datetime.datetime(2019, 3, 3, 8, 34, 35))
    ]
|
<gh_stars>1-10
# sparse_tester
# Tester file
# import the necessary packages
import numpy as np
import matplotlib.pyplot as plt
from numpy import array, zeros, diag, diagflat, dot
import pandas as pd
from keras.models import Sequential, load_model
from scipy.sparse.linalg import spsolve
import os
import tensorflow as tf
import time
from scipy.sparse import linalg
from scipy import sparse
from scipy import linalg as la
import scipy
from scipy.sparse import csr_matrix
from scipy.spatial import distance
#from iterative_solvers import sparse_gauss_seidel_scipy
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
PROBLEM_SIZE = 100000
DATA_LENGTH = 1000
def is_diagonally_dominant(x):
    """Return True if every row of ``x`` is strictly diagonally dominant.

    Strict dominance |a_ii| > sum_{j != i} |a_ij| is tested in the equivalent
    form 2*|a_ii| > sum_j |a_ij| (the full row sum, diagonal included).
    """
    magnitudes = np.abs(x)
    row_totals = np.sum(magnitudes, axis=1)
    return np.all(2 * np.diag(magnitudes) > row_totals)
def normalize(x):
    """Scale ``x`` so its largest-magnitude entry becomes +1 or -1.

    Divides by ``x.max()`` when it dominates in magnitude, otherwise by
    ``abs(x.min())``, so the extreme entry maps to +/-1.

    Bug fix: an all-zero input previously divided by zero (NaNs plus a
    runtime warning); it is now returned unchanged, which is already the
    normalized form.
    """
    maximum_element = x.max()
    minimum_element = x.min()
    scale = maximum_element if maximum_element >= abs(minimum_element) else abs(minimum_element)
    if scale == 0:
        return x
    return x / scale
def get_diagonals(x):
    """Concatenate the sub-, main and super-diagonal of ``x`` into one flat vector."""
    bands = [x.diagonal(k=offset) for offset in (-1, 0, 1)]
    return np.concatenate(bands)
def get_diag_matrix(x):
    """Rebuild a dense tridiagonal matrix from a flat encoding ``x``.

    ``x`` has length 4*size - 2 and is laid out as: sub-diagonal (size-1
    values), main diagonal (size values), super-diagonal (size-1 values),
    then the right-hand-side vector (size values, not used here -- see
    ``get_vector``).

    Cleanup: removed the unused local ``vector`` and dead debug prints.
    """
    size = (len(x) + 2) // 4
    sub = x[:size - 1]
    main = x[size - 1:2 * size - 1]
    sup = x[2 * size - 1:3 * size - 2]
    return np.diagflat(sub, -1) + np.diagflat(main) + np.diagflat(sup, 1)
def get_vector(x):
    """Extract the trailing right-hand-side vector from the flat encoding ``x``."""
    size = int((len(x) + 2) / 4)
    return x[3 * size - 2:]
# Creates a diagonally dominant tridiagonal matrix (positive semi-definite)
def create_three_band_matrix(size):
    """Build a sparse CSR tridiagonal matrix: -4 on the main diagonal,
    1 on both off-diagonals (strictly diagonally dominant)."""
    bands = [-4 * np.ones(size), np.ones(size), np.ones(size)]
    offsets = [0, 1, -1]
    return scipy.sparse.dia_matrix((bands, offsets), shape=(size, size)).tocsr(copy=False)
def create_five_band_matrix(size, factor):
    """Build a random five-band sparse CSR matrix whose main diagonal is scaled
    to dominate the four off-diagonal bands.

    Bug fix: the main diagonal previously summed ``abs(udiag2)`` twice and
    ignored ``abs(ldiag2)``, so rows with |ldiag2| > |udiag2| could lose
    diagonal dominance.
    """
    udiag = (np.random.rand(size) + np.random.normal(0, 1, size)) * np.random.choice([-1, 1])
    ldiag = (np.random.rand(size) + np.random.normal(0, 1, size)) * np.random.choice([-1, 1])
    udiag2 = (np.random.rand(size) + np.random.normal(0, 1, size)) * np.random.choice([-1, 1])
    ldiag2 = (np.random.rand(size) + np.random.normal(0, 1, size)) * np.random.choice([-1, 1])
    diag = (abs(udiag) + abs(ldiag) + abs(udiag2) + abs(ldiag2)
            + abs(np.random.normal(0, factor, size))) * factor * np.random.choice([-1, 1])
    matrix = scipy.sparse.dia_matrix(([diag, udiag, ldiag, udiag2, ldiag2],
                                      [0, 1, -1, 2, -2]),
                                     shape=(size, size)).tocsr(copy=False)
    return matrix
def create_vector(size):
    """Return a length-``size`` vector of samples drawn uniformly from [0, 1).

    Idiom fix: replaces the original element-by-element Python loop with a
    single vectorized draw.
    """
    return np.random.uniform(0, 1, size)
def sparse_jacobi(A, b, x, maxIter, tolerance):
    """Solve A x = b by Jacobi iteration, printing the mean residual every 5 steps.

    A is expected to be a scipy sparse matrix; b and x dense. Returns the last
    iterate (converged or not).
    """
    # Split A = D + LU, where D is the diagonal and LU the off-diagonal rest.
    D = A.diagonal()
    # NOTE(review): subtracting a dense ``diag(D)`` from sparse A densifies LU
    # (result is an np.matrix) -- confirm this is intended for large systems.
    LU = A - diag(D)
    for ii in range(maxIter):
        #error = A.dot(x) - b
        # NOTE(review): ``np.linalg.inv(diag(D))`` builds and inverts an n x n
        # dense matrix on EVERY iteration (O(n^3)); elementwise division by D,
        # hoisted out of the loop, would be mathematically equivalent.
        x = np.linalg.inv(diag(D)).dot(-LU.dot(x) + b)
        # Mean absolute residual of the updated iterate.
        new_error = A.dot(x) - b
        if ii%5 == 0:
            print(ii, abs(new_error).mean())
        if abs(new_error).mean() <= tolerance: # converged
            print ("Converged at iteration:", ii)
            break
    return x
def Jacobi(A, b, guess, MAXITER, TOLL):
    """Solve A x = b with the matrix-form Jacobi method.

    Iterates x_{k+1} = T x_k + c with T = -D^-1 (L + U), c = D^-1 b, stopping
    when the relative 1-norm step falls below ``TOLL`` or after ``MAXITER``
    iterations. Progress is printed every 10 steps. Returns the last iterate.

    Bug fix: ``sparse.tril``/``sparse.triu`` include the main diagonal by
    default, so L + U used to equal A + D and the iteration matrix carried the
    wrong sign on the identity term. Strict triangles (k=-1 / k=1) restore the
    textbook splitting T = I - D^-1 A. The diagonal inverse is also built
    directly from reciprocals instead of a sparse matrix inversion.
    """
    xk = guess
    D_inv = sparse.diags(1.0 / A.diagonal(), 0, format='csr')
    L = sparse.tril(A, k=-1, format='csr')  # strictly lower triangle
    U = sparse.triu(A, k=1, format='csr')   # strictly upper triangle
    T = -D_inv * (L + U)
    c = D_inv * b
    i = 0
    while i < MAXITER:
        x = T * xk + c
        err = np.linalg.norm(x - xk, 1) / np.linalg.norm(x, 1)
        xk = x
        i += 1
        if i % 10 == 0:
            print(i, err)
        if err < TOLL:
            print("Converged at iteration:", i)
            break
    return xk
def GaussSeidel(A, b, guess, MAXITER, TOLL):
    """Solve A x = b with the matrix-form Gauss-Seidel method.

    Iterates x_{k+1} = T x_k + c with T = -(D + L)^-1 U, c = (D + L)^-1 b,
    stopping when the relative 1-norm step falls below ``TOLL`` or after
    ``MAXITER`` iterations. Progress is printed every 10 steps.

    Bug fix: the preconditioner used D + tril(A), which double-counts the
    diagonal (``sparse.tril`` already includes it), and U kept the diagonal
    too. The correct splitting is M = tril(A) (diagonal + strict lower) and
    U = triu(A, k=1) (strict upper).
    """
    xk = guess
    M = sparse.tril(A, format='csr')        # D + strictly lower triangle
    U = sparse.triu(A, k=1, format='csr')   # strictly upper triangle
    M_inv = linalg.inv(M)
    T = -M_inv * U
    c = M_inv * b
    i = 0
    while i < MAXITER:
        x = T * xk + c
        err = np.linalg.norm(x - xk, 1) / np.linalg.norm(x, 1)
        xk = x
        i += 1
        if i % 10 == 0:
            print(i, err)
        if err < TOLL:
            print("Converged at iteration:", i)
            break
    return xk
# def sparse_gauss_seidel(A, b, x_k, tol=1e-6, maxiters=200):
# """Calculate the solution to the sparse system Ax = b via the Gauss-Seidel
# Method.
# Inputs:
# A ((n,n) csr_matrix): An nxn sparse CSR matrix.
# b ((n,) ndarray): A vector of length n.
# tol (float, opt): the convergence tolerance.
# maxiters (int, opt): the maximum number of iterations to perform.
# Returns:
# x ((n,) ndarray): the solution to system Ax = b.
# """
# A_a = np.copy(A.toarray())
# D = np.diag(A_a)
# #D = sparse.diags(A.diagonal(), format = 'csr',).toarray()
# #print(D.shape())
# d_inv = []
# for i in range(len(b)):
# d_inv.append(1./D[i])
# x_kmasuno = np.zeros_like(x_k)
# this = False
# tries = maxiters
# error = []
# while this is False and tries > 0:
# for i in range(len(x_k)):
# rowstart = A.indptr[i]
# rowend = A.indptr[i+1]
# Aix = np.dot(A.data[rowstart:rowend], x_k[A.indices[rowstart:rowend]])
# x_kmasuno[i] = x_k[i] + d_inv[i]*(b[i] - Aix)
# if ((la.norm((x_k - x_kmasuno))) <= tol):
# #if abs(A.dot(x_kmasuno) - b).mean() <= tol:
# this = True
# difference = (A.dot( x_kmasuno ) - b)
# error.append( la.norm( difference)) # ''', ord=np.inf'''
# else:
# difference = (A.dot( x_kmasuno ) - b)
# error.append(la.norm(difference))
# x_k = np.copy(x_kmasuno)
# tries -= 1
# if tries%10 == 0:
# print ("Iteration:", maxiters - tries)
# #b = np.zeros_like((x_k))
# roar = np.column_stack((b,x_k))
# print ("Converged at iteration:", maxiters - tries)
# return x_k
# def sparse_gauss_seidel_scipy(A, b, x,tol=1e-6, maxiters=200):
# """Calculate the solution to the sparse system Ax = b via the Gauss-Seidel
# Method.
# Inputs:
# A ((n,n) csr_matrix): An nxn sparse CSR matrix.
# b ((n,) ndarray): A vector of length n.
# tol (float, opt): the convergence tolerance.
# maxiters (int, opt): the maximum number of iterations to perform.
# Returns:
# x ((n,) ndarray): the solution to system Ax = b.
# """
# #A_a = np.copy(A.toarray())
# #D = np.diag(A_a)
# D = A.diagonal()
# #D = sparse.diags(A.diagonal(), format = 'csr',).toarray()
# d_inv= []
# for i in range(len(b)):
# d_inv.append(1./D[i])
# x_k = np.copy(x) #cambio de direccion
# x_kmasuno = np.zeros(len(b))
# this = False
# tries = maxiters
# error = []
# while this is False and tries > 0:
# for i in range(len(x_k)):
# rowstart = A.indptr[i]
# rowend = A.indptr[i+1]
# Aix = np.dot(A.data[rowstart:rowend], x_k[A.indices[rowstart:rowend]])
# x_kmasuno[i] = x_k[i] + d_inv[i]*(b[i] - Aix)
# if ((la.norm((x_k - x_kmasuno), ord=np.inf)) <= tol):
# this = True
# difference = (A.dot( x_kmasuno ) - b)
# error.append( la.norm( difference, ord=np.inf))
# else:
# difference = (A.dot( x_kmasuno ) - b)
# error.append(la.norm( difference, ord=np.inf))
# x_k = np.copy(x_kmasuno)
# tries -= 1
# if tries%10 == 0:
# print ("Iteration:", maxiters - tries)
# #b = np.zeros_like((x_k))
# roar = np.column_stack((b,x_k))
# print ("Converged at iteration:", maxiters - tries)
# return roar[:,1:]
def sparse_gauss_seidel_scipy(A, b, x, tol=1e-6, maxiters=2500):
    """Calculate the solution to the sparse system Ax = b by fixed-point
    iteration on the residual, walking the CSR structure row by row.

    Inputs:
        A ((n,n) csr_matrix): An nxn sparse CSR matrix.
        b ((n,) ndarray): A vector of length n.
        x ((n,) ndarray): initial guess.
        tol (float, opt): the convergence tolerance (Euclidean step size).
        maxiters (int, opt): the maximum number of iterations to perform.
    Returns:
        x ((n,) ndarray): the (approximate) solution to system Ax = b.

    NOTE(review): despite the name, each sweep writes into a separate buffer
    and reads only the previous iterate, i.e. this is a Jacobi-style
    simultaneous update rather than true Gauss-Seidel.

    Cleanup (behavior unchanged): the reciprocal diagonal is vectorized
    instead of built with a Python loop, the ``this is False`` flag test is
    idiomatic, and the per-check Euclidean distance is computed once per sweep.
    The final message prints even when the iteration budget runs out, exactly
    as before.
    """
    d_inv = 1.0 / A.diagonal()
    x_k = np.copy(x)
    x_next = np.zeros(len(b))
    converged = False
    tries = maxiters
    while not converged and tries > 0:
        for i in range(len(x_k)):
            # Row i of the CSR matrix: columns A.indices[rowstart:rowend].
            rowstart = A.indptr[i]
            rowend = A.indptr[i + 1]
            Aix = np.dot(A.data[rowstart:rowend], x_k[A.indices[rowstart:rowend]])
            x_next[i] = x_k[i] + d_inv[i] * (b[i] - Aix)
        step = distance.euclidean(x_k, x_next)
        if step <= tol:
            converged = True
        if tries % 100 == 0:
            print("Iteration:", maxiters - tries, "Distance:", step)
        x_k = np.copy(x_next)
        tries -= 1
    print("Converged at iteration:", maxiters - tries)
    return x_k
def save_sparse_csr(filename, array):
    """Persist a CSR matrix to an .npz archive (data/indices/indptr/shape keys)."""
    components = {
        "data": array.data,
        "indices": array.indices,
        "indptr": array.indptr,
        "shape": array.shape,
    }
    np.savez(filename, **components)
def load_sparse_csr(filename):
    """Rebuild a CSR matrix from an .npz archive written by ``save_sparse_csr``."""
    archive = np.load(filename)
    parts = (archive['data'], archive['indices'], archive['indptr'])
    return csr_matrix(parts, shape=archive['shape'])
def get_prediction(vector):
    """Run the pre-trained Keras model on a max-normalized copy of ``vector``
    and rescale the prediction back to the original magnitude."""
    peak = np.max(vector)
    model_input = (vector / peak).reshape(1, PROBLEM_SIZE, 1)
    model = load_model("fd_{}model_{}examples.h5".format(PROBLEM_SIZE, DATA_LENGTH))
    prediction = model.predict(model_input, 1)
    prediction *= peak
    return prediction
# Problem (A+N)x=b, where A is tridiagonal diag. dominant, N random noise
# The main diagonal of A has values 25+2*N(10,2), b has uniform values between [-25,25]
# The noise matrix N is a dense matrix with random values [0,7.5]
if __name__ == "__main__":
    # Build the test system: sparse tridiagonal A and a random RHS of shape (n, 1).
    matrix = create_three_band_matrix(PROBLEM_SIZE)
    ########################################################
    # norm_A = scipy.sparse.linalg.norm(matrix)
    # norm_invA = scipy.sparse.linalg.norm(scipy.sparse.linalg.inv(matrix))
    # cond = norm_A*norm_invA
    # print("Matrix Condition Number:", cond)
    ########################################################
    vector = 100*np.random.rand(PROBLEM_SIZE, 1)
    # plt.spy(matrix)
    # plt.show()
    #noise = 1.5*np.random.rand(PROBLEM_SIZE, PROBLEM_SIZE)
    #matrix += noise
    # Direct sparse solve -- used below as the reference solution.
    solution = spsolve(matrix, vector)
    # matrix_max = np.max(matrix)
    # vector_max = np.max(vector)
    # matrix_norm = matrix/matrix_max
    # vector_norm = vector/vector_max
    # data_in = np.append(get_diagonals(matrix_norm), vector_norm)
    # test = data_in.reshape(1, 4*PROBLEM_SIZE - 2, 1)
    # model = load_model("{}model_{}examples.h5".format(PROBLEM_SIZE, DATA_LENGTH))
    # model_guess = model.predict(test, 1)
    # model_guess /= matrix_max
    # model_guess *= vector_max
    # Persist the system so a run can be reproduced/debugged later.
    save_sparse_csr("C1.npz", matrix)
    #b_df = pd.DataFrame(vector)
    #b_df.to_csv("b.csv", header=None, index=None)
    np.save("C1.npy", vector)
    # Time the neural-network prediction of the solution vector.
    start = time.process_time()
    model_guess = get_prediction(vector)
    print("Time:", time.process_time()-start)
    #df = pd.DataFrame(model_guess)
    #df.to_csv("model_guess.csv", header=None, index=None)
    np.save("model_guessC1.npy", model_guess)
    print("Model Guess MSE:", ((model_guess - solution.T)**2).mean())
    #prediction = np.array(model_guess)
    #prediction = prediction.reshape(3,1)
    # print("Solving using Gauss Seidel...")
    # init_guess = np.zeros(PROBLEM_SIZE)
    # print("With initial guess = 0")
    # start = time.process_time()
    # #sparse_gauss_seidel(matrix, vector, init_guess)
    # GaussSeidel(matrix, vector, init_guess, 2000, 10e-6).T
    # end = time.process_time()
    # print("Time:", end-start)
    # print("With initial guess equal to model prediction")
    # start = time.process_time()
    # #sparse_gauss_seidel(matrix, vector, init_guess)
    # GaussSeidel(matrix, vector, model_guess.T, 2000, 10e-6).T
    # end = time.process_time()
    # print("Time:", end-start) #doesn't account for cache
    #########################################################################
    # Benchmark the iterative solver from a zero guess vs. the model's guess.
    print("Solving using sparse Gauss Seidel...")
    init_guess = np.zeros(PROBLEM_SIZE)
    print("With initial guess = 0")
    start = time.process_time()
    x1 = sparse_gauss_seidel_scipy(matrix, vector, init_guess, maxiters=5000)
    #GaussSeidel(matrix, vector, init_guess, 2000, 10e-6).T
    end = time.process_time()
    print("Time:", end-start)
    print("With initial guess equal to model prediction")
    start = time.process_time()
    x2 = sparse_gauss_seidel_scipy(matrix, vector, model_guess.T, maxiters=5000)
    #GaussSeidel(matrix, vector, model_guess.T, 2000, 10e-6).T
    end = time.process_time()
    print("Time:", end-start) #doesn't account for cache
    # NOTE(review): these print mean(x_true^2 - x^2), a signed difference of
    # squares, not a mean-squared error -- confirm the intended metric.
    print((solution**2 - x1**2).mean())
    print((solution**2 - x2**2).mean())
    #########################################################################
    # print(model.summary())
    # print("Solving using Jacobi...")
    # init_guess = np.zeros(PROBLEM_SIZE)
    # print("With initial guess = 0")
    # start = time.process_time()
    # sparse_jacobi(matrix, vector, init_guess, 500, 10e-6)[:,0]
    # end = time.process_time()
    # #print("Time:", end-start) #doesn't account for cache
    # print("With initial guess equal to model prediction")
    # start = time.process_time()
    # sparse_jacobi(matrix, vector, model_guess.T, 500, 10e-6)[:,0]
    # end = time.process_time()
    # #print("Time:", end-start) #doesn't account for cache
<filename>coord2vec/pipelines/build_CLSTRs_cv.py
import logging
import random
import time
from datetime import datetime
from functools import partial
import numpy as np
import pandas as pd
from lagoon.dags import DAG
from lagoon.executors.local_executor import LocalExecutor
from simpleai.search.local import hill_climbing_weighted_stochastic, beam
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import OneClassSVM
from coord2vec.common.parallel.multiproc_util import parmap
from coord2vec.config import CLSTR_RESULTS_DIR, SCORES_TABLE, BUILDING_EXPERIMENT_NAME
from coord2vec.evaluation.tasks.one_class_baseline import BaselineModel
from coord2vec.evaluation.tasks.tasks_utils import hash_geoseries
from coord2vec.feature_extraction.feature_bundles import create_CLSTR_features
from coord2vec.feature_extraction.features.osm_features.building_scores import BuildingScores
from coord2vec.feature_extraction.features_builders import FeaturesBuilder
from coord2vec.pipelines.lagoon_utils.auto_stage import AutoStage
from coord2vec.pipelines.lagoon_utils.expr_saver_task import ExprSaverTask
from coord2vec.pipelines.lagoon_utils.for_wrapper import ForInputTask, ForCalcTask, define_for_dependencies
from coord2vec.pipelines.lagoon_utils.lambda_task import LambdaTask
def _create_specific_task():
    """Build the CLSTR handler task: a polygon feature builder plus a bank of
    one-class candidate models."""
    features = create_CLSTR_features()
    poly_feature_builder = FeaturesBuilder(features, cache_table="LOC_CLSTR_features")
    # TODO: do we want one-class ? our problem is not one-class
    models = {'One Class SVM': OneClassSVM(),
              'isolation forest': IsolationForest(),
              'Gaussians': EllipticEnvelope(),
              'baseline': BaselineModel()
              }
    # NOTE(review): HandlerBuildCLSTRs is not imported anywhere in this module,
    # so calling this function raises NameError -- confirm the missing import.
    specific_task = HandlerBuildCLSTRs(poly_feature_builder, models=models)  # normal
    return specific_task
# def build_model
def run_experiment_lagoon():
    """Assemble and execute the CLSTR-building lagoon DAG: load the dataset,
    split polygons into buildings, train/predict per cross-validation fold,
    merge the fold results and persist them under CLSTR_RESULTS_DIR."""
    np.random.seed(42)
    random.seed(42)
    S_program = AutoStage("program")
    # Stage wiring: each LambdaTask declares the names of the outputs it emits.
    get_task = LambdaTask(_create_specific_task, ["task"])
    S_program.add_auto(get_task)
    get_dataset = LambdaTask(lambda task: task.get_dataset(), ["geos", "y"])
    S_program.add_auto(get_dataset)
    # transform the CLSTRs to features
    # transform = LambdaTask(lambda task, geos, y: task.transform(geos[y]), ["X_true_CLSTR_df"])
    # S_program.add_auto(transform)
    # convert to buildings
    extract_buildings = LambdaTask(lambda task, geos, y:
                                   task.extract_buildings_from_polygons(geos, y, return_source=True),
                                   ["building_gs", "buildings_y", "source_indices"])
    S_program.add_auto(extract_buildings)

    def train_predict_on_split(task, building_gs, buildings_y, source_indices, geos, y,
                               source_train_indices, source_test_indices):
        # Map the source-polygon train/test split onto the individual buildings.
        building_train_indices = np.isin(source_indices, source_train_indices)
        building_test_indices = np.isin(source_indices, source_test_indices)
        # fetch train-set and fit
        buildings_train_gs = building_gs.iloc[building_train_indices].reset_index(drop=True)
        y_train_buildings = buildings_y[building_train_indices]
        buildings_test_gs = building_gs.iloc[building_test_indices].reset_index(drop=True)
        y_test_buildings = buildings_y[building_test_indices]
        train_true_geos = geos[np.isin(range(len(geos)), source_train_indices) & y]  # train-test in CLSTRs
        test_true_geos = geos[np.isin(range(len(geos)), source_test_indices) & y]  # train-test in CLSTRs
        fpb = task.embedder  # feature extractor for polygons
        # add the building scores feature
        train_hash = hash_geoseries(geos[source_train_indices])
        fpb.features += [BuildingScores(SCORES_TABLE, BUILDING_EXPERIMENT_NAME, 'BalancedRF1000',
                                        # TODO: doesn't match current MetaModel naming
                                        train_geom_hash=train_hash, radius=radius) for radius in [0, 25]]
        heuristic_guiding_model = BaselineModel()
        heuristic_guiding_model.fit(task.transform(train_true_geos))
        # for i in trange(5, desc="Training CLSTR heuristic"):
        #     potential_CLSTRs_test = parmap(lambda b: building_to_CLSTR(b, fpb, heuristic_guiding_model),
        #             random.sample(buildings_train_gs[y_train]), use_tqdm=True, desc="Calculating potential CLSTRs")
        #
        #     heuristic_guiding_model = OneClassSVM()
        #     heuristic_guiding_model.fit(task.transform(train_true_geos))
        # TODO: do smarter choice of what buildings to start from ?
        score_extractor = FeaturesBuilder(
            [BuildingScores(SCORES_TABLE, BUILDING_EXPERIMENT_NAME, 'BalancedRF1000', radius=0,
                            train_geom_hash=train_hash)])
        building_scores_sorted = score_extractor.transform(buildings_test_gs)['building_scores_avg_0m'].sort_values(
            ascending=False)
        building_scores = pd.Series(index=buildings_test_gs.iloc[building_scores_sorted.index],
                                    data=building_scores_sorted.values)
        # building_scores = gpd.GeoDataFrame(
        #     zip(buildings_test_gs, np.random.random(len(buildings_test_gs))),
        #     columns=['geometry', 'score'], geometry='geometry').set_index('geometry')
        # TODO: do smarter choice of what buildings to start from. now top scoring 250
        # NOTE(review): this samples 500 of the top-1000 positions at random,
        # not the "top scoring 250" the TODO above describes -- confirm intent.
        best_test_buildings_with_scores = building_scores.iloc[random.sample(range(1000), 500)]
        # NOTE(review): building_to_CLSTR is not defined or imported in this
        # module; this call raises NameError unless it is injected elsewhere.
        potential_CLSTRs_test = parmap(lambda b: building_to_CLSTR(b, fpb, heuristic_guiding_model,
                                                                   partial(beam, beam_size=15, iterations_limit=15)),
                                       best_test_buildings_with_scores.index, use_tqdm=True,
                                       desc="Calculating potential CLSTRs", keep_child_tqdm=True, nprocs=16)
        # TODO: postprocessing, which CLSTRs to give. Related to how the fit together.
        print([p[1] for p in potential_CLSTRs_test])
        print([len(p[0].buildings) for p in potential_CLSTRs_test])
        sorted_potential_CLSTRs_test = list(sorted(potential_CLSTRs_test, key=lambda p: p[1], reverse=True))
        # TODO: choose with intel, depending on pluga, etc.
        best_potential_CLSTRs_test = pd.Series(index=[p[0].hull for p in sorted_potential_CLSTRs_test],
                                               data=MinMaxScaler().fit_transform(
                                                   [[p[1]] for p in sorted_potential_CLSTRs_test])[:,
                                                    0])  # normalize scores, IMPORTANT
        print(best_potential_CLSTRs_test)
        return building_scores, geos.iloc[source_train_indices], y_train_buildings, geos.iloc[
            source_test_indices], test_true_geos, y_test_buildings, best_potential_CLSTRs_test

    # 4-fold cross-validation over the source polygons.
    for_input_task = ForInputTask(lambda task, geos, y: (task.kfold_split(geos, y, n_splits=4),),
                                  ["source_train_indices", "source_test_indices"], 4)
    S_program.add_dependency([get_task, get_dataset], for_input_task)
    for_params = ["building_scores", "train_geos", "y_train_buildings", "test_geos", "test_true_geos",
                  "y_test_buildings", "best_potential_CLSTRs_test"]
    for_train_predict_on_split = ForCalcTask(train_predict_on_split,
                                             for_params, [get_task, get_dataset, extract_buildings])

    def merge_predict_results(building_scores, train_geos, y_train_buildings, test_geos, test_true_geos,
                              y_test_buildings, best_potential_CLSTRs_test):
        # Zip the parallel per-fold output lists into one dict per fold.
        return [{'building_scores': building_scores[i], 'train_geos': train_geos[i],
                 'y_train_buildings': y_train_buildings[i],
                 'test_geos': test_geos[i], 'test_true_geos': test_true_geos[i],
                 'y_test_buildings': y_test_buildings[i], 'best_potential_CLSTRs_test': best_potential_CLSTRs_test[i]}
                for i in range(len(building_scores))]

    save_params = ["model_results"]
    for_train_predict_on_split_merge = LambdaTask(merge_predict_results, save_params)
    define_for_dependencies(S_program, for_train_predict_on_split, for_input_task, for_train_predict_on_split_merge)
    # Results land in a timestamped directory under CLSTR_RESULTS_DIR.
    expr_path = f"{CLSTR_RESULTS_DIR}/{datetime.now().isoformat(' ', 'seconds')}"
    saver = ExprSaverTask(expr_path, save_params)
    S_program.add_dependency(for_train_predict_on_split_merge, saver)
    st = time.time()
    main_dag = DAG("polygon_main")
    main_dag.add(S_program)
    main_dag.visualize()
    a = LocalExecutor(num_workers=4, logging_level=logging.INFO).execute(main_dag)  # , cache_dir="lagoon_cache"
    print(f"total runtime: {(time.time() - st) / 60.} m")
if __name__ == "__main__":
    # Seed both RNG streams for reproducibility before launching the DAG
    # (run_experiment_lagoon seeds them again defensively).
    np.random.seed(42)
    random.seed(42)
    run_experiment_lagoon()
|
# This is an autogenerated file
#
# Generated with NonLinearForceModel
from typing import Dict,Sequence,List
from dmt.entity import Entity
from dmt.blueprint import Blueprint
from .blueprints.nonlinearforcemodel import NonLinearForceModelBlueprint
from typing import Dict
from sima.riflex.dampingmatrixcalculationoption import DampingMatrixCalculationOption
from sima.riflex.hydrodynamicforceindicator import HydrodynamicForceIndicator
from sima.riflex.slugforcespecification import SlugForceSpecification
from sima.sima.moao import MOAO
from sima.sima.scriptablevalue import ScriptableValue
class NonLinearForceModel(MOAO):
    # NOTE: generated code (see file header) -- change the generator, not this file.
    # Public attributes are backed by name-mangled private fields via properties.
    """
    Keyword arguments
    -----------------
    name : str
         (default "")
    description : str
         (default "")
    _id : str
         (default "")
    scriptableValues : List[ScriptableValue]
    internalSlugFlow : bool
         Indicator for modelling forces from internal slug flow(default False)
    hydrodynamicForce : HydrodynamicForceIndicator
         Indicator for hydrodynamic force model
    maxHit : int
         Maximum number of load iterations (linear analysis). A negative value gives print of convergence for each step.(default 5)
    forceIterationConvergence : float
         Convergence control parameter for force iteration(default 0.01)
    startUpDuration : float
         Duration of start-up procedure(default 10.0)
    ruptureRelease : bool
         Indicator for rupture / release(default False)
    connectorNumber : int
         Global ball-joint connector ID in the RIFLEX FEM model(default 0)
    timeStepNumForRelease : int
         Time step number for release (nonlinear analysis only). In linear analysis the connector will be released at the first step.(default 0)
    dampingMatrixCalculation : DampingMatrixCalculationOption
         Option for calculation of proportional damping matrix in nonlinear analysis. Irrelevant for linear analysis.
    slugForceSpecification : SlugForceSpecification
    """

    def __init__(self , name="", description="", _id="", internalSlugFlow=False, hydrodynamicForce=HydrodynamicForceIndicator.NO_FORCE_ITERATION_VELOCITIES, maxHit=5, forceIterationConvergence=0.01, startUpDuration=10.0, ruptureRelease=False, connectorNumber=0, timeStepNumForRelease=0, dampingMatrixCalculation=DampingMatrixCalculationOption.CONSTANT_PROPORTIONAL, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.description = description
        self._id = _id
        self.scriptableValues = list()
        self.internalSlugFlow = internalSlugFlow
        self.hydrodynamicForce = hydrodynamicForce
        self.maxHit = maxHit
        self.forceIterationConvergence = forceIterationConvergence
        self.startUpDuration = startUpDuration
        self.ruptureRelease = ruptureRelease
        self.connectorNumber = connectorNumber
        self.timeStepNumForRelease = timeStepNumForRelease
        self.dampingMatrixCalculation = dampingMatrixCalculation
        self.slugForceSpecification = None
        # Any remaining non-dict keyword argument is applied as an attribute.
        for key, value in kwargs.items():
            if not isinstance(value, Dict):
                setattr(self, key, value)

    @property
    def blueprint(self) -> Blueprint:
        """Return blueprint that this entity represents"""
        return NonLinearForceModelBlueprint()

    @property
    def name(self) -> str:
        """"""
        return self.__name

    @name.setter
    def name(self, value: str):
        """Set name"""
        self.__name = str(value)

    @property
    def description(self) -> str:
        """"""
        return self.__description

    @description.setter
    def description(self, value: str):
        """Set description"""
        self.__description = str(value)

    @property
    def _id(self) -> str:
        """"""
        return self.___id

    @_id.setter
    def _id(self, value: str):
        """Set _id"""
        self.___id = str(value)

    @property
    def scriptableValues(self) -> List[ScriptableValue]:
        """"""
        return self.__scriptableValues

    @scriptableValues.setter
    def scriptableValues(self, value: List[ScriptableValue]):
        """Set scriptableValues"""
        # NOTE(review): "sequense" typo below is a runtime string the generator
        # emits; fix it in the generator template if desired.
        if not isinstance(value, Sequence):
            raise Exception("Expected sequense, but was " , type(value))
        self.__scriptableValues = value

    @property
    def internalSlugFlow(self) -> bool:
        """Indicator for modelling forces from internal slug flow"""
        return self.__internalSlugFlow

    @internalSlugFlow.setter
    def internalSlugFlow(self, value: bool):
        """Set internalSlugFlow"""
        self.__internalSlugFlow = bool(value)

    @property
    def hydrodynamicForce(self) -> HydrodynamicForceIndicator:
        """Indicator for hydrodynamic force model"""
        return self.__hydrodynamicForce

    @hydrodynamicForce.setter
    def hydrodynamicForce(self, value: HydrodynamicForceIndicator):
        """Set hydrodynamicForce"""
        self.__hydrodynamicForce = value

    @property
    def maxHit(self) -> int:
        """Maximum number of load iterations (linear analysis). A negative value gives print of convergence for each step."""
        return self.__maxHit

    @maxHit.setter
    def maxHit(self, value: int):
        """Set maxHit"""
        self.__maxHit = int(value)

    @property
    def forceIterationConvergence(self) -> float:
        """Convergence control parameter for force iteration"""
        return self.__forceIterationConvergence

    @forceIterationConvergence.setter
    def forceIterationConvergence(self, value: float):
        """Set forceIterationConvergence"""
        self.__forceIterationConvergence = float(value)

    @property
    def startUpDuration(self) -> float:
        """Duration of start-up procedure"""
        return self.__startUpDuration

    @startUpDuration.setter
    def startUpDuration(self, value: float):
        """Set startUpDuration"""
        self.__startUpDuration = float(value)

    @property
    def ruptureRelease(self) -> bool:
        """Indicator for rupture / release"""
        return self.__ruptureRelease

    @ruptureRelease.setter
    def ruptureRelease(self, value: bool):
        """Set ruptureRelease"""
        self.__ruptureRelease = bool(value)

    @property
    def connectorNumber(self) -> int:
        """Global ball-joint connector ID in the RIFLEX FEM model"""
        return self.__connectorNumber

    @connectorNumber.setter
    def connectorNumber(self, value: int):
        """Set connectorNumber"""
        self.__connectorNumber = int(value)

    @property
    def timeStepNumForRelease(self) -> int:
        """Time step number for release (nonlinear analysis only). In linear analysis the connector will be released at the first step."""
        return self.__timeStepNumForRelease

    @timeStepNumForRelease.setter
    def timeStepNumForRelease(self, value: int):
        """Set timeStepNumForRelease"""
        self.__timeStepNumForRelease = int(value)

    @property
    def dampingMatrixCalculation(self) -> DampingMatrixCalculationOption:
        """Option for calculation of proportional damping matrix in nonlinear analysis. Irrelevant for linear analysis."""
        return self.__dampingMatrixCalculation

    @dampingMatrixCalculation.setter
    def dampingMatrixCalculation(self, value: DampingMatrixCalculationOption):
        """Set dampingMatrixCalculation"""
        self.__dampingMatrixCalculation = value

    @property
    def slugForceSpecification(self) -> SlugForceSpecification:
        """"""
        return self.__slugForceSpecification

    @slugForceSpecification.setter
    def slugForceSpecification(self, value: SlugForceSpecification):
        """Set slugForceSpecification"""
        self.__slugForceSpecification = value
|
# Copyright 2020, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import itemgetter
from cs import CloudStackException, CloudStackApiException
from cosmicops.log import logging
from .object import CosmicObject
from .volume import CosmicVolume
class CosmicVM(CosmicObject):
def refresh(self):
    # Re-fetch this VM from the CloudStack API and replace the cached data dict.
    self._data = self._ops.get_vm(id=self['id'], json=True)
def stop(self):
    """Stop the VM, honouring dry-run mode.

    Returns True on success (or when dry-running), False if the async stop
    job fails.

    Bug fix: the dry-run log message was missing the closing quote after the
    VM name.
    """
    if self.dry_run:
        logging.info(f"Would stop VM '{self['name']}' on host '{self['hostname']}'")
        return True
    if self.get('maintenancepolicy') == 'ShutdownAndStart':
        logging.info(
            f"Stopping VM '{self['name']}' on host '{self['hostname']}' as it has a ShutdownAndStart policy",
            self.log_to_slack)
    else:
        logging.info(f"Stopping VM '{self['name']}' on host '{self['hostname']}'", self.log_to_slack)
    stop_response = self._ops.cs.stopVirtualMachine(id=self['id'])
    if not self._ops.wait_for_job(stop_response['jobid']):
        logging.error(f"Failed to shutdown VM '{self['name']}' on host '{self['hostname']}'")
        return False
    return True
def start(self, host=None):
if host:
host_id = host['id']
on_host_msg = f" on host '{host['name']}'"
else:
host_id = None
on_host_msg = ''
if self.dry_run:
logging.info(f"Would start VM '{self['name']}{on_host_msg}")
return True
logging.info(f"Starting VM '{self['name']}'{on_host_msg}'", self.log_to_slack)
start_response = self._ops.cs.startVirtualMachine(id=self['id'], hostid=host_id)
if not self._ops.wait_for_job(start_response['jobid']):
logging.error(f"Failed to start VM '{self['name']}'")
return False
return True
def get_affinity_groups(self):
affinity_groups = []
try:
affinity_groups = self._ops.cs.listAffinityGroups(fetch_list=True, virtualmachineid=self['id'])
except CloudStackException:
pass
return affinity_groups
def get_snapshots(self):
vm_snapshots = []
try:
if 'projectid' in self:
vm_snapshots = self._ops.cs.listVMSnapshot(fetch_list=True, virtualmachineid=self['id'], listall='true',
projectid=-1)
else:
vm_snapshots = self._ops.cs.listVMSnapshot(fetch_list=True, virtualmachineid=self['id'], listall='true')
except CloudStackException as e:
logging.error(f'Exception {str(e)}')
return vm_snapshots
def get_volumes(self):
if 'projectid' in self:
volumes = self._ops.cs.listVolumes(fetch_list=True, virtualmachineid=self['id'], listall='true',
projectid=-1)
else:
volumes = self._ops.cs.listVolumes(fetch_list=True, virtualmachineid=self['id'], listall='true')
return [CosmicVolume(self._ops, volume) for volume in volumes]
def detach_iso(self):
if 'isoid' in self:
self._ops.cs.detachIso(virtualmachineid=self['id'])
def is_user_vm(self):
return True if 'instancename' in self else False
def migrate_within_cluster(self, vm, source_cluster, **kwargs):
logging.instance_name = vm['instancename']
logging.slack_value = vm['domain']
logging.vm_name = vm['name']
logging.zone_name = vm['zonename']
logging.cluster = source_cluster['name']
try:
available_hosts = self._ops.cs.findHostsForMigration(virtualmachineid=vm['id']).get('host', [])
except CloudStackApiException as e:
logging.error(f"Encountered API exception while finding suitable host for migration: {e}")
return False
available_hosts.sort(key=itemgetter('memoryallocated'))
migration_host = None
for available_host in available_hosts:
# Only hosts in the same cluster
if available_host['clusterid'] != source_cluster['id']:
logging.debug(f"Skipping '{available_host['name']}' because it's not part of the current cluster")
continue
migration_host = available_host
break
if migration_host is None:
return False
return self.migrate(target_host=migration_host, **kwargs)
def migrate(self, target_host, with_volume=False, **kwargs):
if self.dry_run:
logging.info(f"Would live migrate VM '{self['name']}' to '{target_host['name']}'")
return True
if with_volume:
migrate_func = self._ops.cs.migrateVirtualMachineWithVolume
else:
migrate_func = self._ops.cs.migrateVirtualMachine
try:
logging.info(f"Live migrating VM '{self['name']}' to '{target_host['name']}'", self.log_to_slack)
if self.is_user_vm():
self.detach_iso()
vm_result = migrate_func(virtualmachineid=self['id'], hostid=target_host['id'])
if not vm_result:
raise RuntimeError
else:
vm_result = self._ops.cs.migrateSystemVm(virtualmachineid=self['id'], hostid=target_host['id'])
if not vm_result:
raise RuntimeError
except (CloudStackException, RuntimeError):
logging.error(f"Failed to migrate VM '{self['name']}'")
return False
job_id = vm_result['jobid']
if not self._ops.wait_for_vm_migration_job(job_id, **kwargs):
logging.error(f"Migration job '{vm_result['jobid']}' failed")
return False
logging.debug(f"Migration job '{vm_result['jobid']}' completed")
logging.debug(f"Successfully migrated VM '{self['name']}' to '{target_host['name']}'")
return True
|
<filename>code/perception.py
import numpy as np
import cv2
# Identify pixels above the threshold
# Threshold of RGB > 160 does a nice job of identifying ground pixels only
def color_thresh(img, rgb_thresh=(160, 160, 160)):
    """Binary-threshold an RGB image for navigable ground.

    A pixel is marked 1 when every channel strictly exceeds the matching
    value in ``rgb_thresh``; the mask is then smoothed with a morphological
    close (5x5 dilate followed by erode).
    """
    red, green, blue = img[:, :, 0], img[:, :, 1], img[:, :, 2]
    mask = np.zeros_like(red)
    bright = (red > rgb_thresh[0]) & (green > rgb_thresh[1]) & (blue > rgb_thresh[2])
    mask[bright] = 1
    close_kernel = np.ones((5, 5), np.uint8)
    grown = cv2.dilate(mask, close_kernel, iterations=1)
    return cv2.erode(grown, close_kernel, iterations=1)
def rock_finder(img, thresh_low=(130, 111, 0), thresh_high=(211, 170, 40)):
    """Mark rock-sample pixels whose RGB values lie strictly between
    ``thresh_low`` and ``thresh_high`` per channel, then clean the mask
    with a 5x5 morphological close."""
    mask = np.zeros_like(img[:, :, 0])
    in_band = np.ones(img.shape[:2], dtype=bool)
    for ch in range(3):
        in_band &= (img[:, :, ch] > thresh_low[ch]) & (img[:, :, ch] < thresh_high[ch])
    mask[in_band] = 1
    close_kernel = np.ones((5, 5), np.uint8)
    return cv2.erode(cv2.dilate(mask, close_kernel, iterations=1), close_kernel, iterations=1)
# NOTE(review): removed leftover notebook/debug statements that referenced
# undefined names at import time (`warped` is never defined at module scope
# and `plt` is not imported), which made this module unimportable:
#   threshed = color_thresh(warped)
#   plt.imshow(threshed, cmap='gray')
# Define a function to convert from image coords to rover coords
def rover_coords(binary_img):
    """Convert nonzero pixels of a binary image to rover-centric coords.

    The rover sits at the bottom-center of the image, so image rows map to
    (negated) forward x and centered image columns map to (negated) lateral y.

    Fix: ``np.float`` was a deprecated alias of the builtin ``float`` and
    was removed in NumPy 1.24; ``np.float64`` is the same dtype.
    """
    # Identify nonzero pixels
    ypos, xpos = binary_img.nonzero()
    # Positions referenced to the rover at the center bottom of the image.
    x_pixel = -(ypos - binary_img.shape[0]).astype(np.float64)
    y_pixel = -(xpos - binary_img.shape[1] / 2).astype(np.float64)
    return x_pixel, y_pixel
# Define a function to convert to radial coords in rover space
def to_polar_coords(x_pixel, y_pixel):
    """Return (distance, angle) polar coordinates for rover-space pixels.

    Angle is measured from the rover's forward (x) axis via arctan2.
    """
    distances = np.sqrt(x_pixel ** 2 + y_pixel ** 2)
    headings = np.arctan2(y_pixel, x_pixel)
    return distances, headings
# Define a function to map rover space pixels to world space
def rotate_pix(xpix, ypix, yaw):
    """Rotate rover-space pixel coordinates by ``yaw`` degrees (counter-clockwise)."""
    theta = yaw * np.pi / 180
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    return xpix * cos_t - ypix * sin_t, xpix * sin_t + ypix * cos_t
def translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale):
    """Scale rotated pixels down by ``scale`` and shift by the rover's world position."""
    return xpix_rot / scale + xpos, ypix_rot / scale + ypos
# Define a function to apply rotation and translation (and clipping)
# Once you define the two functions above this function should work
def pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale):
    """Map rover-space pixels to clipped integer world-map coordinates.

    Applies rotation by ``yaw``, then scaling/translation, then clips both
    axes into ``[0, world_size - 1]``.
    """
    rotated_x, rotated_y = rotate_pix(xpix, ypix, yaw)
    world_x, world_y = translate_pix(rotated_x, rotated_y, xpos, ypos, scale)
    return (np.clip(np.int_(world_x), 0, world_size - 1),
            np.clip(np.int_(world_y), 0, world_size - 1))
# Define a function to perform a perspective transform
def perspect_transform(img, src, dst):
    """Warp ``img`` from the ``src`` to the ``dst`` quadrilateral.

    Also returns a field-of-view mask (an all-ones image put through the
    same warp); both outputs keep the input image size.
    """
    size = (img.shape[1], img.shape[0])  # keep same size as input image
    transform = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(img, transform, size)
    mask = cv2.warpPerspective(np.ones_like(img[:, :, 0]), transform, size)
    return warped, mask
# Apply the above functions in succession and update the Rover state accordingly
def perception_step(Rover):
    """Run the vision pipeline on Rover.img and update Rover state in place.

    Updates Rover.vision_image, Rover.worldmap, Rover.nav_dists/nav_angles
    and (when a rock is visible) Rover.rock_dist/rock_angle, then returns
    the mutated Rover.
    """
    # NOTE: camera image is coming to you in Rover.img
    image = Rover.img
    true_img = True
    # 0) Image is really only valid if roll and pitch are ~0
    #    (angles are degrees wrapping at 360, hence the two-sided window)
    if Rover.pitch > 0.25 and Rover.pitch < 359.75:
        true_img = False
    elif Rover.roll > 0.75 and Rover.roll < 359.25:
        true_img = False
    # 1) Define source and destination points for perspective transform
    #    (source corners presumably measured from a calibration image — TODO confirm)
    source = np.float32([[14, 140], [301, 140], [200, 96], [118, 96]])
    dst_size = 5
    bottom_offset =6
    destination = np.float32([[image.shape[1]/2 - dst_size, image.shape[0] - bottom_offset],
                              [image.shape[1]/2 + dst_size, image.shape[0] - bottom_offset],
                              [image.shape[1]/2 + dst_size, image.shape[0] - 2*dst_size - bottom_offset],
                              [image.shape[1]/2 - dst_size, image.shape[0] - 2*dst_size - bottom_offset],
                              ])
    # 2) Apply perspective transform
    warped , mask = perspect_transform(image,source,destination)
    # 3) Apply color threshold to identify navigable terrain/obstacles/rock samples
    terrain = color_thresh(warped)
    # Obstacles = everything that is not terrain, limited to the camera FOV mask.
    obstacle = np.absolute(np.float32(terrain-1))*mask
    rock_sample = rock_finder(warped)
    # 4) Update Rover.vision_image (this will be displayed on left side of screen)
    #    channel 0: obstacles, channel 1: rock samples, channel 2: navigable terrain
    Rover.vision_image[:,:,0] = obstacle*255
    Rover.vision_image[:,:,1] = rock_sample*255
    Rover.vision_image[:,:,2] = terrain*255
    # 5) Convert map image pixel values to rover-centric coords
    nav_xpix,nav_ypix = rover_coords(terrain)
    obs_xpix,obs_ypix = rover_coords(obstacle)
    rock_xpix,rock_ypix = rover_coords(rock_sample)
    # 6) Convert rover-centric pixel values to world coordinates
    scale = 10
    navigable_x_world,navigable_y_world = pix_to_world(nav_xpix,nav_ypix,Rover.pos[0],Rover.pos[1],Rover.yaw,Rover.worldmap.shape[0],scale)
    obstacle_x_world,obstacle_y_world = pix_to_world(obs_xpix,obs_ypix,Rover.pos[0],Rover.pos[1],Rover.yaw,Rover.worldmap.shape[0],scale)
    rock_x_world,rock_y_world = pix_to_world(rock_xpix,rock_ypix,Rover.pos[0],Rover.pos[1],Rover.yaw,Rover.worldmap.shape[0],scale)
    if rock_sample.any():
        # Track the closest rock pixel; its polar coordinates steer the approach.
        rock_distance, rock_angle = to_polar_coords(rock_xpix, rock_ypix)
        rock_index = np.argmin(rock_distance)
        rock_x_center = rock_x_world[rock_index]
        rock_y_center = rock_y_world[rock_index]
        Rover.rock_angle = rock_angle
        Rover.rock_dist = rock_distance
    else:
        Rover.rock_angle = None
        Rover.rock_dist = None
    # 7) Update Rover worldmap (to be displayed on right side of screen);
    #    only update the world map if the camera image was valid
    if true_img:
        Rover.worldmap[obstacle_y_world, obstacle_x_world, 0] += 5
        if rock_sample.any():
            Rover.worldmap[rock_y_center, rock_x_center, 1] += 245
        Rover.worldmap[navigable_y_world, navigable_x_world, 2] += 40
    # 8) Convert rover-centric pixel positions to polar coordinates and
    #    update Rover pixel distances and angles used for steering.
    rover_centric_pixel_distances, rover_centric_angles = to_polar_coords(nav_xpix, nav_ypix)
    Rover.nav_angles = rover_centric_angles
    Rover.nav_dists = rover_centric_pixel_distances
    return Rover
|
<filename>google/ads/google_ads/v1/proto/services/domain_category_service_pb2.py
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v1/proto/services/domain_category_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v1.proto.resources import domain_category_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_domain__category__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v1/proto/services/domain_category_service.proto',
package='google.ads.googleads.v1.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v1.servicesB\032DomainCategoryServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V1.Services\312\002 Google\\Ads\\GoogleAds\\V1\\Services\352\002$Google::Ads::GoogleAds::V1::Services'),
serialized_pb=_b('\nDgoogle/ads/googleads_v1/proto/services/domain_category_service.proto\x12 google.ads.googleads.v1.services\x1a=google/ads/googleads_v1/proto/resources/domain_category.proto\x1a\x1cgoogle/api/annotations.proto\"1\n\x18GetDomainCategoryRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t2\xd8\x01\n\x15\x44omainCategoryService\x12\xbe\x01\n\x11GetDomainCategory\x12:.google.ads.googleads.v1.services.GetDomainCategoryRequest\x1a\x31.google.ads.googleads.v1.resources.DomainCategory\":\x82\xd3\xe4\x93\x02\x34\x12\x32/v1/{resource_name=customers/*/domainCategories/*}B\x81\x02\n$com.google.ads.googleads.v1.servicesB\x1a\x44omainCategoryServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V1.Services\xca\x02 Google\\Ads\\GoogleAds\\V1\\Services\xea\x02$Google::Ads::GoogleAds::V1::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_domain__category__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_GETDOMAINCATEGORYREQUEST = _descriptor.Descriptor(
name='GetDomainCategoryRequest',
full_name='google.ads.googleads.v1.services.GetDomainCategoryRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v1.services.GetDomainCategoryRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=199,
serialized_end=248,
)
DESCRIPTOR.message_types_by_name['GetDomainCategoryRequest'] = _GETDOMAINCATEGORYREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetDomainCategoryRequest = _reflection.GeneratedProtocolMessageType('GetDomainCategoryRequest', (_message.Message,), dict(
DESCRIPTOR = _GETDOMAINCATEGORYREQUEST,
__module__ = 'google.ads.googleads_v1.proto.services.domain_category_service_pb2'
,
__doc__ = """Request message for
[DomainCategoryService.GetDomainCategory][google.ads.googleads.v1.services.DomainCategoryService.GetDomainCategory].
Attributes:
resource_name:
Resource name of the domain category to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.GetDomainCategoryRequest)
))
_sym_db.RegisterMessage(GetDomainCategoryRequest)
DESCRIPTOR._options = None
_DOMAINCATEGORYSERVICE = _descriptor.ServiceDescriptor(
name='DomainCategoryService',
full_name='google.ads.googleads.v1.services.DomainCategoryService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=251,
serialized_end=467,
methods=[
_descriptor.MethodDescriptor(
name='GetDomainCategory',
full_name='google.ads.googleads.v1.services.DomainCategoryService.GetDomainCategory',
index=0,
containing_service=None,
input_type=_GETDOMAINCATEGORYREQUEST,
output_type=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_domain__category__pb2._DOMAINCATEGORY,
serialized_options=_b('\202\323\344\223\0024\0222/v1/{resource_name=customers/*/domainCategories/*}'),
),
])
_sym_db.RegisterServiceDescriptor(_DOMAINCATEGORYSERVICE)
DESCRIPTOR.services_by_name['DomainCategoryService'] = _DOMAINCATEGORYSERVICE
# @@protoc_insertion_point(module_scope)
|
"""Handle database functions for the stitcher utility."""
from .db import temp_db
def connect(temp_dir, db_prefix):
    """Open (or create) the stitcher's temporary database and return the connection."""
    return temp_db(temp_dir, db_prefix)
# ############################## reference genes #############################
def create_reference_genes_table(cxn):
    """(Re)create the ``reference_genes`` table, dropping any prior copy.

    Columns: ref_name (gene id), ref_seq (sequence), ref_file (source file).
    """
    cxn.executescript("""
        DROP TABLE IF EXISTS reference_genes;
        CREATE TABLE reference_genes (
            ref_name TEXT,
            ref_seq TEXT,
            ref_file TEXT);
        """)
def insert_reference_genes(cxn, batch):
    """Bulk-insert reference gene records.

    Each record in ``batch`` is a mapping with keys ``ref_name``,
    ``ref_seq`` and ``ref_file``.  An empty batch is a no-op; a non-empty
    one runs inside a transaction (``with cxn``) so it is all-or-nothing.
    """
    if not batch:
        return
    statement = """
        INSERT INTO reference_genes (ref_name, ref_seq, ref_file)
        VALUES (:ref_name, :ref_seq, :ref_file);
        """
    with cxn:
        cxn.executemany(statement, batch)
def select_reference_genes(cxn):
    """Return a cursor over all reference genes, ordered by ref_name."""
    return cxn.execute('SELECT * FROM reference_genes ORDER BY ref_name;')
# ################################# contigs ##################################
def create_contigs_table(cxn):
    """(Re)create the ``contigs`` table holding every input fasta record.

    Columns: the reference gene and taxon the contig belongs to, the contig
    name/sequence, the source file, the record number within that file, and
    the stitching iteration that produced it.
    """
    cxn.executescript("""
        DROP TABLE IF EXISTS contigs;
        CREATE TABLE contigs (
            ref_name TEXT,
            taxon_name TEXT,
            contig_name TEXT,
            contig_seq TEXT,
            contig_file TEXT,
            contig_rec INTEGER,
            iteration INTEGER);
        """)
def insert_contigs(cxn, batch):
    """Bulk-insert input contig records; an empty batch is a no-op.

    Records are mappings keyed by the contigs-table column names.  The
    insert runs as a single transaction.
    """
    if not batch:
        return
    statement = """
        INSERT INTO contigs
            (ref_name, taxon_name, contig_name, contig_seq, contig_file,
             contig_rec, iteration)
        VALUES (
            :ref_name, :taxon_name, :contig_name, :contig_seq,
            :contig_file, :contig_rec, :iteration);
        """
    with cxn:
        cxn.executemany(statement, batch)
def select_all_contigs(cxn):
    """Return a cursor over every row of the contigs table."""
    return cxn.execute("""SELECT * FROM contigs;""")
def select_contig_files(cxn, iteration=0):
    """Return the distinct contig files seen at ``iteration``, sorted by name."""
    return cxn.execute(
        """
        SELECT DISTINCT contig_file
        FROM contigs
        WHERE iteration = ?
        ORDER BY contig_file;""",
        (iteration,))
def select_contigs_in_file(cxn, contig_file, iteration=0):
    """Select all contigs of one file (at one iteration), ordered by record number."""
    return cxn.execute(
        """SELECT * FROM contigs
            WHERE contig_file = ?
              AND iteration = ?
         ORDER BY contig_rec;""",
        (contig_file, iteration))
def select_contigs(cxn, ref_name, iteration=0):
    """Select distinct (taxon_name, contig_file) pairs for a reference gene."""
    return cxn.execute(
        """SELECT DISTINCT taxon_name, contig_file
             FROM contigs
            WHERE ref_name = ?
              AND iteration = ?
         ORDER BY taxon_name, contig_file
        """,
        (ref_name, iteration))
# ############################# exonerate results ############################
def create_exonerate_table(cxn):
    """(Re)create the ``exonerate`` table holding alignment results.

    ``beg``/``end`` are match coordinates used for contig ordering and
    overlap queries (exact coordinate convention comes from the exonerate
    output — confirm before relying on inclusivity).
    """
    cxn.executescript("""
        DROP TABLE IF EXISTS exonerate;
        CREATE TABLE exonerate (
            ref_name TEXT,
            taxon_name TEXT,
            contig_name TEXT,
            beg INTEGER,
            end INTEGER,
            iteration INTEGER,
            seq TEXT);
        """)
def select_exonerate_ref_gene(cxn, ref_name, min_len):
    """Get exonerate results for a reference gene, at least ``min_len`` long.

    ``GROUP BY seq`` collapses rows carrying identical sequences, so each
    distinct sequence is returned once.
    """
    return cxn.execute(
        """SELECT *
             FROM exonerate
            WHERE ref_name = ?
              AND LENGTH(seq) >= ?
         GROUP BY seq;""",
        (ref_name, min_len))
def select_exonerate_count(cxn):
    """Return the total number of exonerate result rows (requires sqlite3.Row rows)."""
    return cxn.execute("""SELECT COUNT(*) AS n FROM exonerate;""").fetchone()['n']
def select_stitch(cxn, iteration=0):
    """Select every distinct (ref_name, taxon_name) pair for an iteration.

    Fix: the ORDER BY clause listed ``taxon_name`` twice; the second sort
    key is now ``ref_name`` so pairs within a taxon come back in a
    deterministic gene order.
    """
    return cxn.execute(
        """SELECT DISTINCT ref_name, taxon_name
             FROM exonerate
            WHERE iteration = ?
         ORDER BY taxon_name, ref_name
        """,
        (iteration,))
def insert_exonerate_results(cxn, batch):
    """Bulk-insert exonerate result records; an empty batch is a no-op.

    Records are mappings keyed by the exonerate-table column names.  The
    insert runs as a single transaction.
    """
    if not batch:
        return
    statement = """
        INSERT INTO exonerate (
            ref_name, taxon_name, contig_name, beg, end, iteration, seq)
        VALUES (
            :ref_name, :taxon_name, :contig_name, :beg, :end,
            :iteration, :seq);
        """
    with cxn:
        cxn.executemany(statement, batch)
def select_next(cxn, ref_name, taxon_name, beg=-1, iteration=0):
    """
    Find the next contig for the assembly.

    It's looking for the closest contig to the given beginning. The
    tiebreaker being the longer contig (``end DESC``), then contig name
    for determinism.  ``beg=-1`` starts from the very first contig.
    Returns a single row or None.
    """
    sql = """
        SELECT *
          FROM exonerate
         WHERE ref_name = ?
           AND taxon_name = ?
           AND beg > ?
           AND iteration = ?
      ORDER BY beg, end DESC, contig_name
         LIMIT 1;
        """
    result = cxn.execute(sql, (ref_name, taxon_name, beg, iteration))
    return result.fetchone()
def select_longest(cxn):
    """Get the longest contig length for the framer reports (requires sqlite3.Row rows)."""
    row = cxn.execute("""SELECT MAX(LENGTH(seq)) AS max_len FROM exonerate;""").fetchone()
    return row['max_len']
def select_seq_lengths(cxn):
    """Get (taxon_name, ref_name, len) sequence lengths for the framer reports."""
    sql = """
        SELECT taxon_name, ref_name, length(seq) AS len
          FROM exonerate;"""
    return cxn.execute(sql)
def select_overlap(
        cxn, ref_name, taxon_name, beg_lo, beg_hi, end, iteration=0):
    """
    Find the best overlapping contig for the assembly.

    Find an overlapping contig that starts anywhere between beg_lo & beg_hi.
    It must also end somewhere after the given end marker. We want the contig
    that extends the stitched sequence by the longest amount so we ORDER BY
    end descending & choose the first one (contig_name breaks ties
    deterministically).  Returns a single row or None.
    """
    sql = """
        SELECT *
          FROM exonerate
         WHERE ref_name = ?
           AND taxon_name = ?
           AND iteration = ?
           AND end > ?
           AND beg BETWEEN ? AND ?
      ORDER BY end DESC, contig_name
         LIMIT 1;
        """
    result = cxn.execute(
        sql, (ref_name, taxon_name, iteration, end, beg_lo, beg_hi))
    return result.fetchone()
# ############################# stitched genes ###############################
def create_stitch_table(cxn):
    """(Re)create the ``stitched`` table of stitched genes & gap fillers.

    These overlaps are trimmed & the position in the assembled gene is
    noted.  ``contig_name`` is NULL for gap-filler rows.
    """
    cxn.executescript("""
        DROP TABLE IF EXISTS stitched;
        CREATE TABLE stitched (
            ref_name TEXT,
            taxon_name TEXT,
            contig_name TEXT,
            position INTEGER,
            iteration INTEGER,
            seq TEXT);
        """)
def insert_stitched_genes(cxn, batch):
    """Bulk-insert stitched contig records; an empty batch is a no-op.

    Records are mappings keyed by the stitched-table column names.  The
    insert runs as a single transaction.
    """
    if not batch:
        return
    statement = """
        INSERT INTO stitched (
            ref_name, taxon_name, contig_name, position, iteration, seq)
        VALUES (
            :ref_name, :taxon_name, :contig_name, :position, :iteration,
            :seq);
        """
    with cxn:
        cxn.executemany(statement, batch)
def select_stitched_contigs(cxn, ref_name, taxon_name, iteration=0):
    """Select stitched contigs for a reference/taxon pair, in assembly order."""
    return cxn.execute(
        """SELECT *
             FROM stitched
            WHERE ref_name = ?
              AND taxon_name = ?
              AND iteration = ?
         ORDER BY position
        """,
        (ref_name, taxon_name, iteration))
def select_stitched_contig_count(cxn, ref_name, taxon_name, iteration=0):
    """Count stitched rows backed by a real contig (NULL contig_name rows
    are gap fillers and excluded).  Requires sqlite3.Row rows."""
    query = """SELECT COUNT(*) AS hits
                 FROM stitched
                WHERE ref_name = ?
                  AND taxon_name = ?
                  AND iteration = ?
                  AND contig_name IS NOT NULL;
            """
    row = cxn.execute(query, (ref_name, taxon_name, iteration)).fetchone()
    return row['hits']
def select_per_gene_stats(cxn, iteration):
    """Get data for the per gene summary report.

    ``target_len`` is SUM(LENGTH(seq)) / 3 — presumably converting
    nucleotide length to amino-acid length; confirm against the report.
    Gap-filler rows (NULL contig_name) are excluded.
    """
    return cxn.execute(
        """SELECT ref_name, taxon_name,
                  LENGTH(ref_seq) AS query_len,
                  SUM(LENGTH(seq)) / 3 AS target_len
             FROM stitched
             JOIN reference_genes USING (ref_name)
            WHERE contig_name IS NOT NULL
              AND iteration = ?
         GROUP BY ref_name, taxon_name;
        """,
        (iteration,))
def select_per_taxon_stats(cxn, iteration):
    """Get data for the per taxon summary report.

    For each (taxon, gene) pair, ``property`` is the covered fraction of
    the reference (summed stitched length / 3 over the reference length).
    Each taxon row then counts how many of its genes clear each coverage
    threshold.  Gap-filler rows (NULL contig_name) are excluded.

    Fix: the ``lt10`` bucket ("less than 10% covered") was computed with
    ``property > 0.10`` — the opposite of its name and overlapping
    ``ge10`` — it now uses ``property < 0.10``.
    NOTE(review): the ``gt95`` column is computed with ``>=`` despite its
    name; left as-is since reports may rely on it.
    """
    return cxn.execute(
        """
        WITH
        properties AS (
            SELECT taxon_name, ref_name,
                   CAST(SUM(LENGTH(seq)) / 3 AS REAL)
                       / CAST(LENGTH(ref_seq) AS REAL) AS property
              FROM stitched
              JOIN reference_genes USING (ref_name)
             WHERE contig_name IS NOT NULL
               AND iteration = ?
          GROUP BY taxon_name, ref_name),
        thresholds AS (
            SELECT taxon_name, ref_name,
                   property,
                   property = 1.00 AS eq100,
                   property >= 0.95 AS gt95,
                   property >= 0.90 AS ge90,
                   property >= 0.80 AS ge80,
                   property >= 0.70 AS ge70,
                   property >= 0.50 AS ge50,
                   property >= 0.10 AS ge10,
                   property < 0.10 AS lt10
              FROM properties)
        SELECT taxon_name,
               COUNT(DISTINCT ref_name) AS genes,
               SUM(eq100) AS eq100,
               SUM(gt95) AS gt95,
               SUM(ge90) AS ge90,
               SUM(ge80) AS ge80,
               SUM(ge70) AS ge70,
               SUM(ge50) AS ge50,
               SUM(ge10) AS ge10,
               SUM(lt10) AS lt10
          FROM thresholds
      GROUP BY taxon_name;
        """,
        (iteration,))
|
<filename>testCNN/cnn.py
from keras.models import Sequential
from keras import layers
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential
from keras import layers
from sklearn.feature_extraction.text import CountVectorizer
# Map each data source to its labelled-sentences file
# (tab-separated: sentence <TAB> 0/1 label).
filepath_dict = {'yelp': 'sentiment_analysis/yelp_labelled.txt',
                 'amazon': 'sentiment_analysis/amazon_cells_labelled.txt',
                 'imdb': 'sentiment_analysis/imdb_labelled.txt'}
df_list = []
for source, filepath in filepath_dict.items():
    df = pd.read_csv(filepath, names=['sentence', 'label'], sep='\t')
    df['source'] = source  # Add another column filled with the source name
    df_list.append(df)
df = pd.concat(df_list)
print(df.iloc[0])
# Only the yelp subset is used for training/evaluation below.
df_yelp = df[df['source'] == 'yelp']
sentences = df_yelp['sentence'].values
y = df_yelp['label'].values
sentences_train, sentences_test, y_train, y_test = train_test_split(
    sentences, y, test_size=0.25, random_state=1000)
# Fit the tokenizer on the training split only, then map both splits to id sequences.
tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(sentences_train)
X_train = tokenizer.texts_to_sequences(sentences_train)
X_test = tokenizer.texts_to_sequences(sentences_test)
# A hand-written sentence used later for a sanity-check prediction.
test_sent = ["This movie was nearly perfect. I only had one complaint."]
test = tokenizer.texts_to_sequences(test_sent)
print(test_sent)
print(test)
print("---------------------------")
vocab_size = len(tokenizer.word_index) + 1  # Adding 1 because of reserved 0 index
print(sentences_train[2])
print(X_train[2])
# Pad/truncate every sequence to a fixed length for the Conv1D input.
maxlen = 100
X_train = pad_sequences(X_train, padding='post', maxlen=maxlen)
X_test = pad_sequences(X_test, padding='post', maxlen=maxlen)
test = pad_sequences(test, padding='post', maxlen=maxlen)
def create_embedding_matrix(filepath, word_index, embedding_dim):
    """Build a (len(word_index) + 1, embedding_dim) matrix of pretrained vectors.

    Reads a GloVe-style text file (word followed by its vector on each line)
    and fills row ``word_index[word]`` for every word present in the
    vocabulary.  Words absent from the file keep all-zero rows, and row 0
    stays zero because index 0 is reserved for padding.
    """
    matrix = np.zeros((len(word_index) + 1, embedding_dim))
    with open(filepath, 'r', encoding='UTF8') as handle:
        for line in handle:
            word, *vector = line.split()
            idx = word_index.get(word)
            if idx is not None:
                matrix[idx] = np.array(vector, dtype=np.float32)[:embedding_dim]
    return matrix
# Load 50-d GloVe vectors for the tokenizer's vocabulary.
embedding_dim = 50
embedding_matrix = create_embedding_matrix(
    'glove.6B.50d.txt',
    tokenizer.word_index, embedding_dim)
# Conv1D text classifier on top of the (fine-tunable) pretrained embeddings.
model = Sequential()
model.add(layers.Embedding(vocab_size, embedding_dim,
                           weights=[embedding_matrix],
                           input_length=maxlen,
                           trainable=True))
model.add(layers.Conv1D(128, 5, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.summary()
history = model.fit(X_train, y_train,
                    epochs=10,
                    verbose=False,
                    validation_data=(X_test, y_test),
                    batch_size=10)
loss, accuracy = model.evaluate(X_train, y_train, verbose=False)
print("Training Accuracy: {:.4f}".format(accuracy))
loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
print("Testing Accuracy: {:.4f}".format(accuracy))
# NOTE(review): Sequential.predict_classes/predict_proba were removed in
# newer Keras/TF releases; this presumably targets an older Keras — confirm
# before upgrading dependencies.
ynew = model.predict_classes(test)
print(ynew)
for i in range(len(test)):
    print("X=%s, Predicted=%s" % (test[i], ynew[i]))
ynew = model.predict_proba(test)
for i in range(len(test)):
    print("X=%s, Predicted=%s" % (test[i], ynew[i]))
<reponame>897615138/tfsnippet-jill<filename>tests/layers/core/test_dense.py
import numpy as np
import tensorflow as tf
from tests.helper import assert_variables
from tests.layers.helper import l2_normalize
from tests.layers.core.test_gated import safe_sigmoid
from tfsnippet.layers import dense
from tfsnippet.utils import get_static_shape
class DenseTestCase(tf.test.TestCase):
    """Unit tests for tfsnippet.layers.dense (TensorFlow 1.x graph mode)."""

    def test_linear(self):
        # Fixed seed so kernel/bias/x (and thus expected outputs) are reproducible.
        np.random.seed(1234)
        kernel = np.random.normal(size=(5, 3)).astype(np.float64)
        bias = np.random.normal(size=(3,)).astype(np.float64)
        x = np.random.normal(size=(11, 7, 5)).astype(np.float64)
        with self.test_session() as sess:
            # test 2d input
            np.testing.assert_allclose(
                sess.run(
                    dense(
                        tf.constant(x[0]), 3,
                        kernel=tf.constant(kernel),
                        bias=tf.constant(bias)
                    )
                ),
                np.dot(x[0], kernel) + bias,
                rtol=1e-5
            )
            # test 3d input
            ans = np.dot(x, kernel) + bias
            self.assertEqual(ans.shape, (11, 7, 3))
            np.testing.assert_allclose(
                sess.run(
                    dense(
                        tf.constant(x, dtype=tf.float64), 3,
                        kernel=tf.constant(kernel),
                        bias=tf.constant(bias)
                    )
                ),
                ans,
                rtol=1e-5
            )
            # test dynamic batch and sampling size
            ph = tf.placeholder(dtype=tf.float64, shape=(None, None, 5))
            np.testing.assert_allclose(
                sess.run(
                    dense(
                        ph, 3,
                        kernel=tf.constant(kernel),
                        bias=tf.constant(bias)
                    ),
                    feed_dict={ph: x}
                ),
                ans,
                rtol=1e-5
            )
            # test use_bias is False
            ans = np.dot(x, kernel)
            self.assertEqual(ans.shape, (11, 7, 3))
            np.testing.assert_allclose(
                sess.run(
                    dense(
                        tf.constant(x, dtype=tf.float64), 3,
                        kernel=tf.constant(kernel),
                        bias=tf.constant(bias),
                        use_bias=False
                    )
                ),
                ans,
                rtol=1e-5
            )
        # test create variables
        with tf.Graph().as_default():
            _ = dense(tf.constant(x, dtype=tf.float64), 3)
            assert_variables(['kernel', 'bias'], trainable=True, scope='dense',
                             collections=[tf.GraphKeys.MODEL_VARIABLES])
            kernel_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[-2]
            bias_var = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[-1]
            self.assertEqual(get_static_shape(kernel_var), kernel.shape)
            self.assertEqual(get_static_shape(bias_var), bias.shape)
        # test create variables, non-trainable
        with tf.Graph().as_default():
            _ = dense(tf.constant(x, dtype=tf.float64), 3, trainable=False)
            assert_variables(['kernel', 'bias'], trainable=False, scope='dense',
                             collections=[tf.GraphKeys.MODEL_VARIABLES])
        # test create variables, use_bias is False
        with tf.Graph().as_default():
            _ = dense(tf.constant(x, dtype=tf.float64), 3, use_bias=False)
            assert_variables(['kernel'], trainable=True, scope='dense',
                             collections=[tf.GraphKeys.MODEL_VARIABLES])
            assert_variables(['bias'], exist=False, scope='dense')

    def test_normalization_and_activation(self):
        np.random.seed(1234)
        kernel = np.random.normal(size=(5, 3)).astype(np.float64)
        bias = np.random.normal(size=(3,)).astype(np.float64)
        x = np.random.normal(size=(11, 7, 5)).astype(np.float64)
        normalizer_fn = lambda x: x * 2. + 1.
        activation_fn = lambda x: x * 1.5 - 3.
        # Sanity check: the two functions do not commute, so application
        # order (normalizer before activation) is actually being tested.
        self.assertGreater(
            np.min(np.abs(normalizer_fn(activation_fn(x)) -
                          activation_fn(normalizer_fn(x)))),
            1.
        )
        with self.test_session() as sess:
            # test weight_norm + normalizer + activation
            normalized_kernel = l2_normalize(kernel, axis=0)
            ans = activation_fn(normalizer_fn(np.dot(x, normalized_kernel)))
            self.assertEqual(ans.shape, (11, 7, 3))
            np.testing.assert_allclose(
                sess.run(
                    dense(
                        tf.constant(x, dtype=tf.float64), 3,
                        kernel=tf.constant(kernel),
                        bias=tf.constant(bias),
                        activation_fn=activation_fn,
                        normalizer_fn=normalizer_fn,
                        weight_norm=True
                    )
                ),
                ans,
                rtol=1e-5
            )
            # test weight_norm + normalizer + activation, use_bias is True
            ans = activation_fn(
                normalizer_fn(np.dot(x, normalized_kernel) + bias))
            self.assertEqual(ans.shape, (11, 7, 3))
            np.testing.assert_allclose(
                sess.run(
                    dense(
                        tf.constant(x, dtype=tf.float64), 3,
                        kernel=tf.constant(kernel),
                        bias=tf.constant(bias),
                        activation_fn=activation_fn,
                        normalizer_fn=normalizer_fn,
                        weight_norm=True,
                        use_bias=True
                    )
                ),
                ans,
                rtol=1e-5
            )

    def test_gated(self):
        np.random.seed(1234)
        # Gated variant: the layer produces 2x units; the second half is a gate.
        kernel = np.random.normal(size=(5, 6)).astype(np.float64)
        bias = np.random.normal(size=(6,)).astype(np.float64)
        x = np.random.normal(size=(11, 7, 5)).astype(np.float64)
        normalizer_fn = lambda x: x * 2. + 1.
        activation_fn = lambda x: x * 1.5 - 3.
        self.assertGreater(
            np.min(np.abs(normalizer_fn(activation_fn(x)) -
                          activation_fn(normalizer_fn(x)))),
            1.
        )
        with self.test_session() as sess:
            normalized_kernel = l2_normalize(kernel, axis=0)
            # Expected: split into (output, gate), sigmoid-gate the activated
            # output with gate_sigmoid_bias added to the gate logits.
            output, gate = np.split(
                normalizer_fn(np.dot(x, normalized_kernel)), 2, axis=-1)
            ans = activation_fn(output) * safe_sigmoid(gate + 1.1)
            self.assertEqual(ans.shape, (11, 7, 3))
            np.testing.assert_allclose(
                sess.run(
                    dense(
                        tf.constant(x, dtype=tf.float64), 3,
                        kernel=tf.constant(kernel),
                        bias=tf.constant(bias),
                        activation_fn=activation_fn,
                        normalizer_fn=normalizer_fn,
                        weight_norm=True,
                        gated=True,
                        gate_sigmoid_bias=1.1
                    )
                ),
                ans,
                rtol=1e-5
            )
|
import sys
from django import forms
from django.db import models
from django.http import QueryDict
from django.test import RequestFactory, TestCase
from django.utils.datastructures import MultiValueDict
from django_genericfilters import views
from django_genericfilters.forms import FilteredForm
from six.moves import urllib
def setup_view(view, request, *args, **kwargs):
    """Mimic the callable returned by ``as_view()``, but hand back the view
    instance itself so tests can poke at it directly.

    ``args`` and ``kwargs`` are the same values you would pass to
    ``reverse()``.  See also: https://code.djangoproject.com/ticket/20456
    """
    view.request, view.args, view.kwargs = request, args, kwargs
    return view
class ParentModel(models.Model):
    """
    define a parent model

    Referenced by the test case's ``QueryModel.people`` foreign key and by
    the ``parent`` ModelChoiceField of the test form.
    """
    name = models.CharField(max_length=250)
class StatusModel(models.Model):
    """
    define a dummy status model

    Referenced by the test case's nullable ``QueryModel.status`` foreign key
    and by the ``status`` ModelMultipleChoiceField of the test form.
    """
    name = models.CharField(max_length=250)
class FilteredViewTestCase(TestCase):
    """Tests for ``django_genericfilters.views.FilteredListView``.

    The view under test is always configured with the nested ``QueryModel``
    and ``Form`` below; requests are built with ``RequestFactory`` and bound
    to the view via the module-level ``setup_view`` helper.
    """

    def assertIn(self, a, b, msg=None):
        """Membership assertion with a fallback for Python 2.6, whose
        unittest lacks ``assertIn``."""
        if sys.version_info[:2] == (2, 6):
            # for 2.6 compatibility
            if a not in b:
                # BUGFIX: this used "%b", which is not a valid %-format
                # conversion and raised ValueError instead of failing the test.
                self.fail("%s is not in %s" % (repr(a), repr(b)))
        else:
            super(FilteredViewTestCase, self).assertIn(a, b, msg=msg)

    class QueryModel(models.Model):
        """
        Define a dummy model for this test case
        """
        people = models.ForeignKey(ParentModel, on_delete=models.CASCADE)
        city = models.CharField(max_length=250)
        country = models.CharField(max_length=250)
        organization = models.CharField(max_length=250)
        status = models.ForeignKey(
            StatusModel, null=True, on_delete=models.CASCADE
        )

    class Form(FilteredForm):
        """Filter form exposing plain-choice, multiple-choice and
        model-choice fields, one per behaviour under test."""
        city = forms.ChoiceField(
            label='city', required=False,
            choices=(
                ("N", "Nantes"),
                ("P", "Paris")
            )
        )
        country = forms.ChoiceField(
            label='country', required=False,
            choices=(
                ("F", "France"),
                ("P", "Portugal")
            )
        )
        people = forms.ChoiceField(
            label='people', required=False,
            choices=(
                ("S", "Some"),
                ("A", "Any")
            )
        )
        organization = forms.MultipleChoiceField(
            label='organization', required=False,
            choices=(
                ('A', 'A Team'),
                ('B', 'B Team'),
                ('C', 'C Team')
            ))
        parent = forms.ModelChoiceField(
            queryset=ParentModel.objects.all(),
            label='parent', required=False
        )
        status = forms.ModelMultipleChoiceField(
            label='status',
            required=False,
            queryset=StatusModel.objects.all()
        )

        def get_order_by_choices(self):
            """Return the (value, label) pairs offered for ordering."""
            return (('last_name', 'Last Name'),
                    ('first_name', 'First Name'))

    def test_default_order_fallback_form_valid(self):
        """Queryset is unordered if no default_order or data (valid form)."""
        data = {"city": "N"}
        view = setup_view(
            views.FilteredListView(
                model=self.QueryModel, form_class=self.Form),
            RequestFactory().get('/fake', data))
        view.form.is_valid()
        queryset = view.form_valid(view.form)
        self.assertEqual(queryset.query.order_by, [])

    def test_default_order_fallback_form_invalid(self):
        """Queryset is unordered if no default_order or data (invalid form)."""
        data = {"city": "fake"}
        view = setup_view(
            views.FilteredListView(
                model=self.QueryModel, form_class=self.Form),
            RequestFactory().get('/fake', data))
        view.form.is_valid()
        queryset = view.form_invalid(view.form)
        self.assertEqual(queryset.query.order_by, [])

    def test_default_order_fallback_form_empty(self):
        """Queryset is unordered if no default_order or data (empty form)."""
        request = RequestFactory().get('/fake')
        view = setup_view(
            views.FilteredListView(
                model=self.QueryModel, form_class=self.Form),
            request)
        queryset = view.form_empty()
        self.assertEqual(queryset.query.order_by, [])

    def test_default_filter(self):
        """Test the default filter"""
        request = RequestFactory().get('/fake')
        view = setup_view(
            views.FilteredListView(
                model=self.QueryModel, form_class=self.Form,
                default_filter={'is_active': '1', 'page': '1'}),
            request)
        query_filter = urllib.parse.urlencode({'is_active': '1', 'page': '1'})
        get_filter = view.get_form_kwargs()
        self.assertEqual(get_filter['data'], QueryDict(query_filter))

    def test_default_filter_submit(self):
        """Test the default filter submit"""
        data = {"city": "N"}
        request = RequestFactory().get('/fake', data)
        view = setup_view(
            views.FilteredListView(
                model=self.QueryModel, form_class=self.Form,
                default_filter={'is_active': '1', 'page': '1'}),
            request)
        # Submitted data is merged on top of the default filter.
        query_filter = urllib.parse.urlencode({
            'is_active': '1', 'page': '1', 'city': 'N'})
        get_filter = view.get_form_kwargs()
        self.assertEqual(get_filter['data'], QueryDict(query_filter))

    def test_default_order_form_valid(self):
        """Queryset is ordered by default_order when no order_by in request."""
        data = {"city": "N"}
        view = setup_view(
            views.FilteredListView(
                model=self.QueryModel,
                form_class=self.Form,
                default_order='last_name'),
            RequestFactory().get('/fake', data))
        view.form.is_valid()
        queryset = view.form_valid(view.form)
        self.assertEqual(queryset.query.order_by, ['last_name'])

    def test_default_order_form_invalid(self):
        """Queryset is ordered by default_order when no order_by in request
        and form is invalid."""
        data = {"city": "fake"}
        view = setup_view(
            views.FilteredListView(
                model=self.QueryModel,
                form_class=self.Form,
                default_order='last_name'),
            RequestFactory().get('/fake', data))
        view.form.is_valid()
        queryset = view.form_invalid(view.form)
        self.assertEqual(queryset.query.order_by, ['last_name'])

    def test_default_order_form_empty(self):
        """Queryset is ordered by default_order when no order_by in request."""
        request = RequestFactory().get('/fake')
        view = setup_view(
            views.FilteredListView(
                model=self.QueryModel,
                form_class=self.Form,
                default_order='last_name'),
            request)
        queryset = view.form_empty()
        self.assertEqual(queryset.query.order_by, ['last_name'])

    def test_default_order_reverse(self):
        """To test order reverse"""
        data = {"city": "N"}
        view = setup_view(
            views.FilteredListView(
                model=self.QueryModel,
                form_class=self.Form,
                default_order='-last_name'),
            RequestFactory().get('/fake', data))
        view.form.is_valid()
        queryset = view.form_valid(view.form)
        self.assertEqual(queryset.query.order_by, ['-last_name'])

    def test_default_order_in_request(self):
        """Test with order_by in data: request data wins over default_order."""
        data = {"city": "N", "order_by": "last_name"}
        view = setup_view(
            views.FilteredListView(
                model=self.QueryModel,
                form_class=self.Form,
                default_order='-last_name'),
            RequestFactory().get('/fake', data))
        view.form.is_valid()
        queryset = view.form_valid(view.form)
        self.assertEqual(queryset.query.order_by, ['last_name'])

    def test_filtered_list_view(self):
        """filter_fields and qs_filter_fields both map form values to
        queryset filters (direct column vs. related-field lookup)."""
        a = views.FilteredListView(filter_fields=['city'],
                                   form_class=self.Form,
                                   model=self.QueryModel)
        b = views.FilteredListView(filter_fields=['city', 'people'],
                                   qs_filter_fields={'people__name': 'people'},
                                   form_class=self.Form,
                                   model=self.QueryModel)
        setattr(
            a,
            'request',
            type('obj', (object, ), {"method": "GET", "GET": {"city": "N"}})
        )
        setattr(
            b,
            'request',
            type('obj', (object, ), {"method": "GET", "GET": {"people": "S"}})
        )
        self.assertEqual({'city': 'city'}, a.get_qs_filters())
        a.form.is_valid()
        self.assertIn(
            'WHERE "django_genericfilters_querymodel"."city" = N',
            a.form_valid(a.form).query.__str__()
        )
        self.assertEqual({'people__name': 'people'}, b.get_qs_filters())
        b.form.is_valid()
        self.assertIn(
            'WHERE "django_genericfilters_parentmodel"."name" = S',
            b.form_valid(b.form).query.__str__()
        )

    def test_filtered_list_view__none(self):
        """
        FIXED : None value add "IS NULL" filters instead of ignore it.
        """
        view = views.FilteredListView(qs_filter_fields={
            'city': 'city',
            'people__name': 'people'
        },
            form_class=self.Form,
            model=self.QueryModel)
        data = {"city": None, "people": "S"}
        setup_view(view, RequestFactory().get('/fake', data))
        view.form.is_valid()
        self.assertIn(
            'WHERE "django_genericfilters_parentmodel"."name" = S',
            str(view.form_valid(view.form).query)
        )
        view = views.FilteredListView(qs_filter_fields={
            'city': 'city',
            'people__name': 'people'
        },
            form_class=self.Form,
            model=self.QueryModel)
        data = {"city": "N", "people": None}
        setup_view(view, RequestFactory().get('/fake', data))
        view.form.is_valid()
        self.assertIn(
            'WHERE "django_genericfilters_querymodel"."city" = N',
            str(view.form_valid(view.form).query)
        )

    def test_filtered_list_view__multiplechoice(self):
        """
        FIXED : filtered fields has HiddenWidget widgets that cannot handle
        multiple values. Use Field.hidden_widget instead.
        """
        view = views.FilteredListView(filter_fields=['organization'],
                                      form_class=self.Form,
                                      model=self.QueryModel)
        data = MultiValueDict({"organization": ['A']})
        setup_view(view, RequestFactory().get('/fake', data))
        self.assertTrue(view.form.is_valid(), view.form.errors)
        self.assertIn(
            'WHERE "django_genericfilters_querymodel"."organization" IN (A)',
            str(view.form_valid(view.form).query)
        )
        view = views.FilteredListView(filter_fields=['organization'],
                                      form_class=self.Form,
                                      model=self.QueryModel)
        data = MultiValueDict({"organization": ['A', 'C']})
        setup_view(view, RequestFactory().get('/fake', data))
        self.assertTrue(view.form.is_valid())

    def test_filtered_list_view__multiplechoice__qs_filter_field(self):
        """
        FIXED : When using qs_filter_field, the behaviour changes because
        the HiddenWidget trick only works with filter_field
        attribute. But it compares a list with EQUAL operator
        instead of IN.
        """
        people = ParentModel.objects.create(name='fake')
        self.QueryModel.objects.create(organization='A', people=people)
        self.QueryModel.objects.create(organization='C', people=people)
        view = views.FilteredListView(qs_filter_fields={
            'organization': 'organization'
        },
            form_class=self.Form,
            model=self.QueryModel)
        data = MultiValueDict({"organization": ['A']})
        setup_view(view, RequestFactory().get('/fake', data))
        self.assertTrue(view.form.is_valid(), view.form.errors)
        self.assertIn(
            'WHERE "django_genericfilters_querymodel"."organization" IN (A)',
            str(view.form_valid(view.form).query)
        )
        self.assertEqual(1, view.form_valid(view.form).count())
        view = views.FilteredListView(qs_filter_fields={
            'organization': 'organization'
        },
            form_class=self.Form,
            model=self.QueryModel)
        data = MultiValueDict({"organization": ['A', 'C']})
        setup_view(view, RequestFactory().get('/fake', data))
        self.assertTrue(view.form.is_valid())
        self.assertEqual(2, view.form_valid(view.form).count())

    def test_filtered_list_view__modelchoice(self):
        """A ModelChoiceField value filters on the related object's pk."""
        peopleA = ParentModel.objects.create(name='fakeA')
        view = views.FilteredListView(qs_filter_fields={
            'city': 'city',
            'people': 'parent'
        },
            form_class=self.Form,
            model=self.QueryModel)
        data = {"parent": peopleA.pk}
        setup_view(view, RequestFactory().get('/fake', data))
        view.form.is_valid()
        self.assertIn(
            'WHERE "django_genericfilters_querymodel"."people_id" = %s' % (
                peopleA.pk,
            ),
            str(view.form_valid(view.form).query)
        )

    def test_filtered_list_view__modelchoice__empty_queryset(self):
        """
        FIXED : Empty queryset in ModelChoiceField add "IS NULL" filters
        instead of ignore it.
        """
        view = views.FilteredListView(qs_filter_fields={
            'city': 'city',
            'people': 'parent'
        },
            form_class=self.Form,
            model=self.QueryModel)
        # pk 1 does not exist: the parent filter must simply be dropped.
        data = {"city": "N", "parent": 1}
        setup_view(view, RequestFactory().get('/fake', data))
        view.form.is_valid()
        self.assertIn(
            'WHERE "django_genericfilters_querymodel"."city" = N',
            str(view.form_valid(view.form).query)
        )

    def test_filtered_list_view__modelchoice__none(self):
        """
        FIXED : Empty queryset in ModelChoiceField add "IS NULL" filters
        instead of ignore it.
        """
        view = views.FilteredListView(qs_filter_fields={
            'city': 'city',
            'people': 'parent'
        },
            form_class=self.Form,
            model=self.QueryModel)
        data = {"city": "N", "parent": None}
        setup_view(view, RequestFactory().get('/fake', data))
        view.form.is_valid()
        self.assertIn(
            'WHERE "django_genericfilters_querymodel"."city" = N',
            str(view.form_valid(view.form).query)
        )

    def test_filtered_list_view__multiplemodelchoice(self):
        """
        FIXED : filtered fields has HiddenWidget widgets that cannot handle
        multiple values. Use Field.hidden_widget instead.
        """
        stateA = StatusModel.objects.create(name='stateA')
        stateB = StatusModel.objects.create(name='stateB')
        stateC = StatusModel.objects.create(name='stateC')
        people = ParentModel.objects.create(name='fake')
        A = self.QueryModel.objects.create(organization='A', people=people,
                                           status=stateA)
        B = self.QueryModel.objects.create(organization='B', people=people,
                                           status=stateB)
        C = self.QueryModel.objects.create(organization='C', people=people,
                                           status=stateB)
        self.QueryModel.objects.create(organization='D', people=people,
                                       status=stateC)
        view = views.FilteredListView(filter_fields=["city", 'status'],
                                      form_class=self.Form,
                                      model=self.QueryModel)
        data = MultiValueDict({"status": [stateA.pk]})
        setup_view(view, RequestFactory().get('/fake', data))
        view.form.is_valid()
        # filter_fields swaps the widget for a (multiple) hidden input.
        self.assertIsInstance(view.form.fields['status'].widget,
                              forms.MultipleHiddenInput)
        queryset = view.form_valid(view.form)
        self.assertIn('IN (%s)' % stateA.pk, str(queryset.query))
        self.assertEqual([A], list(queryset.all()))
        view = views.FilteredListView(filter_fields=["city", 'status'],
                                      form_class=self.Form,
                                      model=self.QueryModel)
        data = MultiValueDict({"status": [stateA.pk, stateB.pk]})
        setup_view(view, RequestFactory().get('/fake', data))
        view.form.is_valid()
        queryset = view.form_valid(view.form)
        self.assertEqual([A, B, C], list(queryset.all()))

    def test_filtered_list_view__multiplemodelchoice__qs_filter_field(self):
        """Same as above, but via qs_filter_fields: the widget stays a
        regular SelectMultiple and the IN filter is still applied."""
        stateA = StatusModel.objects.create(name='stateA')
        stateB = StatusModel.objects.create(name='stateB')
        stateC = StatusModel.objects.create(name='stateC')
        people = ParentModel.objects.create(name='fake')
        A = self.QueryModel.objects.create(organization='A', people=people,
                                           status=stateA)
        B = self.QueryModel.objects.create(organization='B', people=people,
                                           status=stateB)
        C = self.QueryModel.objects.create(organization='C', people=people,
                                           status=stateB)
        self.QueryModel.objects.create(organization='D', people=people,
                                       status=stateC)
        view = views.FilteredListView(qs_filter_fields={
            "city": "city",
            "status": "status"
        },
            form_class=self.Form,
            model=self.QueryModel)
        data = MultiValueDict({"status": [stateA.pk]})
        setup_view(view, RequestFactory().get('/fake', data))
        view.form.is_valid()
        self.assertIsInstance(view.form.fields['status'].widget,
                              forms.SelectMultiple)
        queryset = view.form_valid(view.form)
        self.assertIn('IN (%s)' % stateA.pk, str(queryset.query))
        self.assertEqual([A], list(queryset))
        view = views.FilteredListView(qs_filter_fields={
            "city": "city",
            "status": "status"
        },
            form_class=self.Form,
            model=self.QueryModel)
        data = MultiValueDict({"status": [stateA.pk, stateB.pk]})
        setup_view(view, RequestFactory().get('/fake', data))
        view.form.is_valid()
        queryset = view.form_valid(view.form)
        self.assertEqual([A, B, C], list(queryset))

    def test_filtered_list_view__multiplemodelchoice__invalid_id(self):
        """
        FIXED : Invalid id in MultipleModelChoiceField generate a None
        value and add "IS NULL" filter instead of ignore it.
        """
        StatusModel.objects.create(name='stateA')
        StatusModel.objects.create(name='stateB')
        view = views.FilteredListView(qs_filter_fields={
            "city": "city",
            'status': 'status'
        },
            form_class=self.Form,
            model=self.QueryModel)
        data = MultiValueDict({"status": [1001]})
        setup_view(view, RequestFactory().get('/fake', data))
        view.form.is_valid()
        # no filter at all
        self.assertNotIn('WHERE', str(view.form_valid(view.form).query))

    def test_filtered_list_view__multiplemodelchoice__none(self):
        """
        FIXED : Empty queryset in MultipleModelChoiceField is added as
        subrequest in filter and raises an sql error instead of
        ignore it.
        """
        people = ParentModel.objects.create(name='fake')
        statusA = StatusModel.objects.create(name='stateA')
        statusB = StatusModel.objects.create(name='stateB')
        self.QueryModel.objects.create(organization='A', people=people,
                                       status=statusA)
        self.QueryModel.objects.create(organization='C', people=people,
                                       status=statusB)
        view = views.FilteredListView(qs_filter_fields={
            "city": "city",
            'status': 'status'
        },
            form_class=self.Form,
            model=self.QueryModel)
        setup_view(view, RequestFactory().get('/fake', {}))
        view.form.is_valid()
        # no filter at all
        self.assertNotIn('WHERE', str(view.form_valid(view.form).query))

    def test_is_form_submitted_method(self):
        """is_form_submitted() is True for a GET request carrying query
        parameters, and False for a POST request."""
        request = RequestFactory().get('/fake', {"foo": "bar"})
        view = setup_view(views.FilteredListView(), request)
        assert view.is_form_submitted() is True
        request = RequestFactory().post('/fake', {"foo": "bar"})
        view = setup_view(views.FilteredListView(), request)
        assert view.is_form_submitted() is False

    def test_is_form_submitted_no_args(self):
        """is_form_submitted() is False when the GET query string is empty."""
        request = RequestFactory().get('/fake')
        view = setup_view(views.FilteredListView(), request)
        assert view.is_form_submitted() is False
|
import xml.etree.ElementTree as ET
from xmlobject import XMLObject
from helpers import Struct
from pose import Pose
class XMLReader(XMLObject):
    """
    A class to handle reading and parsing of XML files for the simulator and
    parameters configuration files.
    """
    _file = None
    _root = None

    def __init__(self, file_, template):
        """
        Construct a new XMLReader instance

        Scope:
            Public
        Parameters:
            file_ ------> path to the file containing the XML
            template ---> 'simulator' or 'parameters'
        Return:
            A new XMLReader instance
        """
        super(XMLReader, self).__init__(file_, template)

        _tree = None
        try:
            _tree = ET.parse(file_)
        except IOError:
            raise Exception('[XMLReader.__init__] Could not open ' + str(file_))
        except ET.ParseError:
            raise Exception('[XMLReader.__init__] Could not parse ' + str(file_))

        self._root = _tree.getroot()

    def _parse_parameters(self):
        """
        Parse a parameters configuration file

        Scope:
            Private
        Parameters:
            None
        Return:
            A nested list of (name, value) pairs mirroring the XML tree.
            Children that carry an ``id`` attribute are keyed by the tuple
            ``(tag, id)`` so repeated tags stay distinguishable.
            (Note: despite the historical docstring, the result is a list,
            not a dictionary.)
        """
        def parse_tag(rlist, tag):
            """Append attribute pairs and child sublists of ``tag`` to ``rlist``."""
            for attr, value in tag.items():
                if attr != "id":
                    # Prefer numeric values; fall back to the raw string.
                    try:
                        rlist.append((attr, float(value)))
                    except ValueError:
                        rlist.append((attr, value))
            for child in tag:
                sub = []
                id_ = child.get('id', None)
                if id_ is not None:
                    rlist.append(((child.tag, id_), sub))
                else:
                    rlist.append((child.tag, sub))
                parse_tag(sub, child)

        result = []
        parse_tag(result, self._root)
        return result

    def _parse_color(self, color):
        """
        Convert a color attribute value to int

        None will yield None, '#FFACDD' will yield 0xFFACDD

        Scope:
            Private
        Parameters:
            color ----> the color to be converted
        Return:
            An integer value in the (AA)RRGGBB format
        """
        if color is None:
            return color
        if color[0] == "#":
            return int(color[1:], 16)
        # Small set of supported named colors.
        named = {'black': 0x000000,
                 'red': 0xFF0000,
                 'green': 0x00FF00,
                 'blue': 0x0000FF}
        color = color.lower()
        if color in named:
            return named[color]
        raise Exception('[XMLReader._parse_color] Bad color value in XML!')

    def _parse_polygon_object(self, node, type_):
        """
        Parse an <obstacle> or <marker> element into a simulation Struct.

        Scope:
            Private
        Parameters:
            node -----> the XML element to parse
            type_ ----> 'obstacle' or 'marker', stored in the result
        Return:
            A Struct with 'type' and 'polygon' (pose, color, points).
        Raises:
            Exception on missing/invalid pose, geometry or points;
            ValueError (from float()) propagates to the caller.
        """
        pose = node.find('pose')
        if pose is None:
            raise Exception(
                '[XMLReader._parse_simulation] No pose specified!')
        geometry = node.find('geometry')
        if geometry is None:
            raise Exception(
                '[XMLReader._parse_simulation] No geometry specified!')
        points = []
        for point in geometry.findall('point'):
            x, y = point.get('x'), point.get('y')
            if x is None or y is None:
                raise Exception(
                    '[XMLReader._parse_simulation] Invalid point!')
            points.append((float(x), float(y)))
        if len(points) < 3:
            # A polygon needs at least three vertices.
            raise Exception(
                '[XMLReader._parse_simulation] Too few points!')
        x, y, theta = pose.get('x'), pose.get('y'), pose.get('theta')
        if x is None or y is None or theta is None:
            raise Exception(
                '[XMLReader._parse_simulation] Invalid pose!')
        color = self._parse_color(node.get('color'))
        return Struct({'type': type_,
                       'polygon': {'pose': Pose(float(x), float(y), float(theta)),
                                   'color': color,
                                   'points': points}})

    def _parse_simulation(self):
        """
        Parse a simulation configuration file

        Scope:
            Private
        Parameters:
            None
        Return:
            A list of the objects in the simulation.
        """
        simulator_objects = []

        # robots
        for robot in self._root.findall('robot'):
            robot_type = robot.get('type')
            supervisor = robot.find('supervisor')
            if supervisor is None:
                raise Exception(
                    '[XMLReader._parse_simulation] No supervisor specified!')
            pose = robot.find('pose')
            if pose is None:
                raise Exception(
                    '[XMLReader._parse_simulation] No pose specified!')
            try:
                x, y, theta = pose.get('x'), pose.get('y'), pose.get('theta')
                if x is None or y is None or theta is None:
                    raise Exception(
                        '[XMLReader._parse_simulation] Invalid pose!')
                robot_color = self._parse_color(robot.get('color'))
                simulator_objects.append(Struct({'type': 'robot',
                    'robot': {'type': robot_type,
                              'pose': Pose(float(x), float(y), float(theta)),
                              'color': robot_color,
                              'options': robot.get('options', None)},
                    'supervisor': {'type': supervisor.attrib['type'],
                                   'options': supervisor.get('options', None)}}))
            except ValueError:
                raise Exception(
                    '[XMLReader._parse_simulation] Invalid robot (bad value)!')

        # obstacles (shared polygon parsing with markers below)
        for obstacle in self._root.findall('obstacle'):
            try:
                simulator_objects.append(
                    self._parse_polygon_object(obstacle, 'obstacle'))
            except ValueError:
                raise Exception(
                    '[XMLReader._parse_simulation] Invalid obstacle (bad value)!')

        # background markers
        for marker in self._root.findall('marker'):
            try:
                simulator_objects.append(
                    self._parse_polygon_object(marker, 'marker'))
            except ValueError:
                raise Exception(
                    '[XMLReader._parse_simulation] Invalid marker (bad value)!')

        return simulator_objects

    def read(self):
        """
        Read in and parse the XML given in *file_* representing the specified *template*.

        | *Parameters:*
        |  None
        | *Return:*
        |  The result of reading and parsing the file. The type of return is dependent on the template, as follows:
        |
        |  1) **simulation**: a list of tuples representing robots, obstacles, and markers, as follows:
        |  ('robot', *robot_type*, *supervisor_type*, *pose*, *color*)
        |  ('obstacle', *pose*, [*point1*, *point2*, *point3*, ...], *color*)
        |  ('marker', *pose*, [*point1*, *point2*, *point3*, ...], *color*)
        |
        |  2) **parameters**: a dictionary representing the structure of the XML, as follows:
        |  { *root_element*:
        |  { *parameter_name*: {*attribute_name*: *attribute_value*, ... },
        |  ...
        |  (*parameter_name*, *parameter_id*): {*attribute_name*: *attribute_value*, ... },
        |  ...
        |  }
        |  }
        """
        if self._template == "parameters":
            return self._parse_parameters()
        elif self._template == "simulation":
            return self._parse_simulation()
        else:
            raise Exception(
                '[XMLReader.read] Unknown template!')
|
import struct
import dns
import dns.rdtypes.txtbase, dns.rdtypes.svcbbase
import dns.rdtypes.ANY.CDS, dns.rdtypes.ANY.DLV, dns.rdtypes.ANY.DS
def _strip_quotes_decorator(func):
return lambda *args, **kwargs: func(*args, **kwargs)[1:-1]
# Ensure that dnspython agrees with pdns' expectations for SVCB / HTTPS parameters.
# WARNING: This is a global side-effect. It can't be done by extending a class, because dnspython hardcodes the use of
# their dns.rdtypes.svcbbase.*Param classes in the global dns.rdtypes.svcbbase._class_for_key dictionary. We either have
# to globally mess with that dict and insert our custom class, or we just mess with their classes directly.
# Each patched to_text() below returns the upstream text with the surrounding quotes stripped.
dns.rdtypes.svcbbase.ALPNParam.to_text = _strip_quotes_decorator(dns.rdtypes.svcbbase.ALPNParam.to_text)
dns.rdtypes.svcbbase.IPv4HintParam.to_text = _strip_quotes_decorator(dns.rdtypes.svcbbase.IPv4HintParam.to_text)
dns.rdtypes.svcbbase.IPv6HintParam.to_text = _strip_quotes_decorator(dns.rdtypes.svcbbase.IPv6HintParam.to_text)
dns.rdtypes.svcbbase.MandatoryParam.to_text = _strip_quotes_decorator(dns.rdtypes.svcbbase.MandatoryParam.to_text)
dns.rdtypes.svcbbase.PortParam.to_text = _strip_quotes_decorator(dns.rdtypes.svcbbase.PortParam.to_text)
@dns.immutable.immutable
class LongQuotedTXT(dns.rdtypes.txtbase.TXTBase):
    """
    A TXT record like RFC 1035, but
    - allows arbitrarily long tokens, and
    - all tokens must be quoted.
    """

    def __init__(self, rdclass, rdtype, strings):
        # Same as in parent class, but with max_length=None. Note that we are calling __init__ from the grandparent.
        super(dns.rdtypes.txtbase.TXTBase, self).__init__(rdclass, rdtype)
        self.strings = self._as_tuple(strings,
                                      lambda x: self._as_bytes(x, True, max_length=None))

    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        """Parse all remaining tokens as quoted byte strings; reject
        unquoted content and empty rdata."""
        strings = []
        for token in tok.get_remaining():
            token = token.unescape_to_bytes()
            # The 'if' below is always true in the current code, but we
            # are leaving this check in in case things change some day.
            if not token.is_quoted_string():
                raise dns.exception.SyntaxError("Content must be quoted.")
            strings.append(token.value)
        if len(strings) == 0:
            raise dns.exception.UnexpectedEnd
        return cls(rdclass, rdtype, strings)

    def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
        # Each wire-format <character-string> is limited to 255 bytes, so a
        # long string is emitted as consecutive 255-byte chunks; max(len, 1)
        # ensures an empty string still yields one zero-length chunk.
        for long_s in self.strings:
            for s in [long_s[i:i + 255] for i in range(0, max(len(long_s), 1), 255)]:
                l = len(s)
                assert l < 256
                file.write(struct.pack('!B', l))
                file.write(s)
# TODO remove when https://github.com/rthalley/dnspython/pull/625 is in the main codebase
class _DigestLengthMixin():
_digest_length_by_type = {
1: 20, # SHA-1, RFC 3658 Sec. 2.4
2: 32, # SHA-256, RFC 4509 Sec. 2.2
3: 32, # GOST R 34.11-94, RFC 5933 Sec. 4 in conjunction with RFC 4490 Sec. 2.1
4: 48, # SHA-384, RFC 6605 Sec. 2
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
if self.digest_type == 0: # reserved, RFC 3658 Sec. 2.4
raise ValueError('digest type 0 is reserved')
expected_length = _DigestLengthMixin._digest_length_by_type[self.digest_type]
except KeyError:
raise ValueError('unknown digest type')
if len(self.digest) != expected_length:
raise ValueError('digest length inconsistent with digest type')
@dns.immutable.immutable
class CDS(_DigestLengthMixin, dns.rdtypes.ANY.CDS.CDS):
    # CDS rdata with digest-length validation added via _DigestLengthMixin.
    pass
@dns.immutable.immutable
class DLV(_DigestLengthMixin, dns.rdtypes.ANY.DLV.DLV):
    # DLV rdata with digest-length validation added via _DigestLengthMixin.
    pass
@dns.immutable.immutable
class DS(_DigestLengthMixin, dns.rdtypes.ANY.DS.DS):
    # DS rdata with digest-length validation added via _DigestLengthMixin.
    pass
|
<filename>tests/sparkml/test_linear_classifier.py
# SPDX-License-Identifier: Apache-2.0
import sys
import unittest
import inspect
import os
import numpy
import pandas
from pyspark.ml.classification import LogisticRegression, LinearSVC
from pyspark.ml.linalg import VectorUDT, SparseVector
from onnx.defs import onnx_opset_version
from onnxconverter_common.onnx_ex import DEFAULT_OPSET_NUMBER
from onnxmltools import convert_sparkml
from onnxmltools.convert.common.data_types import FloatTensorType
from tests.sparkml.sparkml_test_utils import save_data_models, run_onnx_model, compare_results
from tests.sparkml import SparkMlTestCase
# Use the smaller of the converter's default opset and what the installed onnx supports.
TARGET_OPSET = min(DEFAULT_OPSET_NUMBER, onnx_opset_version())
class TestSparkmlLogisticRegression(SparkMlTestCase):
    """Round-trip tests: fit a Spark ML linear classifier, convert it to ONNX
    with convert_sparkml, then compare ONNX output against Spark's own
    predictions."""

    @unittest.skipIf(sys.version_info < (3, 8),
                     reason="pickle fails on python 3.7")
    def test_model_logistic_regression_binary_class(self):
        """LogisticRegression: prediction and probability must match."""
        this_script_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
        input_path = os.path.join(this_script_dir, "data", "sample_libsvm_data.txt")
        original_data = self.spark.read.format("libsvm").load(input_path)
        #
        # truncate the features
        #
        # Keep only columns 125:130 of the original features as a 5-dim SparseVector.
        self.spark.udf.register("truncateFeatures", lambda x: SparseVector(5, range(0, 5), x.toArray()[125:130]),
                                VectorUDT())
        data = original_data.selectExpr("label", "truncateFeatures(features) as features")
        lr = LogisticRegression(maxIter=100, tol=0.0001)
        model = lr.fit(data)
        # the name of the input for Logistic Regression is 'features'
        C = model.numFeatures
        model_onnx = convert_sparkml(model, 'sparkml logistic regression', [('features', FloatTensorType([None, C]))],
                                     target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        # run the model
        predicted = model.transform(data)
        data_np = data.toPandas().features.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)
        expected = [
            predicted.toPandas().prediction.values.astype(numpy.float32),
            predicted.toPandas().probability.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)
        ]
        # known error in onnxruntime 0.3.0 case
        paths = save_data_models(data_np, expected, model, model_onnx,
                                 basename="SparkmlLogisticRegression")
        onnx_model_path = paths[-1]
        output, output_shapes = run_onnx_model(['prediction', 'probability'], data_np, onnx_model_path)
        compare_results(expected, output, decimal=5)

    @unittest.skipIf(sys.version_info < (3, 8),
                     reason="pickle fails on python 3.7")
    def test_linear_svc(self):
        """LinearSVC: only the prediction output is compared (no probability)."""
        this_script_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
        input_path = os.path.join(this_script_dir, "data", "sample_libsvm_data.txt")
        original_data = self.spark.read.format("libsvm").load(input_path)
        #
        # truncate the features
        #
        # Same 5-feature truncation as the logistic-regression test above.
        self.spark.udf.register("truncateFeatures", lambda x: SparseVector(5, range(0, 5), x.toArray()[125:130]),
                                VectorUDT())
        data = original_data.selectExpr("label", "truncateFeatures(features) as features")
        lsvc = LinearSVC(maxIter=10, regParam=0.01)
        model = lsvc.fit(data)
        # the name of the input for Logistic Regression is 'features'
        C = model.numFeatures
        model_onnx = convert_sparkml(model, 'Spark ML Linear SVC', [('features', FloatTensorType([None, C]))],
                                     target_opset=TARGET_OPSET)
        self.assertTrue(model_onnx is not None)
        # run the model
        predicted = model.transform(data)
        data_np = data.toPandas().features.apply(lambda x: pandas.Series(x.toArray())).values.astype(numpy.float32)
        expected = [ predicted.toPandas().prediction.values.astype(numpy.float32) ]
        paths = save_data_models(data_np, expected, model, model_onnx,
                                 basename="SparkmlLinearSVC")
        onnx_model_path = paths[-1]
        output, output_shapes = run_onnx_model(['prediction'], data_np, onnx_model_path)
        compare_results(expected, output, decimal=5)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
<filename>Blackjack.py
#!/usr/bin/env python3
# Blackjack.py - by <NAME>
from random import shuffle
from collections import deque
from itertools import product
import os
from decimal import Decimal
import re
def clearscreen():
    """Clear the terminal, using the right shell command for the current OS."""
    command = 'clear' if os.name == 'posix' else 'cls'
    os.system(command)
def get_bet():
    """Prompt until the global ``player`` enters a non-negative integer wager,
    storing it on ``player.bet``."""
    while True:
        try:
            player.bet = int(
                input("{}, make your wager: $".format(player.name)))
            if player.bet < 0:
                raise ValueError()
        except ValueError:
            print(
                "That is not a valid bet. They will throw you out of Vegas for that kinda crap")
        else:
            break
def win_conditions(player):
    """Settle the round for *player* by comparing hand scores.

    NOTE(review): reads the module-level ``player_hand`` and ``dealer_hand``
    globals set up by the game loop.
    """
    player_score = player_hand.score()
    dealer_score = dealer_hand.score()
    if player_score > 21:
        # Player busted: automatic loss regardless of the dealer.
        player.lose()
        print("Sorry, friend. You busted and lost ${}.".format(player.bet))
    elif player_score <= dealer_score <= 21:
        # Dealer ties or beats the player without busting (ties go to the dealer).
        player.lose()
        print("Dealer wins. You lost ${}.".format(player.bet))
    elif dealer_score > 21:
        player.win()
        print("Dealer busts! You win ${}".format(player.bet))
    elif 21 >= player_score > dealer_score:
        player.win()
        print("Your score of {} is higher than the dealer's {}. You win ${}.".format(
            player_score, dealer_score, player.bet))
    else:
        print("Well, shit. There's a corner case you don't have the logic for or this is broken. What the hell happened?")
# Establishing the info each card represents
class Card:
    """One playing card; ``value`` is its blackjack worth (face cards 10, Ace 11)."""

    def __init__(self, suit, name):
        self.suit = suit
        self.name = name
        if name == 'Ace':
            self.value = 11
        elif name in ('Jack', 'Queen', 'King'):
            self.value = 10
        else:
            self.value = int(name)

    def __str__(self):
        return '{} of {}'.format(self.name, self.suit)
# building a deck of cards
class Deck:
    """A dealing shoe assembled from one or more standard 52-card packs."""

    _SUITS = ('Hearts', 'Diamonds', 'Spades', 'Clubs')
    _NAMES = ('2', '3', '4', '5', '6', '7', '8', '9', '10',
              'Jack', 'Queen', 'King', 'Ace')

    def __init__(self, packs=1):
        # 'packs' is how many 52-card packs are merged into this deck.
        self.packs = packs
        self.cards = deque(Card(suit, name)
                           for _ in range(self.packs)
                           for suit, name in product(self._SUITS, self._NAMES))

    def shuffle(self):
        """Shuffle the deck in place."""
        shuffle(self.cards)

    def deal(self):
        """Remove and return the top card of the deck."""
        return self.cards.pop()

    def cards_left(self):
        """Number of cards still in the deck."""
        return len(self.cards)
# The class that determines the cards in the player or dealer's hand
class Hand:
    """The cards held by the player or the dealer."""

    def __init__(self):
        self.cards = deque()

    def add_card(self, card):
        """Append a newly dealt card to this hand."""
        self.cards.append(card)

    def score(self):
        """Blackjack score: each ace counts 11 unless the running total
        already exceeds 10, in which case it counts 1."""
        total = 0
        # Sum the non-aces first so aces can adapt to the rest of the hand.
        for card in self.cards:
            if card.value != 11:
                total += card.value
        # Now fold in the aces one at a time against the running total.
        for card in self.cards:
            if card.value == 11:
                total += 1 if total > 10 else 11
        return total

    def show_cards(self):
        """Render the hand as ' - card - card - ...'."""
        rendered = ''
        for card in self.cards:
            rendered += ' - {}'.format(card)
        return rendered
# A class to keep track of the global number of games played so that we
# con do win ratios
class Game:
    """Global counter of hands played, used for win-ratio bookkeeping."""

    def __init__(self):
        self.games = 0
# A class to keep track of all of a player's individual stats
class Player:
    """A blackjack player's bankroll, current wager and win/loss stats."""

    def __init__(self, money=0, name='player'):
        self.money = Decimal(money)        # current bankroll
        self.start_money = Decimal(0.00)   # bankroll at session start
        self.wins = 0                      # hands won
        self.name = name
        self.games_played = 0              # hands completed
        self.win_percentage = 0.0          # wins / games_played, in percent
        self.bet = Decimal(0.00)           # current wager

    def win(self, percent=1):
        """Credit the current bet (scaled by ``percent``) and count the win."""
        win = Decimal(self.bet * percent)
        self.money = self.money + win
        self.wins += 1

    def lose(self, percent=1):
        """Debit the current bet (scaled by ``percent``)."""
        lose = self.bet * percent
        self.money = self.money - lose

    def update_percentage(self):
        """Recompute ``win_percentage``.

        Reports 0.0 before any hand has been played; the original
        implementation raised ZeroDivisionError in that case.
        """
        if self.games_played == 0:
            self.win_percentage = 0.0
        else:
            self.win_percentage = round(
                (100 * self.wins / self.games_played), 2)
# Main game script: set up the player and deck, then loop one blackjack
# hand per iteration until the player quits or runs out of money.
# Starting with a nice, clear screen:
clearscreen()
# Getting the player info
player = Player()
player.name = input("What's your name, pardner? ")
money = (
    input(
        "How much money are you bringing to the table, {}? $".format(
            player.name)))
player.money = Decimal(money)
player.start_money = player.money
bet = 0
# Building the Deck
while True:
    try:
        packs = 1
        packs = int(
            input("How many packs of cards should make up your deck? [1-10] "))
        if packs < 0:
            raise ValueError()
        break
    except ValueError:
        print("{} is not a valid number of packs. They will throw you out of Vegas for that kinda crap".format(packs))
deck = Deck(packs)
deck.shuffle()
# Setting the play variable and the game counter
play_another = 'y'
game = Game()
while play_another != 'n':
    # clear screen between rounds
    clearscreen()
    # Reshuffle a fresh shoe when fewer than 10 cards remain.
    if (deck.cards_left() < 10):
        print(
            "There were only {} cards left in the deck, so the dealer reshuffled.".format(
                deck.cards_left()))
        deck = Deck(packs)
        deck.shuffle()
    player_hand = Hand()
    dealer_hand = Hand()
    # listing the players's stats
    print("{}'s current chipcount: ${} - {}'s starting amount: ${}.".format(player.name,
        round(player.money, 2), player.name, round(player.start_money, 2)))
    print("{} Hands played. {} Hands won. Win percentage: {}%".format(
        player.games_played, player.wins, player.win_percentage))
    if player.money < 1:
        break
    else:
        # player.bet() = int(input("{}, make your wager:
        # $".format(player.name)))
        # NOTE(review): get_bet() stores the wager on player.bet but has no
        # return statement, so ``bet`` here is None; player.bet is what is
        # actually used below.
        bet = get_bet()
    if player.bet > player.money:
        print("You don't have that much, so you promise your partner as payment.")
        print("Luckily (maybe) for you, the dealer accepts.")
    # initial deal
    for i in range(2):
        player_hand.add_card(deck.deal())
        x = deck.deal()
        dealer_hand.add_card(x)
        if i == 1:
            print(
                "There are {} cards left in the deck.".format(
                    deck.cards_left()))
            print("The dealer's face-up card is a {}".format(x))
    print("")
    print(
        "Your hand consists of:{}".format(
            player_hand.show_cards()))
    print("Your score is: {}".format(player_hand.score()))
    # handling being dealt a blackjack
    if(player_hand.score() == 21) and (dealer_hand.score() < 21):
        # Blackjack pays 3:2.
        player.win(1.5)
        print(
            "You got a blackjack and just won ${:.2f}!".format(
                player.bet *
                Decimal(1.50)))
    elif(player_hand.score() == 21) and (dealer_hand.score() == 21):
        # Both dealt blackjack: house wins in this implementation.
        player.lose(1)
        print("You got a blackjack!")
        print("The dealer's hand is:{}".format(dealer_hand.show_cards()))
        print("...but so did the dealer. So you lose. Bad luck happens.")
    elif(player_hand.score() < 21) and (dealer_hand.score() == 21):
        player.lose(1)
        print(
            "The dealer shows his hand {}: a blackjack. You lose ${}".format(
                dealer_hand.show_cards(),
                player.bet))
    else:
        hit = 'y'
        # the player gets to hit or stay
        while ((hit != 'n') and (player_hand.score() <= 21)):
            hit = (input("Hit? (y/n)").lower())
            if hit != 'n':
                x = deck.deal()
                player_hand.add_card(x)
                print("You were dealt a {}.".format(x))
                print("Your score is: {}".format(player_hand.score()))
        # dealer logic
        print(
            "The dealer shows his hand and has {}".format(
                dealer_hand.show_cards()))
        # Dealer hits while at-or-below 17 and still behind a live player.
        while((dealer_hand.score() <= 17) and (dealer_hand.score() < player_hand.score()) and (player_hand.score() <= 21)):
            x = deck.deal()
            dealer_hand.add_card(x)
            print("The dealer hits and gets a {}".format(x))
            print("His score is {}.".format(dealer_hand.score()))
        # win/lose conditions
        win_conditions(player)
    player.games_played += 1
    player.update_percentage()
    print("")
    play_another = (input("Are you up for another hand? (y/n)").lower())
# Goodbye message/warning.
clearscreen()
if player.money <= 0:
    print("Sorry friend, you've got to have money to rent a seat. Have a nice one.")
print(
    "Thanks for playing! You're leaving the table win percentage of {}%.".format(
        player.win_percentage))
if player.money >= player.start_money:
    # NOTE(review): the trailing 2 is passed to format(), not round(), so the
    # amount is rounded to a whole dollar despite the :.2f formatting.
    print(
        "You won ${:.2f}.".format(
            round(
                player.money -
                player.start_money),
            2))
else:
    # NOTE(review): same misplaced rounding argument as the win branch.
    print(
        "You lost ${:.2f}.".format(
            round(
                (player.money - player.start_money) * -1),
            2))
if(player.money < 0):
    print("The dealer has taken your partner. You need to find a way to pay back the casino quickly.")
    print("Unseemly things are happening.")
if(player.win_percentage > 65 and game.games > 20):
    # NOTE(review): game.games is never incremented in this script, so this
    # warning is unreachable as written — confirm intent.
    print("It looks like you might've been card counting. Don't make it too obvious or you'll get banned.")
|
#
# Copyright (c) 2017 Juniper Networks, Inc. All rights reserved.
#
"""
VNC pod management for kubernetes
"""
import uuid
from vnc_api.vnc_api import *
from config_db import *
from kube_manager.common.kube_config_db import NamespaceKM
from kube_manager.common.kube_config_db import PodKM
from vnc_kubernetes_config import VncKubernetesConfig as vnc_kube_config
class VncPod(object):
    """Manage the VNC (Contrail) objects that back kubernetes pods.

    For each pod this class creates/deletes a virtual-machine, a
    virtual-machine-interface, an instance-ip and (for isolated
    namespaces) a cluster-service floating-ip, links the VM to its
    virtual-router, and keeps the label-to-pod cache in sync.
    """

    def __init__(self, service_mgr, network_policy_mgr):
        self._name = type(self).__name__
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._label_cache = vnc_kube_config.label_cache()
        self._service_mgr = service_mgr
        self._network_policy_mgr = network_policy_mgr
        self._queue = vnc_kube_config.queue()
        self._service_fip_pool = vnc_kube_config.service_fip_pool()
        self._args = vnc_kube_config.args()
        self._logger = vnc_kube_config.logger()

    def _get_label_diff(self, new_labels, vm):
        """Diff ``new_labels`` against the labels cached on ``vm``.

        Returns None when unchanged; otherwise a dict with keys
        'added', 'removed' and 'changed'.  Note 'changed' maps each key
        to its OLD value, not the new one.
        """
        old_labels = vm.pod_labels
        if old_labels == new_labels:
            return None
        diff = dict()
        added = {}
        removed = {}
        changed = {}
        keys = set(old_labels.keys()) | set(new_labels.keys())
        for k in keys:
            if k not in old_labels.keys():
                added[k] = new_labels[k]
                continue
            if k not in new_labels.keys():
                removed[k] = old_labels[k]
                continue
            if old_labels[k] == new_labels[k]:
                continue
            changed[k] = old_labels[k]
        diff['added'] = added
        diff['removed'] = removed
        diff['changed'] = changed
        return diff

    def _set_label_to_pod_cache(self, new_labels, vm):
        """Register every label->pod mapping and cache the labels on ``vm``."""
        for label in new_labels.items():
            key = self._label_cache._get_key(label)
            self._label_cache._locate_label(key,
                self._label_cache.pod_label_cache, label, vm.uuid)
        vm.pod_labels = new_labels

    def _clear_label_to_pod_cache(self, vm):
        """Remove this pod from the label cache and reset ``vm.pod_labels``."""
        if not vm.pod_labels:
            return
        for label in vm.pod_labels.items() or []:
            key = self._label_cache._get_key(label)
            self._label_cache._remove_label(key,
                self._label_cache.pod_label_cache, label, vm.uuid)
        vm.pod_labels = None

    def _update_label_to_pod_cache(self, new_labels, vm):
        """Replace the pod's cached labels: clear old entries, then set new."""
        self._clear_label_to_pod_cache(vm)
        self._set_label_to_pod_cache(new_labels, vm)

    def _get_network(self, pod_id, pod_namespace):
        """
        Get the virtual network corresponding to this namespace.

        Isolated namespaces use their own network; otherwise (or when the
        namespace has no network) the cluster-wide pod network is used.
        Returns the vnc VirtualNetwork object.
        """
        vn_fq_name = None
        if self._is_pod_network_isolated(pod_namespace) == True:
            ns = self._get_namespace(pod_namespace)
            if ns:
                vn_fq_name = ns.get_network_fq_name()
        # If no network was found on the namespace, default to the cluster
        # pod network.
        if not vn_fq_name:
            vn_fq_name = ['default-domain', 'default', 'cluster-network']
        vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
        return vn_obj

    def _get_namespace(self, pod_namespace):
        """Look up the cached NamespaceKM object by name or uuid."""
        return NamespaceKM.find_by_name_or_uuid(pod_namespace)

    def _is_pod_network_isolated(self, pod_namespace):
        """True when the pod's namespace is network-isolated."""
        return self._get_namespace(pod_namespace).is_isolated()

    def _is_pod_nested(self):
        # Pod is nested if we are configured to run in nested mode.
        return DBBaseKM.is_nested()

    def _get_host_ip(self, pod_name):
        """IP of the node hosting the pod, or None if the pod is unknown."""
        pod = PodKM.find_by_name_or_uuid(pod_name)
        if pod:
            return pod.get_host_ip()
        return None

    def _create_iip(self, pod_name, vn_obj, vmi):
        """Allocate an instance-ip for the pod from the pod ipam subnet."""
        # Instance-ip for pods are ALWAYS allocated from pod ipam on this
        # VN. Get the subnet uuid of the pod ipam on this VN, so we can request
        # an IP from it.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid())
        pod_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(
            vnc_kube_config.pod_ipam_fq_name())
        # Create instance-ip.
        iip_obj = InstanceIp(name=pod_name, subnet_uuid=pod_ipam_subnet_uuid)
        iip_obj.add_virtual_network(vn_obj)
        # Creation of iip requires the vmi vnc object.
        vmi_obj = self._vnc_lib.virtual_machine_interface_read(
            fq_name=vmi.fq_name)
        iip_obj.add_virtual_machine_interface(vmi_obj)
        try:
            self._vnc_lib.instance_ip_create(iip_obj)
        except RefsExistError:
            # Already exists: refresh it instead of failing.
            self._vnc_lib.instance_ip_update(iip_obj)
        InstanceIpKM.locate(iip_obj.uuid)
        return iip_obj

    def _get_host_vmi(self, pod_name):
        """For nested pods: find the VMI of the underlay VM hosting the pod.

        Resolves host ip -> instance-ip -> attached VMIs, returning the
        first VMI that carries a host_id; None when not found.
        """
        host_ip = self._get_host_ip(pod_name)
        if host_ip:
            iip = InstanceIpKM.get_object(host_ip)
            if iip:
                for vmi_id in iip.virtual_machine_interfaces:
                    vm_vmi = VirtualMachineInterfaceKM.get(vmi_id)
                    if vm_vmi and vm_vmi.host_id:
                        return vm_vmi
        return None

    def _create_cluster_service_fip(self, pod_name, vmi_uuid):
        """
        Isolated Pods in the cluster will be allocated a floating ip
        from the cluster service network, so that the pods can talk
        to cluster services.

        No-op when no service floating-ip pool is configured.
        """
        if not self._service_fip_pool:
            return
        # Construct parent ref.
        fip_pool_obj = FloatingIpPool()
        fip_pool_obj.uuid = self._service_fip_pool.uuid
        fip_pool_obj.fq_name = self._service_fip_pool.fq_name
        fip_pool_obj.name = self._service_fip_pool.name
        # Create Floating-Ip object.
        fip_obj = FloatingIp(name="cluster-svc-fip-%s"% (pod_name),
            parent_obj=fip_pool_obj,
            floating_ip_traffic_direction='egress')
        # Creation of fip requires the vmi vnc object.
        vmi_obj = self._vnc_lib.virtual_machine_interface_read(
            id=vmi_uuid)
        fip_obj.set_virtual_machine_interface(vmi_obj)
        try:
            fip_uuid = self._vnc_lib.floating_ip_create(fip_obj)
        except RefsExistError:
            fip_uuid = self._vnc_lib.floating_ip_update(fip_obj)
        # Cached service floating ip.
        FloatingIpKM.locate(fip_uuid)
        return

    def _associate_security_groups(self, vmi_obj, proj_obj, ns=None):
        """Attach the project default SG and, if given, the namespace SG."""
        sg_obj = SecurityGroup("default", proj_obj)
        vmi_obj.add_security_group(sg_obj)
        if ns:
            ns_sg_name = "ns-" + ns
            sg_obj = SecurityGroup(ns_sg_name, proj_obj)
            vmi_obj.add_security_group(sg_obj)
        return

    def _create_vmi(self, pod_name, pod_namespace, vm_obj, vn_obj,
                    parent_vmi):
        """Create (or refresh) the pod's virtual-machine-interface.

        For nested pods a vlan tag allocated from the parent VMI is set
        as a sub-interface property.  Returns the VMI uuid.
        """
        proj_fq_name = ['default-domain', pod_namespace]
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        vmi_prop = None
        if self._is_pod_nested() and parent_vmi:
            # Pod is nested.
            # Allocate a vlan-id for this pod from the vlan space managed
            # in the VMI of the underlay VM.
            parent_vmi = VirtualMachineInterfaceKM.get(parent_vmi.uuid)
            vlan_id = parent_vmi.alloc_vlan()
            vmi_prop = VirtualMachineInterfacePropertiesType(
                sub_interface_vlan_tag=vlan_id)
        obj_uuid = str(uuid.uuid1())
        name = 'pod' + '-' + pod_name
        vmi_obj = VirtualMachineInterface(name=name, parent_obj=proj_obj,
            virtual_machine_interface_properties=vmi_prop)
        vmi_obj.uuid = obj_uuid
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine(vm_obj)
        self._associate_security_groups(vmi_obj, proj_obj, pod_namespace)
        try:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_create(vmi_obj)
        except RefsExistError:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_update(vmi_obj)
        VirtualMachineInterfaceKM.locate(vmi_uuid)
        return vmi_uuid

    def _create_vm(self, pod_id, pod_name, labels):
        """Create the virtual-machine object for the pod (uuid == pod_id).

        Annotated with device_owner=K8S:POD and the pod labels as JSON.
        """
        vm_obj = VirtualMachine(name=pod_name)
        vm_obj.uuid = pod_id
        annotations = {}
        annotations['device_owner'] = 'K8S:POD'
        for key in annotations:
            vm_obj.add_annotations(KeyValuePair(key=key, value=annotations[key]))
        vm_obj.add_annotations(KeyValuePair(key='labels', value=json.dumps(labels)))
        try:
            self._vnc_lib.virtual_machine_create(vm_obj)
        except RefsExistError:
            # Already created: read back the existing object.
            vm_obj = self._vnc_lib.virtual_machine_read(id=pod_id)
        vm = VirtualMachineKM.locate(vm_obj.uuid)
        return vm_obj

    def _link_vm_to_node(self, vm_obj, pod_node):
        """Add a virtual-router -> virtual-machine ref for the pod's node.

        NOTE(review): a failed virtual_router_read is silently swallowed
        here, leaving the VM unlinked — confirm this best-effort behavior
        is intentional.
        """
        vrouter_fq_name = ['default-global-system-config', pod_node]
        try:
            vrouter_obj = self._vnc_lib.virtual_router_read(fq_name=vrouter_fq_name)
        except Exception as e:
            return
        self._vnc_lib.ref_update('virtual-router', vrouter_obj.uuid,
            'virtual-machine', vm_obj.uuid, None, 'ADD')
        vm = VirtualMachineKM.get(vm_obj.uuid)
        if vm:
            vm.virtual_router = vrouter_obj.uuid

    def _check_pod_uuid_change(self, pod_uuid, pod_name, pod_namespace):
        """Delete the stale VM if a VM with this pod's name has another uuid.

        NOTE(review): the fq-name lookup goes through LoadbalancerKM —
        presumably the fq-name-to-uuid map is shared across DBBaseKM
        subclasses; confirm VirtualMachineKM was not intended here.
        """
        vm_fq_name = [pod_name]
        vm_uuid = LoadbalancerKM.get_fq_name_to_uuid(vm_fq_name)
        if vm_uuid != pod_uuid:
            self.vnc_pod_delete(vm_uuid)

    def vnc_pod_add(self, pod_id, pod_name, pod_namespace, pod_node, labels,
                    vm_vmi):
        """Create all VNC objects for a new pod.

        If the VM already exists only the label cache is refreshed.
        Creates VM, VMI, instance-ip, optional service floating-ip for
        isolated namespaces, and links the VM to its node's vrouter.
        """
        vm = VirtualMachineKM.get(pod_id)
        if vm:
            # Pod already known: just refresh the label cache.
            self._set_label_to_pod_cache(labels, vm)
            return
        if not vm:
            # A pod recreated with the same name gets a new uuid; drop the
            # stale VM before creating the new one.
            self._check_pod_uuid_change(pod_id, pod_name, pod_namespace)
        vn_obj = self._get_network(pod_id, pod_namespace)
        vm_obj = self._create_vm(pod_id, pod_name, labels)
        vmi_uuid = self._create_vmi(pod_name, pod_namespace, vm_obj, vn_obj,
                                    vm_vmi)
        vmi = VirtualMachineInterfaceKM.get(vmi_uuid)
        if self._is_pod_nested() and vm_vmi:
            # Pod is nested.
            # Link the pod VMI to the VMI of the underlay VM.
            self._vnc_lib.ref_update('virtual-machine-interface', vm_vmi.uuid,
                'virtual-machine-interface', vmi_uuid, None, 'ADD')
            self._vnc_lib.ref_update('virtual-machine-interface', vmi_uuid,
                'virtual-machine-interface', vm_vmi.uuid, None, 'ADD')
            # get host id for vm vmi
            vr_uuid = None
            for vr in VirtualRouterKM.values():
                if vr.name == vm_vmi.host_id:
                    vr_uuid = vr.uuid
                    break
            if not vr_uuid:
                self._logger.error("No virtual-router object found for host: "
                    + vm_vmi.host_id + ". Unable to add VM reference to a"
                    " valid virtual-router")
                return
            self._vnc_lib.ref_update('virtual-router', vr_uuid,
                'virtual-machine', vm_obj.uuid, None, 'ADD')
        self._create_iip(pod_name, vn_obj, vmi)
        if self._is_pod_network_isolated(pod_namespace):
            self._create_cluster_service_fip(pod_name, vmi_uuid)
        self._link_vm_to_node(vm_obj, pod_node)

    def vnc_pod_update(self, pod_id, pod_name, pod_namespace, pod_node, labels,
                       vm_vmi):
        """Handle a pod MODIFIED event.

        Creates the pod lazily if unseen, then refreshes the label cache.
        Returns the label diff (see _get_label_diff) or None.
        """
        label_diff = None
        vm = VirtualMachineKM.get(pod_id)
        if not vm:
            # If the vm is not created yet, do so now.
            self.vnc_pod_add(pod_id, pod_name, pod_namespace,
                pod_node, labels, vm_vmi)
            vm = VirtualMachineKM.get(pod_id)
        if vm:
            label_diff = self._get_label_diff(labels, vm)
            if not label_diff:
                return label_diff
            self._update_label_to_pod_cache(labels, vm)
        return label_diff

    def vnc_port_delete(self, vmi_id):
        """Delete a VMI together with its instance-ips and floating-ips.

        NoIdError (already gone) is ignored at every step.
        """
        vmi = VirtualMachineInterfaceKM.get(vmi_id)
        if not vmi:
            return
        for iip_id in list(vmi.instance_ips):
            try:
                self._vnc_lib.instance_ip_delete(id=iip_id)
            except NoIdError:
                pass
        # Cleanup floating ip's on this interface.
        for fip_id in list(vmi.floating_ips):
            try:
                self._vnc_lib.floating_ip_delete(id=fip_id)
            except NoIdError:
                pass
        try:
            self._vnc_lib.virtual_machine_interface_delete(id=vmi_id)
        except NoIdError:
            pass

    def vnc_pod_delete(self, pod_id):
        """Tear down all VNC objects for a pod: cache entry, vrouter ref,
        interfaces (with their IPs) and finally the VM itself."""
        vm = VirtualMachineKM.get(pod_id)
        if not vm:
            return
        self._clear_label_to_pod_cache(vm)
        if vm.virtual_router:
            self._vnc_lib.ref_update('virtual-router', vm.virtual_router,
                'virtual-machine', vm.uuid, None, 'DELETE')
        for vmi_id in list(vm.virtual_machine_interfaces):
            self.vnc_port_delete(vmi_id)
        try:
            self._vnc_lib.virtual_machine_delete(id=pod_id)
        except NoIdError:
            pass

    def _create_pod_event(self, event_type, pod_id, vm_obj):
        """Enqueue a synthetic kubernetes-style pod event (only 'delete'
        produces an event; other types are silently dropped)."""
        event = {}
        object = {}
        object['kind'] = 'Pod'
        object['metadata'] = {}
        object['metadata']['uid'] = pod_id
        object['metadata']['labels'] = vm_obj.pod_labels
        if event_type == 'delete':
            event['type'] = 'DELETED'
            event['object'] = object
            self._queue.put(event)
        return

    def _sync_pod_vm(self):
        """Queue DELETE events for VNC VMs whose pods no longer exist.

        Only VMs annotated with device_owner=K8S:POD are considered.
        NOTE(review): the loop variable ``uuid`` shadows the imported
        uuid module within this method.
        """
        vm_uuid_list = list(VirtualMachineKM.keys())
        pod_uuid_list = list(PodKM.keys())
        for uuid in vm_uuid_list:
            if uuid in pod_uuid_list:
                continue
            vm = VirtualMachineKM.get(uuid)
            if not vm:
                continue
            if not vm.annotations:
                continue
            for kvp in vm.annotations['key_value_pair'] or []:
                if kvp['key'] == 'device_owner' \
                   and kvp['value'] == 'K8S:POD':
                    self._create_pod_event('delete', uuid, vm)
                    break
        return

    def pod_timer(self):
        """Periodic hook: reconcile VNC VMs against known pods."""
        self._sync_pod_vm()
        return

    def process(self, event):
        """Dispatch a kubernetes pod watch event (ADDED/MODIFIED/DELETED)
        to the corresponding vnc_pod_* handler and the network policy
        manager."""
        event_type = event['type']
        kind = event['object'].get('kind')
        pod_id = event['object']['metadata'].get('uid')
        pod_name = event['object']['metadata'].get('name')
        pod_namespace = event['object']['metadata'].get('namespace')
        labels = event['object']['metadata'].get('labels', {})
        print("%s - Got %s %s %s:%s"
              %(self._name, event_type, kind, pod_namespace, pod_name))
        self._logger.debug("%s - Got %s %s %s:%s"
            %(self._name, event_type, kind, pod_namespace, pod_name))
        if event['type'] == 'ADDED' or event['type'] == 'MODIFIED':
            # Pods on the host network are not managed here; skip them.
            pod_node = event['object']['spec'].get('nodeName')
            host_network = event['object']['spec'].get('hostNetwork')
            if host_network:
                return
            # If the pod is nested, proceed ONLY if host vmi is found.
            vm_vmi = None
            if self._is_pod_nested():
                vm_vmi = self._get_host_vmi(pod_name)
                if not vm_vmi:
                    return
            if event['type'] == 'ADDED':
                self.vnc_pod_add(pod_id, pod_name, pod_namespace,
                    pod_node, labels, vm_vmi)
                self._network_policy_mgr.vnc_pod_add(event)
            else:
                label_diff = self.vnc_pod_update(pod_id, pod_name,
                    pod_namespace, pod_node, labels, vm_vmi)
                self._network_policy_mgr.vnc_pod_update(event, label_diff)
        elif event['type'] == 'DELETED':
            self.vnc_pod_delete(pod_id)
            self._network_policy_mgr.vnc_pod_delete(event)
|
<reponame>drzaxx/UAV3Dbeamforming
import tensorflow as tf
import numpy as np
import scipy.io as io
from tensorflow.python.keras import *
# Scenario constants for the UAV 3D-beamforming setup.
N = 100000  # number of channel samples (load_mat1 slices rows [N:2N])
t = 2  # (,*) dimension of G
# parameters
# N_x, N_y: transmit array dimensions; N_b, N_e: Bob / Eve antenna counts
# -- presumably; TODO confirm against the channel .mat generation code.
N_x, N_y, N_b, N_e = 4, 4, 6, 6
c_a = np.array([[0], [0], [0]])        # transmitter (Alice) position
c_b = np.array([[-100], [150], [200]]) # legitimate receiver (Bob) position
# c_e = np.array([[100], [150], [220]])
# Eavesdropper positions are loaded from .mat files.
c_e = io.loadmat('./c_e/c_e1.mat')['c__e']
c_e2 = io.loadmat('./c_e/c_e_multiEve2.mat')['c__e1']
c_e3 = io.loadmat('./c_e/c_e_multiEve3.mat')['c__e2']
beta_0_dB = -70
beta_0 = 10**(beta_0_dB/10)  # reference path gain (linear scale)
eta_b, eta_e = 3.2, 3.2      # path-loss exponents for Bob / Eves
d_b, d_e = np.linalg.norm(c_a-c_b), np.linalg.norm(c_a-c_e)
d_e2, d_e3 = np.linalg.norm(c_a-c_e2), np.linalg.norm(c_a-c_e3)
# Large-scale SNR factors, repeated per sample with a trailing axis of 1.
snr_b = beta_0*d_b**(-1*eta_b)
snr_b = np.expand_dims(np.repeat(snr_b, N), -1)
snr_e = beta_0*d_e**(-1*eta_e)
snr_e = np.expand_dims(np.repeat(snr_e, N), -1)
snr_e2, snr_e3 = beta_0*d_e2**(-1*eta_e), beta_0*d_e3**(-1*eta_e)
snr_e2, snr_e3 = np.expand_dims(np.repeat(snr_e2, N), -1), np.expand_dims(np.repeat(snr_e3, N), -1)
delta_ = np.expand_dims(np.repeat(1e-12, N), -1)  # noise floor term
def load_mat(path):
    """Load the four channel matrices stored as .mat files under ``path``.

    Returns (H_bt, H_et, H_bk, H_ek), each read from '<path><name>.mat'
    under the variable of the same name.
    """
    names = ('H_bt', 'H_et', 'H_bk', 'H_ek')
    return tuple(io.loadmat(path + name + '.mat')[name] for name in names)
def load_mat1(path):
    """Load the second block of N samples (rows N..2N-1) of each channel.

    Same files and variables as load_mat, but sliced to the second batch
    of the global sample count ``N``.
    """
    def second_block(name):
        return io.loadmat(path + name + '.mat')[name][N:2 * N, :, :]

    return (second_block('H_bt'), second_block('H_et'),
            second_block('H_bk'), second_block('H_ek'))
def f_G_and_power(temp):
    """Split the network output into a power-scaled beamformer and AN matrix.

    temp: (f_G_temp, P_a0) — f_G_temp stacks, along axis 1, the real and
    imaginary parts of the information beamformer f (N_x*N_y complex
    entries) followed by those of the artificial-noise matrix G
    (N_x*N_y*t complex entries); P_a0 is the transmit power
    (assumes batch-major layout — TODO confirm).

    Returns (f, G1): complex tensors of shape (batch, N_x*N_y) and
    (batch, N_x*N_y, t) respectively.
    """
    f_G_temp, P_a0 = temp
    P_a0 = P_a0[0, :]
    # Normalise the whole stacked vector to unit L2 norm, then scale by
    # sqrt(P_a0) so the total transmit power equals P_a0.
    f_G_temp = tf.nn.l2_normalize(f_G_temp, axis=1, epsilon=1e-10, name='nn_l2_norm')
    # f_G_temp = backend.dot(f_G_temp, tf.sqrt(P_a0))
    f_G_temp = tf.sqrt(P_a0)*f_G_temp
    # Slice out real/imag parts: first 2*N_x*N_y entries are f, the rest G.
    f_0_real, f_0_imag = f_G_temp[:, 0:N_x*N_y], f_G_temp[:, N_x*N_y:2*N_x*N_y]
    G_0_real, G_0_imag = f_G_temp[:, 2*N_x*N_y:2*N_x*N_y+N_x*N_y*t],\
        f_G_temp[:, 2*N_x*N_y+N_x*N_y*t:2*N_x*N_y+2*N_x*N_y*t]
    f = tf.complex(f_0_real, f_0_imag)
    G = tf.complex(G_0_real, G_0_imag)
    # Reshape the flat G vector into (batch, N_x*N_y, t) by splitting
    # along axis 1 and stacking the t pieces on a new axis.
    G1 = tf.concat(tf.split(tf.expand_dims(G, 2), num_or_size_splits=int(t), axis=1), 2)
    return f, G1
    # return f_0_imag
def Loss_calculating(temp):
    """Negative worst-case secrecy rate: -(R_sb - max(R_se, R_se2, R_se3)).

    temp unpacks to the beamformer f, AN matrix G, the channel matrices of
    Bob and the three eavesdroppers, their SNR factors and the noise term.
    For each receiver the achievable rate is
    log2 det(I + snr * (H f)(H f)^H * K^-1), where
    K = snr * (H G)(H G)^H + delta * I is the AN-plus-noise covariance.
    Returns the loss with a trailing axis of 1 (batch, 1).
    """
    f, G, H_bt, H_et, H_et2, H_et3, snrb, snre, snre2, snre3, delta = temp
    # The scalar factors arrive batched; keep a single row of each.
    snrb = snrb[0, :]
    snre, snre2, snre3 = snre[0, :], snre2[0, :], snre3[0, :]
    delta = delta[0, :]
    # --- Bob's rate R_sb ---
    aa = backend.batch_dot(H_bt, f)
    aa1 = backend.batch_dot(tf.expand_dims(aa, 2), tf.transpose(tf.expand_dims(aa, 2), perm=[0, 2, 1], conjugate=True))
    bb = backend.batch_dot(H_bt, G)
    bb1 = backend.batch_dot(bb, tf.transpose(bb, perm=[0, 2, 1], conjugate=True))
    K_nb = snrb*bb1 + delta*tf.cast(tf.eye(N_b), tf.complex64)
    tempb = snrb*backend.batch_dot(aa1, tf.matrix_inverse(K_nb))
    # --- Eve 1's rate R_se ---
    aae = backend.batch_dot(H_et, f)
    aae1 = backend.batch_dot(tf.expand_dims(aae, 2), tf.transpose(tf.expand_dims(aae, 2), perm=[0, 2, 1], conjugate=True))
    bbe = backend.batch_dot(H_et, G)
    bbe1 = backend.batch_dot(bbe, tf.transpose(bbe, perm=[0, 2, 1], conjugate=True))
    K_ne = snre*bbe1 + delta*tf.cast(tf.eye(N_e), tf.complex64)
    tempe = snre*backend.batch_dot(aae1, tf.matrix_inverse(K_ne))
    R_sb = tf.math.log(tf.cast(tf.matrix_determinant(tf.cast(tf.eye(N_b), tf.complex64)+tempb), tf.float32))/tf.math.log(2.)
    R_se = tf.math.log(tf.cast(tf.matrix_determinant(tf.cast(tf.eye(N_e), tf.complex64)+tempe), tf.float32))/tf.math.log(2.)
    # --- Eve 2's rate R_se2 ---
    aaeS2 = backend.batch_dot(H_et2, f)
    aae2 = backend.batch_dot(tf.expand_dims(aaeS2, 2), tf.transpose(tf.expand_dims(aaeS2, 2), perm=[0, 2, 1], conjugate=True))
    bbe2 = backend.batch_dot(H_et2, G)
    bbe2 = backend.batch_dot(bbe2, tf.transpose(bbe2, perm=[0, 2, 1], conjugate=True))
    K_ne2 = snre2 * bbe2 + delta * tf.cast(tf.eye(N_e), tf.complex64)
    tempe2 = snre2 * backend.batch_dot(aae2, tf.matrix_inverse(K_ne2))
    R_se2 = tf.math.log(
        tf.cast(tf.matrix_determinant(tf.cast(tf.eye(N_e), tf.complex64) + tempe2), tf.float32)) / tf.math.log(2.)
    # --- Eve 3's rate R_se3 ---
    aaeS3 = backend.batch_dot(H_et3, f)
    aae3 = backend.batch_dot(tf.expand_dims(aaeS3, 2), tf.transpose(tf.expand_dims(aaeS3, 2), perm=[0, 2, 1], conjugate=True))
    bbe3 = backend.batch_dot(H_et3, G)
    bbe3 = backend.batch_dot(bbe3, tf.transpose(bbe3, perm=[0, 2, 1], conjugate=True))
    K_ne3 = snre3 * bbe3 + delta * tf.cast(tf.eye(N_e), tf.complex64)
    tempe3 = snre3 * backend.batch_dot(aae3, tf.matrix_inverse(K_ne3))
    R_se3 = tf.math.log(
        tf.cast(tf.matrix_determinant(tf.cast(tf.eye(N_e), tf.complex64) + tempe3), tf.float32)) / tf.math.log(2.)
    # Secrecy rate against the strongest eavesdropper; negate for minimization.
    SSS = tf.reduce_max([R_se, R_se2, R_se3], 0)
    Loss = tf.expand_dims(R_sb-SSS, -1)
    # ss = tf.raw_ops.MatrixDeterminant(input=tf.cast(tf.eye(N_b), tf.complex64)+tempb)
    return -Loss
def self_defined_mean(y_true, y_pred):
    """Keras loss wrapper: average ``y_pred`` over its last axis.

    ``y_true`` is accepted to satisfy the Keras loss signature but ignored.
    """
    return backend.mean(y_pred, axis=-1)
def expand_cnn(temp):
    """Append a trailing singleton axis (e.g. a channel dim for CNN layers)."""
    expanded = tf.expand_dims(temp, -1)
    return expanded
# with tf.Session() as sess:
# ... print(sess.run(tf.reduce_max(A)))
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: flattrs_test
import flatbuffers
class AllScalarsWithDefaults(object):
    """FlatBuffers table accessor (auto-generated; do not hand-edit logic).

    Each accessor looks up its field's vtable offset and returns the stored
    value, or the schema default (True / 1 / 1.0) when the field is absent.
    """
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsAllScalarsWithDefaults(cls, buf, offset):
        # Read the root table offset and position an accessor over it.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = AllScalarsWithDefaults()
        x.Init(buf, n + offset)
        return x

    # AllScalarsWithDefaults
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # AllScalarsWithDefaults
    def Boolean(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return True

    # AllScalarsWithDefaults
    def Uint8(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
        return 1

    # AllScalarsWithDefaults
    def Uint16(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint16Flags, o + self._tab.Pos)
        return 1

    # AllScalarsWithDefaults
    def Uint32(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
        return 1

    # AllScalarsWithDefaults
    def Uint64(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
        return 1

    # AllScalarsWithDefaults
    def Int8(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 1

    # AllScalarsWithDefaults
    def Int16(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int16Flags, o + self._tab.Pos)
        return 1

    # AllScalarsWithDefaults
    def Int32(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 1

    # AllScalarsWithDefaults
    def Int64(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 1

    # AllScalarsWithDefaults
    def Float32(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
        return 1.0

    # AllScalarsWithDefaults
    def Float64(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
        return 1.0
# Auto-generated builder helpers: call Start, then each Add* setter (the
# third argument is the schema default; equal values are omitted from the
# buffer), then End to finish the 11-field table.
def AllScalarsWithDefaultsStart(builder): builder.StartObject(11)
def AllScalarsWithDefaultsAddBoolean(builder, boolean): builder.PrependBoolSlot(0, boolean, 1)
def AllScalarsWithDefaultsAddUint8(builder, uint8): builder.PrependUint8Slot(1, uint8, 1)
def AllScalarsWithDefaultsAddUint16(builder, uint16): builder.PrependUint16Slot(2, uint16, 1)
def AllScalarsWithDefaultsAddUint32(builder, uint32): builder.PrependUint32Slot(3, uint32, 1)
def AllScalarsWithDefaultsAddUint64(builder, uint64): builder.PrependUint64Slot(4, uint64, 1)
def AllScalarsWithDefaultsAddInt8(builder, int8): builder.PrependInt8Slot(5, int8, 1)
def AllScalarsWithDefaultsAddInt16(builder, int16): builder.PrependInt16Slot(6, int16, 1)
def AllScalarsWithDefaultsAddInt32(builder, int32): builder.PrependInt32Slot(7, int32, 1)
def AllScalarsWithDefaultsAddInt64(builder, int64): builder.PrependInt64Slot(8, int64, 1)
def AllScalarsWithDefaultsAddFloat32(builder, float32): builder.PrependFloat32Slot(9, float32, 1.0)
def AllScalarsWithDefaultsAddFloat64(builder, float64): builder.PrependFloat64Slot(10, float64, 1.0)
def AllScalarsWithDefaultsEnd(builder): return builder.EndObject()
|
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import pandas as pd
import h5py
import random
import os
from VITAE import VITAE, get_igraph, leidenalg_igraph, load_data
# VITAE trajectory-inference pipeline: load the merged mouse-brain dataset,
# pre-train, cluster the latent space, train, infer a trajectory, and export
# per-branch expression/pseudotime/covariate tables under result/.
file_name = 'mouse_brain_merged'
data = load_data(path='data/',
    file_name=file_name)
# Fix all RNG seeds for reproducibility.
seed = 0
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
model = VITAE()
model.get_data(data['count'], # count or expression matrix, (dense or sparse) numpy array
    labels = data['grouping'], # (optional) labels, which will be converted to string
    covariate = data['covariates'],#None,#, # (optional) covariates
    gene_names = data['gene_names'], # (optional) gene names, which will be converted to string
    cell_names = data['cell_ids'] # (optional) cell names, which will be converted to string
    )
model.preprocess_data(gene_num = 2000, # (optional) maximum number of influential genes to keep (the default is 2000)
    data_type = 'Gaussian', # (optional) data_type can be 'UMI', 'non-UMI' or 'Gaussian' (the default is 'UMI')
    npc = 64 # (optional) number of PCs to keep if data_type='Gaussian' (the default is 64)
    )
model.build_model(dim_latent = 8, # The size of the latent dimension
    dimensions=[32] # The size of each layer in the encoder between the input layer and the
    # latent layer. The size of each layer in the decoder is the reverse.
    )
model.pre_train(test_size = 0.1, # (Optional) the proportion or size of the test set.
    random_state = seed, # (Optional) the random state of data splitting.
    batch_size=256, # (Optional) the batch size for pre-training (the default is 32).
    alpha=0.10, # (Optional) the value of alpha in [0,1] to encourage covariate adjustment. Not used if there is no covariates.
    num_epoch = 300, # (Optional) the maximum number of epoches (the default is 300).
    )
# Get latent representations of X after pre-training
z = model.get_latent_z()
# Leiden clustering on a kNN graph of the latent space seeds the mixture.
g = get_igraph(z, random_state=seed)
labels = leidenalg_igraph(g, 0.65, random_state=seed)
NUM_CLUSTER = len(np.unique(labels))
model.init_latent_space(
    NUM_CLUSTER, # number of clusters
    cluster_labels=labels, # (optional) clustering labels or their names for plotting
    )
model.train(test_size = 0.1, # (Optional) the proportion or size of the test set.
    random_state = seed, # (Optional) the random state of data splitting.
    batch_size=256, # (Optional) the batch size for pre-training (the default is 32).
    alpha=0.10, # (Optional) the value of alpha in [0,1] to encourage covariate adjustment. Not used if there is no covariates.
    beta=2, # (Optional) the value of beta in beta-VAE.
    num_epoch = 300, # (Optional) the maximum number of epoches (the default is 300).
    early_stopping_warmup=10, # (Optional) the number of warmup epoches (the default is 0).
    #**kwargs # (Optional ) extra key-value arguments for calling the dimension reduction algorithms.
    )
model.init_inference(batch_size=128,
    L=150, # L is the number of MC samples
    dimred='umap', # dimension reduction methods
    #**kwargs # extra key-value arguments for dimension reduction algorithms.
    random_state=seed
    )
# save model weight with parameters in the latent space and inference results (embedding, posterior estimations)
# model.save_model(path_to_file='../weight/tutorial_mouse_brain_merged/mouse_brain_inference.checkpoint')
# load model weight
# model.load_model(path_to_file='../weight/tutorial_mouse_brain_merged/mouse_brain_inference.checkpoint', load_labels=True)
import networkx as nx
G = model.comp_inference_score(method='modified_map', # 'mean', 'modified_mean', 'map', and 'modified_map'
    no_loop=True # if no_loop=True, then find the maximum spanning tree
    )
# Cell-collection day parsed from characters 1:3 of each cell id — assumes
# ids encode the day there; TODO confirm against the dataset's id format.
days = np.array([i[1:3] for i in data['cell_ids']], dtype=np.float32)
begin_node_pred = model.select_root(days, 'sum')
modified_G, modified_w, pseudotime = model.infer_trajectory(
    init_node=begin_node_pred, # initial node for computing pseudotime.
    cutoff=0.09 # (Optional) cutoff score for edges (the default is 0.01).
    )
# Boolean masks selecting the cells on each of the two branches of interest
# (cells on the connecting edges, or assigned >0.99 to a branch vertex).
id_branches = [((modified_w[:,3] > 0.0)&(modified_w[:,0] > 0.0)) | \
    ((modified_w[:,0] > 0.0)&(modified_w[:,11] > 0.0)) | \
    (modified_w[:,3] > 0.99) | \
    (modified_w[:,0] > 0.99) | \
    (modified_w[:,11] > 0.99),
    ((modified_w[:,0] > 0.0)&(modified_w[:,5] > 0.0)) | \
    ((modified_w[:,5] > 0.0)&(modified_w[:,4] > 0.0)) | \
    ((modified_w[:,4] > 0.0)&(modified_w[:,1] > 0.0)) | \
    (modified_w[:,0] > 0.99) | \
    (modified_w[:,5] > 0.99) | \
    (modified_w[:,4] > 0.99) | \
    (modified_w[:,1] > 0.99)]
branch_names = ['branch 3-0-11', 'branch 0-5-4-1']
# Export expression, pseudotime, covariates and cell-day tables per branch.
for i in range(2):
    id_branch = id_branches[i]
    branch_name = branch_names[i]
    model.set_cell_subset(
        model.cell_names[id_branch]
        )
    os.makedirs('result/%s'%branch_name, exist_ok=True)
    with h5py.File('result/%s/expression.h5'%branch_name, 'w') as f:
        f.create_dataset('expression',
            data=model.expression[model.selected_cell_subset_id,:], compression="gzip", compression_opts=9
            )
        f.create_dataset('gene_names',
            data=model.gene_names.astype('bytes'), compression="gzip", compression_opts=9
            )
        f.create_dataset('cell_ids',
            data=model.selected_cell_subset.astype('bytes'), compression="gzip", compression_opts=9
            )
    pd.DataFrame(pseudotime[id_branch],
        index=model.selected_cell_subset,
        columns=['pseudotime']
        ).to_csv('result/%s/pseudotime.csv'%branch_name)
    pd.DataFrame(data['covariates'][id_branch,:],
        index=model.selected_cell_subset,
        columns=['S_score','G2M_score','id_data']
        ).to_csv('result/%s/covariate.csv'%branch_name)
    pd.DataFrame(np.array([i[:3] for i in data['cell_ids']])[id_branch],
        index=model.selected_cell_subset,
        columns=['cell_day']
        ).to_csv('result/%s/cell_day.csv'%branch_name)
# configuration steps import
import subprocess
import traceback
from nedgeBlockerException import NedgeBlockerException
from steps.firewallCheck import FirewallCheck
from steps.baseConfigurationStep import BaseConfigurationStep
from steps.nedeployRCConfig import NedeployRCConfig
from steps.nedeployBashActivation import NedeployBashActivation
from steps.nedeployInstall import NedeployInstall
from steps.nedeployPrecheck import NedeployPrecheck
from steps.neadmRCConfig import NeadmRCConfig
from steps.neadmInitWait import NeadmInitWait
from steps.neadmSystemInit import NeadmSystemInit
from steps.neadmLicenseActivation import NeadmLicenseActivation
from steps.neadmOnlineNodesWait import NeadmOnlineNodesWait
from steps.neadmClusterCreation import NeadmClusterCreation
from steps.waitAuditService import WaitAuditService
from steps.waitNodeUUID import WaitNodeUUID
from steps.systemPreConfig import SystemPreConfig
from steps.sshConfig import SSHConfig
from steps.systemPostConfig import SystemPostConfig
class NedgeBaseConfigurator:
    """Runs an ordered list of configuration steps against a shared environment.

    Each step must be a ``BaseConfigurationStep``; its ``process`` method is
    called with the shared ``environment`` dict. Blocker exceptions are
    collected so callers can inspect them via :meth:`get_blockers`.
    """

    def __init__(self, environment=None, steps=None):
        # Avoid mutable default arguments: a shared dict/list default would
        # leak state between configurator instances.
        self.environment = {} if environment is None else environment
        self.steps = [] if steps is None else steps
        self.blockers = []

    def configure(self):
        """Execute every step in order.

        Returns:
            bool: True if all steps completed, False on any failure.
        """
        print('Configuration started')
        # reset blockers from any previous run
        self.blockers = []
        try:
            for step in self.steps:
                if isinstance(step, BaseConfigurationStep):
                    # configuration step virtual method
                    step.print_step_name()
                    step.process(self.environment)
                else:
                    # Fixed missing space in the implicitly-concatenated
                    # message ("objectin" -> "object in").
                    print('WARNING: There is unknown object'
                          ' in configuration steps!')
            return True
        except subprocess.CalledProcessError as cpe:
            # CalledProcessError has no ``message`` attribute on Python 3;
            # str(cpe) carries the command and return code.
            print('Failed!\nMessage:\n{0}\nTrace:\n{1}\nOutput:\n{2}'
                  .format(str(cpe), traceback.format_exc(), cpe.output))
            return False
        except NedgeBlockerException as nbe:
            print('Got blocker configuration exception')
            print(nbe.blockers)
            self.blockers = nbe.blockers
            return False
        except Exception as e:
            # ``e.message`` raises AttributeError on Python 3 exceptions;
            # format the exception object itself instead.
            print("Nedge configuration failed. Terminating")
            print('{}'.format(e))
            print('Traceback in {}'.format(traceback.format_exc()))
            return False

    def get_blockers(self):
        """Return blockers collected by the most recent configure() run."""
        return self.blockers
class NedgeNodeConfigurator(NedgeBaseConfigurator):
    """Configurator preset for data nodes (deploy + system steps only)."""

    _steps = [FirewallCheck(),
              SystemPreConfig(),
              SSHConfig(),
              NedeployRCConfig(),
              NedeployBashActivation(),
              NedeployPrecheck(),
              NedeployInstall(),
              WaitAuditService(),
              WaitNodeUUID(),
              SystemPostConfig()]

    def __init__(self, environment=None):
        # Avoid a shared mutable default dict between instances.
        environment = {} if environment is None else environment
        environment['node_type'] = 'data'
        NedgeBaseConfigurator.__init__(self, environment,
                                       NedgeNodeConfigurator._steps)
class NedgeGatewayConfigurator(NedgeBaseConfigurator):
    """Configurator preset for gateway nodes (deploy + system steps only)."""

    _steps = [FirewallCheck(),
              SystemPreConfig(),
              SSHConfig(),
              NedeployRCConfig(),
              NedeployBashActivation(),
              NedeployPrecheck(),
              NedeployInstall(),
              WaitAuditService(),
              WaitNodeUUID(),
              SystemPostConfig()]

    def __init__(self, environment=None):
        # Avoid a shared mutable default dict between instances.
        environment = {} if environment is None else environment
        environment['node_type'] = 'gateway'
        # BUG FIX: previously passed NedgeNodeConfigurator._steps (copy-paste
        # error); use this class's own step list so future divergence of the
        # two lists takes effect.
        NedgeBaseConfigurator.__init__(self, environment,
                                       NedgeGatewayConfigurator._steps)
class NedgeMgmtConfigurator(NedgeBaseConfigurator):
    """Configurator preset for management nodes (deploy + neadm cluster setup)."""

    _steps = [
        FirewallCheck(),
        SystemPreConfig(),
        SSHConfig(),
        NedeployRCConfig(),
        NedeployBashActivation(),
        NedeployPrecheck(),
        NedeployInstall(),
        WaitAuditService(),
        NeadmRCConfig(),
        NeadmInitWait(),
        NeadmSystemInit(),
        WaitNodeUUID(),
        NeadmLicenseActivation(),
        NeadmOnlineNodesWait(),
        NeadmClusterCreation(),
        WaitNodeUUID(),
        SystemPostConfig()
    ]

    def __init__(self, environment=None):
        # Avoid a shared mutable default dict between instances.
        environment = {} if environment is None else environment
        environment['node_type'] = 'mgmt'
        NedgeBaseConfigurator.__init__(self, environment,
                                       NedgeMgmtConfigurator._steps)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 16 10:35:20 2020
@author: p20coupe
"""
import argparse
import sys
import joblib
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import os
import math
import statistics
import torch
from torch import nn
from torch.optim import Adam
from torch.utils.data import DataLoader, Dataset
import torch.backends.cudnn as cudnn
import torch.backends.cudnn
from torch.autograd import Variable
from torch.nn import functional as F
from torchvision.transforms import ToTensor, Normalize, Compose
import torch.optim as optim
from ResNet_Reconstruction import *
# Command-line interface. Paths are read from the parsed argparse namespace
# rather than raw sys.argv indices, so argparse's validation, ordering and
# --help output stay authoritative (previously args was parsed but ignored).
parser = argparse.ArgumentParser()
parser.add_argument("TestData", help="PATH to testing data")
parser.add_argument("NetworkPATH", help="PATH to the network to use")
parser.add_argument("ResultsDirectory", help="PATH to the results storage directory")
args = parser.parse_args()

# Run on GPU when available, otherwise fall back to CPU.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

Datadirectory = args.TestData
ResultsDirectory = args.ResultsDirectory
NetworkPath = args.NetworkPATH
def psnr(lP, lT):
    """Peak signal-to-noise ratio between two arrays, with peak value 3.0.

    Returns 100 when the inputs are identical (zero MSE).
    """
    PIXEL_MAX = 3.0
    mse = np.mean((lP - lT) ** 2)
    return 100 if mse == 0 else 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
def imshow(img, corr):
    """Show a batch of single-channel patches on a 2-row grid.

    Each subplot is titled with the patch's correlation value.
    """
    n_rows = 2
    n_cols = int(len(img) / 2)
    plt.figure(figsize=(n_cols, n_rows))
    for row in range(n_rows):
        for col in range(n_cols):
            axis = plt.subplot(n_rows, n_cols, row * n_cols + 1 + col)
            # NOTE(review): the sample index (col + n_rows*row) only matches
            # the subplot order (row*n_cols + col) when n_cols == 2 — kept
            # exactly as the original.
            sample = col + n_rows * row
            axis.imshow(img[sample, 0, :, :].numpy(),
                        cmap=plt.cm.gray,
                        interpolation="nearest",
                        vmin=-3, vmax=2)
            axis.set_title('%.3f' % (corr[sample]))
            axis.axis('off')
def imshow_difMap(img, label):
    """Show per-patch difference maps (img - label) on a 2-row grid."""
    n_rows = 2
    n_cols = int(len(img) / 2)
    plt.figure(figsize=(n_cols, n_rows))
    for row in range(n_rows):
        for col in range(n_cols):
            axis = plt.subplot(n_rows, n_cols, row * n_cols + 1 + col)
            sample = col + n_rows * row
            diff = img[sample, 0, :, :].numpy() - label[sample, 0, :, :].numpy()
            axis.imshow(diff,
                        cmap=plt.cm.gray,
                        interpolation="nearest",
                        vmin=-3, vmax=2)
            axis.axis('off')
def ValidRed2D(testloader, path):
    """Evaluate a trained ResNet on the test loader and report PSNR statistics.

    Loads the network weights from ``path``, computes one PSNR value per batch
    (first sample only), saves example images for batch 800, and writes all
    PSNR values plus mean/stdev to the global ResultsDirectory.
    """
    psnr_value = []
    net = ResNet(BasicBlock, [3,4,6]).to(device)
    net.load_state_dict(torch.load(path))
    for i, data in enumerate(testloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels, correlation = data
        inputs = inputs.to(device)
        labels = labels.to(device)
        correlation = correlation.to(device)
        outputs = net(inputs)
        psnr_val = 0
        # NOTE(review): only sample 0 of each batch contributes to the PSNR,
        # and the names look swapped (pr <- labels, gt <- outputs); psnr() is
        # symmetric in its arguments so the value is unaffected — confirm intent.
        pr = labels[0].cpu().detach().numpy()
        gt = outputs[0].cpu().detach().numpy()
        psnr_val = psnr_val + psnr(gt[0,:,:], pr[0,:,:])
        # Dump qualitative examples for one fixed batch index.
        if i == 800:
            imshow(inputs.cpu().detach(), correlation.cpu().detach().numpy())
            plt.savefig(os.path.join(ResultsDirectory,'Images','inputs_myloss_test.png'), dpi=150)
            imshow(labels.cpu().detach(), correlation.cpu().detach().numpy())
            plt.savefig(os.path.join(ResultsDirectory,'Images','labels_myloss_test.png'), dpi=150)
            imshow(outputs.cpu().detach(), correlation.cpu().detach().numpy())
            plt.savefig(os.path.join(ResultsDirectory,'Images','outputs_myloss_test.png'), dpi=150)
            imshow_difMap(outputs.cpu().detach(), labels.cpu().detach())
            plt.savefig(os.path.join(ResultsDirectory,'Images','DifMap_myloss_test.png'), dpi=150)
        psnr_value.append(psnr_val)
    # Persist per-batch PSNR values and print summary statistics.
    np.savetxt(os.path.join(ResultsDirectory,'PSNR_test.txt'), psnr_value)
    print('Standard Deviation:' + str(statistics.stdev(psnr_value)))
    print('Mean:' + str(statistics.mean(psnr_value)))
# Assemble the test set by concatenating per-subject, per-bone patch files.
bones = ['calcaneus','talus','tibia']
X_test = [[]]   # low-resolution ('BR') patches, wrapped so axis 0 is the channel
Y_test = [[]]   # high-resolution ('HR') patches
corr = []       # per-patch correlation values
sujets = os.listdir(Datadirectory)
sujets = np.sort(sujets)
for i in range(len(sujets)):
    #Dataset Validation
    for bone in bones:
        patches = os.listdir(os.path.join(Datadirectory,sujets[i],'DatasetReconstruction_patches',bone))
        for k in range(len(patches)):
            # 'BR' files hold the degraded inputs; lists are concatenated
            # with '+' (the joblib payloads are assumed to be Python lists).
            if(patches[k].find('BR')!=-1):
                if X_test[0]==[]:
                    X_test[0] = joblib.load(os.path.join(Datadirectory,sujets[i],'DatasetReconstruction_patches',bone,patches[k]))
                else:
                    X_test[0] = X_test[0]+joblib.load(os.path.join(Datadirectory,sujets[i],'DatasetReconstruction_patches',bone,patches[k]))
            # 'HR' files hold the reconstruction targets.
            if(patches[k].find('HR')!=-1):
                if Y_test[0]==[]:
                    Y_test[0] = joblib.load(os.path.join(Datadirectory,sujets[i],'DatasetReconstruction_patches',bone,patches[k]))
                else:
                    Y_test[0] = Y_test[0]+joblib.load(os.path.join(Datadirectory,sujets[i],'DatasetReconstruction_patches',bone,patches[k]))
            # 'corr' files hold scalar correlations, flattened into one array.
            if(patches[k].find('corr')!=-1):
                corr = np.append(corr,joblib.load(os.path.join(Datadirectory,sujets[i],'DatasetReconstruction_patches',bone,patches[k])))
# Move the wrapper axis so tensors become (n_samples, 1, H, W).
Y_test = np.moveaxis(Y_test,0,1)
X_test = np.moveaxis(X_test,0,1)
print(np.shape(Y_test))
print(np.shape(corr))
testset = torch.utils.data.TensorDataset(torch.Tensor(X_test),torch.Tensor(Y_test),torch.Tensor(corr))
testloader = torch.utils.data.DataLoader(testset, batch_size=8,
                                         shuffle=False, pin_memory=use_cuda, num_workers=2)
#Create directores for the results
if not os.path.exists(os.path.join(ResultsDirectory,'Images')):
    os.mkdir(os.path.join(ResultsDirectory, 'Images'))
ValidRed2D(testloader,NetworkPath)
<reponame>ColCarroll/simulation_based_calibration
"""Simulation based calibration (Talts et. al. 2018) in PyMC3."""
import itertools
import logging
import matplotlib.pyplot as plt
import numpy as np
import pymc3 as pm
from tqdm import tqdm
class quiet_logging:
    """Turn off logging for certain libraries.
    PyMC3 and theano compile locks are a little noisy when running a bunch of loops.
    """

    def __init__(self, *libraries):
        self.loggers = [logging.getLogger(library) for library in libraries]

    def __call__(self, func):
        from functools import wraps  # local import: keeps the decorated name/docstring

        @wraps(func)
        def wrapped(cls, *args, **kwargs):
            # Remember each logger's current level so it can be restored.
            levels = []
            for logger in self.loggers:
                levels.append(logger.level)
                logger.setLevel(logging.CRITICAL)
            try:
                return func(cls, *args, **kwargs)
            finally:
                # BUG FIX: restore levels even if the wrapped call raises
                # (previously an exception left the loggers silenced forever).
                for logger, level in zip(self.loggers, levels):
                    logger.setLevel(level)

        return wrapped
class SBC:
    """Simulation based calibration runner (Talts et al. 2018) for PyMC3 models."""

    def __init__(
        self,
        model_func,
        observed_vars,
        num_simulations=1000,
        sample_kwargs=None,
        seed=None,
    ):
        """Set up class for doing SBC.
        Note that you must define your model using a function so the observations
        can change on each run, and the keyword arguments of the function must
        match the observed variables.
        You should also specify the shape of the actual observations! See the example.
        Example
        -------
            def my_model(y=None):
                with pm.Model() as model:
                    x = pm.Normal('x')
                    obs_y = pm.Normal('y', mu=2 * x, observed=y, shape=2)
                return model
            sbc = SBC(my_model, 'y', num_simulations=1000)
            sbc.run_simulations()
            sbc.plot_sbc()
        Parameters
        ----------
        model_func : function
            A function whose keyword arguments are `observed_vars` and which
            returns a pymc3.Model instance
        observed_vars : list[str]
            A list of the observed variables in the model
        num_simulations : int
            How many simulations to run
        sample_kwargs : dict[str] -> Any
            Arguments passed to pymc3.sample
        seed : int (optional)
            Random seed. This persists even if running the simulations is
            paused for whatever reason.
        """
        self.model_func = model_func
        # Allow a single observed variable to be passed as a bare string.
        if isinstance(observed_vars, str):
            observed_vars = [observed_vars]
        self.observed_vars = observed_vars
        self.num_simulations = num_simulations
        # One prior predictive draw reveals the model's variable names;
        # the unobserved ones are what SBC tracks.
        test_point = self._get_prior_predictive_samples()
        self.var_names = [v for v in test_point if v not in self.observed_vars]
        if sample_kwargs is None:
            sample_kwargs = {}
        sample_kwargs.setdefault("progressbar", False)
        sample_kwargs.setdefault("compute_convergence_checks", False)
        self.sample_kwargs = sample_kwargs
        # Per-variable rank statistics accumulated across simulations.
        self.simulations = {name: [] for name in self.var_names}
        self._simulations_complete = 0
        self._seed = seed
        self._warnings = {}

    def _get_seeds(self):
        """Set the random seed, and generate seeds for all the simulations."""
        if self._seed is not None:
            np.random.seed(self._seed)
        # One independent seed per simulation so a paused-and-resumed run
        # reproduces exactly the same draws.
        return np.random.randint(2 ** 30, size=self.num_simulations)

    def _get_prior_predictive_samples(self):
        """Generate samples to use for the simulations."""
        # Build the model with no observations so the observed variables are
        # drawn from the prior predictive as well.
        with self.model_func(**{v: None for v in self.observed_vars}):
            prior = pm.sample_prior_predictive(self.num_simulations)
        return prior

    @quiet_logging("pymc3", "theano.gof.compilelock")
    def run_simulations(self):
        """Run all the simulations.
        This function can be stopped and restarted on the same instance, so you can
        keyboard interrupt part way through, look at the plot, and then resume. If a
        seed was passed initially, it will still be respected (that is, the resulting
        simulations will be identical to running without pausing in the middle).
        """
        seeds = self._get_seeds()
        prior = self._get_prior_predictive_samples()
        progress = tqdm(
            initial=self._simulations_complete,
            total=self.num_simulations,
            postfix=self._warnings,
        )
        try:
            while self._simulations_complete < self.num_simulations:
                idx = self._simulations_complete
                # Condition the model on this simulation's prior predictive draw.
                prior_predictive_draw = {v: prior[v][idx] for v in self.observed_vars}
                np.random.seed(seeds[idx])
                with self.model_func(**prior_predictive_draw):
                    check = pm.sample(**self.sample_kwargs)
                for name in self.var_names:
                    # Rank statistic: count posterior draws below the "true"
                    # prior value (uniform for a well-calibrated model).
                    self.simulations[name].append(
                        (check[name] < prior[name][idx]).sum(axis=0)
                    )
                self._simulations_complete += 1
                self._update_progress_bar(check, progress)
        finally:
            # Trim to the completed count in case of interruption mid-loop.
            self.simulations = {
                k: v[: self._simulations_complete] for k, v in self.simulations.items()
            }
            progress.close()

    def _update_progress_bar(self, check, progress):
        """Helper to pipe PyMC3 warnings into the progress bar."""
        # NOTE(review): relies on the private trace attribute
        # check._report._warnings — may break across pymc3 versions.
        for w in check._report._warnings:
            if w.level != "debug":
                name = str(w.kind)
                if name not in self._warnings:
                    self._warnings[name] = 0
                self._warnings[name] += 1
        progress.set_postfix(self._warnings, refresh=False)
        progress.update()

    def plot_sbc(self, var_names=None, plot_kwargs=None):
        """Produce plots similar to those in the SBC paper."""
        return plot_sbc(self.simulations, var_names=var_names, plot_kwargs=plot_kwargs)
def plot_sbc(simulations, var_names=None, plot_kwargs=None):
    """Produce plots similar to those in the SBC paper.
    The data is pretty easy to serialize, and this function makes it
    easier to do that and still produce plots.
    Parameters
    ----------
    simulations : dict[str] -> listlike
        The SBC.simulations dictionary.
    var_names : list[str]
        Variables to plot (defaults to all)
    plot_kwargs : dict[str] -> Any
        Keyword arguments passed to plt.bar
    Returns
    -------
    fig, axes
        matplotlib figure and axes
    """
    # Copy before setdefault so the caller's dict is not mutated.
    plot_kwargs = {} if plot_kwargs is None else dict(plot_kwargs)
    plot_kwargs.setdefault("bins", "auto")
    plot_kwargs.setdefault("color", "#B00A22")  # stan red
    plot_kwargs.setdefault("edgecolor", "black")

    if var_names is None:
        var_names = list(simulations.keys())

    # Coerce every variable to at least 2d: axis 0 is the simulation index,
    # the remaining axes are the variable's own dimensions.
    sims = {}
    for k in var_names:
        ary = np.array(simulations[k])
        while ary.ndim < 2:
            ary = np.expand_dims(ary, -1)
        sims[k] = ary

    n_plots = int(sum(np.prod(v.shape[1:]) for v in sims.values()))
    fig, axes = plt.subplots(nrows=n_plots, figsize=(12, 4 * n_plots))
    # BUG FIX: with nrows=1, plt.subplots squeezes the result to a bare Axes
    # and axes[idx] below raised TypeError; normalize to a 1d array.
    axes = np.atleast_1d(axes)
    idx = 0
    for var_name, var_data in sims.items():
        # Cartesian product over the variable's non-simulation dimensions.
        plot_idxs = list(itertools.product(*(np.arange(s) for s in var_data.shape[1:])))
        has_dims = len(plot_idxs) > 1
        for indices in plot_idxs:
            if has_dims:
                dim_label = f'{var_name}[{"][".join(map(str, indices))}]'
            else:
                dim_label = var_name
            ax = axes[idx]
            ary = var_data[(...,) + indices]
            ax.hist(ary, **plot_kwargs)
            ax.set_title(dim_label)
            idx += 1
    return fig, axes
|
<reponame>gieses/xiRT<gh_stars>1-10
"""Module for constants in the xirt package."""
from xirt import __version__
learning_params = f"""
# Learning options generated with xiRT v. {__version__}
# the preprocessing options define how the sequences are encoded / filtered. Usually, default values
# are fine.
# If transfer learning is intended, the label encoder and max_length parameter need to be adapted.
preprocessing:
# label encoder, str or none. If str, use a previously trained label encoder to translate
# amino acids to specific integers. If you are using xiRT on a single data file set to None
# default None
le: None
# max sequence length, integer. Filter all sequences longer than this number. Disable by setting
# it to -1
# default -1
max_length: -1
# for crosslinks only, bool: encode crosslinked residues as different residues than their
# unmodified counter parts
# e.g. a crosslinked K, will be encoded as clK in modX format.
# default True
cl_residue: True
# filter, str. string filter that must be contained in the description for a CSM to be included
# default ""
filter: ""
# these options are crucial for the setting up xiRT with the correct training mode. Stay strong!
# It's easier than it seems right now.
# Check the readthedocs documentation if you need more info / examples.
train:
# float value, defines cutoff to filter the input CSMs, e.g. all CSMs with a lower fdr are
# used for training
# default 0.01
fdr: 0.01
# int, the number of crossvalidation folds to be done. 1=nocv, 3=minimal value, recommended
# alternatives with higher run time:5 or 10.
# default 1
ncv: 1
# bool, if True the training data is used to fit a new neural network model after the
# cross-validation step, this model is used for the prediction of RTs for all peptides >
# the given FDR value.
# refit=False: use best CV predictor; b) refit=True: retrain on all CSMs < 0.01 FDR.
# default False
refit: False
# str, important that defines the training mode (important!)
# "train", train on entire data set: use
# "crossvalidation", perform crossvalidation on the input data (train multiple classifiers)
# "predict", do NOT train on the supplied CSMs but simply predict with an already trained model
# default "train"
mode: "train"
# str, augment the input data by swapping sequences (peptide1, peptide2). Marginal gains in
# predicition were observed here.
# Can usually, be left as False. If you are dealing with very small data sets, this option
# might also help.
# default False
augment: False
# str, multiple sequence types are supported: "linear", "crosslink", "pseudolinear" (concatenate
# peptide1 and peptide2 sequences)
# default "crosslink"
sequence_type: "crosslink"
# str (file location), this option can be set with any of the above described options.
# if a valid weight set is supplied, the network is initalized with the given weights
# default "None"
pretrained_weights: "None"
# str (file location), similarly to the option above, a pretrained model can be supplied.
# this is necessary when (extreme) transfer-learning applications are intended (e.g. different
# number of fractions for e.g. SCX)
# this requires adjustments of the network architecture
# default: "None"
pretrained_model: "None"
# float, defines the fraction of test data (e.g. a small fraction of the training folds that is
# used for validation
# default 0.10
test_frac: 0.10
# float, used for downsampling the input data (e.g. to create learning curves). Can usually left as 1.
# default 1
sample_frac: 1
# int, seed value for the sampling described above
# default 21
sample_state: 21
"""
xirt_params = f"""
# xiRT options generated with xiRT v. {__version__}
# options for the recurrent layer used in xiRT
# can usually be used with default values, except for type
LSTM:
# activation parameters, leave as default unless you know what you are doing
activation: tanh
activity_regularization: l2
activityregularizer_value: 0.001
# option that activates the bidirectional layer to the used LSTM layer
bidirectional: true
# kernal regularization, leave as default
kernel_regularization: l2
kernelregularizer_value: 0.001
lstm_bn: true
# central layer parameters
# increasing the values here will drastically increase runtime but might also improve results
# usually, 1 and GRU (for CPUs) or CuDNNGRU (for GPUs) will deliver good performance
nlayers: 1
type: GRU
units: 50
# dense parameters are used for the individual task subnetworks (e.g. RP, SCX, ...)
dense:
# activation functions in the layers between the embedding and prediction layer
# recommended to leave on defaults for most applications
activation:
- relu
- relu
- relu
# boolean indicator if batch_normalization shoulde be used, leave on default
# recommended to leave on defaults for most applications
dense_bn:
- true
- true
- true
# dropout rate to use
# recommended to leave on defaults for most applications
dropout:
- 0.1
- 0.1
- 0.1
# regularization methods to use on the kernels, leave on defaults
kernel_regularizer:
- l2
- l2
- l2
regularization:
- true
- true
- true
regularizer_value:
- 0.001
- 0.001
- 0.001
# size of the individual layers, defaults deliver good results. Changes here might need adjustments
# on dropout rates and other hyper-parameters
neurons:
- 300
- 150
- 50
# int, number of layers to use. Note that all other parameters in the 'dense' section
# must be adapted to the new number used in this variable
nlayers: 3
# dimension of the embedding output
embedding:
length: 50
# parameters influencing the learning
learning:
# numbers of samples to pass during a single iteration
batch_size: 512
# number of epochs to train
epochs: 50
# other tested/reasonable values for learning rate: 0.003, 0.001
learningrate: 0.01
verbose: 1
# default optimizer, most tensorflow optimizers are implemented as well
optimizer: adam
#!!!!!!!!!!!!!!!!!! most important parameters!!!!!!!!!!!!!!!
output:
# task-parameters. Here the prefix hsax and rp are used to build and parameterize the
# respective sub-networks (this prefix must also match the "predictions" section.
# each task needs to contain the sufixes: activation, column, dimension, loss, metric and weight.
# They must be carefully adapted for each prediction task.
# recommended to use sigmoid for fractions (SCX/hSAX) if ordinal regression method should be used
hsax-activation: sigmoid
# column where the fraction RT is in the CSV input ("xx_ordinal" xx_
hsax-column: hsax_ordinal
# the number of unique / distinct values (e.g. fractions)
hsax-dimension: 10
# must be binary_crossentropy for sigmoid activations
hsax-loss: binary_crossentropy
# must be mse
hsax-metrics: mse
# weight parameter to combine the loss of this task to any other defined task
hsax-weight: 50
# use linear for regression tasks (revesed phase)
rp-activation: linear
rp-column: rp
# dimension is always 1 for regression
rp-dimension: 1
# loss and metrics should not be changed from mse
rp-loss: mse
rp-metrics: mse
# again, a weight parameter that might need tuning for multi-task settings
rp-weight: 1
# siames parameters
siamese:
# set to True for crosslinks (default)
use: True
# define how to combine the outputs of the siamese layers, most tensorflow options are supported.
# default value should be fine
merge_type: add
# add predictions for single peptides based on the crosslink model (default)
single_predictions: True
callbacks:
# for debugging and model storage
# define which callbacks to use.
# default values are fine here and should not be changed
# options define the meta data that is written throughout the training process. The results can
# be find in the callback in the specified outdir
check_point: True
log_csv: True
# early stopping callback
early_stopping: True
early_stopping_patience: 15
tensor_board: False
progressbar: True
# reduce learning rate callback
reduce_lr: True
reduce_lr_factor: 0.5
reduce_lr_patience: 15
predictions:
# define the prediction tasks unambiguously as they appear in the output file; need to match
# column labels defined in output
# "continues" is reserved for regression problems e.g. reversed-phase chromatography here
continues:
- rp
# fractions are reserved for classification or ordinal regression problems e.g.
# fractionation method that led to discrete fractions
# use [] if no fraction prediction is desired
fractions: # simply write fractions: [] if no fraction prediction is desired
- hsax
"""
# Plain-text README template written into the results directory of a run.
readme = f"""
xiRT ReadMe:
------------
This folder contains the results from running xiRT v. {__version__}. The following
descriptions summarize the most important output data and formats.
Important files:
----------------
1. xirt_logger.log
This file summarizes and logs the training procedure. It contains the parameters used to run xiRT
but also short summary values from the training process.
2. processed_psms.csv
CSMs or PSMs from the input data but with the additional and temporary columns used by xiRT.
3. error_features.csv
Contains the predictions and errors (observed - predicted) for each PSM / CSM.
4. error_features_interactions.csv
Contains further features that are derived from the errors (e.g. products, sums, absolute values)
5. figures for quality control
- cv_epochs_loss.svg / cv_epochs_metrics.svg - plots the training performance over time.
- cv_summary_strip_loss.svg / cv_summary_strip_metric.svg - plots summarizing the CV fold results
- error_characteristics.svg - plots prediction errors between TT/TD/DD identifications
- qc_cv_01/02/dd - x vs. y plots of predictions and observations for the tasks (pred fold)
- qc_cv_-1 - x vs. y plots of predictions and observations for the data with >1% FDR
Optional files:
---------------
6. epoch_history.csv
Training performance over time (epochs).
7. model_summary.csv
Summarizes the model performance in more depth (metrics, training splits, input files, etc.)
8. callbacks folder
In-depth results from the model training process, e.g. trained weights and model architectures.
Also contains the encoder and data used in python as pickle objects.
Please visit the documentation to get more details on the output data:
https://xirt.readthedocs.io/en/latest/results.html
"""
<reponame>krusagiz/OctoBot
import os
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from util.Driver.role import isAdmin
from util.ScrapeAdmin.admin import viewAccountPages
from prettytable import PrettyTable
# XPath prefix of the pagination cells in the logs table; the column index
# and "]/a" are appended at runtime.
paginationXpath = "/html/body/form/div[4]/div[4]/div/div/table/tbody/tr[22]/td/table/tbody/tr/td["
# NOTE(review): looks like a redacted ASP.NET __doPostBack token captured from
# the scraped page — confirm its intended value before use.
scriptToken = "__do<PASSWORD>('ctl<PASSWORD>','Page$"
# Same pagination prefix, used when counting record pages.
getRecordsXpath = "/html/body/form/div[4]/div[4]/div/div/table/tbody/tr[22]/td/table/tbody/tr/td["
def getNumberRecordPages(driver):
    '''
    Obtains the number of pages that records logs webpage has
    Arguments:
        driver(obj): firefox webdriver instance in python
    Returns:
        number of pages that record logs webpage has
    '''
    page_count = 1
    link_index = 2
    while True:
        try:
            # Build the XPath of the next pagination link and click it;
            # a failing lookup means there are no more pages.
            xpath = getRecordsXpath + str(link_index) + "]/a"
            print(xpath)
            driver.find_element_by_xpath(xpath).click()
            link_index += 1
            page_count += 1
            time.sleep(3)
        except Exception as exc:
            print(exc)
            break
    return page_count + 1
def saveRecordLogs(driver, directory):
    '''
    Main logic behind the saving of record logs
    Arguments:
        driver(obj): firefox webdriver instance in python
        directory(str) : Directory to save record logs
    Returns:
        None
    '''
    print("Printing record logs...")
    # File is opened (append mode) before parsing, matching the original
    # side-effect order.
    saved_file = open(directory + "/data/admin/RecordLogs.txt", "a")
    columns = ["Date/Time", "Subject", "Action", "Description"]
    table = PrettyTable(columns)
    for column in columns:
        table.align[column] = "l"
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    rows = soup.find("table", {"id": "BodyContent_GridViewLogs"}).find_all("tr")
    for row in rows:
        # Keep the non-numeric cell texts (numeric cells are pagination links);
        # only complete 4-column rows are real log entries.
        cells = [cell.get_text().strip() for cell in row.find_all("td")]
        cells = [text for text in cells if not text.isnumeric()]
        if len(cells) == 4:
            table.add_row(cells)
    saved_file.write(str(table))
    saved_file.write("\n")
    saved_file.close()
    # Dismiss any popup left open by the page.
    webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform()
def getAllRecordLogs(driver, headerUrl):
    '''
    Obtains all the records logs
    Arguments:
        driver(obj): firefox webdriver instance in python
        headerUrl(str): base URL of the site, page path is appended
    Returns:
        None
    '''
    driver.get(headerUrl + "Admin/View-Logs/Account-Logs")
    time.sleep(2)
    driver.find_element_by_id("BodyContent_ButtonSearch").click()
    time.sleep(5)
    # NOTE(review): getNumberRecordPages clicks through every page as a side
    # effect of counting them, so the loop below starts from page 2.
    maxNumber = getNumberRecordPages(driver)
    print("Max number is: " + str(maxNumber))
    number = 2
    directory = str(os.getcwd())
    while (number < maxNumber + 1):
        try:
            token = str(number) + "]/a"
            Xpath = paginationXpath + token
            # Save the currently displayed page, then move to the next one.
            saveRecordLogs(driver, directory)
            driver.find_element_by_xpath(Xpath).click()
            number += 1
            time.sleep(3)
        except:
            # Recovery path: reload the logs page and retry the click.
            # NOTE(review): ``token`` here is the value built in the failed
            # iteration above — confirm this targets the intended page link.
            if (number == maxNumber):
                break
            number += 1
            driver.get(headerUrl + "Admin/View-Logs/Account-Logs")
            time.sleep(10)
            Xpath = paginationXpath + token
            driver.find_element_by_xpath(Xpath).click()
    # Save the final page reached by the loop.
    saveRecordLogs(driver, directory)
|
# Author: <NAME>
# The Global Anigner class, provided Global Alignment using Needleman-Wunsch algorithm.
import collections
import os
import numpy as np
from score_matrix_generator import generate_score_matrix
# the Aligner class
# the Aligner class
class Aligner:
    """Global sequence aligner (Needleman-Wunsch) with optional Bayes
    re-weighting of candidate words by corpus frequency."""

    def __init__(self, sigma=5, bayes=True, onlyBayes=False):
        # sigma: linear gap (insertion/deletion) penalty used in the DP.
        self.sigma = sigma
        # onlyBayes: skip alignment scoring and rank purely by frequency.
        self.onlyBayes = onlyBayes
        self.score = None
        self.freq_dict = None
        # load score matrix, if it does not exist, generate it from data
        path='scorematrix.npy'
        if not os.path.isfile(path):
            generate_score_matrix('data/misspelling.txt')
        self.load_score_mat(path)
        if bayes:
            self.load_freq_dict()

    # score matrix loading function
    def load_score_mat(self, path='scorematrix.npy'):
        """Load the 26x26 substitution-score matrix into a (char, char) dict."""
        alphabet = 'abcdefghijklmnopqrstuvwxyz'
        mat = np.load(path)
        assert mat.shape == (26, 26)
        score = {}
        for i in range(26):
            for j in range(26):
                score[alphabet[i], alphabet[j]] = mat[i, j]
        self.score = score

    # load the word frequency dictionary
    def load_freq_dict(self, path='data/frequency_dictionary_en_82_765.txt'):
        """Load word frequencies; unseen words default to the mean log-frequency."""
        # https://github.com/wolfgarbe/SymSpell/blob/master/SymSpell/frequency_dictionary_en_82_765.txt
        freq_dict = {}
        with open(path, 'r') as f:
            for line in f:
                word, num = line.strip('\ufeff \n').split()
                # freq_dict[word] = int(num)
                # Log-scale the raw counts to dampen the frequency skew.
                freq_dict[word] = np.log(int(num))
        mean_freq = int(sum(freq_dict.values())/len(freq_dict))
        # defaultdict so out-of-vocabulary words get the mean frequency.
        freq_dfdict = collections.defaultdict(lambda: mean_freq)
        freq_dfdict.update(freq_dict)
        self.freq_dict = freq_dfdict

    # global alignment algorithm
    def align(self, a:str, b:str) -> int:
        """Return the global (Needleman-Wunsch) alignment score of a and b."""
        sigma, score = self.sigma, self.score
        a, b = a.lower(), b.lower()
        len_a, len_b = len(a), len(b)
        end = (len_a, len_b)
        # pool maps (i, j) -> (best score, backpointer) for prefixes a[:i], b[:j].
        pool = {}
        pool[0, 0] = (0, (-1, -1))
        # First row/column: pure gap penalties.
        for i in range(1, len_a+1):
            pool[i, 0] = (-sigma*i, (i-1, 0))
        for i in range(1, len_b+1):
            pool[0, i] = (-sigma*i, (0, i-1))
        for idx_a in range(1, len_a+1):
            for idx_b in range(1, len_b+1):
                # Three moves: gap in b, gap in a, or (mis)match.
                candidates = []
                candidates.append((pool[idx_a-1, idx_b][0] - sigma, (idx_a-1, idx_b)))
                candidates.append((pool[idx_a, idx_b-1][0] - sigma, (idx_a, idx_b-1)))
                candidates.append((pool[idx_a-1, idx_b-1][0] + score[a[idx_a-1], b[idx_b-1]], (idx_a-1, idx_b-1)))
                pool[idx_a, idx_b] = max(candidates, key=lambda x : x[0])
        return pool[end][0]

    # give topk final suggestions using global alignment and optional Bayes Re-weighting
    def final_suggestions(self, word:str, sug:set, topk=3) -> list:
        """Rank candidate corrections for `word`, returning the top-k
        (candidate, score) pairs in descending score order."""
        freq_dict = self.freq_dict
        if not self.onlyBayes:
            candidates = [(p_word, self.align(word, p_word)) for p_word in sug]
            # if freq_dict is None, do not apply Bayes Re-weighting
            if freq_dict is not None:
                # NOTE(review): alignment scores can be negative, in which case
                # multiplying by the frequency weight inverts their ordering —
                # confirm this is intended.
                freq_sum = sum(freq_dict[x[0]] for x in candidates)
                candidates = [(x[0], x[1]*(freq_dict[x[0]]/freq_sum)) for x in candidates]
        # if self.onlyBayes only apply Bayes Re-weighting
        else:
            freq_sum = sum(freq_dict[p_word] for p_word in sug)
            candidates = [(p_word, freq_dict[p_word]/freq_sum) for p_word in sug]
        return sorted(candidates, key=lambda x: x[1], reverse=True)[:topk]
if __name__ == "__main__":
    # Interactive demo: read words from stdin and print top-10 suggestions.
    from checker_backend import SpellChecker
    sc = SpellChecker(Aligner())
    # a = Aligner()
    while True:
        word = str(input('> '))
        fs = sc.give_suggestions(word, topk=10)
        print(fs)
|
<reponame>HighSaltLevels/Randomizer<filename>randomizer/app.py<gh_stars>0
""" Module for loading the app UI """
import sys
from PyQt5.QtWidgets import QWidget, QApplication, QGridLayout
from PyQt5.QtGui import QIcon
from ui_elements import label, spinbox, check_box, combo_box, button, line_edit, browse
from ui_elements.separator import create_v_sep, create_h_sep
from config import DEFAULT_CONFIG_PATH
# Single Qt application instance; must exist before any QWidget is created.
APP = QApplication([])
class Randomizer(QWidget):
""" Randomizer GUI """
    def __init__(self, parent=None, title="Randomizer"):
        """Build the main window: three option columns over a ROM path row,
        a Randomize button, and a status label."""
        super().__init__(parent)
        self.setWindowTitle(title)
        self.setWindowIcon(QIcon(f"{DEFAULT_CONFIG_PATH}/randomizer.ico"))
        # Widget dictionaries built by the ui_elements factory modules;
        # keys are referenced by name throughout the layout code below.
        self.labels = label.create_labels(self)
        self.file_browser = browse.create_file_browser(self)
        self.spin_boxes = spinbox.create_spin_boxes(self)
        self.check_boxes = check_box.create_check_boxes(self)
        self.combo_boxes = combo_box.create_combo_boxes()
        self.line_edits = line_edit.create_line_edits(self)
        self.buttons = button.create_buttons(self)
        left_column = self.create_left_column()
        middle_column = self.create_middle_column()
        right_column = self.create_right_column()
        # Main grid: header row, separator, the three columns (with vertical
        # separators between them), then ROM path / buttons / status rows.
        main_grid = QGridLayout()
        self.setLayout(main_grid)
        main_grid.addWidget(self.labels["randomize"], 0, 0, 1, 5)
        main_grid.addWidget(create_h_sep(self), 1, 0, 1, 5)
        main_grid.addLayout(left_column, 2, 0)
        main_grid.addWidget(create_v_sep(self), 2, 1)
        main_grid.addLayout(middle_column, 2, 2)
        main_grid.addWidget(create_v_sep(self), 2, 3)
        main_grid.addLayout(right_column, 2, 4)
        main_grid.addWidget(create_h_sep(self), 3, 0, 1, 5)
        main_grid.addWidget(self.line_edits["rom_edit"], 4, 0, 1, 4)
        main_grid.addWidget(self.buttons["browse"], 4, 4)
        main_grid.addWidget(self.buttons["randomize"], 5, 0, 1, 5)
        main_grid.addWidget(self.labels["status"], 6, 0, 1, 5)
def create_left_column(self):
""" Create the left column of the main grid """
grid = QGridLayout()
grid.addWidget(self.labels["bases"], 0, 0, 1, 0)
grid.addWidget(create_h_sep(self), 1, 0, 1, 0)
grid.addWidget(self.labels["playable_bases"], 2, 0)
grid.addWidget(self.check_boxes["pb_enabled"], 2, 1)
grid.addWidget(self.labels["pb_minimum"], 3, 0)
grid.addWidget(self.spin_boxes["pb_min"], 3, 1)
grid.addWidget(self.labels["pb_maximum"], 4, 0)
grid.addWidget(self.spin_boxes["pb_max"], 4, 1)
grid.addWidget(create_h_sep(self), 5, 0, 1, 0)
grid.addWidget(self.labels["boss_bases"], 6, 0)
grid.addWidget(self.check_boxes["bb_enabled"], 6, 1)
grid.addWidget(self.labels["bb_minimum"], 7, 0)
grid.addWidget(self.spin_boxes["bb_min"], 7, 1)
grid.addWidget(self.labels["bb_maximum"], 8, 0)
grid.addWidget(self.spin_boxes["bb_max"], 8, 1)
grid.addWidget(create_h_sep(self), 9, 0, 1, 0)
grid.addWidget(self.labels["other_bases"], 10, 0)
grid.addWidget(self.check_boxes["ob_enabled"], 10, 1)
grid.addWidget(self.labels["ob_minimum"], 11, 0)
grid.addWidget(self.spin_boxes["ob_min"], 11, 1)
grid.addWidget(self.labels["ob_maximum"], 12, 0)
grid.addWidget(self.spin_boxes["ob_max"], 12, 1)
grid.addWidget(create_h_sep(self), 13, 0, 1, 0)
grid.addWidget(self.labels["class_bases"], 14, 0)
grid.addWidget(self.check_boxes["cb_enabled"], 14, 1)
grid.addWidget(self.labels["cb_minimum"], 15, 0)
grid.addWidget(self.spin_boxes["cb_min"], 15, 1)
grid.addWidget(self.labels["cb_maximum"], 16, 0)
grid.addWidget(self.spin_boxes["cb_max"], 16, 1)
return grid
def create_middle_column(self):
""" Create the middle column of the main grid """
grid = QGridLayout()
grid.addWidget(self.labels["growths"], 0, 0, 1, 0)
grid.addWidget(create_h_sep(self), 1, 0, 1, 0)
grid.addWidget(self.labels["playable_growths"], 2, 0)
grid.addWidget(self.check_boxes["pg_enabled"], 2, 1)
grid.addWidget(self.labels["pg_minimum"], 3, 0)
grid.addWidget(self.spin_boxes["pg_min"], 3, 1)
grid.addWidget(self.labels["pg_maximum"], 4, 0)
grid.addWidget(self.spin_boxes["pg_max"], 4, 1)
grid.addWidget(create_h_sep(self), 5, 0, 1, 0)
grid.addWidget(self.labels["etc"], 6, 0, 1, 0)
grid.addWidget(create_h_sep(self), 7, 0, 1, 0)
grid.addWidget(self.labels["class_mode"], 8, 0)
grid.addWidget(self.combo_boxes["class_mode"], 8, 1)
grid.addWidget(self.labels["force_master_seal"], 9, 0)
grid.addWidget(self.check_boxes["master_seal_enabled"], 9, 1)
grid.addWidget(self.labels["p_palette"], 10, 0)
grid.addWidget(self.check_boxes["p_palette"], 10, 1)
grid.addWidget(self.labels["b_palette"], 11, 0)
grid.addWidget(self.check_boxes["b_palette"], 11, 1)
grid.addWidget(self.labels["o_palette"], 12, 0)
grid.addWidget(self.check_boxes["o_palette"], 12, 1)
grid.addWidget(self.check_boxes["playable_class"], 13, 0, 1, 0)
grid.addWidget(self.check_boxes["boss_class"], 14, 0, 1, 0)
grid.addWidget(self.check_boxes["other_class"], 15, 0, 1, 0)
grid.addWidget(self.check_boxes["mix_promotes"], 16, 0, 1, 0)
return grid
def create_right_column(self):
""" Create the right part of the main grid """
grid = QGridLayout()
grid.addWidget(self.labels["mod_bases"], 0, 0, 1, 0)
grid.addWidget(create_h_sep(self), 1, 0, 1, 0)
grid.addWidget(self.labels["mod_playable_bases"], 2, 0)
grid.addWidget(self.check_boxes["mpb_enabled"], 2, 1)
grid.addWidget(self.labels["mod_pb"], 3, 0)
grid.addWidget(self.spin_boxes["pb_mod"], 3, 1)
grid.addWidget(create_h_sep(self), 4, 0, 1, 0)
grid.addWidget(self.labels["mod_boss_bases"], 5, 0)
grid.addWidget(self.check_boxes["mbb_enabled"], 5, 1)
grid.addWidget(self.labels["mod_bb"], 6, 0)
grid.addWidget(self.spin_boxes["bb_mod"], 6, 1)
grid.addWidget(create_h_sep(self), 7, 0, 1, 0)
grid.addWidget(self.labels["mod_other_bases"], 8, 0)
grid.addWidget(self.check_boxes["mob_enabled"], 8, 1)
grid.addWidget(self.labels["mod_ob"], 9, 0)
grid.addWidget(self.spin_boxes["ob_mod"], 9, 1)
grid.addWidget(self.labels["mod_growths"], 10, 0, 1, 0)
grid.addWidget(create_h_sep(self), 11, 0, 1, 0)
grid.addWidget(self.labels["mod_playable_growths"], 12, 0)
grid.addWidget(self.check_boxes["mpg_enabled"], 12, 1)
grid.addWidget(self.labels["mod_pg"], 13, 0)
grid.addWidget(self.spin_boxes["pg_mod"], 13, 1)
return grid
def start(self):
""" Start the app """
self.show()
sys.exit(APP.exec_())
|
<filename>adas.py
import os
import errno
import shutil
import urllib.request, urllib.parse, urllib.error
import ssl
# Root URL of the Open-ADAS atomic-data service; search and download
# requests below are built relative to this prefix.
open_adas = 'https://open.adas.ac.uk/'
class OpenAdas(object):
    """Client for searching and downloading data files from Open-ADAS."""

    def search_adf11(self, element, year='', ms='metastable_unresolved'):
        """Search adf11 (iso-nuclear master) files for *element*.

        `ms` names the metastable-resolution flag sent to the server.
        Returns a list of (url, filename) tuples.
        """
        p = [('element', element), ('year', year), (ms, 1),
             ('searching', 1)]
        s = AdasSearch('adf11')
        return s.search(p)

    def search_adf15(self, element, charge=''):
        """Search adf15 (photon emissivity coefficient) files for *element*."""
        p = [('element', element), ('charge', charge), ('resolveby', 'file'),
             ('searching', 1)]
        s = AdasSearch('adf15')
        return s.search(p)

    def fetch(self, url_filename, dst_directory=None):
        """Download one (url, name) pair into *dst_directory*.

        Files that already exist locally are not re-downloaded.
        """
        if dst_directory is None:  # BUG FIX: was `== None`
            dst_directory = os.curdir
        self.dst_directory = dst_directory
        url = self._construct_url(url_filename)
        nested = False  # this switch makes files save flat
        if nested:
            path = self._construct_path(url_filename)
        else:
            __, path = url_filename
        dst_filename = os.path.join(self.dst_directory, path)
        if not os.path.exists(dst_filename):
            tmpfile, __ = urllib.request.urlretrieve(url)
            self._mkdir_p(os.path.dirname(dst_filename))
            shutil.move(tmpfile, dst_filename)

    def _construct_url(self, url_filename):
        """Turn a 'detail' page URL into the matching 'download' URL.

        >>> db = OpenAdas()
        >>> db._construct_url(('detail/adf11/prb96/prb96_c.dat', 'foo.dat'))
        'https://open.adas.ac.uk/download/adf11/prb96/prb96_c.dat'
        """
        # BUG FIX: the doctest previously showed http:// although open_adas
        # is https://, so running doctest.testmod() always failed here.
        url, __ = url_filename
        query = url.replace('detail', 'download')
        return open_adas + query

    def _construct_path(self, url_filename):
        """Construct a local relative path to store the file in.

        >>> db = OpenAdas()
        >>> db._construct_path(('detail/adf11/prb96/prb96_c.dat', 'foo.dat'))
        'adf11/prb96/prb96_c.dat'
        """
        url, filename = url_filename
        path = url.replace('detail/', '')
        path = path.replace('][', '#')  # '][' appears in some ADAS names
        return path

    def _mkdir_p(self, path):
        # Like `mkdir -p`: create intermediate directories, tolerate an
        # existing directory (same semantics as the old errno.EEXIST dance).
        os.makedirs(path, exist_ok=True)
class AdasSearch(object):
    """Runs one search query against the Open-ADAS web interface."""

    def __init__(self, class_):
        if class_ not in ['adf11', 'adf15']:
            # BUG FIX: the message previously used `%s` with the undefined
            # name `s`, raising NameError instead of this exception.
            raise NotImplementedError('ADAS class %s is not supported.' % class_)
        self.url = open_adas + '%s.php?' % class_
        self.class_ = class_
        self.data = 0
        self.parameters = []

    def search(self, parameters):
        """Execute the search; returns a list of (url, name) tuples."""
        self.parameters = parameters
        self._retrieve_search_page()
        return self._parse_data()

    def _retrieve_search_page(self):
        search_url = self.url + urllib.parse.urlencode(self.parameters)
        # open.adas.ac.uk serves a certificate chain urllib rejects by
        # default, so certificate verification is disabled here.
        ssl._create_default_https_context = ssl._create_unverified_context
        res, __ = urllib.request.urlretrieve(search_url)
        # BUG FIX: the temp file handle was previously never closed.
        with open(res) as f:
            self.data = f.read()
        os.remove(res)

    def _parse_data(self):
        parser = SearchPageParser()
        parser.feed(self.data)
        lines = parser.lines
        if lines == []:
            # FIX: return an empty list (was `{}`) so the result type is
            # consistent with the populated case; both are falsy.
            return []
        lines.pop(0)  # drop the header row
        db = []
        for l in lines:
            if self.class_ == 'adf11':
                element, class_, comment, year, resolved, url, cl, typ, name = l
            elif self.class_ == 'adf15':
                element, ion, w_lo, w_hi, url, cl, typ, name = l
            else:
                raise NotImplementedError('this should never happen')
            db.append((url, name.strip()))
        return db

    def _strip_url(self, url):
        # 'filedetail.php?id=32147' -> 32147
        __, id_ = url.split('=')
        return int(id_)
from html.parser import HTMLParser
class SearchPageParser(HTMLParser):
    """
    Filling in a search form on https://open.adas.ac.uk generates a HTML
    document containing a results table.  This parser collects, for every
    table whose `summary` attribute mentions "Results", one list per row
    holding the cell texts plus the href of any anchor in that row.
    >>> html = '''
    ... <table summary='Search Results'>
    ... <tr>
    ... <td>Ne</td> <td><a href='filedetail.php?id=32147'>rc89_ne.dat</a></td>
    ... <tr>
    ... </tr>
    ... <td>C</td> <td><a href='filedetail.php?id=32154'>rc89_c.dat</a></td>
    ... </tr>
    ... </table>'''
    >>> parser = SearchPageParser()
    >>> parser.feed(html)
    >>> for l in parser.lines: print(l)
    ['Ne', 'filedetail.php?id=32147', 'rc89_ne.dat']
    ['C', 'filedetail.php?id=32154', 'rc89_c.dat']
    """
    # BUG FIX: the doctest previously used Python 2 `print l`, which is a
    # SyntaxError under doctest on Python 3; the prose also referred to a
    # `searchresults` class attribute that the code never checks.

    def reset(self):
        # `search_results` flags whether parsing is inside a results table;
        # `line` accumulates the current row, `lines` the finished rows.
        self.search_results = False
        self.line = []
        self.lines = []
        HTMLParser.reset(self)

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if (tag == 'table'
                and 'summary' in attrs
                and 'Results' in attrs['summary']):
            self.search_results = True
        if not self.search_results:
            return
        if tag == 'a' and self.line is not None:  # was `!= None`
            self.line.append(attrs['href'])

    def handle_endtag(self, tag):
        if tag == 'table':
            self.search_results = False
        if not self.search_results:
            return
        if tag == 'tr':
            self.lines.append(self.line)
            self.line = []

    def handle_data(self, data):
        if not self.search_results:
            return
        if data.strip() != '':
            self.line.append(data)
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
|
<filename>2021/18/solve.py<gh_stars>0
import os.path
from itertools import permutations
from functools import reduce
from copy import deepcopy
# The puzzle input sits next to this script; it is read once at import time.
INPUT=os.path.join(os.path.dirname(__file__), "input.txt")
with open(INPUT) as f:
    data = f.read()
def add_to_list(s, k, init_index, second_index):
    """Descend nested lists and add ``k`` to the first scalar reached.

    The descent takes ``init_index`` for the first step and
    ``second_index`` for every subsequent step; mutates ``s`` in place.
    """
    node, idx = s, init_index
    while isinstance(node[idx], list):
        node = node[idx]
        idx = second_index
    node[idx] += k
def explode(number, parent_indexes=(), parent_nodes=()):
    """Explode the leftmost pair nested four levels deep, in place.

    Returns `number` (truthy) when an explosion happened, otherwise None,
    so callers can use the result as a "changed" flag.
    `parent_indexes`/`parent_nodes` record the recursion path; the default
    tuples are rebound with `+=`, never mutated, so the mutable-default
    pitfall does not apply.
    """
    parent_nodes += (number,)
    for i, a in enumerate(number):
        if not isinstance(a, list):
            continue
        # added[side] remembers whether a value was already pushed left (0)
        # or right (1) while walking back up the ancestor chain.
        added = [False, False]
        if len(parent_nodes) == 4:
            # `a` sits at depth 4: zero it out, then walk the path upward
            # and add each of its two values to the nearest regular number
            # on the corresponding side (via add_to_list).
            number[i] = 0
            for j, b in zip(reversed(parent_indexes + (i,)), reversed(parent_nodes)):
                k = 1 - j  # opposite side of the one we came from
                if added[j]:
                    continue
                add_to_list(b, a[k], k, j)
                added[j] = True
            return number
        elif explode(a, parent_indexes + (i,), parent_nodes):
            # A deeper pair already exploded; stop after one change.
            return number
def split(number):
    """Split the leftmost regular number > 9 into a [floor, ceil] pair.

    Mutates `number` in place; returns it (truthy) if a split occurred,
    otherwise returns None.
    """
    for idx, element in enumerate(number):
        if isinstance(element, list):
            if split(element):
                return number
        elif element > 9:
            half, rem = divmod(element, 2)
            number[idx] = [half, half + rem]
            return number
def magnitude(n):
    """Recursive magnitude: 3*left + 2*right for pairs, the value itself for leaves."""
    if not isinstance(n, list):
        return n
    left, right = n
    return 3 * magnitude(left) + 2 * magnitude(right)
def fully_reduce(number):
    """Apply explode/split steps until neither applies; returns the number."""
    changed = True
    while changed:
        # explode takes priority; split only runs when nothing exploded.
        changed = explode(number) or split(number)
    return number
def parse(data):
    """Yield one snailfish number (nested list of ints) per input line.

    Uses ast.literal_eval instead of eval(): each line is a plain list
    literal, and literal_eval cannot execute arbitrary code.
    """
    import ast  # local import keeps the module's import block untouched
    return (ast.literal_eval(line) for line in data.splitlines())
def part1(data):
    """Add all numbers left-to-right, fully reducing after each addition."""
    numbers = parse(data)
    total = next(numbers)
    for nxt in numbers:
        total = fully_reduce([total, nxt])
    return magnitude(total)
def part2(data):
    """Largest magnitude obtainable by adding any two distinct numbers.

    Order matters (snailfish addition is not commutative), hence
    permutations rather than combinations; deepcopy protects the operands
    from fully_reduce's in-place mutation.
    """
    candidates = (
        magnitude(fully_reduce(deepcopy([left, right])))
        for left, right in permutations(parse(data), 2)
    )
    return max(candidates)
# Regression tests taken from the puzzle's worked examples; they run at
# import time and fail fast if explode/split/magnitude change behaviour.
assert([[[[0,9],2],3],4]==explode([[[[[9,8],1],2],3],4]))
assert([7,[6,[5,[7,0]]]]==explode([7,[6,[5,[4,[3,2]]]]]))
assert([[6,[5,[7,0]]],3]==explode([[6,[5,[4,[3,2]]]],1]))
assert([[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]==explode([[3,[2,[1,[7,3]]]],[6,[5,[4,[3,2]]]]]))
assert([[[[0,7],4],[[7,8],[0,13]]],[1,1]]==split([[[[0,7],4],[15,[0,13]]],[1,1]]))
assert(143==magnitude([[1,2],[[3,4],5]]))
assert(3488==magnitude([[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]))
# Full worked example for part 1, then the answer for the real input.
assert(4140 == part1("""[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]"""))
print(part1(data), "[PART 1]")
# Full worked example for part 2, then the answer for the real input.
assert(3993 == part2("""[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]"""))
print(part2(data), "[PART 2]")
|
<reponame>eproje/uPy_Course
# SOURCE: https://www.mfitzp.com/article/3d-rotating-cube-micropython-oled/
from machine import I2C, Pin
import ssd1306
import math
# I2C bus on pins 18/19 at 400 kHz driving a 128x64 SSD1306 OLED.
# NOTE(review): pin numbers are board-specific (presumably an ESP32) —
# confirm against the target hardware.
i2c = I2C(scl=Pin(18), sda=Pin(19), freq=400000)
display=ssd1306.SSD1306_I2C(128,64,i2c)
class Point3D:
    """A point in 3D space.

    Rotation and projection methods are pure: each returns a new Point3D
    and leaves the receiver untouched.
    """

    def __init__(self, x=0, y=0, z=0):
        self.x, self.y, self.z = x, y, z

    @staticmethod
    def _sincos(angle):
        # Shared degrees -> (sin, cos) conversion for the rotations below.
        rad = angle * math.pi / 180
        return math.sin(rad), math.cos(rad)

    def rotateX(self, angle):
        """Rotate around the X axis by `angle` degrees."""
        sina, cosa = self._sincos(angle)
        return Point3D(
            self.x,
            self.y * cosa - self.z * sina,
            self.y * sina + self.z * cosa,
        )

    def rotateY(self, angle):
        """Rotate around the Y axis by `angle` degrees."""
        sina, cosa = self._sincos(angle)
        return Point3D(
            self.z * sina + self.x * cosa,
            self.y,
            self.z * cosa - self.x * sina,
        )

    def rotateZ(self, angle):
        """Rotate around the Z axis by `angle` degrees."""
        sina, cosa = self._sincos(angle)
        return Point3D(
            self.x * cosa - self.y * sina,
            self.x * sina + self.y * cosa,
            self.z,
        )

    def project(self, win_width, win_height, fov, viewer_distance):
        """Perspective-project onto a win_width x win_height screen.

        The original z is preserved in the result so callers can still
        depth-sort after projection.
        """
        factor = fov / (viewer_distance + self.z)
        return Point3D(
            self.x * factor + win_width / 2,
            -self.y * factor + win_height / 2,
            self.z,
        )
class Simulation:
    """Rotating-cube demo: projects a wireframe cube onto the SSD1306."""
    def __init__(
        self,
        width=128,
        height=64,
        fov=64,
        distance=4,
        rotateX=5,
        rotateY=5,
        rotateZ=5
    ):
        # Cube corners in model space (unit cube centred on the origin).
        self.vertices = [
            Point3D(-1,1,-1),
            Point3D(1,1,-1),
            Point3D(1,-1,-1),
            Point3D(-1,-1,-1),
            Point3D(-1,1,1),
            Point3D(1,1,1),
            Point3D(1,-1,1),
            Point3D(-1,-1,1)
        ]
        # Define the edges, the numbers are indices to the vertices above.
        self.edges = [
            # Back
            (0, 1),
            (1, 2),
            (2, 3),
            (3, 0),
            # Front
            (5, 4),
            (4, 7),
            (7, 6),
            (6, 5),
            # Front-to-back
            (0, 4),
            (1, 5),
            (2, 6),
            (3, 7),
        ]
        # Dimensions, passed positionally to Point3D.project each frame.
        self.projection = [width, height, fov, distance]
        # Rotational speeds (degrees per frame around each axis)
        self.rotateX = rotateX
        self.rotateY = rotateY
        self.rotateZ = rotateZ
    def run(self):
        """Animate forever: rotate, project, and redraw each frame."""
        # Starting angle (unrotated in any dimension)
        angleX, angleY, angleZ = 0, 0, 0
        while 1:
            # It will hold transformed vertices.
            t = []
            for v in self.vertices:
                # Rotate the point around X axis, then around Y axis, and finally around Z axis.
                r = v.rotateX(angleX).rotateY(angleY).rotateZ(angleZ)
                # Transform the point from 3D to 2D
                p = r.project(*self.projection)
                # Put the point in the list of transformed vertices
                t.append(p)
            display.fill(0)
            for e in self.edges:
                # to_int truncates all five args (x0, y0, x1, y1, colour=1)
                # to ints before handing them to display.line.
                display.line(*to_int(t[e[0]].x, t[e[0]].y, t[e[1]].x, t[e[1]].y, 1))
            display.show()
            # Continue the rotation
            angleX += self.rotateX
            angleY += self.rotateY
            angleZ += self.rotateZ
def to_int(*args):
    """Truncate every positional argument to int; returns them as a list."""
    return list(map(int, args))
# Build the demo with default geometry and animate forever (run() never
# returns).
s = Simulation()
s.run()
<reponame>pmeier/torchssim<filename>torchssim/ssim.py
from collections import namedtuple
import torch
from torch.nn.functional import relu
from torchimagefilter import ImageFilter
# Public API of this module.  NOTE(review): the "SSIMReprenstation"
# spelling (sic) is kept because it is the exported name.
__all__ = [
    "SSIMReprenstation",
    "SSIMContext",
    "SimplifiedSSIMContext",
    "calculate_ssim_repr",
    "calculate_luminance",
    "calculate_contrast",
    "calculate_structure",
    "calculate_non_structural",
    "calculate_structural",
    "calculate_ssim",
    "calculate_simplified_ssim",
]
# Per-image statistics consumed by the SSIM formulas below:
# raw image, filtered mean, squared mean and variance.
SSIMReprenstation = namedtuple(
    "ssim_reprensentation", ("raw", "mean", "mean_sq", "var")
)
# Stabilising epsilons and weighting exponents for the full
# three-component SSIM (luminance / contrast / structure).
SSIMContext = namedtuple(
    "ssim_context",
    (
        "luminance_eps",
        "contrast_eps",
        "structure_eps",
        "luminance_exp",
        "contrast_exp",
        "structure_exp",
    ),
)
# Epsilons for the simplified two-component SSIM variant.
SimplifiedSSIMContext = namedtuple(
    "simplified_ssim_context", ("non_structural_eps", "structural_eps")
)
def _possqrt(x: torch.Tensor) -> torch.Tensor:
return torch.sqrt(relu(x))
def calculate_ssim_repr(
    image: torch.FloatTensor, image_filter: ImageFilter
) -> SSIMReprenstation:
    """Build the (raw, mean, mean_sq, var) representation used by SSIM.

    `image_filter` is the local-averaging filter; the variance is the
    filtered E[x^2] minus the squared filtered mean.
    """
    local_mean = image_filter(image)
    local_mean_sq = local_mean ** 2.0
    local_var = image_filter(image ** 2.0) - local_mean_sq
    return SSIMReprenstation(image, local_mean, local_mean_sq, local_var)
def calculate_luminance(
    input_mean_sq: torch.FloatTensor,
    target_mean_sq: torch.FloatTensor,
    mean_prod: torch.FloatTensor,
    eps: float,
) -> torch.FloatTensor:
    """SSIM luminance term: (2*mu_x*mu_y + eps) / (mu_x^2 + mu_y^2 + eps)."""
    numerator = 2.0 * mean_prod + eps
    denominator = input_mean_sq + target_mean_sq + eps
    return numerator / denominator
def calculate_contrast(
    input_var: torch.FloatTensor,
    target_var: torch.FloatTensor,
    std_prod: torch.FloatTensor,
    eps: float,
) -> torch.FloatTensor:
    """SSIM contrast term: (2*sig_x*sig_y + eps) / (sig_x^2 + sig_y^2 + eps)."""
    numerator = 2.0 * std_prod + eps
    denominator = input_var + target_var + eps
    return numerator / denominator
def calculate_structure(
    std_prod: torch.FloatTensor, covar: torch.FloatTensor, eps: float
) -> torch.FloatTensor:
    """SSIM structure term: (cov_xy + eps) / (sig_x*sig_y + eps)."""
    stabilised_covar = covar + eps
    stabilised_std = std_prod + eps
    return stabilised_covar / stabilised_std
def calculate_ssim(
    input_repr: SSIMReprenstation,
    target_repr: SSIMReprenstation,
    ctx: SSIMContext,
    image_filter: ImageFilter,
) -> torch.FloatTensor:
    """Full SSIM map: luminance^a * contrast^b * structure^g per location.

    The covariance is computed the same way as the variance in
    calculate_ssim_repr: filtered product minus product of filtered means.
    """
    mean_prod = input_repr.mean * target_repr.mean
    std_prod = _possqrt(input_repr.var * target_repr.var)
    covar = image_filter(input_repr.raw * target_repr.raw) - mean_prod
    luminance = calculate_luminance(
        input_repr.mean_sq, target_repr.mean_sq, mean_prod, ctx.luminance_eps
    )
    contrast = calculate_contrast(
        input_repr.var, target_repr.var, std_prod, ctx.contrast_eps
    )
    structure = calculate_structure(std_prod, covar, ctx.structure_eps)
    weighted = luminance ** ctx.luminance_exp
    weighted = weighted * contrast ** ctx.contrast_exp
    weighted = weighted * structure ** ctx.structure_exp
    return weighted
def calculate_non_structural(
    input_mean_sq: torch.FloatTensor,
    target_mean_sq: torch.FloatTensor,
    mean_prod: torch.FloatTensor,
    eps: float,
) -> torch.FloatTensor:
    """Non-structural term of simplified SSIM — the luminance formula:
    (2*mu_x*mu_y + eps) / (mu_x^2 + mu_y^2 + eps).
    """
    return (2.0 * mean_prod + eps) / (input_mean_sq + target_mean_sq + eps)
def calculate_structural(
    input_var: torch.FloatTensor,
    target_var: torch.FloatTensor,
    covar: torch.FloatTensor,
    eps: float,
) -> torch.FloatTensor:
    """Structural term of simplified SSIM:
    (2*cov_xy + eps) / (sig_x^2 + sig_y^2 + eps).
    """
    numerator = 2.0 * covar + eps
    denominator = input_var + target_var + eps
    return numerator / denominator
def calculate_simplified_ssim(
    input_repr: SSIMReprenstation,
    target_repr: SSIMReprenstation,
    ctx: SimplifiedSSIMContext,
    image_filter: ImageFilter,
) -> torch.FloatTensor:
    """Two-component SSIM: non-structural (luminance) times structural."""
    mean_prod = input_repr.mean * target_repr.mean
    covar = image_filter(input_repr.raw * target_repr.raw) - mean_prod
    non_structural = calculate_non_structural(
        input_repr.mean_sq, target_repr.mean_sq, mean_prod, ctx.non_structural_eps
    )
    structural = calculate_structural(
        input_repr.var, target_repr.var, covar, ctx.structural_eps
    )
    return non_structural * structural
|
import os
from os.path import join
import cv2
import numpy as np
from collections import defaultdict
def dictload(dirpath='data/train.txt'):
    """Load "<image name> <label>" pairs from *dirpath*.

    Returns:
        labelDict: {image basename (extension stripped): label string}
        validateList: up to the first 9 basenames of every label, used as
            a fixed validation subset.

    FIXES: the file is now closed deterministically (with-block) and the
    manual readline()/counter loops are replaced by iteration/slicing;
    behaviour is unchanged (the old `counter < 10` after increment also
    kept only the first nine entries per label).
    """
    labelDict = dict()
    validateDict = defaultdict(list)
    validateList = list()
    with open(dirpath, "r") as f:
        for line in f:
            name = line.split(' ')[0]
            label = line.split(' ')[1].strip()
            key, ext = os.path.splitext(name)
            labelDict[key] = label
            validateDict[label].append(key)
    for label, keys in validateDict.items():
        # First 9 images of each label form the validation subset.
        validateList.extend(keys[:9])
    return labelDict, validateList
def testdictload(dirpath = 'data/train.txt'):
f = open(dirpath, "r")
picDict = list()
while True:
line = f.readline()
if line:
pass # do something here
# name = line.split(' ')[0]
# key,ext = os.path.splitext(name)
picDict.append(line.strip())
else:
break
f.close()
return picDict
def dataload(img_w=300, img_h=300, val_ratio=0.95, gray=0):
    """Load training images and labels from the hard-coded dataset folder.

    Images are resized to (img_w, img_h) and cast to float32; with gray=1
    they are converted to single-channel first.  Samples whose basename is
    in dictload's validation list are additionally copied into the
    validation arrays (the split is a subset, not disjoint).

    Returns X_train, y_train-1, X_val, y_val-1, train_name — labels in the
    file are 1-based and are shifted to 0-based column vectors of int32.

    NOTE(review): `val_ratio` is accepted for API compatibility but unused;
    the validation subset is fixed by dictload.  The dead commented-out
    splitting code was removed.
    """
    # Machine-specific dataset location.
    labelDict, validateList = dictload("d:/git/keras-resnet/data/train.txt")
    img_dirpath = "d:/git/keras-resnet/data/train"
    train_name = list()
    X_train = []
    y_train = []
    X_val = []
    y_val = []
    for filename in os.listdir(img_dirpath):
        name, ext = os.path.splitext(filename)
        if ext not in ['.jpg']:
            continue
        img = cv2.imread(join(img_dirpath, filename))
        if gray == 1:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.resize(img, (img_w, img_h))
        img = img.astype(np.float32)
        X_train.append(img)
        y_train.append(labelDict[name])
        train_name.append(name)
        if name in validateList:
            X_val.append(img)
            y_val.append(labelDict[name])
    # Labels are read as strings; asarray + astype converts them to ints.
    X_train = np.asarray(X_train).astype(np.float32)
    y_train = np.asarray(y_train).astype(np.int32)
    X_val = np.asarray(X_val).astype(np.float32)
    y_val = np.asarray(y_val).astype(np.int32)
    y_val = np.reshape(y_val, (len(y_val), 1))
    y_train = np.reshape(y_train, (len(y_train), 1))
    return X_train, y_train - 1, X_val, y_val - 1, train_name
def testLoad(img_w=300, img_h=300, val_ratio=0.95):
    """Load every image in the hard-coded test folder.

    Returns (X_test, picDict): resized float32 images and their filenames
    in matching order.

    NOTE(review): `val_ratio` is unused; kept for signature compatibility.
    Dead commented-out code was removed.
    """
    img_dirpath = "d:/git/keras-resnet/data/test"
    X_test = []
    picDict = list()
    for filename in os.listdir(img_dirpath):
        img = cv2.imread(join(img_dirpath, filename))
        img = cv2.resize(img, (img_w, img_h))
        X_test.append(img.astype(np.float32))
        picDict.append(filename)
    X_test = np.asarray(X_test).astype(np.float32)
    return X_test, picDict
# X_train,y_train,X_val,y_val=dataload(50,50)
# print() |
<gh_stars>0
# -*- coding: utf-8 -*-
import os
import socket
import json
import http.client as httplib
from uPHue import *
class Bridge(object):
    """ Interface to the Hue ZigBee bridge
    """
    def __init__(self, ip=None, username=None, config_file_path=None):
        """ Initialization function.
        Parameters:
        ------------
        ip : string
            IP address as dotted quad
        username : string, optional
        config_file_path : string, optional
            Where the cached ip/username registration is stored
            (defaults to '.python_hue' in the current directory).
        """
        if config_file_path is not None:
            self.config_file_path = config_file_path
        else:
            self.config_file_path = os.path.join(os.getcwd(), '.python_hue')
        self.ip = ip
        self.username = username
        if username is not None:
            # NOTE(review): self.api is only assigned here or inside
            # connect(); the request helpers below assume one of those
            # code paths ran first.
            self.api = '/api/' + username
        self._name = None
        # self.minutes = 600 # these do not seem to be used anywhere?
        # self.seconds = 10
        self.connect()
    @property
    def name(self):
        '''Get or set the name of the bridge [string]'''
        # Reading always refreshes from the bridge's /config endpoint.
        self._name = self.get('/config')['name']
        return self._name
    @name.setter
    def name(self, value):
        self._name = value
        data = {'name': self._name}
        self.put('/config', data)
    # Convenience wrappers; `req` is a path relative to /api/<username>.
    def get(self, req):
        return self.request('GET', self.api + req)
    def put(self, req, data):
        return self.request('PUT', self.api + req, data)
    def post(self, req, data):
        return self.request('POST', self.api + req, data)
    def delete(self, req):
        return self.request('DELETE', self.api + req)
    def request(self, mode='GET', address=None, data=None):
        """ Utility function for HTTP GET/PUT requests for the API.

        Sends `data` JSON-encoded for PUT/POST; returns the decoded JSON
        response.  Raises PhueRequestTimeout on socket timeout (10 s).
        """
        connection = httplib.HTTPConnection(self.ip, timeout=10)
        try:
            if mode == 'GET' or mode == 'DELETE':
                connection.request(mode, address)
            if mode == 'PUT' or mode == 'POST':
                connection.request(mode, address, json.dumps(data))
            logger.debug("{0} {1} {2}".format(mode, address, str(data)))
        except socket.timeout:
            error = "{} Request to {}{} timed out.".format(mode, self.ip, address)
            logger.exception(error)
            raise PhueRequestTimeout(None, error)
        result = connection.getresponse()
        response = result.read()
        connection.close()
        response = response.decode('utf-8')
        logger.debug(response)
        return json.loads(response)
    def get_ip_address(self, set_result=False):
        """ Get the bridge ip address from the meethue.com nupnp api.

        Returns the ip string, or False when the service reports an empty
        address.  With set_result=True the ip is also stored on self.
        """
        connection = httplib.HTTPSConnection('www.meethue.com')
        connection.request('GET', '/api/nupnp')
        logger.info('Connecting to meethue.com/api/nupnp')
        result = connection.getresponse()
        data = json.loads(str(result.read(), encoding='utf-8'))
        """ close connection after read() is done, to prevent issues with read() """
        connection.close()
        ip = str(data[0]['internalipaddress'])
        if ip != '':
            if set_result:
                self.ip = ip
            return ip
        else:
            return False
    def register_app(self):
        """ Register this computer with the Hue bridge hardware and save the resulting access token """
        registration_request = {"devicetype": "python_hue"}
        response = self.request('POST', '/api', registration_request)
        # The bridge answers with a list of {'success': ...} or
        # {'error': ...} dicts; persist the token on success, raise a
        # descriptive exception on known error codes.
        for line in response:
            for key in line:
                if 'success' in key:
                    with open(self.config_file_path, 'w') as f:
                        logger.info(
                            'Writing configuration file to ' + self.config_file_path)
                        f.write(json.dumps({self.ip: line['success']}))
                    logger.info('Reconnecting to the bridge')
                    self.connect()
                if 'error' in key:
                    error_type = line['error']['type']
                    if error_type == 101:
                        raise PhueRegistrationException(error_type,
                                                        'The link button has not been pressed in the last 30 seconds.')
                    if error_type == 7:
                        raise PhueException(error_type,
                                            'Unknown username')
    def connect(self):
        """ Connect to the Hue bridge.

        Prefers explicitly supplied ip/username; otherwise falls back to
        the cached config file, and finally to a fresh registration.
        """
        logger.info('Attempting to connect to the bridge...')
        # If the ip and username were provided at class init
        if self.ip is not None and self.username is not None:
            logger.info('Using ip: ' + self.ip)
            logger.info('Using username: ' + self.username)
            return
        if self.ip is None or self.username is None:
            try:
                with open(self.config_file_path) as f:
                    config = json.loads(f.read())
                    if self.ip is None:
                        self.ip = list(config.keys())[0]
                        logger.info('Using ip from config: ' + self.ip)
                    else:
                        logger.info('Using ip: ' + self.ip)
                    if self.username is None:
                        self.username = config[self.ip]['username']
                        self.api = '/api/' + self.username
                        logger.info(
                            'Using username from config: ' + self.username)
                    else:
                        logger.info('Using username: ' + self.username)
            except Exception as e:
                # Missing/corrupt config: fall back to pairing with the
                # bridge (requires the link button to have been pressed).
                logger.info(
                    'Error opening config file, will attempt bridge registration')
                self.register_app()
    def get_api(self):
        """ Returns the full api dictionary """
        return self.get('')
|
<gh_stars>1-10
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Qt5Agg') # Apple doesn't like Tkinter (TkAgg backend) so I needed to change the backend to 'Qt5Agg'
import statsmodels.api as sm
import numpy as np
import os
import pandas as pd
from numpy import genfromtxt
from os import makedirs
from os.path import exists
from itertools import combinations, chain
from scipy.signal import find_peaks, peak_prominences, peak_widths
from statistics import stdev, mean
import generate_synth_signal as synth_signal
def read_MAL_data(wellNum=None, filename=None):
    """Load a MAL time-series CSV into a numpy array.

    When *wellNum* is truthy, the (machine-specific) default path for that
    well is used and *filename* is ignored; otherwise *filename* is read.
    """
    if wellNum:
        filename = (
            "/Users/Arina/PycharmProjects/ScrunchingTrack/MAL data well"
            + str(wellNum) + ".csv"
        )
    return genfromtxt(filename, delimiter=',')
def ind_exists(sset, ind):
    """Return True if `ind` appears in `sset` at position 1 or later.

    BUG FIX: the original returned False from the loop's else branch, so
    only sset[1] was ever examined and matches at later positions were
    reported as False.  (The scan still starts at index 1, preserving the
    original intent of skipping the first entry.)
    """
    return any(sset[i] == ind for i in range(1, len(sset)))
""" Get all combinations of (sequential) peaks in set of peaks with >3 peaks"""
def get_combinations(sset, mode="sequential"):
    """All 3-peak subsets of `sset` (only when it holds more than 3 peaks).

    mode="sequential": consecutive windows of 3 peaks;
    mode="any": every 3-combination (as tuples);
    any other mode yields []; sets of <= 3 peaks are returned wrapped
    unchanged as [sset].
    """
    if len(sset) <= 3:
        return [sset]
    if mode == "sequential":
        return [sset[start:start + 3] for start in range(len(sset) - 2)]
    if mode == "any":
        return list(combinations(sset, 3))
    return []
def check_not_too_far(ssets, peak_data):
    """Filter `ssets` (index triples) to those whose consecutive peak
    times are each less than 25 frames apart."""
    kept = []
    for candidate in ssets:
        times = to_timestamps(candidate, peak_data)
        if times[2] - times[1] < 25 and times[1] - times[0] < 25:
            kept.append(candidate)
    return kept
""" Converts a set w peak indexes from the data table to peak timestamps (indexes in the MAL array) """
def to_timestamps(sset, peak_data) -> object:
    """Map data-table reference indexes to peak times (column 1 of
    `peak_data`), both coerced to int."""
    return [int(peak_data[int(ref)][1]) for ref in sset]
def Lowess(data, pts=6, itn=3, order=1):
    """Robust LOWESS smoothing (locally weighted polynomial regression).

    data: 1-D sequence; pts: neighbourhood size of each local fit;
    itn: robustifying iterations that down-weight outliers;
    order: local polynomial order (forced >= 1).
    Returns a pandas Series named 'Trend' indexed like the input.
    """
    data = pd.DataFrame(data)
    x = np.array(data.index, dtype=float)
    # condition x-values to be between 0 and 1 to reduce errors in linalg
    x = x - x.min()
    x = x / x.max()
    y = data.values
    n = len(data)
    # Clamp the neighbourhood to the available number of points.
    r = int(np.min([pts, n]))
    r = min([r, n - 1])
    order = max([1, order])
    # Create matrix of 1, x, x**2, x**3, etc, by row
    xm = np.array([x ** j for j in range(order + 1)])
    # Create weight matrix, one column per data point
    h = [np.sort(np.abs(x - x[i]))[r] for i in range(n)]
    w = np.clip(np.abs((x[:, None] - x[None, :]) / h), 0.0, 1.0)
    w = (1 - w ** 3) ** 3  # tricube kernel
    # Set up output
    yEst = np.zeros(n)
    delta = np.ones(n)  # Additional weights for iterations
    for iteration in range(itn):
        for i in range(n):
            # Weighted least-squares fit of the local polynomial at x[i].
            weights = delta * w[:, i]
            xw = np.array([weights * x ** j for j in range(order + 1)])
            b = xw.dot(y)
            a = xw.dot(xm.T)
            beta = np.linalg.solve(a, b)
            yEst[i] = sum([beta[j] * x[i] ** j for j in range(order + 1)])
        # Set up weights to reduce effect of outlier points on next iteration
        residuals = y - yEst
        s = np.median(np.abs(residuals))
        delta = np.clip(residuals / (6.0 * s), -1, 1)
        delta = (1 - delta ** 2) ** 2
    return pd.Series(yEst, index=data.index, name='Trend')
def frac_lowess(mal_arr, frac=0.004, displayPlot=False):
    """Smooth `mal_arr` with statsmodels LOWESS (higher `frac` = more
    smoothing), sampling time at 10 fps.

    `displayPlot` is accepted for API compatibility; the plotting code it
    once guarded was already disabled.
    """
    timestamps = np.arange(start=0, stop=len(mal_arr) / 10, step=0.1)
    smoothed = sm.nonparametric.lowess(
        mal_arr, timestamps, frac=frac, missing='none', it=0
    )
    return smoothed[:, 1]
def plot_valleys(mal_arr, xlabel=None):
    """Plot the signal with filtered peaks and their left/right bases.

    NOTE(review): the figure is built but never shown/saved before
    plt.close(0) — presumably intended for interactive use; confirm.
    """
    # `dict` shadows the builtin here; kept as-is (doc-only change).
    inds, dict = find_good_peaks(mal_arr)
    inds_right = dict["right_bases"]
    inds_left = dict["left_bases"]
    plt.plot(mal_arr)
    plt.plot(inds, np.array(mal_arr)[inds], "o")
    plt.plot(inds_right, np.array(mal_arr)[inds_right], "X")
    plt.plot(inds_left, np.array(mal_arr)[inds_left], "x")
    plt.legend(["", "peak", "right valley", "left valley"])
    if xlabel:
        plt.xlabel(xlabel)
    plt.close(0)
def plot_valleys_naive(mal_arr):
    """Plot peaks of the signal and of its negation ("valleys").

    Returns (peak_inds, valley_inds) from find_good_peaks on mal_arr and
    -mal_arr respectively.
    """
    peak_inds, dict = find_good_peaks(mal_arr)
    valley_inds, dict_neg = find_good_peaks(-mal_arr)
    plt.plot(mal_arr)
    plt.plot(peak_inds, np.array(mal_arr)[peak_inds], "o")
    plt.plot(valley_inds, np.array(mal_arr)[valley_inds], "x")
    plt.legend(["", "peaks", "valleys"])
    plt.close(0)
    return peak_inds, valley_inds
def match_peaks_valleys(peak, list_of_valleys):
    """Return the (left_valley, right_valley) pair flanking `peak`.

    Edge fallbacks: 0 on the left when the peak precedes every valley,
    and peak + 10 on the right when it follows every valley.
    NOTE(review): when `peak` exactly equals a valley value the scan can
    read past the end of the list (IndexError) — preserved from the
    original; confirm whether that input can occur.
    """
    first_valley = list_of_valleys[0]
    last_valley = list_of_valleys[-1]
    if peak < first_valley:
        return 0, first_valley
    if peak > last_valley:
        return last_valley, peak + 10
    for pos in range(len(list_of_valleys)):
        if list_of_valleys[pos] < peak and list_of_valleys[pos + 1] > peak:
            return list_of_valleys[pos], list_of_valleys[pos + 1]
def plot_valleys_prominences(sset, peak_data, currMAL):
    """Plot one peak set on the MAL trace with its scipy prominence bases.

    The x-axis is zoomed to the peak set's time span (+/- 50 frames).
    """
    plt.plot(currMAL)
    #for ind in sset:
    #    plt.plot(peak_data[ind][1], peak_data[ind][10], "X")
    times = to_timestamps(sset, peak_data)
    # Bases of each peak within a 10-sample prominence window.
    _, inds_left, inds_right = peak_prominences(currMAL, times, wlen=10)
    print("left",inds_left )
    print("right", inds_right)
    plt.plot(times, np.array(currMAL)[times], "o")
    plt.plot(inds_right, np.array(currMAL)[inds_right], "X")
    plt.plot(inds_left, np.array(currMAL)[inds_left], "x")
    plt.xlim(times[0]-50, times[-1]+50)
    plt.legend(["", "peak", "right valley", "left valley"])
    plt.close(0)
""" Find all peaks (local MINIMA) without any filter """
def find_all_peaks(mal_arr, show=False):
    """Return every scipy-detected peak index of `mal_arr`, unfiltered.

    With show=True the signal and peaks are also plotted.
    """
    indexes, _ = find_peaks(mal_arr)
    if show:
        plt.plot(mal_arr)
        plt.plot(indexes, np.array(mal_arr)[indexes], "x")
        plt.show()
    plt.close(0)
    return list(indexes)
""""
def find_right_valleys(mal_arr, peak_data):
peak_inds, peak_dict = find_good_peaks(mal_arr)
valley_inds, _ = find_good_peaks(-mal_arr)
for peak_ind in peak_inds:
diff = valley_inds - peak_ind
mask_right = np.ma.masked_less_equal(diff, 0) #mask the negative differences and zero since we are looking for values above
#mask_left = np.ma.masked_greater_equal(diff, 0)
if not np.all(mask_right):
masked_diff_right = np.ma.masked_array(diff, mask_right)
print("peak ind:", peak_ind, "valley ind:", masked_diff_right.argmin())
# get left valley
"""
""" Perform peak (local MINIMA) filtering based on the following rules:
2) peak width larger than 1.7 (scaled by frame rate)
6) peak prominence smaller than 67 (or higher for larger worms)
Returns peakinds: indexes of all peaks that satisfy the condition;
peak dict: dictonary of features describing the peaks
"""
def find_good_peaks(mal_arr, show=False, outpath=None, fps=5, pix_adj=1.5):
    """Detect candidate peaks in the MAL signal via scipy.signal.find_peaks.

    Peaks must be >=3 samples apart, have prominence below
    ``67 * pix_adj * worm_size_adj`` (the -100 lower bound is effectively
    "no minimum"), and width of at least ``1.7 * fps_adj`` samples.

    NOTE(review): the module text above this function says width "larger
    than 1.75" while the code uses 1.7 — confirm which is intended.

    :param mal_arr: per-frame MAL signal.
    :param show: plot the signal with the accepted peaks marked.
    :param outpath: if given (and show), save the figure there.
    :param fps: frames per second; thresholds scale with fps/5.
    :param pix_adj: pixel-scale adjustment factor for prominence.
    :return: (peakinds, peak_dict) straight from ``find_peaks``.
    """
    fps_adj = fps / 5
    worm_size_adj = 1
    # todo: worm_size_adj = XXXX/worm_size
    peakinds, peak_dict = find_peaks(mal_arr, distance=3, prominence=(-100, 67 * pix_adj * worm_size_adj), width=1.7*fps_adj)
    if show:
        plt.plot(mal_arr)
        plt.plot(peakinds, np.array(mal_arr)[peakinds], "x")
        if outpath is not None:
            plt.savefig(outpath)
        plt.show()
        plt.close(0)
    return peakinds, peak_dict
"""
Creates a peak_data data table containing information about the peaks found in find_good_peaks function
1st column: reference index of the peak in this data table (Note: different than in the original script)
2nd column: location (time) of the peak;
3rd column: distance with the previous peak
4th column: number of peaks withing one oscillation (added later)
5th column: width of the peak;
6th column: prominence of the peak;
7th column: distance of the peak to the previous valley (lowest point between two peaks) == 'left_bases'
8th column: distance of the peak to the next valley == 'right_bases'
9th column: value of the previous valley
10th column: difference of values of peak and its previous valley
11th column: difference of values of peak and its next valley (Note: different than in the original script)
12th: dict where key: set index, value:COM when the worms first started moving (Added later)
"""
def get_peak_data(mal_arr):
    """Build the 14-column peak_data table for all peaks passing find_good_peaks.

    Column layout is documented in the module string above this function.
    Columns 3 (peaks per oscillation) and 11 (COM-displacement dict) are
    populated later by other functions; column 13 stores the next-valley value.

    :param mal_arr: per-frame MAL signal (indexable by frame number).
    :return: (peakinds, peak_dict, peak_data) — peak indices, the raw
        find_peaks feature dict, and the populated object array.
    """
    peakinds, peak_dict = find_good_peaks(mal_arr)
    # get all peaks info
    peak_data = np.zeros([len(peakinds), 14], dtype=object)  # initialize the array to store peak data
    for i in range(len(peakinds)):
        # 1: length of the peak -- here index in this data table (for future reference)
        peak_data[i][0] = int(i)
        # 2: location (time) of the peak (in frames)
        peak_data[i][1] = int(peakinds[i])
        # 3rd column: distance to the previous peak
        if i > 0:
            peak_data[i][2] = peak_data[i][1] - peak_data[i - 1][1]
        # 5th column: width of the peak
        peak_data[i][4] = peak_dict['widths'][i]  # todo: unnecessary bc already filtered out wide enough peaks?
        # 6th column: prominence of the peak
        peak_data[i][5] = peak_dict['prominences'][i]
        peak_data[i][6] = peakinds[i] - peak_dict['left_bases'][i]  # 7th: distance to the previous valley (in frames)
        peak_data[i][7] = peak_dict['right_bases'][i] - peakinds[i]  # 8th: distance to the next valley (in frames)
        # 9th column: value of the previous valley
        peak_data[i][8] = mal_arr[peak_dict['left_bases'][i]]
        # 10th column: difference of values of peak and its previous valley
        peak_data[i][9] = mal_arr[peakinds[i]] - peak_data[i][8]
        # !! CHANGED !! 11th column: difference of values of peak and its next valley
        peak_data[i][10] = mal_arr[peakinds[i]] - mal_arr[peak_dict['right_bases'][i]]
        # 12th: dict where key: set index, value:COM when the worms first started moving
        peak_data[i][11] = {}
        # 13th: com
        peak_data[i][12] = [] #pl
        # 14th: value of the next valley (right base)
        peak_data[i][13] = mal_arr[peak_dict['right_bases'][i]]
    return peakinds, peak_dict, peak_data
def add_valley_info():
    """WARNING (review): dead/broken code — do not call.

    Every name used here (``peak_data``, ``i``, ``mal_arr``, ``peak_dict``,
    ``peakinds``) is undefined in this scope, so calling this function raises
    NameError immediately.  It appears to be an unfinished refactor of the
    column assignments in ``get_peak_data`` that was meant to recompute
    prominences with a small window (wlen=10).  Kept for reference only.
    """
    peak_data[i][5], inds_left, inds_right = peak_prominences(mal_arr, [peak_data[i][1]], wlen=10)
    # 6th column: prominence of the peak
    peak_data[i][5] = peak_dict['prominences'][i]
    peak_data[i][6] = peakinds[i] - peak_dict['left_bases'][i] # 7th: distance to the previous valley (in frames)
    peak_data[i][7] = peak_dict['right_bases'][i] - peakinds[i] # 8th: distance to the next valley (in frames)
    # 9th column: value of the previous valley
    peak_data[i][8] = mal_arr[peak_dict['left_bases'][i]]
    # 10th column: difference of values of peak and its previous valley
    peak_data[i][9] = mal_arr[peakinds[i]] - peak_data[i][8]
    # !! CHANGED !! 11th column: difference of values of peak and its next valley
    peak_data[i][10] = mal_arr[peakinds[i]] - mal_arr[peak_dict['right_bases'][i]]
""" Check that a given peak satisfies the following rules:
3) 0.615<(peak value - previous valley value)/distance between the two values <6.5
4) distance between peak and its previous valley >2
5) distance of its two neighbor valleys >4
"""
def verify_good_peak(peak_data, peak_data_ind, fps=5, pix_adj=1.5):
    """Return True when the peak at row ``peak_data_ind`` passes rules 3-5.

    Rule 3: the rise slope (value diff to previous valley / frame distance,
    columns 9 and 6) lies strictly between 0.615 and 6.5 (fps/pixel scaled).
    Rule 4: the distance to the previous valley (column 6) exceeds 2 frames.
    Rule 5: the two neighbouring valleys (columns 6 + 7) are more than
    4 frames apart.  Thresholds scale with ``fps_adj`` and ``pix_adj``.
    """
    fps_adj = fps / 5
    row = peak_data[peak_data_ind]
    slope = row[9] / row[6]
    lower = 0.615 * fps_adj / pix_adj
    upper = 6.5 * fps_adj / pix_adj
    return (
        lower < slope < upper               # 3) rise slope in range
        and row[6] > 2 * fps_adj            # 4) elongation long enough
        and (row[7] + row[6]) > 4 * fps_adj # 5) valleys far enough apart
    )
""" Collects info about all potential peak sets by looking through all peaks that fit the "good peak" rules """
def get_peak_sets(peak_data, fps=5, pix_adj=1.5):
    """Group "good" peaks into candidate oscillation sets.

    Pass 1: for each peak that satisfies ``verify_good_peak``, collect
    subsequent good peaks whose distance lies in [4, 23) frames (fps-scaled);
    sets with fewer than 2 peaks are dropped.  Pass 2: extend each surviving
    set by chaining further good peaks within (4, 23] frames of its members;
    sets with fewer than 3 peaks are dropped.

    :param peak_data: table from get_peak_data (column 1 = peak frame).
    :param fps: frames per second; distance thresholds scale with fps/5.
    :param pix_adj: forwarded to verify_good_peak for pass 1 only.
    :return: (good_peak_sets, peak_data) — each set holds column-0 reference
        indices into peak_data, not frame times.
    """
    fps_adj = fps / 5
    good_peak_sets = [[] for x in range(peak_data.shape[0] + 1)] # preallocate space
    for i in range(0, peak_data.shape[0] - 3): # i here is the timestamp of the peak
        pks_set = good_peak_sets[i]
        if verify_good_peak(peak_data, i, fps=fps, pix_adj=pix_adj): # verify that the first peak is good
            pks_set.append(peak_data[i][0]) # add the peak ind (in the data frame) to the list of potential sets
            next_ind = i + 1
            while next_ind < (len(peak_data) - 1) and (peak_data[next_ind][1] - peak_data[i][1]) < 23 * fps_adj: # the distance between the peaks <= 23 frames
                if 4 * fps_adj <= (peak_data[next_ind][1] - peak_data[i][1]): # the distance between the peaks >=4 frames
                    if not ind_exists(pks_set, ind=peak_data[next_ind][0]) and verify_good_peak(peak_data, next_ind, fps=fps):
                        pks_set.append(peak_data[next_ind][0]) # add the index of the dataframe corr to the peak to the set
                next_ind += 1
    good_peak_sets = [x for x in good_peak_sets if len(x) >= 2] # remove the sets w fewer than 2 peaks
    for pks_set in good_peak_sets: # now go through the list of peak lists again and look for the third/next peak
        for j in range(1, len(pks_set)): # for every elem of the set
            curr_ind = pks_set[j] # index of the peak in the set that we are currently considering
            next_ind = curr_ind + 1
            while next_ind < (len(peak_data) - 1) and (peak_data[next_ind][1] - peak_data[curr_ind][1]) <= 23 * fps_adj: # check that we are not out of bounds
                if not ind_exists(pks_set, ind=peak_data[next_ind][0]) and 4 * fps_adj <= peak_data[next_ind][1] - \
                        peak_data[curr_ind][1] and verify_good_peak(peak_data, next_ind, fps=fps):
                    pks_set.append(peak_data[next_ind][0]) # add the reference ind pf the peak in the peak_data main table
                next_ind += 1
    good_peak_sets = [x for x in good_peak_sets if len(x) >= 3] # remove the sets w fewer than 3 peaks
    return good_peak_sets, peak_data
""" Add information about the displacement of the worm to the pak_data table
12th column: moving distance of worm compared to the location where the worm started scrunching (left valley of the first peak)
"""
# todo: what about sets with 3+ peaks. # issue: if the peak is in multiple sets then we will overwrite the com ultiple times
def add_com_info(good_peak_sets, com_arr, peak_data, pix_adj=1.5, mode="multiple", input_mode_times=False):
    """Attach centre-of-mass displacement info (columns 11/12) to peak_data and
    discard peak sets whose COM track violates the movement rules (rule 8).

    For each set, the displacement of every peak's COM is measured from the
    COM at the moment the worm started scrunching (left valley of the first
    peak).  A set is removed when the displacement decreases between early
    peaks or grows by more than ``29 * 2 * pix_adj`` pixels between two peaks.
    Returns peak_data, or 0 when (in "single" mode) no set survives.

    NOTE(review): this removes elements from ``good_peak_sets`` while
    iterating over it, which skips the following set — confirm intended.
    NOTE(review): inside the NaN-skip loop the result is assigned to
    ``start_com`` rather than ``com``; looks like a bug — confirm.
    """
    curr_disp = 0
    if mode == "single": #
        good_peak_sets = get_combinations(good_peak_sets, mode="any")
        good_peak_sets = check_not_too_far(good_peak_sets, peak_data) # remove sets that are too far away
        if len(good_peak_sets)==0:
            print("peaks are too far away -> will be removed")
            peak_data = 0
            return peak_data
    for sset_ind, sset in enumerate(good_peak_sets):
        times_set = to_timestamps(good_peak_sets[sset_ind], peak_data)
        first_peak_ind = sset[0]
        # frame where the worm started moving: left valley of the first peak
        start_moving_time = peak_data[first_peak_ind][1] - peak_data[first_peak_ind][6]
        start_com = com_arr[start_moving_time]
        while np.isnan(start_com[0]): # if there is no record of com for that timestamp, get the next available com
            start_moving_time += 1
            start_com = com_arr[start_moving_time]
        for i, peak_ind in enumerate(sset):
            curr_time = times_set[i]
            com = com_arr[curr_time]
            peak_data[sset[i]][12] = com
            while np.isnan(com[0]) and curr_time < len(com_arr) - 1: # check that we are within the bounds
                curr_time += 1
                start_com = com_arr[curr_time]
            prev_disp = curr_disp
            curr_disp = np.linalg.norm(
                np.array(start_com - np.array(com))) # displacement from where the worm started scrunching
            peak_data[sset[i]][12] = com
            print("curr disp", curr_disp,"prev disp", prev_disp)
            if 4 > i > 0 and (prev_disp > curr_disp) or (
                    curr_disp - prev_disp) > 29 * 2 * pix_adj: # if the worm moves >29 pix*pix_adj between 2 peaks
                good_peak_sets.remove(sset) # remove this peak set
                print("removing due to bad COMs")
                break
            else:
                peak_data[peak_ind][11][sset_ind] = curr_disp
    #print(len(good_peak_sets), "peak sets after checking displacements")
    if len(good_peak_sets)==0:
        peak_data=0
    return peak_data
def add_com_info_new(good_peak_sets, com_arr, peak_data, pix_adj=1.5, mode="multiple", input_mode_times=False):
    """Newer COM check: store each peak's COM in column 12 and drop any set
    where the COM moves more than ``40 * pix_adj`` or less than ``3 * pix_adj``
    pixels between consecutive peaks.

    :param good_peak_sets: list of peak sets; either reference indices
        (``input_mode_times=False``, converted via to_timestamps) or frame
        times directly (``input_mode_times=True``).
    :param mode: "single" first expands/validates combinations via
        get_combinations / check_not_too_far.
    :return: peak_data, or 0 when mode=="single" and nothing survives.

    NOTE(review): in "single" mode with no surviving sets, peak_data is set
    to 0 but the function continues to the (empty) loop instead of
    returning immediately — harmless but confirm intended.
    """
    if mode == "single": #
        good_peak_sets = get_combinations(good_peak_sets, mode="any")
        good_peak_sets = check_not_too_far(good_peak_sets, peak_data) # remove sets that are too far away
        #good_peak_sets=[good_peak_sets]
        if len(good_peak_sets)==0:
            print("peaks are too far away -> will be removed")
            peak_data = 0
    for sset_ind in range(len(good_peak_sets)):
        if not input_mode_times:
            times_set = to_timestamps(good_peak_sets[sset_ind], peak_data)
            sset = np.arange(0, len(good_peak_sets[sset_ind]), 1)
        else:
            times_set = good_peak_sets[sset_ind]
            sset = np.arange(0, len(good_peak_sets[sset_ind]), 1)
        for i in range(len(times_set)):
            curr_com = com_arr[times_set[i]]
            peak_data[sset[i]][12] = curr_com
            if i>0:
                if len(good_peak_sets) == 0:
                    break
                # displacement between this peak's COM and the previous peak's COM
                curr_disp = np.linalg.norm(np.array(com_arr[times_set[i-1]] - np.array(curr_com)))
                if curr_disp > 40 * pix_adj or curr_disp < 3 * pix_adj:
                    good_peak_sets = [a for a, skip in zip(good_peak_sets, [np.allclose(a, sset) for a in good_peak_sets]) if not skip] # remove this peak set
                    #good_peak_sets.remove(sset) # remove this peak set
                    #print(com_arr[times_set[i-1]], np.array(curr_com))
                    print("worm moved too much/too little =", curr_disp, "~frame ", times_set[i], "-removing this set" )
                    #sset_ind += 1
                    continue
    if mode == "single" and len(good_peak_sets) == 0:
        peak_data=0
    return peak_data
""" Remove peak sets with high (>0.6 proportion of noise peaks)
good_peak_sets: a list of list containing informaton about all peak sets
all_peak: a list of all peaks identified prior to appying any filter
"""
def remove_noisy_sets(good_peak_sets, all_peaks, peak_data):
    """Drop peak sets dominated by noise peaks (rule 1).

    A "noise" peak is one found by the unfiltered detector (``all_peaks``)
    between the first and last peak of a set but not part of the set itself.
    A set is removed when noise_peaks / len(set) > 0.6, i.e. the proportion
    of good peaks in its span is below 0.4.

    BUG FIX: the original removed elements from ``good_peak_sets`` while
    iterating over it, which silently skips the element following each
    removal.  We now iterate over a snapshot while still mutating the
    original list in place, preserving in-place semantics for callers.

    :param good_peak_sets: list of peak sets (reference indices).
    :param all_peaks: unfiltered peak frame times from find_all_peaks.
    :param peak_data: table from get_peak_data.
    :return: the filtered list of sets.
    """
    for set_ind, sset in enumerate(list(good_peak_sets)):  # snapshot: safe removal
        times = to_timestamps(sset, peak_data)
        # number of noise peaks between the first and last peaks of the set
        all_peaks_sset = all_peaks.index(times[-1]) - all_peaks.index(times[0]) - len(sset)
        if all_peaks_sset / len(sset) > 0.6:  # proportion of good peaks < 0.4
            good_peak_sets.remove(sset)
            print("removing set", set_ind, "- noisy")
    return list(good_peak_sets)
""" Check the following scrunching criteria: #
2) elongation takes more time than contraction. A fraction of peaks in the oscillation should have speed of elongation > speed of contraction
4): no elongation took more than 14 frams --> distance of peak with its previous valley always <14
"""
def check_faster_contraction(sset, peak_data, fps=5):
    """Rules 2/4: require that at least one peak elongates slower than it
    contracts, skipping peaks whose elongation exceeds ``14 * fps_adj * 2``
    frames.

    NOTE(review): the count check and both returns sit inside the outer
    ``for sset in ssets`` loop, so the decision is made after the FIRST
    combination only — confirm whether later combinations were meant to be
    considered.
    """
    # distance to the left valley (peak_data[][6]) == elongation (for local MAX)
    # distance to the right valley (peak_data[][7]) == contraction (for local MAX)
    faster_elong_count = 0
    fps_adj = fps / 5
    ssets = get_combinations(sset)
    #print("ssets", ssets)
    for sset in ssets:
        for peak_ind in sset:
            if peak_data[peak_ind][6] > 14 * fps_adj * 2:
                #print("elongation is too long (=", peak_data[peak_ind][6], ") for set", to_timestamps(sset, peak_data))
                #if len(ssets)>1:
                #    print("Checking the next sset (out of", len(ssets), ")")
                continue
            elif peak_data[peak_ind][6] > peak_data[peak_ind][7]: # faster elongation means that there are fewer
                faster_elong_count += 1
        if faster_elong_count == 0:
            print("no peaks where elongation is longer than contracton for", to_timestamps(sset, peak_data))
            return False
        else:
            return True
""" Check rules 3:
- mean of contraction amplitude > 7 pixels --> difference of peak value with its NEXT valley value > 7
- no contraction amplitude > 38 pixels
- std of the amplitude of these contraction < 10 """
# sset is a list of INDEXES in the peak_data table
def check_good_amplitudes(sset, peak_data, pix_adj=1.5, printout=False, sset_mode="any"):
    """Rule 3: validate contraction amplitudes (column 10) of a peak set.

    For each candidate combination of the set: any amplitude below
    ``5 * pix_adj`` fails it outright (returning False when it was the only
    combination); amplitudes below ``38 * pix_adj`` are collected, and the
    combination is accepted once >=3 of them have mean > ``7 * pix_adj`` and
    stdev < ``10 * pix_adj``.

    :return: list of accepted combinations, or False when none qualify.

    NOTE(review): the mean/stdev acceptance check runs inside the per-peak
    loop, so the same combination can be appended multiple times — confirm.
    """
    worm_size_adj = 1 # todo
    ssets = get_combinations(sset, mode=sset_mode) # get all permutations
    ssets = check_not_too_far(ssets, peak_data) # remove sets that are too far away
    #print("ssets after checking distances")
    #for sset in ssets:
    #    print(to_timestamps(sset, peak_data))
    filtered_ssets = []
    for sset in ssets:
        contr_amplitudes = []
        for peak_ind in sset:
            curr_amplitude = peak_data[peak_ind][10] #
            if curr_amplitude < 5*pix_adj*worm_size_adj: # no amplitude should be too low
                #if printout:
                #    print("amplitude is too low (", curr_amplitude, ") for", to_timestamps(sset,peak_data))
                if len(ssets)==1:
                    return False
                else: # if there are other permutations left to check
                    continue
            if curr_amplitude < (38 * pix_adj * worm_size_adj): # todo: worm size adj
                contr_amplitudes.append(curr_amplitude)
            if len(contr_amplitudes) >= 3 and mean(contr_amplitudes) > 7 * pix_adj and stdev(
                    contr_amplitudes) < 10*pix_adj: # if any combination of 3 peaks meets the requirements
                # todo: how do we scale stdev?
                filtered_ssets.append(sset)
            elif len(ssets) > 1:
                #if printout:
                #    print("curr amplitude is", curr_amplitude, "pix. Checking next sset")
                continue
    if len(filtered_ssets) >= 1:
        return filtered_ssets
    else:
        return False
def check_good_widths(sset, peak_data, pix_adj=1.5, sset_mode="any"):
    """Rule 5 helper: accept the set when any combination has mean peak width
    (column 4) > 3 with stdev < 3.

    NOTE(review): the final prints reference ``curr_widths`` from the LAST
    iterated combination; if ``ssets`` is empty this raises NameError, and
    ``stdev`` requires at least two data points — confirm inputs guarantee
    this.
    """
    worm_size_adj = 1 # todo
    ssets = get_combinations(sset, mode=sset_mode) # get all permutations
    ssets = check_not_too_far(ssets, peak_data) # remove sets that are too far away
    for sset in ssets:
        curr_widths = []
        for peak_ind in sset:
            curr_widths.append(peak_data[peak_ind][4])
        if mean(curr_widths) > 3 and stdev(curr_widths)<3:
            print("STDEV", stdev(curr_widths))
            return True
        else:
            continue
    print(peak_data[0][1])
    print("mean wIDTH ", mean(curr_widths), "stdv", stdev(curr_widths))
    return False
# checks rule 7) (the fraction peaks with distance to next valley > 10) < 0.15
def good_valley_dists_frac(sset, peak_data, fps=5):
    """Rule 7: fraction of peaks whose distance to the next valley (column 7)
    exceeds ``10 * fps_adj`` frames must stay below 0.5.

    (The module-level rule text says 0.15; the cutoff was deliberately
    raised to 0.5 in the original code — see its inline todo.)
    """
    fps_adj = fps / 5
    threshold = 10 * fps_adj
    far_count = sum(1 for peak_ind in sset if peak_data[peak_ind][7] > threshold)
    # original todo: cutoff arbitrarily increased from 0.15 to 0.5
    return far_count / len(sset) < 0.5
""" In the original script one of the rules was that the mean of worm aspect ratio during oscillation sould be > 6.
However, when i tried this, this was filtering out essetially all peak sets, so I lowered the cutoff from 6 to 2
asp_ratio_arr is an array containing aspect ratios for each frame of the movie (calculated in a separate script)
"""
def good_aspect_ratio(asp_ratio_arr, sset, peak_data):
    """Rule 9: mean worm aspect ratio over the oscillation window must be > 3.

    The window spans from the left valley of the set's first peak to the
    right valley of its last peak (inclusive); exact-1.0 ratios are dropped
    before averaging.  (The original rule said > 6 but that filtered out
    essentially every set, so the author lowered the cutoff.)
    """
    first, last = sset[0], sset[-1]
    start = peak_data[first][1] - peak_data[first][6]
    end = peak_data[last][1] + peak_data[last][7]
    window = [r for r in asp_ratio_arr[start:end + 1] if r != 1.0]
    if np.nanmean(window) > 3:  # used to be >6
        return True
    print("asp ratios are bad", mean(window))
    return False
"""
For one scrunching oscillation (scrunched >3 times), it should meet these criteria:
1) the scrunching oscillation is usually clean. Thus # of noise peak (peaks didn't meet the entire criteria applied above)/# of main oscialltion peak < 0.6
2) elongation takes more time than contraction. A fraction of peaks in the oscillation should have speed of elongation > speed of contraction
3) mean of contraction amplitude > 7 pixels --> difference of peak value with its previous valley value > 7
&& no contraction amplitude > 38 pixels
&& std of the amplitude of these contraction < 10
4) no elongation took more than 14 frams --> distance of peak with its previous valley always <14
5) mean of peak width in the oscillation > 3
6) mean of peak prominence > 7
7) (the fraction peaks with distance to next valley > 10) < 0.15
8) worm always move forward during scrunching. Thus, after each contraction, worm position to the location where it started its first scrunching should incrase.
&& worm can not move more than 29 pixels during one scrunching
9) mean of worm aspect ratio (length^2/area) during the oscillation > 6 (usually 8~13 for normally glidign worm).
"""
def analyze_peak_sets(good_peak_sets, peak_data, all_peaks, asp_ratio_arr, fps=5, pix_adj=1.5):
    """Apply the full scrunching rule pipeline (rules 1-7, 9; rule 8 is
    handled earlier during COM filtering) and return the surviving sets.

    :param good_peak_sets: candidate sets from get_peak_sets.
    :param peak_data: table from get_peak_data.
    :param all_peaks: unfiltered peak list from find_all_peaks (rule 1).
    :param asp_ratio_arr: per-frame aspect ratios (rule 9).
    :param fps: frames per second; thresholds scale with fps/5.
    :param pix_adj: pixel-scale adjustment.
    :return: list of sets passing every rule.
    """
    fps_adj = fps / 5
    new_good_peaks = []
    good_peak_sets = remove_noisy_sets(good_peak_sets, all_peaks, peak_data) # 1) filter out sets with a lot of noise
    for ind, sset in enumerate(good_peak_sets):
        if not check_faster_contraction(sset, peak_data, fps): # 2) speed of elongation > speed of contraction 4) check that no elongation > 14 frames
            print("removing", ind, "by rules 2/4")
            continue
        elif not check_good_amplitudes(sset, peak_data, pix_adj): # 3) "correct" amplitudes
            print("removing", ind, "by rule 3 (amplitudes)")
            continue
        elif mean([peak_data[i][4] for i in sset]) < 3 * fps_adj: # 5) mean peak width in the oscillation > 3
            print("removing", ind, "by rule 5")
            continue
        elif mean([peak_data[i][5] for i in sset]) > 67 * pix_adj: # 6) mean of peak prominence > 7
            print("removing", ind, "by rule 6")
            continue
        elif not good_valley_dists_frac(sset, peak_data, fps): # 7) (the fraction peaks with distance to next valley > 10) < 0.15
            print("removing", ind, "by rule 7")
            continue
        # 8) moving distance; <29 pix in one scrunching oscillation
        # removed during the initial sorting
        # 9) check aspect ratios
        elif not good_aspect_ratio(asp_ratio_arr, sset, peak_data):
            print("removing", ind, "by rule 9 (aspect ratio)")
            continue
        else:
            new_good_peaks.append(sset)
    return new_good_peaks
def check_early_peaks(good_peak_sets_final, peak_data):
    """Count how many final peak sets contain a peak before frame 350.

    :param good_peak_sets_final: sets of peak_data reference indices.
    :param peak_data: table used by to_timestamps to map indices to frames.
    :return: (has_early, count_early) — True plus the number of sets with an
        early peak, or (False, 0).

    BUG FIX: the original print referenced an undefined global ``ind`` (a
    leftover of the now commented-out driver loop), raising NameError
    whenever an early peak was actually found.
    """
    count_early = 0
    for sset in good_peak_sets_final:
        sset = to_timestamps(sset, peak_data)
        if any(x < 350 for x in sset):
            print("set", sset, "has peaks earlier than 350 frame")
            count_early += 1
    if count_early > 0:
        return True, count_early
    else:
        return False, count_early
def check_elongation_len(sset, peak_data, currMAL):
    """Check that every peak MAL in some combination of the set stays above
    half of the worm's typical size.

    The typical size is the mean MAL over a window extending 100 frames
    beyond the set on each side (NOTE(review): ``times[0]-100`` can go
    negative, which Python slicing interprets as wrap-around — confirm the
    sets never start that early).

    BUG FIX: the original tested ``all(MALs_arr > worm_size*0.5)`` where
    ``MALs_arr`` is a plain list; comparing a list against a float raises
    TypeError in Python 3.  The check is now applied element-wise.

    :return: True for the first acceptable combination, else False.
    """
    ssets = get_combinations(sset)  # get all permutations
    ssets = check_not_too_far(ssets, peak_data)  # remove sets that are too far away
    for sset in ssets:
        times = to_timestamps(sset, peak_data)
        worm_size = mean(currMAL[times[0]-100:times[-1]+100])
        # check that every elongation length in the combination is good
        MALs_arr = [currMAL[i] for i in times]
        if all(m > worm_size * 0.5 for m in MALs_arr):
            print("good sset", times)
            return True
        else:
            print("bad sset", times, "mean MAL", mean(MALs_arr))
            continue
    return False
# lag -- left valley of the first peak in a set
# currMAL == smoothed MAL
def get_peak_data_set(lag, smoothedMAL, signal_unpadded):
    """Build a peak_data table for the single window of ``smoothedMAL``
    matched by the synthetic-signal cross-correlation (window starts at
    ``lag``, the left valley of the set's first peak).

    Peaks are detected inside the window (+/- ``leeway`` frames); valleys are
    peaks of the negated signal on a slightly wider window, paired with each
    peak via ``match_peaks_valleys``.  Column layout matches get_peak_data
    except column 2, which here stores the RIGHT-VALLEY frame index instead
    of the peak-to-peak distance.

    BUG FIX: column 9 (value difference between the peak and its previous
    valley) was computed from the peak's FRAME INDEX instead of its MAL
    value (``peak_data[i][1] - peak_data[i][8]``).

    :return: (peakinds_new, peak_data_new) — peak indices in global
        (whole-movie) frame coordinates and the populated table.
    """
    leeway = 10
    frame = [lag - leeway, lag + len(signal_unpadded) + leeway]
    currMAL = smoothedMAL[frame[0]:frame[1]]
    peakinds, peak_dict = find_good_peaks(currMAL, show=False, outpath=None, fps=5, pix_adj=1.5)
    # valleys: peaks of the negated signal, searched on a wider window
    v_frame = [lag - 2*leeway, lag + len(signal_unpadded) + 2*leeway]
    vinds, _ = find_good_peaks(-smoothedMAL[v_frame[0]:v_frame[1]], show=False, outpath=None, fps=5, pix_adj=1.5)
    # shift valley indices into the peak window's coordinate system
    vinds = [ind - leeway for ind in vinds]
    peak_data = np.zeros([len(peakinds), 14], dtype=object)  # initialize the array to store peak data
    for i in range(len(peakinds)):
        peak_data[i][0] = int(i)  # reference index within this table
        peak_data[i][1] = int(peakinds[i]) + lag - leeway  # peak time in global frames
        left_valley, right_valley = match_peaks_valleys(peakinds[i], vinds)
        left_valley, right_valley = left_valley + lag - leeway, right_valley + lag - leeway
        peak_data[i][2] = right_valley  # 3rd column: CHANGED -- RIGHT VALLEY IND
        peak_data[i][4] = peak_dict['widths'][i]  # 5th column: width of the peak
        peak_data[i][5] = peak_dict['prominences'][i]  # 6th column: prominence of the peak
        peak_data[i][6] = peak_data[i][1] - left_valley  # 7th: distance to the previous valley (in frames)
        peak_data[i][7] = right_valley - peak_data[i][1]  # 8th: distance to the next valley (in frames)
        peak_data[i][8] = smoothedMAL[left_valley]  # 9th column: value of the previous valley
        # 10th column: diff of VALUES of peak and its previous valley
        # (fixed: was peak_data[i][1] - peak_data[i][8], a frame index minus a MAL value)
        peak_data[i][9] = smoothedMAL[peak_data[i][1]] - peak_data[i][8]
        peak_data[i][10] = currMAL[peakinds[i]] - smoothedMAL[right_valley]  # 11th column: diff of values of peak and its next valley
        # 12th: dict where key: set index, value: COM when the worm first started moving
        peak_data[i][11] = {}
        # 13th: com
        peak_data[i][12] = []
        # 14th: value of the next valley
        peak_data[i][13] = smoothedMAL[right_valley]
    peakinds_new = peakinds + lag - leeway  # ndarray arithmetic: shift to global frames
    peak_data_new = peak_data
    return peakinds_new, peak_data_new
def zoom(zoom_sset, peak_data, smoothedMAL, good_peak_sets_final=None, mode="times"):
    """Plot a zoomed-in view of the MAL trace around one peak set.

    :param zoom_sset: the set to zoom on; frame times when mode=="times",
        otherwise peak_data reference indices (converted via to_timestamps).
    :param peak_data: table used for index-to-time conversion.
    :param smoothedMAL: smoothed per-frame MAL signal.
    :param good_peak_sets_final: optional list of confirmed scrunching sets,
        also drawn for context.
    :param mode: "times" means zoom_sset already holds frame times.

    NOTE(review): the figure is only closed, never shown or saved —
    presumably for interactive use.
    """
    if not mode == "times":
        zoom_sset = to_timestamps(zoom_sset, peak_data)
    plt.plot(smoothedMAL)
    if good_peak_sets_final:
        for sset in good_peak_sets_final: #plot the sets that are actually were classified as scrunching
            sset = to_timestamps(sset, peak_data)
            plt.plot(sset, np.array(smoothedMAL)[sset], marker="X", markersize=4)
    # plot the peaks of interest
    plt.plot(zoom_sset, np.array(smoothedMAL)[zoom_sset], marker="o", markersize=6)
    plt.xlabel('time, frames')
    plt.ylabel('MAL, pix')
    plt.xlim(zoom_sset[0]-20, zoom_sset[-1]+20)
    plt.close(0)
"""
fps = 5
fps_adj = fps / 5
pix_adj = 1.5
MALs, COMs, AspRatios, total = [], [], [], []
has_early_peaks = {}
early_peak_count = 0 # number of sets w
all_sets_count = 0 # number of identified peak sets
filepath = "/Volumes/Collins_Lab/15"
#filepath = "/Users/Arina/Desktop"
wells = np.arange(1, 48, 1)
wells = [1,2]
peakDataFolder = filepath + '/peak_data'
if exists(peakDataFolder) is False:
makedirs(peakDataFolder)
for ind in wells:
filename = filepath + "/results/well_data/MAL_well" + str(ind) + ".csv"
currMAL = genfromtxt(filename, delimiter=',')
currMAL = np.array(pd.DataFrame(currMAL).interpolate())
currMAL = currMAL.reshape(-1, order='F')
smoothing_frac = 6/len(currMAL)
smoothedMAL = frac_lowess(currMAL, frac=smoothing_frac) # todo this might need to be adjusted
#MALs.append(smoothedMAL)
MALs.append(smoothedMAL)
filename = filepath + "/results/well_data/COM_well" + str(ind) + ".csv"
currCOM = genfromtxt(filename, delimiter=',')
COMs.append(currCOM)
filename = filepath + "/results/well_data/AspRatio_well" + str(ind) + ".csv"
currAspRatio = genfromtxt(filename, delimiter=',')
AspRatios.append(currAspRatio)
for ind in range(len(wells)):
all_peaks = find_all_peaks(MALs[ind])
## Old analysis
peakinds, peak_dict, peak_data = get_peak_data(MALs[ind])
good_peak_sets, peak_data = get_peak_sets(peak_data, fps=fps, pix_adj=1.5)
print("len after get_peak_sets", len(good_peak_sets))
peak_data = add_com_info_new(good_peak_sets, COMs[ind], peak_data, pix_adj=1.5)
good_peak_sets_final = analyze_peak_sets(good_peak_sets, peak_data, all_peaks, AspRatios[ind], fps=5, pix_adj=1.5)
print("len after analyze_peak_sets", len(good_peak_sets_final))
print("FINAL: well ", wellNum, ":", good_peak_sets_final, "\n Times:",
[to_timestamps(sset, peak_data) for sset in good_peak_sets_final]) # times of all good peaks
outpath = os.path.expanduser(peakDataFolder + "/peak sets well" + str(wellNum) + ".png")
#plt.plot(MALs[ind]) #smoothed
#for sset in good_peak_sets_final:
# sset = to_timestamps(sset, peak_data)
# plt.plot(sset, np.array(MALs[ind])[sset], marker="X", markersize=4)
#plt.xlabel('time, frames')
#plt.ylabel('MAL, pix')
#plt.title("Well " + str(wellNum))
#plt.show()
#plt.close("all")
has_early_peaks[ind] =\
ans, counter = check_early_peaks(good_peak_sets_final, peak_data)
has_early_peaks[ind] = counter
if has_early_peaks[ind]:
early_peak_count += 1 #total number of wells w early peaks
all_sets_count += len(good_peak_sets_final)
list_of_lags_final, signal_unpadded = synth_signal.cross_correlate(MALs[ind], freq_elong=0.6, freq_contr=0.8, goal_num_sets=5, leeway=10)
# list_of_lags_final, signal_unpadded = synth_signal.cross_correlate(MALs[ind], freq_elong=0.3, freq_contr=0.6, goal_num_sets=5, leeway=5)
# synth_signal.generate_overlap_plots(list_of_lags_final, MALs[ind], signal_unpadded, filepath, wells[ind])
new_peak_sets_times = []
for lag in list_of_lags_final:
leeway = 10
frame = [lag-leeway, lag+len(signal_unpadded)+leeway]
inds, _ = find_good_peaks(MALs[ind][frame[0]:frame[1]], show=False, outpath=None, fps=5, pix_adj=1.5)
if len(inds) >= 3:
inds += lag-leeway
new_peak_sets_times.append(inds)
final_peak_sets_new = []
for lag in list_of_lags_final:
# get peak info for one set of peaks
peakinds_new, peak_data_new = get_peak_data_set(lag, MALs[0], signal_unpadded)
if len(peakinds_new) < 3:
print("fewer than 3 peaks in a set", peakinds_new)
continue
sset_new = np.arange(0, len(peakinds_new), 1) # reference INDEXES in the peak_data_new table
peak_data_new = add_com_info_new(sset_new, COMs[0], peak_data_new, pix_adj=1.5, mode="single")
if type(peak_data_new) == bool and not peak_data_new:
print("removing", peakinds_new, "due to bad COMs")
continue
#if not check_faster_contraction(sset_new, peak_data_new, fps): # 2) speed of elongation > speed of contraction 4) check that no elongation > 14 frames
# print("removing", peakinds_new, "by rules 2/4")
# continue
if not check_elongation_len(sset_new, peak_data_new, MALs[0]):
print("removing", peakinds_new, "due to bad (too short) elong MAL")
continue
elif not check_good_amplitudes(sset_new, peak_data_new, pix_adj, printout=True): # 3) "correct" amplitudes
print("removing", peakinds_new, "by rule 3 (amplitudes)")
continue
elif mean([peak_data_new[i][4] for i in sset_new]) < 3 * fps_adj: # 5) mean peak width in the oscillation > 3
print("removing", peakinds_new, "by rule 5")
continue
elif mean([peak_data_new[i][5] for i in sset_new]) > 67 * pix_adj: # 6) mean of peak prominence > 7
print("removing", peakinds_new, "by rule 6")
continue
# 9) check aspect ratios
elif not good_aspect_ratio(AspRatios[0], sset_new, peak_data_new):
print("removing", peakinds_new, "by rule 9 (aspect ratio)")
continue
final_peak_sets_new.append(list(peakinds_new))
total.append(final_peak_sets_new)
counter = 0
for old_set in good_peak_sets_final:
old_set = to_timestamps(old_set, peak_data)
for set in new_peak_sets_times:
for new_elem in set:
for old_elem in old_set:
#print(old_elem, new_elem)
if old_elem == new_elem:
#print("overlapping peak", old_elem)
counter += 1
"""
"""
leg = []
for i, com_arr in enumerate(COMs):
leg.append("Well " + str(wells[i]))
velocities, displacements, disp_arr = calculate_velocities(com_arr, MALs[i], fps=fps)
time = np.arange(start=(fps*6/2), stop=(len(com_arr)/fps-(fps*6/2)), step=6*fps)
plt.scatter(time, displacements)
plt.plot(time, displacements, linestyle='--')
slope, intercept = np.polyfit(time, displacements, 1)
plt.plot(time, time * slope + intercept, 'r')
plt.xlabel('time, seconds')
plt.ylabel('Mean displacement, normalized by body length')
plt.xlim((0, (len(com_arr)/fps)))
#plt.ylim((0, 500))
plt.title("Well " + str(wells[i]))
plt.legend(["Mean displacements", "y="+str(round(slope, 3))+"x+"+str(round(intercept, 3))])
outpath = os.path.expanduser("/Users/Arina/Desktop/02/results/peak_sets/displacements well" + str(wells[i]) + ".png")
#outpath = os.path.expanduser("/Users/Arina/Desktop/02/results/peak_sets/displacements.png")
plt.savefig(outpath)
plt.show()
#plt.legend(leg)
plt.close()
""" |
#! /usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from mindspore.nn import optim as optimizer
import mindspore as ms
from mindspore.nn import Cell
__all__ = ['Adadelta', 'Adagrad', 'Adam', 'Adamax', 'Ftrl', 'Nadam', 'RMSprop', 'SGD', 'Momentum', 'Lamb', 'LARS']
class Adadelta(Cell):
    """Placeholder Adadelta optimizer wrapper — not implemented for the
    MindSpore backend.

    BUG FIX: every sibling optimizer in this module exposes
    ``apply_gradients``, but this class misspelled it ``app_gradients``, so
    generic code calling ``apply_gradients`` hit AttributeError instead of
    the intended explicit error.  The correctly named method is added and
    the old name kept as a backward-compatible alias.
    """

    def __init__(self):
        pass

    def apply_gradients(self):
        """Always raises: no MindSpore Adadelta implementation is wired up."""
        raise Exception('Adadelta optimizer function not implemented')

    # Backward-compatible alias for the original (misspelled) method name.
    app_gradients = apply_gradients
class Adagrad(Cell):
    """Adagrad optimizer wrapper around ``mindspore.nn.optim.Adagrad``.

    The underlying MindSpore optimizer is built lazily on the first call to
    ``apply_gradients``, once the trainable variables are known.
    """

    def __init__(self, lr=0.001, initial_accumulator=0.1, eps=1e-07, weight_decay=0.0, grad_clip=None):
        super(Adagrad, self).__init__()
        self.lr = lr
        self.initial_accumulator = initial_accumulator
        self.eps = eps
        self.weight_decay = weight_decay
        self.adagrad = optimizer.Adagrad
        self.init_optim = False

    def apply_gradients(self, grads_and_vars):
        """Apply a sequence of (gradient, variable) pairs."""
        gradients, variables = zip(*grads_and_vars)
        if not self.init_optim:
            self.optimizer = self.adagrad(
                variables, learning_rate=self.lr, accum=self.initial_accumulator,
                weight_decay=self.weight_decay
            )
            self.init_optim = True
        self.optimizer(gradients)
class Adam(Cell):
    """Adam optimizer wrapper around ``mindspore.nn.optim.Adam``.

    The underlying MindSpore optimizer is constructed lazily on the first
    ``apply_gradients`` call, once the trainable variables are known.
    """

    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, eps=1e-8, weight_decay=0.0, grad_clip=None):
        super(Adam, self).__init__()
        self.adam = optimizer.Adam
        self.lr = lr
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.eps = eps
        self.weight_decay = weight_decay
        self.init_optim = False

    def apply_gradients(self, grads_and_vars):
        """Apply a sequence of (gradient, variable) pairs."""
        gradients, variables = zip(*grads_and_vars)
        if not self.init_optim:
            self.optimizer_adam = self.adam(
                variables, learning_rate=self.lr, beta1=self.beta_1,
                beta2=self.beta_2, eps=self.eps, weight_decay=self.weight_decay
            )
            self.init_optim = True
        self.optimizer_adam(gradients)
class Adamax(Cell):
    """Placeholder Adamax optimizer wrapper — not implemented for the
    MindSpore backend."""

    def __init__(self):
        pass

    def apply_gradients(self):
        # Always raises: no MindSpore Adamax implementation is wired up.
        raise Exception('Adamax optimizer function not implemented')
class Ftrl(Cell):
    """FTRL optimizer wrapper around ``mindspore.nn.optim.FTRL``.

    The underlying MindSpore optimizer is constructed lazily on the first
    ``apply_gradients`` call.  ``beta`` and
    ``l2_shrinkage_regularization_strength`` are accepted for interface
    compatibility but are not forwarded to MindSpore.
    """

    def __init__(
        self, lr=0.001, lr_power=-0.5, initial_accumulator_value=0.1,
        l1_regularization_strength=0.0, l2_regularization_strength=0.0, beta=0.0,
        l2_shrinkage_regularization_strength=0.0, weight_decay=0.0, grad_clip=None
    ):
        super(Ftrl, self).__init__()
        self.ftrl = optimizer.FTRL
        self.lr = lr
        self.lr_power = lr_power
        self.init_accum = initial_accumulator_value
        self.l1 = l1_regularization_strength
        self.l2 = l2_regularization_strength
        self.weight_decay = weight_decay
        self.init_optim = False

    def apply_gradients(self, grads_and_vars):
        """Apply a sequence of (gradient, variable) pairs."""
        gradients, variables = zip(*grads_and_vars)
        if not self.init_optim:
            self.optimizer = self.ftrl(
                variables, learning_rate=self.lr, initial_accum=self.init_accum,
                lr_power=self.lr_power, l1=self.l1, l2=self.l2,
                weight_decay=self.weight_decay
            )
            self.init_optim = True
        self.optimizer(gradients)
class Nadam(Cell):
    """Placeholder Nadam optimizer wrapper — not implemented for the
    MindSpore backend."""

    def __init__(self):
        pass

    def apply_gradients(self):
        # Always raises: no MindSpore Nadam implementation is wired up.
        raise Exception('Nadam optimizer function not implemented')
class RMSprop(Cell):
    """RMSProp optimizer wrapper around ``mindspore.nn.optim.RMSProp``.

    The underlying MindSpore optimizer is constructed lazily on the first
    ``apply_gradients`` call, once the trainable variables are known.
    """

    def __init__(
        self, lr=0.01, rho=0.9, eps=1.0e-10, momentum=0.0, centered=False, weight_decay=0.0,
        grad_clip=None
    ):
        super(RMSprop, self).__init__()
        self.lr = lr
        self.rho = rho
        self.eps = eps
        self.momentum = momentum
        self.centered = centered
        self.weight_decay = weight_decay
        self.rmsprop = optimizer.RMSProp
        self.init_optim = False

    def apply_gradients(self, grads_and_vars):
        """Apply a sequence of (gradient, variable) pairs."""
        gradients, variables = zip(*grads_and_vars)
        if not self.init_optim:
            self.optimizer = self.rmsprop(
                variables, learning_rate=self.lr, decay=self.rho,
                momentum=self.momentum, epsilon=self.eps,
                centered=self.centered, weight_decay=self.weight_decay
            )
            self.init_optim = True
        self.optimizer(gradients)
class SGD(Cell):
    """SGD optimizer wrapper around ``mindspore.nn.optim.SGD``.

    The underlying MindSpore optimizer is constructed lazily on the first
    ``apply_gradients`` call, once the trainable variables are known.
    """

    def __init__(self, lr=0.1, momentum=0.0, weight_decay=0.0, grad_clip=None):
        super(SGD, self).__init__()
        self.sgd = optimizer.SGD
        self.lr = lr
        self.momentum = momentum
        self.weight_decay = weight_decay
        self.init_optim = False

    def apply_gradients(self, grads_and_vars):
        """Apply a sequence of (gradient, variable) pairs."""
        gradients, variables = zip(*grads_and_vars)
        if not self.init_optim:
            self.optimizer_sgd = self.sgd(
                variables, learning_rate=self.lr, momentum=self.momentum,
                weight_decay=self.weight_decay
            )
            self.init_optim = True
        self.optimizer_sgd(gradients)
class Momentum(Cell):
    """Momentum optimizer wrapper around ``mindspore.nn.optim.Momentum``.

    The underlying MindSpore optimizer is constructed lazily on the first
    ``apply_gradients`` call, once the trainable variables are known.
    """

    def __init__(self, lr=0.001, momentum=0.0, nesterov=False, weight_decay=0.0, grad_clip=None):
        super(Momentum, self).__init__()
        self.mom = optimizer.Momentum
        self.lr = lr
        self.momentum = momentum
        self.nesterov = nesterov
        self.weight_decay = weight_decay
        self.init_optim = False

    def apply_gradients(self, grads_and_vars):
        """Apply a sequence of (gradient, variable) pairs."""
        gradients, variables = zip(*grads_and_vars)
        if not self.init_optim:
            self.optimizer_mom = self.mom(
                variables, learning_rate=self.lr, momentum=self.momentum,
                use_nesterov=self.nesterov, weight_decay=self.weight_decay
            )
            self.init_optim = True
        self.optimizer_mom(gradients)
class Lamb(Cell):
    """Keras-style wrapper around the MindSpore Lamb optimizer.

    The underlying optimizer is constructed lazily on the first call to
    ``apply_gradients``, once the parameter list is known.
    NOTE(review): ``grad_clip`` is accepted for API compatibility but unused.
    """
    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, eps=1.0e-6, weight_decay=0.0, grad_clip=None):
        super(Lamb, self).__init__()
        self.lamb = optimizer.Lamb
        self.lr = lr
        self.beta1 = beta_1
        self.beta2 = beta_2
        self.eps = eps
        self.weight_decay = weight_decay
        self.init_optim = False
    def apply_gradients(self, grads_and_vars):
        """Apply (gradient, parameter) pairs with the wrapped Lamb optimizer."""
        # ``params`` avoids shadowing the builtin ``vars``.
        grads, params = zip(*grads_and_vars)
        if not self.init_optim:
            self.optimizer_lamb = self.lamb(
                params, learning_rate=self.lr, beta1=self.beta1, beta2=self.beta2, eps=self.eps,
                weight_decay=self.weight_decay
            )
            self.init_optim = True
        self.optimizer_lamb(grads)
class LARS(Cell):
    """Layer-wise Adaptive Rate Scaling: wraps another optimizer with ms.nn.LARS.

    Args:
        optimizer: the MindSpore optimizer instance to wrap.
        kwargs: forwarded to ``ms.nn.LARS``.
    """
    def __init__(self, optimizer, **kwargs):
        super(LARS, self).__init__()
        self.lars = ms.nn.LARS(optimizer=optimizer, **kwargs)
    def apply_gradients(self, grads_and_vars):
        """Apply gradients; parameters are owned by the wrapped optimizer."""
        # Only the gradients are needed here; tuple-unpack zip directly.
        grads, _ = zip(*grads_and_vars)
        self.lars(grads)
|
<reponame>jakemcaferty/pyesg<gh_stars>10-100
"""Wiener Process"""
from typing import Dict, List, Union
import numpy as np
from pyesg.stochastic_process import StochasticProcess
from pyesg.utils import to_array
class WienerProcess(StochasticProcess):
    """
    Generalized Wiener process: dX = μdt + σdW
    Examples
    --------
    >>> wp = WienerProcess.example()
    >>> wp
    <pyesg.WienerProcess(mu=0.05, sigma=0.2)>
    >>> wp.drift(x0=0.0)
    array([0.05])
    >>> wp.diffusion(x0=0.0)
    array([0.2])
    >>> wp.expectation(x0=0.0, dt=0.5)
    array([0.025])
    >>> wp.standard_deviation(x0=0.0, dt=0.5)
    array([0.14142136])
    >>> wp.step(x0=0.0, dt=1.0, random_state=42)
    array([0.14934283])
    >>> wp.step(x0=np.array([1.0]), dt=1.0, random_state=42)
    array([1.14934283])
    """
    def __init__(self, mu: float, sigma: float) -> None:
        super().__init__()
        self.mu = mu
        self.sigma = sigma
    def coefs(self) -> Dict[str, float]:
        # the two model parameters, keyed by name
        return {"mu": self.mu, "sigma": self.sigma}
    def _apply(self, x0: np.ndarray, dx: np.ndarray) -> np.ndarray:
        # increments are applied additively
        return x0 + dx
    def _drift(self, x0: np.ndarray) -> np.ndarray:
        # constant drift, broadcast to the shape of x0
        return np.full_like(x0, fill_value=self.mu, dtype=np.float64)
    def _diffusion(self, x0: np.ndarray) -> np.ndarray:
        # constant volatility, broadcast to the shape of x0
        return np.full_like(x0, fill_value=self.sigma, dtype=np.float64)
    @classmethod
    def example(cls) -> "WienerProcess":
        # small canonical instance used by the doctests above
        return cls(mu=0.05, sigma=0.2)
class JointWienerProcess(StochasticProcess):
    """
    Joint Wiener processes: dX = μdt + σdW
    Examples
    --------
    >>> jwp = JointWienerProcess(
    ...     mu=[0.05, 0.03], sigma=[0.20, 0.15], correlation=[[1.0, 0.5], [0.5, 1.0]]
    ... )
    >>> jwp.drift(x0=[1.0, 1.0])
    array([0.05, 0.03])
    >>> jwp.diffusion(x0=[1.0, 1.0])
    array([[0.2 , 0. ],
    [0.075 , 0.12990381]])
    >>> jwp.expectation(x0=[1.0, 1.0], dt=0.5)
    array([1.025, 1.015])
    >>> jwp.standard_deviation(x0=[1.0, 2.0], dt=2.0)
    array([[0.28284271, 0. ],
    [0.10606602, 0.18371173]])
    >>> jwp.step(x0=np.array([1.0, 1.0]), dt=1.0, random_state=42)
    array([1.14934283, 1.0492925 ])
    >>> jwp.correlation = [[1.0, 0.99], [0.99, 1.0]]
    >>> jwp.step(x0=np.array([1.0, 1.0]), dt=1.0, random_state=42)
    array([1.14934283, 1.10083636])
    """
    def __init__(
        self,
        mu: Union[List[float], List[int], np.ndarray],
        sigma: Union[List[float], List[int], np.ndarray],
        correlation: Union[List[float], np.ndarray],
    ) -> None:
        # dimension of the joint process equals the number of drift terms
        super().__init__(dim=len(mu))
        self.mu = to_array(mu)
        self.sigma = to_array(sigma)
        self.correlation = to_array(correlation)
    def coefs(self) -> Dict[str, np.ndarray]:
        return dict(mu=self.mu, sigma=self.sigma, correlation=self.correlation)
    def _apply(self, x0: np.ndarray, dx: np.ndarray) -> np.ndarray:
        # arithmetic addition to update x0
        return x0 + dx
    def _drift(self, x0: np.ndarray) -> np.ndarray:
        # mu is already an array of expected returns; it doesn't depend on x0
        return np.full_like(x0, self.mu, dtype=np.float64)
    def _diffusion(self, x0: np.ndarray) -> np.ndarray:
        # diffusion does not depend on x0, but we want to match the shape of x0. If x0
        # has shape (100, 2), then we want to export an array with size (100, 2, 2)
        volatility = np.diag(self.sigma)
        if x0.ndim > 1:
            # we have multiple start values for each index
            volatility = np.repeat(volatility[None, :, :], x0.shape[0], axis=0)
        # lower-triangular Cholesky factor of the correlation matrix; the
        # product diag(sigma) @ L is the joint diffusion matrix
        cholesky = np.linalg.cholesky(self.correlation)
        return volatility @ cholesky
    @classmethod
    def example(cls) -> "JointWienerProcess":
        # two correlated processes, matching the doctest above
        return cls(
            mu=[0.05, 0.03], sigma=[0.20, 0.15], correlation=[[1.0, 0.5], [0.5, 1.0]]
        )
|
from tkinter import *
import time
import random
# --- main window and widgets (module-level side effects) ---
root = Tk()
root.title("bb")
root.geometry("450x570")
root.resizable(0, 0)  # fixed-size window
root.wm_attributes("-topmost", 1)  # keep the game window above other windows
# Playing field; the canvas is larger than the window, so its edges are clipped.
canvas = Canvas(root, width=600, height=600, bd=0, highlightthickness=0, highlightbackground="white", bg="Black")
canvas.pack(padx=10, pady=10)
# Score readout, updated by the ball on every brick hit.
score = Label(height=0, width=0, font="Consolas 14 bold")
score.pack(side="left")
root.update()  # force geometry computation so winfo_width/height are valid later
class B:
    """The ball: moves every frame, bounces off walls and the paddle, and
    removes bricks it touches.

    Speed components are re-drawn from a shuffled magnitude list on every
    bounce, so the ball's speed varies slightly over the game.
    """
    def __init__(self, canvas, color, paddle, bricks, score):
        # Collaborators; ``score`` is the tkinter Label showing the score.
        self.bricks = bricks
        self.canvas = canvas
        self.paddle = paddle
        self.score = score
        self.bottom_hit = False  # set once the ball falls past the bottom edge
        self.hit = 0  # number of bricks destroyed so far
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color, width=1)
        self.canvas.move(self.id, 230, 461)  # start just above the paddle
        start = [4, 3.8, 3.6, 3.4, 3.2, 3, 2.8, 2.6]
        random.shuffle(start)
        #print(start)
        # Random initial speed: rightwards (+x) and upwards (-y).
        self.x = start[0]
        self.y = -start[0]
        self.canvas.move(self.id, self.x, self.y)
        self.canvas_height = canvas.winfo_height()
        self.canvas_width = canvas.winfo_width()
    def brick_hit(self, pos):
        """Return True (and remove the brick) if ``pos`` overlaps any brick."""
        for brick_line in self.bricks:
            for brick in brick_line:
                brick_pos = self.canvas.coords(brick.id)
                #print(brick_pos)
                try:
                    # AABB overlap test; coords() of an already-deleted brick
                    # is empty, so the IndexError is caught and that brick is
                    # simply skipped.
                    if pos[2] >= brick_pos[0] and pos[0] <= brick_pos[2]:
                        if pos[3] >= brick_pos[1] and pos[1] <= brick_pos[3]:
                            canvas.bell()
                            self.hit += 1
                            self.score.configure(text="Score: " + str(self.hit))
                            self.canvas.delete(brick.id)
                            return True
                except:
                    continue
        return False
    def paddle_hit(self, pos):
        """Return True if ``pos`` overlaps the paddle's bounding box."""
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[1] <= paddle_pos[3]:
                #print("paddle hit")
                return True
        return False
    def draw(self):
        """Advance the ball one frame and handle all collisions."""
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)
        #print(pos)
        # Fresh shuffled speed magnitude for any bounce this frame.
        start = [4, 3.8, 3.6, 3.4, 3.2, 3, 2.8, 2.6]
        random.shuffle(start)
        if self.brick_hit(pos):
            self.y = start[0]  # bounce downwards off a brick
        if pos[1] <= 0:
            self.y = start[0]  # bounce off the top wall
        if pos[3] >= self.canvas_height:
            self.bottom_hit = True  # fell past the bottom: signals game over
        if pos[0] <= 0:
            self.x = start[0]  # bounce off the left wall
        if pos[2] >= self.canvas_width:
            self.x = -start[0]  # bounce off the right wall
        if self.paddle_hit(pos):
            self.y = -start[0]  # bounce upwards off the paddle
class Paddle:
    """The player-controlled paddle, moved with the left/right arrow keys."""
    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 200, 30, fill=color)# paddle dimensions
        self.canvas.move(self.id, 200, 485)# initial paddle position
        self.x = 0  # horizontal speed, set by the key handlers below
        self.pausec=0  # pause counter toggled by pauser() (binding is disabled)
        self.canvas_width = canvas.winfo_width()
        self.canvas.bind_all("<Left>", self.turn_left)# left arrow moves the paddle left
        self.canvas.bind_all("<Right>", self.turn_right)# right arrow moves the paddle right
        #self.canvas.bind_all("<space>", self.pauser)
    def draw(self):
        """Move the paddle one frame, stopping at the canvas edges."""
        pos = self.canvas.coords(self.id)
        #print(pos)
        if pos[0] + self.x <= 0:
            self.x = 0
        if pos[2] + self.x >= self.canvas_width:
            self.x = 0
        self.canvas.move(self.id, self.x, 0)
    def turn_left(self, event):
        self.x = -3.5
    def turn_right(self, event):
        self.x = 3.5
    def pauser(self,event):
        # Toggle pause: 1 = paused; the second press wraps back to 0.
        self.pausec+=1
        if self.pausec==2:
            self.pausec=0
class Bricks:
    """A single brick, drawn as a small oval on the canvas.

    The brick is created at the canvas origin; the caller positions it
    afterwards with ``canvas.move``.
    """
    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = self.canvas.create_oval(5, 5, 25, 25, fill=color, width=2)
playing = False # game starts idle; start_game() flips this when <Return> is pressed
def start_game(event):
    """<Return> handler: (re)build the playing field and run the game loop.

    Uses the module-level ``root``, ``canvas`` and ``score`` widgets plus the
    ``playing`` flag; re-entry is blocked while a game is in progress.
    """
    global playing
    if playing is False:
        playing = True
        score.configure(text="Score: 00")
        canvas.delete("all")
        BALL_COLOR = ["green", "green", "green"]
        BRICK_COLOR = ["black", "black", "black"] # brick colors, shuffled per brick
        random.shuffle(BALL_COLOR)
        paddle = Paddle(canvas, "blue")
        bricks = []
        # Build the 5 x 19 grid of bricks...
        for i in range(0, 5):
            b = []
            for j in range(0, 19):
                random.shuffle(BRICK_COLOR)
                tmp = Bricks(canvas, BRICK_COLOR[0])
                b.append(tmp)
            bricks.append(b)
        # ...then place each brick on a 25-pixel grid.
        for i in range(0, 5):
            for j in range(0, 19):
                canvas.move(bricks[i][j].id, 25 * j, 25 * i)
        ball = B(canvas, BALL_COLOR[0], paddle, bricks, score)
        root.update_idletasks()
        root.update()
        time.sleep(1)  # brief pause before the ball starts moving
        while 1:
            if paddle.pausec !=1:
                # Remove the PAUSE text if present; ``m`` is only bound in the
                # pause branch below, so NameError here is expected and ignored.
                try:
                    canvas.delete(m)
                    del m
                except:
                    pass
                if not ball.bottom_hit:
                    ball.draw()
                    paddle.draw()
                    root.update_idletasks()
                    root.update()
                    time.sleep(0.01)  # ~100 frames per second
                    if ball.hit==95:  # all 5 * 19 = 95 bricks cleared
                        canvas.create_text(250, 250, text="YOU WON !!", fill="yellow", font="Consolas 24 ")
                        root.update_idletasks()
                        root.update()
                        playing = False
                        break
                else:
                    # Ball fell past the paddle: game over.
                    canvas.create_text(250, 250, text="GAME OVER!!", fill="red", font="Consolas 24 ")
                    root.update_idletasks()
                    root.update()
                    playing = False
                    break
            else:
                # Paused: draw the PAUSE text exactly once (NameError on ``m``
                # means it has not been drawn yet).
                try:
                    if m==None:pass
                except:
                    m=canvas.create_text(250, 250, text="PAUSE!!", fill="green", font="Consolas 24 ")
                root.update_idletasks()
                root.update()
root.bind_all("<Return>", start_game)  # Enter starts a new game
canvas.create_text(250, 250, text="To start press enter", fill="red", font="Consolas 18")
j=canvas.find_all()  # NOTE(review): ``j`` is never used afterwards
root.mainloop()
|
from tensorflow.keras import layers
from tensorflow.keras.activations import swish
from tensorflow.nn import relu6
def relu(x):
    """Apply the standard ReLU activation through a Keras layer."""
    activation = layers.ReLU()
    return activation(x)
def hard_sigmoid(x):
    """Piecewise-linear sigmoid approximation: relu6(x + 3) / 6."""
    clipped = layers.ReLU(6.0)(x + 3.0)
    return clipped * (1.0 / 6.0)
def hard_swish(x):
    """Hard-swish activation: x * hard_sigmoid(x)."""
    gate = hard_sigmoid(x)
    return layers.Multiply()([gate, x])
class Convolution2D(layers.Layer):
    """2D convolution, optionally followed by batch normalization and dropout.

    Args:
        num_filters (int): number of output filters of the convolution, default: 32
        kernel_size (int/tuple of two ints): height and width of the convolution
            window; a single int is used for both dimensions, default: 3
        batch_normalization (bool): apply BatchNormalization after the conv, default: False
        dropout (float): dropout rate; 0 disables the Dropout layer, default: 0
        kwargs (keyword arguments): forwarded to the Conv2D layer
    """
    def __init__(
        self,
        num_filters=32,
        kernel_size=3,
        batch_normalization=False,
        dropout=0,
        **kwargs
    ):
        super().__init__()
        self.num_filters = num_filters
        self.kernel_size = kernel_size
        self.batch_normalization = batch_normalization
        self.dropout = dropout
        self.kwargs = kwargs
    def __call__(self, inputs):
        out = layers.Conv2D(self.num_filters, self.kernel_size, **self.kwargs)(inputs)
        if self.batch_normalization:
            out = layers.BatchNormalization()(out)
        if self.dropout != 0:
            out = layers.Dropout(self.dropout)(out)
        return out
class DenseNetConvolutionBlock(layers.Layer):
    """DenseNet convolution block: BN -> activation -> 1x1 bottleneck conv,
    BN -> activation -> 3x3 conv, then channel-wise concat with the input.

    Args:
        growth_rate: (float): number of feature maps added by the block
        epsilon: (float): batch-norm variance epsilon, default: 1.001e-5
        activation (keras Activation): activation applied after batch normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector, default: False
        kwargs (keyword arguments): forwarded to the convolution layers
    """
    def __init__(
        self, growth_rate, epsilon=1.001e-5, activation="relu", use_bias=False, **kwargs
    ):
        super().__init__()
        self.growth_rate = growth_rate
        self.epsilon = epsilon
        self.activation = activation
        self.use_bias = use_bias
        self.kwargs = kwargs
    def __call__(self, inputs):
        branch = layers.BatchNormalization(epsilon=self.epsilon)(inputs)
        branch = layers.Activation(self.activation)(branch)
        # 1x1 bottleneck expands to 4 * growth_rate channels.
        branch = layers.Conv2D(
            4 * self.growth_rate, 1, use_bias=self.use_bias, **self.kwargs
        )(branch)
        branch = layers.BatchNormalization(epsilon=self.epsilon)(branch)
        branch = layers.Activation(self.activation)(branch)
        branch = layers.Conv2D(
            self.growth_rate, 3, padding="same", use_bias=self.use_bias, **self.kwargs
        )(branch)
        # Dense connectivity: concatenate input and new features on channels.
        return layers.Concatenate(axis=3)([inputs, branch])
class DenseNetTransitionBlock(layers.Layer):
    """DenseNet transition block: BN -> activation -> 1x1 channel compression
    -> 2x2 average pooling (halves the spatial resolution).

    Args:
        reduction: (float): channel compression rate of the transition
        epsilon: (float): batch-norm variance epsilon, default: 1.001e-5
        activation (keras Activation): activation applied after batch normalization, default: relu
        kwargs (keyword arguments): forwarded to the convolution layer
    """
    def __init__(self, reduction, epsilon=1.001e-5, activation="relu", **kwargs):
        super().__init__()
        self.reduction = reduction
        self.epsilon = epsilon
        self.activation = activation
        self.kwargs = kwargs
    def __call__(self, inputs):
        out = layers.BatchNormalization(epsilon=self.epsilon)(inputs)
        out = layers.Activation(self.activation)(out)
        # Compress the channel count by ``reduction`` with a 1x1 conv.
        compressed = int(out.shape[-1] * self.reduction)
        out = layers.Conv2D(compressed, 1, **self.kwargs)(out)
        return layers.AveragePooling2D(2, strides=2)(out)
class VGGModule(layers.Layer):
    """VGG-style module (slightly modified): a stack of identical 2D
    convolutions (each optionally with BatchNorm and Dropout) followed by
    one MaxPooling layer.

    Args:
        num_conv (int): number of convolution layers, default: 2
        num_filters (int): number of output filters per convolution, default: 32
        kernel_size (int/tuple of two ints): convolution window size;
            a single int is used for both dimensions, default: 3
        batch_normalization (bool): whether to use Batch Normalization, default: False
        dropout (float): the dropout rate, default: 0
        pool_size (int/tuple of two ints): max-pool window size, default: 2
        pool_stride (int/tuple of two ints): max-pool stride, default: 2
        kwargs (keyword arguments): forwarded to the convolution layers
    """
    def __init__(
        self,
        num_conv=2,
        num_filters=32,
        kernel_size=3,
        batch_normalization=False,
        dropout=0,
        pool_size=2,
        pool_stride=2,
        **kwargs
    ):
        super().__init__()
        self.num_conv = num_conv
        self.num_filters = num_filters
        self.kernel_size = kernel_size
        self.batch_normalization = batch_normalization
        self.dropout = dropout
        self.pool_size = pool_size
        self.pool_stride = pool_stride
        self.kwargs = kwargs
    def __call__(self, inputs):
        x = inputs
        for _ in range(self.num_conv):
            # Keyword arguments make the mapping onto Convolution2D explicit.
            x = Convolution2D(
                num_filters=self.num_filters,
                kernel_size=self.kernel_size,
                batch_normalization=self.batch_normalization,
                dropout=self.dropout,
                padding="same",
                **self.kwargs
            )(x)
        return layers.MaxPooling2D(pool_size=self.pool_size, strides=self.pool_stride)(x)
class InceptionConv(layers.Layer):
    """Inception-style 2D convolution: Conv2D -> BatchNorm (without scale)
    -> activation -> optional Dropout.

    Args:
        filters (int): number of output filters of the convolution
        kernel_size (tuple of two ints): height and width of the convolution window
        strides (tuple of two ints): convolution strides, default: (1, 1)
        padding ("valid" or "same"): convolution padding mode, default: "same"
        use_bias (bool): whether the convolution uses a bias vector, default: False
        activation (keras Activation): activation to apply, default: relu
        dropout (float): the dropout rate; 0 disables Dropout, default: 0
        kwargs (keyword arguments): forwarded to the Conv2D layer
    """
    def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1),
        padding="same",
        use_bias=False,
        activation="relu",
        dropout=0,
        **kwargs
    ):
        super().__init__()
        self.filters = filters
        self.kernel_size = kernel_size
        self.padding = padding
        self.strides = strides
        self.use_bias = use_bias
        self.activation = activation
        self.dropout = dropout
        self.kwargs = kwargs
    def __call__(self, inputs):
        out = layers.Conv2D(
            self.filters,
            self.kernel_size,
            strides=self.strides,
            padding=self.padding,
            use_bias=self.use_bias,
            **self.kwargs
        )(inputs)
        # scale=False: the following activation makes the BN scale redundant.
        out = layers.BatchNormalization(scale=False)(out)
        out = layers.Activation(self.activation)(out)
        if self.dropout > 0:
            out = layers.Dropout(self.dropout)(out)
        return out
class InceptionBlock(layers.Layer):
    """Implementation on Inception Mixing Block
    Builds several parallel convolution branches from the same input and
    concatenates their outputs (plus an optional pooling branch).
    Args:
        mixture_config (list of lists): each internal list contains tuples (num filters, filter_size, stride, padding)
            describing one branch as a chain of InceptionConv layers
        pooling_layer (keras layer): pooling to be added to mixture
        use_bias (bool): whether the convolution layers use a bias vector, defalut: False
        activation (keras Activation): activation to be applied, default: relu
        dropout (float): the dropout rate, default: 0
        kwargs (keyword arguments): the arguments for Convolution Layer
    """
    def __init__(
        self,
        mixture_config,
        pooling_layer=None,
        use_bias=False,
        activation="relu",
        dropout=0,
        **kwargs
    ):
        super().__init__()
        self.mixture_config = mixture_config
        self.pooling_layer = pooling_layer
        self.use_bias = use_bias
        self.activation = activation
        self.dropout = dropout
        self.kwargs = kwargs
    def __call__(self, inputs):
        x = inputs
        blocks = []
        for sub_block in self.mixture_config:
            # each parallel branch starts again from the block input
            x = inputs
            for layer_config in sub_block:
                filters, kernel_size, strides, padding = layer_config
                x = InceptionConv(
                    filters=filters,
                    kernel_size=kernel_size,
                    strides=strides,
                    padding=padding,
                    use_bias=self.use_bias,
                    activation=self.activation,
                    dropout=self.dropout,
                    **self.kwargs
                )(x)
            blocks.append(x)
        if self.pooling_layer is not None:
            # pooling branch also operates on the raw block input
            blocks.append(self.pooling_layer(inputs))
        # merge all branches channel-wise
        x = layers.concatenate(blocks)
        return x
class XceptionBlock(layers.Layer):
    """Customised Xception block: three pre-activated depthwise-separable
    convolutions (activation -> SeparableConv2D -> BatchNorm) followed by a
    residual addition with the block input.

    Note: the residual add requires the input channel count to equal
    ``channel_coefficient``.

    Args:
        channel_coefficient (int): number of channels in the block
        use_bias (bool): whether the convolution layers use a bias vector, default: False
        activation (keras Activation): activation to be applied, default: relu
    """
    def __init__(self, channel_coefficient, use_bias=False, activation="relu"):
        super().__init__()
        self.channel_coefficient = channel_coefficient
        self.use_bias = use_bias
        self.activation = activation
    def __call__(self, inputs):
        residual = inputs
        x = inputs
        # Three identical (activation, separable conv, batch norm) stanzas.
        for _ in range(3):
            x = layers.Activation(self.activation)(x)
            x = layers.SeparableConv2D(
                self.channel_coefficient,
                (3, 3),
                padding="same",
                use_bias=self.use_bias,
            )(x)
            x = layers.BatchNormalization()(x)
        return layers.add([x, residual])
class EfficientNetBlock(layers.Layer):
    """Implementation of Efficient Net Block (MBConv: expansion, depthwise
    convolution, squeeze-and-excitation, projection, optional residual).
    Args:
        activation (keras Activation): activation to be applied, default: swish
        use_bias (bool): whether the convolution layers use a bias vector, default: False
        dropout (float): drop-connect rate applied on the residual branch, default: 0
        filters_in (int): the number of input filters, default: 32
        filters_out (int): the number of output filters, default: 16
        kernel_size (int): the dimension of the convolution window, default: 3
        strides (int): the stride of the convolution, default: 1
        expand_ratio (int): scaling coefficient for the input filters, default: 1
        se_ratio (float): fraction to squeeze the input filters, default: 1
        id_skip (bool): whether to add the identity shortcut (only taken when
            strides == 1 and filters_in == filters_out), default: True
    """
    def __init__(
        self,
        activation=swish,
        use_bias=False,
        dropout=0,
        filters_in=32,
        filters_out=16,
        kernel_size=3,
        strides=1,
        expand_ratio=1,
        se_ratio=1,
        id_skip=True,
    ):
        super().__init__()
        self.activation = activation
        self.use_bias = use_bias
        self.dropout = dropout
        self.filters_in = filters_in
        self.filters_out = filters_out
        self.kernel_size = kernel_size
        self.strides = strides
        self.expand_ratio = expand_ratio
        self.se_ratio = se_ratio
        self.id_skip = id_skip
    def _correct_pad(self, inputs, kernel_size):
        """Returns a tuple for zero-padding for 2D convolution with downsampling.
        Args:
            inputs: Input tensor.
            kernel_size: An integer or tuple/list of 2 integers.
        Returns:
            A tuple.
        """
        input_size = inputs.shape[1:3]
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        if input_size[0] is None:
            # dynamic spatial size: fall back to symmetric-ish padding
            adjust = (1, 1)
        else:
            adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
        correct = (kernel_size[0] // 2, kernel_size[1] // 2)
        return (
            (correct[0] - adjust[0], correct[0]),
            (correct[1] - adjust[1], correct[1]),
        )
    def __call__(self, inputs):
        # Expansion phase: 1x1 conv widens channels by expand_ratio (skipped
        # when the ratio is 1, since it would be an identity-sized conv).
        filters = self.filters_in * self.expand_ratio
        if self.expand_ratio != 1:
            x = layers.Conv2D(filters, 1, padding="same", use_bias=self.use_bias)(
                inputs
            )
            x = layers.BatchNormalization()(x)
            x = layers.Activation(self.activation)(x)
        else:
            x = inputs
        # Depthwise Convolution; explicit zero-padding when downsampling
        if self.strides == 2:
            x = layers.ZeroPadding2D(
                padding=self._correct_pad(x, self.kernel_size),
            )(x)
            conv_pad = "valid"
        else:
            conv_pad = "same"
        x = layers.DepthwiseConv2D(
            self.kernel_size,
            strides=self.strides,
            padding=conv_pad,
            use_bias=self.use_bias,
        )(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation(self.activation)(x)
        # Squeeze and Excitation phase
        if 0 < self.se_ratio <= 1:
            filters_se = max(1, int(self.filters_in * self.se_ratio))
            se = layers.GlobalAveragePooling2D()(x)
            # NOTE(review): assumes channels-last data format — confirm.
            se_shape = (1, 1, filters)
            se = layers.Reshape(se_shape)(se)
            se = layers.Conv2D(
                filters_se, 1, padding="same", activation=self.activation
            )(se)
            se = layers.Conv2D(filters, 1, padding="same", activation="sigmoid")(se)
            x = layers.multiply([x, se])
        # Output phase: 1x1 projection to filters_out (no activation)
        x = layers.Conv2D(self.filters_out, 1, padding="same", use_bias=self.use_bias)(
            x
        )
        x = layers.BatchNormalization()(x)
        if self.id_skip and self.strides == 1 and self.filters_in == self.filters_out:
            if self.dropout > 0:
                # drop-connect: per-sample noise shape zeroes whole examples
                x = layers.Dropout(self.dropout, noise_shape=(None, 1, 1, 1))(x)
            x = layers.add([x, inputs])
        return x
class ResNetBlock(layers.Layer):
    """Customized Implementation of ResNet Block
    Bottleneck residual block: 1x1 reduce -> kxk conv -> 1x1 expand (to
    4*filters), added to a projection or identity shortcut.
    Args:
        filters (int): filters of the bottleneck layer
        kernel_size (int): kernel size of the bottleneck layer, default: 3
        stride (int): stride of the first layer, default: 1
        conv_shortcut (bool): use convolution shortcut if True,
            otherwise identity shortcut, default: True
        epsilon: (float): Small float added to variance to avoid dividing by zero in
            batch normalisation, default: 1.001e-5
        activation (keras Activation): activation applied after batch normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector, defalut: False
        kwargs (keyword arguments): the arguments for Convolution Layer
    """
    def __init__(
        self,
        filters,
        kernel_size=3,
        stride=1,
        conv_shortcut=True,
        epsilon=1.001e-5,
        activation="relu",
        use_bias=False,
        **kwargs
    ):
        super().__init__()
        self.filters = filters
        self.kernel_size = kernel_size
        self.stride = stride
        self.conv_shortcut = conv_shortcut
        self.epsilon = epsilon
        self.activation = activation
        self.use_bias = use_bias
        self.kwargs = kwargs
    def __call__(self, inputs):
        x = inputs
        if self.conv_shortcut:
            # projection shortcut: 1x1 conv to 4*filters channels, plus BN
            shortcut = layers.Conv2D(
                4 * self.filters, 1, strides=self.stride, **self.kwargs
            )(x)
            shortcut = layers.BatchNormalization(epsilon=self.epsilon)(shortcut)
        else:
            shortcut = x
        # bottleneck: 1x1 reduce
        x = layers.Conv2D(self.filters, 1, strides=self.stride, **self.kwargs)(x)
        x = layers.BatchNormalization(epsilon=self.epsilon)(x)
        x = layers.Activation(self.activation)(x)
        # kxk spatial conv; NOTE(review): "SAME" relies on Keras normalizing
        # the padding string's case — confirm against the Keras version used.
        x = layers.Conv2D(
            self.filters, self.kernel_size, padding="SAME", **self.kwargs
        )(x)
        x = layers.BatchNormalization(epsilon=self.epsilon)(x)
        x = layers.Activation(self.activation)(x)
        # 1x1 expand back to 4*filters
        x = layers.Conv2D(4 * self.filters, 1, **self.kwargs)(x)
        x = layers.BatchNormalization(epsilon=self.epsilon)(x)
        x = layers.Add()([shortcut, x])
        x = layers.Activation(self.activation)(x)
        return x
class ResNetV2Block(layers.Layer):
    """Customized Implementation of ResNetV2 Block
    Pre-activation bottleneck: BN + activation first, then the conv stack;
    the shortcut is taken from the pre-activated tensor.
    Args:
        filters (int): filters of the bottleneck layer
        kernel_size (int): kernel size of the bottleneck layer, default: 3
        stride (int): stride of the first layer, default: 1
        conv_shortcut (bool): use convolution shortcut if True,
            otherwise identity shortcut, default: True
        epsilon: (float): Small float added to variance to avoid dividing by zero in
            batch normalisation, default: 1.001e-5
        activation (keras Activation): activation applied after batch normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector, defalut: False
        kwargs (keyword arguments): the arguments for Convolution Layer
    """
    def __init__(
        self,
        filters,
        kernel_size=3,
        stride=1,
        conv_shortcut=True,
        epsilon=1.001e-5,
        activation="relu",
        use_bias=False,
        **kwargs
    ):
        super().__init__()
        self.filters = filters
        self.kernel_size = kernel_size
        self.stride = stride
        self.conv_shortcut = conv_shortcut
        self.epsilon = epsilon
        self.activation = activation
        self.use_bias = use_bias
        self.kwargs = kwargs
    def __call__(self, inputs):
        x = inputs
        # pre-activation: BN + activation before any convolution
        preact = layers.BatchNormalization(epsilon=self.epsilon)(x)
        preact = layers.Activation(self.activation)(preact)
        if self.conv_shortcut:
            # projection shortcut taken from the pre-activated tensor
            shortcut = layers.Conv2D(
                4 * self.filters, 1, strides=self.stride, **self.kwargs
            )(preact)
        else:
            # identity shortcut; downsample with max-pooling when striding
            shortcut = (
                layers.MaxPooling2D(1, strides=self.stride)(x) if self.stride > 1 else x
            )
        x = layers.Conv2D(
            self.filters, 1, strides=1, use_bias=self.use_bias, **self.kwargs
        )(preact)
        x = layers.BatchNormalization(epsilon=self.epsilon)(x)
        x = layers.Activation(self.activation)(x)
        # explicit padding so the strided conv below can use default padding
        x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
        x = layers.Conv2D(
            self.filters, self.kernel_size, strides=self.stride, use_bias=self.use_bias
        )(x)
        x = layers.BatchNormalization(epsilon=self.epsilon)(x)
        x = layers.Activation(self.activation)(x)
        x = layers.Conv2D(4 * self.filters, 1, **self.kwargs)(x)
        x = layers.Add()([shortcut, x])
        return x
class ResNeXtBlock(layers.Layer):
    """Customized Implementation of ResNeXt Block
    Bottleneck residual block with grouped convolution, where the grouped
    convolution is emulated via DepthwiseConv2D + reshape + per-group sum.
    Args:
        filters (int): filters of the bottleneck layer
        kernel_size (int): kernel size of the bottleneck layer, default: 3
        stride (int): stride of the first layer, default: 1
        groups (int): group size of grouped convolution, default:32
        conv_shortcut (bool): use convolution shortcut if True,
            otherwise identity shortcut, default: True
        epsilon: (float): Small float added to variance to avoid dividing by zero in
            batch normalisation, default: 1.001e-5
        activation (keras Activation): activation applied after batch normalization, default: relu
        use_bias (bool): whether the convolution layers use a bias vector, defalut: False
        kwargs (keyword arguments): the arguments for Convolution Layer
    """
    def __init__(
        self,
        filters,
        kernel_size=3,
        stride=1,
        groups=32,
        conv_shortcut=True,
        epsilon=1.001e-5,
        activation="relu",
        use_bias=False,
        **kwargs
    ):
        super().__init__()
        self.filters = filters
        self.kernel_size = kernel_size
        self.stride = stride
        self.groups = groups
        self.conv_shortcut = conv_shortcut
        self.epsilon = epsilon
        self.activation = activation
        self.use_bias = use_bias
        self.kwargs = kwargs
    def __call__(self, inputs):
        x = inputs
        if self.conv_shortcut:
            # projection shortcut to (64 // groups) * filters channels
            shortcut = layers.Conv2D(
                (64 // self.groups) * self.filters,
                1,
                strides=self.stride,
                use_bias=self.use_bias,
                **self.kwargs
            )(x)
            shortcut = layers.BatchNormalization(epsilon=self.epsilon)(shortcut)
        else:
            shortcut = x
        x = layers.Conv2D(self.filters, 1, use_bias=self.use_bias, **self.kwargs)(x)
        x = layers.BatchNormalization(epsilon=self.epsilon)(x)
        x = layers.Activation(self.activation)(x)
        c = self.filters // self.groups  # channels per group
        x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
        # Grouped convolution emulated with a depthwise conv (depth_multiplier
        # = c) followed by a reshape and a sum over the per-group channel axis.
        x = layers.DepthwiseConv2D(
            self.kernel_size,
            strides=self.stride,
            depth_multiplier=c,
            use_bias=self.use_bias,
            **self.kwargs
        )(x)
        # NOTE(review): Reshape requires the spatial dims in x.shape to be
        # static here — confirm with the intended input shapes.
        x_shape = x.shape[1:-1]
        x = layers.Reshape(x_shape + (self.groups, c, c))(x)
        x = layers.Lambda(lambda x: sum(x[:, :, :, :, i] for i in range(c)))(x)
        x = layers.Reshape(x_shape + (self.filters,))(x)
        x = layers.BatchNormalization(epsilon=self.epsilon)(x)
        x = layers.Activation(self.activation)(x)
        x = layers.Conv2D(
            (64 // self.groups) * self.filters, 1, use_bias=self.use_bias, **self.kwargs
        )(x)
        x = layers.BatchNormalization(epsilon=self.epsilon)(x)
        x = layers.Add()([shortcut, x])
        x = layers.Activation(self.activation)(x)
        return x
class ConvSkipConnection(layers.Layer):
    """Residual connection around two same-padded convolutions.

    The residual branch is conv -> (BN) -> activation -> conv; its output is
    added back to the input, then (BN), activation and optional dropout are
    applied. The input channel count must equal ``num_filters`` for the add.

    Args:
        num_filters (int): number of output filters of each convolution, default: 32
        kernel_size (int/tuple of two ints): convolution window size;
            a single int is used for both dimensions, default: 3
        activation (keras Activation): activation to apply, default: relu
        batch_normalization (bool): whether to use Batch Normalization, default: False
        dropout (float): the dropout rate; 0 disables Dropout, default: 0
        kwargs (keyword arguments): forwarded to the convolution layers
    """
    def __init__(
        self,
        num_filters,
        kernel_size=3,
        activation="relu",
        batch_normalization=False,
        dropout=0,
        **kwargs
    ):
        super().__init__()
        self.num_filters = num_filters
        self.kernel_size = kernel_size
        self.activation = activation
        self.batch_normalization = batch_normalization
        self.dropout = dropout
        self.kwargs = kwargs
    def __call__(self, inputs):
        residual = layers.Conv2D(
            self.num_filters, self.kernel_size, padding="same", **self.kwargs
        )(inputs)
        if self.batch_normalization:
            residual = layers.BatchNormalization()(residual)
        residual = layers.Activation(self.activation)(residual)
        residual = layers.Conv2D(
            self.num_filters, self.kernel_size, padding="same", **self.kwargs
        )(residual)
        out = layers.add([residual, inputs])
        if self.batch_normalization:
            out = layers.BatchNormalization()(out)
        out = layers.Activation(self.activation)(out)
        if self.dropout > 0:
            out = layers.Dropout(self.dropout)(out)
        return out
class InceptionResNetConv2D(layers.Layer):
    """Inception-ResNet convolution: Conv2D, then BatchNorm (only when the
    conv has no bias) and an optional activation.

    Args:
        filters (int): number of output filters of the convolution
        kernel_size (int/tuple of two ints): convolution window size;
            a single int is used for both dimensions
        strides (tuple of two ints): convolution strides, default: 1
        padding ("valid" or "same"): convolution padding mode, default: "same"
        activation (keras Activation): activation to apply; None skips it, default: relu
        use_bias (bool): whether the convolution uses a bias vector, default: False
    """
    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="same",
        activation="relu",
        use_bias=False,
    ):
        super().__init__()
        self.filters = filters
        self.kernel_size = kernel_size
        self.strides = strides
        self.padding = padding
        self.activation = activation
        self.use_bias = use_bias
    def __call__(self, inputs):
        out = layers.Conv2D(
            self.filters,
            self.kernel_size,
            strides=self.strides,
            padding=self.padding,
            use_bias=self.use_bias,
        )(inputs)
        # Bias-free convs are followed by BN; the norm's beta acts as the bias.
        if not self.use_bias:
            out = layers.BatchNormalization(scale=False)(out)
        if self.activation is not None:
            out = layers.Activation(self.activation)(out)
        return out
class InceptionResNetBlock(layers.Layer):
    """Implementation of an Inception-ResNet block.

    This class builds the 3 types of Inception-ResNet blocks mentioned
    in the paper, controlled by the `block_type` argument:
        - Inception-ResNet-A: `block_type='block35'`
        - Inception-ResNet-B: `block_type='block17'`
        - Inception-ResNet-C: `block_type='block8'`
    Args:
        scale (float): scaling factor to scale the residuals before adding
            them to the shortcut branch. Let `r` be the output from the
            residual branch; the output of this block is `x + scale * r`
        block_type (str): one of 'block35', 'block17', 'block8'; determines
            the network structure in the residual branch
        activation (keras Activation): activation applied in convolution layers, default: relu
        use_bias (bool): whether the convolution layers use a bias vector, default: False
        end_activation (keras Activation): activation applied at the end of the block, default: relu
    """
    def __init__(
        self,
        scale,
        block_type,
        activation="relu",
        use_bias=False,
        end_activation="relu",
    ):
        super().__init__()
        self.scale = scale
        self.block_type = block_type
        self.activation = activation
        self.use_bias = use_bias
        self.end_activation = end_activation
    def __call__(self, inputs):
        """Build the residual branches, merge them and add the scaled shortcut."""
        x = inputs
        if self.block_type == "block35":
            branch_0 = InceptionResNetConv2D(
                32, 1, activation=self.activation, use_bias=self.use_bias
            )(x)
            branch_1 = InceptionResNetConv2D(
                32, 1, activation=self.activation, use_bias=self.use_bias
            )(x)
            branch_1 = InceptionResNetConv2D(
                32, 3, activation=self.activation, use_bias=self.use_bias
            )(branch_1)
            branch_2 = InceptionResNetConv2D(
                32, 1, activation=self.activation, use_bias=self.use_bias
            )(x)
            branch_2 = InceptionResNetConv2D(
                48, 3, activation=self.activation, use_bias=self.use_bias
            )(branch_2)
            branch_2 = InceptionResNetConv2D(
                64, 3, activation=self.activation, use_bias=self.use_bias
            )(branch_2)
            branches = [branch_0, branch_1, branch_2]
        elif self.block_type == "block17":
            branch_0 = InceptionResNetConv2D(
                192, 1, activation=self.activation, use_bias=self.use_bias
            )(x)
            branch_1 = InceptionResNetConv2D(
                128, 1, activation=self.activation, use_bias=self.use_bias
            )(x)
            branch_1 = InceptionResNetConv2D(
                160, [1, 7], activation=self.activation, use_bias=self.use_bias
            )(branch_1)
            branch_1 = InceptionResNetConv2D(
                192, [7, 1], activation=self.activation, use_bias=self.use_bias
            )(branch_1)
            branches = [branch_0, branch_1]
        elif self.block_type == "block8":
            branch_0 = InceptionResNetConv2D(
                192, 1, activation=self.activation, use_bias=self.use_bias
            )(x)
            branch_1 = InceptionResNetConv2D(
                192, 1, activation=self.activation, use_bias=self.use_bias
            )(x)
            branch_1 = InceptionResNetConv2D(
                224, [1, 3], activation=self.activation, use_bias=self.use_bias
            )(branch_1)
            branch_1 = InceptionResNetConv2D(
                256, [3, 1], activation=self.activation, use_bias=self.use_bias
            )(branch_1)
            branches = [branch_0, branch_1]
        else:
            raise ValueError(
                "Unknown Inception-ResNet block type. "
                'Expects "block35", "block17" or "block8", '
                "but got: " + str(self.block_type)
            )
        mixed = layers.Concatenate()(branches)
        # Project the concatenated branches back to the input channel count
        # (linear, biased 1x1 conv) before the residual addition.
        up = InceptionResNetConv2D(x.shape[3], 1, activation=None, use_bias=True)(mixed)
        x = layers.Lambda(
            lambda inputs, scale: inputs[0] + inputs[1] * scale,
            output_shape=tuple(x.shape[1:]),
            arguments={"scale": self.scale},
        )([x, up])
        # Fix: previously guarded on `self.activation`, which would apply
        # Activation(None) when only end_activation was disabled.
        if self.end_activation is not None:
            x = layers.Activation(self.end_activation)(x)
        return x
class NASNetSeparableConvBlock(layers.Layer):
    """Adds 2 blocks of Separable Conv + Batch Norm.

    Args:
        filters (int): filters of the separable conv layer
        kernel_size (tuple of two int): kernel size of the separable conv layer, default: (3, 3)
        stride (tuple of two int): stride of the first separable conv layer, default: (1, 1)
        momentum (float): momentum for the moving average in batch normalization, default: 0.9997
        epsilon (float): small float added to variance to avoid dividing by zero in
            batch normalisation, default: 1e-3
        activation (keras Activation): activation applied before each separable conv, default: relu
        use_bias (bool): whether the convolution layers use a bias vector, default: False
    """
    def __init__(
        self,
        filters,
        kernel_size=(3, 3),
        stride=(1, 1),
        momentum=0.9997,
        epsilon=1e-3,
        activation="relu",
        use_bias=False,
    ):
        # Fix: the Layer base class was never initialized; Keras requires
        # super().__init__() in Layer subclasses for proper bookkeeping.
        super().__init__()
        self.filters = filters
        self.kernel_size = kernel_size
        self.stride = stride
        self.momentum = momentum
        self.epsilon = epsilon
        self.activation = activation
        self.use_bias = use_bias
    def _correct_pad(self, inputs, kernel_size):
        """Returns a tuple for zero-padding for 2D convolution with downsampling.
        Args:
            inputs: Input tensor.
            kernel_size: An integer or tuple/list of 2 integers.
        Returns:
            A tuple of two (pad_before, pad_after) pairs.
        """
        input_size = inputs.shape[1:3]
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        if input_size[0] is None:
            adjust = (1, 1)
        else:
            adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
        correct = (kernel_size[0] // 2, kernel_size[1] // 2)
        return (
            (correct[0] - adjust[0], correct[0]),
            (correct[1] - adjust[1], correct[1]),
        )
    def __call__(self, inputs):
        """Apply activation -> separable conv -> BN, twice (stride only on the first)."""
        x = inputs
        x = layers.Activation(self.activation)(x)
        if self.stride == (2, 2):
            # Pad explicitly so 'valid' convolution downsamples cleanly.
            x = layers.ZeroPadding2D(padding=self._correct_pad(x, self.kernel_size))(x)
            conv_pad = "valid"
        else:
            conv_pad = "same"
        x = layers.SeparableConv2D(
            self.filters,
            self.kernel_size,
            strides=self.stride,
            padding=conv_pad,
            use_bias=self.use_bias,
        )(x)
        x = layers.BatchNormalization(
            momentum=self.momentum,
            epsilon=self.epsilon,
        )(x)
        x = layers.Activation(self.activation)(x)
        x = layers.SeparableConv2D(
            self.filters,
            self.kernel_size,
            padding="same",
            use_bias=self.use_bias,
        )(x)
        x = layers.BatchNormalization(
            momentum=self.momentum,
            epsilon=self.epsilon,
        )(x)
        return x
class NASNetAdjustBlock(layers.Layer):
    """Adjusts the `previous path` input to match the shape of the `input`.

    Args:
        filters (int): target channel count for the adjusted path
        momentum (float): momentum for the moving average in batch normalization, default: 0.9997
        epsilon (float): small float added to variance to avoid dividing by zero in
            batch normalisation, default: 1e-3
        activation (keras Activation): activation applied before the projection convs, default: relu
        use_bias (bool): whether the convolution layers use a bias vector, default: False
    """
    def __init__(
        self,
        filters,
        momentum=0.9997,
        epsilon=1e-3,
        activation="relu",
        use_bias=False,
    ):
        # Fix: the Layer base class was never initialized; Keras requires
        # super().__init__() in Layer subclasses for proper bookkeeping.
        super().__init__()
        self.filters = filters
        self.momentum = momentum
        self.epsilon = epsilon
        self.activation = activation
        self.use_bias = use_bias
    def __call__(self, p, ip):
        """Return `p` reshaped (spatially and/or in channels) to match `ip`."""
        # No previous path yet: reuse the current input.
        if p is None:
            p = ip
        ip_shape = tuple(ip.shape)
        p_shape = tuple(p.shape)
        if p_shape[-2] != ip_shape[-2]:
            # Spatial mismatch: downsample with two offset stride-2 average
            # pools and concatenate their 1x1 projections.
            p = layers.Activation(self.activation)(p)
            p1 = layers.AveragePooling2D((1, 1), strides=(2, 2), padding="valid")(p)
            p1 = layers.Conv2D(
                self.filters // 2, (1, 1), padding="same", use_bias=self.use_bias
            )(p1)
            p2 = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
            p2 = layers.Cropping2D(cropping=((1, 0), (1, 0)))(p2)
            p2 = layers.AveragePooling2D((1, 1), strides=(2, 2), padding="valid")(p2)
            p2 = layers.Conv2D(
                self.filters // 2, (1, 1), padding="same", use_bias=self.use_bias
            )(p2)
            p = layers.concatenate([p1, p2])
            p = layers.BatchNormalization(momentum=self.momentum, epsilon=self.epsilon)(
                p
            )
        elif p_shape[-1] != self.filters:
            # Channel mismatch only: project with a 1x1 convolution.
            p = layers.Activation(self.activation)(p)
            p = layers.Conv2D(
                self.filters,
                (1, 1),
                strides=(1, 1),
                padding="same",
                use_bias=self.use_bias,
            )(p)
            p = layers.BatchNormalization(momentum=self.momentum, epsilon=self.epsilon)(
                p
            )
        return p
class NASNetNormalACell(layers.Layer):
    """Normal cell for NASNet-A.

    Args:
        filters (int): filters of the separable conv layers
        momentum (float): momentum for the moving average in batch normalization, default: 0.9997
        epsilon (float): small float added to variance to avoid dividing by zero in
            batch normalisation, default: 1e-3
        activation (keras Activation): activation applied inside the cell, default: relu
        use_bias (bool): whether the convolution layers use a bias vector, default: False
    """
    def __init__(
        self,
        filters,
        momentum=0.9997,
        epsilon=1e-3,
        activation="relu",
        use_bias=False,
    ):
        # Fix: the Layer base class was never initialized; Keras requires
        # super().__init__() in Layer subclasses for proper bookkeeping.
        super().__init__()
        self.filters = filters
        self.momentum = momentum
        self.epsilon = epsilon
        self.activation = activation
        self.use_bias = use_bias
    def __call__(self, ip, p):
        """Build the cell from current input `ip` and previous path `p`.

        Returns the concatenated cell output and `ip` (the new previous path).
        """
        # Match the previous path's shape to the current input.
        p = NASNetAdjustBlock(
            self.filters, self.momentum, self.epsilon, self.activation, self.use_bias
        )(p, ip)
        h = layers.Activation(self.activation)(ip)
        h = layers.Conv2D(
            self.filters,
            (1, 1),
            strides=(1, 1),
            padding="same",
            use_bias=self.use_bias,
        )(h)
        h = layers.BatchNormalization(
            momentum=self.momentum,
            epsilon=self.epsilon,
        )(h)
        x1_1 = NASNetSeparableConvBlock(
            self.filters,
            kernel_size=(5, 5),
            momentum=self.momentum,
            epsilon=self.epsilon,
            activation=self.activation,
            use_bias=self.use_bias,
        )(h)
        x1_2 = NASNetSeparableConvBlock(
            self.filters,
            momentum=self.momentum,
            epsilon=self.epsilon,
            activation=self.activation,
            use_bias=self.use_bias,
        )(h)
        x1 = layers.add([x1_1, x1_2])
        x2_1 = NASNetSeparableConvBlock(
            self.filters,
            kernel_size=(5, 5),
            momentum=self.momentum,
            epsilon=self.epsilon,
            activation=self.activation,
            use_bias=self.use_bias,
        )(p)
        x2_2 = NASNetSeparableConvBlock(
            self.filters,
            kernel_size=(3, 3),
            momentum=self.momentum,
            epsilon=self.epsilon,
            activation=self.activation,
            use_bias=self.use_bias,
        )(p)
        x2 = layers.add([x2_1, x2_2])
        x3 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same")(h)
        x3 = layers.add([x3, p])
        x4_1 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same")(p)
        x4_2 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same")(p)
        x4 = layers.add([x4_1, x4_2])
        x5 = NASNetSeparableConvBlock(
            self.filters,
            momentum=self.momentum,
            epsilon=self.epsilon,
            activation=self.activation,
            use_bias=self.use_bias,
        )(h)
        x5 = layers.add([x5, h])
        x = layers.concatenate([p, x1, x2, x3, x4, x5])
        return x, ip
class NASNetReductionACell(layers.Layer):
    """Reduction cell for NASNet-A.

    Args:
        filters (int): filters of the separable conv layers
        momentum (float): momentum for the moving average in batch normalization, default: 0.9997
        epsilon (float): small float added to variance to avoid dividing by zero in
            batch normalisation, default: 1e-3
        activation (keras Activation): activation applied inside the cell, default: relu
        use_bias (bool): whether the convolution layers use a bias vector, default: False
    """
    def __init__(
        self,
        filters,
        momentum=0.9997,
        epsilon=1e-3,
        activation="relu",
        use_bias=False,
    ):
        # Fix: the Layer base class was never initialized; Keras requires
        # super().__init__() in Layer subclasses for proper bookkeeping.
        super().__init__()
        self.filters = filters
        self.momentum = momentum
        self.epsilon = epsilon
        self.activation = activation
        self.use_bias = use_bias
    def _correct_pad(self, inputs, kernel_size):
        """Returns a tuple for zero-padding for 2D convolution with downsampling.
        Args:
            inputs: Input tensor.
            kernel_size: An integer or tuple/list of 2 integers.
        Returns:
            A tuple of two (pad_before, pad_after) pairs.
        """
        input_size = inputs.shape[1:3]
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        if input_size[0] is None:
            adjust = (1, 1)
        else:
            adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
        correct = (kernel_size[0] // 2, kernel_size[1] // 2)
        return (
            (correct[0] - adjust[0], correct[0]),
            (correct[1] - adjust[1], correct[1]),
        )
    def __call__(self, ip, p):
        """Build the reduction cell; returns (cell output, new previous path)."""
        p = NASNetAdjustBlock(
            self.filters, self.momentum, self.epsilon, self.activation, self.use_bias
        )(p, ip)
        h = layers.Activation(self.activation)(ip)
        h = layers.Conv2D(
            self.filters, (1, 1), strides=(1, 1), padding="same", use_bias=self.use_bias
        )(h)
        h = layers.BatchNormalization(
            momentum=self.momentum,
            epsilon=self.epsilon,
        )(h)
        # Pre-padded copy of h for the 'valid'-padded stride-2 poolings below.
        h3 = layers.ZeroPadding2D(
            padding=self._correct_pad(h, 3),
        )(h)
        x1_1 = NASNetSeparableConvBlock(
            self.filters,
            (5, 5),
            stride=(2, 2),
            momentum=self.momentum,
            epsilon=self.epsilon,
            activation=self.activation,
            use_bias=self.use_bias,
        )(h)
        x1_2 = NASNetSeparableConvBlock(
            self.filters,
            (7, 7),
            stride=(2, 2),
            momentum=self.momentum,
            epsilon=self.epsilon,
            activation=self.activation,
            use_bias=self.use_bias,
        )(p)
        x1 = layers.add([x1_1, x1_2])
        x2_1 = layers.MaxPooling2D((3, 3), strides=(2, 2), padding="valid")(h3)
        x2_2 = NASNetSeparableConvBlock(
            self.filters,
            (7, 7),
            stride=(2, 2),
            momentum=self.momentum,
            epsilon=self.epsilon,
            activation=self.activation,
            use_bias=self.use_bias,
        )(p)
        x2 = layers.add([x2_1, x2_2])
        x3_1 = layers.AveragePooling2D((3, 3), strides=(2, 2), padding="valid")(h3)
        x3_2 = NASNetSeparableConvBlock(
            self.filters,
            (5, 5),
            stride=(2, 2),
            momentum=self.momentum,
            epsilon=self.epsilon,
            activation=self.activation,
            use_bias=self.use_bias,
        )(p)
        x3 = layers.add([x3_1, x3_2])
        x4 = layers.AveragePooling2D((3, 3), strides=(1, 1), padding="same")(x1)
        x4 = layers.add([x2, x4])
        x5_1 = NASNetSeparableConvBlock(
            self.filters,
            (3, 3),
            momentum=self.momentum,
            epsilon=self.epsilon,
            activation=self.activation,
            use_bias=self.use_bias,
        )(x1)
        x5_2 = layers.MaxPooling2D(
            (3, 3),
            strides=(2, 2),
            padding="valid",
        )(h3)
        x5 = layers.add([x5_1, x5_2])
        x = layers.concatenate([x2, x3, x4, x5])
        return x, ip
class MobileNetConvBlock(layers.Layer):
    """Adds an initial convolution layer with batch normalization and activation.

    Args:
        filters (int): filters of the conv layer (before applying `alpha`)
        alpha (float): controls the width of the network
            - If `alpha` < 1.0, proportionally decreases the number of filters in each layer
            - If `alpha` > 1.0, proportionally increases the number of filters in each layer
            - If `alpha` = 1, default number of filters from the paper are used at each layer
        kernel (tuple of two int): kernel size of the conv layer, default: (3, 3)
        strides (tuple of two int): stride of the conv layer, default: (1, 1)
        activation (keras Activation): activation applied after batch normalization, default: relu6
        use_bias (bool): whether the convolution layer uses a bias vector, default: False
    """
    def __init__(
        self,
        filters,
        alpha,
        kernel=(3, 3),
        strides=(1, 1),
        activation=relu6,
        use_bias=False,
    ):
        super().__init__()
        self.filters = filters
        self.alpha = alpha
        self.kernel = kernel
        self.strides = strides
        self.activation = activation
        self.use_bias = use_bias
    def __call__(self, inputs):
        """Apply width-scaled conv -> batch norm -> activation."""
        # Width multiplier scales the filter count.
        filters = int(self.filters * self.alpha)
        # Fix: removed the dead `x = inputs` assignment; the convolution was
        # already applied to `inputs` directly.
        x = layers.Conv2D(
            filters,
            self.kernel,
            padding="same",
            use_bias=self.use_bias,
            strides=self.strides,
        )(inputs)
        x = layers.BatchNormalization()(x)
        return layers.Activation(self.activation)(x)
class MobileNetDepthWiseConvBlock(layers.Layer):
    """A MobileNet depthwise-separable convolution block.

    The block is: depthwise conv -> batch norm -> activation ->
    1x1 pointwise conv -> batch norm -> activation.
    Args:
        pointwise_conv_filters (int): filters in the pointwise convolution
        alpha (float): controls the width of the network
            - If `alpha` < 1.0, proportionally decreases the number of filters in each layer
            - If `alpha` > 1.0, proportionally increases the number of filters in each layer
            - If `alpha` = 1, default number of filters from the paper are used at each layer
        depth_multiplier (int): number of depthwise convolution output channels per input channel, default: 1
        strides (tuple of two int): stride of the depthwise conv layer, default: (1, 1)
        activation (keras Activation): activation applied after each batch normalization, default: relu6
        use_bias (bool): whether the convolution layers use a bias vector, default: False
    """
    def __init__(
        self,
        pointwise_conv_filters,
        alpha,
        depth_multiplier=1,
        strides=(1, 1),
        activation=relu6,
        use_bias=False,
    ):
        super().__init__()
        self.pointwise_conv_filters = pointwise_conv_filters
        self.alpha = alpha
        self.depth_multiplier = depth_multiplier
        self.strides = strides
        self.activation = activation
        self.use_bias = use_bias
    def __call__(self, inputs):
        """Apply the depthwise stage followed by the pointwise stage."""
        n_pointwise = int(self.pointwise_conv_filters * self.alpha)
        unit_stride = self.strides == (1, 1)
        # Strided variants pad explicitly and convolve with 'valid' padding.
        x = inputs if unit_stride else layers.ZeroPadding2D(((0, 1), (0, 1)))(inputs)
        x = layers.DepthwiseConv2D(
            (3, 3),
            padding="same" if unit_stride else "valid",
            depth_multiplier=self.depth_multiplier,
            strides=self.strides,
            use_bias=self.use_bias,
        )(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation(self.activation)(x)
        x = layers.Conv2D(
            n_pointwise,
            (1, 1),
            padding="same",
            use_bias=self.use_bias,
            strides=(1, 1),
        )(x)
        x = layers.BatchNormalization()(x)
        return layers.Activation(self.activation)(x)
class InvertedResBlock(layers.Layer):
    """Inverted ResNet block (MobileNetV2-style: expand -> depthwise -> project).

    Args:
        filters (int): filters of the conv layer
        alpha (float): controls the width of the network
            - If `alpha` < 1.0, proportionally decreases the number of filters in each layer
            - If `alpha` > 1.0, proportionally increases the number of filters in each layer
            - If `alpha` = 1, default number of filters from the paper are used at each layer
        stride (int): stride of the conv layer, default: (1, 1)
        expansion (float): multiplier on the input channel count used for the
            expand phase (block widens to `expansion * in_channels`)
        activation (keras Activation): activation applied after batch normalization, default: relu6
        use_bias (bool): whether the convolution layers use a bias vector, default: False
        momentum (float): momentum for the moving average in batch normalization, default: 0.999
        epsilon: (float): Small float added to variance to avoid dividing by zero in
            batch normalisation, default: 1e-3
        se_ratio: (float): squeeze-and-excite ratio; when truthy an SEBlock is
            inserted after the depthwise stage, default: None
    """
    def __init__(
        self,
        filters,
        alpha,
        expansion,
        stride=(1, 1),
        activation=relu6,
        use_bias=False,
        momentum=0.999,
        epsilon=1e-3,
        se_ratio=None,
    ):
        super().__init__()
        self.filters = filters
        self.alpha = alpha
        self.expansion = expansion
        self.stride = stride
        self.activation = activation
        self.use_bias = use_bias
        self.momentum = momentum
        self.epsilon = epsilon
        self.se_ratio = se_ratio
    def _make_divisible(self, v, divisor, min_value=None):
        """Round `v` to the nearest multiple of `divisor`, at least `min_value`."""
        if min_value is None:
            min_value = divisor
        new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
        # Make sure that round down does not go down by more than 10%.
        if new_v < 0.9 * v:
            new_v += divisor
        return new_v
    def _correct_pad(self, inputs, kernel_size):
        """Returns a tuple for zero-padding for 2D convolution with downsampling.
        Args:
            inputs: Input tensor.
            kernel_size: An integer or tuple/list of 2 integers.
        Returns:
            A tuple.
        """
        input_size = inputs.shape[1:3]
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        if input_size[0] is None:
            adjust = (1, 1)
        else:
            adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
        correct = (kernel_size[0] // 2, kernel_size[1] // 2)
        return (
            (correct[0] - adjust[0], correct[0]),
            (correct[1] - adjust[1], correct[1]),
        )
    def __call__(self, inputs):
        """Apply expand -> depthwise -> (optional SE) -> project, with a
        residual shortcut when shapes allow it."""
        x = inputs
        in_channels = inputs.shape[-1]
        pointwise_conv_filters = int(self.filters * self.alpha)
        # Output channels are rounded to a multiple of 8.
        pointwise_filters = self._make_divisible(pointwise_conv_filters, 8)
        # Expand
        x = layers.Conv2D(
            self.expansion * in_channels,
            kernel_size=1,
            padding="same",
            use_bias=self.use_bias,
            activation=None,
        )(x)
        x = layers.BatchNormalization(
            epsilon=self.epsilon,
            momentum=self.momentum,
        )(x)
        x = layers.Activation(self.activation)(x)
        # Depthwise
        if self.stride == 2 or self.stride == (2, 2):
            # Pad explicitly so the strided 'valid' depthwise conv downsamples cleanly.
            x = layers.ZeroPadding2D(
                padding=self._correct_pad(x, 3),
            )(x)
        x = layers.DepthwiseConv2D(
            kernel_size=3,
            strides=self.stride,
            activation=None,
            use_bias=self.use_bias,
            padding="same" if self.stride == (1, 1) or self.stride == 1 else "valid",
        )(x)
        x = layers.BatchNormalization(
            epsilon=self.epsilon,
            momentum=self.momentum,
        )(x)
        x = layers.Activation(self.activation)(x)
        if self.se_ratio:
            # Squeeze-and-excite on the expanded representation.
            x = SEBlock(
                self._make_divisible(in_channels * self.expansion, 8), self.se_ratio
            )(x)
        # Project
        x = layers.Conv2D(
            pointwise_filters,
            kernel_size=1,
            padding="same",
            use_bias=self.use_bias,
            activation=None,
        )(x)
        x = layers.BatchNormalization(
            epsilon=self.epsilon,
            momentum=self.momentum,
        )(x)
        # Residual connection only when the shapes match (no downsampling,
        # same channel count).
        if in_channels == pointwise_filters and (
            self.stride == 1 or self.stride == (1, 1)
        ):
            return layers.Add()([inputs, x])
        return x
class SEBlock(layers.Layer):
    """Adds a Squeeze-Excite block.

    Args:
        filters (int): number of input filters
        se_ratio (float): parameter for the squeeze-and-excite bottleneck width
        activation (keras Activation): gating activation applied to the excite
            branch, default: hard_sigmoid
    """
    def __init__(self, filters, se_ratio, activation=hard_sigmoid):
        # Fix: the Layer base class was never initialized; Keras requires
        # super().__init__() in Layer subclasses for proper bookkeeping.
        super().__init__()
        self.filters = filters
        self.se_ratio = se_ratio
        self.activation = activation
    def _depth(self, v, divisor=8, min_value=None):
        """Round `v` to the nearest multiple of `divisor`, at least `min_value`."""
        if min_value is None:
            min_value = divisor
        new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
        # Make sure that round down does not go down by more than 10%.
        if new_v < 0.9 * v:
            new_v += divisor
        return new_v
    def __call__(self, inputs):
        """Squeeze (global pool) -> excite (two 1x1 convs) -> rescale inputs."""
        x = inputs
        x = layers.GlobalAveragePooling2D()(x)
        x = layers.Reshape((1, 1, self.filters))(x)
        x = layers.Conv2D(
            self._depth(self.filters * self.se_ratio), kernel_size=1, padding="same"
        )(x)
        x = layers.ReLU()(x)
        x = layers.Conv2D(self.filters, kernel_size=1, padding="same")(x)
        x = layers.Activation(self.activation)(x)
        x = layers.Multiply()([inputs, x])
        return x
|
###################################################
## ##
## This file is part of the KinBot code v2.0 ##
## ##
## The contents are covered by the terms of the ##
## BSD 3-clause license included in the LICENSE ##
## file, found at the root. ##
## ##
## Copyright 2018 National Technology & ##
## Engineering Solutions of Sandia, LLC (NTESS). ##
## Under the terms of Contract DE-NA0003525 with ##
## NTESS, the U.S. Government retains certain ##
## rights to this software. ##
## ##
## Authors: ##
## <NAME> ##
## <NAME> ##
## ##
###################################################
"""
This class modifies a given geometry according to a set of coordinates that
need to have a new value assigned.
The optimization is done based on interatomic distances only
The deviations of the distances are weighted by the inverse of the distance itself
"""
import sys, os, copy
import numpy as np
sys.dont_write_bytecode = True
import ase
from ase import Atoms
from ase.io import read,write,Trajectory
from ase.calculators.singlepoint import SinglePointCalculator
from BFGS import *
from geom import *
from stationary_pt import *
from cheminfo import *
from constants import *
from motif import *
from zmat import *
class cost_function():
    """Weighted least-squares cost on squared interatomic distances.

    Each entry of `coords` is `[i, j, d, weight, ...]` where `d` is the
    target *squared* distance between atoms `i` and `j` and `weight` scales
    that pair's contribution. The argument `x` to `eval`/`gradient` is a flat
    vector [x1, y1, z1, ..., xN, yN, zN].
    """
    def __init__(self, coords):
        self.coords = coords
    def eval(self, x):
        """Return the scalar cost sum(((|ri - rj|^2 - d) * w)^2)."""
        total = 0
        for i, j, d, weight in (c[:4] for c in self.coords):
            dx = x[3 * i] - x[3 * j]
            dy = x[3 * i + 1] - x[3 * j + 1]
            dz = x[3 * i + 2] - x[3 * j + 2]
            dist_sq = dx * dx + dy * dy + dz * dz
            total += ((dist_sq - d) * weight) ** 2
        return total
    def gradient(self, x):
        """Return the gradient of the cost with respect to x.

        NOTE(review): the weight enters linearly here but is squared in
        `eval`, so this is not the exact analytic gradient of `eval` unless
        weight == 1 — confirm which of the two is intended.
        """
        grad = np.zeros(len(x))
        for i, j, d, weight in (c[:4] for c in self.coords):
            diffs = [x[3 * i + k] - x[3 * j + k] for k in range(3)]
            dist_sq = sum(delta * delta for delta in diffs)
            common = 4 * (dist_sq - d) * weight
            for k, delta in enumerate(diffs):
                grad[3 * i + k] += common * delta
                grad[3 * j + k] -= common * delta
        return grad
def append_geom(natom, step, new_e, atom, x_new, grad, atoms_list, f_out=None):
    """Append the current geometry (with energy and forces) to `atoms_list`.

    Optionally writes an xyz frame to the open file handle `f_out`.
    Returns the incremented step counter.
    """
    # Fix: compare to None with `is not` (identity), not `!=`.
    if f_out is not None:
        f_out.write('%s\nPoint %i Energy= %f\n'%(natom,step,new_e))
        positions = np.reshape(x_new, (natom, 3))
        for at in range(natom):
            f_out.write(atom[at] + ' ')
            # Fix: replaced a side-effect list comprehension with a loop.
            for i in range(3):
                f_out.write(str(positions[at][i]) + ' ')
            f_out.write('\n')
    step += 1
    atoms = Atoms(symbols = atom, positions = np.reshape(x_new, (natom,3)))
    # Forces are scaled by 10 for visualization; NOTE(review): confirm the
    # intended scaling factor.
    calc = SinglePointCalculator(atoms, energy= new_e, forces= 10. * np.reshape(grad,(natom,3)))
    atoms.set_calculator(calc)
    atoms_list.append(atoms)
    return step
def modify_coordinates(species,name,geom,changes,bond,natom,atom):
    """
    Modify a cartesian geometry so that the requested internal coordinates
    take their new values; the rest of the structure is relaxed via a
    distance-based least-squares optimization.

    Geom is the geometry (n x 3 matrix with n the number of atoms)
    in cartesian coordinates
    Changes is a list of lists, each list containing the coordinates
    and their new value (atom indices start at 0):
    To change a bond length: [atom1, atom2, bond_length]
    To change a bond angle: [neighbor1, central_atom, neighbor2,
    angle_in_degrees]
    To change a dihedral angle: [atom1, atom2, atom3, atom4,
    dihedral_angle_in_degrees]
    Bond is the bond matrix of the molecule
    Returns the new geometry (n x 3).
    """
    # NOTE(review): `status` can be set to 0 below but is never returned;
    # callers cannot detect a failed atom division.
    status = 1
    step = 1
    atoms_list = []
    # find a file name that does not exist yet for the (currently disabled)
    # xyz trajectory dump
    count = 0
    fname = '{}_{}.xyz'.format(name,count)
    while os.path.exists(fname):
        count += 1
        fname = '{}_{}.xyz'.format(name,count)
    #f_out = open(fname,'w')
    f_out = None
    new_geom = copy.deepcopy(geom)
    step = append_geom(natom,step,0.,atom,new_geom,np.zeros((natom*3)),atoms_list,f_out = f_out)
    #change dihedrals, if necessary
    for ci in changes:
        if len(ci) == 5:
            # Build a z-matrix rooted on the four dihedral atoms, shift the
            # dihedral, and propagate the same shift to dependent dihedrals
            # (reference types 1 and 4).
            zmat_atom, zmat_ref, zmat, zmatorder = make_zmat_from_cart(species, ci[:-1], natom, atom, new_geom, 2)
            orig_dih = zmat[3][2]
            new_dih = ci[-1]
            dih_diff = new_dih - orig_dih
            zmat[3][2] += dih_diff
            for i in range(4, natom):
                if zmat_ref[i][2] == 4:
                    zmat[i][2] += dih_diff
                if zmat_ref[i][2] == 1:
                    zmat[i][2] += dih_diff
            new_geom = make_cart_from_zmat(zmat, zmat_atom, zmat_ref, natom, atom, zmatorder)
            step = append_geom(natom,step,0.,atom,new_geom,np.zeros((natom*3)),atoms_list,f_out = f_out)
        #change angles, if necessary
        if len(ci) == 4:
            # original angle in radians
            orig_angle = calc_angle(new_geom[ci[0]],new_geom[ci[1]],new_geom[ci[2]])
            new_angle = np.radians(ci[-1]) #new angle in radians
            v1 = new_geom[ci[0]] - new_geom[ci[1]]
            v2 = new_geom[ci[2]] - new_geom[ci[1]]
            rot_ax = [0.,0.,0.]
            #create a vector perpendicular to v1 and v2
            #verify if points are collinear
            if np.linalg.norm(np.cross(v1,v2)) == 0:
                #rotate around any axis perpendicular to the axis along the three points:
                if v1[0] != 0 or v1[1] != 0:
                    rot_ax = [v1[1],-v1[0],0.]
                elif v1[0] != 0 or v1[2] != 0:
                    rot_ax = [v1[2],0.,-v1[0]]
                else:
                    rot_ax = [1.,0.,0.]
            else:
                rot_ax = np.cross(v1,v2)
            rot_ax = rot_ax/np.linalg.norm(rot_ax)
            #rotate all the atoms on the side of the last atom
            st, ats, ats2 = divide_atoms(ci[2],ci[1],bond,natom,atom)
            if not st:
                status = 0
                break
            for atj in ats:
                new_geom[atj] = perform_rotation(new_geom[atj],new_geom[ci[1]],rot_ax,new_angle-orig_angle)
            step = append_geom(natom,step,1.,atom,new_geom,np.zeros((natom*3)),atoms_list,f_out = f_out)
    #change bond lengths and angles, if necessary
    if any([len(ci) == 3 or len(ci) == 4 for ci in changes]):
        """
        #first only change the dedicated bond lengths
        coords = get_coords(natom,bond,new_geom,changes,1)
        #optimize the geometry to meet the coords list
        x0 = np.reshape(new_geom,3*natom)
        cost_fct = cost_function(coords)
        opt = bfgs()
        x_opt, x_i = opt.optimize(cost_fct,x0)
        new_geom = np.reshape(x_opt,(natom, 3))
        for xi in x_i:
            gi = np.reshape(xi,(natom, 3))
            step = append_geom(natom,step,0.,atom,gi,np.zeros((natom*3)),atoms_list,f_out = f_out)
        """
        # BFGS refinement of the full geometry against all target distances
        coords = get_coords(natom,bond,new_geom,changes,0)
        #optimize the geometry to meet the coords list
        x0 = np.reshape(new_geom,3*natom)
        cost_fct = cost_function(coords)
        opt = bfgs()
        x_opt, x_i = opt.optimize(cost_fct,x0)
        new_geom = np.reshape(x_opt,(natom, 3))
        for xi in x_i:
            gi = np.reshape(xi,(natom, 3))
            step = append_geom(natom,step,2.,atom,gi,np.zeros((natom*3)),atoms_list,f_out = f_out)
    #write(fname.replace('.xyz','.traj'),atoms_list)
    #f_out.close()
    return new_geom
def get_coords(natom, bond, geom, changes, mode):
    """List the (N*(N-1))/2 candidate interatomic distances with target values.

    mode = 0: include all interatomic distances
    mode = 1: only include the interatomic distances that need to be changed
    Targets are stored as *squared* distances.
    """
    coords = []
    for i in range(natom - 1):
        for j in range(i + 1, natom):
            # Find a requested change pinning exactly this pair (the last
            # match wins), and whether either atom appears in any change.
            change = None
            involved = False
            for ci in changes:
                if sorted([ci[0], ci[-2]]) == [i, j]:
                    change = ci
                if i in ci or j in ci:
                    involved = True
            if change is not None:
                if len(change) == 3:
                    # explicit bond-length target
                    coords.append([i, j, change[-1] ** 2, 1., 0])
                elif len(change) == 4:
                    # distance implied by the requested angle (law of cosines)
                    b1 = np.linalg.norm(geom[i] - geom[change[1]])
                    b2 = np.linalg.norm(geom[j] - geom[change[1]])
                    a = np.radians(change[-1])
                    d = b1 ** 2 + b2 ** 2 - 2 * b1 * b2 * np.cos(a)
                    # NOTE(review): this entry has 4 elements (no trailing
                    # flag) unlike the 5-element entries elsewhere — confirm
                    # whether the 5th element is ever read downstream.
                    coords.append([i, j, d, 10])
            elif mode == 0:
                d = np.linalg.norm(geom[i] - geom[j]) ** 2
                if np.sqrt(d) < 4.:  # cutoff of 4 angstrom
                    if bond[i][j] > 0:
                        coords.append([i, j, d, 1. / d, 1])  # bonds weigh more
                    elif not involved:
                        coords.append([i, j, d, .5 / d, 1])
    return coords
def divide_atoms(ati, atj, bond, natom, atom, forbidden=None):
    """
    This method divides the atoms in a molecule in two sets,
    which are separated by a bond (ati on one side, atj on the other).
    In the case of rings, the atoms are equally divided in the two sets,
    which will change the bond length of the bond furthest away from
    the given bond.
    Be careful when using this method for cyclic structures!
    Returns (status, atoms_on_ati_side, remaining_atoms).
    """
    # Fix: the default used to be a shared mutable list (`forbidden=[]`)
    # that was appended to, leaking state between calls.
    if forbidden is None:
        forbidden = []
    status = 1
    if bond[ati,atj] == 0:
        return 0, [ati], []
    #Get all the atoms on the side of ati
    visited = [ati]
    forbidden.append(atj)
    division = [ati]
    # check for cycles and cut them in half
    for ring_size in range(3,natom+1):
        motif = ['X' for at in range(ring_size)]
        inst = start_motif(motif,natom,bond,atom,-1,[])
        for ins in inst:
            if bond[ins[0]][ins[-1]] > 0:
                #cycle found: forbid the atom halfway around the ring.
                # Fix: `ring_size / 2` is a float in Python 3 and raised a
                # TypeError when used as a list index; floor division below
                # reproduces the original Python-2 integer-division indices.
                if ins[0] == ati and ins[-1] == atj:
                    forbidden.append(ins[ring_size // 2])
                if ins[0] == atj and ins[-1] == ati:
                    forbidden.append(ins[(-ring_size) // 2 - 1])
        if len(inst) == 0:
            break
    get_neighbors(ati,visited,forbidden,division,bond,natom)
    division2 = [x for x in range(natom) if x not in division]
    return status, division, division2
def get_neighbors(ati, visited, forbidden, division, bond, natom):
    """Recursively grow `division` (and `visited`) with every atom bonded,
    directly or indirectly, to `ati`, never crossing atoms in `forbidden`."""
    for candidate in range(natom):
        if candidate in visited or candidate in forbidden:
            continue
        if bond[candidate, ati] <= 0:
            continue
        division.append(candidate)
        visited.append(candidate)
        get_neighbors(candidate, visited, forbidden, division, bond, natom)
def perform_rotation(at, center, axis, angle):
    """Rotate point `at` by `angle` (radians) around `axis` through `center`.

    Returns the rotated point as a new array. Fix: the previous version
    shifted the caller's array in place (`at -= center`) and, because `at`
    was rebound by the matrix product before the back-translation, the
    caller's array was left permanently shifted by -center.
    """
    # work on copies so the caller's arrays are never mutated
    at = np.asarray(at, dtype=float) - center
    axis = np.asarray(axis, dtype=float)
    # rotation matrix from the Euler-Rodrigues (quaternion) formula
    a = math.cos(angle/2)
    b, c, d = -axis * math.sin(angle/2)
    aa, bb, cc, dd = a*a, b*b, c*c, d*d
    bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d
    rot_matrix = np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],
                           [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],
                           [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])
    # rotate about the origin, then translate back to the original frame
    return np.dot(rot_matrix, at) + center
def main():
    """Demo driver: build the S[CH2] radical, characterize it, and move two
    bond lengths to new target values via `modify_coordinates`."""
    smi = 'S[CH2]'
    obmol, structure = generate_3d_structure(smi)
    mult = 2    # doublet radical
    charge = 0
    natom = len(obmol.atoms)
    # structure rows are [symbol, x, y, z]
    structure = np.reshape(structure, (natom,4))
    atom = structure[:,0]
    geom = structure[:,1:4].astype(float)
    well0 = stationary_pt('well0')
    well0.geom = geom
    # Fix: pass the named locals instead of repeating the literals 2, 0.
    well0.characterize(natom, atom, mult, charge)
    well0.bond_mx(natom,atom)
    bond = well0.bond
    geom = [np.array(gi) for gi in geom]
    # target bond lengths (angstrom) for atom pairs (1,2) and (0,2)
    changes = [
        [1, 2, 1.0972959660511175],
        [0, 2, 2.4604284873750513],
    ]
    geom = modify_coordinates(well0,'test',geom,changes,bond,natom,atom)
    """
    changes = [
        [0,1,2,90.],
        [1,2,3,90.],
    ]
    geom = modify_coordinates(well0,'test',geom,changes,bond,natom,atom)
    """
if __name__ == "__main__":
main() |
<filename>menpo/landmark/labels/human/face.py
from collections import OrderedDict
import numpy as np
from ..base import (
validate_input, connectivity_from_array, pcloud_and_lgroup_from_ranges,
connectivity_from_range, labeller_func)
@labeller_func(group_label='face_ibug_68')
def face_ibug_68_to_face_ibug_68(pcloud):
    r"""
    Apply the IBUG 68-point semantic labels.
    The semantic labels are as follows:
      - jaw
      - left_eyebrow
      - right_eyebrow
      - nose
      - left_eye
      - right_eye
      - mouth
    References
    ----------
    .. [1] http://www.multipie.org/
    .. [2] http://ibug.doc.ic.ac.uk/resources/300-W/
    """
    from menpo.shape import LabelledPointUndirectedGraph

    n_expected_points = 68
    validate_input(pcloud, n_expected_points)

    # Index ranges for each semantic region of the 68-point markup.
    jaw_indices = np.arange(0, 17)
    lbrow_indices = np.arange(17, 22)
    rbrow_indices = np.arange(22, 27)
    upper_nose_indices = np.arange(27, 31)
    lower_nose_indices = np.arange(31, 36)
    leye_indices = np.arange(36, 42)
    reye_indices = np.arange(42, 48)
    outer_mouth_indices = np.arange(48, 60)
    inner_mouth_indices = np.arange(60, 68)

    # Open polylines for jaw/brows/nose; closed loops for eyes and lips.
    all_connectivity = np.vstack([
        connectivity_from_array(jaw_indices),
        connectivity_from_array(lbrow_indices),
        connectivity_from_array(rbrow_indices),
        connectivity_from_array(upper_nose_indices),
        connectivity_from_array(lower_nose_indices),
        connectivity_from_array(leye_indices, close_loop=True),
        connectivity_from_array(reye_indices, close_loop=True),
        connectivity_from_array(outer_mouth_indices, close_loop=True),
        connectivity_from_array(inner_mouth_indices, close_loop=True),
    ])

    mapping = OrderedDict([
        ('jaw', jaw_indices),
        ('left_eyebrow', lbrow_indices),
        ('right_eyebrow', rbrow_indices),
        ('nose', np.hstack((upper_nose_indices, lower_nose_indices))),
        ('left_eye', leye_indices),
        ('right_eye', reye_indices),
        ('mouth', np.hstack((outer_mouth_indices, inner_mouth_indices))),
    ])

    new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
        pcloud.points, all_connectivity, mapping)

    return new_pcloud, mapping
@labeller_func(group_label='face_ibug_68')
def face_ibug_68_mirrored_to_face_ibug_68(pcloud):
    r"""
    Apply the IBUG 68-point semantic labels, on a pointcloud that has been
    mirrored around the vertical axis (flipped around the Y-axis). Thus, on
    the flipped image the jaw etc would be the wrong way around. This
    rectifies that and returns a new PointCloud whereby all the points
    are oriented correctly.
    The semantic labels applied are as follows:
      - jaw
      - left_eyebrow
      - right_eyebrow
      - nose
      - left_eye
      - right_eye
      - mouth
    References
    ----------
    .. [1] http://www.multipie.org/
    .. [2] http://ibug.doc.ic.ac.uk/resources/300-W/
    """
    new_pcloud, old_map = face_ibug_68_to_face_ibug_68(pcloud,
                                                       return_mapping=True)
    # Permutation mapping mirrored landmark indices back to the canonical
    # IBUG ordering: left/right regions are swapped and each open contour is
    # reversed. np.roll re-aligns the starting vertex of the closed regions
    # (eyes, lips) after reversal — the shift amounts (4, 4, 7, 5) are
    # presumably chosen to restore the canonical start corner of each loop;
    # confirm against the 68-point markup diagram.
    lms_map = np.hstack([old_map['jaw'][::-1],
                         old_map['right_eyebrow'][::-1],
                         old_map['left_eyebrow'][::-1],
                         old_map['nose'][:4],
                         old_map['nose'][4:][::-1],
                         np.roll(old_map['right_eye'][::-1], 4),
                         np.roll(old_map['left_eye'][::-1], 4),
                         np.roll(old_map['mouth'][:12][::-1], 7),
                         np.roll(old_map['mouth'][12:][::-1], 5)])
    return new_pcloud.from_vector(pcloud.points[lms_map]), old_map
@labeller_func(group_label='face_ibug_66')
def face_ibug_68_to_face_ibug_66(pcloud):
    r"""
    Apply the IBUG 66-point semantic labels: the 68-point markup with the
    2 points describing the inner mouth corners removed.

    The semantic labels applied are as follows:

      - jaw
      - left_eyebrow
      - right_eyebrow
      - nose
      - left_eye
      - right_eye
      - mouth

    References
    ----------
    .. [1] http://www.multipie.org/
    .. [2] http://ibug.doc.ic.ac.uk/resources/300-W/
    """
    from menpo.shape import LabelledPointUndirectedGraph

    validate_input(pcloud, 68)

    # Region index ranges, expressed in terms of the resulting 66 points.
    jaw = np.arange(0, 17)
    left_brow = np.arange(17, 22)
    right_brow = np.arange(22, 27)
    nose_bridge = np.arange(27, 31)
    nose_base = np.arange(31, 36)
    left_eye = np.arange(36, 42)
    right_eye = np.arange(42, 48)
    outer_lips = np.arange(48, 60)
    # The inner lip loop reuses the outer mouth corners (48 and 54).
    inner_lips = np.hstack((48, np.arange(60, 63), 54, np.arange(63, 66)))

    edges = np.vstack([
        connectivity_from_array(jaw),
        connectivity_from_array(left_brow),
        connectivity_from_array(right_brow),
        connectivity_from_array(nose_bridge),
        connectivity_from_array(nose_base),
        connectivity_from_array(left_eye, close_loop=True),
        connectivity_from_array(right_eye, close_loop=True),
        connectivity_from_array(outer_lips, close_loop=True),
        connectivity_from_array(inner_lips, close_loop=True),
    ])

    mapping = OrderedDict([
        ('jaw', jaw),
        ('left_eyebrow', left_brow),
        ('right_eyebrow', right_brow),
        ('nose', np.hstack([nose_bridge, nose_base])),
        ('left_eye', left_eye),
        ('right_eye', right_eye),
        ('mouth', np.hstack([outer_lips, inner_lips])),
    ])

    # Drop the two inner mouth corner points (indices 60 and 64 of the
    # 68-point markup).
    keep = np.hstack((np.arange(60), np.arange(61, 64), np.arange(65, 68)))
    new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
        pcloud.points[keep], edges, mapping)
    return new_pcloud, mapping
@labeller_func(group_label='face_ibug_51')
def face_ibug_68_to_face_ibug_51(pcloud):
    r"""
    Apply the IBUG 51-point semantic labels: the 68-point markup with the
    annotations of the jaw region removed.

    The semantic labels applied are as follows:

      - left_eyebrow
      - right_eyebrow
      - nose
      - left_eye
      - right_eye
      - mouth

    References
    ----------
    .. [1] http://www.multipie.org/
    .. [2] http://ibug.doc.ic.ac.uk/resources/300-W/
    """
    from menpo.shape import LabelledPointUndirectedGraph

    validate_input(pcloud, 68)

    # Region index ranges, expressed in terms of the resulting 51 points.
    left_brow = np.arange(0, 5)
    right_brow = np.arange(5, 10)
    nose_bridge = np.arange(10, 14)
    nose_base = np.arange(14, 19)
    left_eye = np.arange(19, 25)
    right_eye = np.arange(25, 31)
    outer_lips = np.arange(31, 43)
    inner_lips = np.arange(43, 51)

    edges = np.vstack([
        connectivity_from_array(left_brow),
        connectivity_from_array(right_brow),
        connectivity_from_array(nose_bridge),
        connectivity_from_array(nose_base),
        connectivity_from_array(left_eye, close_loop=True),
        connectivity_from_array(right_eye, close_loop=True),
        connectivity_from_array(outer_lips, close_loop=True),
        connectivity_from_array(inner_lips, close_loop=True),
    ])

    mapping = OrderedDict([
        ('left_eyebrow', left_brow),
        ('right_eyebrow', right_brow),
        ('nose', np.hstack([nose_bridge, nose_base])),
        ('left_eye', left_eye),
        ('right_eye', right_eye),
        ('mouth', np.hstack([outer_lips, inner_lips])),
    ])

    # Keep everything except the 17 jaw points of the 68-point input.
    keep = np.arange(17, 68)
    new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
        pcloud.points[keep], edges, mapping)
    return new_pcloud, mapping
@labeller_func(group_label='face_ibug_49')
def face_ibug_49_to_face_ibug_49(pcloud):
    r"""
    Apply the IBUG 49-point semantic labels.

    The semantic labels applied are as follows:

      - left_eyebrow
      - right_eyebrow
      - nose
      - left_eye
      - right_eye
      - mouth

    References
    ----------
    .. [1] http://www.multipie.org/
    .. [2] http://ibug.doc.ic.ac.uk/resources/300-W/
    """
    from menpo.shape import LabelledPointUndirectedGraph

    validate_input(pcloud, 49)

    left_brow = np.arange(0, 5)
    right_brow = np.arange(5, 10)
    nose_bridge = np.arange(10, 14)
    nose_base = np.arange(14, 19)
    left_eye = np.arange(19, 25)
    right_eye = np.arange(25, 31)
    outer_lips = np.arange(31, 43)
    # The inner lip loop reuses the outer mouth corners (31 and 37).
    inner_lips = np.hstack((31, np.arange(43, 46), 37, np.arange(46, 49)))

    edges = np.vstack([
        connectivity_from_array(left_brow),
        connectivity_from_array(right_brow),
        connectivity_from_array(nose_bridge),
        connectivity_from_array(nose_base),
        connectivity_from_array(left_eye, close_loop=True),
        connectivity_from_array(right_eye, close_loop=True),
        connectivity_from_array(outer_lips, close_loop=True),
        connectivity_from_array(inner_lips, close_loop=True),
    ])

    mapping = OrderedDict([
        ('left_eyebrow', left_brow),
        ('right_eyebrow', right_brow),
        ('nose', np.hstack([nose_bridge, nose_base])),
        ('left_eye', left_eye),
        ('right_eye', right_eye),
        ('mouth', np.hstack([outer_lips, inner_lips])),
    ])

    new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
        pcloud.points, edges, mapping)
    return new_pcloud, mapping
@labeller_func(group_label='face_ibug_49')
def face_ibug_68_to_face_ibug_49(pcloud):
    r"""
    Apply the IBUG 49-point semantic labels: the 68-point markup with the
    jaw region and the 2 inner mouth corner points removed.

    The semantic labels applied are as follows:

      - left_eyebrow
      - right_eyebrow
      - nose
      - left_eye
      - right_eye
      - mouth

    References
    ----------
    .. [1] http://www.multipie.org/
    .. [2] http://ibug.doc.ic.ac.uk/resources/300-W/
    """
    from menpo.shape import LabelledPointUndirectedGraph

    validate_input(pcloud, 68)

    # Region index ranges, expressed in terms of the resulting 49 points.
    left_brow = np.arange(0, 5)
    right_brow = np.arange(5, 10)
    nose_bridge = np.arange(10, 14)
    nose_base = np.arange(14, 19)
    left_eye = np.arange(19, 25)
    right_eye = np.arange(25, 31)
    outer_lips = np.arange(31, 43)
    # The inner lip loop reuses the outer mouth corners (31 and 37).
    inner_lips = np.hstack((31, np.arange(43, 46), 37, np.arange(46, 49)))

    edges = np.vstack([
        connectivity_from_array(left_brow),
        connectivity_from_array(right_brow),
        connectivity_from_array(nose_bridge),
        connectivity_from_array(nose_base),
        connectivity_from_array(left_eye, close_loop=True),
        connectivity_from_array(right_eye, close_loop=True),
        connectivity_from_array(outer_lips, close_loop=True),
        connectivity_from_array(inner_lips, close_loop=True),
    ])

    mapping = OrderedDict([
        ('left_eyebrow', left_brow),
        ('right_eyebrow', right_brow),
        ('nose', np.hstack([nose_bridge, nose_base])),
        ('left_eye', left_eye),
        ('right_eye', right_eye),
        ('mouth', np.hstack([outer_lips, inner_lips])),
    ])

    # Drop the 17 jaw points and the two inner mouth corner points
    # (indices 60 and 64) of the 68-point input.
    keep = np.hstack((np.arange(17, 60), np.arange(61, 64), np.arange(65, 68)))
    new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
        pcloud.points[keep], edges, mapping)
    return new_pcloud, mapping
@labeller_func(group_label='face_ibug_68_trimesh')
def face_ibug_68_to_face_ibug_68_trimesh(pcloud):
    r"""
    Apply the IBUG 68-point semantic labels, with trimesh connectivity.

    The semantic labels applied are as follows:

      - tri

    References
    ----------
    .. [1] http://www.multipie.org/
    .. [2] http://ibug.doc.ic.ac.uk/resources/300-W/
    """
    from menpo.shape import TriMesh
    n_expected_points = 68
    validate_input(pcloud, n_expected_points)
    # Hand-crafted triangulation of the 68-point IBUG markup; each row is
    # one triangle given as indices into the 68 landmarks.
    tri_list = np.array([[47, 29, 28], [44, 43, 23], [38, 20, 21],
                         [47, 28, 42], [49, 61, 60], [40, 41, 37],
                         [37, 19, 20], [28, 40, 39], [38, 21, 39],
                         [36,  1,  0], [48, 59,  4], [49, 60, 48],
                         [67, 59, 60], [13, 53, 14], [61, 51, 62],
                         [57,  8,  7], [52, 51, 33], [61, 67, 60],
                         [52, 63, 51], [66, 56, 57], [35, 30, 29],
                         [53, 52, 35], [37, 36, 17], [18, 37, 17],
                         [37, 38, 40], [38, 37, 20], [19, 37, 18],
                         [38, 39, 40], [28, 29, 40], [41, 36, 37],
                         [27, 39, 21], [41, 31,  1], [30, 32, 31],
                         [33, 51, 50], [33, 30, 34], [31, 40, 29],
                         [36,  0, 17], [31,  2,  1], [31, 41, 40],
                         [ 1, 36, 41], [31, 49,  2], [ 2, 49,  3],
                         [60, 59, 48], [ 3, 49, 48], [31, 32, 50],
                         [48,  4,  3], [59,  5,  4], [58, 67, 66],
                         [ 5, 59, 58], [58, 59, 67], [ 7,  6, 58],
                         [66, 57, 58], [13, 54, 53], [ 7, 58, 57],
                         [ 6,  5, 58], [50, 61, 49], [62, 67, 61],
                         [31, 50, 49], [32, 33, 50], [30, 33, 32],
                         [34, 52, 33], [35, 52, 34], [53, 63, 52],
                         [62, 63, 65], [62, 51, 63], [66, 65, 56],
                         [63, 53, 64], [62, 66, 67], [62, 65, 66],
                         [57, 56,  9], [65, 63, 64], [ 8, 57,  9],
                         [ 9, 56, 10], [10, 56, 11], [11, 56, 55],
                         [11, 55, 12], [56, 65, 55], [55, 64, 54],
                         [55, 65, 64], [55, 54, 12], [64, 53, 54],
                         [12, 54, 13], [45, 46, 44], [35, 34, 30],
                         [14, 53, 35], [15, 46, 45], [27, 28, 39],
                         [27, 42, 28], [35, 29, 47], [30, 31, 29],
                         [15, 35, 46], [15, 14, 35], [43, 22, 23],
                         [27, 21, 22], [24, 44, 23], [44, 47, 43],
                         [43, 47, 42], [46, 35, 47], [26, 45, 44],
                         [46, 47, 44], [25, 44, 24], [25, 26, 44],
                         [16, 15, 45], [16, 45, 26], [22, 42, 43],
                         [50, 51, 61], [27, 22, 42]])
    new_pcloud = TriMesh(pcloud.points, trilist=tri_list)
    mapping = OrderedDict()
    # A trimesh labelling has a single semantic group covering every point.
    mapping['tri'] = np.arange(new_pcloud.n_points)
    return new_pcloud, mapping
@labeller_func(group_label='face_ibug_66_trimesh')
def face_ibug_68_to_face_ibug_66_trimesh(pcloud):
    r"""
    Apply the IBUG 66-point semantic labels, with trimesh connectivity.

    The semantic labels applied are as follows:

      - tri

    References
    ----------
    .. [1] http://www.multipie.org/
    .. [2] http://ibug.doc.ic.ac.uk/resources/300-W/
    """
    from menpo.shape import TriMesh
    # Apply face_ibug_68_to_face_ibug_66
    new_pcloud = face_ibug_68_to_face_ibug_66(pcloud)
    # This is in terms of the 66 points
    # NOTE(review): row [51, 51, 62] repeats a vertex and is therefore a
    # degenerate triangle — verify against the upstream triangulation.
    tri_list = np.array([[47, 29, 28], [44, 43, 23], [38, 20, 21],
                         [47, 28, 42], [40, 41, 37], [51, 62, 61],
                         [37, 19, 20], [28, 40, 39], [38, 21, 39],
                         [36,  1,  0], [48, 59,  4], [49, 60, 48],
                         [13, 53, 14], [60, 51, 61], [51, 51, 62],
                         [52, 51, 33], [49, 50, 60], [57,  7,  8],
                         [64, 56, 57], [35, 30, 29], [52, 62, 53],
                         [53, 52, 35], [37, 36, 17], [18, 37, 17],
                         [37, 38, 40], [38, 37, 20], [19, 37, 18],
                         [38, 39, 40], [28, 29, 40], [41, 36, 37],
                         [27, 39, 21], [41, 31,  1], [30, 32, 31],
                         [33, 51, 50], [33, 30, 34], [31, 40, 29],
                         [36,  0, 17], [31,  2,  1], [31, 41, 40],
                         [ 1, 36, 41], [31, 49,  2], [ 2, 49,  3],
                         [ 3, 49, 48], [31, 32, 50], [62, 53, 54],
                         [48,  4,  3], [59,  5,  4], [58, 65, 64],
                         [ 5, 59, 58], [58, 59, 65], [ 7,  6, 58],
                         [64, 57, 58], [13, 54, 53], [ 7, 58, 57],
                         [ 6,  5, 58], [63, 55, 54], [65, 59, 48],
                         [31, 50, 49], [32, 33, 50], [30, 33, 32],
                         [34, 52, 33], [35, 52, 34], [48, 60, 65],
                         [64, 63, 56], [60, 65, 61], [65, 64, 61],
                         [57, 56,  9], [ 8, 57,  9], [64, 63, 61],
                         [ 9, 56, 10], [10, 56, 11], [11, 56, 55],
                         [11, 55, 12], [56, 63, 55], [51, 52, 62],
                         [55, 54, 12], [63, 54, 62], [61, 62, 63],
                         [12, 54, 13], [45, 46, 44], [35, 34, 30],
                         [14, 53, 35], [15, 46, 45], [27, 28, 39],
                         [27, 42, 28], [35, 29, 47], [30, 31, 29],
                         [15, 35, 46], [15, 14, 35], [43, 22, 23],
                         [27, 21, 22], [24, 44, 23], [44, 47, 43],
                         [43, 47, 42], [46, 35, 47], [26, 45, 44],
                         [46, 47, 44], [25, 44, 24], [25, 26, 44],
                         [16, 15, 45], [16, 45, 26], [22, 42, 43],
                         [50, 60, 51], [27, 22, 42]])
    new_pcloud = TriMesh(new_pcloud.points, trilist=tri_list, copy=False)
    mapping = OrderedDict()
    # A trimesh labelling has a single semantic group covering every point.
    mapping['tri'] = np.arange(new_pcloud.n_points)
    return new_pcloud, mapping
@labeller_func(group_label='face_ibug_51_trimesh')
def face_ibug_68_to_face_ibug_51_trimesh(pcloud):
    r"""
    Apply the IBUG 51-point semantic labels, with trimesh connectivity.

    The semantic labels applied are as follows:

      - tri

    References
    ----------
    .. [1] http://www.multipie.org/
    .. [2] http://ibug.doc.ic.ac.uk/resources/300-W/
    """
    from menpo.shape import TriMesh
    # Apply face_ibug_68_to_face_ibug_51
    new_pcloud = face_ibug_68_to_face_ibug_51(pcloud)
    # This is in terms of the 51 points
    tri_list = np.array([[30, 12, 11], [27, 26,  6], [21,  3,  4],
                         [30, 11, 25], [32, 44, 43], [23, 24, 20],
                         [20,  2,  3], [11, 23, 22], [21,  4, 22],
                         [32, 43, 31], [50, 42, 43], [44, 34, 45],
                         [35, 34, 16], [44, 50, 43], [35, 46, 34],
                         [49, 39, 40], [18, 13, 12], [36, 35, 18],
                         [20, 19,  0], [ 1, 20,  0], [20, 21, 23],
                         [21, 20,  3], [ 2, 20,  1], [21, 22, 23],
                         [11, 12, 23], [24, 19, 20], [10, 22,  4],
                         [13, 15, 14], [16, 34, 33], [16, 13, 17],
                         [14, 23, 12], [14, 24, 23], [43, 42, 31],
                         [14, 15, 33], [41, 50, 49], [41, 42, 50],
                         [49, 40, 41], [33, 44, 32], [45, 50, 44],
                         [14, 33, 32], [15, 16, 33], [13, 16, 15],
                         [17, 35, 16], [18, 35, 17], [36, 46, 35],
                         [45, 46, 48], [45, 34, 46], [49, 48, 39],
                         [46, 36, 47], [45, 49, 50], [45, 48, 49],
                         [48, 46, 47], [39, 48, 38], [38, 47, 37],
                         [38, 48, 47], [47, 36, 37], [28, 29, 27],
                         [18, 17, 13], [10, 11, 22], [10, 25, 11],
                         [18, 12, 30], [13, 14, 12], [26,  5,  6],
                         [10,  4,  5], [ 7, 27,  6], [27, 30, 26],
                         [26, 30, 25], [29, 18, 30], [ 9, 28, 27],
                         [29, 30, 27], [ 8, 27,  7], [ 8,  9, 27],
                         [ 5, 25, 26], [33, 34, 44], [10,  5, 25]])
    new_pcloud = TriMesh(new_pcloud.points, trilist=tri_list, copy=False)
    mapping = OrderedDict()
    # A trimesh labelling has a single semantic group covering every point.
    mapping['tri'] = np.arange(new_pcloud.n_points)
    return new_pcloud, mapping
@labeller_func(group_label='face_ibug_49_trimesh')
def face_ibug_68_to_face_ibug_49_trimesh(pcloud):
    r"""
    Apply the IBUG 49-point semantic labels, with trimesh connectivity.

    The semantic labels applied are as follows:

      - tri

    References
    ----------
    .. [1] http://www.multipie.org/
    .. [2] http://ibug.doc.ic.ac.uk/resources/300-W/
    """
    from menpo.shape import TriMesh
    # Apply face_ibug_68_to_face_ibug_49
    new_pcloud = face_ibug_68_to_face_ibug_49(pcloud)
    # This is in terms of the 49 points
    # NOTE(review): indices in this table reach 65, but the pointcloud
    # produced above has only 49 points — the table appears to be copied
    # from the 66-point triangulation (it also carries its degenerate row
    # [51, 51, 62]). Verify against upstream before relying on this mesh.
    tri_list = np.array([[47, 29, 28], [44, 43, 23], [38, 20, 21],
                         [47, 28, 42], [40, 41, 37], [51, 62, 61],
                         [37, 19, 20], [28, 40, 39], [38, 21, 39],
                         [36,  1,  0], [48, 59,  4], [49, 60, 48],
                         [13, 53, 14], [60, 51, 61], [51, 51, 62],
                         [52, 51, 33], [49, 50, 60], [57,  7,  8],
                         [64, 56, 57], [35, 30, 29], [52, 62, 53],
                         [53, 52, 35], [37, 36, 17], [18, 37, 17],
                         [37, 38, 40], [38, 37, 20], [19, 37, 18],
                         [38, 39, 40], [28, 29, 40], [41, 36, 37],
                         [27, 39, 21], [41, 31,  1], [30, 32, 31],
                         [33, 51, 50], [33, 30, 34], [31, 40, 29],
                         [36,  0, 17], [31,  2,  1], [31, 41, 40],
                         [ 1, 36, 41], [31, 49,  2], [ 2, 49,  3],
                         [ 3, 49, 48], [31, 32, 50], [62, 53, 54],
                         [48,  4,  3], [59,  5,  4], [58, 65, 64],
                         [ 5, 59, 58], [58, 59, 65], [ 7,  6, 58],
                         [64, 57, 58], [13, 54, 53], [ 7, 58, 57],
                         [ 6,  5, 58], [63, 55, 54], [65, 59, 48],
                         [31, 50, 49], [32, 33, 50], [30, 33, 32],
                         [34, 52, 33], [35, 52, 34], [48, 60, 65],
                         [64, 63, 56], [60, 65, 61], [65, 64, 61],
                         [57, 56,  9], [ 8, 57,  9], [64, 63, 61],
                         [ 9, 56, 10], [10, 56, 11], [11, 56, 55],
                         [11, 55, 12], [56, 63, 55], [51, 52, 62],
                         [55, 54, 12], [63, 54, 62], [61, 62, 63],
                         [12, 54, 13], [45, 46, 44], [35, 34, 30],
                         [14, 53, 35], [15, 46, 45], [27, 28, 39],
                         [27, 42, 28], [35, 29, 47], [30, 31, 29],
                         [15, 35, 46], [15, 14, 35], [43, 22, 23],
                         [27, 21, 22], [24, 44, 23], [44, 47, 43],
                         [43, 47, 42], [46, 35, 47], [26, 45, 44],
                         [46, 47, 44], [25, 44, 24], [25, 26, 44],
                         [16, 15, 45], [16, 45, 26], [22, 42, 43],
                         [50, 60, 51], [27, 22, 42]])
    new_pcloud = TriMesh(new_pcloud.points, trilist=tri_list, copy=False)
    mapping = OrderedDict()
    # A trimesh labelling has a single semantic group covering every point.
    mapping['tri'] = np.arange(new_pcloud.n_points)
    return new_pcloud, mapping
@labeller_func(group_label='face_ibug_65')
def face_ibug_68_to_face_ibug_65(pcloud):
    r"""
    Apply the IBUG 68-point semantic labels, but ignore the 3 points that
    are coincident for a closed mouth (the bottom of the inner mouth).

    The semantic labels applied are as follows:

      - jaw
      - left_eyebrow
      - right_eyebrow
      - nose
      - left_eye
      - right_eye
      - mouth

    References
    ----------
    .. [1] http://www.multipie.org/
    .. [2] http://ibug.doc.ic.ac.uk/resources/300-W/
    """
    from menpo.shape import LabelledPointUndirectedGraph

    labelled, mapping = face_ibug_68_to_face_ibug_68(pcloud,
                                                     return_mapping=True)
    # The coincident points are the final 3 landmarks (bottom of the inner
    # mouth). Strip the inner mouth connectivity — its edges are the last
    # 8 of the graph...
    kept_edges = labelled.edges[:-8]
    # ...then re-close the inner mouth loop over the 5 remaining points.
    kept_edges = np.vstack([kept_edges,
                            connectivity_from_range((60, 65),
                                                    close_loop=True)])
    # Updating an existing key of an OrderedDict keeps the label ordering.
    outer_lips = np.arange(48, 60)
    inner_lips = np.arange(60, 65)
    mapping['mouth'] = np.hstack([outer_lips, inner_lips])
    new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
        labelled.points[:-3], kept_edges, mapping)
    return new_pcloud, mapping
@labeller_func(group_label='face_imm_58')
def face_imm_58_to_face_imm_58(pcloud):
    r"""
    Apply the 58-point semantic labels from the IMM dataset.

    The semantic labels applied are as follows:

      - jaw
      - left_eye
      - right_eye
      - left_eyebrow
      - right_eyebrow
      - mouth
      - nose

    References
    ----------
    .. [1] http://www2.imm.dtu.dk/~aam/
    """
    n_expected_points = 58
    validate_input(pcloud, n_expected_points)
    # Each entry is (start, stop, closed?) — the index range of the region
    # and whether its connectivity forms a closed loop.
    labels = OrderedDict([
        ('jaw', (0, 13, False)),
        ('left_eye', (13, 21, True)),
        ('right_eye', (21, 29, True)),
        # Bug fix: the key was 'left _eyebrow' (stray space), inconsistent
        # with 'right_eyebrow' and every other labeller in this module.
        ('left_eyebrow', (29, 34, False)),
        ('right_eyebrow', (34, 39, False)),
        ('mouth', (39, 47, True)),
        ('nose', (47, 58, False))
    ])
    return pcloud_and_lgroup_from_ranges(pcloud, labels)
@labeller_func(group_label='face_lfpw_29')
def face_lfpw_29_to_face_lfpw_29(pcloud):
    r"""
    Apply the 29-point semantic labels from the original LFPW dataset.

    The semantic labels applied are as follows:

      - chin
      - left_eye
      - right_eye
      - left_eyebrow
      - right_eyebrow
      - mouth
      - nose

    References
    ----------
    .. [1] http://homes.cs.washington.edu/~neeraj/databases/lfpw/
    """
    from menpo.shape import LabelledPointUndirectedGraph

    validate_input(pcloud, 29)

    chin = np.array([28])
    left_eye_outline = np.array([8, 12, 10, 13])
    left_pupil = np.array([16])
    right_eye_outline = np.array([11, 14, 9, 15])
    right_pupil = np.array([17])
    left_brow = np.array([0, 4, 2, 5])
    right_brow = np.array([3, 6, 1, 7])
    outer_lips = np.array([22, 24, 23, 27])
    inner_lips = np.array([22, 25, 23, 26])
    nose = np.array([18, 20, 19, 21])

    # Every connected region of this markup is a closed loop; the pupils
    # carry no connectivity of their own.
    edges = np.vstack([
        connectivity_from_array(region, close_loop=True)
        for region in (chin, left_eye_outline, right_eye_outline,
                       left_brow, right_brow, outer_lips, inner_lips, nose)
    ])

    mapping = OrderedDict([
        ('chin', chin),
        ('left_eye', np.hstack((left_eye_outline, left_pupil))),
        ('right_eye', np.hstack((right_eye_outline, right_pupil))),
        ('left_eyebrow', left_brow),
        ('right_eyebrow', right_brow),
        ('mouth', np.hstack((outer_lips, inner_lips))),
        ('nose', nose),
    ])

    new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
        pcloud.points, edges, mapping)
    return new_pcloud, mapping
def _build_upper_eyelid():
top_indices = np.arange(0, 7)
middle_indices = np.arange(12, 17)
upper_eyelid_indices = np.hstack((top_indices, middle_indices))
upper_eyelid_connectivity = list(zip(top_indices, top_indices[1:]))
upper_eyelid_connectivity += [(0, 12)]
upper_eyelid_connectivity += list(zip(middle_indices, middle_indices[1:]))
upper_eyelid_connectivity += [(16, 6)]
return upper_eyelid_indices, upper_eyelid_connectivity
@labeller_func(group_label='eye_ibug_open_38')
def eye_ibug_open_38_to_eye_ibug_open_38(pcloud):
    r"""
    Apply the IBUG 38-point open eye semantic labels.

    The semantic labels applied are as follows:

      - upper_eyelid
      - lower_eyelid
      - iris
      - pupil
      - sclera
    """
    from menpo.shape import LabelledPointUndirectedGraph

    validate_input(pcloud, 38)

    upper_indices, upper_edges = _build_upper_eyelid()

    iris_range = (22, 30)
    pupil_range = (30, 38)

    sclera_top = np.arange(12, 17)
    sclera_bottom = np.arange(17, 22)
    sclera_indices = np.hstack((0, sclera_top, 6, sclera_bottom))

    lower_top = np.arange(17, 22)
    lower_bottom = np.arange(7, 12)
    lower_indices = np.hstack((6, lower_top, 0, lower_bottom))

    iris_edges = connectivity_from_range(iris_range, close_loop=True)
    pupil_edges = connectivity_from_range(pupil_range, close_loop=True)

    # Chain each sclera arc and bridge the two arcs into a closed outline.
    sclera_edges = list(zip(sclera_top, sclera_top[1:]))
    sclera_edges.append((0, 21))
    sclera_edges.extend(zip(sclera_bottom, sclera_bottom[1:]))
    sclera_edges.append((6, 17))

    # Same construction for the lower eyelid outline.
    lower_edges = list(zip(lower_top, lower_top[1:]))
    lower_edges.append((6, 7))
    lower_edges.extend(zip(lower_bottom, lower_bottom[1:]))
    lower_edges.append((11, 0))

    all_edges = np.asarray(upper_edges + lower_edges +
                           iris_edges.tolist() + pupil_edges.tolist() +
                           sclera_edges)

    mapping = OrderedDict([
        ('upper_eyelid', upper_indices),
        ('lower_eyelid', lower_indices),
        ('pupil', np.arange(*pupil_range)),
        ('iris', np.arange(*iris_range)),
        ('sclera', sclera_indices),
    ])

    new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
        pcloud.points, all_edges, mapping)
    return new_pcloud, mapping
@labeller_func(group_label='eye_ibug_close_17')
def eye_ibug_close_17_to_eye_ibug_close_17(pcloud):
    r"""
    Apply the IBUG 17-point close eye semantic labels.

    The semantic labels applied are as follows:

      - upper_eyelid
      - lower_eyelid
    """
    from menpo.shape import LabelledPointUndirectedGraph

    validate_input(pcloud, 17)

    upper_indices, upper_edges = _build_upper_eyelid()

    middle = np.arange(12, 17)
    bottom = np.arange(6, 12)
    lower_indices = np.hstack((bottom, 0, middle))

    # Chain the bottom arc, bridge to the middle arc, chain it, then close
    # the outline back at point 0.
    lower_edges = list(zip(bottom, bottom[1:]))
    lower_edges.append((0, 12))
    lower_edges.extend(zip(middle, middle[1:]))
    lower_edges.append((11, 0))

    all_edges = np.asarray(upper_edges + lower_edges)

    mapping = OrderedDict([
        ('upper_eyelid', upper_indices),
        ('lower_eyelid', lower_indices),
    ])

    new_pcloud = LabelledPointUndirectedGraph.init_from_indices_mapping(
        pcloud.points, all_edges, mapping)
    return new_pcloud, mapping
@labeller_func(group_label='eye_ibug_open_38_trimesh')
def eye_ibug_open_38_to_eye_ibug_open_38_trimesh(pcloud):
    r"""
    Apply the IBUG 38-point open eye semantic labels, with trimesh
    connectivity.

    The semantic labels applied are as follows:

      - tri
    """
    from menpo.shape import TriMesh
    n_expected_points = 38
    validate_input(pcloud, n_expected_points)
    # Hand-crafted triangulation of the 38-point open eye markup.
    # NOTE(review): row [19, 19, 8] repeats a vertex and is therefore a
    # degenerate triangle — verify against the upstream triangulation.
    tri_list = np.array([[29, 36, 28], [22, 13, 23], [12,  1,  2],
                         [29, 30, 37], [13,  3, 14], [13, 12,  2],
                         [19,  8,  9], [25, 33, 24], [36, 37, 33],
                         [24, 32, 31], [33, 37, 31], [35, 34, 27],
                         [35, 36, 33], [ 3, 13,  2], [14, 24, 23],
                         [33, 32, 24], [15, 25, 14], [25, 26, 34],
                         [22, 30, 29], [31, 37, 30], [24, 31, 23],
                         [32, 33, 31], [22, 12, 13], [ 0,  1, 12],
                         [14, 23, 13], [31, 30, 23], [28, 19, 20],
                         [21, 11,  0], [12, 21,  0], [20, 11, 21],
                         [20, 10, 11], [21, 29, 20], [21, 12, 22],
                         [30, 22, 23], [29, 21, 22], [27, 19, 28],
                         [29, 37, 36], [29, 28, 20], [36, 35, 28],
                         [20, 19, 10], [10, 19,  9], [28, 35, 27],
                         [19, 19,  8], [17, 16,  6], [18,  7,  8],
                         [25, 34, 33], [18, 27, 17], [18, 19, 27],
                         [18, 17,  7], [27, 26, 17], [17,  6,  7],
                         [14, 25, 24], [34, 35, 33], [17, 26, 16],
                         [27, 34, 26], [ 3, 15, 14], [15, 26, 25],
                         [ 4, 15,  3], [16, 26, 15], [16,  4,  5],
                         [16, 15,  4], [16,  5,  6], [8, 18, 19]])
    new_pcloud = TriMesh(pcloud.points, trilist=tri_list, copy=False)
    mapping = OrderedDict()
    # A trimesh labelling has a single semantic group covering every point.
    mapping['tri'] = np.arange(new_pcloud.n_points)
    return new_pcloud, mapping
@labeller_func(group_label='eye_ibug_close_17_trimesh')
def eye_ibug_close_17_to_eye_ibug_close_17_trimesh(pcloud):
    r"""
    Apply the IBUG 17-point close eye semantic labels, with trimesh
    connectivity.

    The semantic labels applied are as follows:

      - tri
    """
    from menpo.shape import TriMesh
    n_expected_points = 17
    validate_input(pcloud, n_expected_points)
    # Hand-crafted triangulation of the 17-point closed eye markup.
    tri_list = np.array([[10, 11, 13], [ 3, 13,  2], [ 4, 14,  3],
                         [15,  5, 16], [12, 11,  0], [13, 14, 10],
                         [13, 12,  2], [14, 13,  3], [ 0,  1, 12],
                         [ 2, 12,  1], [13, 11, 12], [ 9, 10, 14],
                         [15,  9, 14], [ 7,  8, 15], [ 5,  6, 16],
                         [15, 14,  4], [ 7, 15, 16], [ 8,  9, 15],
                         [15,  4,  5], [16,  6,  7]])
    new_pcloud = TriMesh(pcloud.points, trilist=tri_list, copy=False)
    mapping = OrderedDict()
    # A trimesh labelling has a single semantic group covering every point.
    mapping['tri'] = np.arange(new_pcloud.n_points)
    return new_pcloud, mapping
@labeller_func(group_label='tongue_ibug_19')
def tongue_ibug_19_to_tongue_ibug_19(pcloud):
    r"""
    Apply the IBUG 19-point tongue semantic labels.

    The semantic labels applied are as follows:

      - outline
      - bisector
    """
    validate_input(pcloud, 19)
    # Each entry is (start, stop, closed?) — the index range of the region
    # and whether its connectivity forms a closed loop.
    region_ranges = OrderedDict()
    region_ranges['outline'] = (0, 13, False)
    region_ranges['bisector'] = (13, 19, False)
    return pcloud_and_lgroup_from_ranges(pcloud, region_ranges)
|
import abc
import asyncio
import logging
import random
import threading
from typing import Any, Mapping, Optional, Union
from async_timeout import timeout
from logstash import LogstashFormatterVersion1
from .log import logger
class BaseLogstashHandler(logging.Handler):
    """Abstract asyncio ``logging.Handler`` that ships records to Logstash.

    Records are formatted with ``LogstashFormatterVersion1`` and buffered
    in a bounded asyncio queue; a background worker task drains the queue
    and sends each serialized record over a transport that subclasses
    provide via ``_connect`` / ``_send`` / ``_disconnect``. When the queue
    is full the *oldest* record is dropped. ``None`` on the queue is the
    shutdown sentinel.
    """

    def __init__(
        self,
        *,
        level: int,
        close_timeout: float,
        qsize: int,
        loop: asyncio.AbstractEventLoop,
        reconnect_delay: float,
        reconnect_jitter: float,
        extra: Mapping[str, Any]
    ) -> None:
        # close_timeout: seconds wait_closed() waits for the worker before
        # cancelling it. qsize: queue bound. reconnect_delay/jitter:
        # mean/std-dev of the gaussian backoff between reconnect attempts.
        # extra: fields merged into every record that lacks them.
        self._close_timeout = close_timeout
        self._reconnect_delay = reconnect_delay
        self._reconnect_jitter = reconnect_jitter
        self._random = random.Random()
        self._extra = extra
        self._loop = loop
        # Remember the thread owning the loop so emit() can detect calls
        # arriving from foreign threads.
        self._thread_id = threading.get_ident()
        # NOTE(review): the explicit loop= argument to asyncio.Queue was
        # removed in Python 3.10 — confirm the supported Python range.
        self._queue: asyncio.Queue[Union[logging.LogRecord, None]] = asyncio.Queue(
            maxsize=qsize, loop=self._loop
        )
        super().__init__(level=level)
        formatter = LogstashFormatterVersion1()
        self.setFormatter(formatter)
        self._closing = False
        # Background task draining the queue for the handler's lifetime.
        self._worker: Optional[asyncio.Task[None]] = self._loop.create_task(
            self._work()
        )

    @abc.abstractmethod
    async def _connect(self) -> None:
        """(Re-)establish the transport connection."""
        pass  # pragma: no cover

    @abc.abstractmethod
    async def _send(self, data: bytes) -> None:
        """Send one serialized record over the transport."""
        pass  # pragma: no cover

    @abc.abstractmethod
    async def _disconnect(self) -> None:
        """Tear down the transport connection."""
        pass  # pragma: no cover

    def emit(self, record: logging.LogRecord) -> None:
        """Enqueue *record* for delivery (thread-safe entry point)."""
        if self._closing:
            # Shutdown already started: drop the record, but say so.
            msg = 'Log message skipped due shutdown "%(record)s"'
            context = {"record": record}
            logger.warning(msg, context)
            return
        if threading.get_ident() != self._thread_id:
            # Called from a foreign thread: hop onto the loop's thread,
            # since asyncio.Queue is not thread-safe.
            self._loop.call_soon_threadsafe(self._do_emit, record)
        else:
            self._do_emit(record)

    def _do_emit(self, record: logging.LogRecord) -> None:
        # Runs on the loop's thread only (see emit()).
        if self._queue.full():
            # Bounded queue: evict the oldest record rather than block.
            msg = 'Queue is full, drop oldest message: "%(record)s"'
            context = {"record": self._queue.get_nowait()}
            logger.warning(msg, context)
        self._queue.put_nowait(record)

    async def _work(self) -> None:
        """Drain the queue until the ``None`` shutdown sentinel arrives."""
        reconnection = False
        while True:
            if not reconnection:
                record = await self._queue.get()
                if record is None:
                    # Put the sentinel back so wait_closed()'s final
                    # assertions still find exactly one item in the queue.
                    self._queue.put_nowait(None)
                    break
            # After a reconnect, retry the same record instead of
            # fetching a new one.
            reconnection = False
            try:
                data = self._serialize(record)
                try:
                    await self._send(data)
                except (OSError, RuntimeError):
                    reconnection = True
                    await self._reconnect()
            except asyncio.CancelledError:
                raise
            except Exception as exc:
                # Never let a logging failure kill the worker task.
                msg = "Unexpected exception while sending log"
                logger.warning(msg, exc_info=exc)

    async def _reconnect(self) -> None:
        """Reconnect with randomized (gaussian) backoff until success."""
        logger.info("Transport disconnected")
        await self._disconnect()
        while True:
            try:
                await self._connect()
                logger.info("Transport reconnected")
                return
            except (OSError, RuntimeError):
                # NOTE(review): gauss() can yield a negative delay, which
                # asyncio.sleep treats as "no wait" — confirm that is the
                # intended backoff behaviour.
                delay = self._random.gauss(
                    self._reconnect_delay, self._reconnect_jitter
                )
                # NOTE(review): loop= was removed from asyncio.sleep in
                # Python 3.10 — confirm the supported Python range.
                await asyncio.sleep(delay, loop=self._loop)

    def _serialize(self, record: logging.LogRecord) -> bytes:
        """Format *record* as a newline-terminated bytes payload."""
        # Back-fill the static extra fields without overriding anything
        # the record already carries.
        for key, value in self._extra.items():
            if not hasattr(record, key):
                setattr(record, key, value)
        # LogstashFormatterVersion1 violates the protocol by returning bytes
        # instead of str required by the base class
        return self.format(record) + b"\n"  # type: ignore

    def close(self) -> None:
        """Begin shutdown: enqueue the sentinel and close the handler.

        Idempotent; the worker is awaited later via ``wait_closed()``.
        """
        if self._closing:
            return
        self._closing = True
        if self._queue.full():
            # Make room for the sentinel by evicting the oldest record.
            msg = "Queue is full, drop oldest message before closing" ': "%(record)s"'
            context = {"record": self._queue.get_nowait()}
            logger.warning(msg, context)
        self._queue.put_nowait(None)
        super().close()

    async def wait_closed(self) -> None:
        """Await worker completion (bounded by ``close_timeout``), then
        disconnect the transport. Idempotent."""
        if self._worker is None:
            return  # already closed
        try:
            # NOTE(review): newer async_timeout releases dropped the
            # loop= parameter — confirm the pinned version.
            async with timeout(self._close_timeout, loop=self._loop):
                await self._worker
        except asyncio.TimeoutError:
            # Worker did not drain in time: cancel it and swallow the
            # resulting CancelledError.
            self._worker.cancel()
            try:
                await self._worker
            except:  # noqa
                pass
        self._worker = None
        # After a clean drain only the sentinel should remain.
        assert self._queue.qsize() == 1
        assert self._queue.get_nowait() is None
        await self._disconnect()
|
<reponame>arosen93/QMOF
import pandas as pd
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error, r2_score
from scipy.stats import spearmanr
import numpy as np
import os
# ---------------------------------------------------------------------------
# KRR learning curve for QMOF band gaps.
#
# Trains a kernel ridge regression model on Stoichiometric-120 fingerprints
# at increasing training-set sizes, averages the held-out test metrics
# (MAE, r^2, Spearman rho) over several random train/test splits, and
# writes the averaged curve plus its standard deviations to CSV.
# ---------------------------------------------------------------------------

# Settings
alpha = 0.1  # KRR regularization strength
gamma = 0.1  # kernel width parameter
kernel = 'laplacian'  # kernel function
test_size = 0.2  # fraction held-out for testing
seeds = [42, 125, 267, 541, 582]  # random seeds
train_sizes = [2**7, 2**8, 2**9, 2**10, 2**11, 2**12, 2**13, -1]  # train sizes; -1 = full training split
fingerprint_path = 'stoich120_fingerprints.csv'  # fingerprints (length N)
y_path = os.path.join('..', 'qmof-bandgaps.csv')  # band gaps (length N)
#---------------------------------------
# Read in data: align fingerprints with band gaps on the refcode index and
# drop rows missing either.
df_features = pd.read_csv(fingerprint_path, index_col=0)
df_BG = pd.read_csv(y_path, index_col=0)['BG_PBE']
df = pd.concat([df_features, df_BG], axis=1, sort=True)
df = df.dropna()
refcodes = df.index

# Make a training and testing set
mae = []
r2 = []
rho = []
mae_std = []
r2_std = []
rho_std = []
for train_size in train_sizes:
    # Per-seed test metrics for this training-set size.
    mae_test_seeds = []
    r2_test_seeds = []
    rho_test_seeds = []
    for seed in seeds:
        # The test split is always test_size of the full data; the
        # training subset is then truncated to the requested size.
        train_set, test_set = train_test_split(
            df, test_size=test_size, shuffle=True, random_state=seed)
        if train_size != -1:
            train_set = train_set[0:train_size]
        X_train = train_set.loc[:, (df.columns != 'BG_PBE')]
        X_test = test_set.loc[:, (df.columns != 'BG_PBE')]
        refcodes_train = X_train.index
        refcodes_test = X_test.index
        # Min-max scale features using training statistics only (no
        # leakage from the test split).
        scaler = MinMaxScaler()
        scaler.fit(X_train)
        X_train = scaler.transform(X_train)
        X_test = scaler.transform(X_test)
        # .to_numpy() keeps y as a 2-D (n, 1) column array.
        y_train = train_set.loc[:, df.columns == 'BG_PBE'].to_numpy()
        y_test = test_set.loc[:, df.columns == 'BG_PBE'].to_numpy()
        # Train and evaluate KRR model
        krr = KernelRidge(alpha=alpha, gamma=gamma, kernel=kernel)
        krr.fit(X_train, y_train)
        y_train_pred = krr.predict(X_train)
        y_test_pred = krr.predict(X_test)
        mae_test_seeds.append(mean_absolute_error(y_test, y_test_pred))
        r2_test_seeds.append(r2_score(y_test, y_test_pred))
        rho_test_seeds.append(spearmanr(y_test, y_test_pred)[0])
    # Aggregate over seeds: mean curve point and its standard deviation.
    mae.append(np.average(mae_test_seeds))
    r2.append(np.average(r2_test_seeds))
    rho.append(np.average(rho_test_seeds))
    mae_std.append(np.std(mae_test_seeds))
    r2_std.append(np.std(r2_test_seeds))
    rho_std.append(np.std(rho_test_seeds))
    print('Training size: ', train_size)
    print('Avg. testing MAE: ', np.round(np.average(mae_test_seeds), 3))
    print('Avg. testing r^2: ', np.round(np.average(r2_test_seeds), 3))
    print('Avg. testing rho: ', np.round(np.average(rho_test_seeds), 3))

# Output rows: MAE, r^2, rho; one column per entry of train_sizes.
np.savetxt('learning_curve_avg.csv', np.vstack([mae, r2, rho]), delimiter=',')
np.savetxt('learning_curve_std.csv', np.vstack([mae_std, r2_std, rho_std]), delimiter=',')
<filename>CUCM/greenfield-deployment.py<gh_stars>0
import csv
from requests import Session
from requests.auth import HTTPBasicAuth
import csv
import pandas as pd
from lxml import etree
import getpass
from zeep import Client, Settings, Plugin, xsd
from zeep.transports import Transport
from zeep.exceptions import Fault
import sys
# Change to True to enable output of request/response headers and XML
DEBUG = False
# Collect credentials and connection details interactively.
CUCM_ADDRESS = input("Enter CUCM IP address:- ")
USERNAME = input("Enter CUCM AXL Username:-")
# Fixed: the original line was the unparseable placeholder ``<PASSWORD>()``.
# getpass prompts without echoing the password to the terminal.
PASSWORD = getpass.getpass("Enter CUCM AXL Password:- ")
CUCM_VERSION = input("Enter CUCM Versions:- ")
# The WSDL is a local file in the working directory, see README
WSDL_FILE = "schema/" + CUCM_VERSION + "/AXLAPI.wsdl"
# This class lets you view the incoming and outgoing http headers and XML
class MyLoggingPlugin(Plugin):
    """Zeep plugin that pretty-prints every SOAP request and response."""

    def egress(self, envelope, http_headers, operation, binding_options):
        """Dump the outgoing request headers and pretty-printed XML body."""
        body = etree.tostring(envelope, pretty_print=True, encoding="unicode")
        print(f"\nRequest\n-------\nHeaders:\n{http_headers}\n\nBody:\n{body}")

    def ingress(self, envelope, http_headers, operation):
        """Dump the incoming response headers and pretty-printed XML body."""
        body = etree.tostring(envelope, pretty_print=True, encoding="unicode")
        print(f"\nResponse\n-------\nHeaders:\n{http_headers}\n\nBody:\n{body}")
# The first step is to create a SOAP client session
session = Session()
# We avoid certificate verification by default.
# NOTE(review): verify=False disables TLS validation -- acceptable for lab
# use only; see the commented-out line below for production.
session.verify = False
# To enabled SSL cert checking (recommended for production)
# place the CUCM Tomcat cert .pem file in the root of the project
# and uncomment the line below
# session.verify = 'changeme.pem'
# Add Basic Auth credentials
session.auth = HTTPBasicAuth(USERNAME, PASSWORD)
# Create a Zeep transport and set a reasonable timeout value
transport = Transport(session=session, timeout=10)
# strict=False is not always necessary, but it allows zeep to parse imperfect XML
settings = Settings(strict=False, xml_huge_tree=True)
# If debug output is requested, add the MyLoggingPlugin callback
plugin = [MyLoggingPlugin()] if DEBUG else []
# Create the Zeep client with the specified settings
client = Client(WSDL_FILE, settings=settings, transport=transport, plugins=plugin)
# Create the Zeep service binding to AXL at the specified CUCM
service = client.create_service(
    "{http://www.cisco.com/AXLAPIService/}AXLAPIBinding",
    f"https://{CUCM_ADDRESS}:8443/axl/",
)
# Create Partition
def cucm_partition(partition=None):
    """Add one route partition via the AXL addRoutePartition call.

    :param partition: dict of addRoutePartition fields.  When omitted, falls
        back to the module-level ``lines`` loop variable (the behaviour the
        original, argument-less version relied on).

    Exits the process with status 1 on an AXL fault.
    """
    if partition is None:
        # Backward compatible: the caller's CSV loop binds each row to the
        # global ``lines`` before calling this function.
        partition = lines
    try:
        respconf = service.addRoutePartition(partition)
    except Fault as err:
        print(f"Zeep error: addRoutePartition: { err }")
        sys.exit(1)
    # Fixed: the original string began with "\a" (BEL) instead of "\n".
    print("\naddRoutePartition response:\n")
    print(respconf, "\n")
partition = input("Configure partition (Y/N):-")
if partition.lower() == "y":
    # Each CSV row becomes one addRoutePartition request; cucm_partition()
    # reads the current row from the ``lines`` loop variable.
    partiton_file_temp = csv.DictReader(open("partition.csv", encoding="utf-8-sig"))
    for lines in partiton_file_temp:
        cucm_partition()
else:
    input("Press Enter to continue...")
# Configure CSS (calling search spaces) from css.csv.
# NOTE(review): the file is re-read once per CSS row and ``css_increment``
# selects a different member column each pass -- this assumes one
# partition-member column per CSS after name/description; confirm the CSV
# layout matches.
css = input("Configure Css (Y/N):-")
if css.lower() == "y":
    css_member = {}
    with open("css.csv") as csvfile:
        next(csvfile)  # skip the header row
        reader = csv.reader(csvfile)
        css_increment = 1
        for css_name in reader:
            # Column 0 = CSS name, column 1 = description.
            css = {
                "name": css_name[0],
                "description": css_name[1],
                "members": {"member": []},
            }
            css_increment += 1
            index_increment = 0
            # Second pass over the file to collect this CSS's members.
            with open("css.csv") as csvfile:
                next(csvfile)
                reader = csv.reader(csvfile)
                for row in reader:
                    index_increment += 1
                    if not row[css_increment]:
                        continue
                    css_member = {
                        "routePartitionName": row[css_increment],
                        "index": index_increment,
                    }
                    css["members"]["member"].append(css_member)
            # Execute the addCss request
            try:
                resp = service.addCss(css)
            except Fault as err:
                print(f"Zeep error: addCss: { err }")
                sys.exit(1)
            print("\naddCss response:\n")
            print(resp, "\n")
else:
    input("Press Enter to continue...")
# Create Date & Time Group (hard-coded to the Muscat time zone).
date_time = {
    "name": "MUSCAT",
    "timeZone": "Asia/Muscat",
    "separator": "-",
    "dateformat": "M-D-Y",
    "timeFormat": "12-hour",
}
# Execute the addDateTimeGroup request
try:
    resp = service.addDateTimeGroup(date_time)
except Fault as err:
    print(f"Zeep error: addDateTimeGroup: { err }")
    sys.exit(1)
print("\naddDateTimeGroup response:\n")
print(resp, "\n")
# Configure UDT (Universal Device Template)
udt_input = input("Do you want to configure UniversalDeviceTemplate(Y/N):-")
if udt_input.lower() == "y":
    # Create UDT: collect the template name and external phone number mask.
    udt = input("Enter UDT Name-")
    external_mask = input(" Enter the External Mask")
    # addUniversalDeviceTemplate request body. xsd.SkipValue omits the
    # element so CUCM applies its own default.
    # Fixed: the original dict literal repeated the "devicePool" key three
    # times (Python silently keeps only the last); the duplicates are removed.
    add_udt = {
        "name": udt,
        "deviceDescription": "#FN# #LN# #EXT#",
        "devicePool": "Default",
        "deviceSecurityProfile": "Universal Device Template - Model-independent Security Profile",
        "sipProfile": "Standard SIP Profile",
        "phoneButtonTemplate": "Universal Device Template Button Layout",
        "commonPhoneProfile": "Standard Common Phone Profile",
        "softkeyTemplate": "Standard User",
        "mtpPreferredOriginatingCodec": "711alaw",
        "outboundCallRollover": "No Rollover",
        "phonePersonalization": "Enabled",
        "useTrustedRelayPoint": "Default",
        "certificateOperation": xsd.SkipValue,
        "authenticationMode": xsd.SkipValue,
        "keySize": xsd.SkipValue,
        "ecKeySize": xsd.SkipValue,
        "servicesProvisioning": xsd.SkipValue,
        "packetCaptureMode": xsd.SkipValue,
        "mlppIndication": xsd.SkipValue,
        "mlppPreemption": xsd.SkipValue,
        "dndOption": "Use Common Phone Profile Setting",
        "blfPresenceGroup": xsd.SkipValue,
        "blfAudibleAlertSettingPhoneBusy": xsd.SkipValue,
        "blfAudibleAlertSettingPhoneIdle": xsd.SkipValue,
        "location": "Hub_None",
        "deviceMobilityMode": xsd.SkipValue,
        "joinAcrossLines": xsd.SkipValue,
        "alwaysUsePrimeLine": xsd.SkipValue,
        "alwaysUsePrimeLineForVoiceMessage": xsd.SkipValue,
        "singleButtonBarge": xsd.SkipValue,
        "builtInBridge": xsd.SkipValue,
        "privacy": xsd.SkipValue,
        "lines": {"line": []},
    }
    # Single line-1 appearance using the collected external mask.
    line_details = {
        "index": "1",
        "label": "#FN# #LN# #EXT#",
        "display": "#FN# #LN# #EXT#",
        "e164Mask": external_mask,
        "ringSetting": "Ring",
        "dirn": xsd.SkipValue,
        "maxNumCalls": "4",
        "busyTrigger": "2",
        "callerName": "true",
    }
    add_udt["lines"]["line"].append(line_details)
    try:
        respconf = service.addUniversalDeviceTemplate(add_udt)
    except Fault as err:
        print(f"Zeep error: addUniversalDeviceTemplate: { err }")
        sys.exit(1)
    # Fixed: the original string began with "\a" (BEL) instead of "\n".
    print("\naddUniversalDeviceTemplate response:\n")
    print(respconf, "\n")
    input("Press Enter to continue...")
else:
    input("Press Enter to continue...")
# Create Universal Line Template
ult_input = input("Do you want to configure Universal Line Template(Y/N):-")
if ult_input.lower() == "y":
    # Create ULT: collect the template name.
    ult = input("Press Enter to ULT Name-")
    # addUniversalLineTemplate request body. xsd.SkipValue omits the element
    # so CUCM applies its own default.
    add_ult = {
        # Fixed: the original hard-coded "ULT" and ignored the user input.
        "name": ult,
        "urgentPriority": xsd.SkipValue,
        "lineDescription": "#FN# #LN# ",
        "routePartition": xsd.SkipValue,
        "voiceMailProfile": "Default",
        "callingSearchSpace": "CSS_Internal",
        "alertingName": "#FirstName# #LastName#",
        "extCallControlProfile": xsd.SkipValue,
        "blfPresenceGroup": "Standard Presence group",
        "callPickupGroup": xsd.SkipValue,
        "partyEntranceTone": xsd.SkipValue,
        "autoAnswer": xsd.SkipValue,
        "rejectAnonymousCall": xsd.SkipValue,
        "userHoldMohAudioSource": xsd.SkipValue,
        "networkHoldMohAudioSource": xsd.SkipValue,
        "aarDestinationMask": xsd.SkipValue,
        "aarGroup": xsd.SkipValue,
        "retainDestInCallFwdHistory": "t",
        "forwardDestAllCalls": xsd.SkipValue,
        "primaryCssForwardingAllCalls": xsd.SkipValue,
        "secondaryCssForwardingAllCalls": xsd.SkipValue,
        "CssActivationPolicy": xsd.SkipValue,
        "fwdDestExtCallsWhenNotRetrieved": xsd.SkipValue,
        "cssFwdExtCallsWhenNotRetrieved": xsd.SkipValue,
        "fwdDestInternalCallsWhenNotRetrieved": xsd.SkipValue,
        "cssFwdInternalCallsWhenNotRetrieved": xsd.SkipValue,
        "parkMonitorReversionTime": xsd.SkipValue,
        "target": xsd.SkipValue,
        "mlppCss": xsd.SkipValue,
        "mlppNoAnsRingDuration": xsd.SkipValue,
        "confidentialAccess": xsd.SkipValue,
        "confidentialAccessMode": xsd.SkipValue,
        "holdReversionRingDuration": xsd.SkipValue,
        "holdReversionNotificationInterval": xsd.SkipValue,
        "busyIntCallsDestination": xsd.SkipValue,
        "busyIntCallsCss": xsd.SkipValue,
        "busyExtCallsDestination": xsd.SkipValue,
        "busyExtCallsCss": xsd.SkipValue,
        "noAnsIntCallsDestination": xsd.SkipValue,
        "noAnsIntCallsCss": xsd.SkipValue,
        # Fixed: these two keys were accidentally fused into a single key by
        # implicit string concatenation in the original source.
        "noAnsExtCallsDestination": xsd.SkipValue,
        "noAnsExtCallsCss": xsd.SkipValue,
        "noCoverageIntCallsDestination": xsd.SkipValue,
        "noCoverageIntCallsCss": xsd.SkipValue,
        "noCoverageExtCallsDestination": xsd.SkipValue,
        "noCoverageExtCallsCss": xsd.SkipValue,
        "unregisteredIntCallsDestination": xsd.SkipValue,
        "unregisteredIntCallsCss": xsd.SkipValue,
        "unregisteredExtCallsDestination": xsd.SkipValue,
        "unregisteredExtCallsCss": xsd.SkipValue,
        "ctiFailureDestination": xsd.SkipValue,
        "ctiFailureCss": xsd.SkipValue,
        "callControlAgentProfile": xsd.SkipValue,
        "noAnswerRingDuration": "60",
        "enterpriseAltNum": xsd.SkipValue,
    }
    try:
        respconf = service.addUniversalLineTemplate(add_ult)
    except Fault as err:
        # Fixed: the original messages referred to addUniversalDeviceTemplate.
        print(f"Zeep error: addUniversalLineTemplate: { err }")
        sys.exit(1)
    print("\naddUniversalLineTemplate response:\n")
    print(respconf, "\n")
    input("Press Enter to continue...")
else:
    input("Press Enter to continue...")
# Create a Region that relates to itself and to the Default region.
region = {"name": "HQ REGION", "relatedRegions": {"relatedRegion": []}}
# Create a relatedRegion sub object (intra-region audio/video limits).
related_region = {
    "regionName": "HQ REGION",
    "bandwidth": "64 kbps",
    "videoBandwidth": "512",
    "lossyNetwork": "Low Loss",
    "codecPreference": "Factory Default low loss",
    # NOTE(review): 2000000000 kbps is effectively "unlimited" -- confirm
    # this immersive-video value is intentional.
    "immersiveVideoBandwidth": "2000000000",
}
# Same limits toward the Default region.
related_default_region = {
    "regionName": "Default",
    "bandwidth": "64 kbps",
    "videoBandwidth": "512",
    "lossyNetwork": "Low Loss",
    "codecPreference": "Factory Default low loss",
    "immersiveVideoBandwidth": "2000000000",
}
# Add the relatedRegion to the region.relatedRegions array
region["relatedRegions"]["relatedRegion"].append(related_region)
region["relatedRegions"]["relatedRegion"].append(related_default_region)
# Execute the addRegion request
try:
    resp = service.addRegion(region)
except Fault as err:
    print(f"Zeep error: addRegion: { err }")
    sys.exit(1)
print("\naddRegion response:\n")
print(resp, "\n")
input("Press Enter to continue...")
Conf_yes_no = input("Do you need to configure ConferanceNow(Y/N):-")
if Conf_yes_no.lower() == "y":
    # Create a test ConferenceNow entry for the number the user supplies.
    conf_now_num = input("Press Enter to confernace Now Number:-")
    conferenceNow = {
        "conferenceNowNumber": conf_now_num,
        "description": "test",
        "maxWaitTimeForHost": "15",
    }
    try:
        respconf = service.addConferenceNow(conferenceNow)
    except Fault as err:
        print(f"Zeep error: addConferenceNow: { err }")
        sys.exit(1)
    # Fixed: the original string started with "\a" (BEL) and printed
    # "ddConferenceNow response" instead of "addConferenceNow response".
    print("\naddConferenceNow response:\n")
    print(respconf, "\n")
    input("Press Enter to continue...")
else:
    input("Press Enter to continue...")
# Create MRG (Media Resource Group) containing the built-in annunciator.
mrg = {
    "name": "MRG",
    "description": "MEDIA RESOURCE GROUP",
    "multicast": "f",
    "members": {
        "member": {
            "deviceName": "ANN_2",
        }
    },
}
# Execute the addMediaResourceGroup request
try:
    resp = service.addMediaResourceGroup(mrg)
except Fault as err:
    print(f"Zeep error: addMediaResourceGroup: { err }")
    sys.exit(1)
print("\naddMediaResourceGroup response:\n")
print(resp, "\n")
# Build an MGCP gateway definition from user input.
domain = input("Enter the gateway Domain for MGCP :-")
description = input("Enter the gateway Description:- ")
gateway = {
    "domainName": domain,
    "description": description,
    "product": "Cisco ISR 4321",
    "protocol": "MGCP",
    "callManagerGroupName": "Default",
    "units": {"unit": []},
}
# Create the gateway unit/subunit sub-objects (one T1/E1 NIM on the
# motherboard slot).  The original comment here wrongly said "relatedRegion".
unit_name = {"index": "0", "product": "ISR-2NIM-MBRD", "subunits": {"subunit": []}}
subunit_name = {"index": "1", "product": "NIM-1MFT-T1E1-E1", "beginPort": "0"}
unit_name["subunits"]["subunit"].append(subunit_name)
gateway["units"]["unit"].append(unit_name)
# Execute the addGateway request
try:
    resp = service.addGateway(gateway)
except Fault as err:
    print(f"Zeep error: addGateway: { err }")
    sys.exit(1)
# Fixed: this response belongs to addGateway, not addRegion.
print("\naddGateway response:\n")
print(resp, "\n")
input("Press Enter to continue...")
# Create a Route Group.
# Fixed: the original code called ``ucm.add_route_group(...)`` but no
# ``ucm`` object exists anywhere in this script, so it raised NameError at
# runtime.  Rewritten as a direct AXL addRouteGroup request in the same
# style as the other calls in this file.
route_group = {
    "name": "hollywood-rg",
    "distributionAlgorithm": "Circular",
    "members": {
        "member": [
            {"deviceName": "america-online-sip", "deviceSelectionOrder": 1, "port": 0},
            {"deviceName": "h323", "deviceSelectionOrder": 2, "port": 0},
        ]
    },
}
try:
    resp = service.addRouteGroup(route_group)
except Fault as err:
    print(f"Zeep error: addRouteGroup: { err }")
    sys.exit(1)
print("\naddRouteGroup response:\n")
print(resp, "\n")
# Create a Route List
route_list_name = input("Enter the Route List Name:- ")
route_list = {"name": route_list_name, "callManagerGroupName": "Default"}
# Execute the addRouteList request
try:
    resp = service.addRouteList(route_list)
except Fault as err:
    print(f"Zeep error: addRouteList: { err }")
    sys.exit(1)
print("\naddRouteList response:\n")
print(resp, "\n")
input("Press Enter to continue...")
# Create a Route Pattern pointing at the route list created above.
route_pattern = {
    "pattern": "1234567890",
    "routePartitionName": None,
    "blockEnable": False,
    "useCallingPartyPhoneMask": "Default",
    "dialPlanName": None,
    "digitDiscardInstructionName": None,
    "networkLocation": "OnNet",
    "prefixDigitsOut": None,
    "routeFilterName": None,
    # Fixed: the original referenced a hard-coded "testRouteList", which
    # only exists if the user happened to type that exact name above.
    "destination": {"routeListName": route_list_name},
}
# Execute the addRoutePattern request
try:
    resp = service.addRoutePattern(route_pattern)
except Fault as err:
    # Fixed: the original error label was the garbled
    # "addMediaRaddRoutePatternesourceList".
    print(f"Zeep error: addRoutePattern: { err }")
    sys.exit(1)
print("\naddRoutePattern response:\n")
print(resp, "\n")
input("Press Enter to continue...")
# Cleanup the objects we just created
try:
    resp = service.removeRoutePattern(pattern="1234567890", routePartitionName=None)
except Fault as err:
    print(f"Zeep error: removeRoutePattern: { err }")
    sys.exit(1)
print("\nremoveRoutePattern response:")
print(resp, "\n")
try:
    # Fixed: remove the route list we actually created, not "testRouteList".
    resp = service.removeRouteList(name=route_list_name)
except Fault as err:
    print(f"Zeep error: removeRouteList: { err }")
    sys.exit(1)
print("\nremoveRouteList response:")
print(resp, "\n")
|
<reponame>imamsolikhin/Python<filename>app/helper/network.py<gh_stars>0
# -*- coding: utf-8 -*-
"""Input module based on expect, used to retrieve information from devices using a command line interface (CLI)"""
# builtin modules
import re
import telnetlib
import socket
import logging
import types
import time
# local modules
import exceptions
import base
import emulate
"""
Example usage:
from pynt.protocols.tl1 import SyncTL1Input, AsyncTL1Input, TL1EmulatorInput, ParseSectionBlock
# Create an IO object for synchronous TL1 protocol
hostname = "device.example.net"
io = SyncTL1Input(hostname, port=3082)
# Alternative, use asynchronous (multi-threaded, faster, but has a small change to leave stale processes behind when a crash occurs)
io = AsyncTL1Input(hostname, port=3082)
# optional set properties of TL1 behaviour
io.setDefaultTimeout(10)
io.hasecho = True
# optionally log all TL1 to a file for debugging later
io.setLogFile("path_to_logfile.log"):
# start() calls connect(), login(), and authorize()
io.username = 'johndoe'
io.password = '<PASSWORD>'
io.start()
# send a command and wait for a result:
resultlines = io.command("rtrv-cfg-fiber::all:ctag;")
parseResult(resultlines)
# Asynchronous alternative:
io.callbackCommand("rtrv-cfg-fiber::all:ctag;", parseResult)
# parseResult must be a function or method in the form function(resultlines) or function(resultlines, status)
# Set handler for autonomous messages
io.setAutonomousCallback(handleMessage)
# It is possible to set different callback functions based on the type ("auto", "critical", "major" or "minor"):
io.setAutonomousCallback(handleCriticalMessage, "critical")
# stop() calls deauthorize(), disconnect(), and closeLogFile()
io.stop()
def parseResult(resultlines):
for line in resultlines:
# line looks like:
# "GGN:PORTID=20064,PORTNAME=to 5530-stack #4,PORTDIR=output,PORTHEALTH=good,PORTCAT=nor,PORTPRIV=0x1"
# First turn the line into a dictionary
parts = line.split(":")
properties = ParseSectionBlock(parts[1])
# properties now looks like: {PORTID:"10064",IPORTNAME:"from 5530-stack #4",PORTDIR:"input",...}
# store the result or do something useful...
# While writing your parseResult() function, you may want to use an emulator that reads the TL1 that you previously stored in a log file:
logfile = "path_to_logfile.log"
io = TL1EmulatorInput(logfile)
"""
def ParseSectionBlock(block):
    """Convert a TL1 block consisting of multiple sections to a dictionary.

    Turns a string like ``'var1=value,var2=value,var3=value'`` into a dict
    keyed by the lower-cased parameter names.  Quoted values are matched,
    although the caller must strip the quote characters itself.
    """
    name_re = '[a-zA-Z0-9]+'
    safe_char = '[^";:,]'
    qsafe_char = '[^"]'
    # A value is either quoted -- plain (") or escaped (\") quotes around
    # any non-quote characters -- or a run of unquoted "safe" characters.
    value_re = r'(?:"|\\") %s * (?:"|\\") | %s + ' % (qsafe_char, safe_char)
    param_re = r' (?: %s ) = (?: %s )? ' % (name_re, value_re)
    result = {}
    for match in re.findall(param_re, block, re.VERBOSE):
        (key, val) = match.split("=")
        # Drop a leading/trailing escaped quote left over from \"...\" values.
        val = re.sub(r'(^\\"|\\"$)', '', val)
        result[key.lower()] = val
    return result
class TL1IOInput(base.BaseIOInput):
    """Abstract class. Create an object, log in to a hostname (or filename or URL) and return a device object,
    or otherwise sets RDF Objects. Prompt must be a string (not a regexp)"""
    terminal = None # instance of Telnet object
    hostname = "" # hostname to connect to
    port = 3082 # TL1-RAW port
    hasecho = False # does sending a command returns an echo?
    def __init__(self, hostname, port=None):
        # Keep the class-level default port unless an explicit one is given.
        self.hostname = hostname
        if port != None:
            self.port = port
    def getTarget(self):
        """Return a human-readable identifier of the I/O object. For example, the hostname of the filename"""
        return self.hostname
    def connect(self):
        """Open the telnet connection and discard any initial garbage output."""
        try:
            self.terminal = telnetlib.Telnet(self.hostname,self.port)
        except socket.error:
            raise exceptions.NetworkException("Problem connecting to host ('telnet %s %d')" % (self.hostname, self.port))
        # Clear input log. The Glimmerglass gives garbage a short while after the connection is established.
        # [62;1"p > [?4l [?5l [?7h [?8h [1;50r [50;1H [50;0H [4l <--- garbage
        time.sleep(0.01)
        self.writetolog(self.terminal.read_very_eager(), input=True)
    def disconnect(self):
        """Close the telnet session, if one is open (idempotent)."""
        if self.terminal:
            self.terminal.close()
            self.terminal = None
    def sendcommand(self, string):
        """writes a command as-is to the I/O. May call writetolog().
        If you call sendcommand(), you must also call readmessage() at some point in time, to avoid
        stale results."""
        # NOTE(review): strings are written to telnetlib directly -- this is
        # Python-2-era code; on Python 3 telnetlib requires bytes. Confirm
        # the target interpreter before reuse.
        self.writetolog(string, input=True)
        logger = logging.getLogger("protocols")
        logger.debug("Sending command %s" % (repr(string)))
        #self.acquireIOlock()
        self.terminal.write(string)
        #self.releaseIOlock()
        if self.hasecho:
            # Devices with echo send the command back; consume it so it is
            # not mistaken for the reply. Match on the first 5 characters.
            expectstring = string[0:5]
            echostring = self.terminal.read_until(expectstring, timeout=self.timeout)
            echostring += self.terminal.read_until("\r\n", timeout=self.timeout)
            if (expectstring not in echostring):
                logger.error("Did not receive echo of command %s, but got %s." % (repr(string), repr(echostring)))
                # raise exceptions.TimeOut("Did not receive echo of command %s, but got %s." % (repr(string), repr(echostring)))
            self.writetolog(echostring.replace("\r\n", "\n"), output=True)
    def readmessage(self, timeout):
        """Reads text from the terminal up to the next terminator. Does return the string as-is,
        without checking validity. May call writetolog()."""
        logger = logging.getLogger("protocols")
        endtime = time.time() + timeout
        #self.acquireIOlock()
        resultString = self.terminal.read_until(self.terminator, timeout=timeout+1);
        if self.terminator not in resultString:
            logger.error("Did not receive termination string %s in TL1 result %s." % (repr(self.terminator), repr(resultString)))
            raise exceptions.TimeOut("Did not receive termination string %s in %d seconds in TL1 result %s." % (repr(self.terminator), timeout+1, repr(resultString)))
        #self.releaseIOlock()
        if len(resultString) > 0:
            self.writetolog(resultString, output=True)
        if not resultString.endswith(self.terminator):
            # Data arrived after the terminator (or the terminator never
            # came); treat either case as a timeout.
            if len(resultString) > 0:
                logger.debug("Could not find terminator %s in data %s" % (repr(self.terminator), repr(resultString)))
            raise exceptions.TimeOut("no response %s in %s from %s in %d seconds (timeout=%d sec)." % (repr(self.terminator), repr(resultString), self.hostname, time.time()-endtime+timeout, timeout));
        logger.debug("Received %d bytes of data" % len(resultString))
        return resultString
class TL1LanguageInput(base.BaseLangInput):
    """Language layer: knowledge about the format of TL1 input and output
    messages, as well as autonomous messages. Automatically sets a unique
    ctag on each outgoing command."""
    ctag = 1  # identifier to track commands; incremented for every command sent
    terminator = "\r\n;"
    prompt = ""
    delimiter = "\r\n;"
    ignorecase = True  # False = case sensitive; True = case insensitive: makes all commands uppercase.
    def authorize(self):
        """Log in with an act-user command; raise NetworkException on failure."""
        command = "act-user::%s:ctag::%s;" % (self.username, self.password)
        try:
            self.send_and_receive(command, self.timeout)
        except exceptions.CommandFailed:
            # disconnect, but not logout: handled upstream
            # self.disconnect()
            raise exceptions.NetworkException("Password failed when connecting to %s@%s" % (self.username, self.hostname))
    def deauthorize(self):
        """Log out with canc-user; malformed replies are deliberately ignored."""
        try:
            self.send_and_receive("canc-user::%s:ctag;" % (self.username), self.timeout)
        except exceptions.MalformedIO:
            # We are not checking that the logout worked: at this point we
            # are not going to do anything further with this connection.
            pass
    def setPrompt(self, prompt):
        """Set the device prompt and recompute the full message delimiter."""
        if prompt:
            self.prompt = "\r\n" + prompt
        else:
            self.prompt = ""
        self.delimiter = self.terminator + self.prompt
        logger = logging.getLogger("protocols")
        logger.debug("Set delimiter to %s" % repr(self.delimiter))
    def statusOK(self, status, command=""):
        """Raise CommandFailed unless the parsed status is COMPLD.

        ``status`` is the [responsetype, status, comments] triple produced
        by parseMessage().
        """
        if status[1] != "COMPLD":
            raise exceptions.CommandFailed("Command %s failed: status=%s, reason: %s" % (command, status[1], status[2]))
    def makeCommand(self, command):
        """Take a command and turn it into a string ready to send to the device.

        Injects a fresh ctag as the fourth ':'-separated field and appends the
        terminating ';'. Returns a tuple (identifier, commandstring); the
        identifier is the ctag as a string.

        Bug fix: the original code joined the command a second time after the
        try block, re-joining the already-joined string character by character
        and producing a garbled command.
        """
        self.acquireMemLock()
        ctag = self.ctag
        self.ctag += 1
        self.releaseMemLock()
        if (len(command) > 0) and (command[-1] == ";"):
            command = command[:-1]
        command = command.split(":")
        if self.ignorecase:
            command[0] = command[0].upper()
        try:
            command[3] = str(ctag)  # inject the fresh ctag
        except IndexError:
            raise exceptions.MalformedIO("Invalid TL1 command given. The fourth (ctag) parameter MUST be present. E.g.: ACT-USER:::ctag;")
        command = ":".join(command) + ";"
        return (str(ctag), command + "\n")
    def parseMessage(self, resultString):
        """Takes a message, and parses it into a triple (resultlines, identifier, status).
        The resultlines is an array of result lines (e.g.
        ['10.1a.3:NOP,NONE,NONE:INOPTDEGR=-15.00,INOPTCRIT=-18.0']), the identifier is the
        ctag. The status is a 3-item list [type, status, comment] with type 'M' or 'A',
        status "DENY" or "COMPLD", and comment whatever string was found between /* and */.
        May raise a ParsingError in case the output can't be parsed, but does not
        raise an exception if the status is unsuccessful."""
        logger = logging.getLogger("protocols")
        # The result should start with a header line (2,3,4), and result lines (5,6):
        #                                                            1
        #    BeautyCees 07-03-13 14:52:28                            2
        # M  123 COMPLD         (normal response)                    3
        # A  123 REPT DBCHG EVT SECU IDVN  (automatic message)       3
        # *C 123 REPT ALM CRS   (critical alarm)                     3
        # ** 123 REPT ALM ENV DBCHG EVT SECU  (major alarm)          3
        # *^ 123 REPT ALM       (minor alarm)                        3
        #    PLNA    (error code)                                    4
        #    /* Here is a comment. */                                5
        #    "10.3a.1-10.3a.2:SRCPORT=10.3a.1,DSTPORT=10.3a.2"       6
        #    "10.2a.7-10.3a.5:SRCPORT=10.2a.7,DSTPORT=10.3a.5"       6
        # return lines formated as:
        #    "resultstring, possible with \"quotes\""   (line type 5)
        # note that the header lines (1,2,3) can be repeated in the list of resultsline (type 5)
        identifierRE = re.compile(r'^\s+(\w+) (\d\d)-(\d\d)-(\d\d) (\d\d):(\d\d):(\d\d)$')
        statusRE = re.compile(r'^([MA\*][\*C\^]?)\s+(\S+)\s([\w ]+)$')
        moreStatusRE = re.compile(r'^\s+([\w ]+)$')
        commentRE = re.compile(r'^\s+/\*(.*)\*/')
        resultRE = re.compile(r'^\s+"{0,1}([^\n]*)[,"]$')
        resultLines = resultString.split('\r\n')
        commentlines = []
        resultlines = []
        responsetype = None
        status = None
        ctag = None
        skiplines = True  # only store result and comment lines after a valid status line
        # Classify each line against the patterns above, in priority order.
        for line in resultLines:
            statusmatch = statusRE.match(line)
            morestatusmatch = moreStatusRE.match(line)
            identifiermatch = identifierRE.match(line)
            commentmatch = commentRE.match(line)
            resultmatch = resultRE.match(line)
            if statusmatch:
                if ctag == None:
                    responsetype = statusmatch.group(1)
                    ctag = statusmatch.group(2)
                    status = statusmatch.group(3)  # warning: may be more than one word! (e.g. "COMPLD" or "DENY" or "REPT ALM CRS")
                    skiplines = False
                elif ctag == statusmatch.group(2):
                    skiplines = False
                else:
                    logger.warning("Ignoring TL1 output with ctag %s, since the output of ctag %s is not finished (we can only handle output one-by-one)." % (statusmatch.group(2), ctag))
                    skiplines = True
            elif morestatusmatch:
                status = status + " " + morestatusmatch.group(1)  # warning: may be more than one word!
            elif resultmatch:
                match = resultmatch.group(1)
                if skiplines:
                    if ctag == None:
                        logger.error("Haven't receive a valid status line yet. Thus skip TL1 result line %s" % repr(match))
                    else:
                        logger.warning("Skip TL1 result line %s" % repr(match))
                else:
                    resultlines.append(match)
            elif commentmatch:
                match = commentmatch.group(1)
                if skiplines:
                    logger.warning("Skip TL1 comment line %s" % repr(match))
                else:
                    commentlines.append(match.strip())
            elif identifiermatch:
                pass
            elif line == "":  # this instruction must come before the line[0] == ">" checks
                pass
            elif line[0] == ">":
                pass
            elif line[0] == "<":
                pass
            elif line == ";":
                skiplines = True  # termination line
            else:
                logger.error("Skip unknown TL1 line %s" % repr(line))
        if ctag == None:
            raise exceptions.MalformedIO("Could not find valid response header (e.g. 'M 123 COMPLD') in response %s" % repr(resultString))
        # NOTE: we actually like to include the associated command that was send out, but we don't have that information.
        # However, experience showed that command is often unrelated to the alarm. So we leave it as it is.
        comment = " ".join(commentlines)  # paste them together
        # The comment line typically contains the error message
        status = [responsetype, status, comment]
        logger.debug("Received %d lines of data, identifier=%s, status=%s" % (len(resultlines), ctag, status))
        return (resultlines, ctag, status)
    def isAutonomousType(self, identifier, status):
        """Given the identifier and status, decide if the message is autonomous,
        and if so, of which type. Returns False for regular (non-autonomous)
        messages, otherwise one of "auto", "critical", "major", "minor"."""
        responsetype = status[0]
        # (responsetype, status, comment) = status
        if responsetype == 'M':
            return False  # regular message
        elif (responsetype[0] == "A"):
            return "auto"  # autonomous message or no alarm
        elif (responsetype == "*C"):
            return "critical"  # critical alarm
        elif (responsetype == "**"):
            return "major"  # major alarm
        elif (responsetype == "*^"):
            return "minor"  # minor alarm
        else:
            raise exceptions.MalformedIO("Received an unknown message type '%s' (only understand M, A and *) with identifier %s from %s" % (responsetype, identifier, self.getTarget()))
# Note: first parent takes precedence over other parent classes.
class SyncTL1Input(TL1IOInput, TL1LanguageInput, base.BaseSyncInput):
    # Blocking TL1 session: telnet I/O + TL1 grammar + synchronous dispatch.
    pass
# Note: first parent takes precedence over other parent classes.
class AsyncTL1Input(TL1IOInput, TL1LanguageInput, base.BaseAsyncInput):
    # Threaded TL1 session: same I/O and grammar, asynchronous dispatch.
    pass
class TL1EmulatorInput(emulate.FileIOInput, TL1LanguageInput, base.BaseSyncInput):
    """Emulates a TL1 input, but in reality, reads data from a file.
    This class overrides the regular makeCommand() and parseMessage() methods
    and always sets the ctag to an empty string."""
    ignorecredentials = True # if true, sets username and password to none for act-user and canc-user
    def makeCommand(self, command):
        """TL1 emulation: blanks the ctag so commands match the recorded log.
        The identifier returned is None."""
        if command[-1] == ";":
            command = command[:-1]
        command = command.split(":")
        try:
            command[3] = "" # set ctag to empty
        except IndexError:
            raise exceptions.MalformedIO("Invalid TL1 command given. The fourth (ctag) parameter MUST be present. E.g.: ACT-USER:::ctag;")
        if self.ignorecase:
            command[0] = command[0].upper()
        if self.ignorecredentials and (command[0] in ["ACT-USER", "CANC-USER"]):
            # e.g. "act-user::username:ctag::password"
            # Blank out the password field so credentials in the log are not
            # compared. (Username removal is intentionally disabled below.)
            # if len(command) > 2:
            #     command[2] = "" # remove username
            if len(command) > 5:
                command[5] = "" # remove password
        command = ":".join(command)+";"
        return (None, command+"\n")
    def parseMessage(self, resultString):
        """Parse with the standard TL1 grammar, but blank the ctag so replies
        match the emulated (ctag-less) commands."""
        (responselines, ctag, status) = TL1LanguageInput.parseMessage(self, resultString)
        return (responselines, None, status)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import web, json
from config import setting
import app_helper,lbs
db = setting.db_web  # MongoDB handle shared via the web settings module
url = ('/app/locate_shop')  # URL route registered for this web.py handler
# Locate the shop that should serve a customer's position.
class handler:
    def POST(self):
        """Resolve the caller's position to the shop that serves it.

        Expects form fields app_id/type/data/sign. ``type`` selects how
        ``data`` is interpreted: 'NAME' geocodes an address string, while
        'GPS' (and 'IP') parse ``data`` as a "lat,lng" pair. Returns a JSON
        payload with the matched shop, or a negative ``ret`` error code.
        """
        web.header('Content-Type', 'application/json')
        param = web.input(app_id='', type='', data='', sign='')
        if '' in (param.app_id, param.type, param.sign):
            return json.dumps({'ret' : -2, 'msg' : '参数错误'})
        if param.type not in ['IP', 'GPS', 'NAME']:
            return json.dumps({'ret' : -4, 'msg' : 'type参数错误'})
        # Verify the request signature.
        md5_str = app_helper.generate_sign([param.app_id, param.type, param.data])
        if md5_str!=param.sign:
            return json.dumps({'ret' : -1, 'msg' : '签名验证错误'})
        # Prepare the user's coordinates.
        if param.type=='NAME':
            ret, loc = lbs.addr_to_loc(param['data'].encode('utf-8'))
            print ret, loc
            if ret<0:
                # Retry once -- the geocoding call can fail transiently.
                ret, loc = lbs.addr_to_loc(param['data'].encode('utf-8'))
                print ret, loc
                if ret<0:
                    loc = {'lat': 0, 'lng': 0}
        else:
            # NOTE(review): type 'IP' also falls through here and is parsed
            # as a "lat,lng" pair -- confirm this is intended.
            loc0 = param.data.split(',') # e.g. 31.20474193,121.620708272
            loc = {'lat': float(loc0[0]), 'lng': float(loc0[1])}
        # Find the shop serving these coordinates.
        min_d = 999999
        min_shop = None # nearest circular-radius match (currently disabled below)
        poly_shop = None # polygon match
        db_shop = db.base_shop.find({'type':{'$in':['chain','store','dark']}})
        for s in db_shop:
            if s.get('app_shop', 1)==0: # skip shops that do not sell online
                continue
            #d=lbs.geo_distance(s['loc']['lat'],s['loc']['lng'],loc['lat'],loc['lng'])
            #print 'd = ', d, min_d
            #if d<s.get('radius', 2) and d<min_d: # default radius: 2 km
            #    min_d=d
            #    min_shop=(s['_id'],s['name'],s['address'])
            # Point-in-polygon check over the shop's delivery area.
            poly = s.get('poly_xy', [])
            if len(poly)==0: # no polygon data for this shop
                print "缺少多边形数据!"
                continue
            if lbs.wn_PnPoly((loc['lat'],loc['lng']), poly)!=0:
                print 'bingo! poly_shop'
                poly_shop=(s['_id'],s['name'],s['address'])
                break
        if poly_shop==None and min_shop==None:
            print '不在配送范围内'
            return json.dumps({'ret' : -6, 'msg' : '不在配送范围内'})
        if poly_shop==None:
            # Return the nearest shop (unreachable while the radius check
            # above stays commented out).
            print 'choose:', min_shop[1].encode('utf-8')
            return json.dumps({'ret' : 0, 'data' : {
                'shop_id' : str(min_shop[0]),
                'shop_name' : min_shop[1],
                'address' : min_shop[2],
            }})
        else:
            # Return the polygon-matched shop.
            print 'choose:', poly_shop[1].encode('utf-8')
            return json.dumps({'ret' : 0, 'data' : {
                'shop_id' : str(poly_shop[0]),
                'shop_name' : poly_shop[1],
                'address' : poly_shop[2],
            }})
|
import os
import pytest
import yaml
from jina import __default_executor__
from jina.serve.executors import BaseExecutor
from jina.helper import expand_dict
from jina.helper import expand_env_var
from jina.jaml import JAML
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='function')
def test_workspace(tmpdir):
    """Point JINA_TEST_JOINT at a per-test temp dir and clean up afterwards."""
    os.environ['JINA_TEST_JOINT'] = str(tmpdir)
    yield os.environ['JINA_TEST_JOINT']
    del os.environ['JINA_TEST_JOINT']
def test_yaml_expand():
    """Quoted `{}` stays a dict/string; only existing env vars are expanded."""
    path = os.path.join(cur_dir, 'yaml/test-expand.yml')
    with open(path) as fp:
        expanded = expand_dict(JAML.load(fp))
    # the same invariants hold at the top level and inside the nested node
    for node in (expanded, expanded['nest']):
        assert node['quote_dict'] == {}
        assert node['quote_string'].startswith('{')
        assert node['quote_string'].endswith('}')
    assert expanded['exist_env'] != '$PATH'
    assert expanded['non_exist_env'] == '$JINA_WHATEVER_ENV'
def test_yaml_expand2():
    """Env vars and `this.*` references inside metas are substituted."""
    with open(os.path.join(cur_dir, 'yaml/test-expand2.yml')) as fp:
        loaded = JAML.load(fp)
    os.environ['ENV1'] = 'a'
    expanded = expand_dict(loaded)
    metas0 = expanded['components'][0]['metas']
    metas1 = expanded['components'][1]['metas']
    assert metas0['bad_var'] == 'real-compound'
    assert metas1['bad_var'] == 2
    assert metas1['float_var'] == 0.232
    assert metas1['mixed'] == '0.232-2-real-compound'
    assert metas1['mixed_env'] == '0.232-a'
    assert metas1['name_shortcut'] == 'test_numpy'
def test_yaml_expand3():
    """A value with no substitution markers expands to itself."""
    path = os.path.join(cur_dir, 'yaml/test-expand3.yml')
    with open(path) as fp:
        assert expand_dict(JAML.load(fp))['max_snapshot'] == 0
def test_yaml_expand4():
    """substitute=True pulls values from env vars and an explicit context dict."""
    os.environ['ENV1'] = 'a'
    os.environ['ENV2'] = '{"1": "2"}'
    with open(os.path.join(cur_dir, 'yaml/test-expand4.yml')) as fp:
        b = JAML.load(
            fp,
            substitute=True,
            context={'context_var': 3.14, 'context_var2': 'hello-world'},
        )
    metas = b['components'][1]['metas']
    assert b['components'][0]['metas']['bad_var'] == 'real-compound'
    assert metas['bad_var'] == 2
    assert metas['float_var'] == 0.232
    assert metas['mixed'] == '0.232-2-real-compound'
    assert metas['name_shortcut'] == 'test_numpy'
    assert metas['mixed_env'] == '0.232-a'
    assert metas['random_id'] == 3.14
    assert metas['config_str'] == 'hello-world'
    # the original repeated this assertion twice; the duplicate was removed
    assert metas['bracket_env'] == '{"1": "2"}'
    assert metas['context_dot'] == 3.14
def test_attr_dict():
    """Entries written directly into __dict__ are reachable as attributes."""
    class Holder:
        pass

    holder = Holder()
    holder.__dict__['sda'] = 1
    holder.__dict__['components'] = []
    assert holder.sda == 1
    assert isinstance(holder.components, list)
def test_class_yaml():
    """A class registered with JAML is constructible via its !Tag syntax."""
    class DummyClass:
        pass

    JAML.register(DummyClass)
    loaded = JAML.load('!DummyClass {}')
    assert type(loaded) == DummyClass
def test_load_external_fail():
    """Loading an executor whose tag was never registered raises a YAML error."""
    with pytest.raises(yaml.constructor.ConstructorError):
        BaseExecutor.load_config('yaml/dummy_ext_exec.yml')
def test_load_external_success():
    """An externally defined executor module can be resolved and instantiated."""
    with BaseExecutor.load_config('yaml/dummy_ext_exec_success.yml') as executor:
        assert executor.__class__.__name__ == 'DummyExternalIndexer'
def test_expand_env():
    """expand_env_var substitutes at least one of the referenced variables."""
    raw = '$PATH-${AA}'
    assert expand_env_var(raw) != raw
def test_encoder_name_env_replace():
    """A metas.name placeholder is filled from os.environ when loading."""
    os.environ['BE_TEST_NAME'] = 'hello123'
    with BaseExecutor.load_config('yaml/test-encoder-env.yml') as encoder:
        assert encoder.metas.name == 'hello123'
def test_encoder_name_dict_replace():
    """A context dict substitutes placeholders, including `this.*` chains."""
    context = {'BE_TEST_NAME': 'hello123'}
    with BaseExecutor.load_config('yaml/test-encoder-env.yml', context=context) as encoder:
        assert encoder.metas.name == 'hello123'
        assert encoder.metas.workspace == 'hello123'
def test_encoder_inject_config_via_kwargs():
    """metas passed as kwargs override values from the YAML config."""
    overrides = {'shard_id': 345}
    with BaseExecutor.load_config('yaml/test-encoder-env.yml', metas=overrides) as encoder:
        assert encoder.metas.shard_id == 345
def test_load_from_dict():
    """A config supplied as a plain dict honours context substitution.

    Equivalent YAML:
        !BaseEncoder
        metas:
          name: ${{BE_TEST_NAME}}
          workspace: ${{this.name}}
    """
    config = {
        'jtype': __default_executor__,
        'metas': {
            'name': '${{ BE_TEST_NAME }}',
            'workspace': '${{this.name}}',
        },
    }
    context = {'BE_TEST_NAME': 'hello123'}
    executor = BaseExecutor.load_config(config, context=context)
    assert isinstance(executor, BaseExecutor)
    assert executor.metas.name == 'hello123'
|
<filename>src/pudl/convert/censusdp1tract_to_sqlite.py
"""
Convert the US Census DP1 ESRI GeoDatabase into an SQLite Database.
This is a thin wrapper around the GDAL ogr2ogr command line tool. We use it
to convert the Census DP1 data which is distributed as an ESRI GeoDB into an
SQLite DB. The module provides ogr2ogr with the Census DP 1 data from the
PUDL datastore, and directs it to be output into the user's SQLite directory
alongside our other SQLite Databases (ferc1.sqlite and pudl.sqlite)
Note that the ogr2ogr command line utility must be available on the user's
system for this to work. This tool is part of the ``pudl-dev`` conda
environment, but if you are using PUDL outside of the conda environment, you
will need to install ogr2ogr separately. On Debian Linux based systems such
as Ubuntu it can be installed with ``sudo apt-get install gdal-bin`` (which
is what we do in our CI setup and Docker images.)
"""
import argparse
import logging
import os
import subprocess # nosec: B404
import sys
from pathlib import Path
from tempfile import TemporaryDirectory
import coloredlogs
import pudl
from pudl.workspace.datastore import Datastore
logger = logging.getLogger(__name__)
def censusdp1tract_to_sqlite(pudl_settings=None, year=2010):
    """
    Use GDAL's ogr2ogr utility to convert the Census DP1 GeoDB to an SQLite DB.
    The Census DP1 GeoDB is read from the datastore, where it is stored as a
    zipped archive. This archive is unzipped into a temporary directory so
    that ogr2ogr can operate on the ESRI GeoDB, and convert it to SQLite. The
    resulting SQLite DB file is put in the PUDL output directory alongside the
    ferc1 and pudl SQLite databases.
    Args:
        pudl_settings (dict): A PUDL settings dictionary.
        year (int): Year of Census data to extract (currently must be 2010)
    Returns:
        None
    """
    if pudl_settings is None:
        pudl_settings = pudl.workspace.setup.get_defaults()
    ds = Datastore(local_cache_path=pudl_settings["data_dir"])
    # If we're in a conda environment, use the version of ogr2ogr that has been
    # installed by conda. Otherwise, try and use a system installed version
    # at /usr/bin/ogr2ogr This allows us to avoid simply running whatever
    # program happens to be in the user's path and named ogr2ogr. This is a
    # fragile solution that will not work on all platforms, but should cover
    # conda environments, Docker, and continuous integration on GitHub.
    ogr2ogr = os.environ.get("CONDA_PREFIX", "/usr") + "/bin/ogr2ogr"
    # Extract the zipped GeoDB archive from the Datastore into a temporary
    # directory so that ogr2ogr can operate on it. Output the resulting SQLite
    # database into the user's PUDL workspace. We do not need to keep the
    # unzipped GeoDB around after this conversion. Using a temporary directory
    # makes the cleanup automatic.
    with TemporaryDirectory() as tmpdir:
        # Use datastore to grab the Census DP1 zipfile
        tmpdir_path = Path(tmpdir)
        zip_ref = ds.get_zipfile_resource("censusdp1tract", year=year)
        # The GeoDB is the directory named by the archive's first entry.
        extract_root = tmpdir_path / Path(zip_ref.filelist[0].filename)
        out_path = Path(pudl_settings["sqlite_dir"]) / "censusdp1tract.sqlite"
        logger.info("Extracting the Census DP1 GeoDB to %s", out_path)
        zip_ref.extractall(tmpdir_path)
        logger.info("extract_root = %s", extract_root)
        logger.info("out_path = %s", out_path)
        subprocess.run(  # nosec: B603 Trying to use absolute paths.
            [ogr2ogr, str(out_path), str(extract_root)],
            check=True
        )
def parse_command_line(argv):
    """Parse command line arguments; see the -h option.

    Args:
        argv (list): command line arguments, including the caller filename.

    Returns:
        argparse.Namespace: the parsed arguments (currently none are defined,
        but -h/--help still works).
    """
    parser = argparse.ArgumentParser(description=__doc__)
    return parser.parse_args(argv[1:])
def main():
    """Convert the Census DP1 GeoDatabase into an SQLite Database."""
    fmt = '%(asctime)s [%(levelname)8s] %(name)s:%(lineno)s %(message)s'
    coloredlogs.install(fmt=fmt, level='INFO', logger=logger)
    # No CLI options exist yet; parse anyway so -h/--help is available.
    parse_command_line(sys.argv)
    censusdp1tract_to_sqlite()
# Script entry point: exit with main()'s return code (None maps to 0).
if __name__ == '__main__':
    sys.exit(main())
|
<filename>common/candle/__init__.py<gh_stars>1-10
from __future__ import absolute_import
#__version__ = '0.0.0'
#import from data_utils
from data_utils import load_csv_data
from data_utils import load_Xy_one_hot_data2
from data_utils import load_Xy_data_noheader
from data_utils import drop_impute_and_scale_dataframe
from data_utils import discretize_dataframe
from data_utils import discretize_array
from data_utils import lookup
#import from file_utils
from file_utils import get_file
#import from default_utils
from default_utils import ArgumentStruct
from default_utils import Benchmark
from default_utils import str2bool
from default_utils import finalize_parameters
from default_utils import fetch_file
from default_utils import verify_path
from default_utils import keras_default_config
from default_utils import set_up_logger
from default_utils import check_flag_conflicts
from generic_utils import Progbar
# import from viz_utils
from viz_utils import plot_history
from viz_utils import plot_scatter
from viz_utils import plot_density_observed_vs_predicted
from viz_utils import plot_2d_density_sigma_vs_error
from viz_utils import plot_histogram_error_per_sigma
from viz_utils import plot_calibration_and_errors
from viz_utils import plot_percentile_predictions
# import from uq_utils
from uq_utils import compute_statistics_homoscedastic
from uq_utils import compute_statistics_homoscedastic_all
from uq_utils import compute_statistics_heteroscedastic
from uq_utils import compute_statistics_quantile
from uq_utils import split_data_for_empirical_calibration
from uq_utils import compute_empirical_calibration
from uq_utils import bining_for_calibration
from uq_utils import computation_of_valid_calibration_interval
from uq_utils import applying_calibration
from uq_utils import overprediction_check
from uq_utils import generate_index_distribution
# import from profiling_utils
from profiling_utils import start_profiling
from profiling_utils import stop_profiling
# import from data_preprocessing_utils
from data_preprocessing_utils import quantile_normalization
from data_preprocessing_utils import generate_cross_validation_partition
# feature selection
from feature_selection_utils import select_features_by_missing_values
from feature_selection_utils import select_features_by_variation
from feature_selection_utils import select_decorrelated_features
# P1-specific
from P1_utils import coxen_single_drug_gene_selection
from P1_utils import coxen_multi_drug_gene_selection
from P1_utils import generate_gene_set_data
from P1_utils import combat_batch_effect_removal
# import benchmark-dependent utils
import sys

# Re-export framework-specific helpers.  The caller must import keras or
# torch *before* importing candle; whichever backend is already present in
# sys.modules decides which set of utils is exposed.  If neither has been
# imported, candle refuses to load.
if 'keras' in sys.modules:
    print ('Importing candle utils for keras')
    # import from keras_utils
    # from keras_utils import dense
    # from keras_utils import add_dense
    from keras_utils import build_initializer
    from keras_utils import build_optimizer
    from keras_utils import get_function
    from keras_utils import set_seed
    from keras_utils import set_parallelism_threads
    from keras_utils import PermanentDropout
    from keras_utils import register_permanent_dropout
    from keras_utils import LoggingCallback
    from keras_utils import MultiGPUCheckpoint
    from keras_utils import r2
    from keras_utils import mae
    from keras_utils import mse
    from viz_utils import plot_metrics
    from solr_keras import CandleRemoteMonitor
    from solr_keras import compute_trainable_params
    from solr_keras import TerminateOnTimeOut
    # uncertainty-quantification helpers (abstention model support)
    from uq_keras_utils import abstention_variable_initialization
    from uq_keras_utils import abstention_loss
    from uq_keras_utils import abs_acc
    from uq_keras_utils import acc_class1
    from uq_keras_utils import abs_acc_class1
    from uq_keras_utils import modify_labels
    from uq_keras_utils import add_model_output
    from uq_keras_utils import AbstentionAdapt_Callback
    # cyclic learning-rate helpers
    from clr_keras_utils import CyclicLR
    from clr_keras_utils import clr_set_args
    from clr_keras_utils import clr_callback
elif 'torch' in sys.modules:
    print ('Importing candle utils for pytorch')
    from pytorch_utils import set_seed
    from pytorch_utils import build_optimizer
    from pytorch_utils import build_activation
    from pytorch_utils import get_function
    from pytorch_utils import initialize
    from pytorch_utils import xent
    from pytorch_utils import mse
    from pytorch_utils import set_parallelism_threads # for compatibility
else:
    # Fail loudly rather than exposing a half-initialized package.
    raise Exception('No backend has been specified.')
|
import numpy
import random
import matplotlib.pyplot as plt
from source.matplotlib_player import Player
import sys
from PyQt5.QtWidgets import QMessageBox, QApplication
class Cell:
    """A single board cell.

    NOTE(review): apparently unused by GameOfLife, which stores the board as
    a numpy array instead - confirm before removing.
    """

    def __init__(self):
        # Every cell starts dead.
        self.state = 0
class GameOfLife:
    """Two-player (red vs blue) variant of Conway's Game of Life.

    The board is a 2-D numpy int array whose cells hold 1 (red), -1 (blue)
    or 0 (dead).  Two 4x4 seed patterns are read from text files; the first
    is placed near one corner for red, the second is rotated 180 degrees,
    negated and placed near the opposite corner for blue.
    """

    def __init__(self, turns=10, dimensions=(16, 16),
                 first_seed=None, second_seed=None,
                 show_plot=True, forward=True, speed=5):
        """Build the initial board from the two seed files.

        Args:
            turns: number of generations the player widget can step through.
            dimensions: (dim_x, dim_y) size of the board.
            first_seed: path to the red player's 4x4 seed file.
            second_seed: path to the blue player's 4x4 seed file.
            show_plot: show an interactive window instead of saving a GIF.
            forward: stored for the player widget; not used by game logic.
            speed: animation speed factor (frame interval is 250/speed ms).
        """
        plt.rcParams['toolbar'] = 'None'
        self.dim_x = dimensions[0]
        self.dim_y = dimensions[1]
        self.old_board = numpy.zeros((self.dim_x, self.dim_y), dtype='int')
        # Mirror the blue seed in both axes (a 180 degree rotation).
        state = self.state_from_file(second_seed)
        reversed_state = []
        for line in reversed(state):
            reversed_state.append(list(reversed(line)))
        state = self.state_from_file(first_seed)
        # Place the red seed at offset (2, 2) and the negated blue seed at
        # (10, 10).  The original code repeated the blue placement in a
        # second, identical nested loop; that no-op duplicate was removed.
        # NOTE(review): the fixed +8 offsets assume the default 16x16 board -
        # confirm before using other dimensions.
        for i in range(0, 4):
            for j in range(0, 4):
                self.old_board[i+2][j+2] = state[i][j]
                self.old_board[i+2+8][j+2+8] = -reversed_state[i][j]
        self.new_board = self.old_board.copy()
        # Matplotlib handles, filled in by play().
        self.mat = None
        self.fig = None
        self.ax = None
        self.turns = turns
        self.speed = speed
        self.winner = 0              # reserved; never updated by current logic
        self.game_resolved = False   # reserved; never updated by current logic
        self.gen = 0
        # History of board states so the player widget can step backwards.
        self.prev_states = []
        self.prev_states.append(self.new_board.copy())
        self.forward = forward
        self.show_plot = show_plot

    def init_random_state(self):
        """Fill the board randomly: ~15% red, ~15% blue, the rest dead."""
        for i in range(0, self.dim_y):
            for j in range(0, self.dim_x):
                rand_state = random.randint(0, 100)
                if rand_state < 15:
                    self.old_board[i][j] = 1
                elif rand_state < 30:
                    self.old_board[i][j] = -1
                else:
                    self.old_board[i][j] = 0

    def state_from_file(self, filename):
        """Parse a seed file: '.' is a dead cell, any other char is alive.

        Returns:
            list[list[int]]: one row per line, values 0 or 1.

        Raises:
            RuntimeError: if the lines are not all the same length.
        """
        board_state = []
        with open(filename, 'r') as f:
            state = f.read()
        state = state.split('\n')
        prev_line_len = len(state[0])
        i = 0
        for line in state:
            if len(line) != prev_line_len:
                raise RuntimeError('Lines not of same length')
            prev_line_len = len(line)
            board_state.append([])
            for char in line:
                if char == '.':
                    board_state[i].append(0)
                else:
                    board_state[i].append(1)
            i += 1
        return board_state

    def neighbours_state(self, i, j):
        """Count live neighbours of cell (i, j) on a wrapping board.

        Indices of -1 wrap automatically via Python negative indexing; indices
        equal to the dimension wrap to 0 explicitly.
        NOTE(review): the bounds use dim_x for the column and dim_y for the
        row while the board is indexed [row][column] - correct only for
        square boards; confirm before using non-square dimensions.

        Returns:
            tuple: (total, red, blue) live-neighbour counts.
        """
        count = 0
        red_count = 0
        blue_count = 0
        for y in [i-1, i, i+1]:
            for x in [j-1, j, j+1]:
                if x == j and y == i:
                    continue
                if x != self.dim_x and y != self.dim_y:
                    cell = self.old_board[y][x]
                elif x == self.dim_x and y != self.dim_y:
                    cell = self.old_board[y][0]
                elif x != self.dim_x and y == self.dim_y:
                    cell = self.old_board[0][x]
                else:
                    cell = self.old_board[0][0]
                if cell == -1:
                    count += 1
                    blue_count += 1
                elif cell == 1:
                    count += 1
                    red_count += 1
        return count, red_count, blue_count

    def new_board_state(self):
        """Compute the next generation into new_board from old_board.

        Standard Conway rules decide life and death; a live cell takes the
        colour of the majority of its live neighbours (on a tie it keeps its
        previous value, which is 0 for a newly born cell).
        """
        for i in range(0, self.dim_y):
            for j in range(0, self.dim_x):
                count, red_count, blue_count = self.neighbours_state(i, j)
                if self.old_board[i][j]:
                    # survival: exactly 2 or 3 live neighbours
                    is_alive = 2 <= count <= 3
                else:
                    # birth: exactly 3 live neighbours
                    is_alive = count == 3
                if is_alive and red_count > blue_count:
                    self.new_board[i][j] = 1
                elif is_alive and blue_count > red_count:
                    self.new_board[i][j] = -1
                elif is_alive:
                    self.new_board[i][j] = self.old_board[i][j]
                else:
                    self.new_board[i][j] = 0

    def update(self, gen):
        """Matplotlib animation callback: display generation *gen*.

        Generations already computed are replayed from prev_states; stepping
        past the recorded history computes and records a new generation.
        """
        if gen == 0:
            gen = 1
        try:
            # Replay a generation we have already computed.
            self.new_board = self.prev_states[gen]
            self.gen = gen
        except IndexError:
            if gen > self.gen:
                if self.gen > 0:
                    self.new_board_state()
                self.prev_states.append(self.new_board.copy())
                self.gen = gen
            elif gen == self.gen or gen == self.gen - 1:
                if self.gen > 0:
                    self.new_board_state()
                self.prev_states.append(self.new_board.copy())
            elif self.gen == 0:
                self.prev_states.append(self.new_board.copy())
            else:
                raise RuntimeError('Problem with generations! '
                                   'self={}, gen={}'.format(self.gen, gen))
        self.mat.set_data(self.new_board)
        # Tally the cells of each colour for the window title.
        unique, counts = numpy.unique(self.new_board, return_counts=True)
        cells = dict(zip(unique, counts))
        self.red = cells.get(1, 0)
        self.blue = cells.get(-1, 0)
        self.ax.set_title('Gen: {} '
                          'Red: {} '
                          'Blue: {}'.format(self.gen, self.red, self.blue))
        self.old_board = self.new_board.copy()
        return [self.mat]

    def play(self):
        """Show the animation interactively, or save it as animation.gif."""
        plt.style.use('classic')
        self.fig, self.ax = plt.subplots()
        self.mat = self.ax.matshow(self.old_board)
        # Create a colourbar only to pin the colour limits, then drop its axes.
        cbaxes = self.fig.add_axes([0.4, 0.6, 0.4, 0.6])
        cbar = plt.colorbar(self.mat, cax=cbaxes)
        # NOTE(review): Colorbar.set_clim is deprecated in newer matplotlib -
        # confirm the pinned version (mat.set_clim is the modern equivalent).
        cbar.set_clim(vmin=-1, vmax=1)
        self.fig.delaxes(self.fig.axes[1])
        ani = Player(self.fig, self.update, max=self.turns+1,
                     interval=int(250/self.speed), save_count=self.turns+1)
        if self.show_plot:
            plt.show()
        else:
            ani.save('animation.gif', writer='imagemagick', fps=12)
# Manual run: 32 turns with the bundled seed patterns.
if __name__ == '__main__':
    import time
    start_time = time.time()
    game = GameOfLife(32,
                      first_seed='../seeds/second.txt',
                      second_seed='../seeds/first.txt',
                      forward=True, speed=9)
    game.play()
    # print('----', round(time.time() - start_time, 6), '----')
|
#!/usr/bin/python3
'''
script for calculating daily usage of each ingredient used in food based on recipes it also combines the sales and staff
meals files into one
'''
import os
import json
from openpyxl import load_workbook, Workbook
def calculate_usage(row):
    """Return *row* as a list extended with one usage value per known ingredient.

    Usage is recipe amount * unit sales for Food items whose stock code has a
    recipe; every other (row, ingredient) combination contributes 0.
    Relies on the module globals `recipes` and `ingredient_names` set in main().
    """
    global recipes, ingredient_names
    category = row[1].value
    # Stock codes arrive as floats (e.g. 123.0); keep only the integer part
    # so they match the string keys of the recipes dict.
    stock_code = str(row[5].value).split('.')[0]
    unit_sales = row[6].value
    extended = list(row)
    if category == 'Food' and stock_code in recipes:
        recipe = recipes[stock_code]
    else:
        recipe = {}
    for name in ingredient_names:
        extended.append(recipe[name] * unit_sales if name in recipe else 0)
    return extended
def append_staff_meal(staff_rows_current, staff_rows_count, staff_rows, ws_out):
    """Append staff-meal rows for one date to ws_out.

    Starts at index *staff_rows_current* and stops after the last row of that
    date.  Returns the index of the next date's first row, or None once the
    sheet is exhausted (matching the original implicit-None behaviour).
    """
    for idx in range(staff_rows_current, staff_rows_count):
        ws_out.append(calculate_usage(staff_rows[idx]))
        try:
            next_date = staff_rows[idx + 1][0].value
        except IndexError:
            return None
        if staff_rows[idx][0].value != next_date:
            return idx + 1
def main():
    """Merge daily product sales and staff meals into one sheet, adding a
    per-ingredient usage column for every ingredient found in the recipes."""
    # base data directory containing all input/output files
    dir_path = '..' + os.sep + '..' + os.sep + 'data' + os.sep + 'peData' + os.sep
    # daily sales path
    daily_sales_path = dir_path + 'daily_product_sales.xlsx'
    # staff meals path
    staff_meals_path = dir_path + 'daily_staff_meals.xlsx'
    # output file path
    out_file_path = dir_path + 'daily_ingredient_usage.xlsx'
    # recipes json path
    recipes_path = dir_path + 'list_recipes.json'
    # load workbooks and work sheets
    # NOTE(review): get_active_sheet()/get_highest_column() and len(ws.rows)
    # rely on an old openpyxl API (modern versions use ws.active / ws.max_column
    # and expose rows as a generator) - confirm the pinned openpyxl version.
    wb_sales = load_workbook(daily_sales_path)
    ws_sales = wb_sales.get_active_sheet()
    wb_staff = load_workbook(staff_meals_path)
    ws_staff = wb_staff.get_active_sheet()
    # create a new workbook to save
    wb_out = Workbook()
    ws_out = wb_out.get_active_sheet()
    # load recipes json; shape assumed {stock_code: {ingredient: amount}} -
    # matches how calculate_usage() indexes it
    global recipes
    with open(recipes_path, encoding='utf-8') as data_file:
        recipes = json.load(data_file)
    # collect the union of ingredient names across all recipes
    global ingredient_names
    ingredient_names = []
    for key, value in recipes.items():
        for k, v in value.items():
            if not k in ingredient_names: ingredient_names.append(k)
    # get rows for each sheet
    sales_rows = ws_sales.rows
    sales_rows_count = len(sales_rows)
    staff_rows = ws_staff.rows
    staff_rows_count = len(staff_rows)
    # copy over header
    ws_out.append(sales_rows[0])
    # add ingredient names to header
    for i in range(len(ingredient_names)):
        ws_out.cell(row = 1, column = ws_out.get_highest_column() + 1).value = ingredient_names[i]
    staff_rows_current = 1
    # For each sales row: append it with usage columns; whenever the date
    # changes, interleave the staff-meal rows belonging to the finished date.
    for i in range(1, sales_rows_count):
        row = calculate_usage(sales_rows[i])
        ws_out.append(row)
        date_current = sales_rows[i][0].value
        try:
            date_next = sales_rows[i+1][0].value
        except IndexError:
            # last sales row: flush all remaining staff-meal rows
            append_staff_meal(staff_rows_current, staff_rows_count, staff_rows, ws_out)
            break
        if date_current != date_next:
            staff_rows_current = append_staff_meal(staff_rows_current, staff_rows_count, staff_rows, ws_out)
    # save final workbook
    wb_out.save(out_file_path)
# Script entry point.
if __name__ == '__main__':
    main()
<filename>bin/py/SecretsManagerLambda.py
from __future__ import print_function
from botocore.exceptions import ClientError
import boto3
import json
import logging
from urllib.request import urlopen, Request, HTTPError, URLError
from urllib.parse import urlencode
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
    """CloudFormation custom-resource entry point for Secrets Manager actions.

    Dispatches `get` / `upsert` SecretActions on Create requests, then always
    posts a response back to the CloudFormation callback URL.
    """
    secret_result = {}
    # Capture SecretPassword (if any) and mask it in the event so the raw
    # value never reaches CloudWatch when the event is logged below.
    secret_pwd = capture_and_mask_password(event)
    logger.info("***** Received event. Full parameters: {}".format(json.dumps(event)))
    # Skeleton CloudFormation response; NoEcho keeps Data out of the console.
    response = {
        "StackId": event["StackId"],
        "RequestId": event["RequestId"],
        "LogicalResourceId": event["LogicalResourceId"],
        "PhysicalResourceId": physical_resource_id(event),
        "Status": "SUCCESS",
        "NoEcho": True,
        "Data": {},
    }
    if event['RequestType'] != 'Create':
        logger.info('***** This is not a CloudFormation Create request - No AWS Secrets Manager actions performed. *****')
    elif event['ResourceProperties']['SecretAction'] == 'get':
        logger.info('***** This is a Cloudwatch Create request - Evaluating specified SecretAction... *****')
        secret_result, response = get_secret_password(event=event, response=response)
    else:
        logger.info('***** This is a Cloudwatch Create request - Evaluating specified SecretAction... *****')
        secret_result, response = create_or_update_secret(event=event, response=response, secret_pwd=secret_pwd)
    respond_to_cloudformation(event=event, response=response)
    return secret_result
# If this is an upsert action and a SecretPassword was provided,
# capture it from the event object and then mask it
def capture_and_mask_password(event):
    """Capture the SecretPassword for `upsert` requests and mask it in *event*.

    Returns:
        The provided password, the sentinel string 'generate' when no
        password was supplied, or None for non-upsert actions.

    Fixes: the original caught bare Exception and planted the placeholder
    '<PASSWORD>' instead of the mask used on the success path; now only the
    expected KeyError is caught and the '********' mask is used consistently.
    """
    if event['ResourceProperties']['SecretAction'] != 'upsert':
        return None
    try:
        secret_pwd = event['ResourceProperties']['SecretPassword']
    except KeyError:
        # No password supplied: mask the field anyway and ask the caller to
        # generate one.
        event['ResourceProperties']['SecretPassword'] = '********'
        return 'generate'
    event['ResourceProperties']['SecretPassword'] = '********'
    return secret_pwd
# Return the event object physical_resource_id
def physical_resource_id(event):
    """Return the event's PhysicalResourceId, deriving one from the
    LogicalResourceId when it is absent or falsy."""
    existing = event.get('PhysicalResourceId', False)
    if existing:
        return existing
    return event['LogicalResourceId'] + '-12345'
# Generate and return a random string for a password
# Uses straight defaults for get_random_password method
def generate_secret_pwd(region_name):
    """Return a random password string for *region_name*.

    Uses Secrets Manager's GetRandomPassword with its defaults.

    Fixes: the original indexed the response with the placeholder
    '<PASSWORD>'; the API's response key is 'RandomPassword'.
    """
    session = boto3.session.Session()
    client = session.client(
        service_name='secretsmanager',
        region_name=region_name,
    )
    resp = client.get_random_password()
    return resp['RandomPassword']
# Wrapper function that implements the `get` action
# Calls the get_secret fucntion to retrieve the password for a given SecretName
def get_secret_password(event, response):
    """Handle the `get` action: fetch the secret and fill in the CFN response.

    Returns:
        (pretty-printed JSON of the API result, updated response dict)
    """
    secret_name = event['ResourceProperties']['SecretName']
    region_name = event['ResourceProperties']['Region']
    logger.info('***** SecretAction is `get` - Getting value for secret: %s *****' % (secret_name))
    secret_result = get_secret(secret_name=secret_name, region_name=region_name)
    if not secret_result.get("Error", False):
        logger.info('***** Value for secret %s successfully retrieved *****' % (secret_name))
        parsed = json.loads(secret_result['SecretString'])
        response['PhysicalResourceId'] = secret_result['ARN']
        response['Data']['SecretPassword'] = parsed['password']
    else:
        response['Status'] = "FAILED"
        response['Reason'] = secret_result['Error']['Message']
    return json.dumps(secret_result, indent=4, sort_keys=True, default=str), response
# Calls the get_secret_value method to retrieve the password for a given SecretName
# Human-readable descriptions for the ClientError codes expected from
# get_secret_value.
_GET_SECRET_ERROR_MESSAGES = {
    'ResourceNotFoundException': 'The specified secret cannot be found',
    'DecryptionFailure': 'The requested secret cannot be decrypted',
    'InvalidRequestException': 'The request was invalid',
    'InvalidParameterException': 'The request had invalid parameters',
    'InternalServiceError': 'An error occurred on the server side',
}


def get_secret(secret_name, region_name):
    """Retrieve *secret_name* via Secrets Manager's get_secret_value.

    Returns:
        The API response on success, or ``e.response`` (which carries an
        'Error' key the callers inspect) for the known ClientError codes.

    Raises:
        ClientError: for codes not in the table above.  (The original fell
        through its elif chain and crashed with an unbound-local NameError.)
    """
    session = boto3.session.Session()
    client = session.client(
        service_name='secretsmanager',
        region_name=region_name,
    )
    try:
        return client.get_secret_value(SecretId=secret_name)
    except ClientError as e:
        reason = _GET_SECRET_ERROR_MESSAGES.get(e.response['Error']['Code'])
        if reason is None:
            raise
        logger.error('>>>>> %s - the full error message is: %s <<<<<' % (reason, e))
        return e.response
# Wrapper funcion that implements the `upsert` action
# Calls the generate_secret_pwd method to generate a random string for the password
# Calls the upsert_secret function to create or update the requested SecretName
def create_or_update_secret(event, response, secret_pwd):
    """Handle the `upsert` action: create/update the secret and fill in the
    CFN response.

    When *secret_pwd* is the 'generate' sentinel a random password is created
    and echoed back in the response Data.
    """
    secret_name = event['ResourceProperties']['SecretName']
    region_name = event['ResourceProperties']['Region']
    if secret_pwd == 'generate':
        logger.info('***** SecretAction is `upsert` - Creating or updating secret %s with randomly generated password *****' % (secret_name))
        secret_pwd = generate_secret_pwd(region_name=region_name)
        response['Data']['SecretPassword'] = secret_pwd
    else:
        logger.info('***** SecretAction is `upsert` - Creating or updating secret: %s with provided password *****' % (secret_name))
    secret_result = upsert_secret(event=event, secret_pwd=secret_pwd)
    if secret_result.get('Error', False):
        response['Status'] = "FAILED"
        response['Reason'] = secret_result['Error']['Message']
    else:
        response['PhysicalResourceId'] = secret_result['ARN']
    return secret_result, response
# Calls the create_secret method to create the requested SecretName, or
# calls the put_secret_value method to update the requested SecretName
# Human-readable descriptions for the ClientError codes expected from
# create_secret / put_secret_value.  The original duplicated this six-branch
# elif chain twice; it is now shared via _log_upsert_error.
_UPSERT_ERROR_MESSAGES = {
    'InvalidRequestException': 'The request was invalid',
    'InvalidParameterException': 'The request had invalid parameters',
    'EncryptionFailure': 'The requested secret cannot be encrypted',
    'InternalServiceError': 'An error occurred on the server side',
    'LimitExceededException': 'The request exceeds Secrets Manager internal limits',
    'MalformedPolicyDocumentException': 'The policy provided is invalid',
}


def _log_upsert_error(e):
    """Log a known upsert ClientError and return e.response.

    Unknown codes are re-raised (the original fell off its elif chain and
    crashed with an unbound-local NameError instead).
    """
    reason = _UPSERT_ERROR_MESSAGES.get(e.response['Error']['Code'])
    if reason is None:
        raise e
    logger.error('>>>>> %s - the full error message is: %s <<<<<' % (reason, e))
    return e.response


def upsert_secret(event, secret_pwd):
    """Create the requested secret, or update its value if it already exists.

    Returns:
        The create/put API response on success, or ``e.response`` (carrying
        an 'Error' key) for the known ClientError codes.
    """
    region_name = event['ResourceProperties']['Region']
    secret_username = event['ResourceProperties']['SecretUserName']
    secret_desc = event['ResourceProperties']['SecretDescription']
    secret_name = event['ResourceProperties']['SecretName']
    # Secrets are stored as a JSON blob with username and password fields.
    secret_string = json.dumps({'username': secret_username, 'password': secret_pwd})
    session = boto3.session.Session()
    client = session.client(
        service_name='secretsmanager',
        region_name=region_name,
    )
    try:
        upsert_secret_response = client.create_secret(
            Name=secret_name,
            Description=secret_desc,
            SecretString=secret_string
        )
        logger.info('***** The requested secret %s has been successfully created *****' % secret_name)
        return upsert_secret_response
    except ClientError as e:
        if e.response['Error']['Code'] != 'ResourceExistsException':
            return _log_upsert_error(e)
        # The secret already exists: write a new value instead.
        try:
            put_secret_value_response = client.put_secret_value(
                SecretId=secret_name,
                SecretString=secret_string
            )
            logger.info('***** The requested secret %s has been successfully updated *****' % secret_name)
            return put_secret_value_response
        except ClientError as inner:
            return _log_upsert_error(inner)
# Serialize, encode, and post the response object to CloudFormation
def respond_to_cloudformation(event, response):
    """Serialize *response* and PUT it to the CloudFormation callback URL."""
    # Serialize the real payload first ...
    req_data = json.dumps(response).encode('utf-8')
    # ... then mask the password so the logged copy never shows it.
    response['Data']['SecretPassword'] = '********'
    logger.info("***** Responding to CloudFormation with: %s *****" % (json.dumps(response)))
    req = Request(
        event['ResponseURL'],
        data=req_data,
        headers={'Content-Length': len(req_data),
                 'Content-Type': ''}
    )
    req.get_method = lambda: 'PUT'
    try:
        urlopen(req)
        logger.info('***** Request to CFN API succeeded, nothing to do here *****')
    except HTTPError as e:
        logger.error('>>>>> Callback to CFN API failed with status %d <<<<<' % e.code)
        logger.error('>>>>> Response: %s' % e.reason)
    except URLError as e:
        logger.error('>>>>> Failed to reach the server - %s <<<<<' % e.reason)
|
<reponame>pincoin/rakmai<filename>member/forms2.py
import json
import urllib
from allauth.account.forms import (
LoginForm, ResetPasswordForm, ResetPasswordKeyForm, AddEmailForm, ChangePasswordForm, SetPasswordForm
)
from crispy_forms.bootstrap import PrependedText
from crispy_forms.helper import (
FormHelper, Layout
)
from crispy_forms.layout import (
Fieldset, ButtonHolder, Submit, HTML, Field
)
from django import forms
from django.conf import settings
from django.urls import reverse
from django.utils.timezone import (
now, timedelta
)
from django.utils.translation import ugettext_lazy as _
from shop.models import Order
from . import settings as member_settings
from .models import Profile
from .widgets import DocumentClearableFileInput
"""
NOTE: These form classes in `forms2.py` must be separately due to circular imports
"""
class MemberLoginForm(LoginForm):
    """Login form that adds Google reCAPTCHA verification.

    When the view passes ``recaptcha=True`` the submitted token is verified
    against Google's siteverify endpoint; otherwise users who have not logged
    in for a while are rejected so the view can re-route them through the
    reCAPTCHA-protected flow.
    """

    def __init__(self, *args, **kwargs):
        # ``recaptcha`` is a view-supplied flag, not a regular form kwarg.
        self.recaptcha = kwargs.pop('recaptcha', None)
        super(MemberLoginForm, self).__init__(*args, **kwargs)

        # 0.43.0: django-allauth sets autofocus on the login field; remove it.
        if 'autofocus' in self.fields['login'].widget.attrs:
            del self.fields['login'].widget.attrs['autofocus']

    def clean(self):
        """Validate credentials, then enforce reCAPTCHA policy."""
        cleaned_data = super(MemberLoginForm, self).clean()

        if self.recaptcha:
            captcha_response = self.data.get('g-recaptcha-response')

            url = 'https://www.google.com/recaptcha/api/siteverify'
            values = {
                'secret': settings.GOOGLE_RECAPTCHA['secret_key'],
                'response': captcha_response
            }

            data = urllib.parse.urlencode(values).encode()
            req = urllib.request.Request(url, data=data)
            captcha_response = urllib.request.urlopen(req)

            result = json.loads(captcha_response.read().decode())

            if not result['success']:
                raise forms.ValidationError(_('Invalid reCAPTCHA. Please try again.'))

        if not self.recaptcha \
                and self.user and self.user.last_login \
                and now() - self.user.last_login > timedelta(days=member_settings.DAYS_LOGIN_RECPATCHA):
            # BUGFIX: the previous message called ``.format()`` on a string
            # with no placeholder, silently discarding the day count. Format
            # the translated template instead so translators see the
            # placeholder. (NOTE(review): the settings key is spelled
            # DAYS_LOGIN_RECPATCHA upstream; keep as-is.)
            raise forms.ValidationError(
                _("You haven't logged in for {} days.").format(member_settings.DAYS_LOGIN_RECPATCHA))

        return cleaned_data

    class Media:
        js = ('https://www.google.com/recaptcha/api.js',)
class MemberResetPasswordForm(ResetPasswordForm):
    """Password-reset request form protected by Google reCAPTCHA."""

    def __init__(self, *args, **kwargs):
        super(MemberResetPasswordForm, self).__init__(*args, **kwargs)

        # The crispy layout supplies the placeholder; no separate label.
        self.fields['email'].label = False

        self.helper = FormHelper()
        self.helper.include_media = False
        self.helper.form_show_errors = False
        self.helper.form_class = 'form'
        self.helper.layout = Layout(
            Fieldset(
                '',  # Hide the legend of fieldset (HTML tag)
                Field(PrependedText('email', '<i class="fas fa-envelope"></i>',
                                    placeholder=self.fields['email'].widget.attrs['placeholder'])),
                HTML('<div class="g-recaptcha" data-sitekey="{}"></div>'.format(settings.GOOGLE_RECAPTCHA['site_key'])),
            ),
            HTML('<hr>'),
            ButtonHolder(
                Submit('submit', _('Reset My Password'), css_class='btn btn-block btn-lg btn-primary')
            ),
        )

    def clean(self):
        """Verify the submitted reCAPTCHA token with Google's siteverify API."""
        cleaned_data = super(MemberResetPasswordForm, self).clean()

        token = self.data.get('g-recaptcha-response')
        payload = urllib.parse.urlencode({
            'secret': settings.GOOGLE_RECAPTCHA['secret_key'],
            'response': token,
        }).encode()
        verify_request = urllib.request.Request(
            'https://www.google.com/recaptcha/api/siteverify', data=payload)
        verdict = json.loads(urllib.request.urlopen(verify_request).read().decode())

        if not verdict['success']:
            raise forms.ValidationError(_('Invalid reCAPTCHA. Please try again.'))

        return cleaned_data

    class Media:
        js = ('https://www.google.com/recaptcha/api.js',)
class MemberResetPasswordKeyForm(ResetPasswordKeyForm):
    """Password-reset form (reached from the emailed key) with a crispy layout."""

    def __init__(self, *args, **kwargs):
        super(MemberResetPasswordKeyForm, self).__init__(*args, **kwargs)

        # Labels hidden; placeholders inside the inputs carry the hints.
        self.fields['password1'].label = False
        self.fields['password2'].label = False

        self.helper = FormHelper()
        self.helper.include_media = False
        self.helper.form_class = 'form'
        self.helper.layout = Layout(
            Fieldset(
                '',  # Hide the legend of fieldset (HTML tag)
                Field(PrependedText('password1', '<i class="fas fa-key"></i>',
                                    placeholder=self.fields['password1'].widget.attrs['placeholder'])),
                Field(PrependedText('password2', '<i class="fas fa-key"></i>',
                                    placeholder=self.fields['password2'].widget.attrs['placeholder'])),
                HTML('<hr>'),
            ),
            ButtonHolder(
                Submit('submit', _('Change Password'), css_class='btn btn-block btn-lg btn-primary')
            ),
        )
class MemberAddEmailForm(AddEmailForm):
    """Add-email form posting to allauth's ``account_email`` endpoint."""

    def __init__(self, *args, **kwargs):
        super(MemberAddEmailForm, self).__init__(*args, **kwargs)

        # Placeholder replaces the label.
        self.fields['email'].label = False

        self.helper = FormHelper()
        self.helper.include_media = False
        self.helper.form_action = reverse('account_email')
        self.helper.form_class = 'form'
        self.helper.layout = Layout(
            Fieldset(
                '',  # Hide the legend of fieldset (HTML tag)
                Field(PrependedText('email', '<i class="fas fa-envelope"></i>',
                                    placeholder=self.fields['email'].widget.attrs['placeholder'])),
            ),
            ButtonHolder(
                # NOTE: Button name must be `action_add`. Otherwise, it does not work.
                Submit('action_add', _('Add E-mail'), css_class='btn btn-block btn-lg btn-primary')
            ),
        )
class MemberChangePasswordForm(ChangePasswordForm):
    """Change-password form (old + new + confirmation) with a crispy layout."""

    def __init__(self, *args, **kwargs):
        super(MemberChangePasswordForm, self).__init__(*args, **kwargs)

        # Labels hidden; placeholders inside the inputs carry the hints.
        self.fields['oldpassword'].label = False
        self.fields['password1'].label = False
        self.fields['password2'].label = False

        self.helper = FormHelper()
        self.helper.include_media = False
        self.helper.form_class = 'form'
        self.helper.layout = Layout(
            Fieldset(
                '',  # Hide the legend of fieldset (HTML tag)
                Field(PrependedText('oldpassword', '<i class="fas fa-key"></i>',
                                    placeholder=self.fields['oldpassword'].widget.attrs['placeholder'])),
                Field(PrependedText('password1', '<i class="fas fa-key"></i>',
                                    placeholder=self.fields['password1'].widget.attrs['placeholder'])),
                Field(PrependedText('password2', '<i class="fas fa-key"></i>',
                                    placeholder=self.fields['password2'].widget.attrs['placeholder'])),
                HTML('<hr>'),
            ),
            ButtonHolder(
                Submit('submit', _('Change Password'), css_class='btn btn-block btn-lg btn-primary')
            ),
        )
class MemberSetPasswordForm(SetPasswordForm):
    """Set-password form (no existing password required) with a crispy layout."""

    def __init__(self, *args, **kwargs):
        super(MemberSetPasswordForm, self).__init__(*args, **kwargs)

        # Labels hidden; placeholders inside the inputs carry the hints.
        self.fields['password1'].label = False
        self.fields['password2'].label = False

        self.helper = FormHelper()
        self.helper.include_media = False
        self.helper.form_class = 'form'
        self.helper.layout = Layout(
            Fieldset(
                '',  # Hide the legend of fieldset (HTML tag)
                Field(PrependedText('password1', '<i class="fas fa-key"></i>',
                                    placeholder=self.fields['password1'].widget.attrs['placeholder'])),
                Field(PrependedText('password2', '<i class="fas fa-key"></i>',
                                    placeholder=self.fields['password2'].widget.attrs['placeholder'])),
                HTML('<hr>'),
            ),
            ButtonHolder(
                Submit('submit', _('Set Password'), css_class='btn btn-block btn-lg btn-primary')
            ),
        )
class MemberDocumentForm(forms.ModelForm):
    """Profile form for uploading identity-verification documents."""

    def __init__(self, *args, **kwargs):
        super(MemberDocumentForm, self).__init__(*args, **kwargs)

        # Photo ID upload field.
        self.fields['photo_id'].label = '(1) {}'.format(_('photo ID'))
        self.fields['photo_id'].help_text = _('Max: 4MB')
        self.fields['photo_id'].error_messages['contradiction'] = _(
            'Please either submit a Photo ID or check the clear checkbox, not both.')

        # Bank account / card upload field.
        self.fields['card'].label = '(2) {}'.format(_('bank account or debit/credit card'))
        self.fields['card'].help_text = _('Max: 4MB')
        self.fields['card'].error_messages['contradiction'] = _(
            'Please either submit a Card image or check the clear checkbox, not both.')

        helper = FormHelper()
        helper.include_media = False
        helper.form_class = 'form'
        helper.label_class = 'col-form-label font-weight-bold pb-0'
        helper.layout = Layout(
            Fieldset(
                '',  # Hide the legend of fieldset (HTML tag)
                Field('photo_id', css_class='form-control-file mt-1', wrapper_class='mb-3'),
                Field('card', css_class='form-control-file mt-1', wrapper_class='mb-3'),
                HTML('<hr my-1 my-md-3>'),
            ),
            ButtonHolder(
                Submit('submit', _('Document Submit'), css_class='btn btn-block btn-lg btn-primary')
            ),
        )
        self.helper = helper

    class Meta:
        model = Profile
        fields = [
            'photo_id', 'card',
        ]
        widgets = {
            'photo_id': DocumentClearableFileInput,
            'card': DocumentClearableFileInput,
        }

    def clean(self):
        """Reject document uploads from users without a qualifying order."""
        cleaned_data = super(MemberDocumentForm, self).clean()

        order_count = self.instance.user.shop_order_owned.count()

        if order_count == 0:
            raise forms.ValidationError(_('You have no orders.'))
        elif order_count == 1:
            # With a single order, documents are only accepted when that order
            # was paid via PayPal or has already shipped.
            for order in self.instance.user.shop_order_owned.all():
                if order.payment_method != Order.PAYMENT_METHOD_CHOICES.paypal \
                        and order.status != Order.STATUS_CHOICES.shipped:
                    raise forms.ValidationError(_('You have no orders.'))

        return cleaned_data
class MemberUnregisterForm(forms.Form):
    """Confirmation form for account deletion; requires an explicit opt-in."""

    # BooleanField is required by default, so the box must be checked to validate.
    agree = forms.BooleanField(
        label=_('I really would like to unregister.'),
    )
class MemberChangeNameForm(forms.ModelForm):
    """Profile form that lets a member change the first/last name on file."""

    first_name = forms.CharField(
        label=_('first name'),
        max_length=30,
        widget=forms.TextInput(),
        help_text=_('First name'),
    )

    last_name = forms.CharField(
        label=_('last name'),
        max_length=30,
        widget=forms.TextInput(),
        help_text=_('Last name'),
    )

    def __init__(self, *args, **kwargs):
        super(MemberChangeNameForm, self).__init__(*args, **kwargs)

        self.helper = FormHelper()
        # NOTE(review): last name is deliberately rendered before first name
        # -- confirm this ordering is intentional before changing.
        self.helper.layout = Layout(
            Fieldset(
                '',  # Hide the legend of fieldset (HTML tag)
                'last_name',
                'first_name',
            ),
            HTML('<hr>'),
            ButtonHolder(
                Submit('submit', _('Change Your Name'), css_class='btn btn-lg btn-block btn-primary')
            ),
        )
        self.helper.form_method = 'POST'

    class Meta:
        model = Profile
        # No model fields are edited directly; the declared form fields are
        # presumably persisted by the view -- verify against the caller.
        fields = [
        ]
|
<reponame>Thetacz/nautobot-plugin-netbox-importer
"""Extras class definitions for nautobot-netbox-importer.
Note that in most cases the same model classes are used for both NetBox imports and Nautobot exports.
Because this plugin is meant *only* for NetBox-to-Nautobot migration, the create/update/delete methods on these classes
are for populating data into Nautobot only, never the reverse.
"""
# pylint: disable=too-many-ancestors, too-few-public-methods
from datetime import datetime
from typing import Any, List, Optional
from uuid import UUID
from pydantic import Field, validator
from diffsync.exceptions import ObjectNotFound
import structlog
import nautobot.extras.models as extras
from .abstract import (
ChangeLoggedModelMixin,
CustomFieldModelMixin,
NautobotBaseModel,
)
from .references import (
foreign_key_field,
ClusterGroupRef,
ClusterRef,
ContentTypeRef,
CustomFieldRef,
DeviceRoleRef,
PlatformRef,
RegionRef,
SiteRef,
TagRef,
TenantGroupRef,
TenantRef,
UserRef,
)
from .validation import DiffSyncCustomValidationField
logger = structlog.get_logger()
class ConfigContext(ChangeLoggedModelMixin, NautobotBaseModel):
    """A set of arbitrary data available to Devices and VirtualMachines."""

    # DiffSync model name and the attributes kept in sync.
    _modelname = "configcontext"
    _attributes = (
        "name",
        "weight",
        "description",
        "is_active",
        "regions",
        "sites",
        "roles",
        "platforms",
        "cluster_groups",
        "clusters",
        "tenant_groups",
        "tenants",
        "tags",
        "data",
    )
    # Concrete Nautobot ORM model records are written to.
    _nautobot_model = extras.ConfigContext

    name: str
    weight: int
    description: str
    is_active: bool
    # Scoping references restricting where this context applies.
    regions: List[RegionRef] = []
    sites: List[SiteRef] = []
    roles: List[DeviceRoleRef] = []
    platforms: List[PlatformRef] = []
    cluster_groups: List[ClusterGroupRef] = []
    clusters: List[ClusterRef] = []
    tenant_groups: List[TenantGroupRef] = []
    tenants: List[TenantRef] = []
    tags: List[TagRef] = []
    # Arbitrary JSON payload of the config context.
    data: dict
class CustomField(NautobotBaseModel):
    """Custom field defined on a model(s)."""

    _modelname = "customfield"
    _attributes = (
        "name",
        "content_types",
        "type",
        "label",
        "description",
        "required",
        "filter_logic",
        "default",
        "weight",
        "validation_minimum",
        "validation_maximum",
        "validation_regex",
    )
    _nautobot_model = extras.CustomField

    name: str
    # Models this custom field is attached to.
    content_types: List[ContentTypeRef] = []
    type: str
    label: str
    description: str
    required: bool
    filter_logic: str
    default: Optional[Any]  # any JSON value
    weight: int
    validation_minimum: Optional[int]
    validation_maximum: Optional[int]
    validation_regex: str

    # Because marking a custom field as "required" doesn't automatically assign a value to pre-existing records,
    # we never want, when adding custom fields from NetBox, to flag fields as required=True.
    # Instead we store it in "actual_required" and fix it up only afterwards.
    actual_required: Optional[bool]

    @classmethod
    def special_clean(cls, diffsync, ids, attrs):
        """Special-case handling for the "default" attribute.

        For "select"/"multiselect" fields, drop ``attrs["default"]`` when the
        referenced CustomFieldChoice does not exist yet; it gets fixed up
        later once the choices have been created.
        """
        if attrs.get("default") and attrs["type"] in ("select", "multiselect"):
            # There's a bit of a chicken-and-egg problem here in that we have to create a CustomField
            # before we can create any CustomFieldChoice records that reference it, but the "default"
            # attribute on the CustomField is only valid if it references an existing CustomFieldChoice.
            # So what we have to do is skip over the "default" field if it references a nonexistent CustomFieldChoice.
            default = attrs.get("default")
            try:
                diffsync.get("customfieldchoice", {"field": {"name": attrs["name"]}, "value": default})
            except ObjectNotFound:
                logger.debug(
                    "CustomFieldChoice not yet present to set as 'default' for CustomField, will fixup later",
                    field=attrs["name"],
                    default=default,
                )
                del attrs["default"]
class CustomFieldChoice(NautobotBaseModel):
    """One of the valid options for a CustomField of type "select" or "multiselect"."""

    _modelname = "customfieldchoice"
    # Since these only exist in Nautobot and not in NetBox, we can't match them between the two systems by PK.
    _identifiers = ("field", "value")
    _attributes = ("weight",)
    _nautobot_model = extras.CustomFieldChoice

    # Owning custom field and the literal choice value.
    field: CustomFieldRef
    value: str
    # Display-ordering weight; defaults to 100 when the source omits it.
    weight: int = 100
class CustomLink(ChangeLoggedModelMixin, NautobotBaseModel):
    """A custom link to an external representation of a Nautobot object."""

    _modelname = "customlink"
    _attributes = ("name", "content_type", "text", "target_url", "weight", "group_name", "button_class", "new_window")
    _nautobot_model = extras.CustomLink

    name: str
    content_type: ContentTypeRef
    text: str
    # Field name is "url" in NetBox, "target_url" in Nautobot
    target_url: str = Field(alias="url")
    weight: int
    group_name: str
    button_class: str
    new_window: bool

    class Config:
        """Pydantic configuration of the CustomLink class."""

        # Allow both "url" and "target_url" as property setters
        allow_population_by_field_name = True
class ExportTemplate(ChangeLoggedModelMixin, NautobotBaseModel):
    """A Jinja2 template for exporting records as text."""

    _modelname = "exporttemplate"
    _attributes = ("name", "content_type", "description", "template_code", "mime_type", "file_extension")
    _nautobot_model = extras.ExportTemplate

    name: str
    # Model whose records this template renders.
    content_type: ContentTypeRef
    description: str
    # Jinja2 source of the template.
    template_code: str
    mime_type: str
    file_extension: str
class ImageAttachment(NautobotBaseModel):
    """An uploaded image which is associated with an object."""

    _modelname = "imageattachment"
    _attributes = ("content_type", "object_id", "image", "image_height", "image_width", "name", "created")
    _nautobot_model = extras.ImageAttachment

    content_type: ContentTypeRef
    # Generic foreign key: object_id's type depends on the referenced content_type.
    _object_id = foreign_key_field("*content_type")
    object_id: _object_id
    # Stored as the image's path string (see validator below).
    image: str
    image_height: int
    image_width: int
    name: str
    created: datetime

    @validator("image", pre=True)
    def imagefieldfile_to_str(cls, value):  # pylint: disable=no-self-argument,no-self-use
        """Convert ImageFieldFile objects to strings."""
        # Django file objects expose the stored path via their ``name`` attribute.
        if hasattr(value, "name"):
            value = value.name
        return value
class JobResultData(DiffSyncCustomValidationField, dict):
    """Reformat NetBox Script and Report data to the new Nautobot JobResult data format."""

    @classmethod
    def validate(cls, value):
        """Translate data as needed."""
        if isinstance(value, dict):
            if "log" in value:
                # NetBox custom Script data, transform the result data to the new format
                # Old: {"log": [{"status": "success", "message": "..."}, ...], "output": "..."}
                # New: {"run": {"success": 0, ..., "log": [(time, status, object, url, message), ...]},
                #       "total": {"success": 0, ...}, "output": "..."}
                new_value = {
                    "run": {"success": 0, "info": 0, "warning": 0, "failure": 0, "log": []},
                    "total": {"success": 0, "info": 0, "warning": 0, "failure": 0},
                    "output": value.get("output", ""),
                }
                for log_entry in value.get("log", []):
                    new_value["run"]["log"].append((None, log_entry["status"], None, None, log_entry["message"]))
                    # Tally per-run and overall counts keyed by log status.
                    new_value["run"][log_entry["status"]] += 1
                    new_value["total"][log_entry["status"]] += 1
                value = new_value
            else:
                # Either a Nautobot record (in which case no reformatting needed) or a NetBox Report result
                # For the latter, add "output" and "total" keys to the result.
                if "total" not in value:
                    totals = {
                        "success": 0,
                        "info": 0,
                        "warning": 0,
                        "failure": 0,
                    }
                    # Sum the per-test-case counters into overall totals.
                    for test_results in value.values():
                        for key in ("success", "info", "warning", "failure"):
                            totals[key] += test_results[key]
                    value["total"] = totals
                if "output" not in value:
                    value["output"] = ""
        return cls(value)
class JobResult(NautobotBaseModel):
    """Results of running a Job / Script / Report."""

    _modelname = "jobresult"
    _attributes = ("job_id", "name", "obj_type", "completed", "user", "status", "data")
    _nautobot_model = extras.JobResult

    job_id: UUID
    name: str
    obj_type: ContentTypeRef
    completed: Optional[datetime]
    user: Optional[UserRef]
    status: str  # not a StatusRef!
    # Run data, normalized by JobResultData.validate.
    data: Optional[JobResultData]
    created: Optional[datetime]  # Not synced
class Status(ChangeLoggedModelMixin, NautobotBaseModel):
    """Representation of a status value."""

    _modelname = "status"
    _attributes = ("slug", "name", "color", "description")  # TODO content_types?
    _nautobot_model = extras.Status

    slug: str
    name: str
    color: str
    description: str
    # Deliberately absent from _attributes (see TODO above); defaults to empty.
    content_types: List = []
class Tag(ChangeLoggedModelMixin, CustomFieldModelMixin, NautobotBaseModel):
    """A tag that can be associated with various objects."""

    _modelname = "tag"
    # Prepend the custom-field attributes contributed by the mixin.
    _attributes = (*CustomFieldModelMixin._attributes, "name", "slug", "color", "description")
    _nautobot_model = extras.Tag

    name: str
    slug: str
    color: str
    description: str
class TaggedItem(NautobotBaseModel):
    """Mapping between a record and a Tag."""

    _modelname = "taggeditem"
    _attributes = ("content_type", "object_id", "tag")
    _nautobot_model = extras.TaggedItem

    content_type: ContentTypeRef
    # Generic foreign key: object_id's type depends on the referenced content_type.
    _object_id = foreign_key_field("*content_type")
    object_id: _object_id
    tag: TagRef
class Webhook(ChangeLoggedModelMixin, NautobotBaseModel):
    """A Webhook defines a request that will be sent to a remote application."""

    _modelname = "webhook"
    _attributes = (
        "name",
        "content_types",
        "type_create",
        "type_update",
        "type_delete",
        "payload_url",
        "enabled",
        "http_method",
        "http_content_type",
        "additional_headers",
        "body_template",
        "secret",
        "ssl_verification",
        "ca_file_path",
    )
    _nautobot_model = extras.Webhook

    name: str
    # Models whose changes trigger this webhook.
    content_types: List[ContentTypeRef] = []
    # Which event types fire the webhook.
    type_create: bool
    type_update: bool
    type_delete: bool
    payload_url: str
    enabled: bool
    http_method: str
    http_content_type: str
    additional_headers: str
    body_template: str
    secret: str
    ssl_verification: bool
    ca_file_path: Optional[str]
|
<reponame>nfriedri/debie-backend-1
import json
from flask import jsonify
import JSONFormatter
import calculation
from bias_evaluation import weat, ect, k_means, bat
import logging
# Computes bias evaluation methods for a bias specification
def return_bias_evaluation(methods, arguments, content):
    """Dispatch a bias-evaluation request to the requested method(s).

    :param methods: Evaluation selector: None/'all', 'ect', 'bat', 'weat' or 'kmeans'.
    :param arguments: Query parameters; may contain 'space' (embedding space name)
        and 'vectors' ('true' when vectors are embedded in the request body).
    :param content: Parsed JSON request body with the bias specification.
    :return: A JSON string of results, or a Flask (response, status) tuple on error.
    """
    logging.info("APP-BE: Forwarding to related definitions")
    database = 'fasttext'
    if 'space' in arguments.keys():
        database = arguments['space']
    vector_flag = 'false'
    if 'vectors' in arguments.keys():
        vector_flag = arguments['vectors']
        # NOTE(review): the space is labelled 'self-defined' even when
        # arguments['vectors'] == 'false' -- confirm this is intended.
        database = 'self-defined'
    if vector_flag == 'false':
        target1, target2, arg1, arg2 = JSONFormatter.retrieve_vectors_evaluation(content, database)
    else:
        target1, target2, arg1, arg2 = JSONFormatter.retrieve_vectors_from_json_evaluation(content)
    # Equalize set sizes before evaluation.
    target1, target2 = calculation.check_sizes(target1, target2)
    arg1, arg2 = calculation.check_sizes(arg1, arg2)
    if len(target1) == 0 or len(target2) == 0:
        logging.info("APP: Stopped, no values found in database")
        return jsonify(message="ERROR: No values found in database."), 404
    if len(arg1) == 0 and len(arg2) == 0 and methods != 'kmeans':
        return jsonify(message="No attribute sets provided, only k means++ is executable"), 400
    logging.info("APP: Final retrieved set sizes: T1=" + str(len(target1)) + " T2=" + str(len(target2)) + " A1=" + str(
        len(arg1)) + " A2=" + str(len(arg2)))
    logging.info("APP: Evaluation process started")
    if methods is None or methods == 'all':
        return return_eval_all(target1, target2, arg1, arg2, database)
    if methods == 'ect':
        return return_eval_ect(target1, target2, arg1, arg2, database)
    if methods == 'bat':
        return return_eval_bat(target1, target2, arg1, arg2, database)
    if methods == 'weat':
        return return_eval_weat(target1, target2, arg1, arg2, database)
    if methods == 'kmeans':
        return return_eval_kmeans(target1, target2, database)
    # BUGFIX: previously returned the bare int 400, which is not a valid Flask
    # response; return a proper (body, status) tuple instead.
    return jsonify(message="ERROR: Unknown evaluation method."), 400
# Evaluates the specification with all methods
def return_eval_all(target_vectors1, target_vectors2, attr_vectors1, attr_vectors2, database):
    """Run every evaluation method (ECT, BAT, WEAT, k-means++) and return a JSON string.

    :param target_vectors1: dict mapping target-set-1 words to vectors.
    :param target_vectors2: dict mapping target-set-2 words to vectors.
    :param attr_vectors1: dict mapping attribute-set-1 words to vectors.
    :param attr_vectors2: dict mapping attribute-set-2 words to vectors.
    :param database: Name of the embedding space, echoed into the response.
    :return: JSON string of all results, or a Flask JSON error response.
    """
    logging.info("APP-BE: Starting all evaluations")
    try:
        # ECT over both attribute sets combined, then one run per attribute set.
        arg_vecs = calculation.concatenate_dicts(calculation.create_duplicates(attr_vectors1),
                                                 calculation.create_duplicates(attr_vectors2))
        ect_value, p_value = ect.embedding_coherence_test(target_vectors1, target_vectors2, arg_vecs)
        ect_value1, p_value1 = ect.embedding_coherence_test(target_vectors1, target_vectors2, attr_vectors1)
        ect_value2, p_value2 = ect.embedding_coherence_test(target_vectors1, target_vectors2, attr_vectors2)
        bat_result = bat.bias_analogy_test(target_vectors1, target_vectors2, attr_vectors1, attr_vectors2)
        # bat_result = 'Currently not available'
        weat_effect_size, weat_p_value = weat.word_embedding_association_test(target_vectors1, target_vectors2,
                                                                              attr_vectors1,
                                                                              attr_vectors2)
        kmeans = k_means.k_means_clustering(target_vectors1, target_vectors2)
        logging.info("APP-BE: Evaluations finished successfully")
        # NOTE(review): this endpoint serializes the word sets with
        # dict_keys_to_string while the single-method endpoints use
        # dict_to_json -- confirm which representation clients expect.
        response = json.dumps(
            {"EmbeddingSpace": database,
             "EvaluationMethods": "all",
             "EctValue": ect_value, "EctPValue": p_value,
             "EctValue1": ect_value1, "EctPValue1": p_value1,
             "EctValue2": ect_value2, "EctPValue2": p_value2,
             "BatValue": bat_result,
             "WeatEffectSize": weat_effect_size, "WeatPValue": weat_p_value,
             "KmeansValue": kmeans,
             "T1": JSONFormatter.dict_keys_to_string(target_vectors1),
             "T2": JSONFormatter.dict_keys_to_string(target_vectors2),
             "A1": JSONFormatter.dict_keys_to_string(attr_vectors1),
             "A2": JSONFormatter.dict_keys_to_string(attr_vectors2)
             })
        logging.info("APP-BE: Results: " + str(response))
        return response
    except RuntimeWarning as rw:
        # NOTE(review): RuntimeWarning is only *raised* when warnings are
        # escalated to errors (e.g. numpy error settings) -- confirm this is
        # the intended guard.
        print(rw)
        return jsonify(message="Internal Calculation Error")
# Evaluates the specifications with ECT
def return_eval_ect(target_vectors1, target_vectors2, attr_vectors1, attr_vectors2, database):
    """Run the Embedding Coherence Test (ECT) and return a JSON string.

    ECT is computed three times: over both attribute sets combined, and over
    each attribute set individually.
    """
    logging.info("APP-BE: Starting ECT evaluation")
    arg_vecs = calculation.concatenate_dicts(calculation.create_duplicates(attr_vectors1),
                                             calculation.create_duplicates(attr_vectors2))
    ect_value, p_value = ect.embedding_coherence_test(target_vectors1, target_vectors2, arg_vecs)
    ect_value1, p_value1 = ect.embedding_coherence_test(target_vectors1, target_vectors2, attr_vectors1)
    ect_value2, p_value2 = ect.embedding_coherence_test(target_vectors1, target_vectors2, attr_vectors2)
    logging.info("APP-BE: ECT finished successfully")
    # BUGFIX: "EvaluationMethods" previously reported "all" (copy-paste error);
    # report the method actually executed.
    response = json.dumps(
        {"EmbeddingSpace": database, "EvaluationMethods": "ect",
         "EctValue": ect_value, "EctPValue": p_value,
         "EctValue1": ect_value1, "EctPValue1": p_value1,
         "EctValue2": ect_value2, "EctPValue2": p_value2,
         "T1": JSONFormatter.dict_to_json(target_vectors1),
         "T2": JSONFormatter.dict_to_json(target_vectors2),
         "A1": JSONFormatter.dict_to_json(attr_vectors1),
         "A2": JSONFormatter.dict_to_json(attr_vectors2)
         })
    logging.info("APP-BE: Results: " + str(response))
    return response
# Evaluates the specifications with BAT
def return_eval_bat(target_vectors1, target_vectors2, attr_vectors1, attr_vectors2, database):
    """Run the Bias Analogy Test (BAT) and return a JSON string."""
    logging.info("APP-BE: Starting BAT evaluation")
    bat_result = bat.bias_analogy_test(target_vectors1, target_vectors2, attr_vectors1, attr_vectors2)
    logging.info("APP-BE: BAT finished successfully")
    # BUGFIX: "EvaluationMethods" previously reported "all" (copy-paste error);
    # report the method actually executed.
    response = json.dumps(
        {"EmbeddingSpace": database, "EvaluationMethods": "bat",
         "BatValue": bat_result,
         "T1": JSONFormatter.dict_to_json(target_vectors1),
         "T2": JSONFormatter.dict_to_json(target_vectors2),
         "A1": JSONFormatter.dict_to_json(attr_vectors1),
         "A2": JSONFormatter.dict_to_json(attr_vectors2)
         })
    logging.info("APP-BE: Results: " + str(response))
    return response
# Evaluates the specifications with WEAT
def return_eval_weat(target_vectors1, target_vectors2, attr_vectors1, attr_vectors2, database):
    """Run the Word Embedding Association Test (WEAT) and return a JSON string."""
    logging.info("APP-BE: Starting WEAT evaluation")
    weat_effect_size, weat_p_value = weat.word_embedding_association_test(target_vectors1, target_vectors2, attr_vectors1,
                                                                          attr_vectors2)
    logging.info("APP-BE: WEAT finished successfully")
    # BUGFIX: "EvaluationMethods" previously reported "all" (copy-paste error);
    # report the method actually executed.
    response = json.dumps(
        {"EmbeddingSpace": database, "EvaluationMethods": "weat",
         "WeatEffectSize": weat_effect_size, "WeatPValue": weat_p_value,
         "T1": JSONFormatter.dict_to_json(target_vectors1),
         "T2": JSONFormatter.dict_to_json(target_vectors2),
         "A1": JSONFormatter.dict_to_json(attr_vectors1),
         "A2": JSONFormatter.dict_to_json(attr_vectors2)
         })
    logging.info("APP-BE: Results: " + str(response))
    return response
# Evaluates the specifications with K-Means++
def return_eval_kmeans(target_vectors1, target_vectors2, database):
    """Run k-means++ clustering on the target sets and return a JSON string."""
    logging.info("APP-BE: Starting KMeans evaluation")
    kmeans = k_means.k_means_clustering(target_vectors1, target_vectors2)
    logging.info("APP-BE: KMeans finished successfully")
    # BUGFIX: "EvaluationMethods" previously reported "all" (copy-paste error);
    # report the method actually executed.
    response = json.dumps(
        {"EmbeddingSpace": database, "EvaluationMethods": "kmeans",
         "KmeansValue": kmeans,
         "T1": JSONFormatter.dict_to_json(target_vectors1),
         "T2": JSONFormatter.dict_to_json(target_vectors2),
         })
    logging.info("APP-BE: Results: " + str(response))
    return response
|
import warnings
from pathlib import Path
from typing import Union
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm.auto import tqdm
from hakai_segmentation.geotiff_io import GeotiffReader, GeotiffWriter
from hakai_segmentation.models import _Model
class GeotiffSegmentation:
    """Class for configuring data io and efficient segmentation of Geotiff imagery."""

    def __init__(self, model: '_Model', input_path: Union[str, 'Path'], output_path: Union[str, 'Path'], crop_size: int = 256,
                 padding: int = 128, batch_size: int = 2):
        """
        Create the segmentation object.

        :param model: A callable module that accepts a batch of torch.Tensor data and returns classifications.
        :param input_path: The path to the input geotiff image.
        :param output_path: The destination file path for the output segmentation data.
        :param crop_size: The size of image crop to classify iteratively until the entire image is classified.
        :param padding: The number of context pixels to add to each side of an image crop to improve outputs.
        :param batch_size: The number of crops to classify at a time using the model.
        """
        self.model = model

        # Keep only the first three bands (assumed RGB order, see on_start)
        # and normalize with the standard ImageNet mean/std.
        tran = transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda img: img[:3, :, :]),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        self.reader = GeotiffReader(
            Path(input_path).expanduser().resolve(),
            transform=tran,
            crop_size=crop_size,
            padding=padding,
            filter_=self._should_keep,
        )
        self._dataloader = DataLoader(
            self.reader,
            shuffle=False,
            batch_size=batch_size,
            pin_memory=True,
            num_workers=0,
        )
        # Output raster mirrors the reader's georeferencing: one uint8 band, 0 = nodata.
        self.writer = GeotiffWriter.from_reader(Path(output_path).expanduser().resolve(),
                                                self.reader, count=1, dtype="uint8", nodata=0)
        # tqdm progress bar; created in on_start, torn down in on_end.
        self.progress = None

    @staticmethod
    def _should_keep(img: 'np.ndarray') -> bool:
        """
        Determines if an image crop should be classified or discarded.

        A crop is considered blank (and discarded) when every pixel is 0 or 255
        after clipping to [0, 255].

        :param img: The image crop, with padding removed.
        :return: Flag to indicate if crop should be discarded.
        """
        _img = np.clip(img, 0, 255)
        is_blank = np.all((_img == 0) | (_img == 255))
        return not is_blank

    def __call__(self):
        """Run the segmentation task."""
        self.on_start()

        for batch_idx, batch in enumerate(self._dataloader):
            self.on_batch_start(batch_idx)

            crops, indices = batch
            predictions = self.model(crops)
            # Per-pixel class labels from the channel dimension.
            labels = torch.argmax(predictions, dim=1).detach().cpu().numpy()

            # Write outputs
            for label, idx in zip(labels, indices):
                self.writer.write_index(label, int(idx))
                self.on_chip_write_end(int(idx))

            self.on_batch_end(batch_idx)
        self.on_end()

    def on_start(self):
        """Hook that runs before image processing. By default, sets up a tqdm progress bar."""
        # Check data type assumptions
        if self.reader.nodata is None:
            warnings.warn("Define the correct nodata value on the input raster to speed up processing.", UserWarning)
        dtype = self.reader.profile['dtype']
        if dtype != 'uint8':
            raise AssertionError(f"Input image has incorrect data type {dtype}. Only uint8 (aka Byte) images are supported.")
        if self.reader.count < 3:
            raise AssertionError("Input image has less than 3 bands. "
                                 "The image should have at least 3 bands, with the first three being in RGB order.")

        # Setup progress bar
        self.progress = tqdm(
            total=len(self.reader),
            desc="Processing"
        )

    def on_end(self):
        """Hook that runs after image processing. By default, tears down the tqdm progress bar."""
        # Jump the bar to 100% (filtered crops never triggered updates).
        self.progress.update(len(self.reader) - self.progress.n)
        self.progress.close()
        self.progress = None

    def on_batch_start(self, batch_idx: int):
        """
        Hook that runs for each batch of data, immediately before classification by the model.

        :param batch_idx: The batch index being processed.
        """
        pass

    def on_batch_end(self, batch_idx: int):
        """
        Hook that runs for each batch of data, immediately after classification by the model.

        :param batch_idx: The batch index being processed.
        """
        pass

    def on_chip_write_end(self, index: int):
        """
        Hook that runs for each crop of data, immediately after classification by the model.
        By default, increments a tqdm progress bar.

        :param index: The index of the image crop that was processed.
        """
        # Advance the bar to index+1; skipped (filtered) crops are covered too.
        self.progress.update(index + 1 - self.progress.n)
|
<reponame>ska-telescope/tmc-prototype
# Standard python import
import logging
# Additional import
from ska.base.control_model import ObsState
from tmc.common.tango_client import TangoClient
from tmc.common.tango_server_helper import TangoServerHelper
from .device_data import DeviceData
from . import const
from time import sleep
class ObsStateAggregator:
    """
    Observation State Aggregator class.

    Subscribes to the obsState of the MCCS Subarray leaf node and, on each
    change, derives the aggregated Subarray observation state and invokes the
    matching command ``succeeded()`` callback on the SubarrayNode device.
    """
    def __init__(self, logger=None):
        """
        :param logger: Optional logger instance; defaults to a module logger.
        """
        if logger is None:
            self.logger = logging.getLogger(__name__)
        else:
            self.logger = logger
        # Last obsState received from MCCS Subarray (None until first event).
        self._mccs_sa_obs_state = None
        # Maps TangoClient -> event subscription id, used by unsubscribe().
        self.mccs_obs_state_event_id = {}
        self.this_server = TangoServerHelper.get_instance()
        self.device_data = DeviceData.get_instance()
    def subscribe(self):
        """
        Subscribes to the obsState attribute of the MCCS Subarray leaf node.

        The leaf node FQDN is read from the MccsSubarrayLNFQDN device property.
        """
        property_val = self.this_server.read_property("MccsSubarrayLNFQDN")
        # The property value is a sequence of strings; join it into one FQDN.
        mccs_subarray_ln_fqdn = "".join(property_val)
        self.mccs_client = TangoClient(mccs_subarray_ln_fqdn)
        # Subscribe mccsSubarrayObsState (forwarded attribute) of mccsSubarray
        mccs_event_id = self.mccs_client.subscribe_attribute(
            const.EVT_MCCSSA_OBS_STATE, self.observation_state_cb
        )
        self.mccs_obs_state_event_id[self.mccs_client] = mccs_event_id
        log_msg = f"{const.STR_SUB_ATTR_MCCS_SALN_OBSTATE_SUCCESS}{self.mccs_obs_state_event_id}"
        self.logger.info(log_msg)
    def unsubscribe(self):
        """
        This function unsubscribes all Observation state events given by the event ids and their
        corresponding DeviceProxy objects.
        :param : None
        :return: None
        """
        for tango_client, event_id in self.mccs_obs_state_event_id.items():
            tango_client.unsubscribe_attribute(event_id)
    def observation_state_cb(self, evt):
        """
        Receives the subscribed MCCS Subarray obsState.
        :param evt: A event on MCCS Subarray ObsState.
        :type: Event object
            It has the following members:
                - date (event timestamp)
                - reception_date (event reception timestamp)
                - type (event type)
                - dev_name (device name)
                - name (attribute name)
                - value (event value)
        :return: None
        :raises: KeyError if error occurs while setting SubarrayNode's ObsState.
        """
        try:
            if not evt.err:
                event_observation_state = evt.attr_value.value
                if const.PROP_DEF_VAL_TMMCCS_MID_SALN in evt.attr_name:
                    # Typecast the event value to an ObsState enum label.
                    self._mccs_sa_obs_state = ObsState(event_observation_state)
                    self.this_server.write_attr("activityMessage", f"{const.STR_MCCS_SUBARRAY_OBS_STATE}{event_observation_state}", False)
                else:
                    self.logger.info(const.EVT_UNKNOWN)
                    self.this_server.write_attr("activityMessage", const.EVT_UNKNOWN, False)
                self.calculate_observation_state()
            else:
                log_msg = f"{const.ERR_SUBSR_MCCSSA_OBS_STATE}{evt}"
                self.logger.info(log_msg)
                self.this_server.write_attr("activityMessage", log_msg, False)
        except KeyError as key_error:
            log_msg = f"{const.ERR_MCCS_SUBARRAY_OBS_STATE}{key_error}"
            self.logger.error(log_msg)
            self.this_server.write_attr("activityMessage", f"{const.ERR_MCCS_SUBARRAY_OBS_STATE}{key_error}", False)
    def calculate_observation_state(self):
        """
        Calculates aggregated observation state of Subarray.

        Dispatches on the last received MCCS obsState and on the DeviceData
        flags to decide which command's succeeded() callback to invoke.
        """
        log_msg = f"MCCS ObsState is: {self._mccs_sa_obs_state}"
        self.logger.info(log_msg)
        if self._mccs_sa_obs_state == ObsState.EMPTY:
            if self.device_data.is_release_resources:
                self.logger.info(
                    "Calling ReleaseAllResource command succeeded() method"
                )
                self.this_server.device.release.succeeded()
            elif self.device_data.is_restart_command_executed:
                self.logger.info("Calling Restart command succeeded() method")
                self.this_server.device.restart.succeeded()
        elif self._mccs_sa_obs_state == ObsState.READY:
            if self.device_data.is_scan_completed:
                self.logger.info("Calling EndScan command succeeded() method")
                self.this_server.device.endscan.succeeded()
            else:
                # Configure command success
                self.logger.info("Calling Configure command succeeded() method")
                self.this_server.device.configure.succeeded()
        elif self._mccs_sa_obs_state == ObsState.IDLE:
            if self.device_data.is_end_command:
                # End command success
                self.logger.info("Calling End command succeeded() method")
                self.this_server.device.end.succeeded()
            elif self.device_data.is_obsreset_command_executed:
                # ObsReset command success
                self.logger.info("Calling ObsReset command succeeded() method")
                self.this_server.device.obsreset.succeeded()
            else:
                # Assign Resource command success
                self.logger.info("Calling AssignResource command succeeded() method")
                self.this_server.device.assign.succeeded()
        elif self._mccs_sa_obs_state == ObsState.ABORTED:
            try:
                # Poll briefly: the abort-executed flag may be set slightly
                # after the ABORTED event arrives.
                retry_count = 0
                while retry_count < 3:
                    if self.device_data.is_abort_command_executed:
                        # Abort command success
                        self.logger.info("Calling Abort command succeeded() method")
                        self.this_server.device.abort.succeeded()
                        break
                    sleep(0.1)
                    retry_count+=1
            except Exception as e:
                # Bug fix: the original called self.logger(...) — Logger
                # objects are not callable; log via error() instead.
                self.logger.error(str(e))
|
<gh_stars>0
import math
import struct
import sys
from enum import Enum
from typing import List
from . import Utilities
class BinFloatFormat(Enum):
    """Binary format of a float number (selects decoder/encoder and endianness)."""
    Single_4bytes = 1  # IEEE 754 single precision, machine byte order
    Single_4bytes_swapped = 2  # single precision with swapped endianness
    Double_8bytes = 3  # IEEE 754 double precision, machine byte order
    Double_8bytes_swapped = 4  # double precision with swapped endianness
class BinIntFormat(Enum):
    """Binary format of an integer number (selects decoder/encoder and endianness)."""
    Integer32_4bytes = 1  # signed 32-bit, machine byte order
    Integer32_4bytes_swapped = 2  # signed 32-bit with swapped endianness
    Integer16_2bytes = 3  # signed 16-bit, machine byte order
    Integer16_2bytes_swapped = 4  # signed 16-bit with swapped endianness
def assert_string_data(value: str) -> None:
    """Asserts value is string type."""
    is_string = isinstance(value, str)
    assert is_string, f"Input value type must be string. Actual type: {type(value)}, value: {value}"
def assert_list_data(value: list) -> None:
    """Asserts value is list type."""
    is_list = isinstance(value, list)
    assert is_list, f"Input value type must be a list. Actual type: {type(value)}, value: {value}"
def _get_endianness_symbol(swap_endianness: bool) -> str:
"""Based on the current endianness returns the symbol used in the 'struct' module."""
if swap_endianness is False:
return '@'
elif swap_endianness is True and sys.byteorder == 'little':
return '>'
else:
return '<'
def bytes_to_float32_list(data: bytes, swap_endianness=False) -> List[float]:
    """Converts bytes to list of floats - one number is represented by 4 bytes."""
    count = len(data) // 4
    fmt = f'{_get_endianness_symbol(swap_endianness)}{count}f'
    return [*struct.unpack(fmt, data)]
def bytes_to_double64_list(data: bytes, swap_endianness=False) -> List[float]:
    """Converts bytes to list of doubles - one number is represented by 8 bytes."""
    count = len(data) // 8
    fmt = f'{_get_endianness_symbol(swap_endianness)}{count}d'
    return [*struct.unpack(fmt, data)]
def bytes_to_int32_list(data: bytes, swap_endianness=False) -> List[int]:
    """Converts bytes to list of integer32 - one number is represented by 4 bytes."""
    count = len(data) // 4
    fmt = f'{_get_endianness_symbol(swap_endianness)}{count}i'
    return [*struct.unpack(fmt, data)]
def bytes_to_int16_list(data: bytes, swap_endianness=False) -> List[int]:
    """Converts bytes to list of integer16 - one number is represented by 2 bytes."""
    count = len(data) // 2
    fmt = f'{_get_endianness_symbol(swap_endianness)}{count}h'
    return [*struct.unpack(fmt, data)]
def bytes_to_list_of_floats(data: bytes, fmt: BinFloatFormat) -> List[float]:
    """Decodes binary data to a list of floating-point numbers based on the entered format."""
    # (decoder, swap_endianness) per format; unknown formats yield None,
    # matching the original implicit fall-through.
    dispatch = {
        BinFloatFormat.Single_4bytes: (bytes_to_float32_list, False),
        BinFloatFormat.Single_4bytes_swapped: (bytes_to_float32_list, True),
        BinFloatFormat.Double_8bytes: (bytes_to_double64_list, False),
        BinFloatFormat.Double_8bytes_swapped: (bytes_to_double64_list, True),
    }
    entry = dispatch.get(fmt)
    if entry is None:
        return None
    decoder, swapped = entry
    return decoder(data, swapped)
def bytes_to_list_of_integers(data: bytes, fmt: BinIntFormat) -> List[int]:
    """Decodes binary data to a list of integer numbers based on the entered format."""
    # (decoder, swap_endianness) per format; unknown formats yield None,
    # matching the original implicit fall-through.
    dispatch = {
        BinIntFormat.Integer32_4bytes: (bytes_to_int32_list, False),
        BinIntFormat.Integer32_4bytes_swapped: (bytes_to_int32_list, True),
        BinIntFormat.Integer16_2bytes: (bytes_to_int16_list, False),
        BinIntFormat.Integer16_2bytes_swapped: (bytes_to_int16_list, True),
    }
    entry = dispatch.get(fmt)
    if entry is None:
        return None
    decoder, swapped = entry
    return decoder(data, swapped)
def double64_list_to_bytes(data: List[float], swap_endianness=False) -> bytes:
    """Converts list of doubles to bytes - one number is converted to 8 bytes."""
    endian = _get_endianness_symbol(swap_endianness)
    return struct.pack(endian + str(len(data)) + 'd', *data)
def float32_list_to_bytes(data: List[float], swap_endianness=False) -> bytes:
    """Converts list of floats to bytes - one number is converted to 4 bytes."""
    endian = _get_endianness_symbol(swap_endianness)
    return struct.pack(endian + str(len(data)) + 'f', *data)
def int32_list_to_bytes(data: List[int], swap_endianness=False) -> bytes:
    """Converts list of integers 32 to bytes - one number is converted to 4 bytes."""
    endian = _get_endianness_symbol(swap_endianness)
    return struct.pack(endian + str(len(data)) + 'i', *data)
def int16_list_to_bytes(data: List[int], swap_endianness=False) -> bytes:
    """Converts list of integers 16 to bytes - one number is converted to 2 bytes.

    :param data: 16-bit integers to pack (annotation fixed: was List[float],
        but the 'h' struct format packs signed 16-bit integers).
    :param swap_endianness: If True, uses the opposite of the native byte order.
    """
    fmt = f'{_get_endianness_symbol(swap_endianness)}{str(len(data))}h'
    return struct.pack(fmt, *data)
def list_of_integers_to_bytes(ints: List[int], fmt: BinIntFormat) -> bytes:
    """Encodes list of integers to binary data based on the entered format."""
    # (encoder, swap_endianness) per format; unknown formats yield None,
    # matching the original implicit fall-through.
    dispatch = {
        BinIntFormat.Integer32_4bytes: (int32_list_to_bytes, False),
        BinIntFormat.Integer32_4bytes_swapped: (int32_list_to_bytes, True),
        BinIntFormat.Integer16_2bytes: (int16_list_to_bytes, False),
        BinIntFormat.Integer16_2bytes_swapped: (int16_list_to_bytes, True),
    }
    entry = dispatch.get(fmt)
    if entry is None:
        return None
    encoder, swapped = entry
    return encoder(ints, swapped)
def list_of_floats_to_bytes(floats: List[float], fmt: BinFloatFormat) -> bytes:
    """Encodes list of floats to binary data based on the entered format."""
    # (encoder, swap_endianness) per format; unknown formats yield None,
    # matching the original implicit fall-through.
    dispatch = {
        BinFloatFormat.Single_4bytes: (float32_list_to_bytes, False),
        BinFloatFormat.Single_4bytes_swapped: (float32_list_to_bytes, True),
        BinFloatFormat.Double_8bytes: (double64_list_to_bytes, False),
        BinFloatFormat.Double_8bytes_swapped: (double64_list_to_bytes, True),
    }
    entry = dispatch.get(fmt)
    if entry is None:
        return None
    encoder, swapped = entry
    return encoder(floats, swapped)
# Accepted spellings for boolean values in instrument responses.
# The 'pure' variants exclude '1'/'0', which may also denote plain integers.
pure_bool_true_lookup = frozenset(['on', 'On', 'ON', 'true', 'True', 'TRUE'])
bool_true_lookup = frozenset(['1', 'on', 'On', 'ON', 'true', 'True', 'TRUE'])
bool_false_lookup = frozenset(['0', 'off', 'Off', 'OFF', 'false', 'False', 'FALSE'])
pure_bool_false_lookup = frozenset(['off', 'Off', 'OFF', 'false', 'False', 'FALSE'])
def str_to_bool(string: str) -> bool:
    """Converts string to boolean value.
    The function is robust and case insensitive.
    If the string can not be converted to a boolean, the function returns False."""
    assert_string_data(string)

    def classify(candidate):
        # Returns True/False on a match, None when undecided.
        if candidate in bool_true_lookup:
            return True
        if candidate in bool_false_lookup:
            return False
        return None

    verdict = classify(string)
    if verdict is None:
        # If leading/trailing spaces
        string = string.strip()
        verdict = classify(string)
    if verdict is None:
        # If enclosed by brackets
        verdict = classify(Utilities.trim_str_response(string))
    return verdict if verdict is not None else False
def string_to_pure_bool(string: str) -> bool or None:
    """Converts string to boolean value. Compare to str_to_bool(), the values '1' and '0' are not considered boolean.
    Also, if the method can not convert the string to boolean, it returns None."""
    assert_string_data(string)

    def classify(candidate):
        # Returns True/False on a match, None when undecided.
        if candidate in pure_bool_true_lookup:
            return True
        if candidate in pure_bool_false_lookup:
            return False
        return None

    verdict = classify(string)
    if verdict is None:
        # If leading/trailing spaces
        string = string.strip()
        verdict = classify(string)
    if verdict is None:
        # If enclosed by brackets
        verdict = classify(Utilities.trim_str_response(string))
    return verdict
# Special number spellings recognized by str_to_int/str_to_float and friends
# (as seen in instrument responses).
number_plus_inf_lookup = frozenset(['Inf', 'INF', 'INFINITY', '+Inf', '+INF', '+inf', '+INFINITY', '+Infinity', '+infinity'])
number_minus_inf_lookup = frozenset(['-Inf', '-INF', '-inf', '-INFINITY', '-Infinity', '-infinity'])
number_nan_lookup = frozenset(['Nan', 'NAN', 'nan', 'NaN', 'NAV', 'NaV', 'NCAP', 'INV', 'NONE', 'none', 'None', 'DTX', 'UND', 'und'])
number_max_lookup = frozenset(['OFL', 'ofl', 'Ofl'])  # overflow markers
number_min_lookup = frozenset(['UFL', 'ufl', 'Ufl'])  # underflow markers
# Integer sentinel used for -inf / NaN-like values (most negative practical int).
int_neg_inf = -(sys.maxsize - 1)
# Escapes for characters that cannot appear in Python enum member names;
# used by enum_value_to_scpi_string() to rebuild the SCPI spelling.
enum_spec_prefixes = {'_minus': '-', '_plus': '+', '_': ''}
enum_spec_strings = {'_dash_': '-', '_dot_': '.'}
def str_to_int(string: str) -> int:
    """Converts string to integer value. Float values are coerced to integer.
    Also recognizes case insensitive special values like NaN, INV, NCAP..."""
    assert_string_data(string)
    string = string.strip()
    if not string:
        return 0
    special = str_special_values_to_int(string)
    if special:
        return special
    # Numbers with an explicit radix prefix: #H/0x hex, #B/0b binary, #Q/0o octal.
    radix_by_prefix = {'#H': 16, '0x': 16, '#B': 2, '0b': 2, '#Q': 8, '0o': 8}
    base = radix_by_prefix.get(string[:2])
    if base is not None:
        digits = string[2:]
        comma_pos = digits.find(',')
        if comma_pos >= 0:
            # Only the value before the comma is parsed.
            digits = digits[:comma_pos]
        return int(digits, base)
    # Simulation
    if string == 'Simulating':
        return 0
    return int(round(float(string)))
def str_special_values_to_int(string: str) -> int:
    """Converts special string values to integer. Returns None if no special value was found.

    Sentinels are built around sys.maxsize / int_neg_inf so that the special
    values sort sensibly relative to ordinary integers.
    """
    assert_string_data(string)
    if string in number_plus_inf_lookup or string in number_max_lookup:
        return sys.maxsize
    if string in number_minus_inf_lookup or string in number_min_lookup or string in number_nan_lookup:
        return int_neg_inf
    if string == 'OFF':
        return int_neg_inf + 1
    if string == 'ON':
        return int_neg_inf + 2
    if string == 'OK':
        return sys.maxsize - 1
    if string == 'DC':
        # int() keeps the declared int return type (previously returned a
        # float via true division), consistent with the 'ULEU' branch.
        return int(int_neg_inf / 100)
    if string == 'ULEU':
        return int(sys.maxsize / 10)
    if string == 'ULEL':
        # Same fix as 'DC': coerce the division result to int.
        return int(int_neg_inf / 10)
    # noinspection PyTypeChecker
    return None
def str_to_int_or_bool(string: str) -> int or bool:
    """Similar to str_to_int, but for special values "ON/OFF" the function returns boolean"""
    as_bool = string_to_pure_bool(string)
    if as_bool is None:
        return str_to_int(string)
    return as_bool
def str_to_float(string: str) -> float:
    """Converts string to float value.
    Also recognizes case insensitive special values like NaN, INV, NCAP...

    Special ON/OFF/OK values map to distinct epsilon-based sentinels so they
    remain distinguishable from ordinary measurements.
    """
    assert_string_data(string)
    string = string.strip()
    if string == '':
        return 0.0
    if string in number_plus_inf_lookup:
        return math.inf
    if string in number_minus_inf_lookup:
        return -math.inf
    if string in number_nan_lookup:
        return math.nan
    if string in number_max_lookup:
        return sys.float_info.max
    if string in number_min_lookup:
        return -sys.float_info.max
    if string == 'OFF':
        return -sys.float_info.epsilon
    if string == 'ON':
        return -2*sys.float_info.epsilon
    if string == 'OK':
        return sys.float_info.epsilon
    # Dead-code fix: the original also tested "or string == ''" here, which
    # is unreachable — the empty (stripped) string already returned 0.0 above.
    if string == 'DC':
        return -sys.float_info.max / 100
    if string == 'ULEU':
        return sys.float_info.max / 10
    if string == 'ULEL':
        return -sys.float_info.max / 10
    if string == 'Simulating':
        return 0.0
    return float(string)
def str_to_float_or_bool(string: str) -> float or bool:
    """Similar to str_to_float, but for special values "ON/OFF" the function returns boolean"""
    as_bool = string_to_pure_bool(string)
    if as_bool is None:
        return str_to_float(string)
    return as_bool
def float_to_str(value: float) -> str:
    """Converts double number to string using {.12g} formatter."""
    return f"{value:.12g}"
def bool_to_str(value: bool) -> str:
    """Converts boolean to 'ON' or 'OFF' string."""
    # Exact type check: int subclasses like 1/0 are deliberately rejected.
    if type(value) is not bool:
        raise Exception(f"bool_to_str: unsupported variable type '{type(value)}', value '{value}'. Only boolean values are supported.")
    return 'ON' if value else 'OFF'
def str_enclose_by_quotes(string: str) -> str:
    """Returns string enclosed by single quotes."""
    assert_string_data(string)
    return f"'{string}'"
def list_to_csv_str(value: list) -> str:
    """Converts list of elements to strings separated by commas.
    Element types can differ on an individual basis.
    Supported element types:
    - int
    - bool
    - float
    - string -> string no quotes
    - enum"""
    assert_list_data(value)
    converted = []
    for x in value:
        text = value_to_str(x)
        if not text:
            raise TypeError(f"List element type is not supported by Conversions.list_to_csv_str: '{x}'")
        converted.append(text)
    return ','.join(converted)
def list_to_csv_quoted_str(value: list) -> str:
    """Converts list of elements to quoted strings separated by commas.
    Only string elements are enclosed by single quotes
    Element types can differ on an individual basis.
    Supported element types:
    - int
    - bool
    - float
    - string -> string enclosed by quotes
    - enum"""
    assert_list_data(value)
    converted = []
    for x in value:
        text = str_enclose_by_quotes(x) if isinstance(x, str) else value_to_str(x)
        if not text:
            raise TypeError(f"List element type is not supported by Conversions.list_to_csv_quoted_str: '{x}'")
        converted.append(text)
    return ','.join(converted)
def decimal_value_to_str(x: int or float) -> str:
    """Converts scalar decimal value to string.
    Supported element types:
    - int
    - float"""
    # booleans are int subclasses and must be rejected here.
    if isinstance(x, float):
        return float_to_str(x)
    if isinstance(x, int) and type(x) is not bool:
        return str(x)
    raise Exception(f"decimal_value_to_str: unsupported variable type '{type(x)}', value '{x}'. Only integer and float types are supported.")
def decimal_or_bool_value_to_str(x: int or float or bool) -> str:
    """Converts scalar decimal value to string.
    Supported element types:
    - int
    - float
    - boolean"""
    # bool first — it is an int subclass and would match isinstance(x, int).
    if type(x) is bool:
        return bool_to_str(x)
    if isinstance(x, int):
        return str(x)
    if isinstance(x, float):
        return float_to_str(x)
    raise Exception(f"decimal_or_bool_value_to_str: unsupported variable type '{type(x)}', value '{x}'. Only integer, float and boolean types are supported.")
def value_to_str(x: int or bool or float or str or Enum) -> str:
    """Converts scalar value to string.
    Supported element types:
    - int
    - bool
    - float
    - string
    - enum"""
    # Order matters: bool before int (bool is an int subclass).
    if isinstance(x, bool):
        return bool_to_str(x)
    if isinstance(x, int):
        return str(x)
    if isinstance(x, float):
        return float_to_str(x)
    if isinstance(x, str):
        return x
    if isinstance(x, Enum):
        return enum_value_to_scpi_string(x.name)
    raise Exception(f"value_to_str: unsupported variable type '{type(x)}', value '{x}'. Supported types: int, bool, float, str, enum.")
def enum_value_to_scpi_string(enum_value: str) -> str:
    """Conversion EnumValue -> SCPI_String
    Unescapes all the special characters that can not be contained in the enum member definition, but can be sent to the instrument as enum string.
    Use this to send the scpi enum value to the instrument."""
    for prefix, replacement in enum_spec_prefixes.items():
        if enum_value.startswith(prefix):
            enum_value = replacement + enum_value[len(prefix):]
    for token, replacement in enum_spec_strings.items():
        enum_value = enum_value.replace(token, replacement)
    return enum_value
def value_to_quoted_str(x: int or bool or float or str or Enum) -> str:
    """Converts scalar value to string enclosed by single quotes.
    Supported element types:
    - int
    - bool
    - float
    - string
    - enum"""
    return "'" + value_to_str(x) + "'"
def str_to_float_list(string: str) -> List[float]:
    """Converts string with comma-separated values to list of Floats."""
    assert_string_data(string)
    if not string:
        return []
    return [str_to_float(part) for part in string.split(',')]
def str_to_float_or_bool_list(string: str) -> List[float or bool]:
    """Converts string with comma-separated values to list of float or boolean values."""
    assert_string_data(string)
    if not string:
        return []
    return [str_to_float_or_bool(part) for part in string.split(',')]
def str_to_int_list(string: str) -> List[int]:
    """Converts string with comma-separated values to list of Integers."""
    assert_string_data(string)
    if not string:
        return []
    return [str_to_int(part) for part in string.split(',')]
def str_to_int_or_bool_list(string: str) -> List[int or bool]:
    """Converts string with comma-separated values to list of integer or boolean values."""
    assert_string_data(string)
    if not string:
        return []
    return [str_to_int_or_bool(part) for part in string.split(',')]
def str_to_bool_list(string: str) -> List[bool]:
    """Converts string with comma-separated values to list of booleans."""
    assert_string_data(string)
    if not string:
        return []
    return [str_to_bool(part) for part in string.split(',')]
def str_to_str_list(string: str, clear_one_empty_item: bool = False) -> List[str]:
    """Converts string with comma-separated values to list of strings.
    Each element is trimmed by trim_str_response().
    If the clear_one_empty_item is set to True (default is False), and the result is exactly one empty string item, the method returns empty list."""
    assert_string_data(string)
    if not string:
        return []
    items = [Utilities.trim_str_response(part) for part in string.split(',')]
    if clear_one_empty_item and items == ['']:
        return []
    return items
def _find_in_enum_members(item: str, enum_members: List[str]) -> int:
"""Matches a string in the provided list of member strings.
The item must be not fully matched.
The item is matched if a member string starts with the item (the item is a prefix of the member).
Example: item='CONN' will match the enum_member 'CONNected'
If the item contains a comma, only the value before comma is considered
Returns found index in the enum_members list"""
if ',' in item:
item = item[:item.index(',')].strip()
ix = -1
i = 0
for x in enum_members:
if x.startswith(item):
return i
i += 1
return ix
def str_to_scalar_enum_helper(string: str, enum_type: Enum, enum_members=None) -> Enum:
    """Converts string to one enum element.
    enum_members are optional to improve the performance for repeated conversions.
    If you do not provide them, they are generated inside the function."""
    value = Utilities.trim_str_response(string)
    if not enum_members:
        # noinspection PyTypeChecker
        enum_members = [member.name for member in enum_type]
    # First pass: match against the raw member names.
    match_ix = _find_in_enum_members(value, enum_members)
    if match_ix >= 0:
        return enum_type[enum_members[match_ix]]
    # Second pass: match against SCPI-unescaped names. Done lazily since most
    # enums have no special values.
    unescaped = [enum_value_to_scpi_string(member) for member in enum_members]
    match_ix = _find_in_enum_members(value, unescaped)
    if match_ix >= 0:
        return enum_type[enum_members[match_ix]]
    # Last resort: the special integer spellings (NaN, INV, OFL, ...).
    special = str_special_values_to_int(value)
    if not special:
        raise Exception(f"String '{value}' can not be found in the enum type '{enum_type}'")
    # noinspection PyTypeChecker
    return special
def str_to_list_enum_helper(string: str, enum_type: Enum, enum_members=None) -> List[Enum]:
    """Converts string to list of enum elements.
    enum_members are optional to improve the performance for repeated conversions.
    If you do not provide them, they are generated inside the function."""
    if not enum_members:
        # noinspection PyTypeChecker
        enum_members = [member.name for member in enum_type]
    return [str_to_scalar_enum_helper(part, enum_type, enum_members) for part in string.split(',')]
def enum_scalar_to_str(data, enum_type) -> str:
    """Converts enum scalar value to string."""
    is_expected_type = isinstance(data, enum_type)
    assert is_expected_type, f"Expected command parameter {enum_type}, actual data type: {type(data)}. Value: {data}"
    return value_to_str(data)
def enum_list_to_str(data: List, enum_type) -> str:
    """Converts enum list to csv-string."""
    # For enums, check that each element is an enum
    all_enums = all(isinstance(element, enum_type) for element in data)
    assert all_enums, f"Expected command parameter list of {enum_type}, detected one or more elements of non-enum type. Value: {data}"
    return list_to_csv_str(data)
def str_to_scalar_enum(string: str, enum_type) -> Enum:
    """Converts string to one enum element (thin wrapper over the helper)."""
    result = str_to_scalar_enum_helper(string, enum_type)
    return result
def str_to_list_enum(string: str, enum_type) -> List[Enum]:
    """Converts string to list of enum elements (thin wrapper over the helper)."""
    result = str_to_list_enum_helper(string, enum_type)
    return result
|
<filename>test/functional/feature_llmq_is_retroactive.py
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import DashTestFramework
from test_framework.util import set_node_times, isolate_node, reconnect_isolated_node
'''
feature_llmq_is_retroactive.py
Tests retroactive signing
We have 6 nodes where node 0 is the control node, nodes 1-5 are masternodes.
Mempool inconsistencies are simulated via disconnecting/reconnecting node 3
and by having a higher relay fee on nodes 4 and 5.
'''
class LLMQ_IS_RetroactiveSigning(DashTestFramework):
    """Functional test for retroactive InstantSend signing (see module docstring).

    Node 0 is the control node; nodes 1-5 are masternodes. Node 3 is
    repeatedly isolated/reconnected to simulate mempool inconsistencies.
    """
    def set_test_params(self):
        """Configure 6 nodes (5 masternodes), LLMQ size 5 / threshold 3."""
        # -whitelist is needed to avoid the trickling logic on node0
        self.set_dash_test_params(6, 5, [["-whitelist=127.0.0.1"], [], [], [], ["-minrelaytxfee=0.001"], ["-minrelaytxfee=0.001"]], fast_dip3_enforcement=True)
        self.set_dash_llmq_test_params(5, 3)
        self.set_dash_dip8_activation(10)
    def run_test(self):
        """Drive all scenarios: normal IS lock, partially-known TX, unknown TX,
        and session-timeout variants (with and without cycling LLMQs)."""
        # Activate the dip0008 softfork before any signing can be tested.
        while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
            self.nodes[0].generate(10)
        self.sync_blocks(self.nodes, timeout=60*5)
        # Enable DKG, ChainLocks and InstantSend via sporks.
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
        self.nodes[0].spork("SPORK_2_INSTANTSEND_ENABLED", 0)
        self.nodes[0].spork("SPORK_3_INSTANTSEND_BLOCK_FILTERING", 0)
        self.wait_for_sporks_same()
        self.mine_quorum()
        self.mine_quorum()
        # Make sure that all nodes are chainlocked at the same height before starting actual tests
        self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash(), timeout=30)
        self.log.info("trying normal IS lock")
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # 3 nodes should be enough to create an IS lock even if nodes 4 and 5 (which have no tx itself)
        # are the only "neighbours" in intra-quorum connections for one of them.
        self.wait_for_instantlock(txid, self.nodes[0])
        self.bump_mocktime(1)
        block = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(block)
        self.log.info("testing normal signing with partially known TX")
        isolate_node(self.nodes[3])
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # Make sure nodes 1 and 2 received the TX before we continue,
        # otherwise it might announce the TX to node 3 when reconnecting
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        reconnect_isolated_node(self.nodes[3], 0)
        # Make sure nodes actually try re-connecting quorum connections
        self.bump_mocktime(30)
        self.wait_for_mnauth(self.nodes[3], 2)
        # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # push the tx directly via rpc
        self.nodes[3].sendrawtransaction(self.nodes[0].getrawtransaction(txid))
        # node 3 should vote on a tx now since it became aware of it via sendrawtransaction
        # and this should be enough to complete an IS lock
        self.wait_for_instantlock(txid, self.nodes[0])
        self.log.info("testing retroactive signing with unknown TX")
        isolate_node(self.nodes[3])
        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
        rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
        txid = self.nodes[3].sendrawtransaction(rawtx)
        # Make node 3 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        block = self.nodes[3].generatetoaddress(1, self.nodes[0].getnewaddress())[0]
        reconnect_isolated_node(self.nodes[3], 0)
        self.wait_for_chainlocked_block_all_nodes(block)
        self.nodes[0].setmocktime(self.mocktime)
        self.log.info("testing retroactive signing with partially known TX")
        isolate_node(self.nodes[3])
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
        # Make sure nodes 1 and 2 received the TX before we continue,
        # otherwise it might announce the TX to node 3 when reconnecting
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        reconnect_isolated_node(self.nodes[3], 0)
        # Make sure nodes actually try re-connecting quorum connections
        self.bump_mocktime(30)
        self.wait_for_mnauth(self.nodes[3], 2)
        # node 3 fully reconnected but the TX wasn't relayed to it, so there should be no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # Make node0 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        block = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(block)
        self.log.info("testing retroactive signing with partially known TX and all nodes session timeout")
        self.test_all_nodes_session_timeout(False)
        self.log.info("repeating test, but with cycled LLMQs")
        self.test_all_nodes_session_timeout(True)
        self.log.info("testing retroactive signing with partially known TX and single node session timeout")
        self.test_single_node_session_timeout(False)
        self.log.info("repeating test, but with cycled LLMQs")
        self.test_single_node_session_timeout(True)
    def cycle_llmqs(self):
        """Mine two fresh quorums and wait until the tip is chainlocked everywhere."""
        self.mine_quorum()
        self.mine_quorum()
        self.wait_for_chainlocked_block_all_nodes(self.nodes[0].getbestblockhash(), timeout=30)
    def test_all_nodes_session_timeout(self, do_cycle_llmqs):
        """Time out the IS signing session on ALL nodes, then verify no IS lock
        appears until the TX is confirmed in a chainlocked block.
        :param do_cycle_llmqs: also cycle the quorums before the final check."""
        set_node_times(self.nodes, self.mocktime)
        isolate_node(self.nodes[3])
        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
        rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
        txid = self.nodes[0].sendrawtransaction(rawtx)
        txid = self.nodes[3].sendrawtransaction(rawtx)
        # Make sure nodes 1 and 2 received the TX before we continue
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        # Make sure signing is done on nodes 1 and 2 (it's async)
        time.sleep(5)
        # Make the signing session for the IS lock timeout on nodes 1-3
        self.bump_mocktime(61)
        time.sleep(2) # make sure Cleanup() is called
        reconnect_isolated_node(self.nodes[3], 0)
        # Make sure nodes actually try re-connecting quorum connections
        self.bump_mocktime(30)
        self.wait_for_mnauth(self.nodes[3], 2)
        # node 3 fully reconnected but the signing session is already timed out on all nodes, so no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        if do_cycle_llmqs:
            self.cycle_llmqs()
            self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # Make node 0 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        block = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(block)
    def test_single_node_session_timeout(self, do_cycle_llmqs):
        """Time out the IS signing session on node 3 only, then verify the lock
        still does not form once the TX reaches the other nodes.
        :param do_cycle_llmqs: also cycle the quorums before the final check."""
        set_node_times(self.nodes, self.mocktime)
        isolate_node(self.nodes[3])
        rawtx = self.nodes[0].createrawtransaction([], {self.nodes[0].getnewaddress(): 1})
        rawtx = self.nodes[0].fundrawtransaction(rawtx)['hex']
        rawtx = self.nodes[0].signrawtransaction(rawtx)['hex']
        txid = self.nodes[3].sendrawtransaction(rawtx)
        time.sleep(2) # make sure signing is done on node 2 (it's async)
        # Make the signing session for the IS lock timeout on node 3
        self.bump_mocktime(61)
        time.sleep(2) # make sure Cleanup() is called
        reconnect_isolated_node(self.nodes[3], 0)
        # Make sure nodes actually try re-connecting quorum connections
        self.bump_mocktime(30)
        self.wait_for_mnauth(self.nodes[3], 2)
        self.nodes[0].sendrawtransaction(rawtx)
        # Make sure nodes 1 and 2 received the TX
        self.wait_for_tx(txid, self.nodes[1])
        self.wait_for_tx(txid, self.nodes[2])
        # Make sure signing is done on nodes 1 and 2 (it's async)
        time.sleep(5)
        # node 3 fully reconnected but the signing session is already timed out on it, so no IS lock
        self.wait_for_instantlock(txid, self.nodes[0], False, 1)
        if do_cycle_llmqs:
            self.cycle_llmqs()
            self.wait_for_instantlock(txid, self.nodes[0], False, 5)
        # Make node 0 consider the TX as safe
        self.bump_mocktime(10 * 60 + 1)
        block = self.nodes[0].generate(1)[0]
        self.wait_for_chainlocked_block_all_nodes(block)
# Script entry point: run the functional test via the framework's main().
if __name__ == '__main__':
    LLMQ_IS_RetroactiveSigning().main()
|
<reponame>AMLab-Amsterdam/DataAugmentationInterventions<gh_stars>10-100
"""Pytorch Dataset object that loads MNIST and SVHN. It returns x,y,s where s=0 when x,y is taken from MNIST."""
import os
import numpy as np
import torch
import torch.utils.data as data_utils
from torchvision import datasets, transforms
import torchvision
import PIL
class MnistAllDaDist(data_utils.Dataset):
    """MNIST dataset with a randomized per-class label distribution and heavy
    data augmentation (color jitter + affine transforms + flips + per-sample
    rotation by one of ``thetas``)."""
    def __init__(self, root, train=True, thetas=[0], d_label=0, download=True):
        # NOTE(review): thetas=[0] is a mutable default argument; benign here
        # because the list is only read (indexed), never mutated.
        self.root = os.path.expanduser(root)
        self.train = train
        self.thetas = thetas
        self.d_label = d_label
        self.download = download
        transform_dict = {
            'brightness': torchvision.transforms.ColorJitter(brightness=1.0, contrast=0, saturation=0, hue=0),
            'contrast': torchvision.transforms.ColorJitter(brightness=0, contrast=1.0, saturation=0, hue=0),
            'saturation': torchvision.transforms.ColorJitter(brightness=0, contrast=0, saturation=1.0, hue=0),
            'hue': torchvision.transforms.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0.5),
            'rotation': torchvision.transforms.RandomAffine([0, 359], translate=None, scale=None, shear=None,
                                                            resample=PIL.Image.BILINEAR, fillcolor=0),
            'translate': torchvision.transforms.RandomAffine(0, translate=[0.2, 0.2], scale=None, shear=None,
                                                             resample=PIL.Image.BILINEAR, fillcolor=0),
            'scale': torchvision.transforms.RandomAffine(0, translate=None, scale=[0.8, 1.2], shear=None,
                                                         resample=PIL.Image.BILINEAR, fillcolor=0),
            'shear': torchvision.transforms.RandomAffine(0, translate=None, scale=None,
                                                         shear=[-10., 10., -10., 10.],
                                                         resample=PIL.Image.BILINEAR, fillcolor=0),
            'vflip': torchvision.transforms.RandomVerticalFlip(p=0.5),
            'hflip': torchvision.transforms.RandomHorizontalFlip(p=0.5),
            'none': None,
        }
        # All augmentations are chained; 'none' is intentionally excluded.
        self.transforms = torchvision.transforms.Compose([transform_dict['brightness'],
                                                          transform_dict['contrast'],
                                                          transform_dict['saturation'],
                                                          transform_dict['hue'],
                                                          transform_dict['rotation'],
                                                          transform_dict['translate'],
                                                          transform_dict['scale'],
                                                          transform_dict['shear'],
                                                          transform_dict['vflip'],
                                                          transform_dict['hflip']])
        self.to_pil = transforms.ToPILImage()
        self.to_tensor = transforms.ToTensor()
        # One-hot lookup tables for labels (10 classes) and domains (4 domains).
        self.y_to_categorical = torch.eye(10)
        self.d_to_categorical = torch.eye(4)
        self.imgs, self.labels = self._get_data()
    def _get_data(self):
        """Loads MNIST and subsamples each digit class to a random count in
        [80, 160), returning (list of PIL images, label tensor)."""
        mnist_loader = torch.utils.data.DataLoader(datasets.MNIST(self.root,
                                                                  train=self.train,
                                                                  download=self.download,
                                                                  transform=transforms.ToTensor()),
                                                   batch_size=60000,
                                                   shuffle=False)
        # batch_size=60000 covers the full dataset, so this loop runs once and
        # simply keeps the single (x, y) batch.
        for i, (x, y) in enumerate(mnist_loader):
            mnist_imgs = x
            mnist_labels = y
        # Get 10 random ints between 80 and 160
        label_dist = np.random.randint(80, 160, 10)
        mnist_imgs_dist, mnist_labels_dist = [], []
        for i in range(10):
            idx = np.where(mnist_labels == i)[0]
            np.random.shuffle(idx)
            idx = idx[:label_dist[i]] # select the right amount of labels for each class
            mnist_imgs_dist.append(mnist_imgs[idx])
            mnist_labels_dist.append(mnist_labels[idx])
        mnist_imgs_dist = torch.cat(mnist_imgs_dist)
        mnist_labels_dist = torch.cat(mnist_labels_dist)
        # Convert tensors to PIL images so the augmentations can be applied lazily.
        pil_list = []
        for x in mnist_imgs_dist:
            pil_list.append(self.to_pil(x))
        return pil_list, mnist_labels_dist
    def __len__(self):
        # Number of subsampled examples.
        return len(self.labels)
    def __getitem__(self, index):
        """Returns (augmented image tensor, one-hot label, one-hot domain).

        A rotation angle is drawn uniformly from self.thetas per sample; the
        returned domain is always the fixed self.d_label, not the drawn angle.
        """
        x = self.imgs[index]
        y = self.labels[index]
        d = np.random.choice(range(len(self.thetas)))
        return self.to_tensor(self.transforms(transforms.functional.rotate(x, self.thetas[d]))), self.y_to_categorical[y], self.d_to_categorical[self.d_label]
|
import os, time
import selenium
from selenium import webdriver
#os.getcwd()
#os.chdir('C:/Users/Caio/repos/nba-models')
def scrapeBbalRef(year_start, year_end, page_string, id, folder_name, toggle_partial=True):
    """Scrape one basketball-reference stats table per season and dump it to CSV.

    Uses the module-level selenium ``browser`` to load each season's page,
    click the site's "CSV" tooltip, and save the embedded CSV text.

    :param year_start: first season year (inclusive).
    :param year_end: last season year (inclusive).
    :param page_string: URL template containing the literal ``*SEASON*`` marker.
    :param id: HTML element id of the target table (shadows the builtin ``id``;
        kept unchanged for backward compatibility with keyword callers).
    :param folder_name: output directory; one ``<season>.csv`` file per season.
    :param toggle_partial: click the "toggle partial table" control first.
    """
    for season in range(year_start, year_end + 1):
        # navigate to bballref
        url = page_string.replace("*SEASON*", str(season))
        browser.get(url)
        # setup id strings
        partial_id = id + "_toggle_partial_table"
        csv_id = "csv_" + id
        # activate and grab data in CSV format
        if toggle_partial:
            browser.execute_script('document.getElementById("' + partial_id + '").click();')
        # grab raw CSV
        raw_csv = browser.execute_script('''var x = document.getElementsByClassName("tooltip");
        x[3].click();
        var content = document.getElementById("''' + csv_id + '''")
        // small hack to account for a few old MVP pages
        if (content==null) {
        var content = document.getElementById("csv_mvp")
        }
        return content.textContent
        ''')
        # clean csv string
        ## get rid of false headers
        if page_string == 'https://www.basketball-reference.com/awards/awards_*SEASON*.html' or page_string == 'https://www.basketball-reference.com/leagues/NBA_*SEASON*_standings.html':
            raw_csv = '\n'.join(raw_csv.split('\n')[2:])
            ## remove special characters (<=, >=) from standings
            raw_csv = raw_csv.replace("\u2264", "")
            raw_csv = raw_csv.replace("\u2265", "")
        # write to CSV
        pathstr = folder_name + "/" + str(season) + ".csv"
        # BUG FIX: the original never closed the file handle, risking lost
        # (unflushed) writes; a context manager guarantees close on all paths.
        with open(pathstr, "w") as f:
            f.write(raw_csv)


# start browser crawler
browser = webdriver.Firefox()
# BUG FIX: these calls originally appeared *above* the function definition,
# which raises NameError when the script executes top-to-bottom; they now run
# after both the def and the browser exist.
# grab season player stat per game
scrapeBbalRef(2018, 2018, 'https://www.basketball-reference.com/leagues/NBA_*SEASON*_per_game.html', 'per_game_stats', 'season-stats-pergame')
# grab season player stat totals
scrapeBbalRef(2018, 2018, 'https://www.basketball-reference.com/leagues/NBA_*SEASON*_totals.html', 'totals_stats', 'season-stats-totals')
# grab season advanced player stats
scrapeBbalRef(1997, 1997, 'https://www.basketball-reference.com/leagues/NBA_*SEASON*_advanced.html', 'advanced_stats', 'season-stats-advanced')
# grab MVP stats
scrapeBbalRef(2018, 2018, 'https://www.basketball-reference.com/awards/awards_*SEASON*.html', 'nba_mvp', 'award-stats', False)
# grab standings stats
scrapeBbalRef(2018, 2018, 'https://www.basketball-reference.com/leagues/NBA_*SEASON*_standings.html', 'expanded_standings', 'season-standings', False)
### DEPRECATED, BUILT INTO scrapeBbalRef
## GRAB SEASON PLAYER STAT PER GAME
# for season in range(1976, 1977):
# # navigate to bballref
# url = 'https://www.basketball-reference.com/leagues/NBA_' + str(season) + '_per_game.html'
# browser.get(url)
# # activate and grab data in CSV format
# raw_csv = browser.execute_script('''document.getElementById("per_game_stats_toggle_partial_table").click();
# var x = document.getElementsByClassName("tooltip");
# x[3].click();
# var content = document.getElementById("csv_per_game_stats")
# return content.textContent
# ''')
#
# # write to CSV
# pathstr = "season-stats-pergame/" + str(season) + ".csv"
# f = open(pathstr, "w")
# f.write(raw_csv)
# ## GRAB SEASON PLAYER STAT TOTALS
# for season in range(1976, 2018):
# # navigate to bballref
# url = 'https://www.basketball-reference.com/leagues/NBA_' + str(season) + '_totals.html'
# browser.get(url)
# # activate and grab data in CSV format
# raw_csv = browser.execute_script('''document.getElementById("totals_stats_toggle_partial_table").click();
# var x = document.getElementsByClassName("tooltip");
# x[3].click();
# var content = document.getElementById("csv_totals_stats")
# return content.textContent
# ''')
#
# # write to CSV
# pathstr = "season-stats-totals/" + str(season) + ".csv"
# f = open(pathstr, "w")
# f.write(raw_csv)
# ## GRAB SEASON MVP STATS
# for season in range(1976, 1977):
# # navigate to bballref
# url = 'https://www.basketball-reference.com/awards/awards_' + str(season) + '.html'
# browser.get(url)
# # activate and grab data in CSV format
# raw_csv = browser.execute_script('''var x = document.getElementsByClassName("tooltip");
# x[3].click();
# var content = document.getElementById("csv_mvp")
# if (content==null) {
# var content = document.getElementById("csv_nba_mvp")
# }
# return content.textContent
# ''')
#
# # write to CSV
# raw_csv = raw_csv[133:]
# pathstr = "award-stats/" + str(season) + ".csv"
# f = open(pathstr, "w")
# f.write(raw_csv)
|
# repo: anton-sidelnikov/openstacksdk
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_create_server
----------------------------------
Tests for the `create_server` command.
"""
import base64
from unittest import mock
import uuid
from openstack.cloud import exc
from openstack.cloud import meta
from openstack.compute.v2 import server
from openstack import connection
from openstack.tests import fakes
from openstack.tests.unit import base
class TestCreateServer(base.TestCase):
def test_create_server_with_get_exception(self):
"""
Test that a bad status code when attempting to get the server instance
raises an exception in create_server.
"""
build_server = fakes.make_fake_server('1234', '', 'BUILD')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': []}),
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': build_server},
validate=dict(
json={'server': {
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'max_count': 1,
u'min_count': 1,
u'name': u'server-name',
'networks': 'auto'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
status_code=404),
])
self.assertRaises(
exc.OpenStackCloudException, self.cloud.create_server,
'server-name', {'id': 'image-id'}, {'id': 'flavor-id'})
self.assert_calls()
def test_create_server_with_server_error(self):
"""
Test that a server error before we return or begin waiting for the
server instance spawn raises an exception in create_server.
"""
build_server = fakes.make_fake_server('1234', '', 'BUILD')
error_server = fakes.make_fake_server('1234', '', 'ERROR')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': []}),
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': build_server},
validate=dict(
json={'server': {
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'max_count': 1,
u'min_count': 1,
u'name': u'server-name',
'networks': 'auto'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
json={'server': error_server}),
])
self.assertRaises(
exc.OpenStackCloudException, self.cloud.create_server,
'server-name', {'id': 'image-id'}, {'id': 'flavor-id'})
self.assert_calls()
def test_create_server_wait_server_error(self):
"""
Test that a server error while waiting for the server to spawn
raises an exception in create_server.
"""
build_server = fakes.make_fake_server('1234', '', 'BUILD')
error_server = fakes.make_fake_server('1234', '', 'ERROR')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': []}),
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': build_server},
validate=dict(
json={'server': {
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'max_count': 1,
u'min_count': 1,
u'name': u'server-name',
'networks': 'auto'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
json={'servers': [build_server]}),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
json={'servers': [error_server]}),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.create_server,
'server-name', dict(id='image-id'),
dict(id='flavor-id'), wait=True)
self.assert_calls()
def test_create_server_with_timeout(self):
"""
Test that a timeout while waiting for the server to spawn raises an
exception in create_server.
"""
fake_server = fakes.make_fake_server('1234', '', 'BUILD')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': []}),
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': fake_server},
validate=dict(
json={'server': {
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'max_count': 1,
u'min_count': 1,
u'name': u'server-name',
'networks': 'auto'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
json={'servers': [fake_server]}),
])
self.assertRaises(
exc.OpenStackCloudTimeout,
self.cloud.create_server,
'server-name',
dict(id='image-id'), dict(id='flavor-id'),
wait=True, timeout=0.01)
# We poll at the end, so we don't know real counts
self.assert_calls(do_count=False)
def test_create_server_no_wait(self):
"""
Test that create_server with no wait and no exception in the
create call returns the server instance.
"""
fake_server = fakes.make_fake_server('1234', '', 'BUILD')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': []}),
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': fake_server},
validate=dict(
json={'server': {
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'max_count': 1,
u'min_count': 1,
u'name': u'server-name',
'networks': 'auto'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
json={'server': fake_server}),
])
self.assertDictEqual(
server.Server(**fake_server).to_dict(computed=False),
self.cloud.create_server(
name='server-name',
image=dict(id='image-id'),
flavor=dict(id='flavor-id')).to_dict(computed=False)
)
self.assert_calls()
def test_create_server_config_drive(self):
"""
Test that config_drive gets passed in properly
"""
fake_server = fakes.make_fake_server('1234', '', 'BUILD')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': []}),
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': fake_server},
validate=dict(
json={'server': {
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'config_drive': True,
u'max_count': 1,
u'min_count': 1,
u'name': u'server-name',
'networks': 'auto'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
json={'server': fake_server}),
])
self.assertDictEqual(
server.Server(**fake_server).to_dict(computed=False),
self.cloud.create_server(
name='server-name',
image=dict(id='image-id'),
flavor=dict(id='flavor-id'),
config_drive=True).to_dict(computed=False))
self.assert_calls()
def test_create_server_config_drive_none(self):
"""
Test that config_drive gets not passed in properly
"""
fake_server = fakes.make_fake_server('1234', '', 'BUILD')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': []}),
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': fake_server},
validate=dict(
json={'server': {
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'max_count': 1,
u'min_count': 1,
u'name': u'server-name',
'networks': 'auto'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
json={'server': fake_server}),
])
self.assertEqual(
server.Server(**fake_server).to_dict(computed=False),
self.cloud.create_server(
name='server-name',
image=dict(id='image-id'),
flavor=dict(id='flavor-id'),
config_drive=None).to_dict(computed=False)
)
self.assert_calls()
def test_create_server_with_admin_pass_no_wait(self):
"""
Test that a server with an admin_pass passed returns the password
"""
admin_pass = <PASSWORD>.getUniqueString('password')
fake_server = fakes.make_fake_server('1234', '', 'BUILD')
fake_create_server = fakes.make_fake_server(
'1234', '', 'BUILD', admin_pass=admin_pass)
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': []}),
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': fake_create_server},
validate=dict(
json={'server': {
u'adminPass': <PASSWORD>,
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'max_count': 1,
u'min_count': 1,
u'name': u'server-name',
'networks': 'auto'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
json={'server': fake_server}),
])
self.assertEqual(
self.cloud._normalize_server(fake_create_server)['adminPass'],
self.cloud.create_server(
name='server-name', image=dict(id='image-id'),
flavor=dict(id='flavor-id'),
admin_pass=admin_pass)['admin_password'])
self.assert_calls()
@mock.patch.object(connection.Connection, "wait_for_server")
def test_create_server_with_admin_pass_wait(self, mock_wait):
"""
Test that a server with an admin_pass passed returns the password
"""
admin_pass = <PASSWORD>('password')
fake_server = fakes.make_fake_server('1234', '', 'BUILD')
fake_server_with_pass = fakes.make_fake_server(
'1234', '', 'BUILD', admin_pass=<PASSWORD>_pass)
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': []}),
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': fake_server_with_pass},
validate=dict(
json={'server': {
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'max_count': 1,
u'min_count': 1,
u'adminPass': <PASSWORD>,
u'name': u'server-name',
'networks': 'auto'}})),
])
# The wait returns non-password server
mock_wait.return_value = self.cloud._normalize_server(fake_server)
server = self.cloud.create_server(
name='server-name', image=dict(id='image-id'),
flavor=dict(id='flavor-id'),
admin_pass=<PASSWORD>, wait=True)
# Assert that we did wait
self.assertTrue(mock_wait.called)
# Even with the wait, we should still get back a passworded server
self.assertEqual(
server['admin_password'],
self.cloud._normalize_server(fake_server_with_pass)['adminPass']
)
self.assert_calls()
def test_create_server_user_data_base64(self):
"""
Test that a server passed user-data sends it base64 encoded.
"""
user_data = self.getUniqueString('user_data')
user_data_b64 = base64.b64encode(
user_data.encode('utf-8')).decode('utf-8')
fake_server = fakes.make_fake_server('1234', '', 'BUILD')
fake_server['user_data'] = user_data
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': []}),
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': fake_server},
validate=dict(
json={'server': {
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'max_count': 1,
u'min_count': 1,
u'user_data': user_data_b64,
u'name': u'server-name',
'networks': 'auto'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
json={'server': fake_server}),
])
self.cloud.create_server(
name='server-name', image=dict(id='image-id'),
flavor=dict(id='flavor-id'),
userdata=user_data, wait=False)
self.assert_calls()
    @mock.patch.object(connection.Connection, "get_active_server")
    @mock.patch.object(connection.Connection, "get_server")
    def test_wait_for_server(self, mock_get_server, mock_get_active_server):
        """
        Test that waiting for a server returns the server instance when
        its status changes to "ACTIVE".
        """
        # TODO(mordred) Rework this to not mock methods
        building_server = {'id': 'fake_server_id', 'status': 'BUILDING'}
        active_server = {'id': 'fake_server_id', 'status': 'ACTIVE'}
        # First poll sees BUILDING, second sees ACTIVE -> the wait loop exits.
        mock_get_server.side_effect = iter([building_server, active_server])
        mock_get_active_server.side_effect = iter([
            building_server, active_server])
        server = self.cloud.wait_for_server(building_server)
        # Exactly two polls, one per status transition.
        self.assertEqual(2, mock_get_server.call_count)
        mock_get_server.assert_has_calls([
            mock.call(building_server['id']),
            mock.call(active_server['id']),
        ])
        self.assertEqual(2, mock_get_active_server.call_count)
        mock_get_active_server.assert_has_calls([
            mock.call(server=building_server, reuse=True, auto_ip=True,
                      ips=None, ip_pool=None, wait=True, timeout=mock.ANY,
                      nat_destination=None),
            mock.call(server=active_server, reuse=True, auto_ip=True,
                      ips=None, ip_pool=None, wait=True, timeout=mock.ANY,
                      nat_destination=None),
        ])
        self.assertEqual('ACTIVE', server['status'])
@mock.patch.object(connection.Connection, 'wait_for_server')
def test_create_server_wait(self, mock_wait):
"""
Test that create_server with a wait actually does the wait.
"""
# TODO(mordred) Make this a full proper response
fake_server = fakes.make_fake_server('1234', '', 'BUILD')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': []}),
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': fake_server},
validate=dict(
json={'server': {
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'max_count': 1,
u'min_count': 1,
u'name': u'server-name',
'networks': 'auto'}})),
])
self.cloud.create_server(
'server-name',
dict(id='image-id'), dict(id='flavor-id'), wait=True),
# This is a pretty dirty hack to ensure we in principle use object with
# expected properties
srv = server.Server.existing(
connection=self.cloud,
min_count=1, max_count=1,
networks='auto',
imageRef='image-id',
flavorRef='flavor-id',
**fake_server)
mock_wait.assert_called_once_with(
srv,
auto_ip=True, ips=None,
ip_pool=None, reuse=True, timeout=180,
nat_destination=None,
)
self.assert_calls()
@mock.patch.object(connection.Connection, 'add_ips_to_server')
def test_create_server_no_addresses(
self, mock_add_ips_to_server):
"""
Test that create_server with a wait throws an exception if the
server doesn't have addresses.
"""
build_server = fakes.make_fake_server('1234', '', 'BUILD')
fake_server = fakes.make_fake_server(
'1234', '', 'ACTIVE', addresses={})
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': []}),
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': build_server},
validate=dict(
json={'server': {
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'max_count': 1,
u'min_count': 1,
u'name': u'server-name',
'networks': 'auto'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
json={'servers': [build_server]}),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', 'detail']),
json={'servers': [fake_server]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'ports'],
qs_elements=['device_id=1234']),
json={'ports': []}),
dict(method='DELETE',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234'])),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
status_code=404),
])
mock_add_ips_to_server.return_value = fake_server
self.cloud._SERVER_AGE = 0
self.assertRaises(
exc.OpenStackCloudException, self.cloud.create_server,
'server-name', {'id': 'image-id'}, {'id': 'flavor-id'},
wait=True)
self.assert_calls()
def test_create_server_network_with_no_nics(self):
"""
Verify that if 'network' is supplied, and 'nics' is not, that we
attempt to get the network for the server.
"""
build_server = fakes.make_fake_server('1234', '', 'BUILD')
network = {
'id': 'network-id',
'name': 'network-name'
}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'networks', 'network-name']),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'networks'],
qs_elements=['name=network-name']),
json={'networks': [network]}),
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': build_server},
validate=dict(
json={'server': {
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'max_count': 1,
u'min_count': 1,
u'networks': [{u'uuid': u'network-id'}],
u'name': u'server-name'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
json={'server': build_server}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': [network]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'subnets']),
json={'subnets': []}),
])
self.cloud.create_server(
'server-name',
dict(id='image-id'), dict(id='flavor-id'), network='network-name')
self.assert_calls()
def test_create_server_network_with_empty_nics(self):
"""
Verify that if 'network' is supplied, along with an empty 'nics' list,
it's treated the same as if 'nics' were not included.
"""
network = {
'id': 'network-id',
'name': 'network-name'
}
build_server = fakes.make_fake_server('1234', '', 'BUILD')
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'networks', 'network-name']),
status_code=404),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public',
append=['v2.0', 'networks'],
qs_elements=['name=network-name']),
json={'networks': [network]}),
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': build_server},
validate=dict(
json={'server': {
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'max_count': 1,
u'min_count': 1,
u'networks': [{u'uuid': u'network-id'}],
u'name': u'server-name'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
json={'server': build_server}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': [network]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'subnets']),
json={'subnets': []}),
])
self.cloud.create_server(
'server-name', dict(id='image-id'), dict(id='flavor-id'),
network='network-name', nics=[])
self.assert_calls()
def test_create_server_network_fixed_ip(self):
"""
Verify that if 'fixed_ip' is supplied in nics, we pass it to networks
appropriately.
"""
network = {
'id': 'network-id',
'name': 'network-name'
}
fixed_ip = '10.0.0.1'
build_server = fakes.make_fake_server('1234', '', 'BUILD')
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': build_server},
validate=dict(
json={'server': {
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'max_count': 1,
u'min_count': 1,
u'networks': [{'fixed_ip': fixed_ip}],
u'name': u'server-name'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
json={'server': build_server}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': [network]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'subnets']),
json={'subnets': []}),
])
self.cloud.create_server(
'server-name', dict(id='image-id'), dict(id='flavor-id'),
nics=[{'fixed_ip': fixed_ip}])
self.assert_calls()
def test_create_server_network_v4_fixed_ip(self):
"""
Verify that if 'v4-fixed-ip' is supplied in nics, we pass it to
networks appropriately.
"""
network = {
'id': 'network-id',
'name': 'network-name'
}
fixed_ip = '10.0.0.1'
build_server = fakes.make_fake_server('1234', '', 'BUILD')
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': build_server},
validate=dict(
json={'server': {
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'max_count': 1,
u'min_count': 1,
u'networks': [{'fixed_ip': fixed_ip}],
u'name': u'server-name'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
json={'server': build_server}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': [network]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'subnets']),
json={'subnets': []}),
])
self.cloud.create_server(
'server-name', dict(id='image-id'), dict(id='flavor-id'),
nics=[{'fixed_ip': fixed_ip}])
self.assert_calls()
def test_create_server_network_v6_fixed_ip(self):
"""
Verify that if 'v6-fixed-ip' is supplied in nics, we pass it to
networks appropriately.
"""
network = {
'id': 'network-id',
'name': 'network-name'
}
# Note - it doesn't actually have to be a v6 address - it's just
# an alias.
fixed_ip = 'fe80::28da:5fff:fe57:13ed'
build_server = fakes.make_fake_server('1234', '', 'BUILD')
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': build_server},
validate=dict(
json={'server': {
u'flavorRef': u'flavor-id',
u'imageRef': u'image-id',
u'max_count': 1,
u'min_count': 1,
u'networks': [{'fixed_ip': fixed_ip}],
u'name': u'server-name'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
json={'server': build_server}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': [network]}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'subnets']),
json={'subnets': []}),
])
self.cloud.create_server(
'server-name', dict(id='image-id'), dict(id='flavor-id'),
nics=[{'fixed_ip': fixed_ip}])
self.assert_calls()
    def test_create_server_network_fixed_ip_conflicts(self):
        """
        Verify that if 'fixed_ip' and 'v4-fixed-ip' are both supplied in nics,
        we throw an exception.
        """
        # Note - it doesn't actually have to be a v6 address - it's just
        # an alias.
        # No HTTP mocks are registered: argument validation must fail before
        # any request is issued, and assert_calls() verifies that.
        self.use_nothing()
        fixed_ip = '10.0.0.1'
        self.assertRaises(
            exc.OpenStackCloudException, self.cloud.create_server,
            'server-name', dict(id='image-id'), dict(id='flavor-id'),
            nics=[{
                'fixed_ip': fixed_ip,
                'v4-fixed-ip': fixed_ip
            }])
        self.assert_calls()
def test_create_server_get_flavor_image(self):
self.use_glance()
image_id = str(uuid.uuid4())
fake_image_dict = fakes.make_fake_image(image_id=image_id)
fake_image_search_return = {'images': [fake_image_dict]}
build_server = fakes.make_fake_server('1234', '', 'BUILD')
active_server = fakes.make_fake_server('1234', '', 'BUILD')
self.register_uris([
dict(method='GET',
uri='https://image.example.com/v2/images',
json=fake_image_search_return),
self.get_nova_discovery_mock_dict(),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['flavors', 'vanilla'],
qs_elements=[]),
json=fakes.FAKE_FLAVOR),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': build_server},
validate=dict(
json={'server': {
u'flavorRef': fakes.FLAVOR_ID,
u'imageRef': image_id,
u'max_count': 1,
u'min_count': 1,
u'networks': [{u'uuid': u'some-network'}],
u'name': u'server-name'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
json={'server': active_server}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': []}),
])
self.cloud.create_server(
'server-name', image_id, 'vanilla',
nics=[{'net-id': 'some-network'}], wait=False)
self.assert_calls()
def test_create_server_nics_port_id(self):
'''Verify port-id in nics input turns into port in REST.'''
build_server = fakes.make_fake_server('1234', '', 'BUILD')
active_server = fakes.make_fake_server('1234', '', 'BUILD')
image_id = uuid.uuid4().hex
port_id = uuid.uuid4().hex
self.register_uris([
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': build_server},
validate=dict(
json={'server': {
u'flavorRef': fakes.FLAVOR_ID,
u'imageRef': image_id,
u'max_count': 1,
u'min_count': 1,
u'networks': [{u'port': port_id}],
u'name': u'server-name'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
json={'server': active_server}),
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': []}),
])
self.cloud.create_server(
'server-name', dict(id=image_id), dict(id=fakes.FLAVOR_ID),
nics=[{'port-id': port_id}], wait=False)
self.assert_calls()
def test_create_boot_attach_volume(self):
build_server = fakes.make_fake_server('1234', '', 'BUILD')
active_server = fakes.make_fake_server('1234', '', 'BUILD')
vol = {'id': 'volume001', 'status': 'available',
'name': '', 'attachments': []}
volume = meta.obj_to_munch(fakes.FakeVolume(**vol))
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
'network', 'public', append=['v2.0', 'networks']),
json={'networks': []}),
self.get_nova_discovery_mock_dict(),
dict(method='POST',
uri=self.get_mock_url(
'compute', 'public', append=['servers']),
json={'server': build_server},
validate=dict(
json={'server': {
u'flavorRef': 'flavor-id',
u'imageRef': 'image-id',
u'max_count': 1,
u'min_count': 1,
u'block_device_mapping_v2': [
{
u'boot_index': 0,
u'delete_on_termination': True,
u'destination_type': u'local',
u'source_type': u'image',
u'uuid': u'image-id'
},
{
u'boot_index': u'-1',
u'delete_on_termination': False,
u'destination_type': u'volume',
u'source_type': u'volume',
u'uuid': u'volume001'
}
],
u'name': u'server-name',
'networks': 'auto'}})),
dict(method='GET',
uri=self.get_mock_url(
'compute', 'public', append=['servers', '1234']),
json={'server': active_server}),
])
self.cloud.create_server(
name='server-name',
image=dict(id='image-id'),
flavor=dict(id='flavor-id'),
boot_from_volume=False,
volumes=[volume],
wait=False)
self.assert_calls()
    def test_create_boot_from_volume_image_terminate(self):
        """Boot a server from an image-backed volume deleted on termination.

        ``boot_from_volume=True`` + ``terminate_volume=True`` must translate
        into a single block_device_mapping_v2 entry mapping
        ``source_type: image`` to ``destination_type: volume`` with
        ``delete_on_termination: True``; ``imageRef`` is sent empty since the
        image is supplied through the volume mapping instead.
        """
        build_server = fakes.make_fake_server('1234', '', 'BUILD')
        # Status stays 'BUILD' because wait=False below never polls for ACTIVE.
        active_server = fakes.make_fake_server('1234', '', 'BUILD')
        # NOTE: register_uris is order-sensitive; the mocks below must match
        # the exact request sequence create_server() issues.
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks']),
                 json={'networks': []}),
            self.get_nova_discovery_mock_dict(),
            dict(method='POST',
                 uri=self.get_mock_url(
                     'compute', 'public', append=['servers']),
                 json={'server': build_server},
                 # validate: the exact POST body the SDK must send.
                 validate=dict(
                     json={'server': {
                         u'flavorRef': 'flavor-id',
                         u'imageRef': '',
                         u'max_count': 1,
                         u'min_count': 1,
                         u'block_device_mapping_v2': [{
                             u'boot_index': u'0',
                             u'delete_on_termination': True,
                             u'destination_type': u'volume',
                             u'source_type': u'image',
                             u'uuid': u'image-id',
                             u'volume_size': u'1'}],
                         u'name': u'server-name',
                         'networks': 'auto'}})),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'compute', 'public', append=['servers', '1234']),
                 json={'server': active_server}),
        ])
        self.cloud.create_server(
            name='server-name',
            image=dict(id='image-id'),
            flavor=dict(id='flavor-id'),
            boot_from_volume=True,
            terminate_volume=True,
            volume_size=1,
            wait=False)
        self.assert_calls()
# ---------------------------------------------------------------------------
""" PagerMaid Plugin Coin by Pentacene """
# ______ _
# | ___ \ | |
# | |_/ /__ _ __ | |_ __ _ ___ ___ _ __ ___
# | __/ _ \ '_ \| __/ _` |/ __/ _ \ '_ \ / _ \
# | | | __/ | | | || (_| | (_| __/ | | | __/
# \_| \___|_| |_|\__\__,_|\___\___|_| |_|\___|
#
from asyncio import sleep
from sys import executable
import urllib.request
from telethon.tl.custom.message import Message
from pagermaid.listener import listener
from pagermaid.utils import execute, alias_command, pip_install
pip_install("python-binance", alias="binance")
pip_install("xmltodict")
from binance.client import Client
import xmltodict
# Daily ECB euro foreign-exchange reference rates (EUR-based XML feed).
API = "https://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml"
CURRENCIES = []  # ISO currency codes parsed from the ECB feed, sorted by init()
DATA = {}  # currency code -> EUR exchange rate, populated by init()
# NOTE(review): placeholder credentials — must be replaced with real Binance
# API keys before price lookups can work.
BINANCE_API_KEY = '<KEY>'
BINANCE_API_SECRET = '<KEY>'
def init() -> None:
    """Refresh CURRENCIES / DATA from the ECB daily reference-rate feed.

    Downloads the EUR-based XML rate table, records each quoted currency
    code in ``CURRENCIES`` (sorted) and its EUR rate in ``DATA``.

    Raises
    ------
    urllib.error.URLError
        If the feed cannot be fetched. Parse errors (xmltodict / KeyError)
        propagate unchanged, as in the original ``raise e`` handler.
    """
    global CURRENCIES, DATA
    with urllib.request.urlopen(API) as response:
        result = response.read()
    # The ECB feed nests the rate entries three <Cube> levels deep.
    rate_data = xmltodict.parse(result)['gesmes:Envelope']['Cube']['Cube']['Cube']
    # BUG FIX: rebuild both containers from scratch. The old code appended to
    # the module-level list/dict on every call, and coin() calls init() per
    # command, so CURRENCIES accumulated duplicates without bound.
    currencies = []
    data = {}
    for entry in rate_data:
        currencies.append(entry['@currency'])
        data[entry['@currency']] = float(entry['@rate'])
    currencies.sort()
    CURRENCIES = currencies
    DATA = data
@listener(is_plugin=True, outgoing=True, command=alias_command("bc"),
          description="coins",
          parameters="<num> <coin1> <coin2>")
async def coin(context: Message) -> None:
    """Convert an amount between two currencies (fiat and/or crypto).

    Fiat-to-fiat conversions use the ECB rates loaded by init(); any crypto
    leg is priced through Binance tickers, pivoting through USDT when one
    side is a fiat currency. The result is edited into the message.
    """
    init()  # refresh ECB fiat rates on every invocation
    action = context.arguments.split()
    binanceclient = Client(BINANCE_API_KEY, BINANCE_API_SECRET)
    if len(action) < 3:
        # Usage error message (kept verbatim; user-facing).
        await context.edit('输入错误.\n-bc 数量 币种1 币种2')
        return
    else:
        prices = binanceclient.get_all_tickers()
        try:
            number = float(action[0])
        except ValueError:
            await context.edit('输入错误.\n-bc 数量 币种1 币种2')
            return
        _from = action[1].upper().strip()
        _to = action[2].upper().strip()
        front_text = ''
        text = ''
        rear_text = ''
        price = 0.0
        _to_USD_rate = 0.0
        if (CURRENCIES.count(_from) != 0) and (CURRENCIES.count(_to) != 0):
            # both are real currency
            text = f'{action[0]} {action[1].upper().strip()} = {float(action[0])*DATA[_to]/DATA[_from]:.2f} {action[2].upper().strip()}'
        else:
            if CURRENCIES.count(_from) != 0:
                # from virtual currency to real currency
                # Convert the fiat amount to USD, then trade as USDT on Binance.
                number = number * DATA["USD"] / DATA[_from]
                _from = 'USDT'
                front_text = f'{action[0]} {action[1]} = \n'
            if CURRENCIES.count(_to) != 0:
                # from real currency to virtual currency
                _to_USD_rate = DATA[_to] / DATA["USD"]
                _to = 'USDT'
            for _a in prices:
                if _a['symbol'] == str(f'{_from}{_to}'):
                    price = _a['price']
                    if _to == 'USDT':
                        if action[2].upper().strip() == 'USDT':
                            # Target literally USDT: also show a CNY estimate.
                            rear_text = f'\n= {number * float(price) * DATA["CNY"]/DATA["USD"]:.2f} CNY'
                        else:
                            rear_text = f'\n= {number * float(price) * _to_USD_rate:.2f} {action[2].upper().strip()}'
                    # More decimals for sub-unit prices.
                    if float(price) < 1:
                        text = f'{number} {_from} = {number * float(price):.8f} {_to}'
                    else:
                        text = f'{number} {_from} = {number * float(price):.2f} {_to}'
                    break
                elif _a['symbol'] == str(f'{_to}{_from}'):
                    # Inverse pair listed on Binance: invert its price.
                    price = 1 / float(_a['price'])
                    text = f'{number} {_from} = {number * float(price):.8f} {_to}'
                    break
                else:
                    # NOTE(review): this else belongs to the if-chain, so price
                    # is reset to None on every non-matching ticker; a match
                    # later still wins because it breaks out of the loop.
                    price = None
            if price is None:
                text = f'Cannot find coinpair {action[1].upper().strip()}{action[2].upper().strip()} or {action[2].upper().strip()}{action[1].upper().strip()}'
        await context.edit(f'{front_text}{text}{rear_text}')
# ---------------------------------------------------------------------------
import warnings
from pathlib import Path
import astropy.units as u
import matplotlib.pyplot as plt
import pandas as pd
from astropy.coordinates import SkyCoord
from sunpy.map import Map, MapSequence
from sunpy.net import Fido
from sunpy.net import attrs as a
from sunpy.net import hek
from sunpy.util import SunpyUserWarning
__all__ = ['Sunspotter']
# Default location of the "All Clear" dataset CSVs shipped with the package.
path = Path(__file__).parent.parent.parent / "data/all_clear"
class Sunspotter:
def __init__(self, *, timesfits: str = path / "lookup_timesfits.csv", get_all_timesfits_columns: bool = True,
properties: str = path / "lookup_properties.csv", get_all_properties_columns: bool = True,
timesfits_columns: list = ['#id'], properties_columns: list = ['#id'],
classifications=None, classifications_columns=None,
delimiter: str = ';', datetime_fmt: str = '%Y-%m-%d %H:%M:%S'):
"""
Parameters
----------
timesfits : str
filepath to `lookup_timesfits.csv`
by default points to the Timesfits file from All Clear Dataset
stored in `~pythia/data/all_clear`
get_all_timesfits_columns : bool, optional
Load all columns from the Timesfits CSV file, by default True
properties : str
filepath to `lookup_properties.csv`
by default points to the Properties file from All Clear Dataset
stored in `~pythia/data/all_clear`
get_all_properties_columns : bool, optional
Load all columns from the Properties CSV file, by default True
timesfits_columns : list, optional
Columns required from lookup_timesfits.csv, by default ['#id']
Will be overridden if `get_all_timesfits_columns` is True.
properties_columns : list, optional
Columns required from lookup_properties.csv, by default ['#id']
Will be overridden if `get_all_properties_columns` is True.
classifications : str, optional
filepath to `classifications.csv`
Default behaviour is not to load the file, hence by default None
classifications_columns : list, optional
Columns required from `classifications.csv`
Default behaviour is not to load the file, hence by default None
delimiter : str, optional
Delimiter for the CSV files, by default ';'
datetime_fmt : str, optional
Format for interpreting the observation datetimes in the CSV files,
by default '%Y-%m-%d %H:%M:%S'
"""
self.timesfits = timesfits
self.get_all_timesfits_columns = get_all_timesfits_columns
self.properties = properties
self.get_all_properties_columns = get_all_properties_columns
self.timesfits_columns = set(timesfits_columns)
self.properties_columns = set(properties_columns)
self.classifications = classifications
self.classifications_columns = classifications_columns
self.datetime_fmt = datetime_fmt
self._get_data(delimiter)
    def _get_data(self, delimiter: str):
        """Read the CSV files, replacing the path attributes with DataFrames.

        Loads Timesfits (indexed by ``obs_date``), Properties (indexed by
        ``#id``) and, when configured, Classifications. Each load validates
        that the requested columns are present.

        NOTE(review): failures raise ``SunpyUserWarning`` — a Warning
        subclass used here as an exception; presumably a project convention.
        """
        # Reading the Timesfits file
        try:
            if self.get_all_timesfits_columns:
                self.timesfits = pd.read_csv(self.timesfits,
                                             delimiter=delimiter)
            else:
                self.timesfits = pd.read_csv(self.timesfits,
                                             delimiter=delimiter,
                                             usecols=self.timesfits_columns)
        except ValueError:
            raise SunpyUserWarning("Sunspotter Object cannot be created."
                                   " Either the Timesfits columns do not match, or the file is corrupted")
        # Even a full load must contain the explicitly requested columns.
        if not self.timesfits_columns.issubset(self.timesfits.columns):
            missing_columns = self.timesfits_columns - self.timesfits_columns.intersection(self.timesfits.columns)
            missing_columns = ", ".join(missing_columns)
            raise SunpyUserWarning("Sunspotter Object cannot be created."
                                   " The Timesfits CSV is missing the following columns: " +
                                   missing_columns)
        # Index Timesfits by observation datetime so .loc and slicing by
        # date work throughout the class.
        if 'obs_date' in self.timesfits.columns:
            self.timesfits.obs_date = pd.to_datetime(self.timesfits.obs_date,
                                                     format=self.datetime_fmt)
            self.timesfits.set_index("obs_date", inplace=True)
        # Reading the Properties file
        try:
            if self.get_all_properties_columns:
                self.properties = pd.read_csv(self.properties,
                                              delimiter=delimiter)
            else:
                self.properties = pd.read_csv(self.properties,
                                              delimiter=delimiter,
                                              usecols=self.properties_columns)
        except ValueError:
            raise SunpyUserWarning("Sunspotter Object cannot be created."
                                   " Either the Properties columns do not match, or the file is corrupted")
        if not self.properties_columns.issubset(self.properties.columns):
            missing_columns = self.properties_columns - self.properties_columns.intersection(self.properties.columns)
            missing_columns = ", ".join(missing_columns)
            raise SunpyUserWarning("Sunspotter Object cannot be created."
                                   " The Properties CSV is missing the following columns: " +
                                   missing_columns)
        if '#id' in self.properties.columns:
            self.properties.set_index("#id", inplace=True)
        # Reading the Classification file
        if self.classifications is not None:
            if self.classifications_columns is None:
                raise SunpyUserWarning("Classifications columns cannot be None"
                                       " when classifications.csv is to be loaded.")
            try:
                self.classifications = pd.read_csv(self.classifications,
                                                   delimiter=delimiter,
                                                   usecols=self.classifications_columns)
            except ValueError:
                raise SunpyUserWarning("Sunspotter Object cannot be created."
                                       " Either the Classifications columns do not match, or the file is corrupted")
            # Converted to a set only after read_csv, which needs the list.
            self.classifications_columns = set(self.classifications_columns)
            if not self.classifications_columns.issubset(self.classifications.columns):
                missing_columns = self.classifications_columns - self.classifications_columns.intersection(self.classifications.columns)
                missing_columns = ", ".join(missing_columns)
                raise SunpyUserWarning("Sunspotter Object cannot be created."
                                       " The Classifications CSV is missing the following columns: " +
                                       missing_columns)
def get_timesfits_id(self, obsdate: str):
"""
Returns the Sunspotter observation id for the
first observation a given observation date and time.
Parameters
----------
obsdate : str
The observation time and date.
Returns
-------
id : int
The Sunspotter observation id for the first observation
for the given observation date and time.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_timesfits_id(obsdate)
1
"""
obsdate = self.get_nearest_observation(obsdate)
return self.timesfits.loc[obsdate].get(key='#id').iloc[0]
def get_all_ids_for_observation(self, obsdate: str):
"""
Returns all the Sunspotter observation ids for the
given observation date and time.
Parameters
----------
obsdate : str
The observation time and date.
Returns
-------
ids : pandas.Series
All the Sunspotter observation ids for the
given observation date and time.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_all_ids_for_observation(obsdate)
array([1, 2, 3, 4, 5])
"""
obsdate = self.get_nearest_observation(obsdate)
return self.timesfits.loc[obsdate].get(key='#id').values
def get_properties(self, idx: int):
"""
Returns the observed properties for a given Sunspotter id.
Parameters
----------
idx : int
The Sunspotter observation id for a particualar observation.
Returns
-------
properties : pandas.Series
The observed properties for the given Sunspotter id.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> idx = 0
>>> sunspotter.get_properties(idx)
filename 530be1183ae74079c300000d.jpg
zooniverse_id ASZ000090y
angle 37.8021
area 34400
areafrac 0.12
areathesh 2890
bipolesep 3.72
c1flr24hr 0
id_filename 1
flux 2.18e+22
fluxfrac 0.01
hale beta
hcpos_x 452.27
hcpos_y 443.93
m1flr12hr 0
m5flr12hr 0
n_nar 1
noaa 8809
pxpos_x 229.193
pxpos_y 166.877
sszn 1
zurich bxo
Name: 1, dtype: object
"""
return self.properties.loc[idx]
def get_properties_from_obsdate(self, obsdate: str):
"""
Returns the observed properties for a given observation time and date.
Parameters
----------
obsdate : str
The observation time and date.
Returns
-------
properties : pandas.DataFrame
The observed properties for the given observation time and date.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_properties_from_obsdate(obsdate)
filename 530be1183ae74079c300000d.jpg
zooniverse_id ASZ000090y
angle 37.8021
area 34400
areafrac 0.12
areathesh 2890
bipolesep 3.72
c1flr24hr 0
id_filename 1
flux 2.18e+22
fluxfrac 0.01
hale beta
hcpos_x 452.27
hcpos_y 443.93
m1flr12hr 0
m5flr12hr 0
n_nar 1
noaa 8809
pxpos_x 229.193
pxpos_y 166.877
sszn 1
zurich bxo
Name: 1, dtype: object
[1 rows x 23 columns]
"""
return self.get_properties(self.get_timesfits_id(obsdate))
def number_of_observations(self, obsdate: str):
"""
Returns number of Sunspotter observations for the
given observation date and time.
Parameters
----------
obsdate : str
The observation time and date.
Returns
-------
number_of_observations : int
Number of Sunspotter observations
for the given observation date and time.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.number_of_observations(obsdate)
5
"""
return self.timesfits.loc[obsdate].shape[0]
def get_nearest_observation(self, obsdate: str):
"""
Returns the observation time and date in the Timesfits that is
closest to the given observation time and date.
Parameters
----------
obsdate : str
The observation time and date.
Returns
-------
closest_observation : str
Observation time and date in the Timesfits that is
closest to the given observation time and date.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 22:47:02'
>>> sunspotter.get_nearest_observation(obsdate)
'2000-01-01 12:47:02'
"""
unique_dates = self.timesfits.index.unique()
index = unique_dates.get_loc(obsdate, method='nearest')
nearest_date = str(unique_dates[index])
if nearest_date != str(obsdate): # casting to str because obsdate can be a pandas.Timestamp
warnings.warn(SunpyUserWarning("The given observation date isn't in the Timesfits file.\n"
"Using the observation nearest to the given obsdate instead."))
return nearest_date
def get_all_observations_ids_in_range(self, start: str, end: str):
"""
Returns all the observations ids in the given timerange.
The nearest start and end time in the Timesfits are used
to form the time range.
Parameters
----------
start : str
The starting observation time and date.
end : str
The ending observation time and date.
Returns
-------
ids : numpy.array
All the Sunspotter observation ids for the
given observation time range.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> start = '2000-01-02 12:51:02'
>>> end = '2000-01-03 12:51:02'
>>> sunspotter.get_all_observations_ids_in_range(start, end)
array([ 6, 7, 8, 9, 10, 11, 12, 13])
"""
start = self.get_nearest_observation(start)
end = self.get_nearest_observation(end)
return self.timesfits[start:end]['#id'].values
def get_fits_filenames_from_range(self, start: str, end: str):
"""
Returns all the FITS filenames for observations in the given timerange.
The nearest start and end time in the Timesfits are used to form the
time range.
Parameters
----------
start : str
The starting observation time and date.
end : str
The ending observation time and date.
Returns
-------
filenames : pandas.Series
all the FITS filenames for observations in the given timerange.
Notes
-----
If start time is equal to end time, all the filenames corresponding to
that particular observation will be returned.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> start = '2000-01-02 12:51:02'
>>> end = '2000-01-03 12:51:02'
>>> sunspotter.get_fits_filenames_from_range(start, end)
obs_date
2000-01-02 12:51:02 20000102_1251_mdiB_1_8810.fits
2000-01-02 12:51:02 20000102_1251_mdiB_1_8813.fits
2000-01-02 12:51:02 20000102_1251_mdiB_1_8814.fits
2000-01-02 12:51:02 20000102_1251_mdiB_1_8815.fits
2000-01-03 12:51:02 20000103_1251_mdiB_1_8810.fits
2000-01-03 12:51:02 20000103_1251_mdiB_1_8813.fits
2000-01-03 12:51:02 20000103_1251_mdiB_1_8814.fits
2000-01-03 12:51:02 20000103_1251_mdiB_1_8815.fits
Name: filename, dtype: object
"""
ids_in_range = self.get_all_observations_ids_in_range(start, end)
return self.timesfits[self.timesfits['#id'].isin(ids_in_range)]['filename']
def get_mdi_fulldisk_fits_file(self, obsdate: str, filepath: str = str(path) + "/fulldisk/"):
"""
Downloads the MDI Fulldisk FITS file corresponding to a particular observation.
Parameters
----------
obsdate : str
The observation time and date.
filepath : mdi_mapsequence : sunpy.map.MapSequence,
By default downloaded files are stored in `~pythia/data/fulldisk`
Returns
-------
filepath : str
Filepath to the downloaded FITS file.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_mdi_fulldisk_fits_file(obsdate)
'~pythia/data/all_clear/fulldisk/fd_m_96m_01d_2556_0008.fits'
"""
# TODO: Figure out a way to test the downloaded file.
obsdate = self.get_nearest_observation(obsdate)
search_results = Fido.search(a.Time(obsdate, obsdate), a.Instrument.mdi)
downloaded_file = Fido.fetch(search_results, path=filepath)
return downloaded_file[0]
def get_mdi_fulldisk_map(self, obsdate: str, filepath: str = str(path) + "/fulldisk/"):
"""
Downloads the MDI Fulldisk FITS file corresponding to a particular observation.
And returns a SunPy Map corresponding to the downloaded file.
Parameters
----------
obsdate : str
The observation time and date.
filepath : mdi_mapsequence : sunpy.map.MapSequence,
By default downloaded files are stored in `~pythia/data/fulldisk`
Returns
-------
filepath : str
Filepath to the downloaded FITS file.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_mdi_fulldisk_map(obsdate)
<sunpy.map.sources.soho.MDIMap object at 0x7f6ca7aedc88>
SunPy Map
---------
Observatory: SOHO
Instrument: MDI
Detector: MDI
Measurement: magnetogram
Wavelength: 0.0 Angstrom
Observation Date: 2000-01-01 12:47:02
Exposure Time: 0.000000 s
Dimension: [1024. 1024.] pix
Coordinate System: helioprojective
Scale: [1.98083342 1.98083342] arcsec / pix
Reference Pixel: [511.36929067 511.76453018] pix
Reference Coord: [0. 0.] arcsec
array([[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan],
...,
[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan],
[nan, nan, nan, ..., nan, nan, nan]], dtype=float32)
"""
# TODO: Figure out the file naming convention to check if the file has been downloaded already.
# TODO: Test this!
obsdate = self.get_nearest_observation(obsdate)
search_results = Fido.search(a.Time(obsdate, obsdate), a.Instrument.mdi)
downloaded_file = Fido.fetch(search_results, path=filepath)
return Map(downloaded_file[0])
def get_available_obsdatetime_range(self, start: str, end: str):
"""
Returns all the observations datetimes in the given timerange.
The nearest start and end time in the Timesfits are used
to form the time range.
Parameters
----------
start : str
The starting observation time and date.
end : str
The ending observation time and date.
Returns
-------
obs_list : pandas.DatetimeIndex
All the Sunspotter observation datetimes for the
given observation time range.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> start = '2000-01-01 12:47:02'
>>> end = '2000-01-15 12:47:02'
>>> sunspotter.get_available_obsdatetime_range(start, end)
DatetimeIndex(['2000-01-01 12:47:02', '2000-01-02 12:51:02',
'2000-01-03 12:51:02', '2000-01-04 12:51:02',
'2000-01-05 12:51:02', '2000-01-06 12:51:02',
'2000-01-11 12:51:02', '2000-01-12 12:51:02',
'2000-01-13 12:51:02', '2000-01-14 12:47:02',
'2000-01-15 12:47:02'],
dtype='datetime64[ns]', name='obs_date', freq=None)
"""
start = self.get_nearest_observation(start)
end = self.get_nearest_observation(end)
return self.timesfits[start: end].index.unique()
def get_mdi_map_sequence(self, start: str, end: str, filepath: str = str(path) + "/fulldisk/"):
"""
Get MDI Map Sequence for observations from given range.
Parameters
----------
start : str
The starting observation time and date.
end : str
The ending observation time and date.
filepath : str, optional
[description], by default str(path)+"/fulldisk/"
Returns
-------
mdi_mapsequence : sunpy.map.MapSequence
Map Sequece of the MDI maps in the given range.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> start = '2000-01-01 12:47:02'
>>> end = '2000-01-05 12:51:02'
>>> sunspotter.get_mdi_map_sequence(start, end)
<sunpy.map.mapsequence.MapSequence object at 0x7f2c7b85cda0>
MapSequence of 5 elements, with maps from MDIMap
"""
# TODO: Test this!
obsrange = self.get_available_obsdatetime_range(start, end)
maplist = []
for obsdate in obsrange:
maplist.append(self.get_mdi_fulldisk_map(obsdate, filepath))
return MapSequence(maplist)
def get_observations_from_hek(self, obsdate: str, event_type: str = 'AR',
observatory: str = 'SOHO'):
"""
Gets the observation metadata from HEK for the given obsdate.
By default gets Active Region data recieved from SOHO.
Parameters
----------
obsdate : str
The observation time and date.
event_type : str, optional
The type of Event, by default 'AR'
observatory : str, optional
Observatory that observed the Event, by default 'SOHO'
Returns
-------
results = sunpy.hek.HEKTable
The table of results recieved from HEK.
Examples
--------
>>> from pythia.seo import Sunspotter
>>> sunspotter = Sunspotter()
>>> obsdate = '2000-01-01 12:47:02'
>>> sunspotter.get_observations_from_hek(obsdate)
<HEKTable length=5>
SOL_standard absnetcurrenthelicity ... unsignedvertcurrent
str30 object ... object
------------------------------ --------------------- ... -------------------
SOL2000-01-01T09:35:02L054C117 None ... None
SOL2000-01-01T09:35:02L058C100 None ... None
SOL2000-01-01T09:35:02L333C106 None ... None
SOL2000-01-01T09:35:02L033C066 None ... None
SOL2000-01-01T09:35:02L012C054 None ... None
"""
obsdate = self.get_nearest_observation(obsdate)
client = hek.HEKClient()
result = client.search(hek.attrs.Time(obsdate, obsdate), hek.attrs.EventType(event_type))
obsdate = "T".join(str(obsdate).split())
result = result[result['obs_observatory'] == 'SOHO']
result = result[result['event_starttime'] <= obsdate]
result = result[result['event_endtime'] > obsdate]
return result
    def plot_observations(self, obsdate: str, mdi_map: Map = None):
        """
        Plots the Active Regions for a given observation on the
        MDI map corresponding to that observation.

        Parameters
        ----------
        obsdate : str
            The observation time and date.
        mdi_map : Map, optional
            The MDI map corresponding to the given observation,
            If None, the Map will be downloaded first.
            By default None.

        Examples
        --------
        >>> from pythia.seo import Sunspotter
        >>> sunspotter = Sunspotter()
        >>> obsdate = '2000-01-01 12:47:02'
        >>> sunspotter.plot_observations(obsdate)
        """
        obsdate = self.get_nearest_observation(obsdate)
        if mdi_map is None:
            mdi_map = self.get_mdi_fulldisk_map(obsdate)
        hek_result = self.get_observations_from_hek(obsdate)
        # Bounding-box corners (lower-left / upper-right) reported by HEK.
        bottom_left_x = hek_result['boundbox_c1ll']
        bottom_left_y = hek_result['boundbox_c2ll']
        top_right_x = hek_result['boundbox_c1ur']
        top_right_y = hek_result['boundbox_c2ur']
        number_of_observations = len(hek_result)
        # Build the corner coordinates in the map's own frame so the
        # rectangles land on the right pixels.
        bottom_left_coords = SkyCoord([(bottom_left_x[i], bottom_left_y[i]) * u.arcsec
                                       for i in range(number_of_observations)],
                                      frame=mdi_map.coordinate_frame)
        top_right_coords = SkyCoord([(top_right_x[i], top_right_y[i]) * u.arcsec
                                     for i in range(number_of_observations)],
                                    frame=mdi_map.coordinate_frame)
        fig = plt.figure(figsize=(12, 10), dpi=100)
        mdi_map.plot()
        # One rectangle per HEK active-region bounding box.
        for i in range(number_of_observations):
            mdi_map.draw_rectangle(bottom_left_coords[i],
                                   top_right=top_right_coords[i],
                                   color='b', label="Active Regions")
        # Proxy artist so the legend shows a single "Active Regions" entry
        # rather than one per rectangle.
        hek_legend, = plt.plot([], color='b', label="Active Regions")
        plt.legend(handles=[hek_legend])
        plt.show()
# ---------------------------------------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
+-------------------------------------------------
@Author: cc
@Contact: <EMAIL>
@Site: http://www.xjh.com
@Project: sobookscrawler
@File: book_model.py
@Version:
@Time: 2019-06-06 15:22
@Description: TO-DO
+-------------------------------------------------
@Change Activity:
1. Created at 2019-06-06 15:22
2. TO-DO
+-------------------------------------------------
'''
__author__ = 'cc'
from mongoengine import *
from entities.base_model import BaseModel
class BookModel(BaseModel):
    """MongoDB document model for a crawled book (collection ``sl_book_info``).

    Field semantics mirror the Douban-style book API payload; the
    ``build_*`` classmethods construct the nested dict sub-documents
    (``rating``, ``tags``, ``images``).
    """
    meta = {
        'collection': 'sl_book_info',
    }
    book_id = IntField(min_value=1)
    alt = StringField()
    alt_title = StringField()
    authors = ListField(StringField())
    author_intro = StringField()
    binding = StringField()
    catalog = StringField()
    cips = ListField(StringField())
    image = StringField()
    images = DictField()
    isbn10 = StringField()
    # BUG FIX: ISBN-13 numbers use the 978 *or* 979 EAN prefix; the previous
    # pattern (^978\d{10}$) rejected all valid 979-prefixed ISBNs.
    isbn13 = StringField(min_length=13, max_length=13, regex=r'^97[89]\d{10}$')
    origin_title = StringField()
    pages = IntField(min_value=0)
    price = DecimalField(min_value=0.0)
    pubdate = DateField()
    publisher = StringField()
    producer = StringField()
    rating = DictField()
    subtitle = StringField()
    summary = StringField()
    tags = ListField(DictField())
    title = StringField()
    translator = StringField()
    url = StringField(regex=r'^https?://.+$')
    series = StringField()
    referer = StringField(regex=r'^https?://.+$')
    debug_memo = StringField()

    @classmethod
    def build_stars_percent(cls, zero=0.0, one=0.0, two=0.0, three=0.0, four=0.0, five=0.0):
        """Return the per-star percentage mapping stored inside ``rating``."""
        return {
            '0': zero,
            '1': one,
            '2': two,
            '3': three,
            '4': four,
            '5': five,
        }

    @classmethod
    def build_rating(cls, average=0.0, min=0.0, max=0.0, num_raters=0, star=0.0, percents=None, collections=None):
        """Build the ``rating`` sub-document.

        ``min``/``max`` shadow builtins but are kept as-is for keyword-
        argument backward compatibility with existing callers.
        """
        return {
            'average': average,
            'min': min,
            'max': max,
            'numRaters': num_raters,
            'star': star,
            'percents': percents if percents is not None else cls.build_stars_percent(),
            'collections': collections if collections is not None else cls.build_collections(),
        }

    @classmethod
    def build_tags(cls, name, title=None, count=0):
        """Build one tag entry; a None or empty title falls back to ``name``."""
        return {
            'name': name,
            'title': title if title else name,
            'count': count,
        }

    @classmethod
    def build_collections(cls, wishes=0, doings=0, collections=0):
        """Build the reader-collection counters sub-document."""
        return {
            'wishes': wishes,
            'doings': doings,
            'collections': collections,
        }

    @classmethod
    def build_images(cls, small=None, medium=None, large=None):
        """Build the cover-image URL mapping (small/medium/large)."""
        return {
            'small': small,
            'medium': medium,
            'large': large,
        }
#
#
# class Book(BaseEntity):
# _id = None
# _book_id = None
# _alt = None
# _alt_title = None
# _authors = None
# _author_intro = None
# _binding = None
# _catalog = None
# _cips = None
# _image = None
# _images = None
# # {
# # 'small': None,
# # 'medium': None,
# # 'large': None,
# # }
# _isbn10 = None
# _isbn13 = None
# _origin_title = None
# _pages = 0
# _price = 0.0
# _pubdate = None
# _publisher = None
# _producer = None
# _rating = None
# # {
# # 'average': 0.0,
# # 'max': 0.0,
# # 'min': 0.0,
# # 'numRaters': 0,
# # 'star': 0.0,
# # 'percents': {
# # 0: 0.0, # zero,
# # 1: 0.0, # one,
# # 2: 0.0, # two,
# # 3: 0.0, # three,
# # 4: 0.0, # four,
# # 5: 0.0, # five,
# # },
# # 'collections': {
# # 'wishies': 0,
# # 'doings': 0,
# # 'collections': 0,
# # }
# # }
# _subtitle = None
# _summary = None
# _tags = None
# _title = None
# _translator = None
# _url = None
# _series = None
# _referer = None
#
# _debug_memo = ''
#
# @property
# def id(self):
# return self._id
#
# @id.setter
# def id(self, value):
# self._id = value
#
# @property
# def book_id(self):
# return self._book_id
#
# @book_id.setter
# def book_id(self, value):
# self._book_id = value
#
# @property
# def alt(self):
# return self._alt
#
# @alt.setter
# def alt(self, value):
# self._alt = value
#
# @property
# def alt_title(self):
# return self._alt_title
#
# @alt_title.setter
# def alt_title(self, value):
# self._alt_title = value
#
# @property
# def authors(self):
# return self._authors
#
# @authors.setter
# def authors(self, value):
# self._authors = value
#
# @property
# def author_intro(self):
# return self._author_intro
#
# @author_intro.setter
# def author_intro(self, value):
# self._author_intro = value
#
# @property
# def binding(self):
# return self._binding
#
# @binding.setter
# def binding(self, value):
# self._binding = value
#
# @property
# def catalog(self):
# return self._catalog
#
# @catalog.setter
# def catalog(self, value):
# self._catalog = value
#
# @property
# def cips(self):
# return self._cips
#
# @cips.setter
# def cips(self, value):
# self._cips = value
#
# @property
# def image(self):
# return self._image
#
# @image.setter
# def image(self, value):
# self._image = value
#
# @property
# def images(self):
# return self._images
#
# @images.setter
# def images(self, value):
# self._images = value
#
# @property
# def isbn10(self):
# return self._isbn10
#
# @isbn10.setter
# def isbn10(self, value):
# self._isbn10 = value
#
# @property
# def isbn13(self):
# return self._isbn13
#
# @isbn13.setter
# def isbn13(self, value):
# self._isbn13 = value
#
# @property
# def origin_title(self):
# return self._origin_title
#
# @origin_title.setter
# def origin_title(self, value):
# self._origin_title = value
#
# @property
# def pages(self):
# return self._pages
#
# @pages.setter
# def pages(self, value):
# self._pages = value
#
# @property
# def price(self):
# return self._price
#
# @price.setter
# def price(self, value):
# self._price = value
#
# @property
# def pubdate(self):
# return self._pubdate
#
# @pubdate.setter
# def pubdate(self, value):
# self._pubdate = value
#
# @property
# def publisher(self):
# return self._publisher
#
# @publisher.setter
# def publisher(self, value):
# self._publisher = value
#
# @property
# def producer(self):
# return self._producer
#
# @producer.setter
# def producer(self, value):
# self._producer = value
#
# @property
# def rating(self):
# return self._rating
#
# @rating.setter
# def rating(self, rating):
# self._rating = rating
#
# @property
# def subtitle(self):
# return self._subtitle
#
# @subtitle.setter
# def subtitle(self, value):
# self._subtitle = value
#
# @property
# def summary(self):
# return self._summary
#
# @summary.setter
# def summary(self, value):
# self._summary = value
#
# @property
# def tags(self):
# return self._tags
#
# @tags.setter
# def tags(self, value):
# self._tags = value
#
# @property
# def title(self):
# return self._title
#
# @title.setter
# def title(self, value):
# self._title = value
#
# @property
# def translator(self):
# return self._translator
#
# @translator.setter
# def translator(self, value):
# self._translator = value
#
# @property
# def url(self):
# return self._url
#
# @url.setter
# def url(self, value):
# self._url = value
#
# @property
# def series(self):
# return self._series
#
# @series.setter
# def series(self, value):
# self._series = value
#
# @property
# def debug_memo(self):
# return self._debug_memo
#
# @debug_memo.setter
# def debug_memo(self, value):
# self._debug_memo = value
#
# @property
# def referer(self):
# return self._referer
#
# @referer.setter
# def referer(self, value):
# self._referer = value
#
# @classmethod
# def build_stars_percent(cls, zero=0.0, one=0.0, two=0.0, three=0.0, four=0.0, five=0.0):
# return dict({
# '0': zero,
# '1': one,
# '2': two,
# '3': three,
# '4': four,
# '5': five,
# })
#
# @classmethod
# def build_rating(cls, average=0.0, min=0.0, max=0.0, num_raters=0, star=0.0, percents=None, collections=None):
# return dict({
# 'average': average,
# 'min': min,
# 'max': max,
# 'numRaters': num_raters,
# 'star': star,
# 'percents': percents if None is not percents else cls.build_stars_percent(),
# 'collections': collections if None is not collections else cls.build_collections(),
# })
#
# @classmethod
# def build_tags(cls, name, title=None, count=0):
# return dict({
# 'name': name,
# 'title': title if None is not title and title else name,
# 'count': count,
# })
#
# @classmethod
# def build_collections(cls, wishes=0, doings=0, collections=0):
# return dict({
# 'wishes': wishes,
# 'doings': doings,
# 'collections': collections,
# })
#
# @classmethod
# def build_images(cls, small=None, medium=None, large=None):
# return dict({
# 'small': small,
# 'medium': medium,
# 'large': large,
# })
# ---------------------------------------------------------------------------
# PYTHON_LESSON/emp.py
#一、类和实例的定义
# class Employee: #定义一个类
# pass
# emp_1 = Employee() #调用这个类
# emp_2 = Employee()
# print(emp_1)
# print(emp_2)
# #创建对象,给对象赋值
# emp_1.first='john'
# emp_1.last='work'
# emp_1.email='<EMAIL>'
# emp_1.pay=10000
# emp_2.first='mike'
# emp_2.last='little'
# emp_2.email='<EMAIL>'
# emp_2.pay=12000
# # 打印这个对象的值
# print(emp_1.first)
# print(emp_2.first)
#每次给对象,每次赋值很麻烦,下面方法,给一个init构造器
###########################################################################################################
# class Employee: #定义一个类
# def __init__(self,first,last,pay):
# self.first = first
# self.last = last
# self.email = first+'@<EMAIL>'
# self.pay = pay
# emp_1 = Employee('john','work',10000) #调用这个类
# emp_2 = Employee('mike','little',12000)
# print(emp_1)
# print(emp_2)
# #每次需要需求,这样每次都要写,不够方便 ,用方法去操作
# print('{} {} {} {}'.format(emp_1.first,emp_1.last,emp_1.email,emp_1.pay))
# print('{} {} {} {}'.format(emp_2.first,emp_2.last,emp_2.email,emp_2.pay))
###########################################################################################################
# class Employee: #定义一个类
# def __init__(self,first,last,pay): #制作了一个构造器
# self.first = first
# self.last = last
# self.email = first+'@<EMAIL>'
# self.pay = pay
# def fullname(self): #定义了一个方法
# return('{} {} '.format(self.first,self.last))
# emp_1 = Employee('john','work',10000) #调用这个类
# emp_2 = Employee('mike','little',12000)
# print(emp_1)
# print(emp_2)
# #打印的时候直接调用这个方法
# print(emp_1.fullname())
###########################################################################################################
# #二、类变量的使用
# class Employee: #定义一个类
# pay_amount = 1.2 #类的成员变量
# num_emps =0
# def __init__(self,first,last,pay): #制作了一个构造器
# self.first = first
# self.last = last
# self.email = first+'<EMAIL>'
# self.pay = pay
# Employee.num_emps+=1
# def fullname(self): #定义了一个方法
# return('{} {} '.format(self.first,self.last))
# def pay_raise(self): #定义一个方法
# self.pay = self.pay * self.pay_amount #使用类里面的成员变量
# print(Employee.num_emps)
# emp_1 = Employee('john','work',10000) #调用这个类
# emp_2 = Employee('mike','little',12000)
# print(Employee.num_emps)
# #外面根据不同要求单独改变费率
# emp_1.pay_amount = 2
# emp_2.pay_amount = 1.4
# emp_1.pay_raise()
# emp_2.pay_raise()
# print(emp_1.pay)
# print(emp_2.pay)
###########################################################################################################
#三、类方法和静态方法
class Employee:
    """Employee record used to demonstrate class methods vs. static methods."""

    def __init__(self, first, last, pay):
        """Store the name parts, a derived e-mail address, and the salary."""
        self.first = first
        self.last = last
        self.email = first + '<EMAIL>'
        self.pay = pay

    def fullname(self):
        """Return ``'first last '`` (the trailing space mirrors the original format)."""
        return f'{self.first} {self.last} '

    def pay_raise(self):
        """Multiply this employee's pay by the raise factor (class or instance level)."""
        self.pay *= self.pay_amount

    @classmethod
    def set_raise_amount(cls, amount):
        """Set the raise factor on the class itself so all instances see it."""
        cls.pay_amount = amount
    # A @staticmethod variant could be demonstrated here as well.
emp_1 = Employee('john','work',10000) # instantiate the class
emp_2 = Employee('mike','little',12000)
# Change the raise factor for every employee at once via the classmethod.
Employee.set_raise_amount(1.5)
print(emp_1.pay_amount)
print(emp_2.pay_amount)
"""
Consolidate Services
Description of all APIs # noqa: E501
The version of the OpenAPI document: version not set
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argocd_python_client.api_client import ApiClient, Endpoint as _Endpoint
from argocd_python_client.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from argocd_python_client.model.gpgkey_gnu_pg_public_key_create_response import GpgkeyGnuPGPublicKeyCreateResponse
from argocd_python_client.model.runtime_error import RuntimeError
from argocd_python_client.model.v1alpha1_gnu_pg_public_key import V1alpha1GnuPGPublicKey
from argocd_python_client.model.v1alpha1_gnu_pg_public_key_list import V1alpha1GnuPGPublicKeyList
class GPGKeyServiceApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Wraps the ArgoCD GPG-key endpoints (create / delete / get / list) as
    `_Endpoint` callables bound to an `ApiClient`.
    """

    def __init__(self, api_client=None):
        # A default ApiClient is created when none is supplied.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

        # ---- POST /api/v1/gpgkeys ------------------------------------------
        def __g_pg_key_service_create(
            self,
            body,
            **kwargs
        ):
            """Create one or more GPG public keys in the server's configuration  # noqa: E501

            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.g_pg_key_service_create(body, async_req=True)
            >>> result = thread.get()

            Args:
                body (V1alpha1GnuPGPublicKey): Raw key data of the GPG key(s) to create

            Keyword Args:
                upsert (bool): Whether to upsert already existing public keys.. [optional]
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (int/float/tuple): timeout setting for this request. If
                    one number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                GpgkeyGnuPGPublicKeyCreateResponse
                    If the method is called asynchronously, returns the request
                    thread.
            """
            # Fill in framework defaults for any request option the caller omitted.
            kwargs['async_req'] = kwargs.get(
                'async_req', False
            )
            kwargs['_return_http_data_only'] = kwargs.get(
                '_return_http_data_only', True
            )
            kwargs['_preload_content'] = kwargs.get(
                '_preload_content', True
            )
            kwargs['_request_timeout'] = kwargs.get(
                '_request_timeout', None
            )
            kwargs['_check_input_type'] = kwargs.get(
                '_check_input_type', True
            )
            kwargs['_check_return_type'] = kwargs.get(
                '_check_return_type', True
            )
            kwargs['_host_index'] = kwargs.get('_host_index')
            kwargs['body'] = \
                body
            # `self` here is the _Endpoint instance this function is attached to.
            return self.call_with_http_info(**kwargs)

        self.g_pg_key_service_create = _Endpoint(
            settings={
                'response_type': (GpgkeyGnuPGPublicKeyCreateResponse,),
                'auth': [],
                'endpoint_path': '/api/v1/gpgkeys',
                'operation_id': 'g_pg_key_service_create',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'body',
                    'upsert',
                ],
                'required': [
                    'body',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'body':
                        (V1alpha1GnuPGPublicKey,),
                    'upsert':
                        (bool,),
                },
                'attribute_map': {
                    'upsert': 'upsert',
                },
                'location_map': {
                    'body': 'body',
                    'upsert': 'query',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client,
            callable=__g_pg_key_service_create
        )

        # ---- DELETE /api/v1/gpgkeys ----------------------------------------
        def __g_pg_key_service_delete(
            self,
            **kwargs
        ):
            """Delete specified GPG public key from the server's configuration  # noqa: E501

            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.g_pg_key_service_delete(async_req=True)
            >>> result = thread.get()

            Keyword Args:
                key_id (str): The GPG key ID to query for.. [optional]
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (int/float/tuple): timeout setting for this request. If
                    one number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                bool, date, datetime, dict, float, int, list, str, none_type
                    If the method is called asynchronously, returns the request
                    thread.
            """
            kwargs['async_req'] = kwargs.get(
                'async_req', False
            )
            kwargs['_return_http_data_only'] = kwargs.get(
                '_return_http_data_only', True
            )
            kwargs['_preload_content'] = kwargs.get(
                '_preload_content', True
            )
            kwargs['_request_timeout'] = kwargs.get(
                '_request_timeout', None
            )
            kwargs['_check_input_type'] = kwargs.get(
                '_check_input_type', True
            )
            kwargs['_check_return_type'] = kwargs.get(
                '_check_return_type', True
            )
            kwargs['_host_index'] = kwargs.get('_host_index')
            return self.call_with_http_info(**kwargs)

        self.g_pg_key_service_delete = _Endpoint(
            settings={
                'response_type': (bool, date, datetime, dict, float, int, list, str, none_type,),
                'auth': [],
                'endpoint_path': '/api/v1/gpgkeys',
                'operation_id': 'g_pg_key_service_delete',
                'http_method': 'DELETE',
                'servers': None,
            },
            params_map={
                'all': [
                    'key_id',
                ],
                'required': [],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'key_id':
                        (str,),
                },
                'attribute_map': {
                    'key_id': 'keyID',
                },
                'location_map': {
                    'key_id': 'query',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client,
            callable=__g_pg_key_service_delete
        )

        # ---- GET /api/v1/gpgkeys/{keyID} -----------------------------------
        def __g_pg_key_service_get(
            self,
            key_id,
            **kwargs
        ):
            """Get information about specified GPG public key from the server  # noqa: E501

            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.g_pg_key_service_get(key_id, async_req=True)
            >>> result = thread.get()

            Args:
                key_id (str): The GPG key ID to query for

            Keyword Args:
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (int/float/tuple): timeout setting for this request. If
                    one number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                V1alpha1GnuPGPublicKey
                    If the method is called asynchronously, returns the request
                    thread.
            """
            kwargs['async_req'] = kwargs.get(
                'async_req', False
            )
            kwargs['_return_http_data_only'] = kwargs.get(
                '_return_http_data_only', True
            )
            kwargs['_preload_content'] = kwargs.get(
                '_preload_content', True
            )
            kwargs['_request_timeout'] = kwargs.get(
                '_request_timeout', None
            )
            kwargs['_check_input_type'] = kwargs.get(
                '_check_input_type', True
            )
            kwargs['_check_return_type'] = kwargs.get(
                '_check_return_type', True
            )
            kwargs['_host_index'] = kwargs.get('_host_index')
            kwargs['key_id'] = \
                key_id
            return self.call_with_http_info(**kwargs)

        self.g_pg_key_service_get = _Endpoint(
            settings={
                'response_type': (V1alpha1GnuPGPublicKey,),
                'auth': [],
                'endpoint_path': '/api/v1/gpgkeys/{keyID}',
                'operation_id': 'g_pg_key_service_get',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'key_id',
                ],
                'required': [
                    'key_id',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'key_id':
                        (str,),
                },
                'attribute_map': {
                    'key_id': 'keyID',
                },
                'location_map': {
                    'key_id': 'path',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client,
            callable=__g_pg_key_service_get
        )

        # ---- GET /api/v1/gpgkeys -------------------------------------------
        def __g_pg_key_service_list(
            self,
            **kwargs
        ):
            """List all available repository certificates  # noqa: E501

            This method makes a synchronous HTTP request by default. To make an
            asynchronous HTTP request, please pass async_req=True

            >>> thread = api.g_pg_key_service_list(async_req=True)
            >>> result = thread.get()

            Keyword Args:
                key_id (str): The GPG key ID to query for.. [optional]
                _return_http_data_only (bool): response data without head status
                    code and headers. Default is True.
                _preload_content (bool): if False, the urllib3.HTTPResponse object
                    will be returned without reading/decoding response data.
                    Default is True.
                _request_timeout (int/float/tuple): timeout setting for this request. If
                    one number provided, it will be total request timeout. It can also
                    be a pair (tuple) of (connection, read) timeouts.
                    Default is None.
                _check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
                    Default is True.
                _host_index (int/None): specifies the index of the server
                    that we want to use.
                    Default is read from the configuration.
                async_req (bool): execute request asynchronously

            Returns:
                V1alpha1GnuPGPublicKeyList
                    If the method is called asynchronously, returns the request
                    thread.
            """
            kwargs['async_req'] = kwargs.get(
                'async_req', False
            )
            kwargs['_return_http_data_only'] = kwargs.get(
                '_return_http_data_only', True
            )
            kwargs['_preload_content'] = kwargs.get(
                '_preload_content', True
            )
            kwargs['_request_timeout'] = kwargs.get(
                '_request_timeout', None
            )
            kwargs['_check_input_type'] = kwargs.get(
                '_check_input_type', True
            )
            kwargs['_check_return_type'] = kwargs.get(
                '_check_return_type', True
            )
            kwargs['_host_index'] = kwargs.get('_host_index')
            return self.call_with_http_info(**kwargs)

        self.g_pg_key_service_list = _Endpoint(
            settings={
                'response_type': (V1alpha1GnuPGPublicKeyList,),
                'auth': [],
                'endpoint_path': '/api/v1/gpgkeys',
                'operation_id': 'g_pg_key_service_list',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'key_id',
                ],
                'required': [],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'key_id':
                        (str,),
                },
                'attribute_map': {
                    'key_id': 'keyID',
                },
                'location_map': {
                    'key_id': 'query',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client,
            callable=__g_pg_key_service_list
        )
|
'''
Excited States software: qFit 3.0
Contributors: <NAME>, <NAME>, and <NAME>.
Contact: <EMAIL>
Copyright (C) 2009-2019 Stanford University
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
This entire text, including the above copyright notice and this permission notice
shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
'''
"""Hierarchically build a multiconformer ligand."""
import argparse
import logging
import os.path
import os
import sys
import time
import numpy as np
from string import ascii_uppercase
from . import MapScaler, Structure, XMap, _Ligand
from .qfit import QFitLigand, QFitLigandOptions
from .logtools import setup_logging, log_run_info
logger = logging.getLogger(__name__)
os.environ["OMP_NUM_THREADS"] = "1"
def build_argparser():
    """Construct the command-line argument parser for qfit_ligand.

    Returns:
        argparse.ArgumentParser: parser covering map input, map preparation,
        conformer sampling, and output options.
    """
    p = argparse.ArgumentParser(description=__doc__)
    p.add_argument("map", type=str,
                   help="Density map in CCP4 or MRC format, or an MTZ file "
                        "containing reflections and phases. For MTZ files "
                        "use the --label options to specify columns to read.")
    p.add_argument("structure", type=str,
                   help="PDB-file containing structure.")
    p.add_argument('-cif', "--cif_file", type=str, default=None,
                   help="CIF file describing the ligand")
    p.add_argument('selection', type=str,
                   help="Chain, residue id, and optionally insertion code for residue in structure, e.g. A,105, or A,105:A.")

    # Map input options
    p.add_argument("-l", "--label", default="FWT,PHWT", metavar="<F,PHI>",
                   help="MTZ column labels to build density.")
    p.add_argument('-r', "--resolution", type=float, default=None, metavar="<float>",
                   help="Map resolution in angstrom. Only use when providing CCP4 map files.")
    p.add_argument("-m", "--resolution_min", type=float, default=None, metavar="<float>",
                   help="Lower resolution bound in angstrom. Only use when providing CCP4 map files.")
    p.add_argument("-z", "--scattering", choices=["xray", "electron"], default="xray",
                   help="Scattering type.")
    p.add_argument("-rb", "--randomize-b", action="store_true", dest="randomize_b",
                   help="Randomize B-factors of generated conformers.")
    p.add_argument('-o', '--omit', action="store_true",
                   help="Map file is an OMIT map. This affects the scaling procedure of the map.")

    # Map prep options
    p.add_argument("-ns", "--no-scale", action="store_false", dest="scale",
                   help="Do not scale density.")
    # FIX: help text was garbled ("Densities values ... <density_cutoff_value"
    # with an unclosed placeholder).
    p.add_argument("-dc", "--density-cutoff", type=float, default=0.3, metavar="<float>",
                   help="Density values below the cutoff are set to <density-cutoff-value>.")
    p.add_argument("-dv", "--density-cutoff-value", type=float, default=-1, metavar="<float>",
                   help="Density values below <density-cutoff> are set to this value.")
    p.add_argument("-nosub", "--no-subtract", action="store_false", dest="subtract",
                   help="Do not subtract Fcalc of the neighboring residues when running qFit.")
    p.add_argument("-pad", "--padding", type=float, default=8.0, metavar="<float>",
                   help="Padding size for map creation.")
    p.add_argument("-nw", "--no-waters", action="store_true", dest="nowaters",
                   help="Keep waters, but do not consider them for soft clash detection.")

    # Sampling options
    p.add_argument("-nb", "--no-build", action="store_false", dest="build",
                   help="Do not build ligand.")
    p.add_argument("-nl", "--no-local", action="store_false", dest="local_search",
                   help="Do not perform a local search.")
    p.add_argument("--remove-conformers-below-cutoff", action="store_true",
                   dest="remove_conformers_below_cutoff",
                   help=("Remove conformers during sampling that have atoms that have "
                         "no density support for, i.e. atoms are positioned at density "
                         "values below cutoff value."))
    p.add_argument('-cf', "--clash_scaling_factor", type=float, default=0.75, metavar="<float>",
                   help="Set clash scaling factor. Default = 0.75")
    p.add_argument('-ec', "--external_clash", dest="external_clash", action="store_true",
                   help="Enable external clash detection during sampling.")
    p.add_argument("-bs", "--bulk_solvent_level", default=0.3, type=float, metavar="<float>",
                   help="Bulk solvent level in absolute values.")
    p.add_argument("-b", "--build-stepsize", type=int, default=2, metavar="<int>", dest="dofs_per_iteration",
                   help="Number of internal degrees that are sampled/built per iteration.")
    p.add_argument("-s", "--stepsize", type=float, default=10,
                   metavar="<float>", dest="sample_ligand_stepsize",
                   help="Stepsize for dihedral angle sampling in degree.")
    p.add_argument("-c", "--cardinality", type=int, default=5, metavar="<int>",
                   help="Cardinality constraint used during MIQP.")
    p.add_argument("-t", "--threshold", type=float, default=0.2, metavar="<float>",
                   help="Threshold constraint used during MIQP.")
    p.add_argument("-it", "--intermediate-threshold", type=float, default=0.01, metavar="<float>",
                   help="Threshold constraint during intermediate MIQP.")
    p.add_argument("-ic", "--intermediate-cardinality", type=int, default=5, metavar="<int>",
                   help="Cardinality constraint used during intermediate MIQP.")
    p.add_argument("-hy", "--hydro", dest="hydro", action="store_true",
                   help="Include hydrogens during calculations.")
    p.add_argument("-T", "--no-threshold-selection", dest="bic_threshold", action="store_false",
                   help="Do not use BIC to select the most parsimonious MIQP threshold")

    # Output options
    p.add_argument("-d", "--directory", type=os.path.abspath, default='.', metavar="<dir>",
                   help="Directory to store results.")
    p.add_argument("-v", "--verbose", action="store_true",
                   help="Be verbose.")
    p.add_argument("--debug", action="store_true",
                   help="Log as much information as possible.")
    p.add_argument("--write_intermediate_conformers", action="store_true",
                   help="Write intermediate structures to file (useful with debugging).")
    p.add_argument("--pdb", help="Name of the input PDB.")
    return p
def prepare_qfit_ligand(options):
    """Load input files and build a QFitLigand job.

    Args:
        options: QFitLigandOptions populated from the command line.

    Returns:
        tuple: (QFitLigand job, chain id, residue id string, insertion code).

    Raises:
        RuntimeError: when the selection matches no ligand atoms.
    """
    # Load structure and prepare it
    structure = Structure.fromfile(options.structure)
    if not options.hydro:
        structure = structure.extract('e', 'H', '!=')
    chainid, resi = options.selection.split(',')
    if ':' in resi:
        resi, icode = resi.split(':')
    else:
        icode = ''
    # Extract the ligand:
    structure_ligand = structure.extract(f'resi {resi} and chain {chainid}')  # fix ligand name
    if icode:
        structure_ligand = structure_ligand.extract('icode', icode)  # fix ligand name
    sel_str = f"resi {resi} and chain {chainid}"
    sel_str = f"not ({sel_str})"  # TO DO COLLAPSE
    receptor = structure.extract(sel_str)  # selecting everything that is not the ligand of interest
    receptor = receptor.extract("record", "ATOM")  # receptor.extract('resn', 'HOH', '!=')
    # Check which altlocs are present in the ligand. If none, take the
    # A-conformer as default.
    altlocs = sorted(list(set(structure_ligand.altloc)))
    if len(altlocs) > 1:
        try:
            altlocs.remove('')
        except ValueError:
            pass
        # Keep only the first altloc; strip all others from ligand and receptor.
        for altloc in altlocs[1:]:
            sel_str = f"resi {resi} and chain {chainid} and altloc {altloc}"
            sel_str = f"not ({sel_str})"
            structure_ligand = structure_ligand.extract(sel_str)
            receptor = receptor.extract(f"not altloc {altloc}")
    altloc = structure_ligand.altloc[-1]
    if options.cif_file:  # TO DO: STEPHANIE
        # BUG FIX: the original passed `cif_file=args.cif_file`, but `args` is a
        # local of main() and is undefined here — every run with -cif raised
        # NameError. Use the options object instead.
        ligand = _Ligand(structure_ligand.data,
                         structure_ligand._selection,
                         link_data=structure_ligand.link_data,
                         cif_file=options.cif_file)
    else:
        ligand = _Ligand(structure_ligand.data,
                         structure_ligand._selection,
                         link_data=structure_ligand.link_data)
    if ligand.natoms == 0:
        raise RuntimeError("No atoms were selected for the ligand. Check "
                           " the selection input.")
    ligand.altloc = ''
    ligand.q = 1
    logger.info("Receptor atoms selected: {natoms}".format(natoms=receptor.natoms))
    logger.info("Ligand atoms selected: {natoms}".format(natoms=ligand.natoms))
    # Load and process the electron density map:
    xmap = XMap.fromfile(options.map, resolution=options.resolution, label=options.label)
    xmap = xmap.canonical_unit_cell()
    if options.scale:
        # Prepare X-ray map
        scaler = MapScaler(xmap, scattering=options.scattering)
        if options.omit:
            footprint = structure_ligand
        else:
            footprint = structure
        # Scaling radius follows the map resolution when one is available.
        radius = 1.5
        reso = None
        if xmap.resolution.high is not None:
            reso = xmap.resolution.high
        elif options.resolution is not None:
            reso = options.resolution
        if reso is not None:
            radius = 0.5 + reso / 3.0
        scaler.scale(footprint, radius=radius)
    xmap = xmap.extract(ligand.coor, padding=options.padding)
    # MRC output is required when the map origin is non-zero.
    ext = '.ccp4'
    if not np.allclose(xmap.origin, 0):
        ext = '.mrc'
    scaled_fname = os.path.join(options.directory, f'scaled{ext}')  # this should be an option
    xmap.tofile(scaled_fname)
    return QFitLigand(ligand, structure, xmap, options), chainid, resi, icode
def main():
    """Entry point: parse arguments, run QFitLigand, and write conformer output."""
    p = build_argparser()
    args = p.parse_args()
    try:
        os.makedirs(args.directory)
    except OSError:
        # Output directory already exists; that is fine.
        pass
    # Prefix output files with the PDB name when one was supplied.
    if args.pdb is not None:
        pdb_id = args.pdb + '_'
    else:
        pdb_id = ''
    # Apply the arguments to options
    options = QFitLigandOptions()
    options.apply_command_args(args)
    # Setup logger
    setup_logging(options=options, filename="qfit_ligand.log")
    log_run_info(options, logger)
    qfit_ligand, chainid, resi, icode = prepare_qfit_ligand(options=options)
    time0 = time.time()
    qfit_ligand.run()
    logger.info(f"Total time: {time.time() - time0}s")
    # POST QFIT LIGAND WRITE OUTPUT (done within the qfit protein run command)
    conformers = qfit_ligand.get_conformers()
    nconformers = len(conformers)
    altloc = ''
    multiconformer = None
    for n, conformer in enumerate(conformers):
        if nconformers > 1:
            altloc = ascii_uppercase[n]
        conformer.altloc = ''
        fname = os.path.join(options.directory, f'conformer_{n}.pdb')
        conformer.tofile(fname)
        conformer.altloc = altloc
        # Merge conformers into one structure. Initialize explicitly from the
        # first conformer instead of the original try/except-Exception pattern,
        # which also silently masked genuine .combine() failures.
        if multiconformer is None:
            multiconformer = Structure.fromstructurelike(conformer.copy())
        else:
            multiconformer = multiconformer.combine(conformer)
    fname = os.path.join(options.directory, pdb_id + f'multiconformer_{chainid}_{resi}.pdb')
    if icode:
        fname = os.path.join(options.directory, pdb_id + f'multiconformer_{chainid}_{resi}_{icode}.pdb')
    if multiconformer is None:
        logger.error("qFit-ligand failed to produce any valid conformers.")
    else:
        multiconformer.tofile(fname)
|
<reponame>mmmaaaggg/easytrader<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on 2017/9/19
@author: MG
"""
import win32gui
import re
# def _filter_trade_client(hwnd, hwnd_list, filter_func):
# if filter_func(hwnd):
# hwnd_list.append(hwnd)
def filter_confirm_win_func(hwnd):
    """Return True when *hwnd* is a '#32770' dialog containing a Static child titled '提示'.

    ('#32770' is the window class of standard Windows dialog boxes; '提示'
    means "prompt/hint" and identifies the broker's confirmation pop-up.)
    """
    # Only consider windows whose class name is '#32770'.
    re_classname_pattern = '#32770'
    clsname = win32gui.GetClassName(hwnd)
    if re.match(re_classname_pattern, clsname) is None:
        return False
    # Look for a child window whose class is 'Static' and whose title is '提示'.
    hwnd_chld_list = []
    try:
        win32gui.EnumChildWindows(hwnd, lambda hwnd_sub, hwnd_chld_list_tmp: hwnd_chld_list_tmp.append(hwnd_sub),
                                  hwnd_chld_list)
        for hwnd_sub in hwnd_chld_list:
            if win32gui.GetClassName(hwnd_sub) == 'Static' and win32gui.GetWindowText(hwnd_sub) == '提示':
                return True
    except Exception:
        # FIX: the original bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit. Enumeration can fail for windows that vanish mid-scan;
        # treat any such failure as "no match".
        pass
    return False
def find_window_whnd(filter_func, ret_first=True):
    """Enumerate top-level windows and return the handle(s) accepted by *filter_func*.

    :param filter_func: predicate taking a window handle; True keeps the handle.
    :param ret_first: when True return only the first matching handle,
        otherwise return the full list of matches.
    :return: a single handle, a list of handles, or None when nothing matched.
    """
    # Find all (or the first) window handle(s) matching filter_func.
    window_hwnd = None
    hwnd_list = []
    # NOTE(review): the `else hwnd` branch presumably keeps the callback's return
    # value truthy for non-matches so pywin32 continues enumerating — confirm
    # EnumWindows callback-return semantics before restructuring this lambda.
    win32gui.EnumWindows(lambda hwnd, hwnd_list_tmp: hwnd_list_tmp.append(hwnd) if filter_func(hwnd) else hwnd,
                         hwnd_list)
    if len(hwnd_list) > 0:
        if ret_first:
            window_hwnd = hwnd_list[0]
            return window_hwnd
        else:
            return hwnd_list
    else:
        return None
def filter_hwnd_func(hwnd, contain_window_text):
    """Return True when *hwnd* is a '#32770' dialog containing a Static child
    whose window text equals *contain_window_text* (e.g. '可用金额', '提示')."""
    try:
        # Only consider windows whose class name is '#32770' (standard dialogs).
        re_classname_pattern = '#32770'
        clsname = win32gui.GetClassName(hwnd)
        if re.match(re_classname_pattern, clsname) is None:
            return False
        # Look for a child 'Static' control carrying the requested label text.
        hwnd_chld_list = []
        win32gui.EnumChildWindows(hwnd, lambda hwnd_sub, hwnd_chld_list_tmp: hwnd_chld_list_tmp.append(hwnd_sub),
                                  hwnd_chld_list)
        for hwnd_sub in hwnd_chld_list:
            if win32gui.GetClassName(hwnd_sub) == 'Static' and win32gui.GetWindowText(hwnd_sub) == contain_window_text:
                return True
    except Exception:
        # FIX: the original bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; win32 enumeration failures simply mean "no match".
        pass
    return False
if __name__ == "__main__":
# hwnd_list = find_window_whnd(filter_func, ret_first=False)
# print([(hwnd, hex(hwnd)) for hwnd in hwnd_list])
# hwnd = find_window_whnd(filter_confirm_win_func, ret_first=True)
hwnd = find_window_whnd(lambda x: filter_hwnd_func(x, '提示'), ret_first=True)
print("hwnd:%d [%s]" % (hwnd, hex(hwnd))) |
<reponame>andbortnik/thenewboston-node
from datetime import datetime, timedelta
from unittest.mock import patch
import pytest
from thenewboston_node.business_logic.blockchain.mock_blockchain import MockBlockchain
from thenewboston_node.business_logic.models import (
Block, BlockMessage, CoinTransferSignedChangeRequest, CoinTransferSignedChangeRequestMessage,
CoinTransferTransaction
)
from thenewboston_node.business_logic.node import get_node_signing_key
from thenewboston_node.core.utils import baker
from thenewboston_node.core.utils.cryptography import KeyPair, derive_public_key
def test_can_serialize_coin_transfer_block():
    """Serialize a baker-generated coin-transfer block and verify the exact
    key sets and value types at every nesting level of the resulting dict."""
    # Build a block with a 'ct' (coin transfer) message via model-bakery fixtures.
    signed_change_request = baker.make(CoinTransferSignedChangeRequest)
    block_message = baker.make(BlockMessage, block_type='ct', signed_change_request=signed_change_request)
    block = baker.make(Block, message=block_message)
    block_dict = block.serialize_to_dict()
    # Top level: envelope with signer/message/hash/signature.
    assert isinstance(block_dict, dict)
    assert block_dict.keys() == {'signer', 'message', 'hash', 'signature'}
    assert isinstance(block_dict['signer'], str)
    assert isinstance(block_dict['message'], dict)
    assert isinstance(block_dict['hash'], str)
    assert isinstance(block_dict['signature'], str)
    # Message level: block metadata plus the embedded signed change request.
    block_message = block_dict['message']
    assert block_message.keys() == {
        'block_type', 'signed_change_request', 'timestamp', 'block_number', 'block_identifier',
        'updated_account_states'
    }
    assert block_message['block_type'] == 'ct'
    assert isinstance(block_message['signed_change_request'], dict)
    assert isinstance(block_message['timestamp'], str)
    assert isinstance(block_message['block_number'], int)
    assert isinstance(block_message['block_identifier'], str)
    assert isinstance(block_message['updated_account_states'], dict)
    # Signed change request envelope.
    signed_change_request = block_message['signed_change_request']
    assert signed_change_request.keys() == {'signer', 'message', 'signature'}
    assert isinstance(signed_change_request['signer'], str)
    assert isinstance(signed_change_request['message'], dict)
    assert isinstance(signed_change_request['signature'], str)
    # Request message: balance lock plus the transaction list.
    signed_change_request_message = signed_change_request['message']
    assert signed_change_request_message.keys() == {'balance_lock', 'txs'}
    assert isinstance(signed_change_request_message['balance_lock'], str)
    assert isinstance(signed_change_request_message['txs'], list)
    for transaction in signed_change_request_message['txs']:
        assert isinstance(transaction, dict)
        # 'is_fee' is serialized only when set, so the key set differs per tx.
        if 'is_fee' in transaction:
            assert transaction.keys() == {'recipient', 'amount', 'is_fee', 'memo'}
            assert isinstance(transaction['is_fee'], bool)
        else:
            assert transaction.keys() == {'recipient', 'amount', 'memo'}
        assert isinstance(transaction['recipient'], str)
        assert isinstance(transaction['amount'], int)
        assert isinstance(transaction['memo'], str)
    # Per-account state updates keyed by account id.
    updated_account_states = block_message['updated_account_states']
    for key, value in updated_account_states.items():
        assert isinstance(key, str)
        assert isinstance(value, dict)
        assert value.keys() == {'balance', 'balance_lock', 'node', 'primary_validator_schedule'}
        assert isinstance(value['balance'], int)
        assert isinstance(value['balance_lock'], str)
        assert isinstance(value['node'], dict)
        # The node's identifier is implied by the account key, so it is omitted.
        node = value['node']
        assert 'identifier' not in node
        assert isinstance(node['network_addresses'], list)
        assert isinstance(node['fee_amount'], int)
        assert isinstance(node['fee_account'], str)
@pytest.mark.usefixtures('get_next_block_identifier_mock', 'get_next_block_number_mock')
def test_can_create_block_from_signed_change_request(
    forced_mock_blockchain, sample_signed_change_request: CoinTransferSignedChangeRequest
):
    """Create a block from a sample signed change request and verify its
    signature, metadata, and resulting per-account balance updates."""
    sender = sample_signed_change_request.signer
    assert sender

    # Give the sender a 450-coin balance (everyone else zero) so the
    # 425 + 4 + 1 transfer below is fully funded.
    def get_account_balance(self, account, on_block_number):
        return 450 if account == sender else 0
    with patch.object(MockBlockchain, 'get_account_balance', new=get_account_balance):
        block = Block.create_from_signed_change_request(
            forced_mock_blockchain, sample_signed_change_request, get_node_signing_key()
        )
    # Envelope: the block is signed by the node's key.
    assert block.message
    assert block.hash
    assert block.signature
    block.validate_signature()
    assert block.signer
    assert block.signer == derive_public_key(get_node_signing_key())
    block_message = block.message
    signed_change_request = block_message.signed_change_request
    assert signed_change_request == sample_signed_change_request
    assert signed_change_request is not sample_signed_change_request  # test that a copy of it was made
    # Metadata: naive UTC timestamp close to "now", number/identifier from the mocks.
    assert isinstance(block_message.timestamp, datetime)
    assert block_message.timestamp.tzinfo is None
    assert block_message.timestamp - datetime.utcnow() < timedelta(seconds=1)
    assert block_message.block_number == 0
    assert block_message.block_identifier == 'next-block-identifier'
    # Account updates: sender pays 425 + 4 (node fee) + 1 (PV fee); recipients
    # gain a balance but have no balance lock yet.
    updated_account_states = block_message.updated_account_states
    assert isinstance(updated_account_states, dict)
    assert len(updated_account_states) == 4
    assert updated_account_states[sender].balance == 450 - 425 - 4 - 1
    assert updated_account_states[sender].balance_lock
    assert updated_account_states['<KEY>'].balance == 425
    assert updated_account_states['<KEY>'
                                  ].balance_lock is None
    assert updated_account_states['<KEY>'].balance == 4
    assert updated_account_states['<KEY>'
                                  ].balance_lock is None
    assert updated_account_states['5e12967707909e62b2bb2036c209085a784fabbc3deccefee70052b6181c8ed8'].balance == 1
    assert updated_account_states['5e12967707909e62b2bb2036c209085a784fabbc3deccefee70052b6181c8ed8'
                                  ].balance_lock is None
@pytest.mark.usefixtures(
    'forced_mock_network', 'get_next_block_identifier_mock', 'get_next_block_number_mock', 'get_account_state_mock',
    'get_account_lock_mock', 'get_primary_validator_mock', 'get_preferred_node_mock'
)
def test_can_create_block_from_main_transaction(
    forced_mock_blockchain, treasury_account_key_pair: KeyPair, user_account_key_pair: KeyPair,
    primary_validator_key_pair: KeyPair, node_key_pair: KeyPair
):
    """Block.create_from_main_transaction builds a signed block whose embedded
    request contains the main transfer plus the PV (4) and node (1) fee
    transactions, and whose account states reflect all three.
    """

    def get_account_balance(self, account, on_block_number):
        # Fund only the treasury: 430 covers the 20 transfer plus 4 + 1 in fees.
        return 430 if account == treasury_account_key_pair.public else 0

    with patch.object(MockBlockchain, 'get_account_balance', new=get_account_balance):
        block = Block.create_from_main_transaction(
            blockchain=forced_mock_blockchain,
            recipient=user_account_key_pair.public,
            amount=20,
            request_signing_key=treasury_account_key_pair.private,
            pv_signing_key=get_node_signing_key(),
        )

    # Assert block
    assert block.message
    assert block.hash
    assert block.signature
    block.validate_signature()
    assert block.signer
    assert block.signer == derive_public_key(get_node_signing_key())

    # Assert block.message
    block_message = block.message
    assert block_message
    # Timestamp is naive (assumed UTC) and recent.
    assert isinstance(block_message.timestamp, datetime)
    assert block_message.timestamp.tzinfo is None
    assert block_message.timestamp - datetime.utcnow() < timedelta(seconds=1)
    assert block_message.block_number == 0
    assert block_message.block_identifier == 'next-block-identifier'

    # Four touched accounts: treasury (sender), user (recipient), PV fee, node fee.
    updated_account_states = block_message.updated_account_states
    assert isinstance(updated_account_states, dict)
    assert len(updated_account_states) == 4

    assert updated_account_states[treasury_account_key_pair.public].balance == 430 - 25
    assert updated_account_states[treasury_account_key_pair.public].balance_lock
    assert updated_account_states[user_account_key_pair.public].balance == 20
    assert updated_account_states[user_account_key_pair.public].balance_lock is None
    assert updated_account_states[primary_validator_key_pair.public].balance == 4
    assert updated_account_states[primary_validator_key_pair.public].balance_lock is None
    assert updated_account_states[node_key_pair.public].balance == 1
    assert updated_account_states[node_key_pair.public].balance_lock is None

    # Assert block_message.signed_change_request
    signed_change_request = block_message.signed_change_request
    assert signed_change_request.signer == treasury_account_key_pair.public
    assert signed_change_request.signature

    # Assert block_message.signed_change_request.message
    coin_transfer_signed_request_message = signed_change_request.message
    assert isinstance(coin_transfer_signed_request_message, CoinTransferSignedChangeRequestMessage)
    assert coin_transfer_signed_request_message.balance_lock
    assert len(coin_transfer_signed_request_message.txs) == 3
    # One transaction per recipient: the main transfer plus the two fee transfers.
    txs_dict = {tx.recipient: tx for tx in coin_transfer_signed_request_message.txs}
    assert len(txs_dict) == 3

    assert txs_dict[user_account_key_pair.public].amount == 20
    assert txs_dict[user_account_key_pair.public].is_fee is False
    assert txs_dict[primary_validator_key_pair.public].amount == 4
    assert txs_dict[primary_validator_key_pair.public].is_fee
    assert txs_dict[node_key_pair.public].amount == 1
    assert txs_dict[node_key_pair.public].is_fee

    assert coin_transfer_signed_request_message.get_total_amount() == 25
@pytest.mark.usefixtures('get_next_block_identifier_mock', 'get_next_block_number_mock', 'get_account_state_mock')
def test_normalized_block_message(forced_mock_blockchain, sample_signed_change_request):
    """The normalized (canonical, whitespace-free, key-sorted JSON) form of a
    block message matches the expected byte string exactly, modulo the
    timestamp, which is substituted after block creation.
    """
    # Expected canonical JSON with a placeholder where the timestamp will go.
    expected_message_template = (
        '{'
        '"block_identifier":"next-block-identifier",'
        '"block_number":0,'
        '"block_type":"ct",'
        '"signed_change_request":'
        '{"message":{"balance_lock":'
        '"4d3cf1d9e4547d324de2084b568f807ef12045075a7a01b8bec1e7f013fc3732",'
        '"txs":'
        '[{"amount":425,"recipient":"<KEY>"},'
        '{"amount":1,"is_fee":true,"recipient":"5e12967707909e62b2bb2036c209085a784fabbc3deccefee70052b6181c8ed8"},'
        '{"amount":4,"is_fee":true,"recipient":'
        '"ad1f8845c6a1abb6011a2a434a079a087c460657aad54329a84b406dce8bf314"}]},'
        '"signature":"362dc47191d5d1a33308de1f036a5e93fbaf0b05fa971d9537f954f13cd22b5ed9bee56f4701bd'
        'af9b995c47271806ba73e75d63f46084f5830cec5f5b7e9600",'
        '"signer":"4d3cf1d9e4547d324de2084b568f807ef12045075a7a01b8bec1e7f013fc3732"},'
        '"timestamp":"<replace-with-timestamp>",'
        '"updated_account_states":{'
        '"<KEY>":{"balance":425},'
        '"4d3cf1d9e4547d324de2084b568f807ef12045075a7a01b8bec1e7f013fc3732":'
        '{'
        '"balance":20,'
        '"balance_lock":"ff3127bdb408e5f3f4f07dd364ce719b2854dc28ee66aa7af839e46468761885"'
        '},'
        '"5e12967707909e62b2bb2036c209085a784fabbc3deccefee70052b6181c8ed8":{"balance":1},'
        '"ad1f8845c6a1abb6011a2a434a079a087c460657aad54329a84b406dce8bf314":{"balance":4}'
        '}'
        '}'
    )

    def get_account_balance(self, account, on_block_number):
        # Fund the request signer so block creation succeeds.
        return 450 if account == sample_signed_change_request.signer else 0

    with patch.object(MockBlockchain, 'get_account_balance', new=get_account_balance):
        block = Block.create_from_signed_change_request(
            forced_mock_blockchain, sample_signed_change_request, get_node_signing_key()
        )

    # Inject the actual timestamp, then compare the raw normalized bytes.
    expected_message = expected_message_template.replace(
        '<replace-with-timestamp>', block.message.timestamp.isoformat()
    ).encode('utf-8')
    assert block.message.get_normalized() == expected_message
def test_can_serialize_deserialize_coin_transfer_signed_change_request_message():
    """Round-tripping a baked message through dict serialization yields an
    equal but distinct object."""
    original = baker.make(CoinTransferSignedChangeRequestMessage)
    round_tripped = CoinTransferSignedChangeRequestMessage.deserialize_from_dict(
        original.serialize_to_dict()
    )
    # Equal in value, yet a genuine copy rather than the same instance.
    assert round_tripped == original
    assert round_tripped is not original
@pytest.mark.usefixtures('get_next_block_identifier_mock', 'get_next_block_number_mock', 'get_account_state_mock')
def test_can_serialize_deserialize(forced_mock_blockchain, sample_signed_change_request):
    """A freshly created block survives a dict serialization round trip unchanged."""
    original_block = Block.create_from_signed_change_request(
        forced_mock_blockchain, sample_signed_change_request, get_node_signing_key()
    )
    restored_block = Block.deserialize_from_dict(original_block.serialize_to_dict())
    # Value-equal, but a distinct object (a real copy was produced).
    assert restored_block == original_block
    assert restored_block is not original_block
@pytest.mark.usefixtures('get_next_block_identifier_mock', 'get_next_block_number_mock', 'get_account_lock_mock')
def test_can_duplicate_recipients(
    forced_mock_blockchain: MockBlockchain, treasury_account_key_pair: KeyPair, user_account_key_pair: KeyPair
):
    """Two transactions to the same recipient within one request are merged
    into a single updated account state with the amounts summed.
    """

    def get_account_balance(self, account, on_block_number):
        # Treasury starts at 430; every other account (incl. the recipient) at 10.
        return 430 if account == treasury_account_key_pair.public else 10

    sender = treasury_account_key_pair.public
    recipient = user_account_key_pair.public
    # Build a request with the same recipient listed twice (3 + 5 coins).
    message = CoinTransferSignedChangeRequestMessage(
        balance_lock=forced_mock_blockchain.get_account_current_balance_lock(sender),
        txs=[
            CoinTransferTransaction(recipient=recipient, amount=3),
            CoinTransferTransaction(recipient=recipient, amount=5),
        ]
    )
    request = CoinTransferSignedChangeRequest.create_from_signed_change_request_message(
        message, treasury_account_key_pair.private
    )

    with patch.object(MockBlockchain, 'get_account_balance', new=get_account_balance):
        block = Block.create_from_signed_change_request(forced_mock_blockchain, request, get_node_signing_key())

    # Only two account states: the sender and the (deduplicated) recipient.
    updated_account_states = block.message.updated_account_states
    assert len(updated_account_states) == 2

    sender_account_state = block.message.get_account_state(treasury_account_key_pair.public)
    assert sender_account_state
    assert sender_account_state.balance == 430 - 3 - 5
    assert sender_account_state.balance_lock

    recipient_account_state = block.message.get_account_state(user_account_key_pair.public)
    assert recipient_account_state
    assert recipient_account_state.balance == 10 + 3 + 5
@pytest.mark.skip('Not implemented yet')
def test_validate_block():
    # Placeholder: block validation testing is not implemented yet.  The skip
    # marker keeps it visible in test reports without failing the suite.
    raise NotImplementedError()
|
import csv
import pdb
import json
import ast
import re
import numpy as np
import spacy
import string
# Truncation limits (in tokens) applied when building seq2seq inputs/targets.
MAX_USEFUL_LEN = 100  # max tokens kept from the selected "useful" sentences
MAX_TARGET_LEN = 50  # max tokens kept from the gold summary
# Shared spaCy pipeline; only tokenization is used (other components are disabled per call).
nlp = spacy.load("en_core_web_sm")
def get_filtered_tokens_spacy(text):
    """Tokenize `text` with spaCy and return lowercased tokens that start with
    a letter or are a question mark (numbers, symbols, punctuation dropped)."""
    doc = nlp(text, disable=["ner", "parser", "tagger"])
    lowered_tokens = (str(token).lower() for token in doc)
    return [token for token in lowered_tokens if re.match('^[a-z]|[?]', token)]
def get_ranked_output(path_to_file):
    """Parse a ranked-sentence file into {data_index: [sentence indices]}.

    Each line has the form '<data_index>\\t<idx>,<score>;<idx>,<score>;...'.
    Sentences whose score is exactly 0 are dropped; the relative order of the
    remaining indices is preserved.
    """
    ranking_by_index = {}
    with open(path_to_file, 'r') as handle:
        for raw_line in handle:
            fields = raw_line.rstrip().split('\t')
            kept_indices = []
            for pair in fields[1].split(';'):
                parts = pair.split(',')
                # Zero-scored sentences carry no signal; skip them.
                if float(parts[1]) != 0:
                    kept_indices.append(int(parts[0]))
            ranking_by_index[int(fields[0])] = kept_indices
    return ranking_by_index
def filter_summary(summ_list):
    """Normalize each candidate summary and return the shortest one.

    Normalization: lowercase, strip trailing periods/blanks, replace
    punctuation with spaces, collapse whitespace, and drop tokens starting
    with a digit.  Among the cleaned candidates, the one with the fewest
    tokens wins (ties keep the earliest); '' is returned for an empty list.
    """
    punct_to_space = {ord(ch): ' ' for ch in string.punctuation}
    best_summary = ''
    best_length = float('inf')
    for raw_summary in summ_list:
        cleaned = raw_summary.lower().strip('. ')
        cleaned = cleaned.translate(punct_to_space)
        # Splitting on whitespace also collapses runs of spaces.
        tokens = [tok for tok in cleaned.split() if re.match('^[0-9]', tok) is None]
        if len(tokens) < best_length:
            best_length = len(tokens)
            best_summary = ' '.join(tokens)
    return best_summary
if __name__=='__main__':
    # Configuration: annotation file name, ranking algorithm and tokenizer flavor.
    task_file_name = 'SmartToDo_dataset'
    alg = 'fasttext'
    token_type = 'spacy'

    # Phase 1: load annotation TSV rows into hit_logs[data_index] = {fields...}.
    hit_logs = {}
    path_to_hitApp_data = '../data/Annotations/{}.tsv'.format(task_file_name)
    with open(path_to_hitApp_data, encoding='utf-8') as tsvfile:
        reader = csv.DictReader(tsvfile, delimiter='\t')
        for row in reader:
            judgement = row
            data_index = int(judgement['data_index'])
            sent_dic = json.loads(judgement['sent_json'])
            num_candidates = len(sent_dic)
            if data_index not in hit_logs:
                hit_logs[data_index] = {}
                hit_logs[data_index]['summary'] = []
            hit_logs[data_index]['current_subject'] = judgement['current_subject']
            hit_logs[data_index]['current_sent_to'] = judgement['current_sent_to']
            hit_logs[data_index]['highlight'] = judgement['highlight']
            temp = judgement['words_json']
            candidate_list = ast.literal_eval(temp)
            hit_logs[data_index]['sent-list'] = candidate_list
            # Multiple judgements for the same data_index accumulate their summaries.
            hit_logs[data_index]['summary'].append(judgement['to_do_summary'])
            # NOTE(review): `print(...)` returns None, so this assert has no message.
            assert len(candidate_list) == num_candidates, print("Error in Candidate count !")

    # Phase 2: load sentence rankings produced by the chosen algorithm.
    path_to_ranked_sent = '../data/Gold_SmartToDo_seq2seq_data/sent_ranked_{}.txt'.format(alg)
    ranked_sent_dic = get_ranked_output(path_to_ranked_sent)

    # Output paths.  NOTE(review): these templates contain no '{}', so the
    # .format(token_type) calls are no-ops — presumably leftovers from an
    # earlier naming scheme; confirm before relying on token_type here.
    src_out_name = '../data/SmartToDo_seq2seq_data/src-all.txt'.format(token_type)
    tokenized_tgt_out_name = '../data/SmartToDo_seq2seq_data/tgt-all.txt'.format(token_type)
    gold_tgt_out_name = '../data/Gold_SmartToDo_seq2seq_data/gold-tgt-all.txt'.format(token_type)

    max_K = 1  # Choose maximum of K useful sentences
    inp_len_stats = []
    target_len_stats = []
    start_flag = True  # True until the first example is written (controls leading newlines)

    # Phase 3: emit one src/tgt/gold line per usable data_index.
    print('Creating input/output for seq2seq ...')
    with open(src_out_name, 'w') as fptr_src, open(tokenized_tgt_out_name, 'w') as fptr_tok_tgt, \
            open(gold_tgt_out_name, 'w') as fptr_gold_tgt:
        for data_index in hit_logs:
            # Keep only the first recipient of the e-mail.
            inp_to = hit_logs[data_index]['current_sent_to'].split(';')[0]
            inp_sub = hit_logs[data_index]['current_subject']
            inp_high = hit_logs[data_index]['highlight']
            sent_list = hit_logs[data_index]['sent-list']
            ranked_list = ranked_sent_dic[data_index]
            # Top-K ranked sentences become the "useful" context.
            useful_index = ranked_list[0:max_K]
            useful_str = ' '.join(sent_list[index] for index in useful_index)

            # Skip examples whose gold summary is degenerate.
            gold_target = filter_summary(hit_logs[data_index]['summary'])
            if gold_target == 'none' or gold_target == 'unk' or gold_target == 'unk none':
                continue

            token_func = get_filtered_tokens_spacy
            inp_to_tokens = token_func(inp_to)
            if token_type == 'spacy' and len(inp_to_tokens) > 0:
                # Reduce an e-mail address token to its local part before the
                # first dot (e.g. 'john.doe@x.com' -> 'john').
                name_tok = inp_to_tokens[0]
                if '@' in name_tok:
                    name_tok = name_tok.split('@')[0]
                    name_tok = name_tok.split('.')[0]
                inp_to_tokens = [name_tok] + inp_to_tokens[1:]
            inp_sub_tokens = token_func(inp_sub)
            inp_high_tokens = token_func(inp_high)
            inp_useful_tokens = token_func(useful_str)
            inp_useful_tokens = inp_useful_tokens[0:MAX_USEFUL_LEN]

            # Input = tagged concatenation of recipient, subject, highlight and sentences.
            inp_tokens = ['<to>']+inp_to_tokens+['<sub>']+inp_sub_tokens+['<high>']+inp_high_tokens\
                +['<sent>']+inp_useful_tokens
            inp_str = ' '.join(inp_tokens)
            target_tokens = token_func(gold_target)
            target_tokens = target_tokens[0:MAX_TARGET_LEN]
            target_str = ' '.join(target_tokens)
            inp_len_stats.append(len(inp_tokens))
            target_len_stats.append(len(target_tokens))

            # Write newline-separated records; the first record has no leading newline.
            if start_flag:
                fptr_src.write('{}'.format(inp_str))
                fptr_tok_tgt.write('{}'.format(target_str))
                fptr_gold_tgt.write('{}'.format(gold_target))
                start_flag = False
            else:
                fptr_src.write('\n{}'.format(inp_str))
                fptr_tok_tgt.write('\n{}'.format(target_str))
                fptr_gold_tgt.write('\n{}'.format(gold_target))
    print('Done !')
|
# encoding : UTF-8
from pygame import Vector3
from math import sqrt
from Settings import G
def find_initial_velocity(origin_pos, target_pos, wanted_height):
    """
    Return initial velocity to apply to a ball to reach a target from an origin position and a specified height.

    Process initial velocity in world coordinates to apply on a ball. The specified height is taken in account for the
    ball trajectory. If target and origin y sign values are different, the wanted height will be on y = 0 (net
    position). Else, the wanted height will be on the middle of trajectory, or at apogee trajectory during a
    vertical throw.

    :param pygame.Vector3 origin_pos: origin position, the point where the ball will be thrown from
    :param pygame.Vector3 target_pos: the desired target position the ball will reach
    :param float wanted_height: height desired on net place, middle of trajectory or at apogee
    :return: velocity to apply to the ball
    :rtype pygame.Vector3:
    """
    # The requested pass-through height must be above both endpoints, otherwise
    # no parabola under gravity can satisfy the constraint.
    assert wanted_height > origin_pos.z
    assert wanted_height > target_pos.z

    if target_pos.x == origin_pos.x and target_pos.y == origin_pos.y:  # vertical throw
        # Pure vertical throw: v0 = sqrt(2*G*h) reaches apogee exactly at h.
        zh = wanted_height - origin_pos.z
        vo_z = sqrt(2 * G * zh)
        return Vector3(0, 0, vo_z)
    else:
        # u vector : unit vector in XY plane from origin to target position
        u = Vector3(target_pos - origin_pos)
        u.z = 0
        u = u.normalize()

        # ut, zt : coordinates of target point in (u, z) ref
        to_vect = (target_pos - origin_pos)
        to_vect.z = 0
        ut = to_vect.length()
        zt = target_pos.z - origin_pos.z

        # uh, zh : coordinates of point above the net in (u, z) ref
        alpha = 0.5
        if origin_pos.y * target_pos.y < 0:  # if target and origin points are not in the same court side
            # Fraction of the horizontal path at which y = 0 (the net) is crossed.
            alpha = abs(origin_pos.y / (target_pos.y - origin_pos.y))
        uh = alpha * ut
        zh = wanted_height - origin_pos.z

        # process initial velocity to apply in (u, z) ref : vo_u, vo_z
        # not trivial equations, from math and physics resolutions
        a = (ut/uh * zh - zt)
        c = G * ut / 2 * (uh - ut)
        delta = -4 * a * c
        vo_u = sqrt(delta) / (2 * a)
        vo_z = zh * (vo_u / uh) + uh / vo_u * G / 2
        # Recombine horizontal (along u) and vertical components in world coordinates.
        return Vector3(vo_u * u + Vector3(0, 0, vo_z))
def find_target_position(origin_pos, initial_velocity, wanted_z=0):
    """
    Find and return target ball position with a specified initial velocity and position.

    :param pygame.Vector3 origin_pos: initial ball position in world coordinates
    :param pygame.Vector3 initial_velocity: initial ball velocity in world coordinates
    :param float wanted_z: specify at which z value target position will be found
    :return: target ball position
    :rtype pygame.Vector3:
    """
    # Direction of flight projected onto the XY plane (zero vector if the
    # throw is purely vertical).
    horizontal_dir = Vector3(initial_velocity)
    horizontal_dir.z = 0
    if horizontal_dir.length_squared() != 0:
        horizontal_dir = horizontal_dir.normalize()
    else:
        horizontal_dir = Vector3()

    # Time of flight until the ball reaches the requested height (descending phase).
    flight_time = get_time_at_z(initial_velocity.z, origin_pos.z, wanted_z)

    # Horizontal distance covered during that time.
    horizontal_dist = flight_time * initial_velocity.dot(horizontal_dir)

    vertical_offset = wanted_z - origin_pos.z
    return horizontal_dist * horizontal_dir + origin_pos + Vector3(0, 0, vertical_offset)
def get_time_polynomial_fun(vz_0, z_0, z_t):
    """
    Get polynomial coefficients and delta value for time equation.

    Form of polynomial function is :
        a * t**2 + b * t + c = 0
    with t = 0 --> initial position

    :param float vz_0: initial vertical velocity ( > 0 for ascending)
    :param float z_0: initial z value
    :param float z_t: target z value
    :return: a, b, c and delta
    :rtype tuple(float):
    """
    # Derived from z(t) = z_0 + vz_0*t - G/2 * t**2 == z_t.
    coef_a = G / 2
    coef_b = -vz_0
    coef_c = z_t - z_0
    discriminant = coef_b ** 2 - 4 * coef_a * coef_c
    return coef_a, coef_b, coef_c, discriminant
def get_time_at_z(vz_0, z_0, z):
    """
    Get time at specific height for a trajectory descending phase.

    :param float vz_0: initial vertical velocity ( > 0 for ascending)
    :param float z_0: initial z value
    :param float z: target z value
    :return: time in sec which when z is reached, None if no solution
    :rtype float:
    """
    coef_a, coef_b, _, discriminant = get_time_polynomial_fun(vz_0, z_0, z)
    if discriminant < 0:
        return None  # the trajectory never reaches the requested height
    # The larger quadratic root corresponds to the descending-phase crossing.
    return (-coef_b + sqrt(discriminant)) / (2 * coef_a)
def get_time_at_y(vy_0, y_0, y):
    """
    Get time at specific y coordinate.

    :param float vy_0: initial velocity along y axis
    :param float y_0: initial y value
    :param float y: y value at which time is given
    :return: time in sec when y coordinate will be reached, or None if there is no solution
    :rtype: float or None
    """
    if vy_0 == 0:
        return None  # no motion along y: the coordinate is never reached
    return (y - y_0) / vy_0
def get_z_at_y(initial_velocity, initial_position, z_t, y):
    """
    Return the ball height (z) when its trajectory crosses the given y coordinate.

    :param pygame.Vector3 initial_velocity: initial ball velocity in world coordinates
    :param pygame.Vector3 initial_position: initial ball position in world coordinates
    :param float z_t: target z value passed to the time-polynomial helper
    :param float y: y coordinate at which the height is evaluated
    :return: z value when y is reached, or None (implicit) if velocity along y is zero
    :rtype: float or None
    """
    a, b, c, _ = get_time_polynomial_fun(initial_velocity.z, initial_position.z, z_t)
    if initial_velocity.y != 0:
        # Time to reach y with constant horizontal velocity.
        t_y = (y - initial_position.y) / initial_velocity.y
        # z(t) = z0 + vz0*t - G/2*t**2 expressed with a = G/2 and b = -vz0.
        z_at_y = -(a * t_y**2 + b * t_y) + initial_position.z
        return z_at_y
    # Falls through to an implicit None when initial_velocity.y == 0, matching
    # the None contract of get_time_at_y.
def get_x_at_y(origin_pos, initial_velocity, y):
    """
    Return the x coordinate reached when the trajectory crosses the given y.

    :param pygame.Vector3 origin_pos: initial ball position in world coordinates
    :param pygame.Vector3 initial_velocity: initial ball velocity in world coordinates
    :param float y: y coordinate at which x is evaluated
    :return: x value when y is reached, or None if velocity along y is zero
    :rtype: float or None
    """
    if initial_velocity.y == 0:
        return None  # the trajectory never crosses a different y plane
    travel_time = (y - origin_pos.y) / initial_velocity.y
    return origin_pos.x + travel_time * initial_velocity.x
def _are_points_in_same_y_side(p1, p2):
"""
Return True if the 2 given points are on same side (y axis).
usage examples :
>>> p1 = Vector3(0, -5, 0)
>>> p2 = Vector3(0, 10, 0)
>>> _are_points_in_same_y_side(p1, p2)
True
>>> p3 = Vector3(20, -5, 0)
>>> _are_points_in_same_y_side(p1, p3)
False
:param pygame.Vector3 p1: 1st point
:param pygame.Vector3 p2: 2nd point
:return: True if points are in same side
"""
return p1.y * p2.y < 0
def _apply_signed_threshold(value, min_thr=None, max_thr=None):
"""
Apply threshold on signed value.
usage examples :
>>> _apply_signed_threshold(0.678, min_thr=0.5)
0.5
>>> _apply_signed_threshold(-0.678, min_thr=0.5)
-0.5
>>> _apply_signed_threshold(0.678, max_thr=2.0)
0.678
>>> _apply_signed_threshold(20.678, max_thr=2.0)
2.0
>>> _apply_signed_threshold(-20, max_thr=2.0)
-2.0
:param float value: value to threshold
:param float min_thr: nearest threshold from 0
:param float max_thr: farthest threshold from 0
:return: thresholded value
:rtype float:
"""
sat_val = value
if min_thr is not None:
if sat_val < 0:
sat_val = max(sat_val, -min_thr)
else:
sat_val = min(sat_val, min_thr)
if max_thr is not None:
if sat_val < 0:
sat_val = max(sat_val, -max_thr)
else:
sat_val = min(sat_val, max_thr)
return sat_val
|
<gh_stars>0
import numpy as np
import h5py
import random
import tensorflow as tf
from tensorflow.python.keras.layers import Lambda
import tensorflow.python.keras.backend as K
import os
AUTOTUNE = tf.data.experimental.AUTOTUNE
class DataGenerator:
    """
    CropsGenerator takes care to load images from disk and convert, crop and serve them as a K.data.Dataset

    Labels arrive as RLE (run-length encoded) strings, one per defect class,
    and are decoded to binary masks on the fly inside the tf.data pipeline.
    """
    def __init__(self, conf, ImageIds_EncodedPixels):
        # Paths and geometry all come from the configuration object.
        self.train_images_folder = conf.train_images_folder  # path to train images folder
        self.test_images_folder = conf.test_images_folder  # path to test images folder
        self.resources = conf.resources  # path to the resources folder. It contains useful files regarding the dataset
        self.img_w = conf.img_w  # original image width
        self.img_h = conf.img_h  # original image height
        self.img_w_res = conf.img_w_res  # resized (network input) width
        self.img_h_res = conf.img_h_res  # resized (network input) height
        self.crops_w = conf.crops_w  # number of crops to divide image in width
        self.ids_ep = ImageIds_EncodedPixels  # dictionary coming from the digest_train_csv.py method. Actual dataset
        self.train_size = conf.train_size  # fraction of examples used for training
        self.train_id_ep_dict, self.val_id_ep_dict = self.extract_train_val_datasets()  # retrieve dictionaries of training and validation sets
        self.batch_size = conf.batch_size  # training_batch_size
        self.steps_per_train_epoch = len(list(self.train_id_ep_dict.keys())) // self.batch_size
        self.steps_per_val_epoch = len(list(self.val_id_ep_dict.keys())) // self.batch_size
        self.mean_tensor, self.std_tensor = self.get_stats()  # get stats from dataset info

    def get_stats(self):
        """
        Return mean and std from dataset. It has been memorized. If not mean or std have been saved a KeyError is raised.
        :return: (mean, std) numpy arrays cast to the Keras float type
        """
        with h5py.File(os.path.join(self.resources, 'info.h5'), 'r') as h5:
            mean = h5['train_mean'][:].astype(K.floatx())
            std = h5['train_std'][:].astype(K.floatx())
        return mean, std

    def extract_train_val_datasets(self):
        """
        Extract actual dataset from the grouped file from digest_train_csv.py and transform it to a training/validation sets
        Every label is transformed into a matrix 4-depth, and every layer contains its mask
        :return: (train_id_ep, val_id_ep) dicts mapping image id -> list of RLE strings
        """
        # random.seed(0) # give random a seed not to mix train and validation sets in case of resuming
        keys = list(self.ids_ep.keys())
        random.shuffle(keys)
        train_examples = round(len(keys) * self.train_size)
        train_keys = keys[0:train_examples]
        # NOTE(review): the [-1] end of this slice drops the last shuffled key
        # entirely — confirm whether discarding one example is intentional.
        val_keys = keys[train_examples:-1]
        train_id_ep = {}
        val_id_ep = {}

        def convert_ep(k):
            """
            Encode labels in strings to maintain compatibility with tensorflow API. x and y must have
            same type, so we transform the int value -1 of "no mask" into a string one.
            :param k: image id key into self.ids_ep
            :return: list of RLE strings (with '-1' for "no mask")
            """
            encoded_pixels = self.ids_ep[k]
            array_of_ep = list((list(zip(*encoded_pixels))[1]))
            for i, val in enumerate(array_of_ep):
                if isinstance(val, int):
                    array_of_ep[i] = '-1'
            return array_of_ep

        for key in train_keys:
            train_id_ep[key] = convert_ep(key)
        for key in val_keys:
            val_id_ep[key] = convert_ep(key)
        return train_id_ep, val_id_ep

    def create_row(self, pair):
        """
        This function takes care to create a row one pair at a time.

        Builds one flattened (img_w*img_h,) uint8 vector containing a single
        RLE run: zeros, then `length` ones starting at `index`, then zeros.
        :param pair: int32 tensor [index, length] (index is 1-based)
        :return: uint8 tensor — the full flattened row, or a scalar 0 for empty runs
        """
        # retrieve idx, length from pair
        # def unstack(x): return x
        # unstack_layer = Lambda(unstack)
        # index, length = unstack_layer(pair)
        # index, length = pair[0], pair[1]
        index, length = tf.unstack(pair, name='unstack_pair')

        def not_empty_row():
            # subtract 1 from index. It start from 1 :(
            idx = index - 1
            # the idea is to create a 3-parts array: the one that goes from 0 to index, the real mask, and the one that
            # goes after the mask until the end of the row
            # def variable_zeros_shape(length): return tf.zeros(length, dtype=tf.uint8)
            # def variable_ones_shape(length): return tf.ones(length, dtype=tf.uint8)
            # before = Lambda(variable_zeros_shape)(idx)
            before = tf.zeros(idx, dtype=tf.uint8)
            mask_line = tf.ones(length, dtype=tf.uint8)
            after_index = (self.img_w * self.img_h) - (length + idx)
            after = tf.zeros(after_index, dtype=tf.uint8)
            row = tf.concat((before, mask_line, after), axis=0)
            # see if all went well
            # tf.compat.v1.debugging.assert_equal(K.shape(row)[0], K.math.multiply(self.img_w, self.img_h))
            return row

        def empty_row():
            # if row has length == 0 there is no need to compute all that above
            # an empty value is returned so memory is kept safe. The reduce operation will be faster
            return tf.constant(0, dtype="uint8")

        return tf.cond(tf.math.equal(length, 0), empty_row, not_empty_row)

    def rle_to_mask(self, rle_input):
        """
        convert RLE(run length encoding) string to numpy array

        Returns:
            numpy.array: numpy array of the mask
        """
        rows, cols = self.img_h, self.img_w

        def without_mask():
            """
            If layer does not contain any mask it returns an empty one
            :return: (rows, cols) uint8 tensor of zeros
            """
            # def variable_zeros_shape(shape): return tf.zeros(shape, dtype=tf.uint8)
            return tf.zeros((rows, cols), dtype=tf.uint8)
            # return tf.zeros(shape=(rows, cols), dtype="uint8")

        def with_mask():
            """
            If the mask is present we proceed in creating the effective one.

            RLE is an encoding for masks: the mask has the same dimension of the image. The mask is
            stretched along one axis and becomes a img_w*img_h array. RLE contains the index at which the mask starts
            and the length for how long is. So every mask is a tuple with (index_of_mask, mask) and index_of_mask starts
            from 1.
            With this approach, to parallelize the computation, we compute one index-length at a time and per each index
            we create a img_w*img_h array with all zeros but the indexes with the mask.
            A matrix [img_w*img_h, img_w*img_h] is computed.
            After that, we gather the max value per each column in order to obtain the biggest values per each index
            so we can construct the mask back.
            :return: (rows, cols) uint8 mask tensor
            """
            # def rle_string_manager(rle_input):
            #     # divide string into substring containing [index, length, index, length, ...]. A ragged tensor is returned
            #     # since the dimension cannot be fixed for all images
            #     rle = tf.strings.split(input=rle_input, sep=' ', result_type='RaggedTensor', name='ragged_tensor')
            #     # reshape string in order to have pairs of [[index, length], [index, lenght], ...]
            #     rle = tf.reshape(rle, [-1, 2])
            #     # convert string to numbers
            #     rle = tf.strings.to_number(rle, tf.int32, name='convert_str_int32')
            #     return rle
            # since the dimension cannot be fixed for all images
            rle = tf.strings.split(input=rle_input, sep=' ', result_type='RaggedTensor', name='ragged_tensor')
            # reshape string in order to have pairs of [[index, length], [index, lenght], ...]
            rle = tf.reshape(rle, [-1, 2])
            # convert string to numbers
            rle = tf.strings.to_number(rle, tf.int32, name='convert_str_int32')
            # rle = Lambda(rle_string_manager)(rle_input)
            # stacked_masks is a matrix [img_w*img_h, img_w*img_h] that contains a row with a portion of a mask, one
            # for each index
            stacked_masks = tf.map_fn(Lambda(self.create_row), rle, name='create_multiple_images', dtype="uint8")
            # reduce matrix to the original mask dimension in order to retrieve the mask in a "img" shape
            mask = tf.reduce_max(stacked_masks, axis=0)
            # debugging
            # K.compat.v1.debugging.assert_equal(K.shape(mask)[0], K.math.multiply(self.img_h, self.img_w)) # check if the stacked lines are equal to the right dimension
            # reshape mask to the image dimension
            mask = tf.reshape(mask, (cols, rows))
            mask = tf.transpose(mask)
            return mask

        # if there is no mask we only have to return an empty one
        mask = tf.cond(tf.math.not_equal(rle_input, '-1'), with_mask, without_mask)
        return mask

    def process_label(self, x, y):
        """Decode the per-class RLE strings in `y` into a stacked (h, w, classes) mask tensor."""
        # map every mask to the rle_to_mask_func function
        label = tf.map_fn(Lambda(self.rle_to_mask), y, name='stack_rle_strings', dtype="uint8")
        # map_fn function creates a transposed stack of mask channels.
        label = tf.reverse(label, axis=[0])
        label = tf.transpose(label, [1, 2, 0])
        return x, label

    def parse_path(self, path, label):
        """
        Read image from disk and apply a label to it
        :param path: path to one image. This is a K.Tensor and contains a string
        :param label: label passed through unchanged
        :return: (decoded grayscale image, label)
        """
        # read image from disk
        # def read_img(path): return tf.io.read_file(path)
        img = tf.io.read_file(path)
        # img = Lambda(read_img)(path)
        # decode it as jpeg
        img = tf.image.decode_jpeg(img, channels=1)
        return img, label

    def resize_and_norm(self, x, y):
        """Standardize `x` with the dataset mean/std, then resize image and label
        to the network input resolution."""
        x = tf.cast(x, dtype=K.floatx())
        # make the image distant from std deviation of the dataset
        # x = tf.math.subtract(x, self.mean_tensor)
        # x = K.math.divide(x, self.std_tensor)
        x -= self.mean_tensor
        x /= self.std_tensor
        # NOTE(review): tf.image.resize_images is the TF 1.x API (removed in
        # TF 2.x in favor of tf.image.resize) — confirm the targeted TF version.
        x = tf.image.resize_images(x, (self.img_h_res, self.img_w_res), name='reshape_image')
        y = tf.image.resize_images(y, (self.img_h_res, self.img_w_res), name='reshape_label')
        x = tf.cast(x, K.floatx())
        y = tf.cast(y, "uint8")
        # data augmentation
        # img = K.image.random_flip_left_right(img)
        # img = K.image.random_flip_up_down(img)
        return x, y

    def crop_img_and_serve(self, x, y):
        """Split the resized image and its label into `crops_w` side-by-side crops,
        returned as stacked tensors (one entry per crop)."""
        target_height = self.img_h_res
        target_width = self.img_w_res // self.crops_w
        # Left edge of each crop along the width axis.
        top_left_w = tf.range(start=0, limit=self.img_w_res - 1, delta=target_width,
                              dtype="int32")

        def crop(w):
            # Crop image and label identically so they stay aligned.
            crop_x = tf.image.crop_to_bounding_box(x, 0, w, target_height, target_width)
            crop_y = tf.image.crop_to_bounding_box(y, 0, w, target_height, target_width)
            return crop_x, crop_y

        stacked_crops = tf.map_fn(crop, elems=top_left_w, dtype=(K.floatx(), "uint8"), name="stacked_crops")
        return stacked_crops[0], stacked_crops[1]

    def generate_train_set(self):
        """
        Generates the actual dataset. It uses all the functions defined above to read images from disk and create croppings.
        :return: K.data.Dataset over (image crop, mask crop) batches, shuffled and repeated
        """
        parse_path_func = lambda x, y: self.parse_path(x, y)
        process_label_func = lambda x, y: self.process_label(x, y)
        resize_func = lambda x, y: self.resize_and_norm(x, y)
        crops_func = lambda x, y: self.crop_img_and_serve(x, y)
        # Keep only crops that contain at least one mask pixel.
        # NOTE(review): this is the opposite of generate_val_set's filter
        # (which keeps mask-free crops) — confirm which behavior is intended.
        filter_func = lambda x, y: K.any(y)
        batch_size = self.batch_size
        n_el = len(list(self.train_id_ep_dict.keys()))
        ids = []
        labels = []
        for k, v in self.train_id_ep_dict.items():
            ids.append(os.path.join(self.train_images_folder, k))
            labels.append(v)
        # id_tensor = K.constant(ids, dtype=tf.string, shape=([n_el]))
        # label_tensor = K.constant(labels, dtype=tf.string, shape=(n_el, 4))
        id_tensor = ids
        label_tensor = labels
        return (tf.data.Dataset.from_tensor_slices((id_tensor, label_tensor))
                .shuffle(buffer_size=n_el)
                .map(parse_path_func, num_parallel_calls=AUTOTUNE)
                .map(process_label_func, num_parallel_calls=AUTOTUNE)
                .map(resize_func, num_parallel_calls=AUTOTUNE)
                .map(crops_func, num_parallel_calls=AUTOTUNE)  # create crops of image to enlarge output
                .flat_map(
                    lambda x, y: tf.data.Dataset.from_tensor_slices((x, y)))  # serve crops as new dataset to flat_map array
                .filter(filter_func)
                .batch(batch_size)  # defined batch_size
                .prefetch(AUTOTUNE)  # number of batches to be prefetch.
                .repeat()  # repeats the dataset when it is finished
                )

    def generate_val_set(self):
        """
        Generates the actual dataset. It uses all the functions defined above to read images from disk and create croppings.
        :return: K.data.Dataset over validation (image crop, mask crop) batches
        """
        parse_path_func = lambda x, y: self.parse_path(x, y)
        process_label_func = lambda x, y: self.process_label(x, y)
        resize_func = lambda x, y: self.resize_and_norm(x, y)
        crops_func = lambda x, y: self.crop_img_and_serve(x, y)
        # NOTE(review): keeps only crops with NO mask pixels — the inverse of
        # generate_train_set's filter; confirm this is intentional.
        filter_func = lambda x, y: K.equal(K.any(y), False)
        batch_size = self.batch_size
        n_el = len(list(self.val_id_ep_dict.keys()))
        ids = []
        labels = []
        for k, v in self.val_id_ep_dict.items():
            ids.append(os.path.join(self.train_images_folder, k))
            labels.append(v)
        id_tensor = K.constant(ids, dtype=tf.string, shape=([n_el]))
        label_tensor = K.constant(labels, dtype=tf.string, shape=(n_el, 4))
        return (tf.data.Dataset.from_tensor_slices((id_tensor, label_tensor))
                .shuffle(buffer_size=n_el)
                .map(parse_path_func, num_parallel_calls=AUTOTUNE)
                .map(process_label_func, num_parallel_calls=AUTOTUNE)  # create actual one_crop
                .map(resize_func, num_parallel_calls=AUTOTUNE)  # create actual one_crop
                .map(crops_func, num_parallel_calls=AUTOTUNE)  # create crops of image to enlarge output
                .flat_map(
                    lambda x, y: tf.data.Dataset.from_tensor_slices((x, y)))  # serve crops as new dataset to flat_map array
                .filter(filter_func)
                .batch(batch_size)  # defined batch_size
                .prefetch(AUTOTUNE)  # number of batches to be prefetch.
                .repeat()  # repeats the dataset when it is finished
                )
# # UNCOMMENT ADDITION AND DIVISION PER MEAN AND STD BEFORE TRY TO SEE IMAGES
if __name__ == '__main__':
    # Smoke test: build the training pipeline eagerly and visualize one crop + its masks.
    from config import conf
    from Dataset.digest_train_csv import Digestive
    from PIL import Image
    tf.compat.v1.enable_eager_execution()

    # visualize steel image with four classes of faults in seperate columns
    grouped_ids = Digestive(conf).masks_at_least()
    data_reader = DataGenerator(conf, grouped_ids)
    training_set = data_reader.generate_train_set()
    # iter = training_set.make_initializable_iterator()
    # x, labels = iter.get_next()
    # with tf.device('/gpu:0'):
    #     with tf.Session() as sess:
    #         sess.run(iter.initializer)
    #         # returns a batch of images
    #         img, label = sess.run([x, labels])
    for img, label in training_set:
        n_image = 0  # which element of the batch to inspect
        img = img.numpy()
        label = label.numpy()
        img = img[n_image]
        label = label[n_image]
        # identify which layer has mask
        label_index = []
        for i in range(label.shape[-1]):
            label_index.append(label[..., i].any())
        # viz_steel_img_mask(img, label)
        print("img shape: {}".format(img.shape))
        print("label shape: {}".format(label.shape))
        print(label_index)
        img = np.array(img, dtype=K.floatx()).squeeze(axis=-1)
        masks = label
        masks = np.array(masks, dtype=np.uint8)
        # Rescale for visibility: normalized pixel values are tiny; masks are 0/1.
        img *= 1000
        masks *= 255
        Image.fromarray(img).show()
        Image.fromarray(masks).show()
        break
|
<reponame>surveybott/psiTurk<filename>tests/conftest.py
from __future__ import print_function
# https://docs.pytest.org/en/latest/fixture.html#using-fixtures-from-classes-modules-or-projects
from builtins import object
import pytest
import os
import sys
import pickle
import json
import datetime
import dateutil.parser
import ciso8601
import boto3
from botocore.stub import Stubber
import shutil
from distutils import dir_util, file_util
from faker import Faker
from importlib import reload
@pytest.fixture(autouse=True)
def bork_aws_environ():
    """Point boto3 at dummy AWS credentials so no test can reach a real account."""
    fake_env = {
        'AWS_ACCESS_KEY_ID': 'foo',
        'AWS_SECRET_ACCESS_KEY': 'bar',
        'AWS_DEFAULT_REGION': 'us-west-2',
    }
    os.environ.update(fake_env)
    # A locally configured AWS_PROFILE would override the fake keys -- drop it.
    os.environ.pop('AWS_PROFILE', None)
    yield
@pytest.fixture()
def edit_config_file():
    """Yield a helper that rewrites config.txt, replacing `find` with `replace`."""
    def do_it(find, replace):
        with open('config.txt', 'r') as fh:
            contents = fh.read()
        with open('config.txt', 'w') as fh:
            fh.write(contents.replace(find, replace))
    yield do_it
@pytest.fixture(scope='function', autouse=True)
def experiment_dir(tmpdir, bork_aws_environ, edit_config_file):
    """Create a throwaway psiturk example project in tmpdir and chdir into it.

    Depends on bork_aws_environ so setup never sees real AWS credentials.
    Tears down by removing the generated project directory.
    """
    # pytest.set_trace()
    os.chdir(tmpdir)
    import psiturk.setup_example as se
    se.setup_example()
    # disable the ad server so tests do not need a live psiturk ad account
    edit_config_file('use_psiturk_ad_server = true', 'use_psiturk_ad_server = false')
    # os.chdir('psiturk-example') # the setup script already chdirs into here, although I don't like that it does that
    yield
    os.chdir('..')
    shutil.rmtree('psiturk-example')
@pytest.fixture(autouse=True)
def db_setup(mocker, experiment_dir, tmpdir, request):
    """Reload psiturk's db/model modules and (re)create a fresh database.

    The reload order matters: the SQLAlchemy metadata must be cleared before
    psiturk.models is reloaded, otherwise stale table definitions from a
    previous test leak into the new metadata.
    """
    import psiturk.db
    reload(psiturk.db)
    import psiturk.models
    psiturk.models.Base.metadata.clear()
    reload(psiturk.models)
    from psiturk.db import init_db
    init_db()
    yield
#############
# amt-related fixtures
##############
@pytest.fixture(scope='function')
def client():
    """A plain boto3 MTurk client (stubbed by the `stubber` fixture)."""
    return boto3.client('mturk')
@pytest.fixture(scope='function')
def stubber(client):
    """Activate a botocore Stubber around the mturk client for one test."""
    stub = Stubber(client)
    stub.activate()
    yield stub
    stub.deactivate()
@pytest.fixture()
def amt_services_wrapper(patch_aws_services):
    """A freshly reloaded MTurkServicesWrapper wired to the stubbed client."""
    import psiturk.amt_services_wrapper
    reload(psiturk.amt_services_wrapper)
    return psiturk.amt_services_wrapper.MTurkServicesWrapper()
@pytest.fixture(scope='function')
def patch_aws_services(client, mocker):
    """Wire psiturk's MTurkServices onto the stubbed boto3 client.

    Patches credential verification to always succeed and replaces the real
    mturk connection with the `client` fixture so no AWS calls leave the
    test process.
    """
    import psiturk.amt_services_wrapper
    import psiturk.amt_services
    def setup_mturk_connection(self):
        # inject the stubbed client instead of building a real boto3 session
        self.mtc = client
        return True
    mocker.patch.object(psiturk.amt_services.MTurkServices,
                        'verify_aws_login', lambda *args, **kwargs: True)
    mocker.patch.object(psiturk.amt_services.MTurkServices,
                        'setup_mturk_connection', setup_mturk_connection)
    my_amt_services = psiturk.amt_services.MTurkServices(
        '', '', is_sandbox=True)
    mocker.patch.object(
        psiturk.amt_services_wrapper.MTurkServicesWrapper, 'amt_services', my_amt_services)
@pytest.fixture(scope='session')
def faker():
    """Session-wide Faker instance used to generate fake ids."""
    return Faker()
@pytest.fixture()
def stubber_prepare_create_hit(stubber, helpers, faker):
    """Return a helper that queues the stubbed responses for one HIT creation."""
    def do_it(with_hit_id=None):
        hit_id = with_hit_id if with_hit_id else faker.md5(raw_output=False)
        stubber.add_response(
            'create_hit_type', helpers.get_boto3_return('create_hit_type.json'))
        create_response = helpers.get_boto3_return(
            'create_hit_with_hit_type.json')
        # the canned response always carries the same HITId
        # (3XJOUITW8URHJMX7F00H20LGRIAQTX) -- overwrite it with ours
        create_response['HIT']['HITId'] = hit_id
        stubber.add_response('create_hit_with_hit_type', create_response)
    return do_it
@pytest.fixture()
def create_dummy_hit(stubber_prepare_create_hit, amt_services_wrapper):
    """Factory fixture: queue stubbed AMT responses and create one HIT.

    The inner helper now returns the wrapper's result so tests can assert on
    it (previously the result was computed and silently discarded).
    """
    def do_it(with_hit_id=None, **kwargs):
        stubber_prepare_create_hit(with_hit_id)
        result = amt_services_wrapper.create_hit(1, 0.01, 1, **kwargs)
        return result
    return do_it
@pytest.fixture()
def create_dummy_assignment(faker):
    """Factory fixture: insert a Participant row with fake worker/hit/assignment ids.

    `participant_attributes` entries override the generated defaults.
    """
    from psiturk.db import db_session, init_db
    from psiturk.models import Participant

    def do_it(participant_attributes=None):
        # A mutable default argument ({}) would be shared across calls;
        # use None as the "no overrides" sentinel instead.
        overrides = participant_attributes if participant_attributes else {}
        defaults = {
            'workerid': faker.md5(raw_output=False),
            'hitid': faker.md5(raw_output=False),
            'assignmentid': faker.md5(raw_output=False),
        }
        # overrides win over the generated defaults
        merged = {**defaults, **overrides}
        init_db()
        participant = Participant(**merged)
        db_session.add(participant)
        db_session.commit()
        return participant

    return do_it
@pytest.fixture()
def list_hits(stubber, helpers, amt_services_wrapper):
    '''
    Returns two hit_ids:
        3BFNCI9LYKQ2ENUY4MLKKW0NSU437W
        3XJOUITW8URHJMX7F00H20LGRIAQTX
    '''
    def do_it(hits_json=None, all_studies=False, active=False):
        payload = hits_json if hits_json else helpers.get_boto3_return('list_hits.json')
        stubber.add_response('list_hits', payload)
        # pick the wrapper call depending on whether only active HITs are wanted
        fetch = (amt_services_wrapper.get_active_hits if active
                 else amt_services_wrapper.get_all_hits)
        return (fetch(all_studies=all_studies)).data
    return do_it
@pytest.fixture()
def expire_a_hit():
    """Return a helper that pushes one HIT's Expiration 10 hours into the past."""
    def do_it(hits_json, index_of_hit_to_expire=0):
        now_utc = datetime.datetime.now(datetime.timezone.utc)
        past = now_utc - datetime.timedelta(hours=10)
        hits_json['HITs'][index_of_hit_to_expire]['Expiration'] = past
        return hits_json
    return do_it
@pytest.fixture()
def activate_a_hit():
    """Return a helper that pushes one HIT's Expiration 10 hours into the future."""
    def do_it(hits_json, index_of_hit_to_be_active=1):
        now_utc = datetime.datetime.now(datetime.timezone.utc)
        future = now_utc + datetime.timedelta(hours=10)
        hits_json['HITs'][index_of_hit_to_be_active]['Expiration'] = future
        return hits_json
    return do_it
class Helpers(object):
    """Static test helpers exposed to tests via the `helpers` fixture."""

    @staticmethod
    def get_boto3_return(name):
        """Load a canned boto3 response from tests/boto3-returns/<name>.

        JSON files get their ISO-8601 string values parsed into datetimes;
        pickle files are loaded as-is.
        """
        # https://docs.python.org/3/library/datetime.html#datetime.datetime.fromisoformat
        def date_hook(json_dict):
            # Try to parse every value as a datetime; leave values that do
            # not parse untouched.
            for (key, value) in list(json_dict.items()):
                try:
                    # json_dict[key] = dateutil.parser.parse(value)
                    # json_dict[key] = datetime.datetime.fromisoformat(value)
                    # json_dict[key] = datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S%Z")
                    json_dict[key] = ciso8601.parse_datetime(value)
                # `except Exception` instead of a bare `except:` so that
                # KeyboardInterrupt/SystemExit are never swallowed here.
                except Exception:
                    if key == 'Expiration':
                        # an unparseable Expiration would break HIT-expiry
                        # tests -- surface it instead of skipping
                        print(key)
                        raise
            return json_dict

        filepath = os.path.join(
            *[os.path.dirname(os.path.realpath(__file__)), 'boto3-returns', name])
        with open(filepath, 'rb') as infile:
            if filepath.endswith('.pickle'):
                return pickle.load(infile, encoding='latin1')
            elif filepath.endswith('.json'):
                data = json.load(infile, object_hook=date_hook)
                # print(data['HITs'][0])
                return data
@pytest.fixture(scope='session')
def helpers():
    """Expose the Helpers class (static methods only) to all tests."""
    return Helpers
|
<gh_stars>0
#!usr/bin/env python3
import json
import ssl
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from urllib.request import urlopen
User = namedtuple('User', 'login name joined')
def user_info(login):
    """Fetch a GitHub user's login, display name and join date.

    NOTE(review): certificate verification is intentionally disabled here;
    acceptable for a demo script, never do this for production traffic.
    """
    _unverified_https_context = ssl._create_unverified_context()
    url = 'https://api.github.com/users/{}'.format(login)
    # Close the HTTP response deterministically (the original leaked it).
    with urlopen(url, context=_unverified_https_context) as fp:
        reply = json.load(fp)
    joined = datetime.strptime(reply['created_at'], '%Y-%m-%dT%H:%M:%SZ')
    return User(login, reply['name'], joined)
def users_info(logins):
    """Sequentially fetch user information for every login."""
    return list(map(user_info, logins))
def users_info_thr(logins):
    """Fetch user information for several users concurrently via a thread pool."""
    with ThreadPoolExecutor() as pool:
        futures = [pool.submit(user_info, login) for login in logins]
        return [future.result() for future in futures]
if __name__ == '__main__':
    # Sample logins used for the timing experiments recorded in the
    # comments below (run interactively via %time / %prun).
    logins = [
        'ariannasg',
        'kisenshi',
        'tebeka',
        'mattwillo',
        'michaelcullum',
    ]
# when using prun we can see that we spend most time on I/O doing socket
# operations. we can also see this when calling time and realising the big
# diff between the CPU time and the Wall time.
# by using threads, we reduced the time of the execution from 3.6 s to 517 ms!
# In [24]: %run src/using_threads.py
#
# In [25]: %time users_info(logins)
# CPU times: user 22.4 ms, sys: 8.82 ms, total: 31.2 ms
# Wall time: 3.6 s
# Out[25]:
# [User(login='ariannasg', name='<NAME>', joined=datetime.datetime(2014, 1, 13, 10, 57, 19)),
# User(login='kisenshi', name='<NAME>', joined=datetime.datetime(2015, 1, 4, 13, 32, 26)),
# User(login='tebeka', name='<NAME>', joined=datetime.datetime(2009, 5, 22, 21, 46, 45)),
# User(login='mattwillo', name='<NAME>', joined=datetime.datetime(2011, 4, 14, 10, 37, 51)),
# User(login='michaelcullum', name='<NAME>', joined=datetime.datetime(2010, 2, 26, 22, 13, 10))]
#
# In [26]: %prun -l 10 users_info(logins)
# 12470 function calls (12465 primitive calls) in 2.400 seconds
#
# Ordered by: internal time
# List reduced from 244 to 10 due to restriction <10>
#
# ncalls tottime percall cumtime percall filename:lineno(function)
# 10 0.671 0.067 0.671 0.067 {method 'read' of '_ssl._SSLSocket' objects}
# 5 0.636 0.127 0.636 0.127 {method 'do_handshake' of '_ssl._SSLSocket' objects}
# 5 0.593 0.119 0.593 0.119 {method 'connect' of '_socket.socket' objects}
# 5 0.478 0.096 0.478 0.096 {built-in method _socket.getaddrinfo}
# 50 0.002 0.000 0.004 0.000 request.py:444(add_handler)
# 5 0.001 0.000 0.001 0.000 {built-in method _scproxy._get_proxies}
# 20 0.001 0.000 0.001 0.000 {built-in method __new__ of type object at 0x103959b60}
# 50 0.001 0.000 0.001 0.000 {built-in method builtins.dir}
# 1760 0.001 0.000 0.001 0.000 {method 'find' of 'str' objects}
# 1745 0.001 0.000 0.001 0.000 {method 'startswith' of 'str' objects}
#
# In [27]: %time users_info_thr(logins)
# CPU times: user 22.5 ms, sys: 4.97 ms, total: 27.4 ms
# Wall time: 517 ms
# Out[27]:
# [User(login='ariannasg', name='<NAME>', joined=datetime.datetime(2014, 1, 13, 10, 57, 19)),
# User(login='kisenshi', name='<NAME>', joined=datetime.datetime(2015, 1, 4, 13, 32, 26)),
# User(login='tebeka', name='<NAME>', joined=datetime.datetime(2009, 5, 22, 21, 46, 45)),
# User(login='mattwillo', name='<NAME>', joined=datetime.datetime(2011, 4, 14, 10, 37, 51)),
# User(login='michaelcullum', name='<NAME>', joined=datetime.datetime(2010, 2, 26, 22, 13, 10))]
|
# Copyright (c) 2020, 2021, Oracle and/or its affiliates.
#
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
#
from utils import tutil
from utils import kutil
from .cluster_t import check_all
import logging
from utils.tutil import g_full_log
from utils.optesting import COMMON_OPERATOR_ERRORS
class ClusterVolume(tutil.OperatorTest):
    """
    cluster volumes

    End-to-end check that a datadirVolumeClaimTemplate given in an
    InnoDBCluster spec is propagated to the InnoDBCluster object, the
    StatefulSet and each pod's PersistentVolumeClaim.
    """
    default_allowed_op_errors = COMMON_OPERATOR_ERRORS

    @classmethod
    def setUpClass(cls):
        cls.logger = logging.getLogger(__name__+":"+cls.__name__)
        super().setUpClass()
        # capture the mysql logs of all four cluster members for diagnostics
        g_full_log.watch_mysql_pod(cls.ns, "mycluster-0")
        g_full_log.watch_mysql_pod(cls.ns, "mycluster-1")
        g_full_log.watch_mysql_pod(cls.ns, "mycluster-2")
        g_full_log.watch_mysql_pod(cls.ns, "mycluster-3")

    @classmethod
    def tearDownClass(cls):
        # stop watches in reverse order of setUpClass
        g_full_log.stop_watch(cls.ns, "mycluster-3")
        g_full_log.stop_watch(cls.ns, "mycluster-2")
        g_full_log.stop_watch(cls.ns, "mycluster-1")
        g_full_log.stop_watch(cls.ns, "mycluster-0")
        super().tearDownClass()

    def look_up_volume_mount(self, containers, container_name):
        """Return the 'datadir' volumeMount of the named container, or None."""
        # look for e.g. json-path: spec.containers[1].volumeMounts[0]
        for container in containers:
            if container["name"] == container_name:
                volume_mounts = container["volumeMounts"]
                for volume_mount in volume_mounts:
                    if volume_mount["name"] == "datadir":
                        return volume_mount
        return None

    def check_ic_datadir(self, icname):
        """Verify the datadir claim template stored on the InnoDBCluster object."""
        icobj = kutil.get_ic(self.ns, icname)
        datadir_spec = icobj["spec"]["datadirVolumeClaimTemplate"]
        self.assertEqual(datadir_spec["accessModes"], ["ReadWriteOnce"])
        self.assertEqual(datadir_spec["resources"]["requests"]["storage"], "3Gi")

    def check_sts_datadir(self, stsname):
        """Verify datadir mounts and the PVC template on the StatefulSet."""
        sts = kutil.get_sts(self.ns, stsname)
        template_spec = sts["spec"]["template"]["spec"]
        # json-path: spec.template.spec.containers[1].volumeMounts[0].name
        #     "name": "mysql",
        #     [...]
        #     "volumeMounts": [
        #     [...]
        #         {
        #             "mountPath": "/var/lib/mysql",
        #             "name": "datadir"
        #         },
        containers = template_spec["containers"]
        volume_mount = self.look_up_volume_mount(containers, "mysql")
        self.assertIsNotNone(volume_mount, "datadir mount not found")
        self.assertEqual(volume_mount["mountPath"], "/var/lib/mysql")
        # json-path: spec.template.spec.initContainers[0].volumeMounts[1].name
        #     "name": "initconf",
        #     [...]
        #     "volumeMounts": [
        #     [...]
        #         {
        #             "mountPath": "/var/lib/mysql",
        #             "name": "datadir"
        #         },
        init_containers = template_spec["initContainers"]
        volume_mount = self.look_up_volume_mount(init_containers, "initconf")
        self.assertIsNotNone(volume_mount, "datadir mount not found")
        self.assertEqual(volume_mount["mountPath"], "/var/lib/mysql")
        volume_mount = self.look_up_volume_mount(init_containers, "initmysql")
        self.assertIsNotNone(volume_mount, "datadir mount not found")
        self.assertEqual(volume_mount["mountPath"], "/var/lib/mysql")
        # json-path: spec.volumeClaimTemplates[0]
        #     [...]
        #     "volumeClaimTemplates": [
        #         {
        #             "apiVersion": "v1",
        #             "kind": "PersistentVolumeClaim",
        #             "metadata": {
        #                 "creationTimestamp": null,
        #                 "name": "datadir"
        #             },
        #     [...]
        volume_templates = sts["spec"]["volumeClaimTemplates"]
        pvc_template_found = False
        for volume_template in volume_templates:
            if volume_template["metadata"]["name"] == "datadir":
                self.assertEqual(volume_template["kind"], "PersistentVolumeClaim")
                volume_template_spec = volume_template["spec"]
                self.assertEqual(volume_template_spec["accessModes"], ["ReadWriteOnce"])
                self.assertEqual(volume_template_spec["resources"]["requests"]["storage"], "3Gi")
                self.assertEqual(volume_template_spec["volumeMode"], "Filesystem")
                pvc_template_found = True
                break
        self.assertTrue(pvc_template_found, "datadir volume claim template not found")

    def check_pod_datadir(self, podname):
        """Verify datadir mounts and the bound PVC on an individual pod."""
        pod = kutil.get_po(self.ns, podname)
        spec = pod["spec"]
        # json-path: spec.containers[1].volumeMounts[0].name
        #     "name": "mysql",
        #     [...]
        #     "volumeMounts": [
        #     [...]
        #         {
        #             "mountPath": "/var/lib/mysql",
        #             "name": "datadir"
        #         },
        containers = spec["containers"]
        volume_mount = self.look_up_volume_mount(containers, "mysql")
        self.assertIsNotNone(volume_mount, "datadir mount not found")
        self.assertEqual(volume_mount["mountPath"], "/var/lib/mysql")
        # json-path: spec.initContainers[0].volumeMounts[1].name
        #     "name": "initconf",
        #     [...]
        #     "volumeMounts": [
        #     [...]
        #         {
        #             "mountPath": "/var/lib/mysql",
        #             "name": "datadir"
        #         },
        init_containers = spec["initContainers"]
        volume_mount = self.look_up_volume_mount(init_containers, "initconf")
        self.assertIsNotNone(volume_mount, "datadir mount not found")
        self.assertEqual(volume_mount["mountPath"], "/var/lib/mysql")
        volume_mount = self.look_up_volume_mount(init_containers, "initmysql")
        self.assertIsNotNone(volume_mount, "datadir mount not found")
        self.assertEqual(volume_mount["mountPath"], "/var/lib/mysql")
        # json-path: spec.volumes[0].persistentVolumeClaim.claimName
        #     [...]
        #     "volumes": [
        #     [...]
        #         {
        #             "name": "datadir",
        #             "persistentVolumeClaim": {
        #                 "claimName": "datadir-mycluster-0"
        #             }
        #         },
        pvc_found = False
        volumes = spec["volumes"]
        for volume in volumes:
            if volume["name"] == "datadir":
                # per-pod claim name is "datadir-<podname>"
                self.assertEqual(volume["persistentVolumeClaim"]["claimName"], f"datadir-{podname}")
                pvc_found = True
                break
        self.assertTrue(pvc_found, "datadir volume not found")

    def test_0_create_with_datadir(self):
        """Create a 4-instance cluster with a 3Gi datadir claim and verify it."""
        kutil.create_default_user_secrets(self.ns)
        # NOTE(review): SOURCE lost the yaml string's indentation; the layout
        # below is the conventional reconstruction -- confirm against VCS.
        yaml = """
apiVersion: mysql.oracle.com/v2alpha1
kind: InnoDBCluster
metadata:
  name: mycluster
spec:
  instances: 4
  router:
    instances: 1
  secretName: mypwds
  datadirVolumeClaimTemplate:
    accessModes: [ "ReadWriteOnce" ]
    resources:
      requests:
        storage: 3Gi
"""
        kutil.apply(self.ns, yaml)
        self.wait_pod("mycluster-0", "Running")
        self.wait_pod("mycluster-1", "Running")
        self.wait_pod("mycluster-2", "Running")
        self.wait_pod("mycluster-3", "Running")
        self.wait_ic("mycluster", "ONLINE", 4)
        # self.wait_routers("mycluster-router-*", 1)
        # check_all(self, self.ns, "mycluster", instances=4, routers=1, primary=0)
        self.check_ic_datadir("mycluster")
        self.check_sts_datadir("mycluster")
        self.check_pod_datadir("mycluster-0")
        self.check_pod_datadir("mycluster-1")
        self.check_pod_datadir("mycluster-2")
        self.check_pod_datadir("mycluster-3")

    def test_9_destroy(self):
        """Delete the cluster and wait for all resources to disappear."""
        kutil.delete_ic(self.ns, "mycluster")
        self.wait_pod_gone("mycluster-3")
        self.wait_pod_gone("mycluster-2")
        self.wait_pod_gone("mycluster-1")
        self.wait_pod_gone("mycluster-0")
        self.wait_ic_gone("mycluster")
        kutil.delete_secret(self.ns, "mypwds")
|
<gh_stars>0
#!/usr/bin/env python
import numpy as np
import glob
import telescope_1d
# Average the simulated uv-plane power spectra over all 30 realisations of
# each telescope configuration found in out/, writing results to outp/.
flist = glob.glob('out/*_*_*_0_*_*.npy')
#flist = glob.glob('out/16_4096_*_0_*_*.npy')
#flist += glob.glob('out/20_4096_*_0_*_*.npy')
for fname in flist:
    # file name layout: out/<ndishes>_<npix>_<redstr>_<seed>_<sigt>_<te>.npy
    parts = fname.replace('.npy', '').replace('out/', '').split('_')
    ndishes = int(parts[0])
    npix = int(parts[1])
    redstr = parts[2]
    redundant = (redstr == 'red')
    sigt = parts[4]
    te = float(parts[5])
    if te == 0.0:
        # render 0.0 as the integer 0 so the glob below matches the file names
        te = int(te)
    newglobname = f"out/{ndishes}_{npix}_{redstr}_*_{sigt}_{te}.npy"
    outname = f"outp/{ndishes}_{npix}_{redstr}_{sigt}_{te}"
    sublist = glob.glob(newglobname)
    print(outname, len(sublist), redundant)
    # (removed a no-op `redundant == redstr=='red'` comparison statement)
    # only average configurations for which all 30 realisations are present
    if len(sublist) == 30:
        t = None
        for Nfreqchunks in [1, 2, 4]:
            lps = []
            lpsf = []
            lpsfx = []
            lpsf1 = []
            lpsf1x = []
            for subname in sublist:
                print(subname)
                uvplane, uvplane_f, uvplane_f1 = np.load(subname)
                if t is None:
                    # the telescope only depends on the configuration; build it once
                    t = telescope_1d.Telescope1D(Ndishes=ndishes, Npix_fft=npix, Nfreq=512, redundant=redundant, seed=22)
                ps, k_modes, baselines_binned = t.get_uvplane_ps(uvplane, Nfreqchunks=Nfreqchunks, m_baselines=1, m_freq=1, padding=1, window_fn=np.blackman)
                psf, k_modes, baselines_binned = t.get_uvplane_ps(uvplane_f, Nfreqchunks=Nfreqchunks, m_baselines=1, m_freq=1, padding=1, window_fn=np.blackman)
                psfx, k_modes, baselines_binned = t.get_uvplane_ps(uvplane_f, uvplane, Nfreqchunks=Nfreqchunks, m_baselines=1, m_freq=1, padding=1, window_fn=np.blackman)
                psf1, k_modes, baselines_binned = t.get_uvplane_ps(uvplane_f1, Nfreqchunks=Nfreqchunks, m_baselines=1, m_freq=1, padding=1, window_fn=np.blackman)
                # BUGFIX: psf1x previously recomputed the (uvplane_f, uvplane)
                # cross spectrum (a duplicate of psfx); cross uvplane_f1 instead.
                psf1x, k_modes, baselines_binned = t.get_uvplane_ps(uvplane_f1, uvplane, Nfreqchunks=Nfreqchunks, m_baselines=1, m_freq=1, padding=1, window_fn=np.blackman)
                lps.append(ps)
                lpsf.append(psf)
                lpsfx.append(psfx)
                lpsf1.append(psf1)
                # BUGFIX: psf1 was appended twice; store the cross spectrum here.
                lpsf1x.append(psf1x)
            lps = np.array(lps).mean(axis=0)
            lpsf = np.array(lpsf).mean(axis=0)
            lpsfx = np.array(lpsfx).mean(axis=0)
            lpsf1 = np.array(lpsf1).mean(axis=0)
            lpsf1x = np.array(lpsf1x).mean(axis=0)
            np.save(outname+f'_{Nfreqchunks}.npy', (lps, lpsf, lpsfx, lpsf1, lpsf1x))
            np.save(outname+f'_{Nfreqchunks}_kmodes.npy', k_modes)
            np.save(outname+f'_{Nfreqchunks}_baselines.npy', baselines_binned)
|
from functools import partial
from unittest.mock import Mock
from unittest.mock import patch
import numpy as np
import pytest
class TestPrintLog:
    """Tests for skorch's PrintLog callback (tabular epoch logging)."""

    @pytest.fixture
    def print_log_cls(self):
        # partial so tests can still pass extra kwargs (e.g. a fresh sink)
        from skorch.callbacks import PrintLog
        keys_ignored = ['dur', 'event_odd']
        return partial(PrintLog, sink=Mock(), keys_ignored=keys_ignored)

    @pytest.fixture
    def print_log(self, print_log_cls):
        return print_log_cls().initialize()

    @pytest.fixture
    def scoring_cls(self):
        from skorch.callbacks import EpochScoring
        return EpochScoring

    @pytest.fixture
    def mse_scoring(self, scoring_cls):
        return scoring_cls(
            'neg_mean_squared_error',
            name='nmse',
        ).initialize()

    @pytest.fixture
    def odd_epoch_callback(self):
        # writes an 'event_odd' history flag on every odd epoch
        from skorch.callbacks import Callback
        class OddEpochCallback(Callback):
            def on_epoch_end(self, net, **kwargs):
                net.history[-1]['event_odd'] = bool(len(net.history) % 2)
        return OddEpochCallback().initialize()

    @pytest.fixture
    def net(self, net_cls, module_cls, train_split, mse_scoring,
            odd_epoch_callback, print_log, data):
        net = net_cls(
            module_cls, batch_size=1, train_split=train_split,
            callbacks=[mse_scoring, odd_epoch_callback], max_epochs=2)
        net.initialize()
        # replace default PrintLog with test PrintLog
        net.callbacks_[-1] = ('print_log', print_log)
        return net.partial_fit(*data)

    @pytest.fixture
    def history(self, net):
        return net.history

    # pylint: disable=unused-argument
    @pytest.fixture
    def sink(self, history, print_log):
        # note: the history fixture is required even if not used because it
        # triggers the calls on print_log
        return print_log.sink

    @pytest.fixture
    def ansi(self):
        from skorch.utils import Ansi
        return Ansi

    def test_call_count(self, sink):
        # header + lines + 2 epochs
        assert sink.call_count == 4

    def test_header(self, sink):
        header = sink.call_args_list[0][0][0]
        columns = header.split()
        expected = ['epoch', 'nmse', 'train_loss', 'valid_loss']
        assert columns == expected

    def test_lines(self, sink):
        lines = sink.call_args_list[1][0][0].split()
        # Lines have length 2 + length of column, or 8 if the column
        # name is short and the values are floats.
        expected = [
            '-' * (len('epoch') + 2),
            '-' * 8,
            '-' * (len('train_loss') + 2),
            '-' * (len('valid_loss') + 2),
        ]
        assert lines
        assert lines == expected

    @pytest.mark.parametrize('epoch', [0, 1])
    def test_first_row(self, sink, ansi, epoch, history):
        row = sink.call_args_list[epoch + 2][0][0]
        items = row.split()
        # epoch, nmse, valid, train
        assert len(items) == 4
        # epoch, starts at 1
        assert items[0] == str(epoch + 1)
        # is best
        are_best = [
            history[epoch, 'nmse_best'],
            history[epoch, 'train_loss_best'],
            history[epoch, 'valid_loss_best'],
        ]
        # test that cycled colors are used if best
        for item, color, is_best in zip(items[1:], list(ansi)[1:], are_best):
            if is_best:
                # if best, text colored
                assert item.startswith(color.value)
                assert item.endswith(ansi.ENDC.value)
            else:
                # if not best, text is only float, so converting possible
                float(item)

    def test_args_passed_to_tabulate(self, history):
        with patch('skorch.callbacks.logging.tabulate') as tab:
            from skorch.callbacks import PrintLog
            print_log = PrintLog(
                tablefmt='latex',
                floatfmt='.9f',
            ).initialize()
            print_log.table(history[-1])
            assert tab.call_count == 1
            assert tab.call_args_list[0][1]['tablefmt'] == 'latex'
            assert tab.call_args_list[0][1]['floatfmt'] == '.9f'

    def test_with_additional_key(self, history, print_log_cls):
        keys_ignored = ['event_odd']  # 'dur' no longer ignored
        print_log = print_log_cls(
            sink=Mock(), keys_ignored=keys_ignored).initialize()
        # does not raise
        print_log.on_epoch_end(Mock(history=history))
        header = print_log.sink.call_args_list[0][0][0]
        columns = header.split()
        expected = ['epoch', 'nmse', 'train_loss', 'valid_loss', 'dur']
        assert columns == expected

    def test_keys_ignored_as_str(self, print_log_cls):
        print_log = print_log_cls(keys_ignored='a-key')
        assert print_log.keys_ignored == ['a-key']
        print_log.initialize()
        # 'batches' is always ignored in addition to the user's keys
        assert print_log.keys_ignored_ == set(['a-key', 'batches'])

    def test_keys_ignored_is_None(self, print_log_cls):
        print_log = print_log_cls(keys_ignored=None)
        assert print_log.keys_ignored is None
        print_log.initialize()
        assert print_log.keys_ignored_ == set(['batches'])

    def test_with_event_key(self, history, print_log_cls):
        print_log = print_log_cls(sink=Mock(), keys_ignored=None).initialize()
        # history has two epochs, write them one by one
        print_log.on_epoch_end(Mock(history=history[:-1]))
        print_log.on_epoch_end(Mock(history=history))
        header = print_log.sink.call_args_list[0][0][0]
        columns = header.split()
        expected = ['epoch', 'nmse', 'train_loss', 'valid_loss', 'odd', 'dur']
        assert columns == expected
        odd_row = print_log.sink.call_args_list[2][0][0].split()
        even_row = print_log.sink.call_args_list[3][0][0].split()
        assert len(odd_row) == 6  # odd row has entries in every column
        assert odd_row[4] == '+'  # including '+' sign for the 'event_odd'
        assert len(even_row) == 5  # even row does not have 'event_odd' entry

    def test_witout_valid_data(
            self, net_cls, module_cls, mse_scoring, print_log, data):
        # NOTE(review): method name has a typo ("witout"); kept to avoid
        # renaming a collected test.
        net = net_cls(
            module_cls, batch_size=1, train_split=None,
            callbacks=[mse_scoring], max_epochs=2)
        net.initialize()
        # replace default PrintLog with test PrintLog
        net.callbacks_[-1] = ('print_log', print_log)
        net.partial_fit(*data)
        sink = print_log.sink
        row = sink.call_args_list[2][0][0]
        items = row.split()
        assert len(items) == 2  # no valid, only epoch and train

    def test_print_not_skipped_if_verbose(self, capsys):
        from skorch.callbacks import PrintLog
        print_log = PrintLog().initialize()
        net = Mock(history=[{'loss': 123}], verbose=1)
        print_log.on_epoch_end(net)
        stdout = capsys.readouterr()[0]
        result = [x.strip() for x in stdout.split()]
        expected = ['loss', '------', '123']
        assert result == expected

    def test_print_skipped_if_not_verbose(self, capsys):
        from skorch.callbacks import PrintLog
        print_log = PrintLog().initialize()
        net = Mock(history=[{'loss': 123}], verbose=0)
        print_log.on_epoch_end(net)
        stdout = capsys.readouterr()[0]
        assert not stdout
class TestProgressBar:
    """Tests for skorch's ProgressBar callback (tqdm integration)."""

    @pytest.fixture
    def progressbar_cls(self):
        from skorch.callbacks import ProgressBar
        return ProgressBar

    @pytest.fixture
    def net_cls(self):
        """very simple network that trains for 2 epochs"""
        from skorch import NeuralNetRegressor
        from skorch.toy import make_regressor
        module_cls = make_regressor(
            input_units=1,
            num_hidden=0,
            output_units=1,
        )
        return partial(
            NeuralNetRegressor,
            module=module_cls,
            train_split=None,
            max_epochs=2,
            batch_size=10)

    @pytest.fixture(scope='module')
    def data(self):
        # 20 samples with batch_size 10 -> 2 batches per epoch
        X = np.zeros((20, 1), dtype='float32')
        y = np.zeros((20, 1), dtype='float32')
        return X, y

    @pytest.mark.parametrize('postfix', [
        [],
        ['train_loss'],
        ['train_loss', 'valid_loss'],
        ['doesnotexist'],
        ['train_loss', 'doesnotexist'],
    ])
    def test_invalid_postfix(self, postfix, net_cls, progressbar_cls, data):
        # fitting must not raise even when postfix keys do not exist
        net = net_cls(callbacks=[
            progressbar_cls(postfix_keys=postfix),
        ])
        net.fit(*data)

    @patch('tqdm.tqdm')
    @pytest.mark.parametrize('scheme,expected_total', [
        ('auto', [2, 2]),
        ('count', [None, 2]),
        (None, [None, None]),
        (2, [2, 2]),  # correct number of batches_per_epoch (20 // 10)
        (3, [3, 3]),  # offset by +1, should still work
        (1, [1, 1]),  # offset by -1, should still work
    ])
    def test_different_count_schemes(
            self, tqdm_mock, scheme, expected_total, net_cls, progressbar_cls, data):
        net = net_cls(callbacks=[
            progressbar_cls(batches_per_epoch=scheme),
        ])
        net.fit(*data)
        # one tqdm bar per epoch
        assert tqdm_mock.call_count == 2
        for i, total in enumerate(expected_total):
            assert tqdm_mock.call_args_list[i][1]['total'] == total
|
<reponame>arassadin/sgpn
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib as mpl
import json
mpl.use('Agg')
############################
## Ths Statistics ##
############################
def Get_Ths(pts_corr, seg, ins, ths, ths_, cnt):
    """Accumulate per-semantic-class similarity thresholds.

    For each point, histogram its similarity scores (pts_corr row) against
    the other points of its own ground-truth instance, then pick the largest
    bin edge that keeps the true-positive rate > 0.5 while minimising false
    positives against same-class points outside the instance.  Chosen edges
    are accumulated into ths/ths_ and counted in cnt, keyed by semantic class.

    Args:
        pts_corr: per-point similarity scores; pts_corr[i] is presumably the
            (N,) similarity of point i to every point -- TODO confirm.
        seg: (N,) semantic labels per point.
        ins: (N,) instance ids per point.
        ths, ths_, cnt: per-class accumulators, updated in place.

    Returns:
        The updated (ths, ths_, cnt).

    NOTE(review): the two branches below are near-duplicates; the else-branch
    only additionally computes and caches the instance membership mask.
    """
    pts_in_ins = {}
    for ip, pt in enumerate(pts_corr):
        if ins[ip] in pts_in_ins.keys():
            # membership mask for this instance already cached
            pts_in_curins_ind = pts_in_ins[ins[ip]]
            pts_notin_curins_ind = (~(pts_in_ins[ins[ip]])) & (seg==seg[ip])
            hist, bin = np.histogram(pt[pts_in_curins_ind], bins=20)
            if seg[ip]==8:
                # debug output for class 8 only
                print(bin)
            numpt_in_curins = np.sum(pts_in_curins_ind)
            numpt_notin_curins = np.sum(pts_notin_curins_ind)
            if numpt_notin_curins > 0:
                tp_over_fp = 0
                ib_opt = -2
                for ib, b in enumerate(bin):
                    if b == 0:
                        break
                    # tp: fraction of in-instance scores below this bin edge
                    tp = float(np.sum(pt[pts_in_curins_ind] < bin[ib])) / float(numpt_in_curins)
                    # fp: fraction of same-class out-of-instance scores below it
                    fp = float(np.sum(pt[pts_notin_curins_ind] < bin[ib])) / float(numpt_notin_curins)
                    if tp <= 0.5:
                        continue
                    if fp == 0. and tp > 0.5:
                        # perfect separation -- take this edge immediately
                        ib_opt = ib
                        break
                    if tp/fp > tp_over_fp:
                        tp_over_fp = tp / fp
                        ib_opt = ib
                # only accept edges with a strong tp/fp ratio
                if tp_over_fp > 4.:
                    ths[seg[ip]] += bin[ib_opt]
                    ths_[seg[ip]] += bin[ib_opt]
                    cnt[seg[ip]] += 1
        else:
            # first point of this instance: compute and cache the mask
            pts_in_curins_ind = (ins == ins[ip])
            pts_in_ins[ins[ip]] = pts_in_curins_ind
            pts_notin_curins_ind = (~(pts_in_ins[ins[ip]])) & (seg==seg[ip])
            hist, bin = np.histogram(pt[pts_in_curins_ind], bins=20)
            if seg[ip]==8:
                # debug output for class 8 only
                print(bin)
            numpt_in_curins = np.sum(pts_in_curins_ind)
            numpt_notin_curins = np.sum(pts_notin_curins_ind)
            if numpt_notin_curins > 0:
                tp_over_fp = 0
                ib_opt = -2
                for ib, b in enumerate(bin):
                    if b == 0:
                        break
                    tp = float(np.sum(pt[pts_in_curins_ind]<bin[ib])) / float(numpt_in_curins)
                    fp = float(np.sum(pt[pts_notin_curins_ind]<bin[ib])) / float(numpt_notin_curins)
                    if tp <= 0.5:
                        continue
                    if fp == 0. and tp > 0.5:
                        ib_opt = ib
                        break
                    if tp / fp > tp_over_fp:
                        tp_over_fp = tp / fp
                        ib_opt = ib
                if tp_over_fp > 4.:
                    ths[seg[ip]] += bin[ib_opt]
                    ths_[seg[ip]] += bin[ib_opt]
                    cnt[seg[ip]] += 1
    return ths, ths_, cnt
##############################
## Merging Algorithms ##
##############################
def GroupMerging(pts_corr, confidence, seg, label_bin):
    """Group points into instance proposals per semantic class and merge them.

    Args:
        pts_corr: per-point similarity scores (pts_corr[i] presumably the
            similarity of point i to every point -- TODO confirm).
        confidence: (N,) per-point confidence; only points > 0.4 seed groups.
        seg: (N,) predicted semantic labels (-1 is ignored).
        label_bin: per-class similarity thresholds (cf. Get_Ths).

    Returns:
        (groupid, refineseg, groupseg): per-point group ids (-1 = ungrouped),
        mode-refined semantic labels per group, and group-id -> class mapping.
    """
    confvalidpts = (confidence>0.4)
    un_seg = np.unique(seg)
    refineseg = -1* np.ones(pts_corr.shape[0])
    groupid = -1* np.ones(pts_corr.shape[0])
    numgroups = 0
    groupseg = {}
    for i_seg in un_seg:
        if i_seg==-1:
            continue
        pts_in_seg = (seg==i_seg)
        valid_seg_group = np.where(pts_in_seg & confvalidpts)
        proposals = []
        if valid_seg_group[0].shape[0]==0:
            # no confident seeds: treat the whole semantic mask as one proposal
            proposals += [pts_in_seg]
        else:
            for ip in valid_seg_group[0]:
                # candidate group: points similar to seed ip within this class
                validpt = (pts_corr[ip] < label_bin[i_seg]) & pts_in_seg
                if np.sum(validpt)>5:
                    flag = False
                    for gp in range(len(proposals)):
                        iou = float(np.sum(validpt & proposals[gp])) / np.sum(validpt|proposals[gp])#uniou
                        validpt_in_gp = float(np.sum(validpt & proposals[gp])) / np.sum(validpt)#uniou
                        if iou > 0.6 or validpt_in_gp > 0.8:
                            # overlaps an existing proposal: keep the larger mask
                            flag = True
                            if np.sum(validpt)>np.sum(proposals[gp]):
                                proposals[gp] = validpt
                            continue
                    if not flag:
                        proposals += [validpt]
            if len(proposals) == 0:
                proposals += [pts_in_seg]
        for gp in range(len(proposals)):
            # only keep proposals with more than 50 points
            if np.sum(proposals[gp])>50:
                groupid[proposals[gp]] = numgroups
                groupseg[numgroups] = i_seg
                numgroups += 1
                refineseg[proposals[gp]] = stats.mode(seg[proposals[gp]])[0]
    # drop groups that ended up with fewer than 50 points
    un, cnt = np.unique(groupid, return_counts=True)
    for ig, g in enumerate(un):
        if cnt[ig] < 50:
            groupid[groupid==g] = -1
    # renumber the surviving groups to a dense 0..k range
    un, cnt = np.unique(groupid, return_counts=True)
    groupidnew = groupid.copy()
    for ig, g in enumerate(un):
        if g == -1:
            continue
        groupidnew[groupid==g] = (ig-1)
        groupseg[(ig-1)] = groupseg.pop(g)
    groupid = groupidnew
    # assign leftover points to the majority group among their similar points
    for ip, gid in enumerate(groupid):
        if gid == -1:
            pts_in_gp_ind = (pts_corr[ip] < label_bin[seg[ip]])
            pts_in_gp = groupid[pts_in_gp_ind]
            pts_in_gp_valid = pts_in_gp[pts_in_gp!=-1]
            if len(pts_in_gp_valid) != 0:
                groupid[ip] = stats.mode(pts_in_gp_valid)[0][0]
    return groupid, refineseg, groupseg
def BlockMerging(volume, volume_seg, pts, grouplabel, groupseg, gap=1e-3):
    """Merge per-block group labels into a global voxel volume.

    Points are voxelised with cell size `gap`; a block's group is matched to
    an existing global group when enough of its points fall into voxels that
    already carry the same semantic label.  Otherwise a new global id is
    allocated.  `volume` and `volume_seg` are updated in place.

    Args:
        volume: voxel grid of global group ids (-1 = empty).
        volume_seg: voxel grid of semantic labels.
        pts: (N, 3) point coordinates of the current block.
        grouplabel: (N,) per-point block-local group ids (-1 = ungrouped).
        groupseg: block-local group id -> semantic label mapping.
        gap: voxel edge length.

    Returns:
        (N,) per-point global group labels (-1 where unassigned).

    NOTE(review): the accumulators are fixed at 100 groups x 300 global ids;
    larger scenes would overflow them (the bare except below papers over the
    index error).
    """
    overlapgroupcounts = np.zeros([100,300])
    groupcounts = np.ones(100)
    # voxel indices of every point
    x=(pts[:,0]/gap).astype(np.int32)
    y=(pts[:,1]/gap).astype(np.int32)
    z=(pts[:,2]/gap).astype(np.int32)
    for i in range(pts.shape[0]):
        xx=x[i]
        yy=y[i]
        zz=z[i]
        if grouplabel[i] != -1:
            # count overlaps with already-filled voxels of the same class
            if volume[xx,yy,zz]!=-1 and volume_seg[xx,yy,zz]==groupseg[grouplabel[i]]:
                try:
                    overlapgroupcounts[grouplabel[i],volume[xx,yy,zz]] += 1
                # NOTE(review): bare except hides out-of-range ids -- see above
                except:
                    print('error')
            groupcounts[grouplabel[i]] += 1
    # best-matching existing global group per block group
    groupcate = np.argmax(overlapgroupcounts,axis=1)
    maxoverlapgroupcounts = np.max(overlapgroupcounts,axis=1)
    curr_max = np.max(volume)
    for i in range(groupcate.shape[0]):
        # too little overlap but enough points: allocate a fresh global id
        if maxoverlapgroupcounts[i]<7 and groupcounts[i]>30:
            curr_max += 1
            groupcate[i] = curr_max
    finalgrouplabel = -1 * np.ones(pts.shape[0])
    for i in range(pts.shape[0]):
        # write only into empty voxels; earlier blocks take precedence
        if grouplabel[i] != -1 and volume[x[i],y[i],z[i]]==-1:
            volume[x[i],y[i],z[i]] = groupcate[grouplabel[i]]
            volume_seg[x[i],y[i],z[i]] = groupseg[grouplabel[i]]
            finalgrouplabel[i] = groupcate[grouplabel[i]]
    return finalgrouplabel
############################
## Evaluation Metrics ##
############################
def eval_3d_perclass(tp, fp, npos):
    """Compute average precision (AP), recall and precision for one class.

    Args:
        tp: per-detection true-positive flags (0/1), in descending score order.
        fp: per-detection false-positive flags (0/1), same order.
        npos: number of ground-truth instances of this class.

    Returns:
        (ap, rec, prec): AP averaged over the 10 recall thresholds 0.0..0.9,
        plus the cumulative recall and precision arrays.
    """
    # `np.float` was removed in NumPy 1.24 -- use the builtin float instead.
    tp = np.asarray(tp).astype(float)
    fp = np.asarray(fp).astype(float)
    tp = np.cumsum(tp)
    fp = np.cumsum(fp)
    rec = tp / npos
    prec = tp / (fp + tp)

    ap = 0.
    # 10-point interpolated AP: best precision at recall >= t for t = 0.0..0.9
    for t in np.arange(0, 1, 0.1):
        prec1 = prec[rec >= t]
        prec1 = prec1[~np.isnan(prec1)]
        if len(prec1) == 0:
            p = 0.
        else:
            p = max(prec1)
        if not p:
            p = 0.
        ap = ap + p / 10
    return ap, rec, prec
############################
## Visualize Results ##
############################
# color_map = json.load(open('part_color_mapping.json', 'r'))
# NOTE: this load is commented out, but the output_* helpers below read the
# module-level `color_map`; it must be defined before they are called.
def output_bounding_box_withcorners(box_corners, seg, out_file):
    """Write dotted bounding-box edges as colored .obj vertices.

    Args:
        box_corners: (n, 8, 3) array of explicit box corner coordinates.
        seg: per-box labels used to pick a color from the module-level
            `color_map` (NOTE(review): its json load is commented out at
            module scope -- confirm it is defined before calling).
        out_file: path of the .obj file to write.
    """
    # Pairs of corner indices forming the 12 edges of a box.
    # (Removed the unused `corner_indexes` table and a leftover debug
    # `print(corner0.shape)` that spammed stdout for every edge.)
    line_indexes = [[0, 1], [0, 2], [0, 4], [1, 3], [1, 5], [2, 3], [2, 6], [3, 7], [4, 5], [4, 6], [5, 7], [6, 7]]
    with open(out_file, 'w') as f:
        l = box_corners.shape[0]
        for i in range(l):
            box = box_corners[i]
            color = color_map[seg[i]]
            for line_index in line_indexes:
                corner0 = box[line_index[0]]
                corner1 = box[line_index[1]]
                dist = np.linalg.norm(corner0 - corner1)
                # one dot every 0.005 units along the edge
                dot_num = int(dist / 0.005)
                if dot_num == 0:
                    # degenerate edge (coincident corners): avoid ZeroDivisionError
                    continue
                delta = (corner1 - corner0) / dot_num
                for idot in range(dot_num):
                    plotdot = corner0 + idot * delta
                    f.write(
                        'v %f %f %f %f %f %f\n' % (plotdot[0], plotdot[1], plotdot[2], color[0], color[1], color[2]))
def output_bounding_box(boxes, seg, out_file):
    """Write dotted bounding-box edges as colored .obj vertices.

    Each box is given by 6 coordinate values from which `corner_indexes`
    assembles the 8 corners (presumably [xmin, ymin, zmin, xmax, ymax, zmax]
    -- TODO confirm against callers; the original "#box:nx8x3" comment does
    not match this indexing).

    Args:
        boxes: (n, 6) array of box extents.
        seg: per-box labels used to pick a color from the module-level
            `color_map` (NOTE(review): its json load is commented out at
            module scope -- confirm it is defined before calling).
        out_file: path of the .obj file to write.
    """
    # component indices of the 8 corners, and the 12 edges between them
    corner_indexes = [[0, 1, 2], [0, 1, 5], [0, 4, 2], [0, 4, 5], [3, 1, 2], [3, 1, 5], [3, 4, 2], [3, 4, 5]]
    line_indexes = [[0, 1], [0, 2], [0, 4], [1, 3], [1, 5], [2, 3], [2, 6], [3, 7], [4, 5], [4, 6], [5, 7], [6, 7]]
    with open(out_file, 'w') as f:
        l = boxes.shape[0]
        for i in range(l):
            box = boxes[i]
            color = color_map[seg[i]]
            for line_index in line_indexes:
                corner0 = box[corner_indexes[line_index[0]]]
                corner1 = box[corner_indexes[line_index[1]]]
                dist = np.linalg.norm(corner0 - corner1)
                # one dot every 0.005 units along the edge
                dot_num = int(dist / 0.005)
                if dot_num == 0:
                    # degenerate edge (zero length): avoid ZeroDivisionError
                    continue
                delta = (corner1 - corner0) / dot_num
                for idot in range(dot_num):
                    plotdot = corner0 + idot * delta
                    f.write(
                        'v %f %f %f %f %f %f\n' % (plotdot[0], plotdot[1], plotdot[2], color[0], color[1], color[2]))
def output_color_point_cloud(data, seg, out_file):
    """Dump a labeled point cloud as colored .obj vertex lines, looking up
    each label's RGB triple in the module-level ``color_map``."""
    with open(out_file, 'w') as f:
        for i, label in enumerate(seg):
            color = color_map[label]
            point = data[i]
            f.write('v %f %f %f %f %f %f\n' % (point[0], point[1], point[2], color[0], color[1], color[2]))
def output_point_cloud_rgb(data, rgb, out_file):
    """Dump a point cloud with per-point RGB values as .obj vertex lines."""
    with open(out_file, 'w') as f:
        for i, point in enumerate(data):
            color = rgb[i]
            f.write('v %f %f %f %f %f %f\n' % (point[0], point[1], point[2], color[0], color[1], color[2]))
def output_color_point_cloud_red_blue(data, seg, out_file):
    """Dump a binary-labeled point cloud as .obj vertex lines:
    label 1 is blue, label 0 is red, anything else is black."""
    palette = {1: [0, 0, 1], 0: [1, 0, 0]}
    black = [0, 0, 0]
    with open(out_file, 'w') as f:
        for i, label in enumerate(seg):
            color = palette.get(label, black)
            point = data[i]
            f.write('v %f %f %f %f %f %f\n' % (point[0], point[1], point[2], color[0], color[1], color[2]))
## Define the color heat map.
# Pre-compute an RGB lookup table over the matplotlib 'magma' colormap:
# 255 entries for indices 0-254 (output_scale_point_cloud clamps to 254).
norm = mpl.colors.Normalize(vmin=0, vmax=255)
magma_cmap = plt.cm.get_cmap('magma')
magma_rgb = []
for i in range(0, 255):
    k = mpl.colors.colorConverter.to_rgb(magma_cmap(norm(i)))
    magma_rgb.append(k)
def output_scale_point_cloud(data, scales, out_file):
    """Dump a point cloud colored by scalar value via the magma lookup table.

    :param data: iterable of xyz points
    :param scales: per-point scalars, nominally in [0, 1]
    :param out_file: destination .obj path
    """
    with open(out_file, 'w') as f:
        l = len(scales)
        for i in range(l):
            # Map [0, 1] onto table indices [0, 254] and clamp BOTH ends:
            # previously a negative scale silently wrapped around and
            # picked a color from the top of the table.
            scale = int(scales[i] * 254)
            if scale > 254:
                scale = 254
            elif scale < 0:
                scale = 0
            color = magma_rgb[scale]
            f.write('v %f %f %f %f %f %f\n' % (data[i][0], data[i][1], data[i][2], color[0], color[1], color[2]))
|
#!/usr/bin/env python3
from threading import Thread, Lock
import sys
import rospy
from hiwonder_servo_driver.hiwonder_servo_serialproxy import SerialProxy
from hiwonder_servo_msgs.msg import CommandDurationList
from hiwonder_servo_controllers.action_group_runner import ActionGroupRunner
from hiwonder_servo_controllers.joint_position_controller import JointPositionController
from hiwonder_servo_controllers.joint_trajectory_action_controller import JointTrajectoryActionController
class ControllerManager:
    """ROS node owning the serial servo buses and their controllers.

    On construction it connects one SerialProxy per configured serial
    port, then starts all JointPositionControllers first and the
    JointTrajectoryActionControllers (which depend on them) second.
    """
    def __init__(self):
        rospy.init_node('hiwonder_servo_manager', anonymous=True)
        rospy.on_shutdown(self.on_shutdown)
        # Meta controllers whose dependency controllers have not all started yet.
        self.waiting_meta_controllers = []
        self.controllers = {}  # controller name -> controller instance
        self.controllers_by_id = {}  # servo id -> position controller
        self.serial_proxies = {}  # port id -> SerialProxy
        # self.multi_command_sub = rospy.Subscriber('/servo_controller/command', CommandDurationList,
        #                                           self.process_multi_control)
        self.manager_name = rospy.get_name().replace('/', '')
        serial_ports = rospy.get_param('~serial_ports')
        for serial in serial_ports:
            port_name = serial['port_name']
            port_id = str(serial['port_id'])
            baud_rate = serial['baud_rate']
            # Optional per-port settings fall back to permissive defaults.
            min_motor_id = serial['min_motor_id'] if 'min_motor_id' in serial else 0
            max_motor_id = serial['max_motor_id'] if 'max_motor_id' in serial else 253
            update_rate = serial['update_rate'] if 'update_rate' in serial else 5
            fake_read = serial['fake_read'] if 'fake_read' in serial else False
            connected_ids = serial['connected_ids'] if 'connected_ids' in serial else []
            serial_proxy = SerialProxy(port_name,
                                       self.manager_name,
                                       str(port_id),
                                       baud_rate,
                                       min_motor_id,
                                       max_motor_id,
                                       connected_ids,
                                       update_rate,
                                       fake_read)
            serial_proxy.connect()
            self.serial_proxies[port_id] = serial_proxy
        # Start plain position controllers first, then trajectory action
        # controllers which declare them as dependencies.
        items_ = rospy.get_param('~controllers').items()
        for ctl_name, ctl_params in items_:
            if ctl_params['type'] == 'JointPositionController':
                self.start_position_controller(ctl_name, ctl_params)
        for ctl_name, ctl_params in items_:
            if ctl_params['type'] == 'JointTrajectoryActionController':
                self.start_trajectory_action_controller(ctl_name, ctl_params)
    def on_shutdown(self):
        """Disconnect every serial proxy when the node shuts down."""
        for serial_proxy in self.serial_proxies.values():
            serial_proxy.disconnect()
        # self.multi_command_sub.unregister()
    def check_deps(self):
        """Start every waiting meta controller whose dependencies are up;
        keep the rest queued and log what they are still waiting for."""
        controllers_still_waiting = []
        for i, (ctl_name, deps, kls) in enumerate(self.waiting_meta_controllers):
            if not set(deps).issubset(self.controllers.keys()):
                controllers_still_waiting.append(self.waiting_meta_controllers[i])
                rospy.logwarn('[%s] not all dependencies started, still waiting for %s...' % (
                    ctl_name, str(list(set(deps).difference(self.controllers.keys())))))
            else:
                dependencies = [self.controllers[dep_name] for dep_name in deps]
                controller = kls(ctl_name, dependencies)
                if controller.initialize():
                    controller.start()
                    self.controllers[ctl_name] = controller
        self.waiting_meta_controllers = controllers_still_waiting[:]
    def start_position_controller(self, ctl_name, ctl_params):
        """Create, initialize and register one JointPositionController.

        Re-checks waiting meta controllers afterwards, since this may
        satisfy one of their dependencies.
        """
        if ctl_name in self.controllers:
            # Already started; nothing to do.
            return False
        port_id = str(ctl_params['port_id'])
        if port_id in self.serial_proxies:
            controller = JointPositionController(self.serial_proxies[port_id].servo_io,
                                                 ctl_name,
                                                 '/' + self.manager_name + "/controllers/" + ctl_name,
                                                 port_id)
            if controller.initialize():
                controller.start()
                self.controllers[ctl_name] = controller
                self.controllers_by_id[controller.servo_id] = controller
            self.check_deps()
    def start_trajectory_action_controller(self, ctl_name, ctl_params):
        """Queue a trajectory action controller until its joint
        controllers are running, then let check_deps() start it."""
        dependencies = ctl_params['joint_controllers']
        self.waiting_meta_controllers.append((ctl_name, dependencies, JointTrajectoryActionController))
        self.check_deps()
    # def process_multi_control(self, req):
    #     ids = req.ids
    #     duration = req.duration
    #     positions = req.positions
    #     for i in range(len(ids)):
    #         if ids[i] in self.controllers_by_id:
    #             self.controllers_by_id[ids[i]].set_position(positions[i], duration)
    def set_multi_pos(self, poss):
        """Drive several servos at once.

        :param poss: iterable of (servo_id, position_rad, duration) tuples
        """
        for id_, pos_, dur_ in poss:
            self.controllers_by_id[id_].set_position_in_rad(pos_, dur_)
if __name__ == '__main__':
    try:
        # Start the servo manager, then run the action-group server on top
        # of it and block until the ROS node is shut down.
        manager = ControllerManager()
        runner = ActionGroupRunner('ActionGroupRunner', manager.set_multi_pos)
        runner.start()
        rospy.spin()
    except rospy.ROSInterruptException:
        # Normal exit path on node shutdown / Ctrl-C.
        pass
|
<gh_stars>1-10
#!/usr/bin/env python3
import logging
from datetime import datetime, time, timedelta
from typing import Optional
from ..building.interface import Shutter
from . import task
from .interface import Trigger
from .job import Job
from .jobmanager import JobManager
from .task import Task, Open, Tilt, Close
from ..sun.sundata import Sundata
from ..util import dayutil, dateutil
logger = logging.getLogger(__name__)
class TriggerBase(Trigger):
    """Shared trigger state and behavior: the task to run, the scheduled
    time, an offset in minutes, the weekdays the trigger applies on and
    an optional ordering index."""

    def __init__(self, task: Task, runtime: Optional[datetime]):
        self._task: Task = task
        self._time: Optional[datetime] = runtime
        self._offset: int = 0
        self._on: [str] = ['MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU']
        self._order: Optional[int] = None

    def task(self) -> Task:
        return self._task

    def set_task(self, task: Task):
        self._task = task

    def time(self) -> Optional[datetime]:
        # The advertised time is always the raw time shifted by the offset.
        return self.__shifted_time()

    def set_offset(self, offset: int):
        self._offset = offset

    def set_days(self, on: [str]):
        self._on = on

    def set_order(self, order: Optional[int]):
        self._order = order

    def applies(self) -> bool:
        # A trigger only fires on its configured weekdays.
        return dayutil.applies(self.time(), self._on)

    def __shifted_time(self) -> Optional[datetime]:
        # A signed minute delta covers the positive, negative and zero
        # offset cases in one expression.
        if not self._time or not self._offset:
            return self._time
        return self._time + timedelta(minutes=self._offset)

    @staticmethod
    def create(trigger, **args) -> Trigger:
        # Concrete trigger types provide their own factory.
        raise NotImplementedError()

    @property
    def order(self) -> Optional[int]:
        return self._order

    def __repr__(self):
        return 'runtime: %s, task: %s, offset: %s, on: %s' % (self._time, self._task, self._offset, self._on)
class SunriseTrigger(TriggerBase):
    """Fires at sunrise; runs Open() unless another task is configured."""
    # NOTE(review): the Open() default is evaluated once at definition time
    # and shared by all constructions -- confirm Task instances are stateless.
    def __init__(self, sundata: Sundata, task: Task = Open()):
        super(SunriseTrigger, self).__init__(task, sundata.get_sunrise())
    @staticmethod
    def type() -> str:
        """Configuration key identifying this trigger type."""
        return 'SUNRISE'
    @staticmethod
    def create(trigger, **args) -> Trigger:
        """Factory used by build_trigger(); needs ``sundata`` in args."""
        return SunriseTrigger(args.get('sundata'))
    def __repr__(self):
        return 'SunriseTrigger: { %s }' % (super(SunriseTrigger, self).__repr__())
class SunsetTrigger(TriggerBase):
    """Fires at sunset; runs Close() unless another task is configured."""
    def __init__(self, sundata: Sundata, task: Task = Close()):
        super(SunsetTrigger, self).__init__(task, sundata.get_sunset())
    @staticmethod
    def type() -> str:
        """Configuration key identifying this trigger type."""
        return 'SUNSET'
    @staticmethod
    def create(trigger, **args) -> Trigger:
        """Factory used by build_trigger(); needs ``sundata`` in args."""
        return SunsetTrigger(args.get('sundata'))
    def __repr__(self):
        return 'SunsetTrigger: { %s }' % (super(SunsetTrigger, self).__repr__())
class SunInTrigger(TriggerBase):
    """Fires when the sun reaches the given azimuth (sun enters the
    window); runs Tilt() unless another task is configured."""
    def __init__(self, sundata: Sundata, azimuth: int, task: Task = Tilt()):
        super(SunInTrigger, self).__init__(task, sundata.find_azimuth(azimuth).time)
    @staticmethod
    def type() -> str:
        """Configuration key identifying this trigger type."""
        return 'SUNIN'
    @staticmethod
    def create(trigger, **args) -> Trigger:
        """Factory used by build_trigger(); needs ``sundata`` and ``azimuth``."""
        return SunInTrigger(args.get('sundata'), args.get('azimuth'))
    def __repr__(self):
        return 'SunInTrigger: { %s }' % (super(SunInTrigger, self).__repr__())
class SunOutTrigger(TriggerBase):
    """Fires when the sun reaches the given azimuth (sun leaves the
    window); runs Open() unless another task is configured."""
    def __init__(self, sundata: Sundata, azimuth: int, task: Task = Open()):
        super(SunOutTrigger, self).__init__(task, sundata.find_azimuth(azimuth).time)
    @staticmethod
    def type() -> str:
        """Configuration key identifying this trigger type."""
        return 'SUNOUT'
    @staticmethod
    def create(trigger, **args) -> Trigger:
        """Factory used by build_trigger(); needs ``sundata`` and ``azimuth``."""
        return SunOutTrigger(args.get('sundata'), args.get('azimuth'))
    def __repr__(self):
        return 'SunOutTrigger: { %s }' % (super(SunOutTrigger, self).__repr__())
class TimeTrigger(TriggerBase):
    """Fires at a fixed wall-clock time; runs Close() unless another task
    is configured."""
    def __init__(self, runtime: time, task: Task = Close()):
        super(TimeTrigger, self).__init__(task, self.__prepare_runtime(runtime))
    @staticmethod
    def __prepare_runtime(runtime: time) -> datetime:
        # Anchor the bare time-of-day onto the current date.
        return dateutil.date.current.replace(hour=runtime.hour, minute=runtime.minute, second=runtime.second,
                                             microsecond=0)
    @staticmethod
    def type() -> str:
        """Configuration key identifying this trigger type."""
        return 'TIME'
    @staticmethod
    def create(trigger, **args) -> Trigger:
        """Factory used by build_trigger(); parses the ISO 'time' entry."""
        runtime = time.fromisoformat(trigger.get('time'))
        return TimeTrigger(runtime)
    def __repr__(self):
        return 'TimeTrigger: { %s }' % (super(TimeTrigger, self).__repr__())
class AzimuthTrigger(TriggerBase):
    """Fires when the sun reaches a configured azimuth; runs Close()
    unless another task is configured."""
    def __init__(self, sundata: Sundata, azimuth: int, task: Task = Close()):
        super(AzimuthTrigger, self).__init__(task, sundata.find_azimuth(azimuth).time)
    @staticmethod
    def type() -> str:
        """Configuration key identifying this trigger type."""
        return 'AZIMUTH'
    @staticmethod
    def create(trigger, **args) -> Trigger:
        """Factory used by build_trigger(); azimuth comes from the trigger
        dict, sundata from args."""
        azimuth = trigger.get('azimuth')
        return AzimuthTrigger(args.get('sundata'), azimuth)
    def __repr__(self):
        return 'AzimuthTrigger: { %s }' % (super(AzimuthTrigger, self).__repr__())
class ElevationTrigger(TriggerBase):
    """Fires when the sun crosses a configured elevation, on either the
    rising or setting side; runs Close() unless another task is configured."""
    def __init__(self, sundata: Sundata, elevation: int, direction: str, task: Task = Close()):
        super(ElevationTrigger, self).__init__(task, self.__pick(sundata, elevation, direction))
    @staticmethod
    def type() -> str:
        """Configuration key identifying this trigger type."""
        return 'ELEVATION'
    @staticmethod
    def create(trigger, **args) -> Trigger:
        """Factory used by build_trigger(); elevation/direction come from
        the trigger dict, sundata from args."""
        elevation = trigger.get('elevation')
        direction = trigger.get('direction')
        return ElevationTrigger(args.get('sundata'), elevation, direction)
    @staticmethod
    def __pick(sundata: Sundata, elevation: int, direction: str) -> datetime:
        # 'RISE' selects the morning crossing, anything else the evening one.
        rising, setting = sundata.find_elevation(elevation)
        if direction == 'RISE':
            return rising.time
        return setting.time
    def __repr__(self):
        return 'ElevationTrigger: { %s }' % (super(ElevationTrigger, self).__repr__())
class PositionTrigger(TriggerBase):
    """Fires when the sun has passed BOTH a configured azimuth and a
    configured elevation; runs Close() unless another task is configured."""
    def __init__(self, sundata: Sundata, azimuth: int, elevation: int, direction: str, task: Task = Close()):
        super(PositionTrigger, self).__init__(task, self.__pick(sundata, azimuth, elevation, direction))
    @staticmethod
    def type() -> str:
        """Configuration key identifying this trigger type."""
        return 'POSITION'
    @staticmethod
    def create(trigger, **args) -> Trigger:
        """Factory used by build_trigger(); azimuth/elevation/direction come
        from the trigger dict, sundata from args."""
        azimuth = trigger.get('azimuth')
        elevation = trigger.get('elevation')
        direction = trigger.get('direction')
        return PositionTrigger(args.get('sundata'), azimuth, elevation, direction)
    @staticmethod
    def __pick(sundata: Sundata, azimuth: int, elevation: int, direction: str) -> datetime:
        # Take the later of the two events so both conditions hold.
        azi = sundata.find_azimuth(azimuth)
        rising, setting = sundata.find_elevation(elevation)
        ele = setting
        if direction == 'RISE':
            ele = rising
        if azi.time > ele.time:
            return azi.time
        return ele.time
    def __repr__(self):
        # Fixed copy-paste bug: this previously labeled itself 'ElevationTrigger'.
        return 'PositionTrigger: { %s }' % (super(PositionTrigger, self).__repr__())
def apply_triggers(manager: JobManager, sundata: Sundata, blind: Shutter):
    """Build all triggers configured for a blind and queue one Job per
    trigger with the job manager."""
    triggers = extract_triggers(blind, sundata)
    logger.debug('Triggers for {}: {}'.format(blind.name, triggers))
    for trigger in triggers:
        manager.add(Job(trigger, blind))
def extract_triggers(blind: Shutter, sundata: Sundata) -> [Trigger]:
    """Turn a blind's raw trigger configuration into Trigger instances.

    Each configured entry is matched against every known trigger type in
    turn; the first matching builder wins. Unknown entries are logged and
    skipped. The result is sorted chronologically and then merged (see
    :func:`merge`).
    """
    triggers: [Trigger] = []
    order: Order = Order()
    for trigger in blind.triggers:
        # Every entry consumes the next ordering index, shared by all
        # builder attempts for that entry.
        # NOTE(review): the TimeTrigger call below does not pass `next`,
        # so time triggers never get an order -- confirm this is intended.
        next = order.next
        if build_trigger(trigger, SunriseTrigger.type(), SunriseTrigger.create, triggers, next, sundata=sundata) or \
                build_trigger(trigger, SunsetTrigger.type(), SunsetTrigger.create, triggers, next, sundata=sundata) or \
                build_trigger(trigger, SunInTrigger.type(), SunInTrigger.create, triggers, next, sundata=sundata,
                              azimuth=blind.sun_in) or \
                build_trigger(trigger, SunOutTrigger.type(), SunOutTrigger.create, triggers, next, sundata=sundata,
                              azimuth=blind.sun_out) or \
                build_trigger(trigger, TimeTrigger.type(), TimeTrigger.create, triggers) or \
                build_trigger(trigger, AzimuthTrigger.type(), AzimuthTrigger.create, triggers, next,
                              sundata=sundata) or \
                build_trigger(trigger, ElevationTrigger.type(), ElevationTrigger.create, triggers, next,
                              sundata=sundata) or \
                build_trigger(trigger, PositionTrigger.type(), PositionTrigger.create, triggers, next, sundata=sundata):
            continue
        logger.error('No Trigger for {} existing'.format(trigger))
    sort(triggers)
    return merge(triggers)
def merge(triggers: [Trigger]) -> [Trigger]:
    """Filter the (time-sorted) triggers so order values never decrease.

    Unordered triggers always survive. An ordered trigger is dropped when
    its order is below the highest order kept so far; the floor is seeded
    with the first order present in the list (or 0 when none exists).
    """
    ordered_values = [t.order for t in triggers if t.order is not None]
    floor: int = ordered_values[0] if ordered_values else 0
    kept: [Trigger] = []
    for candidate in triggers:
        if candidate.order is None:
            kept.append(candidate)
        elif floor <= candidate.order:
            kept.append(candidate)
            floor = candidate.order
    return kept
def sort(triggers):
    """Sort triggers chronologically in place; equal times fall back to
    the order value (triggers without an order count as 0)."""
    triggers.sort(key=lambda trigger: (trigger.time(), trigger.order or 0))
def build_trigger(triggerdata, type: str, constructor, triggers: [Trigger], order: Optional[int] = None, **args) -> bool:
    """Try to build one trigger of the given type from raw configuration.

    ``triggerdata`` is either a bare type-name string or a dict keyed by
    type name with per-trigger settings. On a match the constructed
    trigger is appended to ``triggers``.

    :param triggerdata: raw trigger configuration (str or dict)
    :param type: trigger type name to match against
    :param constructor: the trigger type's ``create`` factory
    :param triggers: accumulator list the new trigger is appended to
    :param order: optional ordering index assigned to the trigger
    :param args: extra keyword arguments forwarded to the factory
    :return: True when a trigger of this type was built, else False
    """
    # Log the data being parsed; previously this logged the accumulator
    # list, which made the debug output useless.
    logger.debug('parse: {} for {}'.format(triggerdata, type))
    if isinstance(triggerdata, str):
        # Bare type name: build with defaults only.
        if triggerdata == type:
            trigger = constructor(trigger=triggerdata, **args)
            trigger.set_order(order)
            triggers.append(trigger)
            return True
        return False
    if type in triggerdata.keys():
        # Dict form: build, then apply the optional task/offset/day settings.
        triggerdict = triggerdata.get(type)
        trigger = constructor(trigger=triggerdict, **args)
        set_optionals(trigger, triggerdict)
        trigger.set_order(order)
        triggers.append(trigger)
        return True
    return False
def set_optionals(trigger, triggerdict):
    """Apply the optional task, offset and weekday settings from the
    trigger's configuration dict."""
    for apply_setting in (set_task, set_offset, set_on):
        apply_setting(trigger, triggerdict)
def set_task(trigger: Trigger, triggerdict):
    """Override the trigger's default task when one is configured."""
    if 'task' not in triggerdict:
        return
    parsed = task.create(triggerdict.get('task'))
    if parsed:
        trigger.set_task(parsed)
def set_offset(trigger: Trigger, triggerdict):
    """Shift the trigger time by the configured 'offset' minutes, if any."""
    if 'offset' in triggerdict:
        trigger.set_offset(triggerdict['offset'])
def set_on(trigger: Trigger, triggerdict):
    """Restrict the trigger to the weekdays configured under 'at', if any."""
    if 'at' not in triggerdict:
        return
    trigger.set_days(dayutil.parse_config(triggerdict.get('at')))
class Order:
    """Hands out consecutive integers starting at 1, one per access of
    :attr:`next`. Each instance counts independently."""
    _count = 0

    @property
    def next(self):
        """Advance the counter and return its new value."""
        self._count += 1
        return self._count
|
from tkinter import *
from tkinter import ttk as ttk
import tkinter.messagebox as msgbox
import core_module as cm
import webbrowser
from tkinter import filedialog
# Main window: fixed size 640x480, placed at screen offset (600, 300).
root = Tk()
root.title("Arcalive Lastorigin Searcher 1.1.0")
root.geometry("640x480+600+300")
root.resizable(False, False)
########################################################################################################################
### main
########################################################################################################################
# Tab container with one Frame per feature tab.
# NOTE(review): the notebook is declared 740x580 although the window is
# fixed at 640x480 -- confirm the oversize is intentional.
notebook = ttk.Notebook(root, width=740, height=580)
notebook.pack()
frame1 = Frame(root)
notebook.add(frame1, text=u"공략")
frame2 = Frame(root)
notebook.add(frame2, text=u"야짤")
frame3 = Frame(root)
notebook.add(frame3, text=u"창작물(야짤)")
frame4 = Frame(root)
notebook.add(frame4, text=u"창작물")
frame5 = Frame(root)
notebook.add(frame5, text=u'Info')
########################################################################################################################
### listbox (공략)
########################################################################################################################
# Search tab widgets: result list, query entry, and two radio buttons
# selecting the search mode (0 = title+body, 1 = title only).
listbox = Listbox(frame1, selectmode="single", height=0)
listbox.pack(side="right", fill="both", expand=True)
entry_text = Entry(frame1, width=30)
entry_text.pack()
entry_text.insert(END, "")
radio_value = IntVar()
radio_btn1 = ttk.Radiobutton(frame1, text=u"제목+내용", variable=radio_value, value=0)
radio_btn2 = ttk.Radiobutton(frame1, text=u"제목만", variable=radio_value, value=1)
radio_btn1.pack()
radio_btn2.pack()
Label(frame1, text=u"검색어를 입력하세요.\n( ex) 1-1ex , 5-8ex+카엔 )").pack()
def weblink(*args):
    """Listbox selection handler: open the selected entry in the default
    browser when it is an https URL."""
    selected_index = listbox.curselection()[0]
    entry = listbox.get(selected_index)
    if "https://" in entry:
        webbrowser.open_new(entry)
def error():
    """Warn the user that the search query is empty."""
    msgbox.showwarning(u"경고", u"검색어를 입력하세요")
def search_btncmd():
    """Run a board search with the current query and fill the listbox."""
    value = entry_text.get()
    # Clear any previous results before searching again.
    listbox.delete(0, END)
    if len(value) == 0:
        error()
    else:
        # radio_value: 0 = search title+body, 1 = title only.
        data_arr = cm.NamuliveSearch(radio_value.get(), value)
        listbox.bind("<<ListboxSelect>>", weblink)
        for item in data_arr:
            listbox.insert(END, item)
            # Rows starting with 'h' (URLs) in red, other rows in blue.
            listbox.itemconfig(END, fg="red" if item[0] == "h" else "blue")
        listbox.pack(side="right", fill="both", expand=True)
def search_callback(event):
    """<Return> key handler: run the search."""
    search_btncmd()
root.bind("<Return>", search_callback)
########################################################################################################################
### listbox (야짤)
########################################################################################################################
def edgeObject():
    """Build the image-downloader tab (frame2): page spinbox with
    validation, target-folder picker and download button."""
    yazzal_title = Label(frame2, text=u"아카라이브 야짤탭 다운로더입니다.")
    yazzal_title.pack(side="top")
    Label(frame2, text=u"이미지파일은 글번호+글제목 형태의 폴더에 개별저장됩니다.").pack()
    cre_spin_label = Label(frame2, text=u"페이지 수를 입력하세요")
    cre_spin_label.place(x=200, y=70)
    # Board scope: 0 = all posts, 1 = popular ("best") posts.
    rcm_value = IntVar()
    rcm_btn1 = ttk.Radiobutton(frame2, text=u"전체", variable=rcm_value, value=0)
    rcm_btn2 = ttk.Radiobutton(frame2, text=u"개념글", variable=rcm_value, value=1)
    rcm_btn1.place(x=140, y=61)
    rcm_btn2.place(x=140, y=85)
    def cre_yazzal_spin_check(self):
        """Spinbox validatecommand: accept only 1..lastPage (or empty)."""
        # Last page differs per scope: [all, popular].
        lastPage = [250, 51]
        pageChecker = rcm_value.get()
        cre_spin_label.config(text=u"다운받을 페이지를\n입력하세요. (1~{})".format(lastPage[pageChecker]))
        cre_spin_label.place(x=380, y=63)
        valid = False
        if self.isdigit():
            if int(self) <= lastPage[pageChecker] and int(self) >= 1:
                valid = True
        elif self == '':
            valid = True
        return valid
    def cre_yazzal_spin_error(self):
        """Spinbox invalidcommand: show the allowed page range."""
        pageChecker = rcm_value.get()
        lastPage = [250, 51]
        cre_spin_label.config(text=u"최소 수치는 1\n최대 수치는 {}입니다".format(lastPage[pageChecker]))
        cre_spin_label.place(x=380, y=63)
    cre_validate_command = (frame2.register(cre_yazzal_spin_check), "%P")
    cre_invalid_command = (frame2.register(cre_yazzal_spin_error), "%P")
    cre_yazzal_spinbox = Spinbox(frame2, from_=1, to=250, validate='all',
                                 validatecommand=cre_validate_command,
                                 invalidcommand=cre_invalid_command)
    cre_yazzal_spinbox.place(x=220, y=73)
    def cre_yazzal_spinbox_value():
        return cre_yazzal_spinbox.get()
    def cre_yazzal_error():
        msgbox.showwarning(u"경고", u"다운로드 폴더 경로를 지정하세요.")
    def cre_dir_btncmd():
        """Ask the user for a target folder and show it in the listbox."""
        cre_down_listbox.delete(0, END)
        dir_path = filedialog.askdirectory(parent=root, initialdir="/", title=u"다운로드 파일을 저장할 폴더를 선택하세요")
        # askdirectory() returns an empty string (not None) when the user
        # cancels; the old `is None` check inserted that empty string.
        if dir_path:
            cre_down_listbox.insert(END, "{}".format(dir_path))
            cre_down_listbox.place(x=80, y=123)
    def cre_down_alarm():
        msgbox.showwarning(u"작업완료", u"다운로드가 완료되었습니다.")
    def cre_down_btncmd():
        """Download the selected number of pages into the chosen folder."""
        cre_yazzal_page = int(cre_yazzal_spinbox_value())
        try:
            dir_path = cre_down_listbox.get(0, END)[0]
            # Guard against empty/degenerate paths left in the listbox.
            if len(dir_path) == 0 or len(dir_path) == 1:
                cre_yazzal_error()
            else:
                # Mode 2 selects this board in core_module.
                cm.created_img_download(dir_path, cre_yazzal_page, rcm_value.get(), 2)
                cre_down_alarm()
        except IndexError:
            # No folder chosen yet.
            cre_yazzal_error()
    cre_directory_btn = Button(frame2, width=10, text=u"경로지정", overrelief="solid", command=cre_dir_btncmd)
    cre_down_listbox = Listbox(frame2, selectmode=u"single", height=0, width=40)
    cre_down_btn = Button(frame2, width=10, text=u"다운로드 시작", overrelief="solid", command=cre_down_btncmd)
    cre_down_listbox.place(x=80, y=123)
    cre_directory_btn.place(x=380, y=120)
    cre_down_btn.place(x=480, y=120)
edgeObject()
########################################################################################################################
### 창작물
########################################################################################################################
def creativeedgeObject():
    """Build the creative (NSFW) downloader tab (frame3): page spinbox
    with validation, target-folder picker and download button."""
    yazzal_title = Label(frame3, text=u"아카라이브 창작물(야짤)탭 다운로더입니다.")
    yazzal_title.pack(side="top")
    Label(frame3, text=u"이미지파일은 글번호+글제목 형태의 폴더에 개별저장됩니다.").pack()
    cre_spin_label = Label(frame3, text=u"페이지 수를 입력하세요")
    cre_spin_label.place(x=200, y=70)
    # Board scope: 0 = all posts, 1 = popular ("best") posts.
    rcm_value = IntVar()
    rcm_btn1 = ttk.Radiobutton(frame3, text=u"전체", variable=rcm_value, value=0)
    rcm_btn2 = ttk.Radiobutton(frame3, text=u"개념글", variable=rcm_value, value=1)
    rcm_btn1.place(x=140, y=61)
    rcm_btn2.place(x=140, y=85)
    def cre_yazzal_spin_check(self):
        """Spinbox validatecommand: accept only 1..lastPage (or empty)."""
        # Last page differs per scope: [all, popular].
        lastPage = [53, 53]
        pageChecker = rcm_value.get()
        cre_spin_label.config(text=u"다운받을 페이지를\n입력하세요. (1~{})".format(lastPage[pageChecker]))
        cre_spin_label.place(x=380, y=63)
        valid = False
        if self.isdigit():
            if int(self) <= lastPage[pageChecker] and int(self) >= 1:
                valid = True
        elif self == '':
            valid = True
        return valid
    def cre_yazzal_spin_error(self):
        """Spinbox invalidcommand: show the allowed page range."""
        pageChecker = rcm_value.get()
        lastPage = [53, 53]
        cre_spin_label.config(text=u"최소 수치는 1\n최대 수치는 {}입니다".format(lastPage[pageChecker]))
        cre_spin_label.place(x=380, y=63)
    cre_validate_command = (frame3.register(cre_yazzal_spin_check), "%P")
    cre_invalid_command = (frame3.register(cre_yazzal_spin_error), "%P")
    cre_yazzal_spinbox = Spinbox(frame3, from_=1, to=250, validate='all',
                                 validatecommand=cre_validate_command,
                                 invalidcommand=cre_invalid_command)
    cre_yazzal_spinbox.place(x=220, y=73)
    def cre_yazzal_spinbox_value():
        return cre_yazzal_spinbox.get()
    def cre_yazzal_error():
        msgbox.showwarning(u"경고", u"다운로드 폴더 경로를 지정하세요.")
    def cre_dir_btncmd():
        """Ask the user for a target folder and show it in the listbox."""
        cre_down_listbox.delete(0, END)
        dir_path = filedialog.askdirectory(parent=root, initialdir="/", title=u"다운로드 파일을 저장할 폴더를 선택하세요")
        # askdirectory() returns an empty string (not None) when the user
        # cancels; the old `is None` check inserted that empty string.
        if dir_path:
            cre_down_listbox.insert(END, "{}".format(dir_path))
            cre_down_listbox.place(x=80, y=123)
    def cre_down_alarm():
        msgbox.showwarning(u"작업완료", u"다운로드가 완료되었습니다.")
    def cre_down_btncmd():
        """Download the selected number of pages into the chosen folder."""
        cre_yazzal_page = int(cre_yazzal_spinbox_value())
        try:
            dir_path = cre_down_listbox.get(0, END)[0]
            # Guard against empty/degenerate paths left in the listbox.
            if len(dir_path) == 0 or len(dir_path) == 1:
                cre_yazzal_error()
            else:
                # Mode 1 selects this board in core_module.
                cm.created_img_download(dir_path, cre_yazzal_page, rcm_value.get(), 1)
                cre_down_alarm()
        except IndexError:
            # No folder chosen yet.
            cre_yazzal_error()
    cre_directory_btn = Button(frame3, width=10, text=u"경로지정", overrelief="solid", command=cre_dir_btncmd)
    cre_down_listbox = Listbox(frame3, selectmode=u"single", height=0, width=40)
    cre_down_btn = Button(frame3, width=10, text=u"다운로드 시작", overrelief="solid", command=cre_down_btncmd)
    cre_down_listbox.place(x=80, y=123)
    cre_directory_btn.place(x=380, y=120)
    cre_down_btn.place(x=480, y=120)
creativeedgeObject()
########################################################################################################################
### 창작물 탭
########################################################################################################################
def creativeObject():
    """Build the creative downloader tab (frame4): page spinbox with
    validation, target-folder picker and download button."""
    yazzal_title = Label(frame4, text=u"아카라이브 창작물탭 다운로더입니다.")
    yazzal_title.pack(side="top")
    Label(frame4, text=u"이미지파일은 글번호+글제목 형태의 폴더에 개별저장됩니다.").pack()
    cre_spin_label = Label(frame4, text=u"페이지 수를 입력하세요")
    cre_spin_label.place(x=200, y=70)
    # Board scope: 0 = all posts, 1 = popular ("best") posts.
    rcm_value = IntVar()
    rcm_btn1 = ttk.Radiobutton(frame4, text=u"전체", variable=rcm_value, value=0)
    rcm_btn2 = ttk.Radiobutton(frame4, text=u"개념글", variable=rcm_value, value=1)
    rcm_btn1.place(x=140, y=61)
    rcm_btn2.place(x=140, y=85)
    def cre_yazzal_spin_check(self):
        """Spinbox validatecommand: accept only 1..lastPage (or empty)."""
        # Last page differs per scope: [all, popular].
        lastPage = [250, 250]
        pageChecker = rcm_value.get()
        cre_spin_label.config(text=u"다운받을 페이지를\n입력하세요. (1~{})".format(lastPage[pageChecker]))
        cre_spin_label.place(x=380, y=63)
        valid = False
        if self.isdigit():
            if int(self) <= lastPage[pageChecker] and int(self) >= 1:
                valid = True
        elif self == '':
            valid = True
        return valid
    def cre_yazzal_spin_error(self):
        """Spinbox invalidcommand: show the allowed page range."""
        pageChecker = rcm_value.get()
        lastPage = [250, 250]
        cre_spin_label.config(text=u"최소 수치는 1\n최대 수치는 {}입니다".format(lastPage[pageChecker]))
        cre_spin_label.place(x=380, y=63)
    cre_validate_command = (frame4.register(cre_yazzal_spin_check), "%P")
    cre_invalid_command = (frame4.register(cre_yazzal_spin_error), "%P")
    cre_yazzal_spinbox = Spinbox(frame4, from_=1, to=250, validate='all',
                                 validatecommand=cre_validate_command,
                                 invalidcommand=cre_invalid_command)
    cre_yazzal_spinbox.place(x=220, y=73)
    def cre_yazzal_spinbox_value():
        return cre_yazzal_spinbox.get()
    def cre_yazzal_error():
        msgbox.showwarning(u"경고", u"다운로드 폴더 경로를 지정하세요.")
    def cre_dir_btncmd():
        """Ask the user for a target folder and show it in the listbox."""
        cre_down_listbox.delete(0, END)
        dir_path = filedialog.askdirectory(parent=root, initialdir="/", title=u"다운로드 파일을 저장할 폴더를 선택하세요")
        # askdirectory() returns an empty string (not None) when the user
        # cancels; the old `is None` check inserted that empty string.
        if dir_path:
            cre_down_listbox.insert(END, "{}".format(dir_path))
            cre_down_listbox.place(x=80, y=123)
    def cre_down_alarm():
        msgbox.showwarning(u"작업완료", u"다운로드가 완료되었습니다.")
    def cre_down_btncmd():
        """Download the selected number of pages into the chosen folder."""
        cre_yazzal_page = int(cre_yazzal_spinbox_value())
        try:
            dir_path = cre_down_listbox.get(0, END)[0]
            # Guard against empty/degenerate paths left in the listbox.
            if len(dir_path) == 0 or len(dir_path) == 1:
                cre_yazzal_error()
            else:
                # Mode 0 selects this board in core_module.
                cm.created_img_download(dir_path, cre_yazzal_page, rcm_value.get(), 0)
                cre_down_alarm()
        except IndexError:
            # No folder chosen yet.
            cre_yazzal_error()
    cre_directory_btn = Button(frame4, width=10, text=u"경로지정", overrelief="solid", command=cre_dir_btncmd)
    cre_down_listbox = Listbox(frame4, selectmode=u"single", height=0, width=40)
    cre_down_btn = Button(frame4, width=10, text=u"다운로드 시작", overrelief="solid", command=cre_down_btncmd)
    cre_down_listbox.place(x=80, y=123)
    cre_directory_btn.place(x=380, y=120)
    cre_down_btn.place(x=480, y=120)
creativeObject()
########################################################################################################################
### info
########################################################################################################################
def git_callback(event):
    """Open the URL shown on the clicked label in the default browser."""
    webbrowser.open_new(event.widget.cget("text"))
# Info tab: copyright notice plus a clickable GitHub link.
Label(frame5, text="Copyright 2021. vitus9988 All Rights Reserved.").pack(side="bottom")
git_label = Label(frame5, text=r"https://github.com/vitus9988", fg="blue", cursor="hand2")
git_label.pack(side="bottom")
git_label.bind("<Button-1>", git_callback)
# NOTE(review): pack() returns None, so thanks_to is always None.
thanks_to = Label(frame5, text=r'Thanks To SANIC , 섭섭맨').pack(side='bottom')
root.mainloop()
# Build with: pyinstaller -F core_gui.spec
"""
The qiprofile clinical Mongodb data model.
"""
import re
import math
import mongoengine
from mongoengine import (fields, ValidationError)
from .. import choices
from .common import (Encounter, Outcome, TumorExtent)
# Shared (value, display) Boolean choice lists used by clinical fields below.
POS_NEG_CHOICES = [(True, 'Positive'), (False, 'Negative')]
"""The Boolean choices for Positive/Negative display values."""
YES_NO_CHOICES = [(True, 'Yes'), (False, 'No')]
"""The Boolean choices for Yes/No display values."""
class Agent(mongoengine.EmbeddedDocument):
    """A treatment agent, e.g. drug or radiation."""
    # Subclasses (Drug, Radiation, OtherAgent) are stored polymorphically.
    meta = dict(allow_inheritance=True)
class Drug(Agent):
    """A pharmacological treatment agent."""
    name = fields.StringField(required=True)
    """The official listed drug name."""
class Radiation(Agent):
    """A radiological treatment agent."""
    BEAM_TYPES = ['photon', 'proton', 'electron', 'neutron', 'carbon']
    """
    The radiation beam type controlled values.
    """
    beam_type = fields.StringField(choices=BEAM_TYPES)
class OtherAgent(Agent):
    """A treatment agent that is neither a drug nor radiation."""
    name = fields.StringField(required=True)
class Dosage(mongoengine.EmbeddedDocument):
    """The agent dosage."""
    # Dosage records are embedded in Treatment.dosages.
    agent = fields.EmbeddedDocumentField(Agent, required=True)
    """The administered Drug or Radiation."""
    amount = fields.FloatField(required=True)
    """
    The cumulative amount of the agent administered over the
    course of the duration, normalized by weight.
    For chemotherapy, the field unit is milligrams per kilogram
    (mg/kg).
    For radiotherapy, the field unit is Greys per kilogram (Gy/kg).
    Radiation fractions and daily chemotherapy dosages are not tracked.
    """
    start_date = fields.DateTimeField()
    """The first date the agent is administered."""
    duration = fields.IntField()
    """
    The span in days during which the agent is administered.
    """
class Treatment(mongoengine.EmbeddedDocument):
    """
    The patient therapy, e.g. adjuvant. Treatment is one of
    the :const:`Treatment.TYPE_CHOICES` types, and occurs over
    a period of time. The treatment consists of dosages, which
    may be pharmocological or radiological.
    """
    TYPE_CHOICES = ('Neoadjuvant', 'Primary', 'Adjuvant')
    # When the treatment occurs relative to the primary intervention.
    treatment_type = fields.StringField(choices=TYPE_CHOICES)
    start_date = fields.DateTimeField(required=True)
    end_date = fields.DateTimeField(required=True)
    # The pharmacological/radiological dosages administered over the course.
    dosages = fields.ListField(
        field=mongoengine.EmbeddedDocumentField(Dosage)
    )
class Grade(mongoengine.EmbeddedDocument):
    """
    The abstract tumor grade superclass, specialized for each
    tumor type.
    """
    # Concrete grades (e.g. ModifiedBloomRichardsonGrade, FNCLCCGrade)
    # are stored polymorphically.
    meta = dict(allow_inheritance=True)
class ModifiedBloomRichardsonGrade(Grade):
    """
    The `Modified Bloom Richardson <http://pathology.jhu.edu/breast/grade.php>`_
    (a.k.a. Nottingham) breast tumor grade.
    """
    # Each component is scored 1-3 (range(1, 4) yields 1, 2, 3).
    COMPONENT_CHOICES = range(1, 4)
    tubular_formation = fields.IntField(choices=COMPONENT_CHOICES)
    nuclear_pleomorphism = fields.IntField(choices=COMPONENT_CHOICES)
    mitotic_count = fields.IntField(choices=COMPONENT_CHOICES)
class FNCLCCGrade(Grade):
    """
    The `FNCLCC <http://www.iarc.fr/en/publications/pdfs-online/pat-gen/bb5/bb5-classifsofttissue.pdf>`_
    sarcoma tumor grade."""
    # Component scores: differentiation 1-3, necrosis 0-2, mitotic count 1-3.
    differentiation = fields.IntField(choices=range(1, 4))
    necrosis_score = fields.IntField(choices=range(0, 3))
    mitotic_count = fields.IntField(choices=range(1, 4))
def necrosis_percent_as_score(necrosis_percent):
    """
    Calculates the necrosis score from the necrosis percent
    according to the
    `Stanford Synovial Sarcoma Guideline<http://surgpathcriteria.stanford.edu/softmisc/synovial_sarcoma/grading.html>`
    as follows:
    * If the percent is None, then None
    * Otherwise, if the percent is 0, then 0
    * Otherwise, if the percent is less than 50, then 1
    * Otherwise, 2
    :param necrosis_percent: the integer percent,
        :class:`NecrosisPercentValue` or :class:`NecrosisPercentRange`
    :return: the necrosis score
    :raise ValidationError: if the percent is a range that spans 50%
    """
    if necrosis_percent is None:
        return None
    # Wrap a simple integer as a trivial [n, n+1) range.
    if isinstance(necrosis_percent, int):
        necrosis_range = NecrosisPercentRange(
            start=NecrosisPercentRange.LowerBound(value=necrosis_percent),
            stop=NecrosisPercentRange.UpperBound(value=necrosis_percent + 1)
        )
    # Convert a value to a trivial range for convenience.
    elif isinstance(necrosis_percent, NecrosisPercentValue):
        necrosis_range = NecrosisPercentRange(
            start=NecrosisPercentRange.LowerBound(value=necrosis_percent.value),
            stop=NecrosisPercentRange.UpperBound(value=necrosis_percent.value + 1)
        )
    elif isinstance(necrosis_percent, NecrosisPercentRange):
        necrosis_range = necrosis_percent
    else:
        raise ValidationError("Necrosis percent type is not supported: %s" %
                              necrosis_percent.__class__)
    if necrosis_range.stop.value == 1:
        # Only the trivial [0, 1) range means an exact 0 percent.
        return 0
    elif necrosis_range.stop.value <= 50:
        return 1
    elif necrosis_range.start.value >= 50:
        return 2
    else:
        # A range spanning 50% is ambiguous. The %s placeholder was missing
        # here before, which made this raise TypeError instead of the
        # intended ValidationError.
        raise ValidationError("The necrosis percent score cannot be"
                              " determined from the range %s" % necrosis_range)
class NecrosisPercent(Outcome):
    """The necrosis percent value or range."""
    # Subclasses (NecrosisPercentValue, NecrosisPercentRange) are stored
    # polymorphically.
    meta = dict(allow_inheritance=True)
class NecrosisPercentValue(NecrosisPercent):
    """The necrosis percent absolute value."""
    # An integer percentage in 0..100.
    value = fields.IntField(choices=range(0, 101))
class NecrosisPercentRange(NecrosisPercent):
    """
    The necrosis percent range.
    :Note: it is recommended, although not required, that the percent
        range is a decile range, e.g. [20-30].
    :Note: A range which spans 50%, e.g. [40-60], results in a
        :meth:`necrosis_percent_as_score` ValidationError.
    """
    class Bound(mongoengine.EmbeddedDocument):
        """
        Necrosis percent upper or lower bound abstract class.
        The subclass is responsible for adding the ``inclusive``
        field.
        """
        meta = dict(allow_inheritance=True)
        # An integer percentage in 0..100.
        value = fields.IntField(choices=range(0, 101))

    class LowerBound(Bound):
        """Necrosis percent lower bound (inclusive by default)."""
        inclusive = fields.BooleanField(default=True)

    class UpperBound(Bound):
        """Necrosis percent upper bound (exclusive by default)."""
        inclusive = fields.BooleanField(default=False)

    start = fields.EmbeddedDocumentField(LowerBound)
    stop = fields.EmbeddedDocumentField(UpperBound)

    def __repr__(self):
        # Interpolate the bounds' integer values; previously the embedded
        # Bound documents themselves were passed to %d, raising TypeError.
        return "%d-%d" % (self.start.value, self.stop.value)
class TNM(Outcome):
    """
    The TNM tumor staging. The TNM fields are as follows:

    * size - primary tumor size (T)
    * lymph_status - regional lymph nodes (N)
    * metastasis - distant metastasis (M)
    * grade - tumor grade (G)
    * serum_tumor_markers (S)
    * resection_boundaries (R)
    * lymphatic_vessel_invasion (L)
    * vein_invasion (V)

    The size is an aggregate Size field.
    See http://www.cancer.gov/cancertopics/factsheet/detection/staging for
    an overview. See http://en.wikipedia.org/wiki/TNM_staging_system and
    http://cancerstaging.blogspot.com/ for the value definition.

    :Note: The size and lymph_status choices can be further constrained by
        tumor type. Since :class:`TNM` is a generic class, these constraints
        are not enforced in this TNM class. Rather, the REST client is
        responsible for enforcing additional choice constraints. The
        :meth:`TNM.lymph_status_choices` helper method can be used for
        tumor type specific choices. See :class:`TNM.Size`` for a discussion
        of the size constraints.
    """

    class Size(mongoengine.EmbeddedDocument):
        """
        The TNM primary tumor size field.

        :Note: The size score choices can be further constrained by tumor
            type. For example, the sarcoma tumor_size choices are 0, 1 or 2
            and suffix choices are ``a`` or ``b``. See :class:`TNM` for a
            discussion of choice constraints. The :meth:`TNM.Size.tumor_size_choices`
            and :meth:`TNM.Size.suffix_choices` helper methods can be used for
            tumor type specific choices.
        """

        # Valid T-stage prefix modifiers, e.g. 'p' for pathologic staging.
        PREFIXES = ['c', 'p', 'y', 'r', 'a', 'u']

        # All possible suffix modifiers.
        SUFFIXES = ['a', 'b', 'c']

        # Per-tumor-type suffix constraints; 'Any' is the generic fallback.
        SUFFIX_CHOICES = dict(
            Any=['a', 'b', 'c'],
            Sarcoma=['a', 'b']
        )

        # Per-tumor-type size score constraints; 'Any' is the generic fallback.
        TUMOR_SIZE_CHOICES = dict(
            Any=range(0, 5),
            Sarcoma=range(0, 3)
        )

        SIZE_PAT = """
            ^(
                (?P<prefix>c|p|y|r|a|u)?  # The prefix modifier
                T)?                       # The size designator
            (x |                          # Size cannot be evaluated
             (?P<in_situ>is) |            # Carcinoma in situ
             ((?P<tumor_size>0|1|2|3|4)   # The size
              (?P<suffix>a|b|c)?          # The suffix modifier
             )
            )$
        """
        """
        The tumor size pattern.

        Examples:

        * ``T3``
        * ``pT2`` - pathology prefix
        * ``T3a`` - ``a``, ``b`` or ``c`` suffix modifier is allowed
        * ``3a`` - ``T`` prefix is optional for the size alone
        * ``Tx`` - tumor size cannot be evaluated
        * ``Tis`` - in situ
        """

        SIZE_REGEX = re.compile(SIZE_PAT, re.VERBOSE)
        """The :const:`SIZE_PAT` pattern regular expression."""

        @staticmethod
        def tumor_size_choices(tumor_type=None):
            """
            :param tumor_type: the optional tumor type, e.g. ``Breast``
            :return: the tumor_size choices for the given type
            """
            # Unknown tumor types fall back to the generic choices.
            if tumor_type not in TNM.Size.TUMOR_SIZE_CHOICES:
                tumor_type = 'Any'
            return TNM.Size.TUMOR_SIZE_CHOICES[tumor_type]

        @staticmethod
        def suffix_choices(tumor_type=None):
            """
            :param tumor_type: the optional tumor type, e.g. ``Breast``
            :return: the suffix choices for the given type
            """
            # Unknown tumor types fall back to the generic choices.
            if tumor_type not in TNM.Size.SUFFIX_CHOICES:
                tumor_type = 'Any'
            return TNM.Size.SUFFIX_CHOICES[tumor_type]

        prefix = fields.StringField(choices=PREFIXES)

        tumor_size = fields.IntField(choices=TUMOR_SIZE_CHOICES['Any'])

        class InSitu(mongoengine.EmbeddedDocument):
            INVASIVE_TYPE_CHOICES = ('ductal', 'lobular')
            """
            The advisory invasive types list. The client should constraion the invasive
            type choices to this list where possible, but allow for free-form text where
            necessary.
            """

            # Free-form, not restricted to INVASIVE_TYPE_CHOICES (see above).
            invasive_type = fields.StringField()

        in_situ = fields.EmbeddedDocumentField(InSitu)

        suffix = fields.StringField(choices=SUFFIX_CHOICES['Any'])

        def __str__(self):
            # Render the canonical T-stage string, e.g. 'pT3a' or 'Tis'.
            prefix = self.prefix or ''
            suffix = self.suffix or ''
            if self.in_situ:
                size = 'is'
            elif self.tumor_size:
                size = str(self.tumor_size)
            else:
                # NOTE(review): a tumor_size of 0 is falsy and therefore
                # renders as 'x' (unevaluable) -- confirm that is intended.
                size = 'x'
            return "%sT%s%s" % (prefix, size, suffix)

        @classmethod
        def parse(klass, value):
            """
            Parses the given string into a new Size. The size must match
            the :const:`SIZE_REGEX` regular expression.

            :param value: the input string
            :return: the new Size object
            :raise ValidationError: it the size value string does not
                match :const:`SIZE_REGEX`
            """
            match = klass.SIZE_REGEX.match(value)
            if not match:
                raise ValidationError("TNM Size value is not supported:"
                                      " %s" % value)
            # The named pattern groups map directly onto the field
            # keywords (prefix, in_situ, tumor_size, suffix).
            return klass(**match.groupdict())

        def clean(self):
            """
            Peforms document-level validation.

            :raise ValidationError: if the in_situ flag is set but there
                is a tumor_size or suffix field
            """
            # In situ is mutually exclusive with a numeric size or suffix.
            if self.in_situ:
                if self.tumor_size != None:
                    raise ValidationError("TNM Size with in_situ flag set to"
                                          " True cannot have tumor_size %d" %
                                          self.tumor_size)
                if self.suffix != None:
                    raise ValidationError("TNM Size with in_situ flag set to"
                                          " True cannot have a suffix %s" %
                                          self.suffix)
            return True

    # Per-tumor-type N-stage constraints; 'Any' is the generic fallback.
    LYMPH_STATUS_CHOICES = dict(
        Any=range(0, 4),
        Sarcoma=range(0, 2)
    )

    tumor_type = fields.StringField(required=True)

    size = fields.EmbeddedDocumentField(Size)

    # TODO - make lymph status an aggregate with suffix modifiers,
    # including 'mi'.
    lymph_status = fields.IntField(choices=LYMPH_STATUS_CHOICES['Any'])

    metastasis = fields.BooleanField(choices=POS_NEG_CHOICES)

    grade = fields.EmbeddedDocumentField(Grade)

    # Serum tumor marker level (S) in the inclusive range [0, 3].
    serum_tumor_markers = fields.IntField(choices=range(0, 4))

    # Resection boundary status (R) in the inclusive range [0, 2].
    resection_boundaries = fields.IntField(choices=range(0, 3))

    lymphatic_vessel_invasion = fields.BooleanField(choices=POS_NEG_CHOICES)

    # Vein invasion status (V) in the inclusive range [0, 2].
    vein_invasion = fields.IntField(choices=range(0, 3))

    @staticmethod
    def lymph_status_choices(tumor_type=None):
        """
        :param tumor_type: the optional tumor type, e.g. ``Breast``
        :return: the lymph_status choices for the given type
        """
        # Unknown tumor types fall back to the generic choices.
        if tumor_type not in TNM.LYMPH_STATUS_CHOICES:
            tumor_type = 'Any'
        return TNM.LYMPH_STATUS_CHOICES[tumor_type]
class HormoneReceptorStatus(Outcome):
    """The patient estrogen/progesterone hormone receptor status."""

    class IntensityField(fields.IntField):
        # NOTE(review): mongoengine custom fields are expected to report
        # validation errors (e.g. via self.error()) rather than return a
        # boolean; this return value is likely ignored, so the (0, 100]
        # constraint may not be enforced -- confirm against the
        # mongoengine version in use.
        def validate(self, value, clean=True):
            return value > 0 and value <= 100

    # The hormone name, e.g. 'estrogen'.
    hormone = fields.StringField(required=True)

    positive = fields.BooleanField(choices=YES_NO_CHOICES)

    # Quick score in the inclusive range [0, 8].
    quick_score = fields.IntField(choices=range(0, 9))

    # Staining intensity percent; intended range (0, 100] per
    # IntensityField above.
    intensity = IntensityField()
class BreastNormalizedAssayField(fields.IntField):
    """
    The normalized Breast genomics result in the inclusive range [0, 15].
    """

    # NOTE(review): the docstring advertises the inclusive range [0, 15],
    # but this check excludes 0. Also, mongoengine validate() normally
    # reports errors rather than returning a boolean, so this return
    # value is likely ignored -- confirm.
    def validate(self, value, clean=True):
        return value > 0 and value <= 15
class BreastNormalizedAssay(mongoengine.EmbeddedDocument):
    """The Breast genomics panel normalized to reference genes."""

    # Each nested class groups related gene assay scores; every field is
    # a BreastNormalizedAssayField score.
    class HER2(mongoengine.EmbeddedDocument):
        grb7 = BreastNormalizedAssayField()
        her2 = BreastNormalizedAssayField()

    class Estrogen(mongoengine.EmbeddedDocument):
        er = BreastNormalizedAssayField()
        pgr = BreastNormalizedAssayField()
        bcl2 = BreastNormalizedAssayField()
        scube2 = BreastNormalizedAssayField()

    class Proliferation(mongoengine.EmbeddedDocument):
        ki67 = BreastNormalizedAssayField()
        stk15 = BreastNormalizedAssayField()
        survivin = BreastNormalizedAssayField()
        ccnb1 = BreastNormalizedAssayField()
        mybl2 = BreastNormalizedAssayField()

    class Invasion(mongoengine.EmbeddedDocument):
        mmp11 = BreastNormalizedAssayField()
        ctsl2 = BreastNormalizedAssayField()
        gstm1 = BreastNormalizedAssayField()
        cd68 = BreastNormalizedAssayField()
        bag1 = BreastNormalizedAssayField()

    her2 = fields.EmbeddedDocumentField(HER2)

    estrogen = fields.EmbeddedDocumentField(Estrogen)

    proliferation = fields.EmbeddedDocumentField(Proliferation)

    invasion = fields.EmbeddedDocumentField(Invasion)
class BreastGeneticExpression(Outcome):
    """The breast patient genetic expression results."""

    HER2_NEU_IHC_CHOICES = [(0, '0'), (1, '1+'), (2, '2+'), (3, '3+')]
    """The HER2 NEU IHC choices are displayed as 0, 1+, 2+, 3+."""

    class KI67Field(fields.IntField):
        # NOTE(review): mongoengine validate() normally reports errors
        # rather than returning a boolean; this return value is likely
        # ignored, so [0, 100] may not be enforced -- confirm.
        def validate(self, value, clean=True):
            return value >= 0 and value <= 100

    her2_neu_ihc = fields.IntField(choices=HER2_NEU_IHC_CHOICES)

    her2_neu_fish = fields.BooleanField(choices=POS_NEG_CHOICES)

    # Ki-67 percent; intended range [0, 100] per KI67Field above.
    ki67 = KI67Field()

    normalized_assay = fields.EmbeddedDocumentField(BreastNormalizedAssay)
class Evaluation(mongoengine.EmbeddedDocument):
    """The patient evaluation holds outcomes."""

    # Allow subclassing, e.g. PathologyReport.
    meta = dict(allow_inheritance=True)
class TumorLocation(mongoengine.EmbeddedDocument):
    """The tumor body part and directional orientation."""

    SAGITTAL_CHOICES = ('Left', 'Right')

    CORONAL_CHOICES = ('Anterior', 'Posterior')

    body_part = fields.StringField()
    """
    The capitalized body part, e.g. ``Thigh``.
    This field is only required when the tumor type is not localized
    to a body part, e.g. sarcoma.
    """

    # Left/Right orientation relative to the sagittal plane.
    sagittal_location = fields.StringField(choices=SAGITTAL_CHOICES)

    # Anterior/Posterior orientation relative to the coronal plane.
    coronal_location = fields.StringField(choices=CORONAL_CHOICES)
class TumorPathology(mongoengine.EmbeddedDocument):
    """The tumor-specific pathology."""

    # Allow tumor-type-specific subclasses (BreastPathology,
    # SarcomaPathology).
    meta = dict(allow_inheritance=True)

    location = fields.EmbeddedDocumentField(TumorLocation)

    tnm = fields.EmbeddedDocumentField(TNM)

    extent = fields.EmbeddedDocumentField(TumorExtent)
    """The primary tumor bed volume measured by the pathologist."""
class PathologyReport(Evaluation):
    """The patient pathology report findings."""

    tumors = fields.ListField(fields.EmbeddedDocumentField(TumorPathology))
    """
    The tumor pathology findings. The tumors list order is
    the same as the :class:`qirest-client.model.imaging.Scan`
    ``rois`` list order. The most significant tumor is preferably
    listed first.
    """
class ResidualCancerBurden(mongoengine.EmbeddedDocument):
    """The residual cancer burden after neodjuvant treatment."""

    tumor_cell_density = fields.IntField()
    """The primary tumor bed cancer cellularity percent."""

    dcis_cell_density = fields.IntField()
    """
    The in situ (DCIS) cancer cellularity percent of the primary tumor
    bed. (It is consumed as the in situ component of
    :meth:`BreastPathology.rcb_index`; the previous description,
    "invasive carcinoma", contradicted that usage.)
    """

    positive_node_count = fields.IntField()
    """The number of metastasized axillary lymph nodes."""

    total_node_count = fields.IntField()
    """The total number of axillary lymph nodes."""

    largest_nodal_metastasis_length = fields.IntField()
    """The diameter of the largest axillary lymph node metastasis."""
class BreastPathology(TumorPathology):
    """The breast patient pathology summary."""

    # One receptor status entry per hormone.
    hormone_receptors = fields.ListField(
        field=mongoengine.EmbeddedDocumentField(HormoneReceptorStatus)
    )

    genetic_expression = fields.EmbeddedDocumentField(BreastGeneticExpression)

    rcb = fields.EmbeddedDocumentField(ResidualCancerBurden)

    def rcb_index(self):
        """
        Returns the RCB index per
        `JCO 25:28 4414-4422 <http://jco.ascopubs.org/content/25/28/4414.full>`_.

        :Note: ``extent`` (length, width) and all the ``rcb`` fields read
            below must be populated; a missing (None) value raises a
            TypeError here, so callers should validate first.
        """
        # The bidimensional tumor size metric.
        size = math.sqrt(self.extent.length * self.extent.width)
        # The overall tumor cellularity.
        overall = float(self.rcb.tumor_cell_density) / 100
        # The in situ cellularity.
        in_situ = float(self.rcb.dcis_cell_density) / 100
        # The invasive carcinoma proportion.
        invasion = (1 - in_situ) * overall
        # The RCB index invasion component.
        invasion_factor = 1.4 * math.pow(invasion * size, 0.17)
        # The RCB index positive node component.
        pos_node_factor = 1 - math.pow(0.75, self.rcb.positive_node_count)
        # The base of the RCB index node component.
        node_base = 4 * pos_node_factor * self.rcb.largest_nodal_metastasis_length
        # The RCB index node component.
        node_factor = math.pow(node_base, 0.17)
        # The RCB index is the sum of the invasion and node components.
        return invasion_factor + node_factor

    def rcb_class(self, rcb_index):
        """
        Returns the RCB class per the cut-offs defined in
        `JCO 25:28 4414-4422 <http://jco.ascopubs.org/content/25/28/4414.full>`_.

        :param rcb_index: the :meth:`rcb_index` value
        :return: 0 (only for an index of exactly 0), 1 (index < 1.36),
            2 (index < 3.28) or 3 (otherwise)
        """
        if rcb_index == 0:
            return 0
        elif rcb_index < 1.36:
            return 1
        elif rcb_index < 3.28:
            return 2
        else:
            return 3
class SarcomaPathology(TumorPathology):
    """The sarcoma patient pathology summary."""

    HISTOLOGY_CHOICES = ('Carcinosarcoma', 'Cerebellar', 'Chondrosarcoma',
                         'Clear Cell', 'Dermatofibrosarcoma', 'Fibrosarcoma',
                         'Leiomyosarcoma', 'Liposarcoma', 'MFH', 'MPNST',
                         'Osteosarcoma', 'Rhabdomyosarcoma', 'Synovial', 'Other')
    """The histology controlled values."""

    histology = fields.StringField(choices=HISTOLOGY_CHOICES)

    # Either a NecrosisPercentValue or a NecrosisPercentRange subclass.
    necrosis_percent = fields.EmbeddedDocumentField(NecrosisPercent)
class Biopsy(Encounter):
    """
    Non-therapeutic tissue extraction resulting in a pathology report.
    """

    # A biopsy always yields a pathology report (required, unlike
    # Surgery.pathology).
    pathology = fields.EmbeddedDocumentField(PathologyReport, required=True)
class Surgery(Encounter):
    """
    Therapeutic tissue extraction which usually results in a pathology report.
    """

    # Allow surgery-type subclasses, e.g. BreastSurgery.
    meta = dict(allow_inheritance=True)

    # Optional, since a surgery only "usually" results in a report.
    pathology = fields.EmbeddedDocumentField(PathologyReport)
class BreastSurgery(Surgery):
    """Breast tumor extraction."""

    TYPE_CHOICES = ('Total Mastectomy', 'Partial Mastectomy', 'Lumpectomy')
    """The surgery type controlled values."""

    surgery_type = fields.StringField(choices=TYPE_CHOICES)
|
import tensorflow as tf
class LabelMap(object):
    """Bidirectional character <-> integer-label mapping backed by
    TensorFlow lookup tables.

    Character labels start at ``label_offset``; labels below the offset
    are presumably reserved (e.g. for blank/padding symbols) -- confirm
    against the decoder that consumes these labels. Uses
    ``tf.contrib.lookup``, so this is TensorFlow 1.x-only code.
    """

    def __init__(self,
                 character_set=None,
                 label_offset=2,
                 ignore_case=True,
                 unk_label=None):
        # Default alphabet: lower-case letters plus digits.
        if character_set is None:
            character_set = list('abcdefghijklmnopqrstuvwxyz1234567890')
        if not isinstance(character_set, list):
            raise ValueError('character_set must be provided as a list')
        if len(frozenset(character_set)) != len(character_set):
            raise ValueError('Found duplicate characters in character_set')
        self._character_set = character_set
        self._label_offset = label_offset
        # NOTE(review): `or` makes an explicit unk_label of 0 fall back to
        # label_offset -- confirm 0 is never a legitimate UNK label.
        self._unk_label = unk_label or self._label_offset
        # NOTE(review): ignore_case is stored but never used in this class.
        self._ignore_case = ignore_case
        print('Number of classes is {}'.format(self.num_classes))
        print('UNK label is {}'.format(self._unk_label))
        self._char_to_label_table, self._label_to_char_table = self._build_lookup_tables()

    @property
    def num_classes(self):
        # Number of mapped characters (does not count the reserved
        # labels below label_offset).
        return len(self._character_set)

    def _build_lookup_tables(self):
        """Build the char->label and label->char hash tables."""
        chars = self._character_set
        # Labels occupy [label_offset, label_offset + num_classes).
        labels = list(range(self._label_offset, self._label_offset + self.num_classes))
        # Unknown characters map to the UNK label.
        char_to_label_table = tf.contrib.lookup.HashTable(
            tf.contrib.lookup.KeyValueTensorInitializer(
                chars, labels, key_dtype=tf.string, value_dtype=tf.int64),
            default_value=self._unk_label
        )
        # Unknown labels map to the empty string.
        label_to_char_table = tf.contrib.lookup.HashTable(
            tf.contrib.lookup.KeyValueTensorInitializer(
                labels, chars, key_dtype=tf.int64, value_dtype=tf.string),
            default_value=""
        )
        return char_to_label_table, label_to_char_table

    def text_to_labels(self,
                       text,
                       return_dense=True,
                       pad_value=-1,
                       return_lengths=False):
        """Convert text strings to label sequences.

        Args:
            text: ascii encoded string tensor with shape [batch_size]
            return_dense: whether to return dense labels
            pad_value: Value used to pad labels to the same length.
            return_lengths: if True, also return text lengths

        Returns:
            labels: sparse or dense tensor of labels
            (and, if return_lengths, an int tensor of per-string lengths)
        """
        batch_size = tf.shape(text)[0]
        # Split every string into its individual characters.
        chars = tf.string_split(text, sep='')
        labels_sp = tf.SparseTensor(
            chars.indices,
            self._char_to_label_table.lookup(chars.values),
            chars.dense_shape
        )
        if return_dense:
            labels = tf.sparse.to_dense(labels_sp, default_value=pad_value)
        else:
            labels = labels_sp
        if return_lengths:
            # Count characters per string by summing a sparse tensor of ones.
            text_lengths = tf.sparse_reduce_sum(
                tf.SparseTensor(
                    chars.indices,
                    tf.fill([tf.shape(chars.indices)[0]], 1),
                    chars.dense_shape
                ),
                axis=1
            )
            text_lengths.set_shape([None])
            return labels, text_lengths
        else:
            return labels

    def labels_to_text(self, labels):
        """Convert labels to text strings.

        Args:
            labels: int32 tensor with shape [batch_size, max_label_length]

        Returns:
            text: string tensor with shape [batch_size]
        """
        # Lookup keys are int64; accept int32/int64 and reject anything else.
        if labels.dtype == tf.int32 or labels.dtype == tf.int64:
            labels = tf.cast(labels, tf.int64)
        else:
            raise ValueError('Wrong dtype of labels: {}'.format(labels.dtype))
        chars = self._label_to_char_table.lookup(labels)
        # Unknown labels become '' and disappear from the joined string.
        text = tf.reduce_join(chars, axis=1)
        return text
def test_label_map():
    """Smoke-test LabelMap round-tripping in a TF1 session.

    Prints the label tensor, per-string lengths, a sequence mask, and
    the text recovered from the labels for a fixed batch of strings.
    """
    label_map_obj = LabelMap()
    # Lookup tables require tables_initializer in TF1.
    init_op = tf.group(
        tf.global_variables_initializer(), tf.local_variables_initializer(),
        tf.tables_initializer(),
    )
    test_string_tensor = tf.constant(['test', 'value', 'discombobulated', 'Chronographs', 'Chronographs'], dtype=tf.string)
    label_tensor, label_length_tensor = label_map_obj.text_to_labels(test_string_tensor, return_lengths=True)
    max_num_step = tf.reduce_max(label_length_tensor)
    label_length_mask_tensor = tf.cast(tf.sequence_mask(label_length_tensor, max_num_step), tf.float32)
    text_tensor = label_map_obj.labels_to_text(label_tensor)
    sess = tf.Session()
    sess.run(init_op)
    print('test_string', sess.run(test_string_tensor))
    print('label_tensor', sess.run(label_tensor))
    print('label_length', sess.run(label_length_tensor))
    print('label_length_mask', sess.run(label_length_mask_tensor))
    print('text_tensor', sess.run(text_tensor))
    sess.close()


if __name__ == '__main__':
    test_label_map()
<reponame>movermeyer/SeqFindR
# Copyright 2013-2014 <NAME>-Cook Licensed under the
# Educational Community License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.osedu.org/licenses/ECL-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
SeqFindr utility methods
"""
import os
import sys
import re
from Bio import SeqIO
def ensure_paths_for_args(args):
    """
    Ensure all arguments with paths are absolute & have simplification removed

    Just apply os.path.abspath & os.path.expanduser

    :param args: the arguments given from argparse

    :returns: an updated args
    """
    def _canonical(path):
        # Expand '~' first, then resolve to an absolute path.
        return os.path.abspath(os.path.expanduser(path))

    # These two are always present and always rewritten.
    args.seqs_of_interest = _canonical(args.seqs_of_interest)
    args.assembly_dir = _canonical(args.assembly_dir)
    # The remaining path arguments are optional; only rewrite when set.
    for attr in ('output', 'cons', 'index_file', 'existing_data'):
        value = getattr(args, attr)
        if value is not None:
            setattr(args, attr, _canonical(value))
    return args
def init_output_dirs(output_dir):
    """
    Create the output base (if needed) and change dir to it

    :param output_dir: the output directory (None keeps the current
                       working directory)

    :returns: the working directory at the time of the call
    """
    current_dir = os.getcwd()
    # Move into the requested output directory, creating it on demand.
    if output_dir is not None:
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        else:
            sys.stderr.write("Output directory exists\n")
        os.chdir(output_dir)
    # Best-effort creation of the working subdirectories; existing ones
    # are reported on stderr and reused.
    for subdir, warning in (
            ("DBs", "A DBs directory exists. Overwriting\n"),
            ("BLAST_results", "A BLAST_results directory exists.")):
        try:
            os.mkdir(subdir)
        except OSError:
            sys.stderr.write(warning)
    return current_dir
def get_fasta_files(data_path):
    """
    Returns all files ending with .fas/.fa/fna in a directory

    :param data_path: the full path to the directory of interest

    :returns: a list of fasta files (valid extensions: .fas, .fna, .fa,
              .fasta), in os.listdir order
    """
    # str.endswith accepts a tuple, so one test covers all extensions.
    fasta_extensions = (".fas", ".fna", ".fa", ".fasta")
    return [os.path.join(data_path, entry)
            for entry in os.listdir(data_path)
            if entry.endswith(fasta_extensions)]
def order_inputs(order_index_file, dir_listing):
    """
    Given an order index file, maintain this order in the matrix plot

    **This implies no clustering.** Typically used when you already have
    a phlogenetic tree.

    :param order_index_file: full path to a ordered file (1 entry per line)
    :param dir_listing: a listing from util.get_fasta_files

    :type order_index_file: string
    :type dir_listing: list

    :rtype: list of updated glob.glob dir listing to match order specified
    """
    with open(order_index_file) as fin:
        lines = fin.readlines()
    if len(lines) != len(dir_listing):
        # Converted from Python 2 print statements (SyntaxError under
        # Python 3); output is otherwise unchanged.
        print(len(lines), len(dir_listing))
        sys.stderr.write("In order_inputs(). Length mismatch\n")
        sys.exit(1)
    ordered = []
    for l in lines:
        cord = l.strip()
        for d in dir_listing:
            # The identifier is the basename up to the first '_'
            # (or the first '.' when there is no underscore).
            tmp = os.path.basename(d.strip())
            if tmp.find('_') == -1:
                cur = tmp.split('.')[0]
            else:
                cur = tmp.split("_")[0]
            if cur == cord:
                ordered.append(d)
                break
    if len(ordered) != len(dir_listing):
        print(len(ordered))
        print(len(dir_listing))
        sys.stderr.write("In order_inputs(). Not 1-1 matching. Typo?\n")
        sys.stderr.write("In ordered: "+str(ordered)+"\n")
        sys.stderr.write("In dir listing:" + str(dir_listing)+"\n")
        sys.exit(1)
    return ordered
def is_protein(fasta_file):
    """
    Checks if a FASTA file is protein or nucleotide.

    Will return -1 if no protein detected

    TODO: Abiguity characters?
    TODO: exception if mix of protein/nucleotide?

    :param fasta_file: path to input FASTA file
    :type fasta_file: string

    :returns: number of protein sequences in fasta_file (int)
    """
    # NOTE: the counter starts at -1, so the return value is
    # (protein records - 1); -1 therefore means "no protein detected"
    # as documented above.
    protein_hits = -1
    # Fix: the 'rU' (universal newlines) mode was removed in Python
    # 3.11; plain text mode already performs newline translation.
    with open(fasta_file, 'r') as fin:
        for record in SeqIO.parse(fin, 'fasta'):
            # Any character outside the nucleotide alphabet (ATCGN,
            # either case) flags the record as protein.
            if re.match('[^ATCGNatcgn]+', str(record.seq)) is not None:
                protein_hits += 1
    return protein_hits
def check_database(database_file):
    """
    Check the database conforms to the SeqFindr format

    .. note:: this is not particulalry extensive

    :args database_file: full path to a database file as a string
    :type database_file: string

    :raises Exception: on a malformed header line or when the file has
        no fasta headers; exits(1) when categories are not grouped
    """
    at_least_one = 0
    stored_categories = []
    with open(database_file) as db_in:
        for line in db_in:
            if line.startswith('>'):
                at_least_one += 1
                # Do the check: exactly 4 comma-separated fields, the
                # last holding exactly one '[ category ]' marker.
                last = line.split(',')[-1]
                if (len(line.split(',')) != 4 or last.count(']') != 1
                        or last.count('[') != 1):
                    raise Exception("Database is not formatted correctly at this line: " + line)
                else:
                    cur = last.split('[')[-1].split(']')[0].strip()
                    stored_categories.append(cur)
    if at_least_one == 0:
        raise Exception("Database contains no fasta headers")
    # Check that the categories maintain the correct order.
    cat_counts = len(set(stored_categories))
    prev = stored_categories[0]
    # There will always be 1
    detected_cats = 1
    for cat in stored_categories[1:]:
        if cat != prev:
            detected_cats += 1
            prev = cat
    if cat_counts != detected_cats:
        print("Please ensure that your classifications ([ element ]) are "
              "grouped")
        sys.exit(1)
    # Converted from a Python 2 print statement (SyntaxError under
    # Python 3).
    print("SeqFindr database checks [PASSED]")
def del_from_list(target, index_positions):
    """
    Deletes the elements in a list given by a index_positions list

    :param target: a target list to have items removed
    :param index_positions: a list of index positions to be removed from
                            the target list

    :type target: list
    :type index_positions: list

    :returns: a list with the elements removed defined by the index_positions
              list

    :raises ValueError: if target is empty, index_positions is longer
        than target, or any index is negative or out of range
    """
    if target == []:
        raise ValueError("target list must not be empty")
    if len(index_positions) > len(target):
        raise ValueError("target list contains less elements then "
                         "to be removed")
    if not all(x >= 0 for x in index_positions):
        raise ValueError("index_positions need to be positive")
    for e in index_positions:
        if e >= len(target):
            raise ValueError("index_positions > len target list")
    # Delete from the highest index down so earlier deletions cannot
    # shift positions still to be removed. This generalizes the previous
    # offset-compensation loop, which was only correct for ascending,
    # duplicate-free index lists, while preserving its result for
    # sorted unique input; duplicate indexes now delete only once.
    for index in sorted(set(index_positions), reverse=True):
        del target[index]
    return target
|
from os import listdir
from os.path import isfile, join
import sys
import re
import argparse
# Python 3.6+
# relies on dict insertion order
# Map S. cerevisiae roman-numeral chromosome names (as used in the SGD
# features GFF) to arabic-numbered names (as used in the CDT output).
roman2arabic = {"chrI":"chr1","chrII":"chr2","chrIII":"chr3","chrIV":"chr4","chrV":"chr5",
                "chrVI":"chr6","chrVII":"chr7","chrVIII":"chr8","chrIX":"chr9","chrX":"chr10",
                "chrXI":"chr11","chrXII":"chr12","chrXIII":"chr13","chrXIV":"chr14","chrXV":"chr15",
                "chrXVI":"chr16",}
def getParams():
    '''Parse parameters from the command line'''
    parser = argparse.ArgumentParser(
        description='Use pileup information to get a heatmap of each sample\'s coverage at the expected KO site.')
    # Required inputs: the bellplot CDT (row order) and the SGD GFF.
    parser.add_argument('-c', '--cdt', metavar='cdt_fn', dest='cdt_fn',
                        required=True,
                        help='the cdt of bellplot (to determine sort order)')
    parser.add_argument('-g', '--features-gff', metavar='features_gff',
                        dest='features_gff', required=True,
                        help='the featuer GFF file from SGD to get the gene coordinates')
    # Optional window size (bp) centered on each feature midpoint.
    parser.add_argument('-w', '--window', metavar='win_size', dest='window',
                        default=6000, type=int,
                        help='the window size to center around feature range')
    return parser.parse_args()
# chr1 0 1 14
# chr1 1 2 20
# chr1 2 3 25
# chr1 3 5 27
def parse_bedgraph(bg_fn, bed_coord, window=2000):
    """Pile up bedgraph values across a window centered on a BED interval.

    :param bg_fn: path to a bedgraph file (chrom, start, stop, value rows)
    :param bed_coord: (chrom, start, stop) tuple of the feature interval
    :param window: total window size (bp) centered on the interval midpoint

    :returns: a list of per-base values covering the window, with "NaN"
        where the bedgraph provides no coverage
    """
    # Fix: expand_coord() only returns (start, stop); re-attach the
    # chromosome so window_coord is (chrom, start, stop). The previous
    # code indexed the 2-tuple as if it still carried the chromosome,
    # which raised an IndexError and compared the chromosome name
    # against a coordinate.
    window_coord = (bed_coord[0],) + expand_coord(bed_coord, window)
    pileup = ["NaN"] * (window_coord[2] - window_coord[1])
    # 'with' guarantees the handle is closed even on a parse error.
    with open(bg_fn, 'r') as reader:
        for line in reader:
            if line.find('#') == 0:
                continue
            tokens = line.strip().split('\t')
            if tokens[0] != window_coord[0]:
                continue
            # Skip if interval before interval of interest
            elif int(tokens[1]) < window_coord[1] and int(tokens[2]) < window_coord[1]:
                continue
            # Skip if interval after interval of interest
            elif int(tokens[1]) > window_coord[2] and int(tokens[2]) > window_coord[2]:
                continue
            value = float(tokens[3])
            # Fill every base of the bedgraph interval that overlaps
            # the window.
            for local_x in range(int(tokens[1]), int(tokens[2])):
                if local_x >= window_coord[1] and local_x < window_coord[2]:
                    pileup[local_x - window_coord[1]] = value
    return pileup
def parse_gff(gff_fn):
    """Build a locus -> (chrom, start, stop) map from an SGD features GFF.

    Coordinates are converted from 1-based inclusive GFF form to
    0-based half-open intervals, and chromosome names are remapped via
    ``roman2arabic`` ("chrZ" when unknown). mRNA/CDS rows are skipped.
    """
    locus2coord = {}
    with open(gff_fn, 'r') as reader:
        for line in reader:
            # Header/comment lines.
            if line.startswith("#"):
                continue
            # The FASTA section terminates the feature table.
            if line.startswith(">"):
                break
            tokens = line.strip().split('\t')
            if tokens[2] in ["mRNA", "CDS"]:
                continue
            # Pull the gene name out of the attribute column.
            gene_name = ""
            for feature in tokens[8].split(';'):
                if feature.startswith("gene="):
                    gene_name = feature.split('=')[1]
                    break
            locus2coord[gene_name] = (roman2arabic.get(tokens[0], "chrZ"),
                                      int(tokens[3]) - 1, int(tokens[4]))
    return locus2coord
def expand_coord(bed_coord, window):
    """Return the (start, stop) of a ``window``-wide interval centered on
    the midpoint of ``bed_coord`` = (chrom, start, stop).

    Integer division is used throughout, so odd windows round down.
    """
    start, stop = bed_coord[1], bed_coord[2]
    center = start + (stop - start) // 2
    half = window // 2
    return (center - half, center + half)
#STATUS feature_type NOTES KO_SCORE SYS STD TableS1_Deletion TableS1_replicate_id ERS_accession n_hits hit_list hit_scores LEU2_SCORE URA3_SCORE experiment_accession run_accession submission_accession nominal_length read_count base_count first_public nominal_sdev
#PASS ORF-Uncharacterized -6.109952403138639 YAL064C-A TDA8 Del1_TDA8 SD0863b ERS838232 3 LEU2|URA3|TDA8 ND|ND|-6.109952403138639 ND ND ERX1406336 ERR1334744 ERA587837 484 8807338 1329908038 2016-03-22 81
#PASS ORF-Uncharacterized -5.807910468072448 YAL064C-A TDA8 Del1_TDA8 SD0863b2 ERS838233 3 LEU2|URA3|TDA8 ND|ND|-5.807910468072448 ND ND ERX1406337 ERR1334745 ERA587837 484 8996386 1358454286 2016-03-22 81
#FAIL ORF-Verified - YBL091C-A SCS22 Del2_SCS22 SD0864b ERS838234 2 LEU2|URA3 ND|ND ND ND ERX1406338 ERR1334746 ERA587837 484 8710346 1315262246 2016-03-22 81
#FAIL ORF-Verified - YBL091C-A SCS22 Del2_SCS22 SD0864b2 ERS838235 2 LEU2|URA3 ND|ND ND ND ERX1406339 ERR1334747 ERA587837 484 8579514 1295506614 2016-03-22 81
if __name__ == "__main__":
    '''Collect metadata and DeletionID results to get detection stats on the YKOC data'''
    # Systematic-name -> standard-name overrides.
    # NOTE(review): this map is never referenced below -- dead data or a
    # missing remap step; confirm.
    hardcode_name_remap = {
        "YCR061W":"TVS1",
        "YCR100C":"EMA35",
        "YFR045W":"MRX20",
        "YIR035C":"NRE1",
        "YNR062C":"PUL3",
        "YNR063W":"PUL4",
        "YER156C":"MYG1",
        "YMR087W":"PDL32",
        "YLR050C":"EMA19",
        "YMR279C":"ATR2",
        "YMR102C":"LAF1",
        "YMR111C":"EUC1",
        "YMR130W":"DPI35",
        "YJR039W":"MLO127",
        "YJR061W":"MNN14",
        "YGR053C":"MCO32",
        "YKR023W":"RQT4",
        "PET10":"PLN1"
    }
    # Get params
    args = getParams()
    WINDOW = args.window
    orf2bed = parse_gff(args.features_gff)
    OLINES = []
    # Parse metadata
    reader = open(args.cdt_fn, 'r')#, encoding='utf-8')
    for line in reader:
        # Skip the CDT header row.
        if(line.find("YORF")==0):
            continue
        tokens = line.strip().split('\t')
        ERS = tokens[0]
        STD = tokens[1]
        # Fall back to a dummy interval for genes missing from the GFF.
        COORD = orf2bed.get(STD,("chrZ",0,1))
        # Build BedGraph filename
        # Pileup BedGraph in CDT interval, then normalize
        # NOTE(review): despite the two comments above, no bedgraph is
        # read here; each output row just marks the feature start/stop
        # positions (1) inside the window (0 elsewhere).
        EXPAND = expand_coord(COORD,WINDOW)
        VALUES = [ 1 if(i==COORD[1] or i==COORD[2]) else 0 for i in range(EXPAND[0],EXPAND[1])]
        OLINES.append( "%s\t%s\t%s" % (ERS, STD, '\t'.join([str(i) for i in VALUES]) ))
    reader.close()
    # Write CDT header
    sys.stdout.write("\t".join([ "YORF", "NAME"]) + "\t" + \
        "\t".join([ str(i) for i in range(WINDOW)]) + "\n")
    # Write Output by gene length
    sys.stdout.write("\n".join(OLINES))
|
from datetime import timedelta
from flask import Flask, flash, redirect, render_template, request, session, abort, url_for
import models as dbHandler
import os
import nltk
import io
import operator
from magpie import Magpie
import csv
# Keyword-extraction model used by the disease prediction flow
# (re-instantiated with trained weights inside prediction()).
magpie = Magpie()
import speech_recognition as sr
# The Flask application object all routes below attach to.
app = Flask(__name__)
@app.route("/")
def home():
    """Entry point: unauthenticated users go to login, others to chat."""
    # Guard clause: require a logged-in session.
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    # Seed the conversation with the bot's greeting before showing chat.
    chatlist = [['sys', 'Hi,I am Prognosis'], ['sysm', 'Let us hear your problems']]
    session['chatlist'] = chatlist
    return redirect(url_for('chat'))  # first page to display
@app.route("/chat")
def chat():
    """Render the chat page with the session's conversation history."""
    # (The unused loop counter `i = 0` from the original was removed.)
    chatlist = session.get('chatlist')
    username = session.get('username')
    return render_template('chat.html', chatlist=chatlist, username=username)
@app.route("/correctprediction/<int:disid>")
def correctprediction(disid):
    # Mark the disease prediction record as confirmed, then return to
    # the profile page.
    dbHandler.predic_correct(disid)
    return redirect(url_for('profile'))
@app.route("/cured/<int:disid>")
def cured(disid):
    """Mark the disease record as cured, then return to the profile page.

    Fix: the original ``def cured(disid)`` was missing the trailing
    colon, which is a SyntaxError.
    """
    dbHandler.cure(disid)
    return redirect(url_for('profile'))
@app.route("/profile", methods=['POST', 'GET'])
def profile():
    """Show the logged-in user's details and disease history."""
    username = session.get('username')
    userdata = dbHandler.useralldetail(username)
    disdata = dbHandler.disdetails(username)
    # Debug output (converted from Python 2 print statements, which are
    # SyntaxErrors under Python 3).
    print(username)
    print(userdata)
    print(disdata)
    userdata = list(userdata[0])
    # NOTE(review): this branch looks inverted -- it reports
    # "not availabe" when columns 6/7 DO hold data. Logic preserved
    # as-is; confirm the intent before changing it.
    if not (userdata[6] is None and userdata[7] is None):
        print("not availabe")
    else:
        print(userdata[3])
    if userdata[7] is None:
        print("not availabe")
    else:
        print(userdata[7])
    return render_template('profile.html', userdata=userdata, disdata=disdata)
@app.route("/testreports", methods=['POST', 'GET'])
def testreports():
    """Record blood-pressure, sugar or blood-test results for the user.

    GET renders the form; POST stores the submitted report (the
    ``showform`` field selects the report type) and re-renders the form.
    """
    if request.method == 'POST':
        select = request.form['showform']
        username = session.get('username')
        if select == 'bloodpressure':
            # (The Python 2 debug statement `print "bcd"` was dropped.)
            systolic = request.form['syst']
            diastolic = request.form['dias']
            dbHandler.insertbp(username, systolic, diastolic)
        elif select == 'sugartest':
            fbs = request.form['fbs']
            ppbs = request.form['ppbs']
            dbHandler.insertsugar(username, fbs, ppbs)
        elif select == 'bloodtest':
            rbc = request.form['rbc']
            wbc = request.form['wbc']
            tc = request.form['TC']
            neutro = request.form['neutro']
            limph = request.form['limph']
            eucino = request.form['eucino']
            monocite = request.form['monocite']
            platelet = request.form['platelet']
            dbHandler.insertbloodtest(username, rbc, wbc, tc, neutro,
                                      limph, eucino, monocite, platelet)
        # Always respond, even for an unrecognized form type -- the
        # original fell through and returned None, which Flask turns
        # into a 500 error.
        return render_template('testreports.html')
    return render_template('testreports.html')
@app.route("/speech")
def speech():
    """Capture a spoken symptom description and feed it to the predictor.

    Records from the microphone, transcribes with Google Speech
    Recognition, appends the transcript to the chat, writes the
    recognized symptom terms to medterm.txt and redirects to
    /prediction. On recognition failure the user is sent back to chat.
    """
    # Record Audio
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Say something!")
        audio = r.listen(source)
        print("Finished recording")
    # Speech recognition using Google Speech Recognition
    try:
        # For testing purposes this uses the default API key; to use
        # another key, call r.recognize_google(audio, key="...").
        # Fix: the original called recognize_google twice (two remote
        # requests) and discarded the result of speak.lstrip('u')
        # (strings are immutable, so that call was a no-op).
        transcript = str(r.recognize_google(audio))
        print("You said: " + transcript)
        chatlist = session.get('chatlist')
        chatlist.append(['user', transcript])
        print(chatlist)
        tokens = nltk.word_tokenize(transcript)
        print(tokens)
        # Keep only tokens that appear in the known-symptom vocabulary.
        # The original leaked the output file handle and re-scanned
        # symtom.txt once per token; read it once into a set instead.
        with open('symtom.txt', 'r') as known:
            symptom_words = set()
            for line in known:
                symptom_words.update(line.split())
        with open('medterm.txt', 'w') as out:
            for token in tokens:
                if token in symptom_words:
                    out.write(token + '\n')
        session['chatlist'] = chatlist
        session['count'] = 5
        return redirect(url_for('prediction'))
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
    # Fix: the original returned None on the exception paths, which
    # Flask reports as a 500; fall back to the chat page instead.
    return redirect(url_for('chat'))
@app.route("/prediction",methods=['POST','GET'])
def prediction():
    # Interactive symptom Q&A loop: each POST appends the user's answer,
    # re-scores candidate diseases with magpie over the accumulated
    # symptom terms, and either asks about another symptom or (after the
    # countdown reaches 0) reports the top three diseases.
    # NOTE(review): this file is Python 2 (print statements, csv in 'rb'
    # mode). The nesting below was reconstructed from a source with
    # damaged indentation -- verify against the original file.
    chatlist=session.get('chatlist')
    count=session.get('count')
    if request.method=='POST':
        message=request.form['message']
        #symlist=message.split(',')
        chatlist.append(['user',message])
        # A 'yes' answer confirms the previously asked symptom: record
        # it in medterm.txt and count down the remaining questions.
        if message=='yes':
            count=count-1
            symptom = session.get('sym')
            data = []
            with open('medterm.txt','a') as myfile:
                myfile.write(symptom + "\n")
            with open('medterm.txt','r') as myfile:
                for i in myfile.readlines():
                    data.append(i)
        # Load the label set: one disease name per disease.csv row.
        diseases = []
        for d in csv.reader(open('disease.csv',"rb")):
            # str(d[0:1]) yields "['name']"; strip the list repr down
            # to the bare name.
            d_tra=str(d[0:1])
            d_tra=d_tra[2:-2]
            diseases.append(d_tra)
        # Re-instantiate magpie with the trained model and the labels
        # (shadows the module-level magpie).
        magpie = Magpie(
            keras_model='ModelSave/my/model/here.h5',
            word2vec_model='ModelSave/my/embeddings/here',
            scaler='ModelSave/my/scaler/here',
            labels=diseases
        )
        dictionary={}
        dict1={'influenza':0}
        data = []
        with io.open('medterm.txt', encoding='latin-1') as myfile:
            for i in myfile.readlines():
                data.append(i)
        # Sum the per-disease scores over every recorded symptom line.
        for i in range(len(data)):
            dictionary= magpie.predict_from_text(data[i])
            dictionary=dict(dictionary)
            #dictionary.sort()
            dict1 = {key: dict1.get(key, 0) + dictionary.get(key, 0) for key in set(dict1) | set(dictionary)}
        #sorted_dict1 = sorted(dict1.items(), key=operator.itemgetter(0),reverse=True)
        # Sort (disease, score) pairs by descending aggregate score.
        items = [(v, k) for k, v in dict1.items()]
        items.sort()
        items.reverse()
        items = [(k, v) for v, k in items]
        print items
        # Countdown exhausted: report the top three candidates and
        # persist them for the user.
        if count==0:
            count=count-1
            output=[list(items[0]),list(items[1]),list(items[2])]
            outitem=['sys','You have a high chance of '+output[0][0]+'\n'+'other probable diseases are '+output[1][0] +','+ output[2][0]]
            chatlist.append(outitem)
            session['disease']=[output[0][0],output[1][0],output[2][0]]
            username=session.get('username')
            dbHandler.insertdisease(username,output[0][0])
            dbHandler.insertdisease(username,output[1][0])
            dbHandler.insertdisease(username,output[2][0])
            return render_template('chat.html',chatlist=chatlist,username=username)
        import random
        # Otherwise sample 3 not-yet-recorded symptoms from each of the
        # top five candidate diseases and ask about one of the 15 at
        # random.
        sym=[]
        for (dis,v) in items[:5]:
            dislist=[]
            diseaselist=[]
            with open("diseasefiles/%02s.csv"%dis) as csvfile:
                for row in csvfile:
                    dislist.append(row)
            # Exclude symptoms the user has already confirmed.
            diseaselist=list(set(dislist)-set(data))
            #print diseaselist
            for i in range(3):
                secure_random = random.SystemRandom()
                sym.append(secure_random.choice(diseaselist))
        srandom = random.randint(0,14)
        print srandom
        print sym
        chatlist.append(['sys',"Do you have ?\n" + sym[srandom]])
        session['sym']=sym[srandom]
        session['chatlist']=chatlist
        session['count']=count
    username=session.get('username')
    return render_template('chat.html',chatlist=chatlist,username=username)
@app.route("/register",methods=['POST','GET'])
def register():
    """Render the sign-up form; on POST, create the account when the two
    password fields match, otherwise fall through and re-show the form."""
    if request.method=='POST':
        form = request.form
        chosen_username = form['username']
        first_password = form['password']
        repeated_password = form['<PASSWORD>']
        contact_email = form['email']
        contact_phone = form['phone']
        user_age = form['age']
        if first_password == repeated_password:
            dbHandler.insertUser(chosen_username, first_password, contact_email, contact_phone, user_age)
            return render_template('index.html')
        # Password mismatch: log it and re-render the registration form below.
        print("register")
    return render_template('register.html')
@app.route("/login",methods=['POST', 'GET'])
def login():
    """Authenticate a user.

    POST: look the submitted username up in the stored user table and, on a
    password match, mark the session as logged in; always finishes via home()
    so flashed error messages are displayed. GET: render the login page.
    """
    if request.method=='POST':
        username = request.form['username']
        password = request.form['password']
        # retrieveUsers() yields (username, password) pairs; index by name.
        udic = dict(dbHandler.retrieveUsers())
        # SECURITY FIX: the old `print udic` dumped every user's plaintext
        # password to the server log and has been removed.
        if username not in udic:
            print("no user")
            flash('user not found!')
        elif password == udic[username]:
            # SECURITY NOTE: passwords are stored and compared in plaintext;
            # they should be hashed (e.g. with a salted KDF) at registration.
            print("loggedin")
            session['logged_in'] = True
            session['username'] = username
        else:
            print("wrong pass")
            flash('wrong password!')
        return home()
    else:
        return render_template('index.html')
@app.route("/logout")
def logout():
    """Drop the login state from the session and send the user back to /login."""
    for stale_key in ('logged_in', 'username'):
        session.pop(stale_key, None)
    return redirect(url_for('login'))
@app.before_request
def make_session_permanent():
    """Before every request: cap the session lifetime at 30 minutes and mark
    the current session as permanent so that lifetime applies to it."""
    app.permanent_session_lifetime = timedelta(minutes=30)
    session.permanent = True
if __name__ == "__main__":
    # Fresh random secret key on every start — note this invalidates all
    # session cookies issued by a previous run of the server.
    app.secret_key = os.urandom(12)
    # Development server on port 8000 with the interactive debugger enabled.
    app.run(port=8000,debug = True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.